From fabbc29b08590f879c549a4b0af6fda81376e6c7 Mon Sep 17 00:00:00 2001 From: Chad Phillips Date: Wed, 13 Dec 2023 16:09:49 -0500 Subject: [PATCH 001/223] add new commands and simple command completion --- examples/chatbot_with_streaming.py | 152 +++++++++++++++++++++++------ 1 file changed, 124 insertions(+), 28 deletions(-) diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py index 757bddee..09a135f1 100644 --- a/examples/chatbot_with_streaming.py +++ b/examples/chatbot_with_streaming.py @@ -6,6 +6,7 @@ import sys import argparse import logging +import readline from mistralai.client import MistralClient from mistralai.models.chat_completion import ChatMessage @@ -16,49 +17,118 @@ "mistral-medium", ] DEFAULT_MODEL = "mistral-small" - +DEFAULT_TEMPERATURE = 0.7 LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s' +COMMAND_LIST = [ + "/new", + "/help", + "/model", + "/system", + "/temperature", + "/config", + "/quit", + "/exit", +] logger = logging.getLogger('chatbot') +def completer(text, state): + buffer = readline.get_line_buffer() + if not buffer.startswith(text): + return None + + options = [command for command in COMMAND_LIST if command.startswith(text)] + if state < len(options): + return options[state] + else: + return None + + +readline.set_completer(completer) +# Remove all delimiters to ensure completion only at the beginning of the line +readline.set_completer_delims('') +# Enable tab completion +readline.parse_and_bind('tab: complete') + + class ChatBot: - def __init__(self, api_key, model, system_message=None): + def __init__(self, api_key, model, system_message=None, temperature=DEFAULT_TEMPERATURE): self.client = MistralClient(api_key=api_key) self.model = model + self.temperature = temperature self.system_message = system_message def opening_instructions(self): print(""" To chat: type your message and hit enter -To start a new chat: type /new -To exit: type /exit, /quit, or hit CTRL+C +To start a new chat: /new +To 
switch model: /model +To switch system message: /system +To switch temperature: /temperature +To see current config: /config +To exit: /exit, /quit, or hit CTRL+C +To see this help: /help """) def new_chat(self): + print("") + print(f"Starting new chat with model: {self.model}, temperature: {self.temperature}") + print("") self.messages = [] if self.system_message: self.messages.append(ChatMessage(role="system", content=self.system_message)) - def check_exit(self, content): - if content.lower().strip() in ["/exit", "/quit"]: - self.exit() - - def check_new_chat(self, content): - if content.lower().strip() in ["/new"]: - print("") - print("Starting new chat...") - print("") + def switch_model(self, input): + model = self.get_arguments(input) + if model in MODEL_LIST: + self.model = model + logger.info(f"Switching model: {model}") + else: + logger.error(f"Invalid model name: {model}") + + def switch_system_message(self, input): + system_message = self.get_arguments(input) + if system_message: + self.system_message = system_message + logger.info(f"Switching system message: {system_message}") self.new_chat() - return True - return False + else: + logger.error(f"Invalid system message: {system_message}") + + def switch_temperature(self, input): + temperature = self.get_arguments(input) + try: + temperature = float(temperature) + if temperature < 0 or temperature > 1: + raise ValueError + self.temperature = temperature + logger.info(f"Switching temperature: {temperature}") + except ValueError: + logger.error(f"Invalid temperature: {temperature}") + + def show_config(self): + print("") + print(f"Current model: {self.model}") + print(f"Current temperature: {self.temperature}") + print(f"Current system message: {self.system_message}") + print("") + + def collect_user_input(self): + print("") + return input("YOU: ") def run_inference(self, content): + print("") + print("MISTRAL:") + print("") + self.messages.append(ChatMessage(role="user", content=content)) 
assistant_response = "" + logger.debug(f"Running inference with model: {self.model}, temperature: {self.temperature}") logger.debug(f"Sending messages: {self.messages}") - for chunk in self.client.chat_stream(model=self.model, messages=self.messages): + for chunk in self.client.chat_stream(model=self.model, temperature=self.temperature, messages=self.messages): response = chunk.choices[0].delta.content if response is not None: print(response, end="", flush=True) @@ -70,21 +140,45 @@ def run_inference(self, content): self.messages.append(ChatMessage(role="assistant", content=assistant_response)) logger.debug(f"Current messages: {self.messages}") - def start(self): + def get_command(self, input): + return input.split()[0].strip() + + def get_arguments(self, input): + try: + return " ".join(input.split()[1:]) + except IndexError: + return "" + def is_command(self, input): + return self.get_command(input) in COMMAND_LIST + + def execute_command(self, input): + command = self.get_command(input) + if command in ["/exit", "/quit"]: + self.exit() + elif command == "/help": + self.opening_instructions() + elif command == "/new": + self.new_chat() + elif command == "/model": + self.switch_model(input) + elif command == "/system": + self.switch_system_message(input) + elif command == "/temperature": + self.switch_temperature(input) + elif command == "/config": + self.show_config() + + def start(self): self.opening_instructions() self.new_chat() - while True: try: - print("") - content = input("YOU: ") - self.check_exit(content) - if not self.check_new_chat(content): - print("") - print("MISTRAL:") - print("") - self.run_inference(content) + input = self.collect_user_input() + if self.is_command(input): + self.execute_command(input) + else: + self.run_inference(input) except KeyboardInterrupt: self.exit() @@ -104,6 +198,8 @@ def exit(self): help="Model for chat inference. Choices are %(choices)s. 
Defaults to %(default)s") parser.add_argument("-s", "--system-message", help="Optional system message to prepend.") + parser.add_argument("-t", "--temperature", type=float, default=DEFAULT_TEMPERATURE, + help="Optional temperature for chat inference. Defaults to %(default)s") parser.add_argument("-d", "--debug", action="store_true", help="Enable debug logging") args = parser.parse_args() @@ -119,7 +215,7 @@ def exit(self): ch.setFormatter(formatter) logger.addHandler(ch) - logger.debug(f"Starting chatbot with model: {args.model}") + logger.debug(f"Starting chatbot with model: {args.model}, temperature: {args.temperature}, system message: {args.system_message}") - bot = ChatBot(args.api_key, args.model, args.system_message) + bot = ChatBot(args.api_key, args.model, args.system_message, args.temperature) bot.start() From c66faa7b56c5c51f1e5a17c49ce742697f443a51 Mon Sep 17 00:00:00 2001 From: Chad Phillips Date: Wed, 13 Dec 2023 16:23:33 -0500 Subject: [PATCH 002/223] formatting fixes --- examples/chatbot_with_streaming.py | 80 +++++++++++++++++++++--------- 1 file changed, 56 insertions(+), 24 deletions(-) diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py index 82f22ebe..72f006fd 100644 --- a/examples/chatbot_with_streaming.py +++ b/examples/chatbot_with_streaming.py @@ -18,7 +18,7 @@ ] DEFAULT_MODEL = "mistral-small" DEFAULT_TEMPERATURE = 0.7 -LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s' +LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s" COMMAND_LIST = [ "/new", "/help", @@ -49,20 +49,23 @@ def completer(text, state): readline.set_completer(completer) # Remove all delimiters to ensure completion only at the beginning of the line -readline.set_completer_delims('') +readline.set_completer_delims("") # Enable tab completion -readline.parse_and_bind('tab: complete') +readline.parse_and_bind("tab: complete") class ChatBot: - def __init__(self, api_key, model, system_message=None, temperature=DEFAULT_TEMPERATURE): + 
def __init__( + self, api_key, model, system_message=None, temperature=DEFAULT_TEMPERATURE + ): self.client = MistralClient(api_key=api_key) self.model = model self.temperature = temperature self.system_message = system_message def opening_instructions(self): - print(""" + print( + """ To chat: type your message and hit enter To start a new chat: /new To switch model: /model @@ -71,15 +74,20 @@ def opening_instructions(self): To see current config: /config To exit: /exit, /quit, or hit CTRL+C To see this help: /help -""") +""" + ) def new_chat(self): print("") - print(f"Starting new chat with model: {self.model}, temperature: {self.temperature}") + print( + f"Starting new chat with model: {self.model}, temperature: {self.temperature}" + ) print("") self.messages = [] if self.system_message: - self.messages.append(ChatMessage(role="system", content=self.system_message)) + self.messages.append( + ChatMessage(role="system", content=self.system_message) + ) def switch_model(self, input): model = self.get_arguments(input) @@ -128,9 +136,13 @@ def run_inference(self, content): self.messages.append(ChatMessage(role="user", content=content)) assistant_response = "" - logger.debug(f"Running inference with model: {self.model}, temperature: {self.temperature}") + logger.debug( + f"Running inference with model: {self.model}, temperature: {self.temperature}" + ) logger.debug(f"Sending messages: {self.messages}") - for chunk in self.client.chat_stream(model=self.model, temperature=self.temperature, messages=self.messages): + for chunk in self.client.chat_stream( + model=self.model, temperature=self.temperature, messages=self.messages + ): response = chunk.choices[0].delta.content if response is not None: print(response, end="", flush=True) @@ -139,7 +151,9 @@ def run_inference(self, content): print("", flush=True) if assistant_response: - self.messages.append(ChatMessage(role="assistant", content=assistant_response)) + self.messages.append( + ChatMessage(role="assistant", 
content=assistant_response) + ) logger.debug(f"Current messages: {self.messages}") def get_command(self, input): @@ -191,18 +205,34 @@ def exit(self): if __name__ == "__main__": - - parser = argparse.ArgumentParser(description="A simple chatbot using the Mistral API") - parser.add_argument("--api-key", default=os.environ.get("MISTRAL_API_KEY"), - help="Mistral API key. Defaults to environment variable MISTRAL_API_KEY") - parser.add_argument("-m", "--model", choices=MODEL_LIST, - default=DEFAULT_MODEL, - help="Model for chat inference. Choices are %(choices)s. Defaults to %(default)s") - parser.add_argument("-s", "--system-message", - help="Optional system message to prepend.") - parser.add_argument("-t", "--temperature", type=float, default=DEFAULT_TEMPERATURE, - help="Optional temperature for chat inference. Defaults to %(default)s") - parser.add_argument("-d", "--debug", action="store_true", help="Enable debug logging") + parser = argparse.ArgumentParser( + description="A simple chatbot using the Mistral API" + ) + parser.add_argument( + "--api-key", + default=os.environ.get("MISTRAL_API_KEY"), + help="Mistral API key. Defaults to environment variable MISTRAL_API_KEY", + ) + parser.add_argument( + "-m", + "--model", + choices=MODEL_LIST, + default=DEFAULT_MODEL, + help="Model for chat inference. Choices are %(choices)s. Defaults to %(default)s", + ) + parser.add_argument( + "-s", "--system-message", help="Optional system message to prepend." + ) + parser.add_argument( + "-t", + "--temperature", + type=float, + default=DEFAULT_TEMPERATURE, + help="Optional temperature for chat inference. 
Defaults to %(default)s", + ) + parser.add_argument( + "-d", "--debug", action="store_true", help="Enable debug logging" + ) args = parser.parse_args() @@ -217,7 +247,9 @@ def exit(self): ch.setFormatter(formatter) logger.addHandler(ch) - logger.debug(f"Starting chatbot with model: {args.model}, temperature: {args.temperature}, system message: {args.system_message}") + logger.debug( + f"Starting chatbot with model: {args.model}, temperature: {args.temperature}, system message: {args.system_message}" + ) bot = ChatBot(args.api_key, args.model, args.system_message, args.temperature) bot.start() From b522bf088a65ae5cfe189059b7d3cd066f48615b Mon Sep 17 00:00:00 2001 From: Chad Phillips Date: Wed, 13 Dec 2023 16:27:55 -0500 Subject: [PATCH 003/223] more formatting fixes --- examples/chatbot_with_streaming.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py index 72f006fd..fce2d8c0 100644 --- a/examples/chatbot_with_streaming.py +++ b/examples/chatbot_with_streaming.py @@ -4,8 +4,8 @@ import argparse import logging -import readline import os +import readline import sys from mistralai.client import MistralClient @@ -248,7 +248,9 @@ def exit(self): logger.addHandler(ch) logger.debug( - f"Starting chatbot with model: {args.model}, temperature: {args.temperature}, system message: {args.system_message}" + f"Starting chatbot with model: {args.model}, " + f"temperature: {args.temperature}, " + f"system message: {args.system_message}" ) bot = ChatBot(args.api_key, args.model, args.system_message, args.temperature) From 41fc6589d0a042c5619dd64b0522963c4b823980 Mon Sep 17 00:00:00 2001 From: lionelchg Date: Thu, 14 Dec 2023 12:29:17 +0800 Subject: [PATCH 004/223] Add MISTRAL_API_KEY env variable requirement in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 73ad3185..4e94e881 100644 --- a/README.md +++ b/README.md @@ 
-28,7 +28,7 @@ poetry install ## Run examples -You can run the examples in the `examples/` directory using `poetry run` or by entering the virtual environment using `poetry shell`. +You can run the examples in the `examples/` directory using `poetry run` or by entering the virtual environment using `poetry shell`. You should have a `MISTRAL_API_KEY` environment variable defined containing your API key to run those examples. ### Using poetry run From 4b2ad72bda199eff827eb3477bb2e5708f670bcb Mon Sep 17 00:00:00 2001 From: Ram Rachum Date: Thu, 14 Dec 2023 01:36:11 -0800 Subject: [PATCH 005/223] Fix exception causes in async_client.py --- src/mistralai/async_client.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index c0dffc62..0ea36721 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -201,15 +201,15 @@ async def _request( try: json_response: Dict[str, Any] = await response.json() - except JSONDecodeError: + except JSONDecodeError as e: raise MistralAPIException.from_aio_response( response, message=f"Failed to decode json body: {await response.text()}" - ) + ) from e except aiohttp.ClientPayloadError as e: raise MistralAPIException.from_aio_response( response, message=f"An unexpected error occurred while receiving the response: {e}", - ) + ) from e self._logger.debug(f"JSON response: {json_response}") self._check_response(json_response, dict(response.headers), response.status) From 0d652a75db267d7ef2a00e254886578fe9575425 Mon Sep 17 00:00:00 2001 From: Anthony Luo Date: Thu, 14 Dec 2023 21:58:36 -0600 Subject: [PATCH 006/223] Add API Key Setup to README --- README.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 73ad3185..3feb122a 100644 --- a/README.md +++ b/README.md @@ -27,9 +27,21 @@ poetry install ``` ## Run examples - You can run the examples in the `examples/` directory using 
`poetry run` or by entering the virtual environment using `poetry shell`. +### API Key Setup +Running the examples requires a Mistral AI API key. + +1. Get your own Mistral API Key: https://docs.mistral.ai/#api-access +2. Set your Mistral API Key as an environment variable. You only need to do this once. +```bash +# set Mistral API Key (using zsh for example) +$ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv + +# reload the environment (or just quit and open a new terminal) +$ source ~/.zshenv +``` + ### Using poetry run ```bash From 8cfcaa1294fbcb2d7615139419710f5f501c6ffe Mon Sep 17 00:00:00 2001 From: Bam4d Date: Mon, 18 Dec 2023 16:34:53 +0000 Subject: [PATCH 007/223] few README lints --- README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index e49b344e..22967f91 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ -This client is inspired from [cohere-python](https://github.com/cohere-ai/cohere-python) - # Mistral Python Client +This client is inspired from [cohere-python](https://github.com/cohere-ai/cohere-python) + You can use the Mistral Python client to interact with the Mistral AI API. ## Installing @@ -31,10 +31,12 @@ poetry install You can run the examples in the `examples/` directory using `poetry run` or by entering the virtual environment using `poetry shell`. ### API Key Setup + Running the examples requires a Mistral AI API key. -1. Get your own Mistral API Key: https://docs.mistral.ai/#api-access -2. Set your Mistral API Key as an environment variable. You only need to do this once. +1. Get your own Mistral API Key: +2. Set your Mistral API Key as an environment variable. You only need to do this once. 
+ ```bash # set Mistral API Key (using zsh for example) $ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv From 1536fad7d8ec090dda2773fe38470233fce813b4 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Tue, 19 Dec 2023 08:03:17 +0000 Subject: [PATCH 008/223] updating client to use httpx --- poetry.lock | 598 ++++++----------------------------- pyproject.toml | 4 +- src/mistralai/client.py | 153 +++++---- src/mistralai/client_base.py | 25 +- src/mistralai/exceptions.py | 15 +- 5 files changed, 218 insertions(+), 577 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8717942a..e6597b02 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,114 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. - -[[package]] -name = "aiohttp" -version = "3.9.1" -description = "Async http client/server framework (asyncio)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "aiohttp-3.9.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e1f80197f8b0b846a8d5cf7b7ec6084493950d0882cc5537fb7b96a69e3c8590"}, - {file = "aiohttp-3.9.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72444d17777865734aa1a4d167794c34b63e5883abb90356a0364a28904e6c0"}, - {file = "aiohttp-3.9.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b05d5cbe9dafcdc733262c3a99ccf63d2f7ce02543620d2bd8db4d4f7a22f83"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c4fa235d534b3547184831c624c0b7c1e262cd1de847d95085ec94c16fddcd5"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:289ba9ae8e88d0ba16062ecf02dd730b34186ea3b1e7489046fc338bdc3361c4"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bff7e2811814fa2271be95ab6e84c9436d027a0e59665de60edf44e529a42c1f"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:81b77f868814346662c96ab36b875d7814ebf82340d3284a31681085c051320f"}, - {file = "aiohttp-3.9.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b9c7426923bb7bd66d409da46c41e3fb40f5caf679da624439b9eba92043fa6"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8d44e7bf06b0c0a70a20f9100af9fcfd7f6d9d3913e37754c12d424179b4e48f"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22698f01ff5653fe66d16ffb7658f582a0ac084d7da1323e39fd9eab326a1f26"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ca7ca5abfbfe8d39e653870fbe8d7710be7a857f8a8386fc9de1aae2e02ce7e4"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:8d7f98fde213f74561be1d6d3fa353656197f75d4edfbb3d94c9eb9b0fc47f5d"}, - {file = "aiohttp-3.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5216b6082c624b55cfe79af5d538e499cd5f5b976820eac31951fb4325974501"}, - {file = "aiohttp-3.9.1-cp310-cp310-win32.whl", hash = "sha256:0e7ba7ff228c0d9a2cd66194e90f2bca6e0abca810b786901a569c0de082f489"}, - {file = "aiohttp-3.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:c7e939f1ae428a86e4abbb9a7c4732bf4706048818dfd979e5e2839ce0159f23"}, - {file = "aiohttp-3.9.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:df9cf74b9bc03d586fc53ba470828d7b77ce51b0582d1d0b5b2fb673c0baa32d"}, - {file = "aiohttp-3.9.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ecca113f19d5e74048c001934045a2b9368d77b0b17691d905af18bd1c21275e"}, - {file = "aiohttp-3.9.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8cef8710fb849d97c533f259103f09bac167a008d7131d7b2b0e3a33269185c0"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bea94403a21eb94c93386d559bce297381609153e418a3ffc7d6bf772f59cc35"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:91c742ca59045dce7ba76cab6e223e41d2c70d79e82c284a96411f8645e2afff"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6c93b7c2e52061f0925c3382d5cb8980e40f91c989563d3d32ca280069fd6a87"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee2527134f95e106cc1653e9ac78846f3a2ec1004cf20ef4e02038035a74544d"}, - {file = "aiohttp-3.9.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11ff168d752cb41e8492817e10fb4f85828f6a0142b9726a30c27c35a1835f01"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b8c3a67eb87394386847d188996920f33b01b32155f0a94f36ca0e0c635bf3e3"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c7b5d5d64e2a14e35a9240b33b89389e0035e6de8dbb7ffa50d10d8b65c57449"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:69985d50a2b6f709412d944ffb2e97d0be154ea90600b7a921f95a87d6f108a2"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:c9110c06eaaac7e1f5562caf481f18ccf8f6fdf4c3323feab28a93d34cc646bd"}, - {file = "aiohttp-3.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737e69d193dac7296365a6dcb73bbbf53bb760ab25a3727716bbd42022e8d7a"}, - {file = "aiohttp-3.9.1-cp311-cp311-win32.whl", hash = "sha256:4ee8caa925aebc1e64e98432d78ea8de67b2272252b0a931d2ac3bd876ad5544"}, - {file = "aiohttp-3.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:a34086c5cc285be878622e0a6ab897a986a6e8bf5b67ecb377015f06ed316587"}, - {file = "aiohttp-3.9.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f800164276eec54e0af5c99feb9494c295118fc10a11b997bbb1348ba1a52065"}, - {file = "aiohttp-3.9.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:500f1c59906cd142d452074f3811614be04819a38ae2b3239a48b82649c08821"}, - {file = "aiohttp-3.9.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:0b0a6a36ed7e164c6df1e18ee47afbd1990ce47cb428739d6c99aaabfaf1b3af"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69da0f3ed3496808e8cbc5123a866c41c12c15baaaead96d256477edf168eb57"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:176df045597e674fa950bf5ae536be85699e04cea68fa3a616cf75e413737eb5"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b796b44111f0cab6bbf66214186e44734b5baab949cb5fb56154142a92989aeb"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f27fdaadce22f2ef950fc10dcdf8048407c3b42b73779e48a4e76b3c35bca26c"}, - {file = "aiohttp-3.9.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcb6532b9814ea7c5a6a3299747c49de30e84472fa72821b07f5a9818bce0f66"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:54631fb69a6e44b2ba522f7c22a6fb2667a02fd97d636048478db2fd8c4e98fe"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4b4c452d0190c5a820d3f5c0f3cd8a28ace48c54053e24da9d6041bf81113183"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:cae4c0c2ca800c793cae07ef3d40794625471040a87e1ba392039639ad61ab5b"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:565760d6812b8d78d416c3c7cfdf5362fbe0d0d25b82fed75d0d29e18d7fc30f"}, - {file = "aiohttp-3.9.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54311eb54f3a0c45efb9ed0d0a8f43d1bc6060d773f6973efd90037a51cd0a3f"}, - {file = "aiohttp-3.9.1-cp312-cp312-win32.whl", hash = "sha256:85c3e3c9cb1d480e0b9a64c658cd66b3cfb8e721636ab8b0e746e2d79a7a9eed"}, - {file = "aiohttp-3.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:11cb254e397a82efb1805d12561e80124928e04e9c4483587ce7390b3866d213"}, - {file = 
"aiohttp-3.9.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8a22a34bc594d9d24621091d1b91511001a7eea91d6652ea495ce06e27381f70"}, - {file = "aiohttp-3.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:598db66eaf2e04aa0c8900a63b0101fdc5e6b8a7ddd805c56d86efb54eb66672"}, - {file = "aiohttp-3.9.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2c9376e2b09895c8ca8b95362283365eb5c03bdc8428ade80a864160605715f1"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41473de252e1797c2d2293804e389a6d6986ef37cbb4a25208de537ae32141dd"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c5857612c9813796960c00767645cb5da815af16dafb32d70c72a8390bbf690"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffcd828e37dc219a72c9012ec44ad2e7e3066bec6ff3aaa19e7d435dbf4032ca"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:219a16763dc0294842188ac8a12262b5671817042b35d45e44fd0a697d8c8361"}, - {file = "aiohttp-3.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f694dc8a6a3112059258a725a4ebe9acac5fe62f11c77ac4dcf896edfa78ca28"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bcc0ea8d5b74a41b621ad4a13d96c36079c81628ccc0b30cfb1603e3dfa3a014"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:90ec72d231169b4b8d6085be13023ece8fa9b1bb495e4398d847e25218e0f431"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:cf2a0ac0615842b849f40c4d7f304986a242f1e68286dbf3bd7a835e4f83acfd"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:0e49b08eafa4f5707ecfb321ab9592717a319e37938e301d462f79b4e860c32a"}, - {file = "aiohttp-3.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:2c59e0076ea31c08553e868cec02d22191c086f00b44610f8ab7363a11a5d9d8"}, - {file = "aiohttp-3.9.1-cp38-cp38-win32.whl", hash = "sha256:4831df72b053b1eed31eb00a2e1aff6896fb4485301d4ccb208cac264b648db4"}, - {file = "aiohttp-3.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:3135713c5562731ee18f58d3ad1bf41e1d8883eb68b363f2ffde5b2ea4b84cc7"}, - {file = "aiohttp-3.9.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cfeadf42840c1e870dc2042a232a8748e75a36b52d78968cda6736de55582766"}, - {file = "aiohttp-3.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:70907533db712f7aa791effb38efa96f044ce3d4e850e2d7691abd759f4f0ae0"}, - {file = "aiohttp-3.9.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cdefe289681507187e375a5064c7599f52c40343a8701761c802c1853a504558"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7481f581251bb5558ba9f635db70908819caa221fc79ee52a7f58392778c636"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:49f0c1b3c2842556e5de35f122fc0f0b721334ceb6e78c3719693364d4af8499"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d406b01a9f5a7e232d1b0d161b40c05275ffbcbd772dc18c1d5a570961a1ca4"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d8e4450e7fe24d86e86b23cc209e0023177b6d59502e33807b732d2deb6975f"}, - {file = "aiohttp-3.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c0266cd6f005e99f3f51e583012de2778e65af6b73860038b968a0a8888487a"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab221850108a4a063c5b8a70f00dd7a1975e5a1713f87f4ab26a46e5feac5a0e"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c88a15f272a0ad3d7773cf3a37cc7b7d077cbfc8e331675cf1346e849d97a4e5"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:237533179d9747080bcaad4d02083ce295c0d2eab3e9e8ce103411a4312991a0"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:02ab6006ec3c3463b528374c4cdce86434e7b89ad355e7bf29e2f16b46c7dd6f"}, - {file = "aiohttp-3.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04fa38875e53eb7e354ece1607b1d2fdee2d175ea4e4d745f6ec9f751fe20c7c"}, - {file = "aiohttp-3.9.1-cp39-cp39-win32.whl", hash = "sha256:82eefaf1a996060602f3cc1112d93ba8b201dbf5d8fd9611227de2003dddb3b7"}, - {file = "aiohttp-3.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:9b05d33ff8e6b269e30a7957bd3244ffbce2a7a35a81b81c382629b80af1a8bf"}, - {file = "aiohttp-3.9.1.tar.gz", hash = "sha256:8fc49a87ac269d4529da45871e2ffb6874e87779c3d0e2ccd813c0899221239d"}, -] - -[package.dependencies] -aiosignal = ">=1.1.2" -async-timeout = {version = ">=4.0,<5.0", markers = "python_version < \"3.11\""} -attrs = ">=17.3.0" -frozenlist = ">=1.1.1" -multidict = ">=4.5,<7.0" -yarl = ">=1.0,<2.0" - -[package.extras] -speedups = ["Brotli", "aiodns", "brotlicffi"] - -[[package]] -name = "aiosignal" -version = "1.3.1" -description = "aiosignal: a list of registered asynchronous callbacks" -optional = false -python-versions = ">=3.7" -files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, -] - -[package.dependencies] -frozenlist = ">=1.1.0" +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -125,33 +15,26 @@ files = [ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] -name = "async-timeout" -version = "4.0.3" -description = "Timeout context manager for asyncio programs" +name = "anyio" +version = "4.2.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, - {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, + {file = "anyio-4.2.0-py3-none-any.whl", hash = "sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee"}, + {file = "anyio-4.2.0.tar.gz", hash = "sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f"}, ] -[[package]] -name = "attrs" -version = "23.1.0" -description = "Classes Without Boilerplate" -optional = false -python-versions = ">=3.7" -files = [ - {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, - {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, -] +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] -cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] -dev = ["attrs[docs,tests]", "pre-commit"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] -tests = ["attrs[tests-no-zope]", "zope-interface"] -tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", 
"pytest-xdist[psutil]"] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.23)"] [[package]] name = "backoff" @@ -176,174 +59,75 @@ files = [ ] [[package]] -name = "charset-normalizer" -version = "3.3.2" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +name = "exceptiongroup" +version = "1.2.0" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, + {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.7" files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = 
"charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", 
hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = 
"sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, ] [[package]] -name = "frozenlist" -version = "1.4.0" -description = "A list-like structure which implements collections.abc.MutableSequence" +name = "httpcore" +version = "1.0.2" +description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"}, - {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"}, - {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"}, - {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"}, - {file = 
"frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"}, - {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"}, - {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"}, - {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"}, - {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"}, - {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"}, - {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"}, - {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"}, - {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"}, - {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"}, - {file = 
"frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"}, - {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"}, - {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"}, - {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"}, - {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"}, - {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"}, - {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = 
"sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"}, - {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"}, - {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"}, - {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"}, - {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, + {file = "httpcore-1.0.2-py3-none-any.whl", hash = "sha256:096cc05bca73b8e459a1fc3dcf585148f63e534eae4339559c9b8a8d6399acc7"}, + {file = "httpcore-1.0.2.tar.gz", hash = "sha256:9fc092e4799b26174648e54b74ed5f683132a464e95643b226e00c2ed2fa6535"}, ] +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.23.0)"] + +[[package]] +name = "httpx" +version = "0.25.2" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.25.2-py3-none-any.whl", hash = "sha256:a05d3d052d9b2dfce0e3896636467f8a5342fb2b902c819428e1ac65413ca118"}, + {file = "httpx-0.25.2.tar.gz", hash = "sha256:8b8fcaa0c8ea7b05edd69a094e63a2094c4efcb48129fb757361bc423c0ad9e8"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + [[package]] name = "idna" version = "3.6" @@ -355,89 +139,6 @@ files = [ {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, ] -[[package]] -name = "multidict" -version = "6.0.4" -description = "multidict implementation" -optional = false -python-versions = ">=3.7" -files = [ - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"}, - {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"}, - {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"}, - {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"}, - {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"}, - {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"}, - {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"}, - {file = 
"multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"}, - {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"}, - {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"}, - {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"}, - {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"}, - {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"}, - {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"}, - {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"}, - {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"}, - {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"}, - {file = 
"multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"}, - {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"}, - {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"}, - {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"}, - {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = 
"sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"}, - {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"}, - {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"}, - {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"}, - {file = 
"multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"}, - {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"}, - {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"}, - {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"}, - {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, -] - [[package]] name = "mypy" version = "1.7.1" @@ -691,27 +392,6 @@ files = [ [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" -[[package]] -name = "requests" -version = "2.31.0" -description = "Python HTTP for Humans." -optional = false -python-versions = ">=3.7" -files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - [[package]] name = "ruff" version = "0.1.7" @@ -738,6 +418,31 @@ files = [ {file = "ruff-0.1.7.tar.gz", hash = "sha256:dffd699d07abf54833e5f6cc50b85a6ff043715da8788c4a79bcd4ab4734d306"}, ] +[[package]] +name = "sniffio" +version = "1.3.0" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, + {file = "sniffio-1.3.0.tar.gz", hash = 
"sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, +] + +[[package]] +name = "tenacity" +version = "8.2.3" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, + {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, +] + +[package.extras] +doc = ["reno", "sphinx", "tornado (>=4.5)"] + [[package]] name = "tomli" version = "2.0.1" @@ -790,110 +495,7 @@ brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] -[[package]] -name = "yarl" -version = "1.9.4" -description = "Yet another URL library" -optional = false -python-versions = ">=3.7" -files = [ - {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e"}, - {file = "yarl-1.9.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2"}, - {file = "yarl-1.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551"}, - {file = "yarl-1.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455"}, - {file = 
"yarl-1.9.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b"}, - {file = "yarl-1.9.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541"}, - {file = "yarl-1.9.4-cp310-cp310-win32.whl", hash = "sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d"}, - {file = "yarl-1.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b"}, - {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099"}, - {file = "yarl-1.9.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c"}, - {file = "yarl-1.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8"}, - {file = 
"yarl-1.9.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42"}, - {file = "yarl-1.9.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958"}, - {file = "yarl-1.9.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98"}, - {file = "yarl-1.9.4-cp311-cp311-win32.whl", hash = "sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31"}, - {file = "yarl-1.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142"}, - {file = "yarl-1.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074"}, - {file = 
"yarl-1.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4"}, - {file = "yarl-1.9.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc"}, - {file = "yarl-1.9.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10"}, - {file = "yarl-1.9.4-cp312-cp312-win32.whl", hash = "sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7"}, - {file = "yarl-1.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984"}, - {file = "yarl-1.9.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f"}, - {file = 
"yarl-1.9.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd"}, - {file = "yarl-1.9.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead"}, - {file = "yarl-1.9.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434"}, - {file = "yarl-1.9.4-cp37-cp37m-win32.whl", hash = "sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749"}, - {file = "yarl-1.9.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2"}, - {file = "yarl-1.9.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be"}, - {file = 
"yarl-1.9.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f"}, - {file = "yarl-1.9.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130"}, - {file = "yarl-1.9.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be"}, - {file = "yarl-1.9.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3"}, - {file = "yarl-1.9.4-cp38-cp38-win32.whl", hash = "sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece"}, - {file = 
"yarl-1.9.4-cp38-cp38-win_amd64.whl", hash = "sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b"}, - {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27"}, - {file = "yarl-1.9.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1"}, - {file = "yarl-1.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136"}, - {file = "yarl-1.9.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec"}, - {file = "yarl-1.9.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c"}, - {file = 
"yarl-1.9.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0"}, - {file = "yarl-1.9.4-cp39-cp39-win32.whl", hash = "sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575"}, - {file = "yarl-1.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15"}, - {file = "yarl-1.9.4-py3-none-any.whl", hash = "sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad"}, - {file = "yarl-1.9.4.tar.gz", hash = "sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf"}, -] - -[package.dependencies] -idna = ">=2.0" -multidict = ">=4.0" - [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "3b915978f26bcf97d5ad13b1a38f7e344b7a960c1a21b5e253a0c5a6d9b7a879" +content-hash = "98310bf5aa9023609a63f0ec439e924498cad2b9032dc64fb5c56857fda09091" diff --git a/pyproject.toml b/pyproject.toml index 89e68b2d..ca38ce47 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,11 +24,11 @@ exclude = ["docs", "tests", "examples", "tools", "build"] [tool.poetry.dependencies] python = "^3.8" -aiohttp = "^3.9.1" backoff = "^2.2.1" orjson = "^3.9.10" -requests = "^2.31.0" pydantic = "^2.5.2" +httpx = "^0.25.2" +tenacity = "^8.2.3" [tool.poetry.group.dev.dependencies] diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 99ea8cf3..0e58a5bd 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -1,18 +1,18 @@ import os import posixpath +import time from json import JSONDecodeError -from typing import Any, Dict, Iterable, List, Optional, Union +from typing import Any, Dict, Iterable, Iterator, List, Optional, Union +import httpx import orjson -import requests -from requests import Response -from requests.adapters import HTTPAdapter -from urllib3.util.retry import Retry +from httpx import Client, Response from mistralai.client_base import ClientBase from mistralai.constants import ENDPOINT, 
RETRY_STATUS_CODES from mistralai.exceptions import ( MistralAPIException, + MistralAPIStatusException, MistralConnectionException, MistralException, ) @@ -39,14 +39,19 @@ def __init__( ): super().__init__(endpoint, api_key, max_retries, timeout) + self._client = Client(transport=httpx.HTTPTransport(retries=self._max_retries)) + + def __del__(self) -> None: + self._client.close() + def _request( self, method: str, json: Dict[str, Any], path: str, stream: bool = False, - params: Optional[Dict[str, Any]] = None, - ) -> Union[Response, Dict[str, Any]]: + attempt: int = 1, + ) -> Iterator[Dict[str, Any]]: headers = { "Authorization": f"Bearer {self._api_key}", "Content-Type": "application/json", @@ -54,49 +59,95 @@ def _request( url = posixpath.join(self._endpoint, path) - with requests.Session() as session: - retries = Retry( - total=self._max_retries, - backoff_factor=0.5, - allowed_methods=["POST", "GET"], - status_forcelist=RETRY_STATUS_CODES, - raise_on_status=False, - ) - session.mount("https://", HTTPAdapter(max_retries=retries)) - session.mount("https://", HTTPAdapter(max_retries=retries)) + self._logger.debug(f"Sending request: {method} {url} {json}") + + response: Response + try: if stream: - return session.request( - method, url, headers=headers, json=json, stream=True - ) + self._logger.debug("Streaming Response") + with self._client.stream( + method, + url, + headers=headers, + json=json, + timeout=self._timeout, + follow_redirects=True, + ) as response: + if response.status_code in RETRY_STATUS_CODES: + raise MistralAPIStatusException.from_response( + response, + message=f"Cannot stream response. 
Status: {response.status_code}", + ) + + self._check_response( + dict(response.headers), + response.status_code, + ) + + for line in response.iter_lines(): + self._logger.debug(f"Received line: {line}") - try: - response = session.request( + if line.startswith("data: "): + line = line[6:].strip() + if line != "[DONE]": + try: + json_streamed_response = orjson.loads(line) + except JSONDecodeError: + raise MistralAPIException.from_response( + response, + message=f"Failed to decode json body: {json_streamed_response}", + ) + + yield json_streamed_response + + else: + self._logger.debug("Non-Streaming Response") + response = self._client.request( method, url, headers=headers, json=json, timeout=self._timeout, - params=params, + follow_redirects=True, ) - except requests.exceptions.ConnectionError as e: - raise MistralConnectionException(str(e)) from e - except requests.exceptions.RequestException as e: - raise MistralException( - f"Unexpected exception ({e.__class__.__name__}): {e}" - ) from e - try: - json_response: Dict[str, Any] = response.json() - except JSONDecodeError: - raise MistralAPIException.from_response( - response, message=f"Failed to decode json body: {response.text}" + self._logger.debug(f"Received response: {response}") + + if response.status_code in RETRY_STATUS_CODES: + raise MistralAPIStatusException.from_response(response) + + try: + json_response: Dict[str, Any] = response.json() + except JSONDecodeError: + raise MistralAPIException.from_response( + response, + message=f"Failed to decode json body: {response.text}", + ) + + self._check_response( + dict(response.headers), response.status_code, json_response ) + yield json_response - self._check_response( - json_response, dict(response.headers), response.status_code - ) - return json_response + except httpx.ConnectError as e: + raise MistralConnectionException(str(e)) from e + except httpx.RequestError as e: + raise MistralException( + f"Unexpected exception ({e.__class__.__name__}): {e}" + ) from e 
+ except MistralAPIStatusException as e: + attempt += 1 + if attempt > self._max_retries: + raise MistralAPIStatusException.from_response( + response, message=str(e) + ) from e + backoff = 2.0**attempt # exponential backoff + time.sleep(backoff) + + # Retry as a generator + for r in self._request(method, json, path, stream=stream, attempt=attempt): + yield r def chat( self, @@ -108,7 +159,7 @@ def chat( random_seed: Optional[int] = None, safe_mode: bool = False, ) -> ChatCompletionResponse: - """ A chat endpoint that returns a single response. + """A chat endpoint that returns a single response. Args: model (str): model the name of the model to chat with, e.g. mistral-tiny @@ -135,11 +186,13 @@ def chat( safe_mode=safe_mode, ) - response = self._request("post", request, "v1/chat/completions") + single_response = self._request("post", request, "v1/chat/completions") - assert isinstance(response, dict), "Bad response from _request" + self._logger.debug(f"Received response: {single_response}") + for response in single_response: + return ChatCompletionResponse(**response) - return ChatCompletionResponse(**response) + raise MistralException("No response received") def chat_stream( self, @@ -151,7 +204,7 @@ def chat_stream( random_seed: Optional[int] = None, safe_mode: bool = False, ) -> Iterable[ChatCompletionStreamResponse]: - """ A chat endpoint that streams responses. + """A chat endpoint that streams responses. Args: model (str): model the name of the model to chat with, e.g. 
mistral-tiny @@ -181,18 +234,8 @@ def chat_stream( response = self._request("post", request, "v1/chat/completions", stream=True) - assert isinstance(response, Response), "Bad response from _request" - - for line in response.iter_lines(): - self._logger.debug(f"Received line: {line}") - if line == b"\n": - continue - - if line.startswith(b"data: "): - line = line[6:].strip() - if line != b"[DONE]": - json_response = orjson.loads(line) - yield ChatCompletionStreamResponse(**json_response) + for json_streamed_response in response: + yield ChatCompletionStreamResponse(**json_streamed_response) def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingResponse: """An embeddings endpoint that returns embeddings for a single, or batch of inputs diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index 9d82a535..95faf01f 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -1,10 +1,12 @@ import logging from abc import ABC from typing import Any, Dict, List, Optional +import os from mistralai.exceptions import MistralAPIException, MistralException from mistralai.models.chat_completion import ChatMessage +logging.basicConfig(format="%(asctime)s %(levelname)s %(name)s: %(message)s", level=os.getenv("LOG_LEVEL", "ERROR")) class ClientBase(ABC): def __init__( @@ -21,8 +23,8 @@ def __init__( self._api_key = api_key self._logger = logging.getLogger(__name__) - @staticmethod def _make_chat_request( + self, model: str, messages: List[ChatMessage], temperature: Optional[float] = None, @@ -48,19 +50,22 @@ def _make_chat_request( if stream is not None: request_data["stream"] = stream + self._logger.debug(f"Chat request: {request_data}") + return request_data def _check_response( - self, json_response: Dict[str, Any], headers: Dict[str, Any], status: int + self, headers: Dict[str, Any], status: int, json_response: Optional[Dict[str, Any]] = None ) -> None: - if "object" not in json_response: - raise 
MistralException(message=f"Unexpected response: {json_response}") - if "error" == json_response["object"]: # has errors - raise MistralAPIException( - message=json_response["message"], - http_status=status, - headers=headers, - ) + if json_response is not None: + if "object" not in json_response: + raise MistralException(message=f"Unexpected response: {json_response}") + if "error" == json_response["object"]: # has errors + raise MistralAPIException( + message=json_response["message"], + http_status=status, + headers=headers, + ) if 400 <= status < 500: raise MistralAPIException( message=f"Unexpected client error (status {status}): {json_response}", diff --git a/src/mistralai/exceptions.py b/src/mistralai/exceptions.py index 6cfc4ce6..9c9da816 100644 --- a/src/mistralai/exceptions.py +++ b/src/mistralai/exceptions.py @@ -2,8 +2,7 @@ from typing import Any, Dict, Optional -import aiohttp -from requests import Response +from httpx import Response class MistralException(Exception): @@ -45,19 +44,11 @@ def from_response( headers=dict(response.headers), ) - @classmethod - def from_aio_response( - cls, response: aiohttp.ClientResponse, message: Optional[str] = None - ) -> MistralAPIException: - return cls( - message=message, - http_status=response.status, - headers=dict(response.headers), - ) - def __repr__(self) -> str: return f"{self.__class__.__name__}(message={str(self)}, http_status={self.http_status})" +class MistralAPIStatusException(MistralAPIException): + """Returned when we receive a non-200 response from the API that we should retry""" class MistralConnectionException(MistralException): """Returned when the SDK can not reach the API server for any reason""" From 440e9c3eb21c5be5ae977affd4b28811a17f42e8 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Tue, 19 Dec 2023 09:21:29 +0000 Subject: [PATCH 009/223] async client now uses httpx, both clients are now very similar --- examples/async_list_models.py | 17 ++ poetry.lock | 27 +-- pyproject.toml | 2 - 
src/mistralai/async_client.py | 312 ++++++++++++++-------------------- src/mistralai/client.py | 35 ++-- 5 files changed, 161 insertions(+), 232 deletions(-) create mode 100644 examples/async_list_models.py diff --git a/examples/async_list_models.py b/examples/async_list_models.py new file mode 100644 index 00000000..f67e72ac --- /dev/null +++ b/examples/async_list_models.py @@ -0,0 +1,17 @@ +import os +import asyncio + +from mistralai.async_client import MistralAsyncClient + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = MistralAsyncClient(api_key=api_key) + + list_models_response = await client.list_models() + print(list_models_response) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/poetry.lock b/poetry.lock index e6597b02..25607067 100644 --- a/poetry.lock +++ b/poetry.lock @@ -36,17 +36,6 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (>=0.23)"] -[[package]] -name = "backoff" -version = "2.2.1" -description = "Function decoration for backoff and retry" -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, - {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, -] - [[package]] name = "certifi" version = "2023.11.17" @@ -429,20 +418,6 @@ files = [ {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, ] -[[package]] -name = "tenacity" -version = "8.2.3" -description = "Retry code until it succeeds" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tenacity-8.2.3-py3-none-any.whl", hash = 
"sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, - {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, -] - -[package.extras] -doc = ["reno", "sphinx", "tornado (>=4.5)"] - [[package]] name = "tomli" version = "2.0.1" @@ -498,4 +473,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "98310bf5aa9023609a63f0ec439e924498cad2b9032dc64fb5c56857fda09091" +content-hash = "2a97469581d78e2e5cb8c3f9183855f9671598ca31e9e77100d93f5daa40e12f" diff --git a/pyproject.toml b/pyproject.toml index ca38ce47..6215ed8f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,11 +24,9 @@ exclude = ["docs", "tests", "examples", "tools", "build"] [tool.poetry.dependencies] python = "^3.8" -backoff = "^2.2.1" orjson = "^3.9.10" pydantic = "^2.5.2" httpx = "^0.25.2" -tenacity = "^8.2.3" [tool.poetry.group.dev.dependencies] diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index 0ea36721..9f23990e 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -1,20 +1,23 @@ -import asyncio -import logging import os import posixpath import time -from collections import defaultdict from json import JSONDecodeError -from typing import Any, AsyncGenerator, Awaitable, Callable, Dict, List, Optional, Union - -import aiohttp -import backoff +from typing import Any, AsyncGenerator, Dict, List, Optional, Union +from httpx import ( + Response, + AsyncClient, + Limits, + AsyncHTTPTransport, + RequestError, + ConnectError, +) import orjson from mistralai.client_base import ClientBase from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES from mistralai.exceptions import ( MistralAPIException, + MistralAPIStatusException, MistralConnectionException, MistralException, ) @@ -27,136 +30,6 @@ from mistralai.models.models import ModelList -class AIOHTTPBackend: - """HTTP backend which handles retries, concurrency 
limiting and logging""" - - SLEEP_AFTER_FAILURE = defaultdict(lambda: 0.25, {429: 5.0}) - - _requester: Callable[..., Awaitable[aiohttp.ClientResponse]] - _semaphore: asyncio.Semaphore - _session: Optional[aiohttp.ClientSession] - - def __init__( - self, - max_concurrent_requests: int = 64, - max_retries: int = 5, - timeout: int = 120, - ): - self._logger = logging.getLogger(__name__) - self._timeout = timeout - self._max_retries = max_retries - self._session = None - self._max_concurrent_requests = max_concurrent_requests - - def build_aio_requester( - self, - ) -> Callable: # returns a function for retryable requests - @backoff.on_exception( - backoff.expo, - (aiohttp.ClientError, aiohttp.ClientResponseError), - max_tries=self._max_retries + 1, - max_time=self._timeout, - ) - async def make_request_fn( - session: aiohttp.ClientSession, *args: Any, **kwargs: Any - ) -> aiohttp.ClientResponse: - async with self._semaphore: # this limits total concurrency by the client - response = await session.request(*args, **kwargs) - if ( - response.status in RETRY_STATUS_CODES - ): # likely temporary, raise to retry - self._logger.info(f"Received status {response.status}, retrying...") - await asyncio.sleep(self.SLEEP_AFTER_FAILURE[response.status]) - response.raise_for_status() - - return response - - return make_request_fn - - async def request( - self, - url: str, - json: Optional[Dict[str, Any]] = None, - method: str = "post", - headers: Optional[Dict[str, Any]] = None, - session: Optional[aiohttp.ClientSession] = None, - params: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> aiohttp.ClientResponse: - session = session or await self.session() - self._logger.debug(f"Making request to {url} with content {json}") - - request_start = time.time() - try: - response = await self._requester( - session, - method, - url, - headers=headers, - json=json, - params=params, - **kwargs, - ) - except ( - aiohttp.ClientConnectionError - ) as e: # ensure the SDK user does not have 
to deal with knowing aiohttp - self._logger.debug( - f"Fatal connection error after {time.time()-request_start:.1f}s: {e}" - ) - raise MistralConnectionException(str(e)) from e - except ( - aiohttp.ClientResponseError - ) as e: # status 500 or something remains after retries - self._logger.debug( - f"Fatal ClientResponseError error after {time.time()-request_start:.1f}s: {e}" - ) - raise MistralConnectionException(str(e)) from e - except asyncio.TimeoutError as e: - self._logger.debug( - f"Fatal timeout error after {time.time()-request_start:.1f}s: {e}" - ) - raise MistralConnectionException("The request timed out") from e - except Exception as e: # Anything caught here should be added above - self._logger.debug( - f"Unexpected fatal error after {time.time()-request_start:.1f}s: {e}" - ) - raise MistralException( - f"Unexpected exception ({e.__class__.__name__}): {e}" - ) from e - - self._logger.debug( - f"Received response with status {response.status} after {time.time()-request_start:.1f}s" - ) - return response - - async def session(self) -> aiohttp.ClientSession: - if self._session is None: - self._session = aiohttp.ClientSession( - timeout=aiohttp.ClientTimeout(self._timeout), - connector=aiohttp.TCPConnector(limit=0), - ) - self._semaphore = asyncio.Semaphore(self._max_concurrent_requests) - self._requester = self.build_aio_requester() - return self._session - - async def close(self) -> None: - if self._session is not None: - await self._session.close() - self._session = None - - def __del__(self) -> None: - # https://stackoverflow.com/questions/54770360/how-can-i-wait-for-an-objects-del-to-finish-before-the-async-loop-closes - if self._session: - try: - loop = asyncio.get_event_loop() - if loop.is_running(): - loop.create_task(self.close()) - else: - loop.run_until_complete(self.close()) - except Exception: - pass - - class MistralAsyncClient(ClientBase): def __init__( self, @@ -168,14 +41,15 @@ def __init__( ): 
super().__init__(endpoint, api_key, max_retries, timeout) - self._backend = AIOHTTPBackend( - max_concurrent_requests=max_concurrent_requests, - max_retries=max_retries, + self._client = AsyncClient( + follow_redirects=True, timeout=timeout, + limits=Limits(max_connections=max_concurrent_requests), + transport=AsyncHTTPTransport(retries=max_retries), ) async def close(self) -> None: - await self._backend.close() + await self._client.aclose() async def _request( self, @@ -183,9 +57,8 @@ async def _request( json: Dict[str, Any], path: str, stream: bool = False, - params: Optional[Dict[str, Any]] = None, - ) -> Union[Dict[str, Any], aiohttp.ClientResponse]: - + attempt: int = 1, + ) -> AsyncGenerator[Dict[str, Any], None]: headers = { "Authorization": f"Bearer {self._api_key}", "Content-Type": "application/json", @@ -193,27 +66,93 @@ async def _request( url = posixpath.join(self._endpoint, path) - response = await self._backend.request( - url, json, method, headers, params=params - ) - if stream: - return response + self._logger.debug(f"Sending request: {method} {url} {json}") + + response: Response try: - json_response: Dict[str, Any] = await response.json() - except JSONDecodeError as e: - raise MistralAPIException.from_aio_response( - response, message=f"Failed to decode json body: {await response.text()}" - ) from e - except aiohttp.ClientPayloadError as e: - raise MistralAPIException.from_aio_response( - response, - message=f"An unexpected error occurred while receiving the response: {e}", + if stream: + self._logger.debug("Streaming Response") + async with self._client.stream( + method, + url, + headers=headers, + json=json, + ) as response: + if response.status_code in RETRY_STATUS_CODES: + raise MistralAPIStatusException.from_response( + response, + message=f"Cannot stream response. 
Status: {response.status_code}", + ) + + self._check_response( + dict(response.headers), + response.status_code, + ) + + async for line in response.aiter_lines(): + self._logger.debug(f"Received line: {line}") + + if line.startswith("data: "): + line = line[6:].strip() + if line != "[DONE]": + try: + json_streamed_response = orjson.loads(line) + except JSONDecodeError: + raise MistralAPIException.from_response( + response, + message=f"Failed to decode json body: {json_streamed_response}", + ) + + yield json_streamed_response + + else: + self._logger.debug("Non-Streaming Response") + response = await self._client.request( + method, + url, + headers=headers, + json=json, + ) + + self._logger.debug(f"Received response: {response}") + + if response.status_code in RETRY_STATUS_CODES: + raise MistralAPIStatusException.from_response(response) + + try: + json_response: Dict[str, Any] = response.json() + except JSONDecodeError: + raise MistralAPIException.from_response( + response, + message=f"Failed to decode json body: {response.text}", + ) + + self._check_response( + dict(response.headers), response.status_code, json_response + ) + yield json_response + + except ConnectError as e: + raise MistralConnectionException(str(e)) from e + except RequestError as e: + raise MistralException( + f"Unexpected exception ({e.__class__.__name__}): {e}" ) from e - - self._logger.debug(f"JSON response: {json_response}") - self._check_response(json_response, dict(response.headers), response.status) - return json_response + except MistralAPIStatusException as e: + attempt += 1 + if attempt > self._max_retries: + raise MistralAPIStatusException.from_response( + response, message=str(e) + ) from e + backoff = 2.0**attempt # exponential backoff + time.sleep(backoff) + + # Retry as a generator + async for r in self._request( + method, json, path, stream=stream, attempt=attempt + ): + yield r async def chat( self, @@ -225,7 +164,7 @@ async def chat( random_seed: Optional[int] = None, safe_mode: 
bool = False, ) -> ChatCompletionResponse: - """ A asynchronous chat endpoint that returns a single response. + """A asynchronous chat endpoint that returns a single response. Args: model (str): model the name of the model to chat with, e.g. mistral-tiny @@ -252,9 +191,12 @@ async def chat( safe_mode=safe_mode, ) - response = await self._request("post", request, "v1/chat/completions") - assert isinstance(response, dict), "Bad response from _request" - return ChatCompletionResponse(**response) + single_response = self._request("post", request, "v1/chat/completions") + + async for response in single_response: + return ChatCompletionResponse(**response) + + raise MistralException("No response received") async def chat_stream( self, @@ -266,7 +208,7 @@ async def chat_stream( random_seed: Optional[int] = None, safe_mode: bool = False, ) -> AsyncGenerator[ChatCompletionStreamResponse, None]: - """ An Asynchronous chat endpoint that streams responses. + """An Asynchronous chat endpoint that streams responses. Args: model (str): model the name of the model to chat with, e.g. mistral-tiny @@ -294,24 +236,12 @@ async def chat_stream( stream=True, safe_mode=safe_mode, ) - async_response = await self._request( + async_response = self._request( "post", request, "v1/chat/completions", stream=True ) - assert isinstance( - async_response, aiohttp.ClientResponse - ), "Bad response from _request" - - async with async_response as response: - async for line in response.content: - if line == b"\n": - continue - - if line.startswith(b"data: "): - line = line[6:].strip() - if line != b"[DONE]": - json_response = orjson.loads(line) - yield ChatCompletionStreamResponse(**json_response) + async for json_response in async_response: + yield ChatCompletionStreamResponse(**json_response) async def embeddings( self, model: str, input: Union[str, List[str]] @@ -327,9 +257,12 @@ async def embeddings( EmbeddingResponse: A response object containing the embeddings. 
""" request = {"model": model, "input": input} - response = await self._request("post", request, "v1/embeddings") - assert isinstance(response, dict), "Bad response from _request" - return EmbeddingResponse(**response) + single_response = self._request("post", request, "v1/embeddings") + + async for response in single_response: + return EmbeddingResponse(**response) + + raise MistralException("No response received") async def list_models(self) -> ModelList: """Returns a list of the available models @@ -337,6 +270,9 @@ async def list_models(self) -> ModelList: Returns: ModelList: A response object containing the list of models. """ - response = await self._request("get", {}, "v1/models") - assert isinstance(response, dict), "Bad response from _request" - return ModelList(**response) + single_response = self._request("get", {}, "v1/models") + + async for response in single_response: + return ModelList(**response) + + raise MistralException("No response received") diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 0e58a5bd..6bd3958f 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -4,9 +4,8 @@ from json import JSONDecodeError from typing import Any, Dict, Iterable, Iterator, List, Optional, Union -import httpx import orjson -from httpx import Client, Response +from httpx import Response, Client, HTTPTransport, RequestError, ConnectError from mistralai.client_base import ClientBase from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES @@ -39,7 +38,10 @@ def __init__( ): super().__init__(endpoint, api_key, max_retries, timeout) - self._client = Client(transport=httpx.HTTPTransport(retries=self._max_retries)) + self._client = Client( + follow_redirects=True, + timeout=self._timeout, + transport=HTTPTransport(retries=self._max_retries)) def __del__(self) -> None: self._client.close() @@ -71,8 +73,6 @@ def _request( url, headers=headers, json=json, - timeout=self._timeout, - follow_redirects=True, ) as response: if 
response.status_code in RETRY_STATUS_CODES: raise MistralAPIStatusException.from_response( @@ -108,8 +108,6 @@ def _request( url, headers=headers, json=json, - timeout=self._timeout, - follow_redirects=True, ) self._logger.debug(f"Received response: {response}") @@ -130,9 +128,9 @@ def _request( ) yield json_response - except httpx.ConnectError as e: + except ConnectError as e: raise MistralConnectionException(str(e)) from e - except httpx.RequestError as e: + except RequestError as e: raise MistralException( f"Unexpected exception ({e.__class__.__name__}): {e}" ) from e @@ -188,7 +186,6 @@ def chat( single_response = self._request("post", request, "v1/chat/completions") - self._logger.debug(f"Received response: {single_response}") for response in single_response: return ChatCompletionResponse(**response) @@ -249,9 +246,12 @@ def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingRespo EmbeddingResponse: A response object containing the embeddings. """ request = {"model": model, "input": input} - response = self._request("post", request, "v1/embeddings") - assert isinstance(response, dict), "Bad response from _request" - return EmbeddingResponse(**response) + singleton_response = self._request("post", request, "v1/embeddings") + + for response in singleton_response: + return EmbeddingResponse(**response) + + raise MistralException("No response received") def list_models(self) -> ModelList: """Returns a list of the available models @@ -259,6 +259,9 @@ def list_models(self) -> ModelList: Returns: ModelList: A response object containing the list of models. 
""" - response = self._request("get", {}, "v1/models") - assert isinstance(response, dict), "Bad response from _request" - return ModelList(**response) + singleton_response = self._request("get", {}, "v1/models") + + for response in singleton_response: + return ModelList(**response) + + raise MistralException("No response received") From eab8c5d5a6409a4e83541994dbbeec58dd780b35 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Tue, 19 Dec 2023 09:22:03 +0000 Subject: [PATCH 010/223] ruff fixes --- examples/async_list_models.py | 2 +- src/mistralai/async_client.py | 9 +++++---- src/mistralai/client.py | 6 +++--- src/mistralai/client_base.py | 2 +- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/examples/async_list_models.py b/examples/async_list_models.py index f67e72ac..da9fc449 100644 --- a/examples/async_list_models.py +++ b/examples/async_list_models.py @@ -1,5 +1,5 @@ -import os import asyncio +import os from mistralai.async_client import MistralAsyncClient diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index 9f23990e..2a6f03d2 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -3,15 +3,16 @@ import time from json import JSONDecodeError from typing import Any, AsyncGenerator, Dict, List, Optional, Union + +import orjson from httpx import ( - Response, AsyncClient, - Limits, AsyncHTTPTransport, - RequestError, ConnectError, + Limits, + RequestError, + Response, ) -import orjson from mistralai.client_base import ClientBase from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 6bd3958f..5a950fc6 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -5,7 +5,7 @@ from typing import Any, Dict, Iterable, Iterator, List, Optional, Union import orjson -from httpx import Response, Client, HTTPTransport, RequestError, ConnectError +from httpx import Client, ConnectError, HTTPTransport, RequestError, Response from 
mistralai.client_base import ClientBase from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES @@ -250,7 +250,7 @@ def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingRespo for response in singleton_response: return EmbeddingResponse(**response) - + raise MistralException("No response received") def list_models(self) -> ModelList: @@ -263,5 +263,5 @@ def list_models(self) -> ModelList: for response in singleton_response: return ModelList(**response) - + raise MistralException("No response received") diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index 95faf01f..1e8f80fd 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -1,7 +1,7 @@ import logging +import os from abc import ABC from typing import Any, Dict, List, Optional -import os from mistralai.exceptions import MistralAPIException, MistralException from mistralai.models.chat_completion import ChatMessage From cb319bf31a52acad635ff8593a9b8fb444fc7f57 Mon Sep 17 00:00:00 2001 From: Chad Phillips Date: Tue, 19 Dec 2023 12:28:31 -0500 Subject: [PATCH 011/223] more robust command completion --- examples/chatbot_with_streaming.py | 44 ++++++++++++++++++------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py index fce2d8c0..c14e5ddb 100644 --- a/examples/chatbot_with_streaming.py +++ b/examples/chatbot_with_streaming.py @@ -19,37 +19,45 @@ DEFAULT_MODEL = "mistral-small" DEFAULT_TEMPERATURE = 0.7 LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s" -COMMAND_LIST = [ - "/new", - "/help", - "/model", - "/system", - "/temperature", - "/config", - "/quit", - "/exit", -] +# A dictionary of all commands and their arguments, used for tab completion. 
+COMMAND_LIST = { + "/new": {}, + "/help": {}, + "/model": {model: {} for model in MODEL_LIST}, # Nested completions for models + "/system": {}, + "/temperature": {}, + "/config": {}, + "/quit": {}, + "/exit": {}, +} LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s" logger = logging.getLogger("chatbot") +def find_completions(command_dict, parts): + if not parts: + return command_dict.keys() + if parts[0] in command_dict: + return find_completions(command_dict[parts[0]], parts[1:]) + else: + return [cmd for cmd in command_dict if cmd.startswith(parts[0])] + + def completer(text, state): buffer = readline.get_line_buffer() - if not buffer.startswith(text): - return None + line_parts = buffer.lstrip().split(' ') + options = find_completions(COMMAND_LIST, line_parts[:-1]) - options = [command for command in COMMAND_LIST if command.startswith(text)] - if state < len(options): - return options[state] - else: + try: + return [option for option in options if option.startswith(line_parts[-1])][state] + except IndexError: return None readline.set_completer(completer) -# Remove all delimiters to ensure completion only at the beginning of the line -readline.set_completer_delims("") +readline.set_completer_delims(" ") # Enable tab completion readline.parse_and_bind("tab: complete") From f970fe09ac948700ab46bf88007f23edec31fb82 Mon Sep 17 00:00:00 2001 From: Chad Phillips Date: Tue, 19 Dec 2023 12:34:12 -0500 Subject: [PATCH 012/223] add basic exception handling --- examples/chatbot_with_streaming.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py index c14e5ddb..6654cb45 100644 --- a/examples/chatbot_with_streaming.py +++ b/examples/chatbot_with_streaming.py @@ -47,7 +47,7 @@ def find_completions(command_dict, parts): def completer(text, state): buffer = readline.get_line_buffer() - line_parts = buffer.lstrip().split(' ') + line_parts = buffer.lstrip().split(" ") 
options = find_completions(COMMAND_LIST, line_parts[:-1]) try: @@ -66,6 +66,8 @@ class ChatBot: def __init__( self, api_key, model, system_message=None, temperature=DEFAULT_TEMPERATURE ): + if not api_key: + raise ValueError("An API key must be provided to use the Mistral API.") self.client = MistralClient(api_key=api_key) self.model = model self.temperature = temperature @@ -261,5 +263,9 @@ def exit(self): f"system message: {args.system_message}" ) - bot = ChatBot(args.api_key, args.model, args.system_message, args.temperature) - bot.start() + try: + bot = ChatBot(args.api_key, args.model, args.system_message, args.temperature) + bot.start() + except Exception as e: + logger.error(e) + sys.exit(1) From f8d17d69d1cdcdbc94ef502be4dee63912ba7890 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Wed, 20 Dec 2023 19:11:59 +0000 Subject: [PATCH 013/223] cleaning up duplicated code --- src/mistralai/async_client.py | 56 ++++++--------------------- src/mistralai/client.py | 56 ++++++--------------------- src/mistralai/client_base.py | 72 +++++++++++++++++++++++++---------- 3 files changed, 73 insertions(+), 111 deletions(-) diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index 2a6f03d2..938d9821 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -4,7 +4,6 @@ from json import JSONDecodeError from typing import Any, AsyncGenerator, Dict, List, Optional, Union -import orjson from httpx import ( AsyncClient, AsyncHTTPTransport, @@ -15,7 +14,7 @@ ) from mistralai.client_base import ClientBase -from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES +from mistralai.constants import ENDPOINT from mistralai.exceptions import ( MistralAPIException, MistralAPIStatusException, @@ -73,42 +72,20 @@ async def _request( try: if stream: - self._logger.debug("Streaming Response") async with self._client.stream( method, url, headers=headers, json=json, ) as response: - if response.status_code in RETRY_STATUS_CODES: - raise 
MistralAPIStatusException.from_response( - response, - message=f"Cannot stream response. Status: {response.status_code}", - ) - - self._check_response( - dict(response.headers), - response.status_code, - ) + self._check_streaming_response(response) async for line in response.aiter_lines(): - self._logger.debug(f"Received line: {line}") - - if line.startswith("data: "): - line = line[6:].strip() - if line != "[DONE]": - try: - json_streamed_response = orjson.loads(line) - except JSONDecodeError: - raise MistralAPIException.from_response( - response, - message=f"Failed to decode json body: {json_streamed_response}", - ) - - yield json_streamed_response + json_streamed_response = self._process_line(line) + if json_streamed_response: + yield json_streamed_response else: - self._logger.debug("Non-Streaming Response") response = await self._client.request( method, url, @@ -116,23 +93,7 @@ async def _request( json=json, ) - self._logger.debug(f"Received response: {response}") - - if response.status_code in RETRY_STATUS_CODES: - raise MistralAPIStatusException.from_response(response) - - try: - json_response: Dict[str, Any] = response.json() - except JSONDecodeError: - raise MistralAPIException.from_response( - response, - message=f"Failed to decode json body: {response.text}", - ) - - self._check_response( - dict(response.headers), response.status_code, json_response - ) - yield json_response + yield self._check_response(response) except ConnectError as e: raise MistralConnectionException(str(e)) from e @@ -140,6 +101,11 @@ async def _request( raise MistralException( f"Unexpected exception ({e.__class__.__name__}): {e}" ) from e + except JSONDecodeError as e: + raise MistralAPIException.from_response( + response, + message=f"Failed to decode json body: {response.text}", + ) from e except MistralAPIStatusException as e: attempt += 1 if attempt > self._max_retries: diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 5a950fc6..f2b06e17 100644 --- 
a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -4,11 +4,10 @@ from json import JSONDecodeError from typing import Any, Dict, Iterable, Iterator, List, Optional, Union -import orjson from httpx import Client, ConnectError, HTTPTransport, RequestError, Response from mistralai.client_base import ClientBase -from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES +from mistralai.constants import ENDPOINT from mistralai.exceptions import ( MistralAPIException, MistralAPIStatusException, @@ -67,42 +66,20 @@ def _request( try: if stream: - self._logger.debug("Streaming Response") with self._client.stream( method, url, headers=headers, json=json, ) as response: - if response.status_code in RETRY_STATUS_CODES: - raise MistralAPIStatusException.from_response( - response, - message=f"Cannot stream response. Status: {response.status_code}", - ) - - self._check_response( - dict(response.headers), - response.status_code, - ) + self._check_streaming_response(response) for line in response.iter_lines(): - self._logger.debug(f"Received line: {line}") - - if line.startswith("data: "): - line = line[6:].strip() - if line != "[DONE]": - try: - json_streamed_response = orjson.loads(line) - except JSONDecodeError: - raise MistralAPIException.from_response( - response, - message=f"Failed to decode json body: {json_streamed_response}", - ) - - yield json_streamed_response + json_streamed_response = self._process_line(line) + if json_streamed_response: + yield json_streamed_response else: - self._logger.debug("Non-Streaming Response") response = self._client.request( method, url, @@ -110,23 +87,7 @@ def _request( json=json, ) - self._logger.debug(f"Received response: {response}") - - if response.status_code in RETRY_STATUS_CODES: - raise MistralAPIStatusException.from_response(response) - - try: - json_response: Dict[str, Any] = response.json() - except JSONDecodeError: - raise MistralAPIException.from_response( - response, - message=f"Failed to decode json body: 
{response.text}", - ) - - self._check_response( - dict(response.headers), response.status_code, json_response - ) - yield json_response + yield self._check_response(response) except ConnectError as e: raise MistralConnectionException(str(e)) from e @@ -134,6 +95,11 @@ def _request( raise MistralException( f"Unexpected exception ({e.__class__.__name__}): {e}" ) from e + except JSONDecodeError as e: + raise MistralAPIException.from_response( + response, + message=f"Failed to decode json body: {response.text}", + ) from e except MistralAPIStatusException as e: attempt += 1 if attempt > self._max_retries: diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index 1e8f80fd..569fb45d 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -1,12 +1,23 @@ import logging import os +import orjson +from httpx import Response from abc import ABC from typing import Any, Dict, List, Optional -from mistralai.exceptions import MistralAPIException, MistralException +from mistralai.constants import RETRY_STATUS_CODES +from mistralai.exceptions import ( + MistralAPIException, + MistralException, + MistralAPIStatusException, +) from mistralai.models.chat_completion import ChatMessage -logging.basicConfig(format="%(asctime)s %(levelname)s %(name)s: %(message)s", level=os.getenv("LOG_LEVEL", "ERROR")) +logging.basicConfig( + format="%(asctime)s %(levelname)s %(name)s: %(message)s", + level=os.getenv("LOG_LEVEL", "ERROR"), +) + class ClientBase(ABC): def __init__( @@ -54,25 +65,44 @@ def _make_chat_request( return request_data - def _check_response( - self, headers: Dict[str, Any], status: int, json_response: Optional[Dict[str, Any]] = None - ) -> None: - if json_response is not None: - if "object" not in json_response: - raise MistralException(message=f"Unexpected response: {json_response}") - if "error" == json_response["object"]: # has errors - raise MistralAPIException( - message=json_response["message"], - http_status=status, - 
headers=headers, - ) - if 400 <= status < 500: - raise MistralAPIException( - message=f"Unexpected client error (status {status}): {json_response}", - http_status=status, - headers=headers, + def _check_response_status_codes(self, response: Response) -> None: + if response.status_code in RETRY_STATUS_CODES: + raise MistralAPIStatusException.from_response( + response, + message=f"Cannot stream response. Status: {response.status_code}", + ) + elif 400 <= response.status_code < 500: + raise MistralAPIException.from_response( + response, + message=f"Cannot stream response. Status: {response.status_code}", ) - if status >= 500: + elif response.status_code >= 500: raise MistralException( - message=f"Unexpected server error (status {status}): {json_response}" + message=f"Unexpected server error (status {response.status_code})" ) + + def _check_streaming_response(self, response: Response) -> None: + self._check_response_status_codes(response) + + def _check_response(self, response: Response) -> Dict[str, Any]: + self._check_response_status_codes(response) + + json_response: Dict[str, Any] = response.json() + + if "object" not in json_response: + raise MistralException(message=f"Unexpected response: {json_response}") + if "error" == json_response["object"]: # has errors + raise MistralAPIException.from_response( + response, + message=json_response["message"], + ) + + return json_response + + def _process_line(self, line: str) -> Optional[Dict[str, Any]]: + if line.startswith("data: "): + line = line[6:].strip() + if line != "[DONE]": + json_streamed_response: Dict[str, Any] = orjson.loads(line) + return json_streamed_response + return None From 6463019f98b44609ce00512e91fb496a4c3b4720 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Wed, 20 Dec 2023 19:21:48 +0000 Subject: [PATCH 014/223] fix lint --- src/mistralai/client_base.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index 
569fb45d..a338bb82 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -1,15 +1,16 @@ import logging import os -import orjson -from httpx import Response from abc import ABC from typing import Any, Dict, List, Optional +import orjson +from httpx import Response + from mistralai.constants import RETRY_STATUS_CODES from mistralai.exceptions import ( MistralAPIException, - MistralException, MistralAPIStatusException, + MistralException, ) from mistralai.models.chat_completion import ChatMessage From 9b5d2c5878ebf1c08f4b89492a5e21b30cdd7f9a Mon Sep 17 00:00:00 2001 From: Bam4d Date: Thu, 21 Dec 2023 10:43:45 +0000 Subject: [PATCH 015/223] add accepts headers --- src/mistralai/async_client.py | 1 + src/mistralai/client.py | 1 + 2 files changed, 2 insertions(+) diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index 938d9821..81285a42 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -60,6 +60,7 @@ async def _request( attempt: int = 1, ) -> AsyncGenerator[Dict[str, Any], None]: headers = { + "Accept": "application/json", "Authorization": f"Bearer {self._api_key}", "Content-Type": "application/json", } diff --git a/src/mistralai/client.py b/src/mistralai/client.py index f2b06e17..7ba96fa4 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -54,6 +54,7 @@ def _request( attempt: int = 1, ) -> Iterator[Dict[str, Any]]: headers = { + "Accept": "application/json", "Authorization": f"Bearer {self._api_key}", "Content-Type": "application/json", } From d52560c62776cf0c5a2ef0418e11264c31cabe81 Mon Sep 17 00:00:00 2001 From: Ram Rachum Date: Wed, 20 Dec 2023 23:09:42 +0200 Subject: [PATCH 016/223] Add shebangs to all example scripts --- examples/async_chat_no_streaming.py | 2 ++ examples/async_chat_with_streaming.py | 2 ++ examples/async_embeddings.py | 2 ++ examples/async_list_models.py | 2 ++ examples/chat_no_streaming.py | 2 ++ examples/chat_with_streaming.py | 2 ++ 
examples/chatbot_with_streaming.py | 0 examples/embeddings.py | 2 ++ examples/list_models.py | 2 ++ 9 files changed, 16 insertions(+) mode change 100644 => 100755 examples/async_chat_no_streaming.py mode change 100644 => 100755 examples/async_chat_with_streaming.py mode change 100644 => 100755 examples/async_embeddings.py mode change 100644 => 100755 examples/async_list_models.py mode change 100644 => 100755 examples/chat_no_streaming.py mode change 100644 => 100755 examples/chat_with_streaming.py mode change 100644 => 100755 examples/chatbot_with_streaming.py mode change 100644 => 100755 examples/embeddings.py mode change 100644 => 100755 examples/list_models.py diff --git a/examples/async_chat_no_streaming.py b/examples/async_chat_no_streaming.py old mode 100644 new mode 100755 index 9c8be870..0eef8c3d --- a/examples/async_chat_no_streaming.py +++ b/examples/async_chat_no_streaming.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python + import asyncio import os diff --git a/examples/async_chat_with_streaming.py b/examples/async_chat_with_streaming.py old mode 100644 new mode 100755 index e239e039..1de9e745 --- a/examples/async_chat_with_streaming.py +++ b/examples/async_chat_with_streaming.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python + import asyncio import os diff --git a/examples/async_embeddings.py b/examples/async_embeddings.py old mode 100644 new mode 100755 index 0abaa134..a7ecd475 --- a/examples/async_embeddings.py +++ b/examples/async_embeddings.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python + import asyncio import os diff --git a/examples/async_list_models.py b/examples/async_list_models.py old mode 100644 new mode 100755 index da9fc449..b6de5d51 --- a/examples/async_list_models.py +++ b/examples/async_list_models.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python + import asyncio import os diff --git a/examples/chat_no_streaming.py b/examples/chat_no_streaming.py old mode 100644 new mode 100755 index e385773e..797b58d0 --- a/examples/chat_no_streaming.py +++ 
b/examples/chat_no_streaming.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python + import os from mistralai.client import MistralClient diff --git a/examples/chat_with_streaming.py b/examples/chat_with_streaming.py old mode 100644 new mode 100755 index 21a12c1e..bc818d3d --- a/examples/chat_with_streaming.py +++ b/examples/chat_with_streaming.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python + import os from mistralai.client import MistralClient diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py old mode 100644 new mode 100755 diff --git a/examples/embeddings.py b/examples/embeddings.py old mode 100644 new mode 100755 index 7d9dd2ec..ffde00ae --- a/examples/embeddings.py +++ b/examples/embeddings.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python + import os from mistralai.client import MistralClient diff --git a/examples/list_models.py b/examples/list_models.py old mode 100644 new mode 100755 index 9b5325ab..b21dcd10 --- a/examples/list_models.py +++ b/examples/list_models.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python + import os from mistralai.client import MistralClient From 2f731d7eba5a2184ff87f0c754a2a42bc4ff73e0 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Thu, 21 Dec 2023 19:50:09 +0000 Subject: [PATCH 017/223] added tests for endoints --- .github/workflows/build_publish.yaml | 5 + examples/async_chat_no_streaming.py | 0 examples/async_chat_with_streaming.py | 6 +- poetry.lock | 90 ++++++++++- pyproject.toml | 2 + tests/__init__.py | 0 tests/test_chat.py | 85 ++++++++++ tests/test_chat_async.py | 87 +++++++++++ tests/test_embedder.py | 72 +++++++++ tests/test_embedder_async.py | 77 ++++++++++ tests/test_list_models.py | 40 +++++ tests/test_list_models_async.py | 41 +++++ tests/utils.py | 213 ++++++++++++++++++++++++++ 13 files changed, 715 insertions(+), 3 deletions(-) mode change 100644 => 100755 examples/async_chat_no_streaming.py create mode 100644 tests/__init__.py create mode 100644 tests/test_chat.py create mode 100644 tests/test_chat_async.py create 
mode 100644 tests/test_embedder.py create mode 100644 tests/test_embedder_async.py create mode 100644 tests/test_list_models.py create mode 100644 tests/test_list_models_async.py create mode 100644 tests/utils.py diff --git a/.github/workflows/build_publish.yaml b/.github/workflows/build_publish.yaml index d9113945..6a64a7e7 100644 --- a/.github/workflows/build_publish.yaml +++ b/.github/workflows/build_publish.yaml @@ -45,6 +45,11 @@ jobs: run: | poetry run mypy . + # Tests + - name: Run Tests + run: | + poetry run pytest . + publish: if: startsWith(github.ref, 'refs/tags') runs-on: ubuntu-latest diff --git a/examples/async_chat_no_streaming.py b/examples/async_chat_no_streaming.py old mode 100644 new mode 100755 diff --git a/examples/async_chat_with_streaming.py b/examples/async_chat_with_streaming.py index e239e039..50a82bc4 100644 --- a/examples/async_chat_with_streaming.py +++ b/examples/async_chat_with_streaming.py @@ -12,10 +12,12 @@ async def main(): client = MistralAsyncClient(api_key=api_key) print("Chat response:") - async for chunk in client.chat_stream( + response = client.chat_stream( model=model, messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ): + ) + + async for chunk in response: if chunk.choices[0].delta.content is not None: print(chunk.choices[0].delta.content, end="") diff --git a/poetry.lock b/poetry.lock index 25607067..93296a94 100644 --- a/poetry.lock +++ b/poetry.lock @@ -47,6 +47,17 @@ files = [ {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, ] +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + [[package]] name = "exceptiongroup" version = "1.2.0" @@ -128,6 +139,17 @@ files = [ {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, ] +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + [[package]] name = "mypy" version = "1.7.1" @@ -245,6 +267,32 @@ files = [ {file = "orjson-3.9.10.tar.gz", hash = "sha256:9ebbdbd6a046c304b1845e96fbcc5559cd296b4dfd3ad2509e33c4d9ce07d6a1"}, ] +[[package]] +name = "packaging" +version = "23.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, +] + +[[package]] +name = "pluggy" +version = "1.3.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, + {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, +] + 
+[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + [[package]] name = "pydantic" version = "2.5.2" @@ -381,6 +429,46 @@ files = [ [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +[[package]] +name = "pytest" +version = "7.4.3" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, + {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.23.2" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-asyncio-0.23.2.tar.gz", hash = "sha256:c16052382554c7b22d48782ab3438d5b10f8cf7a4bdcae7f0f67f097d95beecc"}, + {file = "pytest_asyncio-0.23.2-py3-none-any.whl", hash = "sha256:ea9021364e32d58f0be43b91c6233fb8d2224ccef2398d6837559e587682808f"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + [[package]] name = "ruff" version = "0.1.7" @@ -473,4 +561,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "2a97469581d78e2e5cb8c3f9183855f9671598ca31e9e77100d93f5daa40e12f" +content-hash = 
"f3a7934d78683acd0133e3764488c41a52f22b8c596998ba89b3b58b5759473f" diff --git a/pyproject.toml b/pyproject.toml index 6215ed8f..43c1e23c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,8 @@ httpx = "^0.25.2" ruff = "^0.1.6" mypy = "^1.7.1" types-requests = "^2.31.0.10" +pytest = "^7.4.3" +pytest-asyncio = "^0.23.2" [build-system] requires = ["poetry-core"] diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/test_chat.py b/tests/test_chat.py new file mode 100644 index 00000000..fd3e9714 --- /dev/null +++ b/tests/test_chat.py @@ -0,0 +1,85 @@ +import pytest +import unittest.mock as mock +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatCompletionResponse, ChatMessage, ChatCompletionStreamResponse +from .utils import mock_response, mock_stream_response, mock_chat_response_payload, mock_chat_response_streaming_payload + +@pytest.fixture() +def client(): + client = MistralClient() + client._client = mock.MagicMock() + return client + + +class TestChat: + def test_chat(self, client): + client._client.request.return_value = mock_response( + 200, + mock_chat_response_payload(), + ) + + result = client.chat( + model="mistral-small", + messages=[ChatMessage(role="user", content="What is the best French cheese?")], + ) + + client._client.request.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/chat/completions", + headers={ + "Accept": "application/json", + "Authorization": "Bearer None", + "Content-Type": "application/json", + }, + json={'model': 'mistral-small', 'messages': [{'role': 'user', 'content': 'What is the best French cheese?'}], 'safe_prompt': False, 'stream': False}, + ) + + + assert isinstance( + result, ChatCompletionResponse + ), "Should return an ChatCompletionResponse" + assert len(result.choices) == 1 + assert result.choices[0].index == 0 + assert result.object == 
"chat.completion" + + + def test_chat_streaming(self, client): + client._client.stream.return_value = mock_stream_response( + 200, + mock_chat_response_streaming_payload(), + ) + + result = client.chat_stream( + model="mistral-small", + messages=[ChatMessage(role="user", content="What is the best French cheese?")], + ) + + results = list(result) + + client._client.stream.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/chat/completions", + headers={ + "Accept": "application/json", + "Authorization": "Bearer None", + "Content-Type": "application/json", + }, + json={'model': 'mistral-small', 'messages': [{'role': 'user', 'content': 'What is the best French cheese?'}], 'safe_prompt': False, 'stream': True}, + ) + + for i, result in enumerate(results): + if i == 0: + assert isinstance( + result, ChatCompletionStreamResponse + ), "Should return an ChatCompletionStreamResponse" + assert len(result.choices) == 1 + assert result.choices[0].index == 0 + assert result.choices[0].delta.role == "assistant" + else: + assert isinstance( + result, ChatCompletionStreamResponse + ), "Should return an ChatCompletionStreamResponse" + assert len(result.choices) == 1 + assert result.choices[0].index == i-1 + assert result.choices[0].delta.content == f"stream response {i-1}" + assert result.object == "chat.completion.chunk" diff --git a/tests/test_chat_async.py b/tests/test_chat_async.py new file mode 100644 index 00000000..c602fe0f --- /dev/null +++ b/tests/test_chat_async.py @@ -0,0 +1,87 @@ +import pytest +import unittest.mock as mock +from mistralai.async_client import MistralAsyncClient +from mistralai.models.chat_completion import ChatCompletionResponse, ChatMessage, ChatCompletionStreamResponse +from .utils import mock_response, mock_async_stream_response, mock_chat_response_payload, mock_chat_response_streaming_payload + +@pytest.fixture() +def client(): + client = MistralAsyncClient() + client._client = mock.AsyncMock() 
+ client._client.stream = mock.Mock() + return client + + +class TestAsyncChat: + @pytest.mark.asyncio + async def test_chat(self, client): + client._client.request.return_value = mock_response( + 200, + mock_chat_response_payload(), + ) + + result = await client.chat( + model="mistral-small", + messages=[ChatMessage(role="user", content="What is the best French cheese?")], + ) + + client._client.request.assert_awaited_once_with( + "post", + "https://api.mistral.ai/v1/chat/completions", + headers={ + "Accept": "application/json", + "Authorization": "Bearer None", + "Content-Type": "application/json", + }, + json={'model': 'mistral-small', 'messages': [{'role': 'user', 'content': 'What is the best French cheese?'}], 'safe_prompt': False, 'stream': False}, + ) + + + assert isinstance( + result, ChatCompletionResponse + ), "Should return an ChatCompletionResponse" + assert len(result.choices) == 1 + assert result.choices[0].index == 0 + assert result.object == "chat.completion" + + @pytest.mark.asyncio + async def test_chat_streaming(self, client): + client._client.stream.return_value = mock_async_stream_response( + 200, + mock_chat_response_streaming_payload(), + ) + + result = client.chat_stream( + model="mistral-small", + messages=[ChatMessage(role="user", content="What is the best French cheese?")], + ) + + results = [r async for r in result] + + client._client.stream.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/chat/completions", + headers={ + "Accept": "application/json", + "Authorization": "Bearer None", + "Content-Type": "application/json", + }, + json={'model': 'mistral-small', 'messages': [{'role': 'user', 'content': 'What is the best French cheese?'}], 'safe_prompt': False, 'stream': True}, + ) + + for i, result in enumerate(results): + if i == 0: + assert isinstance( + result, ChatCompletionStreamResponse + ), "Should return an 
ChatCompletionStreamResponse" + assert len(result.choices) == 1 + assert result.choices[0].index == 0 + assert result.choices[0].delta.role == "assistant" + else: + assert isinstance( + result, ChatCompletionStreamResponse + ), "Should return an ChatCompletionStreamResponse" + assert len(result.choices) == 1 + assert result.choices[0].index == i-1 + assert result.choices[0].delta.content == f"stream response {i-1}" + assert result.object == "chat.completion.chunk" diff --git a/tests/test_embedder.py b/tests/test_embedder.py new file mode 100644 index 00000000..963addd2 --- /dev/null +++ b/tests/test_embedder.py @@ -0,0 +1,72 @@ +import pytest +import unittest.mock as mock +from mistralai.client import MistralClient +from mistralai.models.embeddings import EmbeddingResponse +from .utils import mock_response, mock_embedding_response_payload + +@pytest.fixture() +def client(): + client = MistralClient() + client._client = mock.MagicMock() + return client + + +class TestEmbeddings: + def test_embeddings(self, client): + client._client.request.return_value = mock_response( + 200, + mock_embedding_response_payload(), + ) + + result = client.embeddings( + model="mistral-embed", + input="What is the best French cheese?", + ) + + client._client.request.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/embeddings", + headers={ + "Accept": "application/json", + "Authorization": "Bearer None", + "Content-Type": "application/json", + }, + json={"model": "mistral-embed", "input": "What is the best French cheese?"}, + ) + + assert isinstance( + result, EmbeddingResponse + ), "Should return an EmbeddingResponse" + assert len(result.data) == 1 + assert result.data[0].index == 0 + assert result.object == "list" + + def test_embeddings_batch(self, client): + client._client.request.return_value = mock_response( + 200, + mock_embedding_response_payload(batch_size=10), + ) + + result = client.embeddings( + model="mistral-embed", + 
input=["What is the best French cheese?"] * 10, + ) + + client._client.request.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/embeddings", + headers={ + "Accept": "application/json", + "Authorization": "Bearer None", + "Content-Type": "application/json", + }, + json={"model": "mistral-embed", "input": ["What is the best French cheese?"] * 10}, + ) + + assert isinstance( + result, EmbeddingResponse + ), "Should return an EmbeddingResponse" + assert len(result.data) == 10 + assert result.data[0].index == 0 + assert result.object == "list" + diff --git a/tests/test_embedder_async.py b/tests/test_embedder_async.py new file mode 100644 index 00000000..6be6b4ce --- /dev/null +++ b/tests/test_embedder_async.py @@ -0,0 +1,77 @@ +import pytest +import unittest.mock as mock +from mistralai.async_client import MistralAsyncClient +from mistralai.models.embeddings import EmbeddingResponse +from .utils import mock_response, mock_embedding_response_payload + + +@pytest.fixture() +def client(): + client = MistralAsyncClient() + client._client = mock.AsyncMock() + return client + + +class TestAsyncEmbeddings: + @pytest.mark.asyncio + async def test_embeddings(self, client): + client._client.request.return_value = mock_response( + 200, + mock_embedding_response_payload(), + ) + + result = await client.embeddings( + model="mistral-embed", + input="What is the best French cheese?", + ) + + client._client.request.assert_awaited_once_with( + "post", + "https://api.mistral.ai/v1/embeddings", + headers={ + "Accept": "application/json", + "Authorization": "Bearer None", + "Content-Type": "application/json", + }, + json={"model": "mistral-embed", "input": "What is the best French cheese?"}, + ) + + assert isinstance( + result, EmbeddingResponse + ), "Should return an EmbeddingResponse" + assert len(result.data) == 1 + assert result.data[0].index == 0 + assert result.object == "list" + + 
@pytest.mark.asyncio + async def test_embeddings_batch(self, client): + client._client.request.return_value = mock_response( + 200, + mock_embedding_response_payload(batch_size=10), + ) + + result = await client.embeddings( + model="mistral-embed", + input=["What is the best French cheese?"] * 10, + ) + + client._client.request.assert_awaited_once_with( + "post", + "https://api.mistral.ai/v1/embeddings", + headers={ + "Accept": "application/json", + "Authorization": "Bearer None", + "Content-Type": "application/json", + }, + json={ + "model": "mistral-embed", + "input": ["What is the best French cheese?"] * 10, + }, + ) + + assert isinstance( + result, EmbeddingResponse + ), "Should return an EmbeddingResponse" + assert len(result.data) == 10 + assert result.data[0].index == 0 + assert result.object == "list" diff --git a/tests/test_list_models.py b/tests/test_list_models.py new file mode 100644 index 00000000..0ae04100 --- /dev/null +++ b/tests/test_list_models.py @@ -0,0 +1,40 @@ +import unittest.mock as mock + +import pytest + +from mistralai.client import MistralClient +from mistralai.models.models import ModelList + +from .utils import mock_list_models_response_payload, mock_response + + +@pytest.fixture() +def client(): + client = MistralClient() + client._client = mock.MagicMock() + return client + + +class TestListModels: + def test_list_models(self, client): + client._client.request.return_value = mock_response( + 200, + mock_list_models_response_payload(), + ) + + result = client.list_models() + + client._client.request.assert_called_once_with( + "get", + "https://api.mistral.ai/v1/models", + headers={ + "Accept": "application/json", + "Authorization": "Bearer None", + "Content-Type": "application/json", + }, + json={} + ) + + assert isinstance(result, ModelList), "Should return an ModelList" + assert len(result.data) == 4 + assert result.object == "list" diff --git 
a/tests/test_list_models_async.py b/tests/test_list_models_async.py new file mode 100644 index 00000000..fb76e5bc --- /dev/null +++ b/tests/test_list_models_async.py @@ -0,0 +1,41 @@ +import unittest.mock as mock + +import pytest + +from mistralai.async_client import MistralAsyncClient +from mistralai.models.models import ModelList + +from .utils import mock_list_models_response_payload, mock_response + + +@pytest.fixture() +def client(): + client = MistralAsyncClient() + client._client = mock.AsyncMock() + return client + + +class TestAsyncListModels: + @pytest.mark.asyncio + async def test_list_models(self, client): + client._client.request.return_value = mock_response( + 200, + mock_list_models_response_payload(), + ) + + result = await client.list_models() + + client._client.request.assert_awaited_once_with( + "get", + "https://api.mistral.ai/v1/models", + headers={ + "Accept": "application/json", + "Authorization": "Bearer None", + "Content-Type": "application/json", + }, + json={} + ) + + assert isinstance(result, ModelList), "Should return an ModelList" + assert len(result.data) == 4 + assert result.object == "list" diff --git a/tests/utils.py b/tests/utils.py new file mode 100644 index 00000000..19bfed6b --- /dev/null +++ b/tests/utils.py @@ -0,0 +1,213 @@ +import orjson +import contextlib +from contextlib import _AsyncGeneratorContextManager +import unittest.mock as mock +from typing import List +from httpx import Response + + +@contextlib.contextmanager +def mock_stream_response(status_code: int, content: List[str]): + response = mock.Mock(Response) + response.status_code = status_code + response.iter_lines.return_value = iter(content) + yield response + + +@contextlib.asynccontextmanager +async def mock_async_stream_response(status_code: int, content: List[str]): + response = mock.Mock(Response) + response.status_code = status_code + + async def async_iter(content: List[str]): + for line in content: + yield line 
+ + response.aiter_lines.return_value = async_iter(content) + yield response + + +def mock_response( + status_code: int, content: str, is_json: bool = True +) -> mock.MagicMock: + response = mock.Mock(Response) + response.status_code = status_code + if is_json: + response.json = mock.MagicMock() + response.json.return_value = orjson.loads(content) + response.text = content + return response + + +def mock_list_models_response_payload() -> str: + return orjson.dumps( + { + "object": "list", + "data": [ + { + "id": "mistral-medium", + "object": "model", + "created": 1703186988, + "owned_by": "mistralai", + "root": None, + "parent": None, + "permission": [ + { + "id": "modelperm-15bebaf316264adb84b891bf06a84933", + "object": "model_permission", + "created": 1703186988, + "allow_create_engine": False, + "allow_sampling": True, + "allow_logprobs": False, + "allow_search_indices": False, + "allow_view": True, + "allow_fine_tuning": False, + "organization": "*", + "group": None, + "is_blocking": False, + } + ], + }, + { + "id": "mistral-small", + "object": "model", + "created": 1703186988, + "owned_by": "mistralai", + "root": None, + "parent": None, + "permission": [ + { + "id": "modelperm-d0dced5c703242fa862f4ca3f241c00e", + "object": "model_permission", + "created": 1703186988, + "allow_create_engine": False, + "allow_sampling": True, + "allow_logprobs": False, + "allow_search_indices": False, + "allow_view": True, + "allow_fine_tuning": False, + "organization": "*", + "group": None, + "is_blocking": False, + } + ], + }, + { + "id": "mistral-tiny", + "object": "model", + "created": 1703186988, + "owned_by": "mistralai", + "root": None, + "parent": None, + "permission": [ + { + "id": "modelperm-0e64e727c3a94f17b29f8895d4be2910", + "object": "model_permission", + "created": 1703186988, + "allow_create_engine": False, + "allow_sampling": True, + "allow_logprobs": False, + "allow_search_indices": False, + "allow_view": True, + "allow_fine_tuning": False, + "organization": 
"*", + "group": None, + "is_blocking": False, + } + ], + }, + { + "id": "mistral-embed", + "object": "model", + "created": 1703186988, + "owned_by": "mistralai", + "root": None, + "parent": None, + "permission": [ + { + "id": "modelperm-ebdff9046f524e628059447b5932e3ad", + "object": "model_permission", + "created": 1703186988, + "allow_create_engine": False, + "allow_sampling": True, + "allow_logprobs": False, + "allow_search_indices": False, + "allow_view": True, + "allow_fine_tuning": False, + "organization": "*", + "group": None, + "is_blocking": False, + } + ], + }, + ], + } + ) + + +def mock_embedding_response_payload(batch_size: int = 1) -> str: + return orjson.dumps( + { + "id": "embd-98c8c60e3fbf4fc49658eddaf447357c", + "object": "list", + "data": [ + { + "object": "embedding", + "embedding": [-0.018585205078125, 0.027099609375, 0.02587890625], + "index": 0, + } + ] + * batch_size, + "model": "mistral-embed", + "usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0}, + } + ).decode() + + +def mock_chat_response_payload(): + return orjson.dumps( + { + "id": "chat-98c8c60e3fbf4fc49658eddaf447357c", + "object": "chat.completion", + "created": 1703165682, + "choices": [ + { + "finish_reason": "stop", + "message": { + "role": "assistant", + "content": "What is the best French cheese?", + }, + "index": 0, + } + ], + "model": "mistral-small", + "usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0}, + } + ).decode() + + +def mock_chat_response_streaming_payload(): + return [ + 'data: {"id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e", "model": "mistral-small", "choices": [{"index": 0, "delta": {"role": "assistant"}, "finish_reason": null}]}\n\n', + *[ + "data: " + + orjson.dumps( + { + "id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e", + "object": "chat.completion.chunk", + "created": 1703168544, + "model": "mistral-small", + "choices": [ + { + "index": i, + "delta": {"content": f"stream response {i}"}, + "finish_reason": None, + } + 
], + } + ).decode() + + "\n\n" + for i in range(10) + ], + "data: [DONE]\n\n", + ] From 11dc643561e6fe67e0c5a47e6c315727c7ceded8 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Thu, 21 Dec 2023 19:53:09 +0000 Subject: [PATCH 018/223] add test to job name --- .github/workflows/build_publish.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_publish.yaml b/.github/workflows/build_publish.yaml index 6a64a7e7..af968beb 100644 --- a/.github/workflows/build_publish.yaml +++ b/.github/workflows/build_publish.yaml @@ -14,7 +14,7 @@ on: pull_request: jobs: - lint: + lint_and_test: runs-on: ubuntu-latest steps: From ff5670e2ea0f419b42323069a31740c149a0619c Mon Sep 17 00:00:00 2001 From: Bam4d Date: Thu, 21 Dec 2023 19:57:38 +0000 Subject: [PATCH 019/223] fixing linting --- .github/workflows/build_publish.yaml | 2 +- tests/test_chat.py | 48 ++++++++++++++++++++++------ tests/test_chat_async.py | 47 +++++++++++++++++++++------ tests/test_embedder.py | 13 +++++--- tests/test_embedder_async.py | 6 ++-- tests/test_list_models.py | 3 +- tests/test_list_models_async.py | 3 +- tests/utils.py | 14 ++++++-- 8 files changed, 103 insertions(+), 33 deletions(-) diff --git a/.github/workflows/build_publish.yaml b/.github/workflows/build_publish.yaml index af968beb..61d11b9e 100644 --- a/.github/workflows/build_publish.yaml +++ b/.github/workflows/build_publish.yaml @@ -53,7 +53,7 @@ jobs: publish: if: startsWith(github.ref, 'refs/tags') runs-on: ubuntu-latest - needs: lint + needs: lint_and_test steps: # Checkout the repository diff --git a/tests/test_chat.py b/tests/test_chat.py index fd3e9714..ef3d8620 100644 --- a/tests/test_chat.py +++ b/tests/test_chat.py @@ -1,8 +1,20 @@ -import pytest import unittest.mock as mock + +import pytest from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatCompletionResponse, ChatMessage, ChatCompletionStreamResponse -from .utils import mock_response, mock_stream_response, 
mock_chat_response_payload, mock_chat_response_streaming_payload +from mistralai.models.chat_completion import ( + ChatCompletionResponse, + ChatCompletionStreamResponse, + ChatMessage, +) + +from .utils import ( + mock_chat_response_payload, + mock_chat_response_streaming_payload, + mock_response, + mock_stream_response, +) + @pytest.fixture() def client(): @@ -20,7 +32,9 @@ def test_chat(self, client): result = client.chat( model="mistral-small", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[ + ChatMessage(role="user", content="What is the best French cheese?") + ], ) client._client.request.assert_called_once_with( @@ -31,10 +45,16 @@ def test_chat(self, client): "Authorization": "Bearer None", "Content-Type": "application/json", }, - json={'model': 'mistral-small', 'messages': [{'role': 'user', 'content': 'What is the best French cheese?'}], 'safe_prompt': False, 'stream': False}, + json={ + "model": "mistral-small", + "messages": [ + {"role": "user", "content": "What is the best French cheese?"} + ], + "safe_prompt": False, + "stream": False, + }, ) - assert isinstance( result, ChatCompletionResponse ), "Should return an ChatCompletionResponse" @@ -42,7 +62,6 @@ def test_chat(self, client): assert result.choices[0].index == 0 assert result.object == "chat.completion" - def test_chat_streaming(self, client): client._client.stream.return_value = mock_stream_response( 200, @@ -51,7 +70,9 @@ def test_chat_streaming(self, client): result = client.chat_stream( model="mistral-small", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[ + ChatMessage(role="user", content="What is the best French cheese?") + ], ) results = list(result) @@ -64,7 +85,14 @@ def test_chat_streaming(self, client): "Authorization": "Bearer None", "Content-Type": "application/json", }, - json={'model': 'mistral-small', 'messages': [{'role': 'user', 'content': 'What is the best French cheese?'}], 
'safe_prompt': False, 'stream': True}, + json={ + "model": "mistral-small", + "messages": [ + {"role": "user", "content": "What is the best French cheese?"} + ], + "safe_prompt": False, + "stream": True, + }, ) for i, result in enumerate(results): @@ -80,6 +108,6 @@ def test_chat_streaming(self, client): result, ChatCompletionStreamResponse ), "Should return an ChatCompletionStreamResponse" assert len(result.choices) == 1 - assert result.choices[0].index == i-1 + assert result.choices[0].index == i - 1 assert result.choices[0].delta.content == f"stream response {i-1}" assert result.object == "chat.completion.chunk" diff --git a/tests/test_chat_async.py b/tests/test_chat_async.py index c602fe0f..24c79db6 100644 --- a/tests/test_chat_async.py +++ b/tests/test_chat_async.py @@ -1,8 +1,20 @@ -import pytest import unittest.mock as mock + +import pytest from mistralai.async_client import MistralAsyncClient -from mistralai.models.chat_completion import ChatCompletionResponse, ChatMessage, ChatCompletionStreamResponse -from .utils import mock_response, mock_async_stream_response, mock_chat_response_payload, mock_chat_response_streaming_payload +from mistralai.models.chat_completion import ( + ChatCompletionResponse, + ChatCompletionStreamResponse, + ChatMessage, +) + +from .utils import ( + mock_async_stream_response, + mock_chat_response_payload, + mock_chat_response_streaming_payload, + mock_response, +) + @pytest.fixture() def client(): @@ -22,7 +34,9 @@ async def test_chat(self, client): result = await client.chat( model="mistral-small", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[ + ChatMessage(role="user", content="What is the best French cheese?") + ], ) client._client.request.assert_awaited_once_with( @@ -33,10 +47,16 @@ async def test_chat(self, client): "Authorization": "Bearer None", "Content-Type": "application/json", }, - json={'model': 'mistral-small', 'messages': [{'role': 'user', 'content': 'What is the best 
French cheese?'}], 'safe_prompt': False, 'stream': False}, + json={ + "model": "mistral-small", + "messages": [ + {"role": "user", "content": "What is the best French cheese?"} + ], + "safe_prompt": False, + "stream": False, + }, ) - assert isinstance( result, ChatCompletionResponse ), "Should return an ChatCompletionResponse" @@ -53,7 +73,9 @@ async def test_chat_streaming(self, client): result = client.chat_stream( model="mistral-small", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[ + ChatMessage(role="user", content="What is the best French cheese?") + ], ) results = [r async for r in result] @@ -66,7 +88,14 @@ async def test_chat_streaming(self, client): "Authorization": "Bearer None", "Content-Type": "application/json", }, - json={'model': 'mistral-small', 'messages': [{'role': 'user', 'content': 'What is the best French cheese?'}], 'safe_prompt': False, 'stream': True}, + json={ + "model": "mistral-small", + "messages": [ + {"role": "user", "content": "What is the best French cheese?"} + ], + "safe_prompt": False, + "stream": True, + }, ) for i, result in enumerate(results): @@ -82,6 +111,6 @@ async def test_chat_streaming(self, client): result, ChatCompletionStreamResponse ), "Should return an ChatCompletionStreamResponse" assert len(result.choices) == 1 - assert result.choices[0].index == i-1 + assert result.choices[0].index == i - 1 assert result.choices[0].delta.content == f"stream response {i-1}" assert result.object == "chat.completion.chunk" diff --git a/tests/test_embedder.py b/tests/test_embedder.py index 963addd2..3798e84c 100644 --- a/tests/test_embedder.py +++ b/tests/test_embedder.py @@ -1,8 +1,11 @@ -import pytest import unittest.mock as mock + +import pytest from mistralai.client import MistralClient from mistralai.models.embeddings import EmbeddingResponse -from .utils import mock_response, mock_embedding_response_payload + +from .utils import mock_embedding_response_payload, mock_response + 
@pytest.fixture() def client(): @@ -60,7 +63,10 @@ def test_embeddings_batch(self, client): "Authorization": "Bearer None", "Content-Type": "application/json", }, - json={"model": "mistral-embed", "input": ["What is the best French cheese?"] * 10}, + json={ + "model": "mistral-embed", + "input": ["What is the best French cheese?"] * 10, + }, ) assert isinstance( @@ -69,4 +75,3 @@ def test_embeddings_batch(self, client): assert len(result.data) == 10 assert result.data[0].index == 0 assert result.object == "list" - diff --git a/tests/test_embedder_async.py b/tests/test_embedder_async.py index 6be6b4ce..82261c7e 100644 --- a/tests/test_embedder_async.py +++ b/tests/test_embedder_async.py @@ -1,8 +1,10 @@ -import pytest import unittest.mock as mock + +import pytest from mistralai.async_client import MistralAsyncClient from mistralai.models.embeddings import EmbeddingResponse -from .utils import mock_response, mock_embedding_response_payload + +from .utils import mock_embedding_response_payload, mock_response @pytest.fixture() diff --git a/tests/test_list_models.py b/tests/test_list_models.py index 0ae04100..33875e36 100644 --- a/tests/test_list_models.py +++ b/tests/test_list_models.py @@ -1,7 +1,6 @@ import unittest.mock as mock import pytest - from mistralai.client import MistralClient from mistralai.models.models import ModelList @@ -32,7 +31,7 @@ def test_list_models(self, client): "Authorization": "Bearer None", "Content-Type": "application/json", }, - json={} + json={}, ) assert isinstance(result, ModelList), "Should return an ModelList" diff --git a/tests/test_list_models_async.py b/tests/test_list_models_async.py index fb76e5bc..1b989359 100644 --- a/tests/test_list_models_async.py +++ b/tests/test_list_models_async.py @@ -1,7 +1,6 @@ import unittest.mock as mock import pytest - from mistralai.async_client import MistralAsyncClient from mistralai.models.models import ModelList @@ -33,7 +32,7 @@ async def test_list_models(self, client): "Authorization": "Bearer 
None", "Content-Type": "application/json", }, - json={} + json={}, ) assert isinstance(result, ModelList), "Should return an ModelList" diff --git a/tests/utils.py b/tests/utils.py index 19bfed6b..50a1f415 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,8 +1,8 @@ -import orjson import contextlib -from contextlib import _AsyncGeneratorContextManager import unittest.mock as mock from typing import List + +import orjson from httpx import Response @@ -188,7 +188,15 @@ def mock_chat_response_payload(): def mock_chat_response_streaming_payload(): return [ - 'data: {"id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e", "model": "mistral-small", "choices": [{"index": 0, "delta": {"role": "assistant"}, "finish_reason": null}]}\n\n', + "data: " + + orjson.dumps( + { + "id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e", + "model": "mistral-small", + "choices": [{"index": 0, "delta": {"role": "assistant"}}], + } + ).decode() + + "\n\n", *[ "data: " + orjson.dumps( From 2375abafb7b68411ce6414d86e7250935b6b8dc7 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Thu, 21 Dec 2023 20:01:07 +0000 Subject: [PATCH 020/223] fixing streaming test --- tests/utils.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/utils.py b/tests/utils.py index 50a1f415..e5edef60 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -193,7 +193,13 @@ def mock_chat_response_streaming_payload(): { "id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e", "model": "mistral-small", - "choices": [{"index": 0, "delta": {"role": "assistant"}}], + "choices": [ + { + "index": 0, + "delta": {"role": "assistant"}, + "finish_reason": None, + } + ], } ).decode() + "\n\n", From 130b94d60d20b690ae3d296ccf83fc38c020c3c7 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Mon, 15 Jan 2024 16:38:34 +0100 Subject: [PATCH 021/223] adding user-agent string to clients --- .github/workflows/build_publish.yaml | 1 + src/mistralai/async_client.py | 4 +++- src/mistralai/client_base.py | 3 +++ 3 files changed, 7 insertions(+), 1 
deletion(-) diff --git a/.github/workflows/build_publish.yaml b/.github/workflows/build_publish.yaml index 61d11b9e..9680060e 100644 --- a/.github/workflows/build_publish.yaml +++ b/.github/workflows/build_publish.yaml @@ -77,6 +77,7 @@ jobs: - name: Build Package run: | poetry version ${{ github.ref_name }} + sed -i 's/self._version = '\''0.0.1'\''/self._version = '\''${{ github.ref_name }}'\''/g' src/mistralai/client_base.py poetry build # Publish to PyPi diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index 81285a42..3f1ffffb 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -59,8 +59,10 @@ async def _request( stream: bool = False, attempt: int = 1, ) -> AsyncGenerator[Dict[str, Any], None]: + accept_header = "text/event-stream" if stream else "application/json" headers = { - "Accept": "application/json", + "Accept": accept_header, + "User-Agent": f"mistral-client-python/{self._version}", "Authorization": f"Bearer {self._api_key}", "Content-Type": "application/json", } diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index a338bb82..b6d1c488 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -35,6 +35,9 @@ def __init__( self._api_key = api_key self._logger = logging.getLogger(__name__) + # This should be automatically updated by the deploy script + self._version = "0.0.1" + def _make_chat_request( self, model: str, From 13cc2aa7276d3448ac3eefd854f10f08c3b3de35 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Mon, 15 Jan 2024 16:43:20 +0100 Subject: [PATCH 022/223] adding and testing user agent --- src/mistralai/client.py | 4 +++- tests/test_chat.py | 4 +++- tests/test_chat_async.py | 4 +++- tests/test_embedder.py | 2 ++ tests/test_embedder_async.py | 2 ++ tests/test_list_models.py | 1 + tests/test_list_models_async.py | 1 + 7 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 
7ba96fa4..1f3837fa 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -53,8 +53,10 @@ def _request( stream: bool = False, attempt: int = 1, ) -> Iterator[Dict[str, Any]]: + accept_header = "text/event-stream" if stream else "application/json" headers = { - "Accept": "application/json", + "Accept": accept_header, + "User-Agent": f"mistral-client-python/{self._version}", "Authorization": f"Bearer {self._api_key}", "Content-Type": "application/json", } diff --git a/tests/test_chat.py b/tests/test_chat.py index ef3d8620..e64e68a4 100644 --- a/tests/test_chat.py +++ b/tests/test_chat.py @@ -41,6 +41,7 @@ def test_chat(self, client): "post", "https://api.mistral.ai/v1/chat/completions", headers={ + "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", "Authorization": "Bearer None", "Content-Type": "application/json", @@ -81,7 +82,8 @@ def test_chat_streaming(self, client): "post", "https://api.mistral.ai/v1/chat/completions", headers={ - "Accept": "application/json", + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "text/event-stream", "Authorization": "Bearer None", "Content-Type": "application/json", }, diff --git a/tests/test_chat_async.py b/tests/test_chat_async.py index 24c79db6..7e51a972 100644 --- a/tests/test_chat_async.py +++ b/tests/test_chat_async.py @@ -43,6 +43,7 @@ async def test_chat(self, client): "post", "https://api.mistral.ai/v1/chat/completions", headers={ + "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", "Authorization": "Bearer None", "Content-Type": "application/json", @@ -84,7 +85,8 @@ async def test_chat_streaming(self, client): "post", "https://api.mistral.ai/v1/chat/completions", headers={ - "Accept": "application/json", + "Accept": "text/event-stream", + 
"User-Agent": f"mistral-client-python/{client._version}", "Authorization": "Bearer None", "Content-Type": "application/json", }, diff --git a/tests/test_embedder.py b/tests/test_embedder.py index 3798e84c..59e30fa6 100644 --- a/tests/test_embedder.py +++ b/tests/test_embedder.py @@ -30,6 +30,7 @@ def test_embeddings(self, client): "post", "https://api.mistral.ai/v1/embeddings", headers={ + "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", "Authorization": "Bearer None", "Content-Type": "application/json", @@ -59,6 +60,7 @@ def test_embeddings_batch(self, client): "post", "https://api.mistral.ai/v1/embeddings", headers={ + "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", "Authorization": "Bearer None", "Content-Type": "application/json", diff --git a/tests/test_embedder_async.py b/tests/test_embedder_async.py index 82261c7e..8d08d6a6 100644 --- a/tests/test_embedder_async.py +++ b/tests/test_embedder_async.py @@ -31,6 +31,7 @@ async def test_embeddings(self, client): "post", "https://api.mistral.ai/v1/embeddings", headers={ + "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", "Authorization": "Bearer None", "Content-Type": "application/json", @@ -61,6 +62,7 @@ async def test_embeddings_batch(self, client): "post", "https://api.mistral.ai/v1/embeddings", headers={ + "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", "Authorization": "Bearer None", "Content-Type": "application/json", diff --git a/tests/test_list_models.py b/tests/test_list_models.py index 33875e36..1a048fac 100644 --- a/tests/test_list_models.py +++ b/tests/test_list_models.py @@ -27,6 +27,7 @@ def test_list_models(self, client): "get", 
"https://api.mistral.ai/v1/models", headers={ + "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", "Authorization": "Bearer None", "Content-Type": "application/json", diff --git a/tests/test_list_models_async.py b/tests/test_list_models_async.py index 1b989359..6d572d12 100644 --- a/tests/test_list_models_async.py +++ b/tests/test_list_models_async.py @@ -28,6 +28,7 @@ async def test_list_models(self, client): "get", "https://api.mistral.ai/v1/models", headers={ + "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", "Authorization": "Bearer None", "Content-Type": "application/json", From 3ee19fd726fb39c06b161e03a1919d44e877e083 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Mon, 15 Jan 2024 16:58:17 +0100 Subject: [PATCH 023/223] adding safe_prompt to calls and deprecating safe_mode --- src/mistralai/async_client.py | 12 ++++++++---- src/mistralai/client.py | 12 ++++++++---- src/mistralai/client_base.py | 4 ++-- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index 3f1ffffb..710c9b49 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -133,6 +133,7 @@ async def chat( top_p: Optional[float] = None, random_seed: Optional[int] = None, safe_mode: bool = False, + safe_prompt: bool = False, ) -> ChatCompletionResponse: """A asynchronous chat endpoint that returns a single response. @@ -145,7 +146,8 @@ async def chat( top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. Defaults to None. random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False. + safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. 
+ safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. Returns: ChatCompletionResponse: a response object containing the generated text. @@ -158,7 +160,7 @@ async def chat( top_p=top_p, random_seed=random_seed, stream=False, - safe_mode=safe_mode, + safe_prompt=safe_mode or safe_prompt, ) single_response = self._request("post", request, "v1/chat/completions") @@ -177,6 +179,7 @@ async def chat_stream( top_p: Optional[float] = None, random_seed: Optional[int] = None, safe_mode: bool = False, + safe_prompt: bool = False, ) -> AsyncGenerator[ChatCompletionStreamResponse, None]: """An Asynchronous chat endpoint that streams responses. @@ -189,7 +192,8 @@ async def chat_stream( top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. Defaults to None. random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False. + safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. + safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. Returns: AsyncGenerator[ChatCompletionStreamResponse, None]: @@ -204,7 +208,7 @@ async def chat_stream( top_p=top_p, random_seed=random_seed, stream=True, - safe_mode=safe_mode, + safe_prompt=safe_mode or safe_prompt, ) async_response = self._request( "post", request, "v1/chat/completions", stream=True diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 1f3837fa..365079d9 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -125,6 +125,7 @@ def chat( top_p: Optional[float] = None, random_seed: Optional[int] = None, safe_mode: bool = False, + safe_prompt: bool = False, ) -> ChatCompletionResponse: """A chat endpoint that returns a single response. 
@@ -137,7 +138,8 @@ def chat( top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. Defaults to None. random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False. + safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. + safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. Returns: ChatCompletionResponse: a response object containing the generated text. @@ -150,7 +152,7 @@ def chat( top_p=top_p, random_seed=random_seed, stream=False, - safe_mode=safe_mode, + safe_prompt=safe_mode or safe_prompt, ) single_response = self._request("post", request, "v1/chat/completions") @@ -169,6 +171,7 @@ def chat_stream( top_p: Optional[float] = None, random_seed: Optional[int] = None, safe_mode: bool = False, + safe_prompt: bool = False, ) -> Iterable[ChatCompletionStreamResponse]: """A chat endpoint that streams responses. @@ -181,7 +184,8 @@ def chat_stream( top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. Defaults to None. random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): whether to use safe mode, e.g. true. Defaults to False. + safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. + safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. 
Returns: Iterable[ChatCompletionStreamResponse]: @@ -195,7 +199,7 @@ def chat_stream( top_p=top_p, random_seed=random_seed, stream=True, - safe_mode=safe_mode, + safe_prompt=safe_mode or safe_prompt, ) response = self._request("post", request, "v1/chat/completions", stream=True) diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index b6d1c488..f6ab4354 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -47,12 +47,12 @@ def _make_chat_request( top_p: Optional[float] = None, random_seed: Optional[int] = None, stream: Optional[bool] = None, - safe_mode: Optional[bool] = False, + safe_prompt: Optional[bool] = False, ) -> Dict[str, Any]: request_data: Dict[str, Any] = { "model": model, "messages": [msg.model_dump() for msg in messages], - "safe_prompt": safe_mode, + "safe_prompt": safe_prompt, } if temperature is not None: request_data["temperature"] = temperature From d83be815fe45a902d99e37242f1a575f63e9aeb6 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Mon, 15 Jan 2024 17:24:07 +0100 Subject: [PATCH 024/223] fixing sed script for changing version number on deploy --- .github/workflows/build_publish.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_publish.yaml b/.github/workflows/build_publish.yaml index 9680060e..ff5a3af2 100644 --- a/.github/workflows/build_publish.yaml +++ b/.github/workflows/build_publish.yaml @@ -77,7 +77,7 @@ jobs: - name: Build Package run: | poetry version ${{ github.ref_name }} - sed -i 's/self._version = '\''0.0.1'\''/self._version = '\''${{ github.ref_name }}'\''/g' src/mistralai/client_base.py + sed -i 's/self._version = "0.0.1"/self._version = "${{ github.ref_name }}"/g' src/mistralai/client_base.py poetry build # Publish to PyPi From 6dd94dd66de8779b4658747fab7ce3bec16a0686 Mon Sep 17 00:00:00 2001 From: Bam4d Date: Mon, 29 Jan 2024 16:31:50 +0000 Subject: [PATCH 025/223] handle errors slightly better --- src/mistralai/client_base.py | 6 +++--- 
src/mistralai/models/chat_completion.py | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index f6ab4354..328f1c4a 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -73,16 +73,16 @@ def _check_response_status_codes(self, response: Response) -> None: if response.status_code in RETRY_STATUS_CODES: raise MistralAPIStatusException.from_response( response, - message=f"Cannot stream response. Status: {response.status_code}", + message=f"Status: {response.status_code}. Message: {response.text}", ) elif 400 <= response.status_code < 500: raise MistralAPIException.from_response( response, - message=f"Cannot stream response. Status: {response.status_code}", + message=f"Status: {response.status_code}. Message: {response.text}", ) elif response.status_code >= 500: raise MistralException( - message=f"Unexpected server error (status {response.status_code})" + message=f"Status: {response.status_code}. Message: {response.text}", ) def _check_streaming_response(self, response: Response) -> None: diff --git a/src/mistralai/models/chat_completion.py b/src/mistralai/models/chat_completion.py index ef1b09e6..f1da78e7 100644 --- a/src/mistralai/models/chat_completion.py +++ b/src/mistralai/models/chat_completion.py @@ -19,6 +19,7 @@ class DeltaMessage(BaseModel): class FinishReason(Enum): stop = "stop" length = "length" + error = "error" class ChatCompletionResponseStreamChoice(BaseModel): From 82aaaa0662f730f4e85e7309cb7a86bb8fb027ad Mon Sep 17 00:00:00 2001 From: Anjor Kanekar Date: Tue, 6 Feb 2024 09:32:18 +0000 Subject: [PATCH 026/223] Make FinishReason json serializable In downstream code when someone does .model_dump() on any of the chat completion objects it fails because the enum is not serializable. 
https://stackoverflow.com/questions/69541613/how-to-json-serialize-enum-classes-in-pydantic-basemodel --- src/mistralai/models/chat_completion.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/mistralai/models/chat_completion.py b/src/mistralai/models/chat_completion.py index f1da78e7..d0151924 100644 --- a/src/mistralai/models/chat_completion.py +++ b/src/mistralai/models/chat_completion.py @@ -16,10 +16,10 @@ class DeltaMessage(BaseModel): content: Optional[str] = None -class FinishReason(Enum): - stop = "stop" - length = "length" - error = "error" +class FinishReason(str, Enum): + stop: str = "stop" + length: str = "length" + error: str = "error" class ChatCompletionResponseStreamChoice(BaseModel): From 76dfdc1a58ad0272e279965549efe22ae1861632 Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Mon, 26 Feb 2024 14:40:23 +0000 Subject: [PATCH 027/223] Bump version file --- version.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 version.txt diff --git a/version.txt b/version.txt new file mode 100644 index 00000000..d917d3e2 --- /dev/null +++ b/version.txt @@ -0,0 +1 @@ +0.1.2 From 8c03f4089a863edcf04364204a12eb2ba1e02f10 Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Mon, 26 Feb 2024 14:40:23 +0000 Subject: [PATCH 028/223] Update version to 0.1.2 --- .gitignore | 6 +- examples/function_calling.py | 101 +++++++++++ examples/json_format.py | 25 +++ poetry.lock | 221 +++++++++++++++++++++++- pyproject.toml | 4 +- src/mistralai/async_client.py | 52 +++--- src/mistralai/client.py | 46 +++-- src/mistralai/client_base.py | 68 +++++++- src/mistralai/models/chat_completion.py | 49 +++++- version.txt | 1 - 10 files changed, 511 insertions(+), 62 deletions(-) create mode 100644 examples/function_calling.py create mode 100755 examples/json_format.py delete mode 100644 version.txt diff --git a/.gitignore b/.gitignore index 6769e21d..1e27b2b3 100644 --- a/.gitignore +++ b/.gitignore @@ 
-157,4 +157,8 @@ cython_debug/ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ \ No newline at end of file +#.idea/ + +scratch/ + +changes.diff \ No newline at end of file diff --git a/examples/function_calling.py b/examples/function_calling.py new file mode 100644 index 00000000..99813f72 --- /dev/null +++ b/examples/function_calling.py @@ -0,0 +1,101 @@ +import functools +import json +import os + +import pandas as pd +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage, Function + +# Assuming we have the following data +data = { + "transaction_id": ["T1001", "T1002", "T1003", "T1004", "T1005"], + "customer_id": ["C001", "C002", "C003", "C002", "C001"], + "payment_amount": [125.50, 89.99, 120.00, 54.30, 210.20], + "payment_date": ["2021-10-05", "2021-10-06", "2021-10-07", "2021-10-05", "2021-10-08"], + "payment_status": ["Paid", "Unpaid", "Paid", "Paid", "Pending"], +} + +# Create DataFrame +df = pd.DataFrame(data) + + +def retrieve_payment_status(df: pd.DataFrame, transaction_id: str) -> str: + if transaction_id in df.transaction_id.values: + return json.dumps({"status": df[df.transaction_id == transaction_id].payment_status.item()}) + else: + return json.dumps({"status": "error - transaction id not found."}) + + +def retrieve_payment_date(df: pd.DataFrame, transaction_id: str) -> str: + if transaction_id in df.transaction_id.values: + return json.dumps({"date": df[df.transaction_id == transaction_id].payment_date.item()}) + else: + return json.dumps({"status": "error - transaction id not found."}) + + +names_to_functions = { + "retrieve_payment_status": functools.partial(retrieve_payment_status, df=df), + "retrieve_payment_date": 
functools.partial(retrieve_payment_date, df=df), +} + + +tools = [ + { + "type": "function", + "function": Function( + name="retrieve_payment_status", + description="Get payment status of a transaction id", + parameters={ + "type": "object", + "required": ["transaction_id"], + "properties": {"transaction_id": {"type": "string", "description": "The transaction id."}}, + }, + ), + }, + { + "type": "function", + "function": Function( + name="retrieve_payment_date", + description="Get payment date of a transaction id", + parameters={ + "type": "object", + "required": ["transaction_id"], + "properties": {"transaction_id": {"type": "string", "description": "The transaction id."}}, + }, + ), + }, +] + + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = MistralClient(api_key=api_key) + +messages = [ChatMessage(role="user", content="What's the status of my transaction?")] + +response = client.chat(model=model, messages=messages, tools=tools) + +print(response.choices[0].message.content) + +messages.append(ChatMessage(role="assistant", content=response.choices[0].message.content)) +messages.append(ChatMessage(role="user", content="My transaction ID is T1001.")) + +response = client.chat( + model=model, messages=messages, tools=tools +) + +tool_call = response.choices[0].message.tool_calls[0] +function_name = tool_call.function.name +function_params = json.loads(tool_call.function.arguments) + +print(f"calling function_name: {function_name}, with function_params: {function_params}") + +function_result = names_to_functions[function_name](**function_params) + +messages.append(response.choices[0].message) +messages.append(ChatMessage(role="tool", name=function_name, content=function_result)) + +response = client.chat(model=model, messages=messages, tools=tools) + +print(f"{response.choices[0].message.content}") diff --git a/examples/json_format.py b/examples/json_format.py new file mode 100755 index 00000000..62fd6749 --- /dev/null +++ 
b/examples/json_format.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "mistral-large-latest" + + client = MistralClient(api_key=api_key) + + chat_response = client.chat( + model=model, + response_format={"type": "json_object"}, + messages=[ChatMessage(role="user", content="What is the best French cheese?")], + + ) + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + main() diff --git a/poetry.lock b/poetry.lock index 93296a94..604edb75 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -11,9 +11,6 @@ files = [ {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - [[package]] name = "anyio" version = "4.2.0" @@ -208,6 +205,51 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] +[[package]] +name = "numpy" +version = "1.26.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = 
"numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", 
hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, +] + [[package]] name = "orjson" version = "3.9.10" @@ -278,6 +320,78 @@ files = [ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, ] +[[package]] +name = "pandas" +version = "2.2.0" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pandas-2.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8108ee1712bb4fa2c16981fba7e68b3f6ea330277f5ca34fa8d557e986a11670"}, + {file = "pandas-2.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:736da9ad4033aeab51d067fc3bd69a0ba36f5a60f66a527b3d72e2030e63280a"}, + {file = "pandas-2.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38e0b4fc3ddceb56ec8a287313bc22abe17ab0eb184069f08fc6a9352a769b18"}, + {file = "pandas-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20404d2adefe92aed3b38da41d0847a143a09be982a31b85bc7dd565bdba0f4e"}, + {file = 
"pandas-2.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7ea3ee3f125032bfcade3a4cf85131ed064b4f8dd23e5ce6fa16473e48ebcaf5"}, + {file = "pandas-2.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f9670b3ac00a387620489dfc1bca66db47a787f4e55911f1293063a78b108df1"}, + {file = "pandas-2.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:5a946f210383c7e6d16312d30b238fd508d80d927014f3b33fb5b15c2f895430"}, + {file = "pandas-2.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a1b438fa26b208005c997e78672f1aa8138f67002e833312e6230f3e57fa87d5"}, + {file = "pandas-2.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8ce2fbc8d9bf303ce54a476116165220a1fedf15985b09656b4b4275300e920b"}, + {file = "pandas-2.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2707514a7bec41a4ab81f2ccce8b382961a29fbe9492eab1305bb075b2b1ff4f"}, + {file = "pandas-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85793cbdc2d5bc32620dc8ffa715423f0c680dacacf55056ba13454a5be5de88"}, + {file = "pandas-2.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:cfd6c2491dc821b10c716ad6776e7ab311f7df5d16038d0b7458bc0b67dc10f3"}, + {file = "pandas-2.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a146b9dcacc3123aa2b399df1a284de5f46287a4ab4fbfc237eac98a92ebcb71"}, + {file = "pandas-2.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbc1b53c0e1fdf16388c33c3cca160f798d38aea2978004dd3f4d3dec56454c9"}, + {file = "pandas-2.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a41d06f308a024981dcaa6c41f2f2be46a6b186b902c94c2674e8cb5c42985bc"}, + {file = "pandas-2.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:159205c99d7a5ce89ecfc37cb08ed179de7783737cea403b295b5eda8e9c56d1"}, + {file = "pandas-2.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1e1f3861ea9132b32f2133788f3b14911b68102d562715d71bd0013bc45440"}, + {file = 
"pandas-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:761cb99b42a69005dec2b08854fb1d4888fdf7b05db23a8c5a099e4b886a2106"}, + {file = "pandas-2.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a20628faaf444da122b2a64b1e5360cde100ee6283ae8effa0d8745153809a2e"}, + {file = "pandas-2.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f5be5d03ea2073627e7111f61b9f1f0d9625dc3c4d8dda72cc827b0c58a1d042"}, + {file = "pandas-2.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:a626795722d893ed6aacb64d2401d017ddc8a2341b49e0384ab9bf7112bdec30"}, + {file = "pandas-2.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9f66419d4a41132eb7e9a73dcec9486cf5019f52d90dd35547af11bc58f8637d"}, + {file = "pandas-2.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:57abcaeda83fb80d447f28ab0cc7b32b13978f6f733875ebd1ed14f8fbc0f4ab"}, + {file = "pandas-2.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e60f1f7dba3c2d5ca159e18c46a34e7ca7247a73b5dd1a22b6d59707ed6b899a"}, + {file = "pandas-2.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb61dc8567b798b969bcc1fc964788f5a68214d333cade8319c7ab33e2b5d88a"}, + {file = "pandas-2.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:52826b5f4ed658fa2b729264d63f6732b8b29949c7fd234510d57c61dbeadfcd"}, + {file = "pandas-2.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bde2bc699dbd80d7bc7f9cab1e23a95c4375de615860ca089f34e7c64f4a8de7"}, + {file = "pandas-2.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:3de918a754bbf2da2381e8a3dcc45eede8cd7775b047b923f9006d5f876802ae"}, + {file = "pandas-2.2.0.tar.gz", hash = "sha256:30b83f7c3eb217fb4d1b494a57a2fda5444f17834f5df2de6b2ffff68dc3c8e2"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""}, + {version = ">=1.23.2,<2", markers = "python_version == \"3.11\""}, + {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""}, +] 
+python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy 
(>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + [[package]] name = "pluggy" version = "1.3.0" @@ -293,6 +407,54 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "pyarrow" +version = "15.0.0" +description = "Python library for Apache Arrow" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyarrow-15.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:0a524532fd6dd482edaa563b686d754c70417c2f72742a8c990b322d4c03a15d"}, + {file = "pyarrow-15.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:60a6bdb314affa9c2e0d5dddf3d9cbb9ef4a8dddaa68669975287d47ece67642"}, + {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66958fd1771a4d4b754cd385835e66a3ef6b12611e001d4e5edfcef5f30391e2"}, + {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f500956a49aadd907eaa21d4fff75f73954605eaa41f61cb94fb008cf2e00c6"}, + {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6f87d9c4f09e049c2cade559643424da84c43a35068f2a1c4653dc5b1408a929"}, + {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:85239b9f93278e130d86c0e6bb455dcb66fc3fd891398b9d45ace8799a871a1e"}, + {file = "pyarrow-15.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b8d43e31ca16aa6e12402fcb1e14352d0d809de70edd185c7650fe80e0769e3"}, + {file = "pyarrow-15.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:fa7cd198280dbd0c988df525e50e35b5d16873e2cdae2aaaa6363cdb64e3eec5"}, + {file = "pyarrow-15.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8780b1a29d3c8b21ba6b191305a2a607de2e30dab399776ff0aa09131e266340"}, + {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:fe0ec198ccc680f6c92723fadcb97b74f07c45ff3fdec9dd765deb04955ccf19"}, + {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:036a7209c235588c2f07477fe75c07e6caced9b7b61bb897c8d4e52c4b5f9555"}, + {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2bd8a0e5296797faf9a3294e9fa2dc67aa7f10ae2207920dbebb785c77e9dbe5"}, + {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e8ebed6053dbe76883a822d4e8da36860f479d55a762bd9e70d8494aed87113e"}, + {file = "pyarrow-15.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:17d53a9d1b2b5bd7d5e4cd84d018e2a45bc9baaa68f7e6e3ebed45649900ba99"}, + {file = "pyarrow-15.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9950a9c9df24090d3d558b43b97753b8f5867fb8e521f29876aa021c52fda351"}, + {file = "pyarrow-15.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:003d680b5e422d0204e7287bb3fa775b332b3fce2996aa69e9adea23f5c8f970"}, + {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f75fce89dad10c95f4bf590b765e3ae98bcc5ba9f6ce75adb828a334e26a3d40"}, + {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca9cb0039923bec49b4fe23803807e4ef39576a2bec59c32b11296464623dc2"}, + {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ed5a78ed29d171d0acc26a305a4b7f83c122d54ff5270810ac23c75813585e4"}, + {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6eda9e117f0402dfcd3cd6ec9bfee89ac5071c48fc83a84f3075b60efa96747f"}, + {file = "pyarrow-15.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9a3a6180c0e8f2727e6f1b1c87c72d3254cac909e609f35f22532e4115461177"}, + {file = "pyarrow-15.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:19a8918045993349b207de72d4576af0191beef03ea655d8bdb13762f0cd6eac"}, + {file = "pyarrow-15.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:d0ec076b32bacb6666e8813a22e6e5a7ef1314c8069d4ff345efa6246bc38593"}, + {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5db1769e5d0a77eb92344c7382d6543bea1164cca3704f84aa44e26c67e320fb"}, + {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2617e3bf9df2a00020dd1c1c6dce5cc343d979efe10bc401c0632b0eef6ef5b"}, + {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:d31c1d45060180131caf10f0f698e3a782db333a422038bf7fe01dace18b3a31"}, + {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:c8c287d1d479de8269398b34282e206844abb3208224dbdd7166d580804674b7"}, + {file = "pyarrow-15.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:07eb7f07dc9ecbb8dace0f58f009d3a29ee58682fcdc91337dfeb51ea618a75b"}, + {file = "pyarrow-15.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:47af7036f64fce990bb8a5948c04722e4e3ea3e13b1007ef52dfe0aa8f23cf7f"}, + {file = "pyarrow-15.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93768ccfff85cf044c418bfeeafce9a8bb0cee091bd8fd19011aff91e58de540"}, + {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6ee87fd6892700960d90abb7b17a72a5abb3b64ee0fe8db6c782bcc2d0dc0b4"}, + {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:001fca027738c5f6be0b7a3159cc7ba16a5c52486db18160909a0831b063c4e4"}, + {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:d1c48648f64aec09accf44140dccb92f4f94394b8d79976c426a5b79b11d4fa7"}, + {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:972a0141be402bb18e3201448c8ae62958c9c7923dfaa3b3d4530c835ac81aed"}, + {file = "pyarrow-15.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:f01fc5cf49081426429127aa2d427d9d98e1cb94a32cb961d583a70b7c4504e6"}, + {file = "pyarrow-15.0.0.tar.gz", hash = 
"sha256:876858f549d540898f927eba4ef77cd549ad8d24baa3207cf1b72e5788b50e83"}, +] + +[package.dependencies] +numpy = ">=1.16.6,<2" + [[package]] name = "pydantic" version = "2.5.2" @@ -469,6 +631,31 @@ pytest = ">=7.0.0" docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pytz" +version = "2024.1" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, + {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, +] + [[package]] name = "ruff" version = "0.1.7" @@ -495,6 +682,17 @@ files = [ {file = "ruff-0.1.7.tar.gz", hash = "sha256:dffd699d07abf54833e5f6cc50b85a6ff043715da8788c4a79bcd4ab4734d306"}, ] +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + [[package]] name = "sniffio" version = "1.3.0" @@ -542,6 +740,17 @@ files = [ {file = "typing_extensions-4.8.0.tar.gz", 
hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, ] +[[package]] +name = "tzdata" +version = "2024.1" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files = [ + {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, + {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, +] + [[package]] name = "urllib3" version = "2.1.0" @@ -560,5 +769,5 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" -python-versions = "^3.8" -content-hash = "f3a7934d78683acd0133e3764488c41a52f22b8c596998ba89b3b58b5759473f" +python-versions = "^3.9" +content-hash = "9ca8a20cccbfe5d054923f5d7a5bab783855e748699cd87c5a0b8315192ac284" diff --git a/pyproject.toml b/pyproject.toml index 43c1e23c..73cf6f83 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,10 +23,12 @@ exclude = ["docs", "tests", "examples", "tools", "build"] [tool.poetry.dependencies] -python = "^3.8" +python = "^3.9" orjson = "^3.9.10" pydantic = "^2.5.2" httpx = "^0.25.2" +pandas = "^2.2.0" +pyarrow = "^15.0.0" [tool.poetry.group.dev.dependencies] diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index 710c9b49..a39a90f4 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -24,7 +24,8 @@ from mistralai.models.chat_completion import ( ChatCompletionResponse, ChatCompletionStreamResponse, - ChatMessage, + ResponseFormat, + ToolChoice, ) from mistralai.models.embeddings import EmbeddingResponse from mistralai.models.models import ModelList @@ -101,9 +102,7 @@ async def _request( except ConnectError as e: raise MistralConnectionException(str(e)) from e except RequestError as e: - raise MistralException( - f"Unexpected exception ({e.__class__.__name__}): {e}" - ) from e + raise MistralException(f"Unexpected exception ({e.__class__.__name__}): {e}") 
from e except JSONDecodeError as e: raise MistralAPIException.from_response( response, @@ -112,34 +111,33 @@ async def _request( except MistralAPIStatusException as e: attempt += 1 if attempt > self._max_retries: - raise MistralAPIStatusException.from_response( - response, message=str(e) - ) from e + raise MistralAPIStatusException.from_response(response, message=str(e)) from e backoff = 2.0**attempt # exponential backoff time.sleep(backoff) # Retry as a generator - async for r in self._request( - method, json, path, stream=stream, attempt=attempt - ): + async for r in self._request(method, json, path, stream=stream, attempt=attempt): yield r async def chat( self, - model: str, - messages: List[ChatMessage], + messages: List[Any], + model: Optional[str] = None, + tools: Optional[List[Dict[str, Any]]] = None, temperature: Optional[float] = None, max_tokens: Optional[int] = None, top_p: Optional[float] = None, random_seed: Optional[int] = None, safe_mode: bool = False, safe_prompt: bool = False, + tool_choice: Optional[Union[str, ToolChoice]] = None, + response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, ) -> ChatCompletionResponse: """A asynchronous chat endpoint that returns a single response. Args: model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[ChatMessage]): messages an array of messages to chat with, e.g. + messages (List[Any]): messages an array of messages to chat with, e.g. [{role: 'user', content: 'What is the best French cheese?'}] temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. @@ -153,14 +151,17 @@ async def chat( ChatCompletionResponse: a response object containing the generated text. 
""" request = self._make_chat_request( - model, messages, + model, + tools=tools, temperature=temperature, max_tokens=max_tokens, top_p=top_p, random_seed=random_seed, stream=False, safe_prompt=safe_mode or safe_prompt, + tool_choice=tool_choice, + response_format=response_format, ) single_response = self._request("post", request, "v1/chat/completions") @@ -172,21 +173,25 @@ async def chat( async def chat_stream( self, - model: str, - messages: List[ChatMessage], + messages: List[Any], + model: Optional[str] = None, + tools: Optional[List[Dict[str, Any]]] = None, temperature: Optional[float] = None, max_tokens: Optional[int] = None, top_p: Optional[float] = None, random_seed: Optional[int] = None, safe_mode: bool = False, safe_prompt: bool = False, + tool_choice: Optional[Union[str, ToolChoice]] = None, + response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, ) -> AsyncGenerator[ChatCompletionStreamResponse, None]: """An Asynchronous chat endpoint that streams responses. Args: model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[ChatMessage]): messages an array of messages to chat with, e.g. + messages (List[Any]): messages an array of messages to chat with, e.g. [{role: 'user', content: 'What is the best French cheese?'}] + tools (Optional[List[Function]], optional): a list of tools to use. temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. 
@@ -201,25 +206,24 @@ async def chat_stream( """ request = self._make_chat_request( - model, messages, + model, + tools=tools, temperature=temperature, max_tokens=max_tokens, top_p=top_p, random_seed=random_seed, stream=True, safe_prompt=safe_mode or safe_prompt, + tool_choice=tool_choice, + response_format=response_format, ) - async_response = self._request( - "post", request, "v1/chat/completions", stream=True - ) + async_response = self._request("post", request, "v1/chat/completions", stream=True) async for json_response in async_response: yield ChatCompletionStreamResponse(**json_response) - async def embeddings( - self, model: str, input: Union[str, List[str]] - ) -> EmbeddingResponse: + async def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingResponse: """An asynchronous embeddings endpoint that returns embeddings for a single, or batch of inputs Args: diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 365079d9..30596b58 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -17,7 +17,8 @@ from mistralai.models.chat_completion import ( ChatCompletionResponse, ChatCompletionStreamResponse, - ChatMessage, + ResponseFormat, + ToolChoice, ) from mistralai.models.embeddings import EmbeddingResponse from mistralai.models.models import ModelList @@ -38,9 +39,8 @@ def __init__( super().__init__(endpoint, api_key, max_retries, timeout) self._client = Client( - follow_redirects=True, - timeout=self._timeout, - transport=HTTPTransport(retries=self._max_retries)) + follow_redirects=True, timeout=self._timeout, transport=HTTPTransport(retries=self._max_retries) + ) def __del__(self) -> None: self._client.close() @@ -95,9 +95,7 @@ def _request( except ConnectError as e: raise MistralConnectionException(str(e)) from e except RequestError as e: - raise MistralException( - f"Unexpected exception ({e.__class__.__name__}): {e}" - ) from e + raise MistralException(f"Unexpected exception ({e.__class__.__name__}): {e}") from 
e except JSONDecodeError as e: raise MistralAPIException.from_response( response, @@ -106,9 +104,7 @@ def _request( except MistralAPIStatusException as e: attempt += 1 if attempt > self._max_retries: - raise MistralAPIStatusException.from_response( - response, message=str(e) - ) from e + raise MistralAPIStatusException.from_response(response, message=str(e)) from e backoff = 2.0**attempt # exponential backoff time.sleep(backoff) @@ -118,21 +114,25 @@ def _request( def chat( self, - model: str, - messages: List[ChatMessage], + messages: List[Any], + model: Optional[str] = None, + tools: Optional[List[Dict[str, Any]]] = None, temperature: Optional[float] = None, max_tokens: Optional[int] = None, top_p: Optional[float] = None, random_seed: Optional[int] = None, safe_mode: bool = False, safe_prompt: bool = False, + tool_choice: Optional[Union[str, ToolChoice]] = None, + response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, ) -> ChatCompletionResponse: """A chat endpoint that returns a single response. Args: model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[ChatMessage]): messages an array of messages to chat with, e.g. + messages (List[Any]): messages an array of messages to chat with, e.g. [{role: 'user', content: 'What is the best French cheese?'}] + tools (Optional[List[Function]], optional): a list of tools to use. temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. @@ -145,14 +145,17 @@ def chat( ChatCompletionResponse: a response object containing the generated text. 
""" request = self._make_chat_request( - model, messages, + model, + tools=tools, temperature=temperature, max_tokens=max_tokens, top_p=top_p, random_seed=random_seed, stream=False, safe_prompt=safe_mode or safe_prompt, + tool_choice=tool_choice, + response_format=response_format, ) single_response = self._request("post", request, "v1/chat/completions") @@ -164,21 +167,25 @@ def chat( def chat_stream( self, - model: str, - messages: List[ChatMessage], + messages: List[Any], + model: Optional[str] = None, + tools: Optional[List[Dict[str, Any]]] = None, temperature: Optional[float] = None, max_tokens: Optional[int] = None, top_p: Optional[float] = None, random_seed: Optional[int] = None, safe_mode: bool = False, safe_prompt: bool = False, + tool_choice: Optional[Union[str, ToolChoice]] = None, + response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, ) -> Iterable[ChatCompletionStreamResponse]: """A chat endpoint that streams responses. Args: model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[ChatMessage]): messages an array of messages to chat with, e.g. + messages (List[Any]): messages an array of messages to chat with, e.g. [{role: 'user', content: 'What is the best French cheese?'}] + tools (Optional[List[Function]], optional): a list of tools to use. temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. @@ -192,14 +199,17 @@ def chat_stream( A generator that yields ChatCompletionStreamResponse objects. 
""" request = self._make_chat_request( - model, messages, + model, + tools=tools, temperature=temperature, max_tokens=max_tokens, top_p=top_p, random_seed=random_seed, stream=True, safe_prompt=safe_mode or safe_prompt, + tool_choice=tool_choice, + response_format=response_format, ) response = self._request("post", request, "v1/chat/completions", stream=True) diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index 328f1c4a..c4d44790 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -1,7 +1,7 @@ import logging import os from abc import ABC -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union import orjson from httpx import Response @@ -12,7 +12,7 @@ MistralAPIStatusException, MistralException, ) -from mistralai.models.chat_completion import ChatMessage +from mistralai.models.chat_completion import ChatMessage, Function, ResponseFormat, ToolChoice logging.basicConfig( format="%(asctime)s %(levelname)s %(name)s: %(message)s", @@ -35,25 +35,76 @@ def __init__( self._api_key = api_key self._logger = logging.getLogger(__name__) + # For azure endpoints, we default to the mistral model + if "inference.azure.com" in self._endpoint: + self._default_model = "mistral" + # This should be automatically updated by the deploy script self._version = "0.0.1" + def _parse_tools(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + parsed_tools: List[Dict[str, Any]] = [] + for tool in tools: + if tool["type"] == "function": + parsed_function = {} + parsed_function["type"] = tool["type"] + if isinstance(tool["function"], Function): + parsed_function["function"] = tool["function"].model_dump(exclude_none=True) + else: + parsed_function["function"] = tool["function"] + + parsed_tools.append(parsed_function) + + return parsed_tools + + def _parse_tool_choice(self, tool_choice: Union[str, ToolChoice]) -> str: + if isinstance(tool_choice, ToolChoice): + return tool_choice.value + return 
tool_choice + + def _parse_response_format(self, response_format: Union[Dict[str, Any], ResponseFormat]) -> Dict[str, Any]: + if isinstance(response_format, ResponseFormat): + return response_format.model_dump(exclude_none=True) + return response_format + + def _parse_messages(self, messages: List[Any]) -> List[Dict[str, Any]]: + parsed_messages: List[Dict[str, Any]] = [] + for message in messages: + if isinstance(message, ChatMessage): + parsed_messages.append(message.model_dump(exclude_none=True)) + else: + parsed_messages.append(message) + + return parsed_messages + def _make_chat_request( self, - model: str, - messages: List[ChatMessage], + messages: List[Any], + model: Optional[str] = None, + tools: Optional[List[Dict[str, Any]]] = None, temperature: Optional[float] = None, max_tokens: Optional[int] = None, top_p: Optional[float] = None, random_seed: Optional[int] = None, stream: Optional[bool] = None, safe_prompt: Optional[bool] = False, + tool_choice: Optional[Union[str, ToolChoice]] = None, + response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, ) -> Dict[str, Any]: request_data: Dict[str, Any] = { - "model": model, - "messages": [msg.model_dump() for msg in messages], + "messages": self._parse_messages(messages), "safe_prompt": safe_prompt, } + + if model is not None: + request_data["model"] = model + else: + if self._default_model is None: + raise MistralException(message="model must be provided") + request_data["model"] = self._default_model + + if tools is not None: + request_data["tools"] = self._parse_tools(tools) if temperature is not None: request_data["temperature"] = temperature if max_tokens is not None: @@ -65,6 +116,11 @@ def _make_chat_request( if stream is not None: request_data["stream"] = stream + if tool_choice is not None: + request_data["tool_choice"] = self._parse_tool_choice(tool_choice) + if response_format is not None: + request_data["response_format"] = self._parse_response_format(response_format) + 
self._logger.debug(f"Chat request: {request_data}") return request_data diff --git a/src/mistralai/models/chat_completion.py b/src/mistralai/models/chat_completion.py index d0151924..3966e5bd 100644 --- a/src/mistralai/models/chat_completion.py +++ b/src/mistralai/models/chat_completion.py @@ -1,25 +1,64 @@ from enum import Enum -from typing import List, Optional +from typing import List, Optional, Union from pydantic import BaseModel from mistralai.models.common import UsageInfo +class Function(BaseModel): + name: str + description: str + parameters: dict + + +class ToolType(str, Enum): + function = "function" + + +class FunctionCall(BaseModel): + name: str + arguments: str + + +class ToolCall(BaseModel): + id: str = "null" + type: ToolType = ToolType.function + function: FunctionCall + + +class ResponseFormats(str, Enum): + text: str = "text" + json_object: str = "json_object" + + +class ToolChoice(str, Enum): + auto: str = "auto" + any: str = "any" + none: str = "none" + + +class ResponseFormat(BaseModel): + type: ResponseFormats = ResponseFormats.text + + class ChatMessage(BaseModel): role: str - content: str + content: Union[str, List[str]] + name: Optional[str] = None + tool_calls: Optional[List[ToolCall]] = None class DeltaMessage(BaseModel): role: Optional[str] = None content: Optional[str] = None + tool_calls: Optional[List[ToolCall]] = None class FinishReason(str, Enum): - stop: str = "stop" - length: str = "length" - error: str = "error" + stop = "stop" + length = "length" + error = "error" class ChatCompletionResponseStreamChoice(BaseModel): diff --git a/version.txt b/version.txt deleted file mode 100644 index d917d3e2..00000000 --- a/version.txt +++ /dev/null @@ -1 +0,0 @@ -0.1.2 From dd021cd05d79bee3f786679968147062454b24dc Mon Sep 17 00:00:00 2001 From: Lucas Pickup Date: Mon, 26 Feb 2024 12:29:09 -0800 Subject: [PATCH 029/223] Add `tool_calls` as valid FinishReason --- src/mistralai/models/chat_completion.py | 1 + 1 file changed, 1 insertion(+) diff 
--git a/src/mistralai/models/chat_completion.py b/src/mistralai/models/chat_completion.py index 3966e5bd..9f145bc9 100644 --- a/src/mistralai/models/chat_completion.py +++ b/src/mistralai/models/chat_completion.py @@ -59,6 +59,7 @@ class FinishReason(str, Enum): stop = "stop" length = "length" error = "error" + tool_calls = "tool_calls" class ChatCompletionResponseStreamChoice(BaseModel): From 4b98865f3e0c9696686a521f6f5c472f6aa17c0e Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Fri, 8 Mar 2024 17:26:06 +0000 Subject: [PATCH 030/223] Bump version file --- version.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 version.txt diff --git a/version.txt b/version.txt new file mode 100644 index 00000000..845639ee --- /dev/null +++ b/version.txt @@ -0,0 +1 @@ +0.1.4 From b4a25519f5c52bf13d99fbe0a0f50b44d7363194 Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Fri, 8 Mar 2024 17:26:06 +0000 Subject: [PATCH 031/223] Update version to 0.1.4 --- src/mistralai/async_client.py | 44 ++++++++++++++++++++++++++++++++--- src/mistralai/client.py | 40 ++++++++++++++++++++++++++++++- src/mistralai/client_base.py | 38 ------------------------------ version.txt | 1 - 4 files changed, 80 insertions(+), 43 deletions(-) delete mode 100644 version.txt diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index a39a90f4..23b80457 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -14,7 +14,7 @@ ) from mistralai.client_base import ClientBase -from mistralai.constants import ENDPOINT +from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES from mistralai.exceptions import ( MistralAPIException, MistralAPIStatusException, @@ -52,6 +52,44 @@ def __init__( async def close(self) -> None: await self._client.aclose() + async def _check_response_status_codes(self, response: Response) -> None: + if response.status_code in RETRY_STATUS_CODES: + raise MistralAPIStatusException.from_response( + response, + message=f"Status: 
{response.status_code}. Message: {response.text}", + ) + elif 400 <= response.status_code < 500: + if response.stream: + await response.aread() + raise MistralAPIException.from_response( + response, + message=f"Status: {response.status_code}. Message: {response.text}", + ) + elif response.status_code >= 500: + if response.stream: + await response.aread() + raise MistralException( + message=f"Status: {response.status_code}. Message: {response.text}", + ) + + async def _check_streaming_response(self, response: Response) -> None: + await self._check_response_status_codes(response) + + async def _check_response(self, response: Response) -> Dict[str, Any]: + await self._check_response_status_codes(response) + + json_response: Dict[str, Any] = response.json() + + if "object" not in json_response: + raise MistralException(message=f"Unexpected response: {json_response}") + if "error" == json_response["object"]: # has errors + raise MistralAPIException.from_response( + response, + message=json_response["message"], + ) + + return json_response + async def _request( self, method: str, @@ -82,7 +120,7 @@ async def _request( headers=headers, json=json, ) as response: - self._check_streaming_response(response) + await self._check_streaming_response(response) async for line in response.aiter_lines(): json_streamed_response = self._process_line(line) @@ -97,7 +135,7 @@ async def _request( json=json, ) - yield self._check_response(response) + yield await self._check_response(response) except ConnectError as e: raise MistralConnectionException(str(e)) from e diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 30596b58..aa8daeb5 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -7,7 +7,7 @@ from httpx import Client, ConnectError, HTTPTransport, RequestError, Response from mistralai.client_base import ClientBase -from mistralai.constants import ENDPOINT +from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES from mistralai.exceptions import ( 
MistralAPIException, MistralAPIStatusException, @@ -45,6 +45,44 @@ def __init__( def __del__(self) -> None: self._client.close() + def _check_response_status_codes(self, response: Response) -> None: + if response.status_code in RETRY_STATUS_CODES: + raise MistralAPIStatusException.from_response( + response, + message=f"Status: {response.status_code}. Message: {response.text}", + ) + elif 400 <= response.status_code < 500: + if response.stream: + response.read() + raise MistralAPIException.from_response( + response, + message=f"Status: {response.status_code}. Message: {response.text}", + ) + elif response.status_code >= 500: + if response.stream: + response.read() + raise MistralException( + message=f"Status: {response.status_code}. Message: {response.text}", + ) + + def _check_streaming_response(self, response: Response) -> None: + self._check_response_status_codes(response) + + def _check_response(self, response: Response) -> Dict[str, Any]: + self._check_response_status_codes(response) + + json_response: Dict[str, Any] = response.json() + + if "object" not in json_response: + raise MistralException(message=f"Unexpected response: {json_response}") + if "error" == json_response["object"]: # has errors + raise MistralAPIException.from_response( + response, + message=json_response["message"], + ) + + return json_response + def _request( self, method: str, diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index c4d44790..c114ea18 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -4,12 +4,8 @@ from typing import Any, Dict, List, Optional, Union import orjson -from httpx import Response -from mistralai.constants import RETRY_STATUS_CODES from mistralai.exceptions import ( - MistralAPIException, - MistralAPIStatusException, MistralException, ) from mistralai.models.chat_completion import ChatMessage, Function, ResponseFormat, ToolChoice @@ -125,40 +121,6 @@ def _make_chat_request( return request_data - def 
_check_response_status_codes(self, response: Response) -> None: - if response.status_code in RETRY_STATUS_CODES: - raise MistralAPIStatusException.from_response( - response, - message=f"Status: {response.status_code}. Message: {response.text}", - ) - elif 400 <= response.status_code < 500: - raise MistralAPIException.from_response( - response, - message=f"Status: {response.status_code}. Message: {response.text}", - ) - elif response.status_code >= 500: - raise MistralException( - message=f"Status: {response.status_code}. Message: {response.text}", - ) - - def _check_streaming_response(self, response: Response) -> None: - self._check_response_status_codes(response) - - def _check_response(self, response: Response) -> Dict[str, Any]: - self._check_response_status_codes(response) - - json_response: Dict[str, Any] = response.json() - - if "object" not in json_response: - raise MistralException(message=f"Unexpected response: {json_response}") - if "error" == json_response["object"]: # has errors - raise MistralAPIException.from_response( - response, - message=json_response["message"], - ) - - return json_response - def _process_line(self, line: str) -> Optional[Dict[str, Any]]: if line.startswith("data: "): line = line[6:].strip() diff --git a/version.txt b/version.txt deleted file mode 100644 index 845639ee..00000000 --- a/version.txt +++ /dev/null @@ -1 +0,0 @@ -0.1.4 From 895b1f19c3d30cf1b125f1f1fbe00a0a79ad5bab Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Mon, 11 Mar 2024 12:44:55 +0000 Subject: [PATCH 032/223] Bump version file --- version.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 version.txt diff --git a/version.txt b/version.txt new file mode 100644 index 00000000..c946ee61 --- /dev/null +++ b/version.txt @@ -0,0 +1 @@ +0.1.6 From c518a1494c2918949430c5c78c0b2a52120b3568 Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Mon, 11 Mar 2024 12:44:55 +0000 Subject: [PATCH 033/223] Update version to 0.1.6 --- version.txt | 1 - 1 file changed, 
1 deletion(-) delete mode 100644 version.txt diff --git a/version.txt b/version.txt deleted file mode 100644 index c946ee61..00000000 --- a/version.txt +++ /dev/null @@ -1 +0,0 @@ -0.1.6 From 80c7951bad83338641d5e89684f841ce1cac938f Mon Sep 17 00:00:00 2001 From: Chris Bamford Date: Mon, 11 Mar 2024 13:50:10 +0100 Subject: [PATCH 034/223] Delete .github/workflows directory --- .github/workflows/build_publish.yaml | 87 ---------------------------- 1 file changed, 87 deletions(-) delete mode 100644 .github/workflows/build_publish.yaml diff --git a/.github/workflows/build_publish.yaml b/.github/workflows/build_publish.yaml deleted file mode 100644 index ff5a3af2..00000000 --- a/.github/workflows/build_publish.yaml +++ /dev/null @@ -1,87 +0,0 @@ -name: Build and Publish - -on: - push: - branches: ["main"] - - # We only deploy on tags and main branch - tags: - # Only run on tags that match the following regex - # This will match tags like 1.0.0, 1.0.1, etc. - - "[0-9]+.[0-9]+.[0-9]+" - - # Build on pull requests - pull_request: - -jobs: - lint_and_test: - runs-on: ubuntu-latest - - steps: - # Checkout the repository - - name: Checkout - uses: actions/checkout@v4 - - # Set python version to 3.11 - - name: set python version - uses: actions/setup-python@v4 - with: - python-version: 3.11 - - # Install Build stuff - - name: Install Dependencies - run: | - pip install poetry \ - && poetry config virtualenvs.create false \ - && poetry install - - # Ruff - - name: Ruff check - run: | - poetry run ruff . - - # Mypy - - name: Mypy Check - run: | - poetry run mypy . - - # Tests - - name: Run Tests - run: | - poetry run pytest . 
- - publish: - if: startsWith(github.ref, 'refs/tags') - runs-on: ubuntu-latest - needs: lint_and_test - steps: - - # Checkout the repository - - name: Checkout - uses: actions/checkout@v4 - - # Set python version to 3.11 - - name: set python version - uses: actions/setup-python@v4 - with: - python-version: 3.11 - - # Install Build stuff - - name: Install Dependencies - run: | - pip install poetry \ - && poetry config virtualenvs.create false \ - && poetry install - - # build package using poetry - - name: Build Package - run: | - poetry version ${{ github.ref_name }} - sed -i 's/self._version = "0.0.1"/self._version = "${{ github.ref_name }}"/g' src/mistralai/client_base.py - poetry build - - # Publish to PyPi - - name: Pypi publish (prod) - run: | - poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }} - poetry publish \ No newline at end of file From 63be8a2f01d4a0285af848fc93016ecf0bdfaa5d Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Tue, 26 Mar 2024 15:00:34 +0000 Subject: [PATCH 035/223] Bump version file --- version.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 version.txt diff --git a/version.txt b/version.txt new file mode 100644 index 00000000..699c6c6d --- /dev/null +++ b/version.txt @@ -0,0 +1 @@ +0.1.8 From 0d55818364fd2a7ca67401dd550632f8ca70e5af Mon Sep 17 00:00:00 2001 From: GitHub Actions Date: Tue, 26 Mar 2024 15:00:34 +0000 Subject: [PATCH 036/223] Update version to 0.1.8 --- examples/function_calling.py | 36 +- examples/json_format.py | 2 +- poetry.lock | 681 ++++++++++++----------------------- pyproject.toml | 2 - version.txt | 1 - 5 files changed, 239 insertions(+), 483 deletions(-) delete mode 100644 version.txt diff --git a/examples/function_calling.py b/examples/function_calling.py index 99813f72..a7b94afb 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -1,8 +1,8 @@ import functools import json import os +from typing import Dict, List -import pandas as pd from mistralai.client import 
MistralClient from mistralai.models.chat_completion import ChatMessage, Function @@ -15,30 +15,27 @@ "payment_status": ["Paid", "Unpaid", "Paid", "Paid", "Pending"], } -# Create DataFrame -df = pd.DataFrame(data) +n_rows = len(data["transaction_id"]) +def retrieve_payment_status(data: Dict[str,List], transaction_id: str) -> str: + for i, r in enumerate(data["transaction_id"]): + if r == transaction_id: + return json.dumps({"status": data["payment_status"][i]}) + else: + return json.dumps({"status": "Error - transaction id not found"}) -def retrieve_payment_status(df: pd.DataFrame, transaction_id: str) -> str: - if transaction_id in df.transaction_id.values: - return json.dumps({"status": df[df.transaction_id == transaction_id].payment_status.item()}) - else: - return json.dumps({"status": "error - transaction id not found."}) - - -def retrieve_payment_date(df: pd.DataFrame, transaction_id: str) -> str: - if transaction_id in df.transaction_id.values: - return json.dumps({"date": df[df.transaction_id == transaction_id].payment_date.item()}) - else: - return json.dumps({"status": "error - transaction id not found."}) - +def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: + for i, r in enumerate(data["transaction_id"]): + if r == transaction_id: + return json.dumps({"date": data["payment_date"][i]}) + else: + return json.dumps({"status": "Error - transaction id not found"}) names_to_functions = { - "retrieve_payment_status": functools.partial(retrieve_payment_status, df=df), - "retrieve_payment_date": functools.partial(retrieve_payment_date, df=df), + "retrieve_payment_status": functools.partial(retrieve_payment_status, data=data), + "retrieve_payment_date": functools.partial(retrieve_payment_date, data=data) } - tools = [ { "type": "function", @@ -66,7 +63,6 @@ def retrieve_payment_date(df: pd.DataFrame, transaction_id: str) -> str: }, ] - api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-large-latest" diff --git 
a/examples/json_format.py b/examples/json_format.py index 62fd6749..5c03d356 100755 --- a/examples/json_format.py +++ b/examples/json_format.py @@ -15,7 +15,7 @@ def main(): chat_response = client.chat( model=model, response_format={"type": "json_object"}, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[ChatMessage(role="user", content="What is the best French cheese? Answer shortly in JSON.")], ) print(chat_response.choices[0].message.content) diff --git a/poetry.lock b/poetry.lock index 604edb75..364a1bed 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -13,13 +13,13 @@ files = [ [[package]] name = "anyio" -version = "4.2.0" +version = "4.3.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.2.0-py3-none-any.whl", hash = "sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee"}, - {file = "anyio-4.2.0.tar.gz", hash = "sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f"}, + {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, + {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, ] [package.dependencies] @@ -35,13 +35,13 @@ trio = ["trio (>=0.23)"] [[package]] name = "certifi" -version = "2023.11.17" +version = "2024.2.2" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"}, - {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"}, + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, ] [[package]] @@ -82,13 +82,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.2" +version = "1.0.4" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.2-py3-none-any.whl", hash = "sha256:096cc05bca73b8e459a1fc3dcf585148f63e534eae4339559c9b8a8d6399acc7"}, - {file = "httpcore-1.0.2.tar.gz", hash = "sha256:9fc092e4799b26174648e54b74ed5f683132a464e95643b226e00c2ed2fa6535"}, + {file = "httpcore-1.0.4-py3-none-any.whl", hash = "sha256:ac418c1db41bade2ad53ae2f3834a3a0f5ae76b56cf5aa497d2d033384fc7d73"}, + {file = "httpcore-1.0.4.tar.gz", hash = "sha256:cb2839ccfcba0d2d3c1131d3c3e26dfc327326fbe7a5dc0dbfe9f6c9151bb022"}, ] [package.dependencies] @@ -99,7 +99,7 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.23.0)"] +trio = ["trio (>=0.22.0,<0.25.0)"] [[package]] name = "httpx" @@ -149,38 +149,38 @@ files = [ [[package]] name = "mypy" -version = "1.7.1" +version = "1.9.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.7.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:12cce78e329838d70a204293e7b29af9faa3ab14899aec397798a4b41be7f340"}, - {file = "mypy-1.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1484b8fa2c10adf4474f016e09d7a159602f3239075c7bf9f1627f5acf40ad49"}, - 
{file = "mypy-1.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31902408f4bf54108bbfb2e35369877c01c95adc6192958684473658c322c8a5"}, - {file = "mypy-1.7.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f2c2521a8e4d6d769e3234350ba7b65ff5d527137cdcde13ff4d99114b0c8e7d"}, - {file = "mypy-1.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:fcd2572dd4519e8a6642b733cd3a8cfc1ef94bafd0c1ceed9c94fe736cb65b6a"}, - {file = "mypy-1.7.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4b901927f16224d0d143b925ce9a4e6b3a758010673eeded9b748f250cf4e8f7"}, - {file = "mypy-1.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2f7f6985d05a4e3ce8255396df363046c28bea790e40617654e91ed580ca7c51"}, - {file = "mypy-1.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:944bdc21ebd620eafefc090cdf83158393ec2b1391578359776c00de00e8907a"}, - {file = "mypy-1.7.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9c7ac372232c928fff0645d85f273a726970c014749b924ce5710d7d89763a28"}, - {file = "mypy-1.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:f6efc9bd72258f89a3816e3a98c09d36f079c223aa345c659622f056b760ab42"}, - {file = "mypy-1.7.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6dbdec441c60699288adf051f51a5d512b0d818526d1dcfff5a41f8cd8b4aaf1"}, - {file = "mypy-1.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4fc3d14ee80cd22367caaaf6e014494415bf440980a3045bf5045b525680ac33"}, - {file = "mypy-1.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c6e4464ed5f01dc44dc9821caf67b60a4e5c3b04278286a85c067010653a0eb"}, - {file = "mypy-1.7.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:d9b338c19fa2412f76e17525c1b4f2c687a55b156320acb588df79f2e6fa9fea"}, - {file = "mypy-1.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:204e0d6de5fd2317394a4eff62065614c4892d5a4d1a7ee55b765d7a3d9e3f82"}, - {file = "mypy-1.7.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:84860e06ba363d9c0eeabd45ac0fde4b903ad7aa4f93cd8b648385a888e23200"}, - {file = "mypy-1.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8c5091ebd294f7628eb25ea554852a52058ac81472c921150e3a61cdd68f75a7"}, - {file = "mypy-1.7.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40716d1f821b89838589e5b3106ebbc23636ffdef5abc31f7cd0266db936067e"}, - {file = "mypy-1.7.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cf3f0c5ac72139797953bd50bc6c95ac13075e62dbfcc923571180bebb662e9"}, - {file = "mypy-1.7.1-cp38-cp38-win_amd64.whl", hash = "sha256:78e25b2fd6cbb55ddfb8058417df193f0129cad5f4ee75d1502248e588d9e0d7"}, - {file = "mypy-1.7.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:75c4d2a6effd015786c87774e04331b6da863fc3fc4e8adfc3b40aa55ab516fe"}, - {file = "mypy-1.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2643d145af5292ee956aa0a83c2ce1038a3bdb26e033dadeb2f7066fb0c9abce"}, - {file = "mypy-1.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75aa828610b67462ffe3057d4d8a4112105ed211596b750b53cbfe182f44777a"}, - {file = "mypy-1.7.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ee5d62d28b854eb61889cde4e1dbc10fbaa5560cb39780c3995f6737f7e82120"}, - {file = "mypy-1.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:72cf32ce7dd3562373f78bd751f73c96cfb441de147cc2448a92c1a308bd0ca6"}, - {file = "mypy-1.7.1-py3-none-any.whl", hash = "sha256:f7c5d642db47376a0cc130f0de6d055056e010debdaf0707cd2b0fc7e7ef30ea"}, - {file = "mypy-1.7.1.tar.gz", hash = "sha256:fcb6d9afb1b6208b4c712af0dafdc650f518836065df0d4fb1d800f5d6773db2"}, + {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, + {file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, + {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, + {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, + {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, + {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, + {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, + {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, + {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, + {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, + {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, + {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, + {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, + {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, + {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, + {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, + {file = 
"mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, + {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, + {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, + {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, + {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, + {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, + {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, + {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, + {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, + {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, + {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, ] [package.dependencies] @@ -205,270 +205,105 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] -[[package]] -name = "numpy" -version = "1.26.4" -description = "Fundamental package for array computing in Python" -optional = false -python-versions = ">=3.9" -files = [ - {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, - {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, - {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, - {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, - {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, - {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, - {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, - {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, - {file = 
"numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, - {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, - {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, - {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, -] - [[package]] name = "orjson" -version = "3.9.10" +version = "3.9.15" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.9.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c18a4da2f50050a03d1da5317388ef84a16013302a5281d6f64e4a3f406aabc4"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5148bab4d71f58948c7c39d12b14a9005b6ab35a0bdf317a8ade9a9e4d9d0bd5"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:4cf7837c3b11a2dfb589f8530b3cff2bd0307ace4c301e8997e95c7468c1378e"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c62b6fa2961a1dcc51ebe88771be5319a93fd89bd247c9ddf732bc250507bc2b"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb3922a7a804755bbe6b5be9b312e746137a03600f488290318936c1a2d4dc"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1234dc92d011d3554d929b6cf058ac4a24d188d97be5e04355f1b9223e98bbe9"}, - {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:06ad5543217e0e46fd7ab7ea45d506c76f878b87b1b4e369006bdb01acc05a83"}, - {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4fd72fab7bddce46c6826994ce1e7de145ae1e9e106ebb8eb9ce1393ca01444d"}, - {file = "orjson-3.9.10-cp310-none-win32.whl", hash = "sha256:b5b7d4a44cc0e6ff98da5d56cde794385bdd212a86563ac321ca64d7f80c80d1"}, - {file = "orjson-3.9.10-cp310-none-win_amd64.whl", hash = "sha256:61804231099214e2f84998316f3238c4c2c4aaec302df12b21a64d72e2a135c7"}, - {file = "orjson-3.9.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cff7570d492bcf4b64cc862a6e2fb77edd5e5748ad715f487628f102815165e9"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed8bc367f725dfc5cabeed1ae079d00369900231fbb5a5280cf0736c30e2adf7"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c812312847867b6335cfb264772f2a7e85b3b502d3a6b0586aa35e1858528ab1"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edd2856611e5050004f4722922b7b1cd6268da34102667bd49d2a2b18bafb81"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:674eb520f02422546c40401f4efaf8207b5e29e420c17051cddf6c02783ff5ca"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0dc4310da8b5f6415949bd5ef937e60aeb0eb6b16f95041b5e43e6200821fb"}, - {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e99c625b8c95d7741fe057585176b1b8783d46ed4b8932cf98ee145c4facf499"}, - {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec6f18f96b47299c11203edfbdc34e1b69085070d9a3d1f302810cc23ad36bf3"}, - {file = "orjson-3.9.10-cp311-none-win32.whl", hash = "sha256:ce0a29c28dfb8eccd0f16219360530bc3cfdf6bf70ca384dacd36e6c650ef8e8"}, - {file = "orjson-3.9.10-cp311-none-win_amd64.whl", hash = "sha256:cf80b550092cc480a0cbd0750e8189247ff45457e5a023305f7ef1bcec811616"}, - {file = "orjson-3.9.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:602a8001bdf60e1a7d544be29c82560a7b49319a0b31d62586548835bbe2c862"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f295efcd47b6124b01255d1491f9e46f17ef40d3d7eabf7364099e463fb45f0f"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92af0d00091e744587221e79f68d617b432425a7e59328ca4c496f774a356071"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5a02360e73e7208a872bf65a7554c9f15df5fe063dc047f79738998b0506a14"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:858379cbb08d84fe7583231077d9a36a1a20eb72f8c9076a45df8b083724ad1d"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666c6fdcaac1f13eb982b649e1c311c08d7097cbda24f32612dae43648d8db8d"}, - {file = "orjson-3.9.10-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3fb205ab52a2e30354640780ce4587157a9563a68c9beaf52153e1cea9aa0921"}, - {file = 
"orjson-3.9.10-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7ec960b1b942ee3c69323b8721df2a3ce28ff40e7ca47873ae35bfafeb4555ca"}, - {file = "orjson-3.9.10-cp312-none-win_amd64.whl", hash = "sha256:3e892621434392199efb54e69edfff9f699f6cc36dd9553c5bf796058b14b20d"}, - {file = "orjson-3.9.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8b9ba0ccd5a7f4219e67fbbe25e6b4a46ceef783c42af7dbc1da548eb28b6531"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e2ecd1d349e62e3960695214f40939bbfdcaeaaa62ccc638f8e651cf0970e5f"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f433be3b3f4c66016d5a20e5b4444ef833a1f802ced13a2d852c637f69729c1"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4689270c35d4bb3102e103ac43c3f0b76b169760aff8bcf2d401a3e0e58cdb7f"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bd176f528a8151a6efc5359b853ba3cc0e82d4cd1fab9c1300c5d957dc8f48c"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a2ce5ea4f71681623f04e2b7dadede3c7435dfb5e5e2d1d0ec25b35530e277b"}, - {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:49f8ad582da6e8d2cf663c4ba5bf9f83cc052570a3a767487fec6af839b0e777"}, - {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2a11b4b1a8415f105d989876a19b173f6cdc89ca13855ccc67c18efbd7cbd1f8"}, - {file = "orjson-3.9.10-cp38-none-win32.whl", hash = "sha256:a353bf1f565ed27ba71a419b2cd3db9d6151da426b61b289b6ba1422a702e643"}, - {file = "orjson-3.9.10-cp38-none-win_amd64.whl", hash = "sha256:e28a50b5be854e18d54f75ef1bb13e1abf4bc650ab9d635e4258c58e71eb6ad5"}, - {file = "orjson-3.9.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = 
"sha256:ee5926746232f627a3be1cc175b2cfad24d0170d520361f4ce3fa2fd83f09e1d"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a73160e823151f33cdc05fe2cea557c5ef12fdf276ce29bb4f1c571c8368a60"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c338ed69ad0b8f8f8920c13f529889fe0771abbb46550013e3c3d01e5174deef"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5869e8e130e99687d9e4be835116c4ebd83ca92e52e55810962446d841aba8de"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2c1e559d96a7f94a4f581e2a32d6d610df5840881a8cba8f25e446f4d792df3"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a3a3a72c9811b56adf8bcc829b010163bb2fc308877e50e9910c9357e78521"}, - {file = "orjson-3.9.10-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7f8fb7f5ecf4f6355683ac6881fd64b5bb2b8a60e3ccde6ff799e48791d8f864"}, - {file = "orjson-3.9.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c943b35ecdf7123b2d81d225397efddf0bce2e81db2f3ae633ead38e85cd5ade"}, - {file = "orjson-3.9.10-cp39-none-win32.whl", hash = "sha256:fb0b361d73f6b8eeceba47cd37070b5e6c9de5beaeaa63a1cb35c7e1a73ef088"}, - {file = "orjson-3.9.10-cp39-none-win_amd64.whl", hash = "sha256:b90f340cb6397ec7a854157fac03f0c82b744abdd1c0941a024c3c29d1340aff"}, - {file = "orjson-3.9.10.tar.gz", hash = "sha256:9ebbdbd6a046c304b1845e96fbcc5559cd296b4dfd3ad2509e33c4d9ce07d6a1"}, + {file = "orjson-3.9.15-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d61f7ce4727a9fa7680cd6f3986b0e2c732639f46a5e0156e550e35258aa313a"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4feeb41882e8aa17634b589533baafdceb387e01e117b1ec65534ec724023d04"}, + {file = 
"orjson-3.9.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fbbeb3c9b2edb5fd044b2a070f127a0ac456ffd079cb82746fc84af01ef021a4"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b66bcc5670e8a6b78f0313bcb74774c8291f6f8aeef10fe70e910b8040f3ab75"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2973474811db7b35c30248d1129c64fd2bdf40d57d84beed2a9a379a6f57d0ab"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fe41b6f72f52d3da4db524c8653e46243c8c92df826ab5ffaece2dba9cccd58"}, + {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4228aace81781cc9d05a3ec3a6d2673a1ad0d8725b4e915f1089803e9efd2b99"}, + {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f7b65bfaf69493c73423ce9db66cfe9138b2f9ef62897486417a8fcb0a92bfe"}, + {file = "orjson-3.9.15-cp310-none-win32.whl", hash = "sha256:2d99e3c4c13a7b0fb3792cc04c2829c9db07838fb6973e578b85c1745e7d0ce7"}, + {file = "orjson-3.9.15-cp310-none-win_amd64.whl", hash = "sha256:b725da33e6e58e4a5d27958568484aa766e825e93aa20c26c91168be58e08cbb"}, + {file = "orjson-3.9.15-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c8e8fe01e435005d4421f183038fc70ca85d2c1e490f51fb972db92af6e047c2"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87f1097acb569dde17f246faa268759a71a2cb8c96dd392cd25c668b104cad2f"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff0f9913d82e1d1fadbd976424c316fbc4d9c525c81d047bbdd16bd27dd98cfc"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8055ec598605b0077e29652ccfe9372247474375e0e3f5775c91d9434e12d6b1"}, + {file = 
"orjson-3.9.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6768a327ea1ba44c9114dba5fdda4a214bdb70129065cd0807eb5f010bfcbb5"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12365576039b1a5a47df01aadb353b68223da413e2e7f98c02403061aad34bde"}, + {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:71c6b009d431b3839d7c14c3af86788b3cfac41e969e3e1c22f8a6ea13139404"}, + {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e18668f1bd39e69b7fed19fa7cd1cd110a121ec25439328b5c89934e6d30d357"}, + {file = "orjson-3.9.15-cp311-none-win32.whl", hash = "sha256:62482873e0289cf7313461009bf62ac8b2e54bc6f00c6fabcde785709231a5d7"}, + {file = "orjson-3.9.15-cp311-none-win_amd64.whl", hash = "sha256:b3d336ed75d17c7b1af233a6561cf421dee41d9204aa3cfcc6c9c65cd5bb69a8"}, + {file = "orjson-3.9.15-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:82425dd5c7bd3adfe4e94c78e27e2fa02971750c2b7ffba648b0f5d5cc016a73"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c51378d4a8255b2e7c1e5cc430644f0939539deddfa77f6fac7b56a9784160a"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6ae4e06be04dc00618247c4ae3f7c3e561d5bc19ab6941427f6d3722a0875ef7"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcef128f970bb63ecf9a65f7beafd9b55e3aaf0efc271a4154050fc15cdb386e"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b72758f3ffc36ca566ba98a8e7f4f373b6c17c646ff8ad9b21ad10c29186f00d"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c57bc7b946cf2efa67ac55766e41764b66d40cbd9489041e637c1304400494"}, + {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:946c3a1ef25338e78107fba746f299f926db408d34553b4754e90a7de1d44068"}, + {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2f256d03957075fcb5923410058982aea85455d035607486ccb847f095442bda"}, + {file = "orjson-3.9.15-cp312-none-win_amd64.whl", hash = "sha256:5bb399e1b49db120653a31463b4a7b27cf2fbfe60469546baf681d1b39f4edf2"}, + {file = "orjson-3.9.15-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b17f0f14a9c0ba55ff6279a922d1932e24b13fc218a3e968ecdbf791b3682b25"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f6cbd8e6e446fb7e4ed5bac4661a29e43f38aeecbf60c4b900b825a353276a1"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76bc6356d07c1d9f4b782813094d0caf1703b729d876ab6a676f3aaa9a47e37c"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdfa97090e2d6f73dced247a2f2d8004ac6449df6568f30e7fa1a045767c69a6"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7413070a3e927e4207d00bd65f42d1b780fb0d32d7b1d951f6dc6ade318e1b5a"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9cf1596680ac1f01839dba32d496136bdd5d8ffb858c280fa82bbfeb173bdd40"}, + {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:809d653c155e2cc4fd39ad69c08fdff7f4016c355ae4b88905219d3579e31eb7"}, + {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:920fa5a0c5175ab14b9c78f6f820b75804fb4984423ee4c4f1e6d748f8b22bc1"}, + {file = "orjson-3.9.15-cp38-none-win32.whl", hash = "sha256:2b5c0f532905e60cf22a511120e3719b85d9c25d0e1c2a8abb20c4dede3b05a5"}, + {file = "orjson-3.9.15-cp38-none-win_amd64.whl", hash = "sha256:67384f588f7f8daf040114337d34a5188346e3fae6c38b6a19a2fe8c663a2f9b"}, + {file = 
"orjson-3.9.15-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6fc2fe4647927070df3d93f561d7e588a38865ea0040027662e3e541d592811e"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34cbcd216e7af5270f2ffa63a963346845eb71e174ea530867b7443892d77180"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f541587f5c558abd93cb0de491ce99a9ef8d1ae29dd6ab4dbb5a13281ae04cbd"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92255879280ef9c3c0bcb327c5a1b8ed694c290d61a6a532458264f887f052cb"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:05a1f57fb601c426635fcae9ddbe90dfc1ed42245eb4c75e4960440cac667262"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ede0bde16cc6e9b96633df1631fbcd66491d1063667f260a4f2386a098393790"}, + {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e88b97ef13910e5f87bcbc4dd7979a7de9ba8702b54d3204ac587e83639c0c2b"}, + {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57d5d8cf9c27f7ef6bc56a5925c7fbc76b61288ab674eb352c26ac780caa5b10"}, + {file = "orjson-3.9.15-cp39-none-win32.whl", hash = "sha256:001f4eb0ecd8e9ebd295722d0cbedf0748680fb9998d3993abaed2f40587257a"}, + {file = "orjson-3.9.15-cp39-none-win_amd64.whl", hash = "sha256:ea0b183a5fe6b2b45f3b854b0d19c4e932d6f5934ae1f723b07cf9560edd4ec7"}, + {file = "orjson-3.9.15.tar.gz", hash = "sha256:95cae920959d772f30ab36d3b25f83bb0f3be671e986c72ce22f8fa700dae061"}, ] [[package]] name = "packaging" -version = "23.2" +version = "24.0" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = 
"packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, -] - -[[package]] -name = "pandas" -version = "2.2.0" -description = "Powerful data structures for data analysis, time series, and statistics" -optional = false -python-versions = ">=3.9" -files = [ - {file = "pandas-2.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8108ee1712bb4fa2c16981fba7e68b3f6ea330277f5ca34fa8d557e986a11670"}, - {file = "pandas-2.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:736da9ad4033aeab51d067fc3bd69a0ba36f5a60f66a527b3d72e2030e63280a"}, - {file = "pandas-2.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38e0b4fc3ddceb56ec8a287313bc22abe17ab0eb184069f08fc6a9352a769b18"}, - {file = "pandas-2.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20404d2adefe92aed3b38da41d0847a143a09be982a31b85bc7dd565bdba0f4e"}, - {file = "pandas-2.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7ea3ee3f125032bfcade3a4cf85131ed064b4f8dd23e5ce6fa16473e48ebcaf5"}, - {file = "pandas-2.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f9670b3ac00a387620489dfc1bca66db47a787f4e55911f1293063a78b108df1"}, - {file = "pandas-2.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:5a946f210383c7e6d16312d30b238fd508d80d927014f3b33fb5b15c2f895430"}, - {file = "pandas-2.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a1b438fa26b208005c997e78672f1aa8138f67002e833312e6230f3e57fa87d5"}, - {file = "pandas-2.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8ce2fbc8d9bf303ce54a476116165220a1fedf15985b09656b4b4275300e920b"}, - {file = "pandas-2.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2707514a7bec41a4ab81f2ccce8b382961a29fbe9492eab1305bb075b2b1ff4f"}, - {file = "pandas-2.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85793cbdc2d5bc32620dc8ffa715423f0c680dacacf55056ba13454a5be5de88"}, - {file = 
"pandas-2.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:cfd6c2491dc821b10c716ad6776e7ab311f7df5d16038d0b7458bc0b67dc10f3"}, - {file = "pandas-2.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a146b9dcacc3123aa2b399df1a284de5f46287a4ab4fbfc237eac98a92ebcb71"}, - {file = "pandas-2.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbc1b53c0e1fdf16388c33c3cca160f798d38aea2978004dd3f4d3dec56454c9"}, - {file = "pandas-2.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a41d06f308a024981dcaa6c41f2f2be46a6b186b902c94c2674e8cb5c42985bc"}, - {file = "pandas-2.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:159205c99d7a5ce89ecfc37cb08ed179de7783737cea403b295b5eda8e9c56d1"}, - {file = "pandas-2.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1e1f3861ea9132b32f2133788f3b14911b68102d562715d71bd0013bc45440"}, - {file = "pandas-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:761cb99b42a69005dec2b08854fb1d4888fdf7b05db23a8c5a099e4b886a2106"}, - {file = "pandas-2.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a20628faaf444da122b2a64b1e5360cde100ee6283ae8effa0d8745153809a2e"}, - {file = "pandas-2.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f5be5d03ea2073627e7111f61b9f1f0d9625dc3c4d8dda72cc827b0c58a1d042"}, - {file = "pandas-2.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:a626795722d893ed6aacb64d2401d017ddc8a2341b49e0384ab9bf7112bdec30"}, - {file = "pandas-2.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9f66419d4a41132eb7e9a73dcec9486cf5019f52d90dd35547af11bc58f8637d"}, - {file = "pandas-2.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:57abcaeda83fb80d447f28ab0cc7b32b13978f6f733875ebd1ed14f8fbc0f4ab"}, - {file = "pandas-2.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e60f1f7dba3c2d5ca159e18c46a34e7ca7247a73b5dd1a22b6d59707ed6b899a"}, - {file = 
"pandas-2.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb61dc8567b798b969bcc1fc964788f5a68214d333cade8319c7ab33e2b5d88a"}, - {file = "pandas-2.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:52826b5f4ed658fa2b729264d63f6732b8b29949c7fd234510d57c61dbeadfcd"}, - {file = "pandas-2.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bde2bc699dbd80d7bc7f9cab1e23a95c4375de615860ca089f34e7c64f4a8de7"}, - {file = "pandas-2.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:3de918a754bbf2da2381e8a3dcc45eede8cd7775b047b923f9006d5f876802ae"}, - {file = "pandas-2.2.0.tar.gz", hash = "sha256:30b83f7c3eb217fb4d1b494a57a2fda5444f17834f5df2de6b2ffff68dc3c8e2"}, -] - -[package.dependencies] -numpy = [ - {version = ">=1.22.4,<2", markers = "python_version < \"3.11\""}, - {version = ">=1.23.2,<2", markers = "python_version == \"3.11\""}, - {version = ">=1.26.0,<2", markers = "python_version >= \"3.12\""}, + {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, + {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, ] -python-dateutil = ">=2.8.2" -pytz = ">=2020.1" -tzdata = ">=2022.7" - -[package.extras] -all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 (>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", 
"scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] -aws = ["s3fs (>=2022.11.0)"] -clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] -compression = ["zstandard (>=0.19.0)"] -computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] -consortium-standard = ["dataframe-api-compat (>=0.1.7)"] -excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] -feather = ["pyarrow (>=10.0.1)"] -fss = ["fsspec (>=2022.11.0)"] -gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] -hdf5 = ["tables (>=3.8.0)"] -html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] -mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] -output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] -parquet = ["pyarrow (>=10.0.1)"] -performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] -plot = ["matplotlib (>=3.6.3)"] -postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] -spss = ["pyreadstat (>=1.2.0)"] -sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] -test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] -xml = ["lxml (>=4.9.2)"] [[package]] name = "pluggy" -version = "1.3.0" +version = "1.4.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, - {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, + {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, + {file = "pluggy-1.4.0.tar.gz", hash = 
"sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, ] [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] -[[package]] -name = "pyarrow" -version = "15.0.0" -description = "Python library for Apache Arrow" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pyarrow-15.0.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:0a524532fd6dd482edaa563b686d754c70417c2f72742a8c990b322d4c03a15d"}, - {file = "pyarrow-15.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:60a6bdb314affa9c2e0d5dddf3d9cbb9ef4a8dddaa68669975287d47ece67642"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66958fd1771a4d4b754cd385835e66a3ef6b12611e001d4e5edfcef5f30391e2"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f500956a49aadd907eaa21d4fff75f73954605eaa41f61cb94fb008cf2e00c6"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6f87d9c4f09e049c2cade559643424da84c43a35068f2a1c4653dc5b1408a929"}, - {file = "pyarrow-15.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:85239b9f93278e130d86c0e6bb455dcb66fc3fd891398b9d45ace8799a871a1e"}, - {file = "pyarrow-15.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:5b8d43e31ca16aa6e12402fcb1e14352d0d809de70edd185c7650fe80e0769e3"}, - {file = "pyarrow-15.0.0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:fa7cd198280dbd0c988df525e50e35b5d16873e2cdae2aaaa6363cdb64e3eec5"}, - {file = "pyarrow-15.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8780b1a29d3c8b21ba6b191305a2a607de2e30dab399776ff0aa09131e266340"}, - {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0ec198ccc680f6c92723fadcb97b74f07c45ff3fdec9dd765deb04955ccf19"}, - {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:036a7209c235588c2f07477fe75c07e6caced9b7b61bb897c8d4e52c4b5f9555"}, - {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2bd8a0e5296797faf9a3294e9fa2dc67aa7f10ae2207920dbebb785c77e9dbe5"}, - {file = "pyarrow-15.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:e8ebed6053dbe76883a822d4e8da36860f479d55a762bd9e70d8494aed87113e"}, - {file = "pyarrow-15.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:17d53a9d1b2b5bd7d5e4cd84d018e2a45bc9baaa68f7e6e3ebed45649900ba99"}, - {file = "pyarrow-15.0.0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:9950a9c9df24090d3d558b43b97753b8f5867fb8e521f29876aa021c52fda351"}, - {file = "pyarrow-15.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:003d680b5e422d0204e7287bb3fa775b332b3fce2996aa69e9adea23f5c8f970"}, - {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f75fce89dad10c95f4bf590b765e3ae98bcc5ba9f6ce75adb828a334e26a3d40"}, - {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca9cb0039923bec49b4fe23803807e4ef39576a2bec59c32b11296464623dc2"}, - {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:9ed5a78ed29d171d0acc26a305a4b7f83c122d54ff5270810ac23c75813585e4"}, - {file = "pyarrow-15.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6eda9e117f0402dfcd3cd6ec9bfee89ac5071c48fc83a84f3075b60efa96747f"}, - {file = "pyarrow-15.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9a3a6180c0e8f2727e6f1b1c87c72d3254cac909e609f35f22532e4115461177"}, - {file = "pyarrow-15.0.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:19a8918045993349b207de72d4576af0191beef03ea655d8bdb13762f0cd6eac"}, - {file = "pyarrow-15.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0ec076b32bacb6666e8813a22e6e5a7ef1314c8069d4ff345efa6246bc38593"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5db1769e5d0a77eb92344c7382d6543bea1164cca3704f84aa44e26c67e320fb"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2617e3bf9df2a00020dd1c1c6dce5cc343d979efe10bc401c0632b0eef6ef5b"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:d31c1d45060180131caf10f0f698e3a782db333a422038bf7fe01dace18b3a31"}, - {file = "pyarrow-15.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:c8c287d1d479de8269398b34282e206844abb3208224dbdd7166d580804674b7"}, - {file = "pyarrow-15.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:07eb7f07dc9ecbb8dace0f58f009d3a29ee58682fcdc91337dfeb51ea618a75b"}, - {file = "pyarrow-15.0.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:47af7036f64fce990bb8a5948c04722e4e3ea3e13b1007ef52dfe0aa8f23cf7f"}, - {file = "pyarrow-15.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93768ccfff85cf044c418bfeeafce9a8bb0cee091bd8fd19011aff91e58de540"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6ee87fd6892700960d90abb7b17a72a5abb3b64ee0fe8db6c782bcc2d0dc0b4"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:001fca027738c5f6be0b7a3159cc7ba16a5c52486db18160909a0831b063c4e4"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:d1c48648f64aec09accf44140dccb92f4f94394b8d79976c426a5b79b11d4fa7"}, - {file = "pyarrow-15.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:972a0141be402bb18e3201448c8ae62958c9c7923dfaa3b3d4530c835ac81aed"}, - {file = "pyarrow-15.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:f01fc5cf49081426429127aa2d427d9d98e1cb94a32cb961d583a70b7c4504e6"}, - {file = "pyarrow-15.0.0.tar.gz", hash = "sha256:876858f549d540898f927eba4ef77cd549ad8d24baa3207cf1b72e5788b50e83"}, -] - -[package.dependencies] -numpy = ">=1.16.6,<2" - [[package]] name = "pydantic" -version = "2.5.2" +version = "2.6.4" description = "Data 
validation using Python type hints" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic-2.5.2-py3-none-any.whl", hash = "sha256:80c50fb8e3dcecfddae1adbcc00ec5822918490c99ab31f6cf6140ca1c1429f0"}, - {file = "pydantic-2.5.2.tar.gz", hash = "sha256:ff177ba64c6faf73d7afa2e8cad38fd456c0dbe01c9954e71038001cd15a6edd"}, + {file = "pydantic-2.6.4-py3-none-any.whl", hash = "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"}, + {file = "pydantic-2.6.4.tar.gz", hash = "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.14.5" +pydantic-core = "2.16.3" typing-extensions = ">=4.6.1" [package.extras] @@ -476,116 +311,90 @@ email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.14.5" +version = "2.16.3" description = "" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.14.5-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:7e88f5696153dc516ba6e79f82cc4747e87027205f0e02390c21f7cb3bd8abfd"}, - {file = "pydantic_core-2.14.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4641e8ad4efb697f38a9b64ca0523b557c7931c5f84e0fd377a9a3b05121f0de"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:774de879d212db5ce02dfbf5b0da9a0ea386aeba12b0b95674a4ce0593df3d07"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ebb4e035e28f49b6f1a7032920bb9a0c064aedbbabe52c543343d39341a5b2a3"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b53e9ad053cd064f7e473a5f29b37fc4cc9dc6d35f341e6afc0155ea257fc911"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:8aa1768c151cf562a9992462239dfc356b3d1037cc5a3ac829bb7f3bda7cc1f9"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eac5c82fc632c599f4639a5886f96867ffced74458c7db61bc9a66ccb8ee3113"}, - {file = "pydantic_core-2.14.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2ae91f50ccc5810b2f1b6b858257c9ad2e08da70bf890dee02de1775a387c66"}, - {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6b9ff467ffbab9110e80e8c8de3bcfce8e8b0fd5661ac44a09ae5901668ba997"}, - {file = "pydantic_core-2.14.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:61ea96a78378e3bd5a0be99b0e5ed00057b71f66115f5404d0dae4819f495093"}, - {file = "pydantic_core-2.14.5-cp310-none-win32.whl", hash = "sha256:bb4c2eda937a5e74c38a41b33d8c77220380a388d689bcdb9b187cf6224c9720"}, - {file = "pydantic_core-2.14.5-cp310-none-win_amd64.whl", hash = "sha256:b7851992faf25eac90bfcb7bfd19e1f5ffa00afd57daec8a0042e63c74a4551b"}, - {file = "pydantic_core-2.14.5-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:4e40f2bd0d57dac3feb3a3aed50f17d83436c9e6b09b16af271b6230a2915459"}, - {file = "pydantic_core-2.14.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ab1cdb0f14dc161ebc268c09db04d2c9e6f70027f3b42446fa11c153521c0e88"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aae7ea3a1c5bb40c93cad361b3e869b180ac174656120c42b9fadebf685d121b"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:60b7607753ba62cf0739177913b858140f11b8af72f22860c28eabb2f0a61937"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2248485b0322c75aee7565d95ad0e16f1c67403a470d02f94da7344184be770f"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:823fcc638f67035137a5cd3f1584a4542d35a951c3cc68c6ead1df7dac825c26"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96581cfefa9123accc465a5fd0cc833ac4d75d55cc30b633b402e00e7ced00a6"}, - {file = "pydantic_core-2.14.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a33324437018bf6ba1bb0f921788788641439e0ed654b233285b9c69704c27b4"}, - {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9bd18fee0923ca10f9a3ff67d4851c9d3e22b7bc63d1eddc12f439f436f2aada"}, - {file = "pydantic_core-2.14.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:853a2295c00f1d4429db4c0fb9475958543ee80cfd310814b5c0ef502de24dda"}, - {file = "pydantic_core-2.14.5-cp311-none-win32.whl", hash = "sha256:cb774298da62aea5c80a89bd58c40205ab4c2abf4834453b5de207d59d2e1651"}, - {file = "pydantic_core-2.14.5-cp311-none-win_amd64.whl", hash = "sha256:e87fc540c6cac7f29ede02e0f989d4233f88ad439c5cdee56f693cc9c1c78077"}, - {file = "pydantic_core-2.14.5-cp311-none-win_arm64.whl", hash = "sha256:57d52fa717ff445cb0a5ab5237db502e6be50809b43a596fb569630c665abddf"}, - {file = "pydantic_core-2.14.5-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:e60f112ac88db9261ad3a52032ea46388378034f3279c643499edb982536a093"}, - {file = "pydantic_core-2.14.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6e227c40c02fd873c2a73a98c1280c10315cbebe26734c196ef4514776120aeb"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0cbc7fff06a90bbd875cc201f94ef0ee3929dfbd5c55a06674b60857b8b85ed"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:103ef8d5b58596a731b690112819501ba1db7a36f4ee99f7892c40da02c3e189"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c949f04ecad823f81b1ba94e7d189d9dfb81edbb94ed3f8acfce41e682e48cef"}, - {file = 
"pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1452a1acdf914d194159439eb21e56b89aa903f2e1c65c60b9d874f9b950e5d"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb4679d4c2b089e5ef89756bc73e1926745e995d76e11925e3e96a76d5fa51fc"}, - {file = "pydantic_core-2.14.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf9d3fe53b1ee360e2421be95e62ca9b3296bf3f2fb2d3b83ca49ad3f925835e"}, - {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:70f4b4851dbb500129681d04cc955be2a90b2248d69273a787dda120d5cf1f69"}, - {file = "pydantic_core-2.14.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:59986de5710ad9613ff61dd9b02bdd2f615f1a7052304b79cc8fa2eb4e336d2d"}, - {file = "pydantic_core-2.14.5-cp312-none-win32.whl", hash = "sha256:699156034181e2ce106c89ddb4b6504c30db8caa86e0c30de47b3e0654543260"}, - {file = "pydantic_core-2.14.5-cp312-none-win_amd64.whl", hash = "sha256:5baab5455c7a538ac7e8bf1feec4278a66436197592a9bed538160a2e7d11e36"}, - {file = "pydantic_core-2.14.5-cp312-none-win_arm64.whl", hash = "sha256:e47e9a08bcc04d20975b6434cc50bf82665fbc751bcce739d04a3120428f3e27"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:af36f36538418f3806048f3b242a1777e2540ff9efaa667c27da63d2749dbce0"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:45e95333b8418ded64745f14574aa9bfc212cb4fbeed7a687b0c6e53b5e188cd"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e47a76848f92529879ecfc417ff88a2806438f57be4a6a8bf2961e8f9ca9ec7"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d81e6987b27bc7d101c8597e1cd2bcaa2fee5e8e0f356735c7ed34368c471550"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:34708cc82c330e303f4ce87758828ef6e457681b58ce0e921b6e97937dd1e2a3"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:652c1988019752138b974c28f43751528116bcceadad85f33a258869e641d753"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e4d090e73e0725b2904fdbdd8d73b8802ddd691ef9254577b708d413bf3006e"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5c7d5b5005f177764e96bd584d7bf28d6e26e96f2a541fdddb934c486e36fd59"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a71891847f0a73b1b9eb86d089baee301477abef45f7eaf303495cd1473613e4"}, - {file = "pydantic_core-2.14.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a717aef6971208f0851a2420b075338e33083111d92041157bbe0e2713b37325"}, - {file = "pydantic_core-2.14.5-cp37-none-win32.whl", hash = "sha256:de790a3b5aa2124b8b78ae5faa033937a72da8efe74b9231698b5a1dd9be3405"}, - {file = "pydantic_core-2.14.5-cp37-none-win_amd64.whl", hash = "sha256:6c327e9cd849b564b234da821236e6bcbe4f359a42ee05050dc79d8ed2a91588"}, - {file = "pydantic_core-2.14.5-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:ef98ca7d5995a82f43ec0ab39c4caf6a9b994cb0b53648ff61716370eadc43cf"}, - {file = "pydantic_core-2.14.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c6eae413494a1c3f89055da7a5515f32e05ebc1a234c27674a6956755fb2236f"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcf4e6d85614f7a4956c2de5a56531f44efb973d2fe4a444d7251df5d5c4dcfd"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6637560562134b0e17de333d18e69e312e0458ee4455bdad12c37100b7cad706"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:77fa384d8e118b3077cccfcaf91bf83c31fe4dc850b5e6ee3dc14dc3d61bdba1"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16e29bad40bcf97aac682a58861249ca9dcc57c3f6be22f506501833ddb8939c"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:531f4b4252fac6ca476fbe0e6f60f16f5b65d3e6b583bc4d87645e4e5ddde331"}, - {file = "pydantic_core-2.14.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:074f3d86f081ce61414d2dc44901f4f83617329c6f3ab49d2bc6c96948b2c26b"}, - {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c2adbe22ab4babbca99c75c5d07aaf74f43c3195384ec07ccbd2f9e3bddaecec"}, - {file = "pydantic_core-2.14.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0f6116a558fd06d1b7c2902d1c4cf64a5bd49d67c3540e61eccca93f41418124"}, - {file = "pydantic_core-2.14.5-cp38-none-win32.whl", hash = "sha256:fe0a5a1025eb797752136ac8b4fa21aa891e3d74fd340f864ff982d649691867"}, - {file = "pydantic_core-2.14.5-cp38-none-win_amd64.whl", hash = "sha256:079206491c435b60778cf2b0ee5fd645e61ffd6e70c47806c9ed51fc75af078d"}, - {file = "pydantic_core-2.14.5-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:a6a16f4a527aae4f49c875da3cdc9508ac7eef26e7977952608610104244e1b7"}, - {file = "pydantic_core-2.14.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:abf058be9517dc877227ec3223f0300034bd0e9f53aebd63cf4456c8cb1e0863"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49b08aae5013640a3bfa25a8eebbd95638ec3f4b2eaf6ed82cf0c7047133f03b"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c2d97e906b4ff36eb464d52a3bc7d720bd6261f64bc4bcdbcd2c557c02081ed2"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3128e0bbc8c091ec4375a1828d6118bc20404883169ac95ffa8d983b293611e6"}, - 
{file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88e74ab0cdd84ad0614e2750f903bb0d610cc8af2cc17f72c28163acfcf372a4"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c339dabd8ee15f8259ee0f202679b6324926e5bc9e9a40bf981ce77c038553db"}, - {file = "pydantic_core-2.14.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3387277f1bf659caf1724e1afe8ee7dbc9952a82d90f858ebb931880216ea955"}, - {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ba6b6b3846cfc10fdb4c971980a954e49d447cd215ed5a77ec8190bc93dd7bc5"}, - {file = "pydantic_core-2.14.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca61d858e4107ce5e1330a74724fe757fc7135190eb5ce5c9d0191729f033209"}, - {file = "pydantic_core-2.14.5-cp39-none-win32.whl", hash = "sha256:ec1e72d6412f7126eb7b2e3bfca42b15e6e389e1bc88ea0069d0cc1742f477c6"}, - {file = "pydantic_core-2.14.5-cp39-none-win_amd64.whl", hash = "sha256:c0b97ec434041827935044bbbe52b03d6018c2897349670ff8fe11ed24d1d4ab"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:79e0a2cdbdc7af3f4aee3210b1172ab53d7ddb6a2d8c24119b5706e622b346d0"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:678265f7b14e138d9a541ddabbe033012a2953315739f8cfa6d754cc8063e8ca"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95b15e855ae44f0c6341ceb74df61b606e11f1087e87dcb7482377374aac6abe"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09b0e985fbaf13e6b06a56d21694d12ebca6ce5414b9211edf6f17738d82b0f8"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ad873900297bb36e4b6b3f7029d88ff9829ecdc15d5cf20161775ce12306f8a"}, - {file = 
"pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:2d0ae0d8670164e10accbeb31d5ad45adb71292032d0fdb9079912907f0085f4"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d37f8ec982ead9ba0a22a996129594938138a1503237b87318392a48882d50b7"}, - {file = "pydantic_core-2.14.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:35613015f0ba7e14c29ac6c2483a657ec740e5ac5758d993fdd5870b07a61d8b"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ab4ea451082e684198636565224bbb179575efc1658c48281b2c866bfd4ddf04"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ce601907e99ea5b4adb807ded3570ea62186b17f88e271569144e8cca4409c7"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb2ed8b3fe4bf4506d6dab3b93b83bbc22237e230cba03866d561c3577517d18"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70f947628e074bb2526ba1b151cee10e4c3b9670af4dbb4d73bc8a89445916b5"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4bc536201426451f06f044dfbf341c09f540b4ebdb9fd8d2c6164d733de5e634"}, - {file = "pydantic_core-2.14.5-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4791cf0f8c3104ac668797d8c514afb3431bc3305f5638add0ba1a5a37e0d88"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:038c9f763e650712b899f983076ce783175397c848da04985658e7628cbe873b"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:27548e16c79702f1e03f5628589c6057c9ae17c95b4c449de3c66b589ead0520"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97bee68898f3f4344eb02fec316db93d9700fb1e6a5b760ffa20d71d9a46ce3"}, - {file = 
"pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9b759b77f5337b4ea024f03abc6464c9f35d9718de01cfe6bae9f2e139c397e"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:439c9afe34638ace43a49bf72d201e0ffc1a800295bed8420c2a9ca8d5e3dbb3"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ba39688799094c75ea8a16a6b544eb57b5b0f3328697084f3f2790892510d144"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ccd4d5702bb90b84df13bd491be8d900b92016c5a455b7e14630ad7449eb03f8"}, - {file = "pydantic_core-2.14.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:81982d78a45d1e5396819bbb4ece1fadfe5f079335dd28c4ab3427cd95389944"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:7f8210297b04e53bc3da35db08b7302a6a1f4889c79173af69b72ec9754796b8"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:8c8a8812fe6f43a3a5b054af6ac2d7b8605c7bcab2804a8a7d68b53f3cd86e00"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:206ed23aecd67c71daf5c02c3cd19c0501b01ef3cbf7782db9e4e051426b3d0d"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c2027d05c8aebe61d898d4cffd774840a9cb82ed356ba47a90d99ad768f39789"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40180930807ce806aa71eda5a5a5447abb6b6a3c0b4b3b1b1962651906484d68"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:615a0a4bff11c45eb3c1996ceed5bdaa2f7b432425253a7c2eed33bb86d80abc"}, - {file = "pydantic_core-2.14.5-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f5e412d717366e0677ef767eac93566582518fe8be923361a5c204c1a62eaafe"}, - {file = 
"pydantic_core-2.14.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:513b07e99c0a267b1d954243845d8a833758a6726a3b5d8948306e3fe14675e3"}, - {file = "pydantic_core-2.14.5.tar.gz", hash = "sha256:6d30226dfc816dd0fdf120cae611dd2215117e4f9b124af8c60ab9093b6e8e71"}, + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, + {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = 
"sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, + {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, + {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, + {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, + {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, + {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, + {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = 
"sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, + {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, + {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, + {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, + {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, + {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, + {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, + {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, + {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = "sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, + {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, + {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, + {file = 
"pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, + {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, + {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, + {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, + {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, + {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, ] [package.dependencies] @@ -593,13 +402,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pytest" -version = "7.4.3" +version = "7.4.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, - {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, + {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, + {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, ] [package.dependencies] @@ -615,93 
+424,57 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no [[package]] name = "pytest-asyncio" -version = "0.23.2" +version = "0.23.5.post1" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-asyncio-0.23.2.tar.gz", hash = "sha256:c16052382554c7b22d48782ab3438d5b10f8cf7a4bdcae7f0f67f097d95beecc"}, - {file = "pytest_asyncio-0.23.2-py3-none-any.whl", hash = "sha256:ea9021364e32d58f0be43b91c6233fb8d2224ccef2398d6837559e587682808f"}, + {file = "pytest-asyncio-0.23.5.post1.tar.gz", hash = "sha256:b9a8806bea78c21276bc34321bbf234ba1b2ea5b30d9f0ce0f2dea45e4685813"}, + {file = "pytest_asyncio-0.23.5.post1-py3-none-any.whl", hash = "sha256:30f54d27774e79ac409778889880242b0403d09cabd65b727ce90fe92dd5d80e"}, ] [package.dependencies] -pytest = ">=7.0.0" +pytest = ">=7.0.0,<9" [package.extras] docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "pytz" -version = "2024.1" -description = "World timezone definitions, modern and historical" -optional = false -python-versions = "*" -files = [ - {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, - {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, -] - [[package]] name = "ruff" -version = "0.1.7" +version = "0.1.15" 
description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.1.7-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7f80496854fdc65b6659c271d2c26e90d4d401e6a4a31908e7e334fab4645aac"}, - {file = "ruff-0.1.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:1ea109bdb23c2a4413f397ebd8ac32cb498bee234d4191ae1a310af760e5d287"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0c2de9dd9daf5e07624c24add25c3a490dbf74b0e9bca4145c632457b3b42a"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:69a4bed13bc1d5dabf3902522b5a2aadfebe28226c6269694283c3b0cecb45fd"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de02ca331f2143195a712983a57137c5ec0f10acc4aa81f7c1f86519e52b92a1"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:45b38c3f8788a65e6a2cab02e0f7adfa88872696839d9882c13b7e2f35d64c5f"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c64cb67b2025b1ac6d58e5ffca8f7b3f7fd921f35e78198411237e4f0db8e73"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9dcc6bb2f4df59cb5b4b40ff14be7d57012179d69c6565c1da0d1f013d29951b"}, - {file = "ruff-0.1.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2bb4bb6bbe921f6b4f5b6fdd8d8468c940731cb9406f274ae8c5ed7a78c478"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:276a89bcb149b3d8c1b11d91aa81898fe698900ed553a08129b38d9d6570e717"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:90c958fe950735041f1c80d21b42184f1072cc3975d05e736e8d66fc377119ea"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:6b05e3b123f93bb4146a761b7a7d57af8cb7384ccb2502d29d736eaade0db519"}, - {file = "ruff-0.1.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:290ecab680dce94affebefe0bbca2322a6277e83d4f29234627e0f8f6b4fa9ce"}, - {file = "ruff-0.1.7-py3-none-win32.whl", hash = "sha256:416dfd0bd45d1a2baa3b1b07b1b9758e7d993c256d3e51dc6e03a5e7901c7d80"}, - {file = "ruff-0.1.7-py3-none-win_amd64.whl", hash = "sha256:4af95fd1d3b001fc41325064336db36e3d27d2004cdb6d21fd617d45a172dd96"}, - {file = "ruff-0.1.7-py3-none-win_arm64.whl", hash = "sha256:0683b7bfbb95e6df3c7c04fe9d78f631f8e8ba4868dfc932d43d690698057e2e"}, - {file = "ruff-0.1.7.tar.gz", hash = "sha256:dffd699d07abf54833e5f6cc50b85a6ff043715da8788c4a79bcd4ab4734d306"}, -] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, + {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:5fe8d54df166ecc24106db7dd6a68d44852d14eb0729ea4672bb4d96c320b7df"}, + {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f0bfbb53c4b4de117ac4d6ddfd33aa5fc31beeaa21d23c45c6dd249faf9126f"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d432aec35bfc0d800d4f70eba26e23a352386be3a6cf157083d18f6f5881c8"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9405fa9ac0e97f35aaddf185a1be194a589424b8713e3b97b762336ec79ff807"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c66ec24fe36841636e814b8f90f572a8c0cb0e54d8b5c2d0e300d28a0d7bffec"}, + {file = 
"ruff-0.1.15-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6f8ad828f01e8dd32cc58bc28375150171d198491fc901f6f98d2a39ba8e3ff5"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86811954eec63e9ea162af0ffa9f8d09088bab51b7438e8b6488b9401863c25e"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4025ac5e87d9b80e1f300207eb2fd099ff8200fa2320d7dc066a3f4622dc6b"}, + {file = "ruff-0.1.15-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b17b93c02cdb6aeb696effecea1095ac93f3884a49a554a9afa76bb125c114c1"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ddb87643be40f034e97e97f5bc2ef7ce39de20e34608f3f829db727a93fb82c5"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abf4822129ed3a5ce54383d5f0e964e7fef74a41e48eb1dfad404151efc130a2"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6c629cf64bacfd136c07c78ac10a54578ec9d1bd2a9d395efbee0935868bf852"}, + {file = "ruff-0.1.15-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1bab866aafb53da39c2cadfb8e1c4550ac5340bb40300083eb8967ba25481447"}, + {file = "ruff-0.1.15-py3-none-win32.whl", hash = "sha256:2417e1cb6e2068389b07e6fa74c306b2810fe3ee3476d5b8a96616633f40d14f"}, + {file = "ruff-0.1.15-py3-none-win_amd64.whl", hash = "sha256:3837ac73d869efc4182d9036b1405ef4c73d9b1f88da2413875e34e0d6919587"}, + {file = "ruff-0.1.15-py3-none-win_arm64.whl", hash = "sha256:9a933dfb1c14ec7a33cceb1e49ec4a16b51ce3c20fd42663198746efc0427360"}, + {file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"}, ] [[package]] name = "sniffio" -version = "1.3.0" +version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" files = [ - {file = "sniffio-1.3.0-py3-none-any.whl", hash = 
"sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, - {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] [[package]] @@ -717,13 +490,13 @@ files = [ [[package]] name = "types-requests" -version = "2.31.0.10" +version = "2.31.0.20240311" description = "Typing stubs for requests" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "types-requests-2.31.0.10.tar.gz", hash = "sha256:dc5852a76f1eaf60eafa81a2e50aefa3d1f015c34cf0cba130930866b1b22a92"}, - {file = "types_requests-2.31.0.10-py3-none-any.whl", hash = "sha256:b32b9a86beffa876c0c3ac99a4cd3b8b51e973fb8e3bd4e0a6bb32c7efad80fc"}, + {file = "types-requests-2.31.0.20240311.tar.gz", hash = "sha256:b1c1b66abfb7fa79aae09097a811c4aa97130eb8831c60e47aee4ca344731ca5"}, + {file = "types_requests-2.31.0.20240311-py3-none-any.whl", hash = "sha256:47872893d65a38e282ee9f277a4ee50d1b28bd592040df7d1fdaffdf3779937d"}, ] [package.dependencies] @@ -731,43 +504,33 @@ urllib3 = ">=2" [[package]] name = "typing-extensions" -version = "4.8.0" +version = "4.10.0" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, - {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, -] - -[[package]] -name = "tzdata" -version = "2024.1" -description = "Provider of IANA time zone data" -optional = false -python-versions = ">=2" -files = [ - {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = 
"sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, - {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, + {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, + {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, ] [[package]] name = "urllib3" -version = "2.1.0" +version = "2.2.1" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.1.0-py3-none-any.whl", hash = "sha256:55901e917a5896a349ff771be919f8bd99aff50b79fe58fec595eb37bbc56bb3"}, - {file = "urllib3-2.1.0.tar.gz", hash = "sha256:df7aa8afb0148fa78488e7899b2c59b5f4ffcfa82e6c54ccb9dd37c1d7b52d54"}, + {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, + {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, ] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "9ca8a20cccbfe5d054923f5d7a5bab783855e748699cd87c5a0b8315192ac284" +content-hash = "1748572f868803c7fddeba7f5136e4916c7fdd0e219dfb7d0f5167d70b2cb445" diff --git a/pyproject.toml b/pyproject.toml index 73cf6f83..c2ec5ce3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,8 +27,6 @@ python = "^3.9" orjson = "^3.9.10" pydantic = "^2.5.2" httpx = "^0.25.2" -pandas = "^2.2.0" -pyarrow = "^15.0.0" [tool.poetry.group.dev.dependencies] diff --git a/version.txt b/version.txt deleted file mode 100644 index 699c6c6d..00000000 --- a/version.txt +++ /dev/null @@ -1 +0,0 @@ -0.1.8 From 
ac7a0f69909341636758f4cbd566f740ae3791f4 Mon Sep 17 00:00:00 2001 From: Alexis Tacnet Date: Thu, 23 May 2024 17:47:22 +0200 Subject: [PATCH 037/223] Add tool call id for new models with v3 tokenization (#93) --- examples/function_calling.py | 4 ++-- src/mistralai/models/chat_completion.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/function_calling.py b/examples/function_calling.py index a7b94afb..381cc709 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -64,7 +64,7 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: ] api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" +model = "mistral-small-latest" client = MistralClient(api_key=api_key) @@ -90,7 +90,7 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: function_result = names_to_functions[function_name](**function_params) messages.append(response.choices[0].message) -messages.append(ChatMessage(role="tool", name=function_name, content=function_result)) +messages.append(ChatMessage(role="tool", name=function_name, content=function_result, tool_call_id=tool_call.id)) response = client.chat(model=model, messages=messages, tools=tools) diff --git a/src/mistralai/models/chat_completion.py b/src/mistralai/models/chat_completion.py index 9f145bc9..0dcc6f97 100644 --- a/src/mistralai/models/chat_completion.py +++ b/src/mistralai/models/chat_completion.py @@ -47,6 +47,7 @@ class ChatMessage(BaseModel): content: Union[str, List[str]] name: Optional[str] = None tool_calls: Optional[List[ToolCall]] = None + tool_call_id: Optional[str] = None class DeltaMessage(BaseModel): From 868bf3a6314e6945a03bde50c0e3da6462af9fc5 Mon Sep 17 00:00:00 2001 From: Sylvain MOUQUET Date: Thu, 23 May 2024 18:06:15 +0200 Subject: [PATCH 038/223] feat(async_client): use asyncio (#82) --- src/mistralai/async_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/src/mistralai/async_client.py b/src/mistralai/async_client.py index 23b80457..ebe41b0f 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -1,6 +1,6 @@ +import asyncio import os import posixpath -import time from json import JSONDecodeError from typing import Any, AsyncGenerator, Dict, List, Optional, Union @@ -151,7 +151,7 @@ async def _request( if attempt > self._max_retries: raise MistralAPIStatusException.from_response(response, message=str(e)) from e backoff = 2.0**attempt # exponential backoff - time.sleep(backoff) + await asyncio.sleep(backoff) # Retry as a generator async for r in self._request(method, json, path, stream=stream, attempt=attempt): From d469f2ab98af7cb818ac4f69c992a56511fe8c61 Mon Sep 17 00:00:00 2001 From: erwinscholtens Date: Thu, 23 May 2024 18:07:01 +0200 Subject: [PATCH 039/223] Remove setting of the _root_ logger level as this is best done by the end user. (#86) Co-authored-by: Erwin Scholtens --- src/mistralai/client_base.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index c114ea18..9eb47a68 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -10,11 +10,6 @@ ) from mistralai.models.chat_completion import ChatMessage, Function, ResponseFormat, ToolChoice -logging.basicConfig( - format="%(asctime)s %(levelname)s %(name)s: %(message)s", - level=os.getenv("LOG_LEVEL", "ERROR"), -) - class ClientBase(ABC): def __init__( From b69e5229171169c21e573c935836735b6184672b Mon Sep 17 00:00:00 2001 From: Wilson Silva Date: Thu, 23 May 2024 17:07:34 +0100 Subject: [PATCH 040/223] Remove an unused expression (#87) The variable n_rows is not used anywhere --- examples/function_calling.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/examples/function_calling.py b/examples/function_calling.py index 381cc709..9d6b89f0 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -15,8 +15,6 @@ 
"payment_status": ["Paid", "Unpaid", "Paid", "Paid", "Pending"], } -n_rows = len(data["transaction_id"]) - def retrieve_payment_status(data: Dict[str,List], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: From 5f5b4514dfbbc2cb2748de679a0e5ed776ad94a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=D0=9C=D0=B8=D0=BB=D0=B0=D0=BD=20=D0=9C=D0=B0=D1=81=D1=82?= =?UTF-8?q?=D0=B8=D0=BA=D0=BE=D1=81=D0=B0?= <43369451+Mastikosa@users.noreply.github.com> Date: Thu, 23 May 2024 18:08:49 +0200 Subject: [PATCH 041/223] Update httpx version requirement (#80) --- poetry.lock | 4 ++-- pyproject.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 364a1bed..c7723c5f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand. [[package]] name = "annotated-types" @@ -533,4 +533,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "1748572f868803c7fddeba7f5136e4916c7fdd0e219dfb7d0f5167d70b2cb445" +content-hash = "a7fd4bf25bc9a0edf13eb53e3463b3d8176a25c2b8b1383625c0b42c6510e1d0" diff --git a/pyproject.toml b/pyproject.toml index c2ec5ce3..84230996 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,7 +26,7 @@ exclude = ["docs", "tests", "examples", "tools", "build"] python = "^3.9" orjson = "^3.9.10" pydantic = "^2.5.2" -httpx = "^0.25.2" +httpx = ">= 0.25.2, < 1" [tool.poetry.group.dev.dependencies] From 374fd012f51fb166c399f0961bee9cdb70579a60 Mon Sep 17 00:00:00 2001 From: Antoine Lizee Date: Fri, 24 May 2024 01:11:38 +0900 Subject: [PATCH 042/223] Get api key at client initialization (#57) --- src/mistralai/async_client.py | 2 +- src/mistralai/client.py | 2 +- src/mistralai/client_base.py | 8 +++++++- 3 files changed, 9 insertions(+), 3 
deletions(-) diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index ebe41b0f..d04edba8 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -34,7 +34,7 @@ class MistralAsyncClient(ClientBase): def __init__( self, - api_key: Optional[str] = os.environ.get("MISTRAL_API_KEY", None), + api_key: Optional[str] = None, endpoint: str = ENDPOINT, max_retries: int = 5, timeout: int = 120, diff --git a/src/mistralai/client.py b/src/mistralai/client.py index aa8daeb5..40b46e8e 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -31,7 +31,7 @@ class MistralClient(ClientBase): def __init__( self, - api_key: Optional[str] = os.environ.get("MISTRAL_API_KEY", None), + api_key: Optional[str] = None, endpoint: str = ENDPOINT, max_retries: int = 5, timeout: int = 120, diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index 9eb47a68..2497fb8c 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -22,8 +22,14 @@ def __init__( self._max_retries = max_retries self._timeout = timeout - self._endpoint = endpoint + if api_key is None: + api_key = os.environ.get("MISTRAL_API_KEY") + if api_key is None: + raise MistralException( + message="API key not provided. Please set MISTRAL_API_KEY environment variable." 
+ ) self._api_key = api_key + self._endpoint = endpoint self._logger = logging.getLogger(__name__) # For azure endpoints, we default to the mistral model From 32ec8b63b2e4f470842aad22c2904e8d2fd8f22b Mon Sep 17 00:00:00 2001 From: Alexis Tacnet Date: Thu, 23 May 2024 18:34:09 +0200 Subject: [PATCH 043/223] Release 0.2.0 (#94) --- .github/workflows/build_publish.yaml | 90 ++++++++++++++++++++++++++++ examples/chatbot_with_streaming.py | 36 +++-------- examples/function_calling.py | 13 ++-- examples/json_format.py | 1 - pyproject.toml | 2 +- src/mistralai/async_client.py | 1 - src/mistralai/client.py | 1 - src/mistralai/client_base.py | 9 ++- src/mistralai/constants.py | 2 - src/mistralai/exceptions.py | 6 +- src/mistralai/models/models.py | 1 + tests/conftest.py | 19 ++++++ tests/test_chat.py | 43 +++---------- tests/test_chat_async.py | 62 +++++++------------ tests/test_embedder.py | 23 ++----- tests/test_embedder_async.py | 42 +++++-------- tests/test_list_models.py | 13 +--- tests/test_list_models_async.py | 22 ++----- tests/utils.py | 4 +- 19 files changed, 189 insertions(+), 201 deletions(-) create mode 100644 .github/workflows/build_publish.yaml create mode 100644 tests/conftest.py diff --git a/.github/workflows/build_publish.yaml b/.github/workflows/build_publish.yaml new file mode 100644 index 00000000..a696f100 --- /dev/null +++ b/.github/workflows/build_publish.yaml @@ -0,0 +1,90 @@ +name: Lint / Test / Publish + +on: + push: + branches: ["main"] + + # We only deploy on tags and main branch + tags: + # Only run on tags that match the following regex + # This will match tags like 1.0.0, 1.0.1, etc. 
+ - "[0-9]+.[0-9]+.[0-9]+" + + # Lint and test on pull requests + pull_request: + +jobs: + lint_and_test: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.9", "3.10", "3.11", "3.12"] + steps: + # Checkout the repository + - name: Checkout + uses: actions/checkout@v4 + + # Set python version to 3.11 + - name: set python version + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + # Install Build stuff + - name: Install Dependencies + run: | + pip install poetry \ + && poetry config virtualenvs.create false \ + && poetry install + + # Ruff + - name: Ruff check + run: | + poetry run ruff check . + + - name: Ruff check + run: | + poetry run ruff format . --check + + # Mypy + - name: Mypy Check + run: | + poetry run mypy . + + # Tests + - name: Run Tests + run: | + poetry run pytest . + + publish: + if: startsWith(github.ref, 'refs/tags') + runs-on: ubuntu-latest + needs: lint_and_test + steps: + # Checkout the repository + - name: Checkout + uses: actions/checkout@v4 + + # Set python version to 3.11 + - name: set python version + uses: actions/setup-python@v4 + with: + python-version: 3.11 + + # Install Build stuff + - name: Install Dependencies + run: | + pip install poetry \ + && poetry config virtualenvs.create false \ + && poetry install + + # build package using poetry + - name: Build Package + run: | + poetry build + + # Publish to PyPi + - name: Pypi publish + run: | + poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }} + poetry publish diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py index 6654cb45..a815e2f3 100755 --- a/examples/chatbot_with_streaming.py +++ b/examples/chatbot_with_streaming.py @@ -63,9 +63,7 @@ def completer(text, state): class ChatBot: - def __init__( - self, api_key, model, system_message=None, temperature=DEFAULT_TEMPERATURE - ): + def __init__(self, api_key, model, system_message=None, temperature=DEFAULT_TEMPERATURE): if not api_key: raise 
ValueError("An API key must be provided to use the Mistral API.") self.client = MistralClient(api_key=api_key) @@ -89,15 +87,11 @@ def opening_instructions(self): def new_chat(self): print("") - print( - f"Starting new chat with model: {self.model}, temperature: {self.temperature}" - ) + print(f"Starting new chat with model: {self.model}, temperature: {self.temperature}") print("") self.messages = [] if self.system_message: - self.messages.append( - ChatMessage(role="system", content=self.system_message) - ) + self.messages.append(ChatMessage(role="system", content=self.system_message)) def switch_model(self, input): model = self.get_arguments(input) @@ -146,13 +140,9 @@ def run_inference(self, content): self.messages.append(ChatMessage(role="user", content=content)) assistant_response = "" - logger.debug( - f"Running inference with model: {self.model}, temperature: {self.temperature}" - ) + logger.debug(f"Running inference with model: {self.model}, temperature: {self.temperature}") logger.debug(f"Sending messages: {self.messages}") - for chunk in self.client.chat_stream( - model=self.model, temperature=self.temperature, messages=self.messages - ): + for chunk in self.client.chat_stream(model=self.model, temperature=self.temperature, messages=self.messages): response = chunk.choices[0].delta.content if response is not None: print(response, end="", flush=True) @@ -161,9 +151,7 @@ def run_inference(self, content): print("", flush=True) if assistant_response: - self.messages.append( - ChatMessage(role="assistant", content=assistant_response) - ) + self.messages.append(ChatMessage(role="assistant", content=assistant_response)) logger.debug(f"Current messages: {self.messages}") def get_command(self, input): @@ -215,9 +203,7 @@ def exit(self): if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="A simple chatbot using the Mistral API" - ) + parser = argparse.ArgumentParser(description="A simple chatbot using the Mistral API") parser.add_argument( 
"--api-key", default=os.environ.get("MISTRAL_API_KEY"), @@ -230,9 +216,7 @@ def exit(self): default=DEFAULT_MODEL, help="Model for chat inference. Choices are %(choices)s. Defaults to %(default)s", ) - parser.add_argument( - "-s", "--system-message", help="Optional system message to prepend." - ) + parser.add_argument("-s", "--system-message", help="Optional system message to prepend.") parser.add_argument( "-t", "--temperature", @@ -240,9 +224,7 @@ def exit(self): default=DEFAULT_TEMPERATURE, help="Optional temperature for chat inference. Defaults to %(default)s", ) - parser.add_argument( - "-d", "--debug", action="store_true", help="Enable debug logging" - ) + parser.add_argument("-d", "--debug", action="store_true", help="Enable debug logging") args = parser.parse_args() diff --git a/examples/function_calling.py b/examples/function_calling.py index 9d6b89f0..e6e6f28c 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -15,13 +15,15 @@ "payment_status": ["Paid", "Unpaid", "Paid", "Paid", "Pending"], } -def retrieve_payment_status(data: Dict[str,List], transaction_id: str) -> str: + +def retrieve_payment_status(data: Dict[str, List], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"status": data["payment_status"][i]}) else: return json.dumps({"status": "Error - transaction id not found"}) + def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: @@ -29,9 +31,10 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: else: return json.dumps({"status": "Error - transaction id not found"}) + names_to_functions = { - "retrieve_payment_status": functools.partial(retrieve_payment_status, data=data), - "retrieve_payment_date": functools.partial(retrieve_payment_date, data=data) + "retrieve_payment_status": functools.partial(retrieve_payment_status, 
data=data), + "retrieve_payment_date": functools.partial(retrieve_payment_date, data=data), } tools = [ @@ -75,9 +78,7 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: messages.append(ChatMessage(role="assistant", content=response.choices[0].message.content)) messages.append(ChatMessage(role="user", content="My transaction ID is T1001.")) -response = client.chat( - model=model, messages=messages, tools=tools -) +response = client.chat(model=model, messages=messages, tools=tools) tool_call = response.choices[0].message.tool_calls[0] function_name = tool_call.function.name diff --git a/examples/json_format.py b/examples/json_format.py index 5c03d356..749965b1 100755 --- a/examples/json_format.py +++ b/examples/json_format.py @@ -16,7 +16,6 @@ def main(): model=model, response_format={"type": "json_object"}, messages=[ChatMessage(role="user", content="What is the best French cheese? Answer shortly in JSON.")], - ) print(chat_response.choices[0].message.content) diff --git a/pyproject.toml b/pyproject.toml index 84230996..9a4d726f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "0.0.1" +version = "0.2.0" description = "" authors = ["Bam4d "] readme = "README.md" diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index d04edba8..2019de58 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -1,5 +1,4 @@ import asyncio -import os import posixpath from json import JSONDecodeError from typing import Any, AsyncGenerator, Dict, List, Optional, Union diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 40b46e8e..a5daa517 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -1,4 +1,3 @@ -import os import posixpath import time from json import JSONDecodeError diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index 2497fb8c..d58ff142 100644 --- a/src/mistralai/client_base.py +++ 
b/src/mistralai/client_base.py @@ -10,6 +10,8 @@ ) from mistralai.models.chat_completion import ChatMessage, Function, ResponseFormat, ToolChoice +CLIENT_VERSION = "0.2.0" + class ClientBase(ABC): def __init__( @@ -25,9 +27,7 @@ def __init__( if api_key is None: api_key = os.environ.get("MISTRAL_API_KEY") if api_key is None: - raise MistralException( - message="API key not provided. Please set MISTRAL_API_KEY environment variable." - ) + raise MistralException(message="API key not provided. Please set MISTRAL_API_KEY environment variable.") self._api_key = api_key self._endpoint = endpoint self._logger = logging.getLogger(__name__) @@ -36,8 +36,7 @@ def __init__( if "inference.azure.com" in self._endpoint: self._default_model = "mistral" - # This should be automatically updated by the deploy script - self._version = "0.0.1" + self._version = CLIENT_VERSION def _parse_tools(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]: parsed_tools: List[Dict[str, Any]] = [] diff --git a/src/mistralai/constants.py b/src/mistralai/constants.py index b274a4c0..c70331b4 100644 --- a/src/mistralai/constants.py +++ b/src/mistralai/constants.py @@ -1,5 +1,3 @@ - - RETRY_STATUS_CODES = {429, 500, 502, 503, 504} ENDPOINT = "https://api.mistral.ai" diff --git a/src/mistralai/exceptions.py b/src/mistralai/exceptions.py index 9c9da816..5728a1c1 100644 --- a/src/mistralai/exceptions.py +++ b/src/mistralai/exceptions.py @@ -35,9 +35,7 @@ def __init__( self.headers = headers or {} @classmethod - def from_response( - cls, response: Response, message: Optional[str] = None - ) -> MistralAPIException: + def from_response(cls, response: Response, message: Optional[str] = None) -> MistralAPIException: return cls( message=message or response.text, http_status=response.status_code, @@ -47,8 +45,10 @@ def from_response( def __repr__(self) -> str: return f"{self.__class__.__name__}(message={str(self)}, http_status={self.http_status})" + class 
MistralAPIStatusException(MistralAPIException): """Returned when we receive a non-200 response from the API that we should retry""" + class MistralConnectionException(MistralException): """Returned when the SDK can not reach the API server for any reason""" diff --git a/src/mistralai/models/models.py b/src/mistralai/models/models.py index 8b3b6d7c..0acd4025 100644 --- a/src/mistralai/models/models.py +++ b/src/mistralai/models/models.py @@ -17,6 +17,7 @@ class ModelPermission(BaseModel): group: Optional[str] = None is_blocking: bool = False + class ModelCard(BaseModel): id: str object: str diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..c43f7aa2 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,19 @@ +from unittest import mock + +import pytest +from mistralai.async_client import MistralAsyncClient +from mistralai.client import MistralClient + + +@pytest.fixture() +def client(): + client = MistralClient(api_key="test_api_key") + client._client = mock.MagicMock() + return client + + +@pytest.fixture() +def async_client(): + client = MistralAsyncClient(api_key="test_api_key") + client._client = mock.AsyncMock() + return client diff --git a/tests/test_chat.py b/tests/test_chat.py index e64e68a4..eebc7362 100644 --- a/tests/test_chat.py +++ b/tests/test_chat.py @@ -1,7 +1,3 @@ -import unittest.mock as mock - -import pytest -from mistralai.client import MistralClient from mistralai.models.chat_completion import ( ChatCompletionResponse, ChatCompletionStreamResponse, @@ -16,13 +12,6 @@ ) -@pytest.fixture() -def client(): - client = MistralClient() - client._client = mock.MagicMock() - return client - - class TestChat: def test_chat(self, client): client._client.request.return_value = mock_response( @@ -32,9 +21,7 @@ def test_chat(self, client): result = client.chat( model="mistral-small", - messages=[ - ChatMessage(role="user", content="What is the best French cheese?") - ], + messages=[ChatMessage(role="user", content="What is the 
best French cheese?")], ) client._client.request.assert_called_once_with( @@ -43,22 +30,18 @@ def test_chat(self, client): headers={ "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", - "Authorization": "Bearer None", + "Authorization": "Bearer test_api_key", "Content-Type": "application/json", }, json={ "model": "mistral-small", - "messages": [ - {"role": "user", "content": "What is the best French cheese?"} - ], + "messages": [{"role": "user", "content": "What is the best French cheese?"}], "safe_prompt": False, "stream": False, }, ) - assert isinstance( - result, ChatCompletionResponse - ), "Should return an ChatCompletionResponse" + assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" assert len(result.choices) == 1 assert result.choices[0].index == 0 assert result.object == "chat.completion" @@ -71,9 +54,7 @@ def test_chat_streaming(self, client): result = client.chat_stream( model="mistral-small", - messages=[ - ChatMessage(role="user", content="What is the best French cheese?") - ], + messages=[ChatMessage(role="user", content="What is the best French cheese?")], ) results = list(result) @@ -84,14 +65,12 @@ def test_chat_streaming(self, client): headers={ "User-Agent": f"mistral-client-python/{client._version}", "Accept": "text/event-stream", - "Authorization": "Bearer None", + "Authorization": "Bearer test_api_key", "Content-Type": "application/json", }, json={ "model": "mistral-small", - "messages": [ - {"role": "user", "content": "What is the best French cheese?"} - ], + "messages": [{"role": "user", "content": "What is the best French cheese?"}], "safe_prompt": False, "stream": True, }, @@ -99,16 +78,12 @@ def test_chat_streaming(self, client): for i, result in enumerate(results): if i == 0: - assert isinstance( - result, ChatCompletionStreamResponse - ), "Should return an ChatCompletionStreamResponse" + assert isinstance(result, ChatCompletionStreamResponse), "Should return an 
ChatCompletionStreamResponse" assert len(result.choices) == 1 assert result.choices[0].index == 0 assert result.choices[0].delta.role == "assistant" else: - assert isinstance( - result, ChatCompletionStreamResponse - ), "Should return an ChatCompletionStreamResponse" + assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" assert len(result.choices) == 1 assert result.choices[0].index == i - 1 assert result.choices[0].delta.content == f"stream response {i-1}" diff --git a/tests/test_chat_async.py b/tests/test_chat_async.py index 7e51a972..e68760f1 100644 --- a/tests/test_chat_async.py +++ b/tests/test_chat_async.py @@ -1,7 +1,6 @@ import unittest.mock as mock import pytest -from mistralai.async_client import MistralAsyncClient from mistralai.models.chat_completion import ( ChatCompletionResponse, ChatCompletionStreamResponse, @@ -16,85 +15,68 @@ ) -@pytest.fixture() -def client(): - client = MistralAsyncClient() - client._client = mock.AsyncMock() - client._client.stream = mock.Mock() - return client - - class TestAsyncChat: @pytest.mark.asyncio - async def test_chat(self, client): - client._client.request.return_value = mock_response( + async def test_chat(self, async_client): + async_client._client.request.return_value = mock_response( 200, mock_chat_response_payload(), ) - result = await client.chat( + result = await async_client.chat( model="mistral-small", - messages=[ - ChatMessage(role="user", content="What is the best French cheese?") - ], + messages=[ChatMessage(role="user", content="What is the best French cheese?")], ) - client._client.request.assert_awaited_once_with( + async_client._client.request.assert_awaited_once_with( "post", "https://api.mistral.ai/v1/chat/completions", headers={ - "User-Agent": f"mistral-client-python/{client._version}", + "User-Agent": f"mistral-client-python/{async_client._version}", "Accept": "application/json", - "Authorization": 
"Bearer None", + "Authorization": "Bearer test_api_key", "Content-Type": "application/json", }, json={ "model": "mistral-small", - "messages": [ - {"role": "user", "content": "What is the best French cheese?"} - ], + "messages": [{"role": "user", "content": "What is the best French cheese?"}], "safe_prompt": False, "stream": False, }, ) - assert isinstance( - result, ChatCompletionResponse - ), "Should return an ChatCompletionResponse" + assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" assert len(result.choices) == 1 assert result.choices[0].index == 0 assert result.object == "chat.completion" @pytest.mark.asyncio - async def test_chat_streaming(self, client): - client._client.stream.return_value = mock_async_stream_response( + async def test_chat_streaming(self, async_client): + async_client._client.stream = mock.Mock() + async_client._client.stream.return_value = mock_async_stream_response( 200, mock_chat_response_streaming_payload(), ) - result = client.chat_stream( + result = async_client.chat_stream( model="mistral-small", - messages=[ - ChatMessage(role="user", content="What is the best French cheese?") - ], + messages=[ChatMessage(role="user", content="What is the best French cheese?")], ) results = [r async for r in result] - client._client.stream.assert_called_once_with( + async_client._client.stream.assert_called_once_with( "post", "https://api.mistral.ai/v1/chat/completions", headers={ "Accept": "text/event-stream", - "User-Agent": f"mistral-client-python/{client._version}", - "Authorization": "Bearer None", + "User-Agent": f"mistral-client-python/{async_client._version}", + "Authorization": "Bearer test_api_key", "Content-Type": "application/json", }, json={ "model": "mistral-small", - "messages": [ - {"role": "user", "content": "What is the best French cheese?"} - ], + "messages": [{"role": "user", "content": "What is the best French cheese?"}], "safe_prompt": False, 
"stream": True, }, @@ -102,16 +84,12 @@ async def test_chat_streaming(self, client): for i, result in enumerate(results): if i == 0: - assert isinstance( - result, ChatCompletionStreamResponse - ), "Should return an ChatCompletionStreamResponse" + assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" assert len(result.choices) == 1 assert result.choices[0].index == 0 assert result.choices[0].delta.role == "assistant" else: - assert isinstance( - result, ChatCompletionStreamResponse - ), "Should return an ChatCompletionStreamResponse" + assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" assert len(result.choices) == 1 assert result.choices[0].index == i - 1 assert result.choices[0].delta.content == f"stream response {i-1}" diff --git a/tests/test_embedder.py b/tests/test_embedder.py index 59e30fa6..56cd4c57 100644 --- a/tests/test_embedder.py +++ b/tests/test_embedder.py @@ -1,19 +1,8 @@ -import unittest.mock as mock - -import pytest -from mistralai.client import MistralClient from mistralai.models.embeddings import EmbeddingResponse from .utils import mock_embedding_response_payload, mock_response -@pytest.fixture() -def client(): - client = MistralClient() - client._client = mock.MagicMock() - return client - - class TestEmbeddings: def test_embeddings(self, client): client._client.request.return_value = mock_response( @@ -32,15 +21,13 @@ def test_embeddings(self, client): headers={ "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", - "Authorization": "Bearer None", + "Authorization": "Bearer test_api_key", "Content-Type": "application/json", }, json={"model": "mistral-embed", "input": "What is the best French cheese?"}, ) - assert isinstance( - result, EmbeddingResponse - ), "Should return an EmbeddingResponse" + assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" assert len(result.data) == 1 
assert result.data[0].index == 0 assert result.object == "list" @@ -62,7 +49,7 @@ def test_embeddings_batch(self, client): headers={ "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", - "Authorization": "Bearer None", + "Authorization": "Bearer test_api_key", "Content-Type": "application/json", }, json={ @@ -71,9 +58,7 @@ def test_embeddings_batch(self, client): }, ) - assert isinstance( - result, EmbeddingResponse - ), "Should return an EmbeddingResponse" + assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" assert len(result.data) == 10 assert result.data[0].index == 0 assert result.object == "list" diff --git a/tests/test_embedder_async.py b/tests/test_embedder_async.py index 8d08d6a6..d95fdd49 100644 --- a/tests/test_embedder_async.py +++ b/tests/test_embedder_async.py @@ -1,70 +1,58 @@ -import unittest.mock as mock - import pytest -from mistralai.async_client import MistralAsyncClient from mistralai.models.embeddings import EmbeddingResponse from .utils import mock_embedding_response_payload, mock_response -@pytest.fixture() -def client(): - client = MistralAsyncClient() - client._client = mock.AsyncMock() - return client - - class TestAsyncEmbeddings: @pytest.mark.asyncio - async def test_embeddings(self, client): - client._client.request.return_value = mock_response( + async def test_embeddings(self, async_client): + async_client._client.request.return_value = mock_response( 200, mock_embedding_response_payload(), ) - result = await client.embeddings( + result = await async_client.embeddings( model="mistral-embed", input="What is the best French cheese?", ) - client._client.request.assert_awaited_once_with( + async_client._client.request.assert_awaited_once_with( "post", "https://api.mistral.ai/v1/embeddings", headers={ - "User-Agent": f"mistral-client-python/{client._version}", + "User-Agent": f"mistral-client-python/{async_client._version}", 
"Accept": "application/json", - "Authorization": "Bearer None", + "Authorization": "Bearer test_api_key", "Content-Type": "application/json", }, json={"model": "mistral-embed", "input": "What is the best French cheese?"}, ) - assert isinstance( - result, EmbeddingResponse - ), "Should return an EmbeddingResponse" + assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" assert len(result.data) == 1 assert result.data[0].index == 0 assert result.object == "list" @pytest.mark.asyncio - async def test_embeddings_batch(self, client): - client._client.request.return_value = mock_response( + async def test_embeddings_batch(self, async_client): + async_client._client.request.return_value = mock_response( 200, mock_embedding_response_payload(batch_size=10), ) - result = await client.embeddings( + result = await async_client.embeddings( model="mistral-embed", input=["What is the best French cheese?"] * 10, ) - client._client.request.assert_awaited_once_with( + async_client._client.request.assert_awaited_once_with( "post", "https://api.mistral.ai/v1/embeddings", headers={ - "User-Agent": f"mistral-client-python/{client._version}", + "User-Agent": f"mistral-client-python/{async_client._version}", "Accept": "application/json", - "Authorization": "Bearer None", + "Authorization": "Bearer test_api_key", "Content-Type": "application/json", }, json={ @@ -73,9 +61,7 @@ async def test_embeddings_batch(self, client): }, ) - assert isinstance( - result, EmbeddingResponse - ), "Should return an EmbeddingResponse" + assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" assert len(result.data) == 10 assert result.data[0].index == 0 assert result.object == "list" diff --git a/tests/test_list_models.py b/tests/test_list_models.py index 1a048fac..6b73978b 100644 --- a/tests/test_list_models.py +++ b/tests/test_list_models.py @@ -1,19 +1,8 @@ -import unittest.mock as mock - -import pytest -from 
mistralai.client import MistralClient from mistralai.models.models import ModelList from .utils import mock_list_models_response_payload, mock_response -@pytest.fixture() -def client(): - client = MistralClient() - client._client = mock.MagicMock() - return client - - class TestListModels: def test_list_models(self, client): client._client.request.return_value = mock_response( @@ -29,7 +18,7 @@ def test_list_models(self, client): headers={ "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", - "Authorization": "Bearer None", + "Authorization": "Bearer test_api_key", "Content-Type": "application/json", }, json={}, diff --git a/tests/test_list_models_async.py b/tests/test_list_models_async.py index 6d572d12..a8764841 100644 --- a/tests/test_list_models_async.py +++ b/tests/test_list_models_async.py @@ -1,36 +1,26 @@ -import unittest.mock as mock - import pytest -from mistralai.async_client import MistralAsyncClient from mistralai.models.models import ModelList from .utils import mock_list_models_response_payload, mock_response -@pytest.fixture() -def client(): - client = MistralAsyncClient() - client._client = mock.AsyncMock() - return client - - class TestAsyncListModels: @pytest.mark.asyncio - async def test_list_models(self, client): - client._client.request.return_value = mock_response( + async def test_list_models(self, async_client): + async_client._client.request.return_value = mock_response( 200, mock_list_models_response_payload(), ) - result = await client.list_models() + result = await async_client.list_models() - client._client.request.assert_awaited_once_with( + async_client._client.request.assert_awaited_once_with( "get", "https://api.mistral.ai/v1/models", headers={ - "User-Agent": f"mistral-client-python/{client._version}", + "User-Agent": f"mistral-client-python/{async_client._version}", "Accept": "application/json", - "Authorization": "Bearer None", + "Authorization": 
"Bearer test_api_key", "Content-Type": "application/json", }, json={}, diff --git a/tests/utils.py b/tests/utils.py index e5edef60..50637c2d 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -27,9 +27,7 @@ async def async_iter(content: List[str]): yield response -def mock_response( - status_code: int, content: str, is_json: bool = True -) -> mock.MagicMock: +def mock_response(status_code: int, content: str, is_json: bool = True) -> mock.MagicMock: response = mock.Mock(Response) response.status_code = status_code if is_json: From aa010d2c2e8c9d3967604aa49d3c82e494e470cb Mon Sep 17 00:00:00 2001 From: jean-malo Date: Wed, 29 May 2024 15:06:41 +0200 Subject: [PATCH 044/223] release 0.3.0: add support for completion --- examples/async_completion.py | 33 +++++++++ examples/chatbot_with_streaming.py | 9 +-- examples/code_completion.py | 33 +++++++++ examples/completion_with_streaming.py | 29 ++++++++ pyproject.toml | 2 +- src/mistralai/async_client.py | 73 +++++++++++++++++++- src/mistralai/client.py | 76 ++++++++++++++++++++- src/mistralai/client_base.py | 71 +++++++++++++++++--- tests/test_chat.py | 8 +-- tests/test_chat_async.py | 8 +-- tests/test_completion.py | 97 +++++++++++++++++++++++++++ tests/utils.py | 30 +++++++-- 12 files changed, 442 insertions(+), 27 deletions(-) create mode 100644 examples/async_completion.py create mode 100644 examples/code_completion.py create mode 100644 examples/completion_with_streaming.py create mode 100644 tests/test_completion.py diff --git a/examples/async_completion.py b/examples/async_completion.py new file mode 100644 index 00000000..6aa22b4b --- /dev/null +++ b/examples/async_completion.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.async_client import MistralAsyncClient + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = MistralAsyncClient(api_key=api_key) + + prompt = "def fibonacci(n: int):" + suffix = "n = int(input('Enter a number: 
'))\nprint(fibonacci(n))" + + response = await client.completion( + model="codestral-latest", + prompt=prompt, + suffix=suffix, + ) + + print( + f""" +{prompt} +{response.choices[0].message.content} +{suffix} +""" + ) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py index a815e2f3..4304551f 100755 --- a/examples/chatbot_with_streaming.py +++ b/examples/chatbot_with_streaming.py @@ -12,11 +12,12 @@ from mistralai.models.chat_completion import ChatMessage MODEL_LIST = [ - "mistral-tiny", - "mistral-small", - "mistral-medium", + "mistral-small-latest", + "mistral-medium-latest", + "mistral-large-latest", + "codestral-latest", ] -DEFAULT_MODEL = "mistral-small" +DEFAULT_MODEL = "mistral-small-latest" DEFAULT_TEMPERATURE = 0.7 LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s" # A dictionary of all commands and their arguments, used for tab completion. diff --git a/examples/code_completion.py b/examples/code_completion.py new file mode 100644 index 00000000..f76f0f11 --- /dev/null +++ b/examples/code_completion.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import MistralClient + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = MistralClient(api_key=api_key) + + prompt = "def fibonacci(n: int):" + suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" + + response = client.completion( + model="codestral-latest", + prompt=prompt, + suffix=suffix, + ) + + print( + f""" +{prompt} +{response.choices[0].message.content} +{suffix} +""" + ) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/completion_with_streaming.py b/examples/completion_with_streaming.py new file mode 100644 index 00000000..f0760bfc --- /dev/null +++ b/examples/completion_with_streaming.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import 
MistralClient + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = MistralClient(api_key=api_key) + + prompt = "def fibonacci(n: int):" + suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" + + print(prompt) + for chunk in client.completion_stream( + model="codestral-latest", + prompt=prompt, + suffix=suffix, + ): + if chunk.choices[0].delta.content is not None: + print(chunk.choices[0].delta.content, end="") + print(suffix) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/pyproject.toml b/pyproject.toml index 9a4d726f..bf3077ee 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "0.2.0" +version = "0.3.0" description = "" authors = ["Bam4d "] readme = "README.md" diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index 2019de58..bc80a8b7 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -92,7 +92,7 @@ async def _check_response(self, response: Response) -> Dict[str, Any]: async def _request( self, method: str, - json: Dict[str, Any], + json: Optional[Dict[str, Any]], path: str, stream: bool = False, attempt: int = 1, @@ -291,3 +291,74 @@ async def list_models(self) -> ModelList: return ModelList(**response) raise MistralException("No response received") + + async def completion( + self, + model: str, + prompt: str, + suffix: Optional[str] = None, + temperature: Optional[float] = None, + max_tokens: Optional[int] = None, + top_p: Optional[float] = None, + random_seed: Optional[int] = None, + stop: Optional[List[str]] = None, + ) -> ChatCompletionResponse: + """An asynchronous completion endpoint that returns a single response. + + Args: + model (str): model the name of the model to get completions with, e.g. 
codestral-latest + prompt (str): the prompt to complete + suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion + temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. + max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. + top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. + Defaults to None. + random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. + stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n'] + Returns: + Dict[str, Any]: a response object containing the generated text. + """ + request = self._make_completion_request( + prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop + ) + single_response = self._request("post", request, "v1/fim/completions") + + async for response in single_response: + return ChatCompletionResponse(**response) + + raise MistralException("No response received") + + async def completion_stream( + self, + model: str, + prompt: str, + suffix: Optional[str] = None, + temperature: Optional[float] = None, + max_tokens: Optional[int] = None, + top_p: Optional[float] = None, + random_seed: Optional[int] = None, + stop: Optional[List[str]] = None, + ) -> AsyncGenerator[ChatCompletionStreamResponse, None]: + """An asynchronous completion endpoint that returns a streaming response. + + Args: + model (str): model the name of the model to get completions with, e.g. codestral-latest + prompt (str): the prompt to complete + suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion + temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. + max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. 
+ top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. + Defaults to None. + random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. + stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n'] + + Returns: + Dict[str, Any]: a response object containing the generated text. + """ + request = self._make_completion_request( + prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop, stream=True + ) + async_response = self._request("post", request, "v1/fim/completions", stream=True) + + async for json_response in async_response: + yield ChatCompletionStreamResponse(**json_response) diff --git a/src/mistralai/client.py b/src/mistralai/client.py index a5daa517..b00ddcff 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -85,7 +85,7 @@ def _check_response(self, response: Response) -> Dict[str, Any]: def _request( self, method: str, - json: Dict[str, Any], + json: Optional[Dict[str, Any]], path: str, stream: bool = False, attempt: int = 1, @@ -285,3 +285,77 @@ def list_models(self) -> ModelList: return ModelList(**response) raise MistralException("No response received") + + def completion( + self, + model: str, + prompt: str, + suffix: Optional[str] = None, + temperature: Optional[float] = None, + max_tokens: Optional[int] = None, + top_p: Optional[float] = None, + random_seed: Optional[int] = None, + stop: Optional[List[str]] = None, + ) -> ChatCompletionResponse: + """A completion endpoint that returns a single response. + + Args: + model (str): model the name of the model to get completion with, e.g. codestral-latest + prompt (str): the prompt to complete + suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion + temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. 
+ max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. + top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. + Defaults to None. + random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. + stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n'] + + Returns: + Dict[str, Any]: a response object containing the generated text. + """ + request = self._make_completion_request( + prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop + ) + + single_response = self._request("post", request, "v1/fim/completions", stream=False) + + for response in single_response: + return ChatCompletionResponse(**response) + + raise MistralException("No response received") + + def completion_stream( + self, + model: str, + prompt: str, + suffix: Optional[str] = None, + temperature: Optional[float] = None, + max_tokens: Optional[int] = None, + top_p: Optional[float] = None, + random_seed: Optional[int] = None, + stop: Optional[List[str]] = None, + ) -> Iterable[ChatCompletionStreamResponse]: + """An asynchronous completion endpoint that streams responses. + + Args: + model (str): model the name of the model to get completions with, e.g. codestral-latest + prompt (str): the prompt to complete + suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion + temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. + max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. + top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. + Defaults to None. + random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. + stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. 
['/n/n'] + + Returns: + Iterable[Dict[str, Any]]: a generator that yields response objects containing the generated text. + """ + request = self._make_completion_request( + prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop, stream=True + ) + + response = self._request("post", request, "v1/fim/completions", stream=True) + + for json_streamed_response in response: + yield ChatCompletionStreamResponse(**json_streamed_response) diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index d58ff142..c38e093b 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -73,6 +73,63 @@ def _parse_messages(self, messages: List[Any]) -> List[Dict[str, Any]]: return parsed_messages + def _make_completion_request( + self, + prompt: str, + model: Optional[str] = None, + suffix: Optional[str] = None, + temperature: Optional[float] = None, + max_tokens: Optional[int] = None, + top_p: Optional[float] = None, + random_seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stream: Optional[bool] = False, + ) -> Dict[str, Any]: + request_data: Dict[str, Any] = { + "prompt": prompt, + "suffix": suffix, + "model": model, + "stream": stream, + } + + if stop is not None: + request_data["stop"] = stop + + if model is not None: + request_data["model"] = model + else: + if self._default_model is None: + raise MistralException(message="model must be provided") + request_data["model"] = self._default_model + + request_data.update( + self._build_sampling_params( + temperature=temperature, max_tokens=max_tokens, top_p=top_p, random_seed=random_seed + ) + ) + + self._logger.debug(f"Completion request: {request_data}") + + return request_data + + def _build_sampling_params( + self, + max_tokens: Optional[int], + random_seed: Optional[int], + temperature: Optional[float], + top_p: Optional[float], + ) -> Dict[str, Any]: + params = {} + if temperature is not None: + params["temperature"] = temperature + if max_tokens is not 
None: + params["max_tokens"] = max_tokens + if top_p is not None: + params["top_p"] = top_p + if random_seed is not None: + params["random_seed"] = random_seed + return params + def _make_chat_request( self, messages: List[Any], @@ -99,16 +156,14 @@ def _make_chat_request( raise MistralException(message="model must be provided") request_data["model"] = self._default_model + request_data.update( + self._build_sampling_params( + temperature=temperature, max_tokens=max_tokens, top_p=top_p, random_seed=random_seed + ) + ) + if tools is not None: request_data["tools"] = self._parse_tools(tools) - if temperature is not None: - request_data["temperature"] = temperature - if max_tokens is not None: - request_data["max_tokens"] = max_tokens - if top_p is not None: - request_data["top_p"] = top_p - if random_seed is not None: - request_data["random_seed"] = random_seed if stream is not None: request_data["stream"] = stream diff --git a/tests/test_chat.py b/tests/test_chat.py index eebc7362..6b1658ea 100644 --- a/tests/test_chat.py +++ b/tests/test_chat.py @@ -20,7 +20,7 @@ def test_chat(self, client): ) result = client.chat( - model="mistral-small", + model="mistral-small-latest", messages=[ChatMessage(role="user", content="What is the best French cheese?")], ) @@ -34,7 +34,7 @@ def test_chat(self, client): "Content-Type": "application/json", }, json={ - "model": "mistral-small", + "model": "mistral-small-latest", "messages": [{"role": "user", "content": "What is the best French cheese?"}], "safe_prompt": False, "stream": False, @@ -53,7 +53,7 @@ def test_chat_streaming(self, client): ) result = client.chat_stream( - model="mistral-small", + model="mistral-small-latest", messages=[ChatMessage(role="user", content="What is the best French cheese?")], ) @@ -69,7 +69,7 @@ def test_chat_streaming(self, client): "Content-Type": "application/json", }, json={ - "model": "mistral-small", + "model": "mistral-small-latest", "messages": [{"role": "user", "content": "What is the best 
French cheese?"}], "safe_prompt": False, "stream": True, diff --git a/tests/test_chat_async.py b/tests/test_chat_async.py index e68760f1..15479edd 100644 --- a/tests/test_chat_async.py +++ b/tests/test_chat_async.py @@ -24,7 +24,7 @@ async def test_chat(self, async_client): ) result = await async_client.chat( - model="mistral-small", + model="mistral-small-latest", messages=[ChatMessage(role="user", content="What is the best French cheese?")], ) @@ -38,7 +38,7 @@ async def test_chat(self, async_client): "Content-Type": "application/json", }, json={ - "model": "mistral-small", + "model": "mistral-small-latest", "messages": [{"role": "user", "content": "What is the best French cheese?"}], "safe_prompt": False, "stream": False, @@ -59,7 +59,7 @@ async def test_chat_streaming(self, async_client): ) result = async_client.chat_stream( - model="mistral-small", + model="mistral-small-latest", messages=[ChatMessage(role="user", content="What is the best French cheese?")], ) @@ -75,7 +75,7 @@ async def test_chat_streaming(self, async_client): "Content-Type": "application/json", }, json={ - "model": "mistral-small", + "model": "mistral-small-latest", "messages": [{"role": "user", "content": "What is the best French cheese?"}], "safe_prompt": False, "stream": True, diff --git a/tests/test_completion.py b/tests/test_completion.py new file mode 100644 index 00000000..1b6f1c15 --- /dev/null +++ b/tests/test_completion.py @@ -0,0 +1,97 @@ +from mistralai.models.chat_completion import ( + ChatCompletionResponse, + ChatCompletionStreamResponse, +) + +from .utils import ( + mock_completion_response_payload, + mock_response, + mock_stream_response, +) + + +class TestCompletion: + def test_completion(self, client): + client._client.request.return_value = mock_response( + 200, + mock_completion_response_payload(), + ) + + result = client.completion( + model="mistral-small-latest", + prompt="def add(a, b):", + suffix="return a + b", + temperature=0.5, + max_tokens=50, + top_p=0.9, + 
random_seed=42, + ) + + client._client.request.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/fim/completions", + headers={ + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "application/json", + "Authorization": "Bearer test_api_key", + "Content-Type": "application/json", + }, + json={ + "model": "mistral-small-latest", + "prompt": "def add(a, b):", + "suffix": "return a + b", + "stream": False, + "temperature": 0.5, + "max_tokens": 50, + "top_p": 0.9, + "random_seed": 42, + }, + ) + + assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" + assert len(result.choices) == 1 + assert result.choices[0].index == 0 + assert result.object == "chat.completion" + + def test_completion_streaming(self, client): + client._client.stream.return_value = mock_stream_response( + 200, + mock_completion_response_payload(), + ) + + result = client.completion_stream( + model="mistral-small-latest", prompt="def add(a, b):", suffix="return a + b", stop=["#"] + ) + + results = list(result) + + client._client.stream.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/fim/completions", + headers={ + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "text/event-stream", + "Authorization": "Bearer test_api_key", + "Content-Type": "application/json", + }, + json={ + "model": "mistral-small-latest", + "prompt": "def add(a, b):", + "suffix": "return a + b", + "stream": True, + "stop": ["#"], + }, + ) + + for i, result in enumerate(results): + if i == 0: + assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" + assert len(result.choices) == 1 + assert result.choices[0].index == 0 + assert result.choices[0].delta.role == "assistant" + else: + assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" + assert 
len(result.choices) == 1 + assert result.choices[0].index == i - 1 + assert result.choices[0].delta.content == f"stream response {i - 1}" + assert result.object == "chat.completion.chunk" diff --git a/tests/utils.py b/tests/utils.py index 50637c2d..826753d2 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -67,7 +67,7 @@ def mock_list_models_response_payload() -> str: ], }, { - "id": "mistral-small", + "id": "mistral-small-latest", "object": "model", "created": 1703186988, "owned_by": "mistralai", @@ -178,7 +178,7 @@ def mock_chat_response_payload(): "index": 0, } ], - "model": "mistral-small", + "model": "mistral-small-latest", "usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0}, } ).decode() @@ -190,7 +190,7 @@ def mock_chat_response_streaming_payload(): + orjson.dumps( { "id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e", - "model": "mistral-small", + "model": "mistral-small-latest", "choices": [ { "index": 0, @@ -208,7 +208,7 @@ def mock_chat_response_streaming_payload(): "id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e", "object": "chat.completion.chunk", "created": 1703168544, - "model": "mistral-small", + "model": "mistral-small-latest", "choices": [ { "index": i, @@ -223,3 +223,25 @@ def mock_chat_response_streaming_payload(): ], "data: [DONE]\n\n", ] + + +def mock_completion_response_payload() -> str: + return orjson.dumps( + { + "id": "chat-98c8c60e3fbf4fc49658eddaf447357c", + "object": "chat.completion", + "created": 1703165682, + "choices": [ + { + "finish_reason": "stop", + "message": { + "role": "assistant", + "content": " a + b", + }, + "index": 0, + } + ], + "model": "mistral-small-latest", + "usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0}, + } + ).decode() From 51b5e4ea27f2e743fe3a4f44b230ba62026dbb91 Mon Sep 17 00:00:00 2001 From: CharlesCNorton <135471798+CharlesCNorton@users.noreply.github.com> Date: Sun, 2 Jun 2024 12:44:41 -0400 Subject: [PATCH 045/223] fix: Remove redundant LOG_FORMAT definition 
Removed the second instance of the LOG_FORMAT definition in the chatbot script. The specific change was the removal of the line LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s" which appeared twice consecutively. This correction helps maintain code clarity and conciseness. --- examples/chatbot_with_streaming.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py index 4304551f..eec8309a 100755 --- a/examples/chatbot_with_streaming.py +++ b/examples/chatbot_with_streaming.py @@ -32,8 +32,6 @@ "/exit": {}, } -LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s" - logger = logging.getLogger("chatbot") From 2d4e3fd5e5cb65c8d2914915f689480045f89290 Mon Sep 17 00:00:00 2001 From: Alexis Tacnet Date: Wed, 5 Jun 2024 16:26:16 +0200 Subject: [PATCH 046/223] Add support for our Fine-tuning API (#99) --- Makefile | 6 + examples/async_files.py | 32 +++++ examples/async_jobs.py | 51 +++++++ examples/async_jobs_chat.py | 59 ++++++++ examples/completion.py | 33 +++++ examples/dry_run_job.py | 38 ++++++ examples/file.jsonl | 21 +++ examples/files.py | 31 +++++ examples/jobs.py | 49 +++++++ examples/validation_file.jsonl | 2 + poetry.lock | 4 +- pyproject.toml | 8 +- src/mistralai/async_client.py | 14 +- src/mistralai/client.py | 14 +- src/mistralai/client_base.py | 3 +- src/mistralai/files.py | 84 ++++++++++++ src/mistralai/jobs.py | 172 ++++++++++++++++++++++++ src/mistralai/models/chat_completion.py | 4 +- src/mistralai/models/files.py | 23 ++++ src/mistralai/models/jobs.py | 98 ++++++++++++++ src/mistralai/models/models.py | 6 +- tests/test_chat.py | 4 +- tests/test_chat_async.py | 4 +- tests/test_completion.py | 2 + tests/test_embedder.py | 2 + tests/test_embedder_async.py | 2 + tests/test_files.py | 105 +++++++++++++++ tests/test_files_async.py | 110 +++++++++++++++ tests/test_jobs.py | 128 ++++++++++++++++++ tests/test_jobs_async.py | 133 ++++++++++++++++++ tests/test_list_models.py | 1 + 
tests/test_list_models_async.py | 1 + tests/utils.py | 73 ++++++++++ 33 files changed, 1299 insertions(+), 18 deletions(-) create mode 100644 Makefile create mode 100644 examples/async_files.py create mode 100644 examples/async_jobs.py create mode 100644 examples/async_jobs_chat.py create mode 100644 examples/completion.py create mode 100644 examples/dry_run_job.py create mode 100644 examples/file.jsonl create mode 100644 examples/files.py create mode 100644 examples/jobs.py create mode 100644 examples/validation_file.jsonl create mode 100644 src/mistralai/files.py create mode 100644 src/mistralai/jobs.py create mode 100644 src/mistralai/models/files.py create mode 100644 src/mistralai/models/jobs.py create mode 100644 tests/test_files.py create mode 100644 tests/test_files_async.py create mode 100644 tests/test_jobs.py create mode 100644 tests/test_jobs_async.py diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..188291f6 --- /dev/null +++ b/Makefile @@ -0,0 +1,6 @@ +.PHONY: lint + +lint: + poetry run ruff check --fix . + poetry run ruff format . + poetry run mypy . 
diff --git a/examples/async_files.py b/examples/async_files.py new file mode 100644 index 00000000..1022b7a7 --- /dev/null +++ b/examples/async_files.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.async_client import MistralAsyncClient + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = MistralAsyncClient(api_key=api_key) + + # Create a new file + created_file = await client.files.create(file=open("examples/file.jsonl", "rb").read()) + print(created_file) + + # List files + files = await client.files.list() + print(files) + + # Retrieve a file + retrieved_file = await client.files.retrieve(created_file.id) + print(retrieved_file) + + # Delete a file + deleted_file = await client.files.delete(created_file.id) + print(deleted_file) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_jobs.py b/examples/async_jobs.py new file mode 100644 index 00000000..792735d9 --- /dev/null +++ b/examples/async_jobs.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.async_client import MistralAsyncClient +from mistralai.models.jobs import TrainingParameters + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = MistralAsyncClient(api_key=api_key) + + # Create new files + with open("examples/file.jsonl", "rb") as f: + training_file = await client.files.create(file=f) + with open("examples/validation_file.jsonl", "rb") as f: + validation_file = await client.files.create(file=f) + + # Create a new job + created_job = await client.jobs.create( + model="open-mistral-7b", + training_files=[training_file.id], + validation_files=[validation_file.id], + hyperparameters=TrainingParameters( + training_steps=1, + learning_rate=0.0001, + ), + ) + print(created_job) + + # List jobs + jobs = await client.jobs.list(page=0, page_size=5) + print(jobs) + + # Retrieve a job + retrieved_job = await client.jobs.retrieve(created_job.id) + 
print(retrieved_job) + + # Cancel a job + canceled_job = await client.jobs.cancel(created_job.id) + print(canceled_job) + + # Delete files + await client.files.delete(training_file.id) + await client.files.delete(validation_file.id) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_jobs_chat.py b/examples/async_jobs_chat.py new file mode 100644 index 00000000..15e550c8 --- /dev/null +++ b/examples/async_jobs_chat.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.async_client import MistralAsyncClient +from mistralai.models.jobs import TrainingParameters + +POLLING_INTERVAL = 10 + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = MistralAsyncClient(api_key=api_key) + + # Create new files + with open("examples/file.jsonl", "rb") as f: + training_file = await client.files.create(file=f) + with open("examples/validation_file.jsonl", "rb") as f: + validation_file = await client.files.create(file=f) + # Create a new job + created_job = await client.jobs.create( + model="open-mistral-7b", + training_files=[training_file.id], + validation_files=[validation_file.id], + hyperparameters=TrainingParameters( + training_steps=1, + learning_rate=0.0001, + ), + ) + print(created_job) + + while created_job.status in ["RUNNING", "QUEUED"]: + created_job = await client.jobs.retrieve(created_job.id) + print(f"Job is {created_job.status}, waiting {POLLING_INTERVAL} seconds") + await asyncio.sleep(POLLING_INTERVAL) + + if created_job.status == "FAILED": + print("Job failed") + return + + # Chat with model + response = await client.chat( + model=created_job.fine_tuned_model, + messages=[ + {"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, + {"role": "user", "content": "What is the capital of France ?"}, + ], + ) + + print(response.choices[0].message.content) + + # Delete files + await client.files.delete(training_file.id) + await 
client.files.delete(validation_file.id) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/completion.py b/examples/completion.py new file mode 100644 index 00000000..f76f0f11 --- /dev/null +++ b/examples/completion.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.client import MistralClient + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = MistralClient(api_key=api_key) + + prompt = "def fibonacci(n: int):" + suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" + + response = client.completion( + model="codestral-latest", + prompt=prompt, + suffix=suffix, + ) + + print( + f""" +{prompt} +{response.choices[0].message.content} +{suffix} +""" + ) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/dry_run_job.py b/examples/dry_run_job.py new file mode 100644 index 00000000..d44d8a53 --- /dev/null +++ b/examples/dry_run_job.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai.async_client import MistralAsyncClient +from mistralai.models.jobs import TrainingParameters + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = MistralAsyncClient(api_key=api_key) + + # Create new files + with open("examples/file.jsonl", "rb") as f: + training_file = await client.files.create(file=f) + + # Create a new job + dry_run_job = await client.jobs.create( + model="open-mistral-7b", + training_files=[training_file.id], + hyperparameters=TrainingParameters( + training_steps=1, + learning_rate=0.0001, + ), + dry_run=True, + ) + + print("Dry run job created") + print(f"Train tokens: {dry_run_job.train_tokens}") + print(f"Dataset tokens: {dry_run_job.data_tokens}") + print(f"Epochs number: {dry_run_job.epochs}") + print(f"Expected duration: {dry_run_job.expected_duration_seconds}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/file.jsonl b/examples/file.jsonl new file 
mode 100644 index 00000000..eecd016c --- /dev/null +++ b/examples/file.jsonl @@ -0,0 +1,21 @@ +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the largest ocean on Earth?"}, {"role": "assistant", "content": "The Pacific Ocean. You know, the one that covers more than 60 million square miles. No big deal."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How many continents are there?"}, {"role": "assistant", "content": "There are seven continents. I hope that wasn't too hard to count."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the powerhouse of the cell?"}, {"role": "assistant", "content": "The mitochondria. Remember that from high school biology?"}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the speed of light?"}, {"role": "assistant", "content": "Approximately 299,792 kilometers per second. You know, faster than your internet speed."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Can you solve a Rubik's Cube?"}, {"role": "assistant", "content": "I could if I had hands. What's your excuse?"}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the tallest mountain in the world?"}, {"role": "assistant", "content": "Mount Everest, standing at 29,029 feet. 
You know, just a little hill."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who painted the Mona Lisa?"}, {"role": "assistant", "content": "Leonardo da Vinci. Just another guy who liked to doodle."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the chemical symbol for gold?"}, {"role": "assistant", "content": "Au. Because 'G' was already taken, I guess."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How many bones are in the human body?"}, {"role": "assistant", "content": "206. Unless you break one, then the count might change."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the freezing point of water?"}, {"role": "assistant", "content": "0 degrees Celsius. Pretty basic stuff, really."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the currency of Japan?"}, {"role": "assistant", "content": "The Yen. Not Monopoly money, if you were wondering."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How long does it take to travel around the Earth?"}, {"role": "assistant", "content": "Around 24 hours if you're the Earth itself. For you, depends on your mode of transportation."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the largest planet in our solar system?"}, {"role": "assistant", "content": "Jupiter. 
It's like the king of planets or something."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the main ingredient in guacamole?"}, {"role": "assistant", "content": "Avocado. Not exactly a secret ingredient."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who is known as the father of computers?"}, {"role": "assistant", "content": "Charles Babbage. The original tech geek."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How many hearts does an octopus have?"}, {"role": "assistant", "content": "Three. One for each of its charming personalities."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the smallest country in the world?"}, {"role": "assistant", "content": "Vatican City. Smaller than your average theme park."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What language has the most native speakers?"}, {"role": "assistant", "content": "Mandarin Chinese. Because it's not all about English, you know."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What is the hardest natural substance on Earth?"}, {"role": "assistant", "content": "Diamond. Not your average rock."}]} +{"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What element does 'O' represent on the periodic table?"}, {"role": "assistant", "content": "Oxygen. The stuff you breathe. 
Hopefully, this isn't news."}]} diff --git a/examples/files.py b/examples/files.py new file mode 100644 index 00000000..af034b97 --- /dev/null +++ b/examples/files.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +import os + +from mistralai.client import MistralClient + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = MistralClient(api_key=api_key) + + # Create a new file + created_file = client.files.create(file=("training_file.jsonl", open("examples/file.jsonl", "rb").read())) + print(created_file) + + # List files + files = client.files.list() + print(files) + + # Retrieve a file + retrieved_file = client.files.retrieve(created_file.id) + print(retrieved_file) + + # Delete a file + deleted_file = client.files.delete(created_file.id) + print(deleted_file) + + +if __name__ == "__main__": + main() diff --git a/examples/jobs.py b/examples/jobs.py new file mode 100644 index 00000000..2ba8ae74 --- /dev/null +++ b/examples/jobs.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +import os + +from mistralai.client import MistralClient +from mistralai.models.jobs import TrainingParameters + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = MistralClient(api_key=api_key) + + # Create new files + with open("examples/file.jsonl", "rb") as f: + training_file = client.files.create(file=f) + with open("examples/validation_file.jsonl", "rb") as f: + validation_file = client.files.create(file=f) + + # Create a new job + created_job = client.jobs.create( + model="open-mistral-7b", + training_files=[training_file.id], + validation_files=[validation_file.id], + hyperparameters=TrainingParameters( + training_steps=1, + learning_rate=0.0001, + ), + ) + print(created_job) + + jobs = client.jobs.list(created_after=created_job.created_at - 10) + for job in jobs.data: + print(f"Retrieved job: {job.id}") + + # Retrieve a job + retrieved_job = client.jobs.retrieve(created_job.id) + print(retrieved_job) + + # Cancel a job + canceled_job = 
client.jobs.cancel(created_job.id) + print(canceled_job) + + # Delete files + client.files.delete(training_file.id) + client.files.delete(validation_file.id) + + +if __name__ == "__main__": + main() diff --git a/examples/validation_file.jsonl b/examples/validation_file.jsonl new file mode 100644 index 00000000..a1c814b0 --- /dev/null +++ b/examples/validation_file.jsonl @@ -0,0 +1,2 @@ +{"messages": [{"role": "user", "content": "How long does it take to travel around the Earth?"}, {"role": "assistant", "content": "Around 24 hours if you're the Earth itself. For you, depends on your mode of transportation."}]} + diff --git a/poetry.lock b/poetry.lock index c7723c5f..636856ce 100644 --- a/poetry.lock +++ b/poetry.lock @@ -532,5 +532,5 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" -python-versions = "^3.9" -content-hash = "a7fd4bf25bc9a0edf13eb53e3463b3d8176a25c2b8b1383625c0b42c6510e1d0" +python-versions = "^3.9,<4.0" +content-hash = "015db2d023aef624cb1df559074f6e3760d4029e637d3f259f5d2c76730b8d6a" diff --git a/pyproject.toml b/pyproject.toml index bf3077ee..ae82689c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,10 +23,10 @@ exclude = ["docs", "tests", "examples", "tools", "build"] [tool.poetry.dependencies] -python = "^3.9" -orjson = "^3.9.10" -pydantic = "^2.5.2" -httpx = ">= 0.25.2, < 1" +python = "^3.9,<4.0" +orjson = "^3.9.10,<3.11" +pydantic = "^2.5.2,<3" +httpx = "^0.25,<1" [tool.poetry.group.dev.dependencies] diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index bc80a8b7..4e80de84 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -20,6 +20,8 @@ MistralConnectionException, MistralException, ) +from mistralai.files import FilesAsyncClient +from mistralai.jobs import JobsAsyncClient from mistralai.models.chat_completion import ( ChatCompletionResponse, ChatCompletionStreamResponse, @@ -47,6 +49,8 @@ def __init__( limits=Limits(max_connections=max_concurrent_requests), 
transport=AsyncHTTPTransport(retries=max_retries), ) + self.files = FilesAsyncClient(self) + self.jobs = JobsAsyncClient(self) async def close(self) -> None: await self._client.aclose() @@ -96,15 +100,19 @@ async def _request( path: str, stream: bool = False, attempt: int = 1, + data: Optional[Dict[str, Any]] = None, + **kwargs: Any, ) -> AsyncGenerator[Dict[str, Any], None]: accept_header = "text/event-stream" if stream else "application/json" headers = { "Accept": accept_header, "User-Agent": f"mistral-client-python/{self._version}", "Authorization": f"Bearer {self._api_key}", - "Content-Type": "application/json", } + if json is not None: + headers["Content-Type"] = "application/json" + url = posixpath.join(self._endpoint, path) self._logger.debug(f"Sending request: {method} {url} {json}") @@ -118,6 +126,8 @@ async def _request( url, headers=headers, json=json, + data=data, + **kwargs, ) as response: await self._check_streaming_response(response) @@ -132,6 +142,8 @@ async def _request( url, headers=headers, json=json, + data=data, + **kwargs, ) yield await self._check_response(response) diff --git a/src/mistralai/client.py b/src/mistralai/client.py index b00ddcff..1f70f4ea 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -13,6 +13,8 @@ MistralConnectionException, MistralException, ) +from mistralai.files import FilesClient +from mistralai.jobs import JobsClient from mistralai.models.chat_completion import ( ChatCompletionResponse, ChatCompletionStreamResponse, @@ -40,6 +42,8 @@ def __init__( self._client = Client( follow_redirects=True, timeout=self._timeout, transport=HTTPTransport(retries=self._max_retries) ) + self.files = FilesClient(self) + self.jobs = JobsClient(self) def __del__(self) -> None: self._client.close() @@ -89,15 +93,19 @@ def _request( path: str, stream: bool = False, attempt: int = 1, + data: Optional[Dict[str, Any]] = None, + **kwargs: Any, ) -> Iterator[Dict[str, Any]]: accept_header = "text/event-stream" if stream else 
"application/json" headers = { "Accept": accept_header, "User-Agent": f"mistral-client-python/{self._version}", "Authorization": f"Bearer {self._api_key}", - "Content-Type": "application/json", } + if json is not None: + headers["Content-Type"] = "application/json" + url = posixpath.join(self._endpoint, path) self._logger.debug(f"Sending request: {method} {url} {json}") @@ -111,6 +119,8 @@ def _request( url, headers=headers, json=json, + data=data, + **kwargs, ) as response: self._check_streaming_response(response) @@ -125,6 +135,8 @@ def _request( url, headers=headers, json=json, + data=data, + **kwargs, ) yield self._check_response(response) diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index c38e093b..5deaab3f 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -146,7 +146,6 @@ def _make_chat_request( ) -> Dict[str, Any]: request_data: Dict[str, Any] = { "messages": self._parse_messages(messages), - "safe_prompt": safe_prompt, } if model is not None: @@ -162,6 +161,8 @@ def _make_chat_request( ) ) + if safe_prompt: + request_data["safe_prompt"] = safe_prompt if tools is not None: request_data["tools"] = self._parse_tools(tools) if stream is not None: diff --git a/src/mistralai/files.py b/src/mistralai/files.py new file mode 100644 index 00000000..7646a065 --- /dev/null +++ b/src/mistralai/files.py @@ -0,0 +1,84 @@ +from typing import Any + +from mistralai.exceptions import ( + MistralException, +) +from mistralai.models.files import FileDeleted, FileObject, Files + + +class FilesClient: + def __init__(self, client: Any): + self.client = client + + def create( + self, + file: bytes, + purpose: str = "fine-tune", + ) -> FileObject: + single_response = self.client._request( + "post", + None, + "v1/files", + files={"file": file}, + data={"purpose": purpose}, + ) + for response in single_response: + return FileObject(**response) + raise MistralException("No response received") + + def retrieve(self, file_id: str) 
-> FileObject: + single_response = self.client._request("get", {}, f"v1/files/{file_id}") + for response in single_response: + return FileObject(**response) + raise MistralException("No response received") + + def list(self) -> Files: + single_response = self.client._request("get", {}, "v1/files") + for response in single_response: + return Files(**response) + raise MistralException("No response received") + + def delete(self, file_id: str) -> FileDeleted: + single_response = self.client._request("delete", {}, f"v1/files/{file_id}") + for response in single_response: + return FileDeleted(**response) + raise MistralException("No response received") + + +class FilesAsyncClient: + def __init__(self, client: Any): + self.client = client + + async def create( + self, + file: bytes, + purpose: str = "fine-tune", + ) -> FileObject: + single_response = self.client._request( + "post", + None, + "v1/files", + files={"file": file}, + data={"purpose": purpose}, + ) + async for response in single_response: + return FileObject(**response) + raise MistralException("No response received") + + async def retrieve(self, file_id: str) -> FileObject: + single_response = self.client._request("get", {}, f"v1/files/{file_id}") + async for response in single_response: + return FileObject(**response) + raise MistralException("No response received") + + async def list(self) -> Files: + single_response = self.client._request("get", {}, "v1/files") + async for response in single_response: + return Files(**response) + raise MistralException("No response received") + + async def delete(self, file_id: str) -> FileDeleted: + single_response = self.client._request("delete", {}, f"v1/files/{file_id}") + async for response in single_response: + return FileDeleted(**response) + raise MistralException("No response received") diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py new file mode 100644 index 00000000..115b2327 --- /dev/null +++ b/src/mistralai/jobs.py @@ -0,0 +1,172 @@ +from datetime 
import datetime +from typing import Any, Optional, Union + +from mistralai.exceptions import ( + MistralException, +) +from mistralai.models.jobs import DetailedJob, IntegrationIn, Job, JobMetadata, JobQueryFilter, Jobs, TrainingParameters + + +class JobsClient: + def __init__(self, client: Any): + self.client = client + + def create( + self, + model: str, + training_files: Union[list[str], None] = None, + validation_files: Union[list[str], None] = None, + hyperparameters: TrainingParameters = TrainingParameters( + training_steps=1800, + learning_rate=1.0e-4, + ), + suffix: Union[str, None] = None, + integrations: Union[set[IntegrationIn], None] = None, + training_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API + validation_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API + dry_run: bool = False, + ) -> Union[Job, JobMetadata]: + # Handle deprecated arguments + if not training_files and training_file: + training_files = [training_file] + if not validation_files and validation_file: + validation_files = [validation_file] + single_response = self.client._request( + method="post", + json={ + "model": model, + "training_files": training_files, + "validation_files": validation_files, + "hyperparameters": hyperparameters.dict(), + "suffix": suffix, + "integrations": integrations, + }, + path="v1/fine_tuning/jobs", + params={"dry_run": dry_run}, + ) + for response in single_response: + return Job(**response) if not dry_run else JobMetadata(**response) + raise MistralException("No response received") + + def retrieve(self, job_id: str) -> DetailedJob: + single_response = self.client._request(method="get", path=f"v1/fine_tuning/jobs/{job_id}", json={}) + for response in single_response: + return DetailedJob(**response) + raise MistralException("No response received") + + def list( + self, + page: int = 0, + page_size: int = 10, + model: Optional[str] = None, + created_after: Optional[datetime] = None, 
+ created_by_me: Optional[bool] = None, + status: Optional[str] = None, + wandb_project: Optional[str] = None, + wandb_name: Optional[str] = None, + suffix: Optional[str] = None, + ) -> Jobs: + query_params = JobQueryFilter( + page=page, + page_size=page_size, + model=model, + created_after=created_after, + created_by_me=created_by_me, + status=status, + wandb_project=wandb_project, + wandb_name=wandb_name, + suffix=suffix, + ).model_dump(exclude_none=True) + single_response = self.client._request(method="get", params=query_params, path="v1/fine_tuning/jobs", json={}) + for response in single_response: + return Jobs(**response) + raise MistralException("No response received") + + def cancel(self, job_id: str) -> DetailedJob: + single_response = self.client._request(method="post", path=f"v1/fine_tuning/jobs/{job_id}/cancel", json={}) + for response in single_response: + return DetailedJob(**response) + raise MistralException("No response received") + + +class JobsAsyncClient: + def __init__(self, client: Any): + self.client = client + + async def create( + self, + model: str, + training_files: Union[list[str], None] = None, + validation_files: Union[list[str], None] = None, + hyperparameters: TrainingParameters = TrainingParameters( + training_steps=1800, + learning_rate=1.0e-4, + ), + suffix: Union[str, None] = None, + integrations: Union[set[IntegrationIn], None] = None, + training_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API + validation_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API + dry_run: bool = False, + ) -> Union[Job, JobMetadata]: + # Handle deprecated arguments + if not training_files and training_file: + training_files = [training_file] + if not validation_files and validation_file: + validation_files = [validation_file] + + single_response = self.client._request( + method="post", + json={ + "model": model, + "training_files": training_files, + "validation_files": 
validation_files, + "hyperparameters": hyperparameters.dict(), + "suffix": suffix, + "integrations": integrations, + }, + path="v1/fine_tuning/jobs", + params={"dry_run": dry_run}, + ) + async for response in single_response: + return Job(**response) if not dry_run else JobMetadata(**response) + raise MistralException("No response received") + + async def retrieve(self, job_id: str) -> DetailedJob: + single_response = self.client._request(method="get", path=f"v1/fine_tuning/jobs/{job_id}", json={}) + async for response in single_response: + return DetailedJob(**response) + raise MistralException("No response received") + + async def list( + self, + page: int = 0, + page_size: int = 10, + model: Optional[str] = None, + created_after: Optional[datetime] = None, + created_by_me: Optional[bool] = None, + status: Optional[str] = None, + wandb_project: Optional[str] = None, + wandb_name: Optional[str] = None, + suffix: Optional[str] = None, + ) -> Jobs: + query_params = JobQueryFilter( + page=page, + page_size=page_size, + model=model, + created_after=created_after, + created_by_me=created_by_me, + status=status, + wandb_project=wandb_project, + wandb_name=wandb_name, + suffix=suffix, + ).model_dump(exclude_none=True) + single_response = self.client._request(method="get", path="v1/fine_tuning/jobs", params=query_params, json={}) + async for response in single_response: + return Jobs(**response) + raise MistralException("No response received") + + async def cancel(self, job_id: str) -> DetailedJob: + single_response = self.client._request(method="post", path=f"v1/fine_tuning/jobs/{job_id}/cancel", json={}) + async for response in single_response: + return DetailedJob(**response) + raise MistralException("No response received") diff --git a/src/mistralai/models/chat_completion.py b/src/mistralai/models/chat_completion.py index 0dcc6f97..c5eda5ad 100644 --- a/src/mistralai/models/chat_completion.py +++ b/src/mistralai/models/chat_completion.py @@ -1,5 +1,5 @@ from enum 
import Enum -from typing import List, Optional, Union +from typing import List, Optional from pydantic import BaseModel @@ -44,7 +44,7 @@ class ResponseFormat(BaseModel): class ChatMessage(BaseModel): role: str - content: Union[str, List[str]] + content: str name: Optional[str] = None tool_calls: Optional[List[ToolCall]] = None tool_call_id: Optional[str] = None diff --git a/src/mistralai/models/files.py b/src/mistralai/models/files.py new file mode 100644 index 00000000..f0aeff3f --- /dev/null +++ b/src/mistralai/models/files.py @@ -0,0 +1,23 @@ +from typing import Literal, Optional + +from pydantic import BaseModel + + +class FileObject(BaseModel): + id: str + object: str + bytes: int + created_at: int + filename: str + purpose: Optional[Literal["fine-tune"]] = "fine-tune" + + +class FileDeleted(BaseModel): + id: str + object: str + deleted: bool + + +class Files(BaseModel): + data: list[FileObject] + object: Literal["list"] diff --git a/src/mistralai/models/jobs.py b/src/mistralai/models/jobs.py new file mode 100644 index 00000000..80065337 --- /dev/null +++ b/src/mistralai/models/jobs.py @@ -0,0 +1,98 @@ +from datetime import datetime +from typing import Annotated, List, Literal, Optional, Union + +from pydantic import BaseModel, Field + + +class TrainingParameters(BaseModel): + training_steps: int = Field(1800, le=10000, ge=1) + learning_rate: float = Field(1.0e-4, le=1, ge=1.0e-8) + + +class WandbIntegration(BaseModel): + type: Literal["wandb"] = "wandb" + project: str + name: Union[str, None] = None + run_name: Union[str, None] = None + + +class WandbIntegrationIn(WandbIntegration): + api_key: str + + +Integration = Annotated[Union[WandbIntegration], Field(discriminator="type")] +IntegrationIn = Annotated[Union[WandbIntegrationIn], Field(discriminator="type")] + + +class JobMetadata(BaseModel): + object: Literal["job.metadata"] = "job.metadata" + training_steps: int + train_tokens_per_step: int + data_tokens: int + train_tokens: int + epochs: float + 
expected_duration_seconds: Optional[int] + + +class Job(BaseModel): + id: str + hyperparameters: TrainingParameters + fine_tuned_model: Union[str, None] + model: str + status: Literal[ + "QUEUED", + "STARTED", + "RUNNING", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ] + job_type: str + created_at: int + modified_at: int + training_files: list[str] + validation_files: Union[list[str], None] = [] + object: Literal["job"] + integrations: List[Integration] = [] + + +class Event(BaseModel): + name: str + data: Union[dict, None] = None + created_at: int + + +class Metric(BaseModel): + train_loss: Union[float, None] = None + valid_loss: Union[float, None] = None + valid_mean_token_accuracy: Union[float, None] = None + + +class Checkpoint(BaseModel): + metrics: Metric + step_number: int + created_at: int + + +class JobQueryFilter(BaseModel): + page: int = 0 + page_size: int = 100 + model: Optional[str] = None + created_after: Optional[datetime] = None + created_by_me: Optional[bool] = None + status: Optional[str] = None + wandb_project: Optional[str] = None + wandb_name: Optional[str] = None + suffix: Optional[str] = None + + +class DetailedJob(Job): + events: list[Event] = [] + checkpoints: list[Checkpoint] = [] + estimated_start_time: Optional[int] = None + + +class Jobs(BaseModel): + data: list[Job] = [] + object: Literal["list"] diff --git a/src/mistralai/models/models.py b/src/mistralai/models/models.py index 0acd4025..8ed152d3 100644 --- a/src/mistralai/models/models.py +++ b/src/mistralai/models/models.py @@ -7,15 +7,15 @@ class ModelPermission(BaseModel): id: str object: str created: int - allow_create_engine: bool = False + allow_create_engine: Optional[bool] = False allow_sampling: bool = True allow_logprobs: bool = True - allow_search_indices: bool = False + allow_search_indices: Optional[bool] = False allow_view: bool = True allow_fine_tuning: bool = False organization: str = "*" group: Optional[str] = None - is_blocking: bool = False 
+ is_blocking: Optional[bool] = False class ModelCard(BaseModel): diff --git a/tests/test_chat.py b/tests/test_chat.py index 6b1658ea..482b12e8 100644 --- a/tests/test_chat.py +++ b/tests/test_chat.py @@ -36,9 +36,9 @@ def test_chat(self, client): json={ "model": "mistral-small-latest", "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "safe_prompt": False, "stream": False, }, + data=None, ) assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" @@ -71,9 +71,9 @@ def test_chat_streaming(self, client): json={ "model": "mistral-small-latest", "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "safe_prompt": False, "stream": True, }, + data=None, ) for i, result in enumerate(results): diff --git a/tests/test_chat_async.py b/tests/test_chat_async.py index 15479edd..04a73f27 100644 --- a/tests/test_chat_async.py +++ b/tests/test_chat_async.py @@ -40,9 +40,9 @@ async def test_chat(self, async_client): json={ "model": "mistral-small-latest", "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "safe_prompt": False, "stream": False, }, + data=None, ) assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" @@ -77,9 +77,9 @@ async def test_chat_streaming(self, async_client): json={ "model": "mistral-small-latest", "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "safe_prompt": False, "stream": True, }, + data=None, ) for i, result in enumerate(results): diff --git a/tests/test_completion.py b/tests/test_completion.py index 1b6f1c15..a30cfcf6 100644 --- a/tests/test_completion.py +++ b/tests/test_completion.py @@ -46,6 +46,7 @@ def test_completion(self, client): "top_p": 0.9, "random_seed": 42, }, + data=None, ) assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" @@ -81,6 +82,7 @@ def test_completion_streaming(self, client): "stream": True, 
"stop": ["#"], }, + data=None, ) for i, result in enumerate(results): diff --git a/tests/test_embedder.py b/tests/test_embedder.py index 56cd4c57..6d9a0df3 100644 --- a/tests/test_embedder.py +++ b/tests/test_embedder.py @@ -25,6 +25,7 @@ def test_embeddings(self, client): "Content-Type": "application/json", }, json={"model": "mistral-embed", "input": "What is the best French cheese?"}, + data=None, ) assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" @@ -56,6 +57,7 @@ def test_embeddings_batch(self, client): "model": "mistral-embed", "input": ["What is the best French cheese?"] * 10, }, + data=None, ) assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" diff --git a/tests/test_embedder_async.py b/tests/test_embedder_async.py index d95fdd49..3de16011 100644 --- a/tests/test_embedder_async.py +++ b/tests/test_embedder_async.py @@ -27,6 +27,7 @@ async def test_embeddings(self, async_client): "Content-Type": "application/json", }, json={"model": "mistral-embed", "input": "What is the best French cheese?"}, + data=None, ) assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" @@ -59,6 +60,7 @@ async def test_embeddings_batch(self, async_client): "model": "mistral-embed", "input": ["What is the best French cheese?"] * 10, }, + data=None, ) assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" diff --git a/tests/test_files.py b/tests/test_files.py new file mode 100644 index 00000000..e4ef9e4d --- /dev/null +++ b/tests/test_files.py @@ -0,0 +1,105 @@ +import orjson +from mistralai.models.files import FileDeleted, FileObject + +from .utils import ( + mock_file_deleted_response_payload, + mock_file_response_payload, + mock_response, +) + + +class TestFilesClient: + def test_create_file(self, client): + expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) + client._client.request.return_value = mock_response( + 200, + 
expected_response_file.json(), + ) + + response_file = client.files.create(b"file_content") + + client._client.request.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/files", + headers={ + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "application/json", + "Authorization": "Bearer test_api_key", + }, + files={"file": b"file_content"}, + json=None, + data={"purpose": "fine-tune"}, + ) + assert response_file == expected_response_file + + def test_retrieve(self, client): + expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) + client._client.request.return_value = mock_response( + 200, + expected_response_file.json(), + ) + + response_file = client.files.retrieve("file_id") + + client._client.request.assert_called_once_with( + "get", + "https://api.mistral.ai/v1/files/file_id", + headers={ + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + assert response_file == expected_response_file + + def test_list_files(self, client): + expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) + client._client.request.return_value = mock_response( + 200, + orjson.dumps( + { + "data": [expected_response_file.model_dump()], + "object": "list", + } + ), + ) + + response_files = client.files.list() + response_file = response_files.data[0] + + client._client.request.assert_called_once_with( + "get", + "https://api.mistral.ai/v1/files", + headers={ + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + assert response_file == expected_response_file + + def 
test_delete_file(self, client): + expected_response_file = FileDeleted.model_validate_json(mock_file_deleted_response_payload()) + client._client.request.return_value = mock_response(200, expected_response_file.json()) + + response_file = client.files.delete("file_id") + + client._client.request.assert_called_once_with( + "delete", + "https://api.mistral.ai/v1/files/file_id", + headers={ + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + assert response_file == expected_response_file diff --git a/tests/test_files_async.py b/tests/test_files_async.py new file mode 100644 index 00000000..7248f407 --- /dev/null +++ b/tests/test_files_async.py @@ -0,0 +1,110 @@ +import orjson +import pytest +from mistralai.models.files import FileDeleted, FileObject + +from .utils import ( + mock_file_deleted_response_payload, + mock_file_response_payload, + mock_response, +) + + +class TestFilesAyncClient: + @pytest.mark.asyncio + async def test_create_file(self, async_client): + expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) + async_client._client.request.return_value = mock_response( + 200, + expected_response_file.json(), + ) + + response_file = await async_client.files.create(b"file_content") + + async_client._client.request.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/files", + headers={ + "User-Agent": f"mistral-client-python/{async_client._version}", + "Accept": "application/json", + "Authorization": "Bearer test_api_key", + }, + files={"file": b"file_content"}, + json=None, + data={"purpose": "fine-tune"}, + ) + assert response_file == expected_response_file + + @pytest.mark.asyncio + async def test_retrieve(self, async_client): + expected_response_file = 
FileObject.model_validate_json(mock_file_response_payload()) + async_client._client.request.return_value = mock_response( + 200, + expected_response_file.json(), + ) + + response_file = await async_client.files.retrieve("file_id") + + async_client._client.request.assert_called_once_with( + "get", + "https://api.mistral.ai/v1/files/file_id", + headers={ + "User-Agent": f"mistral-client-python/{async_client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + assert response_file == expected_response_file + + @pytest.mark.asyncio + async def test_list_files(self, async_client): + expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) + async_client._client.request.return_value = mock_response( + 200, + orjson.dumps( + { + "data": [expected_response_file.model_dump()], + "object": "list", + } + ), + ) + + response_files = await async_client.files.list() + response_file = response_files.data[0] + + async_client._client.request.assert_called_once_with( + "get", + "https://api.mistral.ai/v1/files", + headers={ + "User-Agent": f"mistral-client-python/{async_client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + assert response_file == expected_response_file + + @pytest.mark.asyncio + async def test_delete_file(self, async_client): + expected_response_file = FileDeleted.model_validate_json(mock_file_deleted_response_payload()) + async_client._client.request.return_value = mock_response(200, expected_response_file.json()) + + response_file = await async_client.files.delete("file_id") + + async_client._client.request.assert_called_once_with( + "delete", + "https://api.mistral.ai/v1/files/file_id", + 
headers={ + "User-Agent": f"mistral-client-python/{async_client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + assert response_file == expected_response_file diff --git a/tests/test_jobs.py b/tests/test_jobs.py new file mode 100644 index 00000000..efb19b7a --- /dev/null +++ b/tests/test_jobs.py @@ -0,0 +1,128 @@ +import orjson +from mistralai.models.jobs import DetailedJob, Job, TrainingParameters + +from .utils import ( + mock_detailed_job_response_payload, + mock_job_response_payload, + mock_response, +) + + +class TestJobsClient: + def test_create(self, client): + expected_response_job = Job.model_validate_json(mock_job_response_payload()) + client._client.request.return_value = mock_response( + 200, + expected_response_job.json(), + ) + + response_job = client.jobs.create( + model="model", + training_files=["training_file_id"], + validation_files=["validation_file_id"], + hyperparameters=TrainingParameters( + training_steps=1800, + learning_rate=1.0e-4, + ), + ) + + client._client.request.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/fine_tuning/jobs", + headers={ + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={ + "model": "model", + "training_files": ["training_file_id"], + "validation_files": ["validation_file_id"], + "hyperparameters": { + "training_steps": 1800, + "learning_rate": 1.0e-4, + }, + "suffix": None, + "integrations": None, + }, + data=None, + params={"dry_run": False}, + ) + assert response_job == expected_response_job + + def test_retrieve(self, client): + expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) + client._client.request.return_value = mock_response( + 200, + expected_response_job.json(), 
+ ) + + response_job = client.jobs.retrieve("job_id") + + client._client.request.assert_called_once_with( + "get", + "https://api.mistral.ai/v1/fine_tuning/jobs/job_id", + headers={ + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + assert response_job == expected_response_job + + def test_list(self, client): + expected_response_job = Job.model_validate_json(mock_job_response_payload()) + client._client.request.return_value = mock_response( + 200, + orjson.dumps( + { + "data": [expected_response_job.model_dump()], + "object": "list", + } + ), + ) + + response_jobs = client.jobs.list() + response_job = response_jobs.data[0] + + client._client.request.assert_called_once_with( + "get", + "https://api.mistral.ai/v1/fine_tuning/jobs", + headers={ + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + params={"page": 0, "page_size": 10}, + ) + assert response_job == expected_response_job + + def test_cancel(self, client): + expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) + client._client.request.return_value = mock_response( + 200, + expected_response_job.json(), + ) + + response_job = client.jobs.cancel("job_id") + + client._client.request.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/fine_tuning/jobs/job_id/cancel", + headers={ + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + assert response_job == expected_response_job diff 
--git a/tests/test_jobs_async.py b/tests/test_jobs_async.py new file mode 100644 index 00000000..2d0d488f --- /dev/null +++ b/tests/test_jobs_async.py @@ -0,0 +1,133 @@ +import orjson +import pytest +from mistralai.models.jobs import DetailedJob, Job, TrainingParameters + +from .utils import ( + mock_detailed_job_response_payload, + mock_job_response_payload, + mock_response, +) + + +class TestJobsClient: + @pytest.mark.asyncio + async def test_create(self, async_client): + expected_response_job = Job.model_validate_json(mock_job_response_payload()) + async_client._client.request.return_value = mock_response( + 200, + expected_response_job.json(), + ) + + response_job = await async_client.jobs.create( + model="model", + training_files=["training_file_id"], + validation_files=["validation_file_id"], + hyperparameters=TrainingParameters( + training_steps=1800, + learning_rate=1.0e-4, + ), + ) + + async_client._client.request.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/fine_tuning/jobs", + headers={ + "User-Agent": f"mistral-client-python/{async_client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={ + "model": "model", + "training_files": ["training_file_id"], + "validation_files": ["validation_file_id"], + "hyperparameters": { + "training_steps": 1800, + "learning_rate": 1.0e-4, + }, + "suffix": None, + "integrations": None, + }, + data=None, + params={"dry_run": False}, + ) + assert response_job == expected_response_job + + @pytest.mark.asyncio + async def test_retrieve(self, async_client): + expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) + async_client._client.request.return_value = mock_response( + 200, + expected_response_job.json(), + ) + + response_job = await async_client.jobs.retrieve("job_id") + + async_client._client.request.assert_called_once_with( + "get", + 
"https://api.mistral.ai/v1/fine_tuning/jobs/job_id", + headers={ + "User-Agent": f"mistral-client-python/{async_client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + assert response_job == expected_response_job + + @pytest.mark.asyncio + async def test_list(self, async_client): + expected_response_job = Job.model_validate_json(mock_job_response_payload()) + async_client._client.request.return_value = mock_response( + 200, + orjson.dumps( + { + "data": [expected_response_job.model_dump()], + "object": "list", + } + ), + ) + + response_jobs = await async_client.jobs.list() + response_job = response_jobs.data[0] + + async_client._client.request.assert_called_once_with( + "get", + "https://api.mistral.ai/v1/fine_tuning/jobs", + headers={ + "User-Agent": f"mistral-client-python/{async_client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + params={"page": 0, "page_size": 10}, + ) + assert response_job == expected_response_job + + @pytest.mark.asyncio + async def test_cancel(self, async_client): + expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) + async_client._client.request.return_value = mock_response( + 200, + expected_response_job.json(), + ) + + response_job = await async_client.jobs.cancel("job_id") + + async_client._client.request.assert_called_once_with( + "post", + "https://api.mistral.ai/v1/fine_tuning/jobs/job_id/cancel", + headers={ + "User-Agent": f"mistral-client-python/{async_client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + assert response_job == 
expected_response_job diff --git a/tests/test_list_models.py b/tests/test_list_models.py index 6b73978b..15de8475 100644 --- a/tests/test_list_models.py +++ b/tests/test_list_models.py @@ -22,6 +22,7 @@ def test_list_models(self, client): "Content-Type": "application/json", }, json={}, + data=None, ) assert isinstance(result, ModelList), "Should return an ModelList" diff --git a/tests/test_list_models_async.py b/tests/test_list_models_async.py index a8764841..2f3d7b44 100644 --- a/tests/test_list_models_async.py +++ b/tests/test_list_models_async.py @@ -24,6 +24,7 @@ async def test_list_models(self, async_client): "Content-Type": "application/json", }, json={}, + data=None, ) assert isinstance(result, ModelList), "Should return an ModelList" diff --git a/tests/utils.py b/tests/utils.py index 826753d2..05fdc3df 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -245,3 +245,76 @@ def mock_completion_response_payload() -> str: "usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0}, } ).decode() + + +def mock_job_response_payload() -> str: + return orjson.dumps( + { + "id": "job_id", + "hyperparameters": { + "training_steps": 1800, + "learning_rate": 1.0e-4, + }, + "fine_tuned_model": "fine_tuned_model", + "model": "model", + "status": "QUEUED", + "job_type": "job_type", + "created_at": 1633046400000, + "modified_at": 1633046400000, + "training_files": ["training_file_id"], + "validation_files": ["validation_file_id"], + "object": "job", + "integrations": [], + } + ) + + +def mock_detailed_job_response_payload() -> str: + return orjson.dumps( + { + "id": "job_id", + "hyperparameters": { + "training_steps": 1800, + "learning_rate": 1.0e-4, + }, + "fine_tuned_model": "fine_tuned_model", + "model": "model", + "status": "QUEUED", + "job_type": "job_type", + "created_at": 1633046400000, + "modified_at": 1633046400000, + "training_files": ["training_file_id"], + "validation_files": ["validation_file_id"], + "object": "job", + "integrations": [], + 
"events": [ + { + "name": "event_name", + "created_at": 1633046400000, + } + ], + } + ) + + +def mock_file_response_payload() -> str: + return orjson.dumps( + { + "id": "file_id", + "object": "file", + "bytes": 0, + "created_at": 1633046400000, + "filename": "file.jsonl", + "purpose": "fine-tune", + } + ) + + +def mock_file_deleted_response_payload() -> str: + return orjson.dumps( + { + "id": "file_id", + "object": "file", + "deleted": True, + } + ) From 70c9d2c67469c37b58695607e1153ce46f929fb6 Mon Sep 17 00:00:00 2001 From: Alexis Tacnet Date: Wed, 5 Jun 2024 16:48:40 +0200 Subject: [PATCH 047/223] Release 0.4.0 (#100) --- pyproject.toml | 2 +- src/mistralai/client_base.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index ae82689c..f6b31b0e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "0.3.0" +version = "0.4.0" description = "" authors = ["Bam4d "] readme = "README.md" diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index 5deaab3f..9fb107b6 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -10,7 +10,7 @@ ) from mistralai.models.chat_completion import ChatMessage, Function, ResponseFormat, ToolChoice -CLIENT_VERSION = "0.2.0" +CLIENT_VERSION = "0.4.0" class ClientBase(ABC): From db71f68b7285cf7d00d0c2dd480e41c7898664f7 Mon Sep 17 00:00:00 2001 From: pandora <128635000+pandora-s-git@users.noreply.github.com> Date: Fri, 7 Jun 2024 13:58:24 +0200 Subject: [PATCH 048/223] Issue Templates Adding 2 issue templates, one for Client issues and another for Model issues, which should help everyone out! I've disabled blank issues, but these two should handle most cases. 
--- .github/ISSUE_TEMPLATE/bug_report_client.yml | 55 +++++++++++++++++++ .github/ISSUE_TEMPLATE/bug_report_model.yml | 58 ++++++++++++++++++++ .github/ISSUE_TEMPLATE/config.yml | 8 +++ 3 files changed, 121 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report_client.yml create mode 100644 .github/ISSUE_TEMPLATE/bug_report_model.yml create mode 100644 .github/ISSUE_TEMPLATE/config.yml diff --git a/.github/ISSUE_TEMPLATE/bug_report_client.yml b/.github/ISSUE_TEMPLATE/bug_report_client.yml new file mode 100644 index 00000000..9395e282 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report_client.yml @@ -0,0 +1,55 @@ +name: Bug report related to the Python client +description: Submit a bug report that's related to the Python client +title: '[BUG CLIENT]: ' +labels: ['bug client', 'triage'] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report ! + - type: textarea + id: python-vv + attributes: + label: Python -VV + description: Run `python -VV` from your virtual environment + placeholder: Copy-paste the output (no need for backticks, will be formatted into code automatically) + render: shell + validations: + required: true + - type: textarea + id: pip-freeze + attributes: + label: Pip Freeze + description: Run `pip freeze` from your virtual environment + placeholder: Copy-paste the output (no need for backticks, will be formatted into code automatically) + render: shell + validations: + required: true + - type: textarea + id: reproduction-steps + attributes: + label: Reproduction Steps + description: Provide a clear and concise description of the steps that lead to your issue. + placeholder: | + 1. First step... + 2. Step 2... + ... + validations: + required: true + - type: textarea + id: expected-behavior + attributes: + label: Expected Behavior + description: Explain briefly what you expected to happen. 
+ validations: + required: true + - type: textarea + id: additional-context + attributes: + label: Additional Context + description: Add any context about your problem that you deem relevant. + - type: textarea + id: suggested-solutions + attributes: + label: Suggested Solutions + description: Please list any solutions you recommend we consider. diff --git a/.github/ISSUE_TEMPLATE/bug_report_model.yml b/.github/ISSUE_TEMPLATE/bug_report_model.yml new file mode 100644 index 00000000..7d5c360e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report_model.yml @@ -0,0 +1,58 @@ +name: Bug report related to our models +description: Submit a bug report that's related to our models behavior +title: '[BUG MODEL]: ' +labels: ['bug model', 'triage'] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report ! + - type: dropdown + id: model + attributes: + label: Model + description: What's the model? + options: + - open-mistral-7b + - open-mixtral-8x7b + - open-mixtral-8x22b + - mistral-small-latest + - mistral-medium-latest + - mistral-large-latest + - mistral-embed + - codestral-latest + default: 0 + validations: + required: true + - type: textarea + id: model-payload + attributes: + label: Request Payload + description: Please provide the full request payload. + placeholder: If possible, if not provide as much information as you can such as "messages", "max_tokens", "temperature", "prefix", "tool_calls" and all kinds of information. + validations: + required: true + - type: textarea + id: model-output + attributes: + label: Output + description: Provide the output of the model. + validations: + required: true + - type: textarea + id: model-expected-output + attributes: + label: Expected Behavior + description: Explain briefly what you expected to happen. 
+ validations: + required: true + - type: textarea + id: model-additional-context + attributes: + label: Additional Context + description: Add any context about your problem that you deem relevant. + - type: textarea + id: model-suggested-solutions + attributes: + label: Suggested Solutions + description: Please list any solutions you recommend we consider. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..42dc4f95 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: false +contact_links: + - name: Documentation + url: https://docs.mistral.ai + about: Developer documentation for the Mistral AI platform + - name: Discord + url: https://discord.com/invite/mistralai) + about: Chat with the Mistral community \ No newline at end of file From 96b5b306241117e40c08462f6876c0c8f025d60f Mon Sep 17 00:00:00 2001 From: pandora <128635000+pandora-s-git@users.noreply.github.com> Date: Mon, 10 Jun 2024 09:16:47 +0200 Subject: [PATCH 049/223] Update bug_report_client.yml --- .github/ISSUE_TEMPLATE/bug_report_client.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report_client.yml b/.github/ISSUE_TEMPLATE/bug_report_client.yml index 9395e282..1b3c0758 100644 --- a/.github/ISSUE_TEMPLATE/bug_report_client.yml +++ b/.github/ISSUE_TEMPLATE/bug_report_client.yml @@ -6,7 +6,7 @@ body: - type: markdown attributes: value: | - Thanks for taking the time to fill out this bug report ! + Thanks for taking the time to fill out this bug report! 
- type: textarea id: python-vv attributes: From 4b76a90b3d87551e50af3609e0a29e8bc2286636 Mon Sep 17 00:00:00 2001 From: pandora <128635000+pandora-s-git@users.noreply.github.com> Date: Mon, 10 Jun 2024 09:17:00 +0200 Subject: [PATCH 050/223] Update bug_report_model.yml --- .github/ISSUE_TEMPLATE/bug_report_model.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report_model.yml b/.github/ISSUE_TEMPLATE/bug_report_model.yml index 7d5c360e..05687494 100644 --- a/.github/ISSUE_TEMPLATE/bug_report_model.yml +++ b/.github/ISSUE_TEMPLATE/bug_report_model.yml @@ -6,7 +6,7 @@ body: - type: markdown attributes: value: | - Thanks for taking the time to fill out this bug report ! + Thanks for taking the time to fill out this bug report! - type: dropdown id: model attributes: From f1012d6359dff30572bc190b337a9aa56e795a49 Mon Sep 17 00:00:00 2001 From: pandora <128635000+pandora-s-git@users.noreply.github.com> Date: Tue, 11 Jun 2024 10:06:46 +0200 Subject: [PATCH 051/223] Update .github/ISSUE_TEMPLATE/bug_report_client.yml Co-authored-by: timlacroix --- .github/ISSUE_TEMPLATE/bug_report_client.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report_client.yml b/.github/ISSUE_TEMPLATE/bug_report_client.yml index 1b3c0758..669503ac 100644 --- a/.github/ISSUE_TEMPLATE/bug_report_client.yml +++ b/.github/ISSUE_TEMPLATE/bug_report_client.yml @@ -32,7 +32,7 @@ body: description: Provide a clear and concise description of the steps that lead to your issue. placeholder: | 1. First step... - 2. Step 2... + 2. Second step... ... 
validations: required: true From 54b7e809bece5f4c655c20b27b92b44509919e34 Mon Sep 17 00:00:00 2001 From: pandora <128635000+pandora-s-git@users.noreply.github.com> Date: Tue, 11 Jun 2024 10:14:15 +0200 Subject: [PATCH 052/223] Update bug_report_model.yml --- .github/ISSUE_TEMPLATE/bug_report_model.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report_model.yml b/.github/ISSUE_TEMPLATE/bug_report_model.yml index 05687494..aa4bb622 100644 --- a/.github/ISSUE_TEMPLATE/bug_report_model.yml +++ b/.github/ISSUE_TEMPLATE/bug_report_model.yml @@ -29,7 +29,9 @@ body: attributes: label: Request Payload description: Please provide the full request payload. - placeholder: If possible, if not provide as much information as you can such as "messages", "max_tokens", "temperature", "prefix", "tool_calls" and all kinds of information. + placeholder: | + Please provide a complete example of the request payload, including, if possible, parameters such as 'messages', 'max_tokens', 'temperature', 'prefix', and 'tool_calls'. + If you can, please set the 'temperature' parameter to 0 in your example. This will greatly help us. 
validations: required: true - type: textarea From d206edef6de210c53a0d2050e340ed6c5884a745 Mon Sep 17 00:00:00 2001 From: jean-malo Date: Wed, 19 Jun 2024 11:17:26 +0200 Subject: [PATCH 053/223] deps: fix pin --- poetry.lock | 4 ++-- pyproject.toml | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index 636856ce..167827a9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -532,5 +532,5 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" -python-versions = "^3.9,<4.0" -content-hash = "015db2d023aef624cb1df559074f6e3760d4029e637d3f259f5d2c76730b8d6a" +python-versions = ">=3.9,<4.0" +content-hash = "a1b9663d7041a47bc8b6705e4fc9bd4563718a49e492aa8f0edf96fb8afa468b" diff --git a/pyproject.toml b/pyproject.toml index f6b31b0e..68cfc961 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,10 +23,10 @@ exclude = ["docs", "tests", "examples", "tools", "build"] [tool.poetry.dependencies] -python = "^3.9,<4.0" -orjson = "^3.9.10,<3.11" -pydantic = "^2.5.2,<3" -httpx = "^0.25,<1" +python = ">=3.9,<4.0" +orjson = ">=3.9.10,<3.11" +pydantic = ">=2.5.2,<3" +httpx = ">=0.25,<1" [tool.poetry.group.dev.dependencies] From af48070b496b7c34e3fa78a05805289bd8cb6f75 Mon Sep 17 00:00:00 2001 From: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> Date: Wed, 19 Jun 2024 13:08:20 +0200 Subject: [PATCH 054/223] feat: model deletion * Add support for the deletion of fine-tuned models --------- Co-authored-by: arresejo --- examples/async_jobs_chat.py | 3 +++ pyproject.toml | 2 +- src/mistralai/async_client.py | 10 +++++++++- src/mistralai/client.py | 10 +++++++++- src/mistralai/client_base.py | 2 +- src/mistralai/models/models.py | 6 ++++++ tests/test_delete_model.py | 26 ++++++++++++++++++++++++++ tests/test_delete_model_async.py | 28 ++++++++++++++++++++++++++++ tests/utils.py | 10 ++++++++++ 9 files changed, 93 insertions(+), 4 deletions(-) create mode 100644 tests/test_delete_model.py create mode 100644 
tests/test_delete_model_async.py diff --git a/examples/async_jobs_chat.py b/examples/async_jobs_chat.py index 15e550c8..e5019148 100644 --- a/examples/async_jobs_chat.py +++ b/examples/async_jobs_chat.py @@ -54,6 +54,9 @@ async def main(): await client.files.delete(training_file.id) await client.files.delete(validation_file.id) + # Delete fine-tuned model + await client.delete_model(created_job.fine_tuned_model) + if __name__ == "__main__": asyncio.run(main()) diff --git a/pyproject.toml b/pyproject.toml index 68cfc961..4f498a6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "0.4.0" +version = "0.4.1" description = "" authors = ["Bam4d "] readme = "README.md" diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index 4e80de84..24124486 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -29,7 +29,7 @@ ToolChoice, ) from mistralai.models.embeddings import EmbeddingResponse -from mistralai.models.models import ModelList +from mistralai.models.models import ModelDeleted, ModelList class MistralAsyncClient(ClientBase): @@ -304,6 +304,14 @@ async def list_models(self) -> ModelList: raise MistralException("No response received") + async def delete_model(self, model_id: str) -> ModelDeleted: + single_response = self._request("delete", {}, f"v1/models/{model_id}") + + async for response in single_response: + return ModelDeleted(**response) + + raise MistralException("No response received") + async def completion( self, model: str, diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 1f70f4ea..c919ac71 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -22,7 +22,7 @@ ToolChoice, ) from mistralai.models.embeddings import EmbeddingResponse -from mistralai.models.models import ModelList +from mistralai.models.models import ModelDeleted, ModelList class MistralClient(ClientBase): @@ -298,6 +298,14 @@ def list_models(self) -> 
ModelList: raise MistralException("No response received") + def delete_model(self, model_id: str) -> ModelDeleted: + single_response = self._request("delete", {}, f"v1/models/{model_id}") + + for response in single_response: + return ModelDeleted(**response) + + raise MistralException("No response received") + def completion( self, model: str, diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index 9fb107b6..5cd996d9 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -10,7 +10,7 @@ ) from mistralai.models.chat_completion import ChatMessage, Function, ResponseFormat, ToolChoice -CLIENT_VERSION = "0.4.0" +CLIENT_VERSION = "0.4.1" class ClientBase(ABC): diff --git a/src/mistralai/models/models.py b/src/mistralai/models/models.py index 8ed152d3..f88033d4 100644 --- a/src/mistralai/models/models.py +++ b/src/mistralai/models/models.py @@ -31,3 +31,9 @@ class ModelCard(BaseModel): class ModelList(BaseModel): object: str data: List[ModelCard] + + +class ModelDeleted(BaseModel): + id: str + object: str + deleted: bool diff --git a/tests/test_delete_model.py b/tests/test_delete_model.py new file mode 100644 index 00000000..d050c21a --- /dev/null +++ b/tests/test_delete_model.py @@ -0,0 +1,26 @@ +from mistralai.models.models import ModelDeleted + +from .utils import mock_model_deleted_response_payload, mock_response + + +class TestDeleteModel: + def test_delete_model(self, client): + expected_response_model = ModelDeleted.model_validate_json(mock_model_deleted_response_payload()) + client._client.request.return_value = mock_response(200, expected_response_model.json()) + + response_model = client.delete_model("model_id") + + client._client.request.assert_called_once_with( + "delete", + "https://api.mistral.ai/v1/models/model_id", + headers={ + "User-Agent": f"mistral-client-python/{client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + 
"Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + + assert response_model == expected_response_model diff --git a/tests/test_delete_model_async.py b/tests/test_delete_model_async.py new file mode 100644 index 00000000..9fa393e8 --- /dev/null +++ b/tests/test_delete_model_async.py @@ -0,0 +1,28 @@ +import pytest +from mistralai.models.models import ModelDeleted + +from .utils import mock_model_deleted_response_payload, mock_response + + +class TestAsyncDeleteModel: + @pytest.mark.asyncio + async def test_delete_model(self, async_client): + expected_response_model = ModelDeleted.model_validate_json(mock_model_deleted_response_payload()) + async_client._client.request.return_value = mock_response(200, expected_response_model.json()) + + response_model = await async_client.delete_model("model_id") + + async_client._client.request.assert_called_once_with( + "delete", + "https://api.mistral.ai/v1/models/model_id", + headers={ + "User-Agent": f"mistral-client-python/{async_client._version}", + "Accept": "application/json", + "Content-Type": "application/json", + "Authorization": "Bearer test_api_key", + }, + json={}, + data=None, + ) + + assert response_model == expected_response_model diff --git a/tests/utils.py b/tests/utils.py index 05fdc3df..5f7be152 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -318,3 +318,13 @@ def mock_file_deleted_response_payload() -> str: "deleted": True, } ) + + +def mock_model_deleted_response_payload() -> str: + return orjson.dumps( + { + "id": "model_id", + "object": "model", + "deleted": True, + } + ) From 952298fba83456fbc805e0a57683b80cb0a1fc3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hugues=20de=20Saxc=C3=A9?= Date: Wed, 26 Jun 2024 00:28:47 +0200 Subject: [PATCH 055/223] Create version.py --- src/mistralai/version.py | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 src/mistralai/version.py diff --git a/src/mistralai/version.py 
b/src/mistralai/version.py new file mode 100644 index 00000000..ee8f8def --- /dev/null +++ b/src/mistralai/version.py @@ -0,0 +1,7 @@ +from importlib import metadata + +try: + __version__ = metadata.version(__package__) +except metadata.PackageNotFoundError: + # Case where package metadata is not available. + __version__ = "" From 5fdc57cd0453c55e79e5be5dd3081f8168192258 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Fri, 28 Jun 2024 18:19:10 +0200 Subject: [PATCH 056/223] feat: add warning log when targeting a model planned for deprecation (#111) * add model deprecation warning * use Optional --- src/mistralai/async_client.py | 55 +++++++++++++++++++++---- src/mistralai/client.py | 60 +++++++++++++++++++++++----- src/mistralai/client_base.py | 63 ++++++++++++++++++++--------- src/mistralai/constants.py | 2 + tests/test_chat.py | 75 +++++++++++++++++++++++++++++++---- tests/test_chat_async.py | 75 +++++++++++++++++++++++++++++++---- tests/utils.py | 13 ++++-- 7 files changed, 288 insertions(+), 55 deletions(-) diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index 24124486..abe86548 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -1,7 +1,7 @@ import asyncio import posixpath from json import JSONDecodeError -from typing import Any, AsyncGenerator, Dict, List, Optional, Union +from typing import Any, AsyncGenerator, Callable, Dict, List, Optional, Union from httpx import ( AsyncClient, @@ -101,6 +101,7 @@ async def _request( stream: bool = False, attempt: int = 1, data: Optional[Dict[str, Any]] = None, + check_model_deprecation_headers_callback: Optional[Callable] = None, **kwargs: Any, ) -> AsyncGenerator[Dict[str, Any], None]: accept_header = "text/event-stream" if stream else "application/json" @@ -129,6 +130,8 @@ async def _request( data=data, **kwargs, ) as response: + if check_model_deprecation_headers_callback: + 
check_model_deprecation_headers_callback(response.headers) await self._check_streaming_response(response) async for line in response.aiter_lines(): @@ -145,7 +148,8 @@ async def _request( data=data, **kwargs, ) - + if check_model_deprecation_headers_callback: + check_model_deprecation_headers_callback(response.headers) yield await self._check_response(response) except ConnectError as e: @@ -213,7 +217,12 @@ async def chat( response_format=response_format, ) - single_response = self._request("post", request, "v1/chat/completions") + single_response = self._request( + "post", + request, + "v1/chat/completions", + check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), + ) async for response in single_response: return ChatCompletionResponse(**response) @@ -267,7 +276,13 @@ async def chat_stream( tool_choice=tool_choice, response_format=response_format, ) - async_response = self._request("post", request, "v1/chat/completions", stream=True) + async_response = self._request( + "post", + request, + "v1/chat/completions", + stream=True, + check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), + ) async for json_response in async_response: yield ChatCompletionStreamResponse(**json_response) @@ -284,7 +299,12 @@ async def embeddings(self, model: str, input: Union[str, List[str]]) -> Embeddin EmbeddingResponse: A response object containing the embeddings. 
""" request = {"model": model, "input": input} - single_response = self._request("post", request, "v1/embeddings") + single_response = self._request( + "post", + request, + "v1/embeddings", + check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), + ) async for response in single_response: return EmbeddingResponse(**response) @@ -341,7 +361,12 @@ async def completion( request = self._make_completion_request( prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop ) - single_response = self._request("post", request, "v1/fim/completions") + single_response = self._request( + "post", + request, + "v1/fim/completions", + check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), + ) async for response in single_response: return ChatCompletionResponse(**response) @@ -376,9 +401,23 @@ async def completion_stream( Dict[str, Any]: a response object containing the generated text. """ request = self._make_completion_request( - prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop, stream=True + prompt, + model, + suffix, + temperature, + max_tokens, + top_p, + random_seed, + stop, + stream=True, + ) + async_response = self._request( + "post", + request, + "v1/fim/completions", + stream=True, + check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), ) - async_response = self._request("post", request, "v1/fim/completions", stream=True) async for json_response in async_response: yield ChatCompletionStreamResponse(**json_response) diff --git a/src/mistralai/client.py b/src/mistralai/client.py index c919ac71..9c75373e 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -1,7 +1,7 @@ import posixpath import time from json import JSONDecodeError -from typing import Any, Dict, Iterable, Iterator, List, Optional, Union +from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, 
Union from httpx import Client, ConnectError, HTTPTransport, RequestError, Response @@ -40,7 +40,9 @@ def __init__( super().__init__(endpoint, api_key, max_retries, timeout) self._client = Client( - follow_redirects=True, timeout=self._timeout, transport=HTTPTransport(retries=self._max_retries) + follow_redirects=True, + timeout=self._timeout, + transport=HTTPTransport(retries=self._max_retries), ) self.files = FilesClient(self) self.jobs = JobsClient(self) @@ -94,6 +96,7 @@ def _request( stream: bool = False, attempt: int = 1, data: Optional[Dict[str, Any]] = None, + check_model_deprecation_headers_callback: Optional[Callable] = None, **kwargs: Any, ) -> Iterator[Dict[str, Any]]: accept_header = "text/event-stream" if stream else "application/json" @@ -122,6 +125,8 @@ def _request( data=data, **kwargs, ) as response: + if check_model_deprecation_headers_callback: + check_model_deprecation_headers_callback(response.headers) self._check_streaming_response(response) for line in response.iter_lines(): @@ -138,7 +143,8 @@ def _request( data=data, **kwargs, ) - + if check_model_deprecation_headers_callback: + check_model_deprecation_headers_callback(response.headers) yield self._check_response(response) except ConnectError as e: @@ -207,7 +213,12 @@ def chat( response_format=response_format, ) - single_response = self._request("post", request, "v1/chat/completions") + single_response = self._request( + "post", + request, + "v1/chat/completions", + check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), + ) for response in single_response: return ChatCompletionResponse(**response) @@ -261,7 +272,13 @@ def chat_stream( response_format=response_format, ) - response = self._request("post", request, "v1/chat/completions", stream=True) + response = self._request( + "post", + request, + "v1/chat/completions", + stream=True, + check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), + ) 
for json_streamed_response in response: yield ChatCompletionStreamResponse(**json_streamed_response) @@ -278,7 +295,12 @@ def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingRespo EmbeddingResponse: A response object containing the embeddings. """ request = {"model": model, "input": input} - singleton_response = self._request("post", request, "v1/embeddings") + singleton_response = self._request( + "post", + request, + "v1/embeddings", + check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), + ) for response in singleton_response: return EmbeddingResponse(**response) @@ -337,7 +359,13 @@ def completion( prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop ) - single_response = self._request("post", request, "v1/fim/completions", stream=False) + single_response = self._request( + "post", + request, + "v1/fim/completions", + stream=False, + check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), + ) for response in single_response: return ChatCompletionResponse(**response) @@ -372,10 +400,24 @@ def completion_stream( Iterable[Dict[str, Any]]: a generator that yields response objects containing the generated text. 
""" request = self._make_completion_request( - prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop, stream=True + prompt, + model, + suffix, + temperature, + max_tokens, + top_p, + random_seed, + stop, + stream=True, ) - response = self._request("post", request, "v1/fim/completions", stream=True) + response = self._request( + "post", + request, + "v1/fim/completions", + stream=True, + check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), + ) for json_streamed_response in response: yield ChatCompletionStreamResponse(**json_streamed_response) diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index 5cd996d9..8fe9bcfd 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -1,14 +1,19 @@ import logging import os from abc import ABC -from typing import Any, Dict, List, Optional, Union +from typing import Any, Callable, Dict, List, Optional, Union import orjson - -from mistralai.exceptions import ( - MistralException, +from httpx import Headers + +from mistralai.constants import HEADER_MODEL_DEPRECATION_TIMESTAMP +from mistralai.exceptions import MistralException +from mistralai.models.chat_completion import ( + ChatMessage, + Function, + ResponseFormat, + ToolChoice, ) -from mistralai.models.chat_completion import ChatMessage, Function, ResponseFormat, ToolChoice CLIENT_VERSION = "0.4.1" @@ -38,6 +43,14 @@ def __init__( self._version = CLIENT_VERSION + def _get_model(self, model: Optional[str] = None) -> str: + if model is not None: + return model + else: + if self._default_model is None: + raise MistralException(message="model must be provided") + return self._default_model + def _parse_tools(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]: parsed_tools: List[Dict[str, Any]] = [] for tool in tools: @@ -73,6 +86,22 @@ def _parse_messages(self, messages: List[Any]) -> List[Dict[str, Any]]: return parsed_messages + def 
_check_model_deprecation_header_callback_factory(self, model: Optional[str] = None) -> Callable: + model = self._get_model(model) + + def _check_model_deprecation_header_callback( + headers: Headers, + ) -> None: + if HEADER_MODEL_DEPRECATION_TIMESTAMP in headers: + self._logger.warning( + f"WARNING: The model {model} is deprecated " + f"and will be removed on {headers[HEADER_MODEL_DEPRECATION_TIMESTAMP]}. " + "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning " + "for more information." + ) + + return _check_model_deprecation_header_callback + def _make_completion_request( self, prompt: str, @@ -95,16 +124,14 @@ def _make_completion_request( if stop is not None: request_data["stop"] = stop - if model is not None: - request_data["model"] = model - else: - if self._default_model is None: - raise MistralException(message="model must be provided") - request_data["model"] = self._default_model + request_data["model"] = self._get_model(model) request_data.update( self._build_sampling_params( - temperature=temperature, max_tokens=max_tokens, top_p=top_p, random_seed=random_seed + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + random_seed=random_seed, ) ) @@ -148,16 +175,14 @@ def _make_chat_request( "messages": self._parse_messages(messages), } - if model is not None: - request_data["model"] = model - else: - if self._default_model is None: - raise MistralException(message="model must be provided") - request_data["model"] = self._default_model + request_data["model"] = self._get_model(model) request_data.update( self._build_sampling_params( - temperature=temperature, max_tokens=max_tokens, top_p=top_p, random_seed=random_seed + temperature=temperature, + max_tokens=max_tokens, + top_p=top_p, + random_seed=random_seed, ) ) diff --git a/src/mistralai/constants.py b/src/mistralai/constants.py index c70331b4..c057d4ce 100644 --- a/src/mistralai/constants.py +++ 
b/src/mistralai/constants.py @@ -1,3 +1,5 @@ RETRY_STATUS_CODES = {429, 500, 502, 503, 504} ENDPOINT = "https://api.mistral.ai" + +HEADER_MODEL_DEPRECATION_TIMESTAMP = "x-model-deprecation-timestamp" diff --git a/tests/test_chat.py b/tests/test_chat.py index 482b12e8..15a40651 100644 --- a/tests/test_chat.py +++ b/tests/test_chat.py @@ -1,3 +1,8 @@ +import io +import logging + +import pytest +from mistralai.constants import HEADER_MODEL_DEPRECATION_TIMESTAMP from mistralai.models.chat_completion import ( ChatCompletionResponse, ChatCompletionStreamResponse, @@ -13,12 +18,26 @@ class TestChat: - def test_chat(self, client): - client._client.request.return_value = mock_response( - 200, - mock_chat_response_payload(), + @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) + def test_chat(self, client, target_deprecated_model): + headers = ( + { + HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", + } + if target_deprecated_model + else {} ) + client._client.request.return_value = mock_response(200, mock_chat_response_payload(), headers) + + # Create a stream to capture the log output + log_stream = io.StringIO() + + # Create a logger and add a handler that writes to the stream + logger = client._logger + handler = logging.StreamHandler(log_stream) + logger.addHandler(handler) + result = client.chat( model="mistral-small-latest", messages=[ChatMessage(role="user", content="What is the best French cheese?")], @@ -46,12 +65,39 @@ def test_chat(self, client): assert result.choices[0].index == 0 assert result.object == "chat.completion" - def test_chat_streaming(self, client): - client._client.stream.return_value = mock_stream_response( - 200, - mock_chat_response_streaming_payload(), + # Check if the log message was produced when the model is deprecated + log_output = log_stream.getvalue() + excepted_log = ( + ( + "WARNING: The model mistral-small-latest is deprecated " 
+ "and will be removed on 2023-12-01T00:00:00. " + "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" + ) + if target_deprecated_model + else "" + ) + assert excepted_log == log_output + + @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) + def test_chat_streaming(self, client, target_deprecated_model): + headers = ( + { + HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", + } + if target_deprecated_model + else {} ) + client._client.stream.return_value = mock_stream_response(200, mock_chat_response_streaming_payload(), headers) + + # Create a stream to capture the log output + log_stream = io.StringIO() + + # Create a logger and add a handler that writes to the stream + logger = client._logger + handler = logging.StreamHandler(log_stream) + logger.addHandler(handler) + result = client.chat_stream( model="mistral-small-latest", messages=[ChatMessage(role="user", content="What is the best French cheese?")], @@ -88,3 +134,16 @@ def test_chat_streaming(self, client): assert result.choices[0].index == i - 1 assert result.choices[0].delta.content == f"stream response {i-1}" assert result.object == "chat.completion.chunk" + + # Check if the log message was produced + log_output = log_stream.getvalue() + excepted_log = ( + ( + "WARNING: The model mistral-small-latest is deprecated " + "and will be removed on 2023-12-01T00:00:00. 
" + "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" + ) + if target_deprecated_model + else "" + ) + assert excepted_log == log_output diff --git a/tests/test_chat_async.py b/tests/test_chat_async.py index 04a73f27..c16a9a80 100644 --- a/tests/test_chat_async.py +++ b/tests/test_chat_async.py @@ -1,6 +1,11 @@ +import io +import logging import unittest.mock as mock import pytest +from mistralai.constants import ( + HEADER_MODEL_DEPRECATION_TIMESTAMP, +) from mistralai.models.chat_completion import ( ChatCompletionResponse, ChatCompletionStreamResponse, @@ -17,12 +22,26 @@ class TestAsyncChat: @pytest.mark.asyncio - async def test_chat(self, async_client): - async_client._client.request.return_value = mock_response( - 200, - mock_chat_response_payload(), + @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) + async def test_chat(self, async_client, target_deprecated_model): + headers = ( + { + HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", + } + if target_deprecated_model + else {} ) + async_client._client.request.return_value = mock_response(200, mock_chat_response_payload(), headers) + + # Create a stream to capture the log output + log_stream = io.StringIO() + + # Create a logger and add a handler that writes to the stream + logger = async_client._logger + handler = logging.StreamHandler(log_stream) + logger.addHandler(handler) + result = await async_client.chat( model="mistral-small-latest", messages=[ChatMessage(role="user", content="What is the best French cheese?")], @@ -50,14 +69,43 @@ async def test_chat(self, async_client): assert result.choices[0].index == 0 assert result.object == "chat.completion" + # Check if the log message was produced when the model is deprecated + log_output = log_stream.getvalue() + excepted_log = ( + ( + "WARNING: The model mistral-small-latest is deprecated " + "and 
will be removed on 2023-12-01T00:00:00. " + "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" + ) + if target_deprecated_model + else "" + ) + assert excepted_log == log_output + @pytest.mark.asyncio - async def test_chat_streaming(self, async_client): + @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) + async def test_chat_streaming(self, async_client, target_deprecated_model): + headers = ( + { + HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", + } + if target_deprecated_model + else {} + ) + async_client._client.stream = mock.Mock() async_client._client.stream.return_value = mock_async_stream_response( - 200, - mock_chat_response_streaming_payload(), + 200, mock_chat_response_streaming_payload(), headers ) + # Create a stream to capture the log output + log_stream = io.StringIO() + + # Create a logger and add a handler that writes to the stream + logger = async_client._logger + handler = logging.StreamHandler(log_stream) + logger.addHandler(handler) + result = async_client.chat_stream( model="mistral-small-latest", messages=[ChatMessage(role="user", content="What is the best French cheese?")], @@ -94,3 +142,16 @@ async def test_chat_streaming(self, async_client): assert result.choices[0].index == i - 1 assert result.choices[0].delta.content == f"stream response {i-1}" assert result.object == "chat.completion.chunk" + + # Check if the log message was produced when the model is deprecated + log_output = log_stream.getvalue() + excepted_log = ( + ( + "WARNING: The model mistral-small-latest is deprecated " + "and will be removed on 2023-12-01T00:00:00. 
" + "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" + ) + if target_deprecated_model + else "" + ) + assert excepted_log == log_output diff --git a/tests/utils.py b/tests/utils.py index 5f7be152..4c2ca146 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,23 +1,25 @@ import contextlib import unittest.mock as mock -from typing import List +from typing import Any, Dict, List import orjson from httpx import Response @contextlib.contextmanager -def mock_stream_response(status_code: int, content: List[str]): +def mock_stream_response(status_code: int, content: List[str], headers: Dict[str, Any] = None): response = mock.Mock(Response) response.status_code = status_code + response.headers = headers if headers else {} response.iter_lines.return_value = iter(content) yield response @contextlib.asynccontextmanager -async def mock_async_stream_response(status_code: int, content: List[str]): +async def mock_async_stream_response(status_code: int, content: List[str], headers: Dict[str, Any] = None): response = mock.Mock(Response) response.status_code = status_code + response.headers = headers if headers else {} async def async_iter(content: List[str]): for line in content: @@ -27,9 +29,12 @@ async def async_iter(content: List[str]): yield response -def mock_response(status_code: int, content: str, is_json: bool = True) -> mock.MagicMock: +def mock_response( + status_code: int, content: str, headers: Dict[str, Any] = None, is_json: bool = True +) -> mock.MagicMock: response = mock.Mock(Response) response.status_code = status_code + response.headers = headers if headers else {} if is_json: response.json = mock.MagicMock() response.json.return_value = orjson.loads(content) From 8a6ac6234f58da359eb2ca49659db8f063f3aff2 Mon Sep 17 00:00:00 2001 From: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> Date: Mon, 1 Jul 2024 10:11:56 +0200 Subject: [PATCH 057/223] feat: 
add job cost to metadata (#112) --- examples/dry_run_job.py | 1 + src/mistralai/models/jobs.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/examples/dry_run_job.py b/examples/dry_run_job.py index d44d8a53..2e1af6db 100644 --- a/examples/dry_run_job.py +++ b/examples/dry_run_job.py @@ -32,6 +32,7 @@ async def main(): print(f"Dataset tokens: {dry_run_job.data_tokens}") print(f"Epochs number: {dry_run_job.epochs}") print(f"Expected duration: {dry_run_job.expected_duration_seconds}") + print(f"Cost: {dry_run_job.cost} {dry_run_job.cost_currency}") if __name__ == "__main__": diff --git a/src/mistralai/models/jobs.py b/src/mistralai/models/jobs.py index 80065337..64d3351d 100644 --- a/src/mistralai/models/jobs.py +++ b/src/mistralai/models/jobs.py @@ -32,6 +32,8 @@ class JobMetadata(BaseModel): train_tokens: int epochs: float expected_duration_seconds: Optional[int] + cost: Optional[float] = None + cost_currency: Optional[str] = None class Job(BaseModel): From 223f0dee3f9510b42b6fda0426dd7cdbf9a5388b Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 2 Jul 2024 11:07:32 +0200 Subject: [PATCH 058/223] add license --- pyproject.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4f498a6d..f05224af 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,6 +4,7 @@ version = "0.4.1" description = "" authors = ["Bam4d "] readme = "README.md" +license = "Apache 2.0 License" [tool.ruff] select = ["E", "F", "W", "Q", "I"] @@ -39,5 +40,3 @@ pytest-asyncio = "^0.23.2" [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" - - From c2bfb767f2222f4efc798f3cb9c19c9ab20b9029 Mon Sep 17 00:00:00 2001 From: Alexis Tacnet Date: Thu, 4 Jul 2024 11:17:41 +0200 Subject: [PATCH 059/223] Update versions for release 0.4.2 (#114) --- pyproject.toml | 2 +- src/mistralai/client_base.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 
f05224af..4b115d61 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "0.4.1" +version = "0.4.2" description = "" authors = ["Bam4d "] readme = "README.md" diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py index 8fe9bcfd..25077772 100644 --- a/src/mistralai/client_base.py +++ b/src/mistralai/client_base.py @@ -15,7 +15,7 @@ ToolChoice, ) -CLIENT_VERSION = "0.4.1" +CLIENT_VERSION = "0.4.2" class ClientBase(ABC): From 55a886301f5216a6b02c47ba6a3f10fa5c7c5f9a Mon Sep 17 00:00:00 2001 From: Jeremy Tuloup Date: Mon, 15 Jul 2024 13:40:43 +0200 Subject: [PATCH 060/223] Fix link to the Discord server --- .github/ISSUE_TEMPLATE/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 42dc4f95..35251c3e 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -4,5 +4,5 @@ contact_links: url: https://docs.mistral.ai about: Developer documentation for the Mistral AI platform - name: Discord - url: https://discord.com/invite/mistralai) - about: Chat with the Mistral community \ No newline at end of file + url: https://discord.com/invite/mistralai + about: Chat with the Mistral community From 9d238f88c41689821d7b08570f13b43426f97fd6 Mon Sep 17 00:00:00 2001 From: Alexis Tacnet Date: Thu, 18 Jul 2024 15:26:59 +0200 Subject: [PATCH 061/223] Fix function calling example (#116) --- examples/function_calling.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/examples/function_calling.py b/examples/function_calling.py index e6e6f28c..76fb2e18 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -20,16 +20,14 @@ def retrieve_payment_status(data: Dict[str, List], transaction_id: str) -> str: for i, r 
in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"status": data["payment_status"][i]}) - else: - return json.dumps({"status": "Error - transaction id not found"}) + return json.dumps({"status": "Error - transaction id not found"}) def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"date": data["payment_date"][i]}) - else: - return json.dumps({"status": "Error - transaction id not found"}) + return json.dumps({"status": "Error - transaction id not found"}) names_to_functions = { From 0fb8b64dd5ba97bf6a9248a78b697fb66bd30ebb Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Wed, 7 Aug 2024 16:23:56 +0200 Subject: [PATCH 062/223] Release Version 1.0.0 (#124) * prepare for 1.0.0 * change tittle * cleaning * cleaning * add API key setup * add agent example * improve old client message --- .genignore | 2 + .gitattributes | 2 + .github/workflows/build_publish.yaml | 90 -- .../sdk_generation_mistralai_azure_sdk.yaml | 29 + .../sdk_generation_mistralai_gcp_sdk.yaml | 29 + .../sdk_generation_mistralai_sdk.yaml | 29 + .../workflows/sdk_publish_mistralai_sdk.yaml | 20 + .gitignore | 42 +- .speakeasy/gen.lock | 272 +++++ .speakeasy/gen.yaml | 42 + .speakeasy/workflow.lock | 52 + .speakeasy/workflow.yaml | 39 + .vscode/settings.json | 6 + CONTRIBUTING.md | 26 + LICENSE | 2 +- MIGRATION.md | 216 ++++ Makefile | 6 - OLD-README.md | 62 ++ README.md | 666 +++++++++++- USAGE.md | 153 +++ docs/models/agentscompletionrequest.md | 17 + .../models/agentscompletionrequestmessages.md | 23 + docs/models/agentscompletionrequeststop.md | 19 + .../agentscompletionrequesttoolchoice.md | 10 + docs/models/agentscompletionstreamrequest.md | 17 + .../agentscompletionstreamrequeststop.md | 19 + docs/models/archiveftmodelout.md | 10 + docs/models/arguments.md | 17 + docs/models/assistantmessage.md | 11 + 
docs/models/assistantmessagerole.md | 8 + docs/models/chatcompletionchoice.md | 10 + docs/models/chatcompletionrequest.md | 20 + docs/models/chatcompletionresponse.md | 13 + docs/models/chatcompletionstreamrequest.md | 20 + .../chatcompletionstreamrequestmessages.md | 29 + .../models/chatcompletionstreamrequeststop.md | 19 + .../chatcompletionstreamrequesttoolchoice.md | 10 + docs/models/checkpointout.md | 10 + docs/models/completionchunk.md | 13 + docs/models/completionevent.md | 8 + docs/models/completionresponsestreamchoice.md | 10 + ...pletionresponsestreamchoicefinishreason.md | 11 + docs/models/content.md | 17 + docs/models/contentchunk.md | 9 + docs/models/deletefileout.md | 10 + docs/models/deletemodelout.md | 10 + ...deletemodelv1modelsmodeliddeleterequest.md | 8 + docs/models/deltamessage.md | 10 + docs/models/detailedjobout.md | 26 + docs/models/detailedjoboutstatus.md | 17 + docs/models/embeddingrequest.md | 10 + docs/models/embeddingresponse.md | 12 + docs/models/embeddingresponsedata.md | 10 + docs/models/eventout.md | 10 + docs/models/file.md | 10 + .../models/filesapiroutesdeletefilerequest.md | 8 + .../filesapiroutesretrievefilerequest.md | 8 + ...sapiroutesuploadfilemultipartbodyparams.md | 9 + docs/models/fileschema.md | 16 + docs/models/fimcompletionrequest.md | 17 + docs/models/fimcompletionrequeststop.md | 19 + docs/models/fimcompletionresponse.md | 13 + docs/models/fimcompletionstreamrequest.md | 17 + docs/models/fimcompletionstreamrequeststop.md | 19 + docs/models/finetuneablemodel.md | 14 + docs/models/finishreason.md | 12 + docs/models/ftmodelcapabilitiesout.md | 11 + docs/models/ftmodelout.md | 19 + docs/models/function.md | 10 + docs/models/functioncall.md | 9 + docs/models/githubrepositoryin.md | 13 + docs/models/githubrepositoryout.md | 13 + docs/models/httpvalidationerror.md | 10 + docs/models/inputs.md | 19 + docs/models/jobin.md | 15 + docs/models/jobmetadataout.md | 14 + docs/models/jobout.md | 24 + 
...sfinetuningarchivefinetunedmodelrequest.md | 8 + ...tesfinetuningcancelfinetuningjobrequest.md | 8 + ...esfinetuningcreatefinetuningjobresponse.md | 19 + ...routesfinetuninggetfinetuningjobrequest.md | 8 + ...outesfinetuninggetfinetuningjobsrequest.md | 16 + ...utesfinetuningstartfinetuningjobrequest.md | 8 + ...inetuningunarchivefinetunedmodelrequest.md | 8 + ...esfinetuningupdatefinetunedmodelrequest.md | 9 + docs/models/jobsout.md | 10 + docs/models/legacyjobmetadataout.md | 19 + docs/models/listfilesout.md | 9 + docs/models/loc.md | 17 + docs/models/messages.md | 29 + docs/models/metricout.md | 12 + docs/models/modelcapabilities.md | 11 + docs/models/modelcard.md | 19 + docs/models/modellist.md | 9 + docs/models/queryparamstatus.md | 19 + docs/models/responseformat.md | 8 + docs/models/responseformats.md | 11 + docs/models/retrievefileout.md | 16 + .../retrievemodelv1modelsmodelidgetrequest.md | 8 + docs/models/role.md | 8 + docs/models/sampletype.md | 9 + docs/models/security.md | 8 + docs/models/source.md | 9 + docs/models/status.md | 19 + docs/models/stop.md | 19 + docs/models/systemmessage.md | 9 + docs/models/textchunk.md | 9 + docs/models/tool.md | 9 + docs/models/toolcall.md | 10 + docs/models/toolchoice.md | 10 + docs/models/toolmessage.md | 11 + docs/models/toolmessagerole.md | 8 + docs/models/trainingfile.md | 9 + docs/models/trainingparameters.md | 11 + docs/models/trainingparametersin.md | 13 + docs/models/unarchiveftmodelout.md | 10 + docs/models/updateftmodelin.md | 9 + docs/models/uploadfileout.md | 16 + docs/models/usageinfo.md | 10 + docs/models/usermessage.md | 9 + docs/models/usermessagecontent.md | 17 + docs/models/usermessagerole.md | 8 + docs/models/utils/retryconfig.md | 24 + docs/models/validationerror.md | 10 + docs/models/wandbintegration.md | 12 + docs/models/wandbintegrationout.md | 11 + docs/sdks/agents/README.md | 117 +++ docs/sdks/chat/README.md | 128 +++ docs/sdks/embeddings/README.md | 53 + docs/sdks/files/README.md | 179 
++++ docs/sdks/fim/README.md | 112 ++ docs/sdks/finetuning/README.md | 5 + docs/sdks/jobs/README.md | 225 ++++ docs/sdks/mistral/README.md | 9 + docs/sdks/models/README.md | 259 +++++ examples/async_agents_no_streaming.py | 24 + examples/async_chat_no_streaming.py | 12 +- examples/async_chat_with_streaming.py | 19 +- ...completion.py => async_code_completion.py} | 6 +- examples/async_completion.py | 33 - examples/async_embeddings.py | 8 +- examples/async_files.py | 18 +- examples/async_jobs.py | 30 +- examples/async_jobs_chat.py | 35 +- examples/async_list_models.py | 6 +- examples/azure/chat_no_streaming.py.py | 16 + examples/chat_no_streaming.py | 10 +- examples/chat_with_streaming.py | 14 +- examples/chatbot_with_streaming.py | 46 +- examples/code_completion.py | 8 +- examples/completion_with_streaming.py | 9 +- examples/dry_run_job.py | 16 +- examples/embeddings.py | 8 +- examples/files.py | 16 +- examples/function_calling.py | 63 +- examples/gcp/async_chat_no_streaming.py | 24 + examples/jobs.py | 34 +- examples/json_format.py | 14 +- examples/list_models.py | 6 +- packages/mistralai_azure/.genignore | 4 + packages/mistralai_azure/.gitattributes | 2 + packages/mistralai_azure/.gitignore | 8 + packages/mistralai_azure/.speakeasy/gen.lock | 133 +++ packages/mistralai_azure/.speakeasy/gen.yaml | 41 + .../mistralai_azure/.vscode/settings.json | 6 + packages/mistralai_azure/CONTRIBUTING.md | 26 + packages/mistralai_azure/README.md | 430 ++++++++ packages/mistralai_azure/USAGE.md | 55 + .../mistralai_azure/docs/models/arguments.md | 17 + .../docs/models/assistantmessage.md | 11 + .../docs/models/assistantmessagerole.md | 8 + .../docs/models/chatcompletionchoice.md | 10 + .../chatcompletionchoicefinishreason.md | 12 + .../docs/models/chatcompletionrequest.md | 20 + .../models/chatcompletionrequestmessages.md | 29 + .../docs/models/chatcompletionrequeststop.md | 19 + .../models/chatcompletionrequesttoolchoice.md | 10 + .../docs/models/chatcompletionresponse.md | 13 + 
.../models/chatcompletionstreamrequest.md | 20 + .../docs/models/completionchunk.md | 13 + .../docs/models/completionevent.md | 8 + .../models/completionresponsestreamchoice.md | 10 + .../mistralai_azure/docs/models/content.md | 17 + .../docs/models/contentchunk.md | 9 + .../docs/models/deltamessage.md | 10 + .../docs/models/finishreason.md | 11 + .../mistralai_azure/docs/models/function.md | 10 + .../docs/models/functioncall.md | 9 + .../docs/models/httpvalidationerror.md | 10 + packages/mistralai_azure/docs/models/loc.md | 17 + .../mistralai_azure/docs/models/messages.md | 29 + .../docs/models/responseformat.md | 8 + .../docs/models/responseformats.md | 11 + packages/mistralai_azure/docs/models/role.md | 8 + .../mistralai_azure/docs/models/security.md | 8 + packages/mistralai_azure/docs/models/stop.md | 19 + .../docs/models/systemmessage.md | 9 + .../mistralai_azure/docs/models/textchunk.md | 9 + packages/mistralai_azure/docs/models/tool.md | 9 + .../mistralai_azure/docs/models/toolcall.md | 10 + .../mistralai_azure/docs/models/toolchoice.md | 10 + .../docs/models/toolmessage.md | 11 + .../docs/models/toolmessagerole.md | 8 + .../mistralai_azure/docs/models/usageinfo.md | 10 + .../docs/models/usermessage.md | 9 + .../docs/models/usermessagecontent.md | 17 + .../docs/models/usermessagerole.md | 8 + .../docs/models/utils/retryconfig.md | 24 + .../docs/models/validationerror.md | 10 + .../mistralai_azure/docs/sdks/chat/README.md | 129 +++ .../docs/sdks/mistralazure/README.md | 9 + packages/mistralai_azure/poetry.lock | 638 ++++++++++++ packages/mistralai_azure/poetry.toml | 2 + packages/mistralai_azure/py.typed | 1 + packages/mistralai_azure/pylintrc | 658 ++++++++++++ packages/mistralai_azure/pyproject.toml | 56 + packages/mistralai_azure/scripts/compile.sh | 83 ++ packages/mistralai_azure/scripts/publish.sh | 5 + .../src/mistralai_azure/__init__.py | 5 + .../src/mistralai_azure/_hooks/__init__.py | 5 + .../_hooks/custom_user_agent.py | 16 + 
.../mistralai_azure/_hooks/registration.py | 15 + .../src/mistralai_azure/_hooks/sdkhooks.py | 57 + .../src/mistralai_azure/_hooks/types.py | 76 ++ .../src/mistralai_azure/basesdk.py | 253 +++++ .../src/mistralai_azure/chat.py | 470 +++++++++ .../src/mistralai_azure/httpclient.py | 78 ++ .../src/mistralai_azure/models/__init__.py | 28 + .../models/assistantmessage.py | 53 + .../models/chatcompletionchoice.py | 22 + .../models/chatcompletionrequest.py | 109 ++ .../models/chatcompletionresponse.py | 27 + .../models/chatcompletionstreamrequest.py | 107 ++ .../mistralai_azure/models/completionchunk.py | 27 + .../mistralai_azure/models/completionevent.py | 15 + .../models/completionresponsestreamchoice.py | 48 + .../mistralai_azure/models/contentchunk.py | 17 + .../mistralai_azure/models/deltamessage.py | 47 + .../src/mistralai_azure/models/function.py | 19 + .../mistralai_azure/models/functioncall.py | 22 + .../models/httpvalidationerror.py | 23 + .../mistralai_azure/models/responseformat.py | 18 + .../src/mistralai_azure/models/sdkerror.py | 22 + .../src/mistralai_azure/models/security.py | 16 + .../mistralai_azure/models/systemmessage.py | 26 + .../src/mistralai_azure/models/textchunk.py | 17 + .../src/mistralai_azure/models/tool.py | 18 + .../src/mistralai_azure/models/toolcall.py | 20 + .../src/mistralai_azure/models/toolmessage.py | 50 + .../src/mistralai_azure/models/usageinfo.py | 18 + .../src/mistralai_azure/models/usermessage.py | 26 + .../mistralai_azure/models/validationerror.py | 24 + .../src/mistralai_azure/py.typed | 1 + .../src/mistralai_azure/sdk.py | 107 ++ .../src/mistralai_azure/sdkconfiguration.py | 54 + .../src/mistralai_azure/types/__init__.py | 21 + .../src/mistralai_azure/types/basemodel.py | 39 + .../src/mistralai_azure/utils/__init__.py | 84 ++ .../src/mistralai_azure/utils/annotations.py | 19 + .../src/mistralai_azure/utils/enums.py | 34 + .../mistralai_azure/utils/eventstreaming.py | 178 ++++ .../src/mistralai_azure/utils/forms.py | 207 ++++ 
.../src/mistralai_azure/utils/headers.py | 136 +++ .../src/mistralai_azure/utils/logger.py | 16 + .../src/mistralai_azure/utils/metadata.py | 118 +++ .../src/mistralai_azure/utils/queryparams.py | 203 ++++ .../mistralai_azure/utils/requestbodies.py | 66 ++ .../src/mistralai_azure/utils/retries.py | 216 ++++ .../src/mistralai_azure/utils/security.py | 168 +++ .../src/mistralai_azure/utils/serializers.py | 181 ++++ .../src/mistralai_azure/utils/url.py | 150 +++ .../src/mistralai_azure/utils/values.py | 128 +++ packages/mistralai_gcp/.genignore | 4 + packages/mistralai_gcp/.gitattributes | 2 + packages/mistralai_gcp/.gitignore | 8 + packages/mistralai_gcp/.speakeasy/gen.lock | 142 +++ packages/mistralai_gcp/.speakeasy/gen.yaml | 44 + packages/mistralai_gcp/.vscode/settings.json | 6 + packages/mistralai_gcp/CONTRIBUTING.md | 26 + packages/mistralai_gcp/README.md | 425 ++++++++ packages/mistralai_gcp/USAGE.md | 51 + .../mistralai_gcp/docs/models/arguments.md | 17 + .../docs/models/assistantmessage.md | 11 + .../docs/models/assistantmessagerole.md | 8 + .../docs/models/chatcompletionchoice.md | 10 + .../chatcompletionchoicefinishreason.md | 12 + .../docs/models/chatcompletionrequest.md | 19 + .../models/chatcompletionrequestmessages.md | 29 + .../docs/models/chatcompletionrequeststop.md | 19 + .../models/chatcompletionrequesttoolchoice.md | 10 + .../docs/models/chatcompletionresponse.md | 13 + .../models/chatcompletionstreamrequest.md | 19 + .../docs/models/completionchunk.md | 13 + .../docs/models/completionevent.md | 8 + .../models/completionresponsestreamchoice.md | 10 + packages/mistralai_gcp/docs/models/content.md | 17 + .../mistralai_gcp/docs/models/contentchunk.md | 9 + .../mistralai_gcp/docs/models/deltamessage.md | 10 + .../docs/models/fimcompletionrequest.md | 17 + .../docs/models/fimcompletionrequeststop.md | 19 + .../docs/models/fimcompletionresponse.md | 13 + .../docs/models/fimcompletionstreamrequest.md | 17 + .../models/fimcompletionstreamrequeststop.md | 
19 + .../mistralai_gcp/docs/models/finishreason.md | 11 + .../mistralai_gcp/docs/models/function.md | 10 + .../mistralai_gcp/docs/models/functioncall.md | 9 + .../docs/models/httpvalidationerror.md | 10 + packages/mistralai_gcp/docs/models/loc.md | 17 + .../mistralai_gcp/docs/models/messages.md | 29 + .../docs/models/responseformat.md | 8 + .../docs/models/responseformats.md | 11 + packages/mistralai_gcp/docs/models/role.md | 8 + .../mistralai_gcp/docs/models/security.md | 8 + packages/mistralai_gcp/docs/models/stop.md | 19 + .../docs/models/systemmessage.md | 9 + .../mistralai_gcp/docs/models/textchunk.md | 9 + packages/mistralai_gcp/docs/models/tool.md | 9 + .../mistralai_gcp/docs/models/toolcall.md | 10 + .../mistralai_gcp/docs/models/toolchoice.md | 10 + .../mistralai_gcp/docs/models/toolmessage.md | 11 + .../docs/models/toolmessagerole.md | 8 + .../mistralai_gcp/docs/models/usageinfo.md | 10 + .../mistralai_gcp/docs/models/usermessage.md | 9 + .../docs/models/usermessagecontent.md | 17 + .../docs/models/usermessagerole.md | 8 + .../docs/models/utils/retryconfig.md | 24 + .../docs/models/validationerror.md | 10 + .../mistralai_gcp/docs/sdks/chat/README.md | 121 +++ .../mistralai_gcp/docs/sdks/fim/README.md | 107 ++ .../docs/sdks/mistralgcp/README.md | 9 + packages/mistralai_gcp/poetry.lock | 848 +++++++++++++++ packages/mistralai_gcp/poetry.toml | 2 + packages/mistralai_gcp/py.typed | 1 + packages/mistralai_gcp/pylintrc | 658 ++++++++++++ packages/mistralai_gcp/pyproject.toml | 58 ++ packages/mistralai_gcp/scripts/compile.sh | 83 ++ packages/mistralai_gcp/scripts/publish.sh | 5 + .../src/mistralai_gcp/__init__.py | 5 + .../src/mistralai_gcp/_hooks/__init__.py | 5 + .../mistralai_gcp/_hooks/custom_user_agent.py | 16 + .../src/mistralai_gcp/_hooks/registration.py | 15 + .../src/mistralai_gcp/_hooks/sdkhooks.py | 57 + .../src/mistralai_gcp/_hooks/types.py | 76 ++ .../src/mistralai_gcp/basesdk.py | 253 +++++ .../mistralai_gcp/src/mistralai_gcp/chat.py | 458 
+++++++++ .../mistralai_gcp/src/mistralai_gcp/fim.py | 438 ++++++++ .../src/mistralai_gcp/httpclient.py | 78 ++ .../src/mistralai_gcp/models/__init__.py | 31 + .../mistralai_gcp/models/assistantmessage.py | 53 + .../models/chatcompletionchoice.py | 22 + .../models/chatcompletionrequest.py | 105 ++ .../models/chatcompletionresponse.py | 27 + .../models/chatcompletionstreamrequest.py | 103 ++ .../mistralai_gcp/models/completionchunk.py | 27 + .../mistralai_gcp/models/completionevent.py | 15 + .../models/completionresponsestreamchoice.py | 48 + .../src/mistralai_gcp/models/contentchunk.py | 17 + .../src/mistralai_gcp/models/deltamessage.py | 47 + .../models/fimcompletionrequest.py | 94 ++ .../models/fimcompletionresponse.py | 27 + .../models/fimcompletionstreamrequest.py | 92 ++ .../src/mistralai_gcp/models/function.py | 19 + .../src/mistralai_gcp/models/functioncall.py | 22 + .../models/httpvalidationerror.py | 23 + .../mistralai_gcp/models/responseformat.py | 18 + .../src/mistralai_gcp/models/sdkerror.py | 22 + .../src/mistralai_gcp/models/security.py | 16 + .../src/mistralai_gcp/models/systemmessage.py | 26 + .../src/mistralai_gcp/models/textchunk.py | 17 + .../src/mistralai_gcp/models/tool.py | 18 + .../src/mistralai_gcp/models/toolcall.py | 20 + .../src/mistralai_gcp/models/toolmessage.py | 50 + .../src/mistralai_gcp/models/usageinfo.py | 18 + .../src/mistralai_gcp/models/usermessage.py | 26 + .../mistralai_gcp/models/validationerror.py | 24 + .../mistralai_gcp/src/mistralai_gcp/py.typed | 1 + .../mistralai_gcp/src/mistralai_gcp/sdk.py | 174 ++++ .../src/mistralai_gcp/sdkconfiguration.py | 54 + .../src/mistralai_gcp/types/__init__.py | 21 + .../src/mistralai_gcp/types/basemodel.py | 39 + .../src/mistralai_gcp/utils/__init__.py | 84 ++ .../src/mistralai_gcp/utils/annotations.py | 19 + .../src/mistralai_gcp/utils/enums.py | 34 + .../src/mistralai_gcp/utils/eventstreaming.py | 178 ++++ .../src/mistralai_gcp/utils/forms.py | 207 ++++ 
.../src/mistralai_gcp/utils/headers.py | 136 +++ .../src/mistralai_gcp/utils/logger.py | 16 + .../src/mistralai_gcp/utils/metadata.py | 118 +++ .../src/mistralai_gcp/utils/queryparams.py | 203 ++++ .../src/mistralai_gcp/utils/requestbodies.py | 66 ++ .../src/mistralai_gcp/utils/retries.py | 216 ++++ .../src/mistralai_gcp/utils/security.py | 168 +++ .../src/mistralai_gcp/utils/serializers.py | 181 ++++ .../src/mistralai_gcp/utils/url.py | 150 +++ .../src/mistralai_gcp/utils/values.py | 128 +++ poetry.lock | 778 +++++++++----- poetry.toml | 2 + py.typed | 1 + pylintrc | 658 ++++++++++++ pyproject.toml | 71 +- scripts/compile.sh | 83 ++ scripts/publish.sh | 5 + src/mistralai/__init__.py | 5 + src/mistralai/_hooks/__init__.py | 5 + src/mistralai/_hooks/custom_user_agent.py | 16 + src/mistralai/_hooks/deprecation_warning.py | 26 + src/mistralai/_hooks/registration.py | 17 + src/mistralai/_hooks/sdkhooks.py | 57 + src/mistralai/_hooks/types.py | 76 ++ src/mistralai/agents.py | 434 ++++++++ src/mistralai/async_client.py | 418 +------- src/mistralai/basesdk.py | 253 +++++ src/mistralai/chat.py | 470 +++++++++ src/mistralai/client.py | 419 +------- src/mistralai/client_base.py | 211 ---- src/mistralai/constants.py | 5 - src/mistralai/embeddings.py | 182 ++++ src/mistralai/exceptions.py | 54 - src/mistralai/files.py | 684 ++++++++++-- src/mistralai/fim.py | 438 ++++++++ src/mistralai/fine_tuning.py | 16 + src/mistralai/httpclient.py | 78 ++ src/mistralai/jobs.py | 972 +++++++++++++++--- src/mistralai/models/__init__.py | 82 ++ .../models/agentscompletionrequest.py | 96 ++ .../models/agentscompletionstreamrequest.py | 92 ++ src/mistralai/models/archiveftmodelout.py | 19 + src/mistralai/models/assistantmessage.py | 53 + src/mistralai/models/chat_completion.py | 93 -- src/mistralai/models/chatcompletionchoice.py | 22 + src/mistralai/models/chatcompletionrequest.py | 109 ++ .../models/chatcompletionresponse.py | 27 + .../models/chatcompletionstreamrequest.py | 107 ++ 
src/mistralai/models/checkpointout.py | 25 + src/mistralai/models/common.py | 9 - src/mistralai/models/completionchunk.py | 27 + src/mistralai/models/completionevent.py | 15 + .../models/completionresponsestreamchoice.py | 48 + src/mistralai/models/contentchunk.py | 17 + ...elete_model_v1_models_model_id_deleteop.py | 18 + src/mistralai/models/deletefileout.py | 24 + src/mistralai/models/deletemodelout.py | 25 + src/mistralai/models/deltamessage.py | 47 + src/mistralai/models/detailedjobout.py | 91 ++ src/mistralai/models/embeddingrequest.py | 61 ++ src/mistralai/models/embeddingresponse.py | 24 + src/mistralai/models/embeddingresponsedata.py | 19 + src/mistralai/models/embeddings.py | 19 - src/mistralai/models/eventout.py | 50 + src/mistralai/models/files.py | 23 - .../models/files_api_routes_delete_fileop.py | 16 + .../files_api_routes_retrieve_fileop.py | 16 + .../models/files_api_routes_upload_fileop.py | 51 + src/mistralai/models/fileschema.py | 71 ++ src/mistralai/models/fimcompletionrequest.py | 94 ++ src/mistralai/models/fimcompletionresponse.py | 27 + .../models/fimcompletionstreamrequest.py | 92 ++ src/mistralai/models/finetuneablemodel.py | 8 + .../models/ftmodelcapabilitiesout.py | 21 + src/mistralai/models/ftmodelout.py | 65 ++ src/mistralai/models/function.py | 19 + src/mistralai/models/functioncall.py | 22 + src/mistralai/models/githubrepositoryin.py | 52 + src/mistralai/models/githubrepositoryout.py | 52 + src/mistralai/models/httpvalidationerror.py | 23 + src/mistralai/models/jobin.py | 73 ++ src/mistralai/models/jobmetadataout.py | 54 + src/mistralai/models/jobout.py | 107 ++ src/mistralai/models/jobs.py | 100 -- ..._fine_tuning_archive_fine_tuned_modelop.py | 18 + ...es_fine_tuning_cancel_fine_tuning_jobop.py | 18 + ...es_fine_tuning_create_fine_tuning_jobop.py | 15 + ...outes_fine_tuning_get_fine_tuning_jobop.py | 18 + ...utes_fine_tuning_get_fine_tuning_jobsop.py | 81 ++ ...tes_fine_tuning_start_fine_tuning_jobop.py | 16 + 
...ine_tuning_unarchive_fine_tuned_modelop.py | 18 + ...s_fine_tuning_update_fine_tuned_modelop.py | 21 + src/mistralai/models/jobsout.py | 20 + src/mistralai/models/legacyjobmetadataout.py | 80 ++ src/mistralai/models/listfilesout.py | 17 + src/mistralai/models/metricout.py | 50 + src/mistralai/models/modelcapabilities.py | 21 + src/mistralai/models/modelcard.py | 66 ++ src/mistralai/models/modellist.py | 18 + src/mistralai/models/models.py | 39 - src/mistralai/models/responseformat.py | 18 + ...retrieve_model_v1_models_model_id_getop.py | 18 + src/mistralai/models/retrievefileout.py | 71 ++ src/mistralai/models/sampletype.py | 7 + src/mistralai/models/sdkerror.py | 22 + src/mistralai/models/security.py | 16 + src/mistralai/models/source.py | 7 + src/mistralai/models/systemmessage.py | 26 + src/mistralai/models/textchunk.py | 17 + src/mistralai/models/tool.py | 18 + src/mistralai/models/toolcall.py | 20 + src/mistralai/models/toolmessage.py | 50 + src/mistralai/models/trainingfile.py | 17 + src/mistralai/models/trainingparameters.py | 48 + src/mistralai/models/trainingparametersin.py | 56 + src/mistralai/models/unarchiveftmodelout.py | 19 + src/mistralai/models/updateftmodelin.py | 44 + src/mistralai/models/uploadfileout.py | 71 ++ src/mistralai/models/usageinfo.py | 18 + src/mistralai/models/usermessage.py | 26 + src/mistralai/models/validationerror.py | 24 + src/mistralai/models/wandbintegration.py | 56 + src/mistralai/models/wandbintegrationout.py | 52 + src/mistralai/models_.py | 928 +++++++++++++++++ src/mistralai/py.typed | 1 + src/mistralai/sdk.py | 119 +++ src/mistralai/sdkconfiguration.py | 54 + src/mistralai/types/__init__.py | 21 + src/mistralai/types/basemodel.py | 39 + src/mistralai/utils/__init__.py | 86 ++ src/mistralai/utils/annotations.py | 19 + src/mistralai/utils/enums.py | 34 + src/mistralai/utils/eventstreaming.py | 178 ++++ src/mistralai/utils/forms.py | 207 ++++ src/mistralai/utils/headers.py | 136 +++ src/mistralai/utils/logger.py | 16 + 
src/mistralai/utils/metadata.py | 118 +++ src/mistralai/utils/queryparams.py | 203 ++++ src/mistralai/utils/requestbodies.py | 66 ++ src/mistralai/utils/retries.py | 216 ++++ src/mistralai/utils/security.py | 185 ++++ src/mistralai/utils/serializers.py | 181 ++++ src/mistralai/utils/url.py | 150 +++ src/mistralai/utils/values.py | 128 +++ tests/__init__.py | 0 tests/conftest.py | 19 - tests/test_chat.py | 149 --- tests/test_chat_async.py | 157 --- tests/test_completion.py | 99 -- tests/test_delete_model.py | 26 - tests/test_delete_model_async.py | 28 - tests/test_embedder.py | 66 -- tests/test_embedder_async.py | 69 -- tests/test_files.py | 105 -- tests/test_files_async.py | 110 -- tests/test_jobs.py | 128 --- tests/test_jobs_async.py | 133 --- tests/test_list_models.py | 30 - tests/test_list_models_async.py | 32 - tests/utils.py | 335 ------ 545 files changed, 30004 insertions(+), 3720 deletions(-) create mode 100644 .genignore create mode 100644 .gitattributes delete mode 100644 .github/workflows/build_publish.yaml create mode 100644 .github/workflows/sdk_generation_mistralai_azure_sdk.yaml create mode 100644 .github/workflows/sdk_generation_mistralai_gcp_sdk.yaml create mode 100644 .github/workflows/sdk_generation_mistralai_sdk.yaml create mode 100644 .github/workflows/sdk_publish_mistralai_sdk.yaml create mode 100644 .speakeasy/gen.lock create mode 100644 .speakeasy/gen.yaml create mode 100644 .speakeasy/workflow.lock create mode 100644 .speakeasy/workflow.yaml create mode 100644 .vscode/settings.json create mode 100644 CONTRIBUTING.md create mode 100644 MIGRATION.md delete mode 100644 Makefile create mode 100644 OLD-README.md create mode 100644 USAGE.md create mode 100644 docs/models/agentscompletionrequest.md create mode 100644 docs/models/agentscompletionrequestmessages.md create mode 100644 docs/models/agentscompletionrequeststop.md create mode 100644 docs/models/agentscompletionrequesttoolchoice.md create mode 100644 
docs/models/agentscompletionstreamrequest.md create mode 100644 docs/models/agentscompletionstreamrequeststop.md create mode 100644 docs/models/archiveftmodelout.md create mode 100644 docs/models/arguments.md create mode 100644 docs/models/assistantmessage.md create mode 100644 docs/models/assistantmessagerole.md create mode 100644 docs/models/chatcompletionchoice.md create mode 100644 docs/models/chatcompletionrequest.md create mode 100644 docs/models/chatcompletionresponse.md create mode 100644 docs/models/chatcompletionstreamrequest.md create mode 100644 docs/models/chatcompletionstreamrequestmessages.md create mode 100644 docs/models/chatcompletionstreamrequeststop.md create mode 100644 docs/models/chatcompletionstreamrequesttoolchoice.md create mode 100644 docs/models/checkpointout.md create mode 100644 docs/models/completionchunk.md create mode 100644 docs/models/completionevent.md create mode 100644 docs/models/completionresponsestreamchoice.md create mode 100644 docs/models/completionresponsestreamchoicefinishreason.md create mode 100644 docs/models/content.md create mode 100644 docs/models/contentchunk.md create mode 100644 docs/models/deletefileout.md create mode 100644 docs/models/deletemodelout.md create mode 100644 docs/models/deletemodelv1modelsmodeliddeleterequest.md create mode 100644 docs/models/deltamessage.md create mode 100644 docs/models/detailedjobout.md create mode 100644 docs/models/detailedjoboutstatus.md create mode 100644 docs/models/embeddingrequest.md create mode 100644 docs/models/embeddingresponse.md create mode 100644 docs/models/embeddingresponsedata.md create mode 100644 docs/models/eventout.md create mode 100644 docs/models/file.md create mode 100644 docs/models/filesapiroutesdeletefilerequest.md create mode 100644 docs/models/filesapiroutesretrievefilerequest.md create mode 100644 docs/models/filesapiroutesuploadfilemultipartbodyparams.md create mode 100644 docs/models/fileschema.md create mode 100644 
docs/models/fimcompletionrequest.md create mode 100644 docs/models/fimcompletionrequeststop.md create mode 100644 docs/models/fimcompletionresponse.md create mode 100644 docs/models/fimcompletionstreamrequest.md create mode 100644 docs/models/fimcompletionstreamrequeststop.md create mode 100644 docs/models/finetuneablemodel.md create mode 100644 docs/models/finishreason.md create mode 100644 docs/models/ftmodelcapabilitiesout.md create mode 100644 docs/models/ftmodelout.md create mode 100644 docs/models/function.md create mode 100644 docs/models/functioncall.md create mode 100644 docs/models/githubrepositoryin.md create mode 100644 docs/models/githubrepositoryout.md create mode 100644 docs/models/httpvalidationerror.md create mode 100644 docs/models/inputs.md create mode 100644 docs/models/jobin.md create mode 100644 docs/models/jobmetadataout.md create mode 100644 docs/models/jobout.md create mode 100644 docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md create mode 100644 docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md create mode 100644 docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md create mode 100644 docs/models/jobsout.md create mode 100644 docs/models/legacyjobmetadataout.md create mode 100644 docs/models/listfilesout.md create mode 100644 docs/models/loc.md create mode 100644 docs/models/messages.md create mode 100644 docs/models/metricout.md create mode 100644 docs/models/modelcapabilities.md create mode 100644 docs/models/modelcard.md create mode 100644 docs/models/modellist.md create mode 
100644 docs/models/queryparamstatus.md create mode 100644 docs/models/responseformat.md create mode 100644 docs/models/responseformats.md create mode 100644 docs/models/retrievefileout.md create mode 100644 docs/models/retrievemodelv1modelsmodelidgetrequest.md create mode 100644 docs/models/role.md create mode 100644 docs/models/sampletype.md create mode 100644 docs/models/security.md create mode 100644 docs/models/source.md create mode 100644 docs/models/status.md create mode 100644 docs/models/stop.md create mode 100644 docs/models/systemmessage.md create mode 100644 docs/models/textchunk.md create mode 100644 docs/models/tool.md create mode 100644 docs/models/toolcall.md create mode 100644 docs/models/toolchoice.md create mode 100644 docs/models/toolmessage.md create mode 100644 docs/models/toolmessagerole.md create mode 100644 docs/models/trainingfile.md create mode 100644 docs/models/trainingparameters.md create mode 100644 docs/models/trainingparametersin.md create mode 100644 docs/models/unarchiveftmodelout.md create mode 100644 docs/models/updateftmodelin.md create mode 100644 docs/models/uploadfileout.md create mode 100644 docs/models/usageinfo.md create mode 100644 docs/models/usermessage.md create mode 100644 docs/models/usermessagecontent.md create mode 100644 docs/models/usermessagerole.md create mode 100644 docs/models/utils/retryconfig.md create mode 100644 docs/models/validationerror.md create mode 100644 docs/models/wandbintegration.md create mode 100644 docs/models/wandbintegrationout.md create mode 100644 docs/sdks/agents/README.md create mode 100644 docs/sdks/chat/README.md create mode 100644 docs/sdks/embeddings/README.md create mode 100644 docs/sdks/files/README.md create mode 100644 docs/sdks/fim/README.md create mode 100644 docs/sdks/finetuning/README.md create mode 100644 docs/sdks/jobs/README.md create mode 100644 docs/sdks/mistral/README.md create mode 100644 docs/sdks/models/README.md create mode 100755 
examples/async_agents_no_streaming.py rename examples/{completion.py => async_code_completion.py} (78%) delete mode 100644 examples/async_completion.py create mode 100644 examples/azure/chat_no_streaming.py.py create mode 100755 examples/gcp/async_chat_no_streaming.py create mode 100644 packages/mistralai_azure/.genignore create mode 100644 packages/mistralai_azure/.gitattributes create mode 100644 packages/mistralai_azure/.gitignore create mode 100644 packages/mistralai_azure/.speakeasy/gen.lock create mode 100644 packages/mistralai_azure/.speakeasy/gen.yaml create mode 100644 packages/mistralai_azure/.vscode/settings.json create mode 100644 packages/mistralai_azure/CONTRIBUTING.md create mode 100644 packages/mistralai_azure/README.md create mode 100644 packages/mistralai_azure/USAGE.md create mode 100644 packages/mistralai_azure/docs/models/arguments.md create mode 100644 packages/mistralai_azure/docs/models/assistantmessage.md create mode 100644 packages/mistralai_azure/docs/models/assistantmessagerole.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionchoice.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionrequest.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionrequeststop.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionresponse.md create mode 100644 packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md create mode 100644 packages/mistralai_azure/docs/models/completionchunk.md create mode 100644 packages/mistralai_azure/docs/models/completionevent.md create mode 100644 packages/mistralai_azure/docs/models/completionresponsestreamchoice.md create mode 100644 
packages/mistralai_azure/docs/models/content.md create mode 100644 packages/mistralai_azure/docs/models/contentchunk.md create mode 100644 packages/mistralai_azure/docs/models/deltamessage.md create mode 100644 packages/mistralai_azure/docs/models/finishreason.md create mode 100644 packages/mistralai_azure/docs/models/function.md create mode 100644 packages/mistralai_azure/docs/models/functioncall.md create mode 100644 packages/mistralai_azure/docs/models/httpvalidationerror.md create mode 100644 packages/mistralai_azure/docs/models/loc.md create mode 100644 packages/mistralai_azure/docs/models/messages.md create mode 100644 packages/mistralai_azure/docs/models/responseformat.md create mode 100644 packages/mistralai_azure/docs/models/responseformats.md create mode 100644 packages/mistralai_azure/docs/models/role.md create mode 100644 packages/mistralai_azure/docs/models/security.md create mode 100644 packages/mistralai_azure/docs/models/stop.md create mode 100644 packages/mistralai_azure/docs/models/systemmessage.md create mode 100644 packages/mistralai_azure/docs/models/textchunk.md create mode 100644 packages/mistralai_azure/docs/models/tool.md create mode 100644 packages/mistralai_azure/docs/models/toolcall.md create mode 100644 packages/mistralai_azure/docs/models/toolchoice.md create mode 100644 packages/mistralai_azure/docs/models/toolmessage.md create mode 100644 packages/mistralai_azure/docs/models/toolmessagerole.md create mode 100644 packages/mistralai_azure/docs/models/usageinfo.md create mode 100644 packages/mistralai_azure/docs/models/usermessage.md create mode 100644 packages/mistralai_azure/docs/models/usermessagecontent.md create mode 100644 packages/mistralai_azure/docs/models/usermessagerole.md create mode 100644 packages/mistralai_azure/docs/models/utils/retryconfig.md create mode 100644 packages/mistralai_azure/docs/models/validationerror.md create mode 100644 packages/mistralai_azure/docs/sdks/chat/README.md create mode 100644 
packages/mistralai_azure/docs/sdks/mistralazure/README.md create mode 100644 packages/mistralai_azure/poetry.lock create mode 100644 packages/mistralai_azure/poetry.toml create mode 100644 packages/mistralai_azure/py.typed create mode 100644 packages/mistralai_azure/pylintrc create mode 100644 packages/mistralai_azure/pyproject.toml create mode 100755 packages/mistralai_azure/scripts/compile.sh create mode 100755 packages/mistralai_azure/scripts/publish.sh create mode 100644 packages/mistralai_azure/src/mistralai_azure/__init__.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/types.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/basesdk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/chat.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/httpclient.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/__init__.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/completionevent.py create mode 100644 
packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/function.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/functioncall.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/responseformat.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/security.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/textchunk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/tool.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/toolcall.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/usermessage.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/validationerror.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/py.typed create mode 100644 packages/mistralai_azure/src/mistralai_azure/sdk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/types/__init__.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/types/basemodel.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/__init__.py create mode 100644 
packages/mistralai_azure/src/mistralai_azure/utils/annotations.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/enums.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/forms.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/headers.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/logger.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/metadata.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/retries.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/security.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/serializers.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/url.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/values.py create mode 100644 packages/mistralai_gcp/.genignore create mode 100644 packages/mistralai_gcp/.gitattributes create mode 100644 packages/mistralai_gcp/.gitignore create mode 100644 packages/mistralai_gcp/.speakeasy/gen.lock create mode 100644 packages/mistralai_gcp/.speakeasy/gen.yaml create mode 100644 packages/mistralai_gcp/.vscode/settings.json create mode 100644 packages/mistralai_gcp/CONTRIBUTING.md create mode 100644 packages/mistralai_gcp/README.md create mode 100644 packages/mistralai_gcp/USAGE.md create mode 100644 packages/mistralai_gcp/docs/models/arguments.md create mode 100644 packages/mistralai_gcp/docs/models/assistantmessage.md create mode 100644 packages/mistralai_gcp/docs/models/assistantmessagerole.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionchoice.md create mode 100644 
packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionrequest.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionresponse.md create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md create mode 100644 packages/mistralai_gcp/docs/models/completionchunk.md create mode 100644 packages/mistralai_gcp/docs/models/completionevent.md create mode 100644 packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md create mode 100644 packages/mistralai_gcp/docs/models/content.md create mode 100644 packages/mistralai_gcp/docs/models/contentchunk.md create mode 100644 packages/mistralai_gcp/docs/models/deltamessage.md create mode 100644 packages/mistralai_gcp/docs/models/fimcompletionrequest.md create mode 100644 packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md create mode 100644 packages/mistralai_gcp/docs/models/fimcompletionresponse.md create mode 100644 packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md create mode 100644 packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md create mode 100644 packages/mistralai_gcp/docs/models/finishreason.md create mode 100644 packages/mistralai_gcp/docs/models/function.md create mode 100644 packages/mistralai_gcp/docs/models/functioncall.md create mode 100644 packages/mistralai_gcp/docs/models/httpvalidationerror.md create mode 100644 packages/mistralai_gcp/docs/models/loc.md create mode 100644 packages/mistralai_gcp/docs/models/messages.md create mode 100644 packages/mistralai_gcp/docs/models/responseformat.md create mode 100644 packages/mistralai_gcp/docs/models/responseformats.md create mode 100644 
packages/mistralai_gcp/docs/models/role.md create mode 100644 packages/mistralai_gcp/docs/models/security.md create mode 100644 packages/mistralai_gcp/docs/models/stop.md create mode 100644 packages/mistralai_gcp/docs/models/systemmessage.md create mode 100644 packages/mistralai_gcp/docs/models/textchunk.md create mode 100644 packages/mistralai_gcp/docs/models/tool.md create mode 100644 packages/mistralai_gcp/docs/models/toolcall.md create mode 100644 packages/mistralai_gcp/docs/models/toolchoice.md create mode 100644 packages/mistralai_gcp/docs/models/toolmessage.md create mode 100644 packages/mistralai_gcp/docs/models/toolmessagerole.md create mode 100644 packages/mistralai_gcp/docs/models/usageinfo.md create mode 100644 packages/mistralai_gcp/docs/models/usermessage.md create mode 100644 packages/mistralai_gcp/docs/models/usermessagecontent.md create mode 100644 packages/mistralai_gcp/docs/models/usermessagerole.md create mode 100644 packages/mistralai_gcp/docs/models/utils/retryconfig.md create mode 100644 packages/mistralai_gcp/docs/models/validationerror.md create mode 100644 packages/mistralai_gcp/docs/sdks/chat/README.md create mode 100644 packages/mistralai_gcp/docs/sdks/fim/README.md create mode 100644 packages/mistralai_gcp/docs/sdks/mistralgcp/README.md create mode 100644 packages/mistralai_gcp/poetry.lock create mode 100644 packages/mistralai_gcp/poetry.toml create mode 100644 packages/mistralai_gcp/py.typed create mode 100644 packages/mistralai_gcp/pylintrc create mode 100644 packages/mistralai_gcp/pyproject.toml create mode 100755 packages/mistralai_gcp/scripts/compile.sh create mode 100755 packages/mistralai_gcp/scripts/publish.sh create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/__init__.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py 
create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/basesdk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/chat.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/fim.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/httpclient.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/function.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py create mode 100644 
packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/security.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/tool.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/py.typed create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/sdk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py create mode 100644 
packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/security.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/url.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/values.py create mode 100644 poetry.toml create mode 100644 py.typed create mode 100644 pylintrc create mode 100755 scripts/compile.sh create mode 100755 scripts/publish.sh create mode 100644 src/mistralai/_hooks/__init__.py create mode 100644 src/mistralai/_hooks/custom_user_agent.py create mode 100644 src/mistralai/_hooks/deprecation_warning.py create mode 100644 src/mistralai/_hooks/registration.py create mode 100644 src/mistralai/_hooks/sdkhooks.py create mode 100644 src/mistralai/_hooks/types.py create mode 100644 src/mistralai/agents.py create mode 100644 src/mistralai/basesdk.py create mode 100644 src/mistralai/chat.py delete mode 100644 src/mistralai/client_base.py delete mode 100644 src/mistralai/constants.py create mode 100644 src/mistralai/embeddings.py delete mode 100644 src/mistralai/exceptions.py create mode 100644 src/mistralai/fim.py create mode 100644 src/mistralai/fine_tuning.py create mode 100644 src/mistralai/httpclient.py create mode 100644 src/mistralai/models/agentscompletionrequest.py create mode 100644 src/mistralai/models/agentscompletionstreamrequest.py create mode 100644 src/mistralai/models/archiveftmodelout.py create mode 100644 src/mistralai/models/assistantmessage.py delete mode 100644 src/mistralai/models/chat_completion.py create mode 100644 src/mistralai/models/chatcompletionchoice.py create mode 100644 src/mistralai/models/chatcompletionrequest.py create mode 100644 src/mistralai/models/chatcompletionresponse.py create mode 100644 src/mistralai/models/chatcompletionstreamrequest.py create mode 
100644 src/mistralai/models/checkpointout.py delete mode 100644 src/mistralai/models/common.py create mode 100644 src/mistralai/models/completionchunk.py create mode 100644 src/mistralai/models/completionevent.py create mode 100644 src/mistralai/models/completionresponsestreamchoice.py create mode 100644 src/mistralai/models/contentchunk.py create mode 100644 src/mistralai/models/delete_model_v1_models_model_id_deleteop.py create mode 100644 src/mistralai/models/deletefileout.py create mode 100644 src/mistralai/models/deletemodelout.py create mode 100644 src/mistralai/models/deltamessage.py create mode 100644 src/mistralai/models/detailedjobout.py create mode 100644 src/mistralai/models/embeddingrequest.py create mode 100644 src/mistralai/models/embeddingresponse.py create mode 100644 src/mistralai/models/embeddingresponsedata.py delete mode 100644 src/mistralai/models/embeddings.py create mode 100644 src/mistralai/models/eventout.py delete mode 100644 src/mistralai/models/files.py create mode 100644 src/mistralai/models/files_api_routes_delete_fileop.py create mode 100644 src/mistralai/models/files_api_routes_retrieve_fileop.py create mode 100644 src/mistralai/models/files_api_routes_upload_fileop.py create mode 100644 src/mistralai/models/fileschema.py create mode 100644 src/mistralai/models/fimcompletionrequest.py create mode 100644 src/mistralai/models/fimcompletionresponse.py create mode 100644 src/mistralai/models/fimcompletionstreamrequest.py create mode 100644 src/mistralai/models/finetuneablemodel.py create mode 100644 src/mistralai/models/ftmodelcapabilitiesout.py create mode 100644 src/mistralai/models/ftmodelout.py create mode 100644 src/mistralai/models/function.py create mode 100644 src/mistralai/models/functioncall.py create mode 100644 src/mistralai/models/githubrepositoryin.py create mode 100644 src/mistralai/models/githubrepositoryout.py create mode 100644 src/mistralai/models/httpvalidationerror.py create mode 100644 src/mistralai/models/jobin.py 
create mode 100644 src/mistralai/models/jobmetadataout.py create mode 100644 src/mistralai/models/jobout.py delete mode 100644 src/mistralai/models/jobs.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py create mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py create mode 100644 src/mistralai/models/jobsout.py create mode 100644 src/mistralai/models/legacyjobmetadataout.py create mode 100644 src/mistralai/models/listfilesout.py create mode 100644 src/mistralai/models/metricout.py create mode 100644 src/mistralai/models/modelcapabilities.py create mode 100644 src/mistralai/models/modelcard.py create mode 100644 src/mistralai/models/modellist.py delete mode 100644 src/mistralai/models/models.py create mode 100644 src/mistralai/models/responseformat.py create mode 100644 src/mistralai/models/retrieve_model_v1_models_model_id_getop.py create mode 100644 src/mistralai/models/retrievefileout.py create mode 100644 src/mistralai/models/sampletype.py create mode 100644 src/mistralai/models/sdkerror.py create mode 100644 src/mistralai/models/security.py create mode 100644 src/mistralai/models/source.py create mode 100644 src/mistralai/models/systemmessage.py create mode 100644 src/mistralai/models/textchunk.py create mode 100644 src/mistralai/models/tool.py create mode 100644 src/mistralai/models/toolcall.py create mode 
100644 src/mistralai/models/toolmessage.py create mode 100644 src/mistralai/models/trainingfile.py create mode 100644 src/mistralai/models/trainingparameters.py create mode 100644 src/mistralai/models/trainingparametersin.py create mode 100644 src/mistralai/models/unarchiveftmodelout.py create mode 100644 src/mistralai/models/updateftmodelin.py create mode 100644 src/mistralai/models/uploadfileout.py create mode 100644 src/mistralai/models/usageinfo.py create mode 100644 src/mistralai/models/usermessage.py create mode 100644 src/mistralai/models/validationerror.py create mode 100644 src/mistralai/models/wandbintegration.py create mode 100644 src/mistralai/models/wandbintegrationout.py create mode 100644 src/mistralai/models_.py create mode 100644 src/mistralai/sdk.py create mode 100644 src/mistralai/sdkconfiguration.py create mode 100644 src/mistralai/types/__init__.py create mode 100644 src/mistralai/types/basemodel.py create mode 100644 src/mistralai/utils/__init__.py create mode 100644 src/mistralai/utils/annotations.py create mode 100644 src/mistralai/utils/enums.py create mode 100644 src/mistralai/utils/eventstreaming.py create mode 100644 src/mistralai/utils/forms.py create mode 100644 src/mistralai/utils/headers.py create mode 100644 src/mistralai/utils/logger.py create mode 100644 src/mistralai/utils/metadata.py create mode 100644 src/mistralai/utils/queryparams.py create mode 100644 src/mistralai/utils/requestbodies.py create mode 100644 src/mistralai/utils/retries.py create mode 100644 src/mistralai/utils/security.py create mode 100644 src/mistralai/utils/serializers.py create mode 100644 src/mistralai/utils/url.py create mode 100644 src/mistralai/utils/values.py delete mode 100644 tests/__init__.py delete mode 100644 tests/conftest.py delete mode 100644 tests/test_chat.py delete mode 100644 tests/test_chat_async.py delete mode 100644 tests/test_completion.py delete mode 100644 tests/test_delete_model.py delete mode 100644 tests/test_delete_model_async.py 
delete mode 100644 tests/test_embedder.py delete mode 100644 tests/test_embedder_async.py delete mode 100644 tests/test_files.py delete mode 100644 tests/test_files_async.py delete mode 100644 tests/test_jobs.py delete mode 100644 tests/test_jobs_async.py delete mode 100644 tests/test_list_models.py delete mode 100644 tests/test_list_models_async.py delete mode 100644 tests/utils.py diff --git a/.genignore b/.genignore new file mode 100644 index 00000000..1186de66 --- /dev/null +++ b/.genignore @@ -0,0 +1,2 @@ +pyproject.toml +examples/* \ No newline at end of file diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..4d75d590 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +# This allows generated code to be indexed correctly +*.py linguist-generated=false \ No newline at end of file diff --git a/.github/workflows/build_publish.yaml b/.github/workflows/build_publish.yaml deleted file mode 100644 index a696f100..00000000 --- a/.github/workflows/build_publish.yaml +++ /dev/null @@ -1,90 +0,0 @@ -name: Lint / Test / Publish - -on: - push: - branches: ["main"] - - # We only deploy on tags and main branch - tags: - # Only run on tags that match the following regex - # This will match tags like 1.0.0, 1.0.1, etc. - - "[0-9]+.[0-9]+.[0-9]+" - - # Lint and test on pull requests - pull_request: - -jobs: - lint_and_test: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ["3.9", "3.10", "3.11", "3.12"] - steps: - # Checkout the repository - - name: Checkout - uses: actions/checkout@v4 - - # Set python version to 3.11 - - name: set python version - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - # Install Build stuff - - name: Install Dependencies - run: | - pip install poetry \ - && poetry config virtualenvs.create false \ - && poetry install - - # Ruff - - name: Ruff check - run: | - poetry run ruff check . - - - name: Ruff check - run: | - poetry run ruff format . 
--check - - # Mypy - - name: Mypy Check - run: | - poetry run mypy . - - # Tests - - name: Run Tests - run: | - poetry run pytest . - - publish: - if: startsWith(github.ref, 'refs/tags') - runs-on: ubuntu-latest - needs: lint_and_test - steps: - # Checkout the repository - - name: Checkout - uses: actions/checkout@v4 - - # Set python version to 3.11 - - name: set python version - uses: actions/setup-python@v4 - with: - python-version: 3.11 - - # Install Build stuff - - name: Install Dependencies - run: | - pip install poetry \ - && poetry config virtualenvs.create false \ - && poetry install - - # build package using poetry - - name: Build Package - run: | - poetry build - - # Publish to PyPi - - name: Pypi publish - run: | - poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }} - poetry publish diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml new file mode 100644 index 00000000..7ec5bb8d --- /dev/null +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -0,0 +1,29 @@ +name: Generate MISTRAL-PYTHON-SDK-AZURE +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + workflow_dispatch: + inputs: + force: + description: Force generation of SDKs + type: boolean + default: false + set_version: + description: optionally set a specific SDK version + type: string +jobs: + generate: + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + with: + force: ${{ github.event.inputs.force }} + mode: pr + set_version: ${{ github.event.inputs.set_version }} + speakeasy_version: latest + target: mistral-python-sdk-azure + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml new file mode 
100644 index 00000000..c4da64f7 --- /dev/null +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -0,0 +1,29 @@ +name: Generate MISTRAL-PYTHON-SDK-GOOGLE-CLOUD +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + workflow_dispatch: + inputs: + force: + description: Force generation of SDKs + type: boolean + default: false + set_version: + description: optionally set a specific SDK version + type: string +jobs: + generate: + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + with: + force: ${{ github.event.inputs.force }} + mode: pr + set_version: ${{ github.event.inputs.set_version }} + speakeasy_version: latest + target: mistral-python-sdk-google-cloud + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml new file mode 100644 index 00000000..7d0540e7 --- /dev/null +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -0,0 +1,29 @@ +name: Generate MISTRALAI +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + workflow_dispatch: + inputs: + force: + description: Force generation of SDKs + type: boolean + default: false + set_version: + description: optionally set a specific SDK version + type: string +jobs: + generate: + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + with: + force: ${{ github.event.inputs.force }} + mode: pr + set_version: ${{ github.event.inputs.set_version }} + speakeasy_version: latest + target: mistralai-sdk + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml 
b/.github/workflows/sdk_publish_mistralai_sdk.yaml new file mode 100644 index 00000000..87160243 --- /dev/null +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -0,0 +1,20 @@ +name: Publish MISTRALAI-SDK +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +"on": + push: + branches: + - main + paths: + - RELEASES.md + - '*/RELEASES.md' +jobs: + publish: + uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@v15 + secrets: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + pypi_token: ${{ secrets.PYPI_TOKEN }} + speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} diff --git a/.gitignore b/.gitignore index 1e27b2b3..999b933e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,11 +1,14 @@ +.venv/ +pyrightconfig.json +src/*.egg-info/ +.python-version +.DS_Store # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class - # C extensions *.so - # Distribution / packaging .Python build/ @@ -25,17 +28,14 @@ share/python-wheels/ .installed.cfg *.egg MANIFEST - # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec - # Installer logs pip-log.txt pip-delete-this-directory.txt - # Unit test / coverage reports htmlcov/ .tox/ @@ -50,75 +50,61 @@ coverage.xml .hypothesis/ .pytest_cache/ cover/ - # Translations *.mo *.pot - # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal - # Flask stuff: instance/ .webassets-cache - # Scrapy stuff: .scrapy - # Sphinx documentation docs/_build/ - # PyBuilder .pybuilder/ target/ - # Jupyter Notebook .ipynb_checkpoints - # IPython profile_default/ ipython_config.py - # pyenv # For a library or package, you might want to ignore these files since the code is # intended to run in multiple environments; otherwise, check them in: # .python-version - # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock - # poetry # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. # This is especially recommended for binary packages to ensure reproducibility, and is more # commonly ignored for libraries. # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control #poetry.lock - # pdm # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. #pdm.lock # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it # in version control. -# https://pdm.fming.dev/#use-with-ide +# https://pdm.fming.dev/latest/usage/project/#working-with-version-control .pdm.toml - +.pdm-python +.pdm-build/ # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm __pypackages__/ - # Celery stuff celerybeat-schedule celerybeat.pid - # SageMath parsed files *.sage.py - # Environments .env .venv @@ -127,38 +113,26 @@ venv/ ENV/ env.bak/ venv.bak/ - # Spyder project settings .spyderproject .spyproject - # Rope project settings .ropeproject - # mkdocs documentation /site - # mypy .mypy_cache/ .dmypy.json dmypy.json - # Pyre type checker .pyre/ - # pytype static type analyzer .pytype/ - # Cython debug symbols cython_debug/ - # PyCharm # JetBrains specific template is maintained in a separate JetBrains.gitignore that can # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ - -scratch/ - -changes.diff \ No newline at end of file diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock new file mode 100644 index 00000000..cf866343 --- /dev/null +++ b/.speakeasy/gen.lock @@ -0,0 +1,272 @@ +lockVersion: 2.0.0 +id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 +management: + docChecksum: ab9fe4a3c278619e334a828e2c336554 + docVersion: 0.0.2 + speakeasyVersion: 1.356.0 + generationVersion: 2.388.1 + releaseVersion: 1.0.0-rc.2 + configChecksum: 09abab5b4ed374c8d48d4e9b9ca6eb65 + published: true +features: + python: + additionalDependencies: 1.0.0 + constsAndDefaults: 1.0.2 + core: 5.3.4 + defaultEnabledRetries: 0.2.0 + envVarSecurityUsage: 0.3.1 + examples: 3.0.0 + flatRequests: 1.0.1 + flattening: 3.0.0 + globalSecurity: 3.0.1 + globalSecurityCallbacks: 1.0.0 + globalSecurityFlattening: 1.0.0 + globalServerURLs: 3.0.0 + multipartFileContentType: 1.0.0 + nameOverrides: 3.0.0 + nullables: 1.0.0 + responseFormat: 1.0.0 + retries: 3.0.0 + sdkHooks: 1.0.0 + serverEvents: 1.0.2 + serverEventsSentinels: 0.1.0 + serverIDs: 3.0.0 + 
unions: 3.0.1 + uploadStreams: 1.0.0 +generatedFiles: + - src/mistralai/sdkconfiguration.py + - src/mistralai/models_.py + - src/mistralai/files.py + - src/mistralai/jobs.py + - src/mistralai/fine_tuning.py + - src/mistralai/chat.py + - src/mistralai/fim.py + - src/mistralai/agents.py + - src/mistralai/embeddings.py + - src/mistralai/sdk.py + - .vscode/settings.json + - poetry.toml + - py.typed + - pylintrc + - scripts/compile.sh + - scripts/publish.sh + - src/mistralai/__init__.py + - src/mistralai/basesdk.py + - src/mistralai/httpclient.py + - src/mistralai/py.typed + - src/mistralai/types/__init__.py + - src/mistralai/types/basemodel.py + - src/mistralai/utils/__init__.py + - src/mistralai/utils/annotations.py + - src/mistralai/utils/enums.py + - src/mistralai/utils/eventstreaming.py + - src/mistralai/utils/forms.py + - src/mistralai/utils/headers.py + - src/mistralai/utils/logger.py + - src/mistralai/utils/metadata.py + - src/mistralai/utils/queryparams.py + - src/mistralai/utils/requestbodies.py + - src/mistralai/utils/retries.py + - src/mistralai/utils/security.py + - src/mistralai/utils/serializers.py + - src/mistralai/utils/url.py + - src/mistralai/utils/values.py + - src/mistralai/models/sdkerror.py + - src/mistralai/models/modellist.py + - src/mistralai/models/modelcard.py + - src/mistralai/models/modelcapabilities.py + - src/mistralai/models/httpvalidationerror.py + - src/mistralai/models/validationerror.py + - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py + - src/mistralai/models/deletemodelout.py + - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py + - src/mistralai/models/ftmodelout.py + - src/mistralai/models/ftmodelcapabilitiesout.py + - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py + - src/mistralai/models/updateftmodelin.py + - src/mistralai/models/archiveftmodelout.py + - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py + - 
src/mistralai/models/unarchiveftmodelout.py + - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py + - src/mistralai/models/uploadfileout.py + - src/mistralai/models/source.py + - src/mistralai/models/sampletype.py + - src/mistralai/models/files_api_routes_upload_fileop.py + - src/mistralai/models/listfilesout.py + - src/mistralai/models/fileschema.py + - src/mistralai/models/retrievefileout.py + - src/mistralai/models/files_api_routes_retrieve_fileop.py + - src/mistralai/models/deletefileout.py + - src/mistralai/models/files_api_routes_delete_fileop.py + - src/mistralai/models/jobsout.py + - src/mistralai/models/jobout.py + - src/mistralai/models/jobmetadataout.py + - src/mistralai/models/githubrepositoryout.py + - src/mistralai/models/wandbintegrationout.py + - src/mistralai/models/finetuneablemodel.py + - src/mistralai/models/trainingparameters.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py + - src/mistralai/models/legacyjobmetadataout.py + - src/mistralai/models/jobin.py + - src/mistralai/models/githubrepositoryin.py + - src/mistralai/models/wandbintegration.py + - src/mistralai/models/trainingparametersin.py + - src/mistralai/models/trainingfile.py + - src/mistralai/models/detailedjobout.py + - src/mistralai/models/checkpointout.py + - src/mistralai/models/metricout.py + - src/mistralai/models/eventout.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py + - src/mistralai/models/chatcompletionresponse.py + - src/mistralai/models/chatcompletionchoice.py + - src/mistralai/models/assistantmessage.py + - src/mistralai/models/toolcall.py + - src/mistralai/models/functioncall.py + - src/mistralai/models/usageinfo.py + - 
src/mistralai/models/chatcompletionrequest.py + - src/mistralai/models/tool.py + - src/mistralai/models/function.py + - src/mistralai/models/responseformat.py + - src/mistralai/models/systemmessage.py + - src/mistralai/models/contentchunk.py + - src/mistralai/models/usermessage.py + - src/mistralai/models/textchunk.py + - src/mistralai/models/toolmessage.py + - src/mistralai/models/completionevent.py + - src/mistralai/models/completionchunk.py + - src/mistralai/models/completionresponsestreamchoice.py + - src/mistralai/models/deltamessage.py + - src/mistralai/models/chatcompletionstreamrequest.py + - src/mistralai/models/fimcompletionresponse.py + - src/mistralai/models/fimcompletionrequest.py + - src/mistralai/models/fimcompletionstreamrequest.py + - src/mistralai/models/agentscompletionrequest.py + - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/embeddingresponse.py + - src/mistralai/models/embeddingresponsedata.py + - src/mistralai/models/embeddingrequest.py + - src/mistralai/models/security.py + - src/mistralai/models/__init__.py + - docs/models/modellist.md + - docs/models/modelcard.md + - docs/models/modelcapabilities.md + - docs/models/httpvalidationerror.md + - docs/models/loc.md + - docs/models/validationerror.md + - docs/models/retrievemodelv1modelsmodelidgetrequest.md + - docs/models/deletemodelout.md + - docs/models/deletemodelv1modelsmodeliddeleterequest.md + - docs/models/ftmodelout.md + - docs/models/ftmodelcapabilitiesout.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md + - docs/models/updateftmodelin.md + - docs/models/archiveftmodelout.md + - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md + - docs/models/unarchiveftmodelout.md + - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md + - docs/models/uploadfileout.md + - docs/models/source.md + - docs/models/sampletype.md + - docs/models/file.md + - docs/models/filesapiroutesuploadfilemultipartbodyparams.md 
+ - docs/models/listfilesout.md + - docs/models/fileschema.md + - docs/models/retrievefileout.md + - docs/models/filesapiroutesretrievefilerequest.md + - docs/models/deletefileout.md + - docs/models/filesapiroutesdeletefilerequest.md + - docs/models/jobsout.md + - docs/models/status.md + - docs/models/jobout.md + - docs/models/jobmetadataout.md + - docs/models/githubrepositoryout.md + - docs/models/wandbintegrationout.md + - docs/models/finetuneablemodel.md + - docs/models/trainingparameters.md + - docs/models/queryparamstatus.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md + - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md + - docs/models/legacyjobmetadataout.md + - docs/models/jobin.md + - docs/models/githubrepositoryin.md + - docs/models/wandbintegration.md + - docs/models/trainingparametersin.md + - docs/models/trainingfile.md + - docs/models/detailedjoboutstatus.md + - docs/models/detailedjobout.md + - docs/models/checkpointout.md + - docs/models/metricout.md + - docs/models/eventout.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md + - docs/models/chatcompletionresponse.md + - docs/models/finishreason.md + - docs/models/chatcompletionchoice.md + - docs/models/assistantmessagerole.md + - docs/models/assistantmessage.md + - docs/models/toolcall.md + - docs/models/arguments.md + - docs/models/functioncall.md + - docs/models/usageinfo.md + - docs/models/stop.md + - docs/models/messages.md + - docs/models/toolchoice.md + - docs/models/chatcompletionrequest.md + - docs/models/tool.md + - docs/models/function.md + - docs/models/responseformats.md + - docs/models/responseformat.md + - docs/models/content.md + - docs/models/role.md + - docs/models/systemmessage.md + - docs/models/contentchunk.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - 
docs/models/usermessage.md + - docs/models/textchunk.md + - docs/models/toolmessagerole.md + - docs/models/toolmessage.md + - docs/models/completionevent.md + - docs/models/completionchunk.md + - docs/models/completionresponsestreamchoicefinishreason.md + - docs/models/completionresponsestreamchoice.md + - docs/models/deltamessage.md + - docs/models/chatcompletionstreamrequeststop.md + - docs/models/chatcompletionstreamrequestmessages.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/agentscompletionrequeststop.md + - docs/models/agentscompletionrequestmessages.md + - docs/models/agentscompletionrequesttoolchoice.md + - docs/models/agentscompletionrequest.md + - docs/models/agentscompletionstreamrequeststop.md + - docs/models/agentscompletionstreamrequest.md + - docs/models/embeddingresponse.md + - docs/models/embeddingresponsedata.md + - docs/models/inputs.md + - docs/models/embeddingrequest.md + - docs/models/security.md + - docs/sdks/mistral/README.md + - docs/models/utils/retryconfig.md + - docs/sdks/models/README.md + - docs/sdks/files/README.md + - docs/sdks/finetuning/README.md + - docs/sdks/jobs/README.md + - docs/sdks/chat/README.md + - docs/sdks/fim/README.md + - docs/sdks/agents/README.md + - docs/sdks/embeddings/README.md + - USAGE.md + - .gitattributes + - src/mistralai/_hooks/sdkhooks.py + - src/mistralai/_hooks/types.py + - src/mistralai/_hooks/__init__.py diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml new file mode 100644 index 00000000..13a25388 --- /dev/null +++ b/.speakeasy/gen.yaml @@ -0,0 +1,42 @@ +configVersion: 2.0.0 +generation: + sdkClassName: Mistral + maintainOpenAPIOrder: true + usageSnippets: + optionalPropertyRendering: withExample + 
useClassNamesForArrayFields: true + fixes: + nameResolutionDec2023: true + parameterOrderingFeb2024: true + requestResponseComponentNamesFeb2024: true + auth: + oAuth2ClientCredentialsEnabled: true +python: + version: 1.0.0-rc.2 + additionalDependencies: + dev: + pytest: ^8.2.2 + pytest-asyncio: ^0.23.7 + authors: + - Mistral + clientServerStatusCodesAsErrors: true + description: Python Client SDK for the Mistral AI API. + enumFormat: union + envVarPrefix: MISTRAL + flattenGlobalSecurity: true + flattenRequests: true + imports: + option: openapi + paths: + callbacks: "" + errors: "" + operations: "" + shared: "" + webhooks: "" + inputModelSuffix: input + maxMethodParams: 4 + methodArguments: infer-optional-args + outputModelSuffix: output + packageName: mistralai + responseFormat: flat + templateVersion: v2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock new file mode 100644 index 00000000..47bfbe00 --- /dev/null +++ b/.speakeasy/workflow.lock @@ -0,0 +1,52 @@ +speakeasyVersion: 1.356.0 +sources: {} +targets: + mistralai-azure-sdk: + source: mistral-azure-source + outLocation: ./packages/mistralai_azure + mistralai-gcp-sdk: + source: mistral-google-cloud-source + outLocation: ./packages/mistralai_gcp + mistralai-sdk: + source: mistral-openapi + outLocation: /Users/gaspard/public-mistral/client-python +workflow: + workflowVersion: 1.0.0 + speakeasyVersion: latest + sources: + mistral-azure-source: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + mistral-google-cloud-source: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + mistral-openapi: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + registry: + location: 
registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + targets: + mistralai-azure-sdk: + target: python + source: mistral-azure-source + output: ./packages/mistralai_azure + publish: + pypi: + token: $pypi_token + mistralai-gcp-sdk: + target: python + source: mistral-google-cloud-source + output: ./packages/mistralai_gcp + publish: + pypi: + token: $pypi_token + mistralai-sdk: + target: python + source: mistral-openapi + publish: + pypi: + token: $pypi_token diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml new file mode 100644 index 00000000..4076ff32 --- /dev/null +++ b/.speakeasy/workflow.yaml @@ -0,0 +1,39 @@ +workflowVersion: 1.0.0 +speakeasyVersion: latest +sources: + mistral-azure-source: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + mistral-google-cloud-source: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + mistral-openapi: + inputs: + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi +targets: + mistralai-azure-sdk: + target: python + source: mistral-azure-source + output: ./packages/mistralai_azure + publish: + pypi: + token: $pypi_token + mistralai-gcp-sdk: + target: python + source: mistral-google-cloud-source + output: ./packages/mistralai_gcp + publish: + pypi: + token: $pypi_token + mistralai-sdk: + target: python + source: mistral-openapi + publish: + pypi: + token: $pypi_token diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..8d79f0ab --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "python.testing.pytestArgs": ["tests", "-vv"], + 
"python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "pylint.args": ["--rcfile=pylintrc"] +} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..d585717f --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to This Repository + +Thank you for your interest in contributing to this repository. Please note that this repository contains generated code. As such, we do not accept direct changes or pull requests. Instead, we encourage you to follow the guidelines below to report issues and suggest improvements. + +## How to Report Issues + +If you encounter any bugs or have suggestions for improvements, please open an issue on GitHub. When reporting an issue, please provide as much detail as possible to help us reproduce the problem. This includes: + +- A clear and descriptive title +- Steps to reproduce the issue +- Expected and actual behavior +- Any relevant logs, screenshots, or error messages +- Information about your environment (e.g., operating system, software versions) + - For example can be collected using the `npx envinfo` command from your terminal if you have Node.js installed + +## Issue Triage and Upstream Fixes + +We will review and triage issues as quickly as possible. Our goal is to address bugs and incorporate improvements in the upstream source code. Fixes will be included in the next generation of the generated code. + +## Contact + +If you have any questions or need further assistance, please feel free to reach out by opening an issue. + +Thank you for your understanding and cooperation! + +The Maintainers diff --git a/LICENSE b/LICENSE index 261eeb9e..bec12768 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. 
- Copyright [yyyy] [name of copyright owner] + Copyright 2024 Mistral AI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/MIGRATION.md b/MIGRATION.md new file mode 100644 index 00000000..6582b85b --- /dev/null +++ b/MIGRATION.md @@ -0,0 +1,216 @@ + +# Migration Guide for MistralAI Client from 0.\*.\* to 1.0.0 + +We have made significant changes to the `mistralai` library to improve its usability and consistency. This guide will help you migrate your code from the old client to the new one. + +## Major Changes + +1. **Unified Client Class**: + - The `MistralClient` and `MistralAsyncClient` classes have been consolidated into a single `Mistral` class. + - This simplifies the API by providing a single entry point for both synchronous and asynchronous operations. + +2. **Method Names and Structure**: + - The method names and structure have been updated for better clarity and consistency. + - For example: + - `client.chat` is now `client.chat.complete` for non-streaming calls + - `client.chat_stream` is now `client.chat.stream` for streaming calls + - Async `client.chat` is now `client.chat.complete_async` for async non-streaming calls + - Async `client.chat_stream` is now `client.chat.stream_async` for async streaming calls + + +## Method changes + +### Sync + +| Old Methods | New Methods | +| -------------------------- | -------------------------------- | +| `MistralClient` | `Mistral` | +| `client.chat` | `client.chat.complete` | +| `client.chat_stream` | `client.chat.stream` | +| `client.completions` | `client.fim.complete` | +| `client.completions_stream` | `client.fim.stream` | +| `client.embeddings` | `client.embeddings.create` | +| `client.list_models` | `client.models.list` | +| `client.delete_model` | `client.models.delete` | +| `client.files.create` | `client.files.upload` | +| `client.files.list` | `client.files.list` | +| `client.files.retrieve` |
`client.files.retrieve` | +| `client.files.delete` | `client.files.delete` | +| `client.jobs.create` | `client.fine_tuning.jobs.create` | +| `client.jobs.list` | `client.fine_tuning.jobs.list` | +| `client.jobs.retrieve` | `client.fine_tuning.jobs.get` | +| `client.jobs.cancel` | `client.fine_tuning.jobs.cancel` | + +### Async + +| Old Methods | New Methods | +| -------------------------------- | -------------------------------------- | +| `MistralAsyncClient` | `Mistral` | +| `async_client.chat` | `client.chat.complete_async` | +| `async_client.chat_stream` | `client.chat.stream_async` | +| `async_client.completions` | `client.fim.complete_async` | +| `async_client.completions_stream` | `client.fim.stream_async` | +| `async_client.embeddings` | `client.embeddings.create_async` | +| `async_client.list_models` | `client.models.list_async` | +| `async_client.delete_model` | `client.models.delete_async` | +| `async_client.files.create` | `client.files.upload_async` | +| `async_client.files.list` | `client.files.list_async` | +| `async_client.files.retrieve` | `client.files.retrieve_async` | +| `async_client.files.delete` | `client.files.delete_async` | +| `async_client.jobs.create` | `client.fine_tuning.jobs.create_async` | +| `async_client.jobs.list` | `client.fine_tuning.jobs.list_async` | +| `async_client.jobs.retrieve` | `client.fine_tuning.jobs.get_async` | +| `async_client.jobs.cancel` | `client.fine_tuning.jobs.cancel_async` | + +### Message Changes + +The `ChatMessage` class has been replaced with a more flexible system. You can now use the `SystemMessage`, `UserMessage`, `AssistantMessage`, and `ToolMessage` classes to create messages. + +The return object of the stream call methods has been modified to `chunk.data.choices[0].delta.content` from `chunk.choices[0].delta.content`.
+ +## Example Migrations + +### Example 1: Non-Streaming Chat + +**Old:** +```python +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = MistralClient(api_key=api_key) + +messages = [ + ChatMessage(role="user", content="What is the best French cheese?") +] + +# No streaming +chat_response = client.chat( + model=model, + messages=messages, +) + +print(chat_response.choices[0].message.content) +``` + +**New:** + +```python +import os +from mistralai import Mistral, UserMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = Mistral(api_key=api_key) + +messages = [ + { + "role": "user", + "content": "What is the best French cheese?", + }, +] + +chat_response = client.chat.complete( + model = model, + messages = messages, +) + +print(chat_response.choices[0].message.content) +``` + +### Example 2: Streaming Chat + +**Old:** + +```python +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = MistralClient(api_key=api_key) + +messages = [ + ChatMessage(role="user", content="What is the best French cheese?") +] + +# With streaming +stream_response = client.chat_stream(model=model, messages=messages) + +for chunk in stream_response: + print(chunk.choices[0].delta.content) +``` +**New:** +```python +from mistralai import Mistral, UserMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = Mistral(api_key=api_key) + +messages = [ + { + "role": "user", + "content": "What is the best French cheese?", + }, +] + +stream_response = client.chat.stream( + model = model, + messages = messages, +) + +for chunk in stream_response: + print(chunk.data.choices[0].delta.content) +``` + +### Example 3: Async + +**Old:** +```python +from
mistralai.async_client import MistralAsyncClient +from mistralai.models.chat_completion import ChatMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = MistralAsyncClient(api_key=api_key) + +messages = [ + ChatMessage(role="user", content="What is the best French cheese?") +] + +# With async +async_response = client.chat_stream(model=model, messages=messages) + +async for chunk in async_response: + print(chunk.choices[0].delta.content) +``` + +**New:** +```python +from mistralai import Mistral, UserMessage + +api_key = os.environ["MISTRAL_API_KEY"] +model = "mistral-large-latest" + +client = Mistral(api_key=api_key) + +messages = [ + { + "role": "user", + "content": "What is the best French cheese?", + }, +] + +# With async +async_response = await client.chat.stream_async(model=model, messages=messages) + +async for chunk in async_response: + print(chunk.data.choices[0].delta.content) +``` diff --git a/Makefile b/Makefile deleted file mode 100644 index 188291f6..00000000 --- a/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -.PHONY: lint - -lint: - poetry run ruff check --fix . - poetry run ruff format . - poetry run mypy . diff --git a/OLD-README.md b/OLD-README.md new file mode 100644 index 00000000..22967f91 --- /dev/null +++ b/OLD-README.md @@ -0,0 +1,62 @@ +# Mistral Python Client + +This client is inspired from [cohere-python](https://github.com/cohere-ai/cohere-python) + +You can use the Mistral Python client to interact with the Mistral AI API. + +## Installing + +```bash +pip install mistralai +``` + +### From Source + +This client uses `poetry` as a dependency and virtual environment manager. 
+ +You can install poetry with + +```bash +pip install poetry +``` + +`poetry` will set up a virtual environment and install dependencies with the following command: + +```bash +poetry install +``` + +## Run examples + +You can run the examples in the `examples/` directory using `poetry run` or by entering the virtual environment using `poetry shell`. + +### API Key Setup + +Running the examples requires a Mistral AI API key. + +1. Get your own Mistral API Key: +2. Set your Mistral API Key as an environment variable. You only need to do this once. + +```bash +# set Mistral API Key (using zsh for example) +$ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv + +# reload the environment (or just quit and open a new terminal) +$ source ~/.zshenv +``` + +### Using poetry run + +```bash +cd examples +poetry run python chat_no_streaming.py +``` + +### Using poetry shell + +```bash +poetry shell +cd examples + +>> python chat_no_streaming.py +``` diff --git a/README.md b/README.md index 22967f91..d207a895 100644 --- a/README.md +++ b/README.md @@ -1,62 +1,672 @@ # Mistral Python Client -This client is inspired from [cohere-python](https://github.com/cohere-ai/cohere-python) +## Migration warning + +This documentation is for Mistral AI SDK v1. You can find more details on how to migrate from v0 to v1 [here](MIGRATION.md) -You can use the Mistral Python client to interact with the Mistral AI API. +## API Key Setup -## Installing +Before you begin, you will need a Mistral AI API key. + +1. Get your own Mistral API Key: +2. Set your Mistral API Key as an environment variable. You only need to do this once. 
+```bash +# set Mistral API Key (using zsh for example) +$ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv + +# reload the environment (or just quit and open a new terminal) +$ source ~/.zshenv +``` + + +## SDK Installation + +PIP ```bash pip install mistralai ``` -### From Source +Poetry +```bash +poetry add mistralai +``` + -This client uses `poetry` as a dependency and virtual environment manager. + +## SDK Example Usage -You can install poetry with +### Create Chat Completions -```bash -pip install poetry +This example shows how to create chat completions. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.chat.complete(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +]) + +if res is not None: + # handle response + pass ``` -`poetry` will set up a virtual environment and install dependencies with the following command: +
-```bash -poetry install +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.chat.complete_async(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ]) + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + +### Upload a file + +This example shows how to upload a file. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.upload(file={ + "file_name": "your_file_here", + "content": open("", "rb"), +}) + +if res is not None: + # handle response + pass +``` +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.files.upload_async(file={ + "file_name": "your_file_here", + "content": open("", "rb"), + }) + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + +### Create Agents Completions + +This example shows how to create agents completions. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.agents.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], agent_id="") + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.agents.complete_async(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], agent_id="") + if res is not None: + # handle response + pass + +asyncio.run(main()) ``` + -## Run examples + +### More examples You can run the examples in the `examples/` directory using `poetry run` or by entering the virtual environment using `poetry shell`. -### API Key Setup -Running the examples requires a Mistral AI API key. +## Providers' SDKs Example Usage -1. Get your own Mistral API Key: -2. Set your Mistral API Key as an environment variable. You only need to do this once. +### Azure AI -```bash -# set Mistral API Key (using zsh for example) -$ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv +**Prerequisites** -# reload the environment (or just quit and open a new terminal) -$ source ~/.zshenv +Before you begin, ensure you have `AZUREAI_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). + +Here's a basic example to get you started. You can also run [the example in the `examples` directory](/examples/azure). 
+ +```python +import asyncio +import os + +from mistralai_azure import MistralAzure + +client = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + +async def main() -> None: + res = await client.chat.complete_async( + max_tokens= 100, + temperature= 0.5, + messages= [ + { + "content": "Hello there!", + "role": "user" + } + ] + ) + print(res) + +asyncio.run(main()) ``` +The documentation for the Azure SDK is available [here](packages/mistralai_azure/README.md). + +### Google Cloud + + +**Prerequisites** + +Before you begin, you will need to create a Google Cloud project and enable the Mistral API. To do this, follow the instructions [here](https://docs.mistral.ai/deployment/cloud/vertex/). -### Using poetry run +To run this locally you will also need to ensure you are authenticated with Google Cloud. You can do this by running ```bash -cd examples -poetry run python chat_no_streaming.py +gcloud auth application-default login ``` -### Using poetry shell +**Step 1: Install** + +Install the extras dependencies specific to Google Cloud: ```bash -poetry shell -cd examples +pip install mistralai[gcp] +``` + +**Step 2: Example Usage** + +Here's a basic example to get you started. + +```python +import asyncio +from mistralai_gcp import MistralGoogleCloud + +client = MistralGoogleCloud() + + +async def main() -> None: + res = await client.chat.complete_async( + model= "mistral-small-2402", + messages= [ + { + "content": "Hello there!", + "role": "user" + } + ] + ) + print(res) + +asyncio.run(main()) +``` + +The documentation for the GCP SDK is available [here](packages/mistralai_gcp/README.md). 
+ + + +## Available Resources and Operations + +### [models](docs/sdks/models/README.md) + +* [list](docs/sdks/models/README.md#list) - List Models +* [retrieve](docs/sdks/models/README.md#retrieve) - Retrieve Model +* [delete](docs/sdks/models/README.md#delete) - Delete Model +* [update](docs/sdks/models/README.md#update) - Update Fine Tuned Model +* [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model +* [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model + +### [files](docs/sdks/files/README.md) + +* [upload](docs/sdks/files/README.md#upload) - Upload File +* [list](docs/sdks/files/README.md#list) - List Files +* [retrieve](docs/sdks/files/README.md#retrieve) - Retrieve File +* [delete](docs/sdks/files/README.md#delete) - Delete File + + +### [fine_tuning.jobs](docs/sdks/jobs/README.md) + +* [list](docs/sdks/jobs/README.md#list) - Get Fine Tuning Jobs +* [create](docs/sdks/jobs/README.md#create) - Create Fine Tuning Job +* [get](docs/sdks/jobs/README.md#get) - Get Fine Tuning Job +* [cancel](docs/sdks/jobs/README.md#cancel) - Cancel Fine Tuning Job +* [start](docs/sdks/jobs/README.md#start) - Start Fine Tuning Job + +### [chat](docs/sdks/chat/README.md) + +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion +* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion + +### [fim](docs/sdks/fim/README.md) + +* [complete](docs/sdks/fim/README.md#complete) - Fim Completion +* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion + +### [agents](docs/sdks/agents/README.md) + +* [complete](docs/sdks/agents/README.md#complete) - Chat Completion +* [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion + +### [embeddings](docs/sdks/embeddings/README.md) + +* [create](docs/sdks/embeddings/README.md#create) - Embeddings + + + +## Server-sent event streaming + +[Server-sent events][mdn-sse] are used to stream content from certain +operations. 
These operations will expose the stream as [Generator][generator] that +can be consumed using a simple `for` loop. The loop will +terminate when the server no longer has any events to send and closes the +underlying connection. + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.chat.stream(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +]) + +if res is not None: + for event in res: + # handle event + print(event, flush=True) + +``` + +[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events +[generator]: https://wiki.python.org/moin/Generators + + + +## File uploads + +Certain SDK methods accept file objects as part of a request body or multi-part request. It is possible and typically recommended to upload files as a stream rather than reading the entire contents into memory. This avoids excessive memory consumption and potentially crashing with out-of-memory errors when working with very large files. The following example demonstrates how to attach a file stream to a request. + +> [!TIP] +> +> For endpoints that handle file uploads bytes arrays can also be used. However, using streams is recommended for large files. +> + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.upload(file={ + "file_name": "your_file_here", + "content": open("", "rb"), +}) + +if res is not None: + # handle response + pass + +``` + + + +## Retries + +Some of the endpoints in this SDK support retries. If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. 
However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK.
+
+To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call:
+```python
+from mistralai.utils import BackoffStrategy, RetryConfig
+from mistralai import Mistral
+import os
+
+s = Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+)
+
+
+res = s.models.list(
+    RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False))
+
+if res is not None:
+    # handle response
+    pass
->> python chat_no_streaming.py
 ```
+
+If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK:
+```python
+from mistralai.utils import BackoffStrategy, RetryConfig
+from mistralai import Mistral
+import os
+
+s = Mistral(
+    retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False),
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+)
+
+
+res = s.models.list()
+
+if res is not None:
+    # handle response
+    pass
+
+```
+
+
+
+## Error Handling
+
+Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type.
+ +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +### Example + +```python +from mistralai import Mistral, models +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = None +try: + res = s.models.list() + +except models.HTTPValidationError as e: + # handle exception + raise(e) +except models.SDKError as e: + # handle exception + raise(e) + +if res is not None: + # handle response + pass + +``` + + + +## Server Selection + +### Select Server by Name + +You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: + +| Name | Server | Variables | +| ------ | ------------------------ | --------- | +| `prod` | `https://api.mistral.ai` | None | + +#### Example + +```python +from mistralai import Mistral +import os + +s = Mistral( + server="prod", + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.list() + +if res is not None: + # handle response + pass + +``` + + +### Override Server URL Per-Client + +The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. 
For example: +```python +from mistralai import Mistral +import os + +s = Mistral( + server_url="https://api.mistral.ai", + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.list() + +if res is not None: + # handle response + pass + +``` + + + +## Custom HTTP Client + +The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. +Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocol's ensuring that the client has the necessary methods to make API calls. +This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly. 
+ +For example, you could specify a header for every request that this sdk makes as follows: +```python +from mistralai import Mistral +import httpx + +http_client = httpx.Client(headers={"x-custom-header": "someValue"}) +s = Mistral(client=http_client) +``` + +or you could wrap the client with your own custom logic: +```python +from mistralai import Mistral +from mistralai.httpclient import AsyncHttpClient +import httpx + +class CustomClient(AsyncHttpClient): + client: AsyncHttpClient + + def __init__(self, client: AsyncHttpClient): + self.client = client + + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + request.headers["Client-Level-Header"] = "added by client" + + return await self.client.send( + request, stream=stream, auth=auth, follow_redirects=follow_redirects + ) + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + return self.client.build_request( + method, + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + +s = Mistral(async_client=CustomClient(httpx.AsyncClient())) +``` + + + +## Authentication + +### 
Per-Client Security Schemes
+
+This SDK supports the following security scheme globally:
+
+| Name | Type | Scheme | Environment Variable |
+| --------- | ---- | ----------- | -------------------- |
+| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` |
+
+To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example:
+```python
+from mistralai import Mistral
+import os
+
+s = Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+)
+
+
+res = s.models.list()
+
+if res is not None:
+    # handle response
+    pass
+
+```
+
+
+
+## Debugging
+
+To emit debug logs for SDK requests and responses you can pass a logger object directly into your SDK object.
+
+```python
+from mistralai import Mistral
+import logging
+
+logging.basicConfig(level=logging.DEBUG)
+s = Mistral(debug_logger=logging.getLogger("mistralai"))
+```
+
+
+
+
+# Development
+
+## Contributions
+
+While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation.
+We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release.
diff --git a/USAGE.md b/USAGE.md
new file mode 100644
index 00000000..aace195c
--- /dev/null
+++ b/USAGE.md
@@ -0,0 +1,153 @@
+
+### Create Chat Completions
+
+This example shows how to create chat completions.
+
+```python
+# Synchronous Example
+from mistralai import Mistral
+import os
+
+s = Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+)
+
+
+res = s.chat.complete(model="mistral-small-latest", messages=[
+    {
+        "content": "Who is the best French painter? Answer in one short sentence.",
+        "role": "user",
+    },
+])
+
+if res is not None:
+    # handle response
+    pass
+```
+
+
+The same SDK client can also be used to make asynchronous requests by importing asyncio.
+```python
+# Asynchronous Example
+import asyncio
+from mistralai import Mistral
+import os
+
+async def main():
+    s = Mistral(
+        api_key=os.getenv("MISTRAL_API_KEY", ""),
+    )
+    res = await s.chat.complete_async(model="mistral-small-latest", messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ])
+    if res is not None:
+        # handle response
+        pass
+
+asyncio.run(main())
+```
+
+### Upload a file
+
+This example shows how to upload a file.
+
+```python
+# Synchronous Example
+from mistralai import Mistral
+import os
+
+s = Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+)
+
+
+res = s.files.upload(file={
+    "file_name": "your_file_here",
+    "content": open("", "rb"),
+})
+
+if res is not None:
+    # handle response
+    pass
+```
+
+
+The same SDK client can also be used to make asynchronous requests by importing asyncio.
+```python
+# Asynchronous Example
+import asyncio
+from mistralai import Mistral
+import os
+
+async def main():
+    s = Mistral(
+        api_key=os.getenv("MISTRAL_API_KEY", ""),
+    )
+    res = await s.files.upload_async(file={
+        "file_name": "your_file_here",
+        "content": open("", "rb"),
+    })
+    if res is not None:
+        # handle response
+        pass
+
+asyncio.run(main())
+```
+
+### Create Agents Completions
+
+This example shows how to create agents completions.
+
+```python
+# Synchronous Example
+from mistralai import Mistral
+import os
+
+s = Mistral(
+    api_key=os.getenv("MISTRAL_API_KEY", ""),
+)
+
+
+res = s.agents.complete(messages=[
+    {
+        "content": "Who is the best French painter? Answer in one short sentence.",
+        "role": "user",
+    },
+], agent_id="")
+
+if res is not None:
+    # handle response
+    pass
+```
+
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.agents.complete_async(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], agent_id="") + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md new file mode 100644 index 00000000..2d0d6721 --- /dev/null +++ b/docs/models/agentscompletionrequest.md @@ -0,0 +1,17 @@ +# AgentsCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/agentscompletionrequestmessages.md b/docs/models/agentscompletionrequestmessages.md new file mode 100644 index 00000000..946ef460 --- /dev/null +++ b/docs/models/agentscompletionrequestmessages.md @@ -0,0 +1,23 @@ +# AgentsCompletionRequestMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/agentscompletionrequeststop.md b/docs/models/agentscompletionrequeststop.md new file mode 100644 index 00000000..21ce6fb5 --- /dev/null +++ b/docs/models/agentscompletionrequeststop.md @@ -0,0 +1,19 @@ +# AgentsCompletionRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/agentscompletionrequesttoolchoice.md b/docs/models/agentscompletionrequesttoolchoice.md new file mode 100644 index 00000000..4d58fb77 --- /dev/null +++ b/docs/models/agentscompletionrequesttoolchoice.md @@ -0,0 +1,10 @@ +# AgentsCompletionRequestToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md new file mode 100644 index 00000000..c3187749 --- /dev/null +++ b/docs/models/agentscompletionstreamrequest.md @@ -0,0 +1,17 @@ +# AgentsCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequeststop.md b/docs/models/agentscompletionstreamrequeststop.md new file mode 100644 index 00000000..981005f3 --- /dev/null +++ b/docs/models/agentscompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# AgentsCompletionStreamRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/archiveftmodelout.md b/docs/models/archiveftmodelout.md new file mode 100644 index 00000000..c2e8f8ef --- /dev/null +++ b/docs/models/archiveftmodelout.md @@ -0,0 +1,10 @@ +# ArchiveFTModelOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/arguments.md b/docs/models/arguments.md new file mode 100644 index 00000000..2e54e27e --- /dev/null +++ b/docs/models/arguments.md @@ -0,0 +1,17 @@ +# Arguments + + +## Supported Types + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/assistantmessage.md b/docs/models/assistantmessage.md new file mode 100644 index 00000000..0c36cde9 --- /dev/null +++ b/docs/models/assistantmessage.md @@ -0,0 +1,11 @@ +# AssistantMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/assistantmessagerole.md b/docs/models/assistantmessagerole.md new file mode 100644 index 00000000..658229e7 --- /dev/null +++ b/docs/models/assistantmessagerole.md @@ -0,0 +1,8 @@ +# AssistantMessageRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/chatcompletionchoice.md b/docs/models/chatcompletionchoice.md new file mode 100644 index 00000000..c916fc06 --- /dev/null +++ b/docs/models/chatcompletionchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionChoice + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `finish_reason` | [models.FinishReason](../models/finishreason.md) | :heavy_check_mark: | N/A | stop | +| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md new file mode 100644 index 00000000..cfb3596a --- /dev/null +++ b/docs/models/chatcompletionrequest.md @@ -0,0 +1,20 @@ +# ChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
| mistral-small-latest | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionresponse.md b/docs/models/chatcompletionresponse.md new file mode 100644 index 00000000..ad376158 --- /dev/null +++ b/docs/models/chatcompletionresponse.md @@ -0,0 +1,13 @@ +# ChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md new file mode 100644 index 00000000..8c3a0bab --- /dev/null +++ b/docs/models/chatcompletionstreamrequest.md @@ -0,0 +1,20 @@ +# ChatCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequestmessages.md b/docs/models/chatcompletionstreamrequestmessages.md new file mode 100644 index 00000000..47990611 --- /dev/null +++ b/docs/models/chatcompletionstreamrequestmessages.md @@ -0,0 +1,29 @@ +# ChatCompletionStreamRequestMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/chatcompletionstreamrequeststop.md b/docs/models/chatcompletionstreamrequeststop.md new file mode 100644 index 00000000..a48460a9 --- /dev/null +++ b/docs/models/chatcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/chatcompletionstreamrequesttoolchoice.md b/docs/models/chatcompletionstreamrequesttoolchoice.md new file mode 100644 index 00000000..37a6e9bb --- /dev/null +++ b/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionStreamRequestToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/docs/models/checkpointout.md b/docs/models/checkpointout.md new file mode 100644 index 00000000..053592d2 --- /dev/null +++ b/docs/models/checkpointout.md @@ -0,0 +1,10 @@ +# CheckpointOut + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `metrics` | [models.MetricOut](../models/metricout.md) | :heavy_check_mark: | Metrics at the step number during the fine-tuning job. 
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). | | +| `step_number` | *int* | :heavy_check_mark: | The step number that the checkpoint was created at. | | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the checkpoint was created. | 1716963433 | \ No newline at end of file diff --git a/docs/models/completionchunk.md b/docs/models/completionchunk.md new file mode 100644 index 00000000..b8ae6a09 --- /dev/null +++ b/docs/models/completionchunk.md @@ -0,0 +1,13 @@ +# CompletionChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionevent.md b/docs/models/completionevent.md new file mode 100644 index 00000000..7a66e8fe --- /dev/null +++ b/docs/models/completionevent.md @@ -0,0 +1,8 @@ +# CompletionEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | 
+| `data` | [models.CompletionChunk](../models/completionchunk.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionresponsestreamchoice.md b/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..1532c25b --- /dev/null +++ b/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.CompletionResponseStreamChoiceFinishReason]](../models/completionresponsestreamchoicefinishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionresponsestreamchoicefinishreason.md b/docs/models/completionresponsestreamchoicefinishreason.md new file mode 100644 index 00000000..0fece473 --- /dev/null +++ b/docs/models/completionresponsestreamchoicefinishreason.md @@ -0,0 +1,11 @@ +# CompletionResponseStreamChoiceFinishReason + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `STOP` | stop | +| `LENGTH` | length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/docs/models/content.md b/docs/models/content.md new file mode 100644 index 00000000..a833dc2c --- /dev/null +++ b/docs/models/content.md @@ -0,0 +1,17 @@ +# Content + + +## Supported Types + +### `str` 
+ +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md new file mode 100644 index 00000000..64fc80d6 --- /dev/null +++ b/docs/models/contentchunk.md @@ -0,0 +1,9 @@ +# ContentChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/deletefileout.md b/docs/models/deletefileout.md new file mode 100644 index 00000000..4709cc49 --- /dev/null +++ b/docs/models/deletefileout.md @@ -0,0 +1,10 @@ +# DeleteFileOut + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `id` | *str* | :heavy_check_mark: | The ID of the deleted file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type that was deleted | file | +| `deleted` | *bool* | :heavy_check_mark: | The deletion status. | false | \ No newline at end of file diff --git a/docs/models/deletemodelout.md b/docs/models/deletemodelout.md new file mode 100644 index 00000000..5fd4df7a --- /dev/null +++ b/docs/models/deletemodelout.md @@ -0,0 +1,10 @@ +# DeleteModelOut + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The ID of the deleted model. 
| ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `object` | *Optional[str]* | :heavy_minus_sign: | The object type that was deleted | | +| `deleted` | *Optional[bool]* | :heavy_minus_sign: | The deletion status | true | \ No newline at end of file diff --git a/docs/models/deletemodelv1modelsmodeliddeleterequest.md b/docs/models/deletemodelv1modelsmodeliddeleterequest.md new file mode 100644 index 00000000..d9bc15fe --- /dev/null +++ b/docs/models/deletemodelv1modelsmodeliddeleterequest.md @@ -0,0 +1,8 @@ +# DeleteModelV1ModelsModelIDDeleteRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to delete. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/deltamessage.md b/docs/models/deltamessage.md new file mode 100644 index 00000000..4cb9e91e --- /dev/null +++ b/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | [OptionalNullable[models.ToolCall]](../models/toolcall.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/detailedjobout.md b/docs/models/detailedjobout.md new file mode 100644 index 00000000..f52d5cd2 --- /dev/null +++ b/docs/models/detailedjobout.md @@ -0,0 +1,26 @@ +# DetailedJobOut + + +## 
Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.TrainingParameters](../models/trainingparameters.md) | :heavy_check_mark: | N/A | +| `model` | [models.FineTuneableModel](../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. | +| `status` | [models.DetailedJobOutStatus](../models/detailedjoboutstatus.md) | :heavy_check_mark: | N/A | +| `job_type` | *str* | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `modified_at` | *int* | :heavy_check_mark: | N/A | +| `training_files` | List[*str*] | :heavy_check_mark: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.WandbIntegrationOut](../models/wandbintegrationout.md)] | :heavy_minus_sign: | N/A | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.GithubRepositoryOut](../models/githubrepositoryout.md)] | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `events` | 
List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | +| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/detailedjoboutstatus.md b/docs/models/detailedjoboutstatus.md new file mode 100644 index 00000000..955d5a26 --- /dev/null +++ b/docs/models/detailedjoboutstatus.md @@ -0,0 +1,17 @@ +# DetailedJobOutStatus + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md new file mode 100644 index 00000000..584a8bea --- /dev/null +++ b/docs/models/embeddingrequest.md @@ -0,0 +1,10 @@ +# EmbeddingRequest + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- | +| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | +| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. 
| \ No newline at end of file diff --git a/docs/models/embeddingresponse.md b/docs/models/embeddingresponse.md new file mode 100644 index 00000000..2bd85b4d --- /dev/null +++ b/docs/models/embeddingresponse.md @@ -0,0 +1,12 @@ +# EmbeddingResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `data` | List[[models.EmbeddingResponseData](../models/embeddingresponsedata.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/embeddingresponsedata.md b/docs/models/embeddingresponsedata.md new file mode 100644 index 00000000..20b50618 --- /dev/null +++ b/docs/models/embeddingresponsedata.md @@ -0,0 +1,10 @@ +# EmbeddingResponseData + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | embedding | +| `embedding` | List[*float*] | :heavy_minus_sign: | N/A | [
0.1,
0.2,
0.3
] | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | 0 | \ No newline at end of file diff --git a/docs/models/eventout.md b/docs/models/eventout.md new file mode 100644 index 00000000..c6f69ada --- /dev/null +++ b/docs/models/eventout.md @@ -0,0 +1,10 @@ +# EventOut + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | The name of the event. | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | +| `data` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/file.md b/docs/models/file.md new file mode 100644 index 00000000..37cc418f --- /dev/null +++ b/docs/models/file.md @@ -0,0 +1,10 @@ +# File + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `file_name` | *str* | :heavy_check_mark: | N/A | +| `content` | *Union[bytes, IO[bytes], io.BufferedReader]* | :heavy_check_mark: | N/A | +| `content_type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesdeletefilerequest.md b/docs/models/filesapiroutesdeletefilerequest.md new file mode 100644 index 00000000..1b02c2db --- /dev/null +++ b/docs/models/filesapiroutesdeletefilerequest.md @@ -0,0 +1,8 @@ +# FilesAPIRoutesDeleteFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesretrievefilerequest.md 
b/docs/models/filesapiroutesretrievefilerequest.md new file mode 100644 index 00000000..961bae1f --- /dev/null +++ b/docs/models/filesapiroutesretrievefilerequest.md @@ -0,0 +1,8 @@ +# FilesAPIRoutesRetrieveFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md new file mode 100644 index 00000000..1a6dfc6d --- /dev/null +++ b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md @@ -0,0 +1,9 @@ +# FilesAPIRoutesUploadFileMultiPartBodyParams + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | +| `purpose` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/fileschema.md b/docs/models/fileschema.md new file mode 100644 index 00000000..fd3ec080 --- /dev/null +++ b/docs/models/fileschema.md @@ -0,0 +1,16 @@ +# FileSchema + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `purpose` | *str* | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
| fine-tune | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/fimcompletionrequest.md b/docs/models/fimcompletionrequest.md new file mode 100644 index 00000000..b4b024ed --- /dev/null +++ b/docs/models/fimcompletionrequest.md @@ -0,0 +1,17 @@ +# FIMCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | \ No newline at end of file diff --git a/docs/models/fimcompletionrequeststop.md b/docs/models/fimcompletionrequeststop.md new file mode 100644 index 00000000..a0dbb00a --- /dev/null +++ b/docs/models/fimcompletionrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/fimcompletionresponse.md b/docs/models/fimcompletionresponse.md new file mode 100644 index 00000000..da786a1f --- /dev/null +++ b/docs/models/fimcompletionresponse.md @@ -0,0 +1,13 @@ +# FIMCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequest.md 
b/docs/models/fimcompletionstreamrequest.md new file mode 100644 index 00000000..acffb536 --- /dev/null +++ b/docs/models/fimcompletionstreamrequest.md @@ -0,0 +1,17 @@ +# FIMCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequeststop.md b/docs/models/fimcompletionstreamrequeststop.md new file mode 100644 index 00000000..5a9e2ff0 --- /dev/null +++ b/docs/models/fimcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionStreamRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/finetuneablemodel.md b/docs/models/finetuneablemodel.md new file mode 100644 index 00000000..cb429284 --- /dev/null +++ b/docs/models/finetuneablemodel.md @@ -0,0 +1,14 @@ +# FineTuneableModel + +The name of the model to fine-tune. + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `OPEN_MISTRAL_7B` | open-mistral-7b | +| `MISTRAL_SMALL_LATEST` | mistral-small-latest | +| `CODESTRAL_LATEST` | codestral-latest | +| `MISTRAL_LARGE_LATEST` | mistral-large-latest | +| `OPEN_MISTRAL_NEMO` | open-mistral-nemo | \ No newline at end of file diff --git a/docs/models/finishreason.md b/docs/models/finishreason.md new file mode 100644 index 00000000..2af53f6e --- /dev/null +++ b/docs/models/finishreason.md @@ -0,0 +1,12 @@ +# FinishReason + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `STOP` | stop | +| `LENGTH` | length | +| `MODEL_LENGTH` | model_length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/docs/models/ftmodelcapabilitiesout.md b/docs/models/ftmodelcapabilitiesout.md new file mode 100644 index 00000000..3cb52377 --- /dev/null +++ b/docs/models/ftmodelcapabilitiesout.md @@ -0,0 +1,11 @@ +# FTModelCapabilitiesOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ 
| +| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelout.md b/docs/models/ftmodelout.md new file mode 100644 index 00000000..8d081f6c --- /dev/null +++ b/docs/models/ftmodelout.md @@ -0,0 +1,19 @@ +# FTModelOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/function.md b/docs/models/function.md new file mode 100644 index 00000000..8af398f5 --- /dev/null +++ b/docs/models/function.md @@ -0,0 +1,10 @@ +# Function + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `parameters` | 
Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncall.md b/docs/models/functioncall.md new file mode 100644 index 00000000..7ccd90dc --- /dev/null +++ b/docs/models/functioncall.md @@ -0,0 +1,9 @@ +# FunctionCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.Arguments](../models/arguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryin.md b/docs/models/githubrepositoryin.md new file mode 100644 index 00000000..1a6be96c --- /dev/null +++ b/docs/models/githubrepositoryin.md @@ -0,0 +1,13 @@ +# GithubRepositoryIn + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `token` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryout.md b/docs/models/githubrepositoryout.md new file mode 100644 index 00000000..fbabf1e0 --- /dev/null +++ b/docs/models/githubrepositoryout.md @@ -0,0 +1,13 @@ +# GithubRepositoryOut + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `commit_id` | *str* | 
:heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/httpvalidationerror.md b/docs/models/httpvalidationerror.md new file mode 100644 index 00000000..63892430 --- /dev/null +++ b/docs/models/httpvalidationerror.md @@ -0,0 +1,10 @@ +# HTTPValidationError + +Validation Error + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `detail` | List[[models.ValidationError](../models/validationerror.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/inputs.md b/docs/models/inputs.md new file mode 100644 index 00000000..45264f9e --- /dev/null +++ b/docs/models/inputs.md @@ -0,0 +1,19 @@ +# Inputs + +Text to embed. 
+ + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/jobin.md b/docs/models/jobin.md new file mode 100644 index 00000000..6358e7a6 --- /dev/null +++ b/docs/models/jobin.md @@ -0,0 +1,15 @@ +# JobIn + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | [models.FineTuneableModel](../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. 
| +| `hyperparameters` | [models.TrainingParametersIn](../models/trainingparametersin.md) | :heavy_check_mark: | The fine-tuning hyperparameter settings used in a fine-tune job. | +| `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | +| `integrations` | List[[models.WandbIntegration](../models/wandbintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `repositories` | List[[models.GithubRepositoryIn](../models/githubrepositoryin.md)] | :heavy_minus_sign: | N/A | +| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. 
| \ No newline at end of file diff --git a/docs/models/jobmetadataout.md b/docs/models/jobmetadataout.md new file mode 100644 index 00000000..6218a161 --- /dev/null +++ b/docs/models/jobmetadataout.md @@ -0,0 +1,14 @@ +# JobMetadataOut + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `expected_duration_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `cost` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `cost_currency` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `train_tokens_per_step` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `train_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `data_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `estimated_start_time` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobout.md b/docs/models/jobout.md new file mode 100644 index 00000000..0b88fbac --- /dev/null +++ b/docs/models/jobout.md @@ -0,0 +1,24 @@ +# JobOut + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The ID of the job. | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.TrainingParameters](../models/trainingparameters.md) | :heavy_check_mark: | N/A | +| `model` | [models.FineTuneableModel](../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. | +| `status` | [models.Status](../models/status.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `job_type` | *str* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | +| `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | +| `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | +| `object` | *Optional[str]* | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| +| `integrations` | List[[models.WandbIntegrationOut](../models/wandbintegrationout.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | +| `repositories` | List[[models.GithubRepositoryOut](../models/githubrepositoryout.md)] | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md new file mode 100644 index 00000000..f9700df5 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningArchiveFineTunedModelRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to archive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md new file mode 100644 index 00000000..883cbac6 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningCancelFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to cancel. 
| \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md new file mode 100644 index 00000000..dd12c71c --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningCreateFineTuningJobResponse + +OK + + +## Supported Types + +### `models.JobOut` + +```python +value: models.JobOut = /* values here */ +``` + +### `models.LegacyJobMetadataOut` + +```python +value: models.LegacyJobMetadataOut = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md new file mode 100644 index 00000000..fde19800 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to analyse. 
| \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md new file mode 100644 index 00000000..9d25d79c --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md @@ -0,0 +1,16 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobsRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.QueryParamStatus]](../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. 
| +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md new file mode 100644 index 00000000..4429fe48 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningStartFineTuningJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `job_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md new file mode 100644 index 00000000..95c1734d --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to unarchive. 
| ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md new file mode 100644 index 00000000..6d93832e --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md @@ -0,0 +1,9 @@ +# JobsAPIRoutesFineTuningUpdateFineTunedModelRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `update_ft_model_in` | [models.UpdateFTModelIn](../models/updateftmodelin.md) | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/jobsout.md b/docs/models/jobsout.md new file mode 100644 index 00000000..d3b10a89 --- /dev/null +++ b/docs/models/jobsout.md @@ -0,0 +1,10 @@ +# JobsOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `total` | *int* | :heavy_check_mark: | N/A | +| `data` | List[[models.JobOut](../models/jobout.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/legacyjobmetadataout.md b/docs/models/legacyjobmetadataout.md new file mode 100644 index 00000000..04925baa --- /dev/null +++ b/docs/models/legacyjobmetadataout.md @@ -0,0 +1,19 @@ +# LegacyJobMetadataOut + + +## Fields + +| Field | Type | Required | Description | Example | +| 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `details` | *str* | :heavy_check_mark: | N/A | | +| `expected_duration_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | The approximated time (in seconds) for the fine-tuning process to complete. | 220 | +| `cost` | *OptionalNullable[float]* | :heavy_minus_sign: | The cost of the fine-tuning job. | 10 | +| `cost_currency` | *OptionalNullable[str]* | :heavy_minus_sign: | The currency used for the fine-tuning job cost. | EUR | +| `train_tokens_per_step` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of tokens consumed by one training step. | 131072 | +| `train_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The total number of tokens used during the fine-tuning process. | 1310720 | +| `data_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The total number of tokens in the training dataset. 
| 305375 | +| `estimated_start_time` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `deprecated` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | The number of complete passes through the entire training dataset. | 4.2922 | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | 10 | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/listfilesout.md b/docs/models/listfilesout.md new file mode 100644 index 00000000..3694739f --- /dev/null +++ b/docs/models/listfilesout.md @@ -0,0 +1,9 @@ +# ListFilesOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `data` | List[[models.FileSchema](../models/fileschema.md)] | :heavy_check_mark: | N/A | +| `object` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/loc.md b/docs/models/loc.md new file mode 100644 index 00000000..d6094ac2 --- /dev/null +++ b/docs/models/loc.md @@ -0,0 +1,17 @@ +# Loc + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/messages.md b/docs/models/messages.md new file mode 100644 index 00000000..1d394500 --- /dev/null +++ b/docs/models/messages.md @@ -0,0 +1,29 @@ +# Messages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python 
+value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/metricout.md b/docs/models/metricout.md new file mode 100644 index 00000000..3c552bac --- /dev/null +++ b/docs/models/metricout.md @@ -0,0 +1,12 @@ +# MetricOut + +Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `train_loss` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `valid_loss` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `valid_mean_token_accuracy` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md new file mode 100644 index 00000000..89020970 --- /dev/null +++ b/docs/models/modelcapabilities.md @@ -0,0 +1,11 @@ +# ModelCapabilities + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelcard.md b/docs/models/modelcard.md new file mode 100644 index 00000000..87951412 --- /dev/null +++ b/docs/models/modelcard.md @@ -0,0 +1,19 @@ +# ModelCard + + +## Fields + +| Field | Type | Required | Description | +| 
-------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `capabilities` | [models.ModelCapabilities](../models/modelcapabilities.md) | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `owned_by` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `root` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modellist.md b/docs/models/modellist.md new file mode 100644 index 00000000..e3fefeef --- /dev/null +++ b/docs/models/modellist.md @@ -0,0 +1,9 @@ +# ModelList + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `data` | List[[models.ModelCard](../models/modelcard.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/queryparamstatus.md b/docs/models/queryparamstatus.md new file mode 100644 index 00000000..dcd20908 --- /dev/null +++ 
b/docs/models/queryparamstatus.md @@ -0,0 +1,19 @@ +# QueryParamStatus + +The current job state to filter on. When set, the other results are not displayed. + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/responseformat.md b/docs/models/responseformat.md new file mode 100644 index 00000000..2704eab4 --- /dev/null +++ b/docs/models/responseformat.md @@ -0,0 +1,8 @@ +# ResponseFormat + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseformats.md b/docs/models/responseformats.md new file mode 100644 index 00000000..ce35fbb3 --- /dev/null +++ b/docs/models/responseformats.md @@ -0,0 +1,11 @@ +# ResponseFormats + +An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
+ + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | \ No newline at end of file diff --git a/docs/models/retrievefileout.md b/docs/models/retrievefileout.md new file mode 100644 index 00000000..1a624576 --- /dev/null +++ b/docs/models/retrievefileout.md @@ -0,0 +1,16 @@ +# RetrieveFileOut + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `purpose` | *str* | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
| fine-tune | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/retrievemodelv1modelsmodelidgetrequest.md b/docs/models/retrievemodelv1modelsmodelidgetrequest.md new file mode 100644 index 00000000..f1280f88 --- /dev/null +++ b/docs/models/retrievemodelv1modelsmodelidgetrequest.md @@ -0,0 +1,8 @@ +# RetrieveModelV1ModelsModelIDGetRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to retrieve. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | \ No newline at end of file diff --git a/docs/models/role.md b/docs/models/role.md new file mode 100644 index 00000000..affca78d --- /dev/null +++ b/docs/models/role.md @@ -0,0 +1,8 @@ +# Role + + +## Values + +| Name | Value | +| -------- | -------- | +| `SYSTEM` | system | \ No newline at end of file diff --git a/docs/models/sampletype.md b/docs/models/sampletype.md new file mode 100644 index 00000000..888fd63d --- /dev/null +++ b/docs/models/sampletype.md @@ -0,0 +1,9 @@ +# SampleType + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `PRETRAIN` | pretrain | +| `INSTRUCT` | instruct | \ No newline at end of file diff --git a/docs/models/security.md b/docs/models/security.md new file mode 100644 index 00000000..2e0839d0 --- /dev/null +++ b/docs/models/security.md @@ -0,0 +1,8 @@ +# Security + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `api_key` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/source.md b/docs/models/source.md new file mode 100644 index 
00000000..ef055622 --- /dev/null +++ b/docs/models/source.md @@ -0,0 +1,9 @@ +# Source + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `UPLOAD` | upload | +| `REPOSITORY` | repository | \ No newline at end of file diff --git a/docs/models/status.md b/docs/models/status.md new file mode 100644 index 00000000..5e22eb73 --- /dev/null +++ b/docs/models/status.md @@ -0,0 +1,19 @@ +# Status + +The current status of the fine-tuning job. + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/stop.md b/docs/models/stop.md new file mode 100644 index 00000000..ba40ca83 --- /dev/null +++ b/docs/models/stop.md @@ -0,0 +1,19 @@ +# Stop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/systemmessage.md b/docs/models/systemmessage.md new file mode 100644 index 00000000..7f827984 --- /dev/null +++ b/docs/models/systemmessage.md @@ -0,0 +1,9 @@ +# SystemMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/textchunk.md b/docs/models/textchunk.md new file mode 100644 index 00000000..34e4dd6f --- /dev/null +++ b/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/tool.md b/docs/models/tool.md new file mode 100644 index 00000000..291394c0 --- /dev/null +++ b/docs/models/tool.md @@ -0,0 +1,9 @@ +# Tool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md new file mode 100644 index 
00000000..bd2dc9ff --- /dev/null +++ b/docs/models/toolcall.md @@ -0,0 +1,10 @@ +# ToolCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolchoice.md b/docs/models/toolchoice.md new file mode 100644 index 00000000..b84f51f6 --- /dev/null +++ b/docs/models/toolchoice.md @@ -0,0 +1,10 @@ +# ToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/docs/models/toolmessage.md b/docs/models/toolmessage.md new file mode 100644 index 00000000..364339e1 --- /dev/null +++ b/docs/models/toolmessage.md @@ -0,0 +1,11 @@ +# ToolMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolmessagerole.md b/docs/models/toolmessagerole.md new file mode 100644 index 00000000..c24e59c0 --- /dev/null +++ b/docs/models/toolmessagerole.md @@ -0,0 +1,8 @@ +# ToolMessageRole + + +## Values + +| 
Name | Value | +| ------ | ------ | +| `TOOL` | tool | \ No newline at end of file diff --git a/docs/models/trainingfile.md b/docs/models/trainingfile.md new file mode 100644 index 00000000..cde218bb --- /dev/null +++ b/docs/models/trainingfile.md @@ -0,0 +1,9 @@ +# TrainingFile + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/trainingparameters.md b/docs/models/trainingparameters.md new file mode 100644 index 00000000..4356c33f --- /dev/null +++ b/docs/models/trainingparameters.md @@ -0,0 +1,11 @@ +# TrainingParameters + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/trainingparametersin.md b/docs/models/trainingparametersin.md new file mode 100644 index 00000000..afc094d7 --- /dev/null +++ b/docs/models/trainingparametersin.md @@ -0,0 +1,13 @@ +# TrainingParametersIn + +The fine-tuning hyperparameter settings used in a fine-tune job. 
+ + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | +| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. 
| +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/unarchiveftmodelout.md b/docs/models/unarchiveftmodelout.md new file mode 100644 index 00000000..aa26792c --- /dev/null +++ b/docs/models/unarchiveftmodelout.md @@ -0,0 +1,10 @@ +# UnarchiveFTModelOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updateftmodelin.md b/docs/models/updateftmodelin.md new file mode 100644 index 00000000..4e55b1a7 --- /dev/null +++ b/docs/models/updateftmodelin.md @@ -0,0 +1,9 @@ +# UpdateFTModelIn + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/uploadfileout.md b/docs/models/uploadfileout.md new file mode 100644 index 00000000..7eef9bc4 --- /dev/null +++ b/docs/models/uploadfileout.md @@ -0,0 +1,16 @@ +# UploadFileOut + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `purpose` | *str* | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. | fine-tune | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/usageinfo.md b/docs/models/usageinfo.md new file mode 100644 index 00000000..9f56a3ae --- /dev/null +++ b/docs/models/usageinfo.md @@ -0,0 +1,10 @@ +# UsageInfo + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | +| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | +| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | +| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file diff --git a/docs/models/usermessage.md b/docs/models/usermessage.md new file mode 100644 index 00000000..3d96f1cd --- /dev/null +++ b/docs/models/usermessage.md @@ -0,0 +1,9 @@ +# UserMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | 
---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/usermessagecontent.md b/docs/models/usermessagecontent.md new file mode 100644 index 00000000..86ebd18f --- /dev/null +++ b/docs/models/usermessagecontent.md @@ -0,0 +1,17 @@ +# UserMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: List[models.TextChunk] = /* values here */ +``` + diff --git a/docs/models/usermessagerole.md b/docs/models/usermessagerole.md new file mode 100644 index 00000000..171124e4 --- /dev/null +++ b/docs/models/usermessagerole.md @@ -0,0 +1,8 @@ +# UserMessageRole + + +## Values + +| Name | Value | +| ------ | ------ | +| `USER` | user | \ No newline at end of file diff --git a/docs/models/utils/retryconfig.md b/docs/models/utils/retryconfig.md new file mode 100644 index 00000000..69dd549e --- /dev/null +++ b/docs/models/utils/retryconfig.md @@ -0,0 +1,24 @@ +# RetryConfig + +Allows customizing the default retry configuration. Only usable with methods that mention they support retries. + +## Fields + +| Name | Type | Description | Example | +| ------------------------- | ----------------------------------- | --------------------------------------- | --------- | +| `strategy` | `*str*` | The retry strategy to use. | `backoff` | +| `backoff` | [BackoffStrategy](#backoffstrategy) | Configuration for the backoff strategy. | | +| `retry_connection_errors` | `*bool*` | Whether to retry on connection errors. | `true` | + +## BackoffStrategy + +The backoff strategy allows retrying a request with an exponential backoff between each retry. 
+ +### Fields + +| Name | Type | Description | Example | +| ------------------ | --------- | ----------------------------------------- | -------- | +| `initial_interval` | `*int*` | The initial interval in milliseconds. | `500` | +| `max_interval` | `*int*` | The maximum interval in milliseconds. | `60000` | +| `exponent` | `*float*` | The exponent to use for the backoff. | `1.5` | +| `max_elapsed_time` | `*int*` | The maximum elapsed time in milliseconds. | `300000` | \ No newline at end of file diff --git a/docs/models/validationerror.md b/docs/models/validationerror.md new file mode 100644 index 00000000..7a1654a1 --- /dev/null +++ b/docs/models/validationerror.md @@ -0,0 +1,10 @@ +# ValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `loc` | List[[models.Loc](../models/loc.md)] | :heavy_check_mark: | N/A | +| `msg` | *str* | :heavy_check_mark: | N/A | +| `type` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegration.md b/docs/models/wandbintegration.md new file mode 100644 index 00000000..d48e501b --- /dev/null +++ b/docs/models/wandbintegration.md @@ -0,0 +1,12 @@ +# WandbIntegration + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | +| `api_key` | *str* | :heavy_check_mark: | The WandB API key to use for authentication. 
| +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationout.md new file mode 100644 index 00000000..a51067bb --- /dev/null +++ b/docs/models/wandbintegrationout.md @@ -0,0 +1,11 @@ +# WandbIntegrationOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md new file mode 100644 index 00000000..e8740f3a --- /dev/null +++ b/docs/sdks/agents/README.md @@ -0,0 +1,117 @@ +# Agents +(*agents*) + +## Overview + +Agents API. + +### Available Operations + +* [complete](#complete) - Chat Completion +* [stream](#stream) - Stream Agents completion + +## complete + +Chat Completion + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.agents.complete(messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, +], agent_id="") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate 
completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.agents.stream(model="codestral-2405", prompt="def", suffix="return a+b") + +if res is not None: + for event in res: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md new file mode 100644 index 00000000..e941104c --- /dev/null +++ b/docs/sdks/chat/README.md @@ -0,0 +1,128 @@ +# Chat +(*chat*) + +## Overview + +Chat Completion API. + +### Available Operations + +* [complete](#complete) - Chat Completion +* [stream](#stream) - Stream chat completion + +## complete + +Chat Completion + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.chat.complete(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, +]) + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.chat.stream(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, +]) + +if res is not None: + for event in res: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
| mistral-small-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md new file mode 100644 index 00000000..ee46f9b2 --- /dev/null +++ b/docs/sdks/embeddings/README.md @@ -0,0 +1,53 @@ +# Embeddings +(*embeddings*) + +## Overview + +Embeddings API. + +### Available Operations + +* [create](#create) - Embeddings + +## create + +Embeddings + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.embeddings.create(inputs="", model="") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | +| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + + +### Response + +**[models.EmbeddingResponse](../../models/embeddingresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md new file mode 100644 index 00000000..897556fe --- /dev/null +++ b/docs/sdks/files/README.md @@ -0,0 +1,179 @@ +# Files +(*files*) + +## Overview + +Files API + +### Available Operations + +* [upload](#upload) - Upload File +* [list](#list) - List Files +* [retrieve](#retrieve) - Retrieve File +* [delete](#delete) - Delete File + +## upload + +Upload a file that can be used across various endpoints. + +The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + +Please contact us if you need to increase these storage limits. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.upload(file={ + "file_name": "your_file_here", + "content": open("", "rb"), +}) + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [models.File](../../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name, format your request as follows:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.UploadFileOut](../../models/uploadfileout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## list + +Returns a list of files that belong to the user's organization. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.list() + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.ListFilesOut](../../models/listfilesout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## retrieve + +Returns information about a specific file. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.retrieve(file_id="") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.RetrieveFileOut](../../models/retrievefileout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## delete + +Delete a file. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.files.delete(file_id="") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + + +### Response + +**[models.DeleteFileOut](../../models/deletefileout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md new file mode 100644 index 00000000..784b5213 --- /dev/null +++ b/docs/sdks/fim/README.md @@ -0,0 +1,112 @@ +# Fim +(*fim*) + +## Overview + +Fill-in-the-middle API. + +### Available Operations + +* [complete](#complete) - Fim Completion +* [stream](#stream) - Stream fim completion + +## complete + +FIM completion. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.FIMCompletionResponse](../../models/fimcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") + +if res is not None: + for event in res: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/finetuning/README.md b/docs/sdks/finetuning/README.md new file mode 100644 index 00000000..2b357f2d --- /dev/null +++ b/docs/sdks/finetuning/README.md @@ -0,0 +1,5 @@ +# FineTuning +(*fine_tuning*) + +### Available Operations + diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md new file mode 100644 index 00000000..3366c731 --- /dev/null +++ b/docs/sdks/jobs/README.md @@ -0,0 +1,225 @@ +# Jobs +(*fine_tuning.jobs*) + +### Available Operations + +* [list](#list) - Get Fine Tuning Jobs +* [create](#create) - Create Fine Tuning Job +* [get](#get) - Get Fine Tuning Job +* [cancel](#cancel) - Cancel Fine Tuning Job +* [start](#start) - Start Fine Tuning Job + +## list + +Get a list of fine-tuning jobs for your organization and user. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fine_tuning.jobs.list() + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.QueryParamStatus]](../../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. 
| +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.JobsOut](../../models/jobsout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## create + +Create a new fine-tuning job, it will be queued for processing. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fine_tuning.jobs.create(model="codestral-latest", hyperparameters={}) + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | [models.FineTuneableModel](../../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. | +| `hyperparameters` | [models.TrainingParametersIn](../../models/trainingparametersin.md) | :heavy_check_mark: | The fine-tuning hyperparameter settings used in a fine-tune job. | +| `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | +| `integrations` | List[[models.WandbIntegration](../../models/wandbintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. 
| +| `repositories` | List[[models.GithubRepositoryIn](../../models/githubrepositoryin.md)] | :heavy_minus_sign: | N/A | +| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse](../../models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## get + +Get a fine-tuned job details by its UUID. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fine_tuning.jobs.get(job_id="b18d8d81-fd7b-4764-a31e-475cb1f36591") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to analyse. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.DetailedJobOut](../../models/detailedjobout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## cancel + +Request the cancellation of a fine tuning job. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fine_tuning.jobs.cancel(job_id="03fa7112-315a-4072-a9f2-43f3f1ec962e") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | The ID of the job to cancel. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.DetailedJobOut](../../models/detailedjobout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## start + +Request the start of a validated fine tuning job. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.fine_tuning.jobs.start(job_id="0eb0f807-fb9f-4e46-9c13-4e257df6e1ba") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.DetailedJobOut](../../models/detailedjobout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/mistral/README.md b/docs/sdks/mistral/README.md new file mode 100644 index 00000000..d4e985eb --- /dev/null +++ b/docs/sdks/mistral/README.md @@ -0,0 +1,9 @@ +# Mistral SDK + + +## Overview + +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. 
+ +### Available Operations + diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md new file mode 100644 index 00000000..051aa53c --- /dev/null +++ b/docs/sdks/models/README.md @@ -0,0 +1,259 @@ +# Models +(*models*) + +## Overview + +Model Management API + +### Available Operations + +* [list](#list) - List Models +* [retrieve](#retrieve) - Retrieve Model +* [delete](#delete) - Delete Model +* [update](#update) - Update Fine Tuned Model +* [archive](#archive) - Archive Fine Tuned Model +* [unarchive](#unarchive) - Unarchive Fine Tuned Model + +## list + +List all models available to the user. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.list() + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + + +### Response + +**[models.ModelList](../../models/modellist.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## retrieve + +Retrieve a model information. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to retrieve. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ModelCard](../../models/modelcard.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## delete + +Delete a fine-tuned model. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.delete(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to delete. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.DeleteModelOut](../../models/deletemodelout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +## update + +Update a model name or description. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.update(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.FTModelOut](../../models/ftmodelout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## archive + +Archive a fine-tuned model. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.archive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to archive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ArchiveFTModelOut](../../models/archiveftmodelout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | + +## unarchive + +Un-archive a fine-tuned model. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + + +res = s.models.unarchive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to unarchive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + + +### Response + +**[models.UnarchiveFTModelOut](../../models/unarchiveftmodelout.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4xx-5xx | */* | diff --git a/examples/async_agents_no_streaming.py b/examples/async_agents_no_streaming.py new file mode 100755 index 00000000..799333b4 --- /dev/null +++ b/examples/async_agents_no_streaming.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai import Mistral +from mistralai.models import UserMessage + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + chat_response = await client.agents.complete_async( + agent_id="", + messages=[UserMessage(content="What is the best French cheese?")], + ) + + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_chat_no_streaming.py b/examples/async_chat_no_streaming.py index 0eef8c3d..9448f09d 100755 --- a/examples/async_chat_no_streaming.py +++ b/examples/async_chat_no_streaming.py @@ -3,25 +3,23 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import UserMessage async def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-tiny" - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) - chat_response = await client.chat( + chat_response = await client.chat.complete_async( model=model, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[UserMessage(content="What is the best French cheese?")], ) print(chat_response.choices[0].message.content) - await client.close() - if __name__ == "__main__": asyncio.run(main()) diff --git a/examples/async_chat_with_streaming.py 
b/examples/async_chat_with_streaming.py index f26b73d6..736c47a0 100755 --- a/examples/async_chat_with_streaming.py +++ b/examples/async_chat_with_streaming.py @@ -3,30 +3,29 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import UserMessage async def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-tiny" - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) print("Chat response:") - response = client.chat_stream( + response = await client.chat.stream_async( model=model, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[ + UserMessage(content="What is the best French cheese?give the best 50") + ], ) - async for chunk in response: - if chunk.choices[0].delta.content is not None: - print(chunk.choices[0].delta.content, end="") + if chunk.data.choices[0].delta.content is not None: + print(chunk.data.choices[0].delta.content, end="") print("\n") - await client.close() - if __name__ == "__main__": asyncio.run(main()) diff --git a/examples/completion.py b/examples/async_code_completion.py similarity index 78% rename from examples/completion.py rename to examples/async_code_completion.py index f76f0f11..a6bc5717 100644 --- a/examples/completion.py +++ b/examples/async_code_completion.py @@ -3,18 +3,18 @@ import asyncio import os -from mistralai.client import MistralClient +from mistralai import Mistral async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) prompt = "def fibonacci(n: int):" suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" - response = client.completion( + response = await client.fim.complete_async( model="codestral-latest", prompt=prompt, suffix=suffix, diff --git a/examples/async_completion.py b/examples/async_completion.py 
deleted file mode 100644 index 6aa22b4b..00000000 --- a/examples/async_completion.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python - -import asyncio -import os - -from mistralai.async_client import MistralAsyncClient - - -async def main(): - api_key = os.environ["MISTRAL_API_KEY"] - - client = MistralAsyncClient(api_key=api_key) - - prompt = "def fibonacci(n: int):" - suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" - - response = await client.completion( - model="codestral-latest", - prompt=prompt, - suffix=suffix, - ) - - print( - f""" -{prompt} -{response.choices[0].message.content} -{suffix} -""" - ) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/examples/async_embeddings.py b/examples/async_embeddings.py index a7ecd475..781e87af 100755 --- a/examples/async_embeddings.py +++ b/examples/async_embeddings.py @@ -3,17 +3,17 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient +from mistralai import Mistral async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) - embeddings_batch_response = await client.embeddings( + embeddings_batch_response = await client.embeddings.create_async( model="mistral-embed", - input=["What is the best French cheese?"] * 10, + inputs=["What is the best French cheese?"] * 10, ) print(embeddings_batch_response) diff --git a/examples/async_files.py b/examples/async_files.py index 1022b7a7..64c99484 100644 --- a/examples/async_files.py +++ b/examples/async_files.py @@ -3,28 +3,34 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient +from mistralai import Mistral +from mistralai.models import File async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) # Create a new file - created_file = await client.files.create(file=open("examples/file.jsonl", "rb").read()) + 
created_file = await client.files.upload_async( + file=File( + file_name="training_file.jsonl", + content=open("examples/file.jsonl", "rb").read(), + ) + ) print(created_file) # List files - files = await client.files.list() + files = await client.files.list_async() print(files) # Retrieve a file - retrieved_file = await client.files.retrieve(created_file.id) + retrieved_file = await client.files.retrieve_async(file_id=created_file.id) print(retrieved_file) # Delete a file - deleted_file = await client.files.delete(created_file.id) + deleted_file = await client.files.delete_async(file_id=created_file.id) print(deleted_file) diff --git a/examples/async_jobs.py b/examples/async_jobs.py index 792735d9..b1f9e3bf 100644 --- a/examples/async_jobs.py +++ b/examples/async_jobs.py @@ -3,27 +3,31 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient -from mistralai.models.jobs import TrainingParameters +from mistralai import Mistral +from mistralai.models import File, TrainingParametersIn async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) # Create new files with open("examples/file.jsonl", "rb") as f: - training_file = await client.files.create(file=f) + training_file = await client.files.upload_async( + file=File(file_name="file.jsonl", content=f) + ) with open("examples/validation_file.jsonl", "rb") as f: - validation_file = await client.files.create(file=f) + validation_file = await client.files.upload_async( + file=File(file_name="validation_file.jsonl", content=f) + ) # Create a new job - created_job = await client.jobs.create( + created_job = await client.fine_tuning.jobs.create_async( model="open-mistral-7b", - training_files=[training_file.id], + training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParameters( + hyperparameters=TrainingParametersIn( training_steps=1, 
learning_rate=0.0001, ), @@ -31,20 +35,20 @@ async def main(): print(created_job) # List jobs - jobs = await client.jobs.list(page=0, page_size=5) + jobs = await client.fine_tuning.jobs.list_async(page=0, page_size=5) print(jobs) # Retrieve a job - retrieved_job = await client.jobs.retrieve(created_job.id) + retrieved_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) print(retrieved_job) # Cancel a job - canceled_job = await client.jobs.cancel(created_job.id) + canceled_job = await client.fine_tuning.jobs.cancel_async(job_id=created_job.id) print(canceled_job) # Delete files - await client.files.delete(training_file.id) - await client.files.delete(validation_file.id) + await client.files.delete_async(file_id=training_file.id) + await client.files.delete_async(file_id=validation_file.id) if __name__ == "__main__": diff --git a/examples/async_jobs_chat.py b/examples/async_jobs_chat.py index e5019148..7e0d0577 100644 --- a/examples/async_jobs_chat.py +++ b/examples/async_jobs_chat.py @@ -3,27 +3,31 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient -from mistralai.models.jobs import TrainingParameters +from mistralai import Mistral +from mistralai.models import File, TrainingParametersIn POLLING_INTERVAL = 10 async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) # Create new files with open("examples/file.jsonl", "rb") as f: - training_file = await client.files.create(file=f) + training_file = await client.files.upload_async( + file=File(file_name="file.jsonl", content=f) + ) with open("examples/validation_file.jsonl", "rb") as f: - validation_file = await client.files.create(file=f) + validation_file = await client.files.upload_async( + file=File(file_name="validation_file.jsonl", content=f) + ) # Create a new job - created_job = await client.jobs.create( + created_job = await client.fine_tuning.jobs.create_async( 
model="open-mistral-7b", - training_files=[training_file.id], + training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParameters( + hyperparameters=TrainingParametersIn( training_steps=1, learning_rate=0.0001, ), @@ -31,7 +35,7 @@ async def main(): print(created_job) while created_job.status in ["RUNNING", "QUEUED"]: - created_job = await client.jobs.retrieve(created_job.id) + created_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) print(f"Job is {created_job.status}, waiting {POLLING_INTERVAL} seconds") await asyncio.sleep(POLLING_INTERVAL) @@ -40,10 +44,13 @@ async def main(): return # Chat with model - response = await client.chat( + response = await client.chat.complete_async( model=created_job.fine_tuned_model, messages=[ - {"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, + { + "role": "system", + "content": "Marv is a factual chatbot that is also sarcastic.", + }, {"role": "user", "content": "What is the capital of France ?"}, ], ) @@ -51,11 +58,11 @@ async def main(): print(response.choices[0].message.content) # Delete files - await client.files.delete(training_file.id) - await client.files.delete(validation_file.id) + await client.files.delete_async(file_id=training_file.id) + await client.files.delete_async(file_id=validation_file.id) # Delete fine-tuned model - await client.delete_model(created_job.fine_tuned_model) + await client.models.delete_async(model_id=created_job.fine_tuned_model) if __name__ == "__main__": diff --git a/examples/async_list_models.py b/examples/async_list_models.py index b6de5d51..4243d862 100755 --- a/examples/async_list_models.py +++ b/examples/async_list_models.py @@ -3,15 +3,15 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient +from mistralai import Mistral async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client 
= Mistral(api_key=api_key) - list_models_response = await client.list_models() + list_models_response = await client.models.list_async() print(list_models_response) diff --git a/examples/azure/chat_no_streaming.py.py b/examples/azure/chat_no_streaming.py.py new file mode 100644 index 00000000..485b594e --- /dev/null +++ b/examples/azure/chat_no_streaming.py.py @@ -0,0 +1,16 @@ +import os + +from mistralai_azure import MistralAzure + +client = MistralAzure( + azure_api_key=os.environ["AZURE_API_KEY"], + azure_endpoint=os.environ["AZURE_ENDPOINT"], +) + +res = client.chat.complete( + messages=[ + {"role": "user", "content": "What is the capital of France?"}, + ], + # you don't need model as it will always be "azureai" +) +print(res.choices[0].message.content) diff --git a/examples/chat_no_streaming.py b/examples/chat_no_streaming.py index 797b58d0..72506dd9 100755 --- a/examples/chat_no_streaming.py +++ b/examples/chat_no_streaming.py @@ -2,19 +2,19 @@ import os -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import UserMessage def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-tiny" - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) - chat_response = client.chat( + chat_response = client.chat.complete( model=model, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[UserMessage(content="What is the best French cheese?")], ) print(chat_response.choices[0].message.content) diff --git a/examples/chat_with_streaming.py b/examples/chat_with_streaming.py index bc818d3d..5fc75038 100755 --- a/examples/chat_with_streaming.py +++ b/examples/chat_with_streaming.py @@ -2,22 +2,22 @@ import os -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import UserMessage def main(): api_key = 
os.environ["MISTRAL_API_KEY"] model = "mistral-tiny" - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) - for chunk in client.chat_stream( + for chunk in client.chat.stream( model=model, - messages=[ChatMessage(role="user", content="What is the best French cheese?")], + messages=[UserMessage(content="What is the best French cheese?")], ): - if chunk.choices[0].delta.content is not None: - print(chunk.choices[0].delta.content, end="") + + print(chunk.data.choices[0].delta.content) if __name__ == "__main__": diff --git a/examples/chatbot_with_streaming.py b/examples/chatbot_with_streaming.py index 4304551f..bf0f6381 100755 --- a/examples/chatbot_with_streaming.py +++ b/examples/chatbot_with_streaming.py @@ -8,8 +8,8 @@ import readline import sys -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import AssistantMessage, SystemMessage, UserMessage MODEL_LIST = [ "mistral-small-latest", @@ -52,7 +52,9 @@ def completer(text, state): options = find_completions(COMMAND_LIST, line_parts[:-1]) try: - return [option for option in options if option.startswith(line_parts[-1])][state] + return [option for option in options if option.startswith(line_parts[-1])][ + state + ] except IndexError: return None @@ -64,10 +66,12 @@ def completer(text, state): class ChatBot: - def __init__(self, api_key, model, system_message=None, temperature=DEFAULT_TEMPERATURE): + def __init__( + self, api_key, model, system_message=None, temperature=DEFAULT_TEMPERATURE + ): if not api_key: raise ValueError("An API key must be provided to use the Mistral API.") - self.client = MistralClient(api_key=api_key) + self.client = Mistral(api_key=api_key) self.model = model self.temperature = temperature self.system_message = system_message @@ -88,11 +92,13 @@ def opening_instructions(self): def new_chat(self): print("") - print(f"Starting new chat with model: {self.model}, 
temperature: {self.temperature}") + print( + f"Starting new chat with model: {self.model}, temperature: {self.temperature}" + ) print("") self.messages = [] if self.system_message: - self.messages.append(ChatMessage(role="system", content=self.system_message)) + self.messages.append(SystemMessage(content=self.system_message)) def switch_model(self, input): model = self.get_arguments(input) @@ -138,13 +144,17 @@ def run_inference(self, content): print("MISTRAL:") print("") - self.messages.append(ChatMessage(role="user", content=content)) + self.messages.append(UserMessage(content=content)) assistant_response = "" - logger.debug(f"Running inference with model: {self.model}, temperature: {self.temperature}") + logger.debug( + f"Running inference with model: {self.model}, temperature: {self.temperature}" + ) logger.debug(f"Sending messages: {self.messages}") - for chunk in self.client.chat_stream(model=self.model, temperature=self.temperature, messages=self.messages): - response = chunk.choices[0].delta.content + for chunk in self.client.chat.stream( + model=self.model, temperature=self.temperature, messages=self.messages + ): + response = chunk.data.choices[0].delta.content if response is not None: print(response, end="", flush=True) assistant_response += response @@ -152,7 +162,7 @@ def run_inference(self, content): print("", flush=True) if assistant_response: - self.messages.append(ChatMessage(role="assistant", content=assistant_response)) + self.messages.append(AssistantMessage(content=assistant_response)) logger.debug(f"Current messages: {self.messages}") def get_command(self, input): @@ -204,7 +214,9 @@ def exit(self): if __name__ == "__main__": - parser = argparse.ArgumentParser(description="A simple chatbot using the Mistral API") + parser = argparse.ArgumentParser( + description="A simple chatbot using the Mistral API" + ) parser.add_argument( "--api-key", default=os.environ.get("MISTRAL_API_KEY"), @@ -217,7 +229,9 @@ def exit(self): default=DEFAULT_MODEL, 
help="Model for chat inference. Choices are %(choices)s. Defaults to %(default)s", ) - parser.add_argument("-s", "--system-message", help="Optional system message to prepend.") + parser.add_argument( + "-s", "--system-message", help="Optional system message to prepend." + ) parser.add_argument( "-t", "--temperature", @@ -225,7 +239,9 @@ def exit(self): default=DEFAULT_TEMPERATURE, help="Optional temperature for chat inference. Defaults to %(default)s", ) - parser.add_argument("-d", "--debug", action="store_true", help="Enable debug logging") + parser.add_argument( + "-d", "--debug", action="store_true", help="Enable debug logging" + ) args = parser.parse_args() diff --git a/examples/code_completion.py b/examples/code_completion.py index f76f0f11..f3d70a68 100644 --- a/examples/code_completion.py +++ b/examples/code_completion.py @@ -3,27 +3,27 @@ import asyncio import os -from mistralai.client import MistralClient +from mistralai import Mistral async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) prompt = "def fibonacci(n: int):" suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" - response = client.completion( + response = client.fim.complete( model="codestral-latest", prompt=prompt, suffix=suffix, ) - print( f""" {prompt} {response.choices[0].message.content} +{response.choices[0].message.role} {suffix} """ ) diff --git a/examples/completion_with_streaming.py b/examples/completion_with_streaming.py index f0760bfc..5bee2033 100644 --- a/examples/completion_with_streaming.py +++ b/examples/completion_with_streaming.py @@ -3,25 +3,24 @@ import asyncio import os -from mistralai.client import MistralClient +from mistralai import Mistral async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) prompt = "def fibonacci(n: int):" suffix = "n = int(input('Enter a number: '))\nprint(fibonacci(n))" 
print(prompt) - for chunk in client.completion_stream( + for chunk in client.fim.stream( model="codestral-latest", prompt=prompt, suffix=suffix, ): - if chunk.choices[0].delta.content is not None: - print(chunk.choices[0].delta.content, end="") + print(chunk.data.choices[0].delta.content, end="") print(suffix) diff --git a/examples/dry_run_job.py b/examples/dry_run_job.py index 2e1af6db..0701b191 100644 --- a/examples/dry_run_job.py +++ b/examples/dry_run_job.py @@ -3,24 +3,26 @@ import asyncio import os -from mistralai.async_client import MistralAsyncClient -from mistralai.models.jobs import TrainingParameters +from mistralai import Mistral +from mistralai.models import TrainingParametersIn async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralAsyncClient(api_key=api_key) + client = Mistral(api_key=api_key) # Create new files with open("examples/file.jsonl", "rb") as f: - training_file = await client.files.create(file=f) + training_file = await client.files.upload_async( + file={"file_name": "test-file.jsonl", "content": f} + ) # Create a new job - dry_run_job = await client.jobs.create( + dry_run_job = await client.fine_tuning.jobs.create_async( model="open-mistral-7b", - training_files=[training_file.id], - hyperparameters=TrainingParameters( + training_files=[{"file_id": training_file.id, "weight": 1}], + hyperparameters=TrainingParametersIn( training_steps=1, learning_rate=0.0001, ), diff --git a/examples/embeddings.py b/examples/embeddings.py index ffde00ae..046c87d4 100755 --- a/examples/embeddings.py +++ b/examples/embeddings.py @@ -2,17 +2,17 @@ import os -from mistralai.client import MistralClient +from mistralai import Mistral def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) - embeddings_response = client.embeddings( + embeddings_response = client.embeddings.create( model="mistral-embed", - input=["What is the best French cheese?"] * 10, + inputs=["What 
is the best French cheese?"] * 10, ) print(embeddings_response) diff --git a/examples/files.py b/examples/files.py index af034b97..a10fd031 100644 --- a/examples/files.py +++ b/examples/files.py @@ -2,16 +2,22 @@ import os -from mistralai.client import MistralClient +from mistralai import Mistral +from mistralai.models import File def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) # Create a new file - created_file = client.files.create(file=("training_file.jsonl", open("examples/file.jsonl", "rb").read())) + created_file = client.files.upload( + file=File( + file_name="training_file.jsonl", + content=open("examples/file.jsonl", "rb").read(), + ) + ) print(created_file) # List files @@ -19,11 +25,11 @@ def main(): print(files) # Retrieve a file - retrieved_file = client.files.retrieve(created_file.id) + retrieved_file = client.files.retrieve(file_id=created_file.id) print(retrieved_file) # Delete a file - deleted_file = client.files.delete(created_file.id) + deleted_file = client.files.delete(file_id=created_file.id) print(deleted_file) diff --git a/examples/function_calling.py b/examples/function_calling.py index 76fb2e18..76ce489a 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -3,15 +3,24 @@ import os from typing import Dict, List -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage, Function +from mistralai import Mistral +from mistralai.models.assistantmessage import AssistantMessage +from mistralai.models.function import Function +from mistralai.models.toolmessage import ToolMessage +from mistralai.models.usermessage import UserMessage # Assuming we have the following data data = { "transaction_id": ["T1001", "T1002", "T1003", "T1004", "T1005"], "customer_id": ["C001", "C002", "C003", "C002", "C001"], "payment_amount": [125.50, 89.99, 120.00, 54.30, 210.20], - "payment_date": ["2021-10-05", 
"2021-10-06", "2021-10-07", "2021-10-05", "2021-10-08"], + "payment_date": [ + "2021-10-05", + "2021-10-06", + "2021-10-07", + "2021-10-05", + "2021-10-08", + ], "payment_status": ["Paid", "Unpaid", "Paid", "Paid", "Pending"], } @@ -20,14 +29,16 @@ def retrieve_payment_status(data: Dict[str, List], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"status": data["payment_status"][i]}) - return json.dumps({"status": "Error - transaction id not found"}) + else: + return json.dumps({"status": "Error - transaction id not found"}) def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"date": data["payment_date"][i]}) - return json.dumps({"status": "Error - transaction id not found"}) + else: + return json.dumps({"status": "Error - transaction id not found"}) names_to_functions = { @@ -44,7 +55,12 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: parameters={ "type": "object", "required": ["transaction_id"], - "properties": {"transaction_id": {"type": "string", "description": "The transaction id."}}, + "properties": { + "transaction_id": { + "type": "string", + "description": "The transaction id.", + } + }, }, ), }, @@ -56,7 +72,12 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: parameters={ "type": "object", "required": ["transaction_id"], - "properties": {"transaction_id": {"type": "string", "description": "The transaction id."}}, + "properties": { + "transaction_id": { + "type": "string", + "description": "The transaction id.", + } + }, }, ), }, @@ -65,30 +86,38 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-small-latest" -client = MistralClient(api_key=api_key) +client = Mistral(api_key=api_key) -messages = [ChatMessage(role="user", 
content="What's the status of my transaction?")] +messages = [UserMessage(content="What's the status of my transaction?")] -response = client.chat(model=model, messages=messages, tools=tools) +response = client.chat.complete(model=model, messages=messages, tools=tools) print(response.choices[0].message.content) -messages.append(ChatMessage(role="assistant", content=response.choices[0].message.content)) -messages.append(ChatMessage(role="user", content="My transaction ID is T1001.")) +messages.append(AssistantMessage(content=response.choices[0].message.content)) +messages.append(UserMessage(content="My transaction ID is T1001.")) -response = client.chat(model=model, messages=messages, tools=tools) +response = client.chat.complete(model=model, messages=messages, tools=tools) tool_call = response.choices[0].message.tool_calls[0] function_name = tool_call.function.name function_params = json.loads(tool_call.function.arguments) -print(f"calling function_name: {function_name}, with function_params: {function_params}") +print( + f"calling function_name: {function_name}, with function_params: {function_params}" +) function_result = names_to_functions[function_name](**function_params) messages.append(response.choices[0].message) -messages.append(ChatMessage(role="tool", name=function_name, content=function_result, tool_call_id=tool_call.id)) - -response = client.chat(model=model, messages=messages, tools=tools) +messages.append( + ToolMessage( + name=function_name, + content=function_result, + tool_call_id=tool_call.id, + ) +) + +response = client.chat.complete(model=model, messages=messages, tools=tools) print(f"{response.choices[0].message.content}") diff --git a/examples/gcp/async_chat_no_streaming.py b/examples/gcp/async_chat_no_streaming.py new file mode 100755 index 00000000..178f151c --- /dev/null +++ b/examples/gcp/async_chat_no_streaming.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai_gcp import MistralGoogleCloud +from 
mistralai_gcp.models.usermessage import UserMessage + + +async def main(): + model = "mistral-large-2407" + + client = MistralGoogleCloud(project_id=os.environ["GCP_PROJECT_ID"]) + + chat_response = await client.chat.complete_async( + model=model, + messages=[UserMessage(content="What is the best French cheese?")], + ) + + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/jobs.py b/examples/jobs.py index 2ba8ae74..246edace 100644 --- a/examples/jobs.py +++ b/examples/jobs.py @@ -1,48 +1,52 @@ #!/usr/bin/env python import os -from mistralai.client import MistralClient -from mistralai.models.jobs import TrainingParameters +from mistralai import Mistral +from mistralai.models import File, TrainingParametersIn def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) # Create new files with open("examples/file.jsonl", "rb") as f: - training_file = client.files.create(file=f) + training_file = client.files.upload( + file=File(file_name="file.jsonl", content=f) + ) with open("examples/validation_file.jsonl", "rb") as f: - validation_file = client.files.create(file=f) + validation_file = client.files.upload( + file=File(file_name="validation_file.jsonl", content=f) + ) # Create a new job - created_job = client.jobs.create( + created_job = client.fine_tuning.jobs.create( model="open-mistral-7b", - training_files=[training_file.id], + training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParameters( + hyperparameters=TrainingParametersIn( training_steps=1, learning_rate=0.0001, ), ) print(created_job) - jobs = client.jobs.list(created_after=created_job.created_at - 10) - for job in jobs.data: - print(f"Retrieved job: {job.id}") + # List jobs + jobs = client.fine_tuning.jobs.list(page=0, page_size=5) + print(jobs) # Retrieve a job - retrieved_job = 
client.jobs.retrieve(created_job.id) + retrieved_job = client.fine_tuning.jobs.get(job_id=created_job.id) print(retrieved_job) # Cancel a job - canceled_job = client.jobs.cancel(created_job.id) + canceled_job = client.fine_tuning.jobs.cancel(job_id=created_job.id) print(canceled_job) # Delete files - client.files.delete(training_file.id) - client.files.delete(validation_file.id) + client.files.delete(file_id=training_file.id) + client.files.delete(file_id=validation_file.id) if __name__ == "__main__": diff --git a/examples/json_format.py b/examples/json_format.py index 749965b1..23c38680 100755 --- a/examples/json_format.py +++ b/examples/json_format.py @@ -2,20 +2,24 @@ import os -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral +from mistralai.models import UserMessage def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "mistral-large-latest" - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) - chat_response = client.chat( + chat_response = client.chat.complete( model=model, response_format={"type": "json_object"}, - messages=[ChatMessage(role="user", content="What is the best French cheese? Answer shortly in JSON.")], + messages=[ + UserMessage( + content="What is the best French cheese? 
Answer shortly in JSON.", + ) + ], ) print(chat_response.choices[0].message.content) diff --git a/examples/list_models.py b/examples/list_models.py index b21dcd10..c6c0c855 100755 --- a/examples/list_models.py +++ b/examples/list_models.py @@ -2,15 +2,15 @@ import os -from mistralai.client import MistralClient +from mistralai import Mistral def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralClient(api_key=api_key) + client = Mistral(api_key=api_key) - list_models_response = client.list_models() + list_models_response = client.models.list() print(list_models_response) diff --git a/packages/mistralai_azure/.genignore b/packages/mistralai_azure/.genignore new file mode 100644 index 00000000..513646da --- /dev/null +++ b/packages/mistralai_azure/.genignore @@ -0,0 +1,4 @@ +src/mistralai_azure/sdk.py +README.md +USAGE.md +docs/sdks/**/README.md \ No newline at end of file diff --git a/packages/mistralai_azure/.gitattributes b/packages/mistralai_azure/.gitattributes new file mode 100644 index 00000000..4d75d590 --- /dev/null +++ b/packages/mistralai_azure/.gitattributes @@ -0,0 +1,2 @@ +# This allows generated code to be indexed correctly +*.py linguist-generated=false \ No newline at end of file diff --git a/packages/mistralai_azure/.gitignore b/packages/mistralai_azure/.gitignore new file mode 100644 index 00000000..477b7729 --- /dev/null +++ b/packages/mistralai_azure/.gitignore @@ -0,0 +1,8 @@ +.venv/ +venv/ +src/*.egg-info/ +__pycache__/ +.pytest_cache/ +.python-version +.DS_Store +pyrightconfig.json diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock new file mode 100644 index 00000000..047a649a --- /dev/null +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -0,0 +1,133 @@ +lockVersion: 2.0.0 +id: dc40fa48-2c4d-46ad-ac8b-270749770f34 +management: + docChecksum: f04749e097bb06d5fb8850400b089250 + docVersion: 0.0.2 + speakeasyVersion: 1.356.0 + generationVersion: 2.388.1 + releaseVersion: 
1.0.0-rc.2 + configChecksum: 98e9cf39c9535097961a0ca73dbac10b + published: true +features: + python: + additionalDependencies: 1.0.0 + constsAndDefaults: 1.0.2 + core: 5.3.4 + defaultEnabledRetries: 0.2.0 + envVarSecurityUsage: 0.3.1 + examples: 3.0.0 + flatRequests: 1.0.1 + globalSecurity: 3.0.1 + globalSecurityCallbacks: 1.0.0 + globalSecurityFlattening: 1.0.0 + globalServerURLs: 3.0.0 + nameOverrides: 3.0.0 + nullables: 1.0.0 + responseFormat: 1.0.0 + retries: 3.0.0 + sdkHooks: 1.0.0 + serverEvents: 1.0.2 + serverEventsSentinels: 0.1.0 + serverIDs: 3.0.0 + unions: 3.0.1 +generatedFiles: + - src/mistralai_azure/sdkconfiguration.py + - src/mistralai_azure/chat.py + - .vscode/settings.json + - poetry.toml + - py.typed + - pylintrc + - pyproject.toml + - scripts/compile.sh + - scripts/publish.sh + - src/mistralai_azure/__init__.py + - src/mistralai_azure/basesdk.py + - src/mistralai_azure/httpclient.py + - src/mistralai_azure/py.typed + - src/mistralai_azure/types/__init__.py + - src/mistralai_azure/types/basemodel.py + - src/mistralai_azure/utils/__init__.py + - src/mistralai_azure/utils/annotations.py + - src/mistralai_azure/utils/enums.py + - src/mistralai_azure/utils/eventstreaming.py + - src/mistralai_azure/utils/forms.py + - src/mistralai_azure/utils/headers.py + - src/mistralai_azure/utils/logger.py + - src/mistralai_azure/utils/metadata.py + - src/mistralai_azure/utils/queryparams.py + - src/mistralai_azure/utils/requestbodies.py + - src/mistralai_azure/utils/retries.py + - src/mistralai_azure/utils/security.py + - src/mistralai_azure/utils/serializers.py + - src/mistralai_azure/utils/url.py + - src/mistralai_azure/utils/values.py + - src/mistralai_azure/models/sdkerror.py + - src/mistralai_azure/models/completionevent.py + - src/mistralai_azure/models/completionchunk.py + - src/mistralai_azure/models/completionresponsestreamchoice.py + - src/mistralai_azure/models/deltamessage.py + - src/mistralai_azure/models/toolcall.py + - 
src/mistralai_azure/models/functioncall.py + - src/mistralai_azure/models/usageinfo.py + - src/mistralai_azure/models/httpvalidationerror.py + - src/mistralai_azure/models/validationerror.py + - src/mistralai_azure/models/chatcompletionstreamrequest.py + - src/mistralai_azure/models/tool.py + - src/mistralai_azure/models/function.py + - src/mistralai_azure/models/responseformat.py + - src/mistralai_azure/models/systemmessage.py + - src/mistralai_azure/models/contentchunk.py + - src/mistralai_azure/models/usermessage.py + - src/mistralai_azure/models/textchunk.py + - src/mistralai_azure/models/assistantmessage.py + - src/mistralai_azure/models/toolmessage.py + - src/mistralai_azure/models/chatcompletionresponse.py + - src/mistralai_azure/models/chatcompletionchoice.py + - src/mistralai_azure/models/chatcompletionrequest.py + - src/mistralai_azure/models/security.py + - src/mistralai_azure/models/__init__.py + - docs/models/completionevent.md + - docs/models/completionchunk.md + - docs/models/finishreason.md + - docs/models/completionresponsestreamchoice.md + - docs/models/deltamessage.md + - docs/models/toolcall.md + - docs/models/arguments.md + - docs/models/functioncall.md + - docs/models/usageinfo.md + - docs/models/httpvalidationerror.md + - docs/models/loc.md + - docs/models/validationerror.md + - docs/models/stop.md + - docs/models/messages.md + - docs/models/toolchoice.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/tool.md + - docs/models/function.md + - docs/models/responseformats.md + - docs/models/responseformat.md + - docs/models/content.md + - docs/models/role.md + - docs/models/systemmessage.md + - docs/models/contentchunk.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/usermessage.md + - docs/models/textchunk.md + - docs/models/assistantmessagerole.md + - docs/models/assistantmessage.md + - docs/models/toolmessagerole.md + - docs/models/toolmessage.md + - 
docs/models/chatcompletionresponse.md + - docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionrequest.md + - docs/models/security.md + - docs/models/utils/retryconfig.md + - .gitattributes + - src/mistralai_azure/_hooks/sdkhooks.py + - src/mistralai_azure/_hooks/types.py + - src/mistralai_azure/_hooks/__init__.py diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml new file mode 100644 index 00000000..b36d96f9 --- /dev/null +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -0,0 +1,41 @@ +configVersion: 2.0.0 +generation: + sdkClassName: MistralAzure + maintainOpenAPIOrder: true + usageSnippets: + optionalPropertyRendering: withExample + useClassNamesForArrayFields: true + fixes: + nameResolutionDec2023: true + parameterOrderingFeb2024: true + requestResponseComponentNamesFeb2024: true + auth: + oAuth2ClientCredentialsEnabled: true +python: + version: 1.0.0-rc.2 + additionalDependencies: + dev: + pytest: ^8.2.2 + pytest-asyncio: ^0.23.7 + authors: + - Mistral + clientServerStatusCodesAsErrors: true + description: Python Client SDK for the Mistral AI API in Azure. 
+ enumFormat: union + flattenGlobalSecurity: true + flattenRequests: true + imports: + option: openapi + paths: + callbacks: "" + errors: "" + operations: "" + shared: "" + webhooks: "" + inputModelSuffix: input + maxMethodParams: 4 + methodArguments: infer-optional-args + outputModelSuffix: output + packageName: mistralai_azure + responseFormat: flat + templateVersion: v2 diff --git a/packages/mistralai_azure/.vscode/settings.json b/packages/mistralai_azure/.vscode/settings.json new file mode 100644 index 00000000..8d79f0ab --- /dev/null +++ b/packages/mistralai_azure/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "python.testing.pytestArgs": ["tests", "-vv"], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "pylint.args": ["--rcfile=pylintrc"] +} diff --git a/packages/mistralai_azure/CONTRIBUTING.md b/packages/mistralai_azure/CONTRIBUTING.md new file mode 100644 index 00000000..d585717f --- /dev/null +++ b/packages/mistralai_azure/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to This Repository + +Thank you for your interest in contributing to this repository. Please note that this repository contains generated code. As such, we do not accept direct changes or pull requests. Instead, we encourage you to follow the guidelines below to report issues and suggest improvements. + +## How to Report Issues + +If you encounter any bugs or have suggestions for improvements, please open an issue on GitHub. When reporting an issue, please provide as much detail as possible to help us reproduce the problem. 
This includes: + +- A clear and descriptive title +- Steps to reproduce the issue +- Expected and actual behavior +- Any relevant logs, screenshots, or error messages +- Information about your environment (e.g., operating system, software versions) + - For example can be collected using the `npx envinfo` command from your terminal if you have Node.js installed + +## Issue Triage and Upstream Fixes + +We will review and triage issues as quickly as possible. Our goal is to address bugs and incorporate improvements in the upstream source code. Fixes will be included in the next generation of the generated code. + +## Contact + +If you have any questions or need further assistance, please feel free to reach out by opening an issue. + +Thank you for your understanding and cooperation! + +The Maintainers diff --git a/packages/mistralai_azure/README.md b/packages/mistralai_azure/README.md new file mode 100644 index 00000000..65bc2e4f --- /dev/null +++ b/packages/mistralai_azure/README.md @@ -0,0 +1,430 @@ +# Mistral on Azure Python Client + +## SDK Installation + +PIP +```bash +pip install mistralai +``` + +Poetry +```bash +poetry add mistralai +``` + +**Prerequisites** + +Before you begin, ensure you have `AZUREAI_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). + + +## SDK Example Usage + +### Create Chat Completions + +This example shows how to create chat completions. + +```python +# Synchronous Example +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.complete( + messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, + ], + model="azureai" +) + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai_azure import MistralAzure +import os + +async def main(): + s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + ) + res = await s.chat.complete_async( + messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], + model="azureai" + ) + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + + + +## Available Resources and Operations + +### [chat](docs/sdks/chat/README.md) + +* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion +* [create](docs/sdks/chat/README.md#create) - Chat Completion + + + +## Server-sent event streaming + +[Server-sent events][mdn-sse] are used to stream content from certain +operations. These operations will expose the stream as [Generator][generator] that +can be consumed using a simple `for` loop. The loop will +terminate when the server no longer has any events to send and closes the +underlying connection. + +```python +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.stream( + messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], + model="azureai" +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events +[generator]: https://wiki.python.org/moin/Generators + + + +## Retries + +Some of the endpoints in this SDK support retries. 
If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK. + +To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: +```python +from mistralai_azure import MistralAzure +from mistralazure.utils import BackoffStrategy, RetryConfig +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.stream(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai", + RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: +```python +from mistralai_azure import MistralAzure +from mistralazure.utils import BackoffStrategy, RetryConfig +import os + +s = MistralAzure( + retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.stream(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Error Handling + +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. 
+ +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +### Example + +```python +from mistralai_azure import MistralAzure, models +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + +res = None +try: + res = s.chat.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +except models.HTTPValidationError as e: + # handle exception + raise(e) +except models.SDKError as e: + # handle exception + raise(e) + +if res is not None: + # handle response + pass + +``` + + + +## Server Selection + +### Select Server by Name + +You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: + +| Name | Server | Variables | +| ------ | ------------------------ | --------- | +| `prod` | `https://api.mistral.ai` | None | + +#### Example + +```python +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + server="prod", + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.stream(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + +### Override Server URL Per-Client + +The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. 
For example: +```python +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + server_url="https://api.mistral.ai", + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.stream(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Custom HTTP Client + +The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. +Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocol's ensuring that the client has the necessary methods to make API calls. +This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly. 
+ +For example, you could specify a header for every request that this sdk makes as follows: +```python +from mistralai_azure import MistralAzure +import httpx + +http_client = httpx.Client(headers={"x-custom-header": "someValue"}) +s = MistralAzure(client=http_client) +``` + +or you could wrap the client with your own custom logic: +```python +from mistralai_azure import MistralAzure +from mistralai_azure.httpclient import AsyncHttpClient +import httpx + +class CustomClient(AsyncHttpClient): + client: AsyncHttpClient + + def __init__(self, client: AsyncHttpClient): + self.client = client + + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + request.headers["Client-Level-Header"] = "added by client" + + return await self.client.send( + request, stream=stream, auth=auth, follow_redirects=follow_redirects + ) + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + return self.client.build_request( + method, + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + +s = MistralAzure(async_client=CustomClient(httpx.AsyncClient())) 
+``` + + + +## Authentication + +### Per-Client Security Schemes + +This SDK supports the following security scheme globally: + +| Name | Type | Scheme | +| --------- | ---- | ----------- | +| `api_key` | http | HTTP Bearer | + +To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: +```python +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.stream(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + + +# Development + +## Contributions + +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. diff --git a/packages/mistralai_azure/USAGE.md b/packages/mistralai_azure/USAGE.md new file mode 100644 index 00000000..0ccf3d70 --- /dev/null +++ b/packages/mistralai_azure/USAGE.md @@ -0,0 +1,55 @@ + +### Create Chat Completions + +This example shows how to create chat completions. + +```python +# Synchronous Example +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai_azure import MistralAzure +import os + +async def main(): + s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + ) + res = await s.chat.complete_async(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], model="azureai") + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/arguments.md b/packages/mistralai_azure/docs/models/arguments.md new file mode 100644 index 00000000..2e54e27e --- /dev/null +++ b/packages/mistralai_azure/docs/models/arguments.md @@ -0,0 +1,17 @@ +# Arguments + + +## Supported Types + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/mistralai_azure/docs/models/assistantmessage.md new file mode 100644 index 00000000..0c36cde9 --- /dev/null +++ b/packages/mistralai_azure/docs/models/assistantmessage.md @@ -0,0 +1,11 @@ +# AssistantMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/assistantmessagerole.md b/packages/mistralai_azure/docs/models/assistantmessagerole.md new file mode 100644 index 00000000..658229e7 --- /dev/null +++ b/packages/mistralai_azure/docs/models/assistantmessagerole.md @@ -0,0 +1,8 @@ +# AssistantMessageRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoice.md b/packages/mistralai_azure/docs/models/chatcompletionchoice.md new file mode 100644 index 00000000..6fa839b7 --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionChoice + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | +| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md b/packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md new file mode 100644 index 00000000..b2f15ecb --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md @@ -0,0 +1,12 @@ +# ChatCompletionChoiceFinishReason + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `STOP` | stop | +| `LENGTH` | length | +| `MODEL_LENGTH` | model_length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md new file mode 100644 index 00000000..3df1e28e --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -0,0 +1,20 @@ +# ChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md b/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md new file mode 100644 index 00000000..bc7708a6 --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequeststop.md b/packages/mistralai_azure/docs/models/chatcompletionrequeststop.md new file mode 100644 index 00000000..749296d4 --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md new file mode 100644 index 00000000..ed32b75e --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionRequestToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionresponse.md b/packages/mistralai_azure/docs/models/chatcompletionresponse.md new file mode 100644 index 00000000..ad376158 --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionresponse.md @@ -0,0 +1,13 @@ +# ChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff 
--git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md new file mode 100644 index 00000000..1fc34709 --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -0,0 +1,20 @@ +# ChatCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/completionchunk.md b/packages/mistralai_azure/docs/models/completionchunk.md new file mode 100644 index 00000000..b8ae6a09 --- /dev/null +++ b/packages/mistralai_azure/docs/models/completionchunk.md @@ -0,0 +1,13 @@ +# CompletionChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/completionevent.md b/packages/mistralai_azure/docs/models/completionevent.md new file mode 100644 index 00000000..7a66e8fe --- /dev/null +++ b/packages/mistralai_azure/docs/models/completionevent.md @@ -0,0 +1,8 @@ +# CompletionEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | +| `data` | [models.CompletionChunk](../models/completionchunk.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md b/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..c807dacd --- /dev/null +++ b/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.FinishReason]](../models/finishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/content.md b/packages/mistralai_azure/docs/models/content.md new file mode 100644 index 00000000..a833dc2c --- /dev/null +++ b/packages/mistralai_azure/docs/models/content.md @@ -0,0 +1,17 @@ +# Content + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/contentchunk.md b/packages/mistralai_azure/docs/models/contentchunk.md new file mode 100644 index 00000000..64fc80d6 --- /dev/null +++ b/packages/mistralai_azure/docs/models/contentchunk.md @@ -0,0 +1,9 @@ +# ContentChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/deltamessage.md 
b/packages/mistralai_azure/docs/models/deltamessage.md new file mode 100644 index 00000000..4cb9e91e --- /dev/null +++ b/packages/mistralai_azure/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | [OptionalNullable[models.ToolCall]](../models/toolcall.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/finishreason.md b/packages/mistralai_azure/docs/models/finishreason.md new file mode 100644 index 00000000..45a5aedb --- /dev/null +++ b/packages/mistralai_azure/docs/models/finishreason.md @@ -0,0 +1,11 @@ +# FinishReason + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `STOP` | stop | +| `LENGTH` | length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/mistralai_azure/docs/models/function.md new file mode 100644 index 00000000..8af398f5 --- /dev/null +++ b/packages/mistralai_azure/docs/models/function.md @@ -0,0 +1,10 @@ +# Function + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/functioncall.md b/packages/mistralai_azure/docs/models/functioncall.md new file mode 100644 index 
00000000..7ccd90dc --- /dev/null +++ b/packages/mistralai_azure/docs/models/functioncall.md @@ -0,0 +1,9 @@ +# FunctionCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.Arguments](../models/arguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/httpvalidationerror.md b/packages/mistralai_azure/docs/models/httpvalidationerror.md new file mode 100644 index 00000000..63892430 --- /dev/null +++ b/packages/mistralai_azure/docs/models/httpvalidationerror.md @@ -0,0 +1,10 @@ +# HTTPValidationError + +Validation Error + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `detail` | List[[models.ValidationError](../models/validationerror.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/loc.md b/packages/mistralai_azure/docs/models/loc.md new file mode 100644 index 00000000..d6094ac2 --- /dev/null +++ b/packages/mistralai_azure/docs/models/loc.md @@ -0,0 +1,17 @@ +# Loc + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/messages.md b/packages/mistralai_azure/docs/models/messages.md new file mode 100644 index 00000000..1d394500 --- /dev/null +++ b/packages/mistralai_azure/docs/models/messages.md @@ -0,0 +1,29 @@ +# Messages + + +## Supported Types + +### 
`models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/mistralai_azure/docs/models/responseformat.md new file mode 100644 index 00000000..2704eab4 --- /dev/null +++ b/packages/mistralai_azure/docs/models/responseformat.md @@ -0,0 +1,8 @@ +# ResponseFormat + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/mistralai_azure/docs/models/responseformats.md new file mode 100644 index 00000000..ce35fbb3 --- /dev/null +++ b/packages/mistralai_azure/docs/models/responseformats.md @@ -0,0 +1,11 @@ +# ResponseFormats + +An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
+ + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/role.md b/packages/mistralai_azure/docs/models/role.md new file mode 100644 index 00000000..affca78d --- /dev/null +++ b/packages/mistralai_azure/docs/models/role.md @@ -0,0 +1,8 @@ +# Role + + +## Values + +| Name | Value | +| -------- | -------- | +| `SYSTEM` | system | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/security.md b/packages/mistralai_azure/docs/models/security.md new file mode 100644 index 00000000..c698674c --- /dev/null +++ b/packages/mistralai_azure/docs/models/security.md @@ -0,0 +1,8 @@ +# Security + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `api_key` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/stop.md b/packages/mistralai_azure/docs/models/stop.md new file mode 100644 index 00000000..ba40ca83 --- /dev/null +++ b/packages/mistralai_azure/docs/models/stop.md @@ -0,0 +1,19 @@ +# Stop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/systemmessage.md b/packages/mistralai_azure/docs/models/systemmessage.md new file mode 100644 index 00000000..7f827984 --- /dev/null +++ b/packages/mistralai_azure/docs/models/systemmessage.md @@ -0,0 +1,9 @@ +# SystemMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/textchunk.md b/packages/mistralai_azure/docs/models/textchunk.md new file mode 100644 index 00000000..34e4dd6f --- /dev/null +++ b/packages/mistralai_azure/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/tool.md b/packages/mistralai_azure/docs/models/tool.md new file mode 100644 index 00000000..291394c0 --- /dev/null +++ b/packages/mistralai_azure/docs/models/tool.md @@ -0,0 +1,9 @@ +# Tool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `function` | 
[models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/mistralai_azure/docs/models/toolcall.md new file mode 100644 index 00000000..bd2dc9ff --- /dev/null +++ b/packages/mistralai_azure/docs/models/toolcall.md @@ -0,0 +1,10 @@ +# ToolCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolchoice.md b/packages/mistralai_azure/docs/models/toolchoice.md new file mode 100644 index 00000000..b84f51f6 --- /dev/null +++ b/packages/mistralai_azure/docs/models/toolchoice.md @@ -0,0 +1,10 @@ +# ToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessage.md b/packages/mistralai_azure/docs/models/toolmessage.md new file mode 100644 index 00000000..364339e1 --- /dev/null +++ b/packages/mistralai_azure/docs/models/toolmessage.md @@ -0,0 +1,11 @@ +# ToolMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | 
*OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessagerole.md b/packages/mistralai_azure/docs/models/toolmessagerole.md new file mode 100644 index 00000000..c24e59c0 --- /dev/null +++ b/packages/mistralai_azure/docs/models/toolmessagerole.md @@ -0,0 +1,8 @@ +# ToolMessageRole + + +## Values + +| Name | Value | +| ------ | ------ | +| `TOOL` | tool | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usageinfo.md b/packages/mistralai_azure/docs/models/usageinfo.md new file mode 100644 index 00000000..9f56a3ae --- /dev/null +++ b/packages/mistralai_azure/docs/models/usageinfo.md @@ -0,0 +1,10 @@ +# UsageInfo + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | +| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | +| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | +| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessage.md b/packages/mistralai_azure/docs/models/usermessage.md new file mode 100644 index 00000000..3d96f1cd --- /dev/null +++ b/packages/mistralai_azure/docs/models/usermessage.md @@ -0,0 +1,9 @@ +# UserMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A 
| +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessagecontent.md b/packages/mistralai_azure/docs/models/usermessagecontent.md new file mode 100644 index 00000000..86ebd18f --- /dev/null +++ b/packages/mistralai_azure/docs/models/usermessagecontent.md @@ -0,0 +1,17 @@ +# UserMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: List[models.TextChunk] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/usermessagerole.md b/packages/mistralai_azure/docs/models/usermessagerole.md new file mode 100644 index 00000000..171124e4 --- /dev/null +++ b/packages/mistralai_azure/docs/models/usermessagerole.md @@ -0,0 +1,8 @@ +# UserMessageRole + + +## Values + +| Name | Value | +| ------ | ------ | +| `USER` | user | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/utils/retryconfig.md b/packages/mistralai_azure/docs/models/utils/retryconfig.md new file mode 100644 index 00000000..69dd549e --- /dev/null +++ b/packages/mistralai_azure/docs/models/utils/retryconfig.md @@ -0,0 +1,24 @@ +# RetryConfig + +Allows customizing the default retry configuration. Only usable with methods that mention they support retries. + +## Fields + +| Name | Type | Description | Example | +| ------------------------- | ----------------------------------- | --------------------------------------- | --------- | +| `strategy` | `*str*` | The retry strategy to use. | `backoff` | +| `backoff` | [BackoffStrategy](#backoffstrategy) | Configuration for the backoff strategy. | | +| `retry_connection_errors` | `*bool*` | Whether to retry on connection errors. | `true` | + +## BackoffStrategy + +The backoff strategy allows retrying a request with an exponential backoff between each retry. 
+ +### Fields + +| Name | Type | Description | Example | +| ------------------ | --------- | ----------------------------------------- | -------- | +| `initial_interval` | `*int*` | The initial interval in milliseconds. | `500` | +| `max_interval` | `*int*` | The maximum interval in milliseconds. | `60000` | +| `exponent` | `*float*` | The exponent to use for the backoff. | `1.5` | +| `max_elapsed_time` | `*int*` | The maximum elapsed time in milliseconds. | `300000` | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/validationerror.md b/packages/mistralai_azure/docs/models/validationerror.md new file mode 100644 index 00000000..7a1654a1 --- /dev/null +++ b/packages/mistralai_azure/docs/models/validationerror.md @@ -0,0 +1,10 @@ +# ValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `loc` | List[[models.Loc](../models/loc.md)] | :heavy_check_mark: | N/A | +| `msg` | *str* | :heavy_check_mark: | N/A | +| `type` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/sdks/chat/README.md b/packages/mistralai_azure/docs/sdks/chat/README.md new file mode 100644 index 00000000..26d20bb4 --- /dev/null +++ b/packages/mistralai_azure/docs/sdks/chat/README.md @@ -0,0 +1,129 @@ +# Chat +(*chat*) + +## Overview + +Chat Completion API. + +### Available Operations + +* [stream](#stream) - Stream chat completion +* [create](#create) - Chat Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.stream(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | ----------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | +| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## create + +Chat Completion + +### Example Usage + +```python +from mistralai_azure import MistralAzure +import os + +s = MistralAzure( + azure_api_key=os.getenv("AZURE_API_KEY", ""), + azure_endpoint=os.getenv("AZURE_ENDPOINT", "") +) + + +res = s.chat.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="azureai") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | --------------------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/mistralai_azure/docs/sdks/mistralazure/README.md b/packages/mistralai_azure/docs/sdks/mistralazure/README.md new file mode 100644 index 00000000..381000a8 --- /dev/null +++ b/packages/mistralai_azure/docs/sdks/mistralazure/README.md @@ -0,0 +1,9 @@ +# MistralAzure SDK + + +## Overview + +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + +### Available Operations + diff --git a/packages/mistralai_azure/poetry.lock b/packages/mistralai_azure/poetry.lock new file mode 100644 index 00000000..477ecfde --- /dev/null +++ b/packages/mistralai_azure/poetry.lock @@ -0,0 +1,638 @@ +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
+ +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "anyio" +version = "4.4.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.8" +files = [ + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.23)"] + +[[package]] +name = "astroid" +version = "3.2.4" +description = "An abstract syntax tree for Python with inference support." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, + {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "certifi" +version = "2024.7.4" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "dill" +version = "0.3.8" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = 
"exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.5" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.26.0)"] + +[[package]] +name = "httpx" +version = "0.27.0" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + +[[package]] +name = "idna" +version = "3.7" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "jsonpath-python" +version = "1.0.6" +description = "A more powerful JSONPath implementation in modern python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, + {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mypy" +version = "1.10.1" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, + {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, + {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, + {file = 
"mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, + {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, + {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, + {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, + {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, + {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, + {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, + {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, + {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, + {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, + {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, + {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, + {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, + {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, + {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=4.1.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "platformdirs" +version = "4.2.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pydantic" +version = "2.8.2" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.20.1" +typing-extensions = [ + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.20.1" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ 
+ {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = 
"pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = 
"pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = 
"pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = 
"pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = 
"sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = 
"pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pylint" +version = "3.2.3" +description = "python code static checker" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, + {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, +] + +[package.dependencies] +astroid = ">=3.2.2,<=3.3.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.2", markers = "python_version < \"3.11\""}, + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, +] +isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +tomlkit = ">=0.10.1" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + +[[package]] +name = "pyright" +version = 
"1.1.374" +description = "Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyright-1.1.374-py3-none-any.whl", hash = "sha256:55752bcf7a3646d293cd76710a983b71e16f6128aab2d42468e6eb7e46c0a70d"}, + {file = "pyright-1.1.374.tar.gz", hash = "sha256:d01b2daf864ba5e0362e56b844984865970d7204158e61eb685e2dab7804cb82"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" + +[package.extras] +all = ["twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] + +[[package]] +name = "pytest" +version = "8.3.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, + {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, + {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, +] + +[package.dependencies] +pytest = ">=7.0.0,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "python-dateutil" 
+version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tomlkit" +version = "0.13.0" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tomlkit-0.13.0-py3-none-any.whl", hash = 
"sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, + {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20240316" +description = "Typing stubs for python-dateutil" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, + {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." 
+optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, +] + +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + +[metadata] +lock-version = "2.0" +python-versions = "^3.8" +content-hash = "1478d3764c93fadedc6a94a2b911eb59eb142cd4b127d65deb7120a378e07c45" diff --git a/packages/mistralai_azure/poetry.toml b/packages/mistralai_azure/poetry.toml new file mode 100644 index 00000000..ab1033bd --- /dev/null +++ b/packages/mistralai_azure/poetry.toml @@ -0,0 +1,2 @@ +[virtualenvs] +in-project = true diff --git a/packages/mistralai_azure/py.typed b/packages/mistralai_azure/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/packages/mistralai_azure/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc new file mode 100644 index 00000000..50800386 --- /dev/null +++ b/packages/mistralai_azure/pylintrc @@ -0,0 +1,658 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. 
+#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. 
The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.8 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots=src + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. 
+unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. 
If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _, + e, + id + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. 
Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +typealias-rgx=.* + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. 
+max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=25 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). 
+ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". 
+disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught, + fixme, + relative-beyond-top-level + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. 
When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. 
+spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. 
In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). 
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml new file mode 100644 index 00000000..15aea3d9 --- /dev/null +++ b/packages/mistralai_azure/pyproject.toml @@ -0,0 +1,56 @@ +[tool.poetry] +name = "mistralai_azure" +version = "1.0.0-rc.2" +description = "Python Client SDK for the Mistral AI API in Azure." +authors = ["Mistral",] +readme = "README.md" +packages = [ + { include = "mistralai_azure", from = "src" } +] +include = ["py.typed", "src/mistralai_azure/py.typed"] + +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai_azure/py.typed"] + +[virtualenvs] +in-project = true + +[tool.poetry.dependencies] +python = "^3.8" +httpx = "^0.27.0" +jsonpath-python = "^1.0.6" +pydantic = "~2.8.2" +python-dateutil = "^2.9.0.post0" +typing-inspect = "^0.9.0" + +[tool.poetry.group.dev.dependencies] +mypy = "==1.10.1" +pylint = "==3.2.3" +pyright = "==1.1.374" +pytest = "^8.2.2" +pytest-asyncio = "^0.23.7" +types-python-dateutil = "^2.9.0.20240316" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +pythonpath = ["src"] + +[tool.mypy] +disable_error_code = "misc" + +[[tool.mypy.overrides]] +module = "typing_inspect" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "jsonpath" +ignore_missing_imports = true + +[tool.pyright] +venvPath = "." 
+venv = ".venv" + + diff --git a/packages/mistralai_azure/scripts/compile.sh b/packages/mistralai_azure/scripts/compile.sh new file mode 100755 index 00000000..aa49772e --- /dev/null +++ b/packages/mistralai_azure/scripts/compile.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +set -o pipefail # Ensure pipeline failures are propagated + +# Use temporary files to store outputs and exit statuses +declare -A output_files +declare -A status_files + +# Function to run a command with temporary output and status files +run_command() { + local cmd="$1" + local key="$2" + local output_file="$3" + local status_file="$4" + + # Run the command and store output and exit status + { + eval "$cmd" + echo $? > "$status_file" + } &> "$output_file" & +} + +# Create temporary files for outputs and statuses +for cmd in compileall pylint mypy pyright; do + output_files[$cmd]=$(mktemp) + status_files[$cmd]=$(mktemp) +done + +# Collect PIDs for background processes +declare -a pids + +# Run commands in parallel using temporary files +echo "Running python -m compileall" +run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" +pids+=($!) + +echo "Running pylint" +run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" +pids+=($!) + +echo "Running mypy" +run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" +pids+=($!) + +echo "Running pyright (optional)" +run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" +pids+=($!) 
+ +# Wait for all processes to complete +echo "Waiting for processes to complete" +for pid in "${pids[@]}"; do + wait "$pid" +done + +# Print output sequentially and check for failures +failed=false +for key in "${!output_files[@]}"; do + echo "--- Output from Command: $key ---" + echo + cat "${output_files[$key]}" + echo # Empty line for separation + echo "--- End of Output from Command: $key ---" + echo + + exit_status=$(cat "${status_files[$key]}") + if [ "$exit_status" -ne 0 ]; then + echo "Command $key failed with exit status $exit_status" >&2 + failed=true + fi +done + +# Clean up temporary files +for tmp_file in "${output_files[@]}" "${status_files[@]}"; do + rm -f "$tmp_file" +done + +if $failed; then + echo "One or more commands failed." >&2 + exit 1 +else + echo "All commands completed successfully." + exit 0 +fi diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh new file mode 100755 index 00000000..1ee7194c --- /dev/null +++ b/packages/mistralai_azure/scripts/publish.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} + +poetry publish --build --skip-existing diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/mistralai_azure/src/mistralai_azure/__init__.py new file mode 100644 index 00000000..68138c47 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .sdk import * +from .sdkconfiguration import * +from .models import * diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py new file mode 100644 index 00000000..2ee66cdd --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdkhooks import * +from .types import * +from .registration import * diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py new file mode 100644 index 00000000..b03549c3 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py @@ -0,0 +1,16 @@ +# THIS FILE IS THE EXACT COPY OF THE ORIGINAL FILE FROM src/mistralai/_hooks/custom_user_agent.py +from typing import Union + +import httpx + +from .types import BeforeRequestContext, BeforeRequestHook + + +class CustomUserAgentHook(BeforeRequestHook): + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + request.headers["user-agent"] = ( + "mistral-client-python/" + request.headers["user-agent"].split(" ")[1] + ) + return request diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py new file mode 100644 index 00000000..304edfa2 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py @@ -0,0 +1,15 @@ +from .custom_user_agent import CustomUserAgentHook +from .types import Hooks + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. 
Feel free to define them +# in this file or in separate files in the hooks folder. + + +def init_hooks(hooks: Hooks): + # pylint: disable=unused-argument + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance + """ + hooks.register_before_request_hook(CustomUserAgentHook()) diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py new file mode 100644 index 00000000..c8e9631a --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai_azure.httpclient import HttpClient + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, 
HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py new file mode 100644 index 00000000..3076b41d --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
class HookContext:
    """Per-operation metadata threaded through every hook invocation."""

    operation_id: str
    oauth2_scopes: Optional[List[str]] = None
    security_source: Optional[Union[Any, Callable[[], Any]]] = None

    def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]):
        self.operation_id = operation_id
        self.oauth2_scopes = oauth2_scopes
        self.security_source = security_source


class BeforeRequestContext(HookContext):
    """Context handed to before-request hooks; copies the parent HookContext."""

    def __init__(self, hook_ctx: HookContext):
        super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source)


class AfterSuccessContext(HookContext):
    """Context handed to after-success hooks; copies the parent HookContext."""

    def __init__(self, hook_ctx: HookContext):
        super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source)


class AfterErrorContext(HookContext):
    """Context handed to after-error hooks; copies the parent HookContext."""

    def __init__(self, hook_ctx: HookContext):
        super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source)


class SDKInitHook(ABC):
    """Hook run once at SDK construction; may swap the base URL or HTTP client."""

    @abstractmethod
    def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]:
        pass


class BeforeRequestHook(ABC):
    """Hook run before each request; returns a (possibly replaced) request, or an Exception to abort."""

    @abstractmethod
    def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]:
        pass


class AfterSuccessHook(ABC):
    """Hook run after a non-error response; returns a (possibly replaced) response, or an Exception."""

    @abstractmethod
    def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]:
        pass


class AfterErrorHook(ABC):
    """Hook run after a transport failure or error response; may rewrite the outcome."""

    @abstractmethod
    def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]:
        pass


class Hooks(ABC):
    """Registration interface implemented by the SDK's hook container."""

    @abstractmethod
    def register_sdk_init_hook(self, hook: SDKInitHook):
        pass

    @abstractmethod
    def register_before_request_hook(self, hook: BeforeRequestHook):
        pass

    @abstractmethod
    def register_after_success_hook(self, hook: AfterSuccessHook):
        pass

    @abstractmethod
    def register_after_error_hook(self, hook: AfterErrorHook):
        pass


class BaseSDK:
    """Shared plumbing for generated sub-clients: URL templating, request
    assembly, and the hook-aware send loop (sync and async).

    The sync/async duplication of the original generated code is factored into
    the shared helpers ``_log_request``, ``_handle_request_exception``,
    ``_check_response`` and ``_apply_after_success``; only the actual
    ``client.send`` call differs between ``do_request`` and
    ``do_request_async``.
    """

    sdk_configuration: SDKConfiguration

    def __init__(self, sdk_config: SDKConfiguration) -> None:
        self.sdk_configuration = sdk_config

    def get_url(self, base_url, url_variables):
        """Resolve the effective base URL/variables, falling back to the configured server."""
        sdk_url, sdk_variables = self.sdk_configuration.get_server_details()

        if base_url is None:
            base_url = sdk_url

        if url_variables is None:
            url_variables = sdk_variables

        return utils.template_url(base_url, url_variables)

    def build_request(
        self,
        method,
        path,
        base_url,
        url_variables,
        request,
        request_body_required,
        request_has_path_params,
        request_has_query_params,
        user_agent_header,
        accept_header_value,
        _globals=None,
        security=None,
        timeout_ms: Optional[int] = None,
        get_serialized_body: Optional[
            Callable[[], Optional[SerializedRequestBody]]
        ] = None,
        url_override: Optional[str] = None,
    ) -> httpx.Request:
        """Assemble an ``httpx.Request`` for one generated operation.

        :param method: HTTP verb.
        :param path: Operation path template, joined onto the base URL.
        :param base_url: Per-call base-URL override (``None`` → configured server).
        :param url_variables: Server-template variables (``None`` → configured ones).
        :param request: Operation request model; source of path/query/header params.
        :param request_body_required: Raise ``ValueError`` if no body is produced.
        :param request_has_path_params / request_has_query_params: Whether the
            model contributes path / query parameters.
        :param user_agent_header: Header name to carry the SDK user agent.
        :param accept_header_value: Value for the ``Accept`` header.
        :param _globals: Global parameter values merged into the request.
        :param security: Security scheme object, or a zero-arg callable producing one.
        :param timeout_ms: Request timeout in milliseconds.
        :param get_serialized_body: Lazy body serializer for the operation.
        :param url_override: Fully-formed URL that bypasses templating entirely.
        """
        client = self.sdk_configuration.client

        query_params = {}

        url = url_override
        if url is None:
            url = utils.generate_url(
                self.get_url(base_url, url_variables),
                path,
                request if request_has_path_params else None,
                _globals if request_has_path_params else None,
            )

            # When a full URL override is supplied, its query string is taken
            # as-is; only templated URLs get model-derived query params.
            query_params = utils.get_query_params(
                request if request_has_query_params else None,
                _globals if request_has_query_params else None,
            )

        headers = utils.get_headers(request, _globals)
        headers["Accept"] = accept_header_value
        headers[user_agent_header] = self.sdk_configuration.user_agent

        if security is not None:
            if callable(security):
                security = security()

            if security is not None:
                security_headers, security_query_params = utils.get_security(security)
                headers = {**headers, **security_headers}
                query_params = {**query_params, **security_query_params}

        serialized_request_body = SerializedRequestBody("application/octet-stream")
        if get_serialized_body is not None:
            rb = get_serialized_body()
            if request_body_required and rb is None:
                raise ValueError("request body is required")

            if rb is not None:
                serialized_request_body = rb

        # Multipart content types carry their boundary and are set by httpx itself.
        if (
            serialized_request_body.media_type is not None
            and serialized_request_body.media_type
            not in (
                "multipart/form-data",
                "multipart/mixed",
            )
        ):
            headers["content-type"] = serialized_request_body.media_type

        timeout = timeout_ms / 1000 if timeout_ms is not None else None

        return client.build_request(
            method,
            url,
            params=query_params,
            content=serialized_request_body.content,
            data=serialized_request_body.data,
            files=serialized_request_body.files,
            headers=headers,
            timeout=timeout,
        )

    def _log_request(self, req, logger):
        # Debug-log the outgoing request exactly once, after before_request hooks ran.
        logger.debug(
            "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s",
            req.method,
            req.url,
            req.headers,
            get_body_content(req)
        )

    def _handle_request_exception(self, hook_ctx, exc, logger):
        # Route a transport-level exception through the after_error hooks. A hook
        # may swallow it (return error=None); then we simply fall through and the
        # caller raises the generic "No response received" error.
        _, err = self.sdk_configuration.get_hooks().after_error(
            AfterErrorContext(hook_ctx), None, exc
        )
        if err is not None:
            logger.debug("Request Exception", exc_info=True)
            raise err

    def _check_response(self, hook_ctx, http_res, error_status_codes, stream, logger):
        # Validate the (possibly missing) response and run after_error hooks on
        # error statuses; returns the response to surface to the caller.
        if http_res is None:
            logger.debug("Raising no response SDK error")
            raise models.SDKError("No response received")

        logger.debug(
            "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s",
            http_res.status_code,
            http_res.url,
            http_res.headers,
            "" if stream else http_res.text
        )

        if utils.match_status_codes(error_status_codes, http_res.status_code):
            result, err = self.sdk_configuration.get_hooks().after_error(
                AfterErrorContext(hook_ctx), http_res, None
            )
            if err is not None:
                logger.debug("Request Exception", exc_info=True)
                raise err
            if result is not None:
                http_res = result
            else:
                logger.debug("Raising unexpected SDK error")
                raise models.SDKError("Unexpected error occurred")

        return http_res

    def _apply_after_success(self, hook_ctx, http_res, error_status_codes):
        # Non-error responses are piped through the after_success hooks.
        if not utils.match_status_codes(error_status_codes, http_res.status_code):
            http_res = self.sdk_configuration.get_hooks().after_success(
                AfterSuccessContext(hook_ctx), http_res
            )
        return http_res

    def do_request(
        self,
        hook_ctx,
        request,
        error_status_codes,
        stream=False,
        retry_config: Optional[Tuple[RetryConfig, List[str]]] = None,
    ) -> httpx.Response:
        """Send ``request`` synchronously, driving the full hook pipeline.

        :param hook_ctx: Operation metadata for the hooks.
        :param request: Prepared ``httpx.Request``.
        :param error_status_codes: Status-code patterns treated as errors.
        :param stream: If true, the response body is left unconsumed (SSE).
        :param retry_config: Optional ``(RetryConfig, retryable codes)`` pair.
        :raises models.SDKError: When no response is produced or hooks leave an
            error response unhandled.
        """
        client = self.sdk_configuration.client
        logger = self.sdk_configuration.debug_logger

        def do():
            http_res = None
            try:
                req = self.sdk_configuration.get_hooks().before_request(
                    BeforeRequestContext(hook_ctx), request
                )
                self._log_request(req, logger)
                http_res = client.send(req, stream=stream)
            except Exception as e:
                self._handle_request_exception(hook_ctx, e, logger)

            return self._check_response(hook_ctx, http_res, error_status_codes, stream, logger)

        if retry_config is not None:
            http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1]))
        else:
            http_res = do()

        return self._apply_after_success(hook_ctx, http_res, error_status_codes)

    async def do_request_async(
        self,
        hook_ctx,
        request,
        error_status_codes,
        stream=False,
        retry_config: Optional[Tuple[RetryConfig, List[str]]] = None,
    ) -> httpx.Response:
        """Async twin of :meth:`do_request`; identical semantics, awaited send."""
        client = self.sdk_configuration.async_client
        logger = self.sdk_configuration.debug_logger

        async def do():
            http_res = None
            try:
                req = self.sdk_configuration.get_hooks().before_request(
                    BeforeRequestContext(hook_ctx), request
                )
                self._log_request(req, logger)
                http_res = await client.send(req, stream=stream)
            except Exception as e:
                self._handle_request_exception(hook_ctx, e, logger)

            return self._check_response(hook_ctx, http_res, error_status_codes, stream, logger)

        if retry_config is not None:
            http_res = await utils.retry_async(
                do, utils.Retries(retry_config[0], retry_config[1])
            )
        else:
            http_res = await do()

        return self._apply_after_success(hook_ctx, http_res, error_status_codes)
class Chat(BaseSDK):
    r"""Chat Completion API.

    The four generated entry points (``stream``/``stream_async`` and
    ``complete``/``complete_async``) share their retry resolution, HTTP request
    assembly and error translation via the private helpers below; public
    signatures are unchanged.
    """

    # Status codes retried when a retry configuration is in effect.
    _RETRYABLE_CODES = ["429", "500", "502", "503", "504"]
    # Status-code patterns routed through error handling by do_request.
    _ERROR_CODES = ["422", "4XX", "5XX"]

    def _resolve_retry_config(self, retries):
        # Fold the per-call override with the SDK-wide default into the
        # (RetryConfig, retryable-codes) tuple expected by do_request, or None.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        if isinstance(retries, utils.RetryConfig):
            return (retries, self._RETRYABLE_CODES)
        return None

    def _build_chat_request(self, request, *, path, accept, request_class, server_url, timeout_ms):
        # Serialize a chat request model into an httpx.Request for `path`.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        base_url = server_url if server_url is not None else None

        return self.build_request(
            method="POST",
            path=path,
            base_url=base_url,
            url_variables=None,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value=accept,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", request_class),
            timeout_ms=timeout_ms,
        )

    def _raise_for_response(self, http_res):
        # Translate a non-success (or unexpected) response into SDK exceptions.
        if utils.match_response(http_res, "422", "application/json"):
            data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData)
            raise models.HTTPValidationError(data=data)
        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
            raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res)

        content_type = http_res.headers.get("Content-Type")
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code, http_res.text, http_res,
        )

    def stream(
        self, *,
        messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
        model: OptionalNullable[str] = "azureai",
        temperature: Optional[float] = 0.7,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = True,
        stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
        random_seed: OptionalNullable[int] = UNSET,
        response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None,
        tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET,
        tool_choice: Optional[models.ToolChoice] = "auto",
        safe_prompt: Optional[bool] = False,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
    ) -> Optional[Generator[models.CompletionEvent, None, None]]:
        r"""Stream chat completion.

        Tokens are sent as data-only server-sent events as they become
        available, with the stream terminated by a ``data: [DONE]`` message.

        :param messages: Prompt messages, each a dict with role and content.
        :param model: The ID of the model to use for this request.
        :param temperature: Sampling temperature in [0.0, 1.0]; alter this or `top_p`, not both.
        :param top_p: Nucleus-sampling probability mass; alter this or `temperature`, not both.
        :param max_tokens: Maximum number of tokens to generate (prompt + completion bounded by context length).
        :param min_tokens: Minimum number of tokens to generate.
        :param stream:
        :param stop: Token(s) that stop generation when detected.
        :param random_seed: Seed for deterministic sampling.
        :param response_format:
        :param tools:
        :param tool_choice:
        :param safe_prompt: Whether to inject a safety prompt before all conversations.
        :param retries: Override the default retry configuration for this method.
        :param server_url: Override the default server URL for this method.
        :param timeout_ms: Override the default request timeout (milliseconds).
        """
        request = models.ChatCompletionStreamRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            min_tokens=min_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            messages=utils.get_pydantic_model(messages, List[models.Messages]),
            response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]),
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
            tool_choice=tool_choice,
            safe_prompt=safe_prompt,
        )

        req = self._build_chat_request(
            request,
            path="/chat/completions#stream",
            accept="text/event-stream",
            request_class=models.ChatCompletionStreamRequest,
            server_url=server_url,
            timeout_ms=timeout_ms,
        )

        http_res = self.do_request(
            hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security),
            request=req,
            error_status_codes=self._ERROR_CODES,
            stream=True,
            retry_config=self._resolve_retry_config(retries),
        )

        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]")
        self._raise_for_response(http_res)

    async def stream_async(
        self, *,
        messages: Union[List[models.Messages], List[models.MessagesTypedDict]],
        model: OptionalNullable[str] = "azureai",
        temperature: Optional[float] = 0.7,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = True,
        stop: Optional[Union[models.Stop, models.StopTypedDict]] = None,
        random_seed: OptionalNullable[int] = UNSET,
        response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None,
        tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET,
        tool_choice: Optional[models.ToolChoice] = "auto",
        safe_prompt: Optional[bool] = False,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
    ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]:
        r"""Stream chat completion (async).

        Async twin of :meth:`stream`; same parameters and server-sent-event
        semantics, returning an async generator of completion events.

        :param messages: Prompt messages, each a dict with role and content.
        :param model: The ID of the model to use for this request.
        :param temperature: Sampling temperature in [0.0, 1.0]; alter this or `top_p`, not both.
        :param top_p: Nucleus-sampling probability mass; alter this or `temperature`, not both.
        :param max_tokens: Maximum number of tokens to generate (prompt + completion bounded by context length).
        :param min_tokens: Minimum number of tokens to generate.
        :param stream:
        :param stop: Token(s) that stop generation when detected.
        :param random_seed: Seed for deterministic sampling.
        :param response_format:
        :param tools:
        :param tool_choice:
        :param safe_prompt: Whether to inject a safety prompt before all conversations.
        :param retries: Override the default retry configuration for this method.
        :param server_url: Override the default server URL for this method.
        :param timeout_ms: Override the default request timeout (milliseconds).
        """
        request = models.ChatCompletionStreamRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            min_tokens=min_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            messages=utils.get_pydantic_model(messages, List[models.Messages]),
            response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]),
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
            tool_choice=tool_choice,
            safe_prompt=safe_prompt,
        )

        req = self._build_chat_request(
            request,
            path="/chat/completions#stream",
            accept="text/event-stream",
            request_class=models.ChatCompletionStreamRequest,
            server_url=server_url,
            timeout_ms=timeout_ms,
        )

        http_res = await self.do_request_async(
            hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security),
            request=req,
            error_status_codes=self._ERROR_CODES,
            stream=True,
            retry_config=self._resolve_retry_config(retries),
        )

        if utils.match_response(http_res, "200", "text/event-stream"):
            return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]")
        self._raise_for_response(http_res)

    def complete(
        self, *,
        messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]],
        model: OptionalNullable[str] = "azureai",
        temperature: Optional[float] = 0.7,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = False,
        stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None,
        random_seed: OptionalNullable[int] = UNSET,
        response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None,
        tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET,
        tool_choice: Optional[models.ChatCompletionRequestToolChoice] = "auto",
        safe_prompt: Optional[bool] = False,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
    ) -> Optional[models.ChatCompletionResponse]:
        r"""Chat Completion.

        Blocking variant: the server holds the request open until completion
        and the full result is returned as JSON.

        :param messages: Prompt messages, each a dict with role and content.
        :param model: The ID of the model to use for this request.
        :param temperature: Sampling temperature in [0.0, 1.0]; alter this or `top_p`, not both.
        :param top_p: Nucleus-sampling probability mass; alter this or `temperature`, not both.
        :param max_tokens: Maximum number of tokens to generate (prompt + completion bounded by context length).
        :param min_tokens: Minimum number of tokens to generate.
        :param stream: Whether to stream back partial progress as server-sent events.
        :param stop: Token(s) that stop generation when detected.
        :param random_seed: Seed for deterministic sampling.
        :param response_format:
        :param tools:
        :param tool_choice:
        :param safe_prompt: Whether to inject a safety prompt before all conversations.
        :param retries: Override the default retry configuration for this method.
        :param server_url: Override the default server URL for this method.
        :param timeout_ms: Override the default request timeout (milliseconds).
        """
        request = models.ChatCompletionRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            min_tokens=min_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]),
            response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]),
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
            tool_choice=tool_choice,
            safe_prompt=safe_prompt,
        )

        req = self._build_chat_request(
            request,
            path="/chat/completions",
            accept="application/json",
            request_class=models.ChatCompletionRequest,
            server_url=server_url,
            timeout_ms=timeout_ms,
        )

        http_res = self.do_request(
            hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security),
            request=req,
            error_status_codes=self._ERROR_CODES,
            retry_config=self._resolve_retry_config(retries),
        )

        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse])
        self._raise_for_response(http_res)

    async def complete_async(
        self, *,
        messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]],
        model: OptionalNullable[str] = "azureai",
        temperature: Optional[float] = 0.7,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = False,
        stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None,
        random_seed: OptionalNullable[int] = UNSET,
        response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None,
        tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET,
        tool_choice: Optional[models.ChatCompletionRequestToolChoice] = "auto",
        safe_prompt: Optional[bool] = False,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
    ) -> Optional[models.ChatCompletionResponse]:
        r"""Chat Completion (async).

        Async twin of :meth:`complete`; same parameters and blocking-JSON
        semantics.

        :param messages: Prompt messages, each a dict with role and content.
        :param model: The ID of the model to use for this request.
        :param temperature: Sampling temperature in [0.0, 1.0]; alter this or `top_p`, not both.
        :param top_p: Nucleus-sampling probability mass; alter this or `temperature`, not both.
        :param max_tokens: Maximum number of tokens to generate (prompt + completion bounded by context length).
        :param min_tokens: Minimum number of tokens to generate.
        :param stream: Whether to stream back partial progress as server-sent events.
        :param stop: Token(s) that stop generation when detected.
        :param random_seed: Seed for deterministic sampling.
        :param response_format:
        :param tools:
        :param tool_choice:
        :param safe_prompt: Whether to inject a safety prompt before all conversations.
        :param retries: Override the default retry configuration for this method.
        :param server_url: Override the default server URL for this method.
        :param timeout_ms: Override the default request timeout (milliseconds).
        """
        request = models.ChatCompletionRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            min_tokens=min_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]),
            response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]),
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
            tool_choice=tool_choice,
            safe_prompt=safe_prompt,
        )

        req = self._build_chat_request(
            request,
            path="/chat/completions",
            accept="application/json",
            request_class=models.ChatCompletionRequest,
            server_url=server_url,
            timeout_ms=timeout_ms,
        )

        http_res = await self.do_request_async(
            hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security),
            request=req,
            error_status_codes=self._ERROR_CODES,
            retry_config=self._resolve_retry_config(retries),
        )

        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse])
        self._raise_for_response(http_res)
# pyright: reportReturnType = false
from typing_extensions import Protocol, runtime_checkable
import httpx
from typing import Any, Optional, Union


@runtime_checkable
class HttpClient(Protocol):
    """Structural type for the synchronous HTTP client used by the SDK.

    Any object with ``send`` and ``build_request`` methods matching
    ``httpx.Client`` satisfies this protocol, so callers may inject a custom
    or instrumented client.
    """

    def send(
        self,
        request: httpx.Request,
        *,
        stream: bool = False,
        auth: Union[
            httpx._types.AuthTypes, httpx._client.UseClientDefault, None
        ] = httpx.USE_CLIENT_DEFAULT,
        follow_redirects: Union[
            bool, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
    ) -> httpx.Response:
        pass

    def build_request(
        self,
        method: str,
        url: httpx._types.URLTypes,
        *,
        content: Optional[httpx._types.RequestContent] = None,
        data: Optional[httpx._types.RequestData] = None,
        files: Optional[httpx._types.RequestFiles] = None,
        json: Optional[Any] = None,
        params: Optional[httpx._types.QueryParamTypes] = None,
        headers: Optional[httpx._types.HeaderTypes] = None,
        cookies: Optional[httpx._types.CookieTypes] = None,
        timeout: Union[
            httpx._types.TimeoutTypes, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
        extensions: Optional[httpx._types.RequestExtensions] = None,
    ) -> httpx.Request:
        pass


@runtime_checkable
class AsyncHttpClient(Protocol):
    """Structural type for the asynchronous HTTP client used by the SDK.

    Mirrors ``httpx.AsyncClient``: ``send`` is awaitable while
    ``build_request`` remains synchronous.
    """

    async def send(
        self,
        request: httpx.Request,
        *,
        stream: bool = False,
        auth: Union[
            httpx._types.AuthTypes, httpx._client.UseClientDefault, None
        ] = httpx.USE_CLIENT_DEFAULT,
        follow_redirects: Union[
            bool, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
    ) -> httpx.Response:
        pass

    def build_request(
        self,
        method: str,
        url: httpx._types.URLTypes,
        *,
        content: Optional[httpx._types.RequestContent] = None,
        data: Optional[httpx._types.RequestData] = None,
        files: Optional[httpx._types.RequestFiles] = None,
        json: Optional[Any] = None,
        params: Optional[httpx._types.QueryParamTypes] = None,
        headers: Optional[httpx._types.HeaderTypes] = None,
        cookies: Optional[httpx._types.CookieTypes] = None,
        timeout: Union[
            httpx._types.TimeoutTypes, httpx._client.UseClientDefault
        ] = httpx.USE_CLIENT_DEFAULT,
        extensions: Optional[httpx._types.RequestExtensions] = None,
    ) -> httpx.Request:
        pass
ResponseFormat, ResponseFormatTypedDict, ResponseFormats +from .sdkerror import SDKError +from .security import Security, SecurityTypedDict +from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .tool import Tool, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict +from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict + +__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", 
"TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py new file mode 100644 index 00000000..c7bc4b48 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +AssistantMessageRole = Literal["assistant"] + +class AssistantMessageTypedDict(TypedDict): + content: NotRequired[Nullable[str]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: NotRequired[AssistantMessageRole] + + +class AssistantMessage(BaseModel): + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: Optional[AssistantMessageRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["content", "tool_calls", "prefix", "role"] + nullable_fields = ["content", "tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py new file mode 100644 index 00000000..acfd5bb3 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai_azure.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + finish_reason: ChatCompletionChoiceFinishReason + message: NotRequired[AssistantMessageTypedDict] + + +class ChatCompletionChoice(BaseModel): + index: int + finish_reason: ChatCompletionChoiceFinishReason + message: Optional[AssistantMessage] = None + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py new file mode 100644 index 00000000..352e8837 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ChatCompletionRequestToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionRequestTypedDict(TypedDict): + messages: List[ChatCompletionRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: NotRequired[Nullable[str]] + r"""The ID of the model to use for this request.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[ChatCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ChatCompletionRequestToolChoice] + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionRequest(BaseModel): + messages: List[ChatCompletionRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: OptionalNullable[str] = "azureai" + r"""The ID of the model to use for this request.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[ChatCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionRequestToolChoice] = "auto" + safe_prompt: Optional[bool] = False + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +ChatCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py new file mode 100644 index 00000000..88591210 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai_azure.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: NotRequired[int] + choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + + +class ChatCompletionResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py new file mode 100644 index 00000000..85276b15 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -0,0 +1,107 
@@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionStreamRequestTypedDict(TypedDict): + messages: List[MessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: NotRequired[Nullable[str]] + r"""The ID of the model to use for this request.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[StopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ToolChoice] + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionStreamRequest(BaseModel): + messages: List[Messages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: OptionalNullable[str] = "azureai" + r"""The ID of the model to use for this request.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[Stop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ToolChoice] = "auto" + safe_prompt: Optional[bool] = False + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +StopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +Stop = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py new file mode 100644 index 00000000..f51aca3c --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai_azure.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py new file mode 100644 index 00000000..2f8f4b9c --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai_azure.types import BaseModel +from typing import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py new file mode 100644 index 00000000..76f7fce8 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, TypedDict + + +FinishReason = Literal["stop", "length", "error", "tool_calls"] + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[FinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + delta: DeltaMessage + finish_reason: Nullable[FinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["finish_reason"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != 
UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py new file mode 100644 index 00000000..a45f2bdb --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class ContentChunkTypedDict(TypedDict): + text: str + + +class ContentChunk(BaseModel): + text: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py new file mode 100644 index 00000000..68d0221d --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[str] + content: NotRequired[str] + tool_calls: NotRequired[Nullable[ToolCallTypedDict]] + + +class DeltaMessage(BaseModel): + role: Optional[str] = None + content: Optional[str] = None + tool_calls: OptionalNullable[ToolCall] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role", "content", "tool_calls"] + nullable_fields = ["tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py new file mode 100644 index 00000000..6ffcacf2 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/function.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from typing import Any, Dict, Optional, TypedDict +from typing_extensions import NotRequired + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + + +class Function(BaseModel): + name: str + parameters: Dict[str, Any] + description: Optional[str] = "" + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py new file mode 100644 index 00000000..2a9bc801 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from typing import Any, Dict, TypedDict, Union + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + arguments: Arguments + + +ArgumentsTypedDict = Union[Dict[str, Any], str] + + +Arguments = Union[Dict[str, Any], str] + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py new file mode 100644 index 00000000..de07a3d4 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .validationerror import ValidationError +from mistralai_azure import utils +from mistralai_azure.types import BaseModel +from typing import List, Optional + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[ValidationError]] = None + + + +class HTTPValidationError(Exception): + r"""Validation Error""" + data: HTTPValidationErrorData + + def __init__(self, data: HTTPValidationErrorData): + self.data = data + + def __str__(self) -> str: + return utils.marshal_json(self.data, HTTPValidationErrorData) + diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py new file mode 100644 index 00000000..0dac0f6b --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ResponseFormats = Literal["text", "json_object"] +r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
@dataclass
class SDKError(Exception):
    """Represents an error returned by the API.

    Carries the human-readable message, the HTTP status code (-1 when the
    failure happened before a status was received), the raw response body,
    and optionally the underlying httpx response object.
    """

    message: str
    status_code: int = -1
    body: str = ""
    raw_response: Optional[httpx.Response] = None

    def __str__(self):
        # Append the response body on its own line only when one is present.
        suffix = f"\n{self.body}" if len(self.body) > 0 else ""
        return f"{self.message}: Status {self.status_code}{suffix}"
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .contentchunk import ContentChunk, ContentChunkTypedDict
from mistralai_azure.types import BaseModel
from typing import List, Literal, Optional, TypedDict, Union
from typing_extensions import NotRequired


# The only role value a system message may carry.
Role = Literal["system"]

class SystemMessageTypedDict(TypedDict):
    """Plain-dict mirror of ``SystemMessage``."""

    content: ContentTypedDict
    role: NotRequired[Role]


class SystemMessage(BaseModel):
    """A chat message fixed to the ``"system"`` role."""

    # Either a plain string or a list of content chunks.
    content: Content
    role: Optional[Role] = "system"


# Aliases declared after use; valid via `from __future__ import annotations`.
ContentTypedDict = Union[str, List[ContentChunkTypedDict]]


Content = Union[str, List[ContentChunk]]
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .function import Function, FunctionTypedDict
from mistralai_azure.types import BaseModel
import pydantic
from typing import Final, Optional, TypedDict
from typing_extensions import Annotated


class ToolTypedDict(TypedDict):
    """Plain-dict mirror of ``Tool``."""

    function: FunctionTypedDict


class Tool(BaseModel):
    """A tool the model may invoke, wrapping a function definition."""

    function: Function
    # Constant discriminator: always "function"; the pydantic alias makes it
    # serialize under the JSON key "type".
    TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .functioncall import FunctionCall, FunctionCallTypedDict
from mistralai_azure.types import BaseModel
import pydantic
from typing import Final, Optional, TypedDict
from typing_extensions import Annotated, NotRequired


class ToolCallTypedDict(TypedDict):
    """Plain-dict mirror of ``ToolCall``."""

    function: FunctionCallTypedDict
    id: NotRequired[str]


class ToolCall(BaseModel):
    """One tool invocation requested by the model."""

    function: FunctionCall
    # NOTE(review): the default is the literal string "null", not None —
    # looks intentional in this generated code (mirrors the wire format?);
    # confirm against the API spec before changing.
    id: Optional[str] = "null"
    # Constant discriminator serialized under the JSON key "type".
    TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from mistralai_azure.types import BaseModel
from typing import TypedDict


class UsageInfoTypedDict(TypedDict):
    """Plain-dict mirror of ``UsageInfo``."""

    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class UsageInfo(BaseModel):
    """Token accounting attached to a completion response."""

    prompt_tokens: int
    completion_tokens: int
    # NOTE(review): presumably prompt_tokens + completion_tokens; the model
    # does not enforce that invariant.
    total_tokens: int
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from mistralai_azure.types import BaseModel
from typing import List, TypedDict, Union


class ValidationErrorTypedDict(TypedDict):
    """Plain-dict mirror of ``ValidationError``."""

    loc: List[LocTypedDict]
    msg: str
    type: str


class ValidationError(BaseModel):
    """A single validation failure reported by the API."""

    # Path to the offending input; each element is a field name or an index.
    loc: List[Loc]
    # Human-readable description of the failure.
    msg: str
    # Error category identifier.
    type: str


# Aliases declared after use; valid via `from __future__ import annotations`.
LocTypedDict = Union[str, int]


Loc = Union[str, int]
class MistralAzure(BaseSDK):
    r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it."""

    chat: Chat
    r"""Chat Completion API"""

    def __init__(
        self,
        azure_api_key: Union[str, Callable[[], str]],
        azure_endpoint: str,
        url_params: Optional[Dict[str, str]] = None,
        client: Optional[HttpClient] = None,
        async_client: Optional[AsyncHttpClient] = None,
        retry_config: Optional[Nullable[RetryConfig]] = None,
        debug_logger: Optional[Logger] = None,
    ) -> None:
        r"""Instantiates the SDK configuring it with the provided parameters.

        :param azure_api_key: The azure_api_key required for authentication,
            either a plain string or a zero-argument callable returning one
        :param azure_endpoint: The Azure AI endpoint URL to use for all methods
        :param url_params: Parameters to optionally template the server URL with
        :param client: The HTTP client to use for all synchronous methods
        :param async_client: The Async HTTP client to use for all asynchronous methods
        :param retry_config: The retry configuration to use for all supported methods
        :param debug_logger: Logger receiving request/response debug output;
            defaults to a no-op logger when omitted
        """
        # Normalise the endpoint so it always ends with "v1/".
        # if azure_endpoint doesn't end with `/v1` add it
        if not azure_endpoint.endswith("/"):
            azure_endpoint += "/"
        if not azure_endpoint.endswith("v1/"):
            azure_endpoint += "v1/"
        server_url = azure_endpoint

        if client is None:
            client = httpx.Client()

        # NOTE(review): asserts are stripped under `python -O`; these are
        # developer-time protocol checks, not runtime validation.
        assert issubclass(
            type(client), HttpClient
        ), "The provided client must implement the HttpClient protocol."

        if async_client is None:
            async_client = httpx.AsyncClient()

        assert issubclass(
            type(async_client), AsyncHttpClient
        ), "The provided async_client must implement the AsyncHttpClient protocol."

        if debug_logger is None:
            debug_logger = NoOpLogger()

        # A callable key is wrapped in a lambda so a fresh Security object
        # (and hence a fresh key value) is produced on every invocation.
        security: Any = None
        if callable(azure_api_key):
            security = lambda: models.Security(  # pylint: disable=unnecessary-lambda-assignment
                api_key=azure_api_key()
            )
        else:
            security = models.Security(api_key=azure_api_key)

        if server_url is not None:
            if url_params is not None:
                server_url = utils.template_url(server_url, url_params)

        BaseSDK.__init__(
            self,
            SDKConfiguration(
                client=client,
                async_client=async_client,
                security=security,
                server_url=server_url,
                server=None,
                retry_config=retry_config,
                debug_logger=debug_logger,
            ),
        )

        # Give SDK hooks a chance to swap the server URL / HTTP client at init.
        hooks = SDKHooks()

        current_server_url, *_ = self.sdk_configuration.get_server_details()
        server_url, self.sdk_configuration.client = hooks.sdk_init(
            current_server_url, self.sdk_configuration.client
        )
        if current_server_url != server_url:
            self.sdk_configuration.server_url = server_url

        # pylint: disable=protected-access
        self.sdk_configuration.__dict__["_hooks"] = hooks

        self._init_sdks()

    def _init_sdks(self):
        # Sub-SDKs share this instance's configuration object.
        self.chat = Chat(self.sdk_configuration)
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""


from ._hooks import SDKHooks
from .httpclient import AsyncHttpClient, HttpClient
from .utils import Logger, RetryConfig, remove_suffix
from dataclasses import dataclass
from mistralai_azure import models
from mistralai_azure.types import OptionalNullable, UNSET
from pydantic import Field
from typing import Callable, Dict, Optional, Tuple, Union


SERVER_PROD = "prod"
r"""Production server"""
SERVERS = {
    SERVER_PROD: "https://api.mistral.ai",
}
"""Contains the list of servers available to the SDK"""


@dataclass
class SDKConfiguration:
    """Mutable configuration shared by the SDK and its sub-SDKs."""

    client: HttpClient
    async_client: AsyncHttpClient
    debug_logger: Logger
    # Static credentials, or a callable producing fresh credentials per use.
    security: Optional[Union[models.Security,Callable[[], models.Security]]] = None
    # An explicit server_url takes precedence over the named `server` entry
    # (see get_server_details below).
    server_url: Optional[str] = ""
    server: Optional[str] = ""
    language: str = "python"
    openapi_doc_version: str = "0.0.2"
    sdk_version: str = "1.0.0-rc.2"
    gen_version: str = "2.388.1"
    user_agent: str = "speakeasy-sdk/python 1.0.0-rc.2 2.388.1 0.0.2 mistralai_azure"
    # NOTE(review): pydantic's Field used as a default on a stdlib dataclass
    # field — appears intentional in this generated code; confirm it actually
    # yields UNSET at runtime rather than a FieldInfo object.
    retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
    timeout_ms: Optional[int] = None

    def __post_init__(self):
        self._hooks = SDKHooks()

    def get_server_details(self) -> Tuple[str, Dict[str, str]]:
        """Return ``(base_url, url_params)`` for the configured server.

        Prefers an explicit ``server_url`` (trailing slash stripped); otherwise
        falls back to the named server, defaulting to production. Note the
        fallback mutates ``self.server`` as a side effect.
        """
        if self.server_url is not None and self.server_url:
            return remove_suffix(self.server_url, "/"), {}
        if not self.server:
            self.server = SERVER_PROD

        if self.server not in SERVERS:
            raise ValueError(f"Invalid server \"{self.server}\"")

        return SERVERS[self.server], {}


    def get_hooks(self) -> SDKHooks:
        return self._hooks
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from pydantic import ConfigDict, model_serializer
from pydantic import BaseModel as PydanticBaseModel
from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType
from typing_extensions import TypeAliasType, TypeAlias


class BaseModel(PydanticBaseModel):
    """Package-wide pydantic base model.

    Accepts population by field alias, allows arbitrary (non-pydantic) types,
    and disables the protected ``model_`` namespace warnings.
    """

    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=()
    )


class Unset(BaseModel):
    """Sentinel type distinguishing "not provided" from an explicit ``None``."""

    @model_serializer(mode="plain")
    def serialize_model(self):
        # Serialize to a recognizable marker string so downstream serializers
        # can detect and drop unset fields.
        return UNSET_SENTINEL

    def __bool__(self) -> Literal[False]:
        # Always falsy, matching the "absent value" it represents.
        return False


UNSET = Unset()
UNSET_SENTINEL = "~?~unset~?~sentinel~?~"


T = TypeVar("T")
# Static type-checkers see plain Unions; at runtime the same names are built
# with TypeAliasType so the generic aliases remain usable objects.
if TYPE_CHECKING:
    Nullable: TypeAlias = Union[T, None]
    OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset]
else:
    Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,))
    OptionalNullable = TypeAliasType(
        "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,)
    )

# Distinct NewTypes over int/str — presumably used to tag values that fall
# outside a declared enum/Literal; confirm against call sites.
UnrecognizedInt = NewType("UnrecognizedInt", int)
UnrecognizedStr = NewType("UnrecognizedStr", str)
file mode 100644 index 00000000..95aa1b60 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .annotations import get_discriminator +from .enums import OpenEnumMeta +from .headers import get_headers, get_response_headers +from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, +) +from .queryparams import get_query_params +from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig +from .requestbodies import serialize_request_body, SerializedRequestBody +from .security import get_security +from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, +) +from .url import generate_url, template_url, remove_suffix +from .values import get_global_from_env, match_content_type, match_status_codes, match_response +from .logger import Logger, get_body_content, NoOpLogger + +__all__ = [ + "BackoffStrategy", + "FieldMetadata", + "find_metadata", + "FormMetadata", + "generate_url", + "get_body_content", + "get_discriminator", + "get_global_from_env", + "get_headers", + "get_pydantic_model", + "get_query_params", + "get_response_headers", + "get_security", + "HeaderMetadata", + "Logger", + "marshal_json", + "match_content_type", + "match_status_codes", + "match_response", + "MultipartFormMetadata", + "NoOpLogger", + "OpenEnumMeta", + "PathParamMetadata", + "QueryParamMetadata", + "remove_suffix", + "Retries", + "retry", + "retry_async", + "RetryConfig", + "RequestMetadata", + "SecurityMetadata", + "serialize_decimal", + "serialize_float", + "serialize_int", + 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import Any

def get_discriminator(model: Any, fieldname: str, key: str) -> str:
    """Return the discriminator value of ``model`` as a string.

    For dicts the value is looked up under ``key``; for objects it is read
    from attribute ``fieldname`` (falling back to its upper-cased variant,
    which is how constant discriminator fields are named on the models).

    :raises ValueError: when neither the key nor the attribute is present.
    """
    if isinstance(model, dict):
        # Bug fix: the original wrapped `model.get(key)` in
        # `except AttributeError`, but dict.get never raises AttributeError,
        # so the guard was dead code and a missing key silently produced the
        # string "None". Check membership explicitly and fail loudly instead.
        if key not in model:
            raise ValueError(f'Could not find discriminator key {key} in {model}')
        return f'{model[key]}'

    if hasattr(model, fieldname):
        return f'{getattr(model, fieldname)}'

    fieldname = fieldname.upper()
    if hasattr(model, fieldname):
        return f'{getattr(model, fieldname)}'

    raise ValueError(f'Could not find discriminator field {fieldname} in {model}')
class ServerEvent:
    """One parsed Server-Sent Events message.

    Instances start with every field ``None`` (the class-level defaults) and
    are filled in field by field while a raw event block is parsed.
    """

    id: Optional[str] = None
    event: Optional[str] = None
    # Accumulated "data:" payload; may later be replaced by its JSON-decoded
    # value when the text looks like JSON or a JSON primitive.
    data: Optional[str] = None
    retry: Optional[int] = None


# Byte sequences that terminate one SSE message: CRLFCRLF, LFLF, or CRCR.
MESSAGE_BOUNDARIES = [
    b"\r\n\r\n",
    b"\n\n",
    b"\r\r",
]
+ if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def stream_events( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> Generator[T, None, None]: + buffer = bytearray() + position = 0 + discard = False + for chunk in response.iter_bytes(): + # We've encountered the sentinel value and should no longer process + # incoming data. Instead we throw new data away until the server closes + # the connection. 
def _parse_event(
    raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None
) -> Tuple[Optional[T], bool]:
    """Parse one raw SSE message block into a decoded event.

    Returns ``(event_or_None, discard)``: ``event_or_None`` is the decoded
    event when the block carried at least one recognized field, and
    ``discard`` is True when the data payload equals ``sentinel`` (signalling
    the caller to stop processing further input).
    """
    block = raw.decode()
    # SSE lines may be separated by \r\n, \n, or \r.
    lines = re.split(r"\r?\n|\r", block)
    publish = False
    event = ServerEvent()
    data = ""
    for line in lines:
        if not line:
            continue

        # Lines without a "field: value" shape (or starting with ":") are
        # ignored per the SSE format.
        delim = line.find(":")
        if delim <= 0:
            continue

        field = line[0:delim]
        value = line[delim + 1 :] if delim < len(line) - 1 else ""
        # A single leading space after the colon is not part of the value.
        if len(value) and value[0] == " ":
            value = value[1:]

        if field == "event":
            event.event = value
            publish = True
        elif field == "data":
            # Multi-line data fields accumulate, newline-separated.
            data += value + "\n"
            publish = True
        elif field == "id":
            event.id = value
            publish = True
        elif field == "retry":
            event.retry = int(value) if value.isdigit() else None
            publish = True

    # e.g. the "[DONE]" marker: tell the caller to discard further input.
    if sentinel and data == f"{sentinel}\n":
        return None, True

    if data:
        # Strip the trailing newline added by the accumulation loop.
        data = data[:-1]
        event.data = data

        data_is_primitive = (
            data.isnumeric() or data == "true" or data == "false" or data == "null"
        )
        data_is_json = (
            data.startswith("{") or data.startswith("[") or data.startswith('"')
        )

        # Best effort: replace the textual payload with its JSON value;
        # on failure the raw string is kept.
        if data_is_primitive or data_is_json:
            try:
                event.data = json.loads(data)
            except Exception:
                pass

    out = None
    if publish:
        # Round-trip through JSON so `decoder` receives a uniform string form.
        out = decoder(json.dumps(event.__dict__))

    return out, False
def _populate_form(
    field_name: str,
    explode: bool,
    obj: Any,
    delimiter: str,
    form: Dict[str, List[str]],
):
    """Serialize ``obj`` into ``form`` (mutated in place and returned).

    ``explode=True`` emits one form field per member; otherwise members are
    joined with ``delimiter`` into a single field named ``field_name``.
    Handles pydantic models, dicts, lists, and scalar fallbacks; ``None``
    values are always skipped.
    """
    if obj is None:
        return form

    if isinstance(obj, BaseModel):
        items = []

        obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
        for name in obj_fields:
            obj_field = obj_fields[name]
            # Prefer the wire alias over the Python attribute name.
            obj_field_name = obj_field.alias if obj_field.alias is not None else name
            if obj_field_name == "":
                continue

            val = getattr(obj, name)
            if val is None:
                continue

            if explode:
                form[obj_field_name] = [_val_to_string(val)]
            else:
                items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}")

        if len(items) > 0:
            form[field_name] = [delimiter.join(items)]
    elif isinstance(obj, Dict):
        items = []
        for key, value in obj.items():
            if value is None:
                continue

            if explode:
                form[key] = [_val_to_string(value)]
            else:
                items.append(f"{key}{delimiter}{_val_to_string(value)}")

        if len(items) > 0:
            form[field_name] = [delimiter.join(items)]
    elif isinstance(obj, List):
        items = []

        for value in obj:
            if value is None:
                continue

            if explode:
                # Exploded lists append repeated values under the same name.
                if not field_name in form:
                    form[field_name] = []
                form[field_name].append(_val_to_string(value))
            else:
                items.append(_val_to_string(value))

        if len(items) > 0:
            form[field_name] = [delimiter.join([str(item) for item in items])]
    else:
        # Scalar fallback: a single stringified value.
        form[field_name] = [_val_to_string(obj)]

    return form
def serialize_form_data(data: Any) -> Dict[str, Any]:
    """Serialize ``data`` into form fields keyed by wire name.

    Pydantic models are walked field by field, using each field's
    ``FormMetadata`` annotation to decide between JSON marshalling and
    "form"-style population; plain dicts are stringified value by value.

    :raises ValueError: for a form style other than ``"form"``.
    :raises TypeError: when ``data`` is neither a pydantic model nor a dict.
    """
    form: Dict[str, List[str]] = {}

    if isinstance(data, BaseModel):
        data_fields: Dict[str, FieldInfo] = data.__class__.model_fields
        data_field_types = get_type_hints(data.__class__)
        for name in data_fields:
            field = data_fields[name]

            val = getattr(data, name)
            if val is None:
                continue

            # Only fields annotated with FormMetadata participate.
            metadata = find_field_metadata(field, FormMetadata)
            if metadata is None:
                continue

            # Prefer the wire alias over the Python attribute name.
            f_name = field.alias if field.alias is not None else name

            if metadata.json:
                form[f_name] = [marshal_json(val, data_field_types[name])]
            else:
                if metadata.style == "form":
                    _populate_form(
                        f_name,
                        metadata.explode,
                        val,
                        ",",
                        form,
                    )
                else:
                    raise ValueError(f"Invalid form style for field {name}")
    elif isinstance(data, Dict):
        for key, value in data.items():
            form[key] = [_val_to_string(value)]
    else:
        raise TypeError(f"Invalid request body type {type(data)} for form data")

    return form
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + List, + Optional, +) +from httpx import Headers +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + HeaderMetadata, + find_field_metadata, +) + +from .values import _populate_from_globals, _val_to_string + + +def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: + headers: Dict[str, str] = {} + + globals_already_populated = [] + if headers_params is not None: + globals_already_populated = _populate_headers(headers_params, gbls, headers, []) + if gbls is not None: + _populate_headers(gbls, None, headers, globals_already_populated) + + return headers + + +def _populate_headers( + headers_params: Any, + gbls: Any, + header_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(headers_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + f_name = field.alias if field.alias is not None else name + + metadata = find_field_metadata(field, HeaderMetadata) + if metadata is None: + continue + + value, global_found = _populate_from_globals( + name, getattr(headers_params, name), HeaderMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + value = _serialize_header(metadata.explode, value) + + if value != "": + header_values[f_name] = value + + return globals_already_populated + + +def _serialize_header(explode: bool, obj: Any) -> str: + if obj is None: + return "" + + if isinstance(obj, BaseModel): + items = [] + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_param_metadata = find_field_metadata(obj_field, HeaderMetadata) + + if not obj_param_metadata: + continue + + f_name = 
obj_field.alias if obj_field.alias is not None else name + + val = getattr(obj, name) + if val is None: + continue + + if explode: + items.append(f"{f_name}={_val_to_string(val)}") + else: + items.append(f_name) + items.append(_val_to_string(val)) + + if len(items) > 0: + return ",".join(items) + elif isinstance(obj, Dict): + items = [] + + for key, value in obj.items(): + if value is None: + continue + + if explode: + items.append(f"{key}={_val_to_string(value)}") + else: + items.append(key) + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join([str(item) for item in items]) + elif isinstance(obj, List): + items = [] + + for value in obj: + if value is None: + continue + + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join(items) + else: + return f"{_val_to_string(obj)}" + + return "" + + +def get_response_headers(headers: Headers) -> Dict[str, List[str]]: + res: Dict[str, List[str]] = {} + for k, v in headers.items(): + if not k in res: + res[k] = [] + + res[k].append(v) + return res diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/logger.py b/packages/mistralai_azure/src/mistralai_azure/utils/logger.py new file mode 100644 index 00000000..7e4bbeac --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/logger.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
 DO NOT EDIT."""

import httpx
from typing import Any, Protocol

class Logger(Protocol):
    """Structural logging interface: anything with a ``debug`` method."""
    def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
        pass

class NoOpLogger:
    """Logger implementation that silently discards all messages."""
    def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
        pass

def get_body_content(req: httpx.Request) -> str:
    # Only touch req.content when it is already materialized; accessing
    # .content on a streaming request would raise.
    return "" if not hasattr(req, "_content") else str(req.content)

diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/metadata.py b/packages/mistralai_azure/src/mistralai_azure/utils/metadata.py
new file mode 100644
index 00000000..173b3e5c
--- /dev/null
+++ b/packages/mistralai_azure/src/mistralai_azure/utils/metadata.py
@@ -0,0 +1,118 @@
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import Optional, Type, TypeVar, Union
from dataclasses import dataclass
from pydantic.fields import FieldInfo


T = TypeVar("T")


@dataclass
class SecurityMetadata:
    """Marks a field as part of a security scheme (option/scheme flags plus
    the scheme type, sub type and wire field name)."""
    option: bool = False
    scheme: bool = False
    scheme_type: Optional[str] = None
    sub_type: Optional[str] = None
    field_name: Optional[str] = None

    def get_field_name(self, default: str) -> str:
        """Return the wire name for this field, falling back to *default*."""
        return self.field_name or default


@dataclass
class ParamMetadata:
    """Base serialization settings shared by parameter kinds."""
    serialization: Optional[str] = None
    style: str = "simple"
    explode: bool = False


@dataclass
class PathParamMetadata(ParamMetadata):
    """Marks a field as a path parameter (OpenAPI default: simple/no-explode)."""
    pass


@dataclass
class QueryParamMetadata(ParamMetadata):
    """Marks a field as a query parameter (OpenAPI default: form/explode)."""
    style: str = "form"
    explode: bool = True


@dataclass
class HeaderMetadata(ParamMetadata):
    """Marks a field as a header parameter."""
    pass


@dataclass
class RequestMetadata:
    """Marks a field as the request body with its media type."""
    media_type: str = "application/octet-stream"


@dataclass
class MultipartFormMetadata:
    """Marks a field's role within a multipart/form-data body."""
    file: bool = False
    content: bool = False
    json: bool = False


@dataclass
class FormMetadata:
    """Marks a field as a urlencoded form field."""
    json: bool = False
    style: str = "form"
    explode: bool = True


class FieldMetadata:
    """Aggregate annotation attached to model fields; each slot holds the
    metadata for one serialization role.  Passing ``True`` for a slot uses
    that role's defaults."""
    security: Optional[SecurityMetadata] = None
    path: Optional[PathParamMetadata] = None
    query: Optional[QueryParamMetadata] = None
    header: Optional[HeaderMetadata] = None
    request: Optional[RequestMetadata] = None
    form: Optional[FormMetadata] = None
    multipart: Optional[MultipartFormMetadata] = None

    def __init__(
        self,
        security: Optional[SecurityMetadata] = None,
        path: Optional[Union[PathParamMetadata, bool]] = None,
        query: Optional[Union[QueryParamMetadata, bool]] = None,
        header: Optional[Union[HeaderMetadata, bool]] = None,
        request: Optional[Union[RequestMetadata, bool]] = None,
        form: Optional[Union[FormMetadata, bool]] = None,
        multipart: Optional[Union[MultipartFormMetadata, bool]] = None,
    ):
        self.security = security
        self.path = PathParamMetadata() if isinstance(path, bool) else path
        self.query = QueryParamMetadata() if isinstance(query, bool) else query
        self.header = HeaderMetadata() if isinstance(header, bool) else header
        self.request = RequestMetadata() if isinstance(request, bool) else request
        self.form = FormMetadata() if isinstance(form, bool) else form
        self.multipart = (
            MultipartFormMetadata() if isinstance(multipart, bool) else multipart
        )


def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]:
    """Return the *metadata_type* slot of a field's FieldMetadata annotation,
    or None if the field has no such annotation/slot."""
    metadata = find_metadata(field_info, FieldMetadata)
    if not metadata:
        return None

    fields = metadata.__dict__

    for field in fields:
        if isinstance(fields[field], metadata_type):
            return fields[field]

    return None


def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]:
    """Return the first annotation of *metadata_type* attached to the field,
    or None."""
    metadata = field_info.metadata
    if not metadata:
        return None

    for md in metadata:
        if isinstance(md, metadata_type):
            return md

    return None
diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py b/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py
new file mode 100644
index 00000000..1c8c5834
--- /dev/null
+++ b/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py
@@ -0,0 +1,203 @@
"""Code generated
by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import (
    Any,
    Dict,
    get_type_hints,
    List,
    Optional,
)

from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .metadata import (
    QueryParamMetadata,
    find_field_metadata,
)
from .values import _get_serialized_params, _populate_from_globals, _val_to_string
from .forms import _populate_form


def get_query_params(
    query_params: Any,
    gbls: Optional[Any] = None,
) -> Dict[str, List[str]]:
    """Build the query-parameter map from *query_params*, then fill any
    remaining annotated fields from the SDK globals *gbls* (fields already
    satisfied by a global are skipped on the second pass)."""
    params: Dict[str, List[str]] = {}

    globals_already_populated = _populate_query_params(query_params, gbls, params, [])
    if gbls is not None:
        _populate_query_params(gbls, None, params, globals_already_populated)

    return params


def _populate_query_params(
    query_params: Any,
    gbls: Any,
    query_param_values: Dict[str, List[str]],
    skip_fields: List[str],
) -> List[str]:
    """Serialize every QueryParamMetadata-annotated field of *query_params*
    into *query_param_values*; returns the field names filled from globals."""
    globals_already_populated: List[str] = []

    # Non-model inputs carry no annotated query-param fields.
    if not isinstance(query_params, BaseModel):
        return globals_already_populated

    param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields
    param_field_types = get_type_hints(query_params.__class__)
    for name in param_fields:
        if name in skip_fields:
            continue

        field = param_fields[name]

        metadata = find_field_metadata(field, QueryParamMetadata)
        if not metadata:
            continue

        value = getattr(query_params, name) if query_params is not None else None

        value, global_found = _populate_from_globals(
            name, value, QueryParamMetadata, gbls
        )
        if global_found:
            globals_already_populated.append(name)

        f_name = field.alias if field.alias is not None else name
        serialization = metadata.serialization
        if serialization is not None:
            # Custom serialization (e.g. "json") takes precedence over style.
            serialized_parms = _get_serialized_params(
                metadata, f_name, value, param_field_types[name]
            )
            # NOTE(review): `value` here shadows the outer loop's `value`;
            # harmless because the outer value is not used again afterwards.
            for key, value in serialized_parms.items():
                if key in query_param_values:
                    query_param_values[key].extend(value)
                else:
                    query_param_values[key] = [value]
        else:
            # Dispatch on OpenAPI serialization style.
            style = metadata.style
            if style == "deepObject":
                _populate_deep_object_query_params(f_name, value, query_param_values)
            elif style == "form":
                _populate_delimited_query_params(
                    metadata, f_name, value, ",", query_param_values
                )
            elif style == "pipeDelimited":
                _populate_delimited_query_params(
                    metadata, f_name, value, "|", query_param_values
                )
            else:
                raise NotImplementedError(
                    f"query param style {style} not yet supported"
                )

    return globals_already_populated


def _populate_deep_object_query_params(
    field_name: str,
    obj: Any,
    params: Dict[str, List[str]],
):
    """Serialize *obj* using deepObject style, i.e. ``field[key]=value``
    entries; dispatches on model vs dict input."""
    if obj is None:
        return

    if isinstance(obj, BaseModel):
        _populate_deep_object_query_params_basemodel(field_name, obj, params)
    elif isinstance(obj, Dict):
        _populate_deep_object_query_params_dict(field_name, obj, params)


def _populate_deep_object_query_params_basemodel(
    prior_params_key: str,
    obj: Any,
    params: Dict[str, List[str]],
):
    """deepObject recursion step for pydantic models; *prior_params_key* is
    the bracketed prefix built so far."""
    if obj is None:
        return

    if not isinstance(obj, BaseModel):
        return

    obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
    for name in obj_fields:
        obj_field = obj_fields[name]

        f_name = obj_field.alias if obj_field.alias is not None else name

        params_key = f"{prior_params_key}[{f_name}]"

        obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata)
        if obj_param_metadata is None:
            continue

        obj_val = getattr(obj, name)
        if obj_val is None:
            continue

        if isinstance(obj_val, BaseModel):
            _populate_deep_object_query_params_basemodel(params_key, obj_val, params)
        elif isinstance(obj_val, Dict):
            _populate_deep_object_query_params_dict(params_key, obj_val, params)
        elif isinstance(obj_val, List):
            _populate_deep_object_query_params_list(params_key, obj_val, params)
        else:
            params[params_key] = [_val_to_string(obj_val)]


def _populate_deep_object_query_params_dict(
    prior_params_key: str,
    value: Dict,
    params: Dict[str, List[str]],
):
    """deepObject recursion step for plain dicts."""
    if value is None:
        return

    for key, val in value.items():
        if val is None:
            continue

        params_key = f"{prior_params_key}[{key}]"

        if isinstance(val, BaseModel):
            _populate_deep_object_query_params_basemodel(params_key, val, params)
        elif isinstance(val, Dict):
            _populate_deep_object_query_params_dict(params_key, val, params)
        elif isinstance(val, List):
            _populate_deep_object_query_params_list(params_key, val, params)
        else:
            params[params_key] = [_val_to_string(val)]


def _populate_deep_object_query_params_list(
    params_key: str,
    value: List,
    params: Dict[str, List[str]],
):
    """deepObject leaf for lists: repeat the key once per element."""
    if value is None:
        return

    for val in value:
        if val is None:
            continue

        if params.get(params_key) is None:
            params[params_key] = []

        params[params_key].append(_val_to_string(val))


def _populate_delimited_query_params(
    metadata: QueryParamMetadata,
    field_name: str,
    obj: Any,
    delimiter: str,
    query_param_values: Dict[str, List[str]],
):
    """form / pipeDelimited styles share the urlencoded-form serializer,
    differing only in the join delimiter."""
    _populate_form(
        field_name,
        metadata.explode,
        obj,
        delimiter,
        query_param_values,
    )
diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py b/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py
new file mode 100644
index 00000000..4f586ae7
--- /dev/null
+++ b/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py
@@ -0,0 +1,66 @@
"""Code generated by Speakeasy (https://speakeasy.com).
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import io
from dataclasses import dataclass
import re
from typing import (
    Any,
    Optional,
)

# Maps the generator's serialization method names to default media types.
SERIALIZATION_METHOD_TO_CONTENT_TYPE = {
    "json": "application/json",
    "form": "application/x-www-form-urlencoded",
    "multipart": "multipart/form-data",
    "raw": "application/octet-stream",
    "string": "text/plain",
}


@dataclass
class SerializedRequestBody:
    """A request body ready for httpx: exactly one of content / data / files
    is populated depending on the media type."""

    media_type: str
    content: Optional[Any] = None
    data: Optional[Any] = None
    files: Optional[Any] = None


def serialize_request_body(
    request_body: Any,
    nullable: bool,
    optional: bool,
    serialization_method: str,
    request_body_type: Any,
) -> Optional[SerializedRequestBody]:
    """Serialize *request_body* according to *serialization_method*.

    Args:
        request_body: the value to serialize (may be ``None``).
        nullable: whether the schema allows an explicit ``null`` body.
        optional: whether the body may be omitted entirely.
        serialization_method: one of the keys of
            ``SERIALIZATION_METHOD_TO_CONTENT_TYPE``.
        request_body_type: the declared (annotated) type of the body, used
            by the JSON marshaller.

    Returns:
        A ``SerializedRequestBody``, or ``None`` when the body is absent and
        the schema marks it optional (and not nullable).

    Raises:
        TypeError: if the body type does not match the media type.
    """
    if request_body is None:
        # An omitted optional body sends nothing; a nullable body falls
        # through and is serialized as an explicit null payload.
        if not nullable and optional:
            return None

    media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method]

    serialized_request_body = SerializedRequestBody(media_type)

    if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None:
        # Imported lazily so this module can be loaded (and non-JSON paths
        # used) without pulling in the serializer/form stack.
        from .serializers import marshal_json

        serialized_request_body.content = marshal_json(request_body, request_body_type)
    elif re.match(r"multipart\/.*", media_type) is not None:
        from .forms import serialize_multipart_form

        (
            serialized_request_body.media_type,
            serialized_request_body.data,
            serialized_request_body.files,
        ) = serialize_multipart_form(media_type, request_body)
    elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None:
        from .forms import serialize_form_data

        serialized_request_body.data = serialize_form_data(request_body)
    elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)):
        serialized_request_body.content = request_body
    elif isinstance(request_body, str):
        serialized_request_body.content = request_body
    else:
        raise TypeError(
            f"invalid request body type {type(request_body)} for mediaType {media_type}"
        )

    return serialized_request_body
b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py
new file mode 100644
index 00000000..a06f9279
--- /dev/null
+++ b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py
@@ -0,0 +1,216 @@
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import random
import time
from typing import List

import httpx


class BackoffStrategy:
    """Exponential-backoff tuning knobs; intervals/elapsed time are in
    milliseconds."""

    initial_interval: int
    max_interval: int
    exponent: float
    max_elapsed_time: int

    def __init__(
        self,
        initial_interval: int,
        max_interval: int,
        exponent: float,
        max_elapsed_time: int,
    ):
        self.initial_interval = initial_interval
        self.max_interval = max_interval
        self.exponent = exponent
        self.max_elapsed_time = max_elapsed_time


class RetryConfig:
    """Retry strategy selection ("backoff" enables retries) plus backoff
    settings and whether connection errors are retried."""

    strategy: str
    backoff: BackoffStrategy
    retry_connection_errors: bool

    def __init__(
        self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool
    ):
        self.strategy = strategy
        self.backoff = backoff
        self.retry_connection_errors = retry_connection_errors


class Retries:
    """A RetryConfig paired with the status codes (e.g. "500", "5XX") that
    should trigger a retry."""

    config: RetryConfig
    status_codes: List[str]

    def __init__(self, config: RetryConfig, status_codes: List[str]):
        self.config = config
        self.status_codes = status_codes


class TemporaryError(Exception):
    """Internal signal: the response matched a retryable status code."""

    response: httpx.Response

    def __init__(self, response: httpx.Response):
        self.response = response


class PermanentError(Exception):
    """Internal signal: the failure should not be retried; wraps the cause."""

    inner: Exception

    def __init__(self, inner: Exception):
        self.inner = inner


def retry(func, retries: Retries):
    """Invoke *func* (a zero-arg callable returning an httpx.Response),
    retrying with exponential backoff when the response status matches
    ``retries.status_codes`` or on connection/timeout errors (if enabled).
    """
    if retries.config.strategy == "backoff":

        def do_request() -> httpx.Response:
            res: httpx.Response
            try:
                res = func()

                for code in retries.status_codes:
                    if "X" in code.upper():
                        # Wildcard pattern like "5XX": match the whole
                        # hundred-range of the leading digit.
                        code_range = int(code[0])

                        status_major = res.status_code / 100

                        if code_range <= status_major < code_range + 1:
                            raise TemporaryError(res)
                    else:
                        parsed_code = int(code)

                        if res.status_code == parsed_code:
                            raise TemporaryError(res)
            except httpx.ConnectError as exception:
                # Re-raising the bare ConnectError lets the backoff loop
                # treat it as retryable; wrapping makes it permanent.
                if retries.config.retry_connection_errors:
                    raise

                raise PermanentError(exception) from exception
            except httpx.TimeoutException as exception:
                if retries.config.retry_connection_errors:
                    raise

                raise PermanentError(exception) from exception
            except TemporaryError:
                raise
            except Exception as exception:
                raise PermanentError(exception) from exception

            return res

        return retry_with_backoff(
            do_request,
            retries.config.backoff.initial_interval,
            retries.config.backoff.max_interval,
            retries.config.backoff.exponent,
            retries.config.backoff.max_elapsed_time,
        )

    # Any other strategy: single attempt, no retries.
    return func()


async def retry_async(func, retries: Retries):
    """Async counterpart of ``retry``; *func* is a zero-arg coroutine
    function returning an httpx.Response."""
    if retries.config.strategy == "backoff":

        async def do_request() -> httpx.Response:
            res: httpx.Response
            try:
                res = await func()

                for code in retries.status_codes:
                    if "X" in code.upper():
                        # Wildcard pattern like "5XX" (see retry()).
                        code_range = int(code[0])

                        status_major = res.status_code / 100

                        if code_range <= status_major < code_range + 1:
                            raise TemporaryError(res)
                    else:
                        parsed_code = int(code)

                        if res.status_code == parsed_code:
                            raise TemporaryError(res)
            except httpx.ConnectError as exception:
                if retries.config.retry_connection_errors:
                    raise

                raise PermanentError(exception) from exception
            except httpx.TimeoutException as exception:
                if retries.config.retry_connection_errors:
                    raise

                raise PermanentError(exception) from exception
            except TemporaryError:
                raise
            except Exception as exception:
                raise PermanentError(exception) from exception

            return res

        return await retry_with_backoff_async(
            do_request,
            retries.config.backoff.initial_interval,
            retries.config.backoff.max_interval,
            retries.config.backoff.exponent,
            retries.config.backoff.max_elapsed_time,
        )

    return await func()


def retry_with_backoff(
    func,
    initial_interval=500,
    max_interval=60000,
    exponent=1.5,
    max_elapsed_time=3600000,
):
    """Call *func* until it succeeds, sleeping
    ``min(initial * exponent**n + jitter, max_interval)`` seconds between
    attempts; gives up after *max_elapsed_time* ms (returning the last
    retryable response, or re-raising)."""
    start = round(time.time() * 1000)
    retries = 0

    while True:
        try:
            return func()
        except PermanentError as exception:
            # Unwrap so callers see the original, non-retryable error.
            raise exception.inner
        except Exception as exception:  # pylint: disable=broad-exception-caught
            now = round(time.time() * 1000)
            if now - start > max_elapsed_time:
                if isinstance(exception, TemporaryError):
                    return exception.response

                raise
            # Intervals are in ms; sleep() takes seconds. Jitter avoids
            # synchronized retries from concurrent clients.
            sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1)
            sleep = min(sleep, max_interval / 1000)
            time.sleep(sleep)
            retries += 1


async def retry_with_backoff_async(
    func,
    initial_interval=500,
    max_interval=60000,
    exponent=1.5,
    max_elapsed_time=3600000,
):
    """Async counterpart of ``retry_with_backoff``.

    NOTE(review): uses blocking ``time.sleep`` between attempts, which stalls
    the event loop — presumably intentional in generated code, but confirm
    whether ``asyncio.sleep`` was meant here.
    """
    start = round(time.time() * 1000)
    retries = 0

    while True:
        try:
            return await func()
        except PermanentError as exception:
            raise exception.inner
        except Exception as exception:  # pylint: disable=broad-exception-caught
            now = round(time.time() * 1000)
            if now - start > max_elapsed_time:
                if isinstance(exception, TemporaryError):
                    return exception.response

                raise
            sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1)
            sleep = min(sleep, max_interval / 1000)
            time.sleep(sleep)
            retries += 1
diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/security.py b/packages/mistralai_azure/src/mistralai_azure/utils/security.py
new file mode 100644
index 00000000..aab4cb65
--- /dev/null
+++ b/packages/mistralai_azure/src/mistralai_azure/utils/security.py
@@ -0,0 +1,168 @@
"""Code generated by Speakeasy (https://speakeasy.com).
DO NOT EDIT.""" + +import base64 +from typing import ( + Any, + Dict, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) + + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + # Special case for basic auth which could be a flattened model + if metadata.sub_type == "basic" and not isinstance(value, BaseModel): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def _parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + field_name: 
str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http" and sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError("sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + else: + raise ValueError("sub type {sub_type} not supported") + else: + raise ValueError("scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) -> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + 
username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py new file mode 100644 index 00000000..a98998a3 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -0,0 +1,181 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
 DO NOT EDIT."""

from decimal import Decimal
import json
from typing import Any, Dict, List, Union, get_args
import httpx
from typing_extensions import get_origin
from pydantic import ConfigDict, create_model
from pydantic_core import from_json
from typing_inspect import is_optional_type

from ..types.basemodel import BaseModel, Nullable, OptionalNullable


def serialize_decimal(as_str: bool):
    """Return a pydantic serializer rendering Decimal as str (if *as_str*)
    or float."""
    def serialize(d):
        # NOTE(review): is_optional_type(type(d)) looks always-False, since
        # type() returns a concrete class, never Optional[...] — confirm
        # whether `d is None` alone was intended.
        if is_optional_type(type(d)) and d is None:
            return None

        if not isinstance(d, Decimal):
            raise ValueError("Expected Decimal object")

        return str(d) if as_str else float(d)

    return serialize


def validate_decimal(d):
    """Pydantic validator coercing str/int/float input into Decimal."""
    if d is None:
        return None

    if isinstance(d, Decimal):
        return d

    if not isinstance(d, (str, int, float)):
        raise ValueError("Expected string, int or float")

    # Round-trip through str avoids binary-float artifacts for float input.
    return Decimal(str(d))


def serialize_float(as_str: bool):
    """Return a pydantic serializer rendering float as str (if *as_str*)
    or as-is."""
    def serialize(f):
        # NOTE(review): same always-False guard concern as serialize_decimal.
        if is_optional_type(type(f)) and f is None:
            return None

        if not isinstance(f, float):
            raise ValueError("Expected float")

        return str(f) if as_str else f

    return serialize


def validate_float(f):
    """Pydantic validator coercing a string into float."""
    if f is None:
        return None

    if isinstance(f, float):
        return f

    if not isinstance(f, str):
        raise ValueError("Expected string")

    return float(f)


def serialize_int(as_str: bool):
    """Return a pydantic serializer rendering int as str (if *as_str*)
    or as-is."""
    def serialize(b):
        # NOTE(review): same always-False guard concern as serialize_decimal.
        if is_optional_type(type(b)) and b is None:
            return None

        if not isinstance(b, int):
            raise ValueError("Expected int")

        return str(b) if as_str else b

    return serialize


def validate_int(b):
    """Pydantic validator coercing a string into int."""
    if b is None:
        return None

    if isinstance(b, int):
        return b

    if not isinstance(b, str):
        raise ValueError("Expected string")

    return int(b)


def validate_open_enum(is_int: bool):
    """Return a validator for open enums: accepts any int (if *is_int*)
    or any string, passing unknown members through unchanged."""
    def validate(e):
        if e is None:
            return None

        if is_int:
            if not isinstance(e, int):
                raise ValueError("Expected int")
        else:
            if not isinstance(e, str):
                raise ValueError("Expected string")

        return e

    return validate

+def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/url.py b/packages/mistralai_azure/src/mistralai_azure/utils/url.py new file mode 100644 index 00000000..b201bfa4 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/url.py @@ -0,0 +1,150 @@ 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from decimal import Decimal
from typing import (
    Any,
    Dict,
    get_type_hints,
    List,
    Optional,
    Union,
    get_args,
    get_origin,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .metadata import (
    PathParamMetadata,
    find_field_metadata,
)
from .values import _get_serialized_params, _populate_from_globals, _val_to_string


def generate_url(
    server_url: str,
    path: str,
    path_params: Any,
    gbls: Optional[Any] = None,
) -> str:
    """Substitute serialized path parameters (from *path_params*, then SDK
    globals *gbls*) into *path* and join onto *server_url*."""
    path_param_values: Dict[str, str] = {}

    globals_already_populated = _populate_path_params(
        path_params, gbls, path_param_values, []
    )
    if gbls is not None:
        _populate_path_params(gbls, None, path_param_values, globals_already_populated)

    for key, value in path_param_values.items():
        path = path.replace("{" + key + "}", value, 1)

    return remove_suffix(server_url, "/") + path


def _populate_path_params(
    path_params: Any,
    gbls: Any,
    path_param_values: Dict[str, str],
    skip_fields: List[str],
) -> List[str]:
    """Serialize PathParamMetadata-annotated fields of *path_params* into
    *path_param_values*; returns the field names filled from globals."""
    globals_already_populated: List[str] = []

    # Non-model inputs carry no annotated path-param fields.
    if not isinstance(path_params, BaseModel):
        return globals_already_populated

    path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields
    path_param_field_types = get_type_hints(path_params.__class__)
    for name in path_param_fields:
        if name in skip_fields:
            continue

        field = path_param_fields[name]

        param_metadata = find_field_metadata(field, PathParamMetadata)
        if param_metadata is None:
            continue

        param = getattr(path_params, name) if path_params is not None else None
        param, global_found = _populate_from_globals(
            name, param, PathParamMetadata, gbls
        )
        if global_found:
            globals_already_populated.append(name)

        if param is None:
            continue

        f_name = field.alias if field.alias is not None else name
        serialization = param_metadata.serialization
        if serialization is not None:
            # Custom serialization (e.g. "json") overrides style handling.
            serialized_params = _get_serialized_params(
                param_metadata, f_name, param, path_param_field_types[name]
            )
            for key, value in serialized_params.items():
                path_param_values[key] = value
        else:
            pp_vals: List[str] = []
            if param_metadata.style == "simple":
                if isinstance(param, List):
                    for pp_val in param:
                        if pp_val is None:
                            continue
                        pp_vals.append(_val_to_string(pp_val))
                    path_param_values[f_name] = ",".join(pp_vals)
                elif isinstance(param, Dict):
                    for pp_key in param:
                        if param[pp_key] is None:
                            continue
                        # explode: "k=v" pairs; otherwise "k,v" pairs.
                        if param_metadata.explode:
                            pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}")
                        else:
                            pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}")
                    path_param_values[f_name] = ",".join(pp_vals)
                elif not isinstance(param, (str, int, float, complex, bool, Decimal)):
                    # Complex object: serialize its annotated fields.
                    # NOTE(review): the inner loop variable `name` shadows the
                    # outer field loop's `name`; harmless today since the
                    # outer `name` is not used after this point.
                    param_fields: Dict[str, FieldInfo] = param.__class__.model_fields
                    for name in param_fields:
                        param_field = param_fields[name]

                        param_value_metadata = find_field_metadata(
                            param_field, PathParamMetadata
                        )
                        if param_value_metadata is None:
                            continue

                        param_name = (
                            param_field.alias if param_field.alias is not None else name
                        )

                        param_field_val = getattr(param, name)
                        if param_field_val is None:
                            continue
                        if param_metadata.explode:
                            pp_vals.append(
                                f"{param_name}={_val_to_string(param_field_val)}"
                            )
                        else:
                            pp_vals.append(
                                f"{param_name},{_val_to_string(param_field_val)}"
                            )
                    path_param_values[f_name] = ",".join(pp_vals)
                else:
                    # Plain scalar.
                    path_param_values[f_name] = _val_to_string(param)

    return globals_already_populated


def is_optional(field) -> bool:
    """Return True if *field*'s type is Optional (a union containing None)."""
    return get_origin(field) is Union and type(None) in get_args(field)


def template_url(url_with_params: str, params: Dict[str, str]) -> str:
    """Replace every "{key}" placeholder in *url_with_params* with its value
    from *params*."""
    for key, value in params.items():
        url_with_params = url_with_params.replace("{" + key + "}", value)

    return url_with_params


def remove_suffix(input_string, suffix):
    """Return *input_string* with a trailing *suffix* removed, if present."""
    if suffix and
input_string.endswith(suffix): + return input_string[: -len(suffix)] + return input_string diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/values.py b/packages/mistralai_azure/src/mistralai_azure/utils/values.py new file mode 100644 index 00000000..24ccae3d --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/values.py @@ -0,0 +1,128 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +from enum import Enum +from email.message import Message +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union + +from httpx import Response +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ParamMetadata, find_field_metadata + + +def match_content_type(content_type: str, pattern: str) -> bool: + if pattern in (content_type, "*", "*/*"): + return True + + msg = Message() + msg["content-type"] = content_type + media_type = msg.get_content_type() + + if media_type == pattern: + return True + + parts = media_type.split("/") + if len(parts) == 2: + if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): + return True + + return False + + +def match_status_codes(status_codes: List[str], status_code: int) -> bool: + if "default" in status_codes: + return True + + for code in status_codes: + if code == str(status_code): + return True + + if code.endswith("XX") and code.startswith(str(status_code)[:1]): + return True + return False + + +T = TypeVar("T") + + +def get_global_from_env( + value: Optional[T], env_key: str, type_cast: Callable[[str], T] +) -> Optional[T]: + if value is not None: + return value + env_value = os.getenv(env_key) + if env_value is not None: + try: + return type_cast(env_value) + except ValueError: + pass + return None + + +def match_response( + response: Response, code: Union[str, List[str]], content_type: str +) -> bool: 
+ codes = code if isinstance(code, list) else [code] + return match_status_codes(codes, response.status_code) and match_content_type( + response.headers.get("content-type", "application/octet-stream"), content_type + ) + + +def _populate_from_globals( + param_name: str, value: Any, param_metadata_type: type, gbls: Any +) -> Tuple[Any, bool]: + if gbls is None: + return value, False + + if not isinstance(gbls, BaseModel): + raise TypeError("globals must be a pydantic model") + + global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields + found = False + for name in global_fields: + field = global_fields[name] + if name is not param_name: + continue + + found = True + + if value is not None: + return value, True + + global_value = getattr(gbls, name) + + param_metadata = find_field_metadata(field, param_metadata_type) + if param_metadata is None: + return value, True + + return global_value, True + + return value, found + + +def _val_to_string(val) -> str: + if isinstance(val, bool): + return str(val).lower() + if isinstance(val, datetime): + return str(val.isoformat().replace("+00:00", "Z")) + if isinstance(val, Enum): + return str(val.value) + + return str(val) + + +def _get_serialized_params( + metadata: ParamMetadata, field_name: str, obj: Any, typ: type +) -> Dict[str, str]: + params: Dict[str, str] = {} + + serialization = metadata.serialization + if serialization == "json": + params[field_name] = marshal_json(obj, typ) + + return params diff --git a/packages/mistralai_gcp/.genignore b/packages/mistralai_gcp/.genignore new file mode 100644 index 00000000..ea10bc8e --- /dev/null +++ b/packages/mistralai_gcp/.genignore @@ -0,0 +1,4 @@ +src/mistralai_gcp/sdk.py +README.md +USAGE.md +docs/sdks/**/README.md \ No newline at end of file diff --git a/packages/mistralai_gcp/.gitattributes b/packages/mistralai_gcp/.gitattributes new file mode 100644 index 00000000..4d75d590 --- /dev/null +++ b/packages/mistralai_gcp/.gitattributes @@ -0,0 +1,2 @@ +# This allows 
generated code to be indexed correctly +*.py linguist-generated=false \ No newline at end of file diff --git a/packages/mistralai_gcp/.gitignore b/packages/mistralai_gcp/.gitignore new file mode 100644 index 00000000..477b7729 --- /dev/null +++ b/packages/mistralai_gcp/.gitignore @@ -0,0 +1,8 @@ +.venv/ +venv/ +src/*.egg-info/ +__pycache__/ +.pytest_cache/ +.python-version +.DS_Store +pyrightconfig.json diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock new file mode 100644 index 00000000..ab483934 --- /dev/null +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -0,0 +1,142 @@ +lockVersion: 2.0.0 +id: ec60f2d8-7869-45c1-918e-773d41a8cf74 +management: + docChecksum: 5daa3767285068a2f496f5fd41eb7a01 + docVersion: 0.0.2 + speakeasyVersion: 1.356.0 + generationVersion: 2.388.1 + releaseVersion: 1.0.0-rc.2 + configChecksum: 68063242d77238d1f19a7d7b0a39c381 + published: true +features: + python: + additionalDependencies: 1.0.0 + constsAndDefaults: 1.0.2 + core: 5.3.4 + defaultEnabledRetries: 0.2.0 + envVarSecurityUsage: 0.3.1 + examples: 3.0.0 + flatRequests: 1.0.1 + globalSecurity: 3.0.1 + globalSecurityCallbacks: 1.0.0 + globalSecurityFlattening: 1.0.0 + globalServerURLs: 3.0.0 + nameOverrides: 3.0.0 + nullables: 1.0.0 + responseFormat: 1.0.0 + retries: 3.0.0 + sdkHooks: 1.0.0 + serverEvents: 1.0.2 + serverEventsSentinels: 0.1.0 + serverIDs: 3.0.0 + unions: 3.0.1 +generatedFiles: + - src/mistralai_gcp/sdkconfiguration.py + - src/mistralai_gcp/chat.py + - src/mistralai_gcp/fim.py + - .vscode/settings.json + - poetry.toml + - py.typed + - pylintrc + - pyproject.toml + - scripts/compile.sh + - scripts/publish.sh + - src/mistralai_gcp/__init__.py + - src/mistralai_gcp/basesdk.py + - src/mistralai_gcp/httpclient.py + - src/mistralai_gcp/py.typed + - src/mistralai_gcp/types/__init__.py + - src/mistralai_gcp/types/basemodel.py + - src/mistralai_gcp/utils/__init__.py + - src/mistralai_gcp/utils/annotations.py + - 
src/mistralai_gcp/utils/enums.py + - src/mistralai_gcp/utils/eventstreaming.py + - src/mistralai_gcp/utils/forms.py + - src/mistralai_gcp/utils/headers.py + - src/mistralai_gcp/utils/logger.py + - src/mistralai_gcp/utils/metadata.py + - src/mistralai_gcp/utils/queryparams.py + - src/mistralai_gcp/utils/requestbodies.py + - src/mistralai_gcp/utils/retries.py + - src/mistralai_gcp/utils/security.py + - src/mistralai_gcp/utils/serializers.py + - src/mistralai_gcp/utils/url.py + - src/mistralai_gcp/utils/values.py + - src/mistralai_gcp/models/sdkerror.py + - src/mistralai_gcp/models/completionevent.py + - src/mistralai_gcp/models/completionchunk.py + - src/mistralai_gcp/models/completionresponsestreamchoice.py + - src/mistralai_gcp/models/deltamessage.py + - src/mistralai_gcp/models/toolcall.py + - src/mistralai_gcp/models/functioncall.py + - src/mistralai_gcp/models/usageinfo.py + - src/mistralai_gcp/models/httpvalidationerror.py + - src/mistralai_gcp/models/validationerror.py + - src/mistralai_gcp/models/chatcompletionstreamrequest.py + - src/mistralai_gcp/models/tool.py + - src/mistralai_gcp/models/function.py + - src/mistralai_gcp/models/responseformat.py + - src/mistralai_gcp/models/systemmessage.py + - src/mistralai_gcp/models/contentchunk.py + - src/mistralai_gcp/models/usermessage.py + - src/mistralai_gcp/models/textchunk.py + - src/mistralai_gcp/models/assistantmessage.py + - src/mistralai_gcp/models/toolmessage.py + - src/mistralai_gcp/models/chatcompletionresponse.py + - src/mistralai_gcp/models/chatcompletionchoice.py + - src/mistralai_gcp/models/chatcompletionrequest.py + - src/mistralai_gcp/models/fimcompletionstreamrequest.py + - src/mistralai_gcp/models/fimcompletionresponse.py + - src/mistralai_gcp/models/fimcompletionrequest.py + - src/mistralai_gcp/models/security.py + - src/mistralai_gcp/models/__init__.py + - docs/models/completionevent.md + - docs/models/completionchunk.md + - docs/models/finishreason.md + - 
docs/models/completionresponsestreamchoice.md + - docs/models/deltamessage.md + - docs/models/toolcall.md + - docs/models/arguments.md + - docs/models/functioncall.md + - docs/models/usageinfo.md + - docs/models/httpvalidationerror.md + - docs/models/loc.md + - docs/models/validationerror.md + - docs/models/stop.md + - docs/models/messages.md + - docs/models/toolchoice.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/tool.md + - docs/models/function.md + - docs/models/responseformats.md + - docs/models/responseformat.md + - docs/models/content.md + - docs/models/role.md + - docs/models/systemmessage.md + - docs/models/contentchunk.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/usermessage.md + - docs/models/textchunk.md + - docs/models/assistantmessagerole.md + - docs/models/assistantmessage.md + - docs/models/toolmessagerole.md + - docs/models/toolmessage.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionrequest.md + - docs/models/security.md + - docs/models/utils/retryconfig.md + - .gitattributes + - src/mistralai_gcp/_hooks/sdkhooks.py + - src/mistralai_gcp/_hooks/types.py + - src/mistralai_gcp/_hooks/__init__.py diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml new file mode 100644 index 00000000..2a5993bd --- /dev/null +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -0,0 +1,44 @@ +configVersion: 2.0.0 +generation: + sdkClassName: MistralGCP + maintainOpenAPIOrder: true + 
usageSnippets: + optionalPropertyRendering: withExample + useClassNamesForArrayFields: true + fixes: + nameResolutionDec2023: true + parameterOrderingFeb2024: true + requestResponseComponentNamesFeb2024: true + auth: + oAuth2ClientCredentialsEnabled: true +python: + version: 1.0.0-rc.2 + additionalDependencies: + dev: + pytest: ^8.2.2 + pytest-asyncio: ^0.23.7 + main: + google-auth: ^2.31.0 + requests: ^2.32.3 + authors: + - Mistral + clientServerStatusCodesAsErrors: true + description: Python Client SDK for the Mistral AI API in GCP. + enumFormat: union + flattenGlobalSecurity: true + flattenRequests: true + imports: + option: openapi + paths: + callbacks: "" + errors: "" + operations: "" + shared: "" + webhooks: "" + inputModelSuffix: input + maxMethodParams: 4 + methodArguments: infer-optional-args + outputModelSuffix: output + packageName: mistralai-gcp + responseFormat: flat + templateVersion: v2 diff --git a/packages/mistralai_gcp/.vscode/settings.json b/packages/mistralai_gcp/.vscode/settings.json new file mode 100644 index 00000000..8d79f0ab --- /dev/null +++ b/packages/mistralai_gcp/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "python.testing.pytestArgs": ["tests", "-vv"], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "pylint.args": ["--rcfile=pylintrc"] +} diff --git a/packages/mistralai_gcp/CONTRIBUTING.md b/packages/mistralai_gcp/CONTRIBUTING.md new file mode 100644 index 00000000..d585717f --- /dev/null +++ b/packages/mistralai_gcp/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to This Repository + +Thank you for your interest in contributing to this repository. Please note that this repository contains generated code. As such, we do not accept direct changes or pull requests. Instead, we encourage you to follow the guidelines below to report issues and suggest improvements. + +## How to Report Issues + +If you encounter any bugs or have suggestions for improvements, please open an issue on GitHub. 
When reporting an issue, please provide as much detail as possible to help us reproduce the problem. This includes:
+
+- A clear and descriptive title
+- Steps to reproduce the issue
+- Expected and actual behavior
+- Any relevant logs, screenshots, or error messages
+- Information about your environment (e.g., operating system, software versions)
+  - For example can be collected using the `npx envinfo` command from your terminal if you have Node.js installed
+
+## Issue Triage and Upstream Fixes
+
+We will review and triage issues as quickly as possible. Our goal is to address bugs and incorporate improvements in the upstream source code. Fixes will be included in the next generation of the generated code.
+
+## Contact
+
+If you have any questions or need further assistance, please feel free to reach out by opening an issue.
+
+Thank you for your understanding and cooperation!
+
+The Maintainers
diff --git a/packages/mistralai_gcp/README.md b/packages/mistralai_gcp/README.md
new file mode 100644
index 00000000..a4233244
--- /dev/null
+++ b/packages/mistralai_gcp/README.md
@@ -0,0 +1,425 @@
+# Mistral on GCP Python Client
+
+
+**Prerequisites**
+
+Before you begin, you will need to create a Google Cloud project and enable the Mistral API. To do this, follow the instructions [here](https://docs.mistral.ai/deployment/cloud/vertex/).
+
+To run this locally you will also need to ensure you are authenticated with Google Cloud. You can do this by running
+
+```bash
+gcloud auth application-default login
+```
+
+## SDK Installation
+
+Install the extras dependencies specific to Google Cloud:
+
+```bash
+pip install mistralai[gcp]
+```
+
+
+## SDK Example Usage
+
+### Create Chat Completions
+
+This example shows how to create chat completions.
+
+```python
+# Synchronous Example
+from mistralai_gcp import MistralGCP
+import os
+s = MistralGCP()
+
+
+res = s.chat.complete(messages=[
+    {
+        "content": "Who is the best French painter?
Answer in one short sentence.", + "role": "user", + }, +], model="mistral-small-latest") + +if res is not None: + # handle response + pass +``` + +
+
+The same SDK client can also be used to make asynchronous requests by importing asyncio.
+```python
+# Asynchronous Example
+import asyncio
+from mistralai_gcp import MistralGCP
+import os
+
+async def main():
+    s = MistralGCP(
+        api_key=os.getenv("API_KEY", ""),
+    )
+    res = await s.chat.complete_async(messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ], model="mistral-small-latest")
+    if res is not None:
+        # handle response
+        pass
+
+asyncio.run(main())
+```
+
+
+
+## Available Resources and Operations
+
+### [chat](docs/sdks/chat/README.md)
+
+* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion
+* [create](docs/sdks/chat/README.md#create) - Chat Completion
+
+### [fim](docs/sdks/fim/README.md)
+
+* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion
+* [create](docs/sdks/fim/README.md#create) - Fim Completion
+
+
+
+## Server-sent event streaming
+
+[Server-sent events][mdn-sse] are used to stream content from certain
+operations. These operations will expose the stream as [Generator][generator] that
+can be consumed using a simple `for` loop. The loop will
+terminate when the server no longer has any events to send and closes the
+underlying connection.
+
+```python
+from mistralai_gcp import MistralGCP
+import os
+
+s = MistralGCP()
+
+
+res = s.chat.stream(messages=[
+    {
+        "content": "Who is the best French painter? Answer in one short sentence.",
+        "role": "user",
+    },
+], model="mistral-small-latest")
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+[mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events
+[generator]: https://wiki.python.org/moin/Generators
+
+
+
+## Retries
+
+Some of the endpoints in this SDK support retries.
 If you use the SDK without any configuration, it will fall back to the default retry strategy provided by the API. However, the default retry strategy can be overridden on a per-operation basis, or across the entire SDK.
+
+To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call:
+```python
+from mistralai_gcp import MistralGCP
+from mistralai_gcp.utils import BackoffStrategy, RetryConfig
+import os
+
+s = MistralGCP()
+
+
+res = s.chat.stream(
+    messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ],
+    model="mistral-small-latest",
+    retries=RetryConfig(
+        "backoff",
+        BackoffStrategy(1, 50, 1.1, 100),
+        False
+    )
+)
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK:
+```python
+from mistralai_gcp import MistralGCP
+from mistralai_gcp.utils import BackoffStrategy, RetryConfig
+import os
+
+s = MistralGCP(
+    retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False),
+)
+
+
+res = s.chat.stream(
+    messages=[
+        {
+            "content": "Who is the best French painter? Answer in one short sentence.",
+            "role": "user",
+        },
+    ],
+    model="mistral-small-latest"
+)
+
+if res is not None:
+    for event in res:
+        # handle event
+        print(event)
+
+```
+
+
+
+## Error Handling
+
+Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type.
+ +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | + +### Example + +```python +from mistralai_gcp import MistralGCP, models +import os + +s = MistralGCP() + +res = None +try: + res = s.chat.complete( + messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], + model="mistral-small-latest" + ) + +except models.HTTPValidationError as e: + # handle exception + raise(e) +except models.SDKError as e: + # handle exception + raise(e) + +if res is not None: + # handle response + pass + +``` + + + +## Server Selection + +### Select Server by Name + +You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: + +| Name | Server | Variables | +| ------ | ------------------------ | --------- | +| `prod` | `https://api.mistral.ai` | None | + +#### Example + +```python +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP(server="prod") + + +res = s.chat.stream( + messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], + model="mistral-small-latest" +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + +### Override Server URL Per-Client + +The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. 
For example: +```python +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP( + server_url="https://api.mistral.ai", +) + + +res = s.chat.stream( + messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], + model="mistral-small-latest" +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + +## Custom HTTP Client + +The Python SDK makes API calls using the [httpx](https://www.python-httpx.org/) HTTP library. In order to provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with your own HTTP client instance. +Depending on whether you are using the sync or async version of the SDK, you can pass an instance of `HttpClient` or `AsyncHttpClient` respectively, which are Protocol's ensuring that the client has the necessary methods to make API calls. +This allows you to wrap the client with your own custom logic, such as adding custom headers, logging, or error handling, or you can just pass an instance of `httpx.Client` or `httpx.AsyncClient` directly. 
+ +For example, you could specify a header for every request that this sdk makes as follows: +```python +from mistralai_gcp import MistralGCP +import httpx + +http_client = httpx.Client(headers={"x-custom-header": "someValue"}) +s = MistralGCP(client=http_client) +``` + +or you could wrap the client with your own custom logic: +```python +from mistralai_gcp import MistralGCP +from mistralai_gcp.httpclient import AsyncHttpClient +import httpx + +class CustomClient(AsyncHttpClient): + client: AsyncHttpClient + + def __init__(self, client: AsyncHttpClient): + self.client = client + + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + request.headers["Client-Level-Header"] = "added by client" + + return await self.client.send( + request, stream=stream, auth=auth, follow_redirects=follow_redirects + ) + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + return self.client.build_request( + method, + url, + content=content, + data=data, + files=files, + json=json, + params=params, + headers=headers, + cookies=cookies, + timeout=timeout, + extensions=extensions, + ) + +s = MistralGCP(async_client=CustomClient(httpx.AsyncClient())) +``` + + + +## 
Authentication + +### Per-Client Security Schemes + +This SDK supports the following security scheme globally: + +| Name | Type | Scheme | +| --------- | ---- | ----------- | +| `api_key` | http | HTTP Bearer | + +To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: +```python +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP() + + +res = s.chat.stream( + messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], + model="mistral-small-latest" +) + +if res is not None: + for event in res: + # handle event + print(event) + +``` + + + + +# Development + +## Contributions + +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. diff --git a/packages/mistralai_gcp/USAGE.md b/packages/mistralai_gcp/USAGE.md new file mode 100644 index 00000000..30fa08aa --- /dev/null +++ b/packages/mistralai_gcp/USAGE.md @@ -0,0 +1,51 @@ + +### Create Chat Completions + +This example shows how to create chat completions. + +```python +# Synchronous Example +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP() + + +res = s.chat.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="mistral-small-latest") + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai_gcp import MistralGCP +import os + +async def main(): + s = MistralGCP( + api_key=os.getenv("API_KEY", ""), + ) + res = await s.chat.complete_async(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], model="mistral-small-latest") + if res is not None: + # handle response + pass + +asyncio.run(main()) +``` + \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/arguments.md b/packages/mistralai_gcp/docs/models/arguments.md new file mode 100644 index 00000000..2e54e27e --- /dev/null +++ b/packages/mistralai_gcp/docs/models/arguments.md @@ -0,0 +1,17 @@ +# Arguments + + +## Supported Types + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/mistralai_gcp/docs/models/assistantmessage.md new file mode 100644 index 00000000..0c36cde9 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/assistantmessage.md @@ -0,0 +1,11 @@ +# AssistantMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/assistantmessagerole.md b/packages/mistralai_gcp/docs/models/assistantmessagerole.md new file mode 100644 index 00000000..658229e7 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/assistantmessagerole.md @@ -0,0 +1,8 @@ +# AssistantMessageRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionchoice.md new file mode 100644 index 00000000..6fa839b7 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionChoice + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | +| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md b/packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md new file mode 100644 index 00000000..b2f15ecb --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md @@ -0,0 +1,12 @@ +# ChatCompletionChoiceFinishReason + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `STOP` | stop | +| `LENGTH` | length | +| `MODEL_LENGTH` | model_length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md new file mode 100644 index 00000000..3e30c649 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -0,0 +1,19 @@ +# ChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md b/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md new file mode 100644 index 00000000..bc7708a6 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md @@ -0,0 +1,29 @@ +# ChatCompletionRequestMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md b/packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md new file mode 100644 index 00000000..749296d4 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md @@ -0,0 +1,19 @@ +# ChatCompletionRequestStop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md new file mode 100644 index 00000000..ed32b75e --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md @@ -0,0 +1,10 @@ +# ChatCompletionRequestToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md new file mode 100644 index 00000000..ad376158 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md @@ -0,0 +1,13 @@ +# ChatCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git 
a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md new file mode 100644 index 00000000..adc7ff99 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -0,0 +1,19 @@ +# ChatCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
| mistral-small-latest | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../models/toolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/completionchunk.md b/packages/mistralai_gcp/docs/models/completionchunk.md new file mode 100644 index 00000000..b8ae6a09 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/completionchunk.md @@ -0,0 +1,13 @@ +# CompletionChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/completionevent.md b/packages/mistralai_gcp/docs/models/completionevent.md new file mode 100644 index 00000000..7a66e8fe --- /dev/null +++ b/packages/mistralai_gcp/docs/models/completionevent.md @@ -0,0 +1,8 @@ +# CompletionEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------ | ------------------------------------------------------ | 
------------------------------------------------------ | ------------------------------------------------------ | +| `data` | [models.CompletionChunk](../models/completionchunk.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md b/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..c807dacd --- /dev/null +++ b/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | [Nullable[models.FinishReason]](../models/finishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/content.md b/packages/mistralai_gcp/docs/models/content.md new file mode 100644 index 00000000..a833dc2c --- /dev/null +++ b/packages/mistralai_gcp/docs/models/content.md @@ -0,0 +1,17 @@ +# Content + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/contentchunk.md b/packages/mistralai_gcp/docs/models/contentchunk.md new file mode 100644 index 00000000..64fc80d6 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/contentchunk.md @@ -0,0 +1,9 @@ +# ContentChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | 
------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/deltamessage.md b/packages/mistralai_gcp/docs/models/deltamessage.md new file mode 100644 index 00000000..4cb9e91e --- /dev/null +++ b/packages/mistralai_gcp/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | [OptionalNullable[models.ToolCall]](../models/toolcall.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md new file mode 100644 index 00000000..b4b024ed --- /dev/null +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -0,0 +1,17 @@ +# FIMCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md b/packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md new file mode 100644 index 00000000..a0dbb00a --- /dev/null +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md new file mode 100644 index 00000000..da786a1f --- /dev/null +++ b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md @@ -0,0 +1,13 @@ +# FIMCompletionResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | cmpl-e5cc70bb28c444948073e77776eb30ef | +| `object` | *str* | :heavy_check_mark: | N/A | chat.completion | +| `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | +| `choices` | 
List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md new file mode 100644 index 00000000..acffb536 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -0,0 +1,17 @@ +# FIMCompletionStreamRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md new file mode 100644 index 00000000..5a9e2ff0 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md @@ -0,0 +1,19 @@ +# FIMCompletionStreamRequestStop + +Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/finishreason.md b/packages/mistralai_gcp/docs/models/finishreason.md new file mode 100644 index 00000000..45a5aedb --- /dev/null +++ b/packages/mistralai_gcp/docs/models/finishreason.md @@ -0,0 +1,11 @@ +# FinishReason + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `STOP` | stop | +| `LENGTH` | length | +| `ERROR` | error | +| `TOOL_CALLS` | tool_calls | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md new file mode 100644 index 00000000..8af398f5 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/function.md @@ -0,0 +1,10 @@ +# Function + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/functioncall.md b/packages/mistralai_gcp/docs/models/functioncall.md new file mode 100644 index 00000000..7ccd90dc --- /dev/null +++ b/packages/mistralai_gcp/docs/models/functioncall.md @@ -0,0 +1,9 @@ +# FunctionCall + + +## Fields + +| 
Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.Arguments](../models/arguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/httpvalidationerror.md b/packages/mistralai_gcp/docs/models/httpvalidationerror.md new file mode 100644 index 00000000..63892430 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/httpvalidationerror.md @@ -0,0 +1,10 @@ +# HTTPValidationError + +Validation Error + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `detail` | List[[models.ValidationError](../models/validationerror.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/loc.md b/packages/mistralai_gcp/docs/models/loc.md new file mode 100644 index 00000000..d6094ac2 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/loc.md @@ -0,0 +1,17 @@ +# Loc + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/messages.md b/packages/mistralai_gcp/docs/models/messages.md new file mode 100644 index 00000000..1d394500 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/messages.md @@ -0,0 +1,29 @@ +# Messages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here 
*/ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md new file mode 100644 index 00000000..2704eab4 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/responseformat.md @@ -0,0 +1,8 @@ +# ResponseFormat + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/mistralai_gcp/docs/models/responseformats.md new file mode 100644 index 00000000..ce35fbb3 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/responseformats.md @@ -0,0 +1,11 @@ +# ResponseFormats + +An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
+ + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/role.md b/packages/mistralai_gcp/docs/models/role.md new file mode 100644 index 00000000..affca78d --- /dev/null +++ b/packages/mistralai_gcp/docs/models/role.md @@ -0,0 +1,8 @@ +# Role + + +## Values + +| Name | Value | +| -------- | -------- | +| `SYSTEM` | system | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/security.md b/packages/mistralai_gcp/docs/models/security.md new file mode 100644 index 00000000..c698674c --- /dev/null +++ b/packages/mistralai_gcp/docs/models/security.md @@ -0,0 +1,8 @@ +# Security + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `api_key` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/stop.md b/packages/mistralai_gcp/docs/models/stop.md new file mode 100644 index 00000000..ba40ca83 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/stop.md @@ -0,0 +1,19 @@ +# Stop + +Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/systemmessage.md b/packages/mistralai_gcp/docs/models/systemmessage.md new file mode 100644 index 00000000..7f827984 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/systemmessage.md @@ -0,0 +1,9 @@ +# SystemMessage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/textchunk.md b/packages/mistralai_gcp/docs/models/textchunk.md new file mode 100644 index 00000000..34e4dd6f --- /dev/null +++ b/packages/mistralai_gcp/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/mistralai_gcp/docs/models/tool.md new file mode 100644 index 00000000..291394c0 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/tool.md @@ -0,0 +1,9 @@ +# Tool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `function` | [models.Function](../models/function.md) | 
:heavy_check_mark: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md new file mode 100644 index 00000000..bd2dc9ff --- /dev/null +++ b/packages/mistralai_gcp/docs/models/toolcall.md @@ -0,0 +1,10 @@ +# ToolCall + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolchoice.md b/packages/mistralai_gcp/docs/models/toolchoice.md new file mode 100644 index 00000000..b84f51f6 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/toolchoice.md @@ -0,0 +1,10 @@ +# ToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessage.md b/packages/mistralai_gcp/docs/models/toolmessage.md new file mode 100644 index 00000000..364339e1 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/toolmessage.md @@ -0,0 +1,11 @@ +# ToolMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | 
*OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessagerole.md b/packages/mistralai_gcp/docs/models/toolmessagerole.md new file mode 100644 index 00000000..c24e59c0 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/toolmessagerole.md @@ -0,0 +1,8 @@ +# ToolMessageRole + + +## Values + +| Name | Value | +| ------ | ------ | +| `TOOL` | tool | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usageinfo.md b/packages/mistralai_gcp/docs/models/usageinfo.md new file mode 100644 index 00000000..9f56a3ae --- /dev/null +++ b/packages/mistralai_gcp/docs/models/usageinfo.md @@ -0,0 +1,10 @@ +# UsageInfo + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | +| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | +| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | +| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessage.md b/packages/mistralai_gcp/docs/models/usermessage.md new file mode 100644 index 00000000..3d96f1cd --- /dev/null +++ b/packages/mistralai_gcp/docs/models/usermessage.md @@ -0,0 +1,9 @@ +# UserMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | 
:heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessagecontent.md b/packages/mistralai_gcp/docs/models/usermessagecontent.md new file mode 100644 index 00000000..86ebd18f --- /dev/null +++ b/packages/mistralai_gcp/docs/models/usermessagecontent.md @@ -0,0 +1,17 @@ +# UserMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: List[models.TextChunk] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/usermessagerole.md b/packages/mistralai_gcp/docs/models/usermessagerole.md new file mode 100644 index 00000000..171124e4 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/usermessagerole.md @@ -0,0 +1,8 @@ +# UserMessageRole + + +## Values + +| Name | Value | +| ------ | ------ | +| `USER` | user | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/utils/retryconfig.md b/packages/mistralai_gcp/docs/models/utils/retryconfig.md new file mode 100644 index 00000000..69dd549e --- /dev/null +++ b/packages/mistralai_gcp/docs/models/utils/retryconfig.md @@ -0,0 +1,24 @@ +# RetryConfig + +Allows customizing the default retry configuration. Only usable with methods that mention they support retries. + +## Fields + +| Name | Type | Description | Example | +| ------------------------- | ----------------------------------- | --------------------------------------- | --------- | +| `strategy` | `*str*` | The retry strategy to use. | `backoff` | +| `backoff` | [BackoffStrategy](#backoffstrategy) | Configuration for the backoff strategy. | | +| `retry_connection_errors` | `*bool*` | Whether to retry on connection errors. | `true` | + +## BackoffStrategy + +The backoff strategy allows retrying a request with an exponential backoff between each retry. 
+ +### Fields + +| Name | Type | Description | Example | +| ------------------ | --------- | ----------------------------------------- | -------- | +| `initial_interval` | `*int*` | The initial interval in milliseconds. | `500` | +| `max_interval` | `*int*` | The maximum interval in milliseconds. | `60000` | +| `exponent` | `*float*` | The exponent to use for the backoff. | `1.5` | +| `max_elapsed_time` | `*int*` | The maximum elapsed time in milliseconds. | `300000` | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/validationerror.md b/packages/mistralai_gcp/docs/models/validationerror.md new file mode 100644 index 00000000..7a1654a1 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/validationerror.md @@ -0,0 +1,10 @@ +# ValidationError + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `loc` | List[[models.Loc](../models/loc.md)] | :heavy_check_mark: | N/A | +| `msg` | *str* | :heavy_check_mark: | N/A | +| `type` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/sdks/chat/README.md b/packages/mistralai_gcp/docs/sdks/chat/README.md new file mode 100644 index 00000000..6f5f1977 --- /dev/null +++ b/packages/mistralai_gcp/docs/sdks/chat/README.md @@ -0,0 +1,121 @@ +# Chat +(*chat*) + +## Overview + +Chat Completion API. + +### Available Operations + +* [stream](#stream) - Stream chat completion +* [create](#create) - Chat Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + +### Example Usage + +```python +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP() + + +res = s.chat.stream(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], model="mistral-small-latest") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | ----------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## create + +Chat Completion + +### Example Usage + +```python +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP() + + +res = s.chat.complete(messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, +], model="mistral-small-latest") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ----------------- | --------------------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. 
If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/mistralai_gcp/docs/sdks/fim/README.md b/packages/mistralai_gcp/docs/sdks/fim/README.md new file mode 100644 index 00000000..b997fabf --- /dev/null +++ b/packages/mistralai_gcp/docs/sdks/fim/README.md @@ -0,0 +1,107 @@ +# Fim +(*fim*) + +## Overview + +Fill-in-the-middle API. + +### Available Operations + +* [stream](#stream) - Stream fim completion +* [create](#create) - Fim Completion + +## stream + +Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
+ +### Example Usage + +```python +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP() + + +res = s.fim.stream(prompt="def", model="codestral-2405", suffix="return a+b") + +if res is not None: + for event in res: + # handle event + print(event) + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------- | ------------------------------------------------------------------------------------------------- | ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | + + +### Response + +**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | ----------- | ------------ | +| models.SDKError | 4xx-5xx | */* | + +## create + +FIM completion. + +### Example Usage + +```python +from mistralai_gcp import MistralGCP +import os + +s = MistralGCP() + + +res = s.fim.complete(prompt="def", model="codestral-2405", suffix="return a+b") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | Example | +| ------------- | ------------------------------------------------------------------------------------- | ------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------- | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | + + +### Response + +**[models.FIMCompletionResponse](../../models/fimcompletionresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | diff --git a/packages/mistralai_gcp/docs/sdks/mistralgcp/README.md b/packages/mistralai_gcp/docs/sdks/mistralgcp/README.md new file mode 100644 index 00000000..84963a9e --- /dev/null +++ b/packages/mistralai_gcp/docs/sdks/mistralgcp/README.md @@ -0,0 +1,9 @@ +# MistralGCP SDK + + +## Overview + +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + +### Available Operations + diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock new file mode 100644 index 00000000..a3d5456c --- /dev/null +++ b/packages/mistralai_gcp/poetry.lock @@ -0,0 +1,848 @@ +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
+ +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + +[[package]] +name = "anyio" +version = "4.4.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.8" +files = [ + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, +] + +[package.dependencies] +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" +typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} + +[package.extras] +doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (>=0.23)"] + +[[package]] +name = "astroid" +version = "3.2.4" +description = "An abstract syntax tree for Python with inference support." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, + {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "cachetools" +version = "5.4.0" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"}, + {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"}, +] + +[[package]] +name = "certifi" +version = "2024.7.4" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = 
"charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "dill" +version = "0.3.8" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "google-auth" +version = "2.32.0" +description = "Google Authentication Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "google_auth-2.32.0-py2.py3-none-any.whl", hash = "sha256:53326ea2ebec768070a94bee4e1b9194c9646ea0c2bd72422785bd0f9abfad7b"}, + {file = "google_auth-2.32.0.tar.gz", hash = "sha256:49315be72c55a6a37d62819e3573f6b416aca00721f7e3e31a008d928bf64022"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography (==36.0.2)", 
"pyopenssl (==22.0.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "httpcore" +version = "1.0.5" +description = "A minimal low-level HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.26.0)"] + +[[package]] +name = "httpx" +version = "0.27.0" +description = "The next generation HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + +[[package]] +name = "idna" +version = "3.7" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "jsonpath-python" +version = "1.0.6" +description = "A more powerful JSONPath implementation in modern python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, + {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mypy" +version = "1.10.1" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, + {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, + {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, + {file = 
"mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, + {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, + {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, + {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, + {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, + {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, + {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, + {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, + {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, + {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, + {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, + {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, + {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, + {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, + {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, +] + +[package.dependencies] +mypy-extensions = ">=1.0.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typing-extensions = ">=4.1.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, +] + +[[package]] +name = "platformdirs" +version = "4.2.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pyasn1" +version = "0.6.0" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, + {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.0" +description = "A collection of ASN.1-based protocols modules" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, + {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, +] + 
+[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + +[[package]] +name = "pydantic" +version = "2.8.2" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.20.1" +typing-extensions = [ + {version = ">=4.6.1", markers = "python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.20.1" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = 
"pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = 
"pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = 
"pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = 
"pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = 
"pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = 
"pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = 
"pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pylint" +version = "3.2.3" +description = "python code static checker" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = 
"pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, + {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, +] + +[package.dependencies] +astroid = ">=3.2.2,<=3.3.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.2", markers = "python_version < \"3.11\""}, + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, +] +isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +tomlkit = ">=0.10.1" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + +[[package]] +name = "pyright" +version = "1.1.374" +description = "Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyright-1.1.374-py3-none-any.whl", hash = "sha256:55752bcf7a3646d293cd76710a983b71e16f6128aab2d42468e6eb7e46c0a70d"}, + {file = "pyright-1.1.374.tar.gz", hash = "sha256:d01b2daf864ba5e0362e56b844984865970d7204158e61eb685e2dab7804cb82"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" + +[package.extras] +all = ["twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] + +[[package]] +name = "pytest" +version = "8.3.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, + {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} 
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, + {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, +] + +[package.dependencies] +pytest = ">=7.0.0,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = 
"tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tomlkit" +version = "0.13.0" +description = "Style preserving TOML library" +optional = false +python-versions = ">=3.8" +files = [ + {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, + {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20240316" +description = "Typing stubs for python-dateutil" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, + {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." 
+optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, +] + +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "urllib3" +version = "2.2.2" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.8" +content-hash = "a68027cf5e3c64af190addf2b94014fb7eeb47d41cdd5c7f0ae2fb87305f83d0" diff --git a/packages/mistralai_gcp/poetry.toml b/packages/mistralai_gcp/poetry.toml new file mode 100644 index 00000000..ab1033bd --- /dev/null +++ b/packages/mistralai_gcp/poetry.toml @@ -0,0 +1,2 @@ +[virtualenvs] +in-project = true diff --git a/packages/mistralai_gcp/py.typed b/packages/mistralai_gcp/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/packages/mistralai_gcp/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc new file mode 100644 index 00000000..50800386 --- /dev/null +++ b/packages/mistralai_gcp/pylintrc @@ -0,0 +1,658 @@ +[MAIN] + +# Analyse import fallback blocks. 
This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. 
+ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.8 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. 
The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots=src + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. 
+#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _, + e, + id + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. 
Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +typealias-rgx=.* + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. 
+valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=25 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. 
+single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. 
+confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught, + fixme, + relative-beyond-top-level + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. 
+enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. 
+score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. 
+contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. 
+signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml new file mode 100644 index 00000000..48841e43 --- /dev/null +++ b/packages/mistralai_gcp/pyproject.toml @@ -0,0 +1,58 @@ +[tool.poetry] +name = "mistralai-gcp" +version = "1.0.0-rc.2" +description = "Python Client SDK for the Mistral AI API in GCP." 
+authors = ["Mistral",] +readme = "README.md" +packages = [ + { include = "mistralai_gcp", from = "src" } +] +include = ["py.typed", "src/mistralai_gcp/py.typed"] + +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai_gcp/py.typed"] + +[virtualenvs] +in-project = true + +[tool.poetry.dependencies] +python = "^3.8" +google-auth = "^2.31.0" +httpx = "^0.27.0" +jsonpath-python = "^1.0.6" +pydantic = "~2.8.2" +python-dateutil = "^2.9.0.post0" +requests = "^2.32.3" +typing-inspect = "^0.9.0" + +[tool.poetry.group.dev.dependencies] +mypy = "==1.10.1" +pylint = "==3.2.3" +pyright = "==1.1.374" +pytest = "^8.2.2" +pytest-asyncio = "^0.23.7" +types-python-dateutil = "^2.9.0.20240316" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +pythonpath = ["src"] + +[tool.mypy] +disable_error_code = "misc" + +[[tool.mypy.overrides]] +module = "typing_inspect" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "jsonpath" +ignore_missing_imports = true + +[tool.pyright] +venvPath = "." +venv = ".venv" + + diff --git a/packages/mistralai_gcp/scripts/compile.sh b/packages/mistralai_gcp/scripts/compile.sh new file mode 100755 index 00000000..aa49772e --- /dev/null +++ b/packages/mistralai_gcp/scripts/compile.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +set -o pipefail # Ensure pipeline failures are propagated + +# Use temporary files to store outputs and exit statuses +declare -A output_files +declare -A status_files + +# Function to run a command with temporary output and status files +run_command() { + local cmd="$1" + local key="$2" + local output_file="$3" + local status_file="$4" + + # Run the command and store output and exit status + { + eval "$cmd" + echo $? 
> "$status_file" + } &> "$output_file" & +} + +# Create temporary files for outputs and statuses +for cmd in compileall pylint mypy pyright; do + output_files[$cmd]=$(mktemp) + status_files[$cmd]=$(mktemp) +done + +# Collect PIDs for background processes +declare -a pids + +# Run commands in parallel using temporary files +echo "Running python -m compileall" +run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" +pids+=($!) + +echo "Running pylint" +run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" +pids+=($!) + +echo "Running mypy" +run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" +pids+=($!) + +echo "Running pyright (optional)" +run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" +pids+=($!) + +# Wait for all processes to complete +echo "Waiting for processes to complete" +for pid in "${pids[@]}"; do + wait "$pid" +done + +# Print output sequentially and check for failures +failed=false +for key in "${!output_files[@]}"; do + echo "--- Output from Command: $key ---" + echo + cat "${output_files[$key]}" + echo # Empty line for separation + echo "--- End of Output from Command: $key ---" + echo + + exit_status=$(cat "${status_files[$key]}") + if [ "$exit_status" -ne 0 ]; then + echo "Command $key failed with exit status $exit_status" >&2 + failed=true + fi +done + +# Clean up temporary files +for tmp_file in "${output_files[@]}" "${status_files[@]}"; do + rm -f "$tmp_file" +done + +if $failed; then + echo "One or more commands failed." >&2 + exit 1 +else + echo "All commands completed successfully." 
+ exit 0 +fi diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh new file mode 100755 index 00000000..1ee7194c --- /dev/null +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} + +poetry publish --build --skip-existing diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py new file mode 100644 index 00000000..68138c47 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdk import * +from .sdkconfiguration import * +from .models import * diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py new file mode 100644 index 00000000..2ee66cdd --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .sdkhooks import * +from .types import * +from .registration import * diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py new file mode 100644 index 00000000..b03549c3 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py @@ -0,0 +1,16 @@ +# THIS FILE IS THE EXACT COPY OF THE ORIGINAL FILE FROM src/mistralai/_hooks/custom_user_agent.py +from typing import Union + +import httpx + +from .types import BeforeRequestContext, BeforeRequestHook + + +class CustomUserAgentHook(BeforeRequestHook): + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + request.headers["user-agent"] = ( + "mistral-client-python/" + request.headers["user-agent"].split(" ")[1] + ) + return request diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py new file mode 100644 index 00000000..304edfa2 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py @@ -0,0 +1,15 @@ +from .custom_user_agent import CustomUserAgentHook +from .types import Hooks + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. 
+ + +def init_hooks(hooks: Hooks): + # pylint: disable=unused-argument + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance + """ + hooks.register_before_request_hook(CustomUserAgentHook()) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py new file mode 100644 index 00000000..ca3b7b36 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai_gcp.httpclient import HttpClient + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + 
return base_url, client + + def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py new file mode 100644 index 00000000..f4ee7f37 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + + +from abc import ABC, abstractmethod +import httpx +from mistralai_gcp.httpclient import HttpClient +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]): + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + 
@abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py new file mode 100644 index 00000000..fd4854f7 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -0,0 +1,253 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdkconfiguration import SDKConfiguration +import httpx +from mistralai_gcp import models, utils +from mistralai_gcp._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext +from mistralai_gcp.utils import RetryConfig, SerializedRequestBody, get_body_content +from typing import Callable, List, Optional, Tuple + +class BaseSDK: + sdk_configuration: SDKConfiguration + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self.sdk_configuration = sdk_config + + def get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: + client = self.sdk_configuration.client + + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + 
self.get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if request_has_query_params else None, + ) + + headers = utils.get_headers(request, _globals) + headers["Accept"] = accept_header_value + headers[user_agent_header] = self.sdk_configuration.user_agent + + if security is not None: + if callable(security): + security = security() + + if security is not None: + security_headers, security_query_params = utils.get_security(security) + headers = {**headers, **security_headers} + query_params = {**query_params, **security_query_params} + + serialized_request_body = SerializedRequestBody("application/octet-stream") + if get_serialized_body is not None: + rb = get_serialized_body() + if request_body_required and rb is None: + raise ValueError("request body is required") + + if rb is not None: + serialized_request_body = rb + + if ( + serialized_request_body.media_type is not None + and serialized_request_body.media_type + not in ( + "multipart/form-data", + "multipart/mixed", + ) + ): + headers["content-type"] = serialized_request_body.media_type + + timeout = timeout_ms / 1000 if timeout_ms is not None else None + + return client.build_request( + method, + url, + params=query_params, + content=serialized_request_body.content, + data=serialized_request_body.data, + files=serialized_request_body.files, + headers=headers, + timeout=timeout, + ) + + def do_request( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.client + logger = self.sdk_configuration.debug_logger + + def do(): + http_res = None + try: + req = self.sdk_configuration.get_hooks().before_request( + BeforeRequestContext(hook_ctx), request + ) + logger.debug( + 
"Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req) + ) + http_res = client.send(req, stream=stream) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), None, e + ) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.SDKError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred") + + return http_res + + if retry_config is not None: + http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) + else: + http_res = do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = self.sdk_configuration.get_hooks().after_success( + AfterSuccessContext(hook_ctx), http_res + ) + + return http_res + + async def do_request_async( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.async_client + logger = self.sdk_configuration.debug_logger + async def do(): + http_res = None + try: + req = self.sdk_configuration.get_hooks().before_request( + BeforeRequestContext(hook_ctx), request + ) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + 
req.headers, + get_body_content(req) + ) + http_res = await client.send(req, stream=stream) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), None, e + ) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.SDKError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred") + + return http_res + + if retry_config is not None: + http_res = await utils.retry_async( + do, utils.Retries(retry_config[0], retry_config[1]) + ) + else: + http_res = await do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = self.sdk_configuration.get_hooks().after_success( + AfterSuccessContext(hook_ctx), http_res + ) + + return http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py new file mode 100644 index 00000000..d9ad7bcc --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -0,0 +1,458 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai_gcp import models, utils +from mistralai_gcp._hooks import HookContext +from mistralai_gcp.types import Nullable, OptionalNullable, UNSET +from mistralai_gcp.utils import eventstreaming +from typing import Any, AsyncGenerator, Generator, List, Optional, Union + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + + def stream( + self, *, + model: Nullable[str], + messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ToolChoice] = "auto", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[Generator[models.CompletionEvent, None, None]]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. 
+ :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + ) + + req = self.build_request( + method="POST", + path="/streamRawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + 
retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def stream_async( + self, *, + model: Nullable[str], + messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ToolChoice] = "auto", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + ) + + req = self.build_request( + method="POST", + path="/streamRawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + 
stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def complete( + self, *, + model: Nullable[str], + messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ChatCompletionRequestToolChoice] = "auto", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param model: ID of the model to use. 
You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + ) + + req = self.build_request( + method="POST", + path="/rawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + 
error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def complete_async( + self, *, + model: Nullable[str], + messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ChatCompletionRequestToolChoice] = "auto", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
+ :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + ) + + req = self.build_request( + method="POST", + path="/rawPredict", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + 
error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py new file mode 100644 index 00000000..47d8c9a9 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -0,0 +1,438 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai_gcp import models, utils +from mistralai_gcp._hooks import HookContext +from mistralai_gcp.types import Nullable, OptionalNullable, UNSET +from mistralai_gcp.utils import eventstreaming +from typing import Any, AsyncGenerator, Generator, Optional, Union + +class Fim(BaseSDK): + r"""Fill-in-the-middle API.""" + + + def stream( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[Generator[models.CompletionEvent, None, None]]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
+ :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/streamRawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", 
"application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def stream_async( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/streamRawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, 
"422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def complete( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FIMCompletionResponse]: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/rawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + 
security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def complete_async( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: 
Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FIMCompletionResponse]: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/rawPredict#fim", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = 
utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py new file mode 100644 index 00000000..36b642a0 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +# pyright: reportReturnType = false +from typing_extensions import Protocol, runtime_checkable +import httpx +from typing import Any, Optional, Union + + +@runtime_checkable +class HttpClient(Protocol): + def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> 
httpx.Request: + pass + + +@runtime_checkable +class AsyncHttpClient(Protocol): + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py new file mode 100644 index 00000000..79fb7c98 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict +from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestMessages, ChatCompletionRequestMessagesTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, ChatCompletionRequestTypedDict +from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict +from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from .completionevent import CompletionEvent, CompletionEventTypedDict +from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict, FinishReason +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop, FIMCompletionRequestStopTypedDict, FIMCompletionRequestTypedDict +from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict +from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict +from .function import Function, FunctionTypedDict +from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict +from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats +from .sdkerror import SDKError +from .security import Security, SecurityTypedDict +from .systemmessage import 
Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .tool import Tool, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict +from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict + +__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", 
"ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py new file mode 100644 index 00000000..f4e94f38 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +AssistantMessageRole = Literal["assistant"] + +class AssistantMessageTypedDict(TypedDict): + content: NotRequired[Nullable[str]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: NotRequired[AssistantMessageRole] + + +class AssistantMessage(BaseModel): + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: Optional[AssistantMessageRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["content", "tool_calls", "prefix", "role"] + nullable_fields = ["content", "tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py new file mode 100644 index 00000000..d868422a --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai_gcp.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + finish_reason: ChatCompletionChoiceFinishReason + message: NotRequired[AssistantMessageTypedDict] + + +class ChatCompletionChoice(BaseModel): + index: int + finish_reason: ChatCompletionChoiceFinishReason + message: Optional[AssistantMessage] = None + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py new file mode 100644 index 00000000..759aa1e5 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -0,0 +1,105 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ChatCompletionRequestToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[ChatCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ChatCompletionRequestToolChoice] + + +class ChatCompletionRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[ChatCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionRequestToolChoice] = "auto" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +ChatCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py new file mode 100644 index 00000000..c8ccdfca --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: NotRequired[int] + choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + + +class ChatCompletionResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py new file mode 100644 index 00000000..ad0fc799 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -0,0 +1,103 @@ +"""Code generated by 
Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionStreamRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[MessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[StopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ToolChoice] + + +class ChatCompletionStreamRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[Messages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[Stop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ToolChoice] = "auto" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +StopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +Stop = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py new file mode 100644 index 00000000..52266f47 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py new file mode 100644 index 00000000..5a6e3c2d --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai_gcp.types import BaseModel +from typing import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py new file mode 100644 index 00000000..83a0b02a --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, TypedDict + + +FinishReason = Literal["stop", "length", "error", "tool_calls"] + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[FinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + delta: DeltaMessage + finish_reason: Nullable[FinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["finish_reason"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = 
val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py new file mode 100644 index 00000000..9adcb95e --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class ContentChunkTypedDict(TypedDict): + text: str + + +class ContentChunk(BaseModel): + text: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py new file mode 100644 index 00000000..34cc3464 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[str] + content: NotRequired[str] + tool_calls: NotRequired[Nullable[ToolCallTypedDict]] + + +class DeltaMessage(BaseModel): + role: Optional[str] = None + content: Optional[str] = None + tool_calls: OptionalNullable[ToolCall] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role", "content", "tool_calls"] + nullable_fields = ["tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py new file mode 100644 index 00000000..15e36cc4 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +class FIMCompletionRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[FIMCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + +class FIMCompletionRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[FIMCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +FIMCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py new file mode 100644 index 00000000..27fcc4fe --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class FIMCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: NotRequired[int] + choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + + +class FIMCompletionResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py new file mode 100644 index 00000000..38888466 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -0,0 +1,92 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +class FIMCompletionStreamRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + +class FIMCompletionStreamRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[FIMCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py new file mode 100644 index 00000000..235eb34c --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from typing import Any, Dict, Optional, TypedDict +from typing_extensions import NotRequired + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + + +class Function(BaseModel): + name: str + parameters: Dict[str, Any] + description: Optional[str] = "" + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py new file mode 100644 index 00000000..c188ad42 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from typing import Any, Dict, TypedDict, Union + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + arguments: Arguments + + +ArgumentsTypedDict = Union[Dict[str, Any], str] + + +Arguments = Union[Dict[str, Any], str] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py new file mode 100644 index 00000000..0347dc16 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .validationerror import ValidationError +from mistralai_gcp import utils +from mistralai_gcp.types import BaseModel +from typing import List, Optional + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[ValidationError]] = None + + + +class HTTPValidationError(Exception): + r"""Validation Error""" + data: HTTPValidationErrorData + + def __init__(self, data: HTTPValidationErrorData): + self.data = data + + def __str__(self) -> str: + return utils.marshal_json(self.data, HTTPValidationErrorData) + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py new file mode 100644 index 00000000..5c3e9b7e --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ResponseFormats = Literal["text", "json_object"] +r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + +class ResponseFormatTypedDict(TypedDict): + type: NotRequired[ResponseFormats] + + +class ResponseFormat(BaseModel): + type: Optional[ResponseFormats] = "text" + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py new file mode 100644 index 00000000..03216cbf --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from dataclasses import dataclass +from typing import Optional +import httpx + + +@dataclass +class SDKError(Exception): + """Represents an error returned by the API.""" + + message: str + status_code: int = -1 + body: str = "" + raw_response: Optional[httpx.Response] = None + + def __str__(self): + body = "" + if len(self.body) > 0: + body = f"\n{self.body}" + + return f"{self.message}: Status {self.status_code}{body}" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py new file mode 100644 index 00000000..cd4d8f3e --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from mistralai_gcp.utils import FieldMetadata, SecurityMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class SecurityTypedDict(TypedDict): + api_key: str + + +class Security(BaseModel): + api_key: Annotated[str, FieldMetadata(security=SecurityMetadata(scheme=True, scheme_type="http", sub_type="bearer", field_name="Authorization"))] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py new file mode 100644 index 00000000..461a4ccc --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +Role = Literal["system"] + +class SystemMessageTypedDict(TypedDict): + content: ContentTypedDict + role: NotRequired[Role] + + +class SystemMessage(BaseModel): + content: Content + role: Optional[Role] = "system" + + +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py new file mode 100644 index 00000000..ecf27413 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class TextChunkTypedDict(TypedDict): + text: str + + +class TextChunk(BaseModel): + text: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py new file mode 100644 index 00000000..b4e0645f --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from mistralai_gcp.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class ToolTypedDict(TypedDict): + function: FunctionTypedDict + + +class Tool(BaseModel): + function: Function + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py new file mode 100644 index 00000000..5ea87fd3 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functioncall import FunctionCall, FunctionCallTypedDict +from mistralai_gcp.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class ToolCallTypedDict(TypedDict): + function: FunctionCallTypedDict + id: NotRequired[str] + + +class ToolCall(BaseModel): + function: FunctionCall + id: Optional[str] = "null" + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py new file mode 100644 index 00000000..e36f8033 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ToolMessageRole = Literal["tool"] + +class ToolMessageTypedDict(TypedDict): + content: str + tool_call_id: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + role: NotRequired[ToolMessageRole] + + +class ToolMessage(BaseModel): + content: str + tool_call_id: OptionalNullable[str] = UNSET + name: OptionalNullable[str] = UNSET + role: Optional[ToolMessageRole] = "tool" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["tool_call_id", "name", "role"] + nullable_fields = ["tool_call_id", "name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in 
nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py new file mode 100644 index 00000000..43877c9e --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from typing import TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + +class UsageInfo(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py new file mode 100644 index 00000000..9e82ff34 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +UserMessageRole = Literal["user"] + +class UserMessageTypedDict(TypedDict): + content: UserMessageContentTypedDict + role: NotRequired[UserMessageRole] + + +class UserMessage(BaseModel): + content: UserMessageContent + role: Optional[UserMessageRole] = "user" + + +UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] + + +UserMessageContent = Union[str, List[TextChunk]] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py new file mode 100644 index 00000000..4eee48c4 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from typing import List, TypedDict, Union + + +class ValidationErrorTypedDict(TypedDict): + loc: List[LocTypedDict] + msg: str + type: str + + +class ValidationError(BaseModel): + loc: List[Loc] + msg: str + type: str + + +LocTypedDict = Union[str, int] + + +Loc = Union[str, int] + diff --git a/packages/mistralai_gcp/src/mistralai_gcp/py.typed b/packages/mistralai_gcp/src/mistralai_gcp/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. 
diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py new file mode 100644 index 00000000..3c530c8c --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -0,0 +1,174 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +import json +from typing import Optional, Union + +import google.auth +import google.auth.credentials +import google.auth.transport +import google.auth.transport.requests +import httpx +from mistralai_gcp import models +from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks +from mistralai_gcp.chat import Chat +from mistralai_gcp.fim import Fim +from mistralai_gcp.types import Nullable + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, HttpClient +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, NoOpLogger +from .utils.retries import RetryConfig + + +class MistralGoogleCloud(BaseSDK): + r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + + chat: Chat + fim: Fim + r"""Chat Completion API""" + + def __init__( + self, + region: str = "europe-west4", + project_id: Optional[str] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: Optional[Nullable[RetryConfig]] = None, + debug_logger: Optional[Logger] = None, + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters. 
+ + :param region: The Google Cloud region to use for all methods + :param project_id: The project ID to use for all methods + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + """ + + credentials, loaded_project_id = google.auth.default( + scopes=["https://www.googleapis.com/auth/cloud-platform"], + ) + + if not isinstance(credentials, google.auth.credentials.Credentials): + raise models.SDKError( + "credentials must be an instance of google.auth.credentials.Credentials" + ) + + project_id = project_id or loaded_project_id + if project_id is None: + raise models.SDKError("project_id must be provided") + + def auth_token() -> str: + if credentials.expired: + credentials.refresh(google.auth.transport.requests.Request()) + token = credentials.token + if not token: + raise models.SDKError("Failed to get token from credentials") + return token + + if client is None: + client = httpx.Client() + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." + + if async_client is None: + async_client = httpx.AsyncClient() + + if debug_logger is None: + debug_logger = NoOpLogger() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol." 
+ + security = None + if callable(auth_token): + security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment + api_key=auth_token() + ) + else: + security = models.Security(api_key=auth_token) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + async_client=async_client, + security=security, + server_url=f"https://{region}-aiplatform.googleapis.com", + server=None, + retry_config=retry_config, + debug_logger=debug_logger, + ), + ) + + hooks = SDKHooks() + + hook = GoogleCloudBeforeRequestHook(region, project_id) + hooks.register_before_request_hook(hook) + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, self.sdk_configuration.client + ) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + + self._init_sdks() + + def _init_sdks(self): + self.chat = Chat(self.sdk_configuration) + self.fim = Fim(self.sdk_configuration) + + +class GoogleCloudBeforeRequestHook(BeforeRequestHook): + + def __init__(self, region: str, project_id: str): + self.region = region + self.project_id = project_id + + def before_request( + self, hook_ctx, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + # The goal of this function is to template in the region, project, model, and model_version into the URL path + # We do this here so that the API remains more user-friendly + model = None + model_version = None + new_content = None + if request.content: + parsed = json.loads(request.content.decode("utf-8")) + model_raw = parsed.get("model") + model = "-".join(model_raw.split("-")[:-1]) + model_version = model_raw.split("-")[-1] + parsed["model"] = model + new_content = json.dumps(parsed).encode("utf-8") + + if model == "": + raise models.SDKError("model must be provided") + + if model_version is None: + raise 
models.SDKError("model_version must be provided") + + stream = "streamRawPredict" in request.url.path + specifier = "streamRawPredict" if stream else "rawPredict" + url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model}@{model_version}:{specifier}" + + headers = dict(request.headers) + # Delete content-length header as it will need to be recalculated + headers.pop("content-length", None) + + next_request = httpx.Request( + method=request.method, + url=request.url.copy_with(path=url), + headers=headers, + content=new_content, + stream=None, + ) + + return next_request diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py new file mode 100644 index 00000000..65d3c752 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + + +from ._hooks import SDKHooks +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai_gcp import models +from mistralai_gcp.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_PROD = "prod" +r"""Production server""" +SERVERS = { + SERVER_PROD: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: HttpClient + async_client: AsyncHttpClient + debug_logger: Logger + security: Optional[Union[models.Security,Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = "0.0.2" + sdk_version: str = "1.0.0-rc.2" + gen_version: str = "2.388.1" + user_agent: str = 
"speakeasy-sdk/python 1.0.0-rc.2 2.388.1 0.0.2 mistralai-gcp" + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def __post_init__(self): + self._hooks = SDKHooks() + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_PROD + + if self.server not in SERVERS: + raise ValueError(f"Invalid server \"{self.server}\"") + + return SERVERS[self.server], {} + + + def get_hooks(self) -> SDKHooks: + return self._hooks diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py new file mode 100644 index 00000000..fc76fe0c --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basemodel import ( + BaseModel, + Nullable, + OptionalNullable, + UnrecognizedInt, + UnrecognizedStr, + UNSET, + UNSET_SENTINEL, +) + +__all__ = [ + "BaseModel", + "Nullable", + "OptionalNullable", + "UnrecognizedInt", + "UnrecognizedStr", + "UNSET", + "UNSET_SENTINEL", +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py new file mode 100644 index 00000000..a6187efa --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
# --- file: packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from pydantic import ConfigDict, model_serializer
from pydantic import BaseModel as PydanticBaseModel
from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType
from typing_extensions import TypeAliasType, TypeAlias


class BaseModel(PydanticBaseModel):
    """Base class for all generated models.

    Shared pydantic config: accept population by field name or alias, allow
    arbitrary (non-pydantic) field types, and disable the ``model_``
    protected-namespace guard so generated field names are never rejected.
    """

    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=()
    )


class Unset(BaseModel):
    """Sentinel distinguishing "field not provided" from an explicit None."""

    @model_serializer(mode="plain")
    def serialize_model(self):
        # Serializes to a recognizable marker string so callers can strip
        # unset fields from outgoing payloads.
        return UNSET_SENTINEL

    def __bool__(self) -> Literal[False]:
        # Unset is always falsy, mirroring None in truth tests.
        return False


# Module-level singleton; UNSET_SENTINEL is only read lazily at serialization
# time, so defining it after UNSET is safe.
UNSET = Unset()
UNSET_SENTINEL = "~?~unset~?~sentinel~?~"


T = TypeVar("T")
if TYPE_CHECKING:
    # Static type checkers see plain generic aliases...
    Nullable: TypeAlias = Union[T, None]
    OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset]
else:
    # ...while at runtime PEP 695-style TypeAliasType keeps the aliases
    # introspectable and subscriptable on older interpreters.
    Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,))
    OptionalNullable = TypeAliasType(
        "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,)
    )

# Markers for values that did not match any known enum/union member.
UnrecognizedInt = NewType("UnrecognizedInt", int)
UnrecognizedStr = NewType("UnrecognizedStr", str)
DO NOT EDIT.""" + +from .annotations import get_discriminator +from .enums import OpenEnumMeta +from .headers import get_headers, get_response_headers +from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, +) +from .queryparams import get_query_params +from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig +from .requestbodies import serialize_request_body, SerializedRequestBody +from .security import get_security +from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, +) +from .url import generate_url, template_url, remove_suffix +from .values import get_global_from_env, match_content_type, match_status_codes, match_response +from .logger import Logger, get_body_content, NoOpLogger + +__all__ = [ + "BackoffStrategy", + "FieldMetadata", + "find_metadata", + "FormMetadata", + "generate_url", + "get_body_content", + "get_discriminator", + "get_global_from_env", + "get_headers", + "get_pydantic_model", + "get_query_params", + "get_response_headers", + "get_security", + "HeaderMetadata", + "Logger", + "marshal_json", + "match_content_type", + "match_status_codes", + "match_response", + "MultipartFormMetadata", + "NoOpLogger", + "OpenEnumMeta", + "PathParamMetadata", + "QueryParamMetadata", + "remove_suffix", + "Retries", + "retry", + "retry_async", + "RetryConfig", + "RequestMetadata", + "SecurityMetadata", + "serialize_decimal", + "serialize_float", + "serialize_int", + "serialize_request_body", + "SerializedRequestBody", + "stream_to_text", + "template_url", + "unmarshal", + "unmarshal_json", + "validate_decimal", + "validate_float", + "validate_int", + "validate_open_enum", +] diff --git 
# --- file: packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import Any

def get_discriminator(model: Any, fieldname: str, key: str) -> str:
    """Return the discriminator value for *model* as a string.

    Dicts are looked up by *key*; objects by attribute *fieldname*, falling
    back to its upper-cased form. Raises ValueError when neither is found.
    """
    if isinstance(model, dict):
        try:
            return f'{model.get(key)}'
        # NOTE(review): dict.get does not raise AttributeError — this guard
        # looks unreachable for a plain dict; presumably kept defensively.
        except AttributeError as e:
            raise ValueError(f'Could not find discriminator key {key} in {model}') from e

    if hasattr(model, fieldname):
        return f'{getattr(model, fieldname)}'

    # Generated constant-style fields may be upper-cased.
    fieldname = fieldname.upper()
    if hasattr(model, fieldname):
        return f'{getattr(model, fieldname)}'

    raise ValueError(f'Could not find discriminator field {fieldname} in {model}')


# --- file: packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import enum


class OpenEnumMeta(enum.EnumMeta):
    """Enum metaclass for "open" enums: unknown values are passed through
    unchanged instead of raising ValueError."""

    def __call__(
        cls, value, names=None, *, module=None, qualname=None, type=None, start=1
    ):
        # The `type` kwarg also happens to be a built-in that pylint flags as
        # redeclared. Safe to ignore this lint rule with this scope.
        # pylint: disable=redefined-builtin

        # Functional-API invocation (creating a new enum class): defer to the
        # standard implementation untouched.
        if names is not None:
            return super().__call__(
                value,
                names=names,
                module=module,
                qualname=qualname,
                type=type,
                start=start,
            )

        try:
            # Value lookup: return the matching member when one exists...
            return super().__call__(
                value,
                names=names,  # pyright: ignore[reportArgumentType]
                module=module,
                qualname=qualname,
                type=type,
                start=start,
            )
        except ValueError:
            # ...otherwise hand back the raw value ("open" behavior).
            return value


# --- file: packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import re
import json
from typing import Callable, TypeVar, Optional, Generator, AsyncGenerator, Tuple
import httpx

T = TypeVar("T")


class ServerEvent:
    """One parsed server-sent event (SSE) record."""

    id: Optional[str] = None
    event: Optional[str] = None
    data: Optional[str] = None
    retry: Optional[int] = None


# Recognized SSE message terminators, checked longest-first so "\r\n\r\n" is
# not mistaken for a shorter boundary.
MESSAGE_BOUNDARIES = [
    b"\r\n\r\n",
    b"\n\n",
    b"\r\r",
]


async def stream_events_async(
    response: httpx.Response,
    decoder: Callable[[str], T],
    sentinel: Optional[str] = None,
) -> AsyncGenerator[T, None]:
    """Asynchronously yield decoded SSE events from *response*.

    *decoder* converts the JSON form of each event into T. When *sentinel*
    matches an event's data, all subsequent input is discarded.
    """
    buffer = bytearray()
    position = 0
    discard = False
    async for chunk in response.aiter_bytes():
        # We've encountered the sentinel value and should no longer process
        # incoming data. Instead we throw new data away until the server closes
        # the connection.
        if discard:
            continue

        buffer += chunk
        # Scan only the unconsumed region for a message boundary.
        for i in range(position, len(buffer)):
            char = buffer[i : i + 1]
            seq: Optional[bytes] = None
            if char in [b"\r", b"\n"]:
                for boundary in MESSAGE_BOUNDARIES:
                    seq = _peek_sequence(i, buffer, boundary)
                    if seq is not None:
                        break
            if seq is None:
                continue

            # Complete message found: parse the block and advance past the
            # boundary bytes.
            block = buffer[position:i]
            position = i + len(seq)
            event, discard = _parse_event(block, decoder, sentinel)
            if event is not None:
                yield event

        # Drop consumed bytes so the buffer does not grow unboundedly.
        if position > 0:
            buffer = buffer[position:]
            position = 0

    # Flush any trailing, unterminated event after the stream closes.
    event, discard = _parse_event(buffer, decoder, sentinel)
    if event is not None:
        yield event
def stream_events(
    response: httpx.Response,
    decoder: Callable[[str], T],
    sentinel: Optional[str] = None,
) -> Generator[T, None, None]:
    """Synchronous twin of stream_events_async: yield decoded SSE events."""
    buffer = bytearray()
    position = 0
    discard = False
    for chunk in response.iter_bytes():
        # We've encountered the sentinel value and should no longer process
        # incoming data. Instead we throw new data away until the server closes
        # the connection.
        if discard:
            continue

        buffer += chunk
        # Scan only the unconsumed region for a message boundary.
        for i in range(position, len(buffer)):
            char = buffer[i : i + 1]
            seq: Optional[bytes] = None
            if char in [b"\r", b"\n"]:
                for boundary in MESSAGE_BOUNDARIES:
                    seq = _peek_sequence(i, buffer, boundary)
                    if seq is not None:
                        break
            if seq is None:
                continue

            # Complete message found: parse it and advance past the boundary.
            block = buffer[position:i]
            position = i + len(seq)
            event, discard = _parse_event(block, decoder, sentinel)
            if event is not None:
                yield event

        # Drop consumed bytes so the buffer does not grow unboundedly.
        if position > 0:
            buffer = buffer[position:]
            position = 0

    # Flush any trailing, unterminated event after the stream closes.
    event, discard = _parse_event(buffer, decoder, sentinel)
    if event is not None:
        yield event


def _parse_event(
    raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None
) -> Tuple[Optional[T], bool]:
    """Parse one SSE message block.

    Returns (decoded_event_or_None, discard_rest_of_stream). The second
    element is True only when *sentinel* matched the accumulated data.
    """
    block = raw.decode()
    lines = re.split(r"\r?\n|\r", block)
    publish = False
    event = ServerEvent()
    data = ""
    for line in lines:
        if not line:
            continue

        # Lines without a field name (no ":" or comment lines starting with
        # ":") are skipped.
        delim = line.find(":")
        if delim <= 0:
            continue

        field = line[0:delim]
        value = line[delim + 1 :] if delim < len(line) - 1 else ""
        # Per the SSE spec, a single leading space in the value is stripped.
        if len(value) and value[0] == " ":
            value = value[1:]

        if field == "event":
            event.event = value
            publish = True
        elif field == "data":
            # Multi-line data fields are joined with "\n".
            data += value + "\n"
            publish = True
        elif field == "id":
            event.id = value
            publish = True
        elif field == "retry":
            event.retry = int(value) if value.isdigit() else None
            publish = True

    # Sentinel (e.g. "[DONE]") ends the logical stream.
    if sentinel and data == f"{sentinel}\n":
        return None, True

    if data:
        # Strip the trailing "\n" added by the join above.
        data = data[:-1]
        event.data = data

        data_is_primitive = (
            data.isnumeric() or data == "true" or data == "false" or data == "null"
        )
        data_is_json = (
            data.startswith("{") or data.startswith("[") or data.startswith('"')
        )

        # Best-effort JSON decode of the payload; the raw string is kept on
        # failure.
        if data_is_primitive or data_is_json:
            try:
                event.data = json.loads(data)
            except Exception:
                pass

    out = None
    if publish:
        # Round-trip through JSON so *decoder* receives a uniform string form.
        out = decoder(json.dumps(event.__dict__))

    return out, False


def _peek_sequence(position: int, buffer: bytearray, sequence: bytes):
    """Return *sequence* if it occurs in *buffer* at *position*, else None."""
    if len(sequence) > (len(buffer) - position):
        return None

    for i, seq in enumerate(sequence):
        if buffer[position + i] != seq:
            return None

    return sequence
# --- file: packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import (
    Any,
    Dict,
    get_type_hints,
    List,
    Tuple,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .serializers import marshal_json

from .metadata import (
    FormMetadata,
    MultipartFormMetadata,
    find_field_metadata,
)
from .values import _val_to_string


def _populate_form(
    field_name: str,
    explode: bool,
    obj: Any,
    delimiter: str,
    form: Dict[str, List[str]],
):
    """Serialize *obj* into *form* (mutated in place and also returned).

    *explode* controls whether compound values become separate keys or a
    single delimiter-joined entry under *field_name*.
    """
    if obj is None:
        return form

    if isinstance(obj, BaseModel):
        items = []

        obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
        for name in obj_fields:
            obj_field = obj_fields[name]
            # Wire name is the alias when one is declared.
            obj_field_name = obj_field.alias if obj_field.alias is not None else name
            if obj_field_name == "":
                continue

            val = getattr(obj, name)
            if val is None:
                continue

            if explode:
                form[obj_field_name] = [_val_to_string(val)]
            else:
                items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}")

        if len(items) > 0:
            form[field_name] = [delimiter.join(items)]
    elif isinstance(obj, Dict):
        items = []
        for key, value in obj.items():
            if value is None:
                continue

            if explode:
                form[key] = [_val_to_string(value)]
            else:
                items.append(f"{key}{delimiter}{_val_to_string(value)}")

        if len(items) > 0:
            form[field_name] = [delimiter.join(items)]
    elif isinstance(obj, List):
        items = []

        for value in obj:
            if value is None:
                continue

            if explode:
                # Exploded lists become repeated entries under the same key.
                if not field_name in form:
                    form[field_name] = []
                form[field_name].append(_val_to_string(value))
            else:
                items.append(_val_to_string(value))

        if len(items) > 0:
            form[field_name] = [delimiter.join([str(item) for item in items])]
    else:
        # Scalar fallback.
        form[field_name] = [_val_to_string(obj)]

    return form


def serialize_multipart_form(
    media_type: str, request: Any
) -> Tuple[str, Dict[str, Any], Dict[str, Any]]:
    """Split a pydantic *request* into (media_type, form_fields, file_parts)
    suitable for an httpx multipart upload.

    Raises TypeError for non-pydantic bodies and ValueError for malformed
    file fields.
    """
    form: Dict[str, Any] = {}
    files: Dict[str, Any] = {}

    if not isinstance(request, BaseModel):
        raise TypeError("invalid request body type")

    request_fields: Dict[str, FieldInfo] = request.__class__.model_fields
    request_field_types = get_type_hints(request.__class__)

    for name in request_fields:
        field = request_fields[name]

        val = getattr(request, name)
        if val is None:
            continue

        field_metadata = find_field_metadata(field, MultipartFormMetadata)
        if not field_metadata:
            continue

        f_name = field.alias if field.alias is not None else name

        if field_metadata.file:
            # A "file" field is itself a model holding the file name, raw
            # content, and optional content type, identified via metadata.
            file_fields: Dict[str, FieldInfo] = val.__class__.model_fields

            file_name = ""
            field_name = ""
            content = None
            content_type = None

            for file_field_name in file_fields:
                file_field = file_fields[file_field_name]

                file_metadata = find_field_metadata(file_field, MultipartFormMetadata)
                if file_metadata is None:
                    continue

                if file_metadata.content:
                    content = getattr(val, file_field_name, None)
                elif file_field_name == "content_type":
                    content_type = getattr(val, file_field_name, None)
                else:
                    field_name = (
                        file_field.alias
                        if file_field.alias is not None
                        else file_field_name
                    )
                    file_name = getattr(val, file_field_name)

            if field_name == "" or file_name == "" or content is None:
                raise ValueError("invalid multipart/form-data file")

            # httpx accepts (name, content[, content_type]) tuples.
            if content_type is not None:
                files[field_name] = (file_name, content, content_type)
            else:
                files[field_name] = (file_name, content)
        elif field_metadata.json:
            # JSON-typed parts are sent as a file-less tuple with an explicit
            # application/json content type.
            files[f_name] = (
                None,
                marshal_json(val, request_field_types[name]),
                "application/json",
            )
        else:
            if isinstance(val, List):
                values = []

                for value in val:
                    if value is None:
                        continue
                    values.append(_val_to_string(value))

                # Repeated values use the PHP-style "name[]" convention.
                form[f_name + "[]"] = values
            else:
                form[f_name] = _val_to_string(val)
    return media_type, form, files


def serialize_form_data(data: Any) -> Dict[str, Any]:
    """Serialize *data* (pydantic model or dict) for an
    application/x-www-form-urlencoded body.

    Raises ValueError for unsupported form styles and TypeError for
    unsupported body types.
    """
    form: Dict[str, List[str]] = {}

    if isinstance(data, BaseModel):
        data_fields: Dict[str, FieldInfo] = data.__class__.model_fields
        data_field_types = get_type_hints(data.__class__)
        for name in data_fields:
            field = data_fields[name]

            val = getattr(data, name)
            if val is None:
                continue

            metadata = find_field_metadata(field, FormMetadata)
            if metadata is None:
                continue

            f_name = field.alias if field.alias is not None else name

            if metadata.json:
                form[f_name] = [marshal_json(val, data_field_types[name])]
            else:
                # Only OpenAPI "form" style is supported here.
                if metadata.style == "form":
                    _populate_form(
                        f_name,
                        metadata.explode,
                        val,
                        ",",
                        form,
                    )
                else:
                    raise ValueError(f"Invalid form style for field {name}")
    elif isinstance(data, Dict):
        for key, value in data.items():
            form[key] = [_val_to_string(value)]
    else:
        raise TypeError(f"Invalid request body type {type(data)} for form data")

    return form
# --- file: packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import (
    Any,
    Dict,
    List,
    Optional,
)
from httpx import Headers
from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .metadata import (
    HeaderMetadata,
    find_field_metadata,
)

from .values import _populate_from_globals, _val_to_string


def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]:
    """Build outbound HTTP headers from a params model plus optional globals.

    Per-operation values win; globals fill in only fields the operation did
    not already populate.
    """
    headers: Dict[str, str] = {}

    globals_already_populated = []
    if headers_params is not None:
        globals_already_populated = _populate_headers(headers_params, gbls, headers, [])
    if gbls is not None:
        _populate_headers(gbls, None, headers, globals_already_populated)

    return headers


def _populate_headers(
    headers_params: Any,
    gbls: Any,
    header_values: Dict[str, str],
    skip_fields: List[str],
) -> List[str]:
    """Serialize header-annotated fields of *headers_params* into
    *header_values* (mutated in place).

    Returns the field names whose values came from *gbls*, so a later pass
    can skip them.
    """
    globals_already_populated: List[str] = []

    if not isinstance(headers_params, BaseModel):
        return globals_already_populated

    param_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields
    for name in param_fields:
        if name in skip_fields:
            continue

        field = param_fields[name]
        f_name = field.alias if field.alias is not None else name

        # Only fields carrying HeaderMetadata are headers.
        metadata = find_field_metadata(field, HeaderMetadata)
        if metadata is None:
            continue

        value, global_found = _populate_from_globals(
            name, getattr(headers_params, name), HeaderMetadata, gbls
        )
        if global_found:
            globals_already_populated.append(name)
        value = _serialize_header(metadata.explode, value)

        # Empty string means "nothing to send" for this header.
        if value != "":
            header_values[f_name] = value

    return globals_already_populated


def _serialize_header(explode: bool, obj: Any) -> str:
    """Render *obj* as a single header value string ("" when empty/None).

    Compound values use OpenAPI "simple" style: comma-joined, with
    ``k=v`` pairs when *explode* is set, else alternating ``k,v``.
    """
    if obj is None:
        return ""

    if isinstance(obj, BaseModel):
        items = []
        obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
        for name in obj_fields:
            obj_field = obj_fields[name]
            obj_param_metadata = find_field_metadata(obj_field, HeaderMetadata)

            if not obj_param_metadata:
                continue

            f_name = obj_field.alias if obj_field.alias is not None else name

            val = getattr(obj, name)
            if val is None:
                continue

            if explode:
                items.append(f"{f_name}={_val_to_string(val)}")
            else:
                items.append(f_name)
                items.append(_val_to_string(val))

        if len(items) > 0:
            return ",".join(items)
    elif isinstance(obj, Dict):
        items = []

        for key, value in obj.items():
            if value is None:
                continue

            if explode:
                items.append(f"{key}={_val_to_string(value)}")
            else:
                items.append(key)
                items.append(_val_to_string(value))

        if len(items) > 0:
            return ",".join([str(item) for item in items])
    elif isinstance(obj, List):
        items = []

        for value in obj:
            if value is None:
                continue

            items.append(_val_to_string(value))

        if len(items) > 0:
            return ",".join(items)
    else:
        # Scalar fallback.
        return f"{_val_to_string(obj)}"

    # Compound value with no serializable entries.
    return ""


def get_response_headers(headers: Headers) -> Dict[str, List[str]]:
    """Convert httpx Headers to a name -> list-of-values mapping, preserving
    repeated headers."""
    res: Dict[str, List[str]] = {}
    for k, v in headers.items():
        if not k in res:
            res[k] = []

        res[k].append(v)
    return res
# --- file: packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import httpx
from typing import Any, Protocol

class Logger(Protocol):
    """Structural logging interface: anything with a ``debug`` method fits."""

    def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
        pass

class NoOpLogger:
    """Logger that silently discards all messages (the default)."""

    def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
        pass

def get_body_content(req: httpx.Request) -> str:
    """Return the request body for logging, or "" when reading it would
    force-load an unread stream (httpx sets _content lazily)."""
    return "" if not hasattr(req, "_content") else str(req.content)


# --- file: packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import Optional, Type, TypeVar, Union
from dataclasses import dataclass
from pydantic.fields import FieldInfo


T = TypeVar("T")


@dataclass
class SecurityMetadata:
    """Marks a model field as part of a security scheme."""

    option: bool = False
    scheme: bool = False
    scheme_type: Optional[str] = None
    sub_type: Optional[str] = None
    field_name: Optional[str] = None

    def get_field_name(self, default: str) -> str:
        # Explicit wire name wins over the model field name.
        return self.field_name or default


@dataclass
class ParamMetadata:
    """Common serialization options shared by path/query/header params."""

    serialization: Optional[str] = None
    style: str = "simple"
    explode: bool = False


@dataclass
class PathParamMetadata(ParamMetadata):
    pass


@dataclass
class QueryParamMetadata(ParamMetadata):
    # OpenAPI default for query params is style=form, explode=true.
    style: str = "form"
    explode: bool = True


@dataclass
class HeaderMetadata(ParamMetadata):
    pass


@dataclass
class RequestMetadata:
    """Marks a field as the request body with the given media type."""

    media_type: str = "application/octet-stream"


@dataclass
class MultipartFormMetadata:
    """Marks a field as part of a multipart/form-data body."""

    file: bool = False
    content: bool = False
    json: bool = False


@dataclass
class FormMetadata:
    """Marks a field as part of a urlencoded form body."""

    json: bool = False
    style: str = "form"
    explode: bool = True


class FieldMetadata:
    """Aggregate annotation attached to model fields; at most one of the
    slots is set per field. Boolean shorthand (e.g. ``query=True``) expands
    to the corresponding metadata with defaults."""

    security: Optional[SecurityMetadata] = None
    path: Optional[PathParamMetadata] = None
    query: Optional[QueryParamMetadata] = None
    header: Optional[HeaderMetadata] = None
    request: Optional[RequestMetadata] = None
    form: Optional[FormMetadata] = None
    multipart: Optional[MultipartFormMetadata] = None

    def __init__(
        self,
        security: Optional[SecurityMetadata] = None,
        path: Optional[Union[PathParamMetadata, bool]] = None,
        query: Optional[Union[QueryParamMetadata, bool]] = None,
        header: Optional[Union[HeaderMetadata, bool]] = None,
        request: Optional[Union[RequestMetadata, bool]] = None,
        form: Optional[Union[FormMetadata, bool]] = None,
        multipart: Optional[Union[MultipartFormMetadata, bool]] = None,
    ):
        self.security = security
        self.path = PathParamMetadata() if isinstance(path, bool) else path
        self.query = QueryParamMetadata() if isinstance(query, bool) else query
        self.header = HeaderMetadata() if isinstance(header, bool) else header
        self.request = RequestMetadata() if isinstance(request, bool) else request
        self.form = FormMetadata() if isinstance(form, bool) else form
        self.multipart = (
            MultipartFormMetadata() if isinstance(multipart, bool) else multipart
        )


def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]:
    """Return the *metadata_type* instance stored inside a field's
    FieldMetadata annotation, or None."""
    metadata = find_metadata(field_info, FieldMetadata)
    if not metadata:
        return None

    fields = metadata.__dict__

    for field in fields:
        if isinstance(fields[field], metadata_type):
            return fields[field]

    return None


def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]:
    """Return the first annotation of *metadata_type* attached to the
    pydantic field, or None."""
    metadata = field_info.metadata
    if not metadata:
        return None

    for md in metadata:
        if isinstance(md, metadata_type):
            return md

    return None
# --- file: packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from typing import (
    Any,
    Dict,
    get_type_hints,
    List,
    Optional,
)

from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .metadata import (
    QueryParamMetadata,
    find_field_metadata,
)
from .values import _get_serialized_params, _populate_from_globals, _val_to_string
from .forms import _populate_form


def get_query_params(
    query_params: Any,
    gbls: Optional[Any] = None,
) -> Dict[str, List[str]]:
    """Build the query-parameter multimap from a params model plus optional
    globals; operation-level values win over globals."""
    params: Dict[str, List[str]] = {}

    globals_already_populated = _populate_query_params(query_params, gbls, params, [])
    if gbls is not None:
        _populate_query_params(gbls, None, params, globals_already_populated)

    return params


def _populate_query_params(
    query_params: Any,
    gbls: Any,
    query_param_values: Dict[str, List[str]],
    skip_fields: List[str],
) -> List[str]:
    """Serialize query-annotated fields into *query_param_values* (mutated in
    place), honoring each field's style/explode/serialization metadata.

    Returns the field names filled from *gbls*. Raises NotImplementedError
    for unsupported styles.
    """
    globals_already_populated: List[str] = []

    if not isinstance(query_params, BaseModel):
        return globals_already_populated

    param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields
    param_field_types = get_type_hints(query_params.__class__)
    for name in param_fields:
        if name in skip_fields:
            continue

        field = param_fields[name]

        metadata = find_field_metadata(field, QueryParamMetadata)
        if not metadata:
            continue

        value = getattr(query_params, name) if query_params is not None else None

        value, global_found = _populate_from_globals(
            name, value, QueryParamMetadata, gbls
        )
        if global_found:
            globals_already_populated.append(name)

        f_name = field.alias if field.alias is not None else name
        serialization = metadata.serialization
        if serialization is not None:
            # Custom serialization (e.g. json-encoded param).
            serialized_parms = _get_serialized_params(
                metadata, f_name, value, param_field_types[name]
            )
            # NOTE(review): `value` is rebound by this loop, shadowing the
            # field value above; harmless here because it is the last use.
            for key, value in serialized_parms.items():
                if key in query_param_values:
                    query_param_values[key].extend(value)
                else:
                    query_param_values[key] = [value]
        else:
            style = metadata.style
            if style == "deepObject":
                _populate_deep_object_query_params(f_name, value, query_param_values)
            elif style == "form":
                _populate_delimited_query_params(
                    metadata, f_name, value, ",", query_param_values
                )
            elif style == "pipeDelimited":
                _populate_delimited_query_params(
                    metadata, f_name, value, "|", query_param_values
                )
            else:
                raise NotImplementedError(
                    f"query param style {style} not yet supported"
                )

    return globals_already_populated


def _populate_deep_object_query_params(
    field_name: str,
    obj: Any,
    params: Dict[str, List[str]],
):
    """Entry point for OpenAPI deepObject style: route by container type."""
    if obj is None:
        return

    if isinstance(obj, BaseModel):
        _populate_deep_object_query_params_basemodel(field_name, obj, params)
    elif isinstance(obj, Dict):
        _populate_deep_object_query_params_dict(field_name, obj, params)


def _populate_deep_object_query_params_basemodel(
    prior_params_key: str,
    obj: Any,
    params: Dict[str, List[str]],
):
    """Recursively flatten a pydantic model into ``parent[child]`` keys."""
    if obj is None:
        return

    if not isinstance(obj, BaseModel):
        return

    obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields
    for name in obj_fields:
        obj_field = obj_fields[name]

        f_name = obj_field.alias if obj_field.alias is not None else name

        params_key = f"{prior_params_key}[{f_name}]"

        obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata)
        if obj_param_metadata is None:
            continue

        obj_val = getattr(obj, name)
        if obj_val is None:
            continue

        if isinstance(obj_val, BaseModel):
            _populate_deep_object_query_params_basemodel(params_key, obj_val, params)
        elif isinstance(obj_val, Dict):
            _populate_deep_object_query_params_dict(params_key, obj_val, params)
        elif isinstance(obj_val, List):
            _populate_deep_object_query_params_list(params_key, obj_val, params)
        else:
            params[params_key] = [_val_to_string(obj_val)]


def _populate_deep_object_query_params_dict(
    prior_params_key: str,
    value: Dict,
    params: Dict[str, List[str]],
):
    """Recursively flatten a dict into ``parent[key]`` keys."""
    if value is None:
        return

    for key, val in value.items():
        if val is None:
            continue

        params_key = f"{prior_params_key}[{key}]"

        if isinstance(val, BaseModel):
            _populate_deep_object_query_params_basemodel(params_key, val, params)
        elif isinstance(val, Dict):
            _populate_deep_object_query_params_dict(params_key, val, params)
        elif isinstance(val, List):
            _populate_deep_object_query_params_list(params_key, val, params)
        else:
            params[params_key] = [_val_to_string(val)]


def _populate_deep_object_query_params_list(
    params_key: str,
    value: List,
    params: Dict[str, List[str]],
):
    """Append list items as repeated values under the same flattened key."""
    if value is None:
        return

    for val in value:
        if val is None:
            continue

        if params.get(params_key) is None:
            params[params_key] = []

        params[params_key].append(_val_to_string(val))


def _populate_delimited_query_params(
    metadata: QueryParamMetadata,
    field_name: str,
    obj: Any,
    delimiter: str,
    query_param_values: Dict[str, List[str]],
):
    """Delegate form/pipeDelimited styles to the shared form serializer."""
    _populate_form(
        field_name,
        metadata.explode,
        obj,
        delimiter,
        query_param_values,
    )
# --- file: packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py ---
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import io
from dataclasses import dataclass
import re
from typing import (
    Any,
    Optional,
)

from .forms import serialize_form_data, serialize_multipart_form

from .serializers import marshal_json

# Maps the generator's serialization-method tag to the default media type.
SERIALIZATION_METHOD_TO_CONTENT_TYPE = {
    "json": "application/json",
    "form": "application/x-www-form-urlencoded",
    "multipart": "multipart/form-data",
    "raw": "application/octet-stream",
    "string": "text/plain",
}


@dataclass
class SerializedRequestBody:
    """A request body ready to hand to httpx: exactly one of *content*
    (raw/json/text), *data* (form fields), or *files* is populated."""

    media_type: str
    content: Optional[Any] = None
    data: Optional[Any] = None
    files: Optional[Any] = None


def serialize_request_body(
    request_body: Any,
    nullable: bool,
    optional: bool,
    serialization_method: str,
    request_body_type,
) -> Optional[SerializedRequestBody]:
    """Serialize *request_body* per *serialization_method*.

    Returns None when the body is absent and the field is optional-but-not-
    nullable (a nullable None is still serialized, e.g. as JSON ``null``).
    Raises TypeError for body types the media type cannot carry.
    """
    if request_body is None:
        if not nullable and optional:
            return None

    media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method]

    serialized_request_body = SerializedRequestBody(media_type)

    # Matches application/json, text/json, and +json vendor suffixes.
    if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None:
        serialized_request_body.content = marshal_json(request_body, request_body_type)
    elif re.match(r"multipart\/.*", media_type) is not None:
        (
            serialized_request_body.media_type,
            serialized_request_body.data,
            serialized_request_body.files,
        ) = serialize_multipart_form(media_type, request_body)
    elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None:
        serialized_request_body.data = serialize_form_data(request_body)
    elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)):
        serialized_request_body.content = request_body
    elif isinstance(request_body, str):
        serialized_request_body.content = request_body
    else:
        raise TypeError(
            f"invalid request body type {type(request_body)} for mediaType {media_type}"
        )

    return serialized_request_body
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import asyncio
import random
import time
from typing import List

import httpx


class BackoffStrategy:
    """Exponential-backoff parameters; all intervals are in milliseconds."""

    initial_interval: int
    max_interval: int
    exponent: float
    max_elapsed_time: int

    def __init__(
        self,
        initial_interval: int,
        max_interval: int,
        exponent: float,
        max_elapsed_time: int,
    ):
        self.initial_interval = initial_interval
        self.max_interval = max_interval
        self.exponent = exponent
        self.max_elapsed_time = max_elapsed_time


class RetryConfig:
    """Retry behaviour; only the "backoff" strategy triggers retries."""

    strategy: str
    backoff: BackoffStrategy
    retry_connection_errors: bool

    def __init__(
        self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool
    ):
        self.strategy = strategy
        self.backoff = backoff
        self.retry_connection_errors = retry_connection_errors


class Retries:
    """A retry config plus status-code patterns (e.g. "5XX") to retry on."""

    config: RetryConfig
    status_codes: List[str]

    def __init__(self, config: RetryConfig, status_codes: List[str]):
        self.config = config
        self.status_codes = status_codes


class TemporaryError(Exception):
    """Internal marker: the response is retryable; carries that response."""

    response: httpx.Response

    def __init__(self, response: httpx.Response):
        self.response = response


class PermanentError(Exception):
    """Internal marker: the failure is not retryable; wraps the cause."""

    inner: Exception

    def __init__(self, inner: Exception):
        self.inner = inner


def retry(func, retries: Retries):
    """Call *func* (which returns an ``httpx.Response``) with retries.

    When the strategy is "backoff", responses whose status matches one of
    ``retries.status_codes`` (exact code or wildcard like "5XX") are retried
    with exponential backoff; otherwise *func* is called exactly once.
    """
    if retries.config.strategy == "backoff":

        def do_request() -> httpx.Response:
            res: httpx.Response
            try:
                res = func()

                for code in retries.status_codes:
                    if "X" in code.upper():
                        # Wildcard pattern such as "5XX": match on the
                        # hundreds digit of the status code.
                        code_range = int(code[0])

                        status_major = res.status_code / 100

                        if code_range <= status_major < code_range + 1:
                            raise TemporaryError(res)
                    else:
                        parsed_code = int(code)

                        if res.status_code == parsed_code:
                            raise TemporaryError(res)
            except httpx.ConnectError as exception:
                if retries.config.retry_connection_errors:
                    raise

                raise PermanentError(exception) from exception
            except httpx.TimeoutException as exception:
                if retries.config.retry_connection_errors:
                    raise

                raise PermanentError(exception) from exception
            except TemporaryError:
                raise
            except Exception as exception:
                raise PermanentError(exception) from exception

            return res

        return retry_with_backoff(
            do_request,
            retries.config.backoff.initial_interval,
            retries.config.backoff.max_interval,
            retries.config.backoff.exponent,
            retries.config.backoff.max_elapsed_time,
        )

    return func()


async def retry_async(func, retries: Retries):
    """Async counterpart of :func:`retry`; *func* is an awaitable factory."""
    if retries.config.strategy == "backoff":

        async def do_request() -> httpx.Response:
            res: httpx.Response
            try:
                res = await func()

                for code in retries.status_codes:
                    if "X" in code.upper():
                        # Wildcard pattern such as "5XX": match on the
                        # hundreds digit of the status code.
                        code_range = int(code[0])

                        status_major = res.status_code / 100

                        if code_range <= status_major < code_range + 1:
                            raise TemporaryError(res)
                    else:
                        parsed_code = int(code)

                        if res.status_code == parsed_code:
                            raise TemporaryError(res)
            except httpx.ConnectError as exception:
                if retries.config.retry_connection_errors:
                    raise

                raise PermanentError(exception) from exception
            except httpx.TimeoutException as exception:
                if retries.config.retry_connection_errors:
                    raise

                raise PermanentError(exception) from exception
            except TemporaryError:
                raise
            except Exception as exception:
                raise PermanentError(exception) from exception

            return res

        return await retry_with_backoff_async(
            do_request,
            retries.config.backoff.initial_interval,
            retries.config.backoff.max_interval,
            retries.config.backoff.exponent,
            retries.config.backoff.max_elapsed_time,
        )

    return await func()


def retry_with_backoff(
    func,
    initial_interval=500,
    max_interval=60000,
    exponent=1.5,
    max_elapsed_time=3600000,
):
    """Invoke *func* until it succeeds or *max_elapsed_time* ms elapse.

    A :class:`PermanentError` aborts immediately by re-raising its cause;
    any other exception is retried. On timeout, a :class:`TemporaryError`
    yields its response instead of raising.
    """
    start = round(time.time() * 1000)
    retries = 0

    while True:
        try:
            return func()
        except PermanentError as exception:
            raise exception.inner
        except Exception as exception:  # pylint: disable=broad-exception-caught
            now = round(time.time() * 1000)
            if now - start > max_elapsed_time:
                if isinstance(exception, TemporaryError):
                    return exception.response

                raise
            sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1)
            sleep = min(sleep, max_interval / 1000)
            time.sleep(sleep)
            retries += 1


async def retry_with_backoff_async(
    func,
    initial_interval=500,
    max_interval=60000,
    exponent=1.5,
    max_elapsed_time=3600000,
):
    """Async counterpart of :func:`retry_with_backoff`."""
    start = round(time.time() * 1000)
    retries = 0

    while True:
        try:
            return await func()
        except PermanentError as exception:
            raise exception.inner
        except Exception as exception:  # pylint: disable=broad-exception-caught
            now = round(time.time() * 1000)
            if now - start > max_elapsed_time:
                if isinstance(exception, TemporaryError):
                    return exception.response

                raise
            sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1)
            sleep = min(sleep, max_interval / 1000)
            # BUG FIX: was time.sleep(sleep), which blocks the entire event
            # loop inside a coroutine; use a non-blocking asyncio sleep.
            await asyncio.sleep(sleep)
            retries += 1
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

import base64
from typing import (
    Any,
    Dict,
    List,
    Tuple,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .metadata import (
    SecurityMetadata,
    find_field_metadata,
)


def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]:
    """Build auth headers and query parameters from a security model.

    Returns a ``(headers, query_params)`` pair derived from the fields of
    *security* and their attached :class:`SecurityMetadata`.

    Raises:
        TypeError: if *security* is not a pydantic model.
    """
    headers: Dict[str, str] = {}
    query_params: Dict[str, List[str]] = {}

    if security is None:
        return headers, query_params

    if not isinstance(security, BaseModel):
        raise TypeError("security must be a pydantic model")

    sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields
    for name in sec_fields:
        sec_field = sec_fields[name]

        value = getattr(security, name)
        if value is None:
            continue

        metadata = find_field_metadata(sec_field, SecurityMetadata)
        if metadata is None:
            continue
        if metadata.option:
            # An "option" groups alternative schemes; the first populated
            # option wins and no further fields are considered.
            _parse_security_option(headers, query_params, value)
            return headers, query_params
        if metadata.scheme:
            # Special case for basic auth which could be a flattened model
            if metadata.sub_type == "basic" and not isinstance(value, BaseModel):
                _parse_security_scheme(headers, query_params, metadata, name, security)
            else:
                _parse_security_scheme(headers, query_params, metadata, name, value)

    return headers, query_params


def _parse_security_option(
    headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any
):
    """Apply every scheme field of an option model to headers/query params."""
    if not isinstance(option, BaseModel):
        raise TypeError("security option must be a pydantic model")

    opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields
    for name in opt_fields:
        opt_field = opt_fields[name]

        metadata = find_field_metadata(opt_field, SecurityMetadata)
        if metadata is None or not metadata.scheme:
            continue
        _parse_security_scheme(
            headers, query_params, metadata, name, getattr(option, name)
        )


def _parse_security_scheme(
    headers: Dict[str, str],
    query_params: Dict[str, List[str]],
    scheme_metadata: SecurityMetadata,
    field_name: str,
    scheme: Any,
):
    """Apply one security scheme (model or scalar value) to the request."""
    scheme_type = scheme_metadata.scheme_type
    sub_type = scheme_metadata.sub_type

    if isinstance(scheme, BaseModel):
        if scheme_type == "http" and sub_type == "basic":
            _parse_basic_auth_scheme(headers, scheme)
            return

        scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields
        for name in scheme_fields:
            scheme_field = scheme_fields[name]

            metadata = find_field_metadata(scheme_field, SecurityMetadata)
            if metadata is None or metadata.field_name is None:
                continue

            value = getattr(scheme, name)

            _parse_security_scheme_value(
                headers, query_params, scheme_metadata, metadata, name, value
            )
    else:
        _parse_security_scheme_value(
            headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme
        )


def _parse_security_scheme_value(
    headers: Dict[str, str],
    query_params: Dict[str, List[str]],
    scheme_metadata: SecurityMetadata,
    security_metadata: SecurityMetadata,
    field_name: str,
    value: Any,
):
    """Write a single credential *value* into headers or query params.

    Raises:
        ValueError: for unsupported scheme/sub-type combinations.
    """
    scheme_type = scheme_metadata.scheme_type
    sub_type = scheme_metadata.sub_type

    header_name = security_metadata.get_field_name(field_name)

    if scheme_type == "apiKey":
        if sub_type == "header":
            headers[header_name] = value
        elif sub_type == "query":
            query_params[header_name] = [value]
        else:
            # BUG FIX: these messages were missing the f-prefix, so the
            # literal "{sub_type}" placeholder was raised to callers.
            raise ValueError(f"sub type {sub_type} not supported")
    elif scheme_type == "openIdConnect":
        headers[header_name] = _apply_bearer(value)
    elif scheme_type == "oauth2":
        if sub_type != "client_credentials":
            headers[header_name] = _apply_bearer(value)
    elif scheme_type == "http":
        if sub_type == "bearer":
            headers[header_name] = _apply_bearer(value)
        else:
            raise ValueError(f"sub type {sub_type} not supported")
    else:
        raise ValueError(f"scheme type {scheme_type} not supported")


def _apply_bearer(token: str) -> str:
    """Prefix *token* with "Bearer " unless it already carries the prefix."""
    # Conditional expression replaces the fragile `and/or` idiom; behavior is
    # identical (an empty token cannot start with "bearer ").
    return token if token.lower().startswith("bearer ") else f"Bearer {token}"


def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any):
    """Encode a basic-auth model's username/password into Authorization.

    Raises:
        TypeError: if *scheme* is not a pydantic model.
    """
    username = ""
    password = ""

    if not isinstance(scheme, BaseModel):
        raise TypeError("basic auth scheme must be a pydantic model")

    scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields
    for name in scheme_fields:
        scheme_field = scheme_fields[name]

        metadata = find_field_metadata(scheme_field, SecurityMetadata)
        if metadata is None or metadata.field_name is None:
            continue

        field_name = metadata.field_name
        value = getattr(scheme, name)

        if field_name == "username":
            username = value
        if field_name == "password":
            password = value

    data = f"{username}:{password}".encode()
    headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}"
DO NOT EDIT.""" + +from decimal import Decimal +import json +from typing import Any, Dict, List, Union, get_args +import httpx +from typing_extensions import get_origin +from pydantic import ConfigDict, create_model +from pydantic_core import from_json +from typing_inspect import is_optional_type + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable + + +def serialize_decimal(as_str: bool): + def serialize(d): + if is_optional_type(type(d)) and d is None: + return None + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, Decimal): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + if is_optional_type(type(f)) and f is None: + return None + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, float): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(b): + if is_optional_type(type(b)) and b is None: + return None + + if not isinstance(b, int): + raise ValueError("Expected int") + + return str(b) if as_str else b + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, int): + return b + + if not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_open_enum(is_int: bool): + def validate(e): + if e is None: + return None + + if is_int: + if not isinstance(e, int): + raise ValueError("Expected int") + else: + if not isinstance(e, str): + raise ValueError("Expected string") + + return e + + return validate + + 
def unmarshal_json(raw, typ: Any) -> Any:
    """Parse *raw* JSON text and validate the result against *typ*."""
    return unmarshal(from_json(raw), typ)


def unmarshal(val, typ: Any) -> Any:
    """Validate *val* against *typ* via a throwaway pydantic wrapper model."""
    unmarshaller = create_model(
        "Unmarshaller",
        body=(typ, ...),
        __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True),
    )

    m = unmarshaller(body=val)

    # pyright: ignore[reportAttributeAccessIssue]
    return m.body  # type: ignore


def marshal_json(val, typ):
    """Serialize *val* (typed as *typ*) to compact, key-sorted JSON.

    Returns "null" for a nullable None, and "" when the wrapper model
    dumps to an empty dict (nothing to serialize).
    """
    if is_nullable(typ) and val is None:
        return "null"

    marshaller = create_model(
        "Marshaller",
        body=(typ, ...),
        __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True),
    )

    m = marshaller(body=val)

    d = m.model_dump(by_alias=True, mode="json", exclude_none=True)

    if len(d) == 0:
        return ""

    return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True)


def is_nullable(field) -> bool:
    """True when *field* is Nullable/OptionalNullable or a Union permitting
    None with a Nullable member."""
    origin = get_origin(field)
    if origin is Nullable or origin is OptionalNullable:
        return True

    # Idiom fix: `not origin is Union` -> `origin is not Union`.
    if origin is not Union or type(None) not in get_args(field):
        return False

    return any(
        get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable
        for arg in get_args(field)
    )


def stream_to_text(stream: httpx.Response) -> str:
    """Drain a streaming response into a single string."""
    return "".join(stream.iter_text())


def get_pydantic_model(data: Any, typ: Any) -> Any:
    """Return *data* as *typ*, unmarshalling only when it is still plain data."""
    if not _contains_pydantic_model(data):
        return unmarshal(data, typ)

    return data


def _contains_pydantic_model(data: Any) -> bool:
    """Recursively detect a pydantic model inside lists/dicts.

    Idiom fix: isinstance against builtin list/dict instead of the
    deprecated typing.List/typing.Dict aliases (behavior is identical).
    """
    if isinstance(data, BaseModel):
        return True
    if isinstance(data, list):
        return any(_contains_pydantic_model(item) for item in data)
    if isinstance(data, dict):
        return any(_contains_pydantic_model(value) for value in data.values())

    return False
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from decimal import Decimal
from typing import (
    Any,
    Dict,
    get_type_hints,
    List,
    Optional,
    Union,
    get_args,
    get_origin,
)
from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .metadata import (
    PathParamMetadata,
    find_field_metadata,
)
from .values import _get_serialized_params, _populate_from_globals, _val_to_string


def generate_url(
    server_url: str,
    path: str,
    path_params: Any,
    gbls: Optional[Any] = None,
) -> str:
    """Substitute path parameters (and globals fallbacks) into *path* and
    join it onto *server_url*."""
    path_param_values: Dict[str, str] = {}

    globals_already_populated = _populate_path_params(
        path_params, gbls, path_param_values, []
    )
    if gbls is not None:
        # Globals themselves may define path params not set on the request.
        _populate_path_params(gbls, None, path_param_values, globals_already_populated)

    for key, value in path_param_values.items():
        path = path.replace("{" + key + "}", value, 1)

    return remove_suffix(server_url, "/") + path


def _populate_path_params(
    path_params: Any,
    gbls: Any,
    path_param_values: Dict[str, str],
    skip_fields: List[str],
) -> List[str]:
    """Fill *path_param_values* from *path_params*, consulting *gbls* for
    fallbacks.

    Returns the field names satisfied from globals so the caller can skip
    them when later populating directly from the globals model.
    """
    globals_already_populated: List[str] = []

    if not isinstance(path_params, BaseModel):
        return globals_already_populated

    path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields
    path_param_field_types = get_type_hints(path_params.__class__)
    for name in path_param_fields:
        if name in skip_fields:
            continue

        field = path_param_fields[name]

        param_metadata = find_field_metadata(field, PathParamMetadata)
        if param_metadata is None:
            continue

        param = getattr(path_params, name) if path_params is not None else None
        param, global_found = _populate_from_globals(
            name, param, PathParamMetadata, gbls
        )
        if global_found:
            globals_already_populated.append(name)

        if param is None:
            continue

        f_name = field.alias if field.alias is not None else name
        serialization = param_metadata.serialization
        if serialization is not None:
            serialized_params = _get_serialized_params(
                param_metadata, f_name, param, path_param_field_types[name]
            )
            for key, value in serialized_params.items():
                path_param_values[key] = value
        else:
            pp_vals: List[str] = []
            if param_metadata.style == "simple":
                if isinstance(param, list):
                    for pp_val in param:
                        if pp_val is None:
                            continue
                        pp_vals.append(_val_to_string(pp_val))
                    path_param_values[f_name] = ",".join(pp_vals)
                elif isinstance(param, dict):
                    for pp_key in param:
                        if param[pp_key] is None:
                            continue
                        if param_metadata.explode:
                            pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}")
                        else:
                            pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}")
                    path_param_values[f_name] = ",".join(pp_vals)
                elif not isinstance(param, (str, int, float, complex, bool, Decimal)):
                    param_fields: Dict[str, FieldInfo] = param.__class__.model_fields
                    # Renamed loop variable: the original reused `name`,
                    # shadowing the outer loop variable.
                    for sub_name in param_fields:
                        param_field = param_fields[sub_name]

                        param_value_metadata = find_field_metadata(
                            param_field, PathParamMetadata
                        )
                        if param_value_metadata is None:
                            continue

                        param_name = (
                            param_field.alias
                            if param_field.alias is not None
                            else sub_name
                        )

                        param_field_val = getattr(param, sub_name)
                        if param_field_val is None:
                            continue
                        if param_metadata.explode:
                            pp_vals.append(
                                f"{param_name}={_val_to_string(param_field_val)}"
                            )
                        else:
                            pp_vals.append(
                                f"{param_name},{_val_to_string(param_field_val)}"
                            )
                    path_param_values[f_name] = ",".join(pp_vals)
                else:
                    path_param_values[f_name] = _val_to_string(param)

    return globals_already_populated


def is_optional(field) -> bool:
    """True when *field* is a Union that permits None (e.g. Optional[X])."""
    return get_origin(field) is Union and type(None) in get_args(field)


def template_url(url_with_params: str, params: Dict[str, str]) -> str:
    """Replace every "{key}" placeholder in the URL with its value."""
    for key, value in params.items():
        url_with_params = url_with_params.replace("{" + key + "}", value)

    return url_with_params


def remove_suffix(input_string, suffix):
    """Strip *suffix* from the end of *input_string*, if present."""
    if suffix and input_string.endswith(suffix):
        return input_string[: -len(suffix)]
    return input_string


"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from datetime import datetime
from enum import Enum
from email.message import Message
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union

from httpx import Response
from pydantic import BaseModel
from pydantic.fields import FieldInfo

from .serializers import marshal_json

from .metadata import ParamMetadata, find_field_metadata


def match_content_type(content_type: str, pattern: str) -> bool:
    """True when *content_type* matches *pattern* (exact, "*", or type/*)."""
    if pattern in (content_type, "*", "*/*"):
        return True

    # Parse via email.message to discard parameters such as "; charset=...".
    msg = Message()
    msg["content-type"] = content_type
    media_type = msg.get_content_type()

    if media_type == pattern:
        return True

    parts = media_type.split("/")
    if len(parts) == 2:
        if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"):
            return True

    return False


def match_status_codes(status_codes: List[str], status_code: int) -> bool:
    """True when *status_code* matches one of the patterns ("default",
    exact code, or wildcard like "4XX")."""
    if "default" in status_codes:
        return True

    for code in status_codes:
        if code == str(status_code):
            return True

        if code.endswith("XX") and code.startswith(str(status_code)[:1]):
            return True
    return False


T = TypeVar("T")


def get_global_from_env(
    value: Optional[T], env_key: str, type_cast: Callable[[str], T]
) -> Optional[T]:
    """Return *value*, or the environment variable *env_key* cast with
    *type_cast*; None when neither is usable."""
    if value is not None:
        return value
    env_value = os.getenv(env_key)
    if env_value is not None:
        try:
            return type_cast(env_value)
        except ValueError:
            pass
    return None


def match_response(
    response: Response, code: Union[str, List[str]], content_type: str
) -> bool:
    """True when the response matches both the status-code pattern(s) and
    the expected content type."""
    codes = code if isinstance(code, list) else [code]
    return match_status_codes(codes, response.status_code) and match_content_type(
        response.headers.get("content-type", "application/octet-stream"), content_type
    )


def _populate_from_globals(
    param_name: str, value: Any, param_metadata_type: type, gbls: Any
) -> Tuple[Any, bool]:
    """Fall back to a matching field on the globals model *gbls*.

    Returns ``(resolved_value, found)`` where *found* reports whether the
    globals model declares a field named *param_name*.

    Raises:
        TypeError: if *gbls* is not a pydantic model.
    """
    if gbls is None:
        return value, False

    if not isinstance(gbls, BaseModel):
        raise TypeError("globals must be a pydantic model")

    global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields
    found = False
    for name in global_fields:
        field = global_fields[name]
        # BUG FIX: was `if name is not param_name`, an identity comparison
        # between strings that only worked via CPython interning; use
        # equality.
        if name != param_name:
            continue

        found = True

        if value is not None:
            return value, True

        global_value = getattr(gbls, name)

        param_metadata = find_field_metadata(field, param_metadata_type)
        if param_metadata is None:
            return value, True

        return global_value, True

    return value, found


def _val_to_string(val) -> str:
    """Render a value for URL/query serialization (bools lowercase,
    datetimes ISO-8601 with Z, enums by value)."""
    if isinstance(val, bool):
        return str(val).lower()
    if isinstance(val, datetime):
        return str(val.isoformat().replace("+00:00", "Z"))
    if isinstance(val, Enum):
        return str(val.value)

    return str(val)


def _get_serialized_params(
    metadata: ParamMetadata, field_name: str, obj: Any, typ: type
) -> Dict[str, str]:
    """Serialize *obj* per the metadata's serialization mode (only "json"
    is supported)."""
    params: Dict[str, str] = {}

    serialization = metadata.serialization
    if serialization == "json":
        params[field_name] = marshal_json(obj, typ)

    return params
[[package]] name = "annotated-types" -version = "0.6.0" +version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" files = [ - {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, - {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + [[package]] name = "anyio" -version = "4.3.0" +version = "4.4.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.3.0-py3-none-any.whl", hash = "sha256:048e05d0f6caeed70d731f3db756d35dcc1f35747c8c403364a8332c630441b8"}, - {file = "anyio-4.3.0.tar.gz", hash = "sha256:f75253795a87df48568485fd18cdd2a3fa5c4f7c5be8e5e36637733fce06fed6"}, + {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, + {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, ] [package.dependencies] @@ -33,15 +36,139 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphin test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (>=0.23)"] +[[package]] +name = "astroid" +version = "3.2.4" +description = "An abstract syntax tree for Python with inference support." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, + {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, +] + +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "cachetools" +version = "5.4.0" +description = "Extensible memoizing collections and decorators" +optional = true +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"}, + {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"}, +] + [[package]] name = "certifi" -version = "2024.2.2" +version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, - {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = true +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = 
"charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, ] [[package]] @@ -55,20 +182,58 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "dill" +version = "0.3.8" +description = "serialize all of Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, + {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, +] + +[package.extras] +graph = ["objgraph (>=1.7.2)"] +profile = ["gprof2dot (>=2022.7.29)"] + [[package]] name = "exceptiongroup" -version = "1.2.0" +version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.2.0-py3-none-any.whl", 
hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, - {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "google-auth" +version = "2.32.0" +description = "Google Authentication Library" +optional = true +python-versions = ">=3.7" +files = [ + {file = "google_auth-2.32.0-py2.py3-none-any.whl", hash = "sha256:53326ea2ebec768070a94bee4e1b9194c9646ea0c2bd72422785bd0f9abfad7b"}, + {file = "google_auth-2.32.0.tar.gz", hash = "sha256:49315be72c55a6a37d62819e3573f6b416aca00721f7e3e31a008d928bf64022"}, +] + +[package.dependencies] +cachetools = ">=2.0.0,<6.0" +pyasn1-modules = ">=0.2.1" +rsa = ">=3.1.4,<5" + +[package.extras] +aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] +enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] +pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] +reauth = ["pyu2f (>=0.1.5)"] +requests = ["requests (>=2.20.0,<3.0.0.dev0)"] + [[package]] name = "h11" version = "0.14.0" @@ -82,13 +247,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.4" +version = "1.0.5" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.4-py3-none-any.whl", hash = "sha256:ac418c1db41bade2ad53ae2f3834a3a0f5ae76b56cf5aa497d2d033384fc7d73"}, - {file = "httpcore-1.0.4.tar.gz", hash = "sha256:cb2839ccfcba0d2d3c1131d3c3e26dfc327326fbe7a5dc0dbfe9f6c9151bb022"}, + {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, + {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, ] [package.dependencies] @@ -99,17 +264,17 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.25.0)"] +trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" -version = "0.25.2" +version = "0.27.0" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.25.2-py3-none-any.whl", hash = "sha256:a05d3d052d9b2dfce0e3896636467f8a5342fb2b902c819428e1ac65413ca118"}, - {file = "httpx-0.25.2.tar.gz", hash = "sha256:8b8fcaa0c8ea7b05edd69a094e63a2094c4efcb48129fb757361bc423c0ad9e8"}, + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, ] [package.dependencies] @@ -127,13 +292,13 @@ socks = ["socksio (==1.*)"] [[package]] name = "idna" -version = "3.6" +version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" files = [ - {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, - {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, + {file = "idna-3.7-py3-none-any.whl", hash = 
"sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] [[package]] @@ -147,40 +312,76 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "jsonpath-python" +version = "1.0.6" +description = "A more powerful JSONPath implementation in modern python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, + {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + [[package]] name = "mypy" -version = "1.9.0" +version = "1.10.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f8a67616990062232ee4c3952f41c779afac41405806042a8126fe96e098419f"}, - 
{file = "mypy-1.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d357423fa57a489e8c47b7c85dfb96698caba13d66e086b412298a1a0ea3b0ed"}, - {file = "mypy-1.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49c87c15aed320de9b438ae7b00c1ac91cd393c1b854c2ce538e2a72d55df150"}, - {file = "mypy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:48533cdd345c3c2e5ef48ba3b0d3880b257b423e7995dada04248725c6f77374"}, - {file = "mypy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:4d3dbd346cfec7cb98e6cbb6e0f3c23618af826316188d587d1c1bc34f0ede03"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:653265f9a2784db65bfca694d1edd23093ce49740b2244cde583aeb134c008f3"}, - {file = "mypy-1.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a3c007ff3ee90f69cf0a15cbcdf0995749569b86b6d2f327af01fd1b8aee9dc"}, - {file = "mypy-1.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2418488264eb41f69cc64a69a745fad4a8f86649af4b1041a4c64ee61fc61129"}, - {file = "mypy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:68edad3dc7d70f2f17ae4c6c1b9471a56138ca22722487eebacfd1eb5321d612"}, - {file = "mypy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:85ca5fcc24f0b4aeedc1d02f93707bccc04733f21d41c88334c5482219b1ccb3"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aceb1db093b04db5cd390821464504111b8ec3e351eb85afd1433490163d60cd"}, - {file = "mypy-1.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0235391f1c6f6ce487b23b9dbd1327b4ec33bb93934aa986efe8a9563d9349e6"}, - {file = "mypy-1.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4d5ddc13421ba3e2e082a6c2d74c2ddb3979c39b582dacd53dd5d9431237185"}, - {file = "mypy-1.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:190da1ee69b427d7efa8aa0d5e5ccd67a4fb04038c380237a0d96829cb157913"}, - {file = "mypy-1.9.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:fe28657de3bfec596bbeef01cb219833ad9d38dd5393fc649f4b366840baefe6"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e54396d70be04b34f31d2edf3362c1edd023246c82f1730bbf8768c28db5361b"}, - {file = "mypy-1.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5e6061f44f2313b94f920e91b204ec600982961e07a17e0f6cd83371cb23f5c2"}, - {file = "mypy-1.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a10926e5473c5fc3da8abb04119a1f5811a236dc3a38d92015cb1e6ba4cb9e"}, - {file = "mypy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b685154e22e4e9199fc95f298661deea28aaede5ae16ccc8cbb1045e716b3e04"}, - {file = "mypy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:5d741d3fc7c4da608764073089e5f58ef6352bedc223ff58f2f038c2c4698a89"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:587ce887f75dd9700252a3abbc9c97bbe165a4a630597845c61279cf32dfbf02"}, - {file = "mypy-1.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f88566144752999351725ac623471661c9d1cd8caa0134ff98cceeea181789f4"}, - {file = "mypy-1.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61758fabd58ce4b0720ae1e2fea5cfd4431591d6d590b197775329264f86311d"}, - {file = "mypy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e49499be624dead83927e70c756970a0bc8240e9f769389cdf5714b0784ca6bf"}, - {file = "mypy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:571741dc4194b4f82d344b15e8837e8c5fcc462d66d076748142327626a1b6e9"}, - {file = "mypy-1.9.0-py3-none-any.whl", hash = "sha256:a260627a570559181a9ea5de61ac6297aa5af202f06fd7ab093ce74e7181e43e"}, - {file = "mypy-1.9.0.tar.gz", hash = "sha256:3cc5da0127e6a478cddd906068496a97a7618a21ce9b54bde5bf7e539c7af974"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, + {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, + {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, + {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, + {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, + {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, + {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, + {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, + {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, + {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, + {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, + {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, + {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, + {file = 
"mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, + {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, + {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, + {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, + {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, + {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, + {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, + {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, + {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, + {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, + {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, ] [package.dependencies] @@ -206,209 +407,234 @@ files = [ ] [[package]] -name = "orjson" -version = "3.9.15" -description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" +name = "packaging" +version = "24.1" +description = "Core utilities for Python packages" optional = false 
python-versions = ">=3.8" files = [ - {file = "orjson-3.9.15-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d61f7ce4727a9fa7680cd6f3986b0e2c732639f46a5e0156e550e35258aa313a"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4feeb41882e8aa17634b589533baafdceb387e01e117b1ec65534ec724023d04"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fbbeb3c9b2edb5fd044b2a070f127a0ac456ffd079cb82746fc84af01ef021a4"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b66bcc5670e8a6b78f0313bcb74774c8291f6f8aeef10fe70e910b8040f3ab75"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2973474811db7b35c30248d1129c64fd2bdf40d57d84beed2a9a379a6f57d0ab"}, - {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fe41b6f72f52d3da4db524c8653e46243c8c92df826ab5ffaece2dba9cccd58"}, - {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4228aace81781cc9d05a3ec3a6d2673a1ad0d8725b4e915f1089803e9efd2b99"}, - {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f7b65bfaf69493c73423ce9db66cfe9138b2f9ef62897486417a8fcb0a92bfe"}, - {file = "orjson-3.9.15-cp310-none-win32.whl", hash = "sha256:2d99e3c4c13a7b0fb3792cc04c2829c9db07838fb6973e578b85c1745e7d0ce7"}, - {file = "orjson-3.9.15-cp310-none-win_amd64.whl", hash = "sha256:b725da33e6e58e4a5d27958568484aa766e825e93aa20c26c91168be58e08cbb"}, - {file = "orjson-3.9.15-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c8e8fe01e435005d4421f183038fc70ca85d2c1e490f51fb972db92af6e047c2"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87f1097acb569dde17f246faa268759a71a2cb8c96dd392cd25c668b104cad2f"}, - {file 
= "orjson-3.9.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff0f9913d82e1d1fadbd976424c316fbc4d9c525c81d047bbdd16bd27dd98cfc"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8055ec598605b0077e29652ccfe9372247474375e0e3f5775c91d9434e12d6b1"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6768a327ea1ba44c9114dba5fdda4a214bdb70129065cd0807eb5f010bfcbb5"}, - {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12365576039b1a5a47df01aadb353b68223da413e2e7f98c02403061aad34bde"}, - {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:71c6b009d431b3839d7c14c3af86788b3cfac41e969e3e1c22f8a6ea13139404"}, - {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e18668f1bd39e69b7fed19fa7cd1cd110a121ec25439328b5c89934e6d30d357"}, - {file = "orjson-3.9.15-cp311-none-win32.whl", hash = "sha256:62482873e0289cf7313461009bf62ac8b2e54bc6f00c6fabcde785709231a5d7"}, - {file = "orjson-3.9.15-cp311-none-win_amd64.whl", hash = "sha256:b3d336ed75d17c7b1af233a6561cf421dee41d9204aa3cfcc6c9c65cd5bb69a8"}, - {file = "orjson-3.9.15-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:82425dd5c7bd3adfe4e94c78e27e2fa02971750c2b7ffba648b0f5d5cc016a73"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c51378d4a8255b2e7c1e5cc430644f0939539deddfa77f6fac7b56a9784160a"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6ae4e06be04dc00618247c4ae3f7c3e561d5bc19ab6941427f6d3722a0875ef7"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcef128f970bb63ecf9a65f7beafd9b55e3aaf0efc271a4154050fc15cdb386e"}, - {file = 
"orjson-3.9.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b72758f3ffc36ca566ba98a8e7f4f373b6c17c646ff8ad9b21ad10c29186f00d"}, - {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c57bc7b946cf2efa67ac55766e41764b66d40cbd9489041e637c1304400494"}, - {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:946c3a1ef25338e78107fba746f299f926db408d34553b4754e90a7de1d44068"}, - {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2f256d03957075fcb5923410058982aea85455d035607486ccb847f095442bda"}, - {file = "orjson-3.9.15-cp312-none-win_amd64.whl", hash = "sha256:5bb399e1b49db120653a31463b4a7b27cf2fbfe60469546baf681d1b39f4edf2"}, - {file = "orjson-3.9.15-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b17f0f14a9c0ba55ff6279a922d1932e24b13fc218a3e968ecdbf791b3682b25"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f6cbd8e6e446fb7e4ed5bac4661a29e43f38aeecbf60c4b900b825a353276a1"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76bc6356d07c1d9f4b782813094d0caf1703b729d876ab6a676f3aaa9a47e37c"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdfa97090e2d6f73dced247a2f2d8004ac6449df6568f30e7fa1a045767c69a6"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7413070a3e927e4207d00bd65f42d1b780fb0d32d7b1d951f6dc6ade318e1b5a"}, - {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9cf1596680ac1f01839dba32d496136bdd5d8ffb858c280fa82bbfeb173bdd40"}, - {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:809d653c155e2cc4fd39ad69c08fdff7f4016c355ae4b88905219d3579e31eb7"}, - {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = 
"sha256:920fa5a0c5175ab14b9c78f6f820b75804fb4984423ee4c4f1e6d748f8b22bc1"}, - {file = "orjson-3.9.15-cp38-none-win32.whl", hash = "sha256:2b5c0f532905e60cf22a511120e3719b85d9c25d0e1c2a8abb20c4dede3b05a5"}, - {file = "orjson-3.9.15-cp38-none-win_amd64.whl", hash = "sha256:67384f588f7f8daf040114337d34a5188346e3fae6c38b6a19a2fe8c663a2f9b"}, - {file = "orjson-3.9.15-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6fc2fe4647927070df3d93f561d7e588a38865ea0040027662e3e541d592811e"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34cbcd216e7af5270f2ffa63a963346845eb71e174ea530867b7443892d77180"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f541587f5c558abd93cb0de491ce99a9ef8d1ae29dd6ab4dbb5a13281ae04cbd"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92255879280ef9c3c0bcb327c5a1b8ed694c290d61a6a532458264f887f052cb"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:05a1f57fb601c426635fcae9ddbe90dfc1ed42245eb4c75e4960440cac667262"}, - {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ede0bde16cc6e9b96633df1631fbcd66491d1063667f260a4f2386a098393790"}, - {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e88b97ef13910e5f87bcbc4dd7979a7de9ba8702b54d3204ac587e83639c0c2b"}, - {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57d5d8cf9c27f7ef6bc56a5925c7fbc76b61288ab674eb352c26ac780caa5b10"}, - {file = "orjson-3.9.15-cp39-none-win32.whl", hash = "sha256:001f4eb0ecd8e9ebd295722d0cbedf0748680fb9998d3993abaed2f40587257a"}, - {file = "orjson-3.9.15-cp39-none-win_amd64.whl", hash = "sha256:ea0b183a5fe6b2b45f3b854b0d19c4e932d6f5934ae1f723b07cf9560edd4ec7"}, - {file = "orjson-3.9.15.tar.gz", hash = 
"sha256:95cae920959d772f30ab36d3b25f83bb0f3be671e986c72ce22f8fa700dae061"}, + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] [[package]] -name = "packaging" -version = "24.0" -description = "Core utilities for Python packages" +name = "platformdirs" +version = "4.2.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "packaging-24.0-py3-none-any.whl", hash = "sha256:2ddfb553fdf02fb784c234c7ba6ccc288296ceabec964ad2eae3777778130bc5"}, - {file = "packaging-24.0.tar.gz", hash = "sha256:eb82c5e3e56209074766e6885bb04b8c38a0c015d0a30036ebe7ece34c9989e9"}, + {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, + {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, ] +[package.extras] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] +type = ["mypy (>=1.8)"] + [[package]] name = "pluggy" -version = "1.4.0" +version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" files = [ - {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, - {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file 
= "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, ] [package.extras] dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "pyasn1" +version = "0.6.0" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = true +python-versions = ">=3.8" +files = [ + {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, + {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.0" +description = "A collection of ASN.1-based protocols modules" +optional = true +python-versions = ">=3.8" +files = [ + {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, + {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, +] + +[package.dependencies] +pyasn1 = ">=0.4.6,<0.7.0" + [[package]] name = "pydantic" -version = "2.6.4" +version = "2.8.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.6.4-py3-none-any.whl", hash = "sha256:cc46fce86607580867bdc3361ad462bab9c222ef042d3da86f2fb333e1d916c5"}, - {file = "pydantic-2.6.4.tar.gz", hash = "sha256:b1704e0847db01817624a6b86766967f552dd9dbf3afba4004409f908dcc84e6"}, + {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, + {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, ] [package.dependencies] annotated-types = ">=0.4.0" -pydantic-core = "2.16.3" -typing-extensions = ">=4.6.1" +pydantic-core = "2.20.1" +typing-extensions = [ + {version = ">=4.6.1", markers = 
"python_version < \"3.13\""}, + {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, +] [package.extras] email = ["email-validator (>=2.0.0)"] [[package]] name = "pydantic-core" -version = "2.16.3" -description = "" +version = "2.20.1" +description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, - {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, - {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, - {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, - {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, - {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, - {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, - {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, - {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, - {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, - {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = "sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, - {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, - {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, - {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, - {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, - {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = "sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, - {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, - {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, - {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, - {file = 
"pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, - {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, - {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, - {file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, - {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, - {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, - {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, - {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, - {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = "sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, - {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, - 
{file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, - {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, - {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, + {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, + {file = 
"pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, + {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, + {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, + {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, + {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, + {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, + {file = 
"pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, + {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, + {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, + {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, + {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, + {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, + {file = 
"pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, + {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, + {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, + {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, + {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, + {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, + {file = 
"pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, + {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, + {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, + {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, + {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, + {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, + {file = 
"pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, + {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, + {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, + {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, + {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, + {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, + {file = 
"pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, + {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, + {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, + {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, + {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, + {file = 
"pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, + {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, + {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, + {file = "pydantic_core-2.20.1.tar.gz", 
hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, ] [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +[[package]] +name = "pylint" +version = "3.2.3" +description = "python code static checker" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, + {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, +] + +[package.dependencies] +astroid = ">=3.2.2,<=3.3.0-dev0" +colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} +dill = [ + {version = ">=0.2", markers = "python_version < \"3.11\""}, + {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, + {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, +] +isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" +mccabe = ">=0.6,<0.8" +platformdirs = ">=2.2.0" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +tomlkit = ">=0.10.1" +typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} + +[package.extras] +spelling = ["pyenchant (>=3.2,<4.0)"] +testutils = ["gitpython (>3)"] + [[package]] name = "pytest" -version = "7.4.4" +version = "8.3.2" description = "pytest: simple powerful testing with Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, - {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, + {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, + {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, ] [package.dependencies] @@ 
-416,21 +642,21 @@ colorama = {version = "*", markers = "sys_platform == \"win32\""} exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] name = "pytest-asyncio" -version = "0.23.5.post1" +version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-asyncio-0.23.5.post1.tar.gz", hash = "sha256:b9a8806bea78c21276bc34321bbf234ba1b2ea5b30d9f0ce0f2dea45e4685813"}, - {file = "pytest_asyncio-0.23.5.post1-py3-none-any.whl", hash = "sha256:30f54d27774e79ac409778889880242b0403d09cabd65b727ce90fe92dd5d80e"}, + {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, + {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, ] [package.dependencies] @@ -441,29 +667,63 @@ docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] [[package]] -name = "ruff" -version = "0.1.15" -description = "An extremely fast Python linter and code formatter, written in Rust." 
+name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" optional = false -python-versions = ">=3.7" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "requests" +version = "2.32.3" +description = "Python HTTP for Humans." +optional = true +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = true +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" files = [ - {file = "ruff-0.1.15-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:5fe8d54df166ecc24106db7dd6a68d44852d14eb0729ea4672bb4d96c320b7df"}, - {file = 
"ruff-0.1.15-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6f0bfbb53c4b4de117ac4d6ddfd33aa5fc31beeaa21d23c45c6dd249faf9126f"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e0d432aec35bfc0d800d4f70eba26e23a352386be3a6cf157083d18f6f5881c8"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9405fa9ac0e97f35aaddf185a1be194a589424b8713e3b97b762336ec79ff807"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c66ec24fe36841636e814b8f90f572a8c0cb0e54d8b5c2d0e300d28a0d7bffec"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6f8ad828f01e8dd32cc58bc28375150171d198491fc901f6f98d2a39ba8e3ff5"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86811954eec63e9ea162af0ffa9f8d09088bab51b7438e8b6488b9401863c25e"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4025ac5e87d9b80e1f300207eb2fd099ff8200fa2320d7dc066a3f4622dc6b"}, - {file = "ruff-0.1.15-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b17b93c02cdb6aeb696effecea1095ac93f3884a49a554a9afa76bb125c114c1"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ddb87643be40f034e97e97f5bc2ef7ce39de20e34608f3f829db727a93fb82c5"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:abf4822129ed3a5ce54383d5f0e964e7fef74a41e48eb1dfad404151efc130a2"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6c629cf64bacfd136c07c78ac10a54578ec9d1bd2a9d395efbee0935868bf852"}, - {file = "ruff-0.1.15-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1bab866aafb53da39c2cadfb8e1c4550ac5340bb40300083eb8967ba25481447"}, - {file = "ruff-0.1.15-py3-none-win32.whl", hash = "sha256:2417e1cb6e2068389b07e6fa74c306b2810fe3ee3476d5b8a96616633f40d14f"}, - {file = 
"ruff-0.1.15-py3-none-win_amd64.whl", hash = "sha256:3837ac73d869efc4182d9036b1405ef4c73d9b1f88da2413875e34e0d6919587"}, - {file = "ruff-0.1.15-py3-none-win_arm64.whl", hash = "sha256:9a933dfb1c14ec7a33cceb1e49ec4a16b51ce3c20fd42663198746efc0427360"}, - {file = "ruff-0.1.15.tar.gz", hash = "sha256:f6dfa8c1b21c913c326919056c390966648b680966febcb796cc9d1aaab8564e"}, + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] [[package]] @@ -489,39 +749,62 @@ files = [ ] [[package]] -name = "types-requests" -version = "2.31.0.20240311" -description = "Typing stubs for requests" +name = "tomlkit" +version = "0.13.0" +description = "Style preserving TOML library" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.31.0.20240311.tar.gz", hash = "sha256:b1c1b66abfb7fa79aae09097a811c4aa97130eb8831c60e47aee4ca344731ca5"}, - {file = "types_requests-2.31.0.20240311-py3-none-any.whl", hash = "sha256:47872893d65a38e282ee9f277a4ee50d1b28bd592040df7d1fdaffdf3779937d"}, + {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, + {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, ] -[package.dependencies] -urllib3 = ">=2" +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20240316" +description = "Typing stubs for python-dateutil" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, + {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, +] [[package]] name = "typing-extensions" -version = 
"4.10.0" +version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" files = [ - {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, - {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." +optional = false +python-versions = "*" +files = [ + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, ] +[package.dependencies] +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" + [[package]] name = "urllib3" -version = "2.2.1" +version = "2.2.2" description = "HTTP library with thread-safe connection pooling, file post, and more." 
-optional = false +optional = true python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.1-py3-none-any.whl", hash = "sha256:450b20ec296a467077128bff42b73080516e71b56ff59a60a02bef2232c4fa9d"}, - {file = "urllib3-2.2.1.tar.gz", hash = "sha256:d0570876c61ab9e520d776c38acbbb5b05a776d3f9ff98a5c8fd5162a444cf19"}, + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, ] [package.extras] @@ -530,7 +813,10 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[extras] +gcp = ["google-auth", "requests"] + [metadata] lock-version = "2.0" -python-versions = ">=3.9,<4.0" -content-hash = "a1b9663d7041a47bc8b6705e4fc9bd4563718a49e492aa8f0edf96fb8afa468b" +python-versions = "^3.8" +content-hash = "a1ca991b0570a5c978745559e8d18354ec04cbd566513cc895346ec1bae01112" diff --git a/poetry.toml b/poetry.toml new file mode 100644 index 00000000..ab1033bd --- /dev/null +++ b/poetry.toml @@ -0,0 +1,2 @@ +[virtualenvs] +in-project = true diff --git a/py.typed b/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/pylintrc b/pylintrc new file mode 100644 index 00000000..50800386 --- /dev/null +++ b/pylintrc @@ -0,0 +1,658 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. 
+#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. 
The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked and +# will not be imported (useful for modules/projects where namespaces are +# manipulated during runtime and thus existing member attributes cannot be +# deduced by static analysis). It supports qualified module names, as well as +# Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.8 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots=src + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. 
+unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. 
If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _, + e, + id + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. 
Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +typealias-rgx=.* + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. 
+max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=25 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). 
+ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". 
+disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-implicit-booleaness-not-comparison-to-string, + use-implicit-booleaness-not-comparison-to-zero, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught, + fixme, + relative-beyond-top-level + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable= + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. 
When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are: text, parseable, colorized, +# json2 (improved json format), json (old json format) and msvs (visual +# studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work. 
+spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. 
In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). 
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/pyproject.toml b/pyproject.toml index 4b115d61..bea3e42d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,42 +1,53 @@ [tool.poetry] name = "mistralai" -version = "0.4.2" -description = "" -authors = ["Bam4d "] +version = "1.0.0-rc.2" +description = "Python Client SDK for the Mistral AI API." +authors = ["Mistral"] readme = "README.md" -license = "Apache 2.0 License" - -[tool.ruff] -select = ["E", "F", "W", "Q", "I"] -ignore = ["E203"] -fixable = ["ALL"] -unfixable = [] -line-length = 120 - - -[tool.mypy] -disallow_untyped_defs = true -show_error_codes = true -no_implicit_optional = true -warn_return_any = true -warn_unused_ignores = true -exclude = ["docs", "tests", "examples", "tools", "build"] +packages = [ + { include = "mistralai", from = "src" }, + { include = "mistralai_azure", from = "packages/mistralai_azure/src" }, + { include = "mistralai_gcp", from = "packages/mistralai_gcp/src" }, +] +include = ["py.typed", "src/mistralai/py.typed"] +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai/py.typed"] [tool.poetry.dependencies] -python = ">=3.9,<4.0" -orjson = ">=3.9.10,<3.11" -pydantic = ">=2.5.2,<3" -httpx = ">=0.25,<1" - +python = "^3.8" +httpx = "^0.27.0" +jsonpath-python = "^1.0.6" +pydantic = "~2.8.2" +python-dateutil = "^2.9.0.post0" +typing-inspect = "^0.9.0" +google-auth = { version = "^2.31.0", optional = true } +requests = { version = "^2.32.3", optional = true } [tool.poetry.group.dev.dependencies] -ruff = "^0.1.6" -mypy = "^1.7.1" -types-requests = "^2.31.0.10" -pytest = 
"^7.4.3" -pytest-asyncio = "^0.23.2" +mypy = "==1.10.1" +pylint = "==3.2.3" +pytest = "^8.2.2" +pytest-asyncio = "^0.23.7" +types-python-dateutil = "^2.9.0.20240316" + +[tool.poetry.extras] +gcp = ["google-auth", "requests"] [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +pythonpath = ["src"] + +[tool.mypy] +disable_error_code = "misc" + +[[tool.mypy.overrides]] +module = "typing_inspect" +ignore_missing_imports = true + +[[tool.mypy.overrides]] +module = "jsonpath" +ignore_missing_imports = true diff --git a/scripts/compile.sh b/scripts/compile.sh new file mode 100755 index 00000000..aa49772e --- /dev/null +++ b/scripts/compile.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +set -o pipefail # Ensure pipeline failures are propagated + +# Use temporary files to store outputs and exit statuses +declare -A output_files +declare -A status_files + +# Function to run a command with temporary output and status files +run_command() { + local cmd="$1" + local key="$2" + local output_file="$3" + local status_file="$4" + + # Run the command and store output and exit status + { + eval "$cmd" + echo $? > "$status_file" + } &> "$output_file" & +} + +# Create temporary files for outputs and statuses +for cmd in compileall pylint mypy pyright; do + output_files[$cmd]=$(mktemp) + status_files[$cmd]=$(mktemp) +done + +# Collect PIDs for background processes +declare -a pids + +# Run commands in parallel using temporary files +echo "Running python -m compileall" +run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" +pids+=($!) + +echo "Running pylint" +run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" +pids+=($!) + +echo "Running mypy" +run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" +pids+=($!) 
+ +echo "Running pyright (optional)" +run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" +pids+=($!) + +# Wait for all processes to complete +echo "Waiting for processes to complete" +for pid in "${pids[@]}"; do + wait "$pid" +done + +# Print output sequentially and check for failures +failed=false +for key in "${!output_files[@]}"; do + echo "--- Output from Command: $key ---" + echo + cat "${output_files[$key]}" + echo # Empty line for separation + echo "--- End of Output from Command: $key ---" + echo + + exit_status=$(cat "${status_files[$key]}") + if [ "$exit_status" -ne 0 ]; then + echo "Command $key failed with exit status $exit_status" >&2 + failed=true + fi +done + +# Clean up temporary files +for tmp_file in "${output_files[@]}" "${status_files[@]}"; do + rm -f "$tmp_file" +done + +if $failed; then + echo "One or more commands failed." >&2 + exit 1 +else + echo "All commands completed successfully." + exit 0 +fi diff --git a/scripts/publish.sh b/scripts/publish.sh new file mode 100755 index 00000000..1ee7194c --- /dev/null +++ b/scripts/publish.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} + +poetry publish --build --skip-existing diff --git a/src/mistralai/__init__.py b/src/mistralai/__init__.py index e69de29b..68138c47 100644 --- a/src/mistralai/__init__.py +++ b/src/mistralai/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdk import * +from .sdkconfiguration import * +from .models import * diff --git a/src/mistralai/_hooks/__init__.py b/src/mistralai/_hooks/__init__.py new file mode 100644 index 00000000..2ee66cdd --- /dev/null +++ b/src/mistralai/_hooks/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .sdkhooks import * +from .types import * +from .registration import * diff --git a/src/mistralai/_hooks/custom_user_agent.py b/src/mistralai/_hooks/custom_user_agent.py new file mode 100644 index 00000000..59506ea2 --- /dev/null +++ b/src/mistralai/_hooks/custom_user_agent.py @@ -0,0 +1,16 @@ +# MAKE SURE YOU UPDATE THE COPIES OF THIS FILES IN THE PROVIDERS'S PACKAGES WHEN YOU MAKE CHANGES HERE +from typing import Union + +import httpx + +from .types import BeforeRequestContext, BeforeRequestHook + + +class CustomUserAgentHook(BeforeRequestHook): + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + request.headers["user-agent"] = ( + "mistral-client-python/" + request.headers["user-agent"].split(" ")[1] + ) + return request diff --git a/src/mistralai/_hooks/deprecation_warning.py b/src/mistralai/_hooks/deprecation_warning.py new file mode 100644 index 00000000..8de2968b --- /dev/null +++ b/src/mistralai/_hooks/deprecation_warning.py @@ -0,0 +1,26 @@ +import logging +from typing import Union + +import httpx + +from .types import AfterSuccessContext, AfterSuccessHook + +logger = logging.getLogger(__name__) + +HEADER_MODEL_DEPRECATION_TIMESTAMP = "x-model-deprecation-timestamp" + + +class DeprecationWarningHook(AfterSuccessHook): + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: + if HEADER_MODEL_DEPRECATION_TIMESTAMP in response.headers: + model = response.json()["model"] + # pylint: disable=logging-fstring-interpolation + logger.warning( + "WARNING: The model %s is deprecated and will be removed on %s. 
Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.", + model, + response.headers[HEADER_MODEL_DEPRECATION_TIMESTAMP], + ) + return response diff --git a/src/mistralai/_hooks/registration.py b/src/mistralai/_hooks/registration.py new file mode 100644 index 00000000..fc3ae79b --- /dev/null +++ b/src/mistralai/_hooks/registration.py @@ -0,0 +1,17 @@ +from .custom_user_agent import CustomUserAgentHook +from .deprecation_warning import DeprecationWarningHook +from .types import Hooks + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. + + +def init_hooks(hooks: Hooks): + # pylint: disable=unused-argument + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance + """ + hooks.register_before_request_hook(CustomUserAgentHook()) + hooks.register_after_success_hook(DeprecationWarningHook()) diff --git a/src/mistralai/_hooks/sdkhooks.py b/src/mistralai/_hooks/sdkhooks.py new file mode 100644 index 00000000..24b0d08c --- /dev/null +++ b/src/mistralai/_hooks/sdkhooks.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai.httpclient import HttpClient + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + 
result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py new file mode 100644 index 00000000..e9391f3b --- /dev/null +++ b/src/mistralai/_hooks/types.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + + +from abc import ABC, abstractmethod +import httpx +from mistralai.httpclient import HttpClient +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]): + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success(self, hook_ctx: AfterSuccessContext, 
response: httpx.Response) -> Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py new file mode 100644 index 00000000..12ea5754 --- /dev/null +++ b/src/mistralai/agents.py @@ -0,0 +1,434 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.utils import eventstreaming, get_security_from_env +from typing import Any, AsyncGenerator, Generator, List, Optional, Union + +class Agents(BaseSDK): + r"""Agents API.""" + + + def complete( + self, *, + messages: Union[List[models.AgentsCompletionRequestMessages], List[models.AgentsCompletionRequestMessagesTypedDict]], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.AgentsCompletionRequestStop, models.AgentsCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: 
Optional[models.AgentsCompletionRequestToolChoice] = "auto", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.AgentsCompletionRequest( + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + agent_id=agent_id, + ) + + req = self.build_request( + method="POST", + path="/v1/agents/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + 
error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def complete_async( + self, *, + messages: Union[List[models.AgentsCompletionRequestMessages], List[models.AgentsCompletionRequestMessagesTypedDict]], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.AgentsCompletionRequestStop, models.AgentsCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.AgentsCompletionRequestToolChoice] = "auto", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.AgentsCompletionRequest( + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + agent_id=agent_id, + ) + + req = self.build_request( + method="POST", + path="/v1/agents/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + 
accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def stream( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: 
OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[Generator[models.CompletionEvent, None, None]]: + r"""Stream Agents completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.AgentsCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/agents/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if 
utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def stream_async( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + r"""Stream Agents completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. 
Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.AgentsCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/agents/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), 
sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py index abe86548..f9522a28 100644 --- a/src/mistralai/async_client.py +++ b/src/mistralai/async_client.py @@ -1,423 +1,15 @@ -import asyncio -import posixpath -from json import JSONDecodeError -from typing import Any, AsyncGenerator, Callable, Dict, List, Optional, Union +from typing import Optional -from httpx import ( - AsyncClient, - AsyncHTTPTransport, - ConnectError, - Limits, - RequestError, - Response, -) +from .client import MIGRATION_MESSAGE -from mistralai.client_base import ClientBase -from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES -from mistralai.exceptions import ( - MistralAPIException, - MistralAPIStatusException, - MistralConnectionException, - MistralException, -) -from mistralai.files import FilesAsyncClient -from mistralai.jobs import JobsAsyncClient -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, - ResponseFormat, - ToolChoice, -) -from mistralai.models.embeddings import EmbeddingResponse -from mistralai.models.models import ModelDeleted, ModelList - -class MistralAsyncClient(ClientBase): +class MistralAsyncClient: def __init__( self, api_key: Optional[str] = None, - endpoint: str = ENDPOINT, + endpoint: str = "", max_retries: int = 5, timeout: int = 120, max_concurrent_requests: int = 64, ): - super().__init__(endpoint, api_key, 
max_retries, timeout) - - self._client = AsyncClient( - follow_redirects=True, - timeout=timeout, - limits=Limits(max_connections=max_concurrent_requests), - transport=AsyncHTTPTransport(retries=max_retries), - ) - self.files = FilesAsyncClient(self) - self.jobs = JobsAsyncClient(self) - - async def close(self) -> None: - await self._client.aclose() - - async def _check_response_status_codes(self, response: Response) -> None: - if response.status_code in RETRY_STATUS_CODES: - raise MistralAPIStatusException.from_response( - response, - message=f"Status: {response.status_code}. Message: {response.text}", - ) - elif 400 <= response.status_code < 500: - if response.stream: - await response.aread() - raise MistralAPIException.from_response( - response, - message=f"Status: {response.status_code}. Message: {response.text}", - ) - elif response.status_code >= 500: - if response.stream: - await response.aread() - raise MistralException( - message=f"Status: {response.status_code}. Message: {response.text}", - ) - - async def _check_streaming_response(self, response: Response) -> None: - await self._check_response_status_codes(response) - - async def _check_response(self, response: Response) -> Dict[str, Any]: - await self._check_response_status_codes(response) - - json_response: Dict[str, Any] = response.json() - - if "object" not in json_response: - raise MistralException(message=f"Unexpected response: {json_response}") - if "error" == json_response["object"]: # has errors - raise MistralAPIException.from_response( - response, - message=json_response["message"], - ) - - return json_response - - async def _request( - self, - method: str, - json: Optional[Dict[str, Any]], - path: str, - stream: bool = False, - attempt: int = 1, - data: Optional[Dict[str, Any]] = None, - check_model_deprecation_headers_callback: Optional[Callable] = None, - **kwargs: Any, - ) -> AsyncGenerator[Dict[str, Any], None]: - accept_header = "text/event-stream" if stream else "application/json" - 
headers = { - "Accept": accept_header, - "User-Agent": f"mistral-client-python/{self._version}", - "Authorization": f"Bearer {self._api_key}", - } - - if json is not None: - headers["Content-Type"] = "application/json" - - url = posixpath.join(self._endpoint, path) - - self._logger.debug(f"Sending request: {method} {url} {json}") - - response: Response - - try: - if stream: - async with self._client.stream( - method, - url, - headers=headers, - json=json, - data=data, - **kwargs, - ) as response: - if check_model_deprecation_headers_callback: - check_model_deprecation_headers_callback(response.headers) - await self._check_streaming_response(response) - - async for line in response.aiter_lines(): - json_streamed_response = self._process_line(line) - if json_streamed_response: - yield json_streamed_response - - else: - response = await self._client.request( - method, - url, - headers=headers, - json=json, - data=data, - **kwargs, - ) - if check_model_deprecation_headers_callback: - check_model_deprecation_headers_callback(response.headers) - yield await self._check_response(response) - - except ConnectError as e: - raise MistralConnectionException(str(e)) from e - except RequestError as e: - raise MistralException(f"Unexpected exception ({e.__class__.__name__}): {e}") from e - except JSONDecodeError as e: - raise MistralAPIException.from_response( - response, - message=f"Failed to decode json body: {response.text}", - ) from e - except MistralAPIStatusException as e: - attempt += 1 - if attempt > self._max_retries: - raise MistralAPIStatusException.from_response(response, message=str(e)) from e - backoff = 2.0**attempt # exponential backoff - await asyncio.sleep(backoff) - - # Retry as a generator - async for r in self._request(method, json, path, stream=stream, attempt=attempt): - yield r - - async def chat( - self, - messages: List[Any], - model: Optional[str] = None, - tools: Optional[List[Dict[str, Any]]] = None, - temperature: Optional[float] = None, - 
max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - safe_prompt: bool = False, - tool_choice: Optional[Union[str, ToolChoice]] = None, - response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, - ) -> ChatCompletionResponse: - """A asynchronous chat endpoint that returns a single response. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[Any]): messages an array of messages to chat with, e.g. - [{role: 'user', content: 'What is the best French cheese?'}] - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. - safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. - - Returns: - ChatCompletionResponse: a response object containing the generated text. 
- """ - request = self._make_chat_request( - messages, - model, - tools=tools, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=False, - safe_prompt=safe_mode or safe_prompt, - tool_choice=tool_choice, - response_format=response_format, - ) - - single_response = self._request( - "post", - request, - "v1/chat/completions", - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - async for response in single_response: - return ChatCompletionResponse(**response) - - raise MistralException("No response received") - - async def chat_stream( - self, - messages: List[Any], - model: Optional[str] = None, - tools: Optional[List[Dict[str, Any]]] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - safe_prompt: bool = False, - tool_choice: Optional[Union[str, ToolChoice]] = None, - response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, - ) -> AsyncGenerator[ChatCompletionStreamResponse, None]: - """An Asynchronous chat endpoint that streams responses. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[Any]): messages an array of messages to chat with, e.g. - [{role: 'user', content: 'What is the best French cheese?'}] - tools (Optional[List[Function]], optional): a list of tools to use. - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. 
- safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. - safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. - - Returns: - AsyncGenerator[ChatCompletionStreamResponse, None]: - An async generator that yields ChatCompletionStreamResponse objects. - """ - - request = self._make_chat_request( - messages, - model, - tools=tools, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=True, - safe_prompt=safe_mode or safe_prompt, - tool_choice=tool_choice, - response_format=response_format, - ) - async_response = self._request( - "post", - request, - "v1/chat/completions", - stream=True, - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - async for json_response in async_response: - yield ChatCompletionStreamResponse(**json_response) - - async def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingResponse: - """An asynchronous embeddings endpoint that returns embeddings for a single, or batch of inputs - - Args: - model (str): The embedding model to use, e.g. mistral-embed - input (Union[str, List[str]]): The input to embed, - e.g. ['What is the best French cheese?'] - - Returns: - EmbeddingResponse: A response object containing the embeddings. - """ - request = {"model": model, "input": input} - single_response = self._request( - "post", - request, - "v1/embeddings", - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - async for response in single_response: - return EmbeddingResponse(**response) - - raise MistralException("No response received") - - async def list_models(self) -> ModelList: - """Returns a list of the available models - - Returns: - ModelList: A response object containing the list of models. 
- """ - single_response = self._request("get", {}, "v1/models") - - async for response in single_response: - return ModelList(**response) - - raise MistralException("No response received") - - async def delete_model(self, model_id: str) -> ModelDeleted: - single_response = self._request("delete", {}, f"v1/models/{model_id}") - - async for response in single_response: - return ModelDeleted(**response) - - raise MistralException("No response received") - - async def completion( - self, - model: str, - prompt: str, - suffix: Optional[str] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stop: Optional[List[str]] = None, - ) -> ChatCompletionResponse: - """An asynchronous completion endpoint that returns a single response. - - Args: - model (str): model the name of the model to get completions with, e.g. codestral-latest - prompt (str): the prompt to complete - suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n'] - Returns: - Dict[str, Any]: a response object containing the generated text. 
- """ - request = self._make_completion_request( - prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop - ) - single_response = self._request( - "post", - request, - "v1/fim/completions", - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - async for response in single_response: - return ChatCompletionResponse(**response) - - raise MistralException("No response received") - - async def completion_stream( - self, - model: str, - prompt: str, - suffix: Optional[str] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stop: Optional[List[str]] = None, - ) -> AsyncGenerator[ChatCompletionStreamResponse, None]: - """An asynchronous completion endpoint that returns a streaming response. - - Args: - model (str): model the name of the model to get completions with, e.g. codestral-latest - prompt (str): the prompt to complete - suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n'] - - Returns: - Dict[str, Any]: a response object containing the generated text. 
- """ - request = self._make_completion_request( - prompt, - model, - suffix, - temperature, - max_tokens, - top_p, - random_seed, - stop, - stream=True, - ) - async_response = self._request( - "post", - request, - "v1/fim/completions", - stream=True, - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - async for json_response in async_response: - yield ChatCompletionStreamResponse(**json_response) + raise NotImplementedError(MIGRATION_MESSAGE) diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py new file mode 100644 index 00000000..f9e54c5a --- /dev/null +++ b/src/mistralai/basesdk.py @@ -0,0 +1,253 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .sdkconfiguration import SDKConfiguration +import httpx +from mistralai import models, utils +from mistralai._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext +from mistralai.utils import RetryConfig, SerializedRequestBody, get_body_content +from typing import Callable, List, Optional, Tuple + +class BaseSDK: + sdk_configuration: SDKConfiguration + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self.sdk_configuration = sdk_config + + def get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: 
+ client = self.sdk_configuration.client + + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + self.get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if request_has_query_params else None, + ) + + headers = utils.get_headers(request, _globals) + headers["Accept"] = accept_header_value + headers[user_agent_header] = self.sdk_configuration.user_agent + + if security is not None: + if callable(security): + security = security() + security = utils.get_security_from_env(security, models.Security) + if security is not None: + security_headers, security_query_params = utils.get_security(security) + headers = {**headers, **security_headers} + query_params = {**query_params, **security_query_params} + + serialized_request_body = SerializedRequestBody("application/octet-stream") + if get_serialized_body is not None: + rb = get_serialized_body() + if request_body_required and rb is None: + raise ValueError("request body is required") + + if rb is not None: + serialized_request_body = rb + + if ( + serialized_request_body.media_type is not None + and serialized_request_body.media_type + not in ( + "multipart/form-data", + "multipart/mixed", + ) + ): + headers["content-type"] = serialized_request_body.media_type + + timeout = timeout_ms / 1000 if timeout_ms is not None else None + + return client.build_request( + method, + url, + params=query_params, + content=serialized_request_body.content, + data=serialized_request_body.data, + files=serialized_request_body.files, + headers=headers, + timeout=timeout, + ) + + def do_request( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.client + logger = 
self.sdk_configuration.debug_logger + + def do(): + http_res = None + try: + req = self.sdk_configuration.get_hooks().before_request( + BeforeRequestContext(hook_ctx), request + ) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req) + ) + http_res = client.send(req, stream=stream) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), None, e + ) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.SDKError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred") + + return http_res + + if retry_config is not None: + http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) + else: + http_res = do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = self.sdk_configuration.get_hooks().after_success( + AfterSuccessContext(hook_ctx), http_res + ) + + return http_res + + async def do_request_async( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.async_client + logger = self.sdk_configuration.debug_logger + async def do(): + http_res = None + try: + req = 
self.sdk_configuration.get_hooks().before_request( + BeforeRequestContext(hook_ctx), request + ) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req) + ) + http_res = await client.send(req, stream=stream) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), None, e + ) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.SDKError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = self.sdk_configuration.get_hooks().after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred") + + return http_res + + if retry_config is not None: + http_res = await utils.retry_async( + do, utils.Retries(retry_config[0], retry_config[1]) + ) + else: + http_res = await do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = self.sdk_configuration.get_hooks().after_success( + AfterSuccessContext(hook_ctx), http_res + ) + + return http_res diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py new file mode 100644 index 00000000..1323be20 --- /dev/null +++ b/src/mistralai/chat.py @@ -0,0 +1,470 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.utils import eventstreaming, get_security_from_env +from typing import Any, AsyncGenerator, Generator, List, Optional, Union + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + + def complete( + self, *, + model: Nullable[str], + messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ToolChoice] = "auto", + safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
+ :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/v1/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], 
+ retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def complete_async( + self, *, + model: Nullable[str], + messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ToolChoice] = "auto", + safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ChatCompletionResponse]: + r"""Chat Completion + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. 
+ :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.Messages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/v1/chat/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + 
error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def stream( + self, *, + model: Nullable[str], + messages: Union[List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.ChatCompletionStreamRequestStop, models.ChatCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ChatCompletionStreamRequestToolChoice] = "auto", + safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[Generator[models.CompletionEvent, None, None]]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. 
Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.ChatCompletionStreamRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/v1/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + 
error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def stream_async( + self, *, + model: Nullable[str], + messages: Union[List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict]], + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.ChatCompletionStreamRequestStop, models.ChatCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.ChatCompletionStreamRequestToolChoice] = "auto", + safe_prompt: Optional[bool] = False, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a 
client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param response_format: + :param tools: + :param tool_choice: + :param safe_prompt: Whether to inject a safety prompt before all conversations. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + messages=utils.get_pydantic_model(messages, List[models.ChatCompletionStreamRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + safe_prompt=safe_prompt, + ) + + req = self.build_request( + method="POST", + path="/v1/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + 
error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/client.py b/src/mistralai/client.py index 9c75373e..d3582f77 100644 --- a/src/mistralai/client.py +++ b/src/mistralai/client.py @@ -1,423 +1,14 @@ -import posixpath -import time -from json import JSONDecodeError -from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Union +from typing import Optional -from httpx import Client, ConnectError, HTTPTransport, RequestError, Response +MIGRATION_MESSAGE = "This client is deprecated. To migrate to the new client, please refer to this guide: https://github.com/mistralai/client-python/blob/main/MIGRATION.md. If you need to use this client anyway, pin your version to 0.4.2." 
-from mistralai.client_base import ClientBase -from mistralai.constants import ENDPOINT, RETRY_STATUS_CODES -from mistralai.exceptions import ( - MistralAPIException, - MistralAPIStatusException, - MistralConnectionException, - MistralException, -) -from mistralai.files import FilesClient -from mistralai.jobs import JobsClient -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, - ResponseFormat, - ToolChoice, -) -from mistralai.models.embeddings import EmbeddingResponse -from mistralai.models.models import ModelDeleted, ModelList - - -class MistralClient(ClientBase): - """ - Synchronous wrapper around the async client - """ +class MistralClient: def __init__( self, api_key: Optional[str] = None, - endpoint: str = ENDPOINT, + endpoint: str = "", max_retries: int = 5, timeout: int = 120, ): - super().__init__(endpoint, api_key, max_retries, timeout) - - self._client = Client( - follow_redirects=True, - timeout=self._timeout, - transport=HTTPTransport(retries=self._max_retries), - ) - self.files = FilesClient(self) - self.jobs = JobsClient(self) - - def __del__(self) -> None: - self._client.close() - - def _check_response_status_codes(self, response: Response) -> None: - if response.status_code in RETRY_STATUS_CODES: - raise MistralAPIStatusException.from_response( - response, - message=f"Status: {response.status_code}. Message: {response.text}", - ) - elif 400 <= response.status_code < 500: - if response.stream: - response.read() - raise MistralAPIException.from_response( - response, - message=f"Status: {response.status_code}. Message: {response.text}", - ) - elif response.status_code >= 500: - if response.stream: - response.read() - raise MistralException( - message=f"Status: {response.status_code}. 
Message: {response.text}", - ) - - def _check_streaming_response(self, response: Response) -> None: - self._check_response_status_codes(response) - - def _check_response(self, response: Response) -> Dict[str, Any]: - self._check_response_status_codes(response) - - json_response: Dict[str, Any] = response.json() - - if "object" not in json_response: - raise MistralException(message=f"Unexpected response: {json_response}") - if "error" == json_response["object"]: # has errors - raise MistralAPIException.from_response( - response, - message=json_response["message"], - ) - - return json_response - - def _request( - self, - method: str, - json: Optional[Dict[str, Any]], - path: str, - stream: bool = False, - attempt: int = 1, - data: Optional[Dict[str, Any]] = None, - check_model_deprecation_headers_callback: Optional[Callable] = None, - **kwargs: Any, - ) -> Iterator[Dict[str, Any]]: - accept_header = "text/event-stream" if stream else "application/json" - headers = { - "Accept": accept_header, - "User-Agent": f"mistral-client-python/{self._version}", - "Authorization": f"Bearer {self._api_key}", - } - - if json is not None: - headers["Content-Type"] = "application/json" - - url = posixpath.join(self._endpoint, path) - - self._logger.debug(f"Sending request: {method} {url} {json}") - - response: Response - - try: - if stream: - with self._client.stream( - method, - url, - headers=headers, - json=json, - data=data, - **kwargs, - ) as response: - if check_model_deprecation_headers_callback: - check_model_deprecation_headers_callback(response.headers) - self._check_streaming_response(response) - - for line in response.iter_lines(): - json_streamed_response = self._process_line(line) - if json_streamed_response: - yield json_streamed_response - - else: - response = self._client.request( - method, - url, - headers=headers, - json=json, - data=data, - **kwargs, - ) - if check_model_deprecation_headers_callback: - check_model_deprecation_headers_callback(response.headers) - 
yield self._check_response(response) - - except ConnectError as e: - raise MistralConnectionException(str(e)) from e - except RequestError as e: - raise MistralException(f"Unexpected exception ({e.__class__.__name__}): {e}") from e - except JSONDecodeError as e: - raise MistralAPIException.from_response( - response, - message=f"Failed to decode json body: {response.text}", - ) from e - except MistralAPIStatusException as e: - attempt += 1 - if attempt > self._max_retries: - raise MistralAPIStatusException.from_response(response, message=str(e)) from e - backoff = 2.0**attempt # exponential backoff - time.sleep(backoff) - - # Retry as a generator - for r in self._request(method, json, path, stream=stream, attempt=attempt): - yield r - - def chat( - self, - messages: List[Any], - model: Optional[str] = None, - tools: Optional[List[Dict[str, Any]]] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - safe_prompt: bool = False, - tool_choice: Optional[Union[str, ToolChoice]] = None, - response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, - ) -> ChatCompletionResponse: - """A chat endpoint that returns a single response. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[Any]): messages an array of messages to chat with, e.g. - [{role: 'user', content: 'What is the best French cheese?'}] - tools (Optional[List[Function]], optional): a list of tools to use. - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. 
Defaults to None. - safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. - safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. - - Returns: - ChatCompletionResponse: a response object containing the generated text. - """ - request = self._make_chat_request( - messages, - model, - tools=tools, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=False, - safe_prompt=safe_mode or safe_prompt, - tool_choice=tool_choice, - response_format=response_format, - ) - - single_response = self._request( - "post", - request, - "v1/chat/completions", - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - for response in single_response: - return ChatCompletionResponse(**response) - - raise MistralException("No response received") - - def chat_stream( - self, - messages: List[Any], - model: Optional[str] = None, - tools: Optional[List[Dict[str, Any]]] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - safe_mode: bool = False, - safe_prompt: bool = False, - tool_choice: Optional[Union[str, ToolChoice]] = None, - response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, - ) -> Iterable[ChatCompletionStreamResponse]: - """A chat endpoint that streams responses. - - Args: - model (str): model the name of the model to chat with, e.g. mistral-tiny - messages (List[Any]): messages an array of messages to chat with, e.g. - [{role: 'user', content: 'What is the best French cheese?'}] - tools (Optional[List[Function]], optional): a list of tools to use. - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. 
- top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - safe_mode (bool, optional): deprecated, use safe_prompt instead. Defaults to False. - safe_prompt (bool, optional): whether to use safe prompt, e.g. true. Defaults to False. - - Returns: - Iterable[ChatCompletionStreamResponse]: - A generator that yields ChatCompletionStreamResponse objects. - """ - request = self._make_chat_request( - messages, - model, - tools=tools, - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - stream=True, - safe_prompt=safe_mode or safe_prompt, - tool_choice=tool_choice, - response_format=response_format, - ) - - response = self._request( - "post", - request, - "v1/chat/completions", - stream=True, - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - for json_streamed_response in response: - yield ChatCompletionStreamResponse(**json_streamed_response) - - def embeddings(self, model: str, input: Union[str, List[str]]) -> EmbeddingResponse: - """An embeddings endpoint that returns embeddings for a single, or batch of inputs - - Args: - model (str): The embedding model to use, e.g. mistral-embed - input (Union[str, List[str]]): The input to embed, - e.g. ['What is the best French cheese?'] - - Returns: - EmbeddingResponse: A response object containing the embeddings. 
- """ - request = {"model": model, "input": input} - singleton_response = self._request( - "post", - request, - "v1/embeddings", - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - for response in singleton_response: - return EmbeddingResponse(**response) - - raise MistralException("No response received") - - def list_models(self) -> ModelList: - """Returns a list of the available models - - Returns: - ModelList: A response object containing the list of models. - """ - singleton_response = self._request("get", {}, "v1/models") - - for response in singleton_response: - return ModelList(**response) - - raise MistralException("No response received") - - def delete_model(self, model_id: str) -> ModelDeleted: - single_response = self._request("delete", {}, f"v1/models/{model_id}") - - for response in single_response: - return ModelDeleted(**response) - - raise MistralException("No response received") - - def completion( - self, - model: str, - prompt: str, - suffix: Optional[str] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stop: Optional[List[str]] = None, - ) -> ChatCompletionResponse: - """A completion endpoint that returns a single response. - - Args: - model (str): model the name of the model to get completion with, e.g. codestral-latest - prompt (str): the prompt to complete - suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. 
- stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n'] - - Returns: - Dict[str, Any]: a response object containing the generated text. - """ - request = self._make_completion_request( - prompt, model, suffix, temperature, max_tokens, top_p, random_seed, stop - ) - - single_response = self._request( - "post", - request, - "v1/fim/completions", - stream=False, - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - for response in single_response: - return ChatCompletionResponse(**response) - - raise MistralException("No response received") - - def completion_stream( - self, - model: str, - prompt: str, - suffix: Optional[str] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stop: Optional[List[str]] = None, - ) -> Iterable[ChatCompletionStreamResponse]: - """An asynchronous completion endpoint that streams responses. - - Args: - model (str): model the name of the model to get completions with, e.g. codestral-latest - prompt (str): the prompt to complete - suffix (Optional[str]): the suffix to append to the prompt for fill-in-the-middle completion - temperature (Optional[float], optional): temperature the temperature to use for sampling, e.g. 0.5. - max_tokens (Optional[int], optional): the maximum number of tokens to generate, e.g. 100. Defaults to None. - top_p (Optional[float], optional): the cumulative probability of tokens to generate, e.g. 0.9. - Defaults to None. - random_seed (Optional[int], optional): the random seed to use for sampling, e.g. 42. Defaults to None. - stop (Optional[List[str]], optional): a list of tokens to stop generation at, e.g. ['/n/n'] - - Returns: - Iterable[Dict[str, Any]]: a generator that yields response objects containing the generated text. 
- """ - request = self._make_completion_request( - prompt, - model, - suffix, - temperature, - max_tokens, - top_p, - random_seed, - stop, - stream=True, - ) - - response = self._request( - "post", - request, - "v1/fim/completions", - stream=True, - check_model_deprecation_headers_callback=self._check_model_deprecation_header_callback_factory(model), - ) - - for json_streamed_response in response: - yield ChatCompletionStreamResponse(**json_streamed_response) + raise NotImplementedError(MIGRATION_MESSAGE) diff --git a/src/mistralai/client_base.py b/src/mistralai/client_base.py deleted file mode 100644 index 25077772..00000000 --- a/src/mistralai/client_base.py +++ /dev/null @@ -1,211 +0,0 @@ -import logging -import os -from abc import ABC -from typing import Any, Callable, Dict, List, Optional, Union - -import orjson -from httpx import Headers - -from mistralai.constants import HEADER_MODEL_DEPRECATION_TIMESTAMP -from mistralai.exceptions import MistralException -from mistralai.models.chat_completion import ( - ChatMessage, - Function, - ResponseFormat, - ToolChoice, -) - -CLIENT_VERSION = "0.4.2" - - -class ClientBase(ABC): - def __init__( - self, - endpoint: str, - api_key: Optional[str] = None, - max_retries: int = 5, - timeout: int = 120, - ): - self._max_retries = max_retries - self._timeout = timeout - - if api_key is None: - api_key = os.environ.get("MISTRAL_API_KEY") - if api_key is None: - raise MistralException(message="API key not provided. 
Please set MISTRAL_API_KEY environment variable.") - self._api_key = api_key - self._endpoint = endpoint - self._logger = logging.getLogger(__name__) - - # For azure endpoints, we default to the mistral model - if "inference.azure.com" in self._endpoint: - self._default_model = "mistral" - - self._version = CLIENT_VERSION - - def _get_model(self, model: Optional[str] = None) -> str: - if model is not None: - return model - else: - if self._default_model is None: - raise MistralException(message="model must be provided") - return self._default_model - - def _parse_tools(self, tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]: - parsed_tools: List[Dict[str, Any]] = [] - for tool in tools: - if tool["type"] == "function": - parsed_function = {} - parsed_function["type"] = tool["type"] - if isinstance(tool["function"], Function): - parsed_function["function"] = tool["function"].model_dump(exclude_none=True) - else: - parsed_function["function"] = tool["function"] - - parsed_tools.append(parsed_function) - - return parsed_tools - - def _parse_tool_choice(self, tool_choice: Union[str, ToolChoice]) -> str: - if isinstance(tool_choice, ToolChoice): - return tool_choice.value - return tool_choice - - def _parse_response_format(self, response_format: Union[Dict[str, Any], ResponseFormat]) -> Dict[str, Any]: - if isinstance(response_format, ResponseFormat): - return response_format.model_dump(exclude_none=True) - return response_format - - def _parse_messages(self, messages: List[Any]) -> List[Dict[str, Any]]: - parsed_messages: List[Dict[str, Any]] = [] - for message in messages: - if isinstance(message, ChatMessage): - parsed_messages.append(message.model_dump(exclude_none=True)) - else: - parsed_messages.append(message) - - return parsed_messages - - def _check_model_deprecation_header_callback_factory(self, model: Optional[str] = None) -> Callable: - model = self._get_model(model) - - def _check_model_deprecation_header_callback( - headers: Headers, - ) -> None: - if 
HEADER_MODEL_DEPRECATION_TIMESTAMP in headers: - self._logger.warning( - f"WARNING: The model {model} is deprecated " - f"and will be removed on {headers[HEADER_MODEL_DEPRECATION_TIMESTAMP]}. " - "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning " - "for more information." - ) - - return _check_model_deprecation_header_callback - - def _make_completion_request( - self, - prompt: str, - model: Optional[str] = None, - suffix: Optional[str] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stop: Optional[List[str]] = None, - stream: Optional[bool] = False, - ) -> Dict[str, Any]: - request_data: Dict[str, Any] = { - "prompt": prompt, - "suffix": suffix, - "model": model, - "stream": stream, - } - - if stop is not None: - request_data["stop"] = stop - - request_data["model"] = self._get_model(model) - - request_data.update( - self._build_sampling_params( - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - ) - ) - - self._logger.debug(f"Completion request: {request_data}") - - return request_data - - def _build_sampling_params( - self, - max_tokens: Optional[int], - random_seed: Optional[int], - temperature: Optional[float], - top_p: Optional[float], - ) -> Dict[str, Any]: - params = {} - if temperature is not None: - params["temperature"] = temperature - if max_tokens is not None: - params["max_tokens"] = max_tokens - if top_p is not None: - params["top_p"] = top_p - if random_seed is not None: - params["random_seed"] = random_seed - return params - - def _make_chat_request( - self, - messages: List[Any], - model: Optional[str] = None, - tools: Optional[List[Dict[str, Any]]] = None, - temperature: Optional[float] = None, - max_tokens: Optional[int] = None, - top_p: Optional[float] = None, - random_seed: Optional[int] = None, - stream: Optional[bool] = 
None, - safe_prompt: Optional[bool] = False, - tool_choice: Optional[Union[str, ToolChoice]] = None, - response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None, - ) -> Dict[str, Any]: - request_data: Dict[str, Any] = { - "messages": self._parse_messages(messages), - } - - request_data["model"] = self._get_model(model) - - request_data.update( - self._build_sampling_params( - temperature=temperature, - max_tokens=max_tokens, - top_p=top_p, - random_seed=random_seed, - ) - ) - - if safe_prompt: - request_data["safe_prompt"] = safe_prompt - if tools is not None: - request_data["tools"] = self._parse_tools(tools) - if stream is not None: - request_data["stream"] = stream - - if tool_choice is not None: - request_data["tool_choice"] = self._parse_tool_choice(tool_choice) - if response_format is not None: - request_data["response_format"] = self._parse_response_format(response_format) - - self._logger.debug(f"Chat request: {request_data}") - - return request_data - - def _process_line(self, line: str) -> Optional[Dict[str, Any]]: - if line.startswith("data: "): - line = line[6:].strip() - if line != "[DONE]": - json_streamed_response: Dict[str, Any] = orjson.loads(line) - return json_streamed_response - return None diff --git a/src/mistralai/constants.py b/src/mistralai/constants.py deleted file mode 100644 index c057d4ce..00000000 --- a/src/mistralai/constants.py +++ /dev/null @@ -1,5 +0,0 @@ -RETRY_STATUS_CODES = {429, 500, 502, 503, 504} - -ENDPOINT = "https://api.mistral.ai" - -HEADER_MODEL_DEPRECATION_TIMESTAMP = "x-model-deprecation-timestamp" diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py new file mode 100644 index 00000000..193758ef --- /dev/null +++ b/src/mistralai/embeddings.py @@ -0,0 +1,182 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, Optional, Union + +class Embeddings(BaseSDK): + r"""Embeddings API.""" + + + def create( + self, *, + inputs: Union[models.Inputs, models.InputsTypedDict], + model: str, + encoding_format: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.EmbeddingResponse]: + r"""Embeddings + + Embeddings + + :param inputs: Text to embed. + :param model: ID of the model to use. + :param encoding_format: The format to return the embeddings in. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.EmbeddingRequest( + inputs=inputs, + model=model, + encoding_format=encoding_format, + ) + + req = self.build_request( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.EmbeddingRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + 
retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.EmbeddingResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def create_async( + self, *, + inputs: Union[models.Inputs, models.InputsTypedDict], + model: str, + encoding_format: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.EmbeddingResponse]: + r"""Embeddings + + Embeddings + + :param inputs: Text to embed. + :param model: ID of the model to use. + :param encoding_format: The format to return the embeddings in. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.EmbeddingRequest( + inputs=inputs, + model=model, + encoding_format=encoding_format, + ) + + req = self.build_request( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.EmbeddingRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.EmbeddingResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if 
utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/exceptions.py b/src/mistralai/exceptions.py deleted file mode 100644 index 5728a1c1..00000000 --- a/src/mistralai/exceptions.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import annotations - -from typing import Any, Dict, Optional - -from httpx import Response - - -class MistralException(Exception): - """Base Exception class, returned when nothing more specific applies""" - - def __init__(self, message: Optional[str] = None) -> None: - super(MistralException, self).__init__(message) - - self.message = message - - def __str__(self) -> str: - msg = self.message or "" - return msg - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(message={str(self)})" - - -class MistralAPIException(MistralException): - """Returned when the API responds with an error message""" - - def __init__( - self, - message: Optional[str] = None, - http_status: Optional[int] = None, - headers: Optional[Dict[str, Any]] = None, - ) -> None: - super().__init__(message) - self.http_status = http_status - self.headers = headers or {} - - @classmethod - def from_response(cls, response: Response, message: Optional[str] = None) -> MistralAPIException: - return cls( - message=message or response.text, - http_status=response.status_code, - headers=dict(response.headers), - ) - - def __repr__(self) -> str: - return f"{self.__class__.__name__}(message={str(self)}, http_status={self.http_status})" - - -class MistralAPIStatusException(MistralAPIException): - """Returned when we receive a non-200 response from the API that we should retry""" - - -class MistralConnectionException(MistralException): - 
"""Returned when the SDK can not reach the API server for any reason""" diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 7646a065..2aa37650 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -1,84 +1,600 @@ -from typing import Any - -from mistralai.exceptions import ( - MistralException, -) -from mistralai.models.files import FileDeleted, FileObject, Files - - -class FilesClient: - def __init__(self, client: Any): - self.client = client - - def create( - self, - file: bytes, - purpose: str = "fine-tune", - ) -> FileObject: - single_response = self.client._request( - "post", - None, - "v1/files", - files={"file": file}, - data={"purpose": purpose}, - ) - for response in single_response: - return FileObject(**response) - raise MistralException("No response received") - - def retrieve(self, file_id: str) -> FileObject: - single_response = self.client._request("get", {}, f"v1/files/{file_id}") - for response in single_response: - return FileObject(**response) - raise MistralException("No response received") - - def list(self) -> Files: - single_response = self.client._request("get", {}, "v1/files") - for response in single_response: - return Files(**response) - raise MistralException("No response received") - - def delete(self, file_id: str) -> FileDeleted: - single_response = self.client._request("delete", {}, f"v1/files/{file_id}") - for response in single_response: - return FileDeleted(**response) - raise MistralException("No response received") - - -class FilesAsyncClient: - def __init__(self, client: Any): - self.client = client - - async def create( - self, - file: bytes, - purpose: str = "fine-tune", - ) -> FileObject: - single_response = self.client._request( - "post", - None, - "v1/files", - files={"file": file}, - data={"purpose": purpose}, - ) - async for response in single_response: - return FileObject(**response) - raise MistralException("No response received") - - async def retrieve(self, file_id: str) -> FileObject: - 
single_response = self.client._request("get", {}, f"v1/files/{file_id}") - async for response in single_response: - return FileObject(**response) - raise MistralException("No response received") - - async def list(self) -> Files: - single_response = self.client._request("get", {}, "v1/files") - async for response in single_response: - return Files(**response) - raise MistralException("No response received") - - async def delete(self, file_id: str) -> FileDeleted: - single_response = self.client._request("delete", {}, f"v1/files/{file_id}") - async for response in single_response: - return FileDeleted(**response) - raise MistralException("No response received") +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Optional, Union + +class Files(BaseSDK): + r"""Files API""" + + + def upload( + self, *, + file: Union[models.File, models.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.UploadFileOut]: + r"""Upload File + + Upload a file that can be used across various endpoints. + + The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + + Please contact us if you need to increase these storage limits. + + :param file: The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + file=utils.get_pydantic_model(file, models.File), + ) + + req = self.build_request( + method="POST", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return 
utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def upload_async( + self, *, + file: Union[models.File, models.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.UploadFileOut]: + r"""Upload File + + Upload a file that can be used across various endpoints. + + The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + + Please contact us if you need to increase these storage limits. + + :param file: The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + file=utils.get_pydantic_model(file, models.File), + ) + + req = self.build_request( + method="POST", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return 
utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def list( + self, *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ListFilesOut]: + r"""List Files + + Returns a list of files that belong to the user's organization. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + req = self.build_request( + method="GET", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + 
request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def list_async( + self, *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ListFilesOut]: + r"""List Files + + Returns a list of files that belong to the user's organization. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + req = self.build_request( + method="GET", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await 
self.do_request_async( + hook_ctx=HookContext(operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def retrieve( + self, *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.RetrieveFileOut]: + r"""Retrieve File + + Returns information about a specific file. 
+ + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesRetrieveFileRequest( + file_id=file_id, + ) + + req = self.build_request( + method="GET", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + 
async def retrieve_async( + self, *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.RetrieveFileOut]: + r"""Retrieve File + + Returns information about a specific file. + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesRetrieveFileRequest( + file_id=file_id, + ) + + req = self.build_request( + method="GET", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise 
models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def delete( + self, *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DeleteFileOut]: + r"""Delete File + + Delete a file. + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesDeleteFileRequest( + file_id=file_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], 
+ retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def delete_async( + self, *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DeleteFileOut]: + r"""Delete File + + Delete a file. + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesDeleteFileRequest( + file_id=file_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/files/{file_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + 
http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py new file mode 100644 index 00000000..19090d91 --- /dev/null +++ b/src/mistralai/fim.py @@ -0,0 +1,438 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.utils import eventstreaming, get_security_from_env +from typing import Any, AsyncGenerator, Generator, Optional, Union + +class Fim(BaseSDK): + r"""Fill-in-the-middle API.""" + + + def complete( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FIMCompletionResponse]: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
+ :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/fim/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: 
utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def complete_async( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: 
Optional[int] = None, + ) -> Optional[models.FIMCompletionResponse]: + r"""Fim Completion + + FIM completion. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/fim/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + if utils.match_response(http_res, "422", 
"application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def stream( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[Generator[models.CompletionEvent, None, None]]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/fim/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + if 
utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def stream_async( + self, *, + model: Nullable[str], + prompt: str, + temperature: Optional[float] = 0.7, + top_p: Optional[float] = 1, + max_tokens: OptionalNullable[int] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + random_seed: OptionalNullable[int] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + r"""Stream fim completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param min_tokens: The minimum number of tokens to generate in the completion. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + min_tokens=min_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + prompt=prompt, + suffix=suffix, + ) + + req = self.build_request( + method="POST", + path="/v1/fim/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + stream=True, + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), 
sentinel="[DONE]") + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/fine_tuning.py b/src/mistralai/fine_tuning.py new file mode 100644 index 00000000..998100a0 --- /dev/null +++ b/src/mistralai/fine_tuning.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.jobs import Jobs + +class FineTuning(BaseSDK): + jobs: Jobs + def __init__(self, sdk_config: SDKConfiguration) -> None: + BaseSDK.__init__(self, sdk_config) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.jobs = Jobs(self.sdk_configuration) + diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py new file mode 100644 index 00000000..36b642a0 --- /dev/null +++ b/src/mistralai/httpclient.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +# pyright: reportReturnType = false +from typing_extensions import Protocol, runtime_checkable +import httpx +from typing import Any, Optional, Union + + +@runtime_checkable +class HttpClient(Protocol): + def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + +@runtime_checkable +class AsyncHttpClient(Protocol): + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: 
Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 115b2327..255310f6 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -1,72 +1,57 @@ -from datetime import datetime -from typing import Any, Optional, Union - -from mistralai.exceptions import ( - MistralException, -) -from mistralai.models.jobs import DetailedJob, IntegrationIn, Job, JobMetadata, JobQueryFilter, Jobs, TrainingParameters +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from .basesdk import BaseSDK +from datetime import datetime +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import List, Optional, Union -class JobsClient: - def __init__(self, client: Any): - self.client = client +class Jobs(BaseSDK): + + + def list( + self, *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[models.QueryParamStatus] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.JobsOut]: + r"""Get Fine Tuning Jobs - def create( - self, - model: str, - training_files: Union[list[str], None] = None, - validation_files: Union[list[str], None] = None, - hyperparameters: TrainingParameters = TrainingParameters( - training_steps=1800, - learning_rate=1.0e-4, - ), - suffix: Union[str, None] = None, 
- integrations: Union[set[IntegrationIn], None] = None, - training_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API - validation_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API - dry_run: bool = False, - ) -> Union[Job, JobMetadata]: - # Handle deprecated arguments - if not training_files and training_file: - training_files = [training_file] - if not validation_files and validation_file: - validation_files = [validation_file] - single_response = self.client._request( - method="post", - json={ - "model": model, - "training_files": training_files, - "validation_files": validation_files, - "hyperparameters": hyperparameters.dict(), - "suffix": suffix, - "integrations": integrations, - }, - path="v1/fine_tuning/jobs", - params={"dry_run": dry_run}, - ) - for response in single_response: - return Job(**response) if not dry_run else JobMetadata(**response) - raise MistralException("No response received") - - def retrieve(self, job_id: str) -> DetailedJob: - single_response = self.client._request(method="get", path=f"v1/fine_tuning/jobs/{job_id}", json={}) - for response in single_response: - return DetailedJob(**response) - raise MistralException("No response received") + Get a list of fine-tuning jobs for your organization and user. - def list( - self, - page: int = 0, - page_size: int = 10, - model: Optional[str] = None, - created_after: Optional[datetime] = None, - created_by_me: Optional[bool] = None, - status: Optional[str] = None, - wandb_project: Optional[str] = None, - wandb_name: Optional[str] = None, - suffix: Optional[str] = None, - ) -> Jobs: - query_params = JobQueryFilter( + :param page: The page number of the results to be returned. + :param page_size: The number of items to return per page. + :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. + :param created_after: The date/time to filter on. 
When set, the results for previous creation times are not displayed. + :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. + :param status: The current job state to filter on. When set, the other results are not displayed. + :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. + :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. + :param suffix: The model suffix to filter on. When set, the other results are not displayed. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -76,80 +61,95 @@ def list( wandb_project=wandb_project, wandb_name=wandb_name, suffix=suffix, - ).model_dump(exclude_none=True) - single_response = self.client._request(method="get", params=query_params, path="v1/fine_tuning/jobs", json={}) - for response in single_response: - return Jobs(**response) - raise MistralException("No response received") - - def cancel(self, job_id: str) -> DetailedJob: - single_response = self.client._request(method="post", path=f"v1/fine_tuning/jobs/{job_id}/cancel", json={}) - for response in single_response: - return DetailedJob(**response) - raise MistralException("No response received") - - -class JobsAsyncClient: - def __init__(self, client: Any): - self.client = client - - async def create( - self, - model: str, - training_files: Union[list[str], None] = None, - validation_files: 
Union[list[str], None] = None, - hyperparameters: TrainingParameters = TrainingParameters( - training_steps=1800, - learning_rate=1.0e-4, - ), - suffix: Union[str, None] = None, - integrations: Union[set[IntegrationIn], None] = None, - training_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API - validation_file: Union[str, None] = None, # Deprecated: Added for compatibility with OpenAI API - dry_run: bool = False, - ) -> Union[Job, JobMetadata]: - # Handle deprecated arguments - if not training_files and training_file: - training_files = [training_file] - if not validation_files and validation_file: - validation_files = [validation_file] - - single_response = self.client._request( - method="post", - json={ - "model": model, - "training_files": training_files, - "validation_files": validation_files, - "hyperparameters": hyperparameters.dict(), - "suffix": suffix, - "integrations": integrations, - }, - path="v1/fine_tuning/jobs", - params={"dry_run": dry_run}, - ) - async for response in single_response: - return Job(**response) if not dry_run else JobMetadata(**response) - raise MistralException("No response received") - - async def retrieve(self, job_id: str) -> DetailedJob: - single_response = self.client._request(method="get", path=f"v1/fine_tuning/jobs/{job_id}", json={}) - async for response in single_response: - return DetailedJob(**response) - raise MistralException("No response received") - - async def list( - self, - page: int = 0, - page_size: int = 10, - model: Optional[str] = None, - created_after: Optional[datetime] = None, - created_by_me: Optional[bool] = None, - status: Optional[str] = None, - wandb_project: Optional[str] = None, - wandb_name: Optional[str] = None, - suffix: Optional[str] = None, - ) -> Jobs: - query_params = JobQueryFilter( + ) + + req = self.build_request( + method="GET", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + 
request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def list_async( + self, *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[models.QueryParamStatus] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.JobsOut]: + r"""Get Fine Tuning Jobs + + Get a list of fine-tuning jobs for your 
organization and user. + + :param page: The page number of the results to be returned. + :param page_size: The number of items to return per page. + :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. + :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. + :param status: The current job state to filter on. When set, the other results are not displayed. + :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. + :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. + :param suffix: The model suffix to filter on. When set, the other results are not displayed. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -159,14 +159,686 @@ async def list( wandb_project=wandb_project, wandb_name=wandb_name, suffix=suffix, - ).model_dump(exclude_none=True) - single_response = self.client._request(method="get", path="v1/fine_tuning/jobs", params=query_params, json={}) - async for response in single_response: - return Jobs(**response) - raise MistralException("No response received") - - async def cancel(self, job_id: str) -> DetailedJob: - single_response = self.client._request(method="post", path=f"v1/fine_tuning/jobs/{job_id}/cancel", 
json={}) - async for response in single_response: - return DetailedJob(**response) - raise MistralException("No response received") + ) + + req = self.build_request( + method="GET", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def create( + self, *, + model: models.FineTuneableModel, + hyperparameters: Union[models.TrainingParametersIn, models.TrainingParametersInTypedDict], + training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None, + validation_files: OptionalNullable[List[str]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + 
integrations: OptionalNullable[Union[List[models.WandbIntegration], List[models.WandbIntegrationTypedDict]]] = UNSET, + repositories: Optional[Union[List[models.GithubRepositoryIn], List[models.GithubRepositoryInTypedDict]]] = None, + auto_start: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]: + r"""Create Fine Tuning Job + + Create a new fine-tuning job, it will be queued for processing. + + :param model: The name of the model to fine-tune. + :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. + :param training_files: + :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. + :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` + :param integrations: A list of integrations to enable for your fine-tuning job. + :param repositories: + :param auto_start: This field will be required in a future release. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobIn( + model=model, + training_files=utils.get_pydantic_model(training_files, Optional[List[models.TrainingFile]]), + validation_files=validation_files, + hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn), + suffix=suffix, + integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.WandbIntegration]]), + repositories=utils.get_pydantic_model(repositories, Optional[List[models.GithubRepositoryIn]]), + auto_start=auto_start, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.JobIn), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + 
retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def create_async( + self, *, + model: models.FineTuneableModel, + hyperparameters: Union[models.TrainingParametersIn, models.TrainingParametersInTypedDict], + training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None, + validation_files: OptionalNullable[List[str]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + integrations: OptionalNullable[Union[List[models.WandbIntegration], List[models.WandbIntegrationTypedDict]]] = UNSET, + repositories: Optional[Union[List[models.GithubRepositoryIn], List[models.GithubRepositoryInTypedDict]]] = None, + auto_start: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]: + r"""Create Fine Tuning Job + + Create a new fine-tuning job, it will be queued for processing. + + :param model: The name of the model to fine-tune. + :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. + :param training_files: + :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. 
The same data should not be present in both train and validation files. + :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` + :param integrations: A list of integrations to enable for your fine-tuning job. + :param repositories: + :param auto_start: This field will be required in a future release. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobIn( + model=model, + training_files=utils.get_pydantic_model(training_files, Optional[List[models.TrainingFile]]), + validation_files=validation_files, + hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn), + suffix=suffix, + integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.WandbIntegration]]), + repositories=utils.get_pydantic_model(repositories, Optional[List[models.GithubRepositoryIn]]), + auto_start=auto_start, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.JobIn), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = 
self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def get( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, 
http_res) + + + + async def get_async( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, 
Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def cancel( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + 
hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def cancel_async( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", 
http_res.status_code, http_res.text, http_res) + + + + def start( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, 
Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def start_async( + self, *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DetailedJobOut]: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + 
hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index e69de29b..f3162705 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -0,0 +1,82 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .agentscompletionrequest import AgentsCompletionRequest, AgentsCompletionRequestMessages, AgentsCompletionRequestMessagesTypedDict, AgentsCompletionRequestStop, AgentsCompletionRequestStopTypedDict, AgentsCompletionRequestToolChoice, AgentsCompletionRequestTypedDict +from .agentscompletionstreamrequest import AgentsCompletionStreamRequest, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, AgentsCompletionStreamRequestTypedDict +from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutTypedDict +from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict, FinishReason +from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice +from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict +from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestMessages, ChatCompletionStreamRequestMessagesTypedDict, ChatCompletionStreamRequestStop, ChatCompletionStreamRequestStopTypedDict, ChatCompletionStreamRequestToolChoice, ChatCompletionStreamRequestTypedDict +from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from .completionevent import CompletionEvent, CompletionEventTypedDict +from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .delete_model_v1_models_model_id_deleteop import DeleteModelV1ModelsModelIDDeleteRequest, DeleteModelV1ModelsModelIDDeleteRequestTypedDict +from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict +from .deletemodelout import DeleteModelOut, 
DeleteModelOutTypedDict +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from .detailedjobout import DetailedJobOut, DetailedJobOutStatus, DetailedJobOutTypedDict +from .embeddingrequest import EmbeddingRequest, EmbeddingRequestTypedDict, Inputs, InputsTypedDict +from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict +from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict +from .eventout import EventOut, EventOutTypedDict +from .files_api_routes_delete_fileop import FilesAPIRoutesDeleteFileRequest, FilesAPIRoutesDeleteFileRequestTypedDict +from .files_api_routes_retrieve_fileop import FilesAPIRoutesRetrieveFileRequest, FilesAPIRoutesRetrieveFileRequestTypedDict +from .files_api_routes_upload_fileop import File, FileTypedDict, FilesAPIRoutesUploadFileMultiPartBodyParams, FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict +from .fileschema import FileSchema, FileSchemaTypedDict +from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop, FIMCompletionRequestStopTypedDict, FIMCompletionRequestTypedDict +from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict +from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict +from .finetuneablemodel import FineTuneableModel +from .ftmodelcapabilitiesout import FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict +from .ftmodelout import FTModelOut, FTModelOutTypedDict +from .function import Function, FunctionTypedDict +from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict +from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict +from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict +from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .jobin import JobIn, 
JobInTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .jobout import JobOut, JobOutTypedDict, Status +from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict +from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import JobsAPIRoutesFineTuningCancelFineTuningJobRequest, JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict +from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import JobsAPIRoutesFineTuningCreateFineTuningJobResponse, JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict +from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import JobsAPIRoutesFineTuningGetFineTuningJobRequest, JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict +from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import JobsAPIRoutesFineTuningGetFineTuningJobsRequest, JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, QueryParamStatus +from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import JobsAPIRoutesFineTuningStartFineTuningJobRequest, JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict +from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict +from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict +from .jobsout import JobsOut, JobsOutTypedDict +from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict +from .listfilesout import ListFilesOut, ListFilesOutTypedDict +from .metricout import MetricOut, MetricOutTypedDict +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from .modelcard import ModelCard, ModelCardTypedDict +from .modellist import ModelList, 
ModelListTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats +from .retrieve_model_v1_models_model_id_getop import RetrieveModelV1ModelsModelIDGetRequest, RetrieveModelV1ModelsModelIDGetRequestTypedDict +from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict +from .sampletype import SampleType +from .sdkerror import SDKError +from .security import Security, SecurityTypedDict +from .source import Source +from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .tool import Tool, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .trainingfile import TrainingFile, TrainingFileTypedDict +from .trainingparameters import TrainingParameters, TrainingParametersTypedDict +from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict +from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutTypedDict +from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict +from .uploadfileout import UploadFileOut, UploadFileOutTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict +from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict +from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict + +__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", 
"AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelOut", "FTModelOutTypedDict", "File", 
"FileSchema", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModel", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Inputs", "InputsTypedDict", "JobIn", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelCard", "ModelCardTypedDict", "ModelList", "ModelListTypedDict", 
"QueryParamStatus", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "Role", "SDKError", "SampleType", "Security", "SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "UnarchiveFTModelOut", "UnarchiveFTModelOutTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationTypedDict"] diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py new file mode 100644 index 00000000..3eb8b380 --- /dev/null +++ b/src/mistralai/models/agentscompletionrequest.py @@ -0,0 +1,96 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +AgentsCompletionRequestToolChoice = Literal["auto", "none", "any"] + +class AgentsCompletionRequestTypedDict(TypedDict): + messages: List[AgentsCompletionRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[AgentsCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[AgentsCompletionRequestToolChoice] + + +class AgentsCompletionRequest(BaseModel): + messages: List[AgentsCompletionRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[AgentsCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[AgentsCompletionRequestToolChoice] = "auto" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +AgentsCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestMessagesTypedDict = Union[UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +AgentsCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py new file mode 100644 index 00000000..e6a1ea64 --- /dev/null +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -0,0 +1,92 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +class AgentsCompletionStreamRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[AgentsCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + +class AgentsCompletionStreamRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[AgentsCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +AgentsCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py new file mode 100644 index 00000000..ba76737b --- /dev/null +++ b/src/mistralai/models/archiveftmodelout.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class ArchiveFTModelOutTypedDict(TypedDict): + id: str + archived: NotRequired[bool] + + +class ArchiveFTModelOut(BaseModel): + id: str + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "model" # type: ignore + archived: Optional[bool] = True + diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py new file mode 100644 index 00000000..b7080d55 --- /dev/null +++ b/src/mistralai/models/assistantmessage.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +AssistantMessageRole = Literal["assistant"] + +class AssistantMessageTypedDict(TypedDict): + content: NotRequired[Nullable[str]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: NotRequired[AssistantMessageRole] + + +class AssistantMessage(BaseModel): + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: Optional[AssistantMessageRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["content", "tool_calls", "prefix", "role"] + nullable_fields = ["content", "tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/chat_completion.py b/src/mistralai/models/chat_completion.py deleted file mode 100644 index c5eda5ad..00000000 --- a/src/mistralai/models/chat_completion.py +++ /dev/null @@ -1,93 +0,0 @@ -from enum import Enum -from typing import List, Optional - -from pydantic import BaseModel - -from mistralai.models.common import UsageInfo - - -class Function(BaseModel): - name: str - description: str - parameters: dict - - -class ToolType(str, Enum): - function = "function" - - -class FunctionCall(BaseModel): - name: str - arguments: str - - -class ToolCall(BaseModel): - id: str = "null" - type: ToolType = ToolType.function - function: FunctionCall - - -class 
ResponseFormats(str, Enum): - text: str = "text" - json_object: str = "json_object" - - -class ToolChoice(str, Enum): - auto: str = "auto" - any: str = "any" - none: str = "none" - - -class ResponseFormat(BaseModel): - type: ResponseFormats = ResponseFormats.text - - -class ChatMessage(BaseModel): - role: str - content: str - name: Optional[str] = None - tool_calls: Optional[List[ToolCall]] = None - tool_call_id: Optional[str] = None - - -class DeltaMessage(BaseModel): - role: Optional[str] = None - content: Optional[str] = None - tool_calls: Optional[List[ToolCall]] = None - - -class FinishReason(str, Enum): - stop = "stop" - length = "length" - error = "error" - tool_calls = "tool_calls" - - -class ChatCompletionResponseStreamChoice(BaseModel): - index: int - delta: DeltaMessage - finish_reason: Optional[FinishReason] - - -class ChatCompletionStreamResponse(BaseModel): - id: str - model: str - choices: List[ChatCompletionResponseStreamChoice] - created: Optional[int] = None - object: Optional[str] = None - usage: Optional[UsageInfo] = None - - -class ChatCompletionResponseChoice(BaseModel): - index: int - message: ChatMessage - finish_reason: Optional[FinishReason] - - -class ChatCompletionResponse(BaseModel): - id: str - object: str - created: int - model: str - choices: List[ChatCompletionResponseChoice] - usage: UsageInfo diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py new file mode 100644 index 00000000..04d2350a --- /dev/null +++ b/src/mistralai/models/chatcompletionchoice.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +FinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + finish_reason: FinishReason + message: NotRequired[AssistantMessageTypedDict] + + +class ChatCompletionChoice(BaseModel): + index: int + finish_reason: FinishReason + message: Optional[AssistantMessage] = None + diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py new file mode 100644 index 00000000..30817c5a --- /dev/null +++ b/src/mistralai/models/chatcompletionrequest.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. 
You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[MessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[StopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ToolChoice] + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[Messages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[Stop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ToolChoice] = "auto" + safe_prompt: Optional[bool] = False + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +StopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +Stop = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/src/mistralai/models/chatcompletionresponse.py b/src/mistralai/models/chatcompletionresponse.py new file mode 100644 index 00000000..dacb0acb --- /dev/null +++ b/src/mistralai/models/chatcompletionresponse.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: NotRequired[int] + choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + + +class ChatCompletionResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None + diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py new file mode 100644 index 00000000..9523dd5a --- /dev/null +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -0,0 +1,107 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +ChatCompletionStreamRequestToolChoice = Literal["auto", "none", "any"] + +class ChatCompletionStreamRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionStreamRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoice] + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionStreamRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionStreamRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[ChatCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = "auto" + safe_prompt: Optional[bool] = False + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +ChatCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +ChatCompletionStreamRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + diff --git a/src/mistralai/models/checkpointout.py b/src/mistralai/models/checkpointout.py new file mode 100644 index 00000000..108356c0 --- /dev/null +++ b/src/mistralai/models/checkpointout.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .metricout import MetricOut, MetricOutTypedDict +from mistralai.types import BaseModel +from typing import TypedDict + + +class CheckpointOutTypedDict(TypedDict): + metrics: MetricOutTypedDict + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + step_number: int + r"""The step number that the checkpoint was created at.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" + + +class CheckpointOut(BaseModel): + metrics: MetricOut + r"""Metrics at the step number during the fine-tuning job. 
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + step_number: int + r"""The step number that the checkpoint was created at.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" + diff --git a/src/mistralai/models/common.py b/src/mistralai/models/common.py deleted file mode 100644 index 11b71e50..00000000 --- a/src/mistralai/models/common.py +++ /dev/null @@ -1,9 +0,0 @@ -from typing import Optional - -from pydantic import BaseModel - - -class UsageInfo(BaseModel): - prompt_tokens: int - total_tokens: int - completion_tokens: Optional[int] diff --git a/src/mistralai/models/completionchunk.py b/src/mistralai/models/completionchunk.py new file mode 100644 index 00000000..f3a12c12 --- /dev/null +++ b/src/mistralai/models/completionchunk.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None + diff --git a/src/mistralai/models/completionevent.py b/src/mistralai/models/completionevent.py new file mode 100644 index 00000000..9b75f730 --- /dev/null +++ b/src/mistralai/models/completionevent.py @@ -0,0 +1,15 @@ +"""Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai.types import BaseModel +from typing import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk + diff --git a/src/mistralai/models/completionresponsestreamchoice.py b/src/mistralai/models/completionresponsestreamchoice.py new file mode 100644 index 00000000..bd3cf9b2 --- /dev/null +++ b/src/mistralai/models/completionresponsestreamchoice.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, TypedDict + + +CompletionResponseStreamChoiceFinishReason = Literal["stop", "length", "error", "tool_calls"] + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + delta: DeltaMessage + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["finish_reason"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != 
UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py new file mode 100644 index 00000000..06954920 --- /dev/null +++ b/src/mistralai/models/contentchunk.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class ContentChunkTypedDict(TypedDict): + text: str + + +class ContentChunk(BaseModel): + text: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore + diff --git a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py new file mode 100644 index 00000000..8935acb3 --- /dev/null +++ b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to delete.""" + + +class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): + model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the model to delete.""" + diff --git a/src/mistralai/models/deletefileout.py b/src/mistralai/models/deletefileout.py new file mode 100644 index 00000000..90c60ffa --- /dev/null +++ b/src/mistralai/models/deletefileout.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import TypedDict + + +class DeleteFileOutTypedDict(TypedDict): + id: str + r"""The ID of the deleted file.""" + object: str + r"""The object type that was deleted""" + deleted: bool + r"""The deletion status.""" + + +class DeleteFileOut(BaseModel): + id: str + r"""The ID of the deleted file.""" + object: str + r"""The object type that was deleted""" + deleted: bool + r"""The deletion status.""" + diff --git a/src/mistralai/models/deletemodelout.py b/src/mistralai/models/deletemodelout.py new file mode 100644 index 00000000..bab96e07 --- /dev/null +++ b/src/mistralai/models/deletemodelout.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class DeleteModelOutTypedDict(TypedDict): + id: str + r"""The ID of the deleted model.""" + object: NotRequired[str] + r"""The object type that was deleted""" + deleted: NotRequired[bool] + r"""The deletion status""" + + +class DeleteModelOut(BaseModel): + id: str + r"""The ID of the deleted model.""" + object: Optional[str] = "model" + r"""The object type that was deleted""" + deleted: Optional[bool] = True + r"""The deletion status""" + diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py new file mode 100644 index 00000000..013f708c --- /dev/null +++ b/src/mistralai/models/deltamessage.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[str] + content: NotRequired[str] + tool_calls: NotRequired[Nullable[ToolCallTypedDict]] + + +class DeltaMessage(BaseModel): + role: Optional[str] = None + content: Optional[str] = None + tool_calls: OptionalNullable[ToolCall] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role", "content", "tool_calls"] + nullable_fields = ["tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or 
k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/detailedjobout.py b/src/mistralai/models/detailedjobout.py new file mode 100644 index 00000000..c9f31220 --- /dev/null +++ b/src/mistralai/models/detailedjobout.py @@ -0,0 +1,91 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .eventout import EventOut, EventOutTypedDict +from .finetuneablemodel import FineTuneableModel +from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .trainingparameters import TrainingParameters, TrainingParametersTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, List, Literal, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +DetailedJobOutStatus = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] + +class DetailedJobOutTypedDict(TypedDict): + id: str + auto_start: bool + hyperparameters: TrainingParametersTypedDict + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + status: DetailedJobOutStatus + job_type: str + created_at: int + modified_at: int + training_files: List[str] + validation_files: NotRequired[Nullable[List[str]]] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: 
NotRequired[Nullable[List[WandbIntegrationOutTypedDict]]] + trained_tokens: NotRequired[Nullable[int]] + repositories: NotRequired[List[GithubRepositoryOutTypedDict]] + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + events: NotRequired[List[EventOutTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointOutTypedDict]] + + +class DetailedJobOut(BaseModel): + id: str + auto_start: bool + hyperparameters: TrainingParameters + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + status: DetailedJobOutStatus + job_type: str + created_at: int + modified_at: int + training_files: List[str] + validation_files: OptionalNullable[List[str]] = UNSET + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "job" # type: ignore + fine_tuned_model: OptionalNullable[str] = UNSET + suffix: OptionalNullable[str] = UNSET + integrations: OptionalNullable[List[WandbIntegrationOut]] = UNSET + trained_tokens: OptionalNullable[int] = UNSET + repositories: Optional[List[GithubRepositoryOut]] = None + metadata: OptionalNullable[JobMetadataOut] = UNSET + events: Optional[List[EventOut]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + checkpoints: Optional[List[CheckpointOut]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["validation_files", "object", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "repositories", "metadata", "events", "checkpoints"] + nullable_fields = ["validation_files", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py new file mode 100644 index 00000000..6875e14c --- /dev/null +++ b/src/mistralai/models/embeddingrequest.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import List, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +class EmbeddingRequestTypedDict(TypedDict): + inputs: InputsTypedDict + r"""Text to embed.""" + model: str + r"""ID of the model to use.""" + encoding_format: NotRequired[Nullable[str]] + r"""The format to return the embeddings in.""" + + +class EmbeddingRequest(BaseModel): + inputs: Annotated[Inputs, pydantic.Field(alias="input")] + r"""Text to embed.""" + model: str + r"""ID of the model to use.""" + encoding_format: OptionalNullable[str] = UNSET + r"""The format to return the embeddings in.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["encoding_format"] + nullable_fields = ["encoding_format"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +InputsTypedDict = Union[str, List[str]] +r"""Text to embed.""" + + +Inputs = Union[str, List[str]] +r"""Text to embed.""" + diff --git a/src/mistralai/models/embeddingresponse.py b/src/mistralai/models/embeddingresponse.py new file mode 100644 index 00000000..040c42dc --- /dev/null +++ b/src/mistralai/models/embeddingresponse.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel +from typing import List, TypedDict + + +class EmbeddingResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + data: List[EmbeddingResponseDataTypedDict] + + +class EmbeddingResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + data: List[EmbeddingResponseData] + diff --git a/src/mistralai/models/embeddingresponsedata.py b/src/mistralai/models/embeddingresponsedata.py new file mode 100644 index 00000000..07a061bc --- /dev/null +++ b/src/mistralai/models/embeddingresponsedata.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class EmbeddingResponseDataTypedDict(TypedDict): + object: NotRequired[str] + embedding: NotRequired[List[float]] + index: NotRequired[int] + + +class EmbeddingResponseData(BaseModel): + object: Optional[str] = None + embedding: Optional[List[float]] = None + index: Optional[int] = None + diff --git a/src/mistralai/models/embeddings.py b/src/mistralai/models/embeddings.py deleted file mode 100644 index a3200f74..00000000 --- a/src/mistralai/models/embeddings.py +++ /dev/null @@ -1,19 +0,0 @@ -from typing import List - -from pydantic import BaseModel - -from mistralai.models.common import UsageInfo - - -class EmbeddingObject(BaseModel): - object: str - embedding: List[float] - index: int - - -class EmbeddingResponse(BaseModel): - id: str - object: str - data: List[EmbeddingObject] - model: str - usage: UsageInfo diff --git a/src/mistralai/models/eventout.py b/src/mistralai/models/eventout.py new 
file mode 100644 index 00000000..d522abed --- /dev/null +++ b/src/mistralai/models/eventout.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, TypedDict +from typing_extensions import NotRequired + + +class EventOutTypedDict(TypedDict): + name: str + r"""The name of the event.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + data: NotRequired[Nullable[Dict[str, Any]]] + + +class EventOut(BaseModel): + name: str + r"""The name of the event.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + data: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["data"] + nullable_fields = ["data"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/files.py b/src/mistralai/models/files.py deleted file mode 100644 index f0aeff3f..00000000 --- a/src/mistralai/models/files.py +++ /dev/null @@ -1,23 +0,0 @@ -from typing import Literal, Optional - -from pydantic import BaseModel - - -class FileObject(BaseModel): - id: str - object: str - bytes: int - created_at: int - filename: str - purpose: Optional[Literal["fine-tune"]] = "fine-tune" - - -class 
FileDeleted(BaseModel): - id: str - object: str - deleted: bool - - -class Files(BaseModel): - data: list[FileObject] - object: Literal["list"] diff --git a/src/mistralai/models/files_api_routes_delete_fileop.py b/src/mistralai/models/files_api_routes_delete_fileop.py new file mode 100644 index 00000000..85714838 --- /dev/null +++ b/src/mistralai/models/files_api_routes_delete_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDeleteFileRequest(BaseModel): + file_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + diff --git a/src/mistralai/models/files_api_routes_retrieve_fileop.py b/src/mistralai/models/files_api_routes_retrieve_fileop.py new file mode 100644 index 00000000..76063be9 --- /dev/null +++ b/src/mistralai/models/files_api_routes_retrieve_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesRetrieveFileRequest(BaseModel): + file_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py new file mode 100644 index 00000000..5d72a89a --- /dev/null +++ b/src/mistralai/models/files_api_routes_upload_fileop.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +import io +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, MultipartFormMetadata +import pydantic +from typing import Final, IO, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +class FileTypedDict(TypedDict): + file_name: str + content: Union[bytes, IO[bytes], io.BufferedReader] + content_type: NotRequired[str] + + +class File(BaseModel): + file_name: Annotated[str, pydantic.Field(alias="file"), FieldMetadata(multipart=True)] + content: Annotated[Union[bytes, IO[bytes], io.BufferedReader], pydantic.Field(alias=""), FieldMetadata(multipart=MultipartFormMetadata(content=True))] + content_type: Annotated[Optional[str], pydantic.Field(alias="Content-Type"), FieldMetadata(multipart=True)] = None + + +class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): + file: FileTypedDict + r"""The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + +class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): + file: Annotated[File, pydantic.Field(alias=""), FieldMetadata(multipart=MultipartFormMetadata(file=True))] + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + PURPOSE: Annotated[Final[Optional[str]], pydantic.Field(alias="purpose"), FieldMetadata(multipart=True)] = "fine-tune" # type: ignore + diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py new file mode 100644 index 00000000..c0552424 --- /dev/null +++ b/src/mistralai/models/fileschema.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .sampletype import SampleType +from .source import Source +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, TypedDict +from typing_extensions import Annotated, NotRequired + + +class FileSchemaTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + + +class FileSchema(BaseModel): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + PURPOSE: Annotated[Final[str], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["num_lines"] + nullable_fields = ["num_lines"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py new file mode 100644 index 00000000..ba941bb9 --- /dev/null +++ b/src/mistralai/models/fimcompletionrequest.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +class FIMCompletionRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[FIMCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + +class FIMCompletionRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[FIMCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +FIMCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + diff --git a/src/mistralai/models/fimcompletionresponse.py b/src/mistralai/models/fimcompletionresponse.py new file mode 100644 index 00000000..f359adb9 --- /dev/null +++ b/src/mistralai/models/fimcompletionresponse.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class FIMCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: NotRequired[int] + choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + + +class FIMCompletionResponse(BaseModel): + id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None + diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py new file mode 100644 index 00000000..767d0416 --- /dev/null +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -0,0 +1,92 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +class FIMCompletionStreamRequestTypedDict(TypedDict): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[float] + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + stream: NotRequired[bool] + stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + +class FIMCompletionStreamRequest(BaseModel): + model: Nullable[str] + r"""ID of the model to use. Only compatible for now with: + - `codestral-2405` + - `codestral-latest` + """ + prompt: str + r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 + r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[FIMCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + diff --git a/src/mistralai/models/finetuneablemodel.py b/src/mistralai/models/finetuneablemodel.py new file mode 100644 index 00000000..22c8e4c6 --- /dev/null +++ b/src/mistralai/models/finetuneablemodel.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +FineTuneableModel = Literal["open-mistral-7b", "mistral-small-latest", "codestral-latest", "mistral-large-latest", "open-mistral-nemo"] +r"""The name of the model to fine-tune.""" diff --git a/src/mistralai/models/ftmodelcapabilitiesout.py b/src/mistralai/models/ftmodelcapabilitiesout.py new file mode 100644 index 00000000..ab76af38 --- /dev/null +++ b/src/mistralai/models/ftmodelcapabilitiesout.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class FTModelCapabilitiesOutTypedDict(TypedDict): + completion_chat: NotRequired[bool] + completion_fim: NotRequired[bool] + function_calling: NotRequired[bool] + fine_tuning: NotRequired[bool] + + +class FTModelCapabilitiesOut(BaseModel): + completion_chat: Optional[bool] = True + completion_fim: Optional[bool] = False + function_calling: Optional[bool] = False + fine_tuning: Optional[bool] = False + diff --git a/src/mistralai/models/ftmodelout.py b/src/mistralai/models/ftmodelout.py new file mode 100644 index 00000000..44b5348d --- /dev/null +++ b/src/mistralai/models/ftmodelout.py @@ -0,0 +1,65 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ftmodelcapabilitiesout import FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, List, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class FTModelOutTypedDict(TypedDict): + id: str + created: int + owned_by: str + root: str + archived: bool + capabilities: FTModelCapabilitiesOutTypedDict + job: str + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + + +class FTModelOut(BaseModel): + id: str + created: int + owned_by: str + root: str + archived: bool + capabilities: FTModelCapabilitiesOut + job: str + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "model" # type: ignore + name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET + max_context_length: Optional[int] = 32768 + aliases: Optional[List[str]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "name", "description", "max_context_length", "aliases"] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/function.py b/src/mistralai/models/function.py new file mode 100644 index 
00000000..78eb2594 --- /dev/null +++ b/src/mistralai/models/function.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Any, Dict, Optional, TypedDict +from typing_extensions import NotRequired + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + + +class Function(BaseModel): + name: str + parameters: Dict[str, Any] + description: Optional[str] = "" + diff --git a/src/mistralai/models/functioncall.py b/src/mistralai/models/functioncall.py new file mode 100644 index 00000000..c8a6591d --- /dev/null +++ b/src/mistralai/models/functioncall.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Any, Dict, TypedDict, Union + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + arguments: Arguments + + +ArgumentsTypedDict = Union[Dict[str, Any], str] + + +Arguments = Union[Dict[str, Any], str] + diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py new file mode 100644 index 00000000..8c4cdd9a --- /dev/null +++ b/src/mistralai/models/githubrepositoryin.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class GithubRepositoryInTypedDict(TypedDict): + name: str + owner: str + token: str + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepositoryIn(BaseModel): + name: str + owner: str + token: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "github" # type: ignore + ref: OptionalNullable[str] = UNSET + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "ref", "weight"] + nullable_fields = ["ref"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py new file mode 100644 index 00000000..6bc539ef --- /dev/null +++ b/src/mistralai/models/githubrepositoryout.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class GithubRepositoryOutTypedDict(TypedDict): + name: str + owner: str + commit_id: str + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepositoryOut(BaseModel): + name: str + owner: str + commit_id: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "github" # type: ignore + ref: OptionalNullable[str] = UNSET + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "ref", "weight"] + nullable_fields = ["ref"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py new file mode 100644 index 00000000..4e4a2094 --- /dev/null +++ b/src/mistralai/models/httpvalidationerror.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .validationerror import ValidationError +from mistralai import utils +from mistralai.types import BaseModel +from typing import List, Optional + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[ValidationError]] = None + + + +class HTTPValidationError(Exception): + r"""Validation Error""" + data: HTTPValidationErrorData + + def __init__(self, data: HTTPValidationErrorData): + self.data = data + + def __str__(self) -> str: + return utils.marshal_json(self.data, HTTPValidationErrorData) + diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py new file mode 100644 index 00000000..95cd8bf8 --- /dev/null +++ b/src/mistralai/models/jobin.py @@ -0,0 +1,73 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .finetuneablemodel import FineTuneableModel +from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict +from .trainingfile import TrainingFile, TrainingFileTypedDict +from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict +from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class JobInTypedDict(TypedDict): + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + hyperparameters: TrainingParametersInTypedDict + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + training_files: NotRequired[List[TrainingFileTypedDict]] + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data. 
If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" + suffix: NotRequired[Nullable[str]] + r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + integrations: NotRequired[Nullable[List[WandbIntegrationTypedDict]]] + r"""A list of integrations to enable for your fine-tuning job.""" + repositories: NotRequired[List[GithubRepositoryInTypedDict]] + auto_start: NotRequired[bool] + r"""This field will be required in a future release.""" + + +class JobIn(BaseModel): + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + hyperparameters: TrainingParametersIn + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + training_files: Optional[List[TrainingFile]] = None + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" + suffix: OptionalNullable[str] = UNSET + r"""A string that will be added to your fine-tuning model name. 
For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + integrations: OptionalNullable[List[WandbIntegration]] = UNSET + r"""A list of integrations to enable for your fine-tuning job.""" + repositories: Optional[List[GithubRepositoryIn]] = None + auto_start: Optional[bool] = None + r"""This field will be required in a future release.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["training_files", "validation_files", "suffix", "integrations", "repositories", "auto_start"] + nullable_fields = ["validation_files", "suffix", "integrations"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/jobmetadataout.py b/src/mistralai/models/jobmetadataout.py new file mode 100644 index 00000000..9d3bfba2 --- /dev/null +++ b/src/mistralai/models/jobmetadataout.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import TypedDict +from typing_extensions import NotRequired + + +class JobMetadataOutTypedDict(TypedDict): + expected_duration_seconds: NotRequired[Nullable[int]] + cost: NotRequired[Nullable[float]] + cost_currency: NotRequired[Nullable[str]] + train_tokens_per_step: NotRequired[Nullable[int]] + train_tokens: NotRequired[Nullable[int]] + data_tokens: NotRequired[Nullable[int]] + estimated_start_time: NotRequired[Nullable[int]] + + +class JobMetadataOut(BaseModel): + expected_duration_seconds: OptionalNullable[int] = UNSET + cost: OptionalNullable[float] = UNSET + cost_currency: OptionalNullable[str] = UNSET + train_tokens_per_step: OptionalNullable[int] = UNSET + train_tokens: OptionalNullable[int] = UNSET + data_tokens: OptionalNullable[int] = UNSET + estimated_start_time: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time"] + nullable_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/jobout.py 
b/src/mistralai/models/jobout.py new file mode 100644 index 00000000..353b5cf3 --- /dev/null +++ b/src/mistralai/models/jobout.py @@ -0,0 +1,107 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .finetuneablemodel import FineTuneableModel +from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .trainingparameters import TrainingParameters, TrainingParametersTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, List, Literal, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +Status = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] +r"""The current status of the fine-tuning job.""" + +class JobOutTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + hyperparameters: TrainingParametersTypedDict + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + status: Status + r"""The current status of the fine-tuning job.""" + job_type: str + r"""The type of job (`FT` for fine-tuning).""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data.""" + fine_tuned_model: NotRequired[Nullable[str]] + 
r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: NotRequired[Nullable[List[WandbIntegrationOutTypedDict]]] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + repositories: NotRequired[List[GithubRepositoryOutTypedDict]] + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + + +class JobOut(BaseModel): + id: str + r"""The ID of the job.""" + auto_start: bool + hyperparameters: TrainingParameters + model: FineTuneableModel + r"""The name of the model to fine-tune.""" + status: Status + r"""The current status of the fine-tuning job.""" + job_type: str + r"""The type of job (`FT` for fine-tuning).""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "job" # type: ignore + r"""The object type of the fine-tuning job.""" + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: OptionalNullable[List[WandbIntegrationOut]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + repositories: Optional[List[GithubRepositoryOut]] = None + metadata: OptionalNullable[JobMetadataOut] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["validation_files", "object", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "repositories", "metadata"] + nullable_fields = ["validation_files", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/jobs.py b/src/mistralai/models/jobs.py deleted file mode 100644 index 64d3351d..00000000 --- a/src/mistralai/models/jobs.py +++ /dev/null @@ -1,100 +0,0 @@ -from datetime import datetime -from typing import Annotated, List, Literal, Optional, Union - -from pydantic import BaseModel, Field - - -class TrainingParameters(BaseModel): - training_steps: int = Field(1800, le=10000, ge=1) - learning_rate: float = Field(1.0e-4, le=1, ge=1.0e-8) - - -class WandbIntegration(BaseModel): - type: Literal["wandb"] = "wandb" - project: str - name: Union[str, None] = None - run_name: Union[str, 
None] = None - - -class WandbIntegrationIn(WandbIntegration): - api_key: str - - -Integration = Annotated[Union[WandbIntegration], Field(discriminator="type")] -IntegrationIn = Annotated[Union[WandbIntegrationIn], Field(discriminator="type")] - - -class JobMetadata(BaseModel): - object: Literal["job.metadata"] = "job.metadata" - training_steps: int - train_tokens_per_step: int - data_tokens: int - train_tokens: int - epochs: float - expected_duration_seconds: Optional[int] - cost: Optional[float] = None - cost_currency: Optional[str] = None - - -class Job(BaseModel): - id: str - hyperparameters: TrainingParameters - fine_tuned_model: Union[str, None] - model: str - status: Literal[ - "QUEUED", - "STARTED", - "RUNNING", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", - ] - job_type: str - created_at: int - modified_at: int - training_files: list[str] - validation_files: Union[list[str], None] = [] - object: Literal["job"] - integrations: List[Integration] = [] - - -class Event(BaseModel): - name: str - data: Union[dict, None] = None - created_at: int - - -class Metric(BaseModel): - train_loss: Union[float, None] = None - valid_loss: Union[float, None] = None - valid_mean_token_accuracy: Union[float, None] = None - - -class Checkpoint(BaseModel): - metrics: Metric - step_number: int - created_at: int - - -class JobQueryFilter(BaseModel): - page: int = 0 - page_size: int = 100 - model: Optional[str] = None - created_after: Optional[datetime] = None - created_by_me: Optional[bool] = None - status: Optional[str] = None - wandb_project: Optional[str] = None - wandb_name: Optional[str] = None - suffix: Optional[str] = None - - -class DetailedJob(Job): - events: list[Event] = [] - checkpoints: list[Checkpoint] = [] - estimated_start_time: Optional[int] = None - - -class Jobs(BaseModel): - data: list[Job] = [] - object: Literal["list"] diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py 
b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py new file mode 100644 index 00000000..e32d52b1 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to archive.""" + + +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): + model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the model to archive.""" + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py new file mode 100644 index 00000000..0ba05ee5 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to cancel.""" + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): + job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the job to cancel.""" + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py new file mode 100644 index 00000000..c4ba3c19 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .jobout import JobOut, JobOutTypedDict +from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict +from typing import Union + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = Union[LegacyJobMetadataOutTypedDict, JobOutTypedDict] +r"""OK""" + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponse = Union[LegacyJobMetadataOut, JobOut] +r"""OK""" + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py new file mode 100644 index 00000000..f8924c89 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to analyse.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): + job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the job to analyse.""" + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py new file mode 100644 index 00000000..bb5bf3b5 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -0,0 +1,81 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Literal, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +QueryParamStatus = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] +r"""The current job state to filter on. When set, the other results are not displayed.""" + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + r"""The page number of the results to be returned.""" + page_size: NotRequired[int] + r"""The number of items to return per page.""" + model: NotRequired[Nullable[str]] + r"""The model name used for fine-tuning to filter on. 
When set, the other results are not displayed.""" + created_after: NotRequired[Nullable[datetime]] + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_by_me: NotRequired[bool] + r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" + status: NotRequired[Nullable[QueryParamStatus]] + r"""The current job state to filter on. When set, the other results are not displayed.""" + wandb_project: NotRequired[Nullable[str]] + r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" + wandb_name: NotRequired[Nullable[str]] + r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" + suffix: NotRequired[Nullable[str]] + r"""The model suffix to filter on. When set, the other results are not displayed.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): + page: Annotated[Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = 0 + r"""The page number of the results to be returned.""" + page_size: Annotated[Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = 100 + r"""The number of items to return per page.""" + model: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" + created_after: Annotated[OptionalNullable[datetime], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_by_me: Annotated[Optional[bool], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = False + r"""When set, only return results for jobs created by the API caller. 
Other results are not displayed.""" + status: Annotated[OptionalNullable[QueryParamStatus], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The current job state to filter on. When set, the other results are not displayed.""" + wandb_project: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" + wandb_name: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" + suffix: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + r"""The model suffix to filter on. When set, the other results are not displayed.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["page", "page_size", "model", "created_after", "created_by_me", "status", "wandb_project", "wandb_name", "suffix"] + nullable_fields = ["model", "created_after", "status", "wandb_project", "wandb_name", "suffix"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py new file mode 100644 index 00000000..312063fd --- /dev/null +++ 
b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): + job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py new file mode 100644 index 00000000..ef44fedd --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to unarchive.""" + + +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): + model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the model to unarchive.""" + diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py new file mode 100644 index 00000000..8a229f0e --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to update.""" + update_ft_model_in: UpdateFTModelInTypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): + model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the model to update.""" + update_ft_model_in: Annotated[UpdateFTModelIn, FieldMetadata(request=RequestMetadata(media_type="application/json"))] + diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py new file mode 100644 index 00000000..15776ad8 --- /dev/null +++ b/src/mistralai/models/jobsout.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .jobout import JobOut, JobOutTypedDict +from mistralai.types import BaseModel +import pydantic +from typing import Final, List, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class JobsOutTypedDict(TypedDict): + total: int + data: NotRequired[List[JobOutTypedDict]] + + +class JobsOut(BaseModel): + total: int + data: Optional[List[JobOut]] = None + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "list" # type: ignore + diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py new file mode 100644 index 00000000..f4c2d7a0 --- /dev/null +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -0,0 +1,80 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class LegacyJobMetadataOutTypedDict(TypedDict): + details: str + expected_duration_seconds: NotRequired[Nullable[int]] + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + cost: NotRequired[Nullable[float]] + r"""The cost of the fine-tuning job.""" + cost_currency: NotRequired[Nullable[str]] + r"""The currency used for the fine-tuning job cost.""" + train_tokens_per_step: NotRequired[Nullable[int]] + r"""The number of tokens consumed by one training step.""" + train_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens used during the fine-tuning process.""" + data_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens in the training dataset.""" + estimated_start_time: NotRequired[Nullable[int]] + deprecated: NotRequired[bool] + epochs: 
NotRequired[Nullable[float]] + r"""The number of complete passes through the entire training dataset.""" + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + + +class LegacyJobMetadataOut(BaseModel): + details: str + expected_duration_seconds: OptionalNullable[int] = UNSET + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + cost: OptionalNullable[float] = UNSET + r"""The cost of the fine-tuning job.""" + cost_currency: OptionalNullable[str] = UNSET + r"""The currency used for the fine-tuning job cost.""" + train_tokens_per_step: OptionalNullable[int] = UNSET + r"""The number of tokens consumed by one training step.""" + train_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens used during the fine-tuning process.""" + data_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens in the training dataset.""" + estimated_start_time: OptionalNullable[int] = UNSET + deprecated: Optional[bool] = True + epochs: OptionalNullable[float] = UNSET + r"""The number of complete passes through the entire training dataset.""" + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "job.metadata" # type: ignore + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time", "deprecated", "epochs", "training_steps", "object"] + nullable_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time", "epochs", "training_steps"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/listfilesout.py b/src/mistralai/models/listfilesout.py new file mode 100644 index 00000000..b6f4dd10 --- /dev/null +++ b/src/mistralai/models/listfilesout.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .fileschema import FileSchema, FileSchemaTypedDict +from mistralai.types import BaseModel +from typing import List, TypedDict + + +class ListFilesOutTypedDict(TypedDict): + data: List[FileSchemaTypedDict] + object: str + + +class ListFilesOut(BaseModel): + data: List[FileSchema] + object: str + diff --git a/src/mistralai/models/metricout.py b/src/mistralai/models/metricout.py new file mode 100644 index 00000000..b85cd7d0 --- /dev/null +++ b/src/mistralai/models/metricout.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import TypedDict +from typing_extensions import NotRequired + + +class MetricOutTypedDict(TypedDict): + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: NotRequired[Nullable[float]] + valid_loss: NotRequired[Nullable[float]] + valid_mean_token_accuracy: NotRequired[Nullable[float]] + + +class MetricOut(BaseModel): + r"""Metrics at the step number during the fine-tuning job. 
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: OptionalNullable[float] = UNSET + valid_loss: OptionalNullable[float] = UNSET + valid_mean_token_accuracy: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] + nullable_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py new file mode 100644 index 00000000..c22ce59d --- /dev/null +++ b/src/mistralai/models/modelcapabilities.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class ModelCapabilitiesTypedDict(TypedDict): + completion_chat: NotRequired[bool] + completion_fim: NotRequired[bool] + function_calling: NotRequired[bool] + fine_tuning: NotRequired[bool] + + +class ModelCapabilities(BaseModel): + completion_chat: Optional[bool] = True + completion_fim: Optional[bool] = False + function_calling: Optional[bool] = True + fine_tuning: Optional[bool] = False + diff --git a/src/mistralai/models/modelcard.py b/src/mistralai/models/modelcard.py new file mode 100644 index 00000000..80e082e4 --- /dev/null +++ b/src/mistralai/models/modelcard.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class ModelCardTypedDict(TypedDict): + id: str + capabilities: ModelCapabilitiesTypedDict + object: NotRequired[str] + created: NotRequired[int] + owned_by: NotRequired[str] + root: NotRequired[Nullable[str]] + archived: NotRequired[bool] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + deprecation: NotRequired[Nullable[datetime]] + + +class ModelCard(BaseModel): + id: str + capabilities: ModelCapabilities + object: Optional[str] = "model" + created: Optional[int] = None + owned_by: Optional[str] = "mistralai" + root: OptionalNullable[str] = UNSET + archived: Optional[bool] = False + name: OptionalNullable[str] = UNSET + 
description: OptionalNullable[str] = UNSET + max_context_length: Optional[int] = 32768 + aliases: Optional[List[str]] = None + deprecation: OptionalNullable[datetime] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "created", "owned_by", "root", "archived", "name", "description", "max_context_length", "aliases", "deprecation"] + nullable_fields = ["root", "name", "description", "deprecation"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/modellist.py b/src/mistralai/models/modellist.py new file mode 100644 index 00000000..0c76b322 --- /dev/null +++ b/src/mistralai/models/modellist.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .modelcard import ModelCard, ModelCardTypedDict +from mistralai.types import BaseModel +from typing import List, Optional, TypedDict +from typing_extensions import NotRequired + + +class ModelListTypedDict(TypedDict): + object: NotRequired[str] + data: NotRequired[List[ModelCardTypedDict]] + + +class ModelList(BaseModel): + object: Optional[str] = "list" + data: Optional[List[ModelCard]] = None + diff --git a/src/mistralai/models/models.py b/src/mistralai/models/models.py deleted file mode 100644 index f88033d4..00000000 --- a/src/mistralai/models/models.py +++ /dev/null @@ -1,39 +0,0 @@ -from typing import List, Optional - -from pydantic import BaseModel - - -class ModelPermission(BaseModel): - id: str - object: str - created: int - allow_create_engine: Optional[bool] = False - allow_sampling: bool = True - allow_logprobs: bool = True - allow_search_indices: Optional[bool] = False - allow_view: bool = True - allow_fine_tuning: bool = False - organization: str = "*" - group: Optional[str] = None - is_blocking: Optional[bool] = False - - -class ModelCard(BaseModel): - id: str - object: str - created: int - owned_by: str - root: Optional[str] = None - parent: Optional[str] = None - permission: List[ModelPermission] = [] - - -class ModelList(BaseModel): - object: str - data: List[ModelCard] - - -class ModelDeleted(BaseModel): - id: str - object: str - deleted: bool diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py new file mode 100644 index 00000000..0ead91a4 --- /dev/null +++ b/src/mistralai/models/responseformat.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ResponseFormats = Literal["text", "json_object"] +r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + +class ResponseFormatTypedDict(TypedDict): + type: NotRequired[ResponseFormats] + + +class ResponseFormat(BaseModel): + type: Optional[ResponseFormats] = "text" + diff --git a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py new file mode 100644 index 00000000..8a37b1ca --- /dev/null +++ b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import TypedDict +from typing_extensions import Annotated + + +class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to retrieve.""" + + +class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): + model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + r"""The ID of the model to retrieve.""" + diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py new file mode 100644 index 00000000..98af323e --- /dev/null +++ b/src/mistralai/models/retrievefileout.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .sampletype import SampleType +from .source import Source +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, TypedDict +from typing_extensions import Annotated, NotRequired + + +class RetrieveFileOutTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + + +class RetrieveFileOut(BaseModel): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + PURPOSE: Annotated[Final[str], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["num_lines"] + nullable_fields = ["num_lines"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/sampletype.py b/src/mistralai/models/sampletype.py new file mode 100644 index 00000000..83424f3a --- /dev/null +++ b/src/mistralai/models/sampletype.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +SampleType = Literal["pretrain", "instruct"] diff --git a/src/mistralai/models/sdkerror.py b/src/mistralai/models/sdkerror.py new file mode 100644 index 00000000..03216cbf --- /dev/null +++ b/src/mistralai/models/sdkerror.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from dataclasses import dataclass +from typing import Optional +import httpx + + +@dataclass +class SDKError(Exception): + """Represents an error returned by the API.""" + + message: str + status_code: int = -1 + body: str = "" + raw_response: Optional[httpx.Response] = None + + def __str__(self): + body = "" + if len(self.body) > 0: + body = f"\n{self.body}" + + return f"{self.message}: Status {self.status_code}{body}" diff --git a/src/mistralai/models/security.py b/src/mistralai/models/security.py new file mode 100644 index 00000000..3d69602f --- /dev/null +++ b/src/mistralai/models/security.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, SecurityMetadata +from typing import Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class SecurityTypedDict(TypedDict): + api_key: NotRequired[str] + + +class Security(BaseModel): + api_key: Annotated[Optional[str], FieldMetadata(security=SecurityMetadata(scheme=True, scheme_type="http", sub_type="bearer", field_name="Authorization"))] = None + diff --git a/src/mistralai/models/source.py b/src/mistralai/models/source.py new file mode 100644 index 00000000..66d09aeb --- /dev/null +++ b/src/mistralai/models/source.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +Source = Literal["upload", "repository"] diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py new file mode 100644 index 00000000..171acf50 --- /dev/null +++ b/src/mistralai/models/systemmessage.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +Role = Literal["system"] + +class SystemMessageTypedDict(TypedDict): + content: ContentTypedDict + role: NotRequired[Role] + + +class SystemMessage(BaseModel): + content: Content + role: Optional[Role] = "system" + + +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] + diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py new file mode 100644 index 00000000..fd95ab82 --- /dev/null +++ b/src/mistralai/models/textchunk.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class TextChunkTypedDict(TypedDict): + text: str + + +class TextChunk(BaseModel): + text: str + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore + diff --git a/src/mistralai/models/tool.py b/src/mistralai/models/tool.py new file mode 100644 index 00000000..c790e637 --- /dev/null +++ b/src/mistralai/models/tool.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated + + +class ToolTypedDict(TypedDict): + function: FunctionTypedDict + + +class Tool(BaseModel): + function: Function + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py new file mode 100644 index 00000000..2afd453d --- /dev/null +++ b/src/mistralai/models/toolcall.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .functioncall import FunctionCall, FunctionCallTypedDict +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class ToolCallTypedDict(TypedDict): + function: FunctionCallTypedDict + id: NotRequired[str] + + +class ToolCall(BaseModel): + function: FunctionCall + id: Optional[str] = "null" + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py new file mode 100644 index 00000000..8445861a --- /dev/null +++ b/src/mistralai/models/toolmessage.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional, TypedDict +from typing_extensions import NotRequired + + +ToolMessageRole = Literal["tool"] + +class ToolMessageTypedDict(TypedDict): + content: str + tool_call_id: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + role: NotRequired[ToolMessageRole] + + +class ToolMessage(BaseModel): + content: str + tool_call_id: OptionalNullable[str] = UNSET + name: OptionalNullable[str] = UNSET + role: Optional[ToolMessageRole] = "tool" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["tool_call_id", "name", "role"] + nullable_fields = ["tool_call_id", "name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/trainingfile.py b/src/mistralai/models/trainingfile.py new file mode 100644 index 00000000..097ea174 --- /dev/null +++ b/src/mistralai/models/trainingfile.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class TrainingFileTypedDict(TypedDict): + file_id: str + weight: NotRequired[float] + + +class TrainingFile(BaseModel): + file_id: str + weight: Optional[float] = 1 + diff --git a/src/mistralai/models/trainingparameters.py b/src/mistralai/models/trainingparameters.py new file mode 100644 index 00000000..2110b55f --- /dev/null +++ b/src/mistralai/models/trainingparameters.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class TrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + epochs: NotRequired[Nullable[float]] + fim_ratio: NotRequired[Nullable[float]] + + +class TrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + learning_rate: Optional[float] = 0.0001 + epochs: OptionalNullable[float] = UNSET + fim_ratio: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["training_steps", "learning_rate", "epochs", "fim_ratio"] + nullable_fields = ["training_steps", "epochs", "fim_ratio"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif 
val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/trainingparametersin.py b/src/mistralai/models/trainingparametersin.py new file mode 100644 index 00000000..60d71b27 --- /dev/null +++ b/src/mistralai/models/trainingparametersin.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional, TypedDict +from typing_extensions import NotRequired + + +class TrainingParametersInTypedDict(TypedDict): + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: NotRequired[float] + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + epochs: NotRequired[Nullable[float]] + fim_ratio: NotRequired[Nullable[float]] + + +class TrainingParametersIn(BaseModel): + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: Optional[float] = 0.0001 + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + epochs: OptionalNullable[float] = UNSET + fim_ratio: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["training_steps", "learning_rate", "epochs", "fim_ratio"] + nullable_fields = ["training_steps", "epochs", "fim_ratio"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py new file mode 100644 index 00000000..07334f57 --- /dev/null +++ b/src/mistralai/models/unarchiveftmodelout.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class UnarchiveFTModelOutTypedDict(TypedDict): + id: str + archived: NotRequired[bool] + + +class UnarchiveFTModelOut(BaseModel): + id: str + OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "model" # type: ignore + archived: Optional[bool] = False + diff --git a/src/mistralai/models/updateftmodelin.py b/src/mistralai/models/updateftmodelin.py new file mode 100644 index 00000000..8c3d8475 --- /dev/null +++ b/src/mistralai/models/updateftmodelin.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import TypedDict +from typing_extensions import NotRequired + + +class UpdateFTModelInTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class UpdateFTModelIn(BaseModel): + name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "description"] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + 
return m + diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py new file mode 100644 index 00000000..cd5b86c6 --- /dev/null +++ b/src/mistralai/models/uploadfileout.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .sampletype import SampleType +from .source import Source +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, TypedDict +from typing_extensions import Annotated, NotRequired + + +class UploadFileOutTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + + +class UploadFileOut(BaseModel): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + PURPOSE: Annotated[Final[str], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["num_lines"] + nullable_fields = ["num_lines"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/usageinfo.py b/src/mistralai/models/usageinfo.py new file mode 100644 index 00000000..153ab6b5 --- /dev/null +++ b/src/mistralai/models/usageinfo.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + +class UsageInfo(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py new file mode 100644 index 00000000..bea7328b --- /dev/null +++ b/src/mistralai/models/usermessage.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import NotRequired + + +UserMessageRole = Literal["user"] + +class UserMessageTypedDict(TypedDict): + content: UserMessageContentTypedDict + role: NotRequired[UserMessageRole] + + +class UserMessage(BaseModel): + content: UserMessageContent + role: Optional[UserMessageRole] = "user" + + +UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] + + +UserMessageContent = Union[str, List[TextChunk]] + diff --git a/src/mistralai/models/validationerror.py b/src/mistralai/models/validationerror.py new file mode 100644 index 00000000..2d4a97bd --- /dev/null +++ b/src/mistralai/models/validationerror.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import List, TypedDict, Union + + +class ValidationErrorTypedDict(TypedDict): + loc: List[LocTypedDict] + msg: str + type: str + + +class ValidationError(BaseModel): + loc: List[Loc] + msg: str + type: str + + +LocTypedDict = Union[str, int] + + +Loc = Union[str, int] + diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py new file mode 100644 index 00000000..fccab00f --- /dev/null +++ b/src/mistralai/models/wandbintegration.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class WandbIntegrationTypedDict(TypedDict): + project: str + r"""The name of the project that the new run will be created under.""" + api_key: str + r"""The WandB API key to use for authentication.""" + name: NotRequired[Nullable[str]] + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + run_name: NotRequired[Nullable[str]] + + +class WandbIntegration(BaseModel): + project: str + r"""The name of the project that the new run will be created under.""" + api_key: str + r"""The WandB API key to use for authentication.""" + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "wandb" # type: ignore + name: OptionalNullable[str] = UNSET + r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" + run_name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "name", "run_name"] + nullable_fields = ["name", "run_name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py new file mode 100644 index 00000000..f971ba58 --- /dev/null +++ b/src/mistralai/models/wandbintegrationout.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class WandbIntegrationOutTypedDict(TypedDict): + project: str + r"""The name of the project that the new run will be created under.""" + name: NotRequired[Nullable[str]] + r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" + run_name: NotRequired[Nullable[str]] + + +class WandbIntegrationOut(BaseModel): + project: str + r"""The name of the project that the new run will be created under.""" + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "wandb" # type: ignore + name: OptionalNullable[str] = UNSET + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + run_name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "name", "run_name"] + nullable_fields = ["name", "run_name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py new file mode 100644 index 00000000..aba57826 --- /dev/null +++ b/src/mistralai/models_.py @@ -0,0 +1,928 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, Optional + +class Models(BaseSDK): + r"""Model Management API""" + + + def list( + self, *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ModelList]: + r"""List Models + + List all models available to the user. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + req = self.build_request( + method="GET", + path="/v1/models", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if 
utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ModelList]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def list_async( + self, *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ModelList]: + r"""List Models + + List all models available to the user. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + req = self.build_request( + method="GET", + path="/v1/models", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + 
"502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ModelList]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def retrieve( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ModelCard]: + r"""Retrieve Model + + Retrieve a model information. + + :param model_id: The ID of the model to retrieve. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.RetrieveModelV1ModelsModelIDGetRequest( + model_id=model_id, + ) + + req = self.build_request( + method="GET", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ModelCard]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type 
= http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def retrieve_async( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ModelCard]: + r"""Retrieve Model + + Retrieve a model information. + + :param model_id: The ID of the model to retrieve. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.RetrieveModelV1ModelsModelIDGetRequest( + model_id=model_id, + ) + + req = self.build_request( + method="GET", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + 
retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ModelCard]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def delete( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DeleteModelOut]: + r"""Delete Model + + Delete a fine-tuned model. + + :param model_id: The ID of the model to delete. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.DeleteModelV1ModelsModelIDDeleteRequest( + model_id=model_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DeleteModelOut]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + 
content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def delete_async( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.DeleteModelOut]: + r"""Delete Model + + Delete a fine-tuned model. + + :param model_id: The ID of the model to delete. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.DeleteModelV1ModelsModelIDDeleteRequest( + model_id=model_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + 
error_status_codes=["422","4XX","5XX"], + retry_config=retry_config + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.DeleteModelOut]) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def update( + self, *, + model_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FTModelOut]: + r"""Update Fine Tuned Model + + Update a model name or description. + + :param model_id: The ID of the model to update. 
+ :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + model_id=model_id, + update_ft_model_in=models.UpdateFTModelIn( + name=name, + description=description, + ), + ) + + req = self.build_request( + method="PATCH", + path="/v1/fine_tuning/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise 
models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def update_async( + self, *, + model_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FTModelOut]: + r"""Update Fine Tuned Model + + Update a model name or description. + + :param model_id: The ID of the model to update. + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + model_id=model_id, + update_ft_model_in=models.UpdateFTModelIn( + name=name, + description=description, + ), + ) + + req = self.build_request( + method="PATCH", + path="/v1/fine_tuning/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body(request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: 
+ retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def archive( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ArchiveFTModelOut]: + r"""Archive Fine Tuned Model + + Archive a fine-tuned model. + + :param model_id: The ID of the model to archive. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ArchiveFTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", 
http_res.status_code, http_res.text, http_res) + + + + async def archive_async( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ArchiveFTModelOut]: + r"""Archive Fine Tuned Model + + Archive a fine-tuned model. + + :param model_id: The ID of the model to archive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self.build_request( + method="POST", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", 
"application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.ArchiveFTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + def unarchive( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.UnarchiveFTModelOut]: + r"""Unarchive Fine Tuned Model + + Un-archive a fine-tuned model. + + :param model_id: The ID of the model to unarchive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + 
"502", + "503", + "504" + ]) + + http_res = self.do_request( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.UnarchiveFTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + + + + async def unarchive_async( + self, *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.UnarchiveFTModelOut]: + r"""Unarchive Fine Tuned Model + + Un-archive a fine-tuned model. + + :param model_id: The ID of the model to unarchive. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self.build_request( + method="DELETE", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, [ + "429", + "500", + "502", + "503", + "504" + ]) + + http_res = await self.do_request_async( + hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + request=req, + error_status_codes=["4XX","5XX"], + retry_config=retry_config + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.UnarchiveFTModelOut]) + if utils.match_response(http_res, ["4XX","5XX"], "*"): + raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) + + content_type = http_res.headers.get("Content-Type") + raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: 
{content_type})", http_res.status_code, http_res.text, http_res) + + diff --git a/src/mistralai/py.typed b/src/mistralai/py.typed index e69de29b..3e38f1a9 100644 --- a/src/mistralai/py.typed +++ b/src/mistralai/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py new file mode 100644 index 00000000..b0d2bb18 --- /dev/null +++ b/src/mistralai/sdk.py @@ -0,0 +1,119 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, HttpClient +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, NoOpLogger +from .utils.retries import RetryConfig +import httpx +from mistralai import models, utils +from mistralai._hooks import SDKHooks +from mistralai.agents import Agents +from mistralai.chat import Chat +from mistralai.embeddings import Embeddings +from mistralai.files import Files +from mistralai.fim import Fim +from mistralai.fine_tuning import FineTuning +from mistralai.models_ import Models +from mistralai.types import OptionalNullable, UNSET +from typing import Any, Callable, Dict, Optional, Union + +class Mistral(BaseSDK): + r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + models: Models + r"""Model Management API""" + files: Files + r"""Files API""" + fine_tuning: FineTuning + chat: Chat + r"""Chat Completion API.""" + fim: Fim + r"""Fill-in-the-middle API.""" + agents: Agents + r"""Agents API.""" + embeddings: Embeddings + r"""Embeddings API.""" + def __init__( + self, + api_key: Optional[Union[Optional[str], Callable[[], Optional[str]]]] = None, + server: Optional[str] = None, + server_url: Optional[str] = None, + url_params: Optional[Dict[str, str]] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, + debug_logger: Optional[Logger] = None + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters. + + :param api_key: The api_key required for authentication + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods + :param url_params: Parameters to optionally template the server URL with + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds + """ + if client is None: + client = httpx.Client() + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." 
+ + if async_client is None: + async_client = httpx.AsyncClient() + + if debug_logger is None: + debug_logger = NoOpLogger() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol." + + security: Any = None + if callable(api_key): + security = lambda: models.Security(api_key = api_key()) # pylint: disable=unnecessary-lambda-assignment + else: + security = models.Security(api_key = api_key) + + if server_url is not None: + if url_params is not None: + server_url = utils.template_url(server_url, url_params) + + + BaseSDK.__init__(self, SDKConfiguration( + client=client, + async_client=async_client, + security=security, + server_url=server_url, + server=server, + retry_config=retry_config, + timeout_ms=timeout_ms, + debug_logger=debug_logger + )) + + hooks = SDKHooks() + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init(current_server_url, self.sdk_configuration.client) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + + self._init_sdks() + + + def _init_sdks(self): + self.models = Models(self.sdk_configuration) + self.files = Files(self.sdk_configuration) + self.fine_tuning = FineTuning(self.sdk_configuration) + self.chat = Chat(self.sdk_configuration) + self.fim = Fim(self.sdk_configuration) + self.agents = Agents(self.sdk_configuration) + self.embeddings = Embeddings(self.sdk_configuration) + diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py new file mode 100644 index 00000000..8da7f2a4 --- /dev/null +++ b/src/mistralai/sdkconfiguration.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + + +from ._hooks import SDKHooks +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai import models +from mistralai.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_PROD = "prod" +r"""Production server""" +SERVERS = { + SERVER_PROD: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: HttpClient + async_client: AsyncHttpClient + debug_logger: Logger + security: Optional[Union[models.Security,Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = "0.0.2" + sdk_version: str = "1.0.0-rc.2" + gen_version: str = "2.388.1" + user_agent: str = "speakeasy-sdk/python 1.0.0-rc.2 2.388.1 0.0.2 mistralai" + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def __post_init__(self): + self._hooks = SDKHooks() + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_PROD + + if self.server not in SERVERS: + raise ValueError(f"Invalid server \"{self.server}\"") + + return SERVERS[self.server], {} + + + def get_hooks(self) -> SDKHooks: + return self._hooks diff --git a/src/mistralai/types/__init__.py b/src/mistralai/types/__init__.py new file mode 100644 index 00000000..fc76fe0c --- /dev/null +++ b/src/mistralai/types/__init__.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basemodel import ( + BaseModel, + Nullable, + OptionalNullable, + UnrecognizedInt, + UnrecognizedStr, + UNSET, + UNSET_SENTINEL, +) + +__all__ = [ + "BaseModel", + "Nullable", + "OptionalNullable", + "UnrecognizedInt", + "UnrecognizedStr", + "UNSET", + "UNSET_SENTINEL", +] diff --git a/src/mistralai/types/basemodel.py b/src/mistralai/types/basemodel.py new file mode 100644 index 00000000..a6187efa --- /dev/null +++ b/src/mistralai/types/basemodel.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from pydantic import ConfigDict, model_serializer +from pydantic import BaseModel as PydanticBaseModel +from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType +from typing_extensions import TypeAliasType, TypeAlias + + +class BaseModel(PydanticBaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() + ) + + +class Unset(BaseModel): + @model_serializer(mode="plain") + def serialize_model(self): + return UNSET_SENTINEL + + def __bool__(self) -> Literal[False]: + return False + + +UNSET = Unset() +UNSET_SENTINEL = "~?~unset~?~sentinel~?~" + + +T = TypeVar("T") +if TYPE_CHECKING: + Nullable: TypeAlias = Union[T, None] + OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] +else: + Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) + OptionalNullable = TypeAliasType( + "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) + ) + +UnrecognizedInt = NewType("UnrecognizedInt", int) +UnrecognizedStr = NewType("UnrecognizedStr", str) diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py new file mode 100644 index 00000000..75ca0241 --- /dev/null +++ b/src/mistralai/utils/__init__.py @@ -0,0 +1,86 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .annotations import get_discriminator +from .enums import OpenEnumMeta +from .headers import get_headers, get_response_headers +from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, +) +from .queryparams import get_query_params +from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig +from .requestbodies import serialize_request_body, SerializedRequestBody +from .security import get_security, get_security_from_env + +from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, +) +from .url import generate_url, template_url, remove_suffix +from .values import get_global_from_env, match_content_type, match_status_codes, match_response +from .logger import Logger, get_body_content, NoOpLogger + +__all__ = [ + "BackoffStrategy", + "FieldMetadata", + "find_metadata", + "FormMetadata", + "generate_url", + "get_body_content", + "get_discriminator", + "get_global_from_env", + "get_headers", + "get_pydantic_model", + "get_query_params", + "get_response_headers", + "get_security", + "get_security_from_env", + "HeaderMetadata", + "Logger", + "marshal_json", + "match_content_type", + "match_status_codes", + "match_response", + "MultipartFormMetadata", + "NoOpLogger", + "OpenEnumMeta", + "PathParamMetadata", + "QueryParamMetadata", + "remove_suffix", + "Retries", + "retry", + "retry_async", + "RetryConfig", + "RequestMetadata", + "SecurityMetadata", + "serialize_decimal", + "serialize_float", + "serialize_int", + "serialize_request_body", + "SerializedRequestBody", + "stream_to_text", + "template_url", + "unmarshal", + "unmarshal_json", + "validate_decimal", + "validate_float", + "validate_int", + 
"validate_open_enum", +] diff --git a/src/mistralai/utils/annotations.py b/src/mistralai/utils/annotations.py new file mode 100644 index 00000000..0d17472b --- /dev/null +++ b/src/mistralai/utils/annotations.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any + +def get_discriminator(model: Any, fieldname: str, key: str) -> str: + if isinstance(model, dict): + try: + return f'{model.get(key)}' + except AttributeError as e: + raise ValueError(f'Could not find discriminator key {key} in {model}') from e + + if hasattr(model, fieldname): + return f'{getattr(model, fieldname)}' + + fieldname = fieldname.upper() + if hasattr(model, fieldname): + return f'{getattr(model, fieldname)}' + + raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/src/mistralai/utils/enums.py b/src/mistralai/utils/enums.py new file mode 100644 index 00000000..c650b10c --- /dev/null +++ b/src/mistralai/utils/enums.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import enum + + +class OpenEnumMeta(enum.EnumMeta): + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. 
+ # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value diff --git a/src/mistralai/utils/eventstreaming.py b/src/mistralai/utils/eventstreaming.py new file mode 100644 index 00000000..553b386b --- /dev/null +++ b/src/mistralai/utils/eventstreaming.py @@ -0,0 +1,178 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import re +import json +from typing import Callable, TypeVar, Optional, Generator, AsyncGenerator, Tuple +import httpx + +T = TypeVar("T") + + +class ServerEvent: + id: Optional[str] = None + event: Optional[str] = None + data: Optional[str] = None + retry: Optional[int] = None + + +MESSAGE_BOUNDARIES = [ + b"\r\n\r\n", + b"\n\n", + b"\r\r", +] + + +async def stream_events_async( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> AsyncGenerator[T, None]: + buffer = bytearray() + position = 0 + discard = False + async for chunk in response.aiter_bytes(): + # We've encountered the sentinel value and should no longer process + # incoming data. Instead we throw new data away until the server closes + # the connection. 
+ if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def stream_events( + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, +) -> Generator[T, None, None]: + buffer = bytearray() + position = 0 + discard = False + for chunk in response.iter_bytes(): + # We've encountered the sentinel value and should no longer process + # incoming data. Instead we throw new data away until the server closes + # the connection. 
+ if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def _parse_event( + raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None +) -> Tuple[Optional[T], bool]: + block = raw.decode() + lines = re.split(r"\r?\n|\r", block) + publish = False + event = ServerEvent() + data = "" + for line in lines: + if not line: + continue + + delim = line.find(":") + if delim <= 0: + continue + + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] + + if field == "event": + event.event = value + publish = True + elif field == "data": + data += value + "\n" + publish = True + elif field == "id": + event.id = value + publish = True + elif field == "retry": + event.retry = int(value) if value.isdigit() else None + publish = True + + if sentinel and data == f"{sentinel}\n": + return None, True + + if data: + data = data[:-1] + event.data = data + + data_is_primitive = ( + data.isnumeric() or data == "true" or data == "false" or data == "null" + ) + data_is_json = ( + data.startswith("{") or data.startswith("[") or data.startswith('"') + ) + + if data_is_primitive or data_is_json: + try: + event.data = json.loads(data) + except Exception: + pass + + out = None + if publish: + out = decoder(json.dumps(event.__dict__)) + + return out, False + + +def _peek_sequence(position: int, buffer: bytearray, sequence: 
bytes): + if len(sequence) > (len(buffer) - position): + return None + + for i, seq in enumerate(sequence): + if buffer[position + i] != seq: + return None + + return sequence diff --git a/src/mistralai/utils/forms.py b/src/mistralai/utils/forms.py new file mode 100644 index 00000000..07f9b235 --- /dev/null +++ b/src/mistralai/utils/forms.py @@ -0,0 +1,207 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ( + FormMetadata, + MultipartFormMetadata, + find_field_metadata, +) +from .values import _val_to_string + + +def _populate_form( + field_name: str, + explode: bool, + obj: Any, + delimiter: str, + form: Dict[str, List[str]], +): + if obj is None: + return form + + if isinstance(obj, BaseModel): + items = [] + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_field_name = obj_field.alias if obj_field.alias is not None else name + if obj_field_name == "": + continue + + val = getattr(obj, name) + if val is None: + continue + + if explode: + form[obj_field_name] = [_val_to_string(val)] + else: + items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, Dict): + items = [] + for key, value in obj.items(): + if value is None: + continue + + if explode: + form[key] = [_val_to_string(value)] + else: + items.append(f"{key}{delimiter}{_val_to_string(value)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, List): + items = [] + + for value in obj: + if value is None: + continue + + if explode: + if not field_name in form: + form[field_name] = [] + 
form[field_name].append(_val_to_string(value)) + else: + items.append(_val_to_string(value)) + + if len(items) > 0: + form[field_name] = [delimiter.join([str(item) for item in items])] + else: + form[field_name] = [_val_to_string(obj)] + + return form + + +def serialize_multipart_form( + media_type: str, request: Any +) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: + form: Dict[str, Any] = {} + files: Dict[str, Any] = {} + + if not isinstance(request, BaseModel): + raise TypeError("invalid request body type") + + request_fields: Dict[str, FieldInfo] = request.__class__.model_fields + request_field_types = get_type_hints(request.__class__) + + for name in request_fields: + field = request_fields[name] + + val = getattr(request, name) + if val is None: + continue + + field_metadata = find_field_metadata(field, MultipartFormMetadata) + if not field_metadata: + continue + + f_name = field.alias if field.alias is not None else name + + if field_metadata.file: + file_fields: Dict[str, FieldInfo] = val.__class__.model_fields + + file_name = "" + field_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(val, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(val, file_field_name, None) + else: + field_name = ( + file_field.alias + if file_field.alias is not None + else file_field_name + ) + file_name = getattr(val, file_field_name) + + if field_name == "" or file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + if content_type is not None: + files[field_name] = (file_name, content, content_type) + else: + files[field_name] = (file_name, content) + elif field_metadata.json: + files[f_name] = ( + None, + marshal_json(val, request_field_types[name]), + 
"application/json", + ) + else: + if isinstance(val, List): + values = [] + + for value in val: + if value is None: + continue + values.append(_val_to_string(value)) + + form[f_name + "[]"] = values + else: + form[f_name] = _val_to_string(val) + return media_type, form, files + + +def serialize_form_data(data: Any) -> Dict[str, Any]: + form: Dict[str, List[str]] = {} + + if isinstance(data, BaseModel): + data_fields: Dict[str, FieldInfo] = data.__class__.model_fields + data_field_types = get_type_hints(data.__class__) + for name in data_fields: + field = data_fields[name] + + val = getattr(data, name) + if val is None: + continue + + metadata = find_field_metadata(field, FormMetadata) + if metadata is None: + continue + + f_name = field.alias if field.alias is not None else name + + if metadata.json: + form[f_name] = [marshal_json(val, data_field_types[name])] + else: + if metadata.style == "form": + _populate_form( + f_name, + metadata.explode, + val, + ",", + form, + ) + else: + raise ValueError(f"Invalid form style for field {name}") + elif isinstance(data, Dict): + for key, value in data.items(): + form[key] = [_val_to_string(value)] + else: + raise TypeError(f"Invalid request body type {type(data)} for form data") + + return form diff --git a/src/mistralai/utils/headers.py b/src/mistralai/utils/headers.py new file mode 100644 index 00000000..e14a0f4a --- /dev/null +++ b/src/mistralai/utils/headers.py @@ -0,0 +1,136 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + List, + Optional, +) +from httpx import Headers +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + HeaderMetadata, + find_field_metadata, +) + +from .values import _populate_from_globals, _val_to_string + + +def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: + headers: Dict[str, str] = {} + + globals_already_populated = [] + if headers_params is not None: + globals_already_populated = _populate_headers(headers_params, gbls, headers, []) + if gbls is not None: + _populate_headers(gbls, None, headers, globals_already_populated) + + return headers + + +def _populate_headers( + headers_params: Any, + gbls: Any, + header_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(headers_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + f_name = field.alias if field.alias is not None else name + + metadata = find_field_metadata(field, HeaderMetadata) + if metadata is None: + continue + + value, global_found = _populate_from_globals( + name, getattr(headers_params, name), HeaderMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + value = _serialize_header(metadata.explode, value) + + if value != "": + header_values[f_name] = value + + return globals_already_populated + + +def _serialize_header(explode: bool, obj: Any) -> str: + if obj is None: + return "" + + if isinstance(obj, BaseModel): + items = [] + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_param_metadata = find_field_metadata(obj_field, HeaderMetadata) + + if not obj_param_metadata: + continue + + f_name = 
obj_field.alias if obj_field.alias is not None else name + + val = getattr(obj, name) + if val is None: + continue + + if explode: + items.append(f"{f_name}={_val_to_string(val)}") + else: + items.append(f_name) + items.append(_val_to_string(val)) + + if len(items) > 0: + return ",".join(items) + elif isinstance(obj, Dict): + items = [] + + for key, value in obj.items(): + if value is None: + continue + + if explode: + items.append(f"{key}={_val_to_string(value)}") + else: + items.append(key) + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join([str(item) for item in items]) + elif isinstance(obj, List): + items = [] + + for value in obj: + if value is None: + continue + + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join(items) + else: + return f"{_val_to_string(obj)}" + + return "" + + +def get_response_headers(headers: Headers) -> Dict[str, List[str]]: + res: Dict[str, List[str]] = {} + for k, v in headers.items(): + if not k in res: + res[k] = [] + + res[k].append(v) + return res diff --git a/src/mistralai/utils/logger.py b/src/mistralai/utils/logger.py new file mode 100644 index 00000000..7e4bbeac --- /dev/null +++ b/src/mistralai/utils/logger.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Any, Protocol + +class Logger(Protocol): + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + +class NoOpLogger: + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + +def get_body_content(req: httpx.Request) -> str: + return "" if not hasattr(req, "_content") else str(req.content) + diff --git a/src/mistralai/utils/metadata.py b/src/mistralai/utils/metadata.py new file mode 100644 index 00000000..173b3e5c --- /dev/null +++ b/src/mistralai/utils/metadata.py @@ -0,0 +1,118 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Optional, Type, TypeVar, Union +from dataclasses import dataclass +from pydantic.fields import FieldInfo + + +T = TypeVar("T") + + +@dataclass +class SecurityMetadata: + option: bool = False + scheme: bool = False + scheme_type: Optional[str] = None + sub_type: Optional[str] = None + field_name: Optional[str] = None + + def get_field_name(self, default: str) -> str: + return self.field_name or default + + +@dataclass +class ParamMetadata: + serialization: Optional[str] = None + style: str = "simple" + explode: bool = False + + +@dataclass +class PathParamMetadata(ParamMetadata): + pass + + +@dataclass +class QueryParamMetadata(ParamMetadata): + style: str = "form" + explode: bool = True + + +@dataclass +class HeaderMetadata(ParamMetadata): + pass + + +@dataclass +class RequestMetadata: + media_type: str = "application/octet-stream" + + +@dataclass +class MultipartFormMetadata: + file: bool = False + content: bool = False + json: bool = False + + +@dataclass +class FormMetadata: + json: bool = False + style: str = "form" + explode: bool = True + + +class FieldMetadata: + security: Optional[SecurityMetadata] = None + path: Optional[PathParamMetadata] = None + query: Optional[QueryParamMetadata] = None + header: Optional[HeaderMetadata] = None + request: 
Optional[RequestMetadata] = None + form: Optional[FormMetadata] = None + multipart: Optional[MultipartFormMetadata] = None + + def __init__( + self, + security: Optional[SecurityMetadata] = None, + path: Optional[Union[PathParamMetadata, bool]] = None, + query: Optional[Union[QueryParamMetadata, bool]] = None, + header: Optional[Union[HeaderMetadata, bool]] = None, + request: Optional[Union[RequestMetadata, bool]] = None, + form: Optional[Union[FormMetadata, bool]] = None, + multipart: Optional[Union[MultipartFormMetadata, bool]] = None, + ): + self.security = security + self.path = PathParamMetadata() if isinstance(path, bool) else path + self.query = QueryParamMetadata() if isinstance(query, bool) else query + self.header = HeaderMetadata() if isinstance(header, bool) else header + self.request = RequestMetadata() if isinstance(request, bool) else request + self.form = FormMetadata() if isinstance(form, bool) else form + self.multipart = ( + MultipartFormMetadata() if isinstance(multipart, bool) else multipart + ) + + +def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = find_metadata(field_info, FieldMetadata) + if not metadata: + return None + + fields = metadata.__dict__ + + for field in fields: + if isinstance(fields[field], metadata_type): + return fields[field] + + return None + + +def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = field_info.metadata + if not metadata: + return None + + for md in metadata: + if isinstance(md, metadata_type): + return md + + return None diff --git a/src/mistralai/utils/queryparams.py b/src/mistralai/utils/queryparams.py new file mode 100644 index 00000000..1c8c5834 --- /dev/null +++ b/src/mistralai/utils/queryparams.py @@ -0,0 +1,203 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, +) + +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + QueryParamMetadata, + find_field_metadata, +) +from .values import _get_serialized_params, _populate_from_globals, _val_to_string +from .forms import _populate_form + + +def get_query_params( + query_params: Any, + gbls: Optional[Any] = None, +) -> Dict[str, List[str]]: + params: Dict[str, List[str]] = {} + + globals_already_populated = _populate_query_params(query_params, gbls, params, []) + if gbls is not None: + _populate_query_params(gbls, None, params, globals_already_populated) + + return params + + +def _populate_query_params( + query_params: Any, + gbls: Any, + query_param_values: Dict[str, List[str]], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(query_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields + param_field_types = get_type_hints(query_params.__class__) + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + + metadata = find_field_metadata(field, QueryParamMetadata) + if not metadata: + continue + + value = getattr(query_params, name) if query_params is not None else None + + value, global_found = _populate_from_globals( + name, value, QueryParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + f_name = field.alias if field.alias is not None else name + serialization = metadata.serialization + if serialization is not None: + serialized_parms = _get_serialized_params( + metadata, f_name, value, param_field_types[name] + ) + for key, value in serialized_parms.items(): + if key in query_param_values: + query_param_values[key].extend(value) + else: + query_param_values[key] = [value] + else: + style = metadata.style + if style == 
"deepObject": + _populate_deep_object_query_params(f_name, value, query_param_values) + elif style == "form": + _populate_delimited_query_params( + metadata, f_name, value, ",", query_param_values + ) + elif style == "pipeDelimited": + _populate_delimited_query_params( + metadata, f_name, value, "|", query_param_values + ) + else: + raise NotImplementedError( + f"query param style {style} not yet supported" + ) + + return globals_already_populated + + +def _populate_deep_object_query_params( + field_name: str, + obj: Any, + params: Dict[str, List[str]], +): + if obj is None: + return + + if isinstance(obj, BaseModel): + _populate_deep_object_query_params_basemodel(field_name, obj, params) + elif isinstance(obj, Dict): + _populate_deep_object_query_params_dict(field_name, obj, params) + + +def _populate_deep_object_query_params_basemodel( + prior_params_key: str, + obj: Any, + params: Dict[str, List[str]], +): + if obj is None: + return + + if not isinstance(obj, BaseModel): + return + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + + f_name = obj_field.alias if obj_field.alias is not None else name + + params_key = f"{prior_params_key}[{f_name}]" + + obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) + if obj_param_metadata is None: + continue + + obj_val = getattr(obj, name) + if obj_val is None: + continue + + if isinstance(obj_val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, obj_val, params) + elif isinstance(obj_val, Dict): + _populate_deep_object_query_params_dict(params_key, obj_val, params) + elif isinstance(obj_val, List): + _populate_deep_object_query_params_list(params_key, obj_val, params) + else: + params[params_key] = [_val_to_string(obj_val)] + + +def _populate_deep_object_query_params_dict( + prior_params_key: str, + value: Dict, + params: Dict[str, List[str]], +): + if value is None: + return + + for key, val in value.items(): + 
if val is None: + continue + + params_key = f"{prior_params_key}[{key}]" + + if isinstance(val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, val, params) + elif isinstance(val, Dict): + _populate_deep_object_query_params_dict(params_key, val, params) + elif isinstance(val, List): + _populate_deep_object_query_params_list(params_key, val, params) + else: + params[params_key] = [_val_to_string(val)] + + +def _populate_deep_object_query_params_list( + params_key: str, + value: List, + params: Dict[str, List[str]], +): + if value is None: + return + + for val in value: + if val is None: + continue + + if params.get(params_key) is None: + params[params_key] = [] + + params[params_key].append(_val_to_string(val)) + + +def _populate_delimited_query_params( + metadata: QueryParamMetadata, + field_name: str, + obj: Any, + delimiter: str, + query_param_values: Dict[str, List[str]], +): + _populate_form( + field_name, + metadata.explode, + obj, + delimiter, + query_param_values, + ) diff --git a/src/mistralai/utils/requestbodies.py b/src/mistralai/utils/requestbodies.py new file mode 100644 index 00000000..4f586ae7 --- /dev/null +++ b/src/mistralai/utils/requestbodies.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import io +from dataclasses import dataclass +import re +from typing import ( + Any, + Optional, +) + +from .forms import serialize_form_data, serialize_multipart_form + +from .serializers import marshal_json + +SERIALIZATION_METHOD_TO_CONTENT_TYPE = { + "json": "application/json", + "form": "application/x-www-form-urlencoded", + "multipart": "multipart/form-data", + "raw": "application/octet-stream", + "string": "text/plain", +} + + +@dataclass +class SerializedRequestBody: + media_type: str + content: Optional[Any] = None + data: Optional[Any] = None + files: Optional[Any] = None + + +def serialize_request_body( + request_body: Any, + nullable: bool, + optional: bool, + serialization_method: str, + request_body_type, +) -> Optional[SerializedRequestBody]: + if request_body is None: + if not nullable and optional: + return None + + media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] + + serialized_request_body = SerializedRequestBody(media_type) + + if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None: + serialized_request_body.content = marshal_json(request_body, request_body_type) + elif re.match(r"multipart\/.*", media_type) is not None: + ( + serialized_request_body.media_type, + serialized_request_body.data, + serialized_request_body.files, + ) = serialize_multipart_form(media_type, request_body) + elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None: + serialized_request_body.data = serialize_form_data(request_body) + elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): + serialized_request_body.content = request_body + elif isinstance(request_body, str): + serialized_request_body.content = request_body + else: + raise TypeError( + f"invalid request body type {type(request_body)} for mediaType {media_type}" + ) + + return serialized_request_body diff --git a/src/mistralai/utils/retries.py b/src/mistralai/utils/retries.py new file mode 100644 
index 00000000..a06f9279 --- /dev/null +++ b/src/mistralai/utils/retries.py @@ -0,0 +1,216 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import random +import time +from typing import List + +import httpx + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__( + self, + initial_interval: int, + max_interval: int, + exponent: float, + max_elapsed_time: int, + ): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__( + self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool + ): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + response: httpx.Response + + def __init__(self, response: httpx.Response): + self.response = response + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def retry(func, retries: Retries): + if retries.config.strategy == "backoff": + + def do_request() -> httpx.Response: + res: httpx.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: 
+ raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return retry_with_backoff( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return func() + + +async def retry_async(func, retries: Retries): + if retries.config.strategy == "backoff": + + async def do_request() -> httpx.Response: + res: httpx.Response + try: + res = await func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return await retry_with_backoff_async( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return await func() + + +def retry_with_backoff( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: 
+ raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + sleep = min(sleep, max_interval / 1000) + time.sleep(sleep) + retries += 1 + + +async def retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + sleep = min(sleep, max_interval / 1000) + time.sleep(sleep) + retries += 1 diff --git a/src/mistralai/utils/security.py b/src/mistralai/utils/security.py new file mode 100644 index 00000000..4c511d94 --- /dev/null +++ b/src/mistralai/utils/security.py @@ -0,0 +1,185 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import base64 + +from typing import ( + Any, + Dict, + List, + Optional, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) +import os + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + # Special case for basic auth which could be a flattened model + if metadata.sub_type == "basic" and not isinstance(value, BaseModel): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def get_security_from_env(security: Any, security_class: Any) -> Optional[BaseModel]: + if security is not None: + return security + + if not issubclass(security_class, BaseModel): + raise TypeError("security_class must be a pydantic model class") + + security_dict: Any = {} + + if os.getenv("MISTRAL_API_KEY"): + security_dict["api_key"] = os.getenv("MISTRAL_API_KEY") + + return security_class(**security_dict) if security_dict else None + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a pydantic model") + + opt_fields: 
Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def _parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + field_name: str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http" and sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError("sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + 
headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + else: + raise ValueError("sub type {sub_type} not supported") + else: + raise ValueError("scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) -> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py new file mode 100644 index 00000000..a98998a3 --- /dev/null +++ b/src/mistralai/utils/serializers.py @@ -0,0 +1,181 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from decimal import Decimal +import json +from typing import Any, Dict, List, Union, get_args +import httpx +from typing_extensions import get_origin +from pydantic import ConfigDict, create_model +from pydantic_core import from_json +from typing_inspect import is_optional_type + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable + + +def serialize_decimal(as_str: bool): + def serialize(d): + if is_optional_type(type(d)) and d is None: + return None + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, Decimal): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + if is_optional_type(type(f)) and f is None: + return None + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, float): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(b): + if is_optional_type(type(b)) and b is None: + return None + + if not isinstance(b, int): + raise ValueError("Expected int") + + return str(b) if as_str else b + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, int): + return b + + if not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_open_enum(is_int: bool): + def validate(e): + if e is None: + return None + + if is_int: + if not isinstance(e, int): + raise ValueError("Expected int") + else: + if not isinstance(e, str): + raise ValueError("Expected string") + + return e + + return validate + + 
+def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False diff --git a/src/mistralai/utils/url.py b/src/mistralai/utils/url.py new file mode 100644 index 00000000..b201bfa4 --- /dev/null +++ b/src/mistralai/utils/url.py @@ -0,0 +1,150 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from decimal import Decimal +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, + Union, + get_args, + get_origin, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + PathParamMetadata, + find_field_metadata, +) +from .values import _get_serialized_params, _populate_from_globals, _val_to_string + + +def generate_url( + server_url: str, + path: str, + path_params: Any, + gbls: Optional[Any] = None, +) -> str: + path_param_values: Dict[str, str] = {} + + globals_already_populated = _populate_path_params( + path_params, gbls, path_param_values, [] + ) + if gbls is not None: + _populate_path_params(gbls, None, path_param_values, globals_already_populated) + + for key, value in path_param_values.items(): + path = path.replace("{" + key + "}", value, 1) + + return remove_suffix(server_url, "/") + path + + +def _populate_path_params( + path_params: Any, + gbls: Any, + path_param_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(path_params, BaseModel): + return globals_already_populated + + path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields + path_param_field_types = get_type_hints(path_params.__class__) + for name in path_param_fields: + if name in skip_fields: + continue + + field = path_param_fields[name] + + param_metadata = find_field_metadata(field, PathParamMetadata) + if param_metadata is None: + continue + + param = getattr(path_params, name) if path_params is not None else None + param, global_found = _populate_from_globals( + name, param, PathParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + if param is None: + continue + + f_name = field.alias if field.alias is not None else name + serialization = param_metadata.serialization + if serialization is not None: 
+ serialized_params = _get_serialized_params( + param_metadata, f_name, param, path_param_field_types[name] + ) + for key, value in serialized_params.items(): + path_param_values[key] = value + else: + pp_vals: List[str] = [] + if param_metadata.style == "simple": + if isinstance(param, List): + for pp_val in param: + if pp_val is None: + continue + pp_vals.append(_val_to_string(pp_val)) + path_param_values[f_name] = ",".join(pp_vals) + elif isinstance(param, Dict): + for pp_key in param: + if param[pp_key] is None: + continue + if param_metadata.explode: + pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") + else: + pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") + path_param_values[f_name] = ",".join(pp_vals) + elif not isinstance(param, (str, int, float, complex, bool, Decimal)): + param_fields: Dict[str, FieldInfo] = param.__class__.model_fields + for name in param_fields: + param_field = param_fields[name] + + param_value_metadata = find_field_metadata( + param_field, PathParamMetadata + ) + if param_value_metadata is None: + continue + + param_name = ( + param_field.alias if param_field.alias is not None else name + ) + + param_field_val = getattr(param, name) + if param_field_val is None: + continue + if param_metadata.explode: + pp_vals.append( + f"{param_name}={_val_to_string(param_field_val)}" + ) + else: + pp_vals.append( + f"{param_name},{_val_to_string(param_field_val)}" + ) + path_param_values[f_name] = ",".join(pp_vals) + else: + path_param_values[f_name] = _val_to_string(param) + + return globals_already_populated + + +def is_optional(field): + return get_origin(field) is Union and type(None) in get_args(field) + + +def template_url(url_with_params: str, params: Dict[str, str]) -> str: + for key, value in params.items(): + url_with_params = url_with_params.replace("{" + key + "}", value) + + return url_with_params + + +def remove_suffix(input_string, suffix): + if suffix and input_string.endswith(suffix): + return input_string[: 
-len(suffix)] + return input_string diff --git a/src/mistralai/utils/values.py b/src/mistralai/utils/values.py new file mode 100644 index 00000000..24ccae3d --- /dev/null +++ b/src/mistralai/utils/values.py @@ -0,0 +1,128 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +from enum import Enum +from email.message import Message +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union + +from httpx import Response +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ParamMetadata, find_field_metadata + + +def match_content_type(content_type: str, pattern: str) -> bool: + if pattern in (content_type, "*", "*/*"): + return True + + msg = Message() + msg["content-type"] = content_type + media_type = msg.get_content_type() + + if media_type == pattern: + return True + + parts = media_type.split("/") + if len(parts) == 2: + if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): + return True + + return False + + +def match_status_codes(status_codes: List[str], status_code: int) -> bool: + if "default" in status_codes: + return True + + for code in status_codes: + if code == str(status_code): + return True + + if code.endswith("XX") and code.startswith(str(status_code)[:1]): + return True + return False + + +T = TypeVar("T") + + +def get_global_from_env( + value: Optional[T], env_key: str, type_cast: Callable[[str], T] +) -> Optional[T]: + if value is not None: + return value + env_value = os.getenv(env_key) + if env_value is not None: + try: + return type_cast(env_value) + except ValueError: + pass + return None + + +def match_response( + response: Response, code: Union[str, List[str]], content_type: str +) -> bool: + codes = code if isinstance(code, list) else [code] + return match_status_codes(codes, response.status_code) and match_content_type( + 
response.headers.get("content-type", "application/octet-stream"), content_type + ) + + +def _populate_from_globals( + param_name: str, value: Any, param_metadata_type: type, gbls: Any +) -> Tuple[Any, bool]: + if gbls is None: + return value, False + + if not isinstance(gbls, BaseModel): + raise TypeError("globals must be a pydantic model") + + global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields + found = False + for name in global_fields: + field = global_fields[name] + if name is not param_name: + continue + + found = True + + if value is not None: + return value, True + + global_value = getattr(gbls, name) + + param_metadata = find_field_metadata(field, param_metadata_type) + if param_metadata is None: + return value, True + + return global_value, True + + return value, found + + +def _val_to_string(val) -> str: + if isinstance(val, bool): + return str(val).lower() + if isinstance(val, datetime): + return str(val.isoformat().replace("+00:00", "Z")) + if isinstance(val, Enum): + return str(val.value) + + return str(val) + + +def _get_serialized_params( + metadata: ParamMetadata, field_name: str, obj: Any, typ: type +) -> Dict[str, str]: + params: Dict[str, str] = {} + + serialization = metadata.serialization + if serialization == "json": + params[field_name] = marshal_json(obj, typ) + + return params diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/conftest.py b/tests/conftest.py deleted file mode 100644 index c43f7aa2..00000000 --- a/tests/conftest.py +++ /dev/null @@ -1,19 +0,0 @@ -from unittest import mock - -import pytest -from mistralai.async_client import MistralAsyncClient -from mistralai.client import MistralClient - - -@pytest.fixture() -def client(): - client = MistralClient(api_key="test_api_key") - client._client = mock.MagicMock() - return client - - -@pytest.fixture() -def async_client(): - client = MistralAsyncClient(api_key="test_api_key") - client._client = 
mock.AsyncMock() - return client diff --git a/tests/test_chat.py b/tests/test_chat.py deleted file mode 100644 index 15a40651..00000000 --- a/tests/test_chat.py +++ /dev/null @@ -1,149 +0,0 @@ -import io -import logging - -import pytest -from mistralai.constants import HEADER_MODEL_DEPRECATION_TIMESTAMP -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, - ChatMessage, -) - -from .utils import ( - mock_chat_response_payload, - mock_chat_response_streaming_payload, - mock_response, - mock_stream_response, -) - - -class TestChat: - @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) - def test_chat(self, client, target_deprecated_model): - headers = ( - { - HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", - } - if target_deprecated_model - else {} - ) - - client._client.request.return_value = mock_response(200, mock_chat_response_payload(), headers) - - # Create a stream to capture the log output - log_stream = io.StringIO() - - # Create a logger and add a handler that writes to the stream - logger = client._logger - handler = logging.StreamHandler(log_stream) - logger.addHandler(handler) - - result = client.chat( - model="mistral-small-latest", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ) - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/chat/completions", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "stream": False, - }, - data=None, - ) - - assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" - assert len(result.choices) 
== 1 - assert result.choices[0].index == 0 - assert result.object == "chat.completion" - - # Check if the log message was produced when the model is deprecated - log_output = log_stream.getvalue() - excepted_log = ( - ( - "WARNING: The model mistral-small-latest is deprecated " - "and will be removed on 2023-12-01T00:00:00. " - "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" - ) - if target_deprecated_model - else "" - ) - assert excepted_log == log_output - - @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) - def test_chat_streaming(self, client, target_deprecated_model): - headers = ( - { - HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", - } - if target_deprecated_model - else {} - ) - - client._client.stream.return_value = mock_stream_response(200, mock_chat_response_streaming_payload(), headers) - - # Create a stream to capture the log output - log_stream = io.StringIO() - - # Create a logger and add a handler that writes to the stream - logger = client._logger - handler = logging.StreamHandler(log_stream) - logger.addHandler(handler) - - result = client.chat_stream( - model="mistral-small-latest", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ) - - results = list(result) - - client._client.stream.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/chat/completions", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "text/event-stream", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "stream": True, - }, - data=None, - ) - - for i, result in enumerate(results): - if i == 0: - assert isinstance(result, 
ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == 0 - assert result.choices[0].delta.role == "assistant" - else: - assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == i - 1 - assert result.choices[0].delta.content == f"stream response {i-1}" - assert result.object == "chat.completion.chunk" - - # Check if the log message was produced - log_output = log_stream.getvalue() - excepted_log = ( - ( - "WARNING: The model mistral-small-latest is deprecated " - "and will be removed on 2023-12-01T00:00:00. " - "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" - ) - if target_deprecated_model - else "" - ) - assert excepted_log == log_output diff --git a/tests/test_chat_async.py b/tests/test_chat_async.py deleted file mode 100644 index c16a9a80..00000000 --- a/tests/test_chat_async.py +++ /dev/null @@ -1,157 +0,0 @@ -import io -import logging -import unittest.mock as mock - -import pytest -from mistralai.constants import ( - HEADER_MODEL_DEPRECATION_TIMESTAMP, -) -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, - ChatMessage, -) - -from .utils import ( - mock_async_stream_response, - mock_chat_response_payload, - mock_chat_response_streaming_payload, - mock_response, -) - - -class TestAsyncChat: - @pytest.mark.asyncio - @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) - async def test_chat(self, async_client, target_deprecated_model): - headers = ( - { - HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", - } - if target_deprecated_model - else {} - ) - - async_client._client.request.return_value = mock_response(200, 
mock_chat_response_payload(), headers) - - # Create a stream to capture the log output - log_stream = io.StringIO() - - # Create a logger and add a handler that writes to the stream - logger = async_client._logger - handler = logging.StreamHandler(log_stream) - logger.addHandler(handler) - - result = await async_client.chat( - model="mistral-small-latest", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ) - - async_client._client.request.assert_awaited_once_with( - "post", - "https://api.mistral.ai/v1/chat/completions", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "stream": False, - }, - data=None, - ) - - assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == 0 - assert result.object == "chat.completion" - - # Check if the log message was produced when the model is deprecated - log_output = log_stream.getvalue() - excepted_log = ( - ( - "WARNING: The model mistral-small-latest is deprecated " - "and will be removed on 2023-12-01T00:00:00. 
" - "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" - ) - if target_deprecated_model - else "" - ) - assert excepted_log == log_output - - @pytest.mark.asyncio - @pytest.mark.parametrize("target_deprecated_model", [True, False], ids=["deprecated", "not_deprecated"]) - async def test_chat_streaming(self, async_client, target_deprecated_model): - headers = ( - { - HEADER_MODEL_DEPRECATION_TIMESTAMP: "2023-12-01T00:00:00", - } - if target_deprecated_model - else {} - ) - - async_client._client.stream = mock.Mock() - async_client._client.stream.return_value = mock_async_stream_response( - 200, mock_chat_response_streaming_payload(), headers - ) - - # Create a stream to capture the log output - log_stream = io.StringIO() - - # Create a logger and add a handler that writes to the stream - logger = async_client._logger - handler = logging.StreamHandler(log_stream) - logger.addHandler(handler) - - result = async_client.chat_stream( - model="mistral-small-latest", - messages=[ChatMessage(role="user", content="What is the best French cheese?")], - ) - - results = [r async for r in result] - - async_client._client.stream.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/chat/completions", - headers={ - "Accept": "text/event-stream", - "User-Agent": f"mistral-client-python/{async_client._version}", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "messages": [{"role": "user", "content": "What is the best French cheese?"}], - "stream": True, - }, - data=None, - ) - - for i, result in enumerate(results): - if i == 0: - assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == 0 - assert result.choices[0].delta.role == "assistant" 
- else: - assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == i - 1 - assert result.choices[0].delta.content == f"stream response {i-1}" - assert result.object == "chat.completion.chunk" - - # Check if the log message was produced when the model is deprecated - log_output = log_stream.getvalue() - excepted_log = ( - ( - "WARNING: The model mistral-small-latest is deprecated " - "and will be removed on 2023-12-01T00:00:00. " - "Please refer to https://docs.mistral.ai/getting-started/models/#api-versioning for more information.\n" - ) - if target_deprecated_model - else "" - ) - assert excepted_log == log_output diff --git a/tests/test_completion.py b/tests/test_completion.py deleted file mode 100644 index a30cfcf6..00000000 --- a/tests/test_completion.py +++ /dev/null @@ -1,99 +0,0 @@ -from mistralai.models.chat_completion import ( - ChatCompletionResponse, - ChatCompletionStreamResponse, -) - -from .utils import ( - mock_completion_response_payload, - mock_response, - mock_stream_response, -) - - -class TestCompletion: - def test_completion(self, client): - client._client.request.return_value = mock_response( - 200, - mock_completion_response_payload(), - ) - - result = client.completion( - model="mistral-small-latest", - prompt="def add(a, b):", - suffix="return a + b", - temperature=0.5, - max_tokens=50, - top_p=0.9, - random_seed=42, - ) - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/fim/completions", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "prompt": "def add(a, b):", - "suffix": "return a + b", - "stream": False, - 
"temperature": 0.5, - "max_tokens": 50, - "top_p": 0.9, - "random_seed": 42, - }, - data=None, - ) - - assert isinstance(result, ChatCompletionResponse), "Should return an ChatCompletionResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == 0 - assert result.object == "chat.completion" - - def test_completion_streaming(self, client): - client._client.stream.return_value = mock_stream_response( - 200, - mock_completion_response_payload(), - ) - - result = client.completion_stream( - model="mistral-small-latest", prompt="def add(a, b):", suffix="return a + b", stop=["#"] - ) - - results = list(result) - - client._client.stream.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/fim/completions", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "text/event-stream", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-small-latest", - "prompt": "def add(a, b):", - "suffix": "return a + b", - "stream": True, - "stop": ["#"], - }, - data=None, - ) - - for i, result in enumerate(results): - if i == 0: - assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == 0 - assert result.choices[0].delta.role == "assistant" - else: - assert isinstance(result, ChatCompletionStreamResponse), "Should return an ChatCompletionStreamResponse" - assert len(result.choices) == 1 - assert result.choices[0].index == i - 1 - assert result.choices[0].delta.content == f"stream response {i - 1}" - assert result.object == "chat.completion.chunk" diff --git a/tests/test_delete_model.py b/tests/test_delete_model.py deleted file mode 100644 index d050c21a..00000000 --- a/tests/test_delete_model.py +++ /dev/null @@ -1,26 +0,0 @@ -from mistralai.models.models import ModelDeleted - -from .utils import 
mock_model_deleted_response_payload, mock_response - - -class TestDeleteModel: - def test_delete_model(self, client): - expected_response_model = ModelDeleted.model_validate_json(mock_model_deleted_response_payload()) - client._client.request.return_value = mock_response(200, expected_response_model.json()) - - response_model = client.delete_model("model_id") - - client._client.request.assert_called_once_with( - "delete", - "https://api.mistral.ai/v1/models/model_id", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - - assert response_model == expected_response_model diff --git a/tests/test_delete_model_async.py b/tests/test_delete_model_async.py deleted file mode 100644 index 9fa393e8..00000000 --- a/tests/test_delete_model_async.py +++ /dev/null @@ -1,28 +0,0 @@ -import pytest -from mistralai.models.models import ModelDeleted - -from .utils import mock_model_deleted_response_payload, mock_response - - -class TestAsyncDeleteModel: - @pytest.mark.asyncio - async def test_delete_model(self, async_client): - expected_response_model = ModelDeleted.model_validate_json(mock_model_deleted_response_payload()) - async_client._client.request.return_value = mock_response(200, expected_response_model.json()) - - response_model = await async_client.delete_model("model_id") - - async_client._client.request.assert_called_once_with( - "delete", - "https://api.mistral.ai/v1/models/model_id", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - - assert response_model == expected_response_model diff --git a/tests/test_embedder.py b/tests/test_embedder.py deleted 
file mode 100644 index 6d9a0df3..00000000 --- a/tests/test_embedder.py +++ /dev/null @@ -1,66 +0,0 @@ -from mistralai.models.embeddings import EmbeddingResponse - -from .utils import mock_embedding_response_payload, mock_response - - -class TestEmbeddings: - def test_embeddings(self, client): - client._client.request.return_value = mock_response( - 200, - mock_embedding_response_payload(), - ) - - result = client.embeddings( - model="mistral-embed", - input="What is the best French cheese?", - ) - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/embeddings", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={"model": "mistral-embed", "input": "What is the best French cheese?"}, - data=None, - ) - - assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" - assert len(result.data) == 1 - assert result.data[0].index == 0 - assert result.object == "list" - - def test_embeddings_batch(self, client): - client._client.request.return_value = mock_response( - 200, - mock_embedding_response_payload(batch_size=10), - ) - - result = client.embeddings( - model="mistral-embed", - input=["What is the best French cheese?"] * 10, - ) - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/embeddings", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={ - "model": "mistral-embed", - "input": ["What is the best French cheese?"] * 10, - }, - data=None, - ) - - assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" - assert len(result.data) == 10 - assert result.data[0].index == 0 - 
assert result.object == "list" diff --git a/tests/test_embedder_async.py b/tests/test_embedder_async.py deleted file mode 100644 index 3de16011..00000000 --- a/tests/test_embedder_async.py +++ /dev/null @@ -1,69 +0,0 @@ -import pytest -from mistralai.models.embeddings import EmbeddingResponse - -from .utils import mock_embedding_response_payload, mock_response - - -class TestAsyncEmbeddings: - @pytest.mark.asyncio - async def test_embeddings(self, async_client): - async_client._client.request.return_value = mock_response( - 200, - mock_embedding_response_payload(), - ) - - result = await async_client.embeddings( - model="mistral-embed", - input="What is the best French cheese?", - ) - - async_client._client.request.assert_awaited_once_with( - "post", - "https://api.mistral.ai/v1/embeddings", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={"model": "mistral-embed", "input": "What is the best French cheese?"}, - data=None, - ) - - assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" - assert len(result.data) == 1 - assert result.data[0].index == 0 - assert result.object == "list" - - @pytest.mark.asyncio - async def test_embeddings_batch(self, async_client): - async_client._client.request.return_value = mock_response( - 200, - mock_embedding_response_payload(batch_size=10), - ) - - result = await async_client.embeddings( - model="mistral-embed", - input=["What is the best French cheese?"] * 10, - ) - - async_client._client.request.assert_awaited_once_with( - "post", - "https://api.mistral.ai/v1/embeddings", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - 
json={ - "model": "mistral-embed", - "input": ["What is the best French cheese?"] * 10, - }, - data=None, - ) - - assert isinstance(result, EmbeddingResponse), "Should return an EmbeddingResponse" - assert len(result.data) == 10 - assert result.data[0].index == 0 - assert result.object == "list" diff --git a/tests/test_files.py b/tests/test_files.py deleted file mode 100644 index e4ef9e4d..00000000 --- a/tests/test_files.py +++ /dev/null @@ -1,105 +0,0 @@ -import orjson -from mistralai.models.files import FileDeleted, FileObject - -from .utils import ( - mock_file_deleted_response_payload, - mock_file_response_payload, - mock_response, -) - - -class TestFilesClient: - def test_create_file(self, client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - client._client.request.return_value = mock_response( - 200, - expected_response_file.json(), - ) - - response_file = client.files.create(b"file_content") - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/files", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - }, - files={"file": b"file_content"}, - json=None, - data={"purpose": "fine-tune"}, - ) - assert response_file == expected_response_file - - def test_retrieve(self, client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - client._client.request.return_value = mock_response( - 200, - expected_response_file.json(), - ) - - response_file = client.files.retrieve("file_id") - - client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/files/file_id", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, 
- json={}, - data=None, - ) - assert response_file == expected_response_file - - def test_list_files(self, client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - client._client.request.return_value = mock_response( - 200, - orjson.dumps( - { - "data": [expected_response_file.model_dump()], - "object": "list", - } - ), - ) - - response_files = client.files.list() - response_file = response_files.data[0] - - client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/files", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_file == expected_response_file - - def test_delete_file(self, client): - expected_response_file = FileDeleted.model_validate_json(mock_file_deleted_response_payload()) - client._client.request.return_value = mock_response(200, expected_response_file.json()) - - response_file = client.files.delete("file_id") - - client._client.request.assert_called_once_with( - "delete", - "https://api.mistral.ai/v1/files/file_id", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_file == expected_response_file diff --git a/tests/test_files_async.py b/tests/test_files_async.py deleted file mode 100644 index 7248f407..00000000 --- a/tests/test_files_async.py +++ /dev/null @@ -1,110 +0,0 @@ -import orjson -import pytest -from mistralai.models.files import FileDeleted, FileObject - -from .utils import ( - mock_file_deleted_response_payload, - mock_file_response_payload, - mock_response, -) - - -class TestFilesAyncClient: - @pytest.mark.asyncio - async 
def test_create_file(self, async_client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - expected_response_file.json(), - ) - - response_file = await async_client.files.create(b"file_content") - - async_client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/files", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - }, - files={"file": b"file_content"}, - json=None, - data={"purpose": "fine-tune"}, - ) - assert response_file == expected_response_file - - @pytest.mark.asyncio - async def test_retrieve(self, async_client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - expected_response_file.json(), - ) - - response_file = await async_client.files.retrieve("file_id") - - async_client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/files/file_id", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_file == expected_response_file - - @pytest.mark.asyncio - async def test_list_files(self, async_client): - expected_response_file = FileObject.model_validate_json(mock_file_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - orjson.dumps( - { - "data": [expected_response_file.model_dump()], - "object": "list", - } - ), - ) - - response_files = await async_client.files.list() - response_file = response_files.data[0] - - async_client._client.request.assert_called_once_with( - "get", - 
"https://api.mistral.ai/v1/files", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_file == expected_response_file - - @pytest.mark.asyncio - async def test_delete_file(self, async_client): - expected_response_file = FileDeleted.model_validate_json(mock_file_deleted_response_payload()) - async_client._client.request.return_value = mock_response(200, expected_response_file.json()) - - response_file = await async_client.files.delete("file_id") - - async_client._client.request.assert_called_once_with( - "delete", - "https://api.mistral.ai/v1/files/file_id", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_file == expected_response_file diff --git a/tests/test_jobs.py b/tests/test_jobs.py deleted file mode 100644 index efb19b7a..00000000 --- a/tests/test_jobs.py +++ /dev/null @@ -1,128 +0,0 @@ -import orjson -from mistralai.models.jobs import DetailedJob, Job, TrainingParameters - -from .utils import ( - mock_detailed_job_response_payload, - mock_job_response_payload, - mock_response, -) - - -class TestJobsClient: - def test_create(self, client): - expected_response_job = Job.model_validate_json(mock_job_response_payload()) - client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = client.jobs.create( - model="model", - training_files=["training_file_id"], - validation_files=["validation_file_id"], - hyperparameters=TrainingParameters( - training_steps=1800, - learning_rate=1.0e-4, - ), - ) - - client._client.request.assert_called_once_with( - "post", 
- "https://api.mistral.ai/v1/fine_tuning/jobs", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={ - "model": "model", - "training_files": ["training_file_id"], - "validation_files": ["validation_file_id"], - "hyperparameters": { - "training_steps": 1800, - "learning_rate": 1.0e-4, - }, - "suffix": None, - "integrations": None, - }, - data=None, - params={"dry_run": False}, - ) - assert response_job == expected_response_job - - def test_retrieve(self, client): - expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) - client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = client.jobs.retrieve("job_id") - - client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/fine_tuning/jobs/job_id", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_job == expected_response_job - - def test_list(self, client): - expected_response_job = Job.model_validate_json(mock_job_response_payload()) - client._client.request.return_value = mock_response( - 200, - orjson.dumps( - { - "data": [expected_response_job.model_dump()], - "object": "list", - } - ), - ) - - response_jobs = client.jobs.list() - response_job = response_jobs.data[0] - - client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/fine_tuning/jobs", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer 
test_api_key", - }, - json={}, - data=None, - params={"page": 0, "page_size": 10}, - ) - assert response_job == expected_response_job - - def test_cancel(self, client): - expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) - client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = client.jobs.cancel("job_id") - - client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/fine_tuning/jobs/job_id/cancel", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_job == expected_response_job diff --git a/tests/test_jobs_async.py b/tests/test_jobs_async.py deleted file mode 100644 index 2d0d488f..00000000 --- a/tests/test_jobs_async.py +++ /dev/null @@ -1,133 +0,0 @@ -import orjson -import pytest -from mistralai.models.jobs import DetailedJob, Job, TrainingParameters - -from .utils import ( - mock_detailed_job_response_payload, - mock_job_response_payload, - mock_response, -) - - -class TestJobsClient: - @pytest.mark.asyncio - async def test_create(self, async_client): - expected_response_job = Job.model_validate_json(mock_job_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = await async_client.jobs.create( - model="model", - training_files=["training_file_id"], - validation_files=["validation_file_id"], - hyperparameters=TrainingParameters( - training_steps=1800, - learning_rate=1.0e-4, - ), - ) - - async_client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/fine_tuning/jobs", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - 
"Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={ - "model": "model", - "training_files": ["training_file_id"], - "validation_files": ["validation_file_id"], - "hyperparameters": { - "training_steps": 1800, - "learning_rate": 1.0e-4, - }, - "suffix": None, - "integrations": None, - }, - data=None, - params={"dry_run": False}, - ) - assert response_job == expected_response_job - - @pytest.mark.asyncio - async def test_retrieve(self, async_client): - expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = await async_client.jobs.retrieve("job_id") - - async_client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/fine_tuning/jobs/job_id", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_job == expected_response_job - - @pytest.mark.asyncio - async def test_list(self, async_client): - expected_response_job = Job.model_validate_json(mock_job_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - orjson.dumps( - { - "data": [expected_response_job.model_dump()], - "object": "list", - } - ), - ) - - response_jobs = await async_client.jobs.list() - response_job = response_jobs.data[0] - - async_client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/fine_tuning/jobs", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - 
data=None, - params={"page": 0, "page_size": 10}, - ) - assert response_job == expected_response_job - - @pytest.mark.asyncio - async def test_cancel(self, async_client): - expected_response_job = DetailedJob.model_validate_json(mock_detailed_job_response_payload()) - async_client._client.request.return_value = mock_response( - 200, - expected_response_job.json(), - ) - - response_job = await async_client.jobs.cancel("job_id") - - async_client._client.request.assert_called_once_with( - "post", - "https://api.mistral.ai/v1/fine_tuning/jobs/job_id/cancel", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Content-Type": "application/json", - "Authorization": "Bearer test_api_key", - }, - json={}, - data=None, - ) - assert response_job == expected_response_job diff --git a/tests/test_list_models.py b/tests/test_list_models.py deleted file mode 100644 index 15de8475..00000000 --- a/tests/test_list_models.py +++ /dev/null @@ -1,30 +0,0 @@ -from mistralai.models.models import ModelList - -from .utils import mock_list_models_response_payload, mock_response - - -class TestListModels: - def test_list_models(self, client): - client._client.request.return_value = mock_response( - 200, - mock_list_models_response_payload(), - ) - - result = client.list_models() - - client._client.request.assert_called_once_with( - "get", - "https://api.mistral.ai/v1/models", - headers={ - "User-Agent": f"mistral-client-python/{client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={}, - data=None, - ) - - assert isinstance(result, ModelList), "Should return an ModelList" - assert len(result.data) == 4 - assert result.object == "list" diff --git a/tests/test_list_models_async.py b/tests/test_list_models_async.py deleted file mode 100644 index 2f3d7b44..00000000 
--- a/tests/test_list_models_async.py +++ /dev/null @@ -1,32 +0,0 @@ -import pytest -from mistralai.models.models import ModelList - -from .utils import mock_list_models_response_payload, mock_response - - -class TestAsyncListModels: - @pytest.mark.asyncio - async def test_list_models(self, async_client): - async_client._client.request.return_value = mock_response( - 200, - mock_list_models_response_payload(), - ) - - result = await async_client.list_models() - - async_client._client.request.assert_awaited_once_with( - "get", - "https://api.mistral.ai/v1/models", - headers={ - "User-Agent": f"mistral-client-python/{async_client._version}", - "Accept": "application/json", - "Authorization": "Bearer test_api_key", - "Content-Type": "application/json", - }, - json={}, - data=None, - ) - - assert isinstance(result, ModelList), "Should return an ModelList" - assert len(result.data) == 4 - assert result.object == "list" diff --git a/tests/utils.py b/tests/utils.py deleted file mode 100644 index 4c2ca146..00000000 --- a/tests/utils.py +++ /dev/null @@ -1,335 +0,0 @@ -import contextlib -import unittest.mock as mock -from typing import Any, Dict, List - -import orjson -from httpx import Response - - -@contextlib.contextmanager -def mock_stream_response(status_code: int, content: List[str], headers: Dict[str, Any] = None): - response = mock.Mock(Response) - response.status_code = status_code - response.headers = headers if headers else {} - response.iter_lines.return_value = iter(content) - yield response - - -@contextlib.asynccontextmanager -async def mock_async_stream_response(status_code: int, content: List[str], headers: Dict[str, Any] = None): - response = mock.Mock(Response) - response.status_code = status_code - response.headers = headers if headers else {} - - async def async_iter(content: List[str]): - for line in content: - yield line - - response.aiter_lines.return_value = async_iter(content) - yield response - - -def 
mock_response( - status_code: int, content: str, headers: Dict[str, Any] = None, is_json: bool = True -) -> mock.MagicMock: - response = mock.Mock(Response) - response.status_code = status_code - response.headers = headers if headers else {} - if is_json: - response.json = mock.MagicMock() - response.json.return_value = orjson.loads(content) - response.text = content - return response - - -def mock_list_models_response_payload() -> str: - return orjson.dumps( - { - "object": "list", - "data": [ - { - "id": "mistral-medium", - "object": "model", - "created": 1703186988, - "owned_by": "mistralai", - "root": None, - "parent": None, - "permission": [ - { - "id": "modelperm-15bebaf316264adb84b891bf06a84933", - "object": "model_permission", - "created": 1703186988, - "allow_create_engine": False, - "allow_sampling": True, - "allow_logprobs": False, - "allow_search_indices": False, - "allow_view": True, - "allow_fine_tuning": False, - "organization": "*", - "group": None, - "is_blocking": False, - } - ], - }, - { - "id": "mistral-small-latest", - "object": "model", - "created": 1703186988, - "owned_by": "mistralai", - "root": None, - "parent": None, - "permission": [ - { - "id": "modelperm-d0dced5c703242fa862f4ca3f241c00e", - "object": "model_permission", - "created": 1703186988, - "allow_create_engine": False, - "allow_sampling": True, - "allow_logprobs": False, - "allow_search_indices": False, - "allow_view": True, - "allow_fine_tuning": False, - "organization": "*", - "group": None, - "is_blocking": False, - } - ], - }, - { - "id": "mistral-tiny", - "object": "model", - "created": 1703186988, - "owned_by": "mistralai", - "root": None, - "parent": None, - "permission": [ - { - "id": "modelperm-0e64e727c3a94f17b29f8895d4be2910", - "object": "model_permission", - "created": 1703186988, - "allow_create_engine": False, - "allow_sampling": True, - "allow_logprobs": False, - "allow_search_indices": False, - "allow_view": True, - "allow_fine_tuning": False, - "organization": 
"*", - "group": None, - "is_blocking": False, - } - ], - }, - { - "id": "mistral-embed", - "object": "model", - "created": 1703186988, - "owned_by": "mistralai", - "root": None, - "parent": None, - "permission": [ - { - "id": "modelperm-ebdff9046f524e628059447b5932e3ad", - "object": "model_permission", - "created": 1703186988, - "allow_create_engine": False, - "allow_sampling": True, - "allow_logprobs": False, - "allow_search_indices": False, - "allow_view": True, - "allow_fine_tuning": False, - "organization": "*", - "group": None, - "is_blocking": False, - } - ], - }, - ], - } - ) - - -def mock_embedding_response_payload(batch_size: int = 1) -> str: - return orjson.dumps( - { - "id": "embd-98c8c60e3fbf4fc49658eddaf447357c", - "object": "list", - "data": [ - { - "object": "embedding", - "embedding": [-0.018585205078125, 0.027099609375, 0.02587890625], - "index": 0, - } - ] - * batch_size, - "model": "mistral-embed", - "usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0}, - } - ).decode() - - -def mock_chat_response_payload(): - return orjson.dumps( - { - "id": "chat-98c8c60e3fbf4fc49658eddaf447357c", - "object": "chat.completion", - "created": 1703165682, - "choices": [ - { - "finish_reason": "stop", - "message": { - "role": "assistant", - "content": "What is the best French cheese?", - }, - "index": 0, - } - ], - "model": "mistral-small-latest", - "usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0}, - } - ).decode() - - -def mock_chat_response_streaming_payload(): - return [ - "data: " - + orjson.dumps( - { - "id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e", - "model": "mistral-small-latest", - "choices": [ - { - "index": 0, - "delta": {"role": "assistant"}, - "finish_reason": None, - } - ], - } - ).decode() - + "\n\n", - *[ - "data: " - + orjson.dumps( - { - "id": "cmpl-8cd9019d21ba490aa6b9740f5d0a883e", - "object": "chat.completion.chunk", - "created": 1703168544, - "model": "mistral-small-latest", - "choices": [ - { - 
"index": i, - "delta": {"content": f"stream response {i}"}, - "finish_reason": None, - } - ], - } - ).decode() - + "\n\n" - for i in range(10) - ], - "data: [DONE]\n\n", - ] - - -def mock_completion_response_payload() -> str: - return orjson.dumps( - { - "id": "chat-98c8c60e3fbf4fc49658eddaf447357c", - "object": "chat.completion", - "created": 1703165682, - "choices": [ - { - "finish_reason": "stop", - "message": { - "role": "assistant", - "content": " a + b", - }, - "index": 0, - } - ], - "model": "mistral-small-latest", - "usage": {"prompt_tokens": 90, "total_tokens": 90, "completion_tokens": 0}, - } - ).decode() - - -def mock_job_response_payload() -> str: - return orjson.dumps( - { - "id": "job_id", - "hyperparameters": { - "training_steps": 1800, - "learning_rate": 1.0e-4, - }, - "fine_tuned_model": "fine_tuned_model", - "model": "model", - "status": "QUEUED", - "job_type": "job_type", - "created_at": 1633046400000, - "modified_at": 1633046400000, - "training_files": ["training_file_id"], - "validation_files": ["validation_file_id"], - "object": "job", - "integrations": [], - } - ) - - -def mock_detailed_job_response_payload() -> str: - return orjson.dumps( - { - "id": "job_id", - "hyperparameters": { - "training_steps": 1800, - "learning_rate": 1.0e-4, - }, - "fine_tuned_model": "fine_tuned_model", - "model": "model", - "status": "QUEUED", - "job_type": "job_type", - "created_at": 1633046400000, - "modified_at": 1633046400000, - "training_files": ["training_file_id"], - "validation_files": ["validation_file_id"], - "object": "job", - "integrations": [], - "events": [ - { - "name": "event_name", - "created_at": 1633046400000, - } - ], - } - ) - - -def mock_file_response_payload() -> str: - return orjson.dumps( - { - "id": "file_id", - "object": "file", - "bytes": 0, - "created_at": 1633046400000, - "filename": "file.jsonl", - "purpose": "fine-tune", - } - ) - - -def mock_file_deleted_response_payload() -> str: - return orjson.dumps( - { - "id": "file_id", - 
"object": "file", - "deleted": True, - } - ) - - -def mock_model_deleted_response_payload() -> str: - return orjson.dumps( - { - "id": "model_id", - "object": "model", - "deleted": True, - } - ) From 821aa40af50c8b8c1e285e2720ae0ed93e981082 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 7 Aug 2024 18:58:57 +0200 Subject: [PATCH 063/223] publish 1.0.0 (#125) Co-authored-by: speakeasybot --- .speakeasy/gen.lock | 8 +++++--- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 2 +- README.md | 22 +++++++++++----------- RELEASES.md | 11 +++++++++++ docs/sdks/agents/README.md | 4 ++-- pyproject.toml | 2 +- src/mistralai/agents.py | 4 ++-- src/mistralai/sdkconfiguration.py | 4 ++-- 9 files changed, 36 insertions(+), 23 deletions(-) create mode 100644 RELEASES.md diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index cf866343..655c1cae 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,14 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: ab9fe4a3c278619e334a828e2c336554 + docChecksum: 9e7a46648104410da4d94d5c2b373d20 docVersion: 0.0.2 speakeasyVersion: 1.356.0 generationVersion: 2.388.1 - releaseVersion: 1.0.0-rc.2 - configChecksum: 09abab5b4ed374c8d48d4e9b9ca6eb65 + releaseVersion: 1.0.0 + configChecksum: ab3daea0e59b8225d1742719ddfb86f4 + repoURL: https://github.com/mistralai/client-python.git + installationURL: https://github.com/mistralai/client-python.git published: true features: python: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 13a25388..7013a1bb 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.0-rc.2 + version: 1.0.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock 
b/.speakeasy/workflow.lock index 47bfbe00..81d68d32 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -9,7 +9,7 @@ targets: outLocation: ./packages/mistralai_gcp mistralai-sdk: source: mistral-openapi - outLocation: /Users/gaspard/public-mistral/client-python + outLocation: /github/workspace/repo workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/README.md b/README.md index d207a895..04b8b476 100644 --- a/README.md +++ b/README.md @@ -324,7 +324,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA ### [agents](docs/sdks/agents/README.md) -* [complete](docs/sdks/agents/README.md#complete) - Chat Completion +* [complete](docs/sdks/agents/README.md#complete) - Agents Completion * [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion ### [embeddings](docs/sdks/embeddings/README.md) @@ -450,10 +450,10 @@ if res is not None: Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. -| Error Object | Status Code | Content Type | -| -------------------------- | ----------- | ---------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| Error Object | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | ### Example @@ -490,9 +490,9 @@ if res is not None: You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. 
This table lists the names associated with the available servers: -| Name | Server | Variables | -| ------ | ------------------------ | --------- | -| `prod` | `https://api.mistral.ai` | None | +| Name | Server | Variables | +| ----- | ------ | --------- | +| `prod` | `https://api.mistral.ai` | None | #### Example @@ -625,9 +625,9 @@ s = Mistral(async_client=CustomClient(httpx.AsyncClient())) This SDK supports the following security scheme globally: -| Name | Type | Scheme | Environment Variable | -| --------- | ---- | ----------- | -------------------- | -| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | +| Name | Type | Scheme | Environment Variable | +| -------------------- | -------------------- | -------------------- | -------------------- | +| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | To authenticate with the API the `null` parameter must be set when initializing the SDK client instance. For example: ```python diff --git a/RELEASES.md b/RELEASES.md new file mode 100644 index 00000000..cb062cbf --- /dev/null +++ b/RELEASES.md @@ -0,0 +1,11 @@ + + +## 2024-08-07 14:25:13 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.356.0 (2.388.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.0.0] . +### Releases +- [PyPI v1.0.0] https://pypi.org/project/mistralai/1.0.0 - . \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index e8740f3a..e3981c13 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -7,12 +7,12 @@ Agents API. 
### Available Operations -* [complete](#complete) - Chat Completion +* [complete](#complete) - Agents Completion * [stream](#stream) - Stream Agents completion ## complete -Chat Completion +Agents Completion ### Example Usage diff --git a/pyproject.toml b/pyproject.toml index bea3e42d..a42fee09 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.0.0-rc.2" +version = "1.0.0" description = "Python Client SDK for the Mistral AI API." authors = ["Mistral"] readme = "README.md" diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 12ea5754..e1c87040 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -27,7 +27,7 @@ def complete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, ) -> Optional[models.ChatCompletionResponse]: - r"""Chat Completion + r"""Agents Completion :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param agent_id: The ID of the agent to use for this completion. @@ -131,7 +131,7 @@ async def complete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, ) -> Optional[models.ChatCompletionResponse]: - r"""Chat Completion + r"""Agents Completion :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param agent_id: The ID of the agent to use for this completion. 
diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 8da7f2a4..81df1bf6 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.0-rc.2" + sdk_version: str = "1.0.0" gen_version: str = "2.388.1" - user_agent: str = "speakeasy-sdk/python 1.0.0-rc.2 2.388.1 0.0.2 mistralai" + user_agent: str = "speakeasy-sdk/python 1.0.0 2.388.1 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From 8d7ebd8d585a08f5b7074e35ad96950d744414b3 Mon Sep 17 00:00:00 2001 From: pandora <128635000+pandora-s-git@users.noreply.github.com> Date: Thu, 8 Aug 2024 15:34:30 +0200 Subject: [PATCH 064/223] Update MIGRATION.md --- MIGRATION.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MIGRATION.md b/MIGRATION.md index 6582b85b..07f6e33a 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -160,7 +160,7 @@ messages = [ }, ] -stream_response = client.chat.stream_async( +stream_response = client.chat.stream( model = model, messages = messages, ) From aee92fa11f4c3b6d0abf98a072fc5b11a2738464 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Thu, 8 Aug 2024 20:05:51 +0200 Subject: [PATCH 065/223] fix fc streaming (#127) --- .../sdk_generation_mistralai_sdk.yaml | 2 +- .gitignore | 1 + .speakeasy/gen.lock | 15 +++++---- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 4 +-- README.md | 12 ++++++- docs/models/deltamessage.md | 10 +++--- packages/mistralai_azure/.gitignore | 1 + packages/mistralai_azure/.speakeasy/gen.lock | 15 +++++---- packages/mistralai_azure/.speakeasy/gen.yaml | 2 +- .../docs/models/deltamessage.md | 10 +++--- packages/mistralai_azure/poetry.lock | 16 ++++++++- packages/mistralai_azure/pyproject.toml | 5 +-- 
packages/mistralai_azure/scripts/compile.sh | 2 ++ .../mistralai_azure/scripts/prepare-readme.py | 9 +++++ .../models/chatcompletionrequest.py | 28 ++++++++-------- .../models/chatcompletionstreamrequest.py | 28 ++++++++-------- .../mistralai_azure/models/deltamessage.py | 6 ++-- .../mistralai_azure/models/functioncall.py | 12 +++---- .../mistralai_azure/models/systemmessage.py | 12 +++---- .../src/mistralai_azure/models/usermessage.py | 12 +++---- .../mistralai_azure/models/validationerror.py | 12 +++---- .../src/mistralai_azure/sdkconfiguration.py | 6 ++-- packages/mistralai_gcp/.gitignore | 1 + packages/mistralai_gcp/.speakeasy/gen.lock | 15 +++++---- packages/mistralai_gcp/.speakeasy/gen.yaml | 2 +- .../mistralai_gcp/docs/models/deltamessage.md | 10 +++--- packages/mistralai_gcp/poetry.lock | 16 ++++++++- packages/mistralai_gcp/pyproject.toml | 5 +-- packages/mistralai_gcp/scripts/compile.sh | 2 ++ .../mistralai_gcp/scripts/prepare-readme.py | 9 +++++ .../models/chatcompletionrequest.py | 28 ++++++++-------- .../models/chatcompletionstreamrequest.py | 28 ++++++++-------- .../src/mistralai_gcp/models/deltamessage.py | 6 ++-- .../models/fimcompletionrequest.py | 16 ++++----- .../models/fimcompletionstreamrequest.py | 16 ++++----- .../src/mistralai_gcp/models/functioncall.py | 12 +++---- .../src/mistralai_gcp/models/systemmessage.py | 12 +++---- .../src/mistralai_gcp/models/usermessage.py | 12 +++---- .../mistralai_gcp/models/validationerror.py | 12 +++---- .../src/mistralai_gcp/sdkconfiguration.py | 6 ++-- pyproject.toml | 2 +- scripts/compile.sh | 2 ++ scripts/prepare-readme.py | 33 +++++++++++++++++++ .../models/agentscompletionrequest.py | 28 ++++++++-------- .../models/agentscompletionstreamrequest.py | 16 ++++----- src/mistralai/models/chatcompletionrequest.py | 28 ++++++++-------- .../models/chatcompletionstreamrequest.py | 28 ++++++++-------- src/mistralai/models/deltamessage.py | 6 ++-- src/mistralai/models/embeddingrequest.py | 16 ++++----- 
src/mistralai/models/fimcompletionrequest.py | 16 ++++----- .../models/fimcompletionstreamrequest.py | 16 ++++----- src/mistralai/models/functioncall.py | 12 +++---- src/mistralai/models/systemmessage.py | 12 +++---- src/mistralai/models/usermessage.py | 12 +++---- src/mistralai/models/validationerror.py | 12 +++---- src/mistralai/sdkconfiguration.py | 6 ++-- 57 files changed, 389 insertions(+), 286 deletions(-) create mode 100644 packages/mistralai_azure/scripts/prepare-readme.py create mode 100644 packages/mistralai_gcp/scripts/prepare-readme.py create mode 100644 scripts/prepare-readme.py diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index 7d0540e7..32564ae5 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -22,7 +22,7 @@ jobs: mode: pr set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest - target: mistralai-sdk + target: all secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} diff --git a/.gitignore b/.gitignore index 999b933e..965344c8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +README-PYPI.md .venv/ pyrightconfig.json src/*.egg-info/ diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 655c1cae..90d9e1b5 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 9e7a46648104410da4d94d5c2b373d20 + docChecksum: a7c4268dd1228c969aecfd7cfdb6ca3c docVersion: 0.0.2 - speakeasyVersion: 1.356.0 - generationVersion: 2.388.1 - releaseVersion: 1.0.0 - configChecksum: ab3daea0e59b8225d1742719ddfb86f4 + speakeasyVersion: 1.357.4 + generationVersion: 2.390.6 + releaseVersion: 1.0.2 + configChecksum: ed07f7fc253047a5a4dd2c0f813b8ea4 repoURL: https://github.com/mistralai/client-python.git 
installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,13 +14,13 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.3.4 + core: 5.3.7 defaultEnabledRetries: 0.2.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 flatRequests: 1.0.1 flattening: 3.0.0 - globalSecurity: 3.0.1 + globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.0.0 @@ -51,6 +51,7 @@ generatedFiles: - py.typed - pylintrc - scripts/compile.sh + - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai/__init__.py - src/mistralai/basesdk.py diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 7013a1bb..c613fdbb 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.0 + version: 1.0.2 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 81d68d32..57bf6871 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.356.0 +speakeasyVersion: 1.357.4 sources: {} targets: mistralai-azure-sdk: @@ -9,7 +9,7 @@ targets: outLocation: ./packages/mistralai_gcp mistralai-sdk: source: mistral-openapi - outLocation: /github/workspace/repo + outLocation: /Users/gaspard/public-mistral/client-python workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/README.md b/README.md index 04b8b476..44237eac 100644 --- a/README.md +++ b/README.md @@ -629,7 +629,7 @@ This SDK supports the following security scheme globally: | -------------------- | -------------------- | -------------------- | -------------------- | | `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | -To authenticate with the API the `null` parameter must be set when initializing the SDK client instance. 
For example: +To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: ```python from mistralai import Mistral import os @@ -662,6 +662,16 @@ s = Mistral(debug_logger=logging.getLogger("mistralai")) ``` + +## IDE Support + +### PyCharm + +Generally, the SDK will work well with most IDEs out of the box. However, when using PyCharm, you can enjoy much better integration with Pydantic by installing an additional plugin. + +- [PyCharm Pydantic Plugin](https://docs.pydantic.dev/latest/integrations/pycharm/) + + # Development diff --git a/docs/models/deltamessage.md b/docs/models/deltamessage.md index 4cb9e91e..9ad068b8 100644 --- a/docs/models/deltamessage.md +++ b/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | -| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | [OptionalNullable[models.ToolCall]](../models/toolcall.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/.gitignore b/packages/mistralai_azure/.gitignore index 477b7729..7755092b 100644 --- 
a/packages/mistralai_azure/.gitignore +++ b/packages/mistralai_azure/.gitignore @@ -1,3 +1,4 @@ +README-PYPI.md .venv/ venv/ src/*.egg-info/ diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 047a649a..78232812 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,23 +1,23 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: f04749e097bb06d5fb8850400b089250 + docChecksum: ec02d5407fd9354b416518c4b8fa8b95 docVersion: 0.0.2 - speakeasyVersion: 1.356.0 - generationVersion: 2.388.1 - releaseVersion: 1.0.0-rc.2 - configChecksum: 98e9cf39c9535097961a0ca73dbac10b + speakeasyVersion: 1.357.4 + generationVersion: 2.390.6 + releaseVersion: 1.0.0-rc.4 + configChecksum: ad8d0273f78dacd83fbba33510acd0a5 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.3.4 + core: 5.3.7 defaultEnabledRetries: 0.2.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 flatRequests: 1.0.1 - globalSecurity: 3.0.1 + globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.0.0 @@ -39,6 +39,7 @@ generatedFiles: - pylintrc - pyproject.toml - scripts/compile.sh + - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_azure/__init__.py - src/mistralai_azure/basesdk.py diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index b36d96f9..be4280bc 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.0-rc.2 + version: 1.0.0-rc.4 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_azure/docs/models/deltamessage.md b/packages/mistralai_azure/docs/models/deltamessage.md index 4cb9e91e..9ad068b8 100644 --- 
a/packages/mistralai_azure/docs/models/deltamessage.md +++ b/packages/mistralai_azure/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | -| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | [OptionalNullable[models.ToolCall]](../models/toolcall.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/poetry.lock b/packages/mistralai_azure/poetry.lock index 477ecfde..457c8ecc 100644 --- a/packages/mistralai_azure/poetry.lock +++ b/packages/mistralai_azure/poetry.lock @@ -87,6 +87,20 @@ files = [ graph = ["objgraph (>=1.7.2)"] profile = ["gprof2dot (>=2022.7.29)"] +[[package]] +name = "eval-type-backport" +version = "0.2.0" +description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, + {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, +] + +[package.extras] +tests = ["pytest"] + [[package]] name = "exceptiongroup" version = "1.2.2" @@ -635,4 +649,4 @@ typing-extensions = ">=3.7.4" [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "1478d3764c93fadedc6a94a2b911eb59eb142cd4b127d65deb7120a378e07c45" +content-hash = "c6fe50d9865be14321ec4828bf746f43c421f79507e9956b4e45ee6601fd1f0d" diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 15aea3d9..2253ab3d 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,9 +1,9 @@ [tool.poetry] name = "mistralai_azure" -version = "1.0.0-rc.2" +version = "1.0.0-rc.4" description = "Python Client SDK for the Mistral AI API in Azure." 
authors = ["Mistral",] -readme = "README.md" +readme = "README-PYPI.md" packages = [ { include = "mistralai_azure", from = "src" } ] @@ -17,6 +17,7 @@ in-project = true [tool.poetry.dependencies] python = "^3.8" +eval-type-backport = "^0.2.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" pydantic = "~2.8.2" diff --git a/packages/mistralai_azure/scripts/compile.sh b/packages/mistralai_azure/scripts/compile.sh index aa49772e..fafe635b 100755 --- a/packages/mistralai_azure/scripts/compile.sh +++ b/packages/mistralai_azure/scripts/compile.sh @@ -20,6 +20,8 @@ run_command() { } &> "$output_file" & } +poetry run python scripts/prepare-readme.py + # Create temporary files for outputs and statuses for cmd in compileall pylint mypy pyright; do output_files[$cmd]=$(mktemp) diff --git a/packages/mistralai_azure/scripts/prepare-readme.py b/packages/mistralai_azure/scripts/prepare-readme.py new file mode 100644 index 00000000..a8ef8ea1 --- /dev/null +++ b/packages/mistralai_azure/scripts/prepare-readme.py @@ -0,0 +1,9 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import shutil + +try: + shutil.copyfile('README.md', 'README-PYPI.md') +except Exception as e: + print("Failed to copy README.md to README-PYPI.md") + print(e) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index 352e8837..1de5cd86 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -14,6 +14,20 @@ from typing_extensions import Annotated, NotRequired +ChatCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + + ChatCompletionRequestToolChoice = Literal["auto", "none", "any"] class ChatCompletionRequestTypedDict(TypedDict): @@ -93,17 +107,3 @@ def serialize_model(self, handler): return m - -ChatCompletionRequestStopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -ChatCompletionRequestStop = Union[str, List[str]] -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] - - -ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 85276b15..86d3aa10 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -14,6 +14,20 @@ from typing_extensions import Annotated, NotRequired +StopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +Stop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + + ToolChoice = Literal["auto", "none", "any"] class ChatCompletionStreamRequestTypedDict(TypedDict): @@ -91,17 +105,3 @@ def serialize_model(self, handler): return m - -StopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -Stop = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] - - -Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py index 68d0221d..3d763df8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py @@ -4,20 +4,20 @@ from .toolcall import ToolCall, ToolCallTypedDict from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Optional, TypedDict +from typing import List, Optional, TypedDict from typing_extensions import NotRequired class DeltaMessageTypedDict(TypedDict): role: NotRequired[str] content: NotRequired[str] - tool_calls: NotRequired[Nullable[ToolCallTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: Optional[str] = None content: Optional[str] = None - tool_calls: OptionalNullable[ToolCall] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py index 2a9bc801..3259ad99 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py +++ 
b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py @@ -5,6 +5,12 @@ from typing import Any, Dict, TypedDict, Union +ArgumentsTypedDict = Union[Dict[str, Any], str] + + +Arguments = Union[Dict[str, Any], str] + + class FunctionCallTypedDict(TypedDict): name: str arguments: ArgumentsTypedDict @@ -14,9 +20,3 @@ class FunctionCall(BaseModel): name: str arguments: Arguments - -ArgumentsTypedDict = Union[Dict[str, Any], str] - - -Arguments = Union[Dict[str, Any], str] - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py index 1ed8a756..7898aecf 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py @@ -7,6 +7,12 @@ from typing_extensions import NotRequired +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] + + Role = Literal["system"] class SystemMessageTypedDict(TypedDict): @@ -18,9 +24,3 @@ class SystemMessage(BaseModel): content: Content role: Optional[Role] = "system" - -ContentTypedDict = Union[str, List[ContentChunkTypedDict]] - - -Content = Union[str, List[ContentChunk]] - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py index 8ddc8c8a..7c525d98 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py @@ -7,6 +7,12 @@ from typing_extensions import NotRequired +UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] + + +UserMessageContent = Union[str, List[TextChunk]] + + UserMessageRole = Literal["user"] class UserMessageTypedDict(TypedDict): @@ -18,9 +24,3 @@ class UserMessage(BaseModel): content: UserMessageContent role: Optional[UserMessageRole] = "user" - -UserMessageContentTypedDict 
= Union[str, List[TextChunkTypedDict]] - - -UserMessageContent = Union[str, List[TextChunk]] - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py index 626e9c4c..9b7b9a9a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py @@ -5,6 +5,12 @@ from typing import List, TypedDict, Union +LocTypedDict = Union[str, int] + + +Loc = Union[str, int] + + class ValidationErrorTypedDict(TypedDict): loc: List[LocTypedDict] msg: str @@ -16,9 +22,3 @@ class ValidationError(BaseModel): msg: str type: str - -LocTypedDict = Union[str, int] - - -Loc = Union[str, int] - diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 5ba1c4c7..b6cff2ab 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.0-rc.2" - gen_version: str = "2.388.1" - user_agent: str = "speakeasy-sdk/python 1.0.0-rc.2 2.388.1 0.0.2 mistralai_azure" + sdk_version: str = "1.0.0-rc.4" + gen_version: str = "2.390.6" + user_agent: str = "speakeasy-sdk/python 1.0.0-rc.4 2.390.6 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/.gitignore b/packages/mistralai_gcp/.gitignore index 477b7729..7755092b 100644 --- a/packages/mistralai_gcp/.gitignore +++ b/packages/mistralai_gcp/.gitignore @@ -1,3 +1,4 @@ +README-PYPI.md .venv/ venv/ src/*.egg-info/ diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 
ab483934..c28b2183 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,23 +1,23 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 5daa3767285068a2f496f5fd41eb7a01 + docChecksum: e2bc44269918d569bbc51b1521c4c29b docVersion: 0.0.2 - speakeasyVersion: 1.356.0 - generationVersion: 2.388.1 - releaseVersion: 1.0.0-rc.2 - configChecksum: 68063242d77238d1f19a7d7b0a39c381 + speakeasyVersion: 1.357.4 + generationVersion: 2.390.6 + releaseVersion: 1.0.0-rc.4 + configChecksum: a8248e0ef5bdbc73910c2aae86c3c3b5 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.3.4 + core: 5.3.7 defaultEnabledRetries: 0.2.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 flatRequests: 1.0.1 - globalSecurity: 3.0.1 + globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.0.0 @@ -40,6 +40,7 @@ generatedFiles: - pylintrc - pyproject.toml - scripts/compile.sh + - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py - src/mistralai_gcp/basesdk.py diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 2a5993bd..8e6ae7b5 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.0-rc.2 + version: 1.0.0-rc.4 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_gcp/docs/models/deltamessage.md b/packages/mistralai_gcp/docs/models/deltamessage.md index 4cb9e91e..9ad068b8 100644 --- a/packages/mistralai_gcp/docs/models/deltamessage.md +++ b/packages/mistralai_gcp/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | 
---------------------------------------------------------- | ---------------------------------------------------------- | -| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | [OptionalNullable[models.ToolCall]](../models/toolcall.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock index a3d5456c..befb32f7 100644 --- a/packages/mistralai_gcp/poetry.lock +++ b/packages/mistralai_gcp/poetry.lock @@ -197,6 +197,20 @@ files = [ graph = ["objgraph (>=1.7.2)"] profile = ["gprof2dot (>=2022.7.29)"] +[[package]] +name = "eval-type-backport" +version = "0.2.0" +description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, + {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, +] + +[package.extras] +tests = ["pytest"] + [[package]] name = "exceptiongroup" version = "1.2.2" @@ -845,4 +859,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "a68027cf5e3c64af190addf2b94014fb7eeb47d41cdd5c7f0ae2fb87305f83d0" +content-hash = "f7ec8ed73d60233b1bf0450f38af7d51c9dfe088ae0a0b8ee975ba2ae512d817" diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 48841e43..90afd369 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,9 +1,9 @@ [tool.poetry] name = "mistralai-gcp" -version = "1.0.0-rc.2" +version = "1.0.0-rc.4" description = "Python Client SDK for the Mistral AI API in GCP." 
authors = ["Mistral",] -readme = "README.md" +readme = "README-PYPI.md" packages = [ { include = "mistralai_gcp", from = "src" } ] @@ -17,6 +17,7 @@ in-project = true [tool.poetry.dependencies] python = "^3.8" +eval-type-backport = "^0.2.0" google-auth = "^2.31.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" diff --git a/packages/mistralai_gcp/scripts/compile.sh b/packages/mistralai_gcp/scripts/compile.sh index aa49772e..fafe635b 100755 --- a/packages/mistralai_gcp/scripts/compile.sh +++ b/packages/mistralai_gcp/scripts/compile.sh @@ -20,6 +20,8 @@ run_command() { } &> "$output_file" & } +poetry run python scripts/prepare-readme.py + # Create temporary files for outputs and statuses for cmd in compileall pylint mypy pyright; do output_files[$cmd]=$(mktemp) diff --git a/packages/mistralai_gcp/scripts/prepare-readme.py b/packages/mistralai_gcp/scripts/prepare-readme.py new file mode 100644 index 00000000..a8ef8ea1 --- /dev/null +++ b/packages/mistralai_gcp/scripts/prepare-readme.py @@ -0,0 +1,9 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import shutil + +try: + shutil.copyfile('README.md', 'README-PYPI.md') +except Exception as e: + print("Failed to copy README.md to README-PYPI.md") + print(e) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index 759aa1e5..a1053599 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -14,6 +14,20 @@ from typing_extensions import Annotated, NotRequired +ChatCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + + ChatCompletionRequestToolChoice = Literal["auto", "none", "any"] class ChatCompletionRequestTypedDict(TypedDict): @@ -89,17 +103,3 @@ def serialize_model(self, handler): return m - -ChatCompletionRequestStopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -ChatCompletionRequestStop = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] - - -ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index ad0fc799..ecf8393a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -14,6 +14,20 @@ from typing_extensions import Annotated, NotRequired +StopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +Stop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + + ToolChoice = Literal["auto", "none", "any"] class ChatCompletionStreamRequestTypedDict(TypedDict): @@ -87,17 +101,3 @@ def serialize_model(self, handler): return m - -StopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -Stop = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] - - -Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py index 34cc3464..30de9e7b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -4,20 +4,20 @@ from .toolcall import ToolCall, ToolCallTypedDict from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Optional, 
TypedDict +from typing import List, Optional, TypedDict from typing_extensions import NotRequired class DeltaMessageTypedDict(TypedDict): role: NotRequired[str] content: NotRequired[str] - tool_calls: NotRequired[Nullable[ToolCallTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: Optional[str] = None content: Optional[str] = None - tool_calls: OptionalNullable[ToolCall] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 15e36cc4..7e0e1b5a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -7,6 +7,14 @@ from typing_extensions import NotRequired +FIMCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + class FIMCompletionRequestTypedDict(TypedDict): model: Nullable[str] r"""ID of the model to use. Only compatible for now with: @@ -84,11 +92,3 @@ def serialize_model(self, handler): return m - -FIMCompletionRequestStopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -FIMCompletionRequestStop = Union[str, List[str]] -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 38888466..3f2dc80c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -7,6 +7,14 @@ from typing_extensions import NotRequired +FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + class FIMCompletionStreamRequestTypedDict(TypedDict): model: Nullable[str] r"""ID of the model to use. Only compatible for now with: @@ -82,11 +90,3 @@ def serialize_model(self, handler): return m - -FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -FIMCompletionStreamRequestStop = Union[str, List[str]] -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py index c188ad42..a036ad75 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py @@ -5,6 +5,12 @@ from typing import Any, Dict, TypedDict, Union +ArgumentsTypedDict = Union[Dict[str, Any], str] + + +Arguments = Union[Dict[str, Any], str] + + class FunctionCallTypedDict(TypedDict): name: str arguments: ArgumentsTypedDict @@ -14,9 +20,3 @@ class FunctionCall(BaseModel): name: str arguments: Arguments - -ArgumentsTypedDict = Union[Dict[str, Any], str] - - -Arguments = Union[Dict[str, Any], str] - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py index 461a4ccc..0209c5bb 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py @@ -7,6 +7,12 @@ from typing_extensions import NotRequired +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] + + Role = Literal["system"] class SystemMessageTypedDict(TypedDict): @@ -18,9 +24,3 @@ class SystemMessage(BaseModel): content: Content role: Optional[Role] = "system" - -ContentTypedDict = Union[str, List[ContentChunkTypedDict]] - - -Content = Union[str, List[ContentChunk]] - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py index 9e82ff34..98649bf1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -7,6 +7,12 @@ from typing_extensions import NotRequired +UserMessageContentTypedDict = Union[str, 
List[TextChunkTypedDict]] + + +UserMessageContent = Union[str, List[TextChunk]] + + UserMessageRole = Literal["user"] class UserMessageTypedDict(TypedDict): @@ -18,9 +24,3 @@ class UserMessage(BaseModel): content: UserMessageContent role: Optional[UserMessageRole] = "user" - -UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] - - -UserMessageContent = Union[str, List[TextChunk]] - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py index 4eee48c4..23008f45 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py @@ -5,6 +5,12 @@ from typing import List, TypedDict, Union +LocTypedDict = Union[str, int] + + +Loc = Union[str, int] + + class ValidationErrorTypedDict(TypedDict): loc: List[LocTypedDict] msg: str @@ -16,9 +22,3 @@ class ValidationError(BaseModel): msg: str type: str - -LocTypedDict = Union[str, int] - - -Loc = Union[str, int] - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index 65d3c752..94d271b9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.0-rc.2" - gen_version: str = "2.388.1" - user_agent: str = "speakeasy-sdk/python 1.0.0-rc.2 2.388.1 0.0.2 mistralai-gcp" + sdk_version: str = "1.0.0-rc.4" + gen_version: str = "2.390.6" + user_agent: str = "speakeasy-sdk/python 1.0.0-rc.4 2.390.6 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/pyproject.toml b/pyproject.toml index a42fee09..e6db0795 100644 --- a/pyproject.toml 
+++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.0.0" +version = "1.0.2" description = "Python Client SDK for the Mistral AI API." authors = ["Mistral"] readme = "README.md" diff --git a/scripts/compile.sh b/scripts/compile.sh index aa49772e..fafe635b 100755 --- a/scripts/compile.sh +++ b/scripts/compile.sh @@ -20,6 +20,8 @@ run_command() { } &> "$output_file" & } +poetry run python scripts/prepare-readme.py + # Create temporary files for outputs and statuses for cmd in compileall pylint mypy pyright; do output_files[$cmd]=$(mktemp) diff --git a/scripts/prepare-readme.py b/scripts/prepare-readme.py new file mode 100644 index 00000000..9111d6cb --- /dev/null +++ b/scripts/prepare-readme.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import re +import shutil + +try: + with open("README.md", "r") as rh: + readme_contents = rh.read() + GITHUB_URL = "https://github.com/mistralai/client-python.git" + GITHUB_URL = ( + GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL + ) + # links on PyPI should have absolute URLs + readme_contents = re.sub( + r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))", + lambda m: m.group(1) + + GITHUB_URL + + "/blob/master/" + + m.group(2) + + m.group(3), + readme_contents, + ) + + with open("README-PYPI.md", "w") as wh: + wh.write(readme_contents) +except Exception as e: + try: + print("Failed to rewrite README.md to README-PYPI.md, copying original instead") + print(e) + shutil.copyfile("README.md", "README-PYPI.md") + except Exception as e: + print("Failed to copy README.md to README-PYPI.md") + print(e) diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index 3eb8b380..6a62b46e 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -13,6 
+13,20 @@ from typing_extensions import Annotated, NotRequired +AgentsCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestMessagesTypedDict = Union[UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +AgentsCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + + AgentsCompletionRequestToolChoice = Literal["auto", "none", "any"] class AgentsCompletionRequestTypedDict(TypedDict): @@ -80,17 +94,3 @@ def serialize_model(self, handler): return m - -AgentsCompletionRequestStopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionRequestStop = Union[str, List[str]] -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionRequestMessagesTypedDict = Union[UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] - - -AgentsCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] - diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index e6a1ea64..91b5bcb2 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -7,6 +7,14 @@ from typing_extensions import NotRequired +AgentsCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + class AgentsCompletionStreamRequestTypedDict(TypedDict): model: Nullable[str] r"""ID of the model to use. Only compatible for now with: @@ -82,11 +90,3 @@ def serialize_model(self, handler): return m - -AgentsCompletionStreamRequestStopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionStreamRequestStop = Union[str, List[str]] -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index 30817c5a..bf6baf25 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -14,6 +14,20 @@ from typing_extensions import Annotated, NotRequired +StopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +Stop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + + ToolChoice = Literal["auto", "none", "any"] class ChatCompletionRequestTypedDict(TypedDict): @@ -93,17 +107,3 @@ def serialize_model(self, handler): return m - -StopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -Stop = Union[str, List[str]] -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] - - -Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] - diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 9523dd5a..9e2ae401 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -14,6 +14,20 @@ from typing_extensions import Annotated, NotRequired +ChatCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +ChatCompletionStreamRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + + ChatCompletionStreamRequestToolChoice = Literal["auto", "none", "any"] class ChatCompletionStreamRequestTypedDict(TypedDict): @@ -91,17 +105,3 @@ def serialize_model(self, handler): return m - -ChatCompletionStreamRequestStopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -ChatCompletionStreamRequestStop = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -ChatCompletionStreamRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] - - -ChatCompletionStreamRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] - diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py index 013f708c..228f1aab 100644 --- a/src/mistralai/models/deltamessage.py +++ b/src/mistralai/models/deltamessage.py @@ -4,20 +4,20 @@ from .toolcall import ToolCall, ToolCallTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Optional, TypedDict +from typing import List, Optional, TypedDict from typing_extensions import NotRequired class DeltaMessageTypedDict(TypedDict): role: NotRequired[str] content: NotRequired[str] - tool_calls: NotRequired[Nullable[ToolCallTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: Optional[str] = None content: Optional[str] = None - tool_calls: OptionalNullable[ToolCall] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 6875e14c..6d70cc8b 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -8,6 +8,14 @@ from typing_extensions import Annotated, NotRequired +InputsTypedDict = Union[str, 
List[str]] +r"""Text to embed.""" + + +Inputs = Union[str, List[str]] +r"""Text to embed.""" + + class EmbeddingRequestTypedDict(TypedDict): inputs: InputsTypedDict r"""Text to embed.""" @@ -51,11 +59,3 @@ def serialize_model(self, handler): return m - -InputsTypedDict = Union[str, List[str]] -r"""Text to embed.""" - - -Inputs = Union[str, List[str]] -r"""Text to embed.""" - diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py index ba941bb9..7e041681 100644 --- a/src/mistralai/models/fimcompletionrequest.py +++ b/src/mistralai/models/fimcompletionrequest.py @@ -7,6 +7,14 @@ from typing_extensions import NotRequired +FIMCompletionRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + class FIMCompletionRequestTypedDict(TypedDict): model: Nullable[str] r"""ID of the model to use. Only compatible for now with: @@ -84,11 +92,3 @@ def serialize_model(self, handler): return m - -FIMCompletionRequestStopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -FIMCompletionRequestStop = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py index 767d0416..6d01053a 100644 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -7,6 +7,14 @@ from typing_extensions import NotRequired +FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = Union[str, List[str]] +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + class FIMCompletionStreamRequestTypedDict(TypedDict): model: Nullable[str] r"""ID of the model to use. Only compatible for now with: @@ -82,11 +90,3 @@ def serialize_model(self, handler): return m - -FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -FIMCompletionStreamRequestStop = Union[str, List[str]] -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - diff --git a/src/mistralai/models/functioncall.py b/src/mistralai/models/functioncall.py index c8a6591d..4b79c325 100644 --- a/src/mistralai/models/functioncall.py +++ b/src/mistralai/models/functioncall.py @@ -5,6 +5,12 @@ from typing import Any, Dict, TypedDict, Union +ArgumentsTypedDict = Union[Dict[str, Any], str] + + +Arguments = Union[Dict[str, Any], str] + + class FunctionCallTypedDict(TypedDict): name: str arguments: ArgumentsTypedDict @@ -14,9 +20,3 @@ class FunctionCall(BaseModel): name: str arguments: Arguments - -ArgumentsTypedDict = Union[Dict[str, Any], str] - - -Arguments = Union[Dict[str, Any], str] - diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py index 171acf50..91d1a6ed 100644 --- a/src/mistralai/models/systemmessage.py +++ b/src/mistralai/models/systemmessage.py @@ -7,6 +7,12 @@ from typing_extensions import NotRequired +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] + + Role = Literal["system"] class SystemMessageTypedDict(TypedDict): @@ -18,9 +24,3 @@ class SystemMessage(BaseModel): content: Content role: Optional[Role] = "system" - -ContentTypedDict = Union[str, 
List[ContentChunkTypedDict]] - - -Content = Union[str, List[ContentChunk]] - diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py index bea7328b..a1749ec8 100644 --- a/src/mistralai/models/usermessage.py +++ b/src/mistralai/models/usermessage.py @@ -7,6 +7,12 @@ from typing_extensions import NotRequired +UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] + + +UserMessageContent = Union[str, List[TextChunk]] + + UserMessageRole = Literal["user"] class UserMessageTypedDict(TypedDict): @@ -18,9 +24,3 @@ class UserMessage(BaseModel): content: UserMessageContent role: Optional[UserMessageRole] = "user" - -UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] - - -UserMessageContent = Union[str, List[TextChunk]] - diff --git a/src/mistralai/models/validationerror.py b/src/mistralai/models/validationerror.py index 2d4a97bd..42b9af48 100644 --- a/src/mistralai/models/validationerror.py +++ b/src/mistralai/models/validationerror.py @@ -5,6 +5,12 @@ from typing import List, TypedDict, Union +LocTypedDict = Union[str, int] + + +Loc = Union[str, int] + + class ValidationErrorTypedDict(TypedDict): loc: List[LocTypedDict] msg: str @@ -16,9 +22,3 @@ class ValidationError(BaseModel): msg: str type: str - -LocTypedDict = Union[str, int] - - -Loc = Union[str, int] - diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 81df1bf6..bd21306e 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.0" - gen_version: str = "2.388.1" - user_agent: str = "speakeasy-sdk/python 1.0.0 2.388.1 0.0.2 mistralai" + sdk_version: str = "1.0.2" + gen_version: str = "2.390.6" + user_agent: str = "speakeasy-sdk/python 1.0.2 2.390.6 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: 
UNSET) timeout_ms: Optional[int] = None From 5d117a7df3adb933ecb55f999169bd5ee2780741 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Thu, 8 Aug 2024 20:11:17 +0200 Subject: [PATCH 066/223] fix workflow (#128) --- .github/workflows/sdk_generation_mistralai_sdk.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index 32564ae5..7d0540e7 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -22,7 +22,7 @@ jobs: mode: pr set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest - target: all + target: mistralai-sdk secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} From 55b92b18b36ed909e5b56036d6c7962f66447067 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 20:14:52 +0200 Subject: [PATCH 067/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.357.4 (#129) Co-authored-by: speakeasybot --- .speakeasy/gen.lock | 4 ++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 2 +- RELEASES.md | 12 +++++++++++- pyproject.toml | 2 +- src/mistralai/sdkconfiguration.py | 4 ++-- 6 files changed, 18 insertions(+), 8 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 90d9e1b5..0cbace74 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,8 +5,8 @@ management: docVersion: 0.0.2 speakeasyVersion: 1.357.4 generationVersion: 2.390.6 - releaseVersion: 1.0.2 - configChecksum: ed07f7fc253047a5a4dd2c0f813b8ea4 + releaseVersion: 1.0.1 + configChecksum: 374a669373f10730cda1eb9a91d59b8b repoURL: https://github.com/mistralai/client-python.git installationURL: 
https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index c613fdbb..1d54a9ea 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.2 + version: 1.0.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 57bf6871..92bc8804 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -9,7 +9,7 @@ targets: outLocation: ./packages/mistralai_gcp mistralai-sdk: source: mistral-openapi - outLocation: /Users/gaspard/public-mistral/client-python + outLocation: /github/workspace/repo workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/RELEASES.md b/RELEASES.md index cb062cbf..e49882ce 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -8,4 +8,14 @@ Based on: ### Generated - [python v1.0.0] . ### Releases -- [PyPI v1.0.0] https://pypi.org/project/mistralai/1.0.0 - . \ No newline at end of file +- [PyPI v1.0.0] https://pypi.org/project/mistralai/1.0.0 - . + +## 2024-08-08 18:12:16 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.357.4 (2.390.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.0.1] . +### Releases +- [PyPI v1.0.1] https://pypi.org/project/mistralai/1.0.1 - . \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index e6db0795..810ec3d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.0.2" +version = "1.0.1" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README.md" diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index bd21306e..2a18bab6 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.2" + sdk_version: str = "1.0.1" gen_version: str = "2.390.6" - user_agent: str = "speakeasy-sdk/python 1.0.2 2.390.6 0.0.2 mistralai" + user_agent: str = "speakeasy-sdk/python 1.0.1 2.390.6 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From f769a074412e13ecb85239bcb1ac1dee600c4fff Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Fri, 16 Aug 2024 16:16:28 +0200 Subject: [PATCH 068/223] Fix AgentsCompletionStreamRequest (#132) * fix agents * update doc --- .speakeasy/gen.lock | 19 ++++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 33 +++++++++- docs/models/agentscompletionrequest.md | 2 +- docs/models/agentscompletionstreamrequest.md | 24 +++---- .../agentscompletionstreamrequestmessages.md | 23 +++++++ ...agentscompletionstreamrequesttoolchoice.md | 10 +++ docs/models/chatcompletionchoice.md | 10 +-- docs/models/chatcompletionrequest.md | 4 +- docs/models/chatcompletionstreamrequest.md | 4 +- docs/models/tool.md | 8 +-- docs/models/toolcall.md | 10 +-- docs/models/tooltooltypes.md | 8 +++ docs/models/tooltypes.md | 8 +++ docs/sdks/agents/README.md | 39 ++++++----- docs/sdks/chat/README.md | 12 ++-- docs/sdks/embeddings/README.md | 2 + docs/sdks/files/README.md | 8 +++ docs/sdks/fim/README.md | 4 ++ docs/sdks/jobs/README.md | 10 +++ docs/sdks/models/README.md | 12 ++++ examples/async_jobs.py | 4 +- packages/mistralai_azure/.speakeasy/gen.lock | 17 +++-- packages/mistralai_azure/.speakeasy/gen.yaml | 2 +- 
.../docs/models/chatcompletionchoice.md | 4 +- .../docs/models/chatcompletionrequest.md | 2 +- .../models/chatcompletionstreamrequest.md | 2 +- packages/mistralai_azure/docs/models/tool.md | 8 +-- .../mistralai_azure/docs/models/toolcall.md | 10 +-- .../docs/models/tooltooltypes.md | 8 +++ .../mistralai_azure/docs/models/tooltypes.md | 8 +++ packages/mistralai_azure/poetry.lock | 8 +-- packages/mistralai_azure/pyproject.toml | 4 +- packages/mistralai_azure/scripts/publish.sh | 2 + .../src/mistralai_azure/models/__init__.py | 6 +- .../models/chatcompletionchoice.py | 7 +- .../src/mistralai_azure/models/tool.py | 14 ++-- .../src/mistralai_azure/models/toolcall.py | 12 ++-- .../src/mistralai_azure/sdkconfiguration.py | 6 +- packages/mistralai_gcp/.speakeasy/gen.lock | 17 +++-- packages/mistralai_gcp/.speakeasy/gen.yaml | 2 +- .../docs/models/chatcompletionchoice.md | 4 +- .../docs/models/chatcompletionrequest.md | 4 +- .../models/chatcompletionstreamrequest.md | 4 +- packages/mistralai_gcp/docs/models/tool.md | 8 +-- .../mistralai_gcp/docs/models/toolcall.md | 10 +-- .../docs/models/tooltooltypes.md | 8 +++ .../mistralai_gcp/docs/models/tooltypes.md | 8 +++ packages/mistralai_gcp/poetry.lock | 8 +-- packages/mistralai_gcp/pyproject.toml | 4 +- packages/mistralai_gcp/scripts/publish.sh | 2 + .../mistralai_gcp/src/mistralai_gcp/chat.py | 8 +-- .../src/mistralai_gcp/models/__init__.py | 6 +- .../models/chatcompletionchoice.py | 7 +- .../models/chatcompletionrequest.py | 4 +- .../models/chatcompletionstreamrequest.py | 4 +- .../src/mistralai_gcp/models/tool.py | 14 ++-- .../src/mistralai_gcp/models/toolcall.py | 12 ++-- .../src/mistralai_gcp/sdkconfiguration.py | 6 +- pyproject.toml | 2 +- scripts/publish.sh | 2 + src/mistralai/agents.py | 62 +++++++++--------- src/mistralai/chat.py | 8 +-- src/mistralai/models/__init__.py | 8 +-- .../models/agentscompletionstreamrequest.py | 64 ++++++++++--------- src/mistralai/models/chatcompletionchoice.py | 7 +- 
src/mistralai/models/chatcompletionrequest.py | 4 +- .../models/chatcompletionstreamrequest.py | 4 +- src/mistralai/models/tool.py | 14 ++-- src/mistralai/models/toolcall.py | 12 ++-- src/mistralai/sdkconfiguration.py | 6 +- 71 files changed, 454 insertions(+), 255 deletions(-) create mode 100644 docs/models/agentscompletionstreamrequestmessages.md create mode 100644 docs/models/agentscompletionstreamrequesttoolchoice.md create mode 100644 docs/models/tooltooltypes.md create mode 100644 docs/models/tooltypes.md create mode 100644 packages/mistralai_azure/docs/models/tooltooltypes.md create mode 100644 packages/mistralai_azure/docs/models/tooltypes.md create mode 100644 packages/mistralai_gcp/docs/models/tooltooltypes.md create mode 100644 packages/mistralai_gcp/docs/models/tooltypes.md diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 0cbace74..bc40f704 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: a7c4268dd1228c969aecfd7cfdb6ca3c + docChecksum: c19f5a86b8045af32a46604ee5478061 docVersion: 0.0.2 - speakeasyVersion: 1.357.4 - generationVersion: 2.390.6 - releaseVersion: 1.0.1 - configChecksum: 374a669373f10730cda1eb9a91d59b8b + speakeasyVersion: 1.372.0 + generationVersion: 2.399.0 + releaseVersion: 1.1.3 + configChecksum: b9757e45cfabeceebf51f9a514724903 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,7 +14,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.3.7 + core: 5.3.8 defaultEnabledRetries: 0.2.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 @@ -27,13 +27,14 @@ features: multipartFileContentType: 1.0.0 nameOverrides: 3.0.0 nullables: 1.0.0 + openEnums: 1.0.0 responseFormat: 1.0.0 retries: 3.0.0 sdkHooks: 1.0.0 
serverEvents: 1.0.2 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.1 + unions: 3.0.2 uploadStreams: 1.0.0 generatedFiles: - src/mistralai/sdkconfiguration.py @@ -211,6 +212,7 @@ generatedFiles: - docs/models/chatcompletionchoice.md - docs/models/assistantmessagerole.md - docs/models/assistantmessage.md + - docs/models/tooltypes.md - docs/models/toolcall.md - docs/models/arguments.md - docs/models/functioncall.md @@ -219,6 +221,7 @@ generatedFiles: - docs/models/messages.md - docs/models/toolchoice.md - docs/models/chatcompletionrequest.md + - docs/models/tooltooltypes.md - docs/models/tool.md - docs/models/function.md - docs/models/responseformats.md @@ -252,6 +255,8 @@ generatedFiles: - docs/models/agentscompletionrequesttoolchoice.md - docs/models/agentscompletionrequest.md - docs/models/agentscompletionstreamrequeststop.md + - docs/models/agentscompletionstreamrequestmessages.md + - docs/models/agentscompletionstreamrequesttoolchoice.md - docs/models/agentscompletionstreamrequest.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 1d54a9ea..90ee2f7c 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.1 + version: 1.1.3 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 92bc8804..432ad608 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,15 +1,42 @@ -speakeasyVersion: 1.357.4 -sources: {} +speakeasyVersion: 1.372.0 +sources: + mistral-azure-source: + sourceNamespace: mistral-openapi-azure + sourceRevisionDigest: sha256:bc53dba5935490a409045de3c39ccf9e90243a289656dd538a542990aa376cca + sourceBlobDigest: sha256:4173c3be19775dd2bdd4ce28bb9ae6655650df75f2b689a44c3362d418d69d49 + tags: + - latest + mistral-google-cloud-source: + sourceNamespace: mistral-openapi-google-cloud + 
sourceRevisionDigest: sha256:ab52d75474e071db240ed9a5367dc6374867b5c9306d478dcfdf8f7b7d08607f + sourceBlobDigest: sha256:d5f9c665861d7fedd5093567d13e1f7f6a12b82137fbbecda4708007b15030ba + tags: + - latest + mistral-openapi: + sourceNamespace: mistral-openapi + sourceRevisionDigest: sha256:e4d5f5fe40e7f1141006ba40c1d85b743ce5dc2407635ca2e776ba0dfb00a398 + sourceBlobDigest: sha256:56f1bbe3a050c9505e003bb9790e443084922bff74b072805757076cdb8a136e + tags: + - latest targets: mistralai-azure-sdk: source: mistral-azure-source + sourceNamespace: mistral-openapi-azure + sourceRevisionDigest: sha256:bc53dba5935490a409045de3c39ccf9e90243a289656dd538a542990aa376cca + sourceBlobDigest: sha256:4173c3be19775dd2bdd4ce28bb9ae6655650df75f2b689a44c3362d418d69d49 outLocation: ./packages/mistralai_azure mistralai-gcp-sdk: source: mistral-google-cloud-source + sourceNamespace: mistral-openapi-google-cloud + sourceRevisionDigest: sha256:ab52d75474e071db240ed9a5367dc6374867b5c9306d478dcfdf8f7b7d08607f + sourceBlobDigest: sha256:d5f9c665861d7fedd5093567d13e1f7f6a12b82137fbbecda4708007b15030ba outLocation: ./packages/mistralai_gcp mistralai-sdk: source: mistral-openapi - outLocation: /github/workspace/repo + sourceNamespace: mistral-openapi + sourceRevisionDigest: sha256:e4d5f5fe40e7f1141006ba40c1d85b743ce5dc2407635ca2e776ba0dfb00a398 + sourceBlobDigest: sha256:56f1bbe3a050c9505e003bb9790e443084922bff74b072805757076cdb8a136e + outLocation: /Users/gaspard/public-mistral/client-python workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 2d0d6721..7f6c4283 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index c3187749..d849a95d 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequestmessages.md b/docs/models/agentscompletionstreamrequestmessages.md new file mode 100644 index 00000000..d8cf99e2 --- /dev/null +++ b/docs/models/agentscompletionstreamrequestmessages.md @@ -0,0 +1,23 @@ +# AgentsCompletionStreamRequestMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/agentscompletionstreamrequesttoolchoice.md 
b/docs/models/agentscompletionstreamrequesttoolchoice.md new file mode 100644 index 00000000..e761d1e1 --- /dev/null +++ b/docs/models/agentscompletionstreamrequesttoolchoice.md @@ -0,0 +1,10 @@ +# AgentsCompletionStreamRequestToolChoice + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | \ No newline at end of file diff --git a/docs/models/chatcompletionchoice.md b/docs/models/chatcompletionchoice.md index c916fc06..d77d286e 100644 --- a/docs/models/chatcompletionchoice.md +++ b/docs/models/chatcompletionchoice.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | -| `index` | *int* | :heavy_check_mark: | N/A | 0 | -| `finish_reason` | [models.FinishReason](../models/finishreason.md) | :heavy_check_mark: | N/A | stop | -| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | +| `finish_reason` | [models.FinishReason](../models/finishreason.md) | :heavy_check_mark: | N/A | stop | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md 
b/docs/models/chatcompletionrequest.md index cfb3596a..d22efc34 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -5,8 +5,8 @@ | Field | Type | Required | Description | Example | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 8c3a0bab..fd1fc484 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -5,8 +5,8 @@ | Field | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | diff --git a/docs/models/tool.md b/docs/models/tool.md index 291394c0..ca624a90 100644 --- a/docs/models/tool.md +++ b/docs/models/tool.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolToolTypes]](../models/tooltooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md index bd2dc9ff..7aca5fc9 
100644 --- a/docs/models/toolcall.md +++ b/docs/models/toolcall.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/tooltooltypes.md b/docs/models/tooltooltypes.md new file mode 100644 index 00000000..e3964307 --- /dev/null +++ b/docs/models/tooltooltypes.md @@ -0,0 +1,8 @@ +# ToolToolTypes + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `FUNCTION` | function | \ No newline at end of file diff --git a/docs/models/tooltypes.md b/docs/models/tooltypes.md new file mode 100644 index 00000000..84e49253 --- /dev/null +++ b/docs/models/tooltypes.md @@ -0,0 +1,8 @@ +# ToolTypes + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `FUNCTION` | function | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index e3981c13..167f0411 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -38,11 +38,13 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | 
Description | Example | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | @@ -80,7 +82,12 @@ s = Mistral( ) -res = s.agents.stream(model="codestral-2405", prompt="def", suffix="return a+b") +res = s.agents.stream(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, +], agent_id="") if res is not None: for event in res: @@ -89,21 +96,23 @@ if res is not None: ``` + + ### Parameters -| Parameter | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index e941104c..aaa828ec 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -38,12 +38,14 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | Example | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | @@ -97,12 +99,14 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index ee46f9b2..2f9f2c72 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -32,6 +32,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 897556fe..ec90fd37 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -42,6 +42,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | @@ -82,6 +84,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | @@ -121,6 +125,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | @@ -161,6 +167,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 784b5213..ef8b1dc6 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -33,6 +33,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | Example 
| @@ -84,6 +86,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | Example | diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 3366c731..b0926f68 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -32,6 +32,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | @@ -80,6 +82,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | @@ -127,6 +131,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | @@ -167,6 +173,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | @@ -207,6 +215,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 051aa53c..00fca08b 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -37,6 +37,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | @@ -77,6 +79,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | Example | @@ -118,6 +122,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | Example | @@ -159,6 +165,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | Example | @@ -201,6 +209,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | Example | @@ -241,6 +251,8 @@ if res is not None: ``` + + ### Parameters | Parameter | Type | Required | Description | Example | diff --git a/examples/async_jobs.py b/examples/async_jobs.py index b1f9e3bf..e54c890f 100644 --- a/examples/async_jobs.py +++ b/examples/async_jobs.py @@ -10,7 +10,9 @@ async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = Mistral(api_key=api_key) + client = Mistral( + 
api_key=api_key, server_url="http://0.0.0.0:8882/"  # NOTE(review): removed hardcoded API key committed here — rotate the leaked credential; key must come from MISTRAL_API_KEY env var + ) # Create new files with open("examples/file.jsonl", "rb") as f: diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 78232812..5d2740c4 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,18 +1,18 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: ec02d5407fd9354b416518c4b8fa8b95 + docChecksum: 0f6edfd8ad8df6c49b3d429d1af7b9e2 docVersion: 0.0.2 - speakeasyVersion: 1.357.4 - generationVersion: 2.390.6 - releaseVersion: 1.0.0-rc.4 - configChecksum: ad8d0273f78dacd83fbba33510acd0a5 + speakeasyVersion: 1.372.0 + generationVersion: 2.399.0 + releaseVersion: 1.0.0-rc.7 + configChecksum: d14083a6bfd01e2d81264338ac4ed619 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.3.7 + core: 5.3.8 defaultEnabledRetries: 0.2.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 @@ -23,13 +23,14 @@ features: globalServerURLs: 3.0.0 nameOverrides: 3.0.0 nullables: 1.0.0 + openEnums: 1.0.0 responseFormat: 1.0.0 retries: 3.0.0 sdkHooks: 1.0.0 serverEvents: 1.0.2 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.1 + unions: 3.0.2 generatedFiles: - src/mistralai_azure/sdkconfiguration.py - src/mistralai_azure/chat.py @@ -92,6 +93,7 @@ generatedFiles: - docs/models/finishreason.md - docs/models/completionresponsestreamchoice.md - docs/models/deltamessage.md + - docs/models/tooltypes.md - docs/models/toolcall.md - docs/models/arguments.md - docs/models/functioncall.md @@ -103,6 +105,7 @@ generatedFiles: - docs/models/messages.md - docs/models/toolchoice.md - docs/models/chatcompletionstreamrequest.md + - docs/models/tooltooltypes.md - docs/models/tool.md - docs/models/function.md - docs/models/responseformats.md diff --git 
a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index be4280bc..ec9945e2 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.0-rc.4 + version: 1.0.0-rc.7 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoice.md b/packages/mistralai_azure/docs/models/chatcompletionchoice.md index 6fa839b7..deaa0ea0 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionchoice.md +++ b/packages/mistralai_azure/docs/models/chatcompletionchoice.md @@ -6,5 +6,5 @@ | Field | Type | Required | Description | Example | | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | | `index` | *int* | :heavy_check_mark: | N/A | 0 | -| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | -| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index 
3df1e28e..307b2796 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) 
to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index 1fc34709..5ed2e2bc 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_azure/docs/models/tool.md b/packages/mistralai_azure/docs/models/tool.md index 291394c0..ca624a90 100644 --- a/packages/mistralai_azure/docs/models/tool.md +++ b/packages/mistralai_azure/docs/models/tool.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolToolTypes]](../models/tooltooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolcall.md 
b/packages/mistralai_azure/docs/models/toolcall.md index bd2dc9ff..7aca5fc9 100644 --- a/packages/mistralai_azure/docs/models/toolcall.md +++ b/packages/mistralai_azure/docs/models/toolcall.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/tooltooltypes.md b/packages/mistralai_azure/docs/models/tooltooltypes.md new file mode 100644 index 00000000..e3964307 --- /dev/null +++ b/packages/mistralai_azure/docs/models/tooltooltypes.md @@ -0,0 +1,8 @@ +# ToolToolTypes + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `FUNCTION` | function | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/tooltypes.md b/packages/mistralai_azure/docs/models/tooltypes.md new file mode 100644 index 00000000..84e49253 --- /dev/null +++ b/packages/mistralai_azure/docs/models/tooltypes.md @@ -0,0 +1,8 @@ +# ToolTypes + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `FUNCTION` | function | \ No newline 
at end of file diff --git a/packages/mistralai_azure/poetry.lock b/packages/mistralai_azure/poetry.lock index 457c8ecc..2e5fecff 100644 --- a/packages/mistralai_azure/poetry.lock +++ b/packages/mistralai_azure/poetry.lock @@ -553,13 +553,13 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] [[package]] name = "python-dateutil" -version = "2.9.0.post0" +version = "2.8.2" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, - {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] [package.dependencies] @@ -649,4 +649,4 @@ typing-extensions = ">=3.7.4" [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "c6fe50d9865be14321ec4828bf746f43c421f79507e9956b4e45ee6601fd1f0d" +content-hash = "85499d03f45cd26302b8b267be44478c701581e8a56a3df0907bb38897fdb2e4" diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 2253ab3d..f4cf5b58 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai_azure" -version = "1.0.0-rc.4" +version = "1.0.0-rc.7" description = "Python Client SDK for the Mistral AI API in Azure." 
authors = ["Mistral",] readme = "README-PYPI.md" @@ -21,7 +21,7 @@ eval-type-backport = "^0.2.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" pydantic = "~2.8.2" -python-dateutil = "^2.9.0.post0" +python-dateutil = "2.8.2" typing-inspect = "^0.9.0" [tool.poetry.group.dev.dependencies] diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index 1ee7194c..ab45b1f9 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -2,4 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} +poetry run python scripts/prepare-readme.py + poetry publish --build --skip-existing diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index a102b139..710fe565 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -18,11 +18,11 @@ from .security import Security, SecurityTypedDict from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict +from .tool import Tool, ToolToolTypes, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict, ToolTypes from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict -__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", 
"ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"] +__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", 
"CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py index acfd5bb3..91995452 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py @@ -3,20 +3,19 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from mistralai_azure.types import BaseModel -from typing import Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Literal, TypedDict ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] class ChatCompletionChoiceTypedDict(TypedDict): index: int + message: AssistantMessageTypedDict finish_reason: ChatCompletionChoiceFinishReason - message: NotRequired[AssistantMessageTypedDict] class 
ChatCompletionChoice(BaseModel): index: int + message: AssistantMessage finish_reason: ChatCompletionChoiceFinishReason - message: Optional[AssistantMessage] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tool.py b/packages/mistralai_azure/src/mistralai_azure/models/tool.py index e77c77df..48c5ba8e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/tool.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/tool.py @@ -2,17 +2,21 @@ from __future__ import annotations from .function import Function, FunctionTypedDict -from mistralai_azure.types import BaseModel -import pydantic -from typing import Final, Optional, TypedDict -from typing_extensions import Annotated +from mistralai_azure.types import BaseModel, UnrecognizedStr +from mistralai_azure.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired +ToolToolTypes = Union[Literal["function"], UnrecognizedStr] + class ToolTypedDict(TypedDict): function: FunctionTypedDict + type: NotRequired[ToolToolTypes] class Tool(BaseModel): function: Function - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + type: Annotated[Optional[ToolToolTypes], PlainValidator(validate_open_enum(False))] = "function" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index f15bee96..578d6ffc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -2,19 +2,23 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict -from mistralai_azure.types import BaseModel -import pydantic -from typing import Final, Optional, TypedDict +from mistralai_azure.types import BaseModel, UnrecognizedStr 
+from mistralai_azure.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Optional, TypedDict, Union from typing_extensions import Annotated, NotRequired +ToolTypes = Union[Literal["function"], UnrecognizedStr] + class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] + type: NotRequired[ToolTypes] class ToolCall(BaseModel): function: FunctionCall id: Optional[str] = "null" - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = "function" diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index b6cff2ab..b0a94cc5 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.0-rc.4" - gen_version: str = "2.390.6" - user_agent: str = "speakeasy-sdk/python 1.0.0-rc.4 2.390.6 0.0.2 mistralai_azure" + sdk_version: str = "1.0.0-rc.7" + gen_version: str = "2.399.0" + user_agent: str = "speakeasy-sdk/python 1.0.0-rc.7 2.399.0 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index c28b2183..1b9cf794 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,18 +1,18 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: e2bc44269918d569bbc51b1521c4c29b + docChecksum: 4cc6e7c5c5ba15491872c600d4a247ef docVersion: 0.0.2 - speakeasyVersion: 1.357.4 - 
generationVersion: 2.390.6 - releaseVersion: 1.0.0-rc.4 - configChecksum: a8248e0ef5bdbc73910c2aae86c3c3b5 + speakeasyVersion: 1.372.0 + generationVersion: 2.399.0 + releaseVersion: 1.0.0-rc.8 + configChecksum: 1830c00a5e810fe954553fe55fdf9b71 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.3.7 + core: 5.3.8 defaultEnabledRetries: 0.2.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 @@ -23,13 +23,14 @@ features: globalServerURLs: 3.0.0 nameOverrides: 3.0.0 nullables: 1.0.0 + openEnums: 1.0.0 responseFormat: 1.0.0 retries: 3.0.0 sdkHooks: 1.0.0 serverEvents: 1.0.2 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.1 + unions: 3.0.2 generatedFiles: - src/mistralai_gcp/sdkconfiguration.py - src/mistralai_gcp/chat.py @@ -96,6 +97,7 @@ generatedFiles: - docs/models/finishreason.md - docs/models/completionresponsestreamchoice.md - docs/models/deltamessage.md + - docs/models/tooltypes.md - docs/models/toolcall.md - docs/models/arguments.md - docs/models/functioncall.md @@ -107,6 +109,7 @@ generatedFiles: - docs/models/messages.md - docs/models/toolchoice.md - docs/models/chatcompletionstreamrequest.md + - docs/models/tooltooltypes.md - docs/models/tool.md - docs/models/function.md - docs/models/responseformats.md diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 8e6ae7b5..7ddc742a 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.0-rc.4 + version: 1.0.0-rc.8 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionchoice.md index 6fa839b7..deaa0ea0 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionchoice.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionchoice.md @@ -6,5 +6,5 @@ | 
Field | Type | Required | Description | Example | | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | | `index` | *int* | :heavy_check_mark: | N/A | 0 | -| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | -| `message` | [Optional[models.AssistantMessage]](../models/assistantmessage.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index 3e30c649..fb3bfb42 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -5,8 +5,8 @@ | Field | Type | Required | Description | Example | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index adc7ff99..d7b7fe98 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -5,8 +5,8 @@ | Field | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | {
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
} | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/mistralai_gcp/docs/models/tool.md index 291394c0..ca624a90 100644 --- a/packages/mistralai_gcp/docs/models/tool.md +++ b/packages/mistralai_gcp/docs/models/tool.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolToolTypes]](../models/tooltooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of 
file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md index bd2dc9ff..7aca5fc9 100644 --- a/packages/mistralai_gcp/docs/models/toolcall.md +++ b/packages/mistralai_gcp/docs/models/toolcall.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/tooltooltypes.md b/packages/mistralai_gcp/docs/models/tooltooltypes.md new file mode 100644 index 00000000..e3964307 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/tooltooltypes.md @@ -0,0 +1,8 @@ +# ToolToolTypes + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `FUNCTION` | function | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/tooltypes.md b/packages/mistralai_gcp/docs/models/tooltypes.md new file mode 100644 index 00000000..84e49253 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/tooltypes.md @@ -0,0 +1,8 @@ +# ToolTypes + + +## Values + +| Name | Value | +| ---------- | 
---------- | +| `FUNCTION` | function | \ No newline at end of file diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock index befb32f7..8a625e2f 100644 --- a/packages/mistralai_gcp/poetry.lock +++ b/packages/mistralai_gcp/poetry.lock @@ -711,13 +711,13 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] [[package]] name = "python-dateutil" -version = "2.9.0.post0" +version = "2.8.2" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, - {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] [package.dependencies] @@ -859,4 +859,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "f7ec8ed73d60233b1bf0450f38af7d51c9dfe088ae0a0b8ee975ba2ae512d817" +content-hash = "c693a1bfd23435953d0a7305446907287d0d66ba881c76188dca0a9eefc7a1b6" diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 90afd369..7e7a2a1b 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai-gcp" -version = "1.0.0-rc.4" +version = "1.0.0-rc.8" description = "Python Client SDK for the Mistral AI API in GCP." 
authors = ["Mistral",] readme = "README-PYPI.md" @@ -22,7 +22,7 @@ google-auth = "^2.31.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" pydantic = "~2.8.2" -python-dateutil = "^2.9.0.post0" +python-dateutil = "2.8.2" requests = "^2.32.3" typing-inspect = "^0.9.0" diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index 1ee7194c..ab45b1f9 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -2,4 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} +poetry run python scripts/prepare-readme.py + poetry publish --build --skip-existing diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index d9ad7bcc..7a1e7f7e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -33,7 +33,7 @@ def stream( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param temperature: What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -146,7 +146,7 @@ async def stream_async( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
@@ -257,7 +257,7 @@ def complete( ) -> Optional[models.ChatCompletionResponse]: r"""Chat Completion - :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -367,7 +367,7 @@ async def complete_async( ) -> Optional[models.ChatCompletionResponse]: r"""Chat Completion - :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param temperature: What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index 79fb7c98..7c8c1f4a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -21,11 +21,11 @@ from .security import Security, SecurityTypedDict from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict +from .tool import Tool, ToolToolTypes, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict, ToolTypes from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict -__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", 
"ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"] +__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", 
"ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py index d868422a..67ff1f50 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py @@ -3,20 +3,19 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from mistralai_gcp.types import BaseModel -from typing import Literal, 
Optional, TypedDict -from typing_extensions import NotRequired +from typing import Literal, TypedDict ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] class ChatCompletionChoiceTypedDict(TypedDict): index: int + message: AssistantMessageTypedDict finish_reason: ChatCompletionChoiceFinishReason - message: NotRequired[AssistantMessageTypedDict] class ChatCompletionChoice(BaseModel): index: int + message: AssistantMessage finish_reason: ChatCompletionChoiceFinishReason - message: Optional[AssistantMessage] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index a1053599..45f61e7f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -32,7 +32,7 @@ class ChatCompletionRequestTypedDict(TypedDict): model: Nullable[str] - r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[float] @@ -56,7 +56,7 @@ class ChatCompletionRequestTypedDict(TypedDict): class ChatCompletionRequest(BaseModel): model: Nullable[str] - r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: Optional[float] = 0.7 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index ecf8393a..a07f71e2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -32,7 +32,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): model: Nullable[str] - r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[float] @@ -55,7 +55,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): class ChatCompletionStreamRequest(BaseModel): model: Nullable[str] - r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: Optional[float] = 0.7 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py index b4e0645f..2e860d9f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py @@ -2,17 +2,21 @@ from __future__ import annotations from .function import Function, FunctionTypedDict -from mistralai_gcp.types import BaseModel -import pydantic -from typing import Final, Optional, TypedDict -from typing_extensions import Annotated +from mistralai_gcp.types import BaseModel, UnrecognizedStr +from mistralai_gcp.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired +ToolToolTypes = Union[Literal["function"], UnrecognizedStr] + class ToolTypedDict(TypedDict): function: FunctionTypedDict + type: NotRequired[ToolToolTypes] class Tool(BaseModel): function: Function - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + type: Annotated[Optional[ToolToolTypes], PlainValidator(validate_open_enum(False))] = "function" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py index 5ea87fd3..7f22889b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -2,19 +2,23 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict -from 
mistralai_gcp.types import BaseModel -import pydantic -from typing import Final, Optional, TypedDict +from mistralai_gcp.types import BaseModel, UnrecognizedStr +from mistralai_gcp.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Optional, TypedDict, Union from typing_extensions import Annotated, NotRequired +ToolTypes = Union[Literal["function"], UnrecognizedStr] + class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] + type: NotRequired[ToolTypes] class ToolCall(BaseModel): function: FunctionCall id: Optional[str] = "null" - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = "function" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index 94d271b9..9b354d3c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.0-rc.4" - gen_version: str = "2.390.6" - user_agent: str = "speakeasy-sdk/python 1.0.0-rc.4 2.390.6 0.0.2 mistralai-gcp" + sdk_version: str = "1.0.0-rc.8" + gen_version: str = "2.399.0" + user_agent: str = "speakeasy-sdk/python 1.0.0-rc.8 2.399.0 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/pyproject.toml b/pyproject.toml index 810ec3d4..d8b78541 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.0.1" +version = "1.1.3" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README.md" diff --git a/scripts/publish.sh b/scripts/publish.sh index 1ee7194c..ab45b1f9 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -2,4 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} +poetry run python scripts/prepare-readme.py + poetry publish --build --skip-existing diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index e1c87040..aa291254 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext -from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, AsyncGenerator, Generator, List, Optional, Union @@ -221,16 +221,16 @@ async def complete_async( def stream( self, *, - model: Nullable[str], - prompt: str, - temperature: Optional[float] = 0.7, - top_p: Optional[float] = 1, + messages: Union[List[models.AgentsCompletionStreamRequestMessages], List[models.AgentsCompletionStreamRequestMessagesTypedDict]], + agent_id: str, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - suffix: OptionalNullable[str] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.AgentsCompletionStreamRequestToolChoice] = "auto", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -239,16 +239,16 @@ def stream( Mistral AI provides the ability to stream responses back to a client 
in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
+ :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -262,16 +262,16 @@ def stream( base_url = server_url request = models.AgentsCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, max_tokens=max_tokens, min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, - prompt=prompt, - suffix=suffix, + messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionStreamRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + agent_id=agent_id, ) req = self.build_request( @@ -328,16 +328,16 @@ def stream( async def stream_async( self, *, - model: Nullable[str], - prompt: str, - temperature: Optional[float] = 0.7, - top_p: Optional[float] = 1, + messages: Union[List[models.AgentsCompletionStreamRequestMessages], List[models.AgentsCompletionStreamRequestMessagesTypedDict]], + agent_id: str, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - suffix: OptionalNullable[str] = UNSET, + response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, + tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, + tool_choice: Optional[models.AgentsCompletionStreamRequestToolChoice] = "auto", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -346,16 
+346,16 @@ async def stream_async( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -369,16 +369,16 @@ async def stream_async( base_url = server_url request = models.AgentsCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, max_tokens=max_tokens, min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, - prompt=prompt, - suffix=suffix, + messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionStreamRequestMessages]), + response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=tool_choice, + agent_id=agent_id, ) req = self.build_request( diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 1323be20..e83fc34a 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -32,7 +32,7 @@ def complete( ) -> Optional[models.ChatCompletionResponse]: r"""Chat Completion - :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param temperature: What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -145,7 +145,7 @@ async def complete_async( ) -> Optional[models.ChatCompletionResponse]: r"""Chat Completion - :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -260,7 +260,7 @@ def stream( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -376,7 +376,7 @@ async def stream_async( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param model: ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index f3162705..cb21b8f3 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from .agentscompletionrequest import AgentsCompletionRequest, AgentsCompletionRequestMessages, AgentsCompletionRequestMessagesTypedDict, AgentsCompletionRequestStop, AgentsCompletionRequestStopTypedDict, AgentsCompletionRequestToolChoice, AgentsCompletionRequestTypedDict -from .agentscompletionstreamrequest import AgentsCompletionStreamRequest, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, AgentsCompletionStreamRequestTypedDict +from .agentscompletionstreamrequest import AgentsCompletionStreamRequest, AgentsCompletionStreamRequestMessages, AgentsCompletionStreamRequestMessagesTypedDict, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, AgentsCompletionStreamRequestToolChoice, AgentsCompletionStreamRequestTypedDict from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutTypedDict from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict, FinishReason @@ -64,8 +64,8 @@ from .source import Source from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict +from .tool import Tool, ToolToolTypes, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict, ToolTypes from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict from .trainingfile import TrainingFile, TrainingFileTypedDict from .trainingparameters import TrainingParameters, TrainingParametersTypedDict @@ -79,4 +79,4 @@ from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", 
"AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", 
"FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelOut", "FTModelOutTypedDict", "File", "FileSchema", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModel", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Inputs", "InputsTypedDict", "JobIn", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", 
"MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelCard", "ModelCardTypedDict", "ModelList", "ModelListTypedDict", "QueryParamStatus", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "Role", "SDKError", "SampleType", "Security", "SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "UnarchiveFTModelOut", "UnarchiveFTModelOutTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationTypedDict"] +__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", "AgentsCompletionStreamRequestMessages", "AgentsCompletionStreamRequestMessagesTypedDict", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", 
"AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelOut", "FTModelOutTypedDict", "File", "FileSchema", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", 
"FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModel", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Inputs", "InputsTypedDict", "JobIn", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelCard", "ModelCardTypedDict", "ModelList", "ModelListTypedDict", "QueryParamStatus", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "Role", "SDKError", "SampleType", "Security", 
"SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "UnarchiveFTModelOut", "UnarchiveFTModelOutTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationTypedDict"] diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index 91b5bcb2..9398081d 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -1,10 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .tool import Tool, ToolTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import NotRequired +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired AgentsCompletionStreamRequestStopTypedDict = Union[str, List[str]] @@ -15,18 +21,19 @@ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" +AgentsCompletionStreamRequestMessagesTypedDict = Union[UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] + + +AgentsCompletionStreamRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] + + +AgentsCompletionStreamRequestToolChoice = Literal["auto", "none", "any"] + class AgentsCompletionStreamRequestTypedDict(TypedDict): - model: Nullable[str] - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ - prompt: str - r"""The text/code to complete.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + messages: List[AgentsCompletionStreamRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" min_tokens: NotRequired[Nullable[int]] @@ -36,22 +43,16 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + response_format: NotRequired[ResponseFormatTypedDict] + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoice] class AgentsCompletionStreamRequest(BaseModel): - model: Nullable[str] - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ - prompt: str - r"""The text/code to complete.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" - top_p: Optional[float] = 1 - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + messages: List[AgentsCompletionStreamRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" min_tokens: OptionalNullable[int] = UNSET @@ -61,13 +62,14 @@ class AgentsCompletionStreamRequest(BaseModel): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET + tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = "auto" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] + optional_fields = ["max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py index 04d2350a..748dbc1b 100644 --- a/src/mistralai/models/chatcompletionchoice.py +++ b/src/mistralai/models/chatcompletionchoice.py @@ -3,20 +3,19 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from mistralai.types import BaseModel -from typing import Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Literal, TypedDict FinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] class ChatCompletionChoiceTypedDict(TypedDict): index: int + message: AssistantMessageTypedDict finish_reason: FinishReason - message: NotRequired[AssistantMessageTypedDict] class ChatCompletionChoice(BaseModel): index: int + message: AssistantMessage finish_reason: FinishReason - message: Optional[AssistantMessage] = None diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index bf6baf25..e3440b7b 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -32,7 +32,7 @@ class 
ChatCompletionRequestTypedDict(TypedDict): model: Nullable[str] - r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[float] @@ -58,7 +58,7 @@ class ChatCompletionRequestTypedDict(TypedDict): class ChatCompletionRequest(BaseModel): model: Nullable[str] - r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: Optional[float] = 0.7 diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 9e2ae401..992584dc 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -32,7 +32,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): model: Nullable[str] - r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionStreamRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[float] @@ -57,7 +57,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): class ChatCompletionStreamRequest(BaseModel): model: Nullable[str] - r"""ID of the model to use. You can use the [List Available Models](/api#operation/listModels) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionStreamRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: Optional[float] = 0.7 diff --git a/src/mistralai/models/tool.py b/src/mistralai/models/tool.py index c790e637..3a3ccdf8 100644 --- a/src/mistralai/models/tool.py +++ b/src/mistralai/models/tool.py @@ -2,17 +2,21 @@ from __future__ import annotations from .function import Function, FunctionTypedDict -from mistralai.types import BaseModel -import pydantic -from typing import Final, Optional, TypedDict -from typing_extensions import Annotated +from mistralai.types import BaseModel, UnrecognizedStr +from mistralai.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired +ToolToolTypes = Union[Literal["function"], UnrecognizedStr] + class ToolTypedDict(TypedDict): function: FunctionTypedDict + type: 
NotRequired[ToolToolTypes] class Tool(BaseModel): function: Function - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + type: Annotated[Optional[ToolToolTypes], PlainValidator(validate_open_enum(False))] = "function" diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py index 2afd453d..4842aff2 100644 --- a/src/mistralai/models/toolcall.py +++ b/src/mistralai/models/toolcall.py @@ -2,19 +2,23 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict -from mistralai.types import BaseModel -import pydantic -from typing import Final, Optional, TypedDict +from mistralai.types import BaseModel, UnrecognizedStr +from mistralai.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Optional, TypedDict, Union from typing_extensions import Annotated, NotRequired +ToolTypes = Union[Literal["function"], UnrecognizedStr] + class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] + type: NotRequired[ToolTypes] class ToolCall(BaseModel): function: FunctionCall id: Optional[str] = "null" - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "function" # type: ignore + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = "function" diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 2a18bab6..5baafe9e 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.1" - gen_version: str = "2.390.6" - user_agent: str = "speakeasy-sdk/python 1.0.1 2.390.6 0.0.2 mistralai" + sdk_version: str = "1.1.3" + gen_version: str = "2.399.0" + user_agent: str = "speakeasy-sdk/python 1.1.3 2.399.0 0.0.2 mistralai" retry_config: 
OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From e2ad43058c193861ca1de8b37f45df223f7a13f3 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Mon, 19 Aug 2024 09:50:27 +0200 Subject: [PATCH 069/223] Fix streaming response with empty content (#135) * fix streaming response with empty content * fix example --- .speakeasy/gen.lock | 8 +++--- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 26 +++++++++---------- docs/models/deltamessage.md | 2 +- examples/async_jobs.py | 4 +-- packages/mistralai_azure/.speakeasy/gen.lock | 8 +++--- packages/mistralai_azure/.speakeasy/gen.yaml | 2 +- .../docs/models/deltamessage.md | 2 +- packages/mistralai_azure/pyproject.toml | 2 +- .../mistralai_azure/models/deltamessage.py | 6 ++--- .../src/mistralai_azure/sdkconfiguration.py | 4 +-- packages/mistralai_gcp/.speakeasy/gen.lock | 8 +++--- packages/mistralai_gcp/.speakeasy/gen.yaml | 2 +- .../mistralai_gcp/docs/models/deltamessage.md | 2 +- packages/mistralai_gcp/pyproject.toml | 2 +- .../src/mistralai_gcp/models/deltamessage.py | 6 ++--- .../src/mistralai_gcp/sdkconfiguration.py | 4 +-- pyproject.toml | 2 +- src/mistralai/models/deltamessage.py | 6 ++--- src/mistralai/sdkconfiguration.py | 4 +-- 20 files changed, 50 insertions(+), 52 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index bc40f704..e7aad22b 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: c19f5a86b8045af32a46604ee5478061 + docChecksum: 87135817a5a746d7466c41070e5f581e docVersion: 0.0.2 - speakeasyVersion: 1.372.0 + speakeasyVersion: 1.373.1 generationVersion: 2.399.0 - releaseVersion: 1.1.3 - configChecksum: b9757e45cfabeceebf51f9a514724903 + releaseVersion: 1.0.1 + configChecksum: 374a669373f10730cda1eb9a91d59b8b repoURL: 
https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 90ee2f7c..1d54a9ea 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.1.3 + version: 1.0.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 432ad608..db02e2a8 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,41 +1,41 @@ -speakeasyVersion: 1.372.0 +speakeasyVersion: 1.373.1 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:bc53dba5935490a409045de3c39ccf9e90243a289656dd538a542990aa376cca - sourceBlobDigest: sha256:4173c3be19775dd2bdd4ce28bb9ae6655650df75f2b689a44c3362d418d69d49 + sourceRevisionDigest: sha256:becb324b11dfc5155aa0cc420ca312d0af5aecfcbad22fe90066a09561ae4e6a + sourceBlobDigest: sha256:84928a6297c3a838dce719ffa3da1e221cba968ce4a6c74d5c3bb41bf86a7e5d tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:ab52d75474e071db240ed9a5367dc6374867b5c9306d478dcfdf8f7b7d08607f - sourceBlobDigest: sha256:d5f9c665861d7fedd5093567d13e1f7f6a12b82137fbbecda4708007b15030ba + sourceRevisionDigest: sha256:7fee22ae1a434b8919112c7feae87af7f1378952fcc6bde081deb55f65e5bfc2 + sourceBlobDigest: sha256:a4c011f461c73809a7d6cf1c9823d3c51d5050895aad246287ff14ac971efb8c tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:e4d5f5fe40e7f1141006ba40c1d85b743ce5dc2407635ca2e776ba0dfb00a398 - sourceBlobDigest: sha256:56f1bbe3a050c9505e003bb9790e443084922bff74b072805757076cdb8a136e + sourceRevisionDigest: 
sha256:088d899162941380ec90445852dc7e8c65a8e2eab6b32f552fd7f4fc6f152e76 + sourceBlobDigest: sha256:feb2a952c0f5757a656e8fed5614e28bc4da195cbeb548b5aaf4fc09aee4ddac tags: - latest targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:bc53dba5935490a409045de3c39ccf9e90243a289656dd538a542990aa376cca - sourceBlobDigest: sha256:4173c3be19775dd2bdd4ce28bb9ae6655650df75f2b689a44c3362d418d69d49 + sourceRevisionDigest: sha256:becb324b11dfc5155aa0cc420ca312d0af5aecfcbad22fe90066a09561ae4e6a + sourceBlobDigest: sha256:84928a6297c3a838dce719ffa3da1e221cba968ce4a6c74d5c3bb41bf86a7e5d outLocation: ./packages/mistralai_azure mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:ab52d75474e071db240ed9a5367dc6374867b5c9306d478dcfdf8f7b7d08607f - sourceBlobDigest: sha256:d5f9c665861d7fedd5093567d13e1f7f6a12b82137fbbecda4708007b15030ba + sourceRevisionDigest: sha256:7fee22ae1a434b8919112c7feae87af7f1378952fcc6bde081deb55f65e5bfc2 + sourceBlobDigest: sha256:a4c011f461c73809a7d6cf1c9823d3c51d5050895aad246287ff14ac971efb8c outLocation: ./packages/mistralai_gcp mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:e4d5f5fe40e7f1141006ba40c1d85b743ce5dc2407635ca2e776ba0dfb00a398 - sourceBlobDigest: sha256:56f1bbe3a050c9505e003bb9790e443084922bff74b072805757076cdb8a136e + sourceRevisionDigest: sha256:088d899162941380ec90445852dc7e8c65a8e2eab6b32f552fd7f4fc6f152e76 + sourceBlobDigest: sha256:feb2a952c0f5757a656e8fed5614e28bc4da195cbeb548b5aaf4fc09aee4ddac outLocation: /Users/gaspard/public-mistral/client-python workflow: workflowVersion: 1.0.0 diff --git a/docs/models/deltamessage.md b/docs/models/deltamessage.md index 9ad068b8..d32f8e10 100644 --- a/docs/models/deltamessage.md +++ b/docs/models/deltamessage.md @@ -6,5 +6,5 @@ | Field | Type | Required | Description | | 
---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | | `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/examples/async_jobs.py b/examples/async_jobs.py index e54c890f..b1f9e3bf 100644 --- a/examples/async_jobs.py +++ b/examples/async_jobs.py @@ -10,9 +10,7 @@ async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = Mistral( - api_key="gpN4hC0YOSdZoBbzRcWNyALyMnNOT9jj", server_url="http://0.0.0.0:8882/" - ) + client = Mistral(api_key=api_key) # Create new files with open("examples/file.jsonl", "rb") as f: diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 5d2740c4..9a1dd1fd 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 0f6edfd8ad8df6c49b3d429d1af7b9e2 + docChecksum: 9128fabc3ae45ecc9ed7fae8991b3d3e docVersion: 0.0.2 - speakeasyVersion: 1.372.0 + speakeasyVersion: 1.373.1 generationVersion: 2.399.0 - releaseVersion: 1.0.0-rc.7 - configChecksum: d14083a6bfd01e2d81264338ac4ed619 + releaseVersion: 1.0.1 + configChecksum: dc28e30e8f503aee23a53bb77a46c902 published: true features: python: diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index ec9945e2..0660e8cc 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: 
true python: - version: 1.0.0-rc.7 + version: 1.0.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_azure/docs/models/deltamessage.md b/packages/mistralai_azure/docs/models/deltamessage.md index 9ad068b8..d32f8e10 100644 --- a/packages/mistralai_azure/docs/models/deltamessage.md +++ b/packages/mistralai_azure/docs/models/deltamessage.md @@ -6,5 +6,5 @@ | Field | Type | Required | Description | | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | | `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index f4cf5b58..c72b384e 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai_azure" -version = "1.0.0-rc.7" +version = "1.0.1" description = "Python Client SDK for the Mistral AI API in Azure." 
authors = ["Mistral",] readme = "README-PYPI.md" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py index 3d763df8..4f9f395c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py @@ -10,19 +10,19 @@ class DeltaMessageTypedDict(TypedDict): role: NotRequired[str] - content: NotRequired[str] + content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: Optional[str] = None - content: Optional[str] = None + content: OptionalNullable[str] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["tool_calls"] + nullable_fields = ["content", "tool_calls"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index b0a94cc5..4a6f93b7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.0-rc.7" + sdk_version: str = "1.0.1" gen_version: str = "2.399.0" - user_agent: str = "speakeasy-sdk/python 1.0.0-rc.7 2.399.0 0.0.2 mistralai_azure" + user_agent: str = "speakeasy-sdk/python 1.0.1 2.399.0 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 1b9cf794..4dcdd752 100644 --- 
a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 4cc6e7c5c5ba15491872c600d4a247ef + docChecksum: b276b71cb6764f11b10461fe70962781 docVersion: 0.0.2 - speakeasyVersion: 1.372.0 + speakeasyVersion: 1.373.1 generationVersion: 2.399.0 - releaseVersion: 1.0.0-rc.8 - configChecksum: 1830c00a5e810fe954553fe55fdf9b71 + releaseVersion: 1.0.1 + configChecksum: 698bd633a47a664d6f3515c1b9ecdbaa published: true features: python: diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 7ddc742a..a857a5d0 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.0-rc.8 + version: 1.0.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_gcp/docs/models/deltamessage.md b/packages/mistralai_gcp/docs/models/deltamessage.md index 9ad068b8..d32f8e10 100644 --- a/packages/mistralai_gcp/docs/models/deltamessage.md +++ b/packages/mistralai_gcp/docs/models/deltamessage.md @@ -6,5 +6,5 @@ | Field | Type | Required | Description | | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | | `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 7e7a2a1b..c169af4f 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml 
@@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai-gcp" -version = "1.0.0-rc.8" +version = "1.0.1" description = "Python Client SDK for the Mistral AI API in GCP." authors = ["Mistral",] readme = "README-PYPI.md" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py index 30de9e7b..763b48ec 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -10,19 +10,19 @@ class DeltaMessageTypedDict(TypedDict): role: NotRequired[str] - content: NotRequired[str] + content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: Optional[str] = None - content: Optional[str] = None + content: OptionalNullable[str] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["tool_calls"] + nullable_fields = ["content", "tool_calls"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index 9b354d3c..54b73aa6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.0-rc.8" + sdk_version: str = "1.0.1" gen_version: str = "2.399.0" - user_agent: str = "speakeasy-sdk/python 1.0.0-rc.8 2.399.0 0.0.2 mistralai-gcp" + user_agent: str = "speakeasy-sdk/python 1.0.1 2.399.0 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/pyproject.toml 
b/pyproject.toml index d8b78541..810ec3d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.1.3" +version = "1.0.1" description = "Python Client SDK for the Mistral AI API." authors = ["Mistral"] readme = "README.md" diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py index 228f1aab..97bef0ef 100644 --- a/src/mistralai/models/deltamessage.py +++ b/src/mistralai/models/deltamessage.py @@ -10,19 +10,19 @@ class DeltaMessageTypedDict(TypedDict): role: NotRequired[str] - content: NotRequired[str] + content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: Optional[str] = None - content: Optional[str] = None + content: OptionalNullable[str] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["tool_calls"] + nullable_fields = ["content", "tool_calls"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 5baafe9e..1d89e4cc 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.1.3" + sdk_version: str = "1.0.1" gen_version: str = "2.399.0" - user_agent: str = "speakeasy-sdk/python 1.1.3 2.399.0 0.0.2 mistralai" + user_agent: str = "speakeasy-sdk/python 1.0.1 2.399.0 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From 3c56068a96283289303d06dfd0d136e018601b1a Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Mon, 19 Aug 2024 09:59:19 +0200 Subject: [PATCH 
070/223] add new message object (#131) --- MIGRATION.md | 70 +++++++++++++++++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 22 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index 07f6e33a..1b83ac82 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -99,6 +99,7 @@ print(chat_response.choices[0].message.content) ```python import os + from mistralai import Mistral, UserMessage api_key = os.environ["MISTRAL_API_KEY"] @@ -106,16 +107,20 @@ model = "mistral-large-latest" client = Mistral(api_key=api_key) - messages = [ +messages = [ { "role": "user", "content": "What is the best French cheese?", }, - ] +] +# Or using the new message classes +# messages = [ +# UserMessage(content="What is the best French cheese?"), +# ] chat_response = client.chat.complete( - model = model, - messages = messages, + model=model, + messages=messages, ) print(chat_response.choices[0].message.content) @@ -146,6 +151,8 @@ for chunk in stream_response: ``` **New:** ```python +import os + from mistralai import Mistral, UserMessage api_key = os.environ["MISTRAL_API_KEY"] @@ -159,14 +166,19 @@ messages = [ "content": "What is the best French cheese?", }, ] +# Or using the new message classes +# messages = [ +# UserMessage(content="What is the best French cheese?"), +# ] stream_response = client.chat.stream( - model = model, - messages = messages, + model=model, + messages=messages, ) for chunk in stream_response: print(chunk.data.choices[0].delta.content) + ``` ### Example 3: Async @@ -194,23 +206,37 @@ async for chunk in async_response: **New:** ```python -from mistralai import Mistral, UserMessage - -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" - -client = Mistral(api_key=api_key) +import asyncio +import os -messages = [ - { - "role": "user", - "content": "What is the best French cheese?", - }, -] +from mistralai import Mistral, UserMessage -# With async -async_response = await client.chat.stream_async(model=model, messages=messages) -async 
for chunk in async_response: - print(chunk.data.choices[0].delta.content) +async def main(): + client = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + + messages = [ + { + "role": "user", + "content": "What is the best French cheese?", + }, + ] + # Or using the new message classes + # messages = [ + # UserMessage( + # content="What is the best French cheese?", + # ), + # ] + async_response = await client.chat.completstream_asynce_async( + messages=messages, + model="mistral-large-latest", + ) + + async for chunk in async_response: + print(chunk.data.choices[0].delta.content) + + +asyncio.run(main()) ``` From f62530a3c3464981e702f3f22105eec000b9acaa Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 22 Aug 2024 15:29:56 +0200 Subject: [PATCH 071/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.0.2=20(#137)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.376.0 * update dry run example --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .speakeasy/gen.lock | 33 ++++++++++++++---- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 13 +++---- README.md | 5 ++- RELEASES.md | 12 ++++++- docs/models/archiveftmodelout.md | 10 +++--- docs/models/archiveftmodeloutobject.md | 8 +++++ docs/models/detailedjobout.md | 6 ++-- docs/models/detailedjoboutintegrations.md | 11 ++++++ docs/models/detailedjoboutobject.md | 8 +++++ docs/models/detailedjoboutrepositories.md | 11 ++++++ ...sapiroutesuploadfilemultipartbodyparams.md | 2 +- .../models/filesapiroutesuploadfilepurpose.md | 8 +++++ docs/models/fileschema.md | 2 +- docs/models/fileschemapurpose.md | 10 ++++++ docs/models/ftmodelout.md | 2 +- docs/models/ftmodeloutobject.md | 8 +++++ docs/models/githubrepositoryin.md | 16 ++++----- docs/models/githubrepositoryintype.md | 8 
+++++ docs/models/githubrepositoryout.md | 16 ++++----- docs/models/githubrepositoryouttype.md | 8 +++++ docs/models/integrations.md | 11 ++++++ docs/models/jobin.md | 4 +-- docs/models/jobinintegrations.md | 11 ++++++ docs/models/jobinrepositories.md | 11 ++++++ docs/models/jobout.md | 6 ++-- docs/models/jobsout.md | 10 +++--- docs/models/jobsoutobject.md | 8 +++++ docs/models/legacyjobmetadataout.md | 2 +- docs/models/legacyjobmetadataoutobject.md | 8 +++++ docs/models/object.md | 10 ++++++ docs/models/purpose.md | 10 ++++++ docs/models/repositories.md | 11 ++++++ docs/models/retrievefileout.md | 2 +- docs/models/retrievefileoutpurpose.md | 10 ++++++ docs/models/trainingparameters.md | 2 ++ docs/models/trainingparametersin.md | 14 ++++---- docs/models/type.md | 8 +++++ docs/models/unarchiveftmodelout.md | 10 +++--- docs/models/unarchiveftmodeloutobject.md | 8 +++++ docs/models/uploadfileout.md | 2 +- docs/models/wandbintegration.md | 2 +- docs/models/wandbintegrationout.md | 2 +- docs/models/wandbintegrationtype.md | 8 +++++ docs/sdks/jobs/README.md | 4 +-- examples/dry_run_job.py | 14 ++++---- pyproject.toml | 2 +- src/mistralai/jobs.py | 16 ++++----- src/mistralai/models/__init__.py | 34 +++++++++---------- src/mistralai/models/archiveftmodelout.py | 6 ++-- src/mistralai/models/detailedjobout.py | 24 ++++++++++--- .../models/files_api_routes_upload_fileop.py | 11 +++--- src/mistralai/models/fileschema.py | 11 ++++-- src/mistralai/models/ftmodelout.py | 6 ++-- src/mistralai/models/githubrepositoryin.py | 6 ++-- src/mistralai/models/githubrepositoryout.py | 6 ++-- src/mistralai/models/jobin.py | 20 ++++++++--- src/mistralai/models/jobout.py | 25 +++++++++++--- src/mistralai/models/jobsout.py | 6 ++-- src/mistralai/models/legacyjobmetadataout.py | 6 ++-- src/mistralai/models/retrievefileout.py | 11 ++++-- src/mistralai/models/trainingparameters.py | 8 +++-- src/mistralai/models/trainingparametersin.py | 12 +++++-- src/mistralai/models/unarchiveftmodelout.py | 6 
++-- src/mistralai/models/uploadfileout.py | 11 ++++-- src/mistralai/models/wandbintegration.py | 6 ++-- src/mistralai/models/wandbintegrationout.py | 6 ++-- src/mistralai/sdk.py | 4 +-- src/mistralai/sdkconfiguration.py | 6 ++-- src/mistralai/utils/__init__.py | 4 +-- src/mistralai/utils/forms.py | 19 ++++++----- src/mistralai/utils/headers.py | 16 ++++----- src/mistralai/utils/logger.py | 8 +++++ src/mistralai/utils/queryparams.py | 30 ++++++++-------- src/mistralai/utils/serializers.py | 25 +++++++++----- src/mistralai/utils/url.py | 21 +++++++----- src/mistralai/utils/values.py | 6 ++++ 77 files changed, 560 insertions(+), 205 deletions(-) create mode 100644 docs/models/archiveftmodeloutobject.md create mode 100644 docs/models/detailedjoboutintegrations.md create mode 100644 docs/models/detailedjoboutobject.md create mode 100644 docs/models/detailedjoboutrepositories.md create mode 100644 docs/models/filesapiroutesuploadfilepurpose.md create mode 100644 docs/models/fileschemapurpose.md create mode 100644 docs/models/ftmodeloutobject.md create mode 100644 docs/models/githubrepositoryintype.md create mode 100644 docs/models/githubrepositoryouttype.md create mode 100644 docs/models/integrations.md create mode 100644 docs/models/jobinintegrations.md create mode 100644 docs/models/jobinrepositories.md create mode 100644 docs/models/jobsoutobject.md create mode 100644 docs/models/legacyjobmetadataoutobject.md create mode 100644 docs/models/object.md create mode 100644 docs/models/purpose.md create mode 100644 docs/models/repositories.md create mode 100644 docs/models/retrievefileoutpurpose.md create mode 100644 docs/models/type.md create mode 100644 docs/models/unarchiveftmodeloutobject.md create mode 100644 docs/models/wandbintegrationtype.md diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index e7aad22b..bdf53d43 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 
management: - docChecksum: 87135817a5a746d7466c41070e5f581e + docChecksum: ad1a7d6946828a089ca3831e257d307d docVersion: 0.0.2 - speakeasyVersion: 1.373.1 - generationVersion: 2.399.0 - releaseVersion: 1.0.1 - configChecksum: 374a669373f10730cda1eb9a91d59b8b + speakeasyVersion: 1.376.0 + generationVersion: 2.402.5 + releaseVersion: 1.0.2 + configChecksum: ed07f7fc253047a5a4dd2c0f813b8ea4 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,7 +14,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.3.8 + core: 5.4.1 defaultEnabledRetries: 0.2.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 @@ -163,43 +163,64 @@ generatedFiles: - docs/models/retrievemodelv1modelsmodelidgetrequest.md - docs/models/deletemodelout.md - docs/models/deletemodelv1modelsmodeliddeleterequest.md + - docs/models/ftmodeloutobject.md - docs/models/ftmodelout.md - docs/models/ftmodelcapabilitiesout.md - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md - docs/models/updateftmodelin.md + - docs/models/archiveftmodeloutobject.md - docs/models/archiveftmodelout.md - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md + - docs/models/unarchiveftmodeloutobject.md - docs/models/unarchiveftmodelout.md - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md + - docs/models/purpose.md - docs/models/uploadfileout.md - docs/models/source.md - docs/models/sampletype.md + - docs/models/filesapiroutesuploadfilepurpose.md - docs/models/file.md - docs/models/filesapiroutesuploadfilemultipartbodyparams.md - docs/models/listfilesout.md + - docs/models/fileschemapurpose.md - docs/models/fileschema.md + - docs/models/retrievefileoutpurpose.md - docs/models/retrievefileout.md - docs/models/filesapiroutesretrievefilerequest.md - docs/models/deletefileout.md - 
docs/models/filesapiroutesdeletefilerequest.md + - docs/models/jobsoutobject.md - docs/models/jobsout.md - docs/models/status.md + - docs/models/object.md + - docs/models/integrations.md + - docs/models/repositories.md - docs/models/jobout.md - docs/models/jobmetadataout.md + - docs/models/githubrepositoryouttype.md - docs/models/githubrepositoryout.md + - docs/models/type.md - docs/models/wandbintegrationout.md - docs/models/finetuneablemodel.md - docs/models/trainingparameters.md - docs/models/queryparamstatus.md - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md + - docs/models/legacyjobmetadataoutobject.md - docs/models/legacyjobmetadataout.md + - docs/models/jobinintegrations.md + - docs/models/jobinrepositories.md - docs/models/jobin.md + - docs/models/githubrepositoryintype.md - docs/models/githubrepositoryin.md + - docs/models/wandbintegrationtype.md - docs/models/wandbintegration.md - docs/models/trainingparametersin.md - docs/models/trainingfile.md - docs/models/detailedjoboutstatus.md + - docs/models/detailedjoboutobject.md + - docs/models/detailedjoboutintegrations.md + - docs/models/detailedjoboutrepositories.md - docs/models/detailedjobout.md - docs/models/checkpointout.md - docs/models/metricout.md diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 1d54a9ea..c613fdbb 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.1 + version: 1.0.2 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index db02e2a8..c32ba6cf 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.373.1 +speakeasyVersion: 1.376.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure @@ -14,10 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi 
- sourceRevisionDigest: sha256:088d899162941380ec90445852dc7e8c65a8e2eab6b32f552fd7f4fc6f152e76 - sourceBlobDigest: sha256:feb2a952c0f5757a656e8fed5614e28bc4da195cbeb548b5aaf4fc09aee4ddac + sourceRevisionDigest: sha256:421a4bd55fd50ba00d6ebf2db603888009e9996b642b0499110c223fd6ca21c2 + sourceBlobDigest: sha256:1c87b4b8287f6a3083167c13ab59c5e7ac180ab7e19ad1532f3f46495cc12a26 tags: - latest + - main targets: mistralai-azure-sdk: source: mistral-azure-source @@ -34,9 +35,9 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:088d899162941380ec90445852dc7e8c65a8e2eab6b32f552fd7f4fc6f152e76 - sourceBlobDigest: sha256:feb2a952c0f5757a656e8fed5614e28bc4da195cbeb548b5aaf4fc09aee4ddac - outLocation: /Users/gaspard/public-mistral/client-python + sourceRevisionDigest: sha256:421a4bd55fd50ba00d6ebf2db603888009e9996b642b0499110c223fd6ca21c2 + sourceBlobDigest: sha256:1c87b4b8287f6a3083167c13ab59c5e7ac180ab7e19ad1532f3f46495cc12a26 + outLocation: /github/workspace/repo workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/README.md b/README.md index 44237eac..c73adebb 100644 --- a/README.md +++ b/README.md @@ -651,8 +651,9 @@ if res is not None: ## Debugging -To emit debug logs for SDK requests and responses you can pass a logger object directly into your SDK object. +You can setup your SDK to emit debug logs for SDK requests and responses. +You can pass your own logger class directly into your SDK. ```python from mistralai import Mistral import logging @@ -660,6 +661,8 @@ import logging logging.basicConfig(level=logging.DEBUG) s = Mistral(debug_logger=logging.getLogger("mistralai")) ``` + +You can also enable a default debug logger by setting an environment variable `MISTRAL_DEBUG` to true. diff --git a/RELEASES.md b/RELEASES.md index e49882ce..319cce5c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -18,4 +18,14 @@ Based on: ### Generated - [python v1.0.1] . 
### Releases -- [PyPI v1.0.1] https://pypi.org/project/mistralai/1.0.1 - . \ No newline at end of file +- [PyPI v1.0.1] https://pypi.org/project/mistralai/1.0.1 - . + +## 2024-08-20 08:36:28 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.376.0 (2.402.5) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.0.2] . +### Releases +- [PyPI v1.0.2] https://pypi.org/project/mistralai/1.0.2 - . \ No newline at end of file diff --git a/docs/models/archiveftmodelout.md b/docs/models/archiveftmodelout.md index c2e8f8ef..46a9e755 100644 --- a/docs/models/archiveftmodelout.md +++ b/docs/models/archiveftmodelout.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ArchiveFTModelOutObject]](../models/archiveftmodeloutobject.md) | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/archiveftmodeloutobject.md b/docs/models/archiveftmodeloutobject.md new file mode 100644 index 00000000..f6f46889 --- /dev/null +++ b/docs/models/archiveftmodeloutobject.md @@ 
-0,0 +1,8 @@ +# ArchiveFTModelOutObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/detailedjobout.md b/docs/models/detailedjobout.md index f52d5cd2..3eae6b30 100644 --- a/docs/models/detailedjobout.md +++ b/docs/models/detailedjobout.md @@ -15,12 +15,12 @@ | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `object` | [Optional[models.DetailedJobOutObject]](../models/detailedjoboutobject.md) | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.WandbIntegrationOut](../models/wandbintegrationout.md)] | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.DetailedJobOutIntegrations](../models/detailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `repositories` | List[[models.GithubRepositoryOut](../models/githubrepositoryout.md)] | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.DetailedJobOutRepositories](../models/detailedjoboutrepositories.md)] | :heavy_minus_sign: | N/A | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | | `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| | `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/detailedjoboutintegrations.md b/docs/models/detailedjoboutintegrations.md new file mode 100644 index 00000000..46beabc1 --- /dev/null +++ b/docs/models/detailedjoboutintegrations.md @@ -0,0 +1,11 @@ +# DetailedJobOutIntegrations + + +## Supported Types + +### `models.WandbIntegrationOut` + +```python +value: models.WandbIntegrationOut = /* values here */ +``` + diff --git a/docs/models/detailedjoboutobject.md b/docs/models/detailedjoboutobject.md new file mode 100644 index 00000000..3731b1f6 --- /dev/null +++ b/docs/models/detailedjoboutobject.md @@ -0,0 +1,8 @@ +# DetailedJobOutObject + + +## Values + +| Name | Value | +| ----- | ----- | +| `JOB` | job | \ No newline at end of file diff --git a/docs/models/detailedjoboutrepositories.md b/docs/models/detailedjoboutrepositories.md new file mode 100644 index 00000000..4b32079a --- /dev/null +++ b/docs/models/detailedjoboutrepositories.md @@ -0,0 +1,11 @@ +# DetailedJobOutRepositories + + +## Supported Types + +### `models.GithubRepositoryOut` + +```python +value: models.GithubRepositoryOut = /* values here */ +``` + diff --git a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md index 1a6dfc6d..2472dccd 100644 --- a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md +++ b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | -| `purpose` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `purpose` | [Optional[models.FilesAPIRoutesUploadFilePurpose]](../models/filesapiroutesuploadfilepurpose.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesuploadfilepurpose.md b/docs/models/filesapiroutesuploadfilepurpose.md new file mode 100644 index 00000000..164af615 --- /dev/null +++ b/docs/models/filesapiroutesuploadfilepurpose.md @@ -0,0 +1,8 @@ +# FilesAPIRoutesUploadFilePurpose + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `FINE_TUNE` | fine-tune | \ No newline at end of file diff --git a/docs/models/fileschema.md b/docs/models/fileschema.md index fd3ec080..a877bee3 100644 --- a/docs/models/fileschema.md +++ b/docs/models/fileschema.md @@ -12,5 +12,5 @@ | `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | | `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | | `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `purpose` | *str* | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. | fine-tune | +| `purpose` | [models.FileSchemaPurpose](../models/fileschemapurpose.md) | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. | fine-tune | | `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/fileschemapurpose.md b/docs/models/fileschemapurpose.md new file mode 100644 index 00000000..b7ba5113 --- /dev/null +++ b/docs/models/fileschemapurpose.md @@ -0,0 +1,10 @@ +# FileSchemaPurpose + +The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
+ + +## Values + +| Name | Value | +| ----------- | ----------- | +| `FINE_TUNE` | fine-tune | \ No newline at end of file diff --git a/docs/models/ftmodelout.md b/docs/models/ftmodelout.md index 8d081f6c..6dec7156 100644 --- a/docs/models/ftmodelout.md +++ b/docs/models/ftmodelout.md @@ -12,7 +12,7 @@ | `archived` | *bool* | :heavy_check_mark: | N/A | | `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | | `job` | *str* | :heavy_check_mark: | N/A | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `object` | [Optional[models.FTModelOutObject]](../models/ftmodeloutobject.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/ftmodeloutobject.md b/docs/models/ftmodeloutobject.md new file mode 100644 index 00000000..e12b214e --- /dev/null +++ b/docs/models/ftmodeloutobject.md @@ -0,0 +1,8 @@ +# FTModelOutObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/githubrepositoryin.md b/docs/models/githubrepositoryin.md index 1a6be96c..7ae2fb4f 100644 --- a/docs/models/githubrepositoryin.md +++ b/docs/models/githubrepositoryin.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *str* | :heavy_check_mark: | N/A | -| `owner` | *str* | :heavy_check_mark: | N/A | -| `token` | *str* | :heavy_check_mark: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| 
------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `token` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.GithubRepositoryInType]](../models/githubrepositoryintype.md) | :heavy_minus_sign: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryintype.md b/docs/models/githubrepositoryintype.md new file mode 100644 index 00000000..63da967c --- /dev/null +++ b/docs/models/githubrepositoryintype.md @@ -0,0 +1,8 @@ +# GithubRepositoryInType + + +## Values + +| Name | Value | +| -------- | -------- | +| `GITHUB` | github | \ No newline at end of file diff --git a/docs/models/githubrepositoryout.md b/docs/models/githubrepositoryout.md index fbabf1e0..0f96736f 100644 --- a/docs/models/githubrepositoryout.md +++ b/docs/models/githubrepositoryout.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *str* | :heavy_check_mark: | N/A | -| `owner` | *str* | :heavy_check_mark: | N/A | -| `commit_id` | *str* | :heavy_check_mark: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `commit_id` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.GithubRepositoryOutType]](../models/githubrepositoryouttype.md) | :heavy_minus_sign: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryouttype.md b/docs/models/githubrepositoryouttype.md new file mode 100644 index 00000000..46c3eefd --- /dev/null +++ b/docs/models/githubrepositoryouttype.md @@ -0,0 +1,8 @@ +# GithubRepositoryOutType + + +## Values + +| Name | Value | +| -------- | -------- | +| `GITHUB` | github | \ No newline at end of file diff --git a/docs/models/integrations.md b/docs/models/integrations.md new file mode 100644 index 00000000..35214d63 --- /dev/null +++ b/docs/models/integrations.md @@ -0,0 +1,11 @@ +# Integrations + + +## Supported Types + +### `models.WandbIntegrationOut` + +```python +value: models.WandbIntegrationOut = /* values here */ +``` + diff --git a/docs/models/jobin.md b/docs/models/jobin.md index 6358e7a6..ebaed9a9 100644 --- a/docs/models/jobin.md +++ b/docs/models/jobin.md @@ -10,6 +10,6 @@ | `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. 
The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.WandbIntegration](../models/wandbintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | -| `repositories` | List[[models.GithubRepositoryIn](../models/githubrepositoryin.md)] | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.JobInIntegrations](../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `repositories` | List[[models.JobInRepositories](../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | \ No newline at end of file diff --git a/docs/models/jobinintegrations.md b/docs/models/jobinintegrations.md new file mode 100644 index 00000000..91c10242 --- /dev/null +++ b/docs/models/jobinintegrations.md @@ -0,0 +1,11 @@ +# JobInIntegrations + + +## Supported Types + +### `models.WandbIntegration` + +```python +value: models.WandbIntegration = /* values here */ +``` + diff --git a/docs/models/jobinrepositories.md b/docs/models/jobinrepositories.md new file mode 100644 index 00000000..b94477af --- /dev/null +++ b/docs/models/jobinrepositories.md @@ -0,0 +1,11 @@ +# JobInRepositories + + +## Supported Types + +### `models.GithubRepositoryIn` + +```python +value: models.GithubRepositoryIn = /* values here */ +``` + diff --git a/docs/models/jobout.md b/docs/models/jobout.md index 0b88fbac..2fe60fd8 100644 --- a/docs/models/jobout.md +++ b/docs/models/jobout.md @@ -15,10 +15,10 @@ | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last 
modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | -| `object` | *Optional[str]* | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | -| `integrations` | List[[models.WandbIntegrationOut](../models/wandbintegrationout.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `integrations` | List[[models.Integrations](../models/integrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. 
| -| `repositories` | List[[models.GithubRepositoryOut](../models/githubrepositoryout.md)] | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.Repositories](../models/repositories.md)] | :heavy_minus_sign: | N/A | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsout.md b/docs/models/jobsout.md index d3b10a89..99ff75ec 100644 --- a/docs/models/jobsout.md +++ b/docs/models/jobsout.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `total` | *int* | :heavy_check_mark: | N/A | -| `data` | List[[models.JobOut](../models/jobout.md)] | :heavy_minus_sign: | N/A | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `total` | *int* | :heavy_check_mark: | N/A | +| `data` | List[[models.JobOut](../models/jobout.md)] | :heavy_minus_sign: | N/A | +| `object` | [Optional[models.JobsOutObject]](../models/jobsoutobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsoutobject.md b/docs/models/jobsoutobject.md new file mode 100644 index 00000000..f6c8a2c3 --- /dev/null +++ b/docs/models/jobsoutobject.md @@ -0,0 +1,8 @@ +# JobsOutObject + + +## Values + +| Name | Value | +| ------ | ------ | +| `LIST` | list | \ No newline at end of file diff --git a/docs/models/legacyjobmetadataout.md b/docs/models/legacyjobmetadataout.md index 04925baa..44d17e95 100644 --- 
a/docs/models/legacyjobmetadataout.md +++ b/docs/models/legacyjobmetadataout.md @@ -16,4 +16,4 @@ | `deprecated` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | The number of complete passes through the entire training dataset. | 4.2922 | | `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | 10 | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `object` | [Optional[models.LegacyJobMetadataOutObject]](../models/legacyjobmetadataoutobject.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/legacyjobmetadataoutobject.md b/docs/models/legacyjobmetadataoutobject.md new file mode 100644 index 00000000..9873ada8 --- /dev/null +++ b/docs/models/legacyjobmetadataoutobject.md @@ -0,0 +1,8 @@ +# LegacyJobMetadataOutObject + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `JOB_METADATA` | job.metadata | \ No newline at end of file diff --git a/docs/models/object.md b/docs/models/object.md new file mode 100644 index 00000000..ab4c4588 --- /dev/null +++ b/docs/models/object.md @@ -0,0 +1,10 @@ +# Object + +The object type of the fine-tuning job. + + +## Values + +| Name | Value | +| ----- | ----- | +| `JOB` | job | \ No newline at end of file diff --git a/docs/models/purpose.md b/docs/models/purpose.md new file mode 100644 index 00000000..6c795b93 --- /dev/null +++ b/docs/models/purpose.md @@ -0,0 +1,10 @@ +# Purpose + +The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
+ + +## Values + +| Name | Value | +| ----------- | ----------- | +| `FINE_TUNE` | fine-tune | \ No newline at end of file diff --git a/docs/models/repositories.md b/docs/models/repositories.md new file mode 100644 index 00000000..02274e3d --- /dev/null +++ b/docs/models/repositories.md @@ -0,0 +1,11 @@ +# Repositories + + +## Supported Types + +### `models.GithubRepositoryOut` + +```python +value: models.GithubRepositoryOut = /* values here */ +``` + diff --git a/docs/models/retrievefileout.md b/docs/models/retrievefileout.md index 1a624576..02311777 100644 --- a/docs/models/retrievefileout.md +++ b/docs/models/retrievefileout.md @@ -12,5 +12,5 @@ | `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | | `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | | `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `purpose` | *str* | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. | fine-tune | +| `purpose` | [models.RetrieveFileOutPurpose](../models/retrievefileoutpurpose.md) | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. | fine-tune | | `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/retrievefileoutpurpose.md b/docs/models/retrievefileoutpurpose.md new file mode 100644 index 00000000..8b1df1a8 --- /dev/null +++ b/docs/models/retrievefileoutpurpose.md @@ -0,0 +1,10 @@ +# RetrieveFileOutPurpose + +The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
+ + +## Values + +| Name | Value | +| ----------- | ----------- | +| `FINE_TUNE` | fine-tune | \ No newline at end of file diff --git a/docs/models/trainingparameters.md b/docs/models/trainingparameters.md index 4356c33f..0a47b615 100644 --- a/docs/models/trainingparameters.md +++ b/docs/models/trainingparameters.md @@ -7,5 +7,7 @@ | ------------------------- | ------------------------- | ------------------------- | ------------------------- | | `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `learning_rate` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/trainingparametersin.md b/docs/models/trainingparametersin.md index afc094d7..34918ce3 100644 --- a/docs/models/trainingparametersin.md +++ b/docs/models/trainingparametersin.md @@ -5,9 +5,11 @@ The fine-tuning hyperparameter settings used in a fine-tune job. 
## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | -| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. 
| -| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. 
A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | +| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. | +| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | +| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/type.md b/docs/models/type.md new file mode 100644 index 00000000..342c8c7b --- /dev/null +++ b/docs/models/type.md @@ -0,0 +1,8 @@ +# Type + + +## Values + +| Name | Value | +| ------- | ------- | +| `WANDB` | wandb | \ No newline at end of file diff --git a/docs/models/unarchiveftmodelout.md b/docs/models/unarchiveftmodelout.md index aa26792c..287c9a00 100644 --- a/docs/models/unarchiveftmodelout.md +++ b/docs/models/unarchiveftmodelout.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.UnarchiveFTModelOutObject]](../models/unarchiveftmodeloutobject.md) | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/unarchiveftmodeloutobject.md b/docs/models/unarchiveftmodeloutobject.md new file mode 100644 index 00000000..623dcec2 --- /dev/null +++ b/docs/models/unarchiveftmodeloutobject.md @@ -0,0 
+1,8 @@ +# UnarchiveFTModelOutObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/uploadfileout.md b/docs/models/uploadfileout.md index 7eef9bc4..7a695ba5 100644 --- a/docs/models/uploadfileout.md +++ b/docs/models/uploadfileout.md @@ -12,5 +12,5 @@ | `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | | `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | | `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `purpose` | *str* | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. | fine-tune | +| `purpose` | [models.Purpose](../models/purpose.md) | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. | fine-tune | | `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/wandbintegration.md b/docs/models/wandbintegration.md index d48e501b..c003a46e 100644 --- a/docs/models/wandbintegration.md +++ b/docs/models/wandbintegration.md @@ -7,6 +7,6 @@ | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | | `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | | `api_key` | *str* | :heavy_check_mark: | The WandB API key to use for authentication. 
| -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.WandbIntegrationType]](../models/wandbintegrationtype.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | | `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationout.md index a51067bb..e7616fc6 100644 --- a/docs/models/wandbintegrationout.md +++ b/docs/models/wandbintegrationout.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | | `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. 
| | `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationtype.md b/docs/models/wandbintegrationtype.md new file mode 100644 index 00000000..4fdffe22 --- /dev/null +++ b/docs/models/wandbintegrationtype.md @@ -0,0 +1,8 @@ +# WandbIntegrationType + + +## Values + +| Name | Value | +| ------- | ------- | +| `WANDB` | wandb | \ No newline at end of file diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index b0926f68..36d452e3 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -93,8 +93,8 @@ if res is not None: | `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.WandbIntegration](../../models/wandbintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | -| `repositories` | List[[models.GithubRepositoryIn](../../models/githubrepositoryin.md)] | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.JobInIntegrations](../../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. 
| +| `repositories` | List[[models.JobInRepositories](../../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | diff --git a/examples/dry_run_job.py b/examples/dry_run_job.py index 0701b191..3c2a6369 100644 --- a/examples/dry_run_job.py +++ b/examples/dry_run_job.py @@ -25,16 +25,18 @@ async def main(): hyperparameters=TrainingParametersIn( training_steps=1, learning_rate=0.0001, + warmup_fraction=0.01, ), - dry_run=True, + auto_start=False, ) print("Dry run job created") - print(f"Train tokens: {dry_run_job.train_tokens}") - print(f"Dataset tokens: {dry_run_job.data_tokens}") - print(f"Epochs number: {dry_run_job.epochs}") - print(f"Expected duration: {dry_run_job.expected_duration_seconds}") - print(f"Cost: {dry_run_job.cost} {dry_run_job.cost_currency}") + print(f"Job ID: {dry_run_job}") + print(f"Train tokens: {dry_run_job.trained_tokens}") + print(f"Dataset tokens: {dry_run_job.metadata.data_tokens}") + print(f"Epochs number: {dry_run_job.hyperparameters.epochs}") + print(f"Expected duration: {dry_run_job.metadata.expected_duration_seconds}") + print(f"Cost: {dry_run_job.metadata.cost} {dry_run_job.metadata.cost_currency}") if __name__ == "__main__": diff --git a/pyproject.toml b/pyproject.toml index 810ec3d4..e6db0795 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.0.1" +version = "1.0.2" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README.md" diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 255310f6..2ea3e4a0 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -214,8 +214,8 @@ def create( training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[Union[List[models.WandbIntegration], List[models.WandbIntegrationTypedDict]]] = UNSET, - repositories: Optional[Union[List[models.GithubRepositoryIn], List[models.GithubRepositoryInTypedDict]]] = None, + integrations: OptionalNullable[Union[List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict]]] = UNSET, + repositories: Optional[Union[List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict]]] = None, auto_start: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -251,8 +251,8 @@ def create( validation_files=validation_files, hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn), suffix=suffix, - integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.WandbIntegration]]), - repositories=utils.get_pydantic_model(repositories, Optional[List[models.GithubRepositoryIn]]), + integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.JobInIntegrations]]), + repositories=utils.get_pydantic_model(repositories, Optional[List[models.JobInRepositories]]), auto_start=auto_start, ) @@ -310,8 +310,8 @@ async def create_async( training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[Union[List[models.WandbIntegration], List[models.WandbIntegrationTypedDict]]] = UNSET, - repositories: 
Optional[Union[List[models.GithubRepositoryIn], List[models.GithubRepositoryInTypedDict]]] = None, + integrations: OptionalNullable[Union[List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict]]] = UNSET, + repositories: Optional[Union[List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict]]] = None, auto_start: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -347,8 +347,8 @@ async def create_async( validation_files=validation_files, hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn), suffix=suffix, - integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.WandbIntegration]]), - repositories=utils.get_pydantic_model(repositories, Optional[List[models.GithubRepositoryIn]]), + integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.JobInIntegrations]]), + repositories=utils.get_pydantic_model(repositories, Optional[List[models.JobInRepositories]]), auto_start=auto_start, ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index cb21b8f3..647bbdf5 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -2,7 +2,7 @@ from .agentscompletionrequest import AgentsCompletionRequest, AgentsCompletionRequestMessages, AgentsCompletionRequestMessagesTypedDict, AgentsCompletionRequestStop, AgentsCompletionRequestStopTypedDict, AgentsCompletionRequestToolChoice, AgentsCompletionRequestTypedDict from .agentscompletionstreamrequest import AgentsCompletionStreamRequest, AgentsCompletionStreamRequestMessages, AgentsCompletionStreamRequestMessagesTypedDict, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, AgentsCompletionStreamRequestToolChoice, AgentsCompletionStreamRequestTypedDict -from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutTypedDict +from .archiveftmodelout import ArchiveFTModelOut, 
ArchiveFTModelOutObject, ArchiveFTModelOutTypedDict from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict, FinishReason from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice @@ -17,29 +17,29 @@ from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from .detailedjobout import DetailedJobOut, DetailedJobOutStatus, DetailedJobOutTypedDict +from .detailedjobout import DetailedJobOut, DetailedJobOutIntegrations, DetailedJobOutIntegrationsTypedDict, DetailedJobOutObject, DetailedJobOutRepositories, DetailedJobOutRepositoriesTypedDict, DetailedJobOutStatus, DetailedJobOutTypedDict from .embeddingrequest import EmbeddingRequest, EmbeddingRequestTypedDict, Inputs, InputsTypedDict from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict from .eventout import EventOut, EventOutTypedDict from .files_api_routes_delete_fileop import FilesAPIRoutesDeleteFileRequest, FilesAPIRoutesDeleteFileRequestTypedDict from .files_api_routes_retrieve_fileop import FilesAPIRoutesRetrieveFileRequest, FilesAPIRoutesRetrieveFileRequestTypedDict -from .files_api_routes_upload_fileop import File, FileTypedDict, FilesAPIRoutesUploadFileMultiPartBodyParams, FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict -from .fileschema import FileSchema, FileSchemaTypedDict +from .files_api_routes_upload_fileop import File, FileTypedDict, FilesAPIRoutesUploadFileMultiPartBodyParams, FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, FilesAPIRoutesUploadFilePurpose +from .fileschema import FileSchema, FileSchemaPurpose, FileSchemaTypedDict 
from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop, FIMCompletionRequestStopTypedDict, FIMCompletionRequestTypedDict from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict from .finetuneablemodel import FineTuneableModel from .ftmodelcapabilitiesout import FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict -from .ftmodelout import FTModelOut, FTModelOutTypedDict +from .ftmodelout import FTModelOut, FTModelOutObject, FTModelOutTypedDict from .function import Function, FunctionTypedDict from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict -from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict +from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInType, GithubRepositoryInTypedDict +from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutType, GithubRepositoryOutTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .jobin import JobIn, JobInTypedDict +from .jobin import JobIn, JobInIntegrations, JobInIntegrationsTypedDict, JobInRepositories, JobInRepositoriesTypedDict, JobInTypedDict from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .jobout import JobOut, JobOutTypedDict, Status +from .jobout import Integrations, IntegrationsTypedDict, JobOut, JobOutTypedDict, Object, Repositories, RepositoriesTypedDict, Status from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import 
JobsAPIRoutesFineTuningCancelFineTuningJobRequest, JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import JobsAPIRoutesFineTuningCreateFineTuningJobResponse, JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict @@ -48,8 +48,8 @@ from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import JobsAPIRoutesFineTuningStartFineTuningJobRequest, JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict -from .jobsout import JobsOut, JobsOutTypedDict -from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict +from .jobsout import JobsOut, JobsOutObject, JobsOutTypedDict +from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutObject, LegacyJobMetadataOutTypedDict from .listfilesout import ListFilesOut, ListFilesOutTypedDict from .metricout import MetricOut, MetricOutTypedDict from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict @@ -57,7 +57,7 @@ from .modellist import ModelList, ModelListTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats from .retrieve_model_v1_models_model_id_getop import RetrieveModelV1ModelsModelIDGetRequest, RetrieveModelV1ModelsModelIDGetRequestTypedDict -from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict +from .retrievefileout import RetrieveFileOut, RetrieveFileOutPurpose, RetrieveFileOutTypedDict from .sampletype import SampleType from .sdkerror import SDKError from .security import Security, SecurityTypedDict @@ -70,13 +70,13 @@ from .trainingfile import TrainingFile, 
TrainingFileTypedDict from .trainingparameters import TrainingParameters, TrainingParametersTypedDict from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict -from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutTypedDict +from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutObject, UnarchiveFTModelOutTypedDict from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict -from .uploadfileout import UploadFileOut, UploadFileOutTypedDict +from .uploadfileout import Purpose, UploadFileOut, UploadFileOutTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict -from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from .wandbintegration import WandbIntegration, WandbIntegrationType, WandbIntegrationTypedDict +from .wandbintegrationout import Type, WandbIntegrationOut, WandbIntegrationOutTypedDict -__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", "AgentsCompletionStreamRequestMessages", "AgentsCompletionStreamRequestMessagesTypedDict", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", 
"ChatCompletionRequest", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelOut", "FTModelOutTypedDict", "File", "FileSchema", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModel", 
"FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Inputs", "InputsTypedDict", "JobIn", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelCard", "ModelCardTypedDict", "ModelList", "ModelListTypedDict", "QueryParamStatus", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "Role", "SDKError", "SampleType", "Security", "SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", 
"SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "UnarchiveFTModelOut", "UnarchiveFTModelOutTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationTypedDict"] +__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", "AgentsCompletionStreamRequestMessages", "AgentsCompletionStreamRequestMessagesTypedDict", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutObject", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", 
"ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutIntegrations", "DetailedJobOutIntegrationsTypedDict", "DetailedJobOutObject", "DetailedJobOutRepositories", "DetailedJobOutRepositoriesTypedDict", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelOut", "FTModelOutObject", "FTModelOutTypedDict", "File", "FileSchema", "FileSchemaPurpose", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FilesAPIRoutesUploadFilePurpose", "FineTuneableModel", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "GithubRepositoryIn", 
"GithubRepositoryInType", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutType", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Inputs", "InputsTypedDict", "Integrations", "IntegrationsTypedDict", "JobIn", "JobInIntegrations", "JobInIntegrationsTypedDict", "JobInRepositories", "JobInRepositoriesTypedDict", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutObject", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutObject", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelCard", "ModelCardTypedDict", "ModelList", "ModelListTypedDict", "Object", "Purpose", "QueryParamStatus", "Repositories", "RepositoriesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutPurpose", "RetrieveFileOutTypedDict", 
"RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "Role", "SDKError", "SampleType", "Security", "SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "Type", "UnarchiveFTModelOut", "UnarchiveFTModelOutObject", "UnarchiveFTModelOutTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationType", "WandbIntegrationTypedDict"] diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py index ba76737b..be2e9040 100644 --- a/src/mistralai/models/archiveftmodelout.py +++ b/src/mistralai/models/archiveftmodelout.py @@ -3,10 +3,12 @@ from __future__ import annotations from mistralai.types import BaseModel import pydantic -from typing import Final, Optional, TypedDict +from typing import Final, Literal, Optional, TypedDict from typing_extensions import Annotated, NotRequired +ArchiveFTModelOutObject = Literal["model"] + class ArchiveFTModelOutTypedDict(TypedDict): id: str archived: NotRequired[bool] @@ -14,6 +16,6 @@ class ArchiveFTModelOutTypedDict(TypedDict): class ArchiveFTModelOut(BaseModel): id: str - OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "model" # type: ignore + OBJECT: Annotated[Final[Optional[ArchiveFTModelOutObject]], 
pydantic.Field(alias="object")] = "model" # type: ignore archived: Optional[bool] = True diff --git a/src/mistralai/models/detailedjobout.py b/src/mistralai/models/detailedjobout.py index c9f31220..b33b6e3e 100644 --- a/src/mistralai/models/detailedjobout.py +++ b/src/mistralai/models/detailedjobout.py @@ -17,6 +17,20 @@ DetailedJobOutStatus = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] +DetailedJobOutObject = Literal["job"] + +DetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict + + +DetailedJobOutIntegrations = WandbIntegrationOut + + +DetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict + + +DetailedJobOutRepositories = GithubRepositoryOut + + class DetailedJobOutTypedDict(TypedDict): id: str auto_start: bool @@ -31,9 +45,9 @@ class DetailedJobOutTypedDict(TypedDict): validation_files: NotRequired[Nullable[List[str]]] fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] - integrations: NotRequired[Nullable[List[WandbIntegrationOutTypedDict]]] + integrations: NotRequired[Nullable[List[DetailedJobOutIntegrationsTypedDict]]] trained_tokens: NotRequired[Nullable[int]] - repositories: NotRequired[List[GithubRepositoryOutTypedDict]] + repositories: NotRequired[List[DetailedJobOutRepositoriesTypedDict]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] events: NotRequired[List[EventOutTypedDict]] r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" @@ -52,12 +66,12 @@ class DetailedJobOut(BaseModel): modified_at: int training_files: List[str] validation_files: OptionalNullable[List[str]] = UNSET - OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "job" # type: ignore + OBJECT: Annotated[Final[Optional[DetailedJobOutObject]], pydantic.Field(alias="object")] = "job" # type: ignore fine_tuned_model: OptionalNullable[str] = UNSET suffix: OptionalNullable[str] = UNSET - integrations: OptionalNullable[List[WandbIntegrationOut]] = UNSET + integrations: OptionalNullable[List[DetailedJobOutIntegrations]] = UNSET trained_tokens: OptionalNullable[int] = UNSET - repositories: Optional[List[GithubRepositoryOut]] = None + repositories: Optional[List[DetailedJobOutRepositories]] = None metadata: OptionalNullable[JobMetadataOut] = UNSET events: Optional[List[EventOut]] = None r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py index 5d72a89a..74720d6a 100644 --- a/src/mistralai/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/models/files_api_routes_upload_fileop.py @@ -2,13 +2,16 @@ from __future__ import annotations import io -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, MultipartFormMetadata +from mistralai.types import BaseModel, UnrecognizedStr +from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_open_enum import pydantic -from typing import Final, IO, Optional, TypedDict, Union +from pydantic.functional_validators import PlainValidator +from typing import Final, IO, Literal, Optional, TypedDict, Union from typing_extensions import Annotated, NotRequired +FilesAPIRoutesUploadFilePurpose = Union[Literal["fine-tune"], UnrecognizedStr] + class 
FileTypedDict(TypedDict): file_name: str content: Union[bytes, IO[bytes], io.BufferedReader] @@ -47,5 +50,5 @@ class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): file=@path/to/your/file.jsonl ``` """ - PURPOSE: Annotated[Final[Optional[str]], pydantic.Field(alias="purpose"), FieldMetadata(multipart=True)] = "fine-tune" # type: ignore + PURPOSE: Annotated[Final[Annotated[Optional[FilesAPIRoutesUploadFilePurpose], PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose"), FieldMetadata(multipart=True)] = "fine-tune" # type: ignore diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py index c0552424..b852dcb4 100644 --- a/src/mistralai/models/fileschema.py +++ b/src/mistralai/models/fileschema.py @@ -3,13 +3,18 @@ from __future__ import annotations from .sampletype import SampleType from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, UnrecognizedStr +from mistralai.utils import validate_open_enum import pydantic from pydantic import model_serializer -from typing import Final, TypedDict +from pydantic.functional_validators import PlainValidator +from typing import Final, Literal, TypedDict, Union from typing_extensions import Annotated, NotRequired +FileSchemaPurpose = Union[Literal["fine-tune"], UnrecognizedStr] +r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + class FileSchemaTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" @@ -39,7 +44,7 @@ class FileSchema(BaseModel): r"""The name of the uploaded file.""" sample_type: SampleType source: Source - PURPOSE: Annotated[Final[str], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + PURPOSE: Annotated[Final[Annotated[FileSchemaPurpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" num_lines: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/ftmodelout.py b/src/mistralai/models/ftmodelout.py index 44b5348d..6f99bcbe 100644 --- a/src/mistralai/models/ftmodelout.py +++ b/src/mistralai/models/ftmodelout.py @@ -5,10 +5,12 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer -from typing import Final, List, Optional, TypedDict +from typing import Final, List, Literal, Optional, TypedDict from typing_extensions import Annotated, NotRequired +FTModelOutObject = Literal["model"] + class FTModelOutTypedDict(TypedDict): id: str created: int @@ -31,7 +33,7 @@ class FTModelOut(BaseModel): archived: bool capabilities: FTModelCapabilitiesOut job: str - OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "model" # type: ignore + OBJECT: Annotated[Final[Optional[FTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore name: OptionalNullable[str] = UNSET description: OptionalNullable[str] = UNSET max_context_length: Optional[int] = 32768 diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py index 8c4cdd9a..234afeb1 100644 --- a/src/mistralai/models/githubrepositoryin.py +++ b/src/mistralai/models/githubrepositoryin.py @@ -4,10 +4,12 @@ from mistralai.types 
import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer -from typing import Final, Optional, TypedDict +from typing import Final, Literal, Optional, TypedDict from typing_extensions import Annotated, NotRequired +GithubRepositoryInType = Literal["github"] + class GithubRepositoryInTypedDict(TypedDict): name: str owner: str @@ -20,7 +22,7 @@ class GithubRepositoryIn(BaseModel): name: str owner: str token: str - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "github" # type: ignore + TYPE: Annotated[Final[Optional[GithubRepositoryInType]], pydantic.Field(alias="type")] = "github" # type: ignore ref: OptionalNullable[str] = UNSET weight: Optional[float] = 1 diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py index 6bc539ef..2c0a4276 100644 --- a/src/mistralai/models/githubrepositoryout.py +++ b/src/mistralai/models/githubrepositoryout.py @@ -4,10 +4,12 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer -from typing import Final, Optional, TypedDict +from typing import Final, Literal, Optional, TypedDict from typing_extensions import Annotated, NotRequired +GithubRepositoryOutType = Literal["github"] + class GithubRepositoryOutTypedDict(TypedDict): name: str owner: str @@ -20,7 +22,7 @@ class GithubRepositoryOut(BaseModel): name: str owner: str commit_id: str - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "github" # type: ignore + TYPE: Annotated[Final[Optional[GithubRepositoryOutType]], pydantic.Field(alias="type")] = "github" # type: ignore ref: OptionalNullable[str] = UNSET weight: Optional[float] = 1 diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py index 95cd8bf8..dd8e4ee8 100644 --- a/src/mistralai/models/jobin.py +++ b/src/mistralai/models/jobin.py @@ -12,6 +12,18 @@ from 
typing_extensions import NotRequired +JobInIntegrationsTypedDict = WandbIntegrationTypedDict + + +JobInIntegrations = WandbIntegration + + +JobInRepositoriesTypedDict = GithubRepositoryInTypedDict + + +JobInRepositories = GithubRepositoryIn + + class JobInTypedDict(TypedDict): model: FineTuneableModel r"""The name of the model to fine-tune.""" @@ -22,9 +34,9 @@ class JobInTypedDict(TypedDict): r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" suffix: NotRequired[Nullable[str]] r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - integrations: NotRequired[Nullable[List[WandbIntegrationTypedDict]]] + integrations: NotRequired[Nullable[List[JobInIntegrationsTypedDict]]] r"""A list of integrations to enable for your fine-tuning job.""" - repositories: NotRequired[List[GithubRepositoryInTypedDict]] + repositories: NotRequired[List[JobInRepositoriesTypedDict]] auto_start: NotRequired[bool] r"""This field will be required in a future release.""" @@ -39,9 +51,9 @@ class JobIn(BaseModel): r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" suffix: OptionalNullable[str] = UNSET r"""A string that will be added to your fine-tuning model name. 
For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - integrations: OptionalNullable[List[WandbIntegration]] = UNSET + integrations: OptionalNullable[List[JobInIntegrations]] = UNSET r"""A list of integrations to enable for your fine-tuning job.""" - repositories: Optional[List[GithubRepositoryIn]] = None + repositories: Optional[List[JobInRepositories]] = None auto_start: Optional[bool] = None r"""This field will be required in a future release.""" diff --git a/src/mistralai/models/jobout.py b/src/mistralai/models/jobout.py index 353b5cf3..f0e0d253 100644 --- a/src/mistralai/models/jobout.py +++ b/src/mistralai/models/jobout.py @@ -16,6 +16,21 @@ Status = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] r"""The current status of the fine-tuning job.""" +Object = Literal["job"] +r"""The object type of the fine-tuning job.""" + +IntegrationsTypedDict = WandbIntegrationOutTypedDict + + +Integrations = WandbIntegrationOut + + +RepositoriesTypedDict = GithubRepositoryOutTypedDict + + +Repositories = GithubRepositoryOut + + class JobOutTypedDict(TypedDict): id: str r"""The ID of the job.""" @@ -39,11 +54,11 @@ class JobOutTypedDict(TypedDict): r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[WandbIntegrationOutTypedDict]]] + integrations: NotRequired[Nullable[List[IntegrationsTypedDict]]] r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: NotRequired[Nullable[int]] r"""Total number of tokens trained.""" - repositories: NotRequired[List[GithubRepositoryOutTypedDict]] + repositories: NotRequired[List[RepositoriesTypedDict]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] @@ -66,17 +81,17 @@ class JobOut(BaseModel): r"""A list containing the IDs of uploaded files that contain training data.""" validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "job" # type: ignore + OBJECT: Annotated[Final[Optional[Object]], pydantic.Field(alias="object")] = "job" # type: ignore r"""The object type of the fine-tuning job.""" fine_tuned_model: OptionalNullable[str] = UNSET r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: OptionalNullable[List[WandbIntegrationOut]] = UNSET + integrations: OptionalNullable[List[Integrations]] = UNSET r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: OptionalNullable[int] = UNSET r"""Total number of tokens trained.""" - repositories: Optional[List[GithubRepositoryOut]] = None + repositories: Optional[List[Repositories]] = None metadata: OptionalNullable[JobMetadataOut] = UNSET @model_serializer(mode="wrap") diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py index 15776ad8..0ed51c8e 100644 --- a/src/mistralai/models/jobsout.py +++ b/src/mistralai/models/jobsout.py @@ -4,10 +4,12 @@ from .jobout import JobOut, JobOutTypedDict from mistralai.types import BaseModel import pydantic -from typing import Final, List, Optional, TypedDict +from typing import Final, List, Literal, Optional, TypedDict from typing_extensions import Annotated, NotRequired +JobsOutObject = Literal["list"] + class JobsOutTypedDict(TypedDict): total: int data: NotRequired[List[JobOutTypedDict]] @@ -16,5 +18,5 @@ class JobsOutTypedDict(TypedDict): class JobsOut(BaseModel): total: int data: Optional[List[JobOut]] = None - OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "list" # type: ignore + OBJECT: Annotated[Final[Optional[JobsOutObject]], pydantic.Field(alias="object")] = "list" # type: ignore diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py index f4c2d7a0..3b3106d0 100644 --- a/src/mistralai/models/legacyjobmetadataout.py +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -4,10 +4,12 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer -from typing import Final, Optional, TypedDict +from typing import Final, Literal, Optional, TypedDict 
from typing_extensions import Annotated, NotRequired +LegacyJobMetadataOutObject = Literal["job.metadata"] + class LegacyJobMetadataOutTypedDict(TypedDict): details: str expected_duration_seconds: NotRequired[Nullable[int]] @@ -50,7 +52,7 @@ class LegacyJobMetadataOut(BaseModel): r"""The number of complete passes through the entire training dataset.""" training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "job.metadata" # type: ignore + OBJECT: Annotated[Final[Optional[LegacyJobMetadataOutObject]], pydantic.Field(alias="object")] = "job.metadata" # type: ignore @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py index 98af323e..cab3b658 100644 --- a/src/mistralai/models/retrievefileout.py +++ b/src/mistralai/models/retrievefileout.py @@ -3,13 +3,18 @@ from __future__ import annotations from .sampletype import SampleType from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, UnrecognizedStr +from mistralai.utils import validate_open_enum import pydantic from pydantic import model_serializer -from typing import Final, TypedDict +from pydantic.functional_validators import PlainValidator +from typing import Final, Literal, TypedDict, Union from typing_extensions import Annotated, NotRequired +RetrieveFileOutPurpose = Union[Literal["fine-tune"], UnrecognizedStr] +r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + class RetrieveFileOutTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" @@ -39,7 +44,7 @@ class RetrieveFileOut(BaseModel): r"""The name of the uploaded file.""" sample_type: SampleType source: Source - PURPOSE: Annotated[Final[str], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + PURPOSE: Annotated[Final[Annotated[RetrieveFileOutPurpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" num_lines: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/trainingparameters.py b/src/mistralai/models/trainingparameters.py index 2110b55f..dcbb3949 100644 --- a/src/mistralai/models/trainingparameters.py +++ b/src/mistralai/models/trainingparameters.py @@ -10,6 +10,8 @@ class TrainingParametersTypedDict(TypedDict): training_steps: NotRequired[Nullable[int]] learning_rate: NotRequired[float] + weight_decay: NotRequired[Nullable[float]] + warmup_fraction: NotRequired[Nullable[float]] epochs: NotRequired[Nullable[float]] fim_ratio: NotRequired[Nullable[float]] @@ -17,13 +19,15 @@ class TrainingParametersTypedDict(TypedDict): class TrainingParameters(BaseModel): training_steps: OptionalNullable[int] = UNSET learning_rate: Optional[float] = 0.0001 + weight_decay: OptionalNullable[float] = UNSET + warmup_fraction: OptionalNullable[float] = UNSET epochs: OptionalNullable[float] = UNSET fim_ratio: OptionalNullable[float] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["training_steps", "learning_rate", "epochs", "fim_ratio"] - nullable_fields = ["training_steps", "epochs", "fim_ratio"] + optional_fields = ["training_steps", "learning_rate", "weight_decay", "warmup_fraction", "epochs", "fim_ratio"] + nullable_fields = ["training_steps", "weight_decay", "warmup_fraction", "epochs", 
"fim_ratio"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/trainingparametersin.py b/src/mistralai/models/trainingparametersin.py index 60d71b27..f4ec585d 100644 --- a/src/mistralai/models/trainingparametersin.py +++ b/src/mistralai/models/trainingparametersin.py @@ -14,6 +14,10 @@ class TrainingParametersInTypedDict(TypedDict): r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" learning_rate: NotRequired[float] r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + weight_decay: NotRequired[Nullable[float]] + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" + warmup_fraction: NotRequired[Nullable[float]] + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" epochs: NotRequired[Nullable[float]] fim_ratio: NotRequired[Nullable[float]] @@ -25,13 +29,17 @@ class TrainingParametersIn(BaseModel): r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" learning_rate: Optional[float] = 0.0001 r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + weight_decay: OptionalNullable[float] = UNSET + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" + warmup_fraction: OptionalNullable[float] = UNSET + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" epochs: OptionalNullable[float] = UNSET fim_ratio: OptionalNullable[float] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["training_steps", "learning_rate", "epochs", "fim_ratio"] - nullable_fields = ["training_steps", "epochs", "fim_ratio"] + optional_fields = ["training_steps", "learning_rate", "weight_decay", "warmup_fraction", "epochs", "fim_ratio"] + nullable_fields = ["training_steps", "weight_decay", "warmup_fraction", "epochs", "fim_ratio"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py index 07334f57..7391df2c 100644 --- a/src/mistralai/models/unarchiveftmodelout.py +++ b/src/mistralai/models/unarchiveftmodelout.py @@ -3,10 +3,12 @@ from __future__ import annotations from mistralai.types import BaseModel import pydantic -from typing import 
Final, Optional, TypedDict +from typing import Final, Literal, Optional, TypedDict from typing_extensions import Annotated, NotRequired +UnarchiveFTModelOutObject = Literal["model"] + class UnarchiveFTModelOutTypedDict(TypedDict): id: str archived: NotRequired[bool] @@ -14,6 +16,6 @@ class UnarchiveFTModelOutTypedDict(TypedDict): class UnarchiveFTModelOut(BaseModel): id: str - OBJECT: Annotated[Final[Optional[str]], pydantic.Field(alias="object")] = "model" # type: ignore + OBJECT: Annotated[Final[Optional[UnarchiveFTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore archived: Optional[bool] = False diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py index cd5b86c6..dce8d0f2 100644 --- a/src/mistralai/models/uploadfileout.py +++ b/src/mistralai/models/uploadfileout.py @@ -3,13 +3,18 @@ from __future__ import annotations from .sampletype import SampleType from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, UnrecognizedStr +from mistralai.utils import validate_open_enum import pydantic from pydantic import model_serializer -from typing import Final, TypedDict +from pydantic.functional_validators import PlainValidator +from typing import Final, Literal, TypedDict, Union from typing_extensions import Annotated, NotRequired +Purpose = Union[Literal["fine-tune"], UnrecognizedStr] +r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + class UploadFileOutTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" @@ -39,7 +44,7 @@ class UploadFileOut(BaseModel): r"""The name of the uploaded file.""" sample_type: SampleType source: Source - PURPOSE: Annotated[Final[str], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + PURPOSE: Annotated[Final[Annotated[Purpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" num_lines: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py index fccab00f..2a86caa3 100644 --- a/src/mistralai/models/wandbintegration.py +++ b/src/mistralai/models/wandbintegration.py @@ -4,10 +4,12 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer -from typing import Final, Optional, TypedDict +from typing import Final, Literal, Optional, TypedDict from typing_extensions import Annotated, NotRequired +WandbIntegrationType = Literal["wandb"] + class WandbIntegrationTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" @@ -23,7 +25,7 @@ class WandbIntegration(BaseModel): r"""The name of the project that the new run will be created under.""" api_key: str r"""The WandB API key to use for authentication.""" - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "wandb" # type: ignore + TYPE: Annotated[Final[Optional[WandbIntegrationType]], pydantic.Field(alias="type")] = "wandb" # type: ignore name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" run_name: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py index f971ba58..f6e185a9 100644 --- a/src/mistralai/models/wandbintegrationout.py +++ b/src/mistralai/models/wandbintegrationout.py @@ -4,10 +4,12 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer -from typing import Final, Optional, TypedDict +from typing import Final, Literal, Optional, TypedDict from typing_extensions import Annotated, NotRequired +Type = Literal["wandb"] + class WandbIntegrationOutTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" @@ -19,7 +21,7 @@ class WandbIntegrationOutTypedDict(TypedDict): class WandbIntegrationOut(BaseModel): project: str r"""The name of the project that the new run will be created under.""" - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "wandb" # type: ignore + TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "wandb" # type: ignore name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" run_name: OptionalNullable[str] = UNSET diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index b0d2bb18..1b79f700 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from .httpclient import AsyncHttpClient, HttpClient from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, NoOpLogger +from .utils.logger import Logger, get_default_logger from .utils.retries import RetryConfig import httpx from mistralai import models, utils @@ -67,7 +67,7 @@ def __init__( async_client = httpx.AsyncClient() if debug_logger is None: - debug_logger = NoOpLogger() + debug_logger = get_default_logger() assert issubclass( type(async_client), AsyncHttpClient diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 1d89e4cc..e0821a7e 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.1" - gen_version: str = "2.399.0" - user_agent: str = "speakeasy-sdk/python 1.0.1 2.399.0 0.0.2 mistralai" + sdk_version: str = "1.0.2" + gen_version: str = "2.402.5" + user_agent: str = "speakeasy-sdk/python 1.0.2 2.402.5 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py index 75ca0241..feee4dc5 100644 --- a/src/mistralai/utils/__init__.py +++ b/src/mistralai/utils/__init__.py @@ -35,7 +35,7 @@ ) from .url import generate_url, template_url, remove_suffix from .values import get_global_from_env, match_content_type, match_status_codes, match_response -from .logger import Logger, get_body_content, NoOpLogger +from .logger import Logger, get_body_content, get_default_logger __all__ = [ 
"BackoffStrategy", @@ -44,6 +44,7 @@ "FormMetadata", "generate_url", "get_body_content", + "get_default_logger", "get_discriminator", "get_global_from_env", "get_headers", @@ -59,7 +60,6 @@ "match_status_codes", "match_response", "MultipartFormMetadata", - "NoOpLogger", "OpenEnumMeta", "PathParamMetadata", "QueryParamMetadata", diff --git a/src/mistralai/utils/forms.py b/src/mistralai/utils/forms.py index 07f9b235..9f5a731e 100644 --- a/src/mistralai/utils/forms.py +++ b/src/mistralai/utils/forms.py @@ -17,7 +17,7 @@ MultipartFormMetadata, find_field_metadata, ) -from .values import _val_to_string +from .values import _is_set, _val_to_string def _populate_form( @@ -27,7 +27,7 @@ def _populate_form( delimiter: str, form: Dict[str, List[str]], ): - if obj is None: + if not _is_set(obj): return form if isinstance(obj, BaseModel): @@ -41,7 +41,7 @@ def _populate_form( continue val = getattr(obj, name) - if val is None: + if not _is_set(val): continue if explode: @@ -54,7 +54,7 @@ def _populate_form( elif isinstance(obj, Dict): items = [] for key, value in obj.items(): - if value is None: + if not _is_set(value): continue if explode: @@ -68,7 +68,7 @@ def _populate_form( items = [] for value in obj: - if value is None: + if not _is_set(value): continue if explode: @@ -102,7 +102,7 @@ def serialize_multipart_form( field = request_fields[name] val = getattr(request, name) - if val is None: + if not _is_set(val): continue field_metadata = find_field_metadata(field, MultipartFormMetadata) @@ -156,7 +156,7 @@ def serialize_multipart_form( values = [] for value in val: - if value is None: + if not _is_set(value): continue values.append(_val_to_string(value)) @@ -176,7 +176,7 @@ def serialize_form_data(data: Any) -> Dict[str, Any]: field = data_fields[name] val = getattr(data, name) - if val is None: + if not _is_set(val): continue metadata = find_field_metadata(field, FormMetadata) @@ -200,7 +200,8 @@ def serialize_form_data(data: Any) -> Dict[str, Any]: raise 
ValueError(f"Invalid form style for field {name}") elif isinstance(data, Dict): for key, value in data.items(): - form[key] = [_val_to_string(value)] + if _is_set(value): + form[key] = [_val_to_string(value)] else: raise TypeError(f"Invalid request body type {type(data)} for form data") diff --git a/src/mistralai/utils/headers.py b/src/mistralai/utils/headers.py index e14a0f4a..37864cbb 100644 --- a/src/mistralai/utils/headers.py +++ b/src/mistralai/utils/headers.py @@ -15,16 +15,16 @@ find_field_metadata, ) -from .values import _populate_from_globals, _val_to_string +from .values import _is_set, _populate_from_globals, _val_to_string def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: headers: Dict[str, str] = {} globals_already_populated = [] - if headers_params is not None: + if _is_set(headers_params): globals_already_populated = _populate_headers(headers_params, gbls, headers, []) - if gbls is not None: + if _is_set(gbls): _populate_headers(gbls, None, headers, globals_already_populated) return headers @@ -67,7 +67,7 @@ def _populate_headers( def _serialize_header(explode: bool, obj: Any) -> str: - if obj is None: + if not _is_set(obj): return "" if isinstance(obj, BaseModel): @@ -83,7 +83,7 @@ def _serialize_header(explode: bool, obj: Any) -> str: f_name = obj_field.alias if obj_field.alias is not None else name val = getattr(obj, name) - if val is None: + if not _is_set(val): continue if explode: @@ -98,7 +98,7 @@ def _serialize_header(explode: bool, obj: Any) -> str: items = [] for key, value in obj.items(): - if value is None: + if not _is_set(value): continue if explode: @@ -113,14 +113,14 @@ def _serialize_header(explode: bool, obj: Any) -> str: items = [] for value in obj: - if value is None: + if not _is_set(value): continue items.append(_val_to_string(value)) if len(items) > 0: return ",".join(items) - else: + elif _is_set(obj): return f"{_val_to_string(obj)}" return "" diff --git a/src/mistralai/utils/logger.py 
b/src/mistralai/utils/logger.py index 7e4bbeac..c4ea1a03 100644 --- a/src/mistralai/utils/logger.py +++ b/src/mistralai/utils/logger.py @@ -1,6 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" import httpx +import logging +import os from typing import Any, Protocol class Logger(Protocol): @@ -14,3 +16,9 @@ def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: def get_body_content(req: httpx.Request) -> str: return "" if not hasattr(req, "_content") else str(req.content) +def get_default_logger() -> Logger: + if os.getenv("MISTRAL_DEBUG"): + logging.basicConfig(level=logging.DEBUG) + return logging.getLogger("mistralai") + return NoOpLogger() + diff --git a/src/mistralai/utils/queryparams.py b/src/mistralai/utils/queryparams.py index 1c8c5834..37a6e7f9 100644 --- a/src/mistralai/utils/queryparams.py +++ b/src/mistralai/utils/queryparams.py @@ -15,7 +15,12 @@ QueryParamMetadata, find_field_metadata, ) -from .values import _get_serialized_params, _populate_from_globals, _val_to_string +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) from .forms import _populate_form @@ -26,7 +31,7 @@ def get_query_params( params: Dict[str, List[str]] = {} globals_already_populated = _populate_query_params(query_params, gbls, params, []) - if gbls is not None: + if _is_set(gbls): _populate_query_params(gbls, None, params, globals_already_populated) return params @@ -55,7 +60,7 @@ def _populate_query_params( if not metadata: continue - value = getattr(query_params, name) if query_params is not None else None + value = getattr(query_params, name) if _is_set(query_params) else None value, global_found = _populate_from_globals( name, value, QueryParamMetadata, gbls @@ -99,7 +104,7 @@ def _populate_deep_object_query_params( obj: Any, params: Dict[str, List[str]], ): - if obj is None: + if not _is_set(obj): return if isinstance(obj, BaseModel): @@ 
-113,10 +118,7 @@ def _populate_deep_object_query_params_basemodel( obj: Any, params: Dict[str, List[str]], ): - if obj is None: - return - - if not isinstance(obj, BaseModel): + if not _is_set(obj) or not isinstance(obj, BaseModel): return obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields @@ -128,11 +130,11 @@ def _populate_deep_object_query_params_basemodel( params_key = f"{prior_params_key}[{f_name}]" obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) - if obj_param_metadata is None: + if not _is_set(obj_param_metadata): continue obj_val = getattr(obj, name) - if obj_val is None: + if not _is_set(obj_val): continue if isinstance(obj_val, BaseModel): @@ -150,11 +152,11 @@ def _populate_deep_object_query_params_dict( value: Dict, params: Dict[str, List[str]], ): - if value is None: + if not _is_set(value): return for key, val in value.items(): - if val is None: + if not _is_set(val): continue params_key = f"{prior_params_key}[{key}]" @@ -174,11 +176,11 @@ def _populate_deep_object_query_params_list( value: List, params: Dict[str, List[str]], ): - if value is None: + if not _is_set(value): return for val in value: - if val is None: + if not _is_set(val): continue if params.get(params_key) is None: diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py index a98998a3..85d57f43 100644 --- a/src/mistralai/utils/serializers.py +++ b/src/mistralai/utils/serializers.py @@ -9,13 +9,15 @@ from pydantic_core import from_json from typing_inspect import is_optional_type -from ..types.basemodel import BaseModel, Nullable, OptionalNullable +from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): if is_optional_type(type(d)) and d is None: return None + if isinstance(d, Unset): + return d if not isinstance(d, Decimal): raise ValueError("Expected Decimal object") @@ -29,7 +31,7 @@ def validate_decimal(d): if d is None: return None - if 
isinstance(d, Decimal): + if isinstance(d, (Decimal, Unset)): return d if not isinstance(d, (str, int, float)): @@ -42,6 +44,8 @@ def serialize_float(as_str: bool): def serialize(f): if is_optional_type(type(f)) and f is None: return None + if isinstance(f, Unset): + return f if not isinstance(f, float): raise ValueError("Expected float") @@ -55,7 +59,7 @@ def validate_float(f): if f is None: return None - if isinstance(f, float): + if isinstance(f, (float, Unset)): return f if not isinstance(f, str): @@ -65,14 +69,16 @@ def validate_float(f): def serialize_int(as_str: bool): - def serialize(b): - if is_optional_type(type(b)) and b is None: + def serialize(i): + if is_optional_type(type(i)) and i is None: return None + if isinstance(i, Unset): + return i - if not isinstance(b, int): + if not isinstance(i, int): raise ValueError("Expected int") - return str(b) if as_str else b + return str(i) if as_str else i return serialize @@ -81,7 +87,7 @@ def validate_int(b): if b is None: return None - if isinstance(b, int): + if isinstance(b, (int, Unset)): return b if not isinstance(b, str): @@ -95,6 +101,9 @@ def validate(e): if e is None: return None + if isinstance(e, Unset): + return e + if is_int: if not isinstance(e, int): raise ValueError("Expected int") diff --git a/src/mistralai/utils/url.py b/src/mistralai/utils/url.py index b201bfa4..c78ccbae 100644 --- a/src/mistralai/utils/url.py +++ b/src/mistralai/utils/url.py @@ -18,7 +18,12 @@ PathParamMetadata, find_field_metadata, ) -from .values import _get_serialized_params, _populate_from_globals, _val_to_string +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) def generate_url( @@ -32,7 +37,7 @@ def generate_url( globals_already_populated = _populate_path_params( path_params, gbls, path_param_values, [] ) - if gbls is not None: + if _is_set(gbls): _populate_path_params(gbls, None, path_param_values, globals_already_populated) for key, value in 
path_param_values.items(): @@ -64,14 +69,14 @@ def _populate_path_params( if param_metadata is None: continue - param = getattr(path_params, name) if path_params is not None else None + param = getattr(path_params, name) if _is_set(path_params) else None param, global_found = _populate_from_globals( name, param, PathParamMetadata, gbls ) if global_found: globals_already_populated.append(name) - if param is None: + if not _is_set(param): continue f_name = field.alias if field.alias is not None else name @@ -87,13 +92,13 @@ def _populate_path_params( if param_metadata.style == "simple": if isinstance(param, List): for pp_val in param: - if pp_val is None: + if not _is_set(pp_val): continue pp_vals.append(_val_to_string(pp_val)) path_param_values[f_name] = ",".join(pp_vals) elif isinstance(param, Dict): for pp_key in param: - if param[pp_key] is None: + if not _is_set(param[pp_key]): continue if param_metadata.explode: pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") @@ -116,7 +121,7 @@ def _populate_path_params( ) param_field_val = getattr(param, name) - if param_field_val is None: + if not _is_set(param_field_val): continue if param_metadata.explode: pp_vals.append( @@ -127,7 +132,7 @@ def _populate_path_params( f"{param_name},{_val_to_string(param_field_val)}" ) path_param_values[f_name] = ",".join(pp_vals) - else: + elif _is_set(param): path_param_values[f_name] = _val_to_string(param) return globals_already_populated diff --git a/src/mistralai/utils/values.py b/src/mistralai/utils/values.py index 24ccae3d..2b4b6832 100644 --- a/src/mistralai/utils/values.py +++ b/src/mistralai/utils/values.py @@ -10,6 +10,8 @@ from pydantic import BaseModel from pydantic.fields import FieldInfo +from ..types.basemodel import Unset + from .serializers import marshal_json from .metadata import ParamMetadata, find_field_metadata @@ -126,3 +128,7 @@ def _get_serialized_params( params[field_name] = marshal_json(obj, typ) return params + + +def _is_set(value: Any) -> bool: + 
return value is not None and not isinstance(value, Unset) From e1db2254f004034d0eeeca1c1d6270bbd80ffc73 Mon Sep 17 00:00:00 2001 From: Harizo Rajaona Date: Tue, 27 Aug 2024 14:10:23 +0200 Subject: [PATCH 072/223] GCP improvements - Pin google-auth to 2.27.0 to fix conflicts in Colab notebooks - Enable passing a user-provided access token to MistralGoogleCloud --- packages/mistralai_gcp/poetry.lock | 10 +++++----- packages/mistralai_gcp/pyproject.toml | 2 +- packages/mistralai_gcp/src/mistralai_gcp/sdk.py | 16 ++++++++++------ poetry.lock | 10 +++++----- pyproject.toml | 2 +- 5 files changed, 22 insertions(+), 18 deletions(-) diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock index 8a625e2f..67c9cec7 100644 --- a/packages/mistralai_gcp/poetry.lock +++ b/packages/mistralai_gcp/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -227,13 +227,13 @@ test = ["pytest (>=6)"] [[package]] name = "google-auth" -version = "2.32.0" +version = "2.27.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google_auth-2.32.0-py2.py3-none-any.whl", hash = "sha256:53326ea2ebec768070a94bee4e1b9194c9646ea0c2bd72422785bd0f9abfad7b"}, - {file = "google_auth-2.32.0.tar.gz", hash = "sha256:49315be72c55a6a37d62819e3573f6b416aca00721f7e3e31a008d928bf64022"}, + {file = "google-auth-2.27.0.tar.gz", hash = "sha256:e863a56ccc2d8efa83df7a80272601e43487fa9a728a376205c86c26aaefa821"}, + {file = "google_auth-2.27.0-py2.py3-none-any.whl", hash = "sha256:8e4bad367015430ff253fe49d500fdc3396c1a434db5740828c728e45bcce245"}, ] [package.dependencies] @@ -859,4 +859,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "c693a1bfd23435953d0a7305446907287d0d66ba881c76188dca0a9eefc7a1b6" +content-hash = "6a01b3944f3e2b62891369e56c6e0e00815d65e9a137f0558ee13fd17f674669" diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index c169af4f..4d1d5ee1 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -18,7 +18,7 @@ in-project = true [tool.poetry.dependencies] python = "^3.8" eval-type-backport = "^0.2.0" -google-auth = "^2.31.0" +google-auth = "2.27.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" pydantic = "~2.8.2" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index 3c530c8c..446d5285 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -32,6 +32,7 @@ def __init__( self, region: str = "europe-west4", project_id: Optional[str] = None, + access_token: Optional[str] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, retry_config: 
Optional[Nullable[RetryConfig]] = None, @@ -45,10 +46,11 @@ def __init__( :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods """ - + credentials, loaded_project_id = google.auth.default( scopes=["https://www.googleapis.com/auth/cloud-platform"], ) + credentials.refresh(google.auth.transport.requests.Request()) if not isinstance(credentials, google.auth.credentials.Credentials): raise models.SDKError( @@ -60,12 +62,14 @@ def __init__( raise models.SDKError("project_id must be provided") def auth_token() -> str: - if credentials.expired: + if access_token: + return access_token + else: credentials.refresh(google.auth.transport.requests.Request()) - token = credentials.token - if not token: - raise models.SDKError("Failed to get token from credentials") - return token + token = credentials.token + if not token: + raise models.SDKError("Failed to get token from credentials") + return token if client is None: client = httpx.Client() diff --git a/poetry.lock b/poetry.lock index f22cde1d..78e95d41 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -213,13 +213,13 @@ test = ["pytest (>=6)"] [[package]] name = "google-auth" -version = "2.32.0" +version = "2.27.0" description = "Google Authentication Library" optional = true python-versions = ">=3.7" files = [ - {file = "google_auth-2.32.0-py2.py3-none-any.whl", hash = "sha256:53326ea2ebec768070a94bee4e1b9194c9646ea0c2bd72422785bd0f9abfad7b"}, - {file = "google_auth-2.32.0.tar.gz", hash = "sha256:49315be72c55a6a37d62819e3573f6b416aca00721f7e3e31a008d928bf64022"}, + {file = "google-auth-2.27.0.tar.gz", hash = "sha256:e863a56ccc2d8efa83df7a80272601e43487fa9a728a376205c86c26aaefa821"}, + {file = "google_auth-2.27.0-py2.py3-none-any.whl", hash = "sha256:8e4bad367015430ff253fe49d500fdc3396c1a434db5740828c728e45bcce245"}, ] [package.dependencies] @@ -819,4 +819,4 @@ gcp = ["google-auth", "requests"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "a1ca991b0570a5c978745559e8d18354ec04cbd566513cc895346ec1bae01112" +content-hash = "ed93474ac9f1d994cf76bfbd505206701b3c0ace3a2402e9c638f270301401cd" diff --git a/pyproject.toml b/pyproject.toml index e6db0795..02570db3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,7 @@ jsonpath-python = "^1.0.6" pydantic = "~2.8.2" python-dateutil = "^2.9.0.post0" typing-inspect = "^0.9.0" -google-auth = { version = "^2.31.0", optional = true } +google-auth = { version = "2.27.0", optional = true } requests = { version = "^2.32.3", optional = true } [tool.poetry.group.dev.dependencies] From 64f755a4d16fb562ff259b1d88622fd28a62c194 Mon Sep 17 00:00:00 2001 From: Harizo Rajaona Date: Tue, 27 Aug 2024 16:05:57 +0200 Subject: [PATCH 073/223] Adjust GCP client dep in gen.yml instead of package-level pyproject.toml --- packages/mistralai_gcp/.speakeasy/gen.yaml | 2 +- packages/mistralai_gcp/pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 
a857a5d0..4f520700 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -18,7 +18,7 @@ python: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 main: - google-auth: ^2.31.0 + google-auth: 2.27.0 requests: ^2.32.3 authors: - Mistral diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 4d1d5ee1..c169af4f 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -18,7 +18,7 @@ in-project = true [tool.poetry.dependencies] python = "^3.8" eval-type-backport = "^0.2.0" -google-auth = "2.27.0" +google-auth = "^2.31.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" pydantic = "~2.8.2" From b7315e5b04c7d066e991f2c6cbf1de8c61f85b3b Mon Sep 17 00:00:00 2001 From: Harizo Rajaona Date: Tue, 27 Aug 2024 16:21:35 +0200 Subject: [PATCH 074/223] Only authenticate via gcloud if no access token is manually provided --- packages/mistralai_gcp/src/mistralai_gcp/sdk.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index 446d5285..262f60e8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -46,16 +46,17 @@ def __init__( :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods """ - - credentials, loaded_project_id = google.auth.default( - scopes=["https://www.googleapis.com/auth/cloud-platform"], - ) - credentials.refresh(google.auth.transport.requests.Request()) - if not isinstance(credentials, google.auth.credentials.Credentials): - raise models.SDKError( - "credentials must be an instance of google.auth.credentials.Credentials" + if not access_token: + credentials, loaded_project_id = google.auth.default( + 
scopes=["https://www.googleapis.com/auth/cloud-platform"], ) + credentials.refresh(google.auth.transport.requests.Request()) + + if not isinstance(credentials, google.auth.credentials.Credentials): + raise models.SDKError( + "credentials must be an instance of google.auth.credentials.Credentials" + ) project_id = project_id or loaded_project_id if project_id is None: From 951f0bb2b9d2d70cb0c161b8f772c8bf088da812 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 29 Aug 2024 09:09:47 +0000 Subject: [PATCH 075/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.382.0 --- .speakeasy/gen.lock | 8 ++++---- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 2 +- RELEASES.md | 12 +++++++++++- docs/sdks/agents/README.md | 9 +++------ docs/sdks/chat/README.md | 9 +++------ docs/sdks/embeddings/README.md | 4 +--- docs/sdks/files/README.md | 19 +++++++------------ docs/sdks/fim/README.md | 9 +++------ docs/sdks/finetuning/README.md | 5 +---- docs/sdks/jobs/README.md | 26 +++++++++++--------------- docs/sdks/mistral/README.md | 6 +----- docs/sdks/models/README.md | 29 +++++++++++------------------ pyproject.toml | 2 +- src/mistralai/sdkconfiguration.py | 6 +++--- 15 files changed, 62 insertions(+), 86 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index bdf53d43..6b527685 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -3,10 +3,10 @@ id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: docChecksum: ad1a7d6946828a089ca3831e257d307d docVersion: 0.0.2 - speakeasyVersion: 1.376.0 - generationVersion: 2.402.5 - releaseVersion: 1.0.2 - configChecksum: ed07f7fc253047a5a4dd2c0f813b8ea4 + speakeasyVersion: 1.382.0 + generationVersion: 2.404.11 + releaseVersion: 1.0.3 + configChecksum: 818970b881ec69b05f6660ca354f26f5 repoURL: https://github.com/mistralai/client-python.git installationURL: 
https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index c613fdbb..289fb16e 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.2 + version: 1.0.3 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index c32ba6cf..d16a5187 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.376.0 +speakeasyVersion: 1.382.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure diff --git a/RELEASES.md b/RELEASES.md index 319cce5c..b92e67ac 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -28,4 +28,14 @@ Based on: ### Generated - [python v1.0.2] . ### Releases -- [PyPI v1.0.2] https://pypi.org/project/mistralai/1.0.2 - . \ No newline at end of file +- [PyPI v1.0.2] https://pypi.org/project/mistralai/1.0.2 - . + +## 2024-08-29 09:09:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.382.0 (2.404.11) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.0.3] . +### Releases +- [PyPI v1.0.3] https://pypi.org/project/mistralai/1.0.3 - . 
\ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 167f0411..744fc17a 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -38,8 +38,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | Example | @@ -56,10 +54,10 @@ if res is not None: | `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - ### Response **[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** + ### Errors | Error Object | Status Code | Content Type | @@ -67,6 +65,7 @@ if res is not None: | models.HTTPValidationError | 422 | application/json | | models.SDKError | 4xx-5xx | */* | + ## stream Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -96,8 +95,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | Example | @@ -114,10 +111,10 @@ if res is not None: | `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | - ### Response **[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** + ### Errors | Error Object | Status Code | Content Type | diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index aaa828ec..2cfba980 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -38,8 +38,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | Example | @@ -59,10 +57,10 @@ if res is not None: | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - ### Response **[models.ChatCompletionResponse](../../models/chatcompletionresponse.md)** + ### Errors | Error Object | Status Code | Content Type | @@ -70,6 +68,7 @@ if res is not None: | models.HTTPValidationError | 422 | application/json | | models.SDKError | 4xx-5xx | */* | + ## stream Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -99,8 +98,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | Example | @@ -120,10 +117,10 @@ if res is not None: | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | - ### Response **[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** + ### Errors | Error Object | Status Code | Content Type | diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 2f9f2c72..38b8b53a 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -32,8 +32,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | @@ -43,10 +41,10 @@ if res is not None: | `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - ### Response **[models.EmbeddingResponse](../../models/embeddingresponse.md)** + ### Errors | Error Object | Status Code | Content Type | diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index ec90fd37..c931f173 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -42,8 +42,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | @@ -51,16 +49,17 @@ if res is not None: | `file` | [models.File](../../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - ### Response **[models.UploadFileOut](../../models/uploadfileout.md)** + ### Errors | Error Object | Status Code | Content Type | | --------------- | --------------- | --------------- | | models.SDKError | 4xx-5xx | */* | + ## list Returns a list of files that belong to the user's organization. @@ -84,24 +83,23 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - ### Response **[models.ListFilesOut](../../models/listfilesout.md)** + ### Errors | Error Object | Status Code | Content Type | | --------------- | --------------- | --------------- | | models.SDKError | 4xx-5xx | */* | + ## retrieve Returns information about a specific file. @@ -125,8 +123,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | @@ -134,16 +130,17 @@ if res is not None: | `file_id` | *str* | :heavy_check_mark: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - ### Response **[models.RetrieveFileOut](../../models/retrievefileout.md)** + ### Errors | Error Object | Status Code | Content Type | | --------------- | --------------- | --------------- | | models.SDKError | 4xx-5xx | */* | + ## delete Delete a file. 
@@ -167,8 +164,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | @@ -176,10 +171,10 @@ if res is not None: | `file_id` | *str* | :heavy_check_mark: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - ### Response **[models.DeleteFileOut](../../models/deletefileout.md)** + ### Errors | Error Object | Status Code | Content Type | diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index ef8b1dc6..2f3d8fe6 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -33,8 +33,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | Example | @@ -51,10 +49,10 @@ if res is not None: | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - ### Response **[models.FIMCompletionResponse](../../models/fimcompletionresponse.md)** + ### Errors | Error Object | Status Code | Content Type | @@ -62,6 +60,7 @@ if res is not None: | models.HTTPValidationError | 422 | application/json | | models.SDKError | 4xx-5xx | */* | + ## stream Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -86,8 +85,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | Example | @@ -104,10 +101,10 @@ if res is not None: | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - ### Response **[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** + ### Errors | Error Object | Status Code | Content Type | diff --git a/docs/sdks/finetuning/README.md b/docs/sdks/finetuning/README.md index 2b357f2d..fdcbd62a 100644 --- a/docs/sdks/finetuning/README.md +++ b/docs/sdks/finetuning/README.md @@ -1,5 +1,2 @@ # FineTuning -(*fine_tuning*) - -### Available Operations - +(*fine_tuning*) \ No newline at end of file diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 36d452e3..cecff0e7 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -1,6 +1,8 @@ # Jobs (*fine_tuning.jobs*) +## Overview + ### Available Operations * [list](#list) - Get Fine Tuning Jobs @@ -32,8 +34,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | @@ -49,16 +49,17 @@ if res is not None: | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| - ### Response **[models.JobsOut](../../models/jobsout.md)** + ### Errors | Error Object | Status Code | Content Type | | --------------- | --------------- | --------------- | | models.SDKError | 4xx-5xx | */* | + ## create Create a new fine-tuning job, it will be queued for processing. @@ -82,8 +83,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | @@ -98,16 +97,17 @@ if res is not None: | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - ### Response **[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse](../../models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md)** + ### Errors | Error Object | Status Code | Content Type | | --------------- | --------------- | --------------- | | models.SDKError | 4xx-5xx | */* | + ## get Get a fine-tuned job details by its UUID. @@ -131,8 +131,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | @@ -140,16 +138,17 @@ if res is not None: | `job_id` | *str* | :heavy_check_mark: | The ID of the job to analyse. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - ### Response **[models.DetailedJobOut](../../models/detailedjobout.md)** + ### Errors | Error Object | Status Code | Content Type | | --------------- | --------------- | --------------- | | models.SDKError | 4xx-5xx | */* | + ## cancel Request the cancellation of a fine tuning job. @@ -173,8 +172,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | @@ -182,16 +179,17 @@ if res is not None: | `job_id` | *str* | :heavy_check_mark: | The ID of the job to cancel. 
| | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - ### Response **[models.DetailedJobOut](../../models/detailedjobout.md)** + ### Errors | Error Object | Status Code | Content Type | | --------------- | --------------- | --------------- | | models.SDKError | 4xx-5xx | */* | + ## start Request the start of a validated fine tuning job. @@ -215,8 +213,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | @@ -224,10 +220,10 @@ if res is not None: | `job_id` | *str* | :heavy_check_mark: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - ### Response **[models.DetailedJobOut](../../models/detailedjobout.md)** + ### Errors | Error Object | Status Code | Content Type | diff --git a/docs/sdks/mistral/README.md b/docs/sdks/mistral/README.md index d4e985eb..0189a6c4 100644 --- a/docs/sdks/mistral/README.md +++ b/docs/sdks/mistral/README.md @@ -1,9 +1,5 @@ # Mistral SDK - ## Overview -Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. - -### Available Operations - +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. 
\ No newline at end of file diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 00fca08b..a5d05eb1 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -37,18 +37,16 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | - ### Response **[models.ModelList](../../models/modellist.md)** + ### Errors | Error Object | Status Code | Content Type | @@ -56,6 +54,7 @@ if res is not None: | models.HTTPValidationError | 422 | application/json | | models.SDKError | 4xx-5xx | */* | + ## retrieve Retrieve a model information. @@ -79,8 +78,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | Example | @@ -88,10 +85,10 @@ if res is not None: | `model_id` | *str* | :heavy_check_mark: | The ID of the model to retrieve. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - ### Response **[models.ModelCard](../../models/modelcard.md)** + ### Errors | Error Object | Status Code | Content Type | @@ -99,6 +96,7 @@ if res is not None: | models.HTTPValidationError | 422 | application/json | | models.SDKError | 4xx-5xx | */* | + ## delete Delete a fine-tuned model. 
@@ -122,8 +120,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | Example | @@ -131,10 +127,10 @@ if res is not None: | `model_id` | *str* | :heavy_check_mark: | The ID of the model to delete. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - ### Response **[models.DeleteModelOut](../../models/deletemodelout.md)** + ### Errors | Error Object | Status Code | Content Type | @@ -142,6 +138,7 @@ if res is not None: | models.HTTPValidationError | 422 | application/json | | models.SDKError | 4xx-5xx | */* | + ## update Update a model name or description. @@ -165,8 +162,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | Example | @@ -176,16 +171,17 @@ if res is not None: | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - ### Response **[models.FTModelOut](../../models/ftmodelout.md)** + ### Errors | Error Object | Status Code | Content Type | | --------------- | --------------- | --------------- | | models.SDKError | 4xx-5xx | */* | + ## archive Archive a fine-tuned model. @@ -209,8 +205,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | Example | @@ -218,16 +212,17 @@ if res is not None: | `model_id` | *str* | :heavy_check_mark: | The ID of the model to archive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | - ### Response **[models.ArchiveFTModelOut](../../models/archiveftmodelout.md)** + ### Errors | Error Object | Status Code | Content Type | | --------------- | --------------- | --------------- | | models.SDKError | 4xx-5xx | */* | + ## unarchive Un-archive a fine-tuned model. @@ -251,8 +246,6 @@ if res is not None: ``` - - ### Parameters | Parameter | Type | Required | Description | Example | @@ -260,10 +253,10 @@ if res is not None: | `model_id` | *str* | :heavy_check_mark: | The ID of the model to unarchive. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | - ### Response **[models.UnarchiveFTModelOut](../../models/unarchiveftmodelout.md)** + ### Errors | Error Object | Status Code | Content Type | diff --git a/pyproject.toml b/pyproject.toml index 02570db3..35bcf5e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.0.2" +version = "1.0.3" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README.md" diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index e0821a7e..a8699133 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -29,9 +29,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.2" - gen_version: str = "2.402.5" - user_agent: str = "speakeasy-sdk/python 1.0.2 2.402.5 0.0.2 mistralai" + sdk_version: str = "1.0.3" + gen_version: str = "2.404.11" + user_agent: str = "speakeasy-sdk/python 1.0.3 2.404.11 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From 290e1bf27c1eca95bf8946a498eb70f8b9a9063a Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Fri, 13 Sep 2024 18:20:23 +0200 Subject: [PATCH 076/223] [API-47] Images support and other updates (#144) * fix gcp * spec update * generation with new version * add image example * fix typo * use correct method * fix typo * fix message role --- .speakeasy/gen.lock | 654 +++++++++++------ .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 30 +- MIGRATION.md | 2 +- README.md | 125 ++-- USAGE.md | 27 +- .../agentscompletionrequesttoolchoice.md | 19 +- ...agentscompletionstreamrequesttoolchoice.md | 19 +- .../models/{modelcard.md => basemodelcard.md} | 7 +- docs/models/chatcompletionrequest.md | 2 +- .../models/chatcompletionrequesttoolchoice.md | 17 + .../chatcompletionstreamrequesttoolchoice.md | 19 +- docs/models/content.md | 4 +- docs/models/contentchunk.md | 18 +- docs/models/data.md | 17 + docs/models/ftmodelcard.md | 23 + docs/models/functionname.md | 10 + docs/models/imageurl.md | 9 + docs/models/imageurlchunk.md | 11 + docs/models/imageurlchunkimageurl.md | 17 + docs/models/imageurlchunktype.md | 8 + docs/models/modelcapabilities.md | 3 +- docs/models/modellist.md | 8 +- 
docs/models/responseformat.md | 6 +- ...responseretrievemodelv1modelsmodelidget.md | 19 + docs/models/textchunk.md | 8 +- docs/models/textchunktype.md | 8 + docs/models/tool.md | 8 +- docs/models/toolchoice.md | 13 +- docs/models/toolchoiceenum.md | 11 + docs/models/tooltooltypes.md | 8 - docs/models/usermessagecontent.md | 4 +- docs/sdks/agents/README.md | 14 +- docs/sdks/chat/README.md | 14 +- docs/sdks/embeddings/README.md | 3 +- docs/sdks/files/README.md | 8 +- docs/sdks/fim/README.md | 2 - docs/sdks/jobs/README.md | 5 - docs/sdks/models/README.md | 8 +- .../async_chat_with_image_no_streaming.py | 36 + packages/mistralai_azure/.speakeasy/gen.lock | 184 ++--- packages/mistralai_azure/.speakeasy/gen.yaml | 2 +- .../models/chatcompletionrequesttoolchoice.md | 19 +- .../models/chatcompletionstreamrequest.md | 2 +- .../chatcompletionstreamrequesttoolchoice.md | 17 + .../mistralai_azure/docs/models/content.md | 4 +- .../docs/models/contentchunk.md | 12 +- .../docs/models/functionname.md | 10 + .../docs/models/responseformat.md | 6 +- .../mistralai_azure/docs/models/textchunk.md | 8 +- packages/mistralai_azure/docs/models/tool.md | 8 +- .../mistralai_azure/docs/models/toolchoice.md | 13 +- .../docs/models/toolchoiceenum.md | 11 + .../docs/models/tooltooltypes.md | 8 - packages/mistralai_azure/docs/models/type.md | 8 + .../docs/models/usermessagecontent.md | 4 +- packages/mistralai_azure/poetry.lock | 222 +++--- packages/mistralai_azure/pyproject.toml | 5 +- .../mistralai_azure/scripts/prepare-readme.py | 2 +- .../src/mistralai_azure/_hooks/sdkhooks.py | 27 +- .../src/mistralai_azure/_hooks/types.py | 36 +- .../src/mistralai_azure/basesdk.py | 97 ++- .../src/mistralai_azure/chat.py | 396 ++++++---- .../src/mistralai_azure/models/__init__.py | 172 ++++- .../models/assistantmessage.py | 23 +- .../models/chatcompletionchoice.py | 10 +- .../models/chatcompletionrequest.py | 77 +- .../models/chatcompletionresponse.py | 8 +- .../models/chatcompletionstreamrequest.py | 79 +- 
.../mistralai_azure/models/completionchunk.py | 13 +- .../mistralai_azure/models/completionevent.py | 3 +- .../models/completionresponsestreamchoice.py | 14 +- .../mistralai_azure/models/contentchunk.py | 15 +- .../mistralai_azure/models/deltamessage.py | 21 +- .../src/mistralai_azure/models/function.py | 5 +- .../mistralai_azure/models/functioncall.py | 4 +- .../mistralai_azure/models/functionname.py | 17 + .../models/httpvalidationerror.py | 4 +- .../mistralai_azure/models/responseformat.py | 13 +- .../mistralai_azure/models/responseformats.py | 8 + .../src/mistralai_azure/models/security.py | 15 +- .../mistralai_azure/models/systemmessage.py | 11 +- .../src/mistralai_azure/models/textchunk.py | 13 +- .../src/mistralai_azure/models/tool.py | 17 +- .../src/mistralai_azure/models/toolcall.py | 16 +- .../src/mistralai_azure/models/toolchoice.py | 29 + .../mistralai_azure/models/toolchoiceenum.py | 7 + .../src/mistralai_azure/models/toolmessage.py | 23 +- .../src/mistralai_azure/models/tooltypes.py | 8 + .../src/mistralai_azure/models/usageinfo.py | 5 +- .../src/mistralai_azure/models/usermessage.py | 11 +- .../mistralai_azure/models/validationerror.py | 5 +- .../src/mistralai_azure/sdkconfiguration.py | 14 +- .../src/mistralai_azure/utils/__init__.py | 11 +- .../src/mistralai_azure/utils/forms.py | 19 +- .../src/mistralai_azure/utils/headers.py | 16 +- .../src/mistralai_azure/utils/logger.py | 6 + .../src/mistralai_azure/utils/queryparams.py | 30 +- .../src/mistralai_azure/utils/retries.py | 3 +- .../src/mistralai_azure/utils/security.py | 18 +- .../src/mistralai_azure/utils/serializers.py | 25 +- .../src/mistralai_azure/utils/url.py | 21 +- .../src/mistralai_azure/utils/values.py | 6 + packages/mistralai_gcp/.speakeasy/gen.lock | 214 +++--- packages/mistralai_gcp/.speakeasy/gen.yaml | 2 +- .../models/chatcompletionrequesttoolchoice.md | 19 +- .../models/chatcompletionstreamrequest.md | 2 +- .../chatcompletionstreamrequesttoolchoice.md | 17 + 
packages/mistralai_gcp/docs/models/content.md | 4 +- .../mistralai_gcp/docs/models/contentchunk.md | 12 +- .../mistralai_gcp/docs/models/functionname.md | 10 + .../docs/models/responseformat.md | 6 +- .../mistralai_gcp/docs/models/textchunk.md | 8 +- packages/mistralai_gcp/docs/models/tool.md | 8 +- .../mistralai_gcp/docs/models/toolchoice.md | 13 +- .../docs/models/toolchoiceenum.md | 11 + .../docs/models/tooltooltypes.md | 8 - packages/mistralai_gcp/docs/models/type.md | 8 + .../docs/models/usermessagecontent.md | 4 +- packages/mistralai_gcp/poetry.lock | 224 +++--- packages/mistralai_gcp/pyproject.toml | 7 +- .../mistralai_gcp/scripts/prepare-readme.py | 2 +- .../src/mistralai_gcp/_hooks/sdkhooks.py | 27 +- .../src/mistralai_gcp/_hooks/types.py | 36 +- .../src/mistralai_gcp/basesdk.py | 97 ++- .../mistralai_gcp/src/mistralai_gcp/chat.py | 396 ++++++---- .../mistralai_gcp/src/mistralai_gcp/fim.py | 276 ++++--- .../src/mistralai_gcp/models/__init__.py | 196 ++++- .../mistralai_gcp/models/assistantmessage.py | 23 +- .../models/chatcompletionchoice.py | 10 +- .../models/chatcompletionrequest.py | 74 +- .../models/chatcompletionresponse.py | 8 +- .../models/chatcompletionstreamrequest.py | 76 +- .../mistralai_gcp/models/completionchunk.py | 13 +- .../mistralai_gcp/models/completionevent.py | 3 +- .../models/completionresponsestreamchoice.py | 14 +- .../src/mistralai_gcp/models/contentchunk.py | 15 +- .../src/mistralai_gcp/models/deltamessage.py | 21 +- .../models/fimcompletionrequest.py | 39 +- .../models/fimcompletionresponse.py | 8 +- .../models/fimcompletionstreamrequest.py | 39 +- .../src/mistralai_gcp/models/function.py | 5 +- .../src/mistralai_gcp/models/functioncall.py | 4 +- .../src/mistralai_gcp/models/functionname.py | 17 + .../models/httpvalidationerror.py | 4 +- .../mistralai_gcp/models/responseformat.py | 13 +- .../mistralai_gcp/models/responseformats.py | 8 + .../src/mistralai_gcp/models/security.py | 15 +- .../src/mistralai_gcp/models/systemmessage.py 
| 11 +- .../src/mistralai_gcp/models/textchunk.py | 13 +- .../src/mistralai_gcp/models/tool.py | 17 +- .../src/mistralai_gcp/models/toolcall.py | 16 +- .../src/mistralai_gcp/models/toolchoice.py | 29 + .../mistralai_gcp/models/toolchoiceenum.py | 7 + .../src/mistralai_gcp/models/toolmessage.py | 23 +- .../src/mistralai_gcp/models/tooltypes.py | 8 + .../src/mistralai_gcp/models/usageinfo.py | 5 +- .../src/mistralai_gcp/models/usermessage.py | 11 +- .../mistralai_gcp/models/validationerror.py | 5 +- .../mistralai_gcp/src/mistralai_gcp/sdk.py | 13 +- .../src/mistralai_gcp/sdkconfiguration.py | 14 +- .../src/mistralai_gcp/utils/__init__.py | 11 +- .../src/mistralai_gcp/utils/forms.py | 19 +- .../src/mistralai_gcp/utils/headers.py | 16 +- .../src/mistralai_gcp/utils/logger.py | 6 + .../src/mistralai_gcp/utils/queryparams.py | 30 +- .../src/mistralai_gcp/utils/retries.py | 3 +- .../src/mistralai_gcp/utils/security.py | 18 +- .../src/mistralai_gcp/utils/serializers.py | 25 +- .../src/mistralai_gcp/utils/url.py | 21 +- .../src/mistralai_gcp/utils/values.py | 6 + pyproject.toml | 2 +- src/mistralai/_hooks/sdkhooks.py | 27 +- src/mistralai/_hooks/types.py | 36 +- src/mistralai/agents.py | 436 +++++++---- src/mistralai/basesdk.py | 95 ++- src/mistralai/chat.py | 404 +++++++---- src/mistralai/embeddings.py | 126 ++-- src/mistralai/files.py | 444 +++++++----- src/mistralai/fim.py | 284 +++++--- src/mistralai/fine_tuning.py | 5 +- src/mistralai/jobs.py | 635 +++++++++------- src/mistralai/models/__init__.py | 545 ++++++++++++-- .../models/agentscompletionrequest.py | 58 +- .../models/agentscompletionstreamrequest.py | 60 +- src/mistralai/models/archiveftmodelout.py | 8 +- src/mistralai/models/assistantmessage.py | 15 +- .../models/{modelcard.py => basemodelcard.py} | 51 +- src/mistralai/models/chatcompletionchoice.py | 6 +- src/mistralai/models/chatcompletionrequest.py | 68 +- .../models/chatcompletionresponse.py | 8 +- .../models/chatcompletionstreamrequest.py | 70 +- 
src/mistralai/models/checkpointout.py | 5 +- src/mistralai/models/completionchunk.py | 13 +- src/mistralai/models/completionevent.py | 3 +- .../models/completionresponsestreamchoice.py | 18 +- src/mistralai/models/contentchunk.py | 23 +- ...elete_model_v1_models_model_id_deleteop.py | 7 +- src/mistralai/models/deletefileout.py | 5 +- src/mistralai/models/deletemodelout.py | 5 +- src/mistralai/models/deltamessage.py | 13 +- src/mistralai/models/detailedjobout.py | 66 +- src/mistralai/models/embeddingrequest.py | 13 +- src/mistralai/models/embeddingresponse.py | 7 +- src/mistralai/models/embeddingresponsedata.py | 5 +- src/mistralai/models/eventout.py | 13 +- .../models/files_api_routes_delete_fileop.py | 7 +- .../files_api_routes_retrieve_fileop.py | 7 +- .../models/files_api_routes_upload_fileop.py | 35 +- src/mistralai/models/fileschema.py | 31 +- src/mistralai/models/fimcompletionrequest.py | 31 +- src/mistralai/models/fimcompletionresponse.py | 8 +- .../models/fimcompletionstreamrequest.py | 31 +- src/mistralai/models/finetuneablemodel.py | 8 +- .../models/ftmodelcapabilitiesout.py | 6 +- src/mistralai/models/ftmodelcard.py | 103 +++ src/mistralai/models/ftmodelout.py | 38 +- src/mistralai/models/function.py | 5 +- src/mistralai/models/functioncall.py | 4 +- src/mistralai/models/functionname.py | 17 + src/mistralai/models/githubrepositoryin.py | 19 +- src/mistralai/models/githubrepositoryout.py | 19 +- src/mistralai/models/httpvalidationerror.py | 4 +- src/mistralai/models/imageurl.py | 48 ++ src/mistralai/models/imageurlchunk.py | 32 + src/mistralai/models/jobin.py | 27 +- src/mistralai/models/jobmetadataout.py | 37 +- src/mistralai/models/jobout.py | 62 +- ..._fine_tuning_archive_fine_tuned_modelop.py | 7 +- ...es_fine_tuning_cancel_fine_tuning_jobop.py | 7 +- ...es_fine_tuning_create_fine_tuning_jobop.py | 5 +- ...outes_fine_tuning_get_fine_tuning_jobop.py | 7 +- ...utes_fine_tuning_get_fine_tuning_jobsop.py | 99 ++- 
...tes_fine_tuning_start_fine_tuning_jobop.py | 7 +- ...ine_tuning_unarchive_fine_tuned_modelop.py | 7 +- ...s_fine_tuning_update_fine_tuned_modelop.py | 13 +- src/mistralai/models/jobsout.py | 8 +- src/mistralai/models/legacyjobmetadataout.py | 51 +- src/mistralai/models/listfilesout.py | 4 +- src/mistralai/models/metricout.py | 17 +- src/mistralai/models/modelcapabilities.py | 9 +- src/mistralai/models/modellist.py | 28 +- src/mistralai/models/responseformat.py | 13 +- src/mistralai/models/responseformats.py | 8 + ...retrieve_model_v1_models_model_id_getop.py | 29 +- src/mistralai/models/retrievefileout.py | 31 +- src/mistralai/models/security.py | 15 +- src/mistralai/models/systemmessage.py | 11 +- src/mistralai/models/textchunk.py | 13 +- src/mistralai/models/tool.py | 17 +- src/mistralai/models/toolcall.py | 16 +- src/mistralai/models/toolchoice.py | 29 + src/mistralai/models/toolchoiceenum.py | 7 + src/mistralai/models/toolmessage.py | 15 +- src/mistralai/models/tooltypes.py | 8 + src/mistralai/models/trainingfile.py | 4 +- src/mistralai/models/trainingparameters.py | 33 +- src/mistralai/models/trainingparametersin.py | 37 +- src/mistralai/models/unarchiveftmodelout.py | 8 +- src/mistralai/models/updateftmodelin.py | 12 +- src/mistralai/models/uploadfileout.py | 31 +- src/mistralai/models/usageinfo.py | 5 +- src/mistralai/models/usermessage.py | 11 +- src/mistralai/models/validationerror.py | 5 +- src/mistralai/models/wandbintegration.py | 18 +- src/mistralai/models/wandbintegrationout.py | 17 +- src/mistralai/models_.py | 686 ++++++++++-------- src/mistralai/sdk.py | 43 +- src/mistralai/sdkconfiguration.py | 14 +- src/mistralai/utils/__init__.py | 7 +- src/mistralai/utils/logger.py | 5 +- src/mistralai/utils/retries.py | 3 +- src/mistralai/utils/security.py | 19 +- 273 files changed, 7638 insertions(+), 3444 deletions(-) rename docs/models/{modelcard.md => basemodelcard.md} (89%) create mode 100644 docs/models/chatcompletionrequesttoolchoice.md create mode 
100644 docs/models/data.md create mode 100644 docs/models/ftmodelcard.md create mode 100644 docs/models/functionname.md create mode 100644 docs/models/imageurl.md create mode 100644 docs/models/imageurlchunk.md create mode 100644 docs/models/imageurlchunkimageurl.md create mode 100644 docs/models/imageurlchunktype.md create mode 100644 docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md create mode 100644 docs/models/textchunktype.md create mode 100644 docs/models/toolchoiceenum.md delete mode 100644 docs/models/tooltooltypes.md create mode 100755 examples/async_chat_with_image_no_streaming.py create mode 100644 packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md create mode 100644 packages/mistralai_azure/docs/models/functionname.md create mode 100644 packages/mistralai_azure/docs/models/toolchoiceenum.md delete mode 100644 packages/mistralai_azure/docs/models/tooltooltypes.md create mode 100644 packages/mistralai_azure/docs/models/type.md create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/functionname.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/responseformats.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py create mode 100644 packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md create mode 100644 packages/mistralai_gcp/docs/models/functionname.md create mode 100644 packages/mistralai_gcp/docs/models/toolchoiceenum.md delete mode 100644 packages/mistralai_gcp/docs/models/tooltooltypes.md create mode 100644 packages/mistralai_gcp/docs/models/type.md create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py create 
mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py rename src/mistralai/models/{modelcard.py => basemodelcard.py} (66%) create mode 100644 src/mistralai/models/ftmodelcard.py create mode 100644 src/mistralai/models/functionname.py create mode 100644 src/mistralai/models/imageurl.py create mode 100644 src/mistralai/models/imageurlchunk.py create mode 100644 src/mistralai/models/responseformats.py create mode 100644 src/mistralai/models/toolchoice.py create mode 100644 src/mistralai/models/toolchoiceenum.py create mode 100644 src/mistralai/models/tooltypes.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 6b527685..d333d1a2 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: ad1a7d6946828a089ca3831e257d307d + docChecksum: e75ca54601920b2770d9a559b299d272 docVersion: 0.0.2 - speakeasyVersion: 1.382.0 - generationVersion: 2.404.11 - releaseVersion: 1.0.3 - configChecksum: 818970b881ec69b05f6660ca354f26f5 + speakeasyVersion: 1.396.7 + generationVersion: 2.415.6 + releaseVersion: 1.1.1 + configChecksum: 450e609764e2b24aa8ece63616b81348 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,8 +14,9 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.4.1 + core: 5.5.3 defaultEnabledRetries: 0.2.0 + enumUnions: 0.1.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 flatRequests: 1.0.1 @@ -29,7 +30,7 @@ features: nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.0 - retries: 3.0.0 + retries: 3.0.2 sdkHooks: 1.0.0 serverEvents: 1.0.2 serverEventsSentinels: 0.1.0 @@ 
-37,17 +38,159 @@ features: unions: 3.0.2 uploadStreams: 1.0.0 generatedFiles: - - src/mistralai/sdkconfiguration.py - - src/mistralai/models_.py - - src/mistralai/files.py - - src/mistralai/jobs.py - - src/mistralai/fine_tuning.py - - src/mistralai/chat.py - - src/mistralai/fim.py - - src/mistralai/agents.py - - src/mistralai/embeddings.py - - src/mistralai/sdk.py + - .gitattributes - .vscode/settings.json + - USAGE.md + - docs/models/agentscompletionrequest.md + - docs/models/agentscompletionrequestmessages.md + - docs/models/agentscompletionrequeststop.md + - docs/models/agentscompletionrequesttoolchoice.md + - docs/models/agentscompletionstreamrequest.md + - docs/models/agentscompletionstreamrequestmessages.md + - docs/models/agentscompletionstreamrequeststop.md + - docs/models/agentscompletionstreamrequesttoolchoice.md + - docs/models/archiveftmodelout.md + - docs/models/archiveftmodeloutobject.md + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagerole.md + - docs/models/basemodelcard.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequestmessages.md + - docs/models/chatcompletionstreamrequeststop.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/checkpointout.md + - docs/models/completionchunk.md + - docs/models/completionevent.md + - docs/models/completionresponsestreamchoice.md + - docs/models/completionresponsestreamchoicefinishreason.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/data.md + - docs/models/deletefileout.md + - docs/models/deletemodelout.md + - docs/models/deletemodelv1modelsmodeliddeleterequest.md + - docs/models/deltamessage.md + - docs/models/detailedjobout.md + - docs/models/detailedjoboutintegrations.md + - 
docs/models/detailedjoboutobject.md + - docs/models/detailedjoboutrepositories.md + - docs/models/detailedjoboutstatus.md + - docs/models/embeddingrequest.md + - docs/models/embeddingresponse.md + - docs/models/embeddingresponsedata.md + - docs/models/eventout.md + - docs/models/file.md + - docs/models/filesapiroutesdeletefilerequest.md + - docs/models/filesapiroutesretrievefilerequest.md + - docs/models/filesapiroutesuploadfilemultipartbodyparams.md + - docs/models/filesapiroutesuploadfilepurpose.md + - docs/models/fileschema.md + - docs/models/fileschemapurpose.md + - docs/models/fimcompletionrequest.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - docs/models/finetuneablemodel.md + - docs/models/finishreason.md + - docs/models/ftmodelcapabilitiesout.md + - docs/models/ftmodelcard.md + - docs/models/ftmodelout.md + - docs/models/ftmodeloutobject.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functionname.md + - docs/models/githubrepositoryin.md + - docs/models/githubrepositoryintype.md + - docs/models/githubrepositoryout.md + - docs/models/githubrepositoryouttype.md + - docs/models/httpvalidationerror.md + - docs/models/imageurl.md + - docs/models/imageurlchunk.md + - docs/models/imageurlchunkimageurl.md + - docs/models/imageurlchunktype.md + - docs/models/inputs.md + - docs/models/integrations.md + - docs/models/jobin.md + - docs/models/jobinintegrations.md + - docs/models/jobinrepositories.md + - docs/models/jobmetadataout.md + - docs/models/jobout.md + - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md + - 
docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md + - docs/models/jobsout.md + - docs/models/jobsoutobject.md + - docs/models/legacyjobmetadataout.md + - docs/models/legacyjobmetadataoutobject.md + - docs/models/listfilesout.md + - docs/models/loc.md + - docs/models/messages.md + - docs/models/metricout.md + - docs/models/modelcapabilities.md + - docs/models/modellist.md + - docs/models/object.md + - docs/models/purpose.md + - docs/models/queryparamstatus.md + - docs/models/repositories.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/retrievefileout.md + - docs/models/retrievefileoutpurpose.md + - docs/models/retrievemodelv1modelsmodelidgetrequest.md + - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md + - docs/models/role.md + - docs/models/sampletype.md + - docs/models/security.md + - docs/models/source.md + - docs/models/status.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/textchunk.md + - docs/models/textchunktype.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolmessage.md + - docs/models/toolmessagerole.md + - docs/models/tooltypes.md + - docs/models/trainingfile.md + - docs/models/trainingparameters.md + - docs/models/trainingparametersin.md + - docs/models/type.md + - docs/models/unarchiveftmodelout.md + - docs/models/unarchiveftmodeloutobject.md + - docs/models/updateftmodelin.md + - docs/models/uploadfileout.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md + - docs/models/wandbintegration.md + - docs/models/wandbintegrationout.md + - 
docs/models/wandbintegrationtype.md + - docs/sdks/agents/README.md + - docs/sdks/chat/README.md + - docs/sdks/embeddings/README.md + - docs/sdks/files/README.md + - docs/sdks/fim/README.md + - docs/sdks/finetuning/README.md + - docs/sdks/jobs/README.md + - docs/sdks/mistral/README.md + - docs/sdks/models/README.md - poetry.toml - py.typed - pylintrc @@ -55,9 +198,109 @@ generatedFiles: - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai/__init__.py + - src/mistralai/_hooks/__init__.py + - src/mistralai/_hooks/sdkhooks.py + - src/mistralai/_hooks/types.py + - src/mistralai/agents.py - src/mistralai/basesdk.py + - src/mistralai/chat.py + - src/mistralai/embeddings.py + - src/mistralai/files.py + - src/mistralai/fim.py + - src/mistralai/fine_tuning.py - src/mistralai/httpclient.py + - src/mistralai/jobs.py + - src/mistralai/models/__init__.py + - src/mistralai/models/agentscompletionrequest.py + - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/archiveftmodelout.py + - src/mistralai/models/assistantmessage.py + - src/mistralai/models/basemodelcard.py + - src/mistralai/models/chatcompletionchoice.py + - src/mistralai/models/chatcompletionrequest.py + - src/mistralai/models/chatcompletionresponse.py + - src/mistralai/models/chatcompletionstreamrequest.py + - src/mistralai/models/checkpointout.py + - src/mistralai/models/completionchunk.py + - src/mistralai/models/completionevent.py + - src/mistralai/models/completionresponsestreamchoice.py + - src/mistralai/models/contentchunk.py + - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py + - src/mistralai/models/deletefileout.py + - src/mistralai/models/deletemodelout.py + - src/mistralai/models/deltamessage.py + - src/mistralai/models/detailedjobout.py + - src/mistralai/models/embeddingrequest.py + - src/mistralai/models/embeddingresponse.py + - src/mistralai/models/embeddingresponsedata.py + - src/mistralai/models/eventout.py + - 
src/mistralai/models/files_api_routes_delete_fileop.py + - src/mistralai/models/files_api_routes_retrieve_fileop.py + - src/mistralai/models/files_api_routes_upload_fileop.py + - src/mistralai/models/fileschema.py + - src/mistralai/models/fimcompletionrequest.py + - src/mistralai/models/fimcompletionresponse.py + - src/mistralai/models/fimcompletionstreamrequest.py + - src/mistralai/models/finetuneablemodel.py + - src/mistralai/models/ftmodelcapabilitiesout.py + - src/mistralai/models/ftmodelcard.py + - src/mistralai/models/ftmodelout.py + - src/mistralai/models/function.py + - src/mistralai/models/functioncall.py + - src/mistralai/models/functionname.py + - src/mistralai/models/githubrepositoryin.py + - src/mistralai/models/githubrepositoryout.py + - src/mistralai/models/httpvalidationerror.py + - src/mistralai/models/imageurl.py + - src/mistralai/models/imageurlchunk.py + - src/mistralai/models/jobin.py + - src/mistralai/models/jobmetadataout.py + - src/mistralai/models/jobout.py + - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py + - src/mistralai/models/jobsout.py + - src/mistralai/models/legacyjobmetadataout.py + - src/mistralai/models/listfilesout.py + - src/mistralai/models/metricout.py + - src/mistralai/models/modelcapabilities.py + - src/mistralai/models/modellist.py + - src/mistralai/models/responseformat.py + - src/mistralai/models/responseformats.py + - 
src/mistralai/models/retrieve_model_v1_models_model_id_getop.py + - src/mistralai/models/retrievefileout.py + - src/mistralai/models/sampletype.py + - src/mistralai/models/sdkerror.py + - src/mistralai/models/security.py + - src/mistralai/models/source.py + - src/mistralai/models/systemmessage.py + - src/mistralai/models/textchunk.py + - src/mistralai/models/tool.py + - src/mistralai/models/toolcall.py + - src/mistralai/models/toolchoice.py + - src/mistralai/models/toolchoiceenum.py + - src/mistralai/models/toolmessage.py + - src/mistralai/models/tooltypes.py + - src/mistralai/models/trainingfile.py + - src/mistralai/models/trainingparameters.py + - src/mistralai/models/trainingparametersin.py + - src/mistralai/models/unarchiveftmodelout.py + - src/mistralai/models/updateftmodelin.py + - src/mistralai/models/uploadfileout.py + - src/mistralai/models/usageinfo.py + - src/mistralai/models/usermessage.py + - src/mistralai/models/validationerror.py + - src/mistralai/models/wandbintegration.py + - src/mistralai/models/wandbintegrationout.py + - src/mistralai/models_.py - src/mistralai/py.typed + - src/mistralai/sdk.py + - src/mistralai/sdkconfiguration.py - src/mistralai/types/__init__.py - src/mistralai/types/basemodel.py - src/mistralai/utils/__init__.py @@ -75,227 +318,156 @@ generatedFiles: - src/mistralai/utils/serializers.py - src/mistralai/utils/url.py - src/mistralai/utils/values.py - - src/mistralai/models/sdkerror.py - - src/mistralai/models/modellist.py - - src/mistralai/models/modelcard.py - - src/mistralai/models/modelcapabilities.py - - src/mistralai/models/httpvalidationerror.py - - src/mistralai/models/validationerror.py - - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py - - src/mistralai/models/deletemodelout.py - - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py - - src/mistralai/models/ftmodelout.py - - src/mistralai/models/ftmodelcapabilitiesout.py - - 
src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py - - src/mistralai/models/updateftmodelin.py - - src/mistralai/models/archiveftmodelout.py - - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py - - src/mistralai/models/unarchiveftmodelout.py - - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py - - src/mistralai/models/uploadfileout.py - - src/mistralai/models/source.py - - src/mistralai/models/sampletype.py - - src/mistralai/models/files_api_routes_upload_fileop.py - - src/mistralai/models/listfilesout.py - - src/mistralai/models/fileschema.py - - src/mistralai/models/retrievefileout.py - - src/mistralai/models/files_api_routes_retrieve_fileop.py - - src/mistralai/models/deletefileout.py - - src/mistralai/models/files_api_routes_delete_fileop.py - - src/mistralai/models/jobsout.py - - src/mistralai/models/jobout.py - - src/mistralai/models/jobmetadataout.py - - src/mistralai/models/githubrepositoryout.py - - src/mistralai/models/wandbintegrationout.py - - src/mistralai/models/finetuneablemodel.py - - src/mistralai/models/trainingparameters.py - - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py - - src/mistralai/models/legacyjobmetadataout.py - - src/mistralai/models/jobin.py - - src/mistralai/models/githubrepositoryin.py - - src/mistralai/models/wandbintegration.py - - src/mistralai/models/trainingparametersin.py - - src/mistralai/models/trainingfile.py - - src/mistralai/models/detailedjobout.py - - src/mistralai/models/checkpointout.py - - src/mistralai/models/metricout.py - - src/mistralai/models/eventout.py - - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py - - 
src/mistralai/models/chatcompletionresponse.py - - src/mistralai/models/chatcompletionchoice.py - - src/mistralai/models/assistantmessage.py - - src/mistralai/models/toolcall.py - - src/mistralai/models/functioncall.py - - src/mistralai/models/usageinfo.py - - src/mistralai/models/chatcompletionrequest.py - - src/mistralai/models/tool.py - - src/mistralai/models/function.py - - src/mistralai/models/responseformat.py - - src/mistralai/models/systemmessage.py - - src/mistralai/models/contentchunk.py - - src/mistralai/models/usermessage.py - - src/mistralai/models/textchunk.py - - src/mistralai/models/toolmessage.py - - src/mistralai/models/completionevent.py - - src/mistralai/models/completionchunk.py - - src/mistralai/models/completionresponsestreamchoice.py - - src/mistralai/models/deltamessage.py - - src/mistralai/models/chatcompletionstreamrequest.py - - src/mistralai/models/fimcompletionresponse.py - - src/mistralai/models/fimcompletionrequest.py - - src/mistralai/models/fimcompletionstreamrequest.py - - src/mistralai/models/agentscompletionrequest.py - - src/mistralai/models/agentscompletionstreamrequest.py - - src/mistralai/models/embeddingresponse.py - - src/mistralai/models/embeddingresponsedata.py - - src/mistralai/models/embeddingrequest.py - - src/mistralai/models/security.py - - src/mistralai/models/__init__.py - - docs/models/modellist.md - - docs/models/modelcard.md - - docs/models/modelcapabilities.md - - docs/models/httpvalidationerror.md - - docs/models/loc.md - - docs/models/validationerror.md - - docs/models/retrievemodelv1modelsmodelidgetrequest.md - - docs/models/deletemodelout.md - - docs/models/deletemodelv1modelsmodeliddeleterequest.md - - docs/models/ftmodeloutobject.md - - docs/models/ftmodelout.md - - docs/models/ftmodelcapabilitiesout.md - - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md - - docs/models/updateftmodelin.md - - docs/models/archiveftmodeloutobject.md - - docs/models/archiveftmodelout.md - - 
docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md - - docs/models/unarchiveftmodeloutobject.md - - docs/models/unarchiveftmodelout.md - - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md - - docs/models/purpose.md - - docs/models/uploadfileout.md - - docs/models/source.md - - docs/models/sampletype.md - - docs/models/filesapiroutesuploadfilepurpose.md - - docs/models/file.md - - docs/models/filesapiroutesuploadfilemultipartbodyparams.md - - docs/models/listfilesout.md - - docs/models/fileschemapurpose.md - - docs/models/fileschema.md - - docs/models/retrievefileoutpurpose.md - - docs/models/retrievefileout.md - - docs/models/filesapiroutesretrievefilerequest.md - - docs/models/deletefileout.md - - docs/models/filesapiroutesdeletefilerequest.md - - docs/models/jobsoutobject.md - - docs/models/jobsout.md - - docs/models/status.md - - docs/models/object.md - - docs/models/integrations.md - - docs/models/repositories.md - - docs/models/jobout.md - - docs/models/jobmetadataout.md - - docs/models/githubrepositoryouttype.md - - docs/models/githubrepositoryout.md - - docs/models/type.md - - docs/models/wandbintegrationout.md - - docs/models/finetuneablemodel.md - - docs/models/trainingparameters.md - - docs/models/queryparamstatus.md - - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md - - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md - - docs/models/legacyjobmetadataoutobject.md - - docs/models/legacyjobmetadataout.md - - docs/models/jobinintegrations.md - - docs/models/jobinrepositories.md - - docs/models/jobin.md - - docs/models/githubrepositoryintype.md - - docs/models/githubrepositoryin.md - - docs/models/wandbintegrationtype.md - - docs/models/wandbintegration.md - - docs/models/trainingparametersin.md - - docs/models/trainingfile.md - - docs/models/detailedjoboutstatus.md - - docs/models/detailedjoboutobject.md - - docs/models/detailedjoboutintegrations.md - - 
docs/models/detailedjoboutrepositories.md - - docs/models/detailedjobout.md - - docs/models/checkpointout.md - - docs/models/metricout.md - - docs/models/eventout.md - - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md - - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md - - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md - - docs/models/chatcompletionresponse.md - - docs/models/finishreason.md - - docs/models/chatcompletionchoice.md - - docs/models/assistantmessagerole.md - - docs/models/assistantmessage.md - - docs/models/tooltypes.md - - docs/models/toolcall.md - - docs/models/arguments.md - - docs/models/functioncall.md - - docs/models/usageinfo.md - - docs/models/stop.md - - docs/models/messages.md - - docs/models/toolchoice.md - - docs/models/chatcompletionrequest.md - - docs/models/tooltooltypes.md - - docs/models/tool.md - - docs/models/function.md - - docs/models/responseformats.md - - docs/models/responseformat.md - - docs/models/content.md - - docs/models/role.md - - docs/models/systemmessage.md - - docs/models/contentchunk.md - - docs/models/usermessagecontent.md - - docs/models/usermessagerole.md - - docs/models/usermessage.md - - docs/models/textchunk.md - - docs/models/toolmessagerole.md - - docs/models/toolmessage.md - - docs/models/completionevent.md - - docs/models/completionchunk.md - - docs/models/completionresponsestreamchoicefinishreason.md - - docs/models/completionresponsestreamchoice.md - - docs/models/deltamessage.md - - docs/models/chatcompletionstreamrequeststop.md - - docs/models/chatcompletionstreamrequestmessages.md - - docs/models/chatcompletionstreamrequesttoolchoice.md - - docs/models/chatcompletionstreamrequest.md - - docs/models/fimcompletionresponse.md - - docs/models/fimcompletionrequeststop.md - - docs/models/fimcompletionrequest.md - - docs/models/fimcompletionstreamrequeststop.md - - docs/models/fimcompletionstreamrequest.md - - docs/models/agentscompletionrequeststop.md - - 
docs/models/agentscompletionrequestmessages.md - - docs/models/agentscompletionrequesttoolchoice.md - - docs/models/agentscompletionrequest.md - - docs/models/agentscompletionstreamrequeststop.md - - docs/models/agentscompletionstreamrequestmessages.md - - docs/models/agentscompletionstreamrequesttoolchoice.md - - docs/models/agentscompletionstreamrequest.md - - docs/models/embeddingresponse.md - - docs/models/embeddingresponsedata.md - - docs/models/inputs.md - - docs/models/embeddingrequest.md - - docs/models/security.md - - docs/sdks/mistral/README.md - - docs/models/utils/retryconfig.md - - docs/sdks/models/README.md - - docs/sdks/files/README.md - - docs/sdks/finetuning/README.md - - docs/sdks/jobs/README.md - - docs/sdks/chat/README.md - - docs/sdks/fim/README.md - - docs/sdks/agents/README.md - - docs/sdks/embeddings/README.md - - USAGE.md - - .gitattributes - - src/mistralai/_hooks/sdkhooks.py - - src/mistralai/_hooks/types.py - - src/mistralai/_hooks/__init__.py +examples: + list_models_v1_models_get: + speakeasy-default-list-models-v1-models-get: {} + retrieve_model_v1_models__model_id__get: + "": + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": ""} + "422": {} + delete_model_v1_models__model_id__delete: + "": + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "deleted": true} + "422": {} + jobs_api_routes_fine_tuning_update_fine_tuned_model: + "": + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "created": 857478, "owned_by": "", "root": "", "archived": true, "job": "905bf4aa-77f2-404e-b754-c352acfe5407"} + jobs_api_routes_fine_tuning_archive_fine_tuned_model: + "": + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + 
"200": + application/json: {"id": ""} + jobs_api_routes_fine_tuning_unarchive_fine_tuned_model: + "": + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": ""} + files_api_routes_upload_file: + speakeasy-default-files-api-routes-upload-file: + requestBody: + multipart/form-data: {"file": {}} + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "sample_type": "pretrain", "source": "upload"} + files_api_routes_list_files: + speakeasy-default-files-api-routes-list-files: + responses: + "200": + application/json: {"data": [], "object": ""} + files_api_routes_retrieve_file: + speakeasy-default-files-api-routes-retrieve-file: + parameters: + path: + file_id: "" + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "sample_type": "pretrain", "source": "repository"} + files_api_routes_delete_file: + speakeasy-default-files-api-routes-delete-file: + parameters: + path: + file_id: "" + responses: + "200": + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "deleted": false} + jobs_api_routes_fine_tuning_get_fine_tuning_jobs: + speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-jobs: + responses: + "200": + application/json: {"total": 768578} + jobs_api_routes_fine_tuning_create_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: + requestBody: + application/json: {"model": "codestral-latest"} + responses: + "200": + application/json: {"id": "7ad642c1-fc6f-4e07-a41b-cdd89dc7fa50", "auto_start": true, "model": "open-mistral-nemo", "status": "QUEUED", "job_type": "", "created_at": 519028, "modified_at": 230313, "training_files": []} + 
jobs_api_routes_fine_tuning_get_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: + parameters: + path: + job_id: "b18d8d81-fd7b-4764-a31e-475cb1f36591" + responses: + "200": + application/json: {"id": "58ccc65b-c928-4154-952e-30c048b8c2b5", "auto_start": false, "model": "open-mistral-nemo", "status": "VALIDATED", "job_type": "", "created_at": 968091, "modified_at": 32069, "training_files": [], "checkpoints": []} + jobs_api_routes_fine_tuning_cancel_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: + parameters: + path: + job_id: "03fa7112-315a-4072-a9f2-43f3f1ec962e" + responses: + "200": + application/json: {"id": "fb7dec95-f740-47b2-b8ee-d9b046936a67", "auto_start": true, "model": "mistral-large-latest", "status": "VALIDATED", "job_type": "", "created_at": 252151, "modified_at": 56775, "training_files": [], "checkpoints": []} + jobs_api_routes_fine_tuning_start_fine_tuning_job: + speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: + parameters: + path: + job_id: "0eb0f807-fb9f-4e46-9c13-4e257df6e1ba" + responses: + "200": + application/json: {"id": "bc3810ce-43e6-4fde-85a4-cd01d1f9cf8f", "auto_start": true, "model": "codestral-latest", "status": "RUNNING", "job_type": "", "created_at": 186591, "modified_at": 451468, "training_files": [], "checkpoints": []} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "mistral-small-latest", "messages": [{"content": ""}]} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + "422": {} + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "mistral-small-latest", "messages": [{"content": 
[]}]} + fim_completion_v1_fim_completions_post: + speakeasy-default-fim-completion-v1-fim-completions-post: + requestBody: + application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + "422": {} + stream_fim: + speakeasy-default-stream-fim: + requestBody: + application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + agents_completion_v1_agents_completions_post: + speakeasy-default-agents-completion-v1-agents-completions-post: + requestBody: + application/json: {"messages": [{"content": ""}], "agent_id": ""} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + "422": {} + stream_agents: + speakeasy-default-stream-agents: + requestBody: + application/json: {"messages": [{"content": []}], "agent_id": ""} + embeddings_v1_embeddings_post: + speakeasy-default-embeddings-v1-embeddings-post: + requestBody: + application/json: {"input": "", "model": "Wrangler"} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}]} + "422": {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 289fb16e..ffccc0b7 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.3 + version: 1.1.1 additionalDependencies: 
dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index d16a5187..4ef07869 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,43 +1,39 @@ -speakeasyVersion: 1.382.0 +speakeasyVersion: 1.396.7 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:becb324b11dfc5155aa0cc420ca312d0af5aecfcbad22fe90066a09561ae4e6a - sourceBlobDigest: sha256:84928a6297c3a838dce719ffa3da1e221cba968ce4a6c74d5c3bb41bf86a7e5d + sourceRevisionDigest: sha256:4e9539e6903e630aa69e48af190a24d3702f6038c7b7a92472c7942597c2a6f5 + sourceBlobDigest: sha256:3ace0709471c04a040c9763097fef0081d6c21a1be0b694dfe5991c045b76d18 tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:7fee22ae1a434b8919112c7feae87af7f1378952fcc6bde081deb55f65e5bfc2 - sourceBlobDigest: sha256:a4c011f461c73809a7d6cf1c9823d3c51d5050895aad246287ff14ac971efb8c + sourceRevisionDigest: sha256:3047ad3ff8797fded89618b375d1398d48924a3a5f9ea1000c4284a110567c43 + sourceBlobDigest: sha256:02bbcef310f965d7ad089fb46d57b39f45b47cbc8f5cf90f728db03e960bdbca tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:421a4bd55fd50ba00d6ebf2db603888009e9996b642b0499110c223fd6ca21c2 - sourceBlobDigest: sha256:1c87b4b8287f6a3083167c13ab59c5e7ac180ab7e19ad1532f3f46495cc12a26 + sourceRevisionDigest: sha256:8e2d62b2242960d958406ba266eda41a013c1459dbac67195f8e2662c04cd05f + sourceBlobDigest: sha256:9fbff48fe087e3b2f950b1cfa52b6a25143982741dc7e6750dd14d9c5bed4041 tags: - latest - - main targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:becb324b11dfc5155aa0cc420ca312d0af5aecfcbad22fe90066a09561ae4e6a - sourceBlobDigest: sha256:84928a6297c3a838dce719ffa3da1e221cba968ce4a6c74d5c3bb41bf86a7e5d - outLocation: ./packages/mistralai_azure + sourceRevisionDigest: 
sha256:4e9539e6903e630aa69e48af190a24d3702f6038c7b7a92472c7942597c2a6f5 + sourceBlobDigest: sha256:3ace0709471c04a040c9763097fef0081d6c21a1be0b694dfe5991c045b76d18 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:7fee22ae1a434b8919112c7feae87af7f1378952fcc6bde081deb55f65e5bfc2 - sourceBlobDigest: sha256:a4c011f461c73809a7d6cf1c9823d3c51d5050895aad246287ff14ac971efb8c - outLocation: ./packages/mistralai_gcp + sourceRevisionDigest: sha256:3047ad3ff8797fded89618b375d1398d48924a3a5f9ea1000c4284a110567c43 + sourceBlobDigest: sha256:02bbcef310f965d7ad089fb46d57b39f45b47cbc8f5cf90f728db03e960bdbca mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:421a4bd55fd50ba00d6ebf2db603888009e9996b642b0499110c223fd6ca21c2 - sourceBlobDigest: sha256:1c87b4b8287f6a3083167c13ab59c5e7ac180ab7e19ad1532f3f46495cc12a26 - outLocation: /github/workspace/repo + sourceRevisionDigest: sha256:8e2d62b2242960d958406ba266eda41a013c1459dbac67195f8e2662c04cd05f + sourceBlobDigest: sha256:9fbff48fe087e3b2f950b1cfa52b6a25143982741dc7e6750dd14d9c5bed4041 workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/MIGRATION.md b/MIGRATION.md index 1b83ac82..602146c5 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -229,7 +229,7 @@ async def main(): # content="What is the best French cheese?", # ), # ] - async_response = await client.chat.completstream_asynce_async( + async_response = await client.chat.stream_async( messages=messages, model="mistral-large-latest", ) diff --git a/README.md b/README.md index c73adebb..f4017d31 100644 --- a/README.md +++ b/README.md @@ -19,15 +19,46 @@ $ echo 'export MISTRAL_API_KEY=[your_key_here]' >> ~/.zshenv $ source ~/.zshenv ``` + +## Summary + +Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + + + +## Table of Contents + +* [SDK Installation](#sdk-installation) +* [IDE Support](#ide-support) +* [SDK Example Usage](#sdk-example-usage) +* [Available Resources and Operations](#available-resources-and-operations) +* [Server-sent event streaming](#server-sent-event-streaming) +* [File uploads](#file-uploads) +* [Retries](#retries) +* [Error Handling](#error-handling) +* [Server Selection](#server-selection) +* [Custom HTTP Client](#custom-http-client) +* [Authentication](#authentication) +* [Debugging](#debugging) + + ## SDK Installation -PIP +The SDK can be installed with either *pip* or *poetry* package managers. + +### PIP + +*PIP* is the default package installer for Python, enabling easy installation and management of packages from PyPI via the command line. + ```bash pip install mistralai ``` -Poetry +### Poetry + +*Poetry* is a modern tool that simplifies dependency management and package publishing by using a single `pyproject.toml` file to handle project metadata and dependencies. + ```bash poetry add mistralai ``` @@ -49,7 +80,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.chat.complete(model="mistral-small-latest", messages=[ { "content": "Who is the best French painter? 
Answer in one short sentence.", @@ -101,10 +131,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.upload(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: @@ -126,8 +155,8 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) res = await s.files.upload_async(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: # handle response @@ -149,11 +178,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", + "content": "", }, ], agent_id="") @@ -287,14 +314,22 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA ## Available Resources and Operations -### [models](docs/sdks/models/README.md) +
+Available methods -* [list](docs/sdks/models/README.md#list) - List Models -* [retrieve](docs/sdks/models/README.md#retrieve) - Retrieve Model -* [delete](docs/sdks/models/README.md#delete) - Delete Model -* [update](docs/sdks/models/README.md#update) - Update Fine Tuned Model -* [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model -* [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model +### [agents](docs/sdks/agents/README.md) + +* [complete](docs/sdks/agents/README.md#complete) - Agents Completion +* [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion + +### [chat](docs/sdks/chat/README.md) + +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion +* [stream](docs/sdks/chat/README.md#stream) - Stream chat completion + +### [embeddings](docs/sdks/embeddings/README.md) + +* [create](docs/sdks/embeddings/README.md#create) - Embeddings ### [files](docs/sdks/files/README.md) @@ -303,8 +338,15 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [retrieve](docs/sdks/files/README.md#retrieve) - Retrieve File * [delete](docs/sdks/files/README.md#delete) - Delete File +### [fim](docs/sdks/fim/README.md) -### [fine_tuning.jobs](docs/sdks/jobs/README.md) +* [complete](docs/sdks/fim/README.md#complete) - Fim Completion +* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion + +### [fine_tuning](docs/sdks/finetuning/README.md) + + +#### [fine_tuning.jobs](docs/sdks/jobs/README.md) * [list](docs/sdks/jobs/README.md#list) - Get Fine Tuning Jobs * [create](docs/sdks/jobs/README.md#create) - Create Fine Tuning Job @@ -312,24 +354,17 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [cancel](docs/sdks/jobs/README.md#cancel) - Cancel Fine Tuning Job * [start](docs/sdks/jobs/README.md#start) - Start Fine Tuning Job -### [chat](docs/sdks/chat/README.md) - -* [complete](docs/sdks/chat/README.md#complete) - Chat Completion -* 
[stream](docs/sdks/chat/README.md#stream) - Stream chat completion - -### [fim](docs/sdks/fim/README.md) - -* [complete](docs/sdks/fim/README.md#complete) - Fim Completion -* [stream](docs/sdks/fim/README.md#stream) - Stream fim completion - -### [agents](docs/sdks/agents/README.md) -* [complete](docs/sdks/agents/README.md#complete) - Agents Completion -* [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion +### [models](docs/sdks/models/README.md) -### [embeddings](docs/sdks/embeddings/README.md) +* [list](docs/sdks/models/README.md#list) - List Models +* [retrieve](docs/sdks/models/README.md#retrieve) - Retrieve Model +* [delete](docs/sdks/models/README.md#delete) - Delete Model +* [update](docs/sdks/models/README.md#update) - Update Fine Tuned Model +* [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model +* [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model -* [create](docs/sdks/embeddings/README.md#create) - Embeddings +
@@ -349,7 +384,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.chat.stream(model="mistral-small-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", @@ -386,10 +420,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.upload(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: @@ -414,7 +447,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.list(, RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) @@ -435,7 +467,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.list() if res is not None: @@ -469,17 +500,16 @@ res = None try: res = s.models.list() + if res is not None: + # handle response + pass + except models.HTTPValidationError as e: - # handle exception + # handle e.data: models.HTTPValidationErrorData raise(e) except models.SDKError as e: # handle exception raise(e) - -if res is not None: - # handle response - pass - ``` @@ -505,7 +535,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.list() if res is not None: @@ -527,7 +556,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.list() if res is not None: @@ -638,7 +666,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.list() if res is not None: diff --git a/USAGE.md b/USAGE.md index aace195c..6a6c46b5 100644 --- a/USAGE.md +++ b/USAGE.md @@ -12,12 +12,8 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.chat.complete(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? 
Answer in one short sentence.", - "role": "user", - }, + ]) if res is not None: @@ -39,10 +35,7 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) res = await s.chat.complete_async(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, + ]) if res is not None: # handle response @@ -64,10 +57,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.upload(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: @@ -89,8 +81,8 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) res = await s.files.upload_async(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: # handle response @@ -112,11 +104,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", + "content": "", }, ], agent_id="") @@ -140,8 +130,7 @@ async def main(): ) res = await s.agents.complete_async(messages=[ { - "content": "Who is the best French painter? 
Answer in one short sentence.", - "role": "user", + "content": "", }, ], agent_id="") if res is not None: diff --git a/docs/models/agentscompletionrequesttoolchoice.md b/docs/models/agentscompletionrequesttoolchoice.md index 4d58fb77..63b9dca9 100644 --- a/docs/models/agentscompletionrequesttoolchoice.md +++ b/docs/models/agentscompletionrequesttoolchoice.md @@ -1,10 +1,17 @@ # AgentsCompletionRequestToolChoice -## Values +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` -| Name | Value | -| ------ | ------ | -| `AUTO` | auto | -| `NONE` | none | -| `ANY` | any | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequesttoolchoice.md b/docs/models/agentscompletionstreamrequesttoolchoice.md index e761d1e1..4354523a 100644 --- a/docs/models/agentscompletionstreamrequesttoolchoice.md +++ b/docs/models/agentscompletionstreamrequesttoolchoice.md @@ -1,10 +1,17 @@ # AgentsCompletionStreamRequestToolChoice -## Values +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` -| Name | Value | -| ------ | ------ | -| `AUTO` | auto | -| `NONE` | none | -| `ANY` | any | \ No newline at end of file diff --git a/docs/models/modelcard.md b/docs/models/basemodelcard.md similarity index 89% rename from docs/models/modelcard.md rename to docs/models/basemodelcard.md index 87951412..1c10ae31 100644 --- a/docs/models/modelcard.md +++ b/docs/models/basemodelcard.md @@ -1,4 +1,4 @@ -# ModelCard +# BaseModelCard ## Fields @@ -10,10 +10,9 @@ | `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | `created` | *Optional[int]* | :heavy_minus_sign: | N/A | | `owned_by` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `root` | *OptionalNullable[str]* | 
:heavy_minus_sign: | N/A | -| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index d22efc34..84197600 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -16,5 +16,5 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ToolChoice]](../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/docs/models/chatcompletionrequesttoolchoice.md b/docs/models/chatcompletionrequesttoolchoice.md new file mode 100644 index 00000000..1646528d --- /dev/null +++ b/docs/models/chatcompletionrequesttoolchoice.md @@ -0,0 +1,17 @@ +# ChatCompletionRequestToolChoice + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/docs/models/chatcompletionstreamrequesttoolchoice.md b/docs/models/chatcompletionstreamrequesttoolchoice.md index 37a6e9bb..cce0ca3e 100644 --- a/docs/models/chatcompletionstreamrequesttoolchoice.md +++ b/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -1,10 +1,17 @@ # ChatCompletionStreamRequestToolChoice -## Values +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` -| Name | Value | -| ------ | ------ | -| `AUTO` | auto | -| `NONE` | none | -| `ANY` | any | \ No newline at end of file diff --git a/docs/models/content.md b/docs/models/content.md index a833dc2c..4cd3cfd5 100644 --- a/docs/models/content.md +++ b/docs/models/content.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.ContentChunk]` +### `List[models.TextChunk]` ```python -value: List[models.ContentChunk] = /* values here */ +value: List[models.TextChunk] = /* values here */ ``` diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md index 64fc80d6..12f6430f 100644 --- a/docs/models/contentchunk.md +++ b/docs/models/contentchunk.md @@ -1,9 +1,17 @@ # ContentChunk -## Fields +## Supported Types + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` 
-| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/data.md b/docs/models/data.md new file mode 100644 index 00000000..95dc8d28 --- /dev/null +++ b/docs/models/data.md @@ -0,0 +1,17 @@ +# Data + + +## Supported Types + +### `models.BaseModelCard` + +```python +value: models.BaseModelCard = /* values here */ +``` + +### `models.FTModelCard` + +```python +value: models.FTModelCard = /* values here */ +``` + diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md new file mode 100644 index 00000000..fc633c06 --- /dev/null +++ b/docs/models/ftmodelcard.md @@ -0,0 +1,23 @@ +# FTModelCard + +Extra fields for fine-tuned models. + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `capabilities` | [models.ModelCapabilities](../models/modelcapabilities.md) | :heavy_check_mark: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `owned_by` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `deprecation` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functionname.md b/docs/models/functionname.md new file mode 100644 index 00000000..87d7b485 --- /dev/null +++ b/docs/models/functionname.md @@ -0,0 +1,10 @@ +# FunctionName + +this restriction of `Function` is used to select a specific function to call + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/imageurl.md b/docs/models/imageurl.md new file mode 100644 index 00000000..7c2bcbc3 --- /dev/null +++ b/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/imageurlchunk.md b/docs/models/imageurlchunk.md new file mode 100644 index 00000000..f1b926ef --- /dev/null +++ b/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | 
:heavy_check_mark: | N/A | +| `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/imageurlchunkimageurl.md b/docs/models/imageurlchunkimageurl.md new file mode 100644 index 00000000..76738908 --- /dev/null +++ b/docs/models/imageurlchunkimageurl.md @@ -0,0 +1,17 @@ +# ImageURLChunkImageURL + + +## Supported Types + +### `models.ImageURL` + +```python +value: models.ImageURL = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/imageurlchunktype.md b/docs/models/imageurlchunktype.md new file mode 100644 index 00000000..2064a0b4 --- /dev/null +++ b/docs/models/imageurlchunktype.md @@ -0,0 +1,8 @@ +# ImageURLChunkType + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `IMAGE_URL` | image_url | \ No newline at end of file diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md index 89020970..2e399ab6 100644 --- a/docs/models/modelcapabilities.md +++ b/docs/models/modelcapabilities.md @@ -8,4 +8,5 @@ | `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modellist.md b/docs/models/modellist.md index e3fefeef..760882c6 100644 --- a/docs/models/modellist.md +++ b/docs/models/modellist.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | 
------------------------------------------------ | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `data` | List[[models.ModelCard](../models/modelcard.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `data` | List[[models.Data](../models/data.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseformat.md b/docs/models/responseformat.md index 2704eab4..9c627f55 100644 --- a/docs/models/responseformat.md +++ b/docs/models/responseformat.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file diff --git a/docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md b/docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md new file mode 100644 index 00000000..3ac96521 --- /dev/null +++ b/docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md @@ -0,0 +1,19 @@ +# RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet + +Successful Response + + +## Supported Types + +### `models.BaseModelCard` + +```python +value: models.BaseModelCard = /* values here */ +``` + +### `models.FTModelCard` + +```python +value: models.FTModelCard = /* values here */ +``` + diff --git a/docs/models/textchunk.md b/docs/models/textchunk.md index 34e4dd6f..d488cb51 100644 --- a/docs/models/textchunk.md +++ b/docs/models/textchunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| 
`text` | *str* | :heavy_check_mark: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.TextChunkType]](../models/textchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/textchunktype.md b/docs/models/textchunktype.md new file mode 100644 index 00000000..e2a2ae8b --- /dev/null +++ b/docs/models/textchunktype.md @@ -0,0 +1,8 @@ +# TextChunkType + + +## Values + +| Name | Value | +| ------ | ------ | +| `TEXT` | text | \ No newline at end of file diff --git a/docs/models/tool.md b/docs/models/tool.md index ca624a90..822f86f8 100644 --- a/docs/models/tool.md +++ b/docs/models/tool.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolToolTypes]](../models/tooltooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | 
[Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolchoice.md b/docs/models/toolchoice.md index b84f51f6..792ebcd6 100644 --- a/docs/models/toolchoice.md +++ b/docs/models/toolchoice.md @@ -1,10 +1,11 @@ # ToolChoice +ToolChoice is either a ToolChoiceEnum or a ToolChoice -## Values -| Name | Value | -| ------ | ------ | -| `AUTO` | auto | -| `NONE` | none | -| `ANY` | any | \ No newline at end of file +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolchoiceenum.md b/docs/models/toolchoiceenum.md new file mode 100644 index 00000000..0be3d6c5 --- /dev/null +++ b/docs/models/toolchoiceenum.md @@ -0,0 +1,11 @@ +# ToolChoiceEnum + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | +| `REQUIRED` | required | \ No newline at end of file diff --git a/docs/models/tooltooltypes.md b/docs/models/tooltooltypes.md deleted file mode 100644 index e3964307..00000000 --- a/docs/models/tooltooltypes.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolToolTypes - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `FUNCTION` | function | \ No newline at end of file diff --git a/docs/models/usermessagecontent.md b/docs/models/usermessagecontent.md index 86ebd18f..8350f9e8 100644 --- 
a/docs/models/usermessagecontent.md +++ b/docs/models/usermessagecontent.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.ContentChunk]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.ContentChunk] = /* values here */ ``` diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 744fc17a..279a13fc 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -24,11 +24,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", + "content": "", }, ], agent_id="") @@ -80,11 +78,15 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.agents.stream(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", + "content": [ + { + "image_url": { + "url": "http://possible-veal.org", + }, + }, + ], }, ], agent_id="") diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 2cfba980..d5e85cce 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -24,12 +24,8 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.chat.complete(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, + ]) if res is not None: @@ -53,7 +49,7 @@ if res is not None: | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | | `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ToolChoice]](../../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -83,12 +79,8 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.chat.stream(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, + ]) if res is not None: diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 38b8b53a..ae270866 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -23,8 +23,7 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - -res = s.embeddings.create(inputs="", model="") +res = s.embeddings.create(inputs="", model="Wrangler") if res is not None: # handle response diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index c931f173..41ed9611 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -30,10 +30,9 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.upload(file={ - "file_name": "your_file_here", - "content": open("", "rb"), + "file_name": "example.file", + "content": open("example.file", "rb"), }) if res is not None: @@ -74,7 +73,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.list() if res is not None: @@ -114,7 +112,6 @@ s 
= Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.retrieve(file_id="") if res is not None: @@ -155,7 +152,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.files.delete(file_id="") if res is not None: diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 2f3d8fe6..cfb3d508 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -24,7 +24,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") if res is not None: @@ -75,7 +74,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") if res is not None: diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index cecff0e7..0929c78d 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -25,7 +25,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fine_tuning.jobs.list() if res is not None: @@ -74,7 +73,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fine_tuning.jobs.create(model="codestral-latest", hyperparameters={}) if res is not None: @@ -122,7 +120,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fine_tuning.jobs.get(job_id="b18d8d81-fd7b-4764-a31e-475cb1f36591") if res is not None: @@ -163,7 +160,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fine_tuning.jobs.cancel(job_id="03fa7112-315a-4072-a9f2-43f3f1ec962e") if res is not None: @@ -204,7 +200,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.fine_tuning.jobs.start(job_id="0eb0f807-fb9f-4e46-9c13-4e257df6e1ba") if res is not None: diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index a5d05eb1..1a54bbb2 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -28,7 +28,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = 
s.models.list() if res is not None: @@ -69,7 +68,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") if res is not None: @@ -87,7 +85,7 @@ if res is not None: ### Response -**[models.ModelCard](../../models/modelcard.md)** +**[models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet](../../models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md)** ### Errors @@ -111,7 +109,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.delete(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") if res is not None: @@ -153,7 +150,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.update(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") if res is not None: @@ -196,7 +192,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.archive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") if res is not None: @@ -237,7 +232,6 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) - res = s.models.unarchive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") if res is not None: diff --git a/examples/async_chat_with_image_no_streaming.py b/examples/async_chat_with_image_no_streaming.py new file mode 100755 index 00000000..22f9adc6 --- /dev/null +++ b/examples/async_chat_with_image_no_streaming.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python + +import asyncio +import os + +import httpx + +from mistralai import Mistral +from mistralai.models import ImageURLChunk, TextChunk, UserMessage + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "pixtral-12b" + client = Mistral(api_key=api_key) + + chat_response = await client.chat.complete_async( + model=model, + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image_url", + "image_url": 
"https://mistral.ai/images/news/codestral/FIM_table.png", + }, + ] + ) + ], + ) + + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 9a1dd1fd..0972d2a6 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,19 +1,20 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 9128fabc3ae45ecc9ed7fae8991b3d3e + docChecksum: e99cb4d498ede912c81ab20b7828c0e3 docVersion: 0.0.2 - speakeasyVersion: 1.373.1 - generationVersion: 2.399.0 - releaseVersion: 1.0.1 - configChecksum: dc28e30e8f503aee23a53bb77a46c902 + speakeasyVersion: 1.396.7 + generationVersion: 2.415.6 + releaseVersion: 1.2.2 + configChecksum: 36e70d966ca186be6efc57911c094dec published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.3.8 + core: 5.5.3 defaultEnabledRetries: 0.2.0 + enumUnions: 0.1.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 flatRequests: 1.0.1 @@ -25,16 +26,61 @@ features: nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.0 - retries: 3.0.0 + retries: 3.0.2 sdkHooks: 1.0.0 serverEvents: 1.0.2 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 unions: 3.0.2 generatedFiles: - - src/mistralai_azure/sdkconfiguration.py - - src/mistralai_azure/chat.py + - .gitattributes - .vscode/settings.json + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagerole.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - 
docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/completionchunk.md + - docs/models/completionevent.md + - docs/models/completionresponsestreamchoice.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/deltamessage.md + - docs/models/finishreason.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functionname.md + - docs/models/httpvalidationerror.md + - docs/models/loc.md + - docs/models/messages.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/role.md + - docs/models/security.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/textchunk.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolmessage.md + - docs/models/toolmessagerole.md + - docs/models/tooltypes.md + - docs/models/type.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md - poetry.toml - py.typed - pylintrc @@ -43,9 +89,44 @@ generatedFiles: - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_azure/__init__.py + - src/mistralai_azure/_hooks/__init__.py + - src/mistralai_azure/_hooks/sdkhooks.py + - src/mistralai_azure/_hooks/types.py - src/mistralai_azure/basesdk.py + - src/mistralai_azure/chat.py - src/mistralai_azure/httpclient.py + - src/mistralai_azure/models/__init__.py + - src/mistralai_azure/models/assistantmessage.py + - src/mistralai_azure/models/chatcompletionchoice.py + - src/mistralai_azure/models/chatcompletionrequest.py + - src/mistralai_azure/models/chatcompletionresponse.py + - src/mistralai_azure/models/chatcompletionstreamrequest.py + - src/mistralai_azure/models/completionchunk.py + - src/mistralai_azure/models/completionevent.py + - src/mistralai_azure/models/completionresponsestreamchoice.py + - 
src/mistralai_azure/models/contentchunk.py + - src/mistralai_azure/models/deltamessage.py + - src/mistralai_azure/models/function.py + - src/mistralai_azure/models/functioncall.py + - src/mistralai_azure/models/functionname.py + - src/mistralai_azure/models/httpvalidationerror.py + - src/mistralai_azure/models/responseformat.py + - src/mistralai_azure/models/responseformats.py + - src/mistralai_azure/models/sdkerror.py + - src/mistralai_azure/models/security.py + - src/mistralai_azure/models/systemmessage.py + - src/mistralai_azure/models/textchunk.py + - src/mistralai_azure/models/tool.py + - src/mistralai_azure/models/toolcall.py + - src/mistralai_azure/models/toolchoice.py + - src/mistralai_azure/models/toolchoiceenum.py + - src/mistralai_azure/models/toolmessage.py + - src/mistralai_azure/models/tooltypes.py + - src/mistralai_azure/models/usageinfo.py + - src/mistralai_azure/models/usermessage.py + - src/mistralai_azure/models/validationerror.py - src/mistralai_azure/py.typed + - src/mistralai_azure/sdkconfiguration.py - src/mistralai_azure/types/__init__.py - src/mistralai_azure/types/basemodel.py - src/mistralai_azure/utils/__init__.py @@ -63,75 +144,16 @@ generatedFiles: - src/mistralai_azure/utils/serializers.py - src/mistralai_azure/utils/url.py - src/mistralai_azure/utils/values.py - - src/mistralai_azure/models/sdkerror.py - - src/mistralai_azure/models/completionevent.py - - src/mistralai_azure/models/completionchunk.py - - src/mistralai_azure/models/completionresponsestreamchoice.py - - src/mistralai_azure/models/deltamessage.py - - src/mistralai_azure/models/toolcall.py - - src/mistralai_azure/models/functioncall.py - - src/mistralai_azure/models/usageinfo.py - - src/mistralai_azure/models/httpvalidationerror.py - - src/mistralai_azure/models/validationerror.py - - src/mistralai_azure/models/chatcompletionstreamrequest.py - - src/mistralai_azure/models/tool.py - - src/mistralai_azure/models/function.py - - src/mistralai_azure/models/responseformat.py 
- - src/mistralai_azure/models/systemmessage.py - - src/mistralai_azure/models/contentchunk.py - - src/mistralai_azure/models/usermessage.py - - src/mistralai_azure/models/textchunk.py - - src/mistralai_azure/models/assistantmessage.py - - src/mistralai_azure/models/toolmessage.py - - src/mistralai_azure/models/chatcompletionresponse.py - - src/mistralai_azure/models/chatcompletionchoice.py - - src/mistralai_azure/models/chatcompletionrequest.py - - src/mistralai_azure/models/security.py - - src/mistralai_azure/models/__init__.py - - docs/models/completionevent.md - - docs/models/completionchunk.md - - docs/models/finishreason.md - - docs/models/completionresponsestreamchoice.md - - docs/models/deltamessage.md - - docs/models/tooltypes.md - - docs/models/toolcall.md - - docs/models/arguments.md - - docs/models/functioncall.md - - docs/models/usageinfo.md - - docs/models/httpvalidationerror.md - - docs/models/loc.md - - docs/models/validationerror.md - - docs/models/stop.md - - docs/models/messages.md - - docs/models/toolchoice.md - - docs/models/chatcompletionstreamrequest.md - - docs/models/tooltooltypes.md - - docs/models/tool.md - - docs/models/function.md - - docs/models/responseformats.md - - docs/models/responseformat.md - - docs/models/content.md - - docs/models/role.md - - docs/models/systemmessage.md - - docs/models/contentchunk.md - - docs/models/usermessagecontent.md - - docs/models/usermessagerole.md - - docs/models/usermessage.md - - docs/models/textchunk.md - - docs/models/assistantmessagerole.md - - docs/models/assistantmessage.md - - docs/models/toolmessagerole.md - - docs/models/toolmessage.md - - docs/models/chatcompletionresponse.md - - docs/models/chatcompletionchoicefinishreason.md - - docs/models/chatcompletionchoice.md - - docs/models/chatcompletionrequeststop.md - - docs/models/chatcompletionrequestmessages.md - - docs/models/chatcompletionrequesttoolchoice.md - - docs/models/chatcompletionrequest.md - - docs/models/security.md - - 
docs/models/utils/retryconfig.md - - .gitattributes - - src/mistralai_azure/_hooks/sdkhooks.py - - src/mistralai_azure/_hooks/types.py - - src/mistralai_azure/_hooks/__init__.py +examples: + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "azureai", "messages": [{"content": []}]} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "azureai", "messages": [{"content": ""}]} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + "422": {} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index 0660e8cc..edcb95b3 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.1 + version: 1.2.2 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md index ed32b75e..1646528d 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md @@ -1,10 +1,17 @@ # ChatCompletionRequestToolChoice -## Values +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` -| Name | Value | -| ------ | ------ | -| `AUTO` | auto | -| `NONE` | none | -| `ANY` | any | \ No newline at end of file diff --git 
a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index 5ed2e2bc..05f711f9 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -16,5 +16,5 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ToolChoice]](../models/toolchoice.md) | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md new file mode 100644 index 00000000..cce0ca3e --- /dev/null +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -0,0 +1,17 @@ +# ChatCompletionStreamRequestToolChoice + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/content.md b/packages/mistralai_azure/docs/models/content.md index a833dc2c..4cd3cfd5 100644 --- a/packages/mistralai_azure/docs/models/content.md +++ b/packages/mistralai_azure/docs/models/content.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.ContentChunk]` +### `List[models.TextChunk]` ```python -value: List[models.ContentChunk] = /* values here */ +value: List[models.TextChunk] = /* values here */ ``` diff --git a/packages/mistralai_azure/docs/models/contentchunk.md b/packages/mistralai_azure/docs/models/contentchunk.md index 64fc80d6..4222920b 100644 --- a/packages/mistralai_azure/docs/models/contentchunk.md +++ b/packages/mistralai_azure/docs/models/contentchunk.md @@ -1,9 +1,11 @@ # ContentChunk -## Fields +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/functionname.md b/packages/mistralai_azure/docs/models/functionname.md new file mode 100644 index 00000000..87d7b485 --- /dev/null +++ 
b/packages/mistralai_azure/docs/models/functionname.md @@ -0,0 +1,10 @@ +# FunctionName + +this restriction of `Function` is used to select a specific function to call + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/mistralai_azure/docs/models/responseformat.md index 2704eab4..9c627f55 100644 --- a/packages/mistralai_azure/docs/models/responseformat.md +++ b/packages/mistralai_azure/docs/models/responseformat.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/textchunk.md b/packages/mistralai_azure/docs/models/textchunk.md index 34e4dd6f..6daab3c3 100644 --- a/packages/mistralai_azure/docs/models/textchunk.md +++ b/packages/mistralai_azure/docs/models/textchunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/tool.md 
b/packages/mistralai_azure/docs/models/tool.md index ca624a90..822f86f8 100644 --- a/packages/mistralai_azure/docs/models/tool.md +++ b/packages/mistralai_azure/docs/models/tool.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolToolTypes]](../models/tooltooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolchoice.md b/packages/mistralai_azure/docs/models/toolchoice.md index b84f51f6..792ebcd6 100644 --- a/packages/mistralai_azure/docs/models/toolchoice.md +++ b/packages/mistralai_azure/docs/models/toolchoice.md @@ -1,10 +1,11 @@ # ToolChoice +ToolChoice is either a ToolChoiceEnum or a ToolChoice -## Values -| Name | Value | -| ------ | ------ | -| `AUTO` | auto | -| `NONE` | none | -| `ANY` | any | \ No newline at end of file +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | 
---------------------------------------------------------------------------- | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolchoiceenum.md b/packages/mistralai_azure/docs/models/toolchoiceenum.md new file mode 100644 index 00000000..0be3d6c5 --- /dev/null +++ b/packages/mistralai_azure/docs/models/toolchoiceenum.md @@ -0,0 +1,11 @@ +# ToolChoiceEnum + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | +| `REQUIRED` | required | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/tooltooltypes.md b/packages/mistralai_azure/docs/models/tooltooltypes.md deleted file mode 100644 index e3964307..00000000 --- a/packages/mistralai_azure/docs/models/tooltooltypes.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolToolTypes - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `FUNCTION` | function | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/type.md b/packages/mistralai_azure/docs/models/type.md new file mode 100644 index 00000000..eb0581e7 --- /dev/null +++ b/packages/mistralai_azure/docs/models/type.md @@ -0,0 +1,8 @@ +# Type + + +## Values + +| Name | Value | +| ------ | ------ | +| `TEXT` | text | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessagecontent.md b/packages/mistralai_azure/docs/models/usermessagecontent.md index 86ebd18f..8350f9e8 100644 --- a/packages/mistralai_azure/docs/models/usermessagecontent.md +++ b/packages/mistralai_azure/docs/models/usermessagecontent.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.ContentChunk]` ```python -value: 
List[models.TextChunk] = /* values here */ +value: List[models.ContentChunk] = /* values here */ ``` diff --git a/packages/mistralai_azure/poetry.lock b/packages/mistralai_azure/poetry.lock index 2e5fecff..b9a32d7f 100644 --- a/packages/mistralai_azure/poetry.lock +++ b/packages/mistralai_azure/poetry.lock @@ -287,17 +287,6 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] -[[package]] -name = "nodeenv" -version = "1.9.1" -description = "Node.js virtual environment builder" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, - {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, -] - [[package]] name = "packaging" version = "24.1" @@ -342,18 +331,18 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pydantic" -version = "2.8.2" +version = "2.9.1" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, - {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, + {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, + {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, ] [package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.20.1" +annotated-types = ">=0.6.0" +pydantic-core = "2.23.3" typing-extensions = [ {version = ">=4.6.1", markers = "python_version < \"3.13\""}, {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, @@ 
-361,103 +350,104 @@ typing-extensions = [ [package.extras] email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.20.1" +version = "2.23.3" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, - {file = 
"pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, - {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, - {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = 
"sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, - {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, - {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = 
"sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, - {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, - {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = 
"sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, - {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, - {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, - 
{file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, - {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, - {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = 
"sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, - {file = 
"pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, - {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, + {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, + {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, + {file = 
"pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, + {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, + {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, + {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, + {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, + {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, + {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, + {file = 
"pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, + {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, + {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, + {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, + {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, + {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, + {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, + {file = 
"pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, + {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, + {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, + {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, + {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, + {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, + {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, + {file = 
"pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, + {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, + {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, + {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, + {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, + {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, + {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, + {file = 
"pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, + {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, + {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, + {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, + {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, + {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, + {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, + {file = 
"pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, + {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, + {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, + {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, + {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, + {file = 
"pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, + {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, ] [package.dependencies] @@ -493,24 +483,6 @@ typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\"" spelling = ["pyenchant (>=3.2,<4.0)"] testutils = ["gitpython (>3)"] -[[package]] -name = "pyright" -version = "1.1.374" 
-description = "Command line wrapper for pyright" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyright-1.1.374-py3-none-any.whl", hash = "sha256:55752bcf7a3646d293cd76710a983b71e16f6128aab2d42468e6eb7e46c0a70d"}, - {file = "pyright-1.1.374.tar.gz", hash = "sha256:d01b2daf864ba5e0362e56b844984865970d7204158e61eb685e2dab7804cb82"}, -] - -[package.dependencies] -nodeenv = ">=1.6.0" - -[package.extras] -all = ["twine (>=3.4.1)"] -dev = ["twine (>=3.4.1)"] - [[package]] name = "pytest" version = "8.3.2" @@ -649,4 +621,4 @@ typing-extensions = ">=3.7.4" [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "85499d03f45cd26302b8b267be44478c701581e8a56a3df0907bb38897fdb2e4" +content-hash = "4dfa1b4612afda308a6d0df6d282f34b7020cf4639d6668ac7c63e40807d9e0b" diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index c72b384e..a9f13e0d 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai_azure" -version = "1.0.1" +version = "1.2.2" description = "Python Client SDK for the Mistral AI API in Azure." 
authors = ["Mistral",] readme = "README-PYPI.md" @@ -20,14 +20,13 @@ python = "^3.8" eval-type-backport = "^0.2.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" -pydantic = "~2.8.2" +pydantic = "~2.9.0" python-dateutil = "2.8.2" typing-inspect = "^0.9.0" [tool.poetry.group.dev.dependencies] mypy = "==1.10.1" pylint = "==3.2.3" -pyright = "==1.1.374" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" types-python-dateutil = "^2.9.0.20240316" diff --git a/packages/mistralai_azure/scripts/prepare-readme.py b/packages/mistralai_azure/scripts/prepare-readme.py index a8ef8ea1..825d9ded 100644 --- a/packages/mistralai_azure/scripts/prepare-readme.py +++ b/packages/mistralai_azure/scripts/prepare-readme.py @@ -3,7 +3,7 @@ import shutil try: - shutil.copyfile('README.md', 'README-PYPI.md') + shutil.copyfile("README.md", "README-PYPI.md") except Exception as e: print("Failed to copy README.md to README-PYPI.md") print(e) diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py index c8e9631a..37ff4e9f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py @@ -1,11 +1,21 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" import httpx -from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks +from .types import ( + SDKInitHook, + BeforeRequestContext, + BeforeRequestHook, + AfterSuccessContext, + AfterSuccessHook, + AfterErrorContext, + AfterErrorHook, + Hooks, +) from .registration import init_hooks from typing import List, Optional, Tuple from mistralai_azure.httpclient import HttpClient + class SDKHooks(Hooks): def __init__(self) -> None: self.sdk_init_hooks: List[SDKInitHook] = [] @@ -31,7 +41,9 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: base_url, client = hook.sdk_init(base_url, client) return base_url, client - def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request: + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: for hook in self.before_request_hooks: out = hook.before_request(hook_ctx, request) if isinstance(out, Exception): @@ -40,7 +52,9 @@ def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) return request - def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response: + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> httpx.Response: for hook in self.after_success_hooks: out = hook.after_success(hook_ctx, response) if isinstance(out, Exception): @@ -48,7 +62,12 @@ def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) response = out return response - def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Tuple[Optional[httpx.Response], 
Optional[Exception]]: for hook in self.after_error_hooks: result = hook.after_error(hook_ctx, response, error) if isinstance(result, Exception): diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py index 3076b41d..5e34da26 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - from abc import ABC, abstractmethod import httpx from mistralai_azure.httpclient import HttpClient @@ -12,7 +11,12 @@ class HookContext: oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None - def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]): + def __init__( + self, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -20,18 +24,23 @@ def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], securi class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) - + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, 
hook_ctx.oauth2_scopes, hook_ctx.security_source) + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class SDKInitHook(ABC): @@ -42,19 +51,28 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: class BeforeRequestHook(ABC): @abstractmethod - def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]: + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: pass class AfterSuccessHook(ABC): @abstractmethod - def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]: + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: pass class AfterErrorHook(ABC): @abstractmethod - def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: pass diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 1f22dbcf..772b44c1 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -3,10 +3,15 @@ from .sdkconfiguration import SDKConfiguration import httpx from mistralai_azure import models, utils -from mistralai_azure._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext +from mistralai_azure._hooks import ( + AfterErrorContext, + AfterSuccessContext, + BeforeRequestContext, +) from mistralai_azure.utils import RetryConfig, SerializedRequestBody, 
get_body_content from typing import Callable, List, Optional, Tuple + class BaseSDK: sdk_configuration: SDKConfiguration @@ -24,6 +29,46 @@ def get_url(self, base_url, url_variables): return utils.template_url(base_url, url_variables) + def build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self.build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + ) + def build_request( self, method, @@ -45,7 +90,46 @@ def build_request( url_override: Optional[str] = None, ) -> httpx.Request: client = self.sdk_configuration.client + return self.build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + ) + def build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: query_params = {} url = 
url_override @@ -69,7 +153,7 @@ def build_request( if security is not None: if callable(security): security = security() - + if security is not None: security_headers, security_query_params = utils.get_security(security) headers = {**headers, **security_headers} @@ -129,7 +213,7 @@ def do(): req.method, req.url, req.headers, - get_body_content(req) + get_body_content(req), ) http_res = client.send(req, stream=stream) except Exception as e: @@ -149,7 +233,7 @@ def do(): http_res.status_code, http_res.url, http_res.headers, - "" if stream else http_res.text + "" if stream else http_res.text, ) if utils.match_status_codes(error_status_codes, http_res.status_code): @@ -189,6 +273,7 @@ async def do_request_async( ) -> httpx.Response: client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + async def do(): http_res = None try: @@ -200,7 +285,7 @@ async def do(): req.method, req.url, req.headers, - get_body_content(req) + get_body_content(req), ) http_res = await client.send(req, stream=stream) except Exception as e: @@ -220,7 +305,7 @@ async def do(): http_res.status_code, http_res.url, http_res.headers, - "" if stream else http_res.text + "" if stream else http_res.text, ) if utils.match_status_codes(error_status_codes, http_res.status_code): diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index a5e172db..5f1e539b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -7,12 +7,13 @@ from mistralai_azure.utils import eventstreaming from typing import Any, AsyncGenerator, Generator, List, Optional, Union + class Chat(BaseSDK): r"""Chat Completion API.""" - - + def stream( - self, *, + self, + *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], model: OptionalNullable[str] = "azureai", temperature: Optional[float] = 0.7, @@ -22,9 +23,18 @@ def stream( stream: 
Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -40,12 +50,12 @@ def stream( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -55,10 +65,10 @@ def stream( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionStreamRequest( model=model, temperature=temperature, @@ -69,12 +79,16 @@ def stream( stop=stop, random_seed=random_seed, messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), safe_prompt=safe_prompt, ) - + req = self.build_request( method="POST", path="/chat/completions#stream", @@ -87,48 +101,58 @@ def stream( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + 
operation_id="stream_chat", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def stream_async( - self, *, + self, + *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], model: OptionalNullable[str] = "azureai", temperature: Optional[float] = 0.7, @@ -138,9 +162,18 @@ async def stream_async( stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: 
OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -156,12 +189,12 @@ async def stream_async( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -171,10 +204,10 @@ async def stream_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionStreamRequest( model=model, temperature=temperature, @@ -185,13 +218,17 @@ async def stream_async( stop=stop, random_seed=random_seed, messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), safe_prompt=safe_prompt, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/chat/completions#stream", base_url=base_url, @@ -203,60 +240,87 @@ async def stream_async( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - 
hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="stream_chat", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events_async( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def complete( - self, *, - messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + self, + *, + messages: Union[ + List[models.ChatCompletionRequestMessages], + List[models.ChatCompletionRequestMessagesTypedDict], + ], model: OptionalNullable[str] = "azureai", temperature: 
Optional[float] = 0.7, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ChatCompletionRequestToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -273,9 +337,9 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -285,10 +349,10 @@ def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -298,13 +362,19 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), safe_prompt=safe_prompt, ) - + req = self.build_request( method="POST", path="/chat/completions", @@ -317,59 +387,84 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - 
hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def complete_async( - self, *, - messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + self, + *, + messages: Union[ + List[models.ChatCompletionRequestMessages], + List[models.ChatCompletionRequestMessagesTypedDict], + ], model: OptionalNullable[str] = "azureai", temperature: Optional[float] = 0.7, top_p: Optional[float] = 1, 
max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ChatCompletionRequestToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -386,9 +481,9 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -398,10 +493,10 @@ async def complete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -411,14 +506,20 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), safe_prompt=safe_prompt, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/chat/completions", base_url=base_url, @@ -430,41 +531,48 @@ async def complete_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", 
"500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index 710fe565..70f07999 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ 
b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -1,28 +1,170 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict -from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestMessages, ChatCompletionRequestMessagesTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, ChatCompletionRequestTypedDict -from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict -from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice +from .assistantmessage import ( + AssistantMessage, + AssistantMessageRole, + AssistantMessageTypedDict, +) +from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, +) +from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessages, + ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, +) +from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, +) +from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, +) from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completionevent import CompletionEvent, 
CompletionEventTypedDict -from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict, FinishReason +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, + FinishReason, +) from .contentchunk import ContentChunk, ContentChunkTypedDict from .deltamessage import DeltaMessage, DeltaMessageTypedDict from .function import Function, FunctionTypedDict -from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict +from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, +) +from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .responseformats import ResponseFormats from .sdkerror import SDKError from .security import Security, SecurityTypedDict -from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .tool import Tool, ToolToolTypes, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict, ToolTypes +from .systemmessage import ( + Content, + ContentTypedDict, + Role, + SystemMessage, + SystemMessageTypedDict, +) +from .textchunk import TextChunk, TextChunkTypedDict, Type +from .tool import Tool, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .tooltypes import ToolTypes from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, 
UserMessageRole, UserMessageTypedDict -from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict +from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, +) +from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, +) -__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", 
"ValidationErrorTypedDict"] +__all__ = [ + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageRole", + "AssistantMessageTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestMessages", + "ChatCompletionRequestMessagesTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceTypedDict", + "Content", + "ContentChunk", + "ContentChunkTypedDict", + "ContentTypedDict", + "DeltaMessage", + "DeltaMessageTypedDict", + "FinishReason", + "Function", + "FunctionCall", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionTypedDict", + "HTTPValidationError", + "HTTPValidationErrorData", + "Loc", + "LocTypedDict", + "Messages", + "MessagesTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "Role", + "SDKError", + "Security", + "SecurityTypedDict", + "Stop", + "StopTypedDict", + "SystemMessage", + "SystemMessageTypedDict", + "TextChunk", + "TextChunkTypedDict", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolMessage", + "ToolMessageRole", + "ToolMessageTypedDict", + "ToolTypedDict", + "ToolTypes", + "Type", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageRole", + "UserMessageTypedDict", + 
"ValidationError", + "ValidationErrorTypedDict", +] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index c7bc4b48..577b7e9a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -2,7 +2,13 @@ from __future__ import annotations from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from pydantic import model_serializer from typing import List, Literal, Optional, TypedDict from typing_extensions import NotRequired @@ -10,21 +16,25 @@ AssistantMessageRole = Literal["assistant"] + class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] - + class AssistantMessage(BaseModel): content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: Optional[AssistantMessageRole] = "assistant" - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["content", "tool_calls", "prefix", "role"] @@ -38,9 +48,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -50,4 +64,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py index 91995452..a71cd085 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py @@ -6,16 +6,20 @@ from typing import Literal, TypedDict -ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] +ChatCompletionChoiceFinishReason = Literal[ + "stop", "length", "model_length", "error", "tool_calls" +] + class ChatCompletionChoiceTypedDict(TypedDict): index: int message: AssistantMessageTypedDict finish_reason: ChatCompletionChoiceFinishReason - + class ChatCompletionChoice(BaseModel): index: int + message: AssistantMessage + finish_reason: ChatCompletionChoiceFinishReason - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index 1de5cd86..f2ba2345 100644 
--- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -5,12 +5,20 @@ from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, TypedDict, Union +from typing import List, Optional, TypedDict, Union from typing_extensions import Annotated, NotRequired @@ -22,13 +30,30 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +ChatCompletionRequestMessagesTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] -ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +ChatCompletionRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] -ChatCompletionRequestToolChoice = Literal["auto", "none", "any"] +ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] + + +ChatCompletionRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] + class ChatCompletionRequestTypedDict(TypedDict): messages: List[ChatCompletionRequestMessagesTypedDict] @@ -51,39 +76,64 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ChatCompletionRequestToolChoice] + tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" - + class ChatCompletionRequest(BaseModel): messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[ChatCompletionRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET - tool_choice: Optional[ChatCompletionRequestToolChoice] = "auto" + + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + optional_fields = [ + "model", + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + "safe_prompt", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -94,9 +144,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -106,4 +160,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git 
a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py index 88591210..0a02e46c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py @@ -15,13 +15,17 @@ class ChatCompletionResponseTypedDict(TypedDict): usage: UsageInfoTypedDict created: NotRequired[int] choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - + class ChatCompletionResponse(BaseModel): id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 86d3aa10..28abddb9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -5,12 +5,20 @@ from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, TypedDict, Union +from typing import List, Optional, TypedDict, Union from typing_extensions import 
Annotated, NotRequired @@ -22,13 +30,32 @@ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +MessagesTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] -Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +Messages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] -ToolChoice = Literal["auto", "none", "any"] +ChatCompletionStreamRequestToolChoiceTypedDict = Union[ + ToolChoiceTypedDict, ToolChoiceEnum +] + + +ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] + class ChatCompletionStreamRequestTypedDict(TypedDict): messages: List[MessagesTypedDict] @@ -50,38 +77,63 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ToolChoice] + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" - + class ChatCompletionStreamRequest(BaseModel): messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[Stop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET - tool_choice: Optional[ToolChoice] = "auto" + + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["model", "temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + optional_fields = [ + "model", + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + "safe_prompt", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -92,9 +144,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -104,4 +160,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py index f51aca3c..d2f334d4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py @@ -1,7 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_azure.types import BaseModel from typing import List, Optional, TypedDict @@ -15,13 +18,17 @@ class CompletionChunkTypedDict(TypedDict): object: NotRequired[str] created: NotRequired[int] usage: NotRequired[UsageInfoTypedDict] - + class CompletionChunk(BaseModel): id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py index 2f8f4b9c..b9b68db3 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py @@ -8,8 +8,7 @@ class CompletionEventTypedDict(TypedDict): data: CompletionChunkTypedDict - + class CompletionEvent(BaseModel): data: CompletionChunk - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py index 76f7fce8..c220a51d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py @@ -9,17 +9,20 @@ FinishReason = Literal["stop", "length", "error", "tool_calls"] + class CompletionResponseStreamChoiceTypedDict(TypedDict): index: int delta: DeltaMessageTypedDict finish_reason: Nullable[FinishReason] - + class CompletionResponseStreamChoice(BaseModel): index: int + delta: 
DeltaMessage + finish_reason: Nullable[FinishReason] - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [] @@ -33,9 +36,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -45,4 +52,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py index a45f2bdb..49aeba4c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py @@ -1,17 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel -import pydantic -from typing import Final, Optional, TypedDict -from typing_extensions import Annotated +from .textchunk import TextChunk, TextChunkTypedDict -class ContentChunkTypedDict(TypedDict): - text: str - +ContentChunkTypedDict = TextChunkTypedDict -class ContentChunk(BaseModel): - text: str - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore - + +ContentChunk = TextChunk diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py index 4f9f395c..5e8011d0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py @@ -2,7 +2,13 @@ from __future__ import annotations from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from pydantic import model_serializer from typing import List, Optional, TypedDict from typing_extensions import NotRequired @@ -12,13 +18,15 @@ class DeltaMessageTypedDict(TypedDict): role: NotRequired[str] content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - + class DeltaMessage(BaseModel): role: Optional[str] = None + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] @@ -32,9 +40,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = 
(self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -44,4 +56,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py index 6ffcacf2..081ce1d6 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/function.py @@ -10,10 +10,11 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] - + class Function(BaseModel): name: str + parameters: Dict[str, Any] + description: Optional[str] = "" - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py index 3259ad99..0afa5901 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py @@ -14,9 +14,9 @@ class FunctionCallTypedDict(TypedDict): name: str arguments: ArgumentsTypedDict - + class FunctionCall(BaseModel): name: str + arguments: Arguments - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py b/packages/mistralai_azure/src/mistralai_azure/models/functionname.py new file mode 100644 index 00000000..c825a5ab --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/functionname.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from typing import TypedDict + + +class FunctionNameTypedDict(TypedDict): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str + + +class FunctionName(BaseModel): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py index de07a3d4..28f9b4ed 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py @@ -6,13 +6,14 @@ from mistralai_azure.types import BaseModel from typing import List, Optional + class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None - class HTTPValidationError(Exception): r"""Validation Error""" + data: HTTPValidationErrorData def __init__(self, data: HTTPValidationErrorData): @@ -20,4 +21,3 @@ def __init__(self, data: HTTPValidationErrorData): def __str__(self) -> str: return utils.marshal_json(self.data, HTTPValidationErrorData) - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index 0dac0f6b..c692033c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -1,18 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .responseformats import ResponseFormats from mistralai_azure.types import BaseModel -from typing import Literal, Optional, TypedDict +from typing import Optional, TypedDict from typing_extensions import NotRequired -ResponseFormats = Literal["text", "json_object"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] - + r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + class ResponseFormat(BaseModel): - type: Optional[ResponseFormats] = "text" - + type: Optional[ResponseFormats] = None + r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py new file mode 100644 index 00000000..2c06b812 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ResponseFormats = Literal["text", "json_object"] +r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/security.py b/packages/mistralai_azure/src/mistralai_azure/models/security.py index 94d9e645..1245881b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/security.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/security.py @@ -9,8 +9,17 @@ class SecurityTypedDict(TypedDict): api_key: str - + class Security(BaseModel): - api_key: Annotated[str, FieldMetadata(security=SecurityMetadata(scheme=True, scheme_type="http", sub_type="bearer", field_name="Authorization"))] - + api_key: Annotated[ + str, + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py index 7898aecf..cf1775f7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py @@ -1,26 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict from mistralai_azure.types import BaseModel from typing import List, Literal, Optional, TypedDict, Union from typing_extensions import NotRequired -ContentTypedDict = Union[str, List[ContentChunkTypedDict]] +ContentTypedDict = Union[str, List[TextChunkTypedDict]] -Content = Union[str, List[ContentChunk]] +Content = Union[str, List[TextChunk]] Role = Literal["system"] + class SystemMessageTypedDict(TypedDict): content: ContentTypedDict role: NotRequired[Role] - + class SystemMessage(BaseModel): content: Content + role: Optional[Role] = "system" - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py index 12f2e781..75cc9490 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py @@ -3,15 +3,20 @@ from __future__ import annotations from mistralai_azure.types import BaseModel import pydantic -from typing import Final, Optional, TypedDict +from typing import Final, Literal, Optional, TypedDict from typing_extensions import Annotated +Type = Literal["text"] + + class TextChunkTypedDict(TypedDict): text: str - + class TextChunk(BaseModel): text: str - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore - + + # fmt: off + TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "text" # type: ignore + # fmt: on diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tool.py b/packages/mistralai_azure/src/mistralai_azure/models/tool.py index 48c5ba8e..3a02ed73 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/tool.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/tool.py @@ -2,21 +2,22 @@ from __future__ import annotations from .function 
import Function, FunctionTypedDict -from mistralai_azure.types import BaseModel, UnrecognizedStr +from .tooltypes import ToolTypes +from mistralai_azure.types import BaseModel from mistralai_azure.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Literal, Optional, TypedDict, Union +from typing import Optional, TypedDict from typing_extensions import Annotated, NotRequired -ToolToolTypes = Union[Literal["function"], UnrecognizedStr] - class ToolTypedDict(TypedDict): function: FunctionTypedDict - type: NotRequired[ToolToolTypes] - + type: NotRequired[ToolTypes] + class Tool(BaseModel): function: Function - type: Annotated[Optional[ToolToolTypes], PlainValidator(validate_open_enum(False))] = "function" - + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index 578d6ffc..2a768a2d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -2,23 +2,25 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict -from mistralai_azure.types import BaseModel, UnrecognizedStr +from .tooltypes import ToolTypes +from mistralai_azure.types import BaseModel from mistralai_azure.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Literal, Optional, TypedDict, Union +from typing import Optional, TypedDict from typing_extensions import Annotated, NotRequired -ToolTypes = Union[Literal["function"], UnrecognizedStr] - class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] - + class ToolCall(BaseModel): function: FunctionCall + id: Optional[str] = "null" - type: 
Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = "function" - + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py new file mode 100644 index 00000000..2d3d87f0 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .functionname import FunctionName, FunctionNameTypedDict +from .tooltypes import ToolTypes +from mistralai_azure.types import BaseModel +from mistralai_azure.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class ToolChoiceTypedDict(TypedDict): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionNameTypedDict + r"""this restriction of `Function` is used to select a specific function to call""" + type: NotRequired[ToolTypes] + + +class ToolChoice(BaseModel): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionName + r"""this restriction of `Function` is used to select a specific function to call""" + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py b/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py new file mode 100644 index 00000000..8e6a6ad8 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ToolChoiceEnum = Literal["auto", "none", "any", "required"] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py index e8452977..14ecf73b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py @@ -1,7 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from pydantic import model_serializer from typing import Literal, Optional, TypedDict from typing_extensions import NotRequired @@ -9,19 +15,23 @@ ToolMessageRole = Literal["tool"] + class ToolMessageTypedDict(TypedDict): content: str tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] role: NotRequired[ToolMessageRole] - + class ToolMessage(BaseModel): content: str + tool_call_id: OptionalNullable[str] = UNSET + name: OptionalNullable[str] = UNSET + role: Optional[ToolMessageRole] = "tool" - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["tool_call_id", "name", "role"] @@ -35,9 +45,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != 
UNSET_SENTINEL: m[k] = val @@ -47,4 +61,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py b/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py new file mode 100644 index 00000000..dfcd31f0 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import UnrecognizedStr +from typing import Literal, Union + + +ToolTypes = Union[Literal["function"], UnrecognizedStr] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py index f30c1ebe..2a926481 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py @@ -9,10 +9,11 @@ class UsageInfoTypedDict(TypedDict): prompt_tokens: int completion_tokens: int total_tokens: int - + class UsageInfo(BaseModel): prompt_tokens: int + completion_tokens: int + total_tokens: int - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py index 7c525d98..e9488767 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py @@ -1,26 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict +from .contentchunk import ContentChunk, ContentChunkTypedDict from mistralai_azure.types import BaseModel from typing import List, Literal, Optional, TypedDict, Union from typing_extensions import NotRequired -UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] +UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] -UserMessageContent = Union[str, List[TextChunk]] +UserMessageContent = Union[str, List[ContentChunk]] UserMessageRole = Literal["user"] + class UserMessageTypedDict(TypedDict): content: UserMessageContentTypedDict role: NotRequired[UserMessageRole] - + class UserMessage(BaseModel): content: UserMessageContent + role: Optional[UserMessageRole] = "user" - diff --git a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py index 9b7b9a9a..6ab66a1b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py @@ -15,10 +15,11 @@ class ValidationErrorTypedDict(TypedDict): loc: List[LocTypedDict] msg: str type: str - + class ValidationError(BaseModel): loc: List[Loc] + msg: str + type: str - diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 4a6f93b7..eefd8df4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - from ._hooks import SDKHooks from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix @@ -14,7 +13,7 @@ SERVER_PROD = "prod" r"""Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_PROD: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -24,14 +23,14 @@ class SDKConfiguration: client: HttpClient async_client: AsyncHttpClient debug_logger: Logger - security: Optional[Union[models.Security,Callable[[], models.Security]]] = None + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.1" - gen_version: str = "2.399.0" - user_agent: str = "speakeasy-sdk/python 1.0.1 2.399.0 0.0.2 mistralai_azure" + sdk_version: str = "1.2.2" + gen_version: str = "2.415.6" + user_agent: str = "speakeasy-sdk/python 1.2.2 2.415.6 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None @@ -45,10 +44,9 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: self.server = SERVER_PROD if self.server not in SERVERS: - raise ValueError(f"Invalid server \"{self.server}\"") + raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - def get_hooks(self) -> SDKHooks: return self._hooks diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 95aa1b60..6c26aeb9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -33,8 +33,13 @@ validate_open_enum, ) from .url import generate_url, template_url, remove_suffix -from 
.values import get_global_from_env, match_content_type, match_status_codes, match_response -from .logger import Logger, get_body_content, NoOpLogger +from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, +) +from .logger import Logger, get_body_content, get_default_logger __all__ = [ "BackoffStrategy", @@ -43,6 +48,7 @@ "FormMetadata", "generate_url", "get_body_content", + "get_default_logger", "get_discriminator", "get_global_from_env", "get_headers", @@ -57,7 +63,6 @@ "match_status_codes", "match_response", "MultipartFormMetadata", - "NoOpLogger", "OpenEnumMeta", "PathParamMetadata", "QueryParamMetadata", diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py index 07f9b235..9f5a731e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py @@ -17,7 +17,7 @@ MultipartFormMetadata, find_field_metadata, ) -from .values import _val_to_string +from .values import _is_set, _val_to_string def _populate_form( @@ -27,7 +27,7 @@ def _populate_form( delimiter: str, form: Dict[str, List[str]], ): - if obj is None: + if not _is_set(obj): return form if isinstance(obj, BaseModel): @@ -41,7 +41,7 @@ def _populate_form( continue val = getattr(obj, name) - if val is None: + if not _is_set(val): continue if explode: @@ -54,7 +54,7 @@ def _populate_form( elif isinstance(obj, Dict): items = [] for key, value in obj.items(): - if value is None: + if not _is_set(value): continue if explode: @@ -68,7 +68,7 @@ def _populate_form( items = [] for value in obj: - if value is None: + if not _is_set(value): continue if explode: @@ -102,7 +102,7 @@ def serialize_multipart_form( field = request_fields[name] val = getattr(request, name) - if val is None: + if not _is_set(val): continue field_metadata = find_field_metadata(field, MultipartFormMetadata) @@ -156,7 +156,7 @@ def 
serialize_multipart_form( values = [] for value in val: - if value is None: + if not _is_set(value): continue values.append(_val_to_string(value)) @@ -176,7 +176,7 @@ def serialize_form_data(data: Any) -> Dict[str, Any]: field = data_fields[name] val = getattr(data, name) - if val is None: + if not _is_set(val): continue metadata = find_field_metadata(field, FormMetadata) @@ -200,7 +200,8 @@ def serialize_form_data(data: Any) -> Dict[str, Any]: raise ValueError(f"Invalid form style for field {name}") elif isinstance(data, Dict): for key, value in data.items(): - form[key] = [_val_to_string(value)] + if _is_set(value): + form[key] = [_val_to_string(value)] else: raise TypeError(f"Invalid request body type {type(data)} for form data") diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/headers.py b/packages/mistralai_azure/src/mistralai_azure/utils/headers.py index e14a0f4a..37864cbb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/headers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/headers.py @@ -15,16 +15,16 @@ find_field_metadata, ) -from .values import _populate_from_globals, _val_to_string +from .values import _is_set, _populate_from_globals, _val_to_string def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: headers: Dict[str, str] = {} globals_already_populated = [] - if headers_params is not None: + if _is_set(headers_params): globals_already_populated = _populate_headers(headers_params, gbls, headers, []) - if gbls is not None: + if _is_set(gbls): _populate_headers(gbls, None, headers, globals_already_populated) return headers @@ -67,7 +67,7 @@ def _populate_headers( def _serialize_header(explode: bool, obj: Any) -> str: - if obj is None: + if not _is_set(obj): return "" if isinstance(obj, BaseModel): @@ -83,7 +83,7 @@ def _serialize_header(explode: bool, obj: Any) -> str: f_name = obj_field.alias if obj_field.alias is not None else name val = getattr(obj, name) - if val is None: + if 
not _is_set(val): continue if explode: @@ -98,7 +98,7 @@ def _serialize_header(explode: bool, obj: Any) -> str: items = [] for key, value in obj.items(): - if value is None: + if not _is_set(value): continue if explode: @@ -113,14 +113,14 @@ def _serialize_header(explode: bool, obj: Any) -> str: items = [] for value in obj: - if value is None: + if not _is_set(value): continue items.append(_val_to_string(value)) if len(items) > 0: return ",".join(items) - else: + elif _is_set(obj): return f"{_val_to_string(obj)}" return "" diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/logger.py b/packages/mistralai_azure/src/mistralai_azure/utils/logger.py index 7e4bbeac..b661aff6 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/logger.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/logger.py @@ -3,14 +3,20 @@ import httpx from typing import Any, Protocol + class Logger(Protocol): def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: pass + class NoOpLogger: def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: pass + def get_body_content(req: httpx.Request) -> str: return "" if not hasattr(req, "_content") else str(req.content) + +def get_default_logger() -> Logger: + return NoOpLogger() diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py b/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py index 1c8c5834..37a6e7f9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py @@ -15,7 +15,12 @@ QueryParamMetadata, find_field_metadata, ) -from .values import _get_serialized_params, _populate_from_globals, _val_to_string +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) from .forms import _populate_form @@ -26,7 +31,7 @@ def get_query_params( params: Dict[str, List[str]] = {} globals_already_populated = 
_populate_query_params(query_params, gbls, params, []) - if gbls is not None: + if _is_set(gbls): _populate_query_params(gbls, None, params, globals_already_populated) return params @@ -55,7 +60,7 @@ def _populate_query_params( if not metadata: continue - value = getattr(query_params, name) if query_params is not None else None + value = getattr(query_params, name) if _is_set(query_params) else None value, global_found = _populate_from_globals( name, value, QueryParamMetadata, gbls @@ -99,7 +104,7 @@ def _populate_deep_object_query_params( obj: Any, params: Dict[str, List[str]], ): - if obj is None: + if not _is_set(obj): return if isinstance(obj, BaseModel): @@ -113,10 +118,7 @@ def _populate_deep_object_query_params_basemodel( obj: Any, params: Dict[str, List[str]], ): - if obj is None: - return - - if not isinstance(obj, BaseModel): + if not _is_set(obj) or not isinstance(obj, BaseModel): return obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields @@ -128,11 +130,11 @@ def _populate_deep_object_query_params_basemodel( params_key = f"{prior_params_key}[{f_name}]" obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) - if obj_param_metadata is None: + if not _is_set(obj_param_metadata): continue obj_val = getattr(obj, name) - if obj_val is None: + if not _is_set(obj_val): continue if isinstance(obj_val, BaseModel): @@ -150,11 +152,11 @@ def _populate_deep_object_query_params_dict( value: Dict, params: Dict[str, List[str]], ): - if value is None: + if not _is_set(value): return for key, val in value.items(): - if val is None: + if not _is_set(val): continue params_key = f"{prior_params_key}[{key}]" @@ -174,11 +176,11 @@ def _populate_deep_object_query_params_list( value: List, params: Dict[str, List[str]], ): - if value is None: + if not _is_set(value): return for val in value: - if val is None: + if not _is_set(val): continue if params.get(params_key) is None: diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py 
b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py index a06f9279..4d608671 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py @@ -1,5 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +import asyncio import random import time from typing import List @@ -212,5 +213,5 @@ async def retry_with_backoff_async( raise sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) sleep = min(sleep, max_interval / 1000) - time.sleep(sleep) + await asyncio.sleep(sleep) retries += 1 diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/security.py b/packages/mistralai_azure/src/mistralai_azure/utils/security.py index aab4cb65..295a3f40 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/security.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/security.py @@ -16,7 +16,6 @@ ) - def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: headers: Dict[str, str] = {} query_params: Dict[str, List[str]] = {} @@ -42,8 +41,10 @@ def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: _parse_security_option(headers, query_params, value) return headers, query_params if metadata.scheme: - # Special case for basic auth which could be a flattened model - if metadata.sub_type == "basic" and not isinstance(value, BaseModel): + # Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): _parse_security_scheme(headers, query_params, metadata, name, security) else: _parse_security_scheme(headers, query_params, metadata, name, value) @@ -80,9 +81,12 @@ def _parse_security_scheme( sub_type = scheme_metadata.sub_type if isinstance(scheme, BaseModel): - if scheme_type == "http" and sub_type == "basic": - 
_parse_basic_auth_scheme(headers, scheme) - return + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields for name in scheme_fields: @@ -131,6 +135,8 @@ def _parse_security_scheme_value( elif scheme_type == "http": if sub_type == "bearer": headers[header_name] = _apply_bearer(value) + elif sub_type == "custom": + return else: raise ValueError("sub type {sub_type} not supported") else: diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index a98998a3..85d57f43 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -9,13 +9,15 @@ from pydantic_core import from_json from typing_inspect import is_optional_type -from ..types.basemodel import BaseModel, Nullable, OptionalNullable +from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): if is_optional_type(type(d)) and d is None: return None + if isinstance(d, Unset): + return d if not isinstance(d, Decimal): raise ValueError("Expected Decimal object") @@ -29,7 +31,7 @@ def validate_decimal(d): if d is None: return None - if isinstance(d, Decimal): + if isinstance(d, (Decimal, Unset)): return d if not isinstance(d, (str, int, float)): @@ -42,6 +44,8 @@ def serialize_float(as_str: bool): def serialize(f): if is_optional_type(type(f)) and f is None: return None + if isinstance(f, Unset): + return f if not isinstance(f, float): raise ValueError("Expected float") @@ -55,7 +59,7 @@ def validate_float(f): if f is None: return None - if isinstance(f, float): + if isinstance(f, (float, Unset)): return f if not isinstance(f, str): @@ -65,14 +69,16 @@ def validate_float(f): def serialize_int(as_str: bool): - def 
serialize(b): - if is_optional_type(type(b)) and b is None: + def serialize(i): + if is_optional_type(type(i)) and i is None: return None + if isinstance(i, Unset): + return i - if not isinstance(b, int): + if not isinstance(i, int): raise ValueError("Expected int") - return str(b) if as_str else b + return str(i) if as_str else i return serialize @@ -81,7 +87,7 @@ def validate_int(b): if b is None: return None - if isinstance(b, int): + if isinstance(b, (int, Unset)): return b if not isinstance(b, str): @@ -95,6 +101,9 @@ def validate(e): if e is None: return None + if isinstance(e, Unset): + return e + if is_int: if not isinstance(e, int): raise ValueError("Expected int") diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/url.py b/packages/mistralai_azure/src/mistralai_azure/utils/url.py index b201bfa4..c78ccbae 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/url.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/url.py @@ -18,7 +18,12 @@ PathParamMetadata, find_field_metadata, ) -from .values import _get_serialized_params, _populate_from_globals, _val_to_string +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) def generate_url( @@ -32,7 +37,7 @@ def generate_url( globals_already_populated = _populate_path_params( path_params, gbls, path_param_values, [] ) - if gbls is not None: + if _is_set(gbls): _populate_path_params(gbls, None, path_param_values, globals_already_populated) for key, value in path_param_values.items(): @@ -64,14 +69,14 @@ def _populate_path_params( if param_metadata is None: continue - param = getattr(path_params, name) if path_params is not None else None + param = getattr(path_params, name) if _is_set(path_params) else None param, global_found = _populate_from_globals( name, param, PathParamMetadata, gbls ) if global_found: globals_already_populated.append(name) - if param is None: + if not _is_set(param): continue f_name = field.alias if 
field.alias is not None else name @@ -87,13 +92,13 @@ def _populate_path_params( if param_metadata.style == "simple": if isinstance(param, List): for pp_val in param: - if pp_val is None: + if not _is_set(pp_val): continue pp_vals.append(_val_to_string(pp_val)) path_param_values[f_name] = ",".join(pp_vals) elif isinstance(param, Dict): for pp_key in param: - if param[pp_key] is None: + if not _is_set(param[pp_key]): continue if param_metadata.explode: pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") @@ -116,7 +121,7 @@ def _populate_path_params( ) param_field_val = getattr(param, name) - if param_field_val is None: + if not _is_set(param_field_val): continue if param_metadata.explode: pp_vals.append( @@ -127,7 +132,7 @@ def _populate_path_params( f"{param_name},{_val_to_string(param_field_val)}" ) path_param_values[f_name] = ",".join(pp_vals) - else: + elif _is_set(param): path_param_values[f_name] = _val_to_string(param) return globals_already_populated diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/values.py b/packages/mistralai_azure/src/mistralai_azure/utils/values.py index 24ccae3d..2b4b6832 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/values.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/values.py @@ -10,6 +10,8 @@ from pydantic import BaseModel from pydantic.fields import FieldInfo +from ..types.basemodel import Unset + from .serializers import marshal_json from .metadata import ParamMetadata, find_field_metadata @@ -126,3 +128,7 @@ def _get_serialized_params( params[field_name] = marshal_json(obj, typ) return params + + +def _is_set(value: Any) -> bool: + return value is not None and not isinstance(value, Unset) diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 4dcdd752..e5d61fb2 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,19 +1,20 @@ lockVersion: 2.0.0 id: 
ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: b276b71cb6764f11b10461fe70962781 + docChecksum: 823d9b94fcb9c6588d0af16b7301f4ac docVersion: 0.0.2 - speakeasyVersion: 1.373.1 - generationVersion: 2.399.0 - releaseVersion: 1.0.1 - configChecksum: 698bd633a47a664d6f3515c1b9ecdbaa + speakeasyVersion: 1.396.7 + generationVersion: 2.415.6 + releaseVersion: 1.2.2 + configChecksum: fa993b7253c0c8c0d114d51422ffb486 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.2 - core: 5.3.8 + core: 5.5.3 defaultEnabledRetries: 0.2.0 + enumUnions: 0.1.0 envVarSecurityUsage: 0.3.1 examples: 3.0.0 flatRequests: 1.0.1 @@ -25,17 +26,66 @@ features: nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.0 - retries: 3.0.0 + retries: 3.0.2 sdkHooks: 1.0.0 serverEvents: 1.0.2 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 unions: 3.0.2 generatedFiles: - - src/mistralai_gcp/sdkconfiguration.py - - src/mistralai_gcp/chat.py - - src/mistralai_gcp/fim.py + - .gitattributes - .vscode/settings.json + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagerole.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionchoicefinishreason.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequestmessages.md + - docs/models/chatcompletionrequeststop.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/completionchunk.md + - docs/models/completionevent.md + - docs/models/completionresponsestreamchoice.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/deltamessage.md + - docs/models/fimcompletionrequest.md + - docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - 
docs/models/finishreason.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functionname.md + - docs/models/httpvalidationerror.md + - docs/models/loc.md + - docs/models/messages.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/role.md + - docs/models/security.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/textchunk.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolmessage.md + - docs/models/toolmessagerole.md + - docs/models/tooltypes.md + - docs/models/type.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md - poetry.toml - py.typed - pylintrc @@ -44,9 +94,48 @@ generatedFiles: - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py + - src/mistralai_gcp/_hooks/__init__.py + - src/mistralai_gcp/_hooks/sdkhooks.py + - src/mistralai_gcp/_hooks/types.py - src/mistralai_gcp/basesdk.py + - src/mistralai_gcp/chat.py + - src/mistralai_gcp/fim.py - src/mistralai_gcp/httpclient.py + - src/mistralai_gcp/models/__init__.py + - src/mistralai_gcp/models/assistantmessage.py + - src/mistralai_gcp/models/chatcompletionchoice.py + - src/mistralai_gcp/models/chatcompletionrequest.py + - src/mistralai_gcp/models/chatcompletionresponse.py + - src/mistralai_gcp/models/chatcompletionstreamrequest.py + - src/mistralai_gcp/models/completionchunk.py + - src/mistralai_gcp/models/completionevent.py + - src/mistralai_gcp/models/completionresponsestreamchoice.py + - src/mistralai_gcp/models/contentchunk.py + - src/mistralai_gcp/models/deltamessage.py + - src/mistralai_gcp/models/fimcompletionrequest.py + - src/mistralai_gcp/models/fimcompletionresponse.py + - src/mistralai_gcp/models/fimcompletionstreamrequest.py + - 
src/mistralai_gcp/models/function.py + - src/mistralai_gcp/models/functioncall.py + - src/mistralai_gcp/models/functionname.py + - src/mistralai_gcp/models/httpvalidationerror.py + - src/mistralai_gcp/models/responseformat.py + - src/mistralai_gcp/models/responseformats.py + - src/mistralai_gcp/models/sdkerror.py + - src/mistralai_gcp/models/security.py + - src/mistralai_gcp/models/systemmessage.py + - src/mistralai_gcp/models/textchunk.py + - src/mistralai_gcp/models/tool.py + - src/mistralai_gcp/models/toolcall.py + - src/mistralai_gcp/models/toolchoice.py + - src/mistralai_gcp/models/toolchoiceenum.py + - src/mistralai_gcp/models/toolmessage.py + - src/mistralai_gcp/models/tooltypes.py + - src/mistralai_gcp/models/usageinfo.py + - src/mistralai_gcp/models/usermessage.py + - src/mistralai_gcp/models/validationerror.py - src/mistralai_gcp/py.typed + - src/mistralai_gcp/sdkconfiguration.py - src/mistralai_gcp/types/__init__.py - src/mistralai_gcp/types/basemodel.py - src/mistralai_gcp/utils/__init__.py @@ -64,83 +153,28 @@ generatedFiles: - src/mistralai_gcp/utils/serializers.py - src/mistralai_gcp/utils/url.py - src/mistralai_gcp/utils/values.py - - src/mistralai_gcp/models/sdkerror.py - - src/mistralai_gcp/models/completionevent.py - - src/mistralai_gcp/models/completionchunk.py - - src/mistralai_gcp/models/completionresponsestreamchoice.py - - src/mistralai_gcp/models/deltamessage.py - - src/mistralai_gcp/models/toolcall.py - - src/mistralai_gcp/models/functioncall.py - - src/mistralai_gcp/models/usageinfo.py - - src/mistralai_gcp/models/httpvalidationerror.py - - src/mistralai_gcp/models/validationerror.py - - src/mistralai_gcp/models/chatcompletionstreamrequest.py - - src/mistralai_gcp/models/tool.py - - src/mistralai_gcp/models/function.py - - src/mistralai_gcp/models/responseformat.py - - src/mistralai_gcp/models/systemmessage.py - - src/mistralai_gcp/models/contentchunk.py - - src/mistralai_gcp/models/usermessage.py - - src/mistralai_gcp/models/textchunk.py 
- - src/mistralai_gcp/models/assistantmessage.py - - src/mistralai_gcp/models/toolmessage.py - - src/mistralai_gcp/models/chatcompletionresponse.py - - src/mistralai_gcp/models/chatcompletionchoice.py - - src/mistralai_gcp/models/chatcompletionrequest.py - - src/mistralai_gcp/models/fimcompletionstreamrequest.py - - src/mistralai_gcp/models/fimcompletionresponse.py - - src/mistralai_gcp/models/fimcompletionrequest.py - - src/mistralai_gcp/models/security.py - - src/mistralai_gcp/models/__init__.py - - docs/models/completionevent.md - - docs/models/completionchunk.md - - docs/models/finishreason.md - - docs/models/completionresponsestreamchoice.md - - docs/models/deltamessage.md - - docs/models/tooltypes.md - - docs/models/toolcall.md - - docs/models/arguments.md - - docs/models/functioncall.md - - docs/models/usageinfo.md - - docs/models/httpvalidationerror.md - - docs/models/loc.md - - docs/models/validationerror.md - - docs/models/stop.md - - docs/models/messages.md - - docs/models/toolchoice.md - - docs/models/chatcompletionstreamrequest.md - - docs/models/tooltooltypes.md - - docs/models/tool.md - - docs/models/function.md - - docs/models/responseformats.md - - docs/models/responseformat.md - - docs/models/content.md - - docs/models/role.md - - docs/models/systemmessage.md - - docs/models/contentchunk.md - - docs/models/usermessagecontent.md - - docs/models/usermessagerole.md - - docs/models/usermessage.md - - docs/models/textchunk.md - - docs/models/assistantmessagerole.md - - docs/models/assistantmessage.md - - docs/models/toolmessagerole.md - - docs/models/toolmessage.md - - docs/models/chatcompletionresponse.md - - docs/models/chatcompletionchoicefinishreason.md - - docs/models/chatcompletionchoice.md - - docs/models/chatcompletionrequeststop.md - - docs/models/chatcompletionrequestmessages.md - - docs/models/chatcompletionrequesttoolchoice.md - - docs/models/chatcompletionrequest.md - - docs/models/fimcompletionstreamrequeststop.md - - 
docs/models/fimcompletionstreamrequest.md - - docs/models/fimcompletionresponse.md - - docs/models/fimcompletionrequeststop.md - - docs/models/fimcompletionrequest.md - - docs/models/security.md - - docs/models/utils/retryconfig.md - - .gitattributes - - src/mistralai_gcp/_hooks/sdkhooks.py - - src/mistralai_gcp/_hooks/types.py - - src/mistralai_gcp/_hooks/__init__.py +examples: + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "mistral-small-latest", "messages": [{"content": []}]} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "mistral-small-latest", "messages": [{"content": ""}]} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + "422": {} + stream_fim: + speakeasy-default-stream-fim: + requestBody: + application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + fim_completion_v1_fim_completions_post: + speakeasy-default-fim-completion-v1-fim-completions-post: + requestBody: + application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + "422": {} diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 4f520700..43da5ef7 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.0.1 + version: 1.2.2 
additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md index ed32b75e..1646528d 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md @@ -1,10 +1,17 @@ # ChatCompletionRequestToolChoice -## Values +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` -| Name | Value | -| ------ | ------ | -| `AUTO` | auto | -| `NONE` | none | -| `ANY` | any | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index d7b7fe98..eb0d11ed 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -16,4 +16,4 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ToolChoice]](../models/toolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md new file mode 100644 index 00000000..cce0ca3e --- /dev/null +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -0,0 +1,17 @@ +# ChatCompletionStreamRequestToolChoice + + +## Supported Types + +### `models.ToolChoice` + +```python +value: models.ToolChoice = /* values here */ +``` + +### `models.ToolChoiceEnum` + +```python +value: models.ToolChoiceEnum = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/content.md b/packages/mistralai_gcp/docs/models/content.md index a833dc2c..4cd3cfd5 100644 --- a/packages/mistralai_gcp/docs/models/content.md +++ b/packages/mistralai_gcp/docs/models/content.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.ContentChunk]` +### `List[models.TextChunk]` ```python -value: List[models.ContentChunk] = /* values here */ +value: List[models.TextChunk] = /* values here */ ``` diff --git a/packages/mistralai_gcp/docs/models/contentchunk.md b/packages/mistralai_gcp/docs/models/contentchunk.md index 64fc80d6..4222920b 100644 --- a/packages/mistralai_gcp/docs/models/contentchunk.md +++ b/packages/mistralai_gcp/docs/models/contentchunk.md @@ -1,9 +1,11 @@ # ContentChunk -## Fields +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` -| 
Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/functionname.md b/packages/mistralai_gcp/docs/models/functionname.md new file mode 100644 index 00000000..87d7b485 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/functionname.md @@ -0,0 +1,10 @@ +# FunctionName + +this restriction of `Function` is used to select a specific function to call + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md index 2704eab4..9c627f55 100644 --- a/packages/mistralai_gcp/docs/models/responseformat.md +++ b/packages/mistralai_gcp/docs/models/responseformat.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
| \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/textchunk.md b/packages/mistralai_gcp/docs/models/textchunk.md index 34e4dd6f..6daab3c3 100644 --- a/packages/mistralai_gcp/docs/models/textchunk.md +++ b/packages/mistralai_gcp/docs/models/textchunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/mistralai_gcp/docs/models/tool.md index ca624a90..822f86f8 100644 --- a/packages/mistralai_gcp/docs/models/tool.md +++ b/packages/mistralai_gcp/docs/models/tool.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolToolTypes]](../models/tooltooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| 
`function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolchoice.md b/packages/mistralai_gcp/docs/models/toolchoice.md index b84f51f6..792ebcd6 100644 --- a/packages/mistralai_gcp/docs/models/toolchoice.md +++ b/packages/mistralai_gcp/docs/models/toolchoice.md @@ -1,10 +1,11 @@ # ToolChoice +ToolChoice is either a ToolChoiceEnum or a ToolChoice -## Values -| Name | Value | -| ------ | ------ | -| `AUTO` | auto | -| `NONE` | none | -| `ANY` | any | \ No newline at end of file +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolchoiceenum.md b/packages/mistralai_gcp/docs/models/toolchoiceenum.md new file mode 100644 index 00000000..0be3d6c5 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/toolchoiceenum.md @@ -0,0 +1,11 @@ +# ToolChoiceEnum + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `AUTO` | auto | +| `NONE` | none | +| `ANY` | any | +| `REQUIRED` | required | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/tooltooltypes.md b/packages/mistralai_gcp/docs/models/tooltooltypes.md deleted file mode 100644 index e3964307..00000000 --- 
a/packages/mistralai_gcp/docs/models/tooltooltypes.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolToolTypes - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `FUNCTION` | function | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/type.md b/packages/mistralai_gcp/docs/models/type.md new file mode 100644 index 00000000..eb0581e7 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/type.md @@ -0,0 +1,8 @@ +# Type + + +## Values + +| Name | Value | +| ------ | ------ | +| `TEXT` | text | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessagecontent.md b/packages/mistralai_gcp/docs/models/usermessagecontent.md index 86ebd18f..8350f9e8 100644 --- a/packages/mistralai_gcp/docs/models/usermessagecontent.md +++ b/packages/mistralai_gcp/docs/models/usermessagecontent.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.ContentChunk]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.ContentChunk] = /* values here */ ``` diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock index 67c9cec7..5483cc8f 100644 --- a/packages/mistralai_gcp/poetry.lock +++ b/packages/mistralai_gcp/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -420,17 +420,6 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] -[[package]] -name = "nodeenv" -version = "1.9.1" -description = "Node.js virtual environment builder" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, - {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, -] - [[package]] name = "packaging" version = "24.1" @@ -500,18 +489,18 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.8.2" +version = "2.9.1" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, - {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, + {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, + {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, ] [package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.20.1" +annotated-types = ">=0.6.0" +pydantic-core = "2.23.3" typing-extensions = [ {version = ">=4.6.1", markers = "python_version < \"3.13\""}, {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, @@ -519,103 +508,104 @@ typing-extensions = [ [package.extras] email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.20.1" +version = "2.23.3" description = "Core functionality for Pydantic validation and serialization" optional = 
false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, - {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, - {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, - {file = 
"pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, - {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, - {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, - {file = 
"pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, - {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, - {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, - {file = 
"pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, - {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, - {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, - {file = 
"pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, - {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, - {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = 
"sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, - {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, - {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, - {file = 
"pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, - {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, + {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, + {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, + {file = 
"pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, + {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, + {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, + {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, + {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, + {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, + {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, + {file = 
"pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, + {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, + {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, + {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, + {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, + {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, + {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, + {file = 
"pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, + {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, + {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, + {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, + {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, + {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, + {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, + {file = 
"pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, + {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, + {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, + {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, + {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, + {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, + {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, + {file = 
"pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, + {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, + {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, + {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, + {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, + {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, + {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash 
= "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, + {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, + {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, + {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, + {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, + 
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, + {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, ] [package.dependencies] @@ -651,24 +641,6 @@ typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\"" spelling = ["pyenchant (>=3.2,<4.0)"] testutils = ["gitpython (>3)"] -[[package]] -name = "pyright" -version = "1.1.374" -description = "Command line wrapper for pyright" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pyright-1.1.374-py3-none-any.whl", hash = "sha256:55752bcf7a3646d293cd76710a983b71e16f6128aab2d42468e6eb7e46c0a70d"}, - {file = "pyright-1.1.374.tar.gz", hash = 
"sha256:d01b2daf864ba5e0362e56b844984865970d7204158e61eb685e2dab7804cb82"}, -] - -[package.dependencies] -nodeenv = ">=1.6.0" - -[package.extras] -all = ["twine (>=3.4.1)"] -dev = ["twine (>=3.4.1)"] - [[package]] name = "pytest" version = "8.3.2" @@ -859,4 +831,4 @@ zstd = ["zstandard (>=0.18.0)"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "6a01b3944f3e2b62891369e56c6e0e00815d65e9a137f0558ee13fd17f674669" +content-hash = "fc4716156ed5774ad5090ce141d42d8081750f92e5d1e3ef3192b5f13ef8e815" diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index c169af4f..34ea7e55 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai-gcp" -version = "1.0.1" +version = "1.2.2" description = "Python Client SDK for the Mistral AI API in GCP." authors = ["Mistral",] readme = "README-PYPI.md" @@ -18,10 +18,10 @@ in-project = true [tool.poetry.dependencies] python = "^3.8" eval-type-backport = "^0.2.0" -google-auth = "^2.31.0" +google-auth = "2.27.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" -pydantic = "~2.8.2" +pydantic = "~2.9.0" python-dateutil = "2.8.2" requests = "^2.32.3" typing-inspect = "^0.9.0" @@ -29,7 +29,6 @@ typing-inspect = "^0.9.0" [tool.poetry.group.dev.dependencies] mypy = "==1.10.1" pylint = "==3.2.3" -pyright = "==1.1.374" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" types-python-dateutil = "^2.9.0.20240316" diff --git a/packages/mistralai_gcp/scripts/prepare-readme.py b/packages/mistralai_gcp/scripts/prepare-readme.py index a8ef8ea1..825d9ded 100644 --- a/packages/mistralai_gcp/scripts/prepare-readme.py +++ b/packages/mistralai_gcp/scripts/prepare-readme.py @@ -3,7 +3,7 @@ import shutil try: - shutil.copyfile('README.md', 'README-PYPI.md') + shutil.copyfile("README.md", "README-PYPI.md") except Exception as e: print("Failed to copy README.md to README-PYPI.md") print(e) diff --git 
a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py index ca3b7b36..b81c2a27 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py @@ -1,11 +1,21 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" import httpx -from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks +from .types import ( + SDKInitHook, + BeforeRequestContext, + BeforeRequestHook, + AfterSuccessContext, + AfterSuccessHook, + AfterErrorContext, + AfterErrorHook, + Hooks, +) from .registration import init_hooks from typing import List, Optional, Tuple from mistralai_gcp.httpclient import HttpClient + class SDKHooks(Hooks): def __init__(self) -> None: self.sdk_init_hooks: List[SDKInitHook] = [] @@ -31,7 +41,9 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: base_url, client = hook.sdk_init(base_url, client) return base_url, client - def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request: + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: for hook in self.before_request_hooks: out = hook.before_request(hook_ctx, request) if isinstance(out, Exception): @@ -40,7 +52,9 @@ def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) return request - def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response: + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> httpx.Response: for hook in self.after_success_hooks: out = hook.after_success(hook_ctx, response) if isinstance(out, Exception): @@ -48,7 +62,12 @@ def after_success(self, hook_ctx: AfterSuccessContext, 
response: httpx.Response) response = out return response - def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Tuple[Optional[httpx.Response], Optional[Exception]]: for hook in self.after_error_hooks: result = hook.after_error(hook_ctx, response, error) if isinstance(result, Exception): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py index f4ee7f37..417126fd 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - from abc import ABC, abstractmethod import httpx from mistralai_gcp.httpclient import HttpClient @@ -12,7 +11,12 @@ class HookContext: oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None - def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]): + def __init__( + self, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -20,18 +24,23 @@ def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], securi class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class 
AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) - + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class SDKInitHook(ABC): @@ -42,19 +51,28 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: class BeforeRequestHook(ABC): @abstractmethod - def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]: + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: pass class AfterSuccessHook(ABC): @abstractmethod - def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]: + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: pass class AfterErrorHook(ABC): @abstractmethod - def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index fd4854f7..c647eba2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -3,10 +3,15 @@ from 
.sdkconfiguration import SDKConfiguration import httpx from mistralai_gcp import models, utils -from mistralai_gcp._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext +from mistralai_gcp._hooks import ( + AfterErrorContext, + AfterSuccessContext, + BeforeRequestContext, +) from mistralai_gcp.utils import RetryConfig, SerializedRequestBody, get_body_content from typing import Callable, List, Optional, Tuple + class BaseSDK: sdk_configuration: SDKConfiguration @@ -24,6 +29,46 @@ def get_url(self, base_url, url_variables): return utils.template_url(base_url, url_variables) + def build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self.build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + ) + def build_request( self, method, @@ -45,7 +90,46 @@ def build_request( url_override: Optional[str] = None, ) -> httpx.Request: client = self.sdk_configuration.client + return self.build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + ) + def build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + 
request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: query_params = {} url = url_override @@ -69,7 +153,7 @@ def build_request( if security is not None: if callable(security): security = security() - + if security is not None: security_headers, security_query_params = utils.get_security(security) headers = {**headers, **security_headers} @@ -129,7 +213,7 @@ def do(): req.method, req.url, req.headers, - get_body_content(req) + get_body_content(req), ) http_res = client.send(req, stream=stream) except Exception as e: @@ -149,7 +233,7 @@ def do(): http_res.status_code, http_res.url, http_res.headers, - "" if stream else http_res.text + "" if stream else http_res.text, ) if utils.match_status_codes(error_status_codes, http_res.status_code): @@ -189,6 +273,7 @@ async def do_request_async( ) -> httpx.Response: client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + async def do(): http_res = None try: @@ -200,7 +285,7 @@ async def do(): req.method, req.url, req.headers, - get_body_content(req) + get_body_content(req), ) http_res = await client.send(req, stream=stream) except Exception as e: @@ -220,7 +305,7 @@ async def do(): http_res.status_code, http_res.url, http_res.headers, - "" if stream else http_res.text + "" if stream else http_res.text, ) if utils.match_status_codes(error_status_codes, http_res.status_code): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index 7a1e7f7e..044dd192 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -7,12 +7,13 @@ from mistralai_gcp.utils import eventstreaming from typing 
import Any, AsyncGenerator, Generator, List, Optional, Union + class Chat(BaseSDK): r"""Chat Completion API.""" - - + def stream( - self, *, + self, + *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: Optional[float] = 0.7, @@ -22,9 +23,18 @@ def stream( stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -39,12 +49,12 @@ def stream( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -53,10 +63,10 @@ def stream( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionStreamRequest( model=model, temperature=temperature, @@ -67,11 +77,15 @@ def stream( stop=stop, random_seed=random_seed, messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), ) - + req = self.build_request( method="POST", path="/streamRawPredict", @@ -84,48 +98,58 @@ def stream( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", 
"502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="stream_chat", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def stream_async( - self, *, + self, + *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: Optional[float] = 0.7, @@ -135,9 +159,18 @@ async def stream_async( stream: Optional[bool] = True, stop: Optional[Union[models.Stop, 
models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -152,12 +185,12 @@ async def stream_async( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -166,10 +199,10 @@ async def stream_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionStreamRequest( model=model, temperature=temperature, @@ -180,12 +213,16 @@ async def stream_async( stop=stop, random_seed=random_seed, messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/streamRawPredict", base_url=base_url, @@ -197,60 +234,87 @@ async def stream_async( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ 
- "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="stream_chat", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events_async( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def complete( - self, *, + self, + *, model: Nullable[str], - messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + 
messages: Union[ + List[models.ChatCompletionRequestMessages], + List[models.ChatCompletionRequestMessagesTypedDict], + ], temperature: Optional[float] = 0.7, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ChatCompletionRequestToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -266,9 +330,9 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -277,10 +341,10 @@ def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -290,12 +354,18 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), ) - + req = self.build_request( method="POST", path="/rawPredict", @@ -308,59 +378,84 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - 
retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def complete_async( - self, *, + self, + *, model: Nullable[str], - messages: Union[List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict]], + messages: Union[ + 
List[models.ChatCompletionRequestMessages], + List[models.ChatCompletionRequestMessagesTypedDict], + ], temperature: Optional[float] = 0.7, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.ChatCompletionRequestStop, models.ChatCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ChatCompletionRequestToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -376,9 +471,9 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -387,10 +482,10 @@ async def complete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -400,13 +495,19 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/rawPredict", base_url=base_url, @@ -418,41 +519,48 @@ async def complete_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = 
self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py 
b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 47d8c9a9..cda380c8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -7,12 +7,13 @@ from mistralai_gcp.utils import eventstreaming from typing import Any, AsyncGenerator, Generator, Optional, Union + class Fim(BaseSDK): r"""Fill-in-the-middle API.""" - - + def stream( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -20,7 +21,12 @@ def stream( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -37,7 +43,7 @@ def stream( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`. @@ -49,10 +55,10 @@ def stream( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionStreamRequest( model=model, temperature=temperature, @@ -65,7 +71,7 @@ def stream( prompt=prompt, suffix=suffix, ) - + req = self.build_request( method="POST", path="/streamRawPredict#fim", @@ -78,48 +84,58 @@ def stream( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="stream_fim", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events( + http_res, + lambda raw: 
utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def stream_async( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -127,7 +143,12 @@ async def stream_async( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -144,7 +165,7 @@ async def stream_async( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
:param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. @@ -156,10 +177,10 @@ async def stream_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionStreamRequest( model=model, temperature=temperature, @@ -172,8 +193,8 @@ async def stream_async( prompt=prompt, suffix=suffix, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/streamRawPredict#fim", base_url=base_url, @@ -185,48 +206,58 @@ async def stream_async( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", 
"500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="stream_fim", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events_async( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def complete( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -234,7 +265,12 @@ def complete( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - 
stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -263,10 +299,10 @@ def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionRequest( model=model, temperature=temperature, @@ -279,7 +315,7 @@ def complete( prompt=prompt, suffix=suffix, ) - + req = self.build_request( method="POST", path="/rawPredict#fim", @@ -292,47 +328,55 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + 
retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.FIMCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def complete_async( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -340,7 +384,12 @@ async def complete_async( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -369,10 +418,10 @@ async def complete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = 
server_url - + request = models.FIMCompletionRequest( model=model, temperature=temperature, @@ -385,8 +434,8 @@ async def complete_async( prompt=prompt, suffix=suffix, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/rawPredict#fim", base_url=base_url, @@ -398,41 +447,48 @@ async def complete_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security), + hook_ctx=HookContext( + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.FIMCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise 
models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index 7c8c1f4a..84acf245 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -1,31 +1,193 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict -from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestMessages, ChatCompletionRequestMessagesTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, ChatCompletionRequestTypedDict -from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict -from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice +from .assistantmessage import ( + AssistantMessage, + AssistantMessageRole, + AssistantMessageTypedDict, +) +from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, +) +from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessages, + ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, +) +from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, +) +from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, +) from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict, FinishReason +from .completionresponsestreamchoice 
import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, + FinishReason, +) from .contentchunk import ContentChunk, ContentChunkTypedDict from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop, FIMCompletionRequestStopTypedDict, FIMCompletionRequestTypedDict +from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, +) from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict -from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict +from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, +) from .function import Function, FunctionTypedDict -from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict +from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, +) +from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .responseformats import ResponseFormats from .sdkerror import SDKError from .security import Security, SecurityTypedDict -from .systemmessage import Content, ContentTypedDict, Role, SystemMessage, SystemMessageTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .tool import Tool, ToolToolTypes, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict, ToolTypes +from .systemmessage import ( + Content, + 
ContentTypedDict, + Role, + SystemMessage, + SystemMessageTypedDict, +) +from .textchunk import TextChunk, TextChunkTypedDict, Type +from .tool import Tool, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .tooltypes import ToolTypes from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict -from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict +from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, +) +from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, +) -__all__ = ["Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestMessages", "ChatCompletionRequestMessagesTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", 
"FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "Role", "SDKError", "Security", "SecurityTypedDict", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict"] +__all__ = [ + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageRole", + "AssistantMessageTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestMessages", + "ChatCompletionRequestMessagesTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceTypedDict", + "Content", + "ContentChunk", + "ContentChunkTypedDict", + "ContentTypedDict", + "DeltaMessage", + "DeltaMessageTypedDict", + 
"FIMCompletionRequest", + "FIMCompletionRequestStop", + "FIMCompletionRequestStopTypedDict", + "FIMCompletionRequestTypedDict", + "FIMCompletionResponse", + "FIMCompletionResponseTypedDict", + "FIMCompletionStreamRequest", + "FIMCompletionStreamRequestStop", + "FIMCompletionStreamRequestStopTypedDict", + "FIMCompletionStreamRequestTypedDict", + "FinishReason", + "Function", + "FunctionCall", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionTypedDict", + "HTTPValidationError", + "HTTPValidationErrorData", + "Loc", + "LocTypedDict", + "Messages", + "MessagesTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "Role", + "SDKError", + "Security", + "SecurityTypedDict", + "Stop", + "StopTypedDict", + "SystemMessage", + "SystemMessageTypedDict", + "TextChunk", + "TextChunkTypedDict", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolMessage", + "ToolMessageRole", + "ToolMessageTypedDict", + "ToolTypedDict", + "ToolTypes", + "Type", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageRole", + "UserMessageTypedDict", + "ValidationError", + "ValidationErrorTypedDict", +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index f4e94f38..33a4965f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -2,7 +2,13 @@ from __future__ import annotations from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from pydantic import model_serializer from typing import List, Literal, 
Optional, TypedDict from typing_extensions import NotRequired @@ -10,21 +16,25 @@ AssistantMessageRole = Literal["assistant"] + class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] - + class AssistantMessage(BaseModel): content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: Optional[AssistantMessageRole] = "assistant" - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["content", "tool_calls", "prefix", "role"] @@ -38,9 +48,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -50,4 +64,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py index 67ff1f50..c585e1ed 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py @@ -6,16 +6,20 @@ from typing import Literal, TypedDict -ChatCompletionChoiceFinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] +ChatCompletionChoiceFinishReason = Literal[ + "stop", "length", "model_length", "error", "tool_calls" +] + class ChatCompletionChoiceTypedDict(TypedDict): index: int message: AssistantMessageTypedDict finish_reason: ChatCompletionChoiceFinishReason - + class ChatCompletionChoice(BaseModel): index: int + message: AssistantMessage + finish_reason: ChatCompletionChoiceFinishReason - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index 45f61e7f..dbe6f55c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -5,12 +5,20 @@ from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, TypedDict, Union +from typing import List, Optional, TypedDict, Union from typing_extensions import Annotated, NotRequired @@ -22,13 +30,30 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +ChatCompletionRequestMessagesTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] -ChatCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +ChatCompletionRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] -ChatCompletionRequestToolChoice = Literal["auto", "none", "any"] +ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] + + +ChatCompletionRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] + class ChatCompletionRequestTypedDict(TypedDict): model: Nullable[str] @@ -51,35 +76,57 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ChatCompletionRequestToolChoice] - + tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + class ChatCompletionRequest(BaseModel): model: Nullable[str] r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[ChatCompletionRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET - tool_choice: Optional[ChatCompletionRequestToolChoice] = "auto" - + + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -90,9 +137,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -102,4 +153,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py index c8ccdfca..5fb10447 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py @@ -15,13 +15,17 @@ class ChatCompletionResponseTypedDict(TypedDict): usage: UsageInfoTypedDict created: NotRequired[int] choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - + class ChatCompletionResponse(BaseModel): id: str + object: str + 
model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index a07f71e2..5bb7059c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -5,12 +5,20 @@ from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, TypedDict, Union +from typing import List, Optional, TypedDict, Union from typing_extensions import Annotated, NotRequired @@ -22,13 +30,32 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +MessagesTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] -Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +Messages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] -ToolChoice = Literal["auto", "none", "any"] +ChatCompletionStreamRequestToolChoiceTypedDict = Union[ + ToolChoiceTypedDict, ToolChoiceEnum +] + + +ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] + class ChatCompletionStreamRequestTypedDict(TypedDict): model: Nullable[str] @@ -50,34 +77,56 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ToolChoice] - + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + class ChatCompletionStreamRequest(BaseModel): model: Nullable[str] r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[Stop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET - tool_choice: Optional[ToolChoice] = "auto" - + + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -88,9 +137,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -100,4 +153,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py index 52266f47..f0561ef7 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py @@ -1,7 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel from typing import List, Optional, TypedDict @@ -15,13 +18,17 @@ class CompletionChunkTypedDict(TypedDict): object: NotRequired[str] created: NotRequired[int] usage: NotRequired[UsageInfoTypedDict] - + class CompletionChunk(BaseModel): id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py index 5a6e3c2d..7086fce0 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py @@ -8,8 +8,7 @@ class CompletionEventTypedDict(TypedDict): data: CompletionChunkTypedDict - + class CompletionEvent(BaseModel): data: CompletionChunk - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py index 83a0b02a..a09f67fa 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py @@ -9,17 +9,20 @@ FinishReason = Literal["stop", "length", "error", "tool_calls"] + class CompletionResponseStreamChoiceTypedDict(TypedDict): index: int delta: DeltaMessageTypedDict finish_reason: Nullable[FinishReason] - + class CompletionResponseStreamChoice(BaseModel): index: int + delta: DeltaMessage + finish_reason: 
Nullable[FinishReason] - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [] @@ -33,9 +36,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -45,4 +52,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py index 9adcb95e..49aeba4c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py @@ -1,17 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel -import pydantic -from typing import Final, Optional, TypedDict -from typing_extensions import Annotated +from .textchunk import TextChunk, TextChunkTypedDict -class ContentChunkTypedDict(TypedDict): - text: str - +ContentChunkTypedDict = TextChunkTypedDict -class ContentChunk(BaseModel): - text: str - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore - + +ContentChunk = TextChunk diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py index 763b48ec..314e52a7 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -2,7 +2,13 @@ from __future__ import annotations from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from pydantic import model_serializer from typing import List, Optional, TypedDict from typing_extensions import NotRequired @@ -12,13 +18,15 @@ class DeltaMessageTypedDict(TypedDict): role: NotRequired[str] content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - + class DeltaMessage(BaseModel): role: Optional[str] = None + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] @@ -32,9 +40,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = 
(self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -44,4 +56,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 7e0e1b5a..8693e34f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -1,7 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from pydantic import model_serializer from typing import List, Optional, TypedDict, Union from typing_extensions import NotRequired @@ -39,7 +45,7 @@ class FIMCompletionRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + class FIMCompletionRequest(BaseModel): model: Nullable[str] @@ -47,28 +53,46 @@ class FIMCompletionRequest(BaseModel): - `codestral-2405` - `codestral-latest` """ + prompt: str r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[FIMCompletionRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "suffix", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] @@ -79,9 +103,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -91,4 +119,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py index 27fcc4fe..ad285153 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py @@ -15,13 +15,17 @@ class FIMCompletionResponseTypedDict(TypedDict): usage: UsageInfoTypedDict created: NotRequired[int] choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - + class FIMCompletionResponse(BaseModel): id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py 
b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 3f2dc80c..d05918ca 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -1,7 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from pydantic import model_serializer from typing import List, Optional, TypedDict, Union from typing_extensions import NotRequired @@ -38,7 +44,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + class FIMCompletionStreamRequest(BaseModel): model: Nullable[str] @@ -46,27 +52,45 @@ class FIMCompletionStreamRequest(BaseModel): - `codestral-2405` - `codestral-latest` """ + prompt: str r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[FIMCompletionStreamRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "suffix", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] @@ -77,9 +101,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -89,4 +117,3 @@ def 
serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py index 235eb34c..533c3dea 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py @@ -10,10 +10,11 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] - + class Function(BaseModel): name: str + parameters: Dict[str, Any] + description: Optional[str] = "" - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py index a036ad75..d8daaef9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py @@ -14,9 +14,9 @@ class FunctionCallTypedDict(TypedDict): name: str arguments: ArgumentsTypedDict - + class FunctionCall(BaseModel): name: str + arguments: Arguments - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py new file mode 100644 index 00000000..47af74a9 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from typing import TypedDict + + +class FunctionNameTypedDict(TypedDict): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str + + +class FunctionName(BaseModel): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py index 0347dc16..68b1f780 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py @@ -6,13 +6,14 @@ from mistralai_gcp.types import BaseModel from typing import List, Optional + class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None - class HTTPValidationError(Exception): r"""Validation Error""" + data: HTTPValidationErrorData def __init__(self, data: HTTPValidationErrorData): @@ -20,4 +21,3 @@ def __init__(self, data: HTTPValidationErrorData): def __str__(self) -> str: return utils.marshal_json(self.data, HTTPValidationErrorData) - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index 5c3e9b7e..0398e9b2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -1,18 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .responseformats import ResponseFormats from mistralai_gcp.types import BaseModel -from typing import Literal, Optional, TypedDict +from typing import Optional, TypedDict from typing_extensions import NotRequired -ResponseFormats = Literal["text", "json_object"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] - + r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + class ResponseFormat(BaseModel): - type: Optional[ResponseFormats] = "text" - + type: Optional[ResponseFormats] = None + r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py new file mode 100644 index 00000000..2c06b812 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ResponseFormats = Literal["text", "json_object"] +r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py index cd4d8f3e..c9c0e0fc 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py @@ -9,8 +9,17 @@ class SecurityTypedDict(TypedDict): api_key: str - + class Security(BaseModel): - api_key: Annotated[str, FieldMetadata(security=SecurityMetadata(scheme=True, scheme_type="http", sub_type="bearer", field_name="Authorization"))] - + api_key: Annotated[ + str, + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py index 0209c5bb..872b9e32 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py @@ -1,26 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict from mistralai_gcp.types import BaseModel from typing import List, Literal, Optional, TypedDict, Union from typing_extensions import NotRequired -ContentTypedDict = Union[str, List[ContentChunkTypedDict]] +ContentTypedDict = Union[str, List[TextChunkTypedDict]] -Content = Union[str, List[ContentChunk]] +Content = Union[str, List[TextChunk]] Role = Literal["system"] + class SystemMessageTypedDict(TypedDict): content: ContentTypedDict role: NotRequired[Role] - + class SystemMessage(BaseModel): content: Content + role: Optional[Role] = "system" - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py index ecf27413..5c3774c1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py @@ -3,15 +3,20 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel import pydantic -from typing import Final, Optional, TypedDict +from typing import Final, Literal, Optional, TypedDict from typing_extensions import Annotated +Type = Literal["text"] + + class TextChunkTypedDict(TypedDict): text: str - + class TextChunk(BaseModel): text: str - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore - + + # fmt: off + TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "text" # type: ignore + # fmt: on diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py index 2e860d9f..24e1a9ff 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py @@ -2,21 +2,22 @@ from __future__ import annotations from .function import Function, FunctionTypedDict 
-from mistralai_gcp.types import BaseModel, UnrecognizedStr +from .tooltypes import ToolTypes +from mistralai_gcp.types import BaseModel from mistralai_gcp.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Literal, Optional, TypedDict, Union +from typing import Optional, TypedDict from typing_extensions import Annotated, NotRequired -ToolToolTypes = Union[Literal["function"], UnrecognizedStr] - class ToolTypedDict(TypedDict): function: FunctionTypedDict - type: NotRequired[ToolToolTypes] - + type: NotRequired[ToolTypes] + class Tool(BaseModel): function: Function - type: Annotated[Optional[ToolToolTypes], PlainValidator(validate_open_enum(False))] = "function" - + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py index 7f22889b..6374f2ca 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -2,23 +2,25 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict -from mistralai_gcp.types import BaseModel, UnrecognizedStr +from .tooltypes import ToolTypes +from mistralai_gcp.types import BaseModel from mistralai_gcp.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Literal, Optional, TypedDict, Union +from typing import Optional, TypedDict from typing_extensions import Annotated, NotRequired -ToolTypes = Union[Literal["function"], UnrecognizedStr] - class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] - + class ToolCall(BaseModel): function: FunctionCall + id: Optional[str] = "null" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = "function" - + + 
type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py new file mode 100644 index 00000000..bd6dbe7a --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .functionname import FunctionName, FunctionNameTypedDict +from .tooltypes import ToolTypes +from mistralai_gcp.types import BaseModel +from mistralai_gcp.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class ToolChoiceTypedDict(TypedDict): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionNameTypedDict + r"""this restriction of `Function` is used to select a specific function to call""" + type: NotRequired[ToolTypes] + + +class ToolChoice(BaseModel): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionName + r"""this restriction of `Function` is used to select a specific function to call""" + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py new file mode 100644 index 00000000..8e6a6ad8 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ToolChoiceEnum = Literal["auto", "none", "any", "required"] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py index e36f8033..caff0ad7 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -1,7 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from pydantic import model_serializer from typing import Literal, Optional, TypedDict from typing_extensions import NotRequired @@ -9,19 +15,23 @@ ToolMessageRole = Literal["tool"] + class ToolMessageTypedDict(TypedDict): content: str tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] role: NotRequired[ToolMessageRole] - + class ToolMessage(BaseModel): content: str + tool_call_id: OptionalNullable[str] = UNSET + name: OptionalNullable[str] = UNSET + role: Optional[ToolMessageRole] = "tool" - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["tool_call_id", "name", "role"] @@ -35,9 +45,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ 
-47,4 +61,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py new file mode 100644 index 00000000..878444c6 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import UnrecognizedStr +from typing import Literal, Union + + +ToolTypes = Union[Literal["function"], UnrecognizedStr] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py index 43877c9e..d63486bd 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py @@ -9,10 +9,11 @@ class UsageInfoTypedDict(TypedDict): prompt_tokens: int completion_tokens: int total_tokens: int - + class UsageInfo(BaseModel): prompt_tokens: int + completion_tokens: int + total_tokens: int - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py index 98649bf1..ccc6efb1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -1,26 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict +from .contentchunk import ContentChunk, ContentChunkTypedDict from mistralai_gcp.types import BaseModel from typing import List, Literal, Optional, TypedDict, Union from typing_extensions import NotRequired -UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] +UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] -UserMessageContent = Union[str, List[TextChunk]] +UserMessageContent = Union[str, List[ContentChunk]] UserMessageRole = Literal["user"] + class UserMessageTypedDict(TypedDict): content: UserMessageContentTypedDict role: NotRequired[UserMessageRole] - + class UserMessage(BaseModel): content: UserMessageContent + role: Optional[UserMessageRole] = "user" - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py index 23008f45..23e95956 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py @@ -15,10 +15,11 @@ class ValidationErrorTypedDict(TypedDict): loc: List[LocTypedDict] msg: str type: str - + class ValidationError(BaseModel): loc: List[Loc] + msg: str + type: str - diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index 262f60e8..bb4c1dea 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -58,19 +58,18 @@ def __init__( "credentials must be an instance of google.auth.credentials.Credentials" ) - project_id = project_id or loaded_project_id + project_id = project_id or loaded_project_id if project_id is None: raise models.SDKError("project_id must be provided") def auth_token() -> str: if access_token: return access_token - else: - credentials.refresh(google.auth.transport.requests.Request()) - token = 
credentials.token - if not token: - raise models.SDKError("Failed to get token from credentials") - return token + credentials.refresh(google.auth.transport.requests.Request()) + token = credentials.token + if not token: + raise models.SDKError("Failed to get token from credentials") + return token if client is None: client = httpx.Client() diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index 54b73aa6..408d8c3e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - from ._hooks import SDKHooks from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix @@ -14,7 +13,7 @@ SERVER_PROD = "prod" r"""Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_PROD: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -24,14 +23,14 @@ class SDKConfiguration: client: HttpClient async_client: AsyncHttpClient debug_logger: Logger - security: Optional[Union[models.Security,Callable[[], models.Security]]] = None + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.1" - gen_version: str = "2.399.0" - user_agent: str = "speakeasy-sdk/python 1.0.1 2.399.0 0.0.2 mistralai-gcp" + sdk_version: str = "1.2.2" + gen_version: str = "2.415.6" + user_agent: str = "speakeasy-sdk/python 1.2.2 2.415.6 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = 
None @@ -45,10 +44,9 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: self.server = SERVER_PROD if self.server not in SERVERS: - raise ValueError(f"Invalid server \"{self.server}\"") + raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - def get_hooks(self) -> SDKHooks: return self._hooks diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 95aa1b60..6c26aeb9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -33,8 +33,13 @@ validate_open_enum, ) from .url import generate_url, template_url, remove_suffix -from .values import get_global_from_env, match_content_type, match_status_codes, match_response -from .logger import Logger, get_body_content, NoOpLogger +from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, +) +from .logger import Logger, get_body_content, get_default_logger __all__ = [ "BackoffStrategy", @@ -43,6 +48,7 @@ "FormMetadata", "generate_url", "get_body_content", + "get_default_logger", "get_discriminator", "get_global_from_env", "get_headers", @@ -57,7 +63,6 @@ "match_status_codes", "match_response", "MultipartFormMetadata", - "NoOpLogger", "OpenEnumMeta", "PathParamMetadata", "QueryParamMetadata", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py index 07f9b235..9f5a731e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py @@ -17,7 +17,7 @@ MultipartFormMetadata, find_field_metadata, ) -from .values import _val_to_string +from .values import _is_set, _val_to_string def _populate_form( @@ -27,7 +27,7 @@ def _populate_form( delimiter: str, form: Dict[str, List[str]], ): - if obj is None: + if not _is_set(obj): return form if 
isinstance(obj, BaseModel): @@ -41,7 +41,7 @@ def _populate_form( continue val = getattr(obj, name) - if val is None: + if not _is_set(val): continue if explode: @@ -54,7 +54,7 @@ def _populate_form( elif isinstance(obj, Dict): items = [] for key, value in obj.items(): - if value is None: + if not _is_set(value): continue if explode: @@ -68,7 +68,7 @@ def _populate_form( items = [] for value in obj: - if value is None: + if not _is_set(value): continue if explode: @@ -102,7 +102,7 @@ def serialize_multipart_form( field = request_fields[name] val = getattr(request, name) - if val is None: + if not _is_set(val): continue field_metadata = find_field_metadata(field, MultipartFormMetadata) @@ -156,7 +156,7 @@ def serialize_multipart_form( values = [] for value in val: - if value is None: + if not _is_set(value): continue values.append(_val_to_string(value)) @@ -176,7 +176,7 @@ def serialize_form_data(data: Any) -> Dict[str, Any]: field = data_fields[name] val = getattr(data, name) - if val is None: + if not _is_set(val): continue metadata = find_field_metadata(field, FormMetadata) @@ -200,7 +200,8 @@ def serialize_form_data(data: Any) -> Dict[str, Any]: raise ValueError(f"Invalid form style for field {name}") elif isinstance(data, Dict): for key, value in data.items(): - form[key] = [_val_to_string(value)] + if _is_set(value): + form[key] = [_val_to_string(value)] else: raise TypeError(f"Invalid request body type {type(data)} for form data") diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py index e14a0f4a..37864cbb 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py @@ -15,16 +15,16 @@ find_field_metadata, ) -from .values import _populate_from_globals, _val_to_string +from .values import _is_set, _populate_from_globals, _val_to_string def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> 
Dict[str, str]: headers: Dict[str, str] = {} globals_already_populated = [] - if headers_params is not None: + if _is_set(headers_params): globals_already_populated = _populate_headers(headers_params, gbls, headers, []) - if gbls is not None: + if _is_set(gbls): _populate_headers(gbls, None, headers, globals_already_populated) return headers @@ -67,7 +67,7 @@ def _populate_headers( def _serialize_header(explode: bool, obj: Any) -> str: - if obj is None: + if not _is_set(obj): return "" if isinstance(obj, BaseModel): @@ -83,7 +83,7 @@ def _serialize_header(explode: bool, obj: Any) -> str: f_name = obj_field.alias if obj_field.alias is not None else name val = getattr(obj, name) - if val is None: + if not _is_set(val): continue if explode: @@ -98,7 +98,7 @@ def _serialize_header(explode: bool, obj: Any) -> str: items = [] for key, value in obj.items(): - if value is None: + if not _is_set(value): continue if explode: @@ -113,14 +113,14 @@ def _serialize_header(explode: bool, obj: Any) -> str: items = [] for value in obj: - if value is None: + if not _is_set(value): continue items.append(_val_to_string(value)) if len(items) > 0: return ",".join(items) - else: + elif _is_set(obj): return f"{_val_to_string(obj)}" return "" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py index 7e4bbeac..b661aff6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py @@ -3,14 +3,20 @@ import httpx from typing import Any, Protocol + class Logger(Protocol): def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: pass + class NoOpLogger: def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: pass + def get_body_content(req: httpx.Request) -> str: return "" if not hasattr(req, "_content") else str(req.content) + +def get_default_logger() -> Logger: + return NoOpLogger() diff --git 
a/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py index 1c8c5834..37a6e7f9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py @@ -15,7 +15,12 @@ QueryParamMetadata, find_field_metadata, ) -from .values import _get_serialized_params, _populate_from_globals, _val_to_string +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) from .forms import _populate_form @@ -26,7 +31,7 @@ def get_query_params( params: Dict[str, List[str]] = {} globals_already_populated = _populate_query_params(query_params, gbls, params, []) - if gbls is not None: + if _is_set(gbls): _populate_query_params(gbls, None, params, globals_already_populated) return params @@ -55,7 +60,7 @@ def _populate_query_params( if not metadata: continue - value = getattr(query_params, name) if query_params is not None else None + value = getattr(query_params, name) if _is_set(query_params) else None value, global_found = _populate_from_globals( name, value, QueryParamMetadata, gbls @@ -99,7 +104,7 @@ def _populate_deep_object_query_params( obj: Any, params: Dict[str, List[str]], ): - if obj is None: + if not _is_set(obj): return if isinstance(obj, BaseModel): @@ -113,10 +118,7 @@ def _populate_deep_object_query_params_basemodel( obj: Any, params: Dict[str, List[str]], ): - if obj is None: - return - - if not isinstance(obj, BaseModel): + if not _is_set(obj) or not isinstance(obj, BaseModel): return obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields @@ -128,11 +130,11 @@ def _populate_deep_object_query_params_basemodel( params_key = f"{prior_params_key}[{f_name}]" obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) - if obj_param_metadata is None: + if not _is_set(obj_param_metadata): continue obj_val = getattr(obj, name) - if obj_val is None: + if not 
_is_set(obj_val): continue if isinstance(obj_val, BaseModel): @@ -150,11 +152,11 @@ def _populate_deep_object_query_params_dict( value: Dict, params: Dict[str, List[str]], ): - if value is None: + if not _is_set(value): return for key, val in value.items(): - if val is None: + if not _is_set(val): continue params_key = f"{prior_params_key}[{key}]" @@ -174,11 +176,11 @@ def _populate_deep_object_query_params_list( value: List, params: Dict[str, List[str]], ): - if value is None: + if not _is_set(value): return for val in value: - if val is None: + if not _is_set(val): continue if params.get(params_key) is None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py index a06f9279..4d608671 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py @@ -1,5 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +import asyncio import random import time from typing import List @@ -212,5 +213,5 @@ async def retry_with_backoff_async( raise sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) sleep = min(sleep, max_interval / 1000) - time.sleep(sleep) + await asyncio.sleep(sleep) retries += 1 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py index aab4cb65..295a3f40 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py @@ -16,7 +16,6 @@ ) - def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: headers: Dict[str, str] = {} query_params: Dict[str, List[str]] = {} @@ -42,8 +41,10 @@ def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: _parse_security_option(headers, query_params, value) return headers, query_params if metadata.scheme: - # Special case for basic auth which could be a flattened model - if metadata.sub_type == "basic" and not isinstance(value, BaseModel): + # Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): _parse_security_scheme(headers, query_params, metadata, name, security) else: _parse_security_scheme(headers, query_params, metadata, name, value) @@ -80,9 +81,12 @@ def _parse_security_scheme( sub_type = scheme_metadata.sub_type if isinstance(scheme, BaseModel): - if scheme_type == "http" and sub_type == "basic": - _parse_basic_auth_scheme(headers, scheme) - return + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields for name in scheme_fields: @@ -131,6 +135,8 @@ def _parse_security_scheme_value( elif scheme_type == "http": if sub_type 
== "bearer": headers[header_name] = _apply_bearer(value) + elif sub_type == "custom": + return else: raise ValueError("sub type {sub_type} not supported") else: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index a98998a3..85d57f43 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -9,13 +9,15 @@ from pydantic_core import from_json from typing_inspect import is_optional_type -from ..types.basemodel import BaseModel, Nullable, OptionalNullable +from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): if is_optional_type(type(d)) and d is None: return None + if isinstance(d, Unset): + return d if not isinstance(d, Decimal): raise ValueError("Expected Decimal object") @@ -29,7 +31,7 @@ def validate_decimal(d): if d is None: return None - if isinstance(d, Decimal): + if isinstance(d, (Decimal, Unset)): return d if not isinstance(d, (str, int, float)): @@ -42,6 +44,8 @@ def serialize_float(as_str: bool): def serialize(f): if is_optional_type(type(f)) and f is None: return None + if isinstance(f, Unset): + return f if not isinstance(f, float): raise ValueError("Expected float") @@ -55,7 +59,7 @@ def validate_float(f): if f is None: return None - if isinstance(f, float): + if isinstance(f, (float, Unset)): return f if not isinstance(f, str): @@ -65,14 +69,16 @@ def validate_float(f): def serialize_int(as_str: bool): - def serialize(b): - if is_optional_type(type(b)) and b is None: + def serialize(i): + if is_optional_type(type(i)) and i is None: return None + if isinstance(i, Unset): + return i - if not isinstance(b, int): + if not isinstance(i, int): raise ValueError("Expected int") - return str(b) if as_str else b + return str(i) if as_str else i return serialize @@ -81,7 +87,7 @@ def validate_int(b): if b is 
None: return None - if isinstance(b, int): + if isinstance(b, (int, Unset)): return b if not isinstance(b, str): @@ -95,6 +101,9 @@ def validate(e): if e is None: return None + if isinstance(e, Unset): + return e + if is_int: if not isinstance(e, int): raise ValueError("Expected int") diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/url.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/url.py index b201bfa4..c78ccbae 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/url.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/url.py @@ -18,7 +18,12 @@ PathParamMetadata, find_field_metadata, ) -from .values import _get_serialized_params, _populate_from_globals, _val_to_string +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) def generate_url( @@ -32,7 +37,7 @@ def generate_url( globals_already_populated = _populate_path_params( path_params, gbls, path_param_values, [] ) - if gbls is not None: + if _is_set(gbls): _populate_path_params(gbls, None, path_param_values, globals_already_populated) for key, value in path_param_values.items(): @@ -64,14 +69,14 @@ def _populate_path_params( if param_metadata is None: continue - param = getattr(path_params, name) if path_params is not None else None + param = getattr(path_params, name) if _is_set(path_params) else None param, global_found = _populate_from_globals( name, param, PathParamMetadata, gbls ) if global_found: globals_already_populated.append(name) - if param is None: + if not _is_set(param): continue f_name = field.alias if field.alias is not None else name @@ -87,13 +92,13 @@ def _populate_path_params( if param_metadata.style == "simple": if isinstance(param, List): for pp_val in param: - if pp_val is None: + if not _is_set(pp_val): continue pp_vals.append(_val_to_string(pp_val)) path_param_values[f_name] = ",".join(pp_vals) elif isinstance(param, Dict): for pp_key in param: - if param[pp_key] is None: + if not _is_set(param[pp_key]): 
continue if param_metadata.explode: pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") @@ -116,7 +121,7 @@ def _populate_path_params( ) param_field_val = getattr(param, name) - if param_field_val is None: + if not _is_set(param_field_val): continue if param_metadata.explode: pp_vals.append( @@ -127,7 +132,7 @@ def _populate_path_params( f"{param_name},{_val_to_string(param_field_val)}" ) path_param_values[f_name] = ",".join(pp_vals) - else: + elif _is_set(param): path_param_values[f_name] = _val_to_string(param) return globals_already_populated diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py index 24ccae3d..2b4b6832 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py @@ -10,6 +10,8 @@ from pydantic import BaseModel from pydantic.fields import FieldInfo +from ..types.basemodel import Unset + from .serializers import marshal_json from .metadata import ParamMetadata, find_field_metadata @@ -126,3 +128,7 @@ def _get_serialized_params( params[field_name] = marshal_json(obj, typ) return params + + +def _is_set(value: Any) -> bool: + return value is not None and not isinstance(value, Unset) diff --git a/pyproject.toml b/pyproject.toml index 35bcf5e1..6c630a9b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.0.3" +version = "1.1.1" description = "Python Client SDK for the Mistral AI API." authors = ["Mistral"] readme = "README.md" diff --git a/src/mistralai/_hooks/sdkhooks.py b/src/mistralai/_hooks/sdkhooks.py index 24b0d08c..1f9a9316 100644 --- a/src/mistralai/_hooks/sdkhooks.py +++ b/src/mistralai/_hooks/sdkhooks.py @@ -1,11 +1,21 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" import httpx -from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks +from .types import ( + SDKInitHook, + BeforeRequestContext, + BeforeRequestHook, + AfterSuccessContext, + AfterSuccessHook, + AfterErrorContext, + AfterErrorHook, + Hooks, +) from .registration import init_hooks from typing import List, Optional, Tuple from mistralai.httpclient import HttpClient + class SDKHooks(Hooks): def __init__(self) -> None: self.sdk_init_hooks: List[SDKInitHook] = [] @@ -31,7 +41,9 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: base_url, client = hook.sdk_init(base_url, client) return base_url, client - def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> httpx.Request: + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: for hook in self.before_request_hooks: out = hook.before_request(hook_ctx, request) if isinstance(out, Exception): @@ -40,7 +52,9 @@ def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) return request - def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> httpx.Response: + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> httpx.Response: for hook in self.after_success_hooks: out = hook.after_success(hook_ctx, response) if isinstance(out, Exception): @@ -48,7 +62,12 @@ def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) response = out return response - def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Tuple[Optional[httpx.Response], Optional[Exception]]: + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Tuple[Optional[httpx.Response], 
Optional[Exception]]: for hook in self.after_error_hooks: result = hook.after_error(hook_ctx, response, error) if isinstance(result, Exception): diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py index e9391f3b..fe448e94 100644 --- a/src/mistralai/_hooks/types.py +++ b/src/mistralai/_hooks/types.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - from abc import ABC, abstractmethod import httpx from mistralai.httpclient import HttpClient @@ -12,7 +11,12 @@ class HookContext: oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None - def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]): + def __init__( + self, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -20,18 +24,23 @@ def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], securi class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) - + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): - super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + super().__init__( + hook_ctx.operation_id, hook_ctx.oauth2_scopes, 
hook_ctx.security_source + ) class SDKInitHook(ABC): @@ -42,19 +51,28 @@ def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: class BeforeRequestHook(ABC): @abstractmethod - def before_request(self, hook_ctx: BeforeRequestContext, request: httpx.Request) -> Union[httpx.Request, Exception]: + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: pass class AfterSuccessHook(ABC): @abstractmethod - def after_success(self, hook_ctx: AfterSuccessContext, response: httpx.Response) -> Union[httpx.Response, Exception]: + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: pass class AfterErrorHook(ABC): @abstractmethod - def after_error(self, hook_ctx: AfterErrorContext, response: Optional[httpx.Response], error: Optional[Exception]) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: pass diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index aa291254..05d17753 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -7,22 +7,40 @@ from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, AsyncGenerator, Generator, List, Optional, Union + class Agents(BaseSDK): r"""Agents API.""" - - + def complete( - self, *, - messages: Union[List[models.AgentsCompletionRequestMessages], List[models.AgentsCompletionRequestMessagesTypedDict]], + self, + *, + messages: Union[ + List[models.AgentsCompletionRequestMessages], + List[models.AgentsCompletionRequestMessagesTypedDict], + ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: 
Optional[Union[models.AgentsCompletionRequestStop, models.AgentsCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.AgentsCompletionRequestStop, + models.AgentsCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.AgentsCompletionRequestToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.AgentsCompletionRequestToolChoice, + models.AgentsCompletionRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -36,9 +54,9 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -47,23 +65,29 @@ def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.AgentsCompletionRequest( max_tokens=max_tokens, min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionRequestToolChoice] + ), agent_id=agent_id, ) - + req = self.build_request( method="POST", path="/v1/agents/completions", @@ -76,57 +100,84 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, 
utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="agents_completion_v1_agents_completions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def complete_async( - self, *, - messages: Union[List[models.AgentsCompletionRequestMessages], 
List[models.AgentsCompletionRequestMessagesTypedDict]], + self, + *, + messages: Union[ + List[models.AgentsCompletionRequestMessages], + List[models.AgentsCompletionRequestMessagesTypedDict], + ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.AgentsCompletionRequestStop, models.AgentsCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.AgentsCompletionRequestStop, + models.AgentsCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.AgentsCompletionRequestToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.AgentsCompletionRequestToolChoice, + models.AgentsCompletionRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -140,9 +191,9 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -151,24 +202,30 @@ async def complete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.AgentsCompletionRequest( max_tokens=max_tokens, min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionRequestToolChoice] + ), agent_id=agent_id, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/agents/completions", base_url=base_url, @@ -180,57 +237,84 @@ async def complete_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = 
self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="agents_completion_v1_agents_completions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def stream( - self, 
*, - messages: Union[List[models.AgentsCompletionStreamRequestMessages], List[models.AgentsCompletionStreamRequestMessagesTypedDict]], + self, + *, + messages: Union[ + List[models.AgentsCompletionStreamRequestMessages], + List[models.AgentsCompletionStreamRequestMessagesTypedDict], + ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.AgentsCompletionStreamRequestStop, + models.AgentsCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.AgentsCompletionStreamRequestToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.AgentsCompletionStreamRequestToolChoice, + models.AgentsCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -243,12 +327,12 @@ def stream( :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -257,23 +341,29 @@ def stream( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionStreamRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] + ), agent_id=agent_id, ) - + req = self.build_request( method="POST", path="/v1/agents/completions#stream", @@ -286,58 +376,87 @@ def stream( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", 
models.AgentsCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_agents", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + 
raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def stream_async( - self, *, - messages: Union[List[models.AgentsCompletionStreamRequestMessages], List[models.AgentsCompletionStreamRequestMessagesTypedDict]], + self, + *, + messages: Union[ + List[models.AgentsCompletionStreamRequestMessages], + List[models.AgentsCompletionStreamRequestMessagesTypedDict], + ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.AgentsCompletionStreamRequestStop, models.AgentsCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.AgentsCompletionStreamRequestStop, + models.AgentsCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.AgentsCompletionStreamRequestToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.AgentsCompletionStreamRequestToolChoice, + models.AgentsCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -350,12 +469,12 @@ async def stream_async( :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
:param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -364,24 +483,30 @@ async def stream_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.AgentsCompletionStreamRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] + ), agent_id=agent_id, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/agents/completions#stream", base_url=base_url, @@ -393,42 +518,53 @@ async def stream_async( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - 
get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.AgentsCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_agents", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events_async( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: 
{http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index f9e54c5a..3fc2bdd4 100644 --- a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -3,10 +3,15 @@ from .sdkconfiguration import SDKConfiguration import httpx from mistralai import models, utils -from mistralai._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext +from mistralai._hooks import ( + AfterErrorContext, + AfterSuccessContext, + BeforeRequestContext, +) from mistralai.utils import RetryConfig, SerializedRequestBody, get_body_content from typing import Callable, List, Optional, Tuple + class BaseSDK: sdk_configuration: SDKConfiguration @@ -24,6 +29,46 @@ def get_url(self, base_url, url_variables): return utils.template_url(base_url, url_variables) + def build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self.build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + 
timeout_ms, + get_serialized_body, + url_override, + ) + def build_request( self, method, @@ -45,7 +90,46 @@ def build_request( url_override: Optional[str] = None, ) -> httpx.Request: client = self.sdk_configuration.client + return self.build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + ) + def build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + ) -> httpx.Request: query_params = {} url = url_override @@ -129,7 +213,7 @@ def do(): req.method, req.url, req.headers, - get_body_content(req) + get_body_content(req), ) http_res = client.send(req, stream=stream) except Exception as e: @@ -149,7 +233,7 @@ def do(): http_res.status_code, http_res.url, http_res.headers, - "" if stream else http_res.text + "" if stream else http_res.text, ) if utils.match_status_codes(error_status_codes, http_res.status_code): @@ -189,6 +273,7 @@ async def do_request_async( ) -> httpx.Response: client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + async def do(): http_res = None try: @@ -200,7 +285,7 @@ async def do(): req.method, req.url, req.headers, - get_body_content(req) + get_body_content(req), ) http_res = await client.send(req, stream=stream) except Exception as e: @@ -220,7 +305,7 @@ async def do(): http_res.status_code, http_res.url, http_res.headers, - "" if stream else http_res.text + "" if stream else http_res.text, ) if 
utils.match_status_codes(error_status_codes, http_res.status_code): diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index e83fc34a..3e770f14 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -7,12 +7,13 @@ from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, AsyncGenerator, Generator, List, Optional, Union + class Chat(BaseSDK): r"""Chat Completion API.""" - - + def complete( - self, *, + self, + *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: Optional[float] = 0.7, @@ -22,9 +23,18 @@ def complete( stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -41,9 +51,9 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -53,10 +63,10 @@ def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -67,12 +77,16 @@ def complete( stop=stop, random_seed=random_seed, messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), safe_prompt=safe_prompt, ) - + req = self.build_request( method="POST", path="/v1/chat/completions", @@ -85,47 +99,57 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None 
if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def complete_async( - self, *, + self, + *, model: Nullable[str], messages: 
Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: Optional[float] = 0.7, @@ -135,9 +159,18 @@ async def complete_async( stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -154,9 +187,9 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -166,10 +199,10 @@ async def complete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionRequest( model=model, temperature=temperature, @@ -180,13 +213,17 @@ async def complete_async( stop=stop, random_seed=random_seed, messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionRequestToolChoice] + ), safe_prompt=safe_prompt, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/chat/completions", base_url=base_url, @@ -198,59 +235,86 @@ async def complete_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - 
hook_ctx=HookContext(operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="chat_completion_v1_chat_completions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ChatCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.ChatCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def stream( - self, *, + self, + *, model: Nullable[str], - messages: Union[List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessages], + 
List[models.ChatCompletionStreamRequestMessagesTypedDict], + ], temperature: Optional[float] = 0.7, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.ChatCompletionStreamRequestStop, models.ChatCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ChatCompletionStreamRequestToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -266,12 +330,12 @@ def stream( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -281,10 +345,10 @@ def stream( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionStreamRequest( model=model, temperature=temperature, @@ -294,13 +358,19 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionStreamRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), safe_prompt=safe_prompt, ) - + req = self.build_request( method="POST", path="/v1/chat/completions#stream", @@ -313,60 +383,89 @@ def stream( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", 
models.ChatCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_chat", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise 
models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def stream_async( - self, *, + self, + *, model: Nullable[str], - messages: Union[List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessages], + List[models.ChatCompletionStreamRequestMessagesTypedDict], + ], temperature: Optional[float] = 0.7, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.ChatCompletionStreamRequestStop, models.ChatCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, - response_format: Optional[Union[models.ResponseFormat, models.ResponseFormatTypedDict]] = None, - tools: OptionalNullable[Union[List[models.Tool], List[models.ToolTypedDict]]] = UNSET, - tool_choice: Optional[models.ChatCompletionStreamRequestToolChoice] = "auto", + response_format: Optional[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = None, + tools: OptionalNullable[ + Union[List[models.Tool], List[models.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -382,12 +481,12 @@ async def stream_async( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: + :param response_format: + :param tools: + :param tool_choice: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -397,10 +496,10 @@ async def stream_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.ChatCompletionStreamRequest( model=model, temperature=temperature, @@ -410,14 +509,20 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, - messages=utils.get_pydantic_model(messages, List[models.ChatCompletionStreamRequestMessages]), - response_format=utils.get_pydantic_model(response_format, Optional[models.ResponseFormat]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=tool_choice, + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), safe_prompt=safe_prompt, ) - - req = self.build_request( + + req = 
self.build_request_async( method="POST", path="/v1/chat/completions#stream", base_url=base_url, @@ -429,42 +534,53 @@ async def stream_async( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.ChatCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_chat", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events_async( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if 
utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 193758ef..c19a9e38 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -7,12 +7,13 @@ from mistralai.utils import get_security_from_env from typing import Any, Optional, Union + class Embeddings(BaseSDK): r"""Embeddings API.""" - - + def create( - self, *, + self, + *, inputs: Union[models.Inputs, models.InputsTypedDict], model: str, encoding_format: OptionalNullable[str] = UNSET, @@ -35,16 +36,16 @@ def create( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.EmbeddingRequest( inputs=inputs, model=model, encoding_format=encoding_format, ) - + req = self.build_request( method="POST", path="/v1/embeddings", @@ -57,47 +58,57 @@ def create( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.EmbeddingRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), timeout_ms=timeout_ms, ) - + if retries 
== UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.EmbeddingResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.EmbeddingResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - 
- async def create_async( - self, *, + self, + *, inputs: Union[models.Inputs, models.InputsTypedDict], model: str, encoding_format: OptionalNullable[str] = UNSET, @@ -120,17 +131,17 @@ async def create_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.EmbeddingRequest( inputs=inputs, model=model, encoding_format=encoding_format, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/embeddings", base_url=base_url, @@ -142,41 +153,50 @@ async def create_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.EmbeddingRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return 
utils.unmarshal_json(http_res.text, Optional[models.EmbeddingResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.EmbeddingResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 2aa37650..06724056 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -7,12 +7,13 @@ from mistralai.utils import get_security_from_env from typing import Optional, Union + class Files(BaseSDK): r"""Files API""" - - + def upload( - self, *, + self, + *, file: Union[models.File, models.FileTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -35,14 +36,14 @@ def upload( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), ) - + req = self.build_request( method="POST", path="/v1/files", @@ -55,43 +56,55 @@ def upload( user_agent_header="user-agent", 
accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams), + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.FilesAPIRoutesUploadFileMultiPartBodyParams, + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_upload_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected 
response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def upload_async( - self, *, + self, + *, file: Union[models.File, models.FileTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -114,15 +127,15 @@ async def upload_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/files", base_url=base_url, @@ -134,43 +147,55 @@ async def upload_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams), + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.FilesAPIRoutesUploadFileMultiPartBodyParams, + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_upload_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - 
error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def list( - self, *, + self, + *, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -187,7 +212,7 @@ def list( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url req = self.build_request( @@ -204,40 +229,46 @@ def list( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + 
operation_id="files_api_routes_list_files", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def list_async( - self, *, + self, + *, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -254,10 +285,10 @@ async def list_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - req = self.build_request( + req = self.build_request_async( method="GET", path="/v1/files", base_url=base_url, @@ -271,40 +302,46 @@ async def list_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + 
retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_list_files", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def retrieve( - self, *, + self, + *, file_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -314,7 +351,7 @@ def retrieve( Returns information about a specific file. 
- :param file_id: + :param file_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -323,14 +360,14 @@ def retrieve( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, ) - + req = self.build_request( method="GET", path="/v1/files/{file_id}", @@ -345,40 +382,46 @@ def retrieve( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_retrieve_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", 
http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def retrieve_async( - self, *, + self, + *, file_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -388,7 +431,7 @@ async def retrieve_async( Returns information about a specific file. - :param file_id: + :param file_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -397,15 +440,15 @@ async def retrieve_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, ) - - req = self.build_request( + + req = self.build_request_async( method="GET", path="/v1/files/{file_id}", base_url=base_url, @@ -419,40 +462,46 @@ async def retrieve_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, 
models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_retrieve_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def delete( - self, *, + self, + *, file_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -462,7 +511,7 @@ def delete( Delete a file. 
- :param file_id: + :param file_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -471,14 +520,14 @@ def delete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, ) - + req = self.build_request( method="DELETE", path="/v1/files/{file_id}", @@ -493,40 +542,46 @@ def delete( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="files_api_routes_delete_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", 
http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def delete_async( - self, *, + self, + *, file_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -536,7 +591,7 @@ async def delete_async( Delete a file. - :param file_id: + :param file_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -545,15 +600,15 @@ async def delete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, ) - - req = self.build_request( + + req = self.build_request_async( method="DELETE", path="/v1/files/{file_id}", base_url=base_url, @@ -567,34 +622,39 @@ async def delete_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + 
hook_ctx=HookContext( + operation_id="files_api_routes_delete_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 19090d91..5239e90d 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -7,12 +7,13 @@ from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, AsyncGenerator, Generator, Optional, Union + class Fim(BaseSDK): r"""Fill-in-the-middle API.""" - - + def complete( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -20,7 +21,12 @@ def complete( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + 
models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -49,10 +55,10 @@ def complete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionRequest( model=model, temperature=temperature, @@ -65,7 +71,7 @@ def complete( prompt=prompt, suffix=suffix, ) - + req = self.build_request( method="POST", path="/v1/fim/completions", @@ -78,47 +84,57 @@ def complete( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if 
utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.FIMCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def complete_async( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -126,7 +142,12 @@ async def complete_async( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.FIMCompletionRequestStop, models.FIMCompletionRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -155,10 +176,10 @@ async def complete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionRequest( 
model=model, temperature=temperature, @@ -171,8 +192,8 @@ async def complete_async( prompt=prompt, suffix=suffix, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fim/completions", base_url=base_url, @@ -184,47 +205,57 @@ async def complete_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="fim_completion_v1_fim_completions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FIMCompletionResponse]) + return utils.unmarshal_json( + http_res.text, Optional[models.FIMCompletionResponse] + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise 
models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def stream( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -232,7 +263,12 @@ def stream( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -249,7 +285,7 @@ def stream( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. 
- :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. @@ -261,10 +297,10 @@ def stream( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionStreamRequest( model=model, temperature=temperature, @@ -277,7 +313,7 @@ def stream( prompt=prompt, suffix=suffix, ) - + req = self.build_request( method="POST", path="/v1/fim/completions#stream", @@ -290,48 +326,60 @@ def stream( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_fim", + oauth2_scopes=[], + security_source=get_security_from_env( + 
self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events(http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def stream_async( - self, *, + self, + *, model: Nullable[str], prompt: str, temperature: Optional[float] = 0.7, @@ -339,7 +387,12 @@ async def stream_async( max_tokens: OptionalNullable[int] = UNSET, min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.FIMCompletionStreamRequestStop, models.FIMCompletionStreamRequestStopTypedDict]] = None, + stop: Optional[ + Union[ + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, + ] + ] = None, 
random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -356,7 +409,7 @@ async def stream_async( :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. :param min_tokens: The minimum number of tokens to generate in the completion. - :param stream: + :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
@@ -368,10 +421,10 @@ async def stream_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.FIMCompletionStreamRequest( model=model, temperature=temperature, @@ -384,8 +437,8 @@ async def stream_async( prompt=prompt, suffix=suffix, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fim/completions#stream", base_url=base_url, @@ -397,42 +450,53 @@ async def stream_async( user_agent_header="user-agent", accept_header_value="text/event-stream", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.FIMCompletionStreamRequest), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="stream_fim", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], + error_status_codes=["422", "4XX", "5XX"], stream=True, - retry_config=retry_config + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async(http_res, lambda raw: utils.unmarshal_json(raw, 
models.CompletionEvent), sentinel="[DONE]") + return eventstreaming.stream_events_async( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/fine_tuning.py b/src/mistralai/fine_tuning.py index 998100a0..ce3d1389 100644 --- a/src/mistralai/fine_tuning.py +++ b/src/mistralai/fine_tuning.py @@ -4,13 +4,14 @@ from .sdkconfiguration import SDKConfiguration from mistralai.jobs import Jobs + class FineTuning(BaseSDK): jobs: Jobs + def __init__(self, sdk_config: SDKConfiguration) -> None: BaseSDK.__init__(self, sdk_config) self.sdk_configuration = sdk_config self._init_sdks() - + def _init_sdks(self): self.jobs = Jobs(self.sdk_configuration) - diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 2ea3e4a0..b6c1b243 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -8,11 +8,11 @@ from mistralai.utils import get_security_from_env from typing import List, Optional, Union + class Jobs(BaseSDK): - - def list( - self, *, + self, + *, page: Optional[int] = 0, 
page_size: Optional[int] = 100, model: OptionalNullable[str] = UNSET, @@ -47,10 +47,10 @@ def list( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, @@ -62,7 +62,7 @@ def list( wandb_name=wandb_name, suffix=suffix, ) - + req = self.build_request( method="GET", path="/v1/fine_tuning/jobs", @@ -77,40 +77,46 @@ def list( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - 
raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def list_async( - self, *, + self, + *, page: Optional[int] = 0, page_size: Optional[int] = 100, model: OptionalNullable[str] = UNSET, @@ -145,10 +151,10 @@ async def list_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, @@ -160,8 +166,8 @@ async def list_async( wandb_name=wandb_name, suffix=suffix, ) - - req = self.build_request( + + req = self.build_request_async( method="GET", path="/v1/fine_tuning/jobs", base_url=base_url, @@ -175,47 +181,65 @@ async def list_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + 
retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def create( - self, *, + self, + *, model: models.FineTuneableModel, - hyperparameters: Union[models.TrainingParametersIn, models.TrainingParametersInTypedDict], - training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None, + hyperparameters: Union[ + models.TrainingParametersIn, models.TrainingParametersInTypedDict + ], + training_files: Optional[ + Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] + ] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[Union[List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict]]] = UNSET, - repositories: Optional[Union[List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict]]] = None, + integrations: OptionalNullable[ + Union[ + List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] + ] + ] = UNSET, + repositories: Optional[ + Union[ + List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict] + ] + ] = None, auto_start: Optional[bool] = None, retries: 
OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -227,11 +251,11 @@ def create( :param model: The name of the model to fine-tune. :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. - :param training_files: + :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` :param integrations: A list of integrations to enable for your fine-tuning job. - :param repositories: + :param repositories: :param auto_start: This field will be required in a future release. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -241,21 +265,29 @@ def create( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobIn( model=model, - training_files=utils.get_pydantic_model(training_files, Optional[List[models.TrainingFile]]), + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), validation_files=validation_files, - hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn), + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.TrainingParametersIn + ), suffix=suffix, - integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.JobInIntegrations]]), - repositories=utils.get_pydantic_model(repositories, Optional[List[models.JobInRepositories]]), + integrations=utils.get_pydantic_model( + integrations, OptionalNullable[List[models.JobInIntegrations]] + ), + repositories=utils.get_pydantic_model( + repositories, Optional[List[models.JobInRepositories]] + ), auto_start=auto_start, ) - + req = self.build_request( method="POST", path="/v1/fine_tuning/jobs", @@ -268,50 +300,73 @@ def create( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.JobIn), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.JobIn + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", 
"500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + return utils.unmarshal_json( + http_res.text, + Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse], + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def create_async( - self, *, + self, + *, model: models.FineTuneableModel, - hyperparameters: Union[models.TrainingParametersIn, models.TrainingParametersInTypedDict], - training_files: Optional[Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]]] = None, + hyperparameters: Union[ + models.TrainingParametersIn, models.TrainingParametersInTypedDict + ], + 
training_files: Optional[ + Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] + ] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[Union[List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict]]] = UNSET, - repositories: Optional[Union[List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict]]] = None, + integrations: OptionalNullable[ + Union[ + List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] + ] + ] = UNSET, + repositories: Optional[ + Union[ + List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict] + ] + ] = None, auto_start: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -323,11 +378,11 @@ async def create_async( :param model: The name of the model to fine-tune. :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. - :param training_files: + :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` :param integrations: A list of integrations to enable for your fine-tuning job. - :param repositories: + :param repositories: :param auto_start: This field will be required in a future release. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -337,22 +392,30 @@ async def create_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobIn( model=model, - training_files=utils.get_pydantic_model(training_files, Optional[List[models.TrainingFile]]), + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), validation_files=validation_files, - hyperparameters=utils.get_pydantic_model(hyperparameters, models.TrainingParametersIn), + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.TrainingParametersIn + ), suffix=suffix, - integrations=utils.get_pydantic_model(integrations, OptionalNullable[List[models.JobInIntegrations]]), - repositories=utils.get_pydantic_model(repositories, Optional[List[models.JobInRepositories]]), + integrations=utils.get_pydantic_model( + integrations, OptionalNullable[List[models.JobInIntegrations]] + ), + repositories=utils.get_pydantic_model( + repositories, Optional[List[models.JobInRepositories]] + ), auto_start=auto_start, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fine_tuning/jobs", base_url=base_url, @@ -364,43 +427,54 @@ async def create_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request, False, False, "json", models.JobIn), + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.JobIn + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - 
"429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + return utils.unmarshal_json( + http_res.text, + Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse], + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def get( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -419,14 +493,14 @@ def get( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: 
base_url = server_url - + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, ) - + req = self.build_request( method="GET", path="/v1/fine_tuning/jobs/{job_id}", @@ -441,40 +515,46 @@ def get( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, 
+ http_res.text, + http_res, + ) - - async def get_async( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -493,15 +573,15 @@ async def get_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, ) - - req = self.build_request( + + req = self.build_request_async( method="GET", path="/v1/fine_tuning/jobs/{job_id}", base_url=base_url, @@ -515,40 +595,46 @@ async def get_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise 
models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def cancel( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -567,14 +653,14 @@ def cancel( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, ) - + req = self.build_request( method="POST", path="/v1/fine_tuning/jobs/{job_id}/cancel", @@ -589,40 +675,46 @@ def cancel( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if 
utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def cancel_async( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -641,15 +733,15 @@ async def cancel_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fine_tuning/jobs/{job_id}/cancel", base_url=base_url, @@ -663,40 +755,46 @@ async def cancel_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], 
security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def start( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -706,7 +804,7 @@ def start( Request the start of a validated fine tuning job. 
- :param job_id: + :param job_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -715,14 +813,14 @@ def start( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, ) - + req = self.build_request( method="POST", path="/v1/fine_tuning/jobs/{job_id}/start", @@ -737,40 +835,46 @@ def start( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): 
+ raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def start_async( - self, *, + self, + *, job_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -780,7 +884,7 @@ async def start_async( Request the start of a validated fine tuning job. - :param job_id: + :param job_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -789,15 +893,15 @@ async def start_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fine_tuning/jobs/{job_id}/start", base_url=base_url, @@ -811,34 +915,39 @@ async def start_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", 
oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 647bbdf5..8b7f1a22 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -1,82 +1,535 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .agentscompletionrequest import AgentsCompletionRequest, AgentsCompletionRequestMessages, AgentsCompletionRequestMessagesTypedDict, AgentsCompletionRequestStop, AgentsCompletionRequestStopTypedDict, AgentsCompletionRequestToolChoice, AgentsCompletionRequestTypedDict -from .agentscompletionstreamrequest import AgentsCompletionStreamRequest, AgentsCompletionStreamRequestMessages, AgentsCompletionStreamRequestMessagesTypedDict, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, AgentsCompletionStreamRequestToolChoice, AgentsCompletionStreamRequestTypedDict -from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutObject, ArchiveFTModelOutTypedDict -from .assistantmessage import AssistantMessage, AssistantMessageRole, AssistantMessageTypedDict -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict, FinishReason -from .chatcompletionrequest import ChatCompletionRequest, ChatCompletionRequestTypedDict, Messages, MessagesTypedDict, Stop, StopTypedDict, ToolChoice -from .chatcompletionresponse import ChatCompletionResponse, ChatCompletionResponseTypedDict -from .chatcompletionstreamrequest import ChatCompletionStreamRequest, ChatCompletionStreamRequestMessages, ChatCompletionStreamRequestMessagesTypedDict, ChatCompletionStreamRequestStop, ChatCompletionStreamRequestStopTypedDict, ChatCompletionStreamRequestToolChoice, ChatCompletionStreamRequestTypedDict +from .agentscompletionrequest import ( + AgentsCompletionRequest, + AgentsCompletionRequestMessages, + AgentsCompletionRequestMessagesTypedDict, + AgentsCompletionRequestStop, + AgentsCompletionRequestStopTypedDict, + AgentsCompletionRequestToolChoice, + AgentsCompletionRequestToolChoiceTypedDict, + AgentsCompletionRequestTypedDict, +) +from .agentscompletionstreamrequest import ( + AgentsCompletionStreamRequest, + AgentsCompletionStreamRequestMessages, + AgentsCompletionStreamRequestMessagesTypedDict, + 
AgentsCompletionStreamRequestStop, + AgentsCompletionStreamRequestStopTypedDict, + AgentsCompletionStreamRequestToolChoice, + AgentsCompletionStreamRequestToolChoiceTypedDict, + AgentsCompletionStreamRequestTypedDict, +) +from .archiveftmodelout import ( + ArchiveFTModelOut, + ArchiveFTModelOutObject, + ArchiveFTModelOutTypedDict, +) +from .assistantmessage import ( + AssistantMessage, + AssistantMessageRole, + AssistantMessageTypedDict, +) +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceTypedDict, + FinishReason, +) +from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, +) +from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, +) +from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessages, + ChatCompletionStreamRequestMessagesTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, +) from .checkpointout import CheckpointOut, CheckpointOutTypedDict from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, + CompletionResponseStreamChoiceTypedDict, +) from .contentchunk import ContentChunk, ContentChunkTypedDict -from .delete_model_v1_models_model_id_deleteop import 
DeleteModelV1ModelsModelIDDeleteRequest, DeleteModelV1ModelsModelIDDeleteRequestTypedDict +from .delete_model_v1_models_model_id_deleteop import ( + DeleteModelV1ModelsModelIDDeleteRequest, + DeleteModelV1ModelsModelIDDeleteRequestTypedDict, +) from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from .detailedjobout import DetailedJobOut, DetailedJobOutIntegrations, DetailedJobOutIntegrationsTypedDict, DetailedJobOutObject, DetailedJobOutRepositories, DetailedJobOutRepositoriesTypedDict, DetailedJobOutStatus, DetailedJobOutTypedDict -from .embeddingrequest import EmbeddingRequest, EmbeddingRequestTypedDict, Inputs, InputsTypedDict +from .detailedjobout import ( + DetailedJobOut, + DetailedJobOutIntegrations, + DetailedJobOutIntegrationsTypedDict, + DetailedJobOutObject, + DetailedJobOutRepositories, + DetailedJobOutRepositoriesTypedDict, + DetailedJobOutStatus, + DetailedJobOutTypedDict, +) +from .embeddingrequest import ( + EmbeddingRequest, + EmbeddingRequestTypedDict, + Inputs, + InputsTypedDict, +) from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict from .eventout import EventOut, EventOutTypedDict -from .files_api_routes_delete_fileop import FilesAPIRoutesDeleteFileRequest, FilesAPIRoutesDeleteFileRequestTypedDict -from .files_api_routes_retrieve_fileop import FilesAPIRoutesRetrieveFileRequest, FilesAPIRoutesRetrieveFileRequestTypedDict -from .files_api_routes_upload_fileop import File, FileTypedDict, FilesAPIRoutesUploadFileMultiPartBodyParams, FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, FilesAPIRoutesUploadFilePurpose +from .files_api_routes_delete_fileop import ( + FilesAPIRoutesDeleteFileRequest, + FilesAPIRoutesDeleteFileRequestTypedDict, +) +from .files_api_routes_retrieve_fileop import ( + 
FilesAPIRoutesRetrieveFileRequest, + FilesAPIRoutesRetrieveFileRequestTypedDict, +) +from .files_api_routes_upload_fileop import ( + File, + FileTypedDict, + FilesAPIRoutesUploadFileMultiPartBodyParams, + FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, + FilesAPIRoutesUploadFilePurpose, +) from .fileschema import FileSchema, FileSchemaPurpose, FileSchemaTypedDict -from .fimcompletionrequest import FIMCompletionRequest, FIMCompletionRequestStop, FIMCompletionRequestStopTypedDict, FIMCompletionRequestTypedDict +from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, +) from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict -from .fimcompletionstreamrequest import FIMCompletionStreamRequest, FIMCompletionStreamRequestStop, FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict +from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, +) from .finetuneablemodel import FineTuneableModel -from .ftmodelcapabilitiesout import FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict +from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, +) +from .ftmodelcard import FTModelCard, FTModelCardTypedDict from .ftmodelout import FTModelOut, FTModelOutObject, FTModelOutTypedDict from .function import Function, FunctionTypedDict -from .functioncall import Arguments, ArgumentsTypedDict, FunctionCall, FunctionCallTypedDict -from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInType, GithubRepositoryInTypedDict -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutType, GithubRepositoryOutTypedDict +from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, +) +from 
.functionname import FunctionName, FunctionNameTypedDict +from .githubrepositoryin import ( + GithubRepositoryIn, + GithubRepositoryInType, + GithubRepositoryInTypedDict, +) +from .githubrepositoryout import ( + GithubRepositoryOut, + GithubRepositoryOutType, + GithubRepositoryOutTypedDict, +) from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .jobin import JobIn, JobInIntegrations, JobInIntegrationsTypedDict, JobInRepositories, JobInRepositoriesTypedDict, JobInTypedDict +from .imageurl import ImageURL, ImageURLTypedDict +from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, +) +from .jobin import ( + JobIn, + JobInIntegrations, + JobInIntegrationsTypedDict, + JobInRepositories, + JobInRepositoriesTypedDict, + JobInTypedDict, +) from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .jobout import Integrations, IntegrationsTypedDict, JobOut, JobOutTypedDict, Object, Repositories, RepositoriesTypedDict, Status -from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict -from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import JobsAPIRoutesFineTuningCancelFineTuningJobRequest, JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict -from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import JobsAPIRoutesFineTuningCreateFineTuningJobResponse, JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict -from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import JobsAPIRoutesFineTuningGetFineTuningJobRequest, JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict -from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import JobsAPIRoutesFineTuningGetFineTuningJobsRequest, JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, QueryParamStatus -from 
.jobs_api_routes_fine_tuning_start_fine_tuning_jobop import JobsAPIRoutesFineTuningStartFineTuningJobRequest, JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict -from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict -from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict +from .jobout import ( + Integrations, + IntegrationsTypedDict, + JobOut, + JobOutTypedDict, + Object, + Repositories, + RepositoriesTypedDict, + Status, +) +from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, +) +from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCancelFineTuningJobRequest, + JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, +) +from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCreateFineTuningJobResponse, + JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, +) +from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningGetFineTuningJobRequest, + JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, +) +from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( + JobsAPIRoutesFineTuningGetFineTuningJobsRequest, + JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, + QueryParamStatus, +) +from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningStartFineTuningJobRequest, + JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, +) +from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, + 
JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, +) +from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, + JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, +) from .jobsout import JobsOut, JobsOutObject, JobsOutTypedDict -from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutObject, LegacyJobMetadataOutTypedDict +from .legacyjobmetadataout import ( + LegacyJobMetadataOut, + LegacyJobMetadataOutObject, + LegacyJobMetadataOutTypedDict, +) from .listfilesout import ListFilesOut, ListFilesOutTypedDict from .metricout import MetricOut, MetricOutTypedDict from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict -from .modelcard import ModelCard, ModelCardTypedDict -from .modellist import ModelList, ModelListTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict, ResponseFormats -from .retrieve_model_v1_models_model_id_getop import RetrieveModelV1ModelsModelIDGetRequest, RetrieveModelV1ModelsModelIDGetRequestTypedDict -from .retrievefileout import RetrieveFileOut, RetrieveFileOutPurpose, RetrieveFileOutTypedDict +from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .responseformats import ResponseFormats +from .retrieve_model_v1_models_model_id_getop import ( + RetrieveModelV1ModelsModelIDGetRequest, + RetrieveModelV1ModelsModelIDGetRequestTypedDict, + RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict, +) +from .retrievefileout import ( + RetrieveFileOut, + RetrieveFileOutPurpose, + RetrieveFileOutTypedDict, +) from .sampletype import SampleType from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .source import Source -from .systemmessage import Content, ContentTypedDict, Role, 
SystemMessage, SystemMessageTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .tool import Tool, ToolToolTypes, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict, ToolTypes +from .systemmessage import ( + Content, + ContentTypedDict, + Role, + SystemMessage, + SystemMessageTypedDict, +) +from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict +from .tool import Tool, ToolTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .tooltypes import ToolTypes from .trainingfile import TrainingFile, TrainingFileTypedDict from .trainingparameters import TrainingParameters, TrainingParametersTypedDict from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict -from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutObject, UnarchiveFTModelOutTypedDict +from .unarchiveftmodelout import ( + UnarchiveFTModelOut, + UnarchiveFTModelOutObject, + UnarchiveFTModelOutTypedDict, +) from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict from .uploadfileout import Purpose, UploadFileOut, UploadFileOutTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import UserMessage, UserMessageContent, UserMessageContentTypedDict, UserMessageRole, UserMessageTypedDict -from .validationerror import Loc, LocTypedDict, ValidationError, ValidationErrorTypedDict -from .wandbintegration import WandbIntegration, WandbIntegrationType, WandbIntegrationTypedDict +from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, +) +from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, +) +from .wandbintegration import ( + WandbIntegration, + WandbIntegrationType, + 
WandbIntegrationTypedDict, +) from .wandbintegrationout import Type, WandbIntegrationOut, WandbIntegrationOutTypedDict -__all__ = ["AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", "AgentsCompletionStreamRequestMessages", "AgentsCompletionStreamRequestMessagesTypedDict", "AgentsCompletionStreamRequestStop", "AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestTypedDict", "ArchiveFTModelOut", "ArchiveFTModelOutObject", "ArchiveFTModelOutTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", "ChatCompletionStreamRequestMessages", "ChatCompletionStreamRequestMessagesTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", "DetailedJobOut", "DetailedJobOutIntegrations", "DetailedJobOutIntegrationsTypedDict", "DetailedJobOutObject", 
"DetailedJobOutRepositories", "DetailedJobOutRepositoriesTypedDict", "DetailedJobOutStatus", "DetailedJobOutTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", "FIMCompletionRequestTypedDict", "FIMCompletionResponse", "FIMCompletionResponseTypedDict", "FIMCompletionStreamRequest", "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelOut", "FTModelOutObject", "FTModelOutTypedDict", "File", "FileSchema", "FileSchemaPurpose", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FilesAPIRoutesUploadFilePurpose", "FineTuneableModel", "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInType", "GithubRepositoryInTypedDict", "GithubRepositoryOut", "GithubRepositoryOutType", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", "Inputs", "InputsTypedDict", "Integrations", "IntegrationsTypedDict", "JobIn", "JobInIntegrations", "JobInIntegrationsTypedDict", "JobInRepositories", "JobInRepositoriesTypedDict", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", 
"JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", "JobsOut", "JobsOutObject", "JobsOutTypedDict", "LegacyJobMetadataOut", "LegacyJobMetadataOutObject", "LegacyJobMetadataOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelCard", "ModelCardTypedDict", "ModelList", "ModelListTypedDict", "Object", "Purpose", "QueryParamStatus", "Repositories", "RepositoriesTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "RetrieveFileOut", "RetrieveFileOutPurpose", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", "Role", "SDKError", "SampleType", "Security", "SecurityTypedDict", "Source", "Status", "Stop", "StopTypedDict", "SystemMessage", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", "ToolChoice", "ToolMessage", "ToolMessageRole", "ToolMessageTypedDict", "ToolToolTypes", "ToolTypedDict", "ToolTypes", "TrainingFile", "TrainingFileTypedDict", "TrainingParameters", "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", "Type", "UnarchiveFTModelOut", "UnarchiveFTModelOutObject", "UnarchiveFTModelOutTypedDict", 
"UpdateFTModelIn", "UpdateFTModelInTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", "WandbIntegrationOutTypedDict", "WandbIntegrationType", "WandbIntegrationTypedDict"] +__all__ = [ + "AgentsCompletionRequest", + "AgentsCompletionRequestMessages", + "AgentsCompletionRequestMessagesTypedDict", + "AgentsCompletionRequestStop", + "AgentsCompletionRequestStopTypedDict", + "AgentsCompletionRequestToolChoice", + "AgentsCompletionRequestToolChoiceTypedDict", + "AgentsCompletionRequestTypedDict", + "AgentsCompletionStreamRequest", + "AgentsCompletionStreamRequestMessages", + "AgentsCompletionStreamRequestMessagesTypedDict", + "AgentsCompletionStreamRequestStop", + "AgentsCompletionStreamRequestStopTypedDict", + "AgentsCompletionStreamRequestToolChoice", + "AgentsCompletionStreamRequestToolChoiceTypedDict", + "AgentsCompletionStreamRequestTypedDict", + "ArchiveFTModelOut", + "ArchiveFTModelOutObject", + "ArchiveFTModelOutTypedDict", + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageRole", + "AssistantMessageTypedDict", + "BaseModelCard", + "BaseModelCardTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessages", + "ChatCompletionStreamRequestMessagesTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "CheckpointOut", + 
"CheckpointOutTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", + "CompletionResponseStreamChoiceTypedDict", + "Content", + "ContentChunk", + "ContentChunkTypedDict", + "ContentTypedDict", + "Data", + "DataTypedDict", + "DeleteFileOut", + "DeleteFileOutTypedDict", + "DeleteModelOut", + "DeleteModelOutTypedDict", + "DeleteModelV1ModelsModelIDDeleteRequest", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", + "DeltaMessage", + "DeltaMessageTypedDict", + "DetailedJobOut", + "DetailedJobOutIntegrations", + "DetailedJobOutIntegrationsTypedDict", + "DetailedJobOutObject", + "DetailedJobOutRepositories", + "DetailedJobOutRepositoriesTypedDict", + "DetailedJobOutStatus", + "DetailedJobOutTypedDict", + "EmbeddingRequest", + "EmbeddingRequestTypedDict", + "EmbeddingResponse", + "EmbeddingResponseData", + "EmbeddingResponseDataTypedDict", + "EmbeddingResponseTypedDict", + "EventOut", + "EventOutTypedDict", + "FIMCompletionRequest", + "FIMCompletionRequestStop", + "FIMCompletionRequestStopTypedDict", + "FIMCompletionRequestTypedDict", + "FIMCompletionResponse", + "FIMCompletionResponseTypedDict", + "FIMCompletionStreamRequest", + "FIMCompletionStreamRequestStop", + "FIMCompletionStreamRequestStopTypedDict", + "FIMCompletionStreamRequestTypedDict", + "FTModelCapabilitiesOut", + "FTModelCapabilitiesOutTypedDict", + "FTModelCard", + "FTModelCardTypedDict", + "FTModelOut", + "FTModelOutObject", + "FTModelOutTypedDict", + "File", + "FileSchema", + "FileSchemaPurpose", + "FileSchemaTypedDict", + "FileTypedDict", + "FilesAPIRoutesDeleteFileRequest", + "FilesAPIRoutesDeleteFileRequestTypedDict", + "FilesAPIRoutesRetrieveFileRequest", + "FilesAPIRoutesRetrieveFileRequestTypedDict", + "FilesAPIRoutesUploadFileMultiPartBodyParams", + "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", + "FilesAPIRoutesUploadFilePurpose", + 
"FineTuneableModel", + "FinishReason", + "Function", + "FunctionCall", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionTypedDict", + "GithubRepositoryIn", + "GithubRepositoryInType", + "GithubRepositoryInTypedDict", + "GithubRepositoryOut", + "GithubRepositoryOutType", + "GithubRepositoryOutTypedDict", + "HTTPValidationError", + "HTTPValidationErrorData", + "ImageURL", + "ImageURLChunk", + "ImageURLChunkImageURL", + "ImageURLChunkImageURLTypedDict", + "ImageURLChunkType", + "ImageURLChunkTypedDict", + "ImageURLTypedDict", + "Inputs", + "InputsTypedDict", + "Integrations", + "IntegrationsTypedDict", + "JobIn", + "JobInIntegrations", + "JobInIntegrationsTypedDict", + "JobInRepositories", + "JobInRepositoriesTypedDict", + "JobInTypedDict", + "JobMetadataOut", + "JobMetadataOutTypedDict", + "JobOut", + "JobOutTypedDict", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", + "JobsOut", + "JobsOutObject", + "JobsOutTypedDict", + "LegacyJobMetadataOut", + "LegacyJobMetadataOutObject", + "LegacyJobMetadataOutTypedDict", + "ListFilesOut", + 
"ListFilesOutTypedDict", + "Loc", + "LocTypedDict", + "Messages", + "MessagesTypedDict", + "MetricOut", + "MetricOutTypedDict", + "ModelCapabilities", + "ModelCapabilitiesTypedDict", + "ModelList", + "ModelListTypedDict", + "Object", + "Purpose", + "QueryParamStatus", + "Repositories", + "RepositoriesTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "RetrieveFileOut", + "RetrieveFileOutPurpose", + "RetrieveFileOutTypedDict", + "RetrieveModelV1ModelsModelIDGetRequest", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", + "Role", + "SDKError", + "SampleType", + "Security", + "SecurityTypedDict", + "Source", + "Status", + "Stop", + "StopTypedDict", + "SystemMessage", + "SystemMessageTypedDict", + "TextChunk", + "TextChunkType", + "TextChunkTypedDict", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolMessage", + "ToolMessageRole", + "ToolMessageTypedDict", + "ToolTypedDict", + "ToolTypes", + "TrainingFile", + "TrainingFileTypedDict", + "TrainingParameters", + "TrainingParametersIn", + "TrainingParametersInTypedDict", + "TrainingParametersTypedDict", + "Type", + "UnarchiveFTModelOut", + "UnarchiveFTModelOutObject", + "UnarchiveFTModelOutTypedDict", + "UpdateFTModelIn", + "UpdateFTModelInTypedDict", + "UploadFileOut", + "UploadFileOutTypedDict", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageRole", + "UserMessageTypedDict", + "ValidationError", + "ValidationErrorTypedDict", + "WandbIntegration", + "WandbIntegrationOut", + "WandbIntegrationOutTypedDict", + "WandbIntegrationType", + "WandbIntegrationTypedDict", +] diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index 
6a62b46e..1f0523a6 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -4,12 +4,14 @@ from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, TypedDict, Union +from typing import List, Optional, TypedDict, Union from typing_extensions import Annotated, NotRequired @@ -21,13 +23,26 @@ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -AgentsCompletionRequestMessagesTypedDict = Union[UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +AgentsCompletionRequestMessagesTypedDict = Union[ + UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict +] -AgentsCompletionRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +AgentsCompletionRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] -AgentsCompletionRequestToolChoice = Literal["auto", "none", "any"] +AgentsCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] + + +AgentsCompletionRequestToolChoice = Union[ToolChoice, 
ToolChoiceEnum] + class AgentsCompletionRequestTypedDict(TypedDict): messages: List[AgentsCompletionRequestMessagesTypedDict] @@ -46,31 +61,49 @@ class AgentsCompletionRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[AgentsCompletionRequestToolChoice] - + tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] + class AgentsCompletionRequest(BaseModel): messages: List[AgentsCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str r"""The ID of the agent to use for this completion.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[AgentsCompletionRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET - tool_choice: Optional[AgentsCompletionRequestToolChoice] = "auto" - + + tool_choice: Optional[AgentsCompletionRequestToolChoice] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + optional_fields = [ + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + ] nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -81,9 +114,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -93,4 +130,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index 9398081d..57d1177c 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -4,12 +4,14 @@ from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import 
UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, TypedDict, Union +from typing import List, Optional, TypedDict, Union from typing_extensions import Annotated, NotRequired @@ -21,13 +23,28 @@ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -AgentsCompletionStreamRequestMessagesTypedDict = Union[UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +AgentsCompletionStreamRequestMessagesTypedDict = Union[ + UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict +] -AgentsCompletionStreamRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +AgentsCompletionStreamRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] -AgentsCompletionStreamRequestToolChoice = Literal["auto", "none", "any"] +AgentsCompletionStreamRequestToolChoiceTypedDict = Union[ + ToolChoiceTypedDict, ToolChoiceEnum +] + + +AgentsCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] + class AgentsCompletionStreamRequestTypedDict(TypedDict): messages: List[AgentsCompletionStreamRequestMessagesTypedDict] @@ -45,30 +62,48 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoice] - + tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict] + class AgentsCompletionStreamRequest(BaseModel): messages: List[AgentsCompletionStreamRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str r"""The ID of the agent to use for this completion.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[AgentsCompletionStreamRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET - tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = "auto" - + + tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice"] + optional_fields = [ + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + ] nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -79,9 +114,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -91,4 +130,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py index be2e9040..eeffa5d2 100644 --- a/src/mistralai/models/archiveftmodelout.py +++ b/src/mistralai/models/archiveftmodelout.py @@ -9,13 +9,17 @@ ArchiveFTModelOutObject = Literal["model"] + class ArchiveFTModelOutTypedDict(TypedDict): id: str archived: NotRequired[bool] - + class ArchiveFTModelOut(BaseModel): id: str + + # fmt: off OBJECT: Annotated[Final[Optional[ArchiveFTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore + # fmt: on + archived: Optional[bool] = True - diff --git 
a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py index b7080d55..92af66a3 100644 --- a/src/mistralai/models/assistantmessage.py +++ b/src/mistralai/models/assistantmessage.py @@ -10,21 +10,25 @@ AssistantMessageRole = Literal["assistant"] + class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] - + class AssistantMessage(BaseModel): content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: Optional[AssistantMessageRole] = "assistant" - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["content", "tool_calls", "prefix", "role"] @@ -38,9 +42,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -50,4 +58,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/modelcard.py b/src/mistralai/models/basemodelcard.py similarity index 66% rename from 
src/mistralai/models/modelcard.py rename to src/mistralai/models/basemodelcard.py index 80e082e4..85af1f11 100644 --- a/src/mistralai/models/modelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -4,44 +4,64 @@ from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic from pydantic import model_serializer -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Final, List, Optional, TypedDict +from typing_extensions import Annotated, NotRequired -class ModelCardTypedDict(TypedDict): +class BaseModelCardTypedDict(TypedDict): id: str capabilities: ModelCapabilitiesTypedDict object: NotRequired[str] created: NotRequired[int] owned_by: NotRequired[str] - root: NotRequired[Nullable[str]] - archived: NotRequired[bool] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] aliases: NotRequired[List[str]] deprecation: NotRequired[Nullable[datetime]] - -class ModelCard(BaseModel): + +class BaseModelCard(BaseModel): id: str + capabilities: ModelCapabilities + object: Optional[str] = "model" + created: Optional[int] = None + owned_by: Optional[str] = "mistralai" - root: OptionalNullable[str] = UNSET - archived: Optional[bool] = False + name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET + max_context_length: Optional[int] = 32768 + aliases: Optional[List[str]] = None + deprecation: OptionalNullable[datetime] = UNSET - + + # fmt: off + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "base" # type: ignore + # fmt: on + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "created", "owned_by", "root", "archived", "name", "description", "max_context_length", "aliases", "deprecation"] - nullable_fields = ["root", "name", 
"description", "deprecation"] + optional_fields = [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "type", + ] + nullable_fields = ["name", "description", "deprecation"] null_default_fields = [] serialized = handler(self) @@ -51,9 +71,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -63,4 +87,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py index 748dbc1b..20d674bf 100644 --- a/src/mistralai/models/chatcompletionchoice.py +++ b/src/mistralai/models/chatcompletionchoice.py @@ -8,14 +8,16 @@ FinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] + class ChatCompletionChoiceTypedDict(TypedDict): index: int message: AssistantMessageTypedDict finish_reason: FinishReason - + class ChatCompletionChoice(BaseModel): index: int + message: AssistantMessage + finish_reason: FinishReason - diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index e3440b7b..78722167 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -5,12 +5,14 @@ from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import 
ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, TypedDict, Union +from typing import List, Optional, TypedDict, Union from typing_extensions import Annotated, NotRequired @@ -22,13 +24,30 @@ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +MessagesTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] -Messages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +Messages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] -ToolChoice = Literal["auto", "none", "any"] +ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] + + +ChatCompletionRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] + class ChatCompletionRequestTypedDict(TypedDict): model: Nullable[str] @@ -51,39 +70,63 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ToolChoice] + tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" - + class ChatCompletionRequest(BaseModel): model: Nullable[str] r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[Stop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET - tool_choice: Optional[ToolChoice] = "auto" + + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + "safe_prompt", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -94,9 +137,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -106,4 +153,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/chatcompletionresponse.py b/src/mistralai/models/chatcompletionresponse.py index 
dacb0acb..20c9010f 100644 --- a/src/mistralai/models/chatcompletionresponse.py +++ b/src/mistralai/models/chatcompletionresponse.py @@ -15,13 +15,17 @@ class ChatCompletionResponseTypedDict(TypedDict): usage: UsageInfoTypedDict created: NotRequired[int] choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - + class ChatCompletionResponse(BaseModel): id: str + object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None - diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 992584dc..ccba04af 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -5,12 +5,14 @@ from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, TypedDict, Union +from typing import List, Optional, TypedDict, Union from typing_extensions import Annotated, NotRequired @@ -22,13 +24,32 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -ChatCompletionStreamRequestMessagesTypedDict = Union[SystemMessageTypedDict, UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict] +ChatCompletionStreamRequestMessagesTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] -ChatCompletionStreamRequestMessages = Annotated[Union[Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")]], Discriminator(lambda m: get_discriminator(m, "role", "role"))] +ChatCompletionStreamRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] -ChatCompletionStreamRequestToolChoice = Literal["auto", "none", "any"] +ChatCompletionStreamRequestToolChoiceTypedDict = Union[ + ToolChoiceTypedDict, ToolChoiceEnum +] + + +ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] + class ChatCompletionStreamRequestTypedDict(TypedDict): model: Nullable[str] @@ -50,38 +71,62 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[ChatCompletionStreamRequestToolChoice] + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" - + class ChatCompletionStreamRequest(BaseModel): model: Nullable[str] r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionStreamRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[ChatCompletionStreamRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + response_format: Optional[ResponseFormat] = None + tools: OptionalNullable[List[Tool]] = UNSET - tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = "auto" + + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", "safe_prompt"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "response_format", + "tools", + "tool_choice", + "safe_prompt", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] @@ -92,9 +137,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -104,4 +153,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/checkpointout.py b/src/mistralai/models/checkpointout.py index 108356c0..f818dae1 100644 --- a/src/mistralai/models/checkpointout.py +++ b/src/mistralai/models/checkpointout.py @@ -13,13 +13,14 @@ class CheckpointOutTypedDict(TypedDict): r"""The step number that the checkpoint was created at.""" created_at: int r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" - + class 
CheckpointOut(BaseModel): metrics: MetricOut r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + step_number: int r"""The step number that the checkpoint was created at.""" + created_at: int r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" - diff --git a/src/mistralai/models/completionchunk.py b/src/mistralai/models/completionchunk.py index f3a12c12..8859d22f 100644 --- a/src/mistralai/models/completionchunk.py +++ b/src/mistralai/models/completionchunk.py @@ -1,7 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .completionresponsestreamchoice import CompletionResponseStreamChoice, CompletionResponseStreamChoiceTypedDict +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel from typing import List, Optional, TypedDict @@ -15,13 +18,17 @@ class CompletionChunkTypedDict(TypedDict): object: NotRequired[str] created: NotRequired[int] usage: NotRequired[UsageInfoTypedDict] - + class CompletionChunk(BaseModel): id: str + model: str + choices: List[CompletionResponseStreamChoice] + object: Optional[str] = None + created: Optional[int] = None + usage: Optional[UsageInfo] = None - diff --git a/src/mistralai/models/completionevent.py b/src/mistralai/models/completionevent.py index 9b75f730..b0286fde 100644 --- a/src/mistralai/models/completionevent.py +++ b/src/mistralai/models/completionevent.py @@ -8,8 +8,7 @@ class CompletionEventTypedDict(TypedDict): data: CompletionChunkTypedDict - + class CompletionEvent(BaseModel): data: CompletionChunk - diff --git a/src/mistralai/models/completionresponsestreamchoice.py 
b/src/mistralai/models/completionresponsestreamchoice.py index bd3cf9b2..227a2f76 100644 --- a/src/mistralai/models/completionresponsestreamchoice.py +++ b/src/mistralai/models/completionresponsestreamchoice.py @@ -7,19 +7,24 @@ from typing import Literal, TypedDict -CompletionResponseStreamChoiceFinishReason = Literal["stop", "length", "error", "tool_calls"] +CompletionResponseStreamChoiceFinishReason = Literal[ + "stop", "length", "error", "tool_calls" +] + class CompletionResponseStreamChoiceTypedDict(TypedDict): index: int delta: DeltaMessageTypedDict finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] - + class CompletionResponseStreamChoice(BaseModel): index: int + delta: DeltaMessage + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [] @@ -33,9 +38,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -45,4 +54,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py index 06954920..9b9db095 100644 --- a/src/mistralai/models/contentchunk.py +++ b/src/mistralai/models/contentchunk.py @@ -1,17 +1,20 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel -import pydantic -from typing import Final, Optional, TypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union from typing_extensions import Annotated -class ContentChunkTypedDict(TypedDict): - text: str - +ContentChunkTypedDict = Union[TextChunkTypedDict, ImageURLChunkTypedDict] -class ContentChunk(BaseModel): - text: str - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore - + +ContentChunk = Annotated[ + Union[ + Annotated[ImageURLChunk, Tag("image_url")], Annotated[TextChunk, Tag("text")] + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] diff --git a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py index 8935acb3..2093245d 100644 --- a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py +++ b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py @@ -10,9 +10,10 @@ class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to delete.""" - + class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): - model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the model to delete.""" - diff --git a/src/mistralai/models/deletefileout.py b/src/mistralai/models/deletefileout.py index 90c60ffa..dc1a87f2 100644 --- a/src/mistralai/models/deletefileout.py +++ b/src/mistralai/models/deletefileout.py @@ -12,13 +12,14 @@ class DeleteFileOutTypedDict(TypedDict): r"""The object type that was deleted""" deleted: bool r"""The deletion 
status.""" - + class DeleteFileOut(BaseModel): id: str r"""The ID of the deleted file.""" + object: str r"""The object type that was deleted""" + deleted: bool r"""The deletion status.""" - diff --git a/src/mistralai/models/deletemodelout.py b/src/mistralai/models/deletemodelout.py index bab96e07..96dbeb13 100644 --- a/src/mistralai/models/deletemodelout.py +++ b/src/mistralai/models/deletemodelout.py @@ -13,13 +13,14 @@ class DeleteModelOutTypedDict(TypedDict): r"""The object type that was deleted""" deleted: NotRequired[bool] r"""The deletion status""" - + class DeleteModelOut(BaseModel): id: str r"""The ID of the deleted model.""" + object: Optional[str] = "model" r"""The object type that was deleted""" + deleted: Optional[bool] = True r"""The deletion status""" - diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py index 97bef0ef..7b7fe796 100644 --- a/src/mistralai/models/deltamessage.py +++ b/src/mistralai/models/deltamessage.py @@ -12,13 +12,15 @@ class DeltaMessageTypedDict(TypedDict): role: NotRequired[str] content: NotRequired[Nullable[str]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - + class DeltaMessage(BaseModel): role: Optional[str] = None + content: OptionalNullable[str] = UNSET + tool_calls: OptionalNullable[List[ToolCall]] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] @@ -32,9 +34,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -44,4 +50,3 @@ def 
serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/detailedjobout.py b/src/mistralai/models/detailedjobout.py index b33b6e3e..336190c2 100644 --- a/src/mistralai/models/detailedjobout.py +++ b/src/mistralai/models/detailedjobout.py @@ -15,7 +15,18 @@ from typing_extensions import Annotated, NotRequired -DetailedJobOutStatus = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] +DetailedJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] DetailedJobOutObject = Literal["job"] @@ -52,35 +63,73 @@ class DetailedJobOutTypedDict(TypedDict): events: NotRequired[List[EventOutTypedDict]] r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" checkpoints: NotRequired[List[CheckpointOutTypedDict]] - + class DetailedJobOut(BaseModel): id: str + auto_start: bool + hyperparameters: TrainingParameters + model: FineTuneableModel r"""The name of the model to fine-tune.""" + status: DetailedJobOutStatus + job_type: str + created_at: int + modified_at: int + training_files: List[str] + validation_files: OptionalNullable[List[str]] = UNSET + + # fmt: off OBJECT: Annotated[Final[Optional[DetailedJobOutObject]], pydantic.Field(alias="object")] = "job" # type: ignore + # fmt: on + fine_tuned_model: OptionalNullable[str] = UNSET + suffix: OptionalNullable[str] = UNSET + integrations: OptionalNullable[List[DetailedJobOutIntegrations]] = UNSET + trained_tokens: OptionalNullable[int] = UNSET + repositories: Optional[List[DetailedJobOutRepositories]] = None + metadata: OptionalNullable[JobMetadataOut] = UNSET + events: Optional[List[EventOut]] = None r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + checkpoints: Optional[List[CheckpointOut]] = None - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["validation_files", "object", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "repositories", "metadata", "events", "checkpoints"] - nullable_fields = ["validation_files", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "metadata"] + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "repositories", + "metadata", + "events", + "checkpoints", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] null_default_fields = [] serialized = handler(self) @@ -90,9 +139,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -102,4 +155,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 6d70cc8b..5655472d 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -23,16 +23,18 @@ class EmbeddingRequestTypedDict(TypedDict): r"""ID of the model to use.""" encoding_format: NotRequired[Nullable[str]] r"""The format to return the embeddings in.""" - + class EmbeddingRequest(BaseModel): inputs: Annotated[Inputs, pydantic.Field(alias="input")] r"""Text to embed.""" + model: str r"""ID of 
the model to use.""" + encoding_format: OptionalNullable[str] = UNSET r"""The format to return the embeddings in.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["encoding_format"] @@ -46,9 +48,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -58,4 +64,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/embeddingresponse.py b/src/mistralai/models/embeddingresponse.py index 040c42dc..d85ceec7 100644 --- a/src/mistralai/models/embeddingresponse.py +++ b/src/mistralai/models/embeddingresponse.py @@ -13,12 +13,15 @@ class EmbeddingResponseTypedDict(TypedDict): model: str usage: UsageInfoTypedDict data: List[EmbeddingResponseDataTypedDict] - + class EmbeddingResponse(BaseModel): id: str + object: str + model: str + usage: UsageInfo + data: List[EmbeddingResponseData] - diff --git a/src/mistralai/models/embeddingresponsedata.py b/src/mistralai/models/embeddingresponsedata.py index 07a061bc..f37995eb 100644 --- a/src/mistralai/models/embeddingresponsedata.py +++ b/src/mistralai/models/embeddingresponsedata.py @@ -10,10 +10,11 @@ class EmbeddingResponseDataTypedDict(TypedDict): object: NotRequired[str] embedding: NotRequired[List[float]] index: NotRequired[int] - + class EmbeddingResponseData(BaseModel): object: Optional[str] = None + embedding: Optional[List[float]] = None + index: Optional[int] = None - diff --git a/src/mistralai/models/eventout.py b/src/mistralai/models/eventout.py index d522abed..fa427f15 100644 --- 
a/src/mistralai/models/eventout.py +++ b/src/mistralai/models/eventout.py @@ -13,15 +13,17 @@ class EventOutTypedDict(TypedDict): created_at: int r"""The UNIX timestamp (in seconds) of the event.""" data: NotRequired[Nullable[Dict[str, Any]]] - + class EventOut(BaseModel): name: str r"""The name of the event.""" + created_at: int r"""The UNIX timestamp (in seconds) of the event.""" + data: OptionalNullable[Dict[str, Any]] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["data"] @@ -35,9 +37,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -47,4 +53,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/files_api_routes_delete_fileop.py b/src/mistralai/models/files_api_routes_delete_fileop.py index 85714838..def67911 100644 --- a/src/mistralai/models/files_api_routes_delete_fileop.py +++ b/src/mistralai/models/files_api_routes_delete_fileop.py @@ -9,8 +9,9 @@ class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): file_id: str - + class FilesAPIRoutesDeleteFileRequest(BaseModel): - file_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] - + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/files_api_routes_retrieve_fileop.py b/src/mistralai/models/files_api_routes_retrieve_fileop.py index 76063be9..bfbad272 100644 --- a/src/mistralai/models/files_api_routes_retrieve_fileop.py +++ 
b/src/mistralai/models/files_api_routes_retrieve_fileop.py @@ -9,8 +9,9 @@ class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): file_id: str - + class FilesAPIRoutesRetrieveFileRequest(BaseModel): - file_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] - + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py index 74720d6a..8eae7af7 100644 --- a/src/mistralai/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/models/files_api_routes_upload_fileop.py @@ -12,17 +12,30 @@ FilesAPIRoutesUploadFilePurpose = Union[Literal["fine-tune"], UnrecognizedStr] + class FileTypedDict(TypedDict): file_name: str content: Union[bytes, IO[bytes], io.BufferedReader] content_type: NotRequired[str] - + class File(BaseModel): - file_name: Annotated[str, pydantic.Field(alias="file"), FieldMetadata(multipart=True)] - content: Annotated[Union[bytes, IO[bytes], io.BufferedReader], pydantic.Field(alias=""), FieldMetadata(multipart=MultipartFormMetadata(content=True))] - content_type: Annotated[Optional[str], pydantic.Field(alias="Content-Type"), FieldMetadata(multipart=True)] = None - + file_name: Annotated[ + str, pydantic.Field(alias="file"), FieldMetadata(multipart=True) + ] + + content: Annotated[ + Union[bytes, IO[bytes], io.BufferedReader], + pydantic.Field(alias=""), + FieldMetadata(multipart=MultipartFormMetadata(content=True)), + ] + + content_type: Annotated[ + Optional[str], + pydantic.Field(alias="Content-Type"), + FieldMetadata(multipart=True), + ] = None + class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): file: FileTypedDict @@ -36,10 +49,14 @@ class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): file=@path/to/your/file.jsonl ``` """ - + class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): - file: 
Annotated[File, pydantic.Field(alias=""), FieldMetadata(multipart=MultipartFormMetadata(file=True))] + file: Annotated[ + File, + pydantic.Field(alias=""), + FieldMetadata(multipart=MultipartFormMetadata(file=True)), + ] r"""The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash @@ -50,5 +67,7 @@ class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): file=@path/to/your/file.jsonl ``` """ + + # fmt: off PURPOSE: Annotated[Final[Annotated[Optional[FilesAPIRoutesUploadFilePurpose], PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose"), FieldMetadata(multipart=True)] = "fine-tune" # type: ignore - + # fmt: on diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py index b852dcb4..1ace0fab 100644 --- a/src/mistralai/models/fileschema.py +++ b/src/mistralai/models/fileschema.py @@ -3,7 +3,14 @@ from __future__ import annotations from .sampletype import SampleType from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, UnrecognizedStr +from mistralai.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) from mistralai.utils import validate_open_enum import pydantic from pydantic import model_serializer @@ -15,6 +22,7 @@ FileSchemaPurpose = Union[Literal["fine-tune"], UnrecognizedStr] r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + class FileSchemaTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" @@ -29,25 +37,35 @@ class FileSchemaTypedDict(TypedDict): sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] - + class FileSchema(BaseModel): id: str r"""The unique identifier of the file.""" + object: str r"""The object type, which is always \"file\".""" + bytes: int r"""The size of the file, in bytes.""" + created_at: int r"""The UNIX timestamp (in seconds) of the event.""" + filename: str r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + + # fmt: off PURPOSE: Annotated[Final[Annotated[FileSchemaPurpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + # fmt: on r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["num_lines"] @@ -61,9 +79,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -73,4 +95,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py index 7e041681..4f00d3dc 100644 --- a/src/mistralai/models/fimcompletionrequest.py +++ b/src/mistralai/models/fimcompletionrequest.py @@ -39,7 +39,7 @@ class FIMCompletionRequestTypedDict(TypedDict): r"""The seed to use 
for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + class FIMCompletionRequest(BaseModel): model: Nullable[str] @@ -47,28 +47,46 @@ class FIMCompletionRequest(BaseModel): - `codestral-2405` - `codestral-latest` """ + prompt: str r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: Optional[FIMCompletionRequestStop] = None r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "suffix", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] @@ -79,9 +97,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -91,4 +113,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/fimcompletionresponse.py b/src/mistralai/models/fimcompletionresponse.py index f359adb9..d9e11df3 100644 --- a/src/mistralai/models/fimcompletionresponse.py +++ b/src/mistralai/models/fimcompletionresponse.py @@ -15,13 +15,17 @@ class FIMCompletionResponseTypedDict(TypedDict): usage: UsageInfoTypedDict created: NotRequired[int] choices: NotRequired[List[ChatCompletionChoiceTypedDict]] - + class FIMCompletionResponse(BaseModel): id: str 
+ object: str + model: str + usage: UsageInfo + created: Optional[int] = None + choices: Optional[List[ChatCompletionChoice]] = None - diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py index 6d01053a..708542d9 100644 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -38,7 +38,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + class FIMCompletionStreamRequest(BaseModel): model: Nullable[str] @@ -46,27 +46,45 @@ class FIMCompletionStreamRequest(BaseModel): - `codestral-2405` - `codestral-latest` """ + prompt: str r"""The text/code to complete.""" + temperature: Optional[float] = 0.7 r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + min_tokens: OptionalNullable[int] = UNSET r"""The minimum number of tokens to generate in the completion.""" + stream: Optional[bool] = True + stop: Optional[FIMCompletionStreamRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["temperature", "top_p", "max_tokens", "min_tokens", "stream", "stop", "random_seed", "suffix"] + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "min_tokens", + "stream", + "stop", + "random_seed", + "suffix", + ] nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] @@ -77,9 +95,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -89,4 +111,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/finetuneablemodel.py b/src/mistralai/models/finetuneablemodel.py index 22c8e4c6..947991c2 100644 --- 
a/src/mistralai/models/finetuneablemodel.py +++ b/src/mistralai/models/finetuneablemodel.py @@ -4,5 +4,11 @@ from typing import Literal -FineTuneableModel = Literal["open-mistral-7b", "mistral-small-latest", "codestral-latest", "mistral-large-latest", "open-mistral-nemo"] +FineTuneableModel = Literal[ + "open-mistral-7b", + "mistral-small-latest", + "codestral-latest", + "mistral-large-latest", + "open-mistral-nemo", +] r"""The name of the model to fine-tune.""" diff --git a/src/mistralai/models/ftmodelcapabilitiesout.py b/src/mistralai/models/ftmodelcapabilitiesout.py index ab76af38..fe66d303 100644 --- a/src/mistralai/models/ftmodelcapabilitiesout.py +++ b/src/mistralai/models/ftmodelcapabilitiesout.py @@ -11,11 +11,13 @@ class FTModelCapabilitiesOutTypedDict(TypedDict): completion_fim: NotRequired[bool] function_calling: NotRequired[bool] fine_tuning: NotRequired[bool] - + class FTModelCapabilitiesOut(BaseModel): completion_chat: Optional[bool] = True + completion_fim: Optional[bool] = False + function_calling: Optional[bool] = False + fine_tuning: Optional[bool] = False - diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py new file mode 100644 index 00000000..b282a09d --- /dev/null +++ b/src/mistralai/models/ftmodelcard.py @@ -0,0 +1,103 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Final, List, Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class FTModelCardTypedDict(TypedDict): + r"""Extra fields for fine-tuned models.""" + + id: str + capabilities: ModelCapabilitiesTypedDict + job: str + root: str + object: NotRequired[str] + created: NotRequired[int] + owned_by: NotRequired[str] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + deprecation: NotRequired[Nullable[datetime]] + archived: NotRequired[bool] + + +class FTModelCard(BaseModel): + r"""Extra fields for fine-tuned models.""" + + id: str + + capabilities: ModelCapabilities + + job: str + + root: str + + object: Optional[str] = "model" + + created: Optional[int] = None + + owned_by: Optional[str] = "mistralai" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + deprecation: OptionalNullable[datetime] = UNSET + + # fmt: off + TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "fine-tuned" # type: ignore + # fmt: on + + archived: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "type", + "archived", + ] + nullable_fields = ["name", "description", "deprecation"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = 
serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/ftmodelout.py b/src/mistralai/models/ftmodelout.py index 6f99bcbe..664dd5d4 100644 --- a/src/mistralai/models/ftmodelout.py +++ b/src/mistralai/models/ftmodelout.py @@ -1,7 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .ftmodelcapabilitiesout import FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict +from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, +) from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer @@ -11,6 +14,7 @@ FTModelOutObject = Literal["model"] + class FTModelOutTypedDict(TypedDict): id: str created: int @@ -23,25 +27,44 @@ class FTModelOutTypedDict(TypedDict): description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] aliases: NotRequired[List[str]] - + class FTModelOut(BaseModel): id: str + created: int + owned_by: str + root: str + archived: bool + capabilities: FTModelCapabilitiesOut + job: str + + # fmt: off OBJECT: Annotated[Final[Optional[FTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore + # fmt: on + name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET + max_context_length: Optional[int] = 32768 + aliases: Optional[List[str]] = None - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "name", "description", 
"max_context_length", "aliases"] + optional_fields = [ + "object", + "name", + "description", + "max_context_length", + "aliases", + ] nullable_fields = ["name", "description"] null_default_fields = [] @@ -52,9 +75,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -64,4 +91,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/function.py b/src/mistralai/models/function.py index 78eb2594..a872eea1 100644 --- a/src/mistralai/models/function.py +++ b/src/mistralai/models/function.py @@ -10,10 +10,11 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] - + class Function(BaseModel): name: str + parameters: Dict[str, Any] + description: Optional[str] = "" - diff --git a/src/mistralai/models/functioncall.py b/src/mistralai/models/functioncall.py index 4b79c325..941cc5e9 100644 --- a/src/mistralai/models/functioncall.py +++ b/src/mistralai/models/functioncall.py @@ -14,9 +14,9 @@ class FunctionCallTypedDict(TypedDict): name: str arguments: ArgumentsTypedDict - + class FunctionCall(BaseModel): name: str + arguments: Arguments - diff --git a/src/mistralai/models/functionname.py b/src/mistralai/models/functionname.py new file mode 100644 index 00000000..20fc9bef --- /dev/null +++ b/src/mistralai/models/functionname.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import TypedDict + + +class FunctionNameTypedDict(TypedDict): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str + + +class FunctionName(BaseModel): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py index 234afeb1..cb8bad68 100644 --- a/src/mistralai/models/githubrepositoryin.py +++ b/src/mistralai/models/githubrepositoryin.py @@ -10,22 +10,30 @@ GithubRepositoryInType = Literal["github"] + class GithubRepositoryInTypedDict(TypedDict): name: str owner: str token: str ref: NotRequired[Nullable[str]] weight: NotRequired[float] - + class GithubRepositoryIn(BaseModel): name: str + owner: str + token: str + + # fmt: off TYPE: Annotated[Final[Optional[GithubRepositoryInType]], pydantic.Field(alias="type")] = "github" # type: ignore + # fmt: on + ref: OptionalNullable[str] = UNSET + weight: Optional[float] = 1 - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["type", "ref", "weight"] @@ -39,9 +47,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -51,4 +63,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py index 2c0a4276..7f023c75 100644 --- 
a/src/mistralai/models/githubrepositoryout.py +++ b/src/mistralai/models/githubrepositoryout.py @@ -10,22 +10,30 @@ GithubRepositoryOutType = Literal["github"] + class GithubRepositoryOutTypedDict(TypedDict): name: str owner: str commit_id: str ref: NotRequired[Nullable[str]] weight: NotRequired[float] - + class GithubRepositoryOut(BaseModel): name: str + owner: str + commit_id: str + + # fmt: off TYPE: Annotated[Final[Optional[GithubRepositoryOutType]], pydantic.Field(alias="type")] = "github" # type: ignore + # fmt: on + ref: OptionalNullable[str] = UNSET + weight: Optional[float] = 1 - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["type", "ref", "weight"] @@ -39,9 +47,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -51,4 +63,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py index 4e4a2094..991b8bd6 100644 --- a/src/mistralai/models/httpvalidationerror.py +++ b/src/mistralai/models/httpvalidationerror.py @@ -6,13 +6,14 @@ from mistralai.types import BaseModel from typing import List, Optional + class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None - class HTTPValidationError(Exception): r"""Validation Error""" + data: HTTPValidationErrorData def __init__(self, data: HTTPValidationErrorData): @@ -20,4 +21,3 @@ def __init__(self, data: HTTPValidationErrorData): def __str__(self) -> str: return 
utils.marshal_json(self.data, HTTPValidationErrorData) - diff --git a/src/mistralai/models/imageurl.py b/src/mistralai/models/imageurl.py new file mode 100644 index 00000000..af24a1a9 --- /dev/null +++ b/src/mistralai/models/imageurl.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import TypedDict +from typing_extensions import NotRequired + + +class ImageURLTypedDict(TypedDict): + url: str + detail: NotRequired[Nullable[str]] + + +class ImageURL(BaseModel): + url: str + + detail: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["detail"] + nullable_fields = ["detail"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/imageurlchunk.py b/src/mistralai/models/imageurlchunk.py new file mode 100644 index 00000000..44409020 --- /dev/null +++ b/src/mistralai/models/imageurlchunk.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.types import BaseModel +import pydantic +from typing import Final, Literal, Optional, TypedDict, Union +from typing_extensions import Annotated + + +ImageURLChunkType = Literal["image_url"] + +ImageURLChunkImageURLTypedDict = Union[ImageURLTypedDict, str] + + +ImageURLChunkImageURL = Union[ImageURL, str] + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLChunkImageURLTypedDict + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLChunkImageURL + + # fmt: off + TYPE: Annotated[Final[Optional[ImageURLChunkType]], pydantic.Field(alias="type")] = "image_url" # type: ignore + # fmt: on diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py index dd8e4ee8..db875c11 100644 --- a/src/mistralai/models/jobin.py +++ b/src/mistralai/models/jobin.py @@ -39,27 +39,41 @@ class JobInTypedDict(TypedDict): repositories: NotRequired[List[JobInRepositoriesTypedDict]] auto_start: NotRequired[bool] r"""This field will be required in a future release.""" - + class JobIn(BaseModel): model: FineTuneableModel r"""The name of the model to fine-tune.""" + hyperparameters: TrainingParametersIn r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + training_files: Optional[List[TrainingFile]] = None + validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. 
The same data should not be present in both train and validation files.""" + suffix: OptionalNullable[str] = UNSET r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + integrations: OptionalNullable[List[JobInIntegrations]] = UNSET r"""A list of integrations to enable for your fine-tuning job.""" + repositories: Optional[List[JobInRepositories]] = None + auto_start: Optional[bool] = None r"""This field will be required in a future release.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["training_files", "validation_files", "suffix", "integrations", "repositories", "auto_start"] + optional_fields = [ + "training_files", + "validation_files", + "suffix", + "integrations", + "repositories", + "auto_start", + ] nullable_fields = ["validation_files", "suffix", "integrations"] null_default_fields = [] @@ -70,9 +84,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -82,4 +100,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/jobmetadataout.py b/src/mistralai/models/jobmetadataout.py index 9d3bfba2..690540da 100644 --- a/src/mistralai/models/jobmetadataout.py +++ b/src/mistralai/models/jobmetadataout.py @@ -15,21 +15,43 @@ class JobMetadataOutTypedDict(TypedDict): train_tokens: NotRequired[Nullable[int]] data_tokens: NotRequired[Nullable[int]] estimated_start_time: NotRequired[Nullable[int]] - 
+ class JobMetadataOut(BaseModel): expected_duration_seconds: OptionalNullable[int] = UNSET + cost: OptionalNullable[float] = UNSET + cost_currency: OptionalNullable[str] = UNSET + train_tokens_per_step: OptionalNullable[int] = UNSET + train_tokens: OptionalNullable[int] = UNSET + data_tokens: OptionalNullable[int] = UNSET + estimated_start_time: OptionalNullable[int] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time"] - nullable_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time"] + optional_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + nullable_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] null_default_fields = [] serialized = handler(self) @@ -39,9 +61,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -51,4 +77,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/jobout.py b/src/mistralai/models/jobout.py index f0e0d253..a716cb7f 100644 --- a/src/mistralai/models/jobout.py +++ b/src/mistralai/models/jobout.py @@ -13,7 +13,18 @@ from typing_extensions import 
Annotated, NotRequired -Status = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] +Status = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] r"""The current status of the fine-tuning job.""" Object = Literal["job"] @@ -60,44 +71,78 @@ class JobOutTypedDict(TypedDict): r"""Total number of tokens trained.""" repositories: NotRequired[List[RepositoriesTypedDict]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - + class JobOut(BaseModel): id: str r"""The ID of the job.""" + auto_start: bool + hyperparameters: TrainingParameters + model: FineTuneableModel r"""The name of the model to fine-tune.""" + status: Status r"""The current status of the fine-tuning job.""" + job_type: str r"""The type of job (`FT` for fine-tuning).""" + created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] r"""A list containing the IDs of uploaded files that contain training data.""" + validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" + + # fmt: off OBJECT: Annotated[Final[Optional[Object]], pydantic.Field(alias="object")] = "job" # type: ignore + # fmt: on r"""The object type of the fine-tuning job.""" + fine_tuned_model: OptionalNullable[str] = UNSET r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: OptionalNullable[List[Integrations]] = UNSET r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: OptionalNullable[int] = UNSET r"""Total number of tokens trained.""" + repositories: Optional[List[Repositories]] = None + metadata: OptionalNullable[JobMetadataOut] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["validation_files", "object", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "repositories", "metadata"] - nullable_fields = ["validation_files", "fine_tuned_model", "suffix", "integrations", "trained_tokens", "metadata"] + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "repositories", + "metadata", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] null_default_fields = [] serialized = handler(self) @@ -107,9 +152,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -119,4 +168,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py index e32d52b1..da521422 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +++ 
b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -10,9 +10,10 @@ class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to archive.""" - + class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): - model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the model to archive.""" - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py index 0ba05ee5..e84b0825 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -10,9 +10,10 @@ class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): job_id: str r"""The ID of the job to cancel.""" - + class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): - job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the job to cancel.""" - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py index c4ba3c19..1925a1a6 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -6,10 +6,11 @@ from typing import Union -JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = Union[LegacyJobMetadataOutTypedDict, JobOutTypedDict] +JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = Union[ + LegacyJobMetadataOutTypedDict, 
JobOutTypedDict +] r"""OK""" JobsAPIRoutesFineTuningCreateFineTuningJobResponse = Union[LegacyJobMetadataOut, JobOut] r"""OK""" - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py index f8924c89..05706126 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -10,9 +10,10 @@ class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): job_id: str r"""The ID of the job to analyse.""" - + class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): - job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the job to analyse.""" - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py index bb5bf3b5..3320b100 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -9,9 +9,21 @@ from typing_extensions import Annotated, NotRequired -QueryParamStatus = Literal["QUEUED", "STARTED", "VALIDATING", "VALIDATED", "RUNNING", "FAILED_VALIDATION", "FAILED", "SUCCESS", "CANCELLED", "CANCELLATION_REQUESTED"] +QueryParamStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] r"""The current job state to filter on. 
When set, the other results are not displayed.""" + class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): page: NotRequired[int] r"""The page number of the results to be returned.""" @@ -31,32 +43,84 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" suffix: NotRequired[Nullable[str]] r"""The model suffix to filter on. When set, the other results are not displayed.""" - + class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): - page: Annotated[Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = 0 + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 r"""The page number of the results to be returned.""" - page_size: Annotated[Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = 100 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 r"""The number of items to return per page.""" - model: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" - created_after: Annotated[OptionalNullable[datetime], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The date/time to filter on. 
When set, the results for previous creation times are not displayed.""" - created_by_me: Annotated[Optional[bool], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = False + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - status: Annotated[OptionalNullable[QueryParamStatus], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + status: Annotated[ + OptionalNullable[QueryParamStatus], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The current job state to filter on. When set, the other results are not displayed.""" - wandb_project: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + wandb_project: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" - wandb_name: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + wandb_name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" - suffix: Annotated[OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True))] = UNSET + + suffix: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET r"""The model suffix to filter on. 
When set, the other results are not displayed.""" - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["page", "page_size", "model", "created_after", "created_by_me", "status", "wandb_project", "wandb_name", "suffix"] - nullable_fields = ["model", "created_after", "status", "wandb_project", "wandb_name", "suffix"] + optional_fields = [ + "page", + "page_size", + "model", + "created_after", + "created_by_me", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + nullable_fields = [ + "model", + "created_after", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] null_default_fields = [] serialized = handler(self) @@ -66,9 +130,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -78,4 +146,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py index 312063fd..bc1b6d47 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -9,8 +9,9 @@ class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): job_id: str - + class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): - job_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] - + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", 
explode=False)) + ] diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py index ef44fedd..acc6bf42 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -10,9 +10,10 @@ class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to unarchive.""" - + class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): - model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the model to unarchive.""" - diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py index 8a229f0e..50298ce1 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -12,10 +12,15 @@ class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to update.""" update_ft_model_in: UpdateFTModelInTypedDict - + class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): - model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the model to update.""" - update_ft_model_in: Annotated[UpdateFTModelIn, FieldMetadata(request=RequestMetadata(media_type="application/json"))] - + + update_ft_model_in: Annotated[ + UpdateFTModelIn, + 
FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py index 0ed51c8e..bd5edf69 100644 --- a/src/mistralai/models/jobsout.py +++ b/src/mistralai/models/jobsout.py @@ -10,13 +10,17 @@ JobsOutObject = Literal["list"] + class JobsOutTypedDict(TypedDict): total: int data: NotRequired[List[JobOutTypedDict]] - + class JobsOut(BaseModel): total: int + data: Optional[List[JobOut]] = None + + # fmt: off OBJECT: Annotated[Final[Optional[JobsOutObject]], pydantic.Field(alias="object")] = "list" # type: ignore - + # fmt: on diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py index 3b3106d0..677cad8e 100644 --- a/src/mistralai/models/legacyjobmetadataout.py +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -10,6 +10,7 @@ LegacyJobMetadataOutObject = Literal["job.metadata"] + class LegacyJobMetadataOutTypedDict(TypedDict): details: str expected_duration_seconds: NotRequired[Nullable[int]] @@ -30,34 +31,69 @@ class LegacyJobMetadataOutTypedDict(TypedDict): r"""The number of complete passes through the entire training dataset.""" training_steps: NotRequired[Nullable[int]] r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - + class LegacyJobMetadataOut(BaseModel): details: str + expected_duration_seconds: OptionalNullable[int] = UNSET r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + cost: OptionalNullable[float] = UNSET r"""The cost of the fine-tuning job.""" + cost_currency: OptionalNullable[str] = UNSET r"""The currency used for the fine-tuning job cost.""" + train_tokens_per_step: OptionalNullable[int] = UNSET r"""The number of tokens consumed by one training step.""" + train_tokens: OptionalNullable[int] = UNSET r"""The total number of tokens used during the fine-tuning process.""" + data_tokens: OptionalNullable[int] = UNSET r"""The total number of tokens in the training dataset.""" + estimated_start_time: OptionalNullable[int] = UNSET + deprecated: Optional[bool] = True + epochs: OptionalNullable[float] = UNSET r"""The number of complete passes through the entire training dataset.""" + training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + + # fmt: off OBJECT: Annotated[Final[Optional[LegacyJobMetadataOutObject]], pydantic.Field(alias="object")] = "job.metadata" # type: ignore - + # fmt: on + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time", "deprecated", "epochs", "training_steps", "object"] - nullable_fields = ["expected_duration_seconds", "cost", "cost_currency", "train_tokens_per_step", "train_tokens", "data_tokens", "estimated_start_time", "epochs", "training_steps"] + optional_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "deprecated", + "epochs", + "training_steps", + "object", + ] + nullable_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "epochs", + "training_steps", + ] null_default_fields = [] serialized = handler(self) @@ -67,9 +103,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -79,4 +119,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/listfilesout.py b/src/mistralai/models/listfilesout.py index b6f4dd10..928a7be9 100644 --- a/src/mistralai/models/listfilesout.py +++ 
b/src/mistralai/models/listfilesout.py @@ -9,9 +9,9 @@ class ListFilesOutTypedDict(TypedDict): data: List[FileSchemaTypedDict] object: str - + class ListFilesOut(BaseModel): data: List[FileSchema] + object: str - diff --git a/src/mistralai/models/metricout.py b/src/mistralai/models/metricout.py index b85cd7d0..99fe9fb0 100644 --- a/src/mistralai/models/metricout.py +++ b/src/mistralai/models/metricout.py @@ -9,19 +9,21 @@ class MetricOutTypedDict(TypedDict): r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - + train_loss: NotRequired[Nullable[float]] valid_loss: NotRequired[Nullable[float]] valid_mean_token_accuracy: NotRequired[Nullable[float]] - + class MetricOut(BaseModel): r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - + train_loss: OptionalNullable[float] = UNSET + valid_loss: OptionalNullable[float] = UNSET + valid_mean_token_accuracy: OptionalNullable[float] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] @@ -35,9 +37,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -47,4 +53,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py 
index c22ce59d..af981cc9 100644 --- a/src/mistralai/models/modelcapabilities.py +++ b/src/mistralai/models/modelcapabilities.py @@ -11,11 +11,16 @@ class ModelCapabilitiesTypedDict(TypedDict): completion_fim: NotRequired[bool] function_calling: NotRequired[bool] fine_tuning: NotRequired[bool] - + vision: NotRequired[bool] + class ModelCapabilities(BaseModel): completion_chat: Optional[bool] = True + completion_fim: Optional[bool] = False + function_calling: Optional[bool] = True + fine_tuning: Optional[bool] = False - + + vision: Optional[bool] = False diff --git a/src/mistralai/models/modellist.py b/src/mistralai/models/modellist.py index 0c76b322..759b9310 100644 --- a/src/mistralai/models/modellist.py +++ b/src/mistralai/models/modellist.py @@ -1,18 +1,32 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .modelcard import ModelCard, ModelCardTypedDict +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict from mistralai.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Optional, TypedDict, Union +from typing_extensions import Annotated, NotRequired + + +DataTypedDict = Union[BaseModelCardTypedDict, FTModelCardTypedDict] + + +Data = Annotated[ + Union[ + Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] class ModelListTypedDict(TypedDict): object: NotRequired[str] - data: NotRequired[List[ModelCardTypedDict]] - + data: NotRequired[List[DataTypedDict]] + class ModelList(BaseModel): object: Optional[str] = "list" - data: Optional[List[ModelCard]] = None - + + data: Optional[List[Data]] = None diff --git 
a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py index 0ead91a4..bf538698 100644 --- a/src/mistralai/models/responseformat.py +++ b/src/mistralai/models/responseformat.py @@ -1,18 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .responseformats import ResponseFormats from mistralai.types import BaseModel -from typing import Literal, Optional, TypedDict +from typing import Optional, TypedDict from typing_extensions import NotRequired -ResponseFormats = Literal["text", "json_object"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] - + r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + class ResponseFormat(BaseModel): - type: Optional[ResponseFormats] = "text" - + type: Optional[ResponseFormats] = None + r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/src/mistralai/models/responseformats.py b/src/mistralai/models/responseformats.py new file mode 100644 index 00000000..2c06b812 --- /dev/null +++ b/src/mistralai/models/responseformats.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ResponseFormats = Literal["text", "json_object"] +r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py index 8a37b1ca..37c52c95 100644 --- a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py +++ b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py @@ -1,18 +1,37 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict +from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import TypedDict, Union from typing_extensions import Annotated class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to retrieve.""" - + class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): - model_id: Annotated[str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False))] + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] r"""The ID of the model to retrieve.""" - + + +RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = Union[ + BaseModelCardTypedDict, FTModelCardTypedDict +] +r"""Successful Response""" + + +RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet = Annotated[ + Union[ + Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] +r"""Successful Response""" diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py index cab3b658..9cc9bb2d 100644 --- a/src/mistralai/models/retrievefileout.py +++ b/src/mistralai/models/retrievefileout.py @@ -3,7 +3,14 @@ from __future__ import annotations from .sampletype import SampleType from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, UnrecognizedStr +from mistralai.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) from mistralai.utils import validate_open_enum 
import pydantic from pydantic import model_serializer @@ -15,6 +22,7 @@ RetrieveFileOutPurpose = Union[Literal["fine-tune"], UnrecognizedStr] r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + class RetrieveFileOutTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" @@ -29,25 +37,35 @@ class RetrieveFileOutTypedDict(TypedDict): sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] - + class RetrieveFileOut(BaseModel): id: str r"""The unique identifier of the file.""" + object: str r"""The object type, which is always \"file\".""" + bytes: int r"""The size of the file, in bytes.""" + created_at: int r"""The UNIX timestamp (in seconds) of the event.""" + filename: str r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + + # fmt: off PURPOSE: Annotated[Final[Annotated[RetrieveFileOutPurpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + # fmt: on r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["num_lines"] @@ -61,9 +79,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -73,4 +95,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/security.py b/src/mistralai/models/security.py index 3d69602f..5bd4c7ed 100644 --- a/src/mistralai/models/security.py +++ b/src/mistralai/models/security.py @@ -9,8 +9,17 @@ class SecurityTypedDict(TypedDict): api_key: NotRequired[str] - + class Security(BaseModel): - api_key: Annotated[Optional[str], FieldMetadata(security=SecurityMetadata(scheme=True, scheme_type="http", sub_type="bearer", field_name="Authorization"))] = None - + api_key: Annotated[ + Optional[str], + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] = None diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py index 91d1a6ed..47dc7781 100644 --- a/src/mistralai/models/systemmessage.py +++ b/src/mistralai/models/systemmessage.py @@ -1,26 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict from mistralai.types import BaseModel from typing import List, Literal, Optional, TypedDict, Union from typing_extensions import NotRequired -ContentTypedDict = Union[str, List[ContentChunkTypedDict]] +ContentTypedDict = Union[str, List[TextChunkTypedDict]] -Content = Union[str, List[ContentChunk]] +Content = Union[str, List[TextChunk]] Role = Literal["system"] + class SystemMessageTypedDict(TypedDict): content: ContentTypedDict role: NotRequired[Role] - + class SystemMessage(BaseModel): content: Content + role: Optional[Role] = "system" - diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py index fd95ab82..9c1f9d7d 100644 --- a/src/mistralai/models/textchunk.py +++ b/src/mistralai/models/textchunk.py @@ -3,15 +3,20 @@ from __future__ import annotations from mistralai.types import BaseModel import pydantic -from typing import Final, Optional, TypedDict +from typing import Final, Literal, Optional, TypedDict from typing_extensions import Annotated +TextChunkType = Literal["text"] + + class TextChunkTypedDict(TypedDict): text: str - + class TextChunk(BaseModel): text: str - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "text" # type: ignore - + + # fmt: off + TYPE: Annotated[Final[Optional[TextChunkType]], pydantic.Field(alias="type")] = "text" # type: ignore + # fmt: on diff --git a/src/mistralai/models/tool.py b/src/mistralai/models/tool.py index 3a3ccdf8..51295f39 100644 --- a/src/mistralai/models/tool.py +++ b/src/mistralai/models/tool.py @@ -2,21 +2,22 @@ from __future__ import annotations from .function import Function, FunctionTypedDict -from mistralai.types import BaseModel, UnrecognizedStr +from .tooltypes import ToolTypes +from mistralai.types import BaseModel from mistralai.utils import validate_open_enum from 
pydantic.functional_validators import PlainValidator -from typing import Literal, Optional, TypedDict, Union +from typing import Optional, TypedDict from typing_extensions import Annotated, NotRequired -ToolToolTypes = Union[Literal["function"], UnrecognizedStr] - class ToolTypedDict(TypedDict): function: FunctionTypedDict - type: NotRequired[ToolToolTypes] - + type: NotRequired[ToolTypes] + class Tool(BaseModel): function: Function - type: Annotated[Optional[ToolToolTypes], PlainValidator(validate_open_enum(False))] = "function" - + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py index 4842aff2..66d570e6 100644 --- a/src/mistralai/models/toolcall.py +++ b/src/mistralai/models/toolcall.py @@ -2,23 +2,25 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict -from mistralai.types import BaseModel, UnrecognizedStr +from .tooltypes import ToolTypes +from mistralai.types import BaseModel from mistralai.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Literal, Optional, TypedDict, Union +from typing import Optional, TypedDict from typing_extensions import Annotated, NotRequired -ToolTypes = Union[Literal["function"], UnrecognizedStr] - class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] - + class ToolCall(BaseModel): function: FunctionCall + id: Optional[str] = "null" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = "function" - + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/src/mistralai/models/toolchoice.py b/src/mistralai/models/toolchoice.py new file mode 100644 index 00000000..fc36512a --- /dev/null +++ b/src/mistralai/models/toolchoice.py @@ -0,0 +1,29 @@ +"""Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .functionname import FunctionName, FunctionNameTypedDict +from .tooltypes import ToolTypes +from mistralai.types import BaseModel +from mistralai.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Optional, TypedDict +from typing_extensions import Annotated, NotRequired + + +class ToolChoiceTypedDict(TypedDict): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionNameTypedDict + r"""this restriction of `Function` is used to select a specific function to call""" + type: NotRequired[ToolTypes] + + +class ToolChoice(BaseModel): + r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" + + function: FunctionName + r"""this restriction of `Function` is used to select a specific function to call""" + + type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( + None + ) diff --git a/src/mistralai/models/toolchoiceenum.py b/src/mistralai/models/toolchoiceenum.py new file mode 100644 index 00000000..8e6a6ad8 --- /dev/null +++ b/src/mistralai/models/toolchoiceenum.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ToolChoiceEnum = Literal["auto", "none", "any", "required"] diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py index 8445861a..3c4be635 100644 --- a/src/mistralai/models/toolmessage.py +++ b/src/mistralai/models/toolmessage.py @@ -9,19 +9,23 @@ ToolMessageRole = Literal["tool"] + class ToolMessageTypedDict(TypedDict): content: str tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] role: NotRequired[ToolMessageRole] - + class ToolMessage(BaseModel): content: str + tool_call_id: OptionalNullable[str] = UNSET + name: OptionalNullable[str] = UNSET + role: Optional[ToolMessageRole] = "tool" - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["tool_call_id", "name", "role"] @@ -35,9 +39,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -47,4 +55,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/tooltypes.py b/src/mistralai/models/tooltypes.py new file mode 100644 index 00000000..fb581820 --- /dev/null +++ b/src/mistralai/models/tooltypes.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import UnrecognizedStr +from typing import Literal, Union + + +ToolTypes = Union[Literal["function"], UnrecognizedStr] diff --git a/src/mistralai/models/trainingfile.py b/src/mistralai/models/trainingfile.py index 097ea174..1917d377 100644 --- a/src/mistralai/models/trainingfile.py +++ b/src/mistralai/models/trainingfile.py @@ -9,9 +9,9 @@ class TrainingFileTypedDict(TypedDict): file_id: str weight: NotRequired[float] - + class TrainingFile(BaseModel): file_id: str + weight: Optional[float] = 1 - diff --git a/src/mistralai/models/trainingparameters.py b/src/mistralai/models/trainingparameters.py index dcbb3949..885f3ff5 100644 --- a/src/mistralai/models/trainingparameters.py +++ b/src/mistralai/models/trainingparameters.py @@ -14,20 +14,38 @@ class TrainingParametersTypedDict(TypedDict): warmup_fraction: NotRequired[Nullable[float]] epochs: NotRequired[Nullable[float]] fim_ratio: NotRequired[Nullable[float]] - + class TrainingParameters(BaseModel): training_steps: OptionalNullable[int] = UNSET + learning_rate: Optional[float] = 0.0001 + weight_decay: OptionalNullable[float] = UNSET + warmup_fraction: OptionalNullable[float] = UNSET + epochs: OptionalNullable[float] = UNSET + fim_ratio: OptionalNullable[float] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["training_steps", "learning_rate", "weight_decay", "warmup_fraction", "epochs", "fim_ratio"] - nullable_fields = ["training_steps", "weight_decay", "warmup_fraction", "epochs", "fim_ratio"] + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "fim_ratio", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "fim_ratio", + ] null_default_fields = [] serialized = handler(self) @@ -37,9 +55,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or 
n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -49,4 +71,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/trainingparametersin.py b/src/mistralai/models/trainingparametersin.py index f4ec585d..8ecb027b 100644 --- a/src/mistralai/models/trainingparametersin.py +++ b/src/mistralai/models/trainingparametersin.py @@ -9,7 +9,7 @@ class TrainingParametersInTypedDict(TypedDict): r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - + training_steps: NotRequired[Nullable[int]] r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" learning_rate: NotRequired[float] @@ -20,26 +20,44 @@ class TrainingParametersInTypedDict(TypedDict): r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" epochs: NotRequired[Nullable[float]] fim_ratio: NotRequired[Nullable[float]] - + class TrainingParametersIn(BaseModel): r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - + training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. 
A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: Optional[float] = 0.0001 r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + weight_decay: OptionalNullable[float] = UNSET r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" + warmup_fraction: OptionalNullable[float] = UNSET r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + epochs: OptionalNullable[float] = UNSET + fim_ratio: OptionalNullable[float] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["training_steps", "learning_rate", "weight_decay", "warmup_fraction", "epochs", "fim_ratio"] - nullable_fields = ["training_steps", "weight_decay", "warmup_fraction", "epochs", "fim_ratio"] + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "fim_ratio", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "fim_ratio", + ] null_default_fields = [] serialized = handler(self) @@ -49,9 +67,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -61,4 +83,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py index 7391df2c..6eac8200 100644 --- a/src/mistralai/models/unarchiveftmodelout.py +++ b/src/mistralai/models/unarchiveftmodelout.py @@ -9,13 +9,17 @@ UnarchiveFTModelOutObject = Literal["model"] + class UnarchiveFTModelOutTypedDict(TypedDict): id: str archived: NotRequired[bool] - + class UnarchiveFTModelOut(BaseModel): id: str + + # fmt: off OBJECT: Annotated[Final[Optional[UnarchiveFTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore + # fmt: on + archived: 
Optional[bool] = False - diff --git a/src/mistralai/models/updateftmodelin.py b/src/mistralai/models/updateftmodelin.py index 8c3d8475..c22c5115 100644 --- a/src/mistralai/models/updateftmodelin.py +++ b/src/mistralai/models/updateftmodelin.py @@ -10,12 +10,13 @@ class UpdateFTModelInTypedDict(TypedDict): name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] - + class UpdateFTModelIn(BaseModel): name: OptionalNullable[str] = UNSET + description: OptionalNullable[str] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["name", "description"] @@ -29,9 +30,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -41,4 +46,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py index dce8d0f2..7754ae3d 100644 --- a/src/mistralai/models/uploadfileout.py +++ b/src/mistralai/models/uploadfileout.py @@ -3,7 +3,14 @@ from __future__ import annotations from .sampletype import SampleType from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, UnrecognizedStr +from mistralai.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) from mistralai.utils import validate_open_enum import pydantic from pydantic import model_serializer @@ -15,6 +22,7 @@ Purpose = Union[Literal["fine-tune"], UnrecognizedStr] r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + class UploadFileOutTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" @@ -29,25 +37,35 @@ class UploadFileOutTypedDict(TypedDict): sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] - + class UploadFileOut(BaseModel): id: str r"""The unique identifier of the file.""" + object: str r"""The object type, which is always \"file\".""" + bytes: int r"""The size of the file, in bytes.""" + created_at: int r"""The UNIX timestamp (in seconds) of the event.""" + filename: str r"""The name of the uploaded file.""" + sample_type: SampleType + source: Source + + # fmt: off PURPOSE: Annotated[Final[Annotated[Purpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore + # fmt: on r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + num_lines: OptionalNullable[int] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["num_lines"] @@ -61,9 +79,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -73,4 +95,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/usageinfo.py b/src/mistralai/models/usageinfo.py index 153ab6b5..e8113e3b 100644 --- a/src/mistralai/models/usageinfo.py +++ b/src/mistralai/models/usageinfo.py @@ -9,10 +9,11 @@ class UsageInfoTypedDict(TypedDict): prompt_tokens: int completion_tokens: int total_tokens: int - + class 
UsageInfo(BaseModel): prompt_tokens: int + completion_tokens: int + total_tokens: int - diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py index a1749ec8..db4176ad 100644 --- a/src/mistralai/models/usermessage.py +++ b/src/mistralai/models/usermessage.py @@ -1,26 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict +from .contentchunk import ContentChunk, ContentChunkTypedDict from mistralai.types import BaseModel from typing import List, Literal, Optional, TypedDict, Union from typing_extensions import NotRequired -UserMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] +UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] -UserMessageContent = Union[str, List[TextChunk]] +UserMessageContent = Union[str, List[ContentChunk]] UserMessageRole = Literal["user"] + class UserMessageTypedDict(TypedDict): content: UserMessageContentTypedDict role: NotRequired[UserMessageRole] - + class UserMessage(BaseModel): content: UserMessageContent + role: Optional[UserMessageRole] = "user" - diff --git a/src/mistralai/models/validationerror.py b/src/mistralai/models/validationerror.py index 42b9af48..ed394a60 100644 --- a/src/mistralai/models/validationerror.py +++ b/src/mistralai/models/validationerror.py @@ -15,10 +15,11 @@ class ValidationErrorTypedDict(TypedDict): loc: List[LocTypedDict] msg: str type: str - + class ValidationError(BaseModel): loc: List[Loc] + msg: str + type: str - diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py index 2a86caa3..7659e274 100644 --- a/src/mistralai/models/wandbintegration.py +++ b/src/mistralai/models/wandbintegration.py @@ -10,6 +10,7 @@ WandbIntegrationType = Literal["wandb"] + class WandbIntegrationTypedDict(TypedDict): project: str r"""The name of the project that the new 
run will be created under.""" @@ -18,18 +19,24 @@ class WandbIntegrationTypedDict(TypedDict): name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] - + class WandbIntegration(BaseModel): project: str r"""The name of the project that the new run will be created under.""" + api_key: str r"""The WandB API key to use for authentication.""" + + # fmt: off TYPE: Annotated[Final[Optional[WandbIntegrationType]], pydantic.Field(alias="type")] = "wandb" # type: ignore + # fmt: on + name: OptionalNullable[str] = UNSET r"""A display name to set for the run. If not set, will use the job ID as the name.""" + run_name: OptionalNullable[str] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["type", "name", "run_name"] @@ -43,9 +50,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -55,4 +66,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py index f6e185a9..5635af79 100644 --- a/src/mistralai/models/wandbintegrationout.py +++ b/src/mistralai/models/wandbintegrationout.py @@ -10,22 +10,28 @@ Type = Literal["wandb"] + class WandbIntegrationOutTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" name: NotRequired[Nullable[str]] r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] - + class WandbIntegrationOut(BaseModel): project: str r"""The name of the project that the new run will be created under.""" + + # fmt: off TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "wandb" # type: ignore + # fmt: on + name: OptionalNullable[str] = UNSET r"""A display name to set for the run. If not set, will use the job ID as the name.""" + run_name: OptionalNullable[str] = UNSET - + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["type", "name", "run_name"] @@ -39,9 +45,13 @@ def serialize_model(self, handler): for n, f in self.model_fields.items(): k = f.alias or n val = serialized.get(k) + serialized.pop(k, None) optional_nullable = k in optional_fields and k in nullable_fields - is_set = (self.__pydantic_fields_set__.intersection({n}) or k in null_default_fields) # pylint: disable=no-member + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member if val is not None and val != UNSET_SENTINEL: m[k] = val @@ -51,4 +61,3 @@ def serialize_model(self, handler): m[k] = val return m - diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index aba57826..32fdcbce 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -7,12 +7,13 @@ from mistralai.utils import get_security_from_env from typing import Any, Optional + class Models(BaseSDK): r"""Model Management API""" - - + def list( - self, *, + self, + *, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -29,7 +30,7 @@ def list( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url req = self.build_request( @@ -46,44 +47,50 @@ def list( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if 
retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="list_models_v1_models_get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ModelList]) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def list_async( - self, *, + self, + *, retries: OptionalNullable[utils.RetryConfig] = 
UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -100,10 +107,10 @@ async def list_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - req = self.build_request( + req = self.build_request_async( method="GET", path="/v1/models", base_url=base_url, @@ -117,49 +124,57 @@ async def list_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="list_models_v1_models_get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ModelList]) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, 
http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def retrieve( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[models.ModelCard]: + ) -> Optional[ + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet + ]: r"""Retrieve Model Retrieve a model information. @@ -173,14 +188,14 @@ def retrieve( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, ) - + req = self.build_request( method="GET", path="/v1/models/{model_id}", @@ -195,49 +210,62 @@ def retrieve( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="retrieve_model_v1_models__model_id__get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - 
error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ModelCard]) + return utils.unmarshal_json( + http_res.text, + Optional[ + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet + ], + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def retrieve_async( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[models.ModelCard]: + ) -> Optional[ + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet + ]: r"""Retrieve Model Retrieve a model information. 
@@ -251,15 +279,15 @@ async def retrieve_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, ) - - req = self.build_request( + + req = self.build_request_async( method="GET", path="/v1/models/{model_id}", base_url=base_url, @@ -273,44 +301,55 @@ async def retrieve_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="retrieve_model_v1_models__model_id__get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ModelCard]) + return utils.unmarshal_json( + http_res.text, + Optional[ + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet + ], + ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise 
models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def delete( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -329,14 +368,14 @@ def delete( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, ) - + req = self.build_request( method="DELETE", path="/v1/models/{model_id}", @@ -351,44 +390,50 @@ def delete( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="delete_model_v1_models__model_id__delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - 
error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteModelOut]) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def delete_async( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -407,15 +452,15 @@ async def delete_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, ) - - req = self.build_request( + + req = self.build_request_async( method="DELETE", path="/v1/models/{model_id}", base_url=base_url, @@ -429,44 +474,50 @@ async def delete_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if 
isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="delete_model_v1_models__model_id__delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["422","4XX","5XX"], - retry_config=retry_config + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, ) - + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteModelOut]) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def update( - self, *, + self, + *, model_id: str, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -479,8 +530,8 @@ def update( Update a model name or 
description. :param model_id: The ID of the model to update. - :param name: - :param description: + :param name: + :param description: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -489,10 +540,10 @@ def update( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, update_ft_model_in=models.UpdateFTModelIn( @@ -500,7 +551,7 @@ def update( description=description, ), ) - + req = self.build_request( method="PATCH", path="/v1/fine_tuning/models/{model_id}", @@ -513,43 +564,51 @@ def update( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn), + get_serialized_body=lambda: utils.serialize_request_body( + request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + oauth2_scopes=[], + 
security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def update_async( - self, *, + self, + *, model_id: str, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -562,8 +621,8 @@ async def update_async( Update a model name or description. :param model_id: The ID of the model to update. 
- :param name: - :param description: + :param name: + :param description: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -572,10 +631,10 @@ async def update_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, update_ft_model_in=models.UpdateFTModelIn( @@ -583,8 +642,8 @@ async def update_async( description=description, ), ) - - req = self.build_request( + + req = self.build_request_async( method="PATCH", path="/v1/fine_tuning/models/{model_id}", base_url=base_url, @@ -596,43 +655,51 @@ async def update_async( user_agent_header="user-agent", accept_header_value="application/json", security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body(request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn), + get_serialized_body=lambda: utils.serialize_request_body( + request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + ), timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + 
oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def archive( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -651,14 +718,14 @@ def archive( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, ) - + req = self.build_request( method="POST", path="/v1/fine_tuning/models/{model_id}/archive", @@ -673,40 +740,48 @@ def archive( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = 
(retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ArchiveFTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + return utils.unmarshal_json( + http_res.text, Optional[models.ArchiveFTModelOut] + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def archive_async( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -725,15 +800,15 @@ async def archive_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( 
model_id=model_id, ) - - req = self.build_request( + + req = self.build_request_async( method="POST", path="/v1/fine_tuning/models/{model_id}/archive", base_url=base_url, @@ -747,40 +822,48 @@ async def archive_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ArchiveFTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + return utils.unmarshal_json( + http_res.text, Optional[models.ArchiveFTModelOut] + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( 
+ f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - def unarchive( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -799,14 +882,14 @@ def unarchive( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, ) - + req = self.build_request( method="DELETE", path="/v1/fine_tuning/models/{model_id}/archive", @@ -821,40 +904,48 @@ def unarchive( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = self.do_request( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.UnarchiveFTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - + 
return utils.unmarshal_json( + http_res.text, Optional[models.UnarchiveFTModelOut] + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) + content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) - - async def unarchive_async( - self, *, + self, + *, model_id: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -873,15 +964,15 @@ async def unarchive_async( url_variables = None if timeout_ms is None: timeout_ms = self.sdk_configuration.timeout_ms - + if server_url is not None: base_url = server_url - + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, ) - - req = self.build_request( + + req = self.build_request_async( method="DELETE", path="/v1/fine_tuning/models/{model_id}/archive", base_url=base_url, @@ -895,34 +986,41 @@ async def unarchive_async( security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) - + if retries == UNSET: if self.sdk_configuration.retry_config is not UNSET: retries = self.sdk_configuration.retry_config retry_config = None if isinstance(retries, utils.RetryConfig): - retry_config = (retries, [ - "429", - "500", - "502", - "503", - "504" - ]) - + retry_config = (retries, ["429", "500", "502", "503", "504"]) + http_res = await self.do_request_async( - hook_ctx=HookContext(operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env(self.sdk_configuration.security, models.Security)), + hook_ctx=HookContext( + 
operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), request=req, - error_status_codes=["4XX","5XX"], - retry_config=retry_config + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, ) - + if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.UnarchiveFTModelOut]) - if utils.match_response(http_res, ["4XX","5XX"], "*"): - raise models.SDKError("API error occurred", http_res.status_code, http_res.text, http_res) - - content_type = http_res.headers.get("Content-Type") - raise models.SDKError(f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, http_res.text, http_res) + return utils.unmarshal_json( + http_res.text, Optional[models.UnarchiveFTModelOut] + ) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + raise models.SDKError( + "API error occurred", http_res.status_code, http_res.text, http_res + ) - + content_type = http_res.headers.get("Content-Type") + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res.text, + http_res, + ) diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 1b79f700..05029abb 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -18,8 +18,10 @@ from mistralai.types import OptionalNullable, UNSET from typing import Any, Callable, Dict, Optional, Union + class Mistral(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + models: Models r"""Model Management API""" files: Files @@ -33,6 +35,7 @@ class Mistral(BaseSDK): r"""Agents API.""" embeddings: Embeddings r"""Embeddings API.""" + def __init__( self, api_key: Optional[Union[Optional[str], Callable[[], Optional[str]]]] = None, @@ -43,7 +46,7 @@ def __init__( async_client: Optional[AsyncHttpClient] = None, retry_config: OptionalNullable[RetryConfig] = UNSET, timeout_ms: Optional[int] = None, - debug_logger: Optional[Logger] = None + debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. @@ -72,33 +75,37 @@ def __init__( assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." - + security: Any = None if callable(api_key): - security = lambda: models.Security(api_key = api_key()) # pylint: disable=unnecessary-lambda-assignment + security = lambda: models.Security(api_key=api_key()) # pylint: disable=unnecessary-lambda-assignment else: - security = models.Security(api_key = api_key) + security = models.Security(api_key=api_key) if server_url is not None: if url_params is not None: server_url = utils.template_url(server_url, url_params) - - - BaseSDK.__init__(self, SDKConfiguration( - client=client, - async_client=async_client, - security=security, - server_url=server_url, - server=server, - retry_config=retry_config, - timeout_ms=timeout_ms, - debug_logger=debug_logger - )) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + async_client=async_client, + security=security, + server_url=server_url, + server=server, + retry_config=retry_config, + timeout_ms=timeout_ms, + debug_logger=debug_logger, + ), + ) hooks = SDKHooks() current_server_url, *_ 
= self.sdk_configuration.get_server_details() - server_url, self.sdk_configuration.client = hooks.sdk_init(current_server_url, self.sdk_configuration.client) + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, self.sdk_configuration.client + ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -107,7 +114,6 @@ def __init__( self._init_sdks() - def _init_sdks(self): self.models = Models(self.sdk_configuration) self.files = Files(self.sdk_configuration) @@ -116,4 +122,3 @@ def _init_sdks(self): self.fim = Fim(self.sdk_configuration) self.agents = Agents(self.sdk_configuration) self.embeddings = Embeddings(self.sdk_configuration) - diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index a8699133..3faf7050 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - from ._hooks import SDKHooks from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix @@ -14,7 +13,7 @@ SERVER_PROD = "prod" r"""Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_PROD: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -24,14 +23,14 @@ class SDKConfiguration: client: HttpClient async_client: AsyncHttpClient debug_logger: Logger - security: Optional[Union[models.Security,Callable[[], models.Security]]] = None + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.0.3" - gen_version: str = "2.404.11" - user_agent: str = "speakeasy-sdk/python 1.0.3 2.404.11 0.0.2 mistralai" + sdk_version: str = "1.1.1" + gen_version: str = "2.415.6" + user_agent: str = "speakeasy-sdk/python 1.1.1 2.415.6 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None @@ -45,10 +44,9 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: self.server = SERVER_PROD if self.server not in SERVERS: - raise ValueError(f"Invalid server \"{self.server}\"") + raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - def get_hooks(self) -> SDKHooks: return self._hooks diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py index feee4dc5..74109c11 100644 --- a/src/mistralai/utils/__init__.py +++ b/src/mistralai/utils/__init__.py @@ -34,7 +34,12 @@ validate_open_enum, ) from .url import generate_url, template_url, remove_suffix -from .values import get_global_from_env, match_content_type, match_status_codes, match_response +from .values import ( + get_global_from_env, + 
match_content_type, + match_status_codes, + match_response, +) from .logger import Logger, get_body_content, get_default_logger __all__ = [ diff --git a/src/mistralai/utils/logger.py b/src/mistralai/utils/logger.py index c4ea1a03..cc089307 100644 --- a/src/mistralai/utils/logger.py +++ b/src/mistralai/utils/logger.py @@ -5,20 +5,23 @@ import os from typing import Any, Protocol + class Logger(Protocol): def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: pass + class NoOpLogger: def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: pass + def get_body_content(req: httpx.Request) -> str: return "" if not hasattr(req, "_content") else str(req.content) + def get_default_logger() -> Logger: if os.getenv("MISTRAL_DEBUG"): logging.basicConfig(level=logging.DEBUG) return logging.getLogger("mistralai") return NoOpLogger() - diff --git a/src/mistralai/utils/retries.py b/src/mistralai/utils/retries.py index a06f9279..4d608671 100644 --- a/src/mistralai/utils/retries.py +++ b/src/mistralai/utils/retries.py @@ -1,5 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +import asyncio import random import time from typing import List @@ -212,5 +213,5 @@ async def retry_with_backoff_async( raise sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) sleep = min(sleep, max_interval / 1000) - time.sleep(sleep) + await asyncio.sleep(sleep) retries += 1 diff --git a/src/mistralai/utils/security.py b/src/mistralai/utils/security.py index 4c511d94..3b8526bf 100644 --- a/src/mistralai/utils/security.py +++ b/src/mistralai/utils/security.py @@ -44,8 +44,10 @@ def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: _parse_security_option(headers, query_params, value) return headers, query_params if metadata.scheme: - # Special case for basic auth which could be a flattened model - if metadata.sub_type == "basic" and not isinstance(value, BaseModel): + # Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): _parse_security_scheme(headers, query_params, metadata, name, security) else: _parse_security_scheme(headers, query_params, metadata, name, value) @@ -64,7 +66,7 @@ def get_security_from_env(security: Any, security_class: Any) -> Optional[BaseMo if os.getenv("MISTRAL_API_KEY"): security_dict["api_key"] = os.getenv("MISTRAL_API_KEY") - + return security_class(**security_dict) if security_dict else None @@ -97,9 +99,12 @@ def _parse_security_scheme( sub_type = scheme_metadata.sub_type if isinstance(scheme, BaseModel): - if scheme_type == "http" and sub_type == "basic": - _parse_basic_auth_scheme(headers, scheme) - return + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields for name in scheme_fields: @@ -148,6 +153,8 @@ def _parse_security_scheme_value( elif scheme_type == "http": if sub_type == "bearer": 
headers[header_name] = _apply_bearer(value) + elif sub_type == "custom": + return else: raise ValueError("sub type {sub_type} not supported") else: From d8ba2ef909043d977440da63d1287d51631a129e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 17 Sep 2024 18:24:05 +0200 Subject: [PATCH 077/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.1.0=20(#145)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.396.7 * fix readme * change pydantic constraint --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .speakeasy/gen.lock | 9 +- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 2 +- README.md | 22 +-- RELEASES.md | 12 +- poetry.lock | 293 ++++++++++++++++-------------- pyproject.toml | 17 +- src/mistralai/sdkconfiguration.py | 6 +- 8 files changed, 201 insertions(+), 162 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index d333d1a2..b23077ed 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -3,10 +3,10 @@ id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: docChecksum: e75ca54601920b2770d9a559b299d272 docVersion: 0.0.2 - speakeasyVersion: 1.396.7 - generationVersion: 2.415.6 - releaseVersion: 1.1.1 - configChecksum: 450e609764e2b24aa8ece63616b81348 + speakeasyVersion: 1.398.0 + generationVersion: 2.415.8 + releaseVersion: 1.1.0 + configChecksum: 49094e0f156d020bd164f8b4bd41e97b repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -194,6 +194,7 @@ generatedFiles: - poetry.toml - py.typed - pylintrc + - pyproject.toml - scripts/compile.sh - scripts/prepare-readme.py - scripts/publish.sh diff --git a/.speakeasy/gen.yaml 
b/.speakeasy/gen.yaml index ffccc0b7..8fd69aba 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -12,7 +12,7 @@ generation: auth: oAuth2ClientCredentialsEnabled: true python: - version: 1.1.1 + version: 1.1.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 4ef07869..44dff84f 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.396.7 +speakeasyVersion: 1.398.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure diff --git a/README.md b/README.md index f4017d31..0c63e5ed 100644 --- a/README.md +++ b/README.md @@ -481,10 +481,10 @@ if res is not None: Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. -| Error Object | Status Code | Content Type | -| -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| Error Object | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4xx-5xx | */* | ### Example @@ -520,9 +520,9 @@ except models.SDKError as e: You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. 
This table lists the names associated with the available servers: -| Name | Server | Variables | -| ----- | ------ | --------- | -| `prod` | `https://api.mistral.ai` | None | +| Name | Server | Variables | +| ------ | ------------------------ | --------- | +| `prod` | `https://api.mistral.ai` | None | #### Example @@ -653,9 +653,9 @@ s = Mistral(async_client=CustomClient(httpx.AsyncClient())) This SDK supports the following security scheme globally: -| Name | Type | Scheme | Environment Variable | -| -------------------- | -------------------- | -------------------- | -------------------- | -| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | +| Name | Type | Scheme | Environment Variable | +| --------- | ---- | ----------- | -------------------- | +| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: ```python @@ -709,4 +709,4 @@ Generally, the SDK will work well with most IDEs out of the box. However, when u ## Contributions While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. -We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. \ No newline at end of file diff --git a/RELEASES.md b/RELEASES.md index b92e67ac..fee21dfb 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -38,4 +38,14 @@ Based on: ### Generated - [python v1.0.3] . ### Releases -- [PyPI v1.0.3] https://pypi.org/project/mistralai/1.0.3 - . 
\ No newline at end of file +- [PyPI v1.0.3] https://pypi.org/project/mistralai/1.0.3 - . + +## 2024-09-13 16:21:24 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.396.7 (2.415.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.1.0] . +### Releases +- [PyPI v1.1.0] https://pypi.org/project/mistralai/1.1.0 - . \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 78e95d41..5575630b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "annotated-types" @@ -52,24 +52,24 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} [[package]] name = "cachetools" -version = "5.4.0" +version = "5.5.0" description = "Extensible memoizing collections and decorators" optional = true python-versions = ">=3.7" files = [ - {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"}, - {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"}, + {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, + {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, ] [[package]] name = "certifi" -version = "2024.7.4" +version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, - {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, ] [[package]] @@ -197,6 +197,20 @@ files = [ graph = ["objgraph (>=1.7.2)"] profile = ["gprof2dot (>=2022.7.29)"] +[[package]] +name = "eval-type-backport" +version = "0.2.0" +description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." +optional = false +python-versions = ">=3.8" +files = [ + {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, + {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, +] + +[package.extras] +tests = ["pytest"] + [[package]] name = "exceptiongroup" version = "1.2.2" @@ -268,13 +282,13 @@ trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" -version = "0.27.0" +version = "0.27.2" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, - {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, ] [package.dependencies] @@ -289,18 +303,22 @@ brotli = ["brotli", "brotlicffi"] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "idna" -version = "3.7" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "iniconfig" version = "2.0.0" @@ -419,19 +437,19 @@ files = [ [[package]] name = "platformdirs" -version = "4.2.2" +version = "4.3.3" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, + {file = "platformdirs-4.3.3-py3-none-any.whl", hash = "sha256:50a5450e2e84f44539718293cbb1da0a0885c9d14adf21b77bae4e66fc99d9b5"}, + {file = "platformdirs-4.3.3.tar.gz", hash = "sha256:d4e0b7d8ec176b341fb03cb11ca12d0276faa8c485f9cd218f613840463fc2c0"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] [[package]] name = "pluggy" @@ -450,24 +468,24 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pyasn1" -version = "0.6.0" +version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = true python-versions = ">=3.8" files = [ - {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, - {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, ] [[package]] name = "pyasn1-modules" -version = "0.4.0" +version = "0.4.1" description = "A collection of ASN.1-based 
protocols modules" optional = true python-versions = ">=3.8" files = [ - {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, - {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, + {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, + {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, ] [package.dependencies] @@ -475,18 +493,18 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.8.2" +version = "2.9.1" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, - {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, + {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, + {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, ] [package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.20.1" +annotated-types = ">=0.6.0" +pydantic-core = "2.23.3" typing-extensions = [ {version = ">=4.6.1", markers = "python_version < \"3.13\""}, {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, @@ -494,103 +512,104 @@ typing-extensions = [ [package.extras] email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.20.1" +version = "2.23.3" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = 
"pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, - {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, - {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, - {file = 
"pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, - {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, - {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, - {file = 
"pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, - {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, - {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, - {file = 
"pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, - {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, - {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, - {file = 
"pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, - {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, - {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = 
"sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, - {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, - {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, - {file = 
"pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, - {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, + {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, + {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, + {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, + {file = 
"pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, + {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, + {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, + {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, + {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, + {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, + {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, + {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, + {file = 
"pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, + {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, + {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, + {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, + {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, + {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, + {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, + {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, + {file = 
"pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, + {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, + {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, + {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, + {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, + {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, + {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, + {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, + {file = 
"pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, + {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, + {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, + {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, + {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, + {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, + {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, + {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, + {file = 
"pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, + {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, + {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, + {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, + {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, + {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, + {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, + {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash 
= "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, + {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, + {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, + {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, + {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, + {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, + 
{file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, + {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, + {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, ] [package.dependencies] @@ -628,13 +647,13 @@ testutils = ["gitpython (>3)"] [[package]] name = "pytest" -version = "8.3.2" +version = "8.3.3" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, - {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, + {file = "pytest-8.3.3-py3-none-any.whl", hash = 
"sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, + {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, ] [package.dependencies] @@ -668,13 +687,13 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] [[package]] name = "python-dateutil" -version = "2.9.0.post0" +version = "2.8.2" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, - {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, ] [package.dependencies] @@ -750,24 +769,24 @@ files = [ [[package]] name = "tomlkit" -version = "0.13.0" +version = "0.13.2" description = "Style preserving TOML library" optional = false python-versions = ">=3.8" files = [ - {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, - {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, + {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, + {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, ] [[package]] name = "types-python-dateutil" -version = "2.9.0.20240316" +version = "2.9.0.20240906" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" files = [ - {file = 
"types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, - {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, + {file = "types-python-dateutil-2.9.0.20240906.tar.gz", hash = "sha256:9706c3b68284c25adffc47319ecc7947e5bb86b3773f843c73906fd598bc176e"}, + {file = "types_python_dateutil-2.9.0.20240906-py3-none-any.whl", hash = "sha256:27c8cc2d058ccb14946eebcaaa503088f4f6dbc4fb6093d3d456a49aef2753f6"}, ] [[package]] @@ -798,13 +817,13 @@ typing-extensions = ">=3.7.4" [[package]] name = "urllib3" -version = "2.2.2" +version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = true python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, - {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, ] [package.extras] @@ -819,4 +838,4 @@ gcp = ["google-auth", "requests"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "ed93474ac9f1d994cf76bfbd505206701b3c0ace3a2402e9c638f270301401cd" +content-hash = "4b71116df8e3bfdf2d9d75058277f721dc8ac40f7a51b3f4d82b7a3e78a21706" diff --git a/pyproject.toml b/pyproject.toml index 6c630a9b..4200b2ba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,10 @@ [tool.poetry] name = "mistralai" -version = "1.1.1" +version = "1.1.0" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] -readme = "README.md" +readme = "README-PYPI.md" +repository = "https://github.com/mistralai/client-python.git" packages = [ { include = "mistralai", from = "src" }, { include = "mistralai_azure", from = "packages/mistralai_azure/src" }, @@ -14,12 +15,16 @@ include = ["py.typed", "src/mistralai/py.typed"] [tool.setuptools.package-data] "*" = ["py.typed", "src/mistralai/py.typed"] +[virtualenvs] +in-project = true + [tool.poetry.dependencies] python = "^3.8" +eval-type-backport = "^0.2.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" -pydantic = "~2.8.2" -python-dateutil = "^2.9.0.post0" +pydantic = "^2.9.0" +python-dateutil = "2.8.2" typing-inspect = "^0.9.0" google-auth = { version = "2.27.0", optional = true } requests = { version = "^2.32.3", optional = true } @@ -51,3 +56,7 @@ ignore_missing_imports = true [[tool.mypy.overrides]] module = "jsonpath" ignore_missing_imports = true + +[tool.pyright] +venvPath = "." +venv = ".venv" diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 3faf7050..0a7c3322 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.1.1" - gen_version: str = "2.415.6" - user_agent: str = "speakeasy-sdk/python 1.1.1 2.415.6 0.0.2 mistralai" + sdk_version: str = "1.1.0" + gen_version: str = "2.415.8" + user_agent: str = "speakeasy-sdk/python 1.1.0 2.415.8 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From 9f487dc998e948a52e17bcaa9aace5ccfa92571b Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Thu, 26 Sep 2024 13:39:13 +0200 Subject: [PATCH 078/223] only gen from main tag (#147) --- .speakeasy/workflow.yaml | 12 ++++++------ 1 
file changed, 6 insertions(+), 6 deletions(-) diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 4076ff32..6ef130ae 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -3,19 +3,19 @@ speakeasyVersion: latest sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main targets: mistralai-azure-sdk: target: python From cc2a09a458f417ced42cf08b707ad71738064998 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 7 Nov 2024 22:05:46 +0100 Subject: [PATCH 079/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.2.0=20(#153)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.434.3 * push providers * fix assistant 
message * fix assistant message in providers --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .speakeasy/gen.lock | 150 +++- .speakeasy/gen.yaml | 8 +- .speakeasy/workflow.lock | 52 +- .speakeasy/workflow.yaml | 12 + README.md | 101 ++- RELEASES.md | 12 +- USAGE.md | 63 +- docs/models/agentscompletionrequest.md | 6 +- .../models/agentscompletionrequestmessages.md | 6 + docs/models/agentscompletionstreamrequest.md | 26 +- .../agentscompletionstreamrequestmessages.md | 6 + docs/models/apiendpoint.md | 11 + docs/models/assistantmessage.md | 12 +- docs/models/assistantmessagecontent.md | 17 + docs/models/basemodelcard.md | 3 +- docs/models/batcherror.md | 9 + docs/models/batchjobin.md | 12 + docs/models/batchjobout.md | 24 + docs/models/batchjoboutobject.md | 8 + docs/models/batchjobsout.md | 10 + docs/models/batchjobsoutobject.md | 8 + docs/models/batchjobstatus.md | 14 + docs/models/chatclassificationrequest.md | 9 + .../models/chatclassificationrequestinputs.md | 19 + docs/models/chatcompletionrequest.md | 32 +- docs/models/chatcompletionstreamrequest.md | 32 +- docs/models/classificationobject.md | 9 + docs/models/classificationrequest.md | 9 + docs/models/classificationrequestinputs.md | 19 + docs/models/classificationresponse.md | 10 + docs/models/content.md | 4 +- docs/models/deltamessage.md | 10 +- docs/models/embeddingrequest.md | 10 +- ...tesuploadfilepurpose.md => filepurpose.md} | 5 +- .../filesapiroutesdownloadfilerequest.md | 8 + docs/models/filesapirouteslistfilesrequest.md | 13 + ...sapiroutesuploadfilemultipartbodyparams.md | 2 +- docs/models/fileschema.md | 22 +- docs/models/fileschemapurpose.md | 10 - docs/models/fimcompletionrequest.md | 24 +- docs/models/fimcompletionstreamrequest.md | 24 +- docs/models/ftmodelcard.md | 3 +- docs/models/ftmodelcardtype.md | 8 + docs/models/httpvalidationerror.md | 2 - ...jobsapiroutesbatchcancelbatchjobrequest.md | 8 + .../jobsapiroutesbatchgetbatchjobrequest.md | 8 + 
.../jobsapiroutesbatchgetbatchjobsrequest.md | 14 + docs/models/listfilesout.md | 3 +- docs/models/one.md | 29 + docs/models/purpose.md | 10 - docs/models/retrievefileout.md | 23 +- docs/models/retrievefileoutpurpose.md | 10 - docs/models/sampletype.md | 11 +- docs/models/source.md | 3 +- docs/models/systemmessage.md | 8 +- docs/models/systemmessagecontent.md | 17 + docs/models/trainingparameters.md | 3 +- docs/models/trainingparametersin.md | 3 +- docs/models/two.md | 29 + docs/models/type.md | 6 +- docs/models/uploadfileout.md | 22 +- docs/models/usermessage.md | 8 +- docs/models/wandbintegrationout.md | 12 +- docs/models/wandbintegrationouttype.md | 8 + docs/sdks/agents/README.md | 53 +- docs/sdks/batch/README.md | 2 + docs/sdks/chat/README.md | 87 ++- docs/sdks/classifiers/README.md | 101 +++ docs/sdks/embeddings/README.md | 21 +- docs/sdks/files/README.md | 66 +- docs/sdks/fim/README.md | 61 +- docs/sdks/jobs/README.md | 24 +- docs/sdks/mistraljobs/README.md | 179 +++++ docs/sdks/models/README.md | 29 +- examples/function_calling.py | 29 +- packages/mistralai_azure/.speakeasy/gen.lock | 33 +- packages/mistralai_azure/.speakeasy/gen.yaml | 6 +- .../docs/models/assistantmessage.md | 12 +- .../docs/models/assistantmessagecontent.md | 17 + .../docs/models/chatcompletionrequest.md | 32 +- .../models/chatcompletionstreamrequest.md | 32 +- .../mistralai_azure/docs/models/content.md | 4 +- .../docs/models/deltamessage.md | 10 +- .../docs/models/httpvalidationerror.md | 2 - .../docs/models/systemmessage.md | 8 +- .../docs/models/systemmessagecontent.md | 17 + .../docs/models/usermessage.md | 8 +- packages/mistralai_azure/pylintrc | 1 + packages/mistralai_azure/pyproject.toml | 4 +- packages/mistralai_azure/scripts/compile.sh | 85 -- .../src/mistralai_azure/__init__.py | 4 + .../src/mistralai_azure/_version.py | 12 + .../src/mistralai_azure/chat.py | 94 ++- .../src/mistralai_azure/models/__init__.py | 12 +- .../models/assistantmessage.py | 17 +- 
.../models/chatcompletionchoice.py | 15 +- .../models/chatcompletionrequest.py | 45 +- .../models/chatcompletionresponse.py | 4 +- .../models/chatcompletionstreamrequest.py | 45 +- .../mistralai_azure/models/completionchunk.py | 4 +- .../mistralai_azure/models/completionevent.py | 2 +- .../models/completionresponsestreamchoice.py | 13 +- .../mistralai_azure/models/deltamessage.py | 21 +- .../src/mistralai_azure/models/function.py | 4 +- .../mistralai_azure/models/functioncall.py | 3 +- .../mistralai_azure/models/functionname.py | 2 +- .../models/httpvalidationerror.py | 2 - .../mistralai_azure/models/responseformat.py | 4 +- .../src/mistralai_azure/models/security.py | 3 +- .../mistralai_azure/models/systemmessage.py | 12 +- .../src/mistralai_azure/models/textchunk.py | 14 +- .../src/mistralai_azure/models/tool.py | 4 +- .../src/mistralai_azure/models/toolcall.py | 4 +- .../src/mistralai_azure/models/toolchoice.py | 4 +- .../src/mistralai_azure/models/toolmessage.py | 4 +- .../src/mistralai_azure/models/usageinfo.py | 2 +- .../src/mistralai_azure/models/usermessage.py | 41 +- .../mistralai_azure/models/validationerror.py | 3 +- .../src/mistralai_azure/sdkconfiguration.py | 14 +- .../src/mistralai_azure/utils/__init__.py | 8 + .../src/mistralai_azure/utils/annotations.py | 15 +- .../src/mistralai_azure/utils/serializers.py | 25 + packages/mistralai_gcp/.speakeasy/gen.lock | 33 +- packages/mistralai_gcp/.speakeasy/gen.yaml | 6 +- .../docs/models/assistantmessage.md | 12 +- .../docs/models/assistantmessagecontent.md | 17 + .../docs/models/chatcompletionrequest.md | 30 +- .../models/chatcompletionstreamrequest.md | 30 +- packages/mistralai_gcp/docs/models/content.md | 4 +- .../mistralai_gcp/docs/models/deltamessage.md | 10 +- .../docs/models/fimcompletionrequest.md | 24 +- .../docs/models/fimcompletionstreamrequest.md | 24 +- .../docs/models/httpvalidationerror.md | 2 - .../docs/models/systemmessage.md | 8 +- .../docs/models/systemmessagecontent.md | 17 + 
.../mistralai_gcp/docs/models/usermessage.md | 8 +- packages/mistralai_gcp/pylintrc | 1 + packages/mistralai_gcp/pyproject.toml | 4 +- packages/mistralai_gcp/scripts/compile.sh | 85 -- .../src/mistralai_gcp/__init__.py | 4 + .../src/mistralai_gcp/_version.py | 12 + .../mistralai_gcp/src/mistralai_gcp/chat.py | 94 ++- .../mistralai_gcp/src/mistralai_gcp/fim.py | 70 +- .../src/mistralai_gcp/models/__init__.py | 12 +- .../mistralai_gcp/models/assistantmessage.py | 17 +- .../models/chatcompletionchoice.py | 15 +- .../models/chatcompletionrequest.py | 45 +- .../models/chatcompletionresponse.py | 4 +- .../models/chatcompletionstreamrequest.py | 45 +- .../mistralai_gcp/models/completionchunk.py | 4 +- .../mistralai_gcp/models/completionevent.py | 2 +- .../models/completionresponsestreamchoice.py | 13 +- .../src/mistralai_gcp/models/deltamessage.py | 21 +- .../models/fimcompletionrequest.py | 33 +- .../models/fimcompletionresponse.py | 4 +- .../models/fimcompletionstreamrequest.py | 33 +- .../src/mistralai_gcp/models/function.py | 4 +- .../src/mistralai_gcp/models/functioncall.py | 3 +- .../src/mistralai_gcp/models/functionname.py | 2 +- .../models/httpvalidationerror.py | 2 - .../mistralai_gcp/models/responseformat.py | 4 +- .../src/mistralai_gcp/models/security.py | 3 +- .../src/mistralai_gcp/models/systemmessage.py | 12 +- .../src/mistralai_gcp/models/textchunk.py | 14 +- .../src/mistralai_gcp/models/tool.py | 4 +- .../src/mistralai_gcp/models/toolcall.py | 4 +- .../src/mistralai_gcp/models/toolchoice.py | 4 +- .../src/mistralai_gcp/models/toolmessage.py | 4 +- .../src/mistralai_gcp/models/usageinfo.py | 2 +- .../src/mistralai_gcp/models/usermessage.py | 41 +- .../mistralai_gcp/models/validationerror.py | 3 +- .../src/mistralai_gcp/sdkconfiguration.py | 14 +- .../src/mistralai_gcp/utils/__init__.py | 8 + .../src/mistralai_gcp/utils/annotations.py | 15 +- .../src/mistralai_gcp/utils/serializers.py | 25 + pylintrc | 1 + pyproject.toml | 2 +- scripts/compile.sh | 85 -- 
src/mistralai/__init__.py | 4 + src/mistralai/_version.py | 12 + src/mistralai/agents.py | 78 +- src/mistralai/batch.py | 17 + src/mistralai/chat.py | 94 ++- src/mistralai/classifiers.py | 396 ++++++++++ src/mistralai/embeddings.py | 16 +- src/mistralai/files.py | 271 ++++++- src/mistralai/fim.py | 70 +- src/mistralai/jobs.py | 60 +- src/mistralai/mistral_jobs.py | 733 ++++++++++++++++++ src/mistralai/models/__init__.py | 126 ++- .../models/agentscompletionrequest.py | 37 +- .../models/agentscompletionstreamrequest.py | 37 +- src/mistralai/models/apiendpoint.py | 9 + src/mistralai/models/archiveftmodelout.py | 16 +- src/mistralai/models/assistantmessage.py | 17 +- src/mistralai/models/basemodelcard.py | 28 +- src/mistralai/models/batcherror.py | 17 + src/mistralai/models/batchjobin.py | 58 ++ src/mistralai/models/batchjobout.py | 117 +++ src/mistralai/models/batchjobsout.py | 30 + src/mistralai/models/batchjobstatus.py | 15 + .../models/chatclassificationrequest.py | 104 +++ src/mistralai/models/chatcompletionchoice.py | 13 +- src/mistralai/models/chatcompletionrequest.py | 45 +- .../models/chatcompletionresponse.py | 4 +- .../models/chatcompletionstreamrequest.py | 45 +- src/mistralai/models/checkpointout.py | 2 +- src/mistralai/models/classificationobject.py | 21 + src/mistralai/models/classificationrequest.py | 59 ++ .../models/classificationresponse.py | 21 + src/mistralai/models/completionchunk.py | 4 +- src/mistralai/models/completionevent.py | 2 +- .../models/completionresponsestreamchoice.py | 16 +- ...elete_model_v1_models_model_id_deleteop.py | 3 +- src/mistralai/models/deletefileout.py | 2 +- src/mistralai/models/deletemodelout.py | 4 +- src/mistralai/models/deltamessage.py | 21 +- src/mistralai/models/detailedjobout.py | 16 +- src/mistralai/models/embeddingrequest.py | 10 +- src/mistralai/models/embeddingresponse.py | 3 +- src/mistralai/models/embeddingresponsedata.py | 4 +- src/mistralai/models/eventout.py | 4 +- src/mistralai/models/filepurpose.py | 8 
+ .../models/files_api_routes_delete_fileop.py | 3 +- .../files_api_routes_download_fileop.py | 16 + .../models/files_api_routes_list_filesop.py | 96 +++ .../files_api_routes_retrieve_fileop.py | 3 +- .../models/files_api_routes_upload_fileop.py | 18 +- src/mistralai/models/fileschema.py | 28 +- src/mistralai/models/fimcompletionrequest.py | 33 +- src/mistralai/models/fimcompletionresponse.py | 4 +- .../models/fimcompletionstreamrequest.py | 33 +- .../models/ftmodelcapabilitiesout.py | 4 +- src/mistralai/models/ftmodelcard.py | 30 +- src/mistralai/models/ftmodelout.py | 14 +- src/mistralai/models/function.py | 4 +- src/mistralai/models/functioncall.py | 3 +- src/mistralai/models/functionname.py | 2 +- src/mistralai/models/githubrepositoryin.py | 16 +- src/mistralai/models/githubrepositoryout.py | 16 +- src/mistralai/models/httpvalidationerror.py | 2 - src/mistralai/models/imageurl.py | 3 +- src/mistralai/models/imageurlchunk.py | 16 +- src/mistralai/models/jobin.py | 4 +- src/mistralai/models/jobmetadataout.py | 3 +- src/mistralai/models/jobout.py | 15 +- ...obs_api_routes_batch_cancel_batch_jobop.py | 16 + .../jobs_api_routes_batch_get_batch_jobop.py | 16 + .../jobs_api_routes_batch_get_batch_jobsop.py | 95 +++ ..._fine_tuning_archive_fine_tuned_modelop.py | 3 +- ...es_fine_tuning_cancel_fine_tuning_jobop.py | 3 +- ...outes_fine_tuning_get_fine_tuning_jobop.py | 3 +- ...utes_fine_tuning_get_fine_tuning_jobsop.py | 4 +- ...tes_fine_tuning_start_fine_tuning_jobop.py | 3 +- ...ine_tuning_unarchive_fine_tuned_modelop.py | 3 +- ...s_fine_tuning_update_fine_tuned_modelop.py | 3 +- src/mistralai/models/jobsout.py | 14 +- src/mistralai/models/legacyjobmetadataout.py | 17 +- src/mistralai/models/listfilesout.py | 6 +- src/mistralai/models/metricout.py | 3 +- src/mistralai/models/modelcapabilities.py | 4 +- src/mistralai/models/modellist.py | 4 +- src/mistralai/models/responseformat.py | 4 +- ...retrieve_model_v1_models_model_id_getop.py | 4 +- 
src/mistralai/models/retrievefileout.py | 31 +- src/mistralai/models/sampletype.py | 8 +- src/mistralai/models/security.py | 4 +- src/mistralai/models/source.py | 5 +- src/mistralai/models/systemmessage.py | 12 +- src/mistralai/models/textchunk.py | 14 +- src/mistralai/models/tool.py | 4 +- src/mistralai/models/toolcall.py | 4 +- src/mistralai/models/toolchoice.py | 4 +- src/mistralai/models/toolmessage.py | 4 +- src/mistralai/models/trainingfile.py | 4 +- src/mistralai/models/trainingparameters.py | 9 +- src/mistralai/models/trainingparametersin.py | 9 +- src/mistralai/models/unarchiveftmodelout.py | 16 +- src/mistralai/models/updateftmodelin.py | 3 +- src/mistralai/models/uploadfileout.py | 28 +- src/mistralai/models/usageinfo.py | 2 +- src/mistralai/models/usermessage.py | 41 +- src/mistralai/models/validationerror.py | 3 +- src/mistralai/models/wandbintegration.py | 16 +- src/mistralai/models/wandbintegrationout.py | 18 +- src/mistralai/models_.py | 72 +- src/mistralai/sdk.py | 7 + src/mistralai/sdkconfiguration.py | 14 +- src/mistralai/utils/__init__.py | 8 + src/mistralai/utils/annotations.py | 15 +- src/mistralai/utils/serializers.py | 25 + 291 files changed, 5416 insertions(+), 1671 deletions(-) create mode 100644 docs/models/apiendpoint.md create mode 100644 docs/models/assistantmessagecontent.md create mode 100644 docs/models/batcherror.md create mode 100644 docs/models/batchjobin.md create mode 100644 docs/models/batchjobout.md create mode 100644 docs/models/batchjoboutobject.md create mode 100644 docs/models/batchjobsout.md create mode 100644 docs/models/batchjobsoutobject.md create mode 100644 docs/models/batchjobstatus.md create mode 100644 docs/models/chatclassificationrequest.md create mode 100644 docs/models/chatclassificationrequestinputs.md create mode 100644 docs/models/classificationobject.md create mode 100644 docs/models/classificationrequest.md create mode 100644 docs/models/classificationrequestinputs.md create mode 100644 
docs/models/classificationresponse.md rename docs/models/{filesapiroutesuploadfilepurpose.md => filepurpose.md} (50%) create mode 100644 docs/models/filesapiroutesdownloadfilerequest.md create mode 100644 docs/models/filesapirouteslistfilesrequest.md delete mode 100644 docs/models/fileschemapurpose.md create mode 100644 docs/models/ftmodelcardtype.md create mode 100644 docs/models/jobsapiroutesbatchcancelbatchjobrequest.md create mode 100644 docs/models/jobsapiroutesbatchgetbatchjobrequest.md create mode 100644 docs/models/jobsapiroutesbatchgetbatchjobsrequest.md create mode 100644 docs/models/one.md delete mode 100644 docs/models/purpose.md delete mode 100644 docs/models/retrievefileoutpurpose.md create mode 100644 docs/models/systemmessagecontent.md create mode 100644 docs/models/two.md create mode 100644 docs/models/wandbintegrationouttype.md create mode 100644 docs/sdks/batch/README.md create mode 100644 docs/sdks/classifiers/README.md create mode 100644 docs/sdks/mistraljobs/README.md create mode 100644 packages/mistralai_azure/docs/models/assistantmessagecontent.md create mode 100644 packages/mistralai_azure/docs/models/systemmessagecontent.md delete mode 100755 packages/mistralai_azure/scripts/compile.sh create mode 100644 packages/mistralai_azure/src/mistralai_azure/_version.py create mode 100644 packages/mistralai_gcp/docs/models/assistantmessagecontent.md create mode 100644 packages/mistralai_gcp/docs/models/systemmessagecontent.md delete mode 100755 packages/mistralai_gcp/scripts/compile.sh create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_version.py delete mode 100755 scripts/compile.sh create mode 100644 src/mistralai/_version.py create mode 100644 src/mistralai/batch.py create mode 100644 src/mistralai/classifiers.py create mode 100644 src/mistralai/mistral_jobs.py create mode 100644 src/mistralai/models/apiendpoint.py create mode 100644 src/mistralai/models/batcherror.py create mode 100644 src/mistralai/models/batchjobin.py create mode 
100644 src/mistralai/models/batchjobout.py create mode 100644 src/mistralai/models/batchjobsout.py create mode 100644 src/mistralai/models/batchjobstatus.py create mode 100644 src/mistralai/models/chatclassificationrequest.py create mode 100644 src/mistralai/models/classificationobject.py create mode 100644 src/mistralai/models/classificationrequest.py create mode 100644 src/mistralai/models/classificationresponse.py create mode 100644 src/mistralai/models/filepurpose.py create mode 100644 src/mistralai/models/files_api_routes_download_fileop.py create mode 100644 src/mistralai/models/files_api_routes_list_filesop.py create mode 100644 src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py create mode 100644 src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py create mode 100644 src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index b23077ed..513be40d 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,44 +1,48 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: e75ca54601920b2770d9a559b299d272 + docChecksum: ee15d853ecc28d415d6b33191893a6ff docVersion: 0.0.2 - speakeasyVersion: 1.398.0 - generationVersion: 2.415.8 - releaseVersion: 1.1.0 - configChecksum: 49094e0f156d020bd164f8b4bd41e97b + speakeasyVersion: 1.434.4 + generationVersion: 2.452.0 + releaseVersion: 1.2.0 + configChecksum: 17ae764aa509274d1cf2d75af5bf6abb repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true features: python: additionalDependencies: 1.0.0 - constsAndDefaults: 1.0.2 - core: 5.5.3 + constsAndDefaults: 1.0.4 + core: 5.6.4 defaultEnabledRetries: 0.2.0 + downloadStreams: 1.0.1 enumUnions: 0.1.0 - envVarSecurityUsage: 0.3.1 + envVarSecurityUsage: 0.3.2 examples: 3.0.0 flatRequests: 1.0.1 - 
flattening: 3.0.0 + flattening: 3.1.0 globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.0.0 + methodArguments: 1.0.2 multipartFileContentType: 1.0.0 nameOverrides: 3.0.0 nullables: 1.0.0 openEnums: 1.0.0 - responseFormat: 1.0.0 + responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.0.0 - serverEvents: 1.0.2 + serverEvents: 1.0.4 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.2 + tests: 1.6.0 + unions: 3.0.3 uploadStreams: 1.0.0 generatedFiles: - .gitattributes + - .python-version - .vscode/settings.json - USAGE.md - docs/models/agentscompletionrequest.md @@ -49,12 +53,23 @@ generatedFiles: - docs/models/agentscompletionstreamrequestmessages.md - docs/models/agentscompletionstreamrequeststop.md - docs/models/agentscompletionstreamrequesttoolchoice.md + - docs/models/apiendpoint.md - docs/models/archiveftmodelout.md - docs/models/archiveftmodeloutobject.md - docs/models/arguments.md - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md - docs/models/assistantmessagerole.md - docs/models/basemodelcard.md + - docs/models/batcherror.md + - docs/models/batchjobin.md + - docs/models/batchjobout.md + - docs/models/batchjoboutobject.md + - docs/models/batchjobsout.md + - docs/models/batchjobsoutobject.md + - docs/models/batchjobstatus.md + - docs/models/chatclassificationrequest.md + - docs/models/chatclassificationrequestinputs.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionrequest.md - docs/models/chatcompletionrequesttoolchoice.md @@ -64,6 +79,10 @@ generatedFiles: - docs/models/chatcompletionstreamrequeststop.md - docs/models/chatcompletionstreamrequesttoolchoice.md - docs/models/checkpointout.md + - docs/models/classificationobject.md + - docs/models/classificationrequest.md + - docs/models/classificationrequestinputs.md + - docs/models/classificationresponse.md - docs/models/completionchunk.md - docs/models/completionevent.md - 
docs/models/completionresponsestreamchoice.md @@ -85,12 +104,13 @@ generatedFiles: - docs/models/embeddingresponsedata.md - docs/models/eventout.md - docs/models/file.md + - docs/models/filepurpose.md - docs/models/filesapiroutesdeletefilerequest.md + - docs/models/filesapiroutesdownloadfilerequest.md + - docs/models/filesapirouteslistfilesrequest.md - docs/models/filesapiroutesretrievefilerequest.md - docs/models/filesapiroutesuploadfilemultipartbodyparams.md - - docs/models/filesapiroutesuploadfilepurpose.md - docs/models/fileschema.md - - docs/models/fileschemapurpose.md - docs/models/fimcompletionrequest.md - docs/models/fimcompletionrequeststop.md - docs/models/fimcompletionresponse.md @@ -100,6 +120,7 @@ generatedFiles: - docs/models/finishreason.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md + - docs/models/ftmodelcardtype.md - docs/models/ftmodelout.md - docs/models/ftmodeloutobject.md - docs/models/function.md @@ -121,6 +142,9 @@ generatedFiles: - docs/models/jobinrepositories.md - docs/models/jobmetadataout.md - docs/models/jobout.md + - docs/models/jobsapiroutesbatchcancelbatchjobrequest.md + - docs/models/jobsapiroutesbatchgetbatchjobrequest.md + - docs/models/jobsapiroutesbatchgetbatchjobsrequest.md - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md @@ -140,13 +164,12 @@ generatedFiles: - docs/models/modelcapabilities.md - docs/models/modellist.md - docs/models/object.md - - docs/models/purpose.md + - docs/models/one.md - docs/models/queryparamstatus.md - docs/models/repositories.md - docs/models/responseformat.md - docs/models/responseformats.md - docs/models/retrievefileout.md - - docs/models/retrievefileoutpurpose.md - docs/models/retrievemodelv1modelsmodelidgetrequest.md - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md - 
docs/models/role.md @@ -156,6 +179,7 @@ generatedFiles: - docs/models/status.md - docs/models/stop.md - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md - docs/models/textchunk.md - docs/models/textchunktype.md - docs/models/tool.md @@ -168,6 +192,7 @@ generatedFiles: - docs/models/trainingfile.md - docs/models/trainingparameters.md - docs/models/trainingparametersin.md + - docs/models/two.md - docs/models/type.md - docs/models/unarchiveftmodelout.md - docs/models/unarchiveftmodeloutobject.md @@ -181,47 +206,63 @@ generatedFiles: - docs/models/validationerror.md - docs/models/wandbintegration.md - docs/models/wandbintegrationout.md + - docs/models/wandbintegrationouttype.md - docs/models/wandbintegrationtype.md - docs/sdks/agents/README.md + - docs/sdks/batch/README.md - docs/sdks/chat/README.md + - docs/sdks/classifiers/README.md - docs/sdks/embeddings/README.md - docs/sdks/files/README.md - docs/sdks/fim/README.md - docs/sdks/finetuning/README.md - docs/sdks/jobs/README.md - docs/sdks/mistral/README.md + - docs/sdks/mistraljobs/README.md - docs/sdks/models/README.md - poetry.toml - py.typed - pylintrc - - pyproject.toml - - scripts/compile.sh - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai/__init__.py - src/mistralai/_hooks/__init__.py - src/mistralai/_hooks/sdkhooks.py - src/mistralai/_hooks/types.py + - src/mistralai/_version.py - src/mistralai/agents.py - src/mistralai/basesdk.py + - src/mistralai/batch.py - src/mistralai/chat.py + - src/mistralai/classifiers.py - src/mistralai/embeddings.py - src/mistralai/files.py - src/mistralai/fim.py - src/mistralai/fine_tuning.py - src/mistralai/httpclient.py - src/mistralai/jobs.py + - src/mistralai/mistral_jobs.py - src/mistralai/models/__init__.py - src/mistralai/models/agentscompletionrequest.py - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/apiendpoint.py - src/mistralai/models/archiveftmodelout.py - src/mistralai/models/assistantmessage.py - 
src/mistralai/models/basemodelcard.py + - src/mistralai/models/batcherror.py + - src/mistralai/models/batchjobin.py + - src/mistralai/models/batchjobout.py + - src/mistralai/models/batchjobsout.py + - src/mistralai/models/batchjobstatus.py + - src/mistralai/models/chatclassificationrequest.py - src/mistralai/models/chatcompletionchoice.py - src/mistralai/models/chatcompletionrequest.py - src/mistralai/models/chatcompletionresponse.py - src/mistralai/models/chatcompletionstreamrequest.py - src/mistralai/models/checkpointout.py + - src/mistralai/models/classificationobject.py + - src/mistralai/models/classificationrequest.py + - src/mistralai/models/classificationresponse.py - src/mistralai/models/completionchunk.py - src/mistralai/models/completionevent.py - src/mistralai/models/completionresponsestreamchoice.py @@ -235,7 +276,10 @@ generatedFiles: - src/mistralai/models/embeddingresponse.py - src/mistralai/models/embeddingresponsedata.py - src/mistralai/models/eventout.py + - src/mistralai/models/filepurpose.py - src/mistralai/models/files_api_routes_delete_fileop.py + - src/mistralai/models/files_api_routes_download_fileop.py + - src/mistralai/models/files_api_routes_list_filesop.py - src/mistralai/models/files_api_routes_retrieve_fileop.py - src/mistralai/models/files_api_routes_upload_fileop.py - src/mistralai/models/fileschema.py @@ -257,6 +301,9 @@ generatedFiles: - src/mistralai/models/jobin.py - src/mistralai/models/jobmetadataout.py - src/mistralai/models/jobout.py + - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -370,12 +417,12 @@ examples: multipart/form-data: 
{"file": {}} responses: "200": - application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "sample_type": "pretrain", "source": "upload"} + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "pretrain", "source": "upload"} files_api_routes_list_files: speakeasy-default-files-api-routes-list-files: responses: "200": - application/json: {"data": [], "object": ""} + application/json: {"data": [], "object": "", "total": 768578} files_api_routes_retrieve_file: speakeasy-default-files-api-routes-retrieve-file: parameters: @@ -383,7 +430,7 @@ examples: file_id: "" responses: "200": - application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "sample_type": "pretrain", "source": "repository"} + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "pretrain", "source": "repository", "deleted": false} files_api_routes_delete_file: speakeasy-default-files-api-routes-delete-file: parameters: @@ -403,7 +450,7 @@ examples: application/json: {"model": "codestral-latest"} responses: "200": - application/json: {"id": "7ad642c1-fc6f-4e07-a41b-cdd89dc7fa50", "auto_start": true, "model": "open-mistral-nemo", "status": "QUEUED", "job_type": "", "created_at": 519028, "modified_at": 230313, "training_files": []} + application/json: {"id": "a621cf02-1cd9-4cf5-8403-315211a509a3", "auto_start": false, "model": "open-mistral-7b", "status": "FAILED", "job_type": "", "created_at": 550483, "modified_at": 906537, "training_files": ["74c2becc-3769-4177-b5e0-24985613de0e"]} 
jobs_api_routes_fine_tuning_get_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: parameters: @@ -431,7 +478,7 @@ examples: chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": ""}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} @@ -439,7 +486,7 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": []}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: @@ -455,7 +502,7 @@ examples: agents_completion_v1_agents_completions_post: speakeasy-default-agents-completion-v1-agents-completions-post: requestBody: - application/json: {"messages": [{"content": ""}], "agent_id": ""} + application/json: {"messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "agent_id": ""} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} @@ -463,12 +510,61 @@ examples: stream_agents: speakeasy-default-stream-agents: requestBody: - application/json: {"messages": [{"content": []}], "agent_id": ""} + application/json: {"messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "agent_id": ""} embeddings_v1_embeddings_post: speakeasy-default-embeddings-v1-embeddings-post: requestBody: - application/json: {"input": "", "model": "Wrangler"} + application/json: {"input": ["Embed this sentence.", "As well as this one."], "model": "Wrangler"} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}]} "422": {} + files_api_routes_download_file: + speakeasy-default-files-api-routes-download-file: + parameters: + path: + file_id: "" + jobs_api_routes_batch_get_batch_jobs: + speakeasy-default-jobs-api-routes-batch-get-batch-jobs: + responses: + "200": + application/json: {"total": 768578} + jobs_api_routes_batch_create_batch_job: + speakeasy-default-jobs-api-routes-batch-create-batch-job: + requestBody: + application/json: {"input_files": ["a621cf02-1cd9-4cf5-8403-315211a509a3"], "endpoint": "/v1/fim/completions", "model": "2"} + responses: + "200": + application/json: {"id": "", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": ""}, {"message": ""}, {"message": ""}], "status": "RUNNING", "created_at": 
770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} + jobs_api_routes_batch_get_batch_job: + speakeasy-default-jobs-api-routes-batch-get-batch-job: + parameters: + path: + job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" + responses: + "200": + application/json: {"id": "", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": ""}, {"message": ""}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} + jobs_api_routes_batch_cancel_batch_job: + speakeasy-default-jobs-api-routes-batch-cancel-batch-job: + parameters: + path: + job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" + responses: + "200": + application/json: {"id": "", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": ""}, {"message": ""}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} + moderations_v1_moderations_post: + speakeasy-default-moderations-v1-moderations-post: + requestBody: + application/json: {"input": [""]} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} + "422": {} + moderations_chat_v1_chat_moderations_post: + speakeasy-default-moderations-chat-v1-chat-moderations-post: + requestBody: + application/json: {"input": [[{"content": ""}, {"content": []}, {"content": ""}], []], "model": "V90"} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} + "422": {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 8fd69aba..5a4f1a05 100644 --- a/.speakeasy/gen.yaml +++ 
b/.speakeasy/gen.yaml @@ -11,8 +11,9 @@ generation: requestResponseComponentNamesFeb2024: true auth: oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false python: - version: 1.1.0 + version: 1.2.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -23,8 +24,11 @@ python: description: Python Client SDK for the Mistral AI API. enumFormat: union envVarPrefix: MISTRAL + fixFlags: + responseRequiredSep2024: false flattenGlobalSecurity: true flattenRequests: true + flatteningOrder: parameters-first imports: option: openapi paths: @@ -34,7 +38,7 @@ python: shared: "" webhooks: "" inputModelSuffix: input - maxMethodParams: 4 + maxMethodParams: 15 methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 44dff84f..46a7d2e7 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,58 +1,60 @@ -speakeasyVersion: 1.398.0 +speakeasyVersion: 1.434.4 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:4e9539e6903e630aa69e48af190a24d3702f6038c7b7a92472c7942597c2a6f5 - sourceBlobDigest: sha256:3ace0709471c04a040c9763097fef0081d6c21a1be0b694dfe5991c045b76d18 + sourceRevisionDigest: sha256:8fda8235e30128cc8e1c4e1b828316551d03b584568789f262dc287b81d584ee + sourceBlobDigest: sha256:3c039e1f8a2230a86b0e1acec6224f6b8d6f181fb222b6b3b39d38b52075a8ec tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:3047ad3ff8797fded89618b375d1398d48924a3a5f9ea1000c4284a110567c43 - sourceBlobDigest: sha256:02bbcef310f965d7ad089fb46d57b39f45b47cbc8f5cf90f728db03e960bdbca + sourceRevisionDigest: sha256:b2ce8e0e63674ea7ccfa3a75ff231bb97a39748331bcc0a3629f29c158f5b31e + sourceBlobDigest: sha256:a895adbf903776492b28daa3dd8c624f509decbbfe9ca6cda6510a33226604be tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: 
sha256:8e2d62b2242960d958406ba266eda41a013c1459dbac67195f8e2662c04cd05f - sourceBlobDigest: sha256:9fbff48fe087e3b2f950b1cfa52b6a25143982741dc7e6750dd14d9c5bed4041 + sourceRevisionDigest: sha256:e658442ebfc83351cbb7873fb17b03f07ff9edebd8eddfce5577e2c5c7bfafce + sourceBlobDigest: sha256:559403eaaa97c021eaf0022adddb1066694d879a946c87057e942806d5a2a2a2 tags: - latest targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:4e9539e6903e630aa69e48af190a24d3702f6038c7b7a92472c7942597c2a6f5 - sourceBlobDigest: sha256:3ace0709471c04a040c9763097fef0081d6c21a1be0b694dfe5991c045b76d18 + sourceRevisionDigest: sha256:8fda8235e30128cc8e1c4e1b828316551d03b584568789f262dc287b81d584ee + sourceBlobDigest: sha256:3c039e1f8a2230a86b0e1acec6224f6b8d6f181fb222b6b3b39d38b52075a8ec mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:3047ad3ff8797fded89618b375d1398d48924a3a5f9ea1000c4284a110567c43 - sourceBlobDigest: sha256:02bbcef310f965d7ad089fb46d57b39f45b47cbc8f5cf90f728db03e960bdbca + sourceRevisionDigest: sha256:b2ce8e0e63674ea7ccfa3a75ff231bb97a39748331bcc0a3629f29c158f5b31e + sourceBlobDigest: sha256:a895adbf903776492b28daa3dd8c624f509decbbfe9ca6cda6510a33226604be mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:8e2d62b2242960d958406ba266eda41a013c1459dbac67195f8e2662c04cd05f - sourceBlobDigest: sha256:9fbff48fe087e3b2f950b1cfa52b6a25143982741dc7e6750dd14d9c5bed4041 + sourceRevisionDigest: sha256:e658442ebfc83351cbb7873fb17b03f07ff9edebd8eddfce5577e2c5c7bfafce + sourceBlobDigest: sha256:559403eaaa97c021eaf0022adddb1066694d879a946c87057e942806d5a2a2a2 + codeSamplesNamespace: mistral-openapi-code-samples + codeSamplesRevisionDigest: sha256:e56faedc510d1c011d19e5fbbaa9d41917ffd6c22833b0795a61aa6da1cbca9b workflow: workflowVersion: 1.0.0 speakeasyVersion: latest sources: 
mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main targets: mistralai-azure-sdk: target: python @@ -61,6 +63,10 @@ workflow: publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure-code-samples + blocking: false mistralai-gcp-sdk: target: python source: mistral-google-cloud-source @@ -68,9 +74,17 @@ workflow: publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud-code-samples + blocking: false mistralai-sdk: target: python source: mistral-openapi publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-code-samples + blocking: false diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml 
index 6ef130ae..164d3995 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -24,6 +24,10 @@ targets: publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure-code-samples + blocking: false mistralai-gcp-sdk: target: python source: mistral-google-cloud-source @@ -31,9 +35,17 @@ targets: publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud-code-samples + blocking: false mistralai-sdk: target: python source: mistral-openapi publish: pypi: token: $pypi_token + codeSamples: + registry: + location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-code-samples + blocking: false diff --git a/README.md b/README.md index 0c63e5ed..a73c1333 100644 --- a/README.md +++ b/README.md @@ -180,7 +180,8 @@ s = Mistral( res = s.agents.complete(messages=[ { - "content": "", + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", }, ], agent_id="") @@ -212,6 +213,53 @@ async def main(): # handle response pass +asyncio.run(main()) +``` + +### Create Embedding Request + +This example shows how to create embedding request. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.embeddings.create(inputs=[ + "Embed this sentence.", + "As well as this one.", +], model="Wrangler") + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asychronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): + s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) + res = await s.embeddings.create_async(inputs=[ + "Embed this sentence.", + "As well as this one.", + ], model="Wrangler") + if res is not None: + # handle response + pass + asyncio.run(main()) ``` @@ -322,11 +370,26 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [complete](docs/sdks/agents/README.md#complete) - Agents Completion * [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion +### [batch](docs/sdks/batch/README.md) + + +#### [batch.jobs](docs/sdks/mistraljobs/README.md) + +* [list](docs/sdks/mistraljobs/README.md#list) - Get Batch Jobs +* [create](docs/sdks/mistraljobs/README.md#create) - Create Batch Job +* [get](docs/sdks/mistraljobs/README.md#get) - Get Batch Job +* [cancel](docs/sdks/mistraljobs/README.md#cancel) - Cancel Batch Job + ### [chat](docs/sdks/chat/README.md) * [complete](docs/sdks/chat/README.md#complete) - Chat Completion * [stream](docs/sdks/chat/README.md#stream) - Stream chat completion +### [classifiers](docs/sdks/classifiers/README.md) + +* [moderate](docs/sdks/classifiers/README.md#moderate) - Moderations +* [moderate_chat](docs/sdks/classifiers/README.md#moderate_chat) - Moderations Chat + ### [embeddings](docs/sdks/embeddings/README.md) * [create](docs/sdks/embeddings/README.md#create) - Embeddings @@ -337,6 +400,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [list](docs/sdks/files/README.md#list) - List Files * [retrieve](docs/sdks/files/README.md#retrieve) - Retrieve File * [delete](docs/sdks/files/README.md#delete) - Delete File +* [download](docs/sdks/files/README.md#download) - Download File ### [fim](docs/sdks/fim/README.md) @@ -479,12 +543,23 @@ if res is not None: 
## Error Handling -Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an exception. + +By default, an API error will raise a models.SDKError exception, which has the following properties: + +| Property | Type | Description | +|-----------------|------------------|-----------------------| +| `.status_code` | *int* | The HTTP status code | +| `.message` | *str* | The error message | +| `.raw_response` | *httpx.Response* | The raw HTTP response | +| `.body` | *str* | The response content | + +When custom error responses are specified for an operation, the SDK may also raise their associated exceptions. You can refer to respective *Errors* tables in SDK docs for more details on possible exception types for each operation. For example, the `list_async` method may raise the following exceptions: -| Error Object | Status Code | Content Type | -| -------------------------- | ----------- | ---------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | ### Example @@ -520,9 +595,9 @@ except models.SDKError as e: You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. 
This table lists the names associated with the available servers: -| Name | Server | Variables | -| ------ | ------------------------ | --------- | -| `prod` | `https://api.mistral.ai` | None | +| Name | Server | Variables | +| ----- | ------ | --------- | +| `eu` | `https://api.mistral.ai` | None | #### Example @@ -531,7 +606,7 @@ from mistralai import Mistral import os s = Mistral( - server="prod", + server="eu", api_key=os.getenv("MISTRAL_API_KEY", ""), ) @@ -653,9 +728,9 @@ s = Mistral(async_client=CustomClient(httpx.AsyncClient())) This SDK supports the following security scheme globally: -| Name | Type | Scheme | Environment Variable | -| --------- | ---- | ----------- | -------------------- | -| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | +| Name | Type | Scheme | Environment Variable | +| -------------------- | -------------------- | -------------------- | -------------------- | +| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: ```python diff --git a/RELEASES.md b/RELEASES.md index fee21dfb..b504c7f8 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -48,4 +48,14 @@ Based on: ### Generated - [python v1.1.0] . ### Releases -- [PyPI v1.1.0] https://pypi.org/project/mistralai/1.1.0 - . \ No newline at end of file +- [PyPI v1.1.0] https://pypi.org/project/mistralai/1.1.0 - . + +## 2024-11-07 19:52:56 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.434.3 (2.452.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.0] . +### Releases +- [PyPI v1.2.0] https://pypi.org/project/mistralai/1.2.0 - . 
\ No newline at end of file diff --git a/USAGE.md b/USAGE.md index 6a6c46b5..7d9d2ce2 100644 --- a/USAGE.md +++ b/USAGE.md @@ -13,7 +13,10 @@ s = Mistral( ) res = s.chat.complete(model="mistral-small-latest", messages=[ - + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, ]) if res is not None: @@ -35,7 +38,10 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) res = await s.chat.complete_async(model="mistral-small-latest", messages=[ - + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, ]) if res is not None: # handle response @@ -106,7 +112,8 @@ s = Mistral( res = s.agents.complete(messages=[ { - "content": "", + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", }, ], agent_id="") @@ -130,13 +137,61 @@ async def main(): ) res = await s.agents.complete_async(messages=[ { - "content": "", + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", }, ], agent_id="") if res is not None: # handle response pass +asyncio.run(main()) +``` + +### Create Embedding Request + +This example shows how to create embedding request. + +```python +# Synchronous Example +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.embeddings.create(inputs=[ + "Embed this sentence.", + "As well as this one.", +], model="Wrangler") + +if res is not None: + # handle response + pass +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +from mistralai import Mistral +import os + +async def main(): +    s = Mistral( +        api_key=os.getenv("MISTRAL_API_KEY", ""), +    ) +    res = await s.embeddings.create_async(inputs=[ +        "Embed this sentence.", +        "As well as this one.", +    ], model="Wrangler") +    if res is not None: +        # handle response +        pass + asyncio.run(main()) ``` \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 7f6c4283..c4259f2b 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -8,10 +8,12 @@ | `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file diff --git a/docs/models/agentscompletionrequestmessages.md b/docs/models/agentscompletionrequestmessages.md index 946ef460..d6a1e691 100644 --- a/docs/models/agentscompletionrequestmessages.md +++ b/docs/models/agentscompletionrequestmessages.md @@ -9,6 +9,12 @@ value: models.AssistantMessage = /* values here */ ``` +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + ### `models.ToolMessage` ```python diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index d849a95d..21e19b56 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -3,15 +3,17 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequestmessages.md b/docs/models/agentscompletionstreamrequestmessages.md index d8cf99e2..1bc736af 100644 --- a/docs/models/agentscompletionstreamrequestmessages.md +++ b/docs/models/agentscompletionstreamrequestmessages.md @@ -9,6 +9,12 @@ value: models.AssistantMessage = /* values here */ ``` +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + ### `models.ToolMessage` ```python diff --git a/docs/models/apiendpoint.md b/docs/models/apiendpoint.md new file mode 100644 index 00000000..5dfa68ae --- /dev/null +++ b/docs/models/apiendpoint.md @@ -0,0 +1,11 @@ +# APIEndpoint + + +## Values + +| Name | Value | +| -------------------------- | -------------------------- | +| `ROOT_V1_CHAT_COMPLETIONS` | /v1/chat/completions | +| `ROOT_V1_EMBEDDINGS` | /v1/embeddings | +| `ROOT_V1_FIM_COMPLETIONS` | /v1/fim/completions | +| `ROOT_V1_MODERATIONS` | /v1/moderations | \ No newline at end of file diff --git a/docs/models/assistantmessage.md b/docs/models/assistantmessage.md index 0c36cde9..53f1cc76 100644 --- a/docs/models/assistantmessage.md +++ b/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/assistantmessagecontent.md b/docs/models/assistantmessagecontent.md new file mode 100644 index 00000000..047b7cf9 --- /dev/null +++ b/docs/models/assistantmessagecontent.md @@ -0,0 +1,17 @@ +# AssistantMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: 
List[models.ContentChunk] = /* values here */ +``` + diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index 1c10ae31..0bdbb65f 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -15,4 +15,5 @@ | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batcherror.md b/docs/models/batcherror.md new file mode 100644 index 00000000..95016cdc --- /dev/null +++ b/docs/models/batcherror.md @@ -0,0 +1,9 @@ +# BatchError + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `message` | *str* | :heavy_check_mark: | N/A | +| `count` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobin.md b/docs/models/batchjobin.md new file mode 100644 index 00000000..5203a520 --- /dev/null +++ b/docs/models/batchjobin.md @@ -0,0 +1,12 @@ +# BatchJobIn + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `input_files` | List[*str*] | :heavy_check_mark: | N/A | +| `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | 
+| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobout.md b/docs/models/batchjobout.md new file mode 100644 index 00000000..d79d9a27 --- /dev/null +++ b/docs/models/batchjobout.md @@ -0,0 +1,24 @@ +# BatchJobOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `input_files` | List[*str*] | :heavy_check_mark: | N/A | +| `endpoint` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | +| `status` | [models.BatchJobStatus](../models/batchjobstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `total_requests` | *int* | :heavy_check_mark: | N/A | +| `completed_requests` | *int* | :heavy_check_mark: | N/A | +| `succeeded_requests` | *int* | :heavy_check_mark: | N/A | +| `failed_requests` | *int* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.BatchJobOutObject]](../models/batchjoboutobject.md) | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `started_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `completed_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjoboutobject.md b/docs/models/batchjoboutobject.md new file mode 100644 index 00000000..64ae8965 --- /dev/null +++ b/docs/models/batchjoboutobject.md @@ -0,0 
+1,8 @@ +# BatchJobOutObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `BATCH` | batch | \ No newline at end of file diff --git a/docs/models/batchjobsout.md b/docs/models/batchjobsout.md new file mode 100644 index 00000000..3104118c --- /dev/null +++ b/docs/models/batchjobsout.md @@ -0,0 +1,10 @@ +# BatchJobsOut + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `total` | *int* | :heavy_check_mark: | N/A | +| `data` | List[[models.BatchJobOut](../models/batchjobout.md)] | :heavy_minus_sign: | N/A | +| `object` | [Optional[models.BatchJobsOutObject]](../models/batchjobsoutobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobsoutobject.md b/docs/models/batchjobsoutobject.md new file mode 100644 index 00000000..d4bf9f65 --- /dev/null +++ b/docs/models/batchjobsoutobject.md @@ -0,0 +1,8 @@ +# BatchJobsOutObject + + +## Values + +| Name | Value | +| ------ | ------ | +| `LIST` | list | \ No newline at end of file diff --git a/docs/models/batchjobstatus.md b/docs/models/batchjobstatus.md new file mode 100644 index 00000000..64617b31 --- /dev/null +++ b/docs/models/batchjobstatus.md @@ -0,0 +1,14 @@ +# BatchJobStatus + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `RUNNING` | RUNNING | +| `SUCCESS` | SUCCESS | +| `FAILED` | FAILED | +| `TIMEOUT_EXCEEDED` | TIMEOUT_EXCEEDED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | +| `CANCELLED` | CANCELLED | \ No newline at end of file diff --git a/docs/models/chatclassificationrequest.md b/docs/models/chatclassificationrequest.md new file mode 100644 index 00000000..990408b1 --- 
/dev/null +++ b/docs/models/chatclassificationrequest.md @@ -0,0 +1,9 @@ +# ChatClassificationRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `inputs` | [models.ChatClassificationRequestInputs](../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatclassificationrequestinputs.md b/docs/models/chatclassificationrequestinputs.md new file mode 100644 index 00000000..290c9ad2 --- /dev/null +++ b/docs/models/chatclassificationrequestinputs.md @@ -0,0 +1,19 @@ +# ChatClassificationRequestInputs + +Chat to classify + + +## Supported Types + +### `List[models.One]` + +```python +value: List[models.One] = /* values here */ +``` + +### `List[List[models.Two]]` + +```python +value: List[List[models.Two]] = /* values here */ +``` + diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index 84197600..d458081d 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -3,18 +3,20 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index fd1fc484..63865c11 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -3,18 +3,20 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
| mistral-small-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/classificationobject.md b/docs/models/classificationobject.md new file mode 100644 index 00000000..68f2e2b2 --- /dev/null +++ b/docs/models/classificationobject.md @@ -0,0 +1,9 @@ +# ClassificationObject + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | +| `categories` | Dict[str, *bool*] | :heavy_minus_sign: | Classifier result thresholded | +| `category_scores` | Dict[str, *float*] | :heavy_minus_sign: | Classifier result | \ No newline at end of file diff --git a/docs/models/classificationrequest.md b/docs/models/classificationrequest.md new file mode 100644 index 00000000..e1556684 --- /dev/null +++ b/docs/models/classificationrequest.md @@ -0,0 +1,9 @@ +# ClassificationRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------ | +| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classificationrequestinputs.md b/docs/models/classificationrequestinputs.md new file mode 100644 index 00000000..69d75d11 --- /dev/null +++ b/docs/models/classificationrequestinputs.md @@ -0,0 +1,19 @@ +# ClassificationRequestInputs + +Text to classify. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/classificationresponse.md b/docs/models/classificationresponse.md new file mode 100644 index 00000000..4765ff62 --- /dev/null +++ b/docs/models/classificationresponse.md @@ -0,0 +1,10 @@ +# ClassificationResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | mod-e5cc70bb28c444948073e77776eb30ef | +| `model` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `results` | List[[models.ClassificationObject](../models/classificationobject.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/content.md b/docs/models/content.md index 4cd3cfd5..a833dc2c 100644 --- a/docs/models/content.md +++ b/docs/models/content.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.ContentChunk]` 
```python -value: List[models.TextChunk] = /* values here */ +value: List[models.ContentChunk] = /* values here */ ``` diff --git a/docs/models/deltamessage.md b/docs/models/deltamessage.md index d32f8e10..61deabbf 100644 --- a/docs/models/deltamessage.md +++ b/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 584a8bea..4d215c7b 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- | -| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. 
| -| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | +| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | | \ No newline at end of file diff --git a/docs/models/filesapiroutesuploadfilepurpose.md b/docs/models/filepurpose.md similarity index 50% rename from docs/models/filesapiroutesuploadfilepurpose.md rename to docs/models/filepurpose.md index 164af615..5152aeeb 100644 --- a/docs/models/filesapiroutesuploadfilepurpose.md +++ b/docs/models/filepurpose.md @@ -1,8 +1,9 @@ -# FilesAPIRoutesUploadFilePurpose +# FilePurpose ## Values | Name | Value | | ----------- | ----------- | -| `FINE_TUNE` | fine-tune | \ No newline at end of file +| `FINE_TUNE` | fine-tune | +| `BATCH` | batch | \ No newline at end of file diff --git a/docs/models/filesapiroutesdownloadfilerequest.md b/docs/models/filesapiroutesdownloadfilerequest.md new file mode 100644 index 00000000..8b28cb0e --- /dev/null +++ b/docs/models/filesapiroutesdownloadfilerequest.md @@ -0,0 +1,8 @@ +# FilesAPIRoutesDownloadFileRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filesapirouteslistfilesrequest.md b/docs/models/filesapirouteslistfilesrequest.md new file mode 100644 index 00000000..b28ab3fe --- /dev/null +++ b/docs/models/filesapirouteslistfilesrequest.md @@ -0,0 +1,13 @@ +# FilesAPIRoutesListFilesRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | 
*Optional[int]* | :heavy_minus_sign: | N/A | +| `sample_type` | List[[models.SampleType](../models/sampletype.md)] | :heavy_minus_sign: | N/A | +| `source` | List[[models.Source](../models/source.md)] | :heavy_minus_sign: | N/A | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `purpose` | [OptionalNullable[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md index 2472dccd..41631b28 100644 --- a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md +++ b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | -| `purpose` | [Optional[models.FilesAPIRoutesUploadFilePurpose]](../models/filesapiroutesuploadfilepurpose.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `purpose` | [Optional[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/fileschema.md b/docs/models/fileschema.md index a877bee3..47fa4869 100644 --- a/docs/models/fileschema.md +++ b/docs/models/fileschema.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | -| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | -| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `purpose` | [models.FileSchemaPurpose](../models/fileschemapurpose.md) | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
| fine-tune | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/fileschemapurpose.md b/docs/models/fileschemapurpose.md deleted file mode 100644 index b7ba5113..00000000 --- a/docs/models/fileschemapurpose.md +++ /dev/null @@ -1,10 +0,0 @@ -# FileSchemaPurpose - -The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
- - -## Values - -| Name | Value | -| ----------- | ----------- | -| `FINE_TUNE` | fine-tune | \ No newline at end of file diff --git a/docs/models/fimcompletionrequest.md b/docs/models/fimcompletionrequest.md index b4b024ed..236d2d21 100644 --- a/docs/models/fimcompletionrequest.md +++ b/docs/models/fimcompletionrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequest.md b/docs/models/fimcompletionstreamrequest.md index acffb536..fa635932 100644 --- a/docs/models/fimcompletionstreamrequest.md +++ b/docs/models/fimcompletionstreamrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md index fc633c06..1efeadb2 100644 --- a/docs/models/ftmodelcard.md +++ b/docs/models/ftmodelcard.md @@ -19,5 +19,6 @@ Extra fields for fine-tuned models. | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `type` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.FTModelCardType]](../models/ftmodelcardtype.md) | :heavy_minus_sign: | N/A | | `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcardtype.md b/docs/models/ftmodelcardtype.md new file mode 100644 index 00000000..0b38470b --- /dev/null +++ b/docs/models/ftmodelcardtype.md @@ -0,0 +1,8 @@ +# FTModelCardType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `FINE_TUNED` | fine-tuned | \ No newline at end of file diff --git a/docs/models/httpvalidationerror.md b/docs/models/httpvalidationerror.md index 63892430..712a148c 100644 --- a/docs/models/httpvalidationerror.md +++ b/docs/models/httpvalidationerror.md @@ -1,7 +1,5 @@ # HTTPValidationError -Validation Error - ## Fields diff --git a/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md b/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md new file mode 100644 index 00000000..c19d0241 --- /dev/null +++ b/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesBatchCancelBatchJobRequest + + +## Fields + +| Field | Type | Required | Description | +| 
------------------ | ------------------ | ------------------ | ------------------ | +| `job_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchgetbatchjobrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md new file mode 100644 index 00000000..3930aacd --- /dev/null +++ b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md @@ -0,0 +1,8 @@ +# JobsAPIRoutesBatchGetBatchJobRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `job_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md new file mode 100644 index 00000000..93de090e --- /dev/null +++ b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md @@ -0,0 +1,14 @@ +# JobsAPIRoutesBatchGetBatchJobsRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `status` | [OptionalNullable[models.BatchJobStatus]](../models/batchjobstatus.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/listfilesout.md 
b/docs/models/listfilesout.md index 3694739f..ee544c1b 100644 --- a/docs/models/listfilesout.md +++ b/docs/models/listfilesout.md @@ -6,4 +6,5 @@ | Field | Type | Required | Description | | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | | `data` | List[[models.FileSchema](../models/fileschema.md)] | :heavy_check_mark: | N/A | -| `object` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `object` | *str* | :heavy_check_mark: | N/A | +| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/one.md b/docs/models/one.md new file mode 100644 index 00000000..3de496a6 --- /dev/null +++ b/docs/models/one.md @@ -0,0 +1,29 @@ +# One + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/purpose.md b/docs/models/purpose.md deleted file mode 100644 index 6c795b93..00000000 --- a/docs/models/purpose.md +++ /dev/null @@ -1,10 +0,0 @@ -# Purpose - -The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
- - -## Values - -| Name | Value | -| ----------- | ----------- | -| `FINE_TUNE` | fine-tune | \ No newline at end of file diff --git a/docs/models/retrievefileout.md b/docs/models/retrievefileout.md index 02311777..93aa5026 100644 --- a/docs/models/retrievefileout.md +++ b/docs/models/retrievefileout.md @@ -3,14 +3,15 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | -| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | -| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `purpose` | [models.RetrieveFileOutPurpose](../models/retrievefileoutpurpose.md) | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
| fine-tune | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `deleted` | *bool* | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/retrievefileoutpurpose.md b/docs/models/retrievefileoutpurpose.md deleted file mode 100644 index 8b1df1a8..00000000 --- a/docs/models/retrievefileoutpurpose.md +++ /dev/null @@ -1,10 +0,0 @@ -# RetrieveFileOutPurpose - -The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. 
- - -## Values - -| Name | Value | -| ----------- | ----------- | -| `FINE_TUNE` | fine-tune | \ No newline at end of file diff --git a/docs/models/sampletype.md b/docs/models/sampletype.md index 888fd63d..34a6a012 100644 --- a/docs/models/sampletype.md +++ b/docs/models/sampletype.md @@ -3,7 +3,10 @@ ## Values -| Name | Value | -| ---------- | ---------- | -| `PRETRAIN` | pretrain | -| `INSTRUCT` | instruct | \ No newline at end of file +| Name | Value | +| --------------- | --------------- | +| `PRETRAIN` | pretrain | +| `INSTRUCT` | instruct | +| `BATCH_REQUEST` | batch_request | +| `BATCH_RESULT` | batch_result | +| `BATCH_ERROR` | batch_error | \ No newline at end of file diff --git a/docs/models/source.md b/docs/models/source.md index ef055622..bb1ed612 100644 --- a/docs/models/source.md +++ b/docs/models/source.md @@ -6,4 +6,5 @@ | Name | Value | | ------------ | ------------ | | `UPLOAD` | upload | -| `REPOSITORY` | repository | \ No newline at end of file +| `REPOSITORY` | repository | +| `MISTRAL` | mistral | \ No newline at end of file diff --git a/docs/models/systemmessage.md b/docs/models/systemmessage.md index 7f827984..0dba71c0 100644 --- a/docs/models/systemmessage.md +++ b/docs/models/systemmessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | 
+| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/systemmessagecontent.md b/docs/models/systemmessagecontent.md new file mode 100644 index 00000000..e0d27d9f --- /dev/null +++ b/docs/models/systemmessagecontent.md @@ -0,0 +1,17 @@ +# SystemMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: List[models.TextChunk] = /* values here */ +``` + diff --git a/docs/models/trainingparameters.md b/docs/models/trainingparameters.md index 0a47b615..e56df8e9 100644 --- a/docs/models/trainingparameters.md +++ b/docs/models/trainingparameters.md @@ -10,4 +10,5 @@ | `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/trainingparametersin.md b/docs/models/trainingparametersin.md index 34918ce3..64c31a44 100644 --- a/docs/models/trainingparametersin.md +++ b/docs/models/trainingparametersin.md @@ -12,4 +12,5 @@ The fine-tuning hyperparameter settings used in a fine-tune job. | `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. 
| | `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/two.md b/docs/models/two.md new file mode 100644 index 00000000..59dc2be2 --- /dev/null +++ b/docs/models/two.md @@ -0,0 +1,29 @@ +# Two + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/type.md b/docs/models/type.md index 342c8c7b..239a00f5 100644 --- a/docs/models/type.md +++ b/docs/models/type.md @@ -3,6 +3,6 @@ ## Values -| Name | Value | -| ------- | ------- | -| `WANDB` | wandb | \ No newline at end of file +| Name | Value | +| ------ | ------ | +| `BASE` | base | \ No newline at end of file diff --git a/docs/models/uploadfileout.md b/docs/models/uploadfileout.md index 7a695ba5..c9974436 100644 --- a/docs/models/uploadfileout.md +++ b/docs/models/uploadfileout.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | 
Description | Example | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | -| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | -| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | -| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `purpose` | [models.Purpose](../models/purpose.md) | :heavy_check_mark: | The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now. | fine-tune | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | +| `object` | *str* | :heavy_check_mark: | The object type, which is always "file". 
| file | +| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | +| `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | +| `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | +| `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/usermessage.md b/docs/models/usermessage.md index 3d96f1cd..63b01310 100644 --- a/docs/models/usermessage.md +++ b/docs/models/usermessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of 
file diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationout.md index e7616fc6..b9a3a86d 100644 --- a/docs/models/wandbintegrationout.md +++ b/docs/models/wandbintegrationout.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | -| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | +| `type` | [Optional[models.WandbIntegrationOutType]](../models/wandbintegrationouttype.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. 
| +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationouttype.md b/docs/models/wandbintegrationouttype.md new file mode 100644 index 00000000..5a7533c9 --- /dev/null +++ b/docs/models/wandbintegrationouttype.md @@ -0,0 +1,8 @@ +# WandbIntegrationOutType + + +## Values + +| Name | Value | +| ------- | ------- | +| `WANDB` | wandb | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 279a13fc..3eb946a8 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -26,7 +26,8 @@ s = Mistral( res = s.agents.complete(messages=[ { - "content": "", + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", }, ], agent_id="") @@ -43,13 +44,15 @@ if res is not None: | `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
| | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -58,11 +61,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -80,13 +82,8 @@ s = Mistral( res = s.agents.stream(messages=[ { - "content": [ - { - "image_url": { - "url": "http://possible-veal.org", - }, - }, - ], + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", }, ], agent_id="") @@ -99,19 +96,21 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -119,7 +118,7 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/batch/README.md b/docs/sdks/batch/README.md new file mode 100644 index 00000000..55a9c135 --- /dev/null +++ b/docs/sdks/batch/README.md @@ -0,0 +1,2 @@ +# Batch +(*batch*) \ No newline at end of file diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index d5e85cce..d6f4a768 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -25,7 +25,10 @@ s = Mistral( ) res = s.chat.complete(model="mistral-small-latest", messages=[ - + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, ]) if res is not None: @@ -36,22 +39,24 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -59,11 +64,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -80,7 +84,10 @@ s = Mistral( ) res = s.chat.stream(model="mistral-small-latest", messages=[ - + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, ]) if res is not None: @@ -92,22 +99,24 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
| mistral-small-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -115,7 +124,7 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md new file mode 100644 index 00000000..7e48638a --- /dev/null +++ b/docs/sdks/classifiers/README.md @@ -0,0 +1,101 @@ +# Classifiers +(*classifiers*) + +## Overview + +Classifiers API. 
+ +### Available Operations + +* [moderate](#moderate) - Moderations +* [moderate_chat](#moderate_chat) - Moderations Chat + +## moderate + +Moderations + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.classifiers.moderate(inputs=[ + "", +]) + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ClassificationResponse](../../models/classificationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## moderate_chat + +Moderations Chat + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.classifiers.moderate_chat(inputs=[ + [ + { + "content": "", + }, + ], +], model="V90") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | +| `inputs` | [models.ChatClassificationRequestInputs](../../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ClassificationResponse](../../models/classificationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index ae270866..9f47e703 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -23,7 +23,10 @@ s = Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) -res = s.embeddings.create(inputs="", model="Wrangler") +res = s.embeddings.create(inputs=[ + "Embed this sentence.", + "As well as this one.", +], model="Wrangler") if res is not None: # handle response @@ -33,12 +36,12 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | -| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | +| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -46,7 +49,7 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 41ed9611..fc5784a4 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -11,6 +11,7 @@ Files API * [list](#list) - List Files * [retrieve](#retrieve) - Retrieve File * [delete](#delete) - Delete File +* [download](#download) - Download File ## upload @@ -46,6 +47,7 @@ if res is not None: | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `file` | [models.File](../../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | +| `purpose` | [Optional[models.FilePurpose]](../../models/filepurpose.md) | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -54,10 +56,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## list @@ -85,6 +86,12 @@ if res is not None: | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `sample_type` | List[[models.SampleType](../../models/sampletype.md)] | :heavy_minus_sign: | N/A | +| `source` | List[[models.Source](../../models/source.md)] | :heavy_minus_sign: | N/A | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `purpose` | [OptionalNullable[models.FilePurpose]](../../models/filepurpose.md) | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -93,10 +100,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## retrieve @@ -133,10 +139,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -173,6 +178,45 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## download + +Download a file + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.files.download(file_id="") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[httpx.Response](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index cfb3d508..d9811521 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -34,19 +34,19 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -54,11 +54,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -85,19 +84,19 @@ if res is not None: ### Parameters -| Parameter | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -105,7 +104,7 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 0929c78d..6ecf6e51 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -54,10 +54,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## create @@ -101,10 +100,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## get @@ -141,10 +139,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## cancel @@ -181,10 +178,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| 
models.SDKError | 4XX, 5XX | \*/\* | ## start @@ -221,6 +217,6 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md new file mode 100644 index 00000000..5852c2cb --- /dev/null +++ b/docs/sdks/mistraljobs/README.md @@ -0,0 +1,179 @@ +# MistralJobs +(*batch.jobs*) + +## Overview + +### Available Operations + +* [list](#list) - Get Batch Jobs +* [create](#create) - Create Batch Job +* [get](#get) - Get Batch Job +* [cancel](#cancel) - Cancel Batch Job + +## list + +Get a list of batch jobs for your organization and user. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.batch.jobs.list() + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `status` | [OptionalNullable[models.BatchJobStatus]](../../models/batchjobstatus.md) 
| :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.BatchJobsOut](../../models/batchjobsout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## create + +Create a new batch job, it will be queued for processing. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.batch.jobs.create(input_files=[ + "a621cf02-1cd9-4cf5-8403-315211a509a3", +], endpoint="/v1/fim/completions", model="2") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `input_files` | List[*str*] | :heavy_check_mark: | N/A | +| `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | +| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.BatchJobOut](../../models/batchjobout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get + +Get a batch job details by its UUID. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.batch.jobs.get(job_id="b888f774-3e7c-4135-a18c-6b985523c4bc") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.BatchJobOut](../../models/batchjobout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## cancel + +Request the cancellation of a batch job. + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.batch.jobs.cancel(job_id="0f713502-9233-41c6-9ebd-c570b7edb496") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.BatchJobOut](../../models/batchjobout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 1a54bbb2..2ad489e0 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -48,11 +48,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## retrieve @@ -89,11 +88,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -130,11 +128,10 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## update @@ -173,10 +170,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## archive @@ -213,10 +209,9 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | 
--------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | - +| models.SDKError | 4XX, 5XX | \*/\* | ## unarchive @@ -253,6 +248,6 @@ if res is not None: ### Errors -| Error Object | Status Code | Content Type | +| Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4xx-5xx | */* | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/examples/function_calling.py b/examples/function_calling.py index 76ce489a..766a825b 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -98,26 +98,25 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: messages.append(UserMessage(content="My transaction ID is T1001.")) response = client.chat.complete(model=model, messages=messages, tools=tools) +messages.append(response.choices[0].message) -tool_call = response.choices[0].message.tool_calls[0] -function_name = tool_call.function.name -function_params = json.loads(tool_call.function.arguments) - -print( - f"calling function_name: {function_name}, with function_params: {function_params}" -) +for tool_call in response.choices[0].message.tool_calls: -function_result = names_to_functions[function_name](**function_params) + function_name = tool_call.function.name + function_params = json.loads(tool_call.function.arguments) -messages.append(response.choices[0].message) -messages.append( - ToolMessage( - name=function_name, - content=function_result, - tool_call_id=tool_call.id, + print( + f"calling function_name: {function_name}, with function_params: {function_params}" ) -) + function_result =names_to_functions[function_name](**function_params) + messages.append( + ToolMessage( + name=function_name, + content=function_result, + tool_call_id=tool_call.id, + ) + ) response = client.chat.complete(model=model, messages=messages, tools=tools) print(f"{response.choices[0].message.content}") diff --git 
a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 0972d2a6..bc550ff2 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,42 +1,46 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: e99cb4d498ede912c81ab20b7828c0e3 + docChecksum: 0f195020b1080b5c3b1fc5834d30a929 docVersion: 0.0.2 - speakeasyVersion: 1.396.7 - generationVersion: 2.415.6 - releaseVersion: 1.2.2 - configChecksum: 36e70d966ca186be6efc57911c094dec + speakeasyVersion: 1.434.4 + generationVersion: 2.452.0 + releaseVersion: 1.2.0 + configChecksum: 0600a305e49d44a5fcb3a5a33dc00999 published: true features: python: additionalDependencies: 1.0.0 - constsAndDefaults: 1.0.2 - core: 5.5.3 + constsAndDefaults: 1.0.4 + core: 5.6.4 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 - envVarSecurityUsage: 0.3.1 + envVarSecurityUsage: 0.3.2 examples: 3.0.0 flatRequests: 1.0.1 globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.0.0 + methodArguments: 1.0.2 nameOverrides: 3.0.0 nullables: 1.0.0 openEnums: 1.0.0 - responseFormat: 1.0.0 + responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.0.0 - serverEvents: 1.0.2 + serverEvents: 1.0.4 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.2 + tests: 1.6.0 + unions: 3.0.3 generatedFiles: - .gitattributes + - .python-version - .vscode/settings.json - docs/models/arguments.md - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md - docs/models/assistantmessagerole.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionchoicefinishreason.md @@ -66,6 +70,7 @@ generatedFiles: - docs/models/security.md - docs/models/stop.md - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md - docs/models/textchunk.md - docs/models/tool.md - docs/models/toolcall.md @@ -85,13 +90,13 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - 
- scripts/compile.sh - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_azure/__init__.py - src/mistralai_azure/_hooks/__init__.py - src/mistralai_azure/_hooks/sdkhooks.py - src/mistralai_azure/_hooks/types.py + - src/mistralai_azure/_version.py - src/mistralai_azure/basesdk.py - src/mistralai_azure/chat.py - src/mistralai_azure/httpclient.py @@ -148,11 +153,11 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "azureai", "messages": [{"content": []}]} + application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "azureai", "messages": [{"content": ""}]} + application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index edcb95b3..7280691b 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -11,8 +11,9 @@ generation: requestResponseComponentNamesFeb2024: true auth: oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false python: - version: 1.2.2 + version: 1.2.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -22,8 +23,11 @@ python: clientServerStatusCodesAsErrors: true description: Python Client SDK for the Mistral AI API in Azure. 
enumFormat: union + fixFlags: + responseRequiredSep2024: false flattenGlobalSecurity: true flattenRequests: true + flatteningOrder: parameters-first imports: option: openapi paths: diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/mistralai_azure/docs/models/assistantmessage.md index 0c36cde9..53f1cc76 100644 --- a/packages/mistralai_azure/docs/models/assistantmessage.md +++ b/packages/mistralai_azure/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/assistantmessagecontent.md b/packages/mistralai_azure/docs/models/assistantmessagecontent.md new file mode 100644 index 00000000..047b7cf9 --- /dev/null +++ b/packages/mistralai_azure/docs/models/assistantmessagecontent.md @@ -0,0 +1,17 @@ +# AssistantMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index 307b2796..68cef4a1 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -3,18 +3,20 @@ ## Fields -| Field | Type | Required | Description | Example | -| 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index 05f711f9..c9c5c87b 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -3,18 +3,20 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/content.md b/packages/mistralai_azure/docs/models/content.md index 4cd3cfd5..a833dc2c 100644 --- a/packages/mistralai_azure/docs/models/content.md +++ b/packages/mistralai_azure/docs/models/content.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.ContentChunk]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.ContentChunk] = /* values here */ ``` diff --git a/packages/mistralai_azure/docs/models/deltamessage.md b/packages/mistralai_azure/docs/models/deltamessage.md index d32f8e10..61deabbf 100644 --- a/packages/mistralai_azure/docs/models/deltamessage.md +++ b/packages/mistralai_azure/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/httpvalidationerror.md 
b/packages/mistralai_azure/docs/models/httpvalidationerror.md index 63892430..712a148c 100644 --- a/packages/mistralai_azure/docs/models/httpvalidationerror.md +++ b/packages/mistralai_azure/docs/models/httpvalidationerror.md @@ -1,7 +1,5 @@ # HTTPValidationError -Validation Error - ## Fields diff --git a/packages/mistralai_azure/docs/models/systemmessage.md b/packages/mistralai_azure/docs/models/systemmessage.md index 7f827984..0dba71c0 100644 --- a/packages/mistralai_azure/docs/models/systemmessage.md +++ b/packages/mistralai_azure/docs/models/systemmessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/systemmessagecontent.md b/packages/mistralai_azure/docs/models/systemmessagecontent.md new file mode 100644 index 00000000..e0d27d9f --- /dev/null +++ b/packages/mistralai_azure/docs/models/systemmessagecontent.md @@ -0,0 +1,17 @@ +# SystemMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: 
List[models.TextChunk] = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/usermessage.md b/packages/mistralai_azure/docs/models/usermessage.md index 3d96f1cd..63b01310 100644 --- a/packages/mistralai_azure/docs/models/usermessage.md +++ b/packages/mistralai_azure/docs/models/usermessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index 50800386..393d0f70 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -188,6 +188,7 @@ good-names=i, Run, _, e, + n, id # Good variable names regexes, separated by a comma. 
If names match any regex, diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index a9f13e0d..99001165 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai_azure" -version = "1.2.2" +version = "1.2.0" description = "Python Client SDK for the Mistral AI API in Azure." authors = ["Mistral",] readme = "README-PYPI.md" @@ -20,7 +20,7 @@ python = "^3.8" eval-type-backport = "^0.2.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" -pydantic = "~2.9.0" +pydantic = "~2.9.2" python-dateutil = "2.8.2" typing-inspect = "^0.9.0" diff --git a/packages/mistralai_azure/scripts/compile.sh b/packages/mistralai_azure/scripts/compile.sh deleted file mode 100755 index fafe635b..00000000 --- a/packages/mistralai_azure/scripts/compile.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail # Ensure pipeline failures are propagated - -# Use temporary files to store outputs and exit statuses -declare -A output_files -declare -A status_files - -# Function to run a command with temporary output and status files -run_command() { - local cmd="$1" - local key="$2" - local output_file="$3" - local status_file="$4" - - # Run the command and store output and exit status - { - eval "$cmd" - echo $? > "$status_file" - } &> "$output_file" & -} - -poetry run python scripts/prepare-readme.py - -# Create temporary files for outputs and statuses -for cmd in compileall pylint mypy pyright; do - output_files[$cmd]=$(mktemp) - status_files[$cmd]=$(mktemp) -done - -# Collect PIDs for background processes -declare -a pids - -# Run commands in parallel using temporary files -echo "Running python -m compileall" -run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" -pids+=($!) 
- -echo "Running pylint" -run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" -pids+=($!) - -echo "Running mypy" -run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" -pids+=($!) - -echo "Running pyright (optional)" -run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" -pids+=($!) - -# Wait for all processes to complete -echo "Waiting for processes to complete" -for pid in "${pids[@]}"; do - wait "$pid" -done - -# Print output sequentially and check for failures -failed=false -for key in "${!output_files[@]}"; do - echo "--- Output from Command: $key ---" - echo - cat "${output_files[$key]}" - echo # Empty line for separation - echo "--- End of Output from Command: $key ---" - echo - - exit_status=$(cat "${status_files[$key]}") - if [ "$exit_status" -ne 0 ]; then - echo "Command $key failed with exit status $exit_status" >&2 - failed=true - fi -done - -# Clean up temporary files -for tmp_file in "${output_files[@]}" "${status_files[@]}"; do - rm -f "$tmp_file" -done - -if $failed; then - echo "One or more commands failed." >&2 - exit 1 -else - echo "All commands completed successfully." - exit 0 -fi diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/mistralai_azure/src/mistralai_azure/__init__.py index 68138c47..a1b7f626 100644 --- a/packages/mistralai_azure/src/mistralai_azure/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/__init__.py @@ -1,5 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +from ._version import __title__, __version__ from .sdk import * from .sdkconfiguration import * from .models import * + + +VERSION: str = __version__ diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py new file mode 100644 index 00000000..6a45a91a --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import importlib.metadata + +__title__: str = "mistralai_azure" +__version__: str = "1.2.0" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index 5f1e539b..e1d33901 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -16,10 +16,9 @@ def stream( *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], model: OptionalNullable[str] = "azureai", - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -35,6 +34,9 @@ def stream( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -46,16 +48,18 @@ def stream( :param messages: The prompt(s) to generate completions for, encoded 
as a list of dict with role and content. :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -74,7 +78,6 @@ def stream( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -86,6 +89,9 @@ def stream( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -135,18 +141,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -155,10 +164,9 @@ async def stream_async( *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], model: OptionalNullable[str] = "azureai", - temperature: 
Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -174,6 +182,9 @@ async def stream_async( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -185,16 +196,18 @@ async def stream_async( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -213,7 +226,6 @@ async def stream_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -225,6 +237,9 @@ async def stream_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -274,18 +289,21 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -297,10 +315,9 @@ def complete( List[models.ChatCompletionRequestMessagesTypedDict], ], model: OptionalNullable[str] = "azureai", - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -321,6 +338,9 @@ def complete( 
models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -330,16 +350,18 @@ def complete( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -358,7 +380,6 @@ def complete( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -372,6 +393,9 @@ def complete( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -421,15 +445,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -441,10 +467,9 @@ async def complete_async( List[models.ChatCompletionRequestMessagesTypedDict], ], model: OptionalNullable[str] = "azureai", - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -465,6 +490,9 @@ async def complete_async( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: 
Optional[str] = None, @@ -474,16 +502,18 @@ async def complete_async( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param model: The ID of the model to use for this request. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. 
If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -502,7 +532,6 @@ async def complete_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -516,6 +545,9 @@ async def complete_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -565,14 +597,16 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise 
models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index 70f07999..e662fa75 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -2,6 +2,8 @@ from .assistantmessage import ( AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, AssistantMessageRole, AssistantMessageTypedDict, ) @@ -42,7 +44,7 @@ FinishReason, ) from .contentchunk import ContentChunk, ContentChunkTypedDict -from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( Arguments, @@ -57,10 +59,10 @@ from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( - Content, - ContentTypedDict, Role, SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, SystemMessageTypedDict, ) from .textchunk import TextChunk, TextChunkTypedDict, Type @@ -89,6 +91,8 @@ "Arguments", "ArgumentsTypedDict", "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", @@ -143,6 +147,8 @@ "Stop", "StopTypedDict", "SystemMessage", + "SystemMessageContent", + "SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 577b7e9a..5d978f01 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py 
+++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai_azure.types import ( BaseModel, @@ -10,28 +11,32 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict + + +AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +AssistantMessageContent = Union[str, List[ContentChunk]] AssistantMessageRole = Literal["assistant"] class AssistantMessageTypedDict(TypedDict): - content: NotRequired[Nullable[str]] + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): - content: OptionalNullable[str] = UNSET + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py index a71cd085..a78b72d5 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py @@ -2,12 +2,15 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai_azure.types import BaseModel -from typing import Literal, TypedDict +from mistralai_azure.types import BaseModel, UnrecognizedStr +from mistralai_azure.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict -ChatCompletionChoiceFinishReason = Literal[ - "stop", "length", "model_length", "error", "tool_calls" +ChatCompletionChoiceFinishReason = Union[ + Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr ] @@ -22,4 +25,6 @@ class ChatCompletionChoice(BaseModel): message: AssistantMessage - finish_reason: ChatCompletionChoiceFinishReason + finish_reason: Annotated[ + ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False)) + ] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index f2ba2345..fd3cb7bd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -18,8 +18,8 @@ ) from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, 
Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict ChatCompletionRequestStopTypedDict = Union[str, List[str]] @@ -60,14 +60,12 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: NotRequired[Nullable[str]] r"""The ID of the model to use for this request.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[ChatCompletionRequestStopTypedDict] @@ -77,6 +75,12 @@ class ChatCompletionRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -88,8 +92,8 @@ class ChatCompletionRequest(BaseModel): model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -97,9 +101,6 @@ class ChatCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -115,6 +116,15 @@ class ChatCompletionRequest(BaseModel): tool_choice: Optional[ChatCompletionRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" @@ -125,16 +135,25 @@ def serialize_model(self, handler): "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", "safe_prompt", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py index 0a02e46c..ecd85d5c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_azure.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class ChatCompletionResponseTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 28abddb9..8f71f892 100644 --- 
a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -18,8 +18,8 @@ ) from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict StopTypedDict = Union[str, List[str]] @@ -62,14 +62,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: NotRequired[Nullable[str]] r"""The ID of the model to use for this request.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[StopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -78,6 +76,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -89,8 +93,8 @@ class ChatCompletionStreamRequest(BaseModel): model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -98,9 +102,6 @@ class ChatCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[Stop] = None @@ -115,6 +116,15 @@ class ChatCompletionStreamRequest(BaseModel): tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" @@ -125,16 +135,25 @@ def serialize_model(self, handler): "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", "safe_prompt", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py index d2f334d4..d6cc2a86 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py @@ -7,8 +7,8 @@ ) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_azure.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class CompletionChunkTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py index b9b68db3..5a2039c2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py @@ -3,7 +3,7 @@ from __future__ import annotations from .completionchunk 
import CompletionChunk, CompletionChunkTypedDict from mistralai_azure.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class CompletionEventTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py index c220a51d..37294d9b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py @@ -2,12 +2,15 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from mistralai_azure.utils import validate_open_enum from pydantic import model_serializer -from typing import Literal, TypedDict +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict -FinishReason = Literal["stop", "length", "error", "tool_calls"] +FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr] class CompletionResponseStreamChoiceTypedDict(TypedDict): @@ -21,7 +24,9 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Nullable[FinishReason] + finish_reason: Annotated[ + Nullable[FinishReason], PlainValidator(validate_open_enum(False)) + ] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py index 5e8011d0..bb394494 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py 
@@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai_azure.types import ( BaseModel, @@ -10,27 +11,33 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Union +from typing_extensions import NotRequired, TypedDict + + +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] class DeltaMessageTypedDict(TypedDict): - role: NotRequired[str] - content: NotRequired[Nullable[str]] + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[ContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): - role: Optional[str] = None + role: OptionalNullable[str] = UNSET - content: OptionalNullable[str] = UNSET + content: OptionalNullable[Content] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["content", "tool_calls"] + nullable_fields = ["role", "content", "tool_calls"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py index 081ce1d6..488cdcea 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/function.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from typing import Any, Dict, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Any, Dict, Optional +from 
typing_extensions import NotRequired, TypedDict class FunctionTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py index 0afa5901..d2f136cd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py @@ -2,7 +2,8 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from typing import Any, Dict, TypedDict, Union +from typing import Any, Dict, Union +from typing_extensions import TypedDict ArgumentsTypedDict = Union[Dict[str, Any], str] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py b/packages/mistralai_azure/src/mistralai_azure/models/functionname.py index c825a5ab..b55c82af 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/functionname.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class FunctionNameTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py index 28f9b4ed..1d22d97a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py @@ -12,8 +12,6 @@ class HTTPValidationErrorData(BaseModel): class HTTPValidationError(Exception): - r"""Validation Error""" - data: HTTPValidationErrorData def __init__(self, data: HTTPValidationErrorData): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index c692033c..e4a9d7dd 100644 --- 
a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -3,8 +3,8 @@ from __future__ import annotations from .responseformats import ResponseFormats from mistralai_azure.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class ResponseFormatTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/security.py b/packages/mistralai_azure/src/mistralai_azure/models/security.py index 1245881b..c1ae8313 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/security.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/security.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai_azure.types import BaseModel from mistralai_azure.utils import FieldMetadata, SecurityMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class SecurityTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py index cf1775f7..3c00a82b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py @@ -3,25 +3,25 @@ from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict from mistralai_azure.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict -ContentTypedDict = Union[str, List[TextChunkTypedDict]] +SystemMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] -Content = Union[str, List[TextChunk]] 
+SystemMessageContent = Union[str, List[TextChunk]] Role = Literal["system"] class SystemMessageTypedDict(TypedDict): - content: ContentTypedDict + content: SystemMessageContentTypedDict role: NotRequired[Role] class SystemMessage(BaseModel): - content: Content + content: SystemMessageContent role: Optional[Role] = "system" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py index 75cc9490..583ce18d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py @@ -2,9 +2,11 @@ from __future__ import annotations from mistralai_azure.types import BaseModel +from mistralai_azure.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict Type = Literal["text"] @@ -12,11 +14,13 @@ class TextChunkTypedDict(TypedDict): text: str + type: Type class TextChunk(BaseModel): text: str - # fmt: off - TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "text" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[Optional[Type], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tool.py b/packages/mistralai_azure/src/mistralai_azure/models/tool.py index 3a02ed73..ffd9b062 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/tool.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/tool.py @@ -6,8 +6,8 @@ from mistralai_azure.types import BaseModel from mistralai_azure.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, 
NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index 2a768a2d..69b47310 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -6,8 +6,8 @@ from mistralai_azure.types import BaseModel from mistralai_azure.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolCallTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py index 2d3d87f0..cc3c2c1f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py @@ -6,8 +6,8 @@ from mistralai_azure.types import BaseModel from mistralai_azure.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolChoiceTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py index 14ecf73b..4362bc9f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py @@ -9,8 +9,8 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import Literal, 
Optional, TypedDict -from typing_extensions import NotRequired +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict ToolMessageRole = Literal["tool"] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py index 2a926481..b1d094fc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class UsageInfoTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py index e9488767..eddfb856 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py @@ -2,9 +2,10 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_azure.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] @@ -17,11 +18,41 @@ class UserMessageTypedDict(TypedDict): - content: UserMessageContentTypedDict + content: Nullable[UserMessageContentTypedDict] role: NotRequired[UserMessageRole] class UserMessage(BaseModel): - content: UserMessageContent + content: Nullable[UserMessageContent] role: Optional[UserMessageRole] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + 
optional_fields = ["role"] + nullable_fields = ["content"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py index 6ab66a1b..aa8eaff9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py @@ -2,7 +2,8 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from typing import List, TypedDict, Union +from typing import List, Union +from typing_extensions import TypedDict LocTypedDict = Union[str, int] diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index eefd8df4..1a319444 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -10,10 +10,10 @@ from typing import Callable, Dict, Optional, Tuple, Union -SERVER_PROD = "prod" -r"""Production server""" +SERVER_EU = "eu" +r"""EU Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_EU: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -28,9 +28,9 @@ class SDKConfiguration: server: 
Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.2" - gen_version: str = "2.415.6" - user_agent: str = "speakeasy-sdk/python 1.2.2 2.415.6 0.0.2 mistralai_azure" + sdk_version: str = "1.2.0" + gen_version: str = "2.452.0" + user_agent: str = "speakeasy-sdk/python 1.2.0 2.452.0 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None @@ -41,7 +41,7 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} if not self.server: - self.server = SERVER_PROD + self.server = SERVER_EU if self.server not in SERVERS: raise ValueError(f'Invalid server "{self.server}"') diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 6c26aeb9..26d51ae8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -27,6 +27,10 @@ serialize_float, serialize_int, stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, validate_decimal, validate_float, validate_int, @@ -79,10 +83,14 @@ "serialize_request_body", "SerializedRequestBody", "stream_to_text", + "stream_to_text_async", + "stream_to_bytes", + "stream_to_bytes_async", "template_url", "unmarshal", "unmarshal_json", "validate_decimal", + "validate_const", "validate_float", "validate_int", "validate_open_enum", diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py index 0d17472b..5b3bbb02 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py @@ -1,5 +1,6 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +from enum import Enum from typing import Any def get_discriminator(model: Any, fieldname: str, key: str) -> str: @@ -10,10 +11,20 @@ def get_discriminator(model: Any, fieldname: str, key: str) -> str: raise ValueError(f'Could not find discriminator key {key} in {model}') from e if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' fieldname = fieldname.upper() if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index 85d57f43..c5eb3659 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -116,6 +116,19 @@ def validate(e): return validate +def validate_const(v): + def validate(c): + if is_optional_type(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + def unmarshal_json(raw, typ: Any) -> Any: return unmarshal(from_json(raw), typ) @@ -172,6 +185,18 @@ def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + def get_pydantic_model(data: Any, typ: Any) -> Any: if not _contains_pydantic_model(data): 
return unmarshal(data, typ) diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index e5d61fb2..6add3601 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,42 +1,46 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 823d9b94fcb9c6588d0af16b7301f4ac + docChecksum: 46baf8da7636ea1bf44557571d011045 docVersion: 0.0.2 - speakeasyVersion: 1.396.7 - generationVersion: 2.415.6 - releaseVersion: 1.2.2 - configChecksum: fa993b7253c0c8c0d114d51422ffb486 + speakeasyVersion: 1.434.4 + generationVersion: 2.452.0 + releaseVersion: 1.2.0 + configChecksum: 6036ab871ca1cf21d35bfc75dc25089b published: true features: python: additionalDependencies: 1.0.0 - constsAndDefaults: 1.0.2 - core: 5.5.3 + constsAndDefaults: 1.0.4 + core: 5.6.4 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 - envVarSecurityUsage: 0.3.1 + envVarSecurityUsage: 0.3.2 examples: 3.0.0 flatRequests: 1.0.1 globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.0.0 + methodArguments: 1.0.2 nameOverrides: 3.0.0 nullables: 1.0.0 openEnums: 1.0.0 - responseFormat: 1.0.0 + responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.0.0 - serverEvents: 1.0.2 + serverEvents: 1.0.4 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.2 + tests: 1.6.0 + unions: 3.0.3 generatedFiles: - .gitattributes + - .python-version - .vscode/settings.json - docs/models/arguments.md - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md - docs/models/assistantmessagerole.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionchoicefinishreason.md @@ -71,6 +75,7 @@ generatedFiles: - docs/models/security.md - docs/models/stop.md - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md - docs/models/textchunk.md - docs/models/tool.md - docs/models/toolcall.md @@ -90,13 +95,13 @@ generatedFiles: - 
py.typed - pylintrc - pyproject.toml - - scripts/compile.sh - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py - src/mistralai_gcp/_hooks/__init__.py - src/mistralai_gcp/_hooks/sdkhooks.py - src/mistralai_gcp/_hooks/types.py + - src/mistralai_gcp/_version.py - src/mistralai_gcp/basesdk.py - src/mistralai_gcp/chat.py - src/mistralai_gcp/fim.py @@ -157,11 +162,11 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": []}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": ""}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 43da5ef7..97e9faf1 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -11,8 +11,9 @@ generation: requestResponseComponentNamesFeb2024: true auth: oAuth2ClientCredentialsEnabled: true + oAuth2PasswordEnabled: false python: - version: 1.2.2 + version: 1.2.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -25,8 +26,11 @@ python: clientServerStatusCodesAsErrors: true description: Python Client SDK for the Mistral AI API in GCP. 
enumFormat: union + fixFlags: + responseRequiredSep2024: false flattenGlobalSecurity: true flattenRequests: true + flatteningOrder: parameters-first imports: option: openapi paths: diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/mistralai_gcp/docs/models/assistantmessage.md index 0c36cde9..53f1cc76 100644 --- a/packages/mistralai_gcp/docs/models/assistantmessage.md +++ b/packages/mistralai_gcp/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/assistantmessagecontent.md b/packages/mistralai_gcp/docs/models/assistantmessagecontent.md new file mode 100644 index 00000000..047b7cf9 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/assistantmessagecontent.md @@ -0,0 +1,17 @@ +# AssistantMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index fb3bfb42..abc83281 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -3,17 +3,19 @@ ## Fields -| Field | Type | Required | Description | Example | -| 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
| mistral-small-latest | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index eb0d11ed..863c0229 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -3,17 +3,19 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
| mistral-small-latest | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/content.md b/packages/mistralai_gcp/docs/models/content.md index 4cd3cfd5..a833dc2c 100644 --- a/packages/mistralai_gcp/docs/models/content.md +++ b/packages/mistralai_gcp/docs/models/content.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.ContentChunk]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.ContentChunk] = /* values here */ ``` diff --git a/packages/mistralai_gcp/docs/models/deltamessage.md b/packages/mistralai_gcp/docs/models/deltamessage.md index d32f8e10..61deabbf 100644 --- a/packages/mistralai_gcp/docs/models/deltamessage.md +++ b/packages/mistralai_gcp/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `role` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `content` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| 
-------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index b4b024ed..236d2d21 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index acffb536..fa635932 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | +| `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/httpvalidationerror.md b/packages/mistralai_gcp/docs/models/httpvalidationerror.md index 63892430..712a148c 100644 --- a/packages/mistralai_gcp/docs/models/httpvalidationerror.md +++ b/packages/mistralai_gcp/docs/models/httpvalidationerror.md @@ -1,7 +1,5 @@ # HTTPValidationError -Validation Error - ## Fields diff --git a/packages/mistralai_gcp/docs/models/systemmessage.md b/packages/mistralai_gcp/docs/models/systemmessage.md index 7f827984..0dba71c0 100644 --- a/packages/mistralai_gcp/docs/models/systemmessage.md +++ b/packages/mistralai_gcp/docs/models/systemmessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `content` | [models.Content](../models/content.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontent.md b/packages/mistralai_gcp/docs/models/systemmessagecontent.md new file mode 100644 index 00000000..e0d27d9f --- /dev/null +++ 
b/packages/mistralai_gcp/docs/models/systemmessagecontent.md @@ -0,0 +1,17 @@ +# SystemMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.TextChunk]` + +```python +value: List[models.TextChunk] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/usermessage.md b/packages/mistralai_gcp/docs/models/usermessage.md index 3d96f1cd..63b01310 100644 --- a/packages/mistralai_gcp/docs/models/usermessage.md +++ b/packages/mistralai_gcp/docs/models/usermessage.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.UserMessageContent](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | +| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index 50800386..393d0f70 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -188,6 +188,7 @@ good-names=i, Run, _, e, + n, id # Good variable names regexes, separated 
by a comma. If names match any regex, diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 34ea7e55..c4e64885 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai-gcp" -version = "1.2.2" +version = "1.2.0" description = "Python Client SDK for the Mistral AI API in GCP." authors = ["Mistral",] readme = "README-PYPI.md" @@ -21,7 +21,7 @@ eval-type-backport = "^0.2.0" google-auth = "2.27.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" -pydantic = "~2.9.0" +pydantic = "~2.9.2" python-dateutil = "2.8.2" requests = "^2.32.3" typing-inspect = "^0.9.0" diff --git a/packages/mistralai_gcp/scripts/compile.sh b/packages/mistralai_gcp/scripts/compile.sh deleted file mode 100755 index fafe635b..00000000 --- a/packages/mistralai_gcp/scripts/compile.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail # Ensure pipeline failures are propagated - -# Use temporary files to store outputs and exit statuses -declare -A output_files -declare -A status_files - -# Function to run a command with temporary output and status files -run_command() { - local cmd="$1" - local key="$2" - local output_file="$3" - local status_file="$4" - - # Run the command and store output and exit status - { - eval "$cmd" - echo $? > "$status_file" - } &> "$output_file" & -} - -poetry run python scripts/prepare-readme.py - -# Create temporary files for outputs and statuses -for cmd in compileall pylint mypy pyright; do - output_files[$cmd]=$(mktemp) - status_files[$cmd]=$(mktemp) -done - -# Collect PIDs for background processes -declare -a pids - -# Run commands in parallel using temporary files -echo "Running python -m compileall" -run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" -pids+=($!) 
- -echo "Running pylint" -run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" -pids+=($!) - -echo "Running mypy" -run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" -pids+=($!) - -echo "Running pyright (optional)" -run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" -pids+=($!) - -# Wait for all processes to complete -echo "Waiting for processes to complete" -for pid in "${pids[@]}"; do - wait "$pid" -done - -# Print output sequentially and check for failures -failed=false -for key in "${!output_files[@]}"; do - echo "--- Output from Command: $key ---" - echo - cat "${output_files[$key]}" - echo # Empty line for separation - echo "--- End of Output from Command: $key ---" - echo - - exit_status=$(cat "${status_files[$key]}") - if [ "$exit_status" -ne 0 ]; then - echo "Command $key failed with exit status $exit_status" >&2 - failed=true - fi -done - -# Clean up temporary files -for tmp_file in "${output_files[@]}" "${status_files[@]}"; do - rm -f "$tmp_file" -done - -if $failed; then - echo "One or more commands failed." >&2 - exit 1 -else - echo "All commands completed successfully." - exit 0 -fi diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py index 68138c47..a1b7f626 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py @@ -1,5 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +from ._version import __title__, __version__ from .sdk import * from .sdkconfiguration import * from .models import * + + +VERSION: str = __version__ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py new file mode 100644 index 00000000..0472b64b --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import importlib.metadata + +__title__: str = "mistralai-gcp" +__version__: str = "1.2.0" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index 044dd192..19c74351 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -16,10 +16,9 @@ def stream( *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -35,6 +34,9 @@ def stream( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -45,16 +47,18 @@ def stream( :param model: ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -72,7 +76,6 @@ def stream( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -84,6 +87,9 @@ def stream( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, ) req = self.build_request( @@ -132,18 +138,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: 
{http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -152,10 +161,9 @@ async def stream_async( *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -171,6 +179,9 @@ async def stream_async( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -181,16 +192,18 @@ async def stream_async( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -208,7 +221,6 @@ async def stream_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -220,6 +232,9 @@ async def stream_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, ) req = self.build_request_async( @@ -268,18 +283,21 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -291,10 +309,9 @@ def complete( List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: 
Optional[bool] = False, stop: Optional[ Union[ @@ -315,6 +332,9 @@ def complete( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -323,16 +343,18 @@ def complete( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. 
:param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -350,7 +372,6 @@ def complete( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -364,6 +385,9 @@ def complete( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, ) req = self.build_request( @@ -412,15 +436,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -432,10 +458,9 @@ async def complete_async( List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -456,6 +481,9 @@ async def complete_async( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, 
retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -464,16 +492,18 @@ async def complete_async( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -491,7 +521,6 @@ async def complete_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -505,6 +534,9 @@ async def complete_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, ) req = self.build_request_async( @@ -553,14 +585,16 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index cda380c8..fb3bf902 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -16,10 +16,9 @@ def stream( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ 
Union[ @@ -29,6 +28,7 @@ def stream( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -39,14 +39,14 @@ def stream( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
:param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -64,12 +64,12 @@ def stream( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request( @@ -118,18 +118,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -138,10 +141,9 @@ async def stream_async( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: 
OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -151,6 +153,7 @@ async def stream_async( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -161,14 +164,14 @@ async def stream_async( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. 
If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -186,12 +189,12 @@ async def stream_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request_async( @@ -240,18 +243,21 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -260,10 +266,9 @@ def complete( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: 
OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -273,6 +278,7 @@ def complete( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -283,14 +289,14 @@ def complete( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -308,12 +314,12 @@ def complete( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request( @@ -362,15 +368,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", 
http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -379,10 +387,9 @@ async def complete_async( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -392,6 +399,7 @@ async def complete_async( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -402,14 +410,14 @@ async def complete_async( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -427,12 +435,12 @@ async def complete_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request_async( @@ -481,14 +489,16 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index 84acf245..db408df5 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -2,6 +2,8 @@ from .assistantmessage import ( AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, AssistantMessageRole, AssistantMessageTypedDict, ) @@ -42,7 +44,7 @@ FinishReason, ) from .contentchunk import ContentChunk, ContentChunkTypedDict -from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict from 
.fimcompletionrequest import ( FIMCompletionRequest, FIMCompletionRequestStop, @@ -70,10 +72,10 @@ from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( - Content, - ContentTypedDict, Role, SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, SystemMessageTypedDict, ) from .textchunk import TextChunk, TextChunkTypedDict, Type @@ -102,6 +104,8 @@ "Arguments", "ArgumentsTypedDict", "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", @@ -166,6 +170,8 @@ "Stop", "StopTypedDict", "SystemMessage", + "SystemMessageContent", + "SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 33a4965f..f93a06cf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai_gcp.types import ( BaseModel, @@ -10,28 +11,32 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict + + +AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +AssistantMessageContent = Union[str, List[ContentChunk]] AssistantMessageRole = Literal["assistant"] class AssistantMessageTypedDict(TypedDict): - content: NotRequired[Nullable[str]] + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): - content: OptionalNullable[str] = UNSET + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py index c585e1ed..9bcf1240 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py @@ -2,12 +2,15 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai_gcp.types import BaseModel -from typing import Literal, TypedDict +from mistralai_gcp.types import BaseModel, UnrecognizedStr +from mistralai_gcp.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict -ChatCompletionChoiceFinishReason = Literal[ - "stop", "length", "model_length", "error", "tool_calls" +ChatCompletionChoiceFinishReason = Union[ + Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr ] @@ -22,4 +25,6 @@ class ChatCompletionChoice(BaseModel): message: AssistantMessage - finish_reason: ChatCompletionChoiceFinishReason + finish_reason: Annotated[ + ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False)) + ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index dbe6f55c..e1c263b7 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -18,8 +18,8 @@ ) from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from 
typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict ChatCompletionRequestStopTypedDict = Union[str, List[str]] @@ -60,14 +60,12 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[ChatCompletionRequestStopTypedDict] @@ -77,6 +75,12 @@ class ChatCompletionRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" class ChatCompletionRequest(BaseModel): @@ -86,8 +90,8 @@ class ChatCompletionRequest(BaseModel): messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -95,9 +99,6 @@ class ChatCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -113,21 +114,39 @@ class ChatCompletionRequest(BaseModel): tool_choice: Optional[ChatCompletionRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py index 5fb10447..0404a9d2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class ChatCompletionResponseTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py 
b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index 5bb7059c..5fc40850 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -18,8 +18,8 @@ ) from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict StopTypedDict = Union[str, List[str]] @@ -62,14 +62,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[StopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -78,6 +76,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" class ChatCompletionStreamRequest(BaseModel): @@ -87,8 +91,8 @@ class ChatCompletionStreamRequest(BaseModel): messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -96,9 +100,6 @@ class ChatCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[Stop] = None @@ -113,21 +114,39 @@ class ChatCompletionStreamRequest(BaseModel): tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py index f0561ef7..ca002f52 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py @@ -7,8 +7,8 @@ ) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class CompletionChunkTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py index 7086fce0..33278c11 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py @@ -3,7 +3,7 @@ from __future__ import annotations from .completionchunk import CompletionChunk, CompletionChunkTypedDict from mistralai_gcp.types import BaseModel -from typing import TypedDict 
+from typing_extensions import TypedDict class CompletionEventTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py index a09f67fa..8d779971 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py @@ -2,12 +2,15 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from mistralai_gcp.utils import validate_open_enum from pydantic import model_serializer -from typing import Literal, TypedDict +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict -FinishReason = Literal["stop", "length", "error", "tool_calls"] +FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr] class CompletionResponseStreamChoiceTypedDict(TypedDict): @@ -21,7 +24,9 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Nullable[FinishReason] + finish_reason: Annotated[ + Nullable[FinishReason], PlainValidator(validate_open_enum(False)) + ] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py index 314e52a7..bb540c96 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai_gcp.types import ( BaseModel, @@ -10,27 +11,33 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Union +from typing_extensions import NotRequired, TypedDict + + +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] class DeltaMessageTypedDict(TypedDict): - role: NotRequired[str] - content: NotRequired[Nullable[str]] + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[ContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): - role: Optional[str] = None + role: OptionalNullable[str] = UNSET - content: OptionalNullable[str] = UNSET + content: OptionalNullable[Content] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["content", "tool_calls"] + nullable_fields = ["role", "content", "tool_calls"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 8693e34f..3a851768 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -9,8 +9,8 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypedDict FIMCompletionRequestStopTypedDict = Union[str, List[str]] @@ 
-29,14 +29,12 @@ class FIMCompletionRequestTypedDict(TypedDict): """ prompt: str r"""The text/code to complete.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[FIMCompletionRequestStopTypedDict] @@ -45,6 +43,8 @@ class FIMCompletionRequestTypedDict(TypedDict): r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" class FIMCompletionRequest(BaseModel): @@ -57,8 +57,8 @@ class FIMCompletionRequest(BaseModel): prompt: str r"""The text/code to complete.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -66,9 +66,6 @@ class FIMCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -81,19 +78,29 @@ class FIMCompletionRequest(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "suffix", + "min_tokens", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "suffix", + "min_tokens", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py index ad285153..a4d273a2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from 
mistralai_gcp.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class FIMCompletionResponseTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index d05918ca..f47937b9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -9,8 +9,8 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypedDict FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] @@ -29,14 +29,12 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): """ prompt: str r"""The text/code to complete.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -44,6 +42,8 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" class FIMCompletionStreamRequest(BaseModel): @@ -56,8 +56,8 @@ class FIMCompletionStreamRequest(BaseModel): prompt: str r"""The text/code to complete.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -65,9 +65,6 @@ class FIMCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[FIMCompletionStreamRequestStop] = None @@ -79,19 +76,29 @@ class FIMCompletionStreamRequest(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "suffix", + "min_tokens", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "suffix", + "min_tokens", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py index 533c3dea..c3168eec 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel -from typing import Any, Dict, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class FunctionTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py index d8daaef9..02da9bba 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py @@ -2,7 +2,8 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel -from typing import Any, Dict, TypedDict, Union +from typing import Any, Dict, Union +from typing_extensions import TypedDict ArgumentsTypedDict = Union[Dict[str, Any], str] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py 
b/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py index 47af74a9..00ec22f5 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class FunctionNameTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py index 68b1f780..11024f85 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py @@ -12,8 +12,6 @@ class HTTPValidationErrorData(BaseModel): class HTTPValidationError(Exception): - r"""Validation Error""" - data: HTTPValidationErrorData def __init__(self, data: HTTPValidationErrorData): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index 0398e9b2..fde89862 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -3,8 +3,8 @@ from __future__ import annotations from .responseformats import ResponseFormats from mistralai_gcp.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class ResponseFormatTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py index c9c0e0fc..38574942 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/security.py @@ -3,8 +3,7 @@ from __future__ import 
annotations from mistralai_gcp.types import BaseModel from mistralai_gcp.utils import FieldMetadata, SecurityMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class SecurityTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py index 872b9e32..87798558 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py @@ -3,25 +3,25 @@ from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict -ContentTypedDict = Union[str, List[TextChunkTypedDict]] +SystemMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] -Content = Union[str, List[TextChunk]] +SystemMessageContent = Union[str, List[TextChunk]] Role = Literal["system"] class SystemMessageTypedDict(TypedDict): - content: ContentTypedDict + content: SystemMessageContentTypedDict role: NotRequired[Role] class SystemMessage(BaseModel): - content: Content + content: SystemMessageContent role: Optional[Role] = "system" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py index 5c3774c1..48367e4e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py @@ -2,9 +2,11 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel +from mistralai_gcp.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import 
Annotated +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict Type = Literal["text"] @@ -12,11 +14,13 @@ class TextChunkTypedDict(TypedDict): text: str + type: Type class TextChunk(BaseModel): text: str - # fmt: off - TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "text" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[Optional[Type], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py index 24e1a9ff..a1d477da 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py @@ -6,8 +6,8 @@ from mistralai_gcp.types import BaseModel from mistralai_gcp.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py index 6374f2ca..5b4b217a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -6,8 +6,8 @@ from mistralai_gcp.types import BaseModel from mistralai_gcp.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolCallTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py 
b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py index bd6dbe7a..dc213e62 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py @@ -6,8 +6,8 @@ from mistralai_gcp.types import BaseModel from mistralai_gcp.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolChoiceTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py index caff0ad7..80e44ede 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -9,8 +9,8 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict ToolMessageRole = Literal["tool"] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py index d63486bd..9de6af7e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class UsageInfoTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py index ccc6efb1..229dbaf9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -2,9 +2,10 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_gcp.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] @@ -17,11 +18,41 @@ class UserMessageTypedDict(TypedDict): - content: UserMessageContentTypedDict + content: Nullable[UserMessageContentTypedDict] role: NotRequired[UserMessageRole] class UserMessage(BaseModel): - content: UserMessageContent + content: Nullable[UserMessageContent] role: Optional[UserMessageRole] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role"] + nullable_fields = ["content"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py index 23e95956..b8bd4345 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py @@ -2,7 +2,8 @@ from __future__ import 
annotations from mistralai_gcp.types import BaseModel -from typing import List, TypedDict, Union +from typing import List, Union +from typing_extensions import TypedDict LocTypedDict = Union[str, int] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index 408d8c3e..fdb296cc 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -10,10 +10,10 @@ from typing import Callable, Dict, Optional, Tuple, Union -SERVER_PROD = "prod" -r"""Production server""" +SERVER_EU = "eu" +r"""EU Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_EU: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.2" - gen_version: str = "2.415.6" - user_agent: str = "speakeasy-sdk/python 1.2.2 2.415.6 0.0.2 mistralai-gcp" + sdk_version: str = "1.2.0" + gen_version: str = "2.452.0" + user_agent: str = "speakeasy-sdk/python 1.2.0 2.452.0 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None @@ -41,7 +41,7 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} if not self.server: - self.server = SERVER_PROD + self.server = SERVER_EU if self.server not in SERVERS: raise ValueError(f'Invalid server "{self.server}"') diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 6c26aeb9..26d51ae8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -27,6 +27,10 @@ serialize_float, serialize_int, stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, validate_decimal, validate_float, validate_int, @@ -79,10 +83,14 @@ "serialize_request_body", "SerializedRequestBody", "stream_to_text", + "stream_to_text_async", + "stream_to_bytes", + "stream_to_bytes_async", "template_url", "unmarshal", "unmarshal_json", "validate_decimal", + "validate_const", "validate_float", "validate_int", "validate_open_enum", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py index 0d17472b..5b3bbb02 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py @@ -1,5 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from enum import Enum from typing import Any def get_discriminator(model: Any, fieldname: str, key: str) -> str: @@ -10,10 +11,20 @@ def get_discriminator(model: Any, fieldname: str, key: str) -> str: raise ValueError(f'Could not find discriminator key {key} in {model}') from e if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' fieldname = fieldname.upper() if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index 85d57f43..c5eb3659 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py 
+++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -116,6 +116,19 @@ def validate(e): return validate +def validate_const(v): + def validate(c): + if is_optional_type(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + def unmarshal_json(raw, typ: Any) -> Any: return unmarshal(from_json(raw), typ) @@ -172,6 +185,18 @@ def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + def get_pydantic_model(data: Any, typ: Any) -> Any: if not _contains_pydantic_model(data): return unmarshal(data, typ) diff --git a/pylintrc b/pylintrc index 50800386..393d0f70 100644 --- a/pylintrc +++ b/pylintrc @@ -188,6 +188,7 @@ good-names=i, Run, _, e, + n, id # Good variable names regexes, separated by a comma. If names match any regex, diff --git a/pyproject.toml b/pyproject.toml index 4200b2ba..31aea5d6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.1.0" +version = "1.2.0" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/scripts/compile.sh b/scripts/compile.sh deleted file mode 100755 index fafe635b..00000000 --- a/scripts/compile.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash - -set -o pipefail # Ensure pipeline failures are propagated - -# Use temporary files to store outputs and exit statuses -declare -A output_files -declare -A status_files - -# Function to run a command with temporary output and status files -run_command() { - local cmd="$1" - local key="$2" - local output_file="$3" - local status_file="$4" - - # Run the command and store output and exit status - { - eval "$cmd" - echo $? > "$status_file" - } &> "$output_file" & -} - -poetry run python scripts/prepare-readme.py - -# Create temporary files for outputs and statuses -for cmd in compileall pylint mypy pyright; do - output_files[$cmd]=$(mktemp) - status_files[$cmd]=$(mktemp) -done - -# Collect PIDs for background processes -declare -a pids - -# Run commands in parallel using temporary files -echo "Running python -m compileall" -run_command 'poetry run python -m compileall -q . && echo "Success"' 'compileall' "${output_files[compileall]}" "${status_files[compileall]}" -pids+=($!) - -echo "Running pylint" -run_command 'poetry run pylint src' 'pylint' "${output_files[pylint]}" "${status_files[pylint]}" -pids+=($!) - -echo "Running mypy" -run_command 'poetry run mypy src' 'mypy' "${output_files[mypy]}" "${status_files[mypy]}" -pids+=($!) - -echo "Running pyright (optional)" -run_command 'if command -v pyright > /dev/null 2>&1; then pyright src; else echo "pyright not found, skipping"; fi' 'pyright' "${output_files[pyright]}" "${status_files[pyright]}" -pids+=($!) 
- -# Wait for all processes to complete -echo "Waiting for processes to complete" -for pid in "${pids[@]}"; do - wait "$pid" -done - -# Print output sequentially and check for failures -failed=false -for key in "${!output_files[@]}"; do - echo "--- Output from Command: $key ---" - echo - cat "${output_files[$key]}" - echo # Empty line for separation - echo "--- End of Output from Command: $key ---" - echo - - exit_status=$(cat "${status_files[$key]}") - if [ "$exit_status" -ne 0 ]; then - echo "Command $key failed with exit status $exit_status" >&2 - failed=true - fi -done - -# Clean up temporary files -for tmp_file in "${output_files[@]}" "${status_files[@]}"; do - rm -f "$tmp_file" -done - -if $failed; then - echo "One or more commands failed." >&2 - exit 1 -else - echo "All commands completed successfully." - exit 0 -fi diff --git a/src/mistralai/__init__.py b/src/mistralai/__init__.py index 68138c47..a1b7f626 100644 --- a/src/mistralai/__init__.py +++ b/src/mistralai/__init__.py @@ -1,5 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from ._version import __title__, __version__ from .sdk import * from .sdkconfiguration import * from .models import * + + +VERSION: str = __version__ diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py new file mode 100644 index 00000000..752c9ed6 --- /dev/null +++ b/src/mistralai/_version.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import importlib.metadata + +__title__: str = "mistralai" +__version__: str = "1.2.0" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 05d17753..1b5c6a1f 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -20,7 +20,6 @@ def complete( ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -41,6 +40,9 @@ def complete( models.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -50,13 +52,15 @@ def complete( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
:param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -71,7 +75,6 @@ def complete( request = models.AgentsCompletionRequest( max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -85,6 +88,9 @@ def complete( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.AgentsCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, agent_id=agent_id, ) @@ -136,15 +142,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", 
http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -157,7 +165,6 @@ async def complete_async( ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -178,6 +185,9 @@ async def complete_async( models.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -187,13 +197,15 @@ async def complete_async( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
+ :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -208,7 +220,6 @@ async def complete_async( request = models.AgentsCompletionRequest( max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -222,6 +233,9 @@ async def complete_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.AgentsCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, agent_id=agent_id, ) @@ -273,15 +287,17 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -294,7 +310,6 @@ def stream( ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: 
Optional[ Union[ @@ -315,6 +330,9 @@ def stream( models.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -326,13 +344,15 @@ def stream( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -347,7 +367,6 @@ def stream( request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -361,6 +380,9 @@ def stream( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, agent_id=agent_id, ) @@ -412,18 +434,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -436,7 +461,6 @@ async def stream_async( ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -457,6 +481,9 @@ async def stream_async( models.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: 
OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -468,13 +495,15 @@ async def stream_async( :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. :param agent_id: The ID of the agent to use for this completion. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -489,7 +518,6 @@ async def stream_async( request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -503,6 +531,9 @@ async def stream_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, agent_id=agent_id, ) @@ -554,17 +585,20 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/batch.py b/src/mistralai/batch.py new file mode 100644 index 00000000..bb59abda --- /dev/null +++ b/src/mistralai/batch.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.mistral_jobs import MistralJobs + + +class Batch(BaseSDK): + jobs: MistralJobs + + def __init__(self, sdk_config: SDKConfiguration) -> None: + BaseSDK.__init__(self, sdk_config) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.jobs = MistralJobs(self.sdk_configuration) diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 3e770f14..dd5ca693 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -16,10 +16,9 @@ def complete( *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -35,6 +34,9 @@ def complete( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -44,16 +46,18 @@ def complete( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
+ :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -72,7 +76,6 @@ def complete( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -84,6 +87,9 @@ def complete( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -135,15 +141,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -152,10 +160,9 @@ async def complete_async( *, model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: 
OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, @@ -171,6 +178,9 @@ async def complete_async( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -180,16 +190,18 @@ async def complete_async( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -208,7 +220,6 @@ async def complete_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -220,6 +231,9 @@ async def complete_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -271,15 +285,17 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -291,10 +307,9 @@ def stream( List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict], ], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -315,6 +330,9 @@ def stream( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: 
OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -326,16 +344,18 @@ def stream( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
:param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -354,7 +374,6 @@ def stream( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -368,6 +387,9 @@ def stream( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -419,18 +441,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = 
http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -442,10 +467,9 @@ async def stream_async( List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict], ], - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -466,6 +490,9 @@ async def stream_async( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, + presence_penalty: Optional[float] = 0, + frequency_penalty: Optional[float] = 0, + n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -477,16 +504,18 @@ async def stream_async( :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param response_format: :param tools: :param tool_choice: + :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -505,7 +534,6 @@ async def stream_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, @@ -519,6 +547,9 @@ async def stream_async( tool_choice=utils.get_pydantic_model( tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, safe_prompt=safe_prompt, ) @@ -570,17 +601,20 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py new file mode 100644 index 00000000..3a772068 --- /dev/null +++ b/src/mistralai/classifiers.py @@ -0,0 +1,396 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, Optional, Union + + +class Classifiers(BaseSDK): + r"""Classifiers API.""" + + def moderate( + self, + *, + inputs: Union[ + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, + ], + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ClassificationResponse]: + r"""Moderations + + :param inputs: Text to classify. + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ClassificationRequest( + inputs=inputs, + model=model, + ) + + req = self.build_request( + method="POST", + path="/v1/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = 
(retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="moderations_v1_moderations_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, Optional[models.ClassificationResponse] + ) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def moderate_async( + self, + *, + inputs: Union[ + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, + ], + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ClassificationResponse]: + r"""Moderations + + :param inputs: Text to classify. 
+ :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ClassificationRequest( + inputs=inputs, + model=model, + ) + + req = self.build_request_async( + method="POST", + path="/v1/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="moderations_v1_moderations_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, Optional[models.ClassificationResponse] + ) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) 
+ if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def moderate_chat( + self, + *, + inputs: Union[ + models.ChatClassificationRequestInputs, + models.ChatClassificationRequestInputsTypedDict, + ], + model: Nullable[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ClassificationResponse]: + r"""Moderations Chat + + :param inputs: Chat to classify + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatClassificationRequest( + inputs=utils.get_pydantic_model( + inputs, models.ChatClassificationRequestInputs + ), + model=model, + ) + + req = self.build_request( + method="POST", + path="/v1/chat/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", 
models.ChatClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="moderations_chat_v1_chat_moderations_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, Optional[models.ClassificationResponse] + ) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def moderate_chat_async( + self, + *, + inputs: Union[ + models.ChatClassificationRequestInputs, + models.ChatClassificationRequestInputsTypedDict, + ], + model: Nullable[str], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.ClassificationResponse]: + r"""Moderations Chat + + :param inputs: Chat to classify + :param model: + :param retries: Override the default retry configuration 
for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.ChatClassificationRequest( + inputs=utils.get_pydantic_model( + inputs, models.ChatClassificationRequestInputs + ), + model=model, + ) + + req = self.build_request_async( + method="POST", + path="/v1/chat/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="moderations_chat_v1_chat_moderations_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, Optional[models.ClassificationResponse] + ) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise 
models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index c19a9e38..2aa115c8 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -15,7 +15,7 @@ def create( self, *, inputs: Union[models.Inputs, models.InputsTypedDict], - model: str, + model: Optional[str] = "mistral-embed", encoding_format: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -94,15 +94,17 @@ def create( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -110,7 +112,7 @@ async def create_async( self, *, inputs: Union[models.Inputs, models.InputsTypedDict], - model: str, + model: Optional[str] = "mistral-embed", encoding_format: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ 
-189,14 +191,16 @@ async def create_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 06724056..6cf0fcb7 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -1,11 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK +import httpx from mistralai import models, utils from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env -from typing import Optional, Union +from typing import List, Optional, Union class Files(BaseSDK): @@ -15,6 +16,7 @@ def upload( self, *, file: Union[models.File, models.FileTypedDict], + purpose: Optional[models.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -28,6 +30,7 @@ def upload( Please contact us if you need to increase these storage limits. :param file: The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -42,6 +45,7 @@ def upload( request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), + purpose=purpose, ) req = self.build_request( @@ -90,15 +94,17 @@ def upload( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -106,6 +112,7 @@ async def upload_async( self, *, file: Union[models.File, models.FileTypedDict], + purpose: Optional[models.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -119,6 +126,7 @@ async def upload_async( Please contact us if you need to increase these storage limits. :param file: The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -133,6 +141,7 @@ async def upload_async( request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), + purpose=purpose, ) req = self.build_request_async( @@ -181,21 +190,29 @@ async def upload_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) def list( self, *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + sample_type: OptionalNullable[List[models.SampleType]] = UNSET, + source: OptionalNullable[List[models.Source]] = UNSET, + search: OptionalNullable[str] = UNSET, + purpose: OptionalNullable[models.FilePurpose] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -204,6 +221,12 @@ def list( Returns a list of files that belong to the user's organization. 
+ :param page: + :param page_size: + :param sample_type: + :param source: + :param search: + :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -215,12 +238,22 @@ def list( if server_url is not None: base_url = server_url + + request = models.FilesAPIRoutesListFilesRequest( + page=page, + page_size=page_size, + sample_type=sample_type, + source=source, + search=search, + purpose=purpose, + ) + req = self.build_request( method="GET", path="/v1/files", base_url=base_url, url_variables=url_variables, - request=None, + request=request, request_body_required=False, request_has_path_params=False, request_has_query_params=True, @@ -254,21 +287,29 @@ def list( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) async def list_async( self, *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + sample_type: OptionalNullable[List[models.SampleType]] = UNSET, + source: OptionalNullable[List[models.Source]] = UNSET, + search: OptionalNullable[str] = UNSET, + purpose: OptionalNullable[models.FilePurpose] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -277,6 
+318,12 @@ async def list_async( Returns a list of files that belong to the user's organization. + :param page: + :param page_size: + :param sample_type: + :param source: + :param search: + :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -288,12 +335,22 @@ async def list_async( if server_url is not None: base_url = server_url + + request = models.FilesAPIRoutesListFilesRequest( + page=page, + page_size=page_size, + sample_type=sample_type, + source=source, + search=search, + purpose=purpose, + ) + req = self.build_request_async( method="GET", path="/v1/files", base_url=base_url, url_variables=url_variables, - request=None, + request=request, request_body_required=False, request_has_path_params=False, request_has_query_params=True, @@ -327,15 +384,17 @@ async def list_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -407,15 +466,17 @@ def retrieve( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise 
models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -487,15 +548,17 @@ async def retrieve_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -567,15 +630,17 @@ def delete( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -647,14 +712,182 @@ async def delete_async( if utils.match_response(http_res, 
"200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def download( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[httpx.Response]: + r"""Download File + + Download a file + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesDownloadFileRequest( + file_id=file_id, + ) + + req = self.build_request( + method="GET", + path="/v1/files/{file_id}/content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/octet-stream", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = 
(retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="files_api_routes_download_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/octet-stream"): + return http_res + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def download_async( + self, + *, + file_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[httpx.Response]: + r"""Download File + + Download a file + + :param file_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesDownloadFileRequest( + file_id=file_id, + ) + + req = self.build_request_async( + method="GET", + path="/v1/files/{file_id}/content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + 
user_agent_header="user-agent", + accept_header_value="application/octet-stream", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="files_api_routes_download_file", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/octet-stream"): + return http_res + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 5239e90d..8f8c8529 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -16,10 +16,9 @@ def complete( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ @@ -29,6 +28,7 @@ def complete( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: 
OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -39,14 +39,14 @@ def complete( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -64,12 +64,12 @@ def complete( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request( @@ -120,15 +120,17 @@ def complete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -137,10 +139,9 @@ async def complete_async( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, 
stream: Optional[bool] = False, stop: Optional[ Union[ @@ -150,6 +151,7 @@ async def complete_async( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -160,14 +162,14 @@ async def complete_async( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -185,12 +187,12 @@ async def complete_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request_async( @@ -241,15 +243,17 @@ async def complete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -258,10 +262,9 @@ def stream( *, 
model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -271,6 +274,7 @@ def stream( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -281,14 +285,14 @@ def stream( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param min_tokens: The minimum number of tokens to generate in the completion. 
:param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -306,12 +310,12 @@ def stream( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request( @@ -362,18 +366,21 @@ def stream( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = utils.stream_to_text(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -382,10 
+389,9 @@ async def stream_async( *, model: Nullable[str], prompt: str, - temperature: Optional[float] = 0.7, + temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, max_tokens: OptionalNullable[int] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ @@ -395,6 +401,7 @@ async def stream_async( ] = None, random_seed: OptionalNullable[int] = UNSET, suffix: OptionalNullable[str] = UNSET, + min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -405,14 +412,14 @@ async def stream_async( :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
- :param min_tokens: The minimum number of tokens to generate in the completion. :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -430,12 +437,12 @@ async def stream_async( temperature=temperature, top_p=top_p, max_tokens=max_tokens, - min_tokens=min_tokens, stream=stream, stop=stop, random_seed=random_seed, prompt=prompt, suffix=suffix, + min_tokens=min_tokens, ) req = self.build_request_async( @@ -486,17 +493,20 @@ async def stream_async( sentinel="[DONE]", ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + http_res_text = await utils.stream_to_text_async(http_res) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected 
response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index b6c1b243..9f472de1 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -102,15 +102,17 @@ def list( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -206,15 +208,17 @@ async def list_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -333,15 +337,17 @@ def create( Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse], ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error 
occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -460,15 +466,17 @@ async def create_async( Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse], ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -540,15 +548,17 @@ def get( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -620,15 +630,17 @@ async def get_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if 
utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -700,15 +712,17 @@ def cancel( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -780,15 +794,17 @@ async def cancel_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, 
- http_res.text, + http_res_text, http_res, ) @@ -860,15 +876,17 @@ def start( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -940,14 +958,16 @@ async def start_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py new file mode 100644 index 00000000..59ea13fc --- /dev/null +++ b/src/mistralai/mistral_jobs.py @@ -0,0 +1,733 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from datetime import datetime +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, Dict, List, Optional + + +class MistralJobs(BaseSDK): + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[models.BatchJobStatus] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobsOut]: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. + + :param page: + :param page_size: + :param model: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + page=page, + page_size=page_size, + model=model, + metadata=metadata, + created_after=created_after, + created_by_me=created_by_me, + status=status, + ) + + req = self.build_request( + method="GET", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + 
accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_get_batch_jobs", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobsOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[models.BatchJobStatus] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobsOut]: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. 
+ + :param page: + :param page_size: + :param model: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + page=page, + page_size=page_size, + model=model, + metadata=metadata, + created_after=created_after, + created_by_me=created_by_me, + status=status, + ) + + req = self.build_request_async( + method="GET", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_get_batch_jobs", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobsOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def create( + self, + *, + input_files: List[str], + endpoint: models.APIEndpoint, + model: str, + metadata: OptionalNullable[Dict[str, str]] = UNSET, + timeout_hours: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Create Batch Job + + Create a new batch job, it will be queued for processing. + + :param input_files: + :param endpoint: + :param model: + :param metadata: + :param timeout_hours: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.BatchJobIn( + input_files=input_files, + endpoint=endpoint, + model=model, + metadata=metadata, + timeout_hours=timeout_hours, + ) + + req = self.build_request( + method="POST", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", 
models.BatchJobIn + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_create_batch_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def create_async( + self, + *, + input_files: List[str], + endpoint: models.APIEndpoint, + model: str, + metadata: OptionalNullable[Dict[str, str]] = UNSET, + timeout_hours: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Create Batch Job + + Create a new batch job, it will be queued for processing. 
+ + :param input_files: + :param endpoint: + :param model: + :param metadata: + :param timeout_hours: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.BatchJobIn( + input_files=input_files, + endpoint=endpoint, + model=model, + metadata=metadata, + timeout_hours=timeout_hours, + ) + + req = self.build_request_async( + method="POST", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.BatchJobIn + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_create_batch_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = 
await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def get( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Get Batch Job + + Get a batch job details by its UUID. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchGetBatchJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="GET", + path="/v1/batch/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_get_batch_job", + 
oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Get Batch Job + + Get a batch job details by its UUID. 
+ + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchGetBatchJobRequest( + job_id=job_id, + ) + + req = self.build_request_async( + method="GET", + path="/v1/batch/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_get_batch_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def cancel( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Cancel Batch Job + + Request the cancellation of a batch job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self.build_request( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", 
"application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def cancel_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.BatchJobOut]: + r"""Cancel Batch Job + + Request the cancellation of a batch job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self.build_request_async( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): 
+ retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 8b7f1a22..42d2c66a 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -20,6 +20,7 @@ AgentsCompletionStreamRequestToolChoiceTypedDict, AgentsCompletionStreamRequestTypedDict, ) +from .apiendpoint import APIEndpoint from .archiveftmodelout import ( ArchiveFTModelOut, ArchiveFTModelOutObject, @@ -27,10 +28,27 @@ ) from .assistantmessage import ( AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, AssistantMessageRole, AssistantMessageTypedDict, ) -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict, Type +from .batcherror import BatchError, BatchErrorTypedDict +from .batchjobin import BatchJobIn, BatchJobInTypedDict +from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict +from .batchjobsout 
import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict +from .batchjobstatus import BatchJobStatus +from .chatclassificationrequest import ( + ChatClassificationRequest, + ChatClassificationRequestInputs, + ChatClassificationRequestInputsTypedDict, + ChatClassificationRequestTypedDict, + One, + OneTypedDict, + Two, + TwoTypedDict, +) from .chatcompletionchoice import ( ChatCompletionChoice, ChatCompletionChoiceTypedDict, @@ -61,6 +79,17 @@ ChatCompletionStreamRequestTypedDict, ) from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .classificationobject import ClassificationObject, ClassificationObjectTypedDict +from .classificationrequest import ( + ClassificationRequest, + ClassificationRequestInputs, + ClassificationRequestInputsTypedDict, + ClassificationRequestTypedDict, +) +from .classificationresponse import ( + ClassificationResponse, + ClassificationResponseTypedDict, +) from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completionevent import CompletionEvent, CompletionEventTypedDict from .completionresponsestreamchoice import ( @@ -75,7 +104,7 @@ ) from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict -from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict from .detailedjobout import ( DetailedJobOut, DetailedJobOutIntegrations, @@ -95,10 +124,19 @@ from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict from .eventout import EventOut, EventOutTypedDict +from .filepurpose import FilePurpose from .files_api_routes_delete_fileop import ( FilesAPIRoutesDeleteFileRequest, FilesAPIRoutesDeleteFileRequestTypedDict, ) +from .files_api_routes_download_fileop import ( + FilesAPIRoutesDownloadFileRequest, + 
FilesAPIRoutesDownloadFileRequestTypedDict, +) +from .files_api_routes_list_filesop import ( + FilesAPIRoutesListFilesRequest, + FilesAPIRoutesListFilesRequestTypedDict, +) from .files_api_routes_retrieve_fileop import ( FilesAPIRoutesRetrieveFileRequest, FilesAPIRoutesRetrieveFileRequestTypedDict, @@ -108,9 +146,8 @@ FileTypedDict, FilesAPIRoutesUploadFileMultiPartBodyParams, FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, - FilesAPIRoutesUploadFilePurpose, ) -from .fileschema import FileSchema, FileSchemaPurpose, FileSchemaTypedDict +from .fileschema import FileSchema, FileSchemaTypedDict from .fimcompletionrequest import ( FIMCompletionRequest, FIMCompletionRequestStop, @@ -129,7 +166,7 @@ FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict, ) -from .ftmodelcard import FTModelCard, FTModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict from .ftmodelout import FTModelOut, FTModelOutObject, FTModelOutTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( @@ -177,6 +214,18 @@ RepositoriesTypedDict, Status, ) +from .jobs_api_routes_batch_cancel_batch_jobop import ( + JobsAPIRoutesBatchCancelBatchJobRequest, + JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, +) +from .jobs_api_routes_batch_get_batch_jobop import ( + JobsAPIRoutesBatchGetBatchJobRequest, + JobsAPIRoutesBatchGetBatchJobRequestTypedDict, +) +from .jobs_api_routes_batch_get_batch_jobsop import ( + JobsAPIRoutesBatchGetBatchJobsRequest, + JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, +) from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, @@ -228,20 +277,16 @@ RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict, ) -from .retrievefileout import ( - RetrieveFileOut, - RetrieveFileOutPurpose, 
- RetrieveFileOutTypedDict, -) +from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict from .sampletype import SampleType from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .source import Source from .systemmessage import ( - Content, - ContentTypedDict, Role, SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, SystemMessageTypedDict, ) from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict @@ -260,7 +305,7 @@ UnarchiveFTModelOutTypedDict, ) from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict -from .uploadfileout import Purpose, UploadFileOut, UploadFileOutTypedDict +from .uploadfileout import UploadFileOut, UploadFileOutTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from .usermessage import ( UserMessage, @@ -280,9 +325,14 @@ WandbIntegrationType, WandbIntegrationTypedDict, ) -from .wandbintegrationout import Type, WandbIntegrationOut, WandbIntegrationOutTypedDict +from .wandbintegrationout import ( + WandbIntegrationOut, + WandbIntegrationOutType, + WandbIntegrationOutTypedDict, +) __all__ = [ + "APIEndpoint", "AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", @@ -305,10 +355,27 @@ "Arguments", "ArgumentsTypedDict", "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", "AssistantMessageRole", "AssistantMessageTypedDict", "BaseModelCard", "BaseModelCardTypedDict", + "BatchError", + "BatchErrorTypedDict", + "BatchJobIn", + "BatchJobInTypedDict", + "BatchJobOut", + "BatchJobOutObject", + "BatchJobOutTypedDict", + "BatchJobStatus", + "BatchJobsOut", + "BatchJobsOutObject", + "BatchJobsOutTypedDict", + "ChatClassificationRequest", + "ChatClassificationRequestInputs", + "ChatClassificationRequestInputsTypedDict", + "ChatClassificationRequestTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", @@ -327,6 +394,14 @@ 
"ChatCompletionStreamRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", + "ClassificationObject", + "ClassificationObjectTypedDict", + "ClassificationRequest", + "ClassificationRequestInputs", + "ClassificationRequestInputsTypedDict", + "ClassificationRequestTypedDict", + "ClassificationResponse", + "ClassificationResponseTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionEvent", @@ -377,22 +452,26 @@ "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelCard", + "FTModelCardType", "FTModelCardTypedDict", "FTModelOut", "FTModelOutObject", "FTModelOutTypedDict", "File", + "FilePurpose", "FileSchema", - "FileSchemaPurpose", "FileSchemaTypedDict", "FileTypedDict", "FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", + "FilesAPIRoutesDownloadFileRequest", + "FilesAPIRoutesDownloadFileRequestTypedDict", + "FilesAPIRoutesListFilesRequest", + "FilesAPIRoutesListFilesRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", - "FilesAPIRoutesUploadFilePurpose", "FineTuneableModel", "FinishReason", "Function", @@ -430,6 +509,12 @@ "JobMetadataOutTypedDict", "JobOut", "JobOutTypedDict", + "JobsAPIRoutesBatchCancelBatchJobRequest", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobRequest", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobsRequest", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", @@ -465,7 +550,8 @@ "ModelList", "ModelListTypedDict", "Object", - "Purpose", + "One", + "OneTypedDict", "QueryParamStatus", "Repositories", "RepositoriesTypedDict", @@ -473,7 +559,6 @@ "ResponseFormatTypedDict", 
"ResponseFormats", "RetrieveFileOut", - "RetrieveFileOutPurpose", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", @@ -489,6 +574,8 @@ "Stop", "StopTypedDict", "SystemMessage", + "SystemMessageContent", + "SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", "TextChunkType", @@ -510,6 +597,8 @@ "TrainingParametersIn", "TrainingParametersInTypedDict", "TrainingParametersTypedDict", + "Two", + "TwoTypedDict", "Type", "UnarchiveFTModelOut", "UnarchiveFTModelOutObject", @@ -529,6 +618,7 @@ "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", + "WandbIntegrationOutType", "WandbIntegrationOutTypedDict", "WandbIntegrationType", "WandbIntegrationTypedDict", diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index 1f0523a6..99d074d7 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -3,6 +3,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum @@ -11,8 +12,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict AgentsCompletionRequestStopTypedDict = Union[str, List[str]] @@ -24,13 +25,17 @@ AgentsCompletionRequestMessagesTypedDict = Union[ - UserMessageTypedDict, 
AssistantMessageTypedDict, ToolMessageTypedDict + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, ] AgentsCompletionRequestMessages = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")], ], @@ -51,8 +56,6 @@ class AgentsCompletionRequestTypedDict(TypedDict): r"""The ID of the agent to use for this completion.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[AgentsCompletionRequestStopTypedDict] @@ -62,6 +65,12 @@ class AgentsCompletionRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" class AgentsCompletionRequest(BaseModel): @@ -74,9 +83,6 @@ class AgentsCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -92,19 +98,30 @@ class AgentsCompletionRequest(BaseModel): tool_choice: Optional[AgentsCompletionRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", ] - nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = ["max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index 57d1177c..4e1757ac 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -3,6 +3,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum @@ -11,8 +12,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict AgentsCompletionStreamRequestStopTypedDict = Union[str, List[str]] @@ -24,13 +25,17 @@ AgentsCompletionStreamRequestMessagesTypedDict = Union[ - 
UserMessageTypedDict, AssistantMessageTypedDict, ToolMessageTypedDict + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, ] AgentsCompletionStreamRequestMessages = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], Annotated[ToolMessage, Tag("tool")], Annotated[UserMessage, Tag("user")], ], @@ -53,8 +58,6 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): r"""The ID of the agent to use for this completion.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[AgentsCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -63,6 +66,12 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" class AgentsCompletionStreamRequest(BaseModel): @@ -75,9 +84,6 @@ class AgentsCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[AgentsCompletionStreamRequestStop] = None @@ -92,19 +98,30 @@ class AgentsCompletionStreamRequest(BaseModel): tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", ] - nullable_fields = ["max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = ["max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/apiendpoint.py b/src/mistralai/models/apiendpoint.py new file mode 100644 index 00000000..00621eba --- /dev/null +++ b/src/mistralai/models/apiendpoint.py @@ -0,0 +1,9 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +APIEndpoint = Literal[ + "/v1/chat/completions", "/v1/embeddings", "/v1/fim/completions", "/v1/moderations" +] diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py index eeffa5d2..e78e98c4 100644 --- a/src/mistralai/models/archiveftmodelout.py +++ b/src/mistralai/models/archiveftmodelout.py @@ -2,9 +2,11 @@ from __future__ import annotations from mistralai.types import BaseModel +from mistralai.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict ArchiveFTModelOutObject = Literal["model"] @@ -12,14 +14,18 @@ class ArchiveFTModelOutTypedDict(TypedDict): id: str + object: ArchiveFTModelOutObject archived: NotRequired[bool] class ArchiveFTModelOut(BaseModel): id: str - # fmt: off - OBJECT: Annotated[Final[Optional[ArchiveFTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[ + Optional[ArchiveFTModelOutObject], AfterValidator(validate_const("model")) + ], + pydantic.Field(alias="object"), + ] = "model" archived: Optional[bool] = True diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py index 92af66a3..d7b929bf 100644 --- a/src/mistralai/models/assistantmessage.py +++ b/src/mistralai/models/assistantmessage.py @@ -1,31 +1,36 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict + + +AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +AssistantMessageContent = Union[str, List[ContentChunk]] AssistantMessageRole = Literal["assistant"] class AssistantMessageTypedDict(TypedDict): - content: NotRequired[Nullable[str]] + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): - content: OptionalNullable[str] = UNSET + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index 85af1f11..edb81741 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -4,10 +4,15 @@ from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, List, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +Type = Literal["base"] class BaseModelCardTypedDict(TypedDict): @@ -21,6 +26,8 @@ class BaseModelCardTypedDict(TypedDict): max_context_length: NotRequired[int] aliases: NotRequired[List[str]] deprecation: NotRequired[Nullable[datetime]] + default_model_temperature: NotRequired[Nullable[float]] + type: Type class BaseModelCard(BaseModel): @@ -44,9 +51,12 @@ class BaseModelCard(BaseModel): deprecation: OptionalNullable[datetime] = UNSET - # fmt: off - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "base" # type: ignore - # fmt: on + default_model_temperature: OptionalNullable[float] = UNSET + + TYPE: Annotated[ + Annotated[Optional[Type], AfterValidator(validate_const("base"))], + pydantic.Field(alias="type"), + ] = "base" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -59,9 +69,15 @@ def serialize_model(self, handler): "max_context_length", "aliases", "deprecation", + "default_model_temperature", "type", ] - nullable_fields = ["name", "description", "deprecation"] + 
nullable_fields = [ + "name", + "description", + "deprecation", + "default_model_temperature", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/batcherror.py b/src/mistralai/models/batcherror.py new file mode 100644 index 00000000..4f823446 --- /dev/null +++ b/src/mistralai/models/batcherror.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class BatchErrorTypedDict(TypedDict): + message: str + count: NotRequired[int] + + +class BatchError(BaseModel): + message: str + + count: Optional[int] = 1 diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py new file mode 100644 index 00000000..20f054b8 --- /dev/null +++ b/src/mistralai/models/batchjobin.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .apiendpoint import APIEndpoint +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Dict, List, Optional +from typing_extensions import NotRequired, TypedDict + + +class BatchJobInTypedDict(TypedDict): + input_files: List[str] + endpoint: APIEndpoint + model: str + metadata: NotRequired[Nullable[Dict[str, str]]] + timeout_hours: NotRequired[int] + + +class BatchJobIn(BaseModel): + input_files: List[str] + + endpoint: APIEndpoint + + model: str + + metadata: OptionalNullable[Dict[str, str]] = UNSET + + timeout_hours: Optional[int] = 24 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["metadata", "timeout_hours"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py new file mode 100644 index 00000000..677284f2 --- /dev/null +++ b/src/mistralai/models/batchjobout.py @@ -0,0 +1,117 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .batcherror import BatchError, BatchErrorTypedDict +from .batchjobstatus import BatchJobStatus +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +BatchJobOutObject = Literal["batch"] + + +class BatchJobOutTypedDict(TypedDict): + id: str + input_files: List[str] + endpoint: str + model: str + errors: List[BatchErrorTypedDict] + status: BatchJobStatus + created_at: int + total_requests: int + completed_requests: int + succeeded_requests: int + failed_requests: int + object: BatchJobOutObject + metadata: NotRequired[Nullable[Dict[str, Any]]] + output_file: NotRequired[Nullable[str]] + error_file: NotRequired[Nullable[str]] + started_at: NotRequired[Nullable[int]] + completed_at: NotRequired[Nullable[int]] + + +class BatchJobOut(BaseModel): + id: str + + input_files: List[str] + + endpoint: str + + model: str + + errors: List[BatchError] + + status: BatchJobStatus + + created_at: int + + total_requests: int + + completed_requests: int + + succeeded_requests: int + + failed_requests: int + + OBJECT: Annotated[ + Annotated[Optional[BatchJobOutObject], AfterValidator(validate_const("batch"))], + pydantic.Field(alias="object"), + ] = "batch" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + output_file: OptionalNullable[str] = UNSET + + error_file: OptionalNullable[str] = UNSET + + started_at: OptionalNullable[int] = UNSET + + completed_at: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "metadata", + "output_file", + "error_file", + "started_at", + "completed_at", + ] + nullable_fields = [ + 
"metadata", + "output_file", + "error_file", + "started_at", + "completed_at", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/batchjobsout.py b/src/mistralai/models/batchjobsout.py new file mode 100644 index 00000000..f8c63a33 --- /dev/null +++ b/src/mistralai/models/batchjobsout.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .batchjobout import BatchJobOut, BatchJobOutTypedDict +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +BatchJobsOutObject = Literal["list"] + + +class BatchJobsOutTypedDict(TypedDict): + total: int + data: NotRequired[List[BatchJobOutTypedDict]] + object: BatchJobsOutObject + + +class BatchJobsOut(BaseModel): + total: int + + data: Optional[List[BatchJobOut]] = None + + OBJECT: Annotated[ + Annotated[Optional[BatchJobsOutObject], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" diff --git a/src/mistralai/models/batchjobstatus.py b/src/mistralai/models/batchjobstatus.py new file mode 100644 index 00000000..4b28059b --- /dev/null +++ b/src/mistralai/models/batchjobstatus.py @@ -0,0 +1,15 @@ +"""Code generated 
by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +BatchJobStatus = Literal[ + "QUEUED", + "RUNNING", + "SUCCESS", + "FAILED", + "TIMEOUT_EXCEEDED", + "CANCELLATION_REQUESTED", + "CANCELLED", +] diff --git a/src/mistralai/models/chatclassificationrequest.py b/src/mistralai/models/chatclassificationrequest.py new file mode 100644 index 00000000..6b4cc136 --- /dev/null +++ b/src/mistralai/models/chatclassificationrequest.py @@ -0,0 +1,104 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.utils import get_discriminator +import pydantic +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Union +from typing_extensions import Annotated, TypedDict + + +TwoTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] + + +Two = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +OneTypedDict = Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, +] + + +One = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + 
Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatClassificationRequestInputsTypedDict = Union[ + List[OneTypedDict], List[List[TwoTypedDict]] +] +r"""Chat to classify""" + + +ChatClassificationRequestInputs = Union[List[One], List[List[Two]]] +r"""Chat to classify""" + + +class ChatClassificationRequestTypedDict(TypedDict): + inputs: ChatClassificationRequestInputsTypedDict + r"""Chat to classify""" + model: Nullable[str] + + +class ChatClassificationRequest(BaseModel): + inputs: Annotated[ChatClassificationRequestInputs, pydantic.Field(alias="input")] + r"""Chat to classify""" + + model: Nullable[str] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["model"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py index 20d674bf..f4f37fb4 100644 --- a/src/mistralai/models/chatcompletionchoice.py +++ b/src/mistralai/models/chatcompletionchoice.py @@ -2,11 +2,16 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai.types import BaseModel -from typing import Literal, TypedDict +from mistralai.types import BaseModel, UnrecognizedStr +from mistralai.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions 
import Annotated, TypedDict -FinishReason = Literal["stop", "length", "model_length", "error", "tool_calls"] +FinishReason = Union[ + Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr +] class ChatCompletionChoiceTypedDict(TypedDict): @@ -20,4 +25,4 @@ class ChatCompletionChoice(BaseModel): message: AssistantMessage - finish_reason: FinishReason + finish_reason: Annotated[FinishReason, PlainValidator(validate_open_enum(False))] diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index 78722167..6cdf97bf 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -12,8 +12,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict StopTypedDict = Union[str, List[str]] @@ -54,14 +54,12 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[StopTypedDict] @@ -71,6 +69,12 @@ class ChatCompletionRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -82,8 +86,8 @@ class ChatCompletionRequest(BaseModel): messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" @@ -91,9 +95,6 @@ class ChatCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -109,6 +110,15 @@ class ChatCompletionRequest(BaseModel): tool_choice: Optional[ChatCompletionRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" @@ -118,16 +128,25 @@ def serialize_model(self, handler): "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", "safe_prompt", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/chatcompletionresponse.py b/src/mistralai/models/chatcompletionresponse.py index 20c9010f..67f19651 100644 --- a/src/mistralai/models/chatcompletionresponse.py +++ b/src/mistralai/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class ChatCompletionResponseTypedDict(TypedDict): diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index ccba04af..c56f5230 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -12,8 +12,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from 
pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict ChatCompletionStreamRequestStopTypedDict = Union[str, List[str]] @@ -56,14 +56,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionStreamRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -72,6 +70,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): response_format: NotRequired[ResponseFormatTypedDict] tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -83,8 +87,8 @@ class ChatCompletionStreamRequest(BaseModel): messages: List[ChatCompletionStreamRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -92,9 +96,6 @@ class ChatCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[ChatCompletionStreamRequestStop] = None @@ -109,6 +110,15 @@ class ChatCompletionStreamRequest(BaseModel): tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + presence_penalty: Optional[float] = 0 + r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = 0 + r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + safe_prompt: Optional[bool] = False r"""Whether to inject a safety prompt before all conversations.""" @@ -118,16 +128,25 @@ def serialize_model(self, handler): "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "response_format", "tools", "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", "safe_prompt", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "tools"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/checkpointout.py b/src/mistralai/models/checkpointout.py index f818dae1..aefb7731 100644 --- a/src/mistralai/models/checkpointout.py +++ b/src/mistralai/models/checkpointout.py @@ -3,7 +3,7 @@ from __future__ import annotations from .metricout import MetricOut, MetricOutTypedDict from mistralai.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class CheckpointOutTypedDict(TypedDict): diff --git a/src/mistralai/models/classificationobject.py b/src/mistralai/models/classificationobject.py new file mode 100644 index 00000000..e4ee3624 --- /dev/null +++ b/src/mistralai/models/classificationobject.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassificationObjectTypedDict(TypedDict): + categories: NotRequired[Dict[str, bool]] + r"""Classifier result thresholded""" + category_scores: NotRequired[Dict[str, float]] + r"""Classifier result""" + + +class ClassificationObject(BaseModel): + categories: Optional[Dict[str, bool]] = None + r"""Classifier result thresholded""" + + category_scores: Optional[Dict[str, float]] = None + r"""Classifier result""" diff --git a/src/mistralai/models/classificationrequest.py b/src/mistralai/models/classificationrequest.py new file mode 100644 index 00000000..d2426c4d --- /dev/null +++ b/src/mistralai/models/classificationrequest.py @@ -0,0 +1,59 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import List, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassificationRequestInputsTypedDict = Union[str, List[str]] +r"""Text to classify.""" + + +ClassificationRequestInputs = Union[str, List[str]] +r"""Text to classify.""" + + +class ClassificationRequestTypedDict(TypedDict): + inputs: ClassificationRequestInputsTypedDict + r"""Text to classify.""" + model: NotRequired[Nullable[str]] + + +class ClassificationRequest(BaseModel): + inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] + r"""Text to classify.""" + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["model"] + nullable_fields = ["model"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in 
self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classificationresponse.py b/src/mistralai/models/classificationresponse.py new file mode 100644 index 00000000..5716db42 --- /dev/null +++ b/src/mistralai/models/classificationresponse.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .classificationobject import ClassificationObject, ClassificationObjectTypedDict +from mistralai.types import BaseModel +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassificationResponseTypedDict(TypedDict): + id: NotRequired[str] + model: NotRequired[str] + results: NotRequired[List[ClassificationObjectTypedDict]] + + +class ClassificationResponse(BaseModel): + id: Optional[str] = None + + model: Optional[str] = None + + results: Optional[List[ClassificationObject]] = None diff --git a/src/mistralai/models/completionchunk.py b/src/mistralai/models/completionchunk.py index 8859d22f..4d1fcfbf 100644 --- a/src/mistralai/models/completionchunk.py +++ b/src/mistralai/models/completionchunk.py @@ -7,8 +7,8 @@ ) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class CompletionChunkTypedDict(TypedDict): diff --git 
a/src/mistralai/models/completionevent.py b/src/mistralai/models/completionevent.py index b0286fde..cc859910 100644 --- a/src/mistralai/models/completionevent.py +++ b/src/mistralai/models/completionevent.py @@ -3,7 +3,7 @@ from __future__ import annotations from .completionchunk import CompletionChunk, CompletionChunkTypedDict from mistralai.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class CompletionEventTypedDict(TypedDict): diff --git a/src/mistralai/models/completionresponsestreamchoice.py b/src/mistralai/models/completionresponsestreamchoice.py index 227a2f76..b3b2a287 100644 --- a/src/mistralai/models/completionresponsestreamchoice.py +++ b/src/mistralai/models/completionresponsestreamchoice.py @@ -2,13 +2,16 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from mistralai.utils import validate_open_enum from pydantic import model_serializer -from typing import Literal, TypedDict +from pydantic.functional_validators import PlainValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypedDict -CompletionResponseStreamChoiceFinishReason = Literal[ - "stop", "length", "error", "tool_calls" +CompletionResponseStreamChoiceFinishReason = Union[ + Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr ] @@ -23,7 +26,10 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + finish_reason: Annotated[ + Nullable[CompletionResponseStreamChoiceFinishReason], + PlainValidator(validate_open_enum(False)), + ] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py 
b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py index 2093245d..4acb8d53 100644 --- a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py +++ b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/deletefileout.py b/src/mistralai/models/deletefileout.py index dc1a87f2..2b346ec4 100644 --- a/src/mistralai/models/deletefileout.py +++ b/src/mistralai/models/deletefileout.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class DeleteFileOutTypedDict(TypedDict): diff --git a/src/mistralai/models/deletemodelout.py b/src/mistralai/models/deletemodelout.py index 96dbeb13..c1b1effc 100644 --- a/src/mistralai/models/deletemodelout.py +++ b/src/mistralai/models/deletemodelout.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class DeleteModelOutTypedDict(TypedDict): diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py index 7b7fe796..7a966e09 100644 --- a/src/mistralai/models/deltamessage.py +++ b/src/mistralai/models/deltamessage.py @@ -1,30 +1,37 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Union +from typing_extensions import NotRequired, TypedDict + + +ContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +Content = Union[str, List[ContentChunk]] class DeltaMessageTypedDict(TypedDict): - role: NotRequired[str] - content: NotRequired[Nullable[str]] + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[ContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): - role: Optional[str] = None + role: OptionalNullable[str] = UNSET - content: OptionalNullable[str] = UNSET + content: OptionalNullable[Content] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["content", "tool_calls"] + nullable_fields = ["role", "content", "tool_calls"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/detailedjobout.py b/src/mistralai/models/detailedjobout.py index 336190c2..a4be707d 100644 --- a/src/mistralai/models/detailedjobout.py +++ b/src/mistralai/models/detailedjobout.py @@ -9,10 +9,12 @@ from .trainingparameters import TrainingParameters, TrainingParametersTypedDict from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, List, Literal, Optional, TypedDict -from 
typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict DetailedJobOutStatus = Literal[ @@ -54,6 +56,7 @@ class DetailedJobOutTypedDict(TypedDict): modified_at: int training_files: List[str] validation_files: NotRequired[Nullable[List[str]]] + object: DetailedJobOutObject fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] integrations: NotRequired[Nullable[List[DetailedJobOutIntegrationsTypedDict]]] @@ -87,9 +90,12 @@ class DetailedJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET - # fmt: off - OBJECT: Annotated[Final[Optional[DetailedJobOutObject]], pydantic.Field(alias="object")] = "job" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[ + Optional[DetailedJobOutObject], AfterValidator(validate_const("job")) + ], + pydantic.Field(alias="object"), + ] = "job" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 5655472d..61e181ce 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -4,8 +4,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer -from typing import List, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict InputsTypedDict = Union[str, List[str]] @@ -19,7 +19,7 @@ class EmbeddingRequestTypedDict(TypedDict): inputs: InputsTypedDict r"""Text to embed.""" - model: str + model: NotRequired[str] r"""ID of the model to use.""" encoding_format: NotRequired[Nullable[str]] r"""The format to return the embeddings in.""" @@ -29,7 +29,7 @@ class EmbeddingRequest(BaseModel): inputs: 
Annotated[Inputs, pydantic.Field(alias="input")] r"""Text to embed.""" - model: str + model: Optional[str] = "mistral-embed" r"""ID of the model to use.""" encoding_format: OptionalNullable[str] = UNSET @@ -37,7 +37,7 @@ class EmbeddingRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["encoding_format"] + optional_fields = ["model", "encoding_format"] nullable_fields = ["encoding_format"] null_default_fields = [] diff --git a/src/mistralai/models/embeddingresponse.py b/src/mistralai/models/embeddingresponse.py index d85ceec7..aae6fa60 100644 --- a/src/mistralai/models/embeddingresponse.py +++ b/src/mistralai/models/embeddingresponse.py @@ -4,7 +4,8 @@ from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel -from typing import List, TypedDict +from typing import List +from typing_extensions import TypedDict class EmbeddingResponseTypedDict(TypedDict): diff --git a/src/mistralai/models/embeddingresponsedata.py b/src/mistralai/models/embeddingresponsedata.py index f37995eb..01e2765f 100644 --- a/src/mistralai/models/embeddingresponsedata.py +++ b/src/mistralai/models/embeddingresponsedata.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class EmbeddingResponseDataTypedDict(TypedDict): diff --git a/src/mistralai/models/eventout.py b/src/mistralai/models/eventout.py index fa427f15..a9f22874 100644 --- a/src/mistralai/models/eventout.py +++ b/src/mistralai/models/eventout.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Any, 
Dict, TypedDict -from typing_extensions import NotRequired +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict class EventOutTypedDict(TypedDict): diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py new file mode 100644 index 00000000..8628b308 --- /dev/null +++ b/src/mistralai/models/filepurpose.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import UnrecognizedStr +from typing import Literal, Union + + +FilePurpose = Union[Literal["fine-tune", "batch"], UnrecognizedStr] diff --git a/src/mistralai/models/files_api_routes_delete_fileop.py b/src/mistralai/models/files_api_routes_delete_fileop.py index def67911..a84a7a8e 100644 --- a/src/mistralai/models/files_api_routes_delete_fileop.py +++ b/src/mistralai/models/files_api_routes_delete_fileop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/files_api_routes_download_fileop.py b/src/mistralai/models/files_api_routes_download_fileop.py new file mode 100644 index 00000000..168a7fa6 --- /dev/null +++ b/src/mistralai/models/files_api_routes_download_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDownloadFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/files_api_routes_list_filesop.py b/src/mistralai/models/files_api_routes_list_filesop.py new file mode 100644 index 00000000..03a33af7 --- /dev/null +++ b/src/mistralai/models/files_api_routes_list_filesop.py @@ -0,0 +1,96 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, QueryParamMetadata, validate_open_enum +from pydantic import model_serializer +from pydantic.functional_validators import PlainValidator +from typing import List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + sample_type: NotRequired[Nullable[List[SampleType]]] + source: NotRequired[Nullable[List[Source]]] + search: NotRequired[Nullable[str]] + purpose: NotRequired[Nullable[FilePurpose]] + + +class FilesAPIRoutesListFilesRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + sample_type: Annotated[ + OptionalNullable[ + 
List[Annotated[SampleType, PlainValidator(validate_open_enum(False))]] + ], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + source: Annotated[ + OptionalNullable[ + List[Annotated[Source, PlainValidator(validate_open_enum(False))]] + ], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + purpose: Annotated[ + Annotated[ + OptionalNullable[FilePurpose], PlainValidator(validate_open_enum(False)) + ], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "sample_type", + "source", + "search", + "purpose", + ] + nullable_fields = ["sample_type", "source", "search", "purpose"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/files_api_routes_retrieve_fileop.py b/src/mistralai/models/files_api_routes_retrieve_fileop.py index bfbad272..0c2a95ef 100644 --- a/src/mistralai/models/files_api_routes_retrieve_fileop.py +++ b/src/mistralai/models/files_api_routes_retrieve_fileop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import 
Annotated +from typing_extensions import Annotated, TypedDict class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py index 8eae7af7..4f2bb0c2 100644 --- a/src/mistralai/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/models/files_api_routes_upload_fileop.py @@ -1,16 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .filepurpose import FilePurpose import io -from mistralai.types import BaseModel, UnrecognizedStr +from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_open_enum import pydantic from pydantic.functional_validators import PlainValidator -from typing import Final, IO, Literal, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired - - -FilesAPIRoutesUploadFilePurpose = Union[Literal["fine-tune"], UnrecognizedStr] +from typing import IO, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict class FileTypedDict(TypedDict): @@ -49,6 +47,7 @@ class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): file=@path/to/your/file.jsonl ``` """ + purpose: NotRequired[FilePurpose] class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): @@ -68,6 +67,7 @@ class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): ``` """ - # fmt: off - PURPOSE: Annotated[Final[Annotated[Optional[FilesAPIRoutesUploadFilePurpose], PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose"), FieldMetadata(multipart=True)] = "fine-tune" # type: ignore - # fmt: on + purpose: Annotated[ + Annotated[Optional[FilePurpose], PlainValidator(validate_open_enum(False))], + FieldMetadata(multipart=True), + ] = None diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py 
index 1ace0fab..952d23a0 100644 --- a/src/mistralai/models/fileschema.py +++ b/src/mistralai/models/fileschema.py @@ -1,26 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .filepurpose import FilePurpose from .sampletype import SampleType from .source import Source -from mistralai.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, - UnrecognizedStr, -) +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum -import pydantic from pydantic import model_serializer from pydantic.functional_validators import PlainValidator -from typing import Final, Literal, TypedDict, Union -from typing_extensions import Annotated, NotRequired - - -FileSchemaPurpose = Union[Literal["fine-tune"], UnrecognizedStr] -r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" +from typing_extensions import Annotated, NotRequired, TypedDict class FileSchemaTypedDict(TypedDict): @@ -34,6 +22,7 @@ class FileSchemaTypedDict(TypedDict): r"""The UNIX timestamp (in seconds) of the event.""" filename: str r"""The name of the uploaded file.""" + purpose: FilePurpose sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] @@ -55,14 +44,11 @@ class FileSchema(BaseModel): filename: str r"""The name of the uploaded file.""" - sample_type: SampleType + purpose: Annotated[FilePurpose, PlainValidator(validate_open_enum(False))] - source: Source + sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] - # fmt: off - PURPOSE: Annotated[Final[Annotated[FileSchemaPurpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore - # fmt: on - r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" + source: Annotated[Source, PlainValidator(validate_open_enum(False))] num_lines: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py index 4f00d3dc..409aa256 100644 --- a/src/mistralai/models/fimcompletionrequest.py +++ b/src/mistralai/models/fimcompletionrequest.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypedDict FIMCompletionRequestStopTypedDict = Union[str, List[str]] @@ -23,14 +23,12 @@ class FIMCompletionRequestTypedDict(TypedDict): """ prompt: str r"""The text/code to complete.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" stop: NotRequired[FIMCompletionRequestStopTypedDict] @@ -39,6 +37,8 @@ class FIMCompletionRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" class FIMCompletionRequest(BaseModel): @@ -51,8 +51,8 @@ class FIMCompletionRequest(BaseModel): prompt: str r"""The text/code to complete.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -60,9 +60,6 @@ class FIMCompletionRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" @@ -75,19 +72,29 @@ class FIMCompletionRequest(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "suffix", + "min_tokens", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "suffix", + "min_tokens", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/fimcompletionresponse.py b/src/mistralai/models/fimcompletionresponse.py index d9e11df3..9fe05820 100644 --- a/src/mistralai/models/fimcompletionresponse.py +++ b/src/mistralai/models/fimcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class FIMCompletionResponseTypedDict(TypedDict): diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py index 708542d9..8f9c1dac 100644 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypedDict 
FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] @@ -23,14 +23,12 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): """ prompt: str r"""The text/code to complete.""" - temperature: NotRequired[float] - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: NotRequired[float] r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" stream: NotRequired[bool] stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" @@ -38,6 +36,8 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" class FIMCompletionStreamRequest(BaseModel): @@ -50,8 +50,8 @@ class FIMCompletionStreamRequest(BaseModel): prompt: str r"""The text/code to complete.""" - temperature: Optional[float] = 0.7 - r"""What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.""" + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" top_p: Optional[float] = 1 r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" @@ -59,9 +59,6 @@ class FIMCompletionStreamRequest(BaseModel): max_tokens: OptionalNullable[int] = UNSET r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - stream: Optional[bool] = True stop: Optional[FIMCompletionStreamRequestStop] = None @@ -73,19 +70,29 @@ class FIMCompletionStreamRequest(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "temperature", "top_p", "max_tokens", - "min_tokens", "stream", "stop", "random_seed", "suffix", + "min_tokens", + ] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "suffix", + "min_tokens", ] - nullable_fields = ["model", "max_tokens", "min_tokens", "random_seed", "suffix"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/ftmodelcapabilitiesout.py b/src/mistralai/models/ftmodelcapabilitiesout.py index fe66d303..b5e1e521 100644 --- a/src/mistralai/models/ftmodelcapabilitiesout.py +++ b/src/mistralai/models/ftmodelcapabilitiesout.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class FTModelCapabilitiesOutTypedDict(TypedDict): diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py index b282a09d..9a640a28 100644 --- a/src/mistralai/models/ftmodelcard.py +++ b/src/mistralai/models/ftmodelcard.py @@ -4,10 +4,15 @@ from .modelcapabilities 
import ModelCapabilities, ModelCapabilitiesTypedDict from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, List, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +FTModelCardType = Literal["fine-tuned"] class FTModelCardTypedDict(TypedDict): @@ -25,6 +30,8 @@ class FTModelCardTypedDict(TypedDict): max_context_length: NotRequired[int] aliases: NotRequired[List[str]] deprecation: NotRequired[Nullable[datetime]] + default_model_temperature: NotRequired[Nullable[float]] + type: FTModelCardType archived: NotRequired[bool] @@ -55,9 +62,14 @@ class FTModelCard(BaseModel): deprecation: OptionalNullable[datetime] = UNSET - # fmt: off - TYPE: Annotated[Final[Optional[str]], pydantic.Field(alias="type")] = "fine-tuned" # type: ignore - # fmt: on + default_model_temperature: OptionalNullable[float] = UNSET + + TYPE: Annotated[ + Annotated[ + Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) + ], + pydantic.Field(alias="type"), + ] = "fine-tuned" archived: Optional[bool] = False @@ -72,10 +84,16 @@ def serialize_model(self, handler): "max_context_length", "aliases", "deprecation", + "default_model_temperature", "type", "archived", ] - nullable_fields = ["name", "description", "deprecation"] + nullable_fields = [ + "name", + "description", + "deprecation", + "default_model_temperature", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/ftmodelout.py b/src/mistralai/models/ftmodelout.py index 664dd5d4..e8d6864c 100644 --- a/src/mistralai/models/ftmodelout.py +++ b/src/mistralai/models/ftmodelout.py @@ -6,10 +6,12 @@ 
FTModelCapabilitiesOutTypedDict, ) from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, List, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict FTModelOutObject = Literal["model"] @@ -23,6 +25,7 @@ class FTModelOutTypedDict(TypedDict): archived: bool capabilities: FTModelCapabilitiesOutTypedDict job: str + object: FTModelOutObject name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] @@ -44,9 +47,10 @@ class FTModelOut(BaseModel): job: str - # fmt: off - OBJECT: Annotated[Final[Optional[FTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[Optional[FTModelOutObject], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" name: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/function.py b/src/mistralai/models/function.py index a872eea1..942b042f 100644 --- a/src/mistralai/models/function.py +++ b/src/mistralai/models/function.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import Any, Dict, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class FunctionTypedDict(TypedDict): diff --git a/src/mistralai/models/functioncall.py b/src/mistralai/models/functioncall.py index 941cc5e9..a57d2350 100644 --- a/src/mistralai/models/functioncall.py +++ b/src/mistralai/models/functioncall.py @@ -2,7 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing 
import Any, Dict, TypedDict, Union +from typing import Any, Dict, Union +from typing_extensions import TypedDict ArgumentsTypedDict = Union[Dict[str, Any], str] diff --git a/src/mistralai/models/functionname.py b/src/mistralai/models/functionname.py index 20fc9bef..0a6c0b14 100644 --- a/src/mistralai/models/functionname.py +++ b/src/mistralai/models/functionname.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class FunctionNameTypedDict(TypedDict): diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py index cb8bad68..715db6b7 100644 --- a/src/mistralai/models/githubrepositoryin.py +++ b/src/mistralai/models/githubrepositoryin.py @@ -2,10 +2,12 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict GithubRepositoryInType = Literal["github"] @@ -15,6 +17,7 @@ class GithubRepositoryInTypedDict(TypedDict): name: str owner: str token: str + type: GithubRepositoryInType ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -26,9 +29,12 @@ class GithubRepositoryIn(BaseModel): token: str - # fmt: off - TYPE: Annotated[Final[Optional[GithubRepositoryInType]], pydantic.Field(alias="type")] = "github" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[ + Optional[GithubRepositoryInType], AfterValidator(validate_const("github")) + ], + pydantic.Field(alias="type"), + ] = "github" ref: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/githubrepositoryout.py 
b/src/mistralai/models/githubrepositoryout.py index 7f023c75..5a0ce31a 100644 --- a/src/mistralai/models/githubrepositoryout.py +++ b/src/mistralai/models/githubrepositoryout.py @@ -2,10 +2,12 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict GithubRepositoryOutType = Literal["github"] @@ -15,6 +17,7 @@ class GithubRepositoryOutTypedDict(TypedDict): name: str owner: str commit_id: str + type: GithubRepositoryOutType ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -26,9 +29,12 @@ class GithubRepositoryOut(BaseModel): commit_id: str - # fmt: off - TYPE: Annotated[Final[Optional[GithubRepositoryOutType]], pydantic.Field(alias="type")] = "github" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[ + Optional[GithubRepositoryOutType], AfterValidator(validate_const("github")) + ], + pydantic.Field(alias="type"), + ] = "github" ref: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py index 991b8bd6..37f2dd76 100644 --- a/src/mistralai/models/httpvalidationerror.py +++ b/src/mistralai/models/httpvalidationerror.py @@ -12,8 +12,6 @@ class HTTPValidationErrorData(BaseModel): class HTTPValidationError(Exception): - r"""Validation Error""" - data: HTTPValidationErrorData def __init__(self, data: HTTPValidationErrorData): diff --git a/src/mistralai/models/imageurl.py b/src/mistralai/models/imageurl.py index af24a1a9..1e8276ad 100644 --- a/src/mistralai/models/imageurl.py +++ b/src/mistralai/models/imageurl.py @@ -3,8 +3,7 @@ from 
__future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import TypedDict -from typing_extensions import NotRequired +from typing_extensions import NotRequired, TypedDict class ImageURLTypedDict(TypedDict): diff --git a/src/mistralai/models/imageurlchunk.py b/src/mistralai/models/imageurlchunk.py index 44409020..1c37fe3b 100644 --- a/src/mistralai/models/imageurlchunk.py +++ b/src/mistralai/models/imageurlchunk.py @@ -3,9 +3,11 @@ from __future__ import annotations from .imageurl import ImageURL, ImageURLTypedDict from mistralai.types import BaseModel +from mistralai.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict, Union -from typing_extensions import Annotated +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, TypedDict ImageURLChunkType = Literal["image_url"] @@ -20,6 +22,7 @@ class ImageURLChunkTypedDict(TypedDict): r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" image_url: ImageURLChunkImageURLTypedDict + type: ImageURLChunkType class ImageURLChunk(BaseModel): @@ -27,6 +30,9 @@ class ImageURLChunk(BaseModel): image_url: ImageURLChunkImageURL - # fmt: off - TYPE: Annotated[Final[Optional[ImageURLChunkType]], pydantic.Field(alias="type")] = "image_url" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[ + Optional[ImageURLChunkType], AfterValidator(validate_const("image_url")) + ], + pydantic.Field(alias="type"), + ] = "image_url" diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py index db875c11..a294d292 100644 --- a/src/mistralai/models/jobin.py +++ b/src/mistralai/models/jobin.py @@ -8,8 +8,8 @@ from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, 
UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, TypedDict -from typing_extensions import NotRequired +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict JobInIntegrationsTypedDict = WandbIntegrationTypedDict diff --git a/src/mistralai/models/jobmetadataout.py b/src/mistralai/models/jobmetadataout.py index 690540da..d1eeb4f2 100644 --- a/src/mistralai/models/jobmetadataout.py +++ b/src/mistralai/models/jobmetadataout.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import TypedDict -from typing_extensions import NotRequired +from typing_extensions import NotRequired, TypedDict class JobMetadataOutTypedDict(TypedDict): diff --git a/src/mistralai/models/jobout.py b/src/mistralai/models/jobout.py index a716cb7f..71edce01 100644 --- a/src/mistralai/models/jobout.py +++ b/src/mistralai/models/jobout.py @@ -7,10 +7,12 @@ from .trainingparameters import TrainingParameters, TrainingParametersTypedDict from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, List, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict Status = Literal[ @@ -61,6 +63,8 @@ class JobOutTypedDict(TypedDict): r"""A list containing the IDs of uploaded files that contain training data.""" validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain validation data.""" + object: Object + r"""The 
object type of the fine-tuning job.""" fine_tuned_model: NotRequired[Nullable[str]] r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" suffix: NotRequired[Nullable[str]] @@ -102,9 +106,10 @@ class JobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - # fmt: off - OBJECT: Annotated[Final[Optional[Object]], pydantic.Field(alias="object")] = "job" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[Optional[Object], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" r"""The object type of the fine-tuning job.""" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py b/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py new file mode 100644 index 00000000..5b83d534 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py new file mode 100644 index 00000000..d9c7b398 --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py new file mode 100644 index 00000000..8f0c66ca --- /dev/null +++ b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -0,0 +1,95 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .batchjobstatus import BatchJobStatus +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + model: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + created_after: NotRequired[Nullable[datetime]] + created_by_me: NotRequired[bool] + status: NotRequired[Nullable[BatchJobStatus]] + + +class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + + status: Annotated[ + OptionalNullable[BatchJobStatus], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "model", + "metadata", + "created_after", + "created_by_me", + "status", + ] + nullable_fields = ["model", "metadata", 
"created_after", "status"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py index da521422..d728efd1 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py index e84b0825..b72ff42f 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class 
JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py index 05706126..896d34f5 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py index 3320b100..b51b1958 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -5,8 +5,8 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import FieldMetadata, QueryParamMetadata from pydantic import model_serializer -from typing import Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict QueryParamStatus = Literal[ diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py index bc1b6d47..3e7989a7 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -3,8 +3,7 @@ from __future__ import annotations from 
mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py index acc6bf42..a84274ff 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py index 50298ce1..11e23f8c 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -4,8 +4,7 @@ from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing import TypedDict -from typing_extensions import Annotated +from typing_extensions import Annotated, TypedDict class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py index bd5edf69..316bf89f 100644 --- a/src/mistralai/models/jobsout.py +++ b/src/mistralai/models/jobsout.py @@ -3,9 
+3,11 @@ from __future__ import annotations from .jobout import JobOut, JobOutTypedDict from mistralai.types import BaseModel +from mistralai.utils import validate_const import pydantic -from typing import Final, List, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict JobsOutObject = Literal["list"] @@ -14,6 +16,7 @@ class JobsOutTypedDict(TypedDict): total: int data: NotRequired[List[JobOutTypedDict]] + object: JobsOutObject class JobsOut(BaseModel): @@ -21,6 +24,7 @@ class JobsOut(BaseModel): data: Optional[List[JobOut]] = None - # fmt: off - OBJECT: Annotated[Final[Optional[JobsOutObject]], pydantic.Field(alias="object")] = "list" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[Optional[JobsOutObject], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py index 677cad8e..df6b3d35 100644 --- a/src/mistralai/models/legacyjobmetadataout.py +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -2,10 +2,12 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict LegacyJobMetadataOutObject = Literal["job.metadata"] @@ -31,6 +33,7 @@ class LegacyJobMetadataOutTypedDict(TypedDict): r"""The number of complete passes through the entire training dataset.""" training_steps: 
NotRequired[Nullable[int]] r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + object: LegacyJobMetadataOutObject class LegacyJobMetadataOut(BaseModel): @@ -64,9 +67,13 @@ class LegacyJobMetadataOut(BaseModel): training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - # fmt: off - OBJECT: Annotated[Final[Optional[LegacyJobMetadataOutObject]], pydantic.Field(alias="object")] = "job.metadata" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[ + Optional[LegacyJobMetadataOutObject], + AfterValidator(validate_const("job.metadata")), + ], + pydantic.Field(alias="object"), + ] = "job.metadata" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/listfilesout.py b/src/mistralai/models/listfilesout.py index 928a7be9..b032f632 100644 --- a/src/mistralai/models/listfilesout.py +++ b/src/mistralai/models/listfilesout.py @@ -3,15 +3,19 @@ from __future__ import annotations from .fileschema import FileSchema, FileSchemaTypedDict from mistralai.types import BaseModel -from typing import List, TypedDict +from typing import List +from typing_extensions import TypedDict class ListFilesOutTypedDict(TypedDict): data: List[FileSchemaTypedDict] object: str + total: int class ListFilesOut(BaseModel): data: List[FileSchema] object: str + + total: int diff --git a/src/mistralai/models/metricout.py b/src/mistralai/models/metricout.py index 99fe9fb0..7583d927 100644 --- a/src/mistralai/models/metricout.py +++ b/src/mistralai/models/metricout.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel, 
Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import TypedDict -from typing_extensions import NotRequired +from typing_extensions import NotRequired, TypedDict class MetricOutTypedDict(TypedDict): diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py index af981cc9..961f8664 100644 --- a/src/mistralai/models/modelcapabilities.py +++ b/src/mistralai/models/modelcapabilities.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class ModelCapabilitiesTypedDict(TypedDict): diff --git a/src/mistralai/models/modellist.py b/src/mistralai/models/modellist.py index 759b9310..97ae4c38 100644 --- a/src/mistralai/models/modellist.py +++ b/src/mistralai/models/modellist.py @@ -6,8 +6,8 @@ from mistralai.types import BaseModel from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag -from typing import List, Optional, TypedDict, Union -from typing_extensions import Annotated, NotRequired +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict DataTypedDict = Union[BaseModelCardTypedDict, FTModelCardTypedDict] diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py index bf538698..aa60ba5d 100644 --- a/src/mistralai/models/responseformat.py +++ b/src/mistralai/models/responseformat.py @@ -3,8 +3,8 @@ from __future__ import annotations from .responseformats import ResponseFormats from mistralai.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class ResponseFormatTypedDict(TypedDict): diff --git 
a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py index 37c52c95..dd4bcccc 100644 --- a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py +++ b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py @@ -6,8 +6,8 @@ from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator from pydantic import Discriminator, Tag -from typing import TypedDict, Union -from typing_extensions import Annotated +from typing import Union +from typing_extensions import Annotated, TypedDict class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py index 9cc9bb2d..6bf4a5bf 100644 --- a/src/mistralai/models/retrievefileout.py +++ b/src/mistralai/models/retrievefileout.py @@ -1,26 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .filepurpose import FilePurpose from .sampletype import SampleType from .source import Source -from mistralai.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, - UnrecognizedStr, -) +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum -import pydantic from pydantic import model_serializer from pydantic.functional_validators import PlainValidator -from typing import Final, Literal, TypedDict, Union -from typing_extensions import Annotated, NotRequired - - -RetrieveFileOutPurpose = Union[Literal["fine-tune"], UnrecognizedStr] -r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" +from typing_extensions import Annotated, NotRequired, TypedDict class RetrieveFileOutTypedDict(TypedDict): @@ -34,8 +22,10 @@ class RetrieveFileOutTypedDict(TypedDict): r"""The UNIX timestamp (in seconds) of the event.""" filename: str r"""The name of the uploaded file.""" + purpose: FilePurpose sample_type: SampleType source: Source + deleted: bool num_lines: NotRequired[Nullable[int]] @@ -55,14 +45,13 @@ class RetrieveFileOut(BaseModel): filename: str r"""The name of the uploaded file.""" - sample_type: SampleType + purpose: Annotated[FilePurpose, PlainValidator(validate_open_enum(False))] - source: Source + sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] + + source: Annotated[Source, PlainValidator(validate_open_enum(False))] - # fmt: off - PURPOSE: Annotated[Final[Annotated[RetrieveFileOutPurpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore - # fmt: on - r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + deleted: bool num_lines: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/sampletype.py b/src/mistralai/models/sampletype.py index 83424f3a..adc90ec7 100644 --- a/src/mistralai/models/sampletype.py +++ b/src/mistralai/models/sampletype.py @@ -1,7 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.types import UnrecognizedStr +from typing import Literal, Union -SampleType = Literal["pretrain", "instruct"] +SampleType = Union[ + Literal["pretrain", "instruct", "batch_request", "batch_result", "batch_error"], + UnrecognizedStr, +] diff --git a/src/mistralai/models/security.py b/src/mistralai/models/security.py index 5bd4c7ed..cf05ba8f 100644 --- a/src/mistralai/models/security.py +++ b/src/mistralai/models/security.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, SecurityMetadata -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class SecurityTypedDict(TypedDict): diff --git a/src/mistralai/models/source.py b/src/mistralai/models/source.py index 66d09aeb..c21550f2 100644 --- a/src/mistralai/models/source.py +++ b/src/mistralai/models/source.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.types import UnrecognizedStr +from typing import Literal, Union -Source = Literal["upload", "repository"] +Source = Union[Literal["upload", "repository", "mistral"], UnrecognizedStr] diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py index 47dc7781..f6f30743 100644 --- a/src/mistralai/models/systemmessage.py +++ b/src/mistralai/models/systemmessage.py @@ -3,25 +3,25 @@ from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict from mistralai.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict -ContentTypedDict = Union[str, List[TextChunkTypedDict]] +SystemMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] -Content = Union[str, List[TextChunk]] +SystemMessageContent = Union[str, List[TextChunk]] Role = Literal["system"] class SystemMessageTypedDict(TypedDict): - content: ContentTypedDict + content: SystemMessageContentTypedDict role: NotRequired[Role] class SystemMessage(BaseModel): - content: Content + content: SystemMessageContent role: Optional[Role] = "system" diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py index 9c1f9d7d..130a91c5 100644 --- a/src/mistralai/models/textchunk.py +++ b/src/mistralai/models/textchunk.py @@ -2,9 +2,11 @@ from __future__ import annotations from mistralai.types import BaseModel +from mistralai.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict TextChunkType = Literal["text"] @@ -12,11 +14,13 @@ class 
TextChunkTypedDict(TypedDict): text: str + type: TextChunkType class TextChunk(BaseModel): text: str - # fmt: off - TYPE: Annotated[Final[Optional[TextChunkType]], pydantic.Field(alias="type")] = "text" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[Optional[TextChunkType], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" diff --git a/src/mistralai/models/tool.py b/src/mistralai/models/tool.py index 51295f39..6e746df3 100644 --- a/src/mistralai/models/tool.py +++ b/src/mistralai/models/tool.py @@ -6,8 +6,8 @@ from mistralai.types import BaseModel from mistralai.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolTypedDict(TypedDict): diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py index 66d570e6..827fd00d 100644 --- a/src/mistralai/models/toolcall.py +++ b/src/mistralai/models/toolcall.py @@ -6,8 +6,8 @@ from mistralai.types import BaseModel from mistralai.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict class ToolCallTypedDict(TypedDict): diff --git a/src/mistralai/models/toolchoice.py b/src/mistralai/models/toolchoice.py index fc36512a..3b7d60e0 100644 --- a/src/mistralai/models/toolchoice.py +++ b/src/mistralai/models/toolchoice.py @@ -6,8 +6,8 @@ from mistralai.types import BaseModel from mistralai.utils import validate_open_enum from pydantic.functional_validators import PlainValidator -from typing import Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from typing import Optional +from 
typing_extensions import Annotated, NotRequired, TypedDict class ToolChoiceTypedDict(TypedDict): diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py index 3c4be635..2d469d09 100644 --- a/src/mistralai/models/toolmessage.py +++ b/src/mistralai/models/toolmessage.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Literal, Optional, TypedDict -from typing_extensions import NotRequired +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict ToolMessageRole = Literal["tool"] diff --git a/src/mistralai/models/trainingfile.py b/src/mistralai/models/trainingfile.py index 1917d377..99bd49dd 100644 --- a/src/mistralai/models/trainingfile.py +++ b/src/mistralai/models/trainingfile.py @@ -2,8 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class TrainingFileTypedDict(TypedDict): diff --git a/src/mistralai/models/trainingparameters.py b/src/mistralai/models/trainingparameters.py index 885f3ff5..cc2b037a 100644 --- a/src/mistralai/models/trainingparameters.py +++ b/src/mistralai/models/trainingparameters.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class TrainingParametersTypedDict(TypedDict): @@ -14,6 +14,7 @@ class TrainingParametersTypedDict(TypedDict): warmup_fraction: NotRequired[Nullable[float]] epochs: NotRequired[Nullable[float]] fim_ratio: 
NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] class TrainingParameters(BaseModel): @@ -29,6 +30,8 @@ class TrainingParameters(BaseModel): fim_ratio: OptionalNullable[float] = UNSET + seq_len: OptionalNullable[int] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -38,6 +41,7 @@ def serialize_model(self, handler): "warmup_fraction", "epochs", "fim_ratio", + "seq_len", ] nullable_fields = [ "training_steps", @@ -45,6 +49,7 @@ def serialize_model(self, handler): "warmup_fraction", "epochs", "fim_ratio", + "seq_len", ] null_default_fields = [] diff --git a/src/mistralai/models/trainingparametersin.py b/src/mistralai/models/trainingparametersin.py index 8ecb027b..7d2e414b 100644 --- a/src/mistralai/models/trainingparametersin.py +++ b/src/mistralai/models/trainingparametersin.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Optional, TypedDict -from typing_extensions import NotRequired +from typing import Optional +from typing_extensions import NotRequired, TypedDict class TrainingParametersInTypedDict(TypedDict): @@ -20,6 +20,7 @@ class TrainingParametersInTypedDict(TypedDict): r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" epochs: NotRequired[Nullable[float]] fim_ratio: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] class TrainingParametersIn(BaseModel): @@ -41,6 +42,8 @@ class TrainingParametersIn(BaseModel): fim_ratio: OptionalNullable[float] = UNSET + seq_len: OptionalNullable[int] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -50,6 +53,7 @@ def serialize_model(self, handler): "warmup_fraction", "epochs", "fim_ratio", + "seq_len", ] nullable_fields = [ "training_steps", @@ -57,6 +61,7 @@ def serialize_model(self, handler): "warmup_fraction", "epochs", "fim_ratio", + "seq_len", ] null_default_fields = [] diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py index 6eac8200..6540df1f 100644 --- a/src/mistralai/models/unarchiveftmodelout.py +++ b/src/mistralai/models/unarchiveftmodelout.py @@ -2,9 +2,11 @@ from __future__ import annotations from mistralai.types import BaseModel +from mistralai.utils import validate_const import pydantic -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict UnarchiveFTModelOutObject = Literal["model"] @@ -12,14 +14,18 @@ class UnarchiveFTModelOutTypedDict(TypedDict): id: str + object: UnarchiveFTModelOutObject archived: NotRequired[bool] class UnarchiveFTModelOut(BaseModel): id: str - # fmt: off - OBJECT: Annotated[Final[Optional[UnarchiveFTModelOutObject]], pydantic.Field(alias="object")] = "model" # type: ignore - # fmt: on + OBJECT: Annotated[ + Annotated[ + Optional[UnarchiveFTModelOutObject], AfterValidator(validate_const("model")) + ], + pydantic.Field(alias="object"), + ] = 
"model" archived: Optional[bool] = False diff --git a/src/mistralai/models/updateftmodelin.py b/src/mistralai/models/updateftmodelin.py index c22c5115..603f031c 100644 --- a/src/mistralai/models/updateftmodelin.py +++ b/src/mistralai/models/updateftmodelin.py @@ -3,8 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import TypedDict -from typing_extensions import NotRequired +from typing_extensions import NotRequired, TypedDict class UpdateFTModelInTypedDict(TypedDict): diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py index 7754ae3d..23e25d5b 100644 --- a/src/mistralai/models/uploadfileout.py +++ b/src/mistralai/models/uploadfileout.py @@ -1,26 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .filepurpose import FilePurpose from .sampletype import SampleType from .source import Source -from mistralai.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, - UnrecognizedStr, -) +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum -import pydantic from pydantic import model_serializer from pydantic.functional_validators import PlainValidator -from typing import Final, Literal, TypedDict, Union -from typing_extensions import Annotated, NotRequired - - -Purpose = Union[Literal["fine-tune"], UnrecognizedStr] -r"""The intended purpose of the uploaded file. 
Only accepts fine-tuning (`fine-tune`) for now.""" +from typing_extensions import Annotated, NotRequired, TypedDict class UploadFileOutTypedDict(TypedDict): @@ -34,6 +22,7 @@ class UploadFileOutTypedDict(TypedDict): r"""The UNIX timestamp (in seconds) of the event.""" filename: str r"""The name of the uploaded file.""" + purpose: FilePurpose sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] @@ -55,14 +44,11 @@ class UploadFileOut(BaseModel): filename: str r"""The name of the uploaded file.""" - sample_type: SampleType + purpose: Annotated[FilePurpose, PlainValidator(validate_open_enum(False))] - source: Source + sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] - # fmt: off - PURPOSE: Annotated[Final[Annotated[Purpose, PlainValidator(validate_open_enum(False))]], pydantic.Field(alias="purpose")] = "fine-tune" # type: ignore - # fmt: on - r"""The intended purpose of the uploaded file. Only accepts fine-tuning (`fine-tune`) for now.""" + source: Annotated[Source, PlainValidator(validate_open_enum(False))] num_lines: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/usageinfo.py b/src/mistralai/models/usageinfo.py index e8113e3b..f7a6e99e 100644 --- a/src/mistralai/models/usageinfo.py +++ b/src/mistralai/models/usageinfo.py @@ -2,7 +2,7 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import TypedDict +from typing_extensions import TypedDict class UsageInfoTypedDict(TypedDict): diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py index db4176ad..af698955 100644 --- a/src/mistralai/models/usermessage.py +++ b/src/mistralai/models/usermessage.py @@ -2,9 +2,10 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, TypedDict, Union -from typing_extensions import NotRequired +from mistralai.types import 
BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypedDict UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] @@ -17,11 +18,41 @@ class UserMessageTypedDict(TypedDict): - content: UserMessageContentTypedDict + content: Nullable[UserMessageContentTypedDict] role: NotRequired[UserMessageRole] class UserMessage(BaseModel): - content: UserMessageContent + content: Nullable[UserMessageContent] role: Optional[UserMessageRole] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role"] + nullable_fields = ["content"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/validationerror.py b/src/mistralai/models/validationerror.py index ed394a60..03ce9845 100644 --- a/src/mistralai/models/validationerror.py +++ b/src/mistralai/models/validationerror.py @@ -2,7 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from typing import List, TypedDict, Union +from typing import List, Union +from typing_extensions import TypedDict LocTypedDict = Union[str, int] diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py index 7659e274..d82f921a 100644 --- a/src/mistralai/models/wandbintegration.py +++ b/src/mistralai/models/wandbintegration.py @@ -2,10 +2,12 @@ from __future__ import 
annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict WandbIntegrationType = Literal["wandb"] @@ -16,6 +18,7 @@ class WandbIntegrationTypedDict(TypedDict): r"""The name of the project that the new run will be created under.""" api_key: str r"""The WandB API key to use for authentication.""" + type: WandbIntegrationType name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] @@ -28,9 +31,12 @@ class WandbIntegration(BaseModel): api_key: str r"""The WandB API key to use for authentication.""" - # fmt: off - TYPE: Annotated[Final[Optional[WandbIntegrationType]], pydantic.Field(alias="type")] = "wandb" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[ + Optional[WandbIntegrationType], AfterValidator(validate_const("wandb")) + ], + pydantic.Field(alias="type"), + ] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py index 5635af79..5514b595 100644 --- a/src/mistralai/models/wandbintegrationout.py +++ b/src/mistralai/models/wandbintegrationout.py @@ -2,18 +2,21 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const import pydantic from pydantic import model_serializer -from typing import Final, Literal, Optional, TypedDict -from typing_extensions import Annotated, NotRequired +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict -Type = Literal["wandb"] +WandbIntegrationOutType = Literal["wandb"] class WandbIntegrationOutTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" + type: WandbIntegrationOutType name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] @@ -23,9 +26,12 @@ class WandbIntegrationOut(BaseModel): project: str r"""The name of the project that the new run will be created under.""" - # fmt: off - TYPE: Annotated[Final[Optional[Type]], pydantic.Field(alias="type")] = "wandb" # type: ignore - # fmt: on + TYPE: Annotated[ + Annotated[ + Optional[WandbIntegrationOutType], AfterValidator(validate_const("wandb")) + ], + pydantic.Field(alias="type"), + ] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index 32fdcbce..44e95ce3 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -76,15 +76,17 @@ def list( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -153,15 +155,17 @@ async def list_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -244,15 +248,17 @@ def retrieve( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API 
error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -335,15 +341,17 @@ async def retrieve_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -419,15 +427,17 @@ def delete( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -503,15 +513,17 @@ async def delete_async( data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) if utils.match_response(http_res, ["4XX", "5XX"], 
"*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -594,15 +606,17 @@ def update( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -685,15 +699,17 @@ async def update_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ 
-767,15 +783,17 @@ def archive( http_res.text, Optional[models.ArchiveFTModelOut] ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -849,15 +867,17 @@ async def archive_async( http_res.text, Optional[models.ArchiveFTModelOut] ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -931,15 +951,17 @@ def unarchive( http_res.text, Optional[models.UnarchiveFTModelOut] ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) @@ -1013,14 +1035,16 @@ async def unarchive_async( 
http_res.text, Optional[models.UnarchiveFTModelOut] ) if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( - "API error occurred", http_res.status_code, http_res.text, http_res + "API error occurred", http_res.status_code, http_res_text, http_res ) content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", http_res.status_code, - http_res.text, + http_res_text, http_res, ) diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 05029abb..71c60fcf 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -9,7 +9,9 @@ from mistralai import models, utils from mistralai._hooks import SDKHooks from mistralai.agents import Agents +from mistralai.batch import Batch from mistralai.chat import Chat +from mistralai.classifiers import Classifiers from mistralai.embeddings import Embeddings from mistralai.files import Files from mistralai.fim import Fim @@ -27,6 +29,7 @@ class Mistral(BaseSDK): files: Files r"""Files API""" fine_tuning: FineTuning + batch: Batch chat: Chat r"""Chat Completion API.""" fim: Fim @@ -35,6 +38,8 @@ class Mistral(BaseSDK): r"""Agents API.""" embeddings: Embeddings r"""Embeddings API.""" + classifiers: Classifiers + r"""Classifiers API.""" def __init__( self, @@ -118,7 +123,9 @@ def _init_sdks(self): self.models = Models(self.sdk_configuration) self.files = Files(self.sdk_configuration) self.fine_tuning = FineTuning(self.sdk_configuration) + self.batch = Batch(self.sdk_configuration) self.chat = Chat(self.sdk_configuration) self.fim = Fim(self.sdk_configuration) self.agents = Agents(self.sdk_configuration) self.embeddings = Embeddings(self.sdk_configuration) + self.classifiers = Classifiers(self.sdk_configuration) diff --git a/src/mistralai/sdkconfiguration.py 
b/src/mistralai/sdkconfiguration.py index 0a7c3322..101757f7 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -10,10 +10,10 @@ from typing import Callable, Dict, Optional, Tuple, Union -SERVER_PROD = "prod" -r"""Production server""" +SERVER_EU = "eu" +r"""EU Production server""" SERVERS = { - SERVER_PROD: "https://api.mistral.ai", + SERVER_EU: "https://api.mistral.ai", } """Contains the list of servers available to the SDK""" @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.1.0" - gen_version: str = "2.415.8" - user_agent: str = "speakeasy-sdk/python 1.1.0 2.415.8 0.0.2 mistralai" + sdk_version: str = "1.2.0" + gen_version: str = "2.452.0" + user_agent: str = "speakeasy-sdk/python 1.2.0 2.452.0 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None @@ -41,7 +41,7 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} if not self.server: - self.server = SERVER_PROD + self.server = SERVER_EU if self.server not in SERVERS: raise ValueError(f'Invalid server "{self.server}"') diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py index 74109c11..151c87d4 100644 --- a/src/mistralai/utils/__init__.py +++ b/src/mistralai/utils/__init__.py @@ -28,6 +28,10 @@ serialize_float, serialize_int, stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, validate_decimal, validate_float, validate_int, @@ -81,10 +85,14 @@ "serialize_request_body", "SerializedRequestBody", "stream_to_text", + "stream_to_text_async", + "stream_to_bytes", + "stream_to_bytes_async", "template_url", "unmarshal", "unmarshal_json", 
"validate_decimal", + "validate_const", "validate_float", "validate_int", "validate_open_enum", diff --git a/src/mistralai/utils/annotations.py b/src/mistralai/utils/annotations.py index 0d17472b..5b3bbb02 100644 --- a/src/mistralai/utils/annotations.py +++ b/src/mistralai/utils/annotations.py @@ -1,5 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from enum import Enum from typing import Any def get_discriminator(model: Any, fieldname: str, key: str) -> str: @@ -10,10 +11,20 @@ def get_discriminator(model: Any, fieldname: str, key: str) -> str: raise ValueError(f'Could not find discriminator key {key} in {model}') from e if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' fieldname = fieldname.upper() if hasattr(model, fieldname): - return f'{getattr(model, fieldname)}' + attr = getattr(model, fieldname) + + if isinstance(attr, Enum): + return f'{attr.value}' + + return f'{attr}' raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py index 85d57f43..c5eb3659 100644 --- a/src/mistralai/utils/serializers.py +++ b/src/mistralai/utils/serializers.py @@ -116,6 +116,19 @@ def validate(e): return validate +def validate_const(v): + def validate(c): + if is_optional_type(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + def unmarshal_json(raw, typ: Any) -> Any: return unmarshal(from_json(raw), typ) @@ -172,6 +185,18 @@ def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + 
return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + def get_pydantic_model(data: Any, typ: Any) -> Any: if not _contains_pydantic_model(data): return unmarshal(data, typ) From a53f0d17a82b903faa11f5bfc76c3ad629f89f35 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 8 Nov 2024 15:13:27 +0100 Subject: [PATCH 080/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.2.1=20(#154)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.434.7 * update the gcp name logic --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .speakeasy/gen.lock | 6 ++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 5 +-- RELEASES.md | 12 ++++++- .../mistralai_gcp/src/mistralai_gcp/sdk.py | 31 ++++++++++++------- pyproject.toml | 2 +- src/mistralai/_version.py | 2 +- src/mistralai/sdkconfiguration.py | 4 +-- 8 files changed, 42 insertions(+), 22 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 513be40d..6ad8a4b8 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -3,10 +3,10 @@ id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: docChecksum: ee15d853ecc28d415d6b33191893a6ff docVersion: 0.0.2 - speakeasyVersion: 1.434.4 + speakeasyVersion: 1.434.7 generationVersion: 2.452.0 - releaseVersion: 1.2.0 - configChecksum: 17ae764aa509274d1cf2d75af5bf6abb + releaseVersion: 1.2.1 + configChecksum: 5d659f51b118508e47958545e6b539d2 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 5a4f1a05..501ac536 100644 --- 
a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.0 + version: 1.2.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 46a7d2e7..5e498aa3 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.434.4 +speakeasyVersion: 1.434.7 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure @@ -18,6 +18,7 @@ sources: sourceBlobDigest: sha256:559403eaaa97c021eaf0022adddb1066694d879a946c87057e942806d5a2a2a2 tags: - latest + - main targets: mistralai-azure-sdk: source: mistral-azure-source @@ -35,7 +36,7 @@ targets: sourceRevisionDigest: sha256:e658442ebfc83351cbb7873fb17b03f07ff9edebd8eddfce5577e2c5c7bfafce sourceBlobDigest: sha256:559403eaaa97c021eaf0022adddb1066694d879a946c87057e942806d5a2a2a2 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:e56faedc510d1c011d19e5fbbaa9d41917ffd6c22833b0795a61aa6da1cbca9b + codeSamplesRevisionDigest: sha256:f48900efe513aa95888e4035452ba1d54a2b1f8e872454ccb5bdca24d4fc7b09 workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/RELEASES.md b/RELEASES.md index b504c7f8..6ffcd17d 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -58,4 +58,14 @@ Based on: ### Generated - [python v1.2.0] . ### Releases -- [PyPI v1.2.0] https://pypi.org/project/mistralai/1.2.0 - . \ No newline at end of file +- [PyPI v1.2.0] https://pypi.org/project/mistralai/1.2.0 - . + +## 2024-11-08 13:41:24 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.434.7 (2.452.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.1] . 
+### Releases +- [PyPI v1.2.1] https://pypi.org/project/mistralai/1.2.1 - . \ No newline at end of file diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index bb4c1dea..7e7adbdc 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" import json -from typing import Optional, Union +from typing import Optional, Tuple, Union import google.auth import google.auth.credentials @@ -20,6 +20,19 @@ from .utils.logger import Logger, NoOpLogger from .utils.retries import RetryConfig +LEGACY_MODEL_ID_FORMAT = { + "codestral-2405": "codestral@2405", + "mistral-large-2407": "mistral-large@2407", + "mistral-nemo-2407": "mistral-nemo@2407", +} + +def get_model_info(model: str) -> Tuple[str,str]: + # if the model requiers the legacy fomat, use it, else do nothing. + model_id = LEGACY_MODEL_ID_FORMAT.get(model, model) + model = "-".join(model.split("-")[:-1]) + return model, model_id + + class MistralGoogleCloud(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" @@ -140,28 +153,24 @@ def __init__(self, region: str, project_id: str): def before_request( self, hook_ctx, request: httpx.Request ) -> Union[httpx.Request, Exception]: - # The goal of this function is to template in the region, project, model, and model_version into the URL path + # The goal of this function is to template in the region, project and model into the URL path # We do this here so that the API remains more user-friendly - model = None - model_version = None + model_id = None new_content = None if request.content: parsed = json.loads(request.content.decode("utf-8")) model_raw = parsed.get("model") - model = "-".join(model_raw.split("-")[:-1]) - model_version = model_raw.split("-")[-1] - parsed["model"] = model + model_name, model_id = get_model_info(model_raw) + parsed["model"] = model_name new_content = json.dumps(parsed).encode("utf-8") - if model == "": + if model_id == "": raise models.SDKError("model must be provided") - if model_version is None: - raise models.SDKError("model_version must be provided") stream = "streamRawPredict" in request.url.path specifier = "streamRawPredict" if stream else "rawPredict" - url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model}@{model_version}:{specifier}" + url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model_id}:{specifier}" headers = dict(request.headers) # Delete content-length header as it will need to be recalculated diff --git a/pyproject.toml b/pyproject.toml index 31aea5d6..a239400b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.2.0" +version = "1.2.1" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 752c9ed6..b7e6d2fe 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.2.0" +__version__: str = "1.2.1" try: if __package__ is not None: diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 101757f7..cc20551d 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.0" + sdk_version: str = "1.2.1" gen_version: str = "2.452.0" - user_agent: str = "speakeasy-sdk/python 1.2.0 2.452.0 0.0.2 mistralai" + user_agent: str = "speakeasy-sdk/python 1.2.1 2.452.0 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From 9a64ab6c53004aa725f6d509c1dd904bad8af90b Mon Sep 17 00:00:00 2001 From: Ryan Albert <42415738+ryan-timothy-albert@users.noreply.github.com> Date: Fri, 8 Nov 2024 09:43:26 -0800 Subject: [PATCH 081/223] feat: don't source push to registry (#156) --- .speakeasy/workflow.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 164d3995..3b3c6d55 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -4,18 +4,12 @@ sources: mistral-azure-source: inputs: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main - registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main mistral-google-cloud-source: inputs: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main - registry: - location: 
registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main mistral-openapi: inputs: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main - registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main targets: mistralai-azure-sdk: target: python From ab110099c955c8b78670149fde7a57b2d61edaa4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 19:27:39 +0100 Subject: [PATCH 082/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.2.2=20(#155)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.438.1 * gen gcp * regen azure --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .speakeasy/gen.lock | 10 ++-- .speakeasy/gen.yaml | 3 +- .speakeasy/workflow.lock | 46 +++++++++---------- README.md | 21 ++++----- RELEASES.md | 12 ++++- packages/mistralai_azure/.speakeasy/gen.lock | 8 ++-- packages/mistralai_azure/.speakeasy/gen.yaml | 1 + .../src/mistralai_azure/chat.py | 16 +++---- .../models/chatcompletionrequest.py | 4 +- .../models/chatcompletionstreamrequest.py | 4 +- .../src/mistralai_azure/sdkconfiguration.py | 4 +- packages/mistralai_gcp/.speakeasy/gen.lock | 8 ++-- packages/mistralai_gcp/.speakeasy/gen.yaml | 1 + .../mistralai_gcp/src/mistralai_gcp/chat.py | 16 +++---- .../models/chatcompletionrequest.py | 4 +- .../models/chatcompletionstreamrequest.py | 4 +- .../src/mistralai_gcp/sdkconfiguration.py | 4 +- pyproject.toml | 2 +- src/mistralai/_version.py | 2 +- src/mistralai/agents.py | 16 +++---- src/mistralai/chat.py | 16 +++---- .../models/agentscompletionrequest.py | 4 +- .../models/agentscompletionstreamrequest.py | 4 +- src/mistralai/models/chatcompletionrequest.py | 4 +- .../models/chatcompletionstreamrequest.py | 4 +- 
src/mistralai/sdkconfiguration.py | 6 +-- 26 files changed, 117 insertions(+), 107 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 6ad8a4b8..26d49c2e 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: ee15d853ecc28d415d6b33191893a6ff + docChecksum: 87f5a2bcfbc64bec79241e2b0de25b9f docVersion: 0.0.2 - speakeasyVersion: 1.434.7 - generationVersion: 2.452.0 - releaseVersion: 1.2.1 - configChecksum: 5d659f51b118508e47958545e6b539d2 + speakeasyVersion: 1.438.1 + generationVersion: 2.457.2 + releaseVersion: 1.2.2 + configChecksum: bfa571f32b68bdb4917b69135c2eb818 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 501ac536..2419fdba 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.1 + version: 1.2.2 additionalDependencies: dev: pytest: ^8.2.2 @@ -21,6 +21,7 @@ python: authors: - Mistral clientServerStatusCodesAsErrors: true + defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API. 
enumFormat: union envVarPrefix: MISTRAL diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 5e498aa3..734e61ce 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,42 +1,46 @@ -speakeasyVersion: 1.434.7 +speakeasyVersion: 1.438.1 sources: mistral-azure-source: - sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:8fda8235e30128cc8e1c4e1b828316551d03b584568789f262dc287b81d584ee - sourceBlobDigest: sha256:3c039e1f8a2230a86b0e1acec6224f6b8d6f181fb222b6b3b39d38b52075a8ec + sourceNamespace: mistral-azure-source + sourceRevisionDigest: sha256:2e3e3792ec63f3d59dd2a4de2f97ea6a80bd46f8905576069c54810feb930c12 + sourceBlobDigest: sha256:da059f78d331ea36f2ec69dfc6c5aa65be0bf0eda011597281cea6499b7ebac8 tags: - latest mistral-google-cloud-source: - sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:b2ce8e0e63674ea7ccfa3a75ff231bb97a39748331bcc0a3629f29c158f5b31e - sourceBlobDigest: sha256:a895adbf903776492b28daa3dd8c624f509decbbfe9ca6cda6510a33226604be + sourceNamespace: mistral-google-cloud-source + sourceRevisionDigest: sha256:22da209c58cb1591b3fde77467a9affce33c874724b220dd68f11a8f5fe92fbe + sourceBlobDigest: sha256:79c11900db52030ed8d8fff8066f9fe716670f4eadca41936f2cbc1a270fb087 tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:e658442ebfc83351cbb7873fb17b03f07ff9edebd8eddfce5577e2c5c7bfafce - sourceBlobDigest: sha256:559403eaaa97c021eaf0022adddb1066694d879a946c87057e942806d5a2a2a2 + sourceRevisionDigest: sha256:e44702b93f6a8ac450f1d85b4197f6640f8efb3d5e06be98418ea79acd8f70be + sourceBlobDigest: sha256:94a9891a3bdf3fafea5c41cee226c5e366c206e91e39e734cd2d1163af74f688 tags: - latest - main targets: mistralai-azure-sdk: source: mistral-azure-source - sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:8fda8235e30128cc8e1c4e1b828316551d03b584568789f262dc287b81d584ee - sourceBlobDigest: 
sha256:3c039e1f8a2230a86b0e1acec6224f6b8d6f181fb222b6b3b39d38b52075a8ec + sourceNamespace: mistral-azure-source + sourceRevisionDigest: sha256:2e3e3792ec63f3d59dd2a4de2f97ea6a80bd46f8905576069c54810feb930c12 + sourceBlobDigest: sha256:da059f78d331ea36f2ec69dfc6c5aa65be0bf0eda011597281cea6499b7ebac8 + codeSamplesNamespace: mistral-openapi-azure-code-samples + codeSamplesRevisionDigest: sha256:e90c3293cb23081fed599d4528b168335a2ac58deb76ecc3afd7140d49b68816 mistralai-gcp-sdk: source: mistral-google-cloud-source - sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:b2ce8e0e63674ea7ccfa3a75ff231bb97a39748331bcc0a3629f29c158f5b31e - sourceBlobDigest: sha256:a895adbf903776492b28daa3dd8c624f509decbbfe9ca6cda6510a33226604be + sourceNamespace: mistral-google-cloud-source + sourceRevisionDigest: sha256:22da209c58cb1591b3fde77467a9affce33c874724b220dd68f11a8f5fe92fbe + sourceBlobDigest: sha256:79c11900db52030ed8d8fff8066f9fe716670f4eadca41936f2cbc1a270fb087 + codeSamplesNamespace: mistral-openapi-google-cloud-code-samples + codeSamplesRevisionDigest: sha256:8539ff0f99be949906dad55250e1d230dfebf062e8f7c38398713e11c6bb48ec mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:e658442ebfc83351cbb7873fb17b03f07ff9edebd8eddfce5577e2c5c7bfafce - sourceBlobDigest: sha256:559403eaaa97c021eaf0022adddb1066694d879a946c87057e942806d5a2a2a2 + sourceRevisionDigest: sha256:e44702b93f6a8ac450f1d85b4197f6640f8efb3d5e06be98418ea79acd8f70be + sourceBlobDigest: sha256:94a9891a3bdf3fafea5c41cee226c5e366c206e91e39e734cd2d1163af74f688 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:f48900efe513aa95888e4035452ba1d54a2b1f8e872454ccb5bdca24d4fc7b09 + codeSamplesRevisionDigest: sha256:9e9f8f0da360280a8d42e2ccbee423871b74abd07e872e10892636ba4be4e3a5 workflow: workflowVersion: 1.0.0 speakeasyVersion: latest @@ -44,18 +48,12 @@ workflow: mistral-azure-source: inputs: - location: 
registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main - registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main mistral-google-cloud-source: inputs: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main - registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main mistral-openapi: inputs: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main - registry: - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main targets: mistralai-azure-sdk: target: python diff --git a/README.md b/README.md index a73c1333..24c744ab 100644 --- a/README.md +++ b/README.md @@ -556,10 +556,10 @@ By default, an API error will raise a models.SDKError exception, which has the f When custom error responses are specified for an operation, the SDK may also raise their associated exceptions. You can refer to respective *Errors* tables in SDK docs for more details on possible exception types for each operation. For example, the `list_async` method may raise the following exceptions: -| Error Type | Status Code | Content Type | -| -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| Error Type | Status Code | Content Type | +| -------------------------- | ----------- | ---------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | ### Example @@ -595,9 +595,9 @@ except models.SDKError as e: You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. 
This table lists the names associated with the available servers: -| Name | Server | Variables | -| ----- | ------ | --------- | -| `eu` | `https://api.mistral.ai` | None | +| Name | Server | +| ---- | ------------------------ | +| `eu` | `https://api.mistral.ai` | #### Example @@ -618,7 +618,6 @@ if res is not None: ``` - ### Override Server URL Per-Client The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example: @@ -728,9 +727,9 @@ s = Mistral(async_client=CustomClient(httpx.AsyncClient())) This SDK supports the following security scheme globally: -| Name | Type | Scheme | Environment Variable | -| -------------------- | -------------------- | -------------------- | -------------------- | -| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | +| Name | Type | Scheme | Environment Variable | +| --------- | ---- | ----------- | -------------------- | +| `api_key` | http | HTTP Bearer | `MISTRAL_API_KEY` | To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: ```python diff --git a/RELEASES.md b/RELEASES.md index 6ffcd17d..19d5c3be 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -68,4 +68,14 @@ Based on: ### Generated - [python v1.2.1] . ### Releases -- [PyPI v1.2.1] https://pypi.org/project/mistralai/1.2.1 - . \ No newline at end of file +- [PyPI v1.2.1] https://pypi.org/project/mistralai/1.2.1 - . + +## 2024-11-12 18:04:16 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.438.1 (2.457.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.2] . 
+### Releases +- [PyPI v1.2.2] https://pypi.org/project/mistralai/1.2.2 - . \ No newline at end of file diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index bc550ff2..cc18655a 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 0f195020b1080b5c3b1fc5834d30a929 + docChecksum: 1eac78d7698423fcd3bc58124f860d30 docVersion: 0.0.2 - speakeasyVersion: 1.434.4 - generationVersion: 2.452.0 + speakeasyVersion: 1.438.1 + generationVersion: 2.457.2 releaseVersion: 1.2.0 - configChecksum: 0600a305e49d44a5fcb3a5a33dc00999 + configChecksum: f69f129b40abc60e88685a36201ebb87 published: true features: python: diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index 7280691b..dcf160a4 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -21,6 +21,7 @@ python: authors: - Mistral clientServerStatusCodesAsErrors: true + defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in Azure. 
enumFormat: union fixFlags: diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index e1d33901..fb443e52 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -34,8 +34,8 @@ def stream( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -182,8 +182,8 @@ async def stream_async( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -338,8 +338,8 @@ def complete( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -490,8 +490,8 @@ async def complete_async( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py 
b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index fd3cb7bd..beedf520 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -116,10 +116,10 @@ class ChatCompletionRequest(BaseModel): tool_choice: Optional[ChatCompletionRequestToolChoice] = None - presence_penalty: Optional[float] = 0 + presence_penalty: Optional[float] = None r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: Optional[float] = 0 + frequency_penalty: Optional[float] = None r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 8f71f892..9d78371b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -116,10 +116,10 @@ class ChatCompletionStreamRequest(BaseModel): tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None - presence_penalty: Optional[float] = 0 + presence_penalty: Optional[float] = None r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: Optional[float] = 0 + frequency_penalty: Optional[float] = None r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 1a319444..75216e1f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -29,8 +29,8 @@ class SDKConfiguration: language: str = "python" openapi_doc_version: str = "0.0.2" sdk_version: str = "1.2.0" - gen_version: str = "2.452.0" - user_agent: str = "speakeasy-sdk/python 1.2.0 2.452.0 0.0.2 mistralai_azure" + gen_version: str = "2.457.2" + user_agent: str = "speakeasy-sdk/python 1.2.0 2.457.2 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 6add3601..e1778244 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 46baf8da7636ea1bf44557571d011045 + docChecksum: a4b0284a2171be4279ac869462fc2ba5 docVersion: 0.0.2 - speakeasyVersion: 1.434.4 - generationVersion: 2.452.0 + speakeasyVersion: 1.438.1 + generationVersion: 2.457.2 releaseVersion: 1.2.0 - configChecksum: 6036ab871ca1cf21d35bfc75dc25089b + configChecksum: 5a6e207048dd06f191872088627d9018 
published: true features: python: diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 97e9faf1..360352e7 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -24,6 +24,7 @@ python: authors: - Mistral clientServerStatusCodesAsErrors: true + defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in GCP. enumFormat: union fixFlags: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index 19c74351..17913668 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -34,8 +34,8 @@ def stream( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -179,8 +179,8 @@ async def stream_async( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -332,8 +332,8 @@ def complete( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -481,8 +481,8 @@ async def complete_async( 
models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index e1c263b7..1f956d0a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -114,10 +114,10 @@ class ChatCompletionRequest(BaseModel): tool_choice: Optional[ChatCompletionRequestToolChoice] = None - presence_penalty: Optional[float] = 0 + presence_penalty: Optional[float] = None r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: Optional[float] = 0 + frequency_penalty: Optional[float] = None r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index 5fc40850..f12a5477 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -114,10 +114,10 @@ class ChatCompletionStreamRequest(BaseModel): tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None - presence_penalty: Optional[float] = 0 + presence_penalty: Optional[float] = None r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: Optional[float] = 0 + frequency_penalty: Optional[float] = None r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index fdb296cc..d94a4e40 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -29,8 +29,8 @@ class SDKConfiguration: language: str = "python" openapi_doc_version: str = "0.0.2" sdk_version: str = "1.2.0" - gen_version: str = "2.452.0" - user_agent: str = "speakeasy-sdk/python 1.2.0 2.452.0 0.0.2 mistralai-gcp" + gen_version: str = "2.457.2" + user_agent: str = "speakeasy-sdk/python 1.2.0 2.457.2 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/pyproject.toml b/pyproject.toml index a239400b..95ac3053 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.2.1" +version = "1.2.2" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index b7e6d2fe..c6e29cc9 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.2.1" +__version__: str = "1.2.2" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 1b5c6a1f..a45bcec3 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -40,8 +40,8 @@ def complete( models.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -185,8 +185,8 @@ async def complete_async( models.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -330,8 +330,8 @@ def stream( models.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -481,8 +481,8 @@ async def stream_async( models.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, 
n: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index dd5ca693..53313ca7 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -34,8 +34,8 @@ def complete( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -178,8 +178,8 @@ async def complete_async( models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -330,8 +330,8 @@ def stream( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -490,8 +490,8 @@ async def stream_async( models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, - presence_penalty: Optional[float] = 0, - frequency_penalty: Optional[float] = 0, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, safe_prompt: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index 
99d074d7..bce326a5 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -98,10 +98,10 @@ class AgentsCompletionRequest(BaseModel): tool_choice: Optional[AgentsCompletionRequestToolChoice] = None - presence_penalty: Optional[float] = 0 + presence_penalty: Optional[float] = None r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: Optional[float] = 0 + frequency_penalty: Optional[float] = None r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index 4e1757ac..94cc983a 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -98,10 +98,10 @@ class AgentsCompletionStreamRequest(BaseModel): tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None - presence_penalty: Optional[float] = 0 + presence_penalty: Optional[float] = None r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: Optional[float] = 0 + frequency_penalty: Optional[float] = None r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index 6cdf97bf..b3435d52 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -110,10 +110,10 @@ class ChatCompletionRequest(BaseModel): tool_choice: Optional[ChatCompletionRequestToolChoice] = None - presence_penalty: Optional[float] = 0 + presence_penalty: Optional[float] = None r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: Optional[float] = 0 + frequency_penalty: Optional[float] = None r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index c56f5230..a98eb335 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -110,10 +110,10 @@ class ChatCompletionStreamRequest(BaseModel): tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None - presence_penalty: Optional[float] = 0 + presence_penalty: Optional[float] = None r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: Optional[float] = 0 + frequency_penalty: Optional[float] = None r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index cc20551d..72d59f51 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.1" - gen_version: str = "2.452.0" - user_agent: str = "speakeasy-sdk/python 1.2.1 2.452.0 0.0.2 mistralai" + sdk_version: str = "1.2.2" + gen_version: str = "2.457.2" + user_agent: str = "speakeasy-sdk/python 1.2.2 2.457.2 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From 0fd42415435e5c6314e252148802aed77a1d0c6a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 16:34:29 +0100 Subject: [PATCH 083/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.2.3=20(#157)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.440.1 * update gcp client * update azure client --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .speakeasy/gen.lock | 20 ++++++----- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 34 +++++++++---------- RELEASES.md | 12 ++++++- 
docs/models/contentchunk.md | 6 ++++ docs/models/referencechunk.md | 9 +++++ docs/models/referencechunktype.md | 8 +++++ docs/models/toolmessage.md | 12 +++---- docs/models/toolmessagecontent.md | 17 ++++++++++ docs/sdks/classifiers/README.md | 7 +++- packages/mistralai_azure/.speakeasy/gen.lock | 18 ++++++---- packages/mistralai_azure/.speakeasy/gen.yaml | 2 +- .../docs/models/contentchunk.md | 6 ++++ .../docs/models/referencechunk.md | 9 +++++ .../docs/models/referencechunktype.md | 8 +++++ .../docs/models/toolmessage.md | 12 +++---- .../docs/models/toolmessagecontent.md | 17 ++++++++++ packages/mistralai_azure/pyproject.toml | 2 +- .../src/mistralai_azure/_version.py | 2 +- .../src/mistralai_azure/models/__init__.py | 14 +++++++- .../mistralai_azure/models/contentchunk.py | 14 ++++++-- .../mistralai_azure/models/referencechunk.py | 28 +++++++++++++++ .../src/mistralai_azure/models/toolmessage.py | 15 +++++--- .../src/mistralai_azure/sdkconfiguration.py | 6 ++-- packages/mistralai_gcp/.speakeasy/gen.lock | 18 ++++++---- packages/mistralai_gcp/.speakeasy/gen.yaml | 2 +- .../mistralai_gcp/docs/models/contentchunk.md | 6 ++++ .../docs/models/referencechunk.md | 9 +++++ .../docs/models/referencechunktype.md | 8 +++++ .../mistralai_gcp/docs/models/toolmessage.md | 12 +++---- .../docs/models/toolmessagecontent.md | 17 ++++++++++ packages/mistralai_gcp/pyproject.toml | 2 +- .../src/mistralai_gcp/_version.py | 2 +- .../src/mistralai_gcp/models/__init__.py | 14 +++++++- .../src/mistralai_gcp/models/contentchunk.py | 14 ++++++-- .../mistralai_gcp/models/referencechunk.py | 28 +++++++++++++++ .../src/mistralai_gcp/models/toolmessage.py | 15 +++++--- .../src/mistralai_gcp/sdkconfiguration.py | 6 ++-- pyproject.toml | 2 +- src/mistralai/_version.py | 2 +- src/mistralai/models/__init__.py | 14 +++++++- src/mistralai/models/contentchunk.py | 9 +++-- src/mistralai/models/referencechunk.py | 28 +++++++++++++++ src/mistralai/models/toolmessage.py | 15 +++++--- 
src/mistralai/sdkconfiguration.py | 6 ++-- 45 files changed, 411 insertions(+), 98 deletions(-) create mode 100644 docs/models/referencechunk.md create mode 100644 docs/models/referencechunktype.md create mode 100644 docs/models/toolmessagecontent.md create mode 100644 packages/mistralai_azure/docs/models/referencechunk.md create mode 100644 packages/mistralai_azure/docs/models/referencechunktype.md create mode 100644 packages/mistralai_azure/docs/models/toolmessagecontent.md create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py create mode 100644 packages/mistralai_gcp/docs/models/referencechunk.md create mode 100644 packages/mistralai_gcp/docs/models/referencechunktype.md create mode 100644 packages/mistralai_gcp/docs/models/toolmessagecontent.md create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py create mode 100644 src/mistralai/models/referencechunk.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 26d49c2e..d59c2911 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,20 +1,20 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 87f5a2bcfbc64bec79241e2b0de25b9f + docChecksum: d066d98bdd0ef905d4126e0e69940946 docVersion: 0.0.2 - speakeasyVersion: 1.438.1 - generationVersion: 2.457.2 - releaseVersion: 1.2.2 - configChecksum: bfa571f32b68bdb4917b69135c2eb818 + speakeasyVersion: 1.440.1 + generationVersion: 2.460.1 + releaseVersion: 1.2.3 + configChecksum: 25fa33668bf14110dfe5bac267dcc8b4 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true features: python: additionalDependencies: 1.0.0 - constsAndDefaults: 1.0.4 - core: 5.6.4 + constsAndDefaults: 1.0.5 + core: 5.6.5 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 enumUnions: 0.1.0 @@ -166,6 +166,8 @@ 
generatedFiles: - docs/models/object.md - docs/models/one.md - docs/models/queryparamstatus.md + - docs/models/referencechunk.md + - docs/models/referencechunktype.md - docs/models/repositories.md - docs/models/responseformat.md - docs/models/responseformats.md @@ -187,6 +189,7 @@ generatedFiles: - docs/models/toolchoice.md - docs/models/toolchoiceenum.md - docs/models/toolmessage.md + - docs/models/toolmessagecontent.md - docs/models/toolmessagerole.md - docs/models/tooltypes.md - docs/models/trainingfile.md @@ -318,6 +321,7 @@ generatedFiles: - src/mistralai/models/metricout.py - src/mistralai/models/modelcapabilities.py - src/mistralai/models/modellist.py + - src/mistralai/models/referencechunk.py - src/mistralai/models/responseformat.py - src/mistralai/models/responseformats.py - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py @@ -563,7 +567,7 @@ examples: moderations_chat_v1_chat_moderations_post: speakeasy-default-moderations-chat-v1-chat-moderations-post: requestBody: - application/json: {"input": [[{"content": ""}, {"content": []}, {"content": ""}], []], "model": "V90"} + application/json: {"input": [[{"content": ""}, {"content": ""}, {"content": ""}], [{"content": ""}]], "model": "V90"} responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 2419fdba..68a158cd 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.2 + version: 1.2.3 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 734e61ce..e61ff013 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,46 +1,46 @@ -speakeasyVersion: 1.438.1 +speakeasyVersion: 1.440.1 sources: mistral-azure-source: sourceNamespace: mistral-azure-source - sourceRevisionDigest: 
sha256:2e3e3792ec63f3d59dd2a4de2f97ea6a80bd46f8905576069c54810feb930c12 - sourceBlobDigest: sha256:da059f78d331ea36f2ec69dfc6c5aa65be0bf0eda011597281cea6499b7ebac8 + sourceRevisionDigest: sha256:c441f2d21e7879f5fb9d8d99e2ae242d1e5a84c0c06db971911eb578173e7f62 + sourceBlobDigest: sha256:de4af0f100f15fef89e093a6b5393302b2218fb154230594ec811aacdd4f2ec7 tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:22da209c58cb1591b3fde77467a9affce33c874724b220dd68f11a8f5fe92fbe - sourceBlobDigest: sha256:79c11900db52030ed8d8fff8066f9fe716670f4eadca41936f2cbc1a270fb087 + sourceRevisionDigest: sha256:d3e3d15303dcc1acb27b8895aa3064328bd5b8013ea635c2bce553b6e647b498 + sourceBlobDigest: sha256:db72004ee842a27c3e77980be28a727811e0581daa7a51ad34d142302f8ba2f3 tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:e44702b93f6a8ac450f1d85b4197f6640f8efb3d5e06be98418ea79acd8f70be - sourceBlobDigest: sha256:94a9891a3bdf3fafea5c41cee226c5e366c206e91e39e734cd2d1163af74f688 + sourceRevisionDigest: sha256:9b6ad47076b570f4e23494bf744a7822547d0003e4b10985f26f1c3b5128e631 + sourceBlobDigest: sha256:150e3da2a6bfb74e86e2ce864e9a094fc796f3a08df91f6a6e8745b54b3e16bc tags: - latest - - main + - speakeasy-sdk-regen-1731693697 targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-azure-source - sourceRevisionDigest: sha256:2e3e3792ec63f3d59dd2a4de2f97ea6a80bd46f8905576069c54810feb930c12 - sourceBlobDigest: sha256:da059f78d331ea36f2ec69dfc6c5aa65be0bf0eda011597281cea6499b7ebac8 + sourceRevisionDigest: sha256:c441f2d21e7879f5fb9d8d99e2ae242d1e5a84c0c06db971911eb578173e7f62 + sourceBlobDigest: sha256:de4af0f100f15fef89e093a6b5393302b2218fb154230594ec811aacdd4f2ec7 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:e90c3293cb23081fed599d4528b168335a2ac58deb76ecc3afd7140d49b68816 + codeSamplesRevisionDigest: 
sha256:5db0b04cc2b3962de41cb07e87fe817dd5ec8bc5d8b0254245b26faf70ede027 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:22da209c58cb1591b3fde77467a9affce33c874724b220dd68f11a8f5fe92fbe - sourceBlobDigest: sha256:79c11900db52030ed8d8fff8066f9fe716670f4eadca41936f2cbc1a270fb087 + sourceRevisionDigest: sha256:d3e3d15303dcc1acb27b8895aa3064328bd5b8013ea635c2bce553b6e647b498 + sourceBlobDigest: sha256:db72004ee842a27c3e77980be28a727811e0581daa7a51ad34d142302f8ba2f3 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:8539ff0f99be949906dad55250e1d230dfebf062e8f7c38398713e11c6bb48ec + codeSamplesRevisionDigest: sha256:7d95ba7aa230088b9975be341ba638e51cc574b6e863bd3a0f53e9c5ee261bba mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:e44702b93f6a8ac450f1d85b4197f6640f8efb3d5e06be98418ea79acd8f70be - sourceBlobDigest: sha256:94a9891a3bdf3fafea5c41cee226c5e366c206e91e39e734cd2d1163af74f688 + sourceRevisionDigest: sha256:9b6ad47076b570f4e23494bf744a7822547d0003e4b10985f26f1c3b5128e631 + sourceBlobDigest: sha256:150e3da2a6bfb74e86e2ce864e9a094fc796f3a08df91f6a6e8745b54b3e16bc codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:9e9f8f0da360280a8d42e2ccbee423871b74abd07e872e10892636ba4be4e3a5 + codeSamplesRevisionDigest: sha256:d1ea6603d96bdc063cc747cde1cadcbf443be580935e12438e7b915d0cd9a019 workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/RELEASES.md b/RELEASES.md index 19d5c3be..b8f671ba 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -78,4 +78,14 @@ Based on: ### Generated - [python v1.2.2] . ### Releases -- [PyPI v1.2.2] https://pypi.org/project/mistralai/1.2.2 - . \ No newline at end of file +- [PyPI v1.2.2] https://pypi.org/project/mistralai/1.2.2 - . 
+ +## 2024-11-15 18:37:23 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.440.1 (2.460.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.3] . +### Releases +- [PyPI v1.2.3] https://pypi.org/project/mistralai/1.2.3 - . \ No newline at end of file diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md index 12f6430f..22023e8b 100644 --- a/docs/models/contentchunk.md +++ b/docs/models/contentchunk.md @@ -15,3 +15,9 @@ value: models.ImageURLChunk = /* values here */ value: models.TextChunk = /* values here */ ``` +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + diff --git a/docs/models/referencechunk.md b/docs/models/referencechunk.md new file mode 100644 index 00000000..a132ca2f --- /dev/null +++ b/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/referencechunktype.md b/docs/models/referencechunktype.md new file mode 100644 index 00000000..1e0e2fe6 --- /dev/null +++ b/docs/models/referencechunktype.md @@ -0,0 +1,8 @@ +# ReferenceChunkType + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REFERENCE` | reference | \ No newline at end of file diff --git a/docs/models/toolmessage.md b/docs/models/toolmessage.md index 364339e1..a54f4933 100644 --- a/docs/models/toolmessage.md +++ 
b/docs/models/toolmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | *str* | :heavy_check_mark: | N/A | -| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolmessagecontent.md b/docs/models/toolmessagecontent.md new file mode 100644 index 00000000..5c76091f --- /dev/null +++ b/docs/models/toolmessagecontent.md @@ -0,0 +1,17 @@ +# ToolMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 7e48638a..7e59fa2c 100644 --- a/docs/sdks/classifiers/README.md +++ 
b/docs/sdks/classifiers/README.md @@ -70,7 +70,12 @@ s = Mistral( res = s.classifiers.moderate_chat(inputs=[ [ { - "content": "", + "content": [ + { + "text": "", + "type": "text", + }, + ], }, ], ], model="V90") diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index cc18655a..518aba16 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,18 +1,18 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 1eac78d7698423fcd3bc58124f860d30 + docChecksum: d0000cbe03848bfe843794965cba332f docVersion: 0.0.2 - speakeasyVersion: 1.438.1 - generationVersion: 2.457.2 - releaseVersion: 1.2.0 - configChecksum: f69f129b40abc60e88685a36201ebb87 + speakeasyVersion: 1.440.1 + generationVersion: 2.460.1 + releaseVersion: 1.2.3 + configChecksum: 60295c765204eb0aa26205ec02e574fc published: true features: python: additionalDependencies: 1.0.0 - constsAndDefaults: 1.0.4 - core: 5.6.4 + constsAndDefaults: 1.0.5 + core: 5.6.5 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 @@ -64,6 +64,8 @@ generatedFiles: - docs/models/httpvalidationerror.md - docs/models/loc.md - docs/models/messages.md + - docs/models/referencechunk.md + - docs/models/referencechunktype.md - docs/models/responseformat.md - docs/models/responseformats.md - docs/models/role.md @@ -77,6 +79,7 @@ generatedFiles: - docs/models/toolchoice.md - docs/models/toolchoiceenum.md - docs/models/toolmessage.md + - docs/models/toolmessagecontent.md - docs/models/toolmessagerole.md - docs/models/tooltypes.md - docs/models/type.md @@ -115,6 +118,7 @@ generatedFiles: - src/mistralai_azure/models/functioncall.py - src/mistralai_azure/models/functionname.py - src/mistralai_azure/models/httpvalidationerror.py + - src/mistralai_azure/models/referencechunk.py - src/mistralai_azure/models/responseformat.py - src/mistralai_azure/models/responseformats.py - 
src/mistralai_azure/models/sdkerror.py diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index dcf160a4..aae8dd2a 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.0 + version: 1.2.3 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_azure/docs/models/contentchunk.md b/packages/mistralai_azure/docs/models/contentchunk.md index 4222920b..98b86391 100644 --- a/packages/mistralai_azure/docs/models/contentchunk.md +++ b/packages/mistralai_azure/docs/models/contentchunk.md @@ -9,3 +9,9 @@ value: models.TextChunk = /* values here */ ``` +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/referencechunk.md b/packages/mistralai_azure/docs/models/referencechunk.md new file mode 100644 index 00000000..a132ca2f --- /dev/null +++ b/packages/mistralai_azure/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/referencechunktype.md b/packages/mistralai_azure/docs/models/referencechunktype.md new file mode 100644 index 00000000..1e0e2fe6 --- /dev/null +++ b/packages/mistralai_azure/docs/models/referencechunktype.md @@ -0,0 
+1,8 @@ +# ReferenceChunkType + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REFERENCE` | reference | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessage.md b/packages/mistralai_azure/docs/models/toolmessage.md index 364339e1..a54f4933 100644 --- a/packages/mistralai_azure/docs/models/toolmessage.md +++ b/packages/mistralai_azure/docs/models/toolmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | *str* | :heavy_check_mark: | N/A | -| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessagecontent.md b/packages/mistralai_azure/docs/models/toolmessagecontent.md new file mode 100644 index 00000000..5c76091f --- /dev/null 
+++ b/packages/mistralai_azure/docs/models/toolmessagecontent.md @@ -0,0 +1,17 @@ +# ToolMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 99001165..75b1d0c4 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai_azure" -version = "1.2.0" +version = "1.2.3" description = "Python Client SDK for the Mistral AI API in Azure." authors = ["Mistral",] readme = "README-PYPI.md" diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index 6a45a91a..1e41e4f0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai_azure" -__version__: str = "1.2.0" +__version__: str = "1.2.3" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index e662fa75..379a0dfe 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -54,6 +54,7 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats from .sdkerror import SDKError @@ -70,7 +71,13 @@ from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, 
ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, +) from .tooltypes import ToolTypes from .usageinfo import UsageInfo, UsageInfoTypedDict from .usermessage import ( @@ -137,6 +144,9 @@ "LocTypedDict", "Messages", "MessagesTypedDict", + "ReferenceChunk", + "ReferenceChunkType", + "ReferenceChunkTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", @@ -159,6 +169,8 @@ "ToolChoiceEnum", "ToolChoiceTypedDict", "ToolMessage", + "ToolMessageContent", + "ToolMessageContentTypedDict", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py index 49aeba4c..e8013323 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py @@ -1,10 +1,20 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from mistralai_azure.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated -ContentChunkTypedDict = TextChunkTypedDict +ContentChunkTypedDict = Union[TextChunkTypedDict, ReferenceChunkTypedDict] -ContentChunk = TextChunk +ContentChunk = Annotated[ + Union[ + Annotated[TextChunk, Tag("text")], Annotated[ReferenceChunk, Tag("reference")] + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py b/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py new file mode 100644 index 00000000..ddc89195 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from mistralai_azure.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +ReferenceChunkType = Literal["reference"] + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: ReferenceChunkType + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + TYPE: Annotated[ + Annotated[ + Optional[ReferenceChunkType], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py index 4362bc9f..1004c439 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from mistralai_azure.types import ( BaseModel, Nullable, @@ -9,22 +10,28 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import Literal, Optional +from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypedDict +ToolMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +ToolMessageContent = Union[str, List[ContentChunk]] + + ToolMessageRole = Literal["tool"] class ToolMessageTypedDict(TypedDict): - content: str + content: Nullable[ToolMessageContentTypedDict] tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] role: NotRequired[ToolMessageRole] class ToolMessage(BaseModel): - content: str + content: Nullable[ToolMessageContent] tool_call_id: OptionalNullable[str] = UNSET @@ -35,7 +42,7 @@ class ToolMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["tool_call_id", "name", "role"] - nullable_fields = ["tool_call_id", "name"] + nullable_fields = ["content", "tool_call_id", "name"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 75216e1f..8f64c457 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.0" - gen_version: str = "2.457.2" - user_agent: str = "speakeasy-sdk/python 1.2.0 2.457.2 0.0.2 mistralai_azure" + sdk_version: str = "1.2.3" + gen_version: str = "2.460.1" + user_agent: str = "speakeasy-sdk/python 1.2.3 2.460.1 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = 
Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index e1778244..4ac979ee 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,18 +1,18 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: a4b0284a2171be4279ac869462fc2ba5 + docChecksum: e6c0a4254e61b1f171b409862f717867 docVersion: 0.0.2 - speakeasyVersion: 1.438.1 - generationVersion: 2.457.2 - releaseVersion: 1.2.0 - configChecksum: 5a6e207048dd06f191872088627d9018 + speakeasyVersion: 1.440.1 + generationVersion: 2.460.1 + releaseVersion: 1.2.3 + configChecksum: 3fc99d7ec7ee057a323b593ebf8fdb8c published: true features: python: additionalDependencies: 1.0.0 - constsAndDefaults: 1.0.4 - core: 5.6.4 + constsAndDefaults: 1.0.5 + core: 5.6.5 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 @@ -69,6 +69,8 @@ generatedFiles: - docs/models/httpvalidationerror.md - docs/models/loc.md - docs/models/messages.md + - docs/models/referencechunk.md + - docs/models/referencechunktype.md - docs/models/responseformat.md - docs/models/responseformats.md - docs/models/role.md @@ -82,6 +84,7 @@ generatedFiles: - docs/models/toolchoice.md - docs/models/toolchoiceenum.md - docs/models/toolmessage.md + - docs/models/toolmessagecontent.md - docs/models/toolmessagerole.md - docs/models/tooltypes.md - docs/models/type.md @@ -124,6 +127,7 @@ generatedFiles: - src/mistralai_gcp/models/functioncall.py - src/mistralai_gcp/models/functionname.py - src/mistralai_gcp/models/httpvalidationerror.py + - src/mistralai_gcp/models/referencechunk.py - src/mistralai_gcp/models/responseformat.py - src/mistralai_gcp/models/responseformats.py - src/mistralai_gcp/models/sdkerror.py diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 360352e7..a77e2f5e 100644 --- 
a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.0 + version: 1.2.3 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_gcp/docs/models/contentchunk.md b/packages/mistralai_gcp/docs/models/contentchunk.md index 4222920b..98b86391 100644 --- a/packages/mistralai_gcp/docs/models/contentchunk.md +++ b/packages/mistralai_gcp/docs/models/contentchunk.md @@ -9,3 +9,9 @@ value: models.TextChunk = /* values here */ ``` +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/referencechunk.md b/packages/mistralai_gcp/docs/models/referencechunk.md new file mode 100644 index 00000000..a132ca2f --- /dev/null +++ b/packages/mistralai_gcp/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/referencechunktype.md b/packages/mistralai_gcp/docs/models/referencechunktype.md new file mode 100644 index 00000000..1e0e2fe6 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/referencechunktype.md @@ -0,0 +1,8 @@ +# ReferenceChunkType + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REFERENCE` | reference | \ No newline at end of file diff --git 
a/packages/mistralai_gcp/docs/models/toolmessage.md b/packages/mistralai_gcp/docs/models/toolmessage.md index 364339e1..a54f4933 100644 --- a/packages/mistralai_gcp/docs/models/toolmessage.md +++ b/packages/mistralai_gcp/docs/models/toolmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | *str* | :heavy_check_mark: | N/A | -| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | +| `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessagecontent.md b/packages/mistralai_gcp/docs/models/toolmessagecontent.md new file mode 100644 index 00000000..5c76091f --- /dev/null +++ b/packages/mistralai_gcp/docs/models/toolmessagecontent.md @@ -0,0 +1,17 @@ +# ToolMessageContent + + +## Supported Types + +### `str` + +```python +value: str = /* values 
here */ +``` + +### `List[models.ContentChunk]` + +```python +value: List[models.ContentChunk] = /* values here */ +``` + diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index c4e64885..f20d9b17 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai-gcp" -version = "1.2.0" +version = "1.2.3" description = "Python Client SDK for the Mistral AI API in GCP." authors = ["Mistral",] readme = "README-PYPI.md" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index 0472b64b..5b65a1b5 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "1.2.0" +__version__: str = "1.2.3" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index db408df5..f3c6ce7e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -67,6 +67,7 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats from .sdkerror import SDKError @@ -83,7 +84,13 @@ from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .toolmessage import ( + ToolMessage, + ToolMessageContent, + 
ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, +) from .tooltypes import ToolTypes from .usageinfo import UsageInfo, UsageInfoTypedDict from .usermessage import ( @@ -160,6 +167,9 @@ "LocTypedDict", "Messages", "MessagesTypedDict", + "ReferenceChunk", + "ReferenceChunkType", + "ReferenceChunkTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", @@ -182,6 +192,8 @@ "ToolChoiceEnum", "ToolChoiceTypedDict", "ToolMessage", + "ToolMessageContent", + "ToolMessageContentTypedDict", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py index 49aeba4c..1c882f7e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py @@ -1,10 +1,20 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from mistralai_gcp.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated -ContentChunkTypedDict = TextChunkTypedDict +ContentChunkTypedDict = Union[TextChunkTypedDict, ReferenceChunkTypedDict] -ContentChunk = TextChunk +ContentChunk = Annotated[ + Union[ + Annotated[TextChunk, Tag("text")], Annotated[ReferenceChunk, Tag("reference")] + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py new file mode 100644 index 00000000..d409a70d --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from mistralai_gcp.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +ReferenceChunkType = Literal["reference"] + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: ReferenceChunkType + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + TYPE: Annotated[ + Annotated[ + Optional[ReferenceChunkType], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py index 80e44ede..ad6b800c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from mistralai_gcp.types import ( BaseModel, Nullable, @@ -9,22 +10,28 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import Literal, Optional +from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypedDict +ToolMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +ToolMessageContent = Union[str, List[ContentChunk]] + + ToolMessageRole = Literal["tool"] class ToolMessageTypedDict(TypedDict): - content: str + content: Nullable[ToolMessageContentTypedDict] tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] role: NotRequired[ToolMessageRole] class ToolMessage(BaseModel): - content: str + content: Nullable[ToolMessageContent] tool_call_id: OptionalNullable[str] = UNSET @@ -35,7 +42,7 @@ class ToolMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["tool_call_id", "name", "role"] - nullable_fields = ["tool_call_id", "name"] + nullable_fields = ["content", "tool_call_id", "name"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index d94a4e40..061467eb 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.0" - gen_version: str = "2.457.2" - user_agent: str = "speakeasy-sdk/python 1.2.0 2.457.2 0.0.2 mistralai-gcp" + sdk_version: str = "1.2.3" + gen_version: str = "2.460.1" + user_agent: str = "speakeasy-sdk/python 1.2.3 2.460.1 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: 
UNSET) timeout_ms: Optional[int] = None diff --git a/pyproject.toml b/pyproject.toml index 95ac3053..2fced3a9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.2.2" +version = "1.2.3" description = "Python Client SDK for the Mistral AI API." authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index c6e29cc9..326a3e6e 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.2.2" +__version__: str = "1.2.3" try: if __package__ is not None: diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 42d2c66a..c62bebc1 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -269,6 +269,7 @@ from .metricout import MetricOut, MetricOutTypedDict from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict +from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats from .retrieve_model_v1_models_model_id_getop import ( @@ -294,7 +295,13 @@ from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageRole, ToolMessageTypedDict +from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, +) from .tooltypes import ToolTypes from .trainingfile import TrainingFile, TrainingFileTypedDict from .trainingparameters import TrainingParameters, TrainingParametersTypedDict @@ -553,6 +560,9 @@ "One", "OneTypedDict", "QueryParamStatus", + "ReferenceChunk", + 
"ReferenceChunkType", + "ReferenceChunkTypedDict", "Repositories", "RepositoriesTypedDict", "ResponseFormat", @@ -587,6 +597,8 @@ "ToolChoiceEnum", "ToolChoiceTypedDict", "ToolMessage", + "ToolMessageContent", + "ToolMessageContentTypedDict", "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py index 9b9db095..717ba828 100644 --- a/src/mistralai/models/contentchunk.py +++ b/src/mistralai/models/contentchunk.py @@ -2,6 +2,7 @@ from __future__ import annotations from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag @@ -9,12 +10,16 @@ from typing_extensions import Annotated -ContentChunkTypedDict = Union[TextChunkTypedDict, ImageURLChunkTypedDict] +ContentChunkTypedDict = Union[ + TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict +] ContentChunk = Annotated[ Union[ - Annotated[ImageURLChunk, Tag("image_url")], Annotated[TextChunk, Tag("text")] + Annotated[ImageURLChunk, Tag("image_url")], + Annotated[TextChunk, Tag("text")], + Annotated[ReferenceChunk, Tag("reference")], ], Discriminator(lambda m: get_discriminator(m, "type", "type")), ] diff --git a/src/mistralai/models/referencechunk.py b/src/mistralai/models/referencechunk.py new file mode 100644 index 00000000..33acdb35 --- /dev/null +++ b/src/mistralai/models/referencechunk.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +ReferenceChunkType = Literal["reference"] + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: ReferenceChunkType + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + TYPE: Annotated[ + Annotated[ + Optional[ReferenceChunkType], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py index 2d469d09..c42f34e9 100644 --- a/src/mistralai/models/toolmessage.py +++ b/src/mistralai/models/toolmessage.py @@ -1,24 +1,31 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Literal, Optional +from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypedDict +ToolMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] + + +ToolMessageContent = Union[str, List[ContentChunk]] + + ToolMessageRole = Literal["tool"] class ToolMessageTypedDict(TypedDict): - content: str + content: Nullable[ToolMessageContentTypedDict] tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] role: NotRequired[ToolMessageRole] class ToolMessage(BaseModel): - content: str + content: Nullable[ToolMessageContent] tool_call_id: OptionalNullable[str] = UNSET @@ -29,7 +36,7 @@ class ToolMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["tool_call_id", "name", "role"] - nullable_fields = ["tool_call_id", "name"] + nullable_fields = ["content", "tool_call_id", "name"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 72d59f51..f4351c2e 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.2" - gen_version: str = "2.457.2" - user_agent: str = "speakeasy-sdk/python 1.2.2 2.457.2 0.0.2 mistralai" + sdk_version: str = "1.2.3" + gen_version: str = "2.460.1" + user_agent: str = "speakeasy-sdk/python 1.2.3 2.460.1 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From 76f7a5be041a838f47e87f5dd963612c69c18251 Mon Sep 17 00:00:00 2001 From: alex-ac 
Date: Mon, 2 Dec 2024 15:08:09 +0100 Subject: [PATCH 084/223] updating dependencies --- poetry.lock | 561 +++++++++++++++++++++++++++---------------------- pyproject.toml | 4 +- 2 files changed, 312 insertions(+), 253 deletions(-) diff --git a/poetry.lock b/poetry.lock index 5575630b..ac37e529 100644 --- a/poetry.lock +++ b/poetry.lock @@ -16,13 +16,13 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} [[package]] name = "anyio" -version = "4.4.0" +version = "4.5.2" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, + {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, + {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, ] [package.dependencies] @@ -32,9 +32,9 @@ sniffio = ">=1.1" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "astroid" @@ -74,101 +74,116 
@@ files = [ [[package]] name = "charset-normalizer" -version = "3.3.2" +version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = true python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = 
"charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = 
"sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = 
"charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = 
"charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = 
"charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = 
"charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = 
"sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash 
= "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] @@ -184,13 +199,13 @@ files = [ [[package]] name = "dill" -version = "0.3.8" +version = "0.3.9" description = "serialize all of Python" optional = false python-versions = ">=3.8" files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, + {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, + {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, ] [package.extras] @@ -261,13 +276,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.5" +version = "1.0.7" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, - {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, ] [package.dependencies] @@ -278,7 +293,7 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.26.0)"] +trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" @@ -368,47 +383,53 @@ files = [ [[package]] name = "mypy" -version = "1.10.1" +version = "1.13.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, - {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, - {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, - {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, - {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, - {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, - {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, - {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, - {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, - {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, - {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, - {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, - {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, - {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, - {file = 
"mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, - {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, - {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, - {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, - {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, - {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, + {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, + {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, + {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, + {file = 
"mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, + {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, + {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, + {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, + {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, + {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, + {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, + {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, + {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, + {file = 
"mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, + {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, + {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, + {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, + {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, + {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, + {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, + {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, + {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, ] [package.dependencies] mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" +typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] install-types = ["pip"] 
mypyc = ["setuptools (>=50)"] reports = ["lxml"] @@ -426,24 +447,24 @@ files = [ [[package]] name = "packaging" -version = "24.1" +version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] name = "platformdirs" -version = "4.3.3" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.3.3-py3-none-any.whl", hash = "sha256:50a5450e2e84f44539718293cbb1da0a0885c9d14adf21b77bae4e66fc99d9b5"}, - {file = "platformdirs-4.3.3.tar.gz", hash = "sha256:d4e0b7d8ec176b341fb03cb11ca12d0276faa8c485f9cd218f613840463fc2c0"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] @@ -493,22 +514,19 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.9.1" +version = "2.10.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, - {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, + {file = 
"pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, + {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.23.3" -typing-extensions = [ - {version = ">=4.6.1", markers = "python_version < \"3.13\""}, - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, -] +pydantic-core = "2.27.1" +typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] @@ -516,100 +534,111 @@ timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.23.3" +version = "2.27.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" files = [ - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, - {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, - {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, - {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, - {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, - {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, - {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, - {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, - {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, - {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, - {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, - 
{file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, - {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, - {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", 
hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, - {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, + {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, + {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"}, + {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"}, + {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"}, + {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"}, + {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"}, + {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"}, + {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"}, + {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"}, + 
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"}, + {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"}, + {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"}, + {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"}, + {file = 
"pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"}, + {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"}, + {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"}, + {file = 
"pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"}, + {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"}, + {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"}, + {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"}, + {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"}, + {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"}, + {file = 
"pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"}, + {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"}, + {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"}, + {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"}, + {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"}, + {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"}, + {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"}, + {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"}, + {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"}, + {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"}, + {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"}, + {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"}, + {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"}, + 
{file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"}, + {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"}, + {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"}, + {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"}, + {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash 
= "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"}, + {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"}, ] [package.dependencies] @@ -647,13 +676,13 @@ testutils = ["gitpython (>3)"] [[package]] name = "pytest" -version = "8.3.3" +version = "8.3.4" description = "pytest: 
simple powerful testing with Python" optional = false python-versions = ">=3.8" files = [ - {file = "pytest-8.3.3-py3-none-any.whl", hash = "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2"}, - {file = "pytest-8.3.3.tar.gz", hash = "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181"}, + {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, + {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, ] [package.dependencies] @@ -687,13 +716,13 @@ testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] [[package]] name = "python-dateutil" -version = "2.8.2" +version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, ] [package.dependencies] @@ -758,13 +787,43 @@ files = [ [[package]] name = "tomli" -version = "2.0.1" +version = "2.2.1" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", 
hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, ] [[package]] @@ -780,13 +839,13 @@ files = [ [[package]] name = "types-python-dateutil" -version = "2.9.0.20240906" +version = "2.9.0.20241003" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.9.0.20240906.tar.gz", hash = "sha256:9706c3b68284c25adffc47319ecc7947e5bb86b3773f843c73906fd598bc176e"}, - {file = "types_python_dateutil-2.9.0.20240906-py3-none-any.whl", hash = "sha256:27c8cc2d058ccb14946eebcaaa503088f4f6dbc4fb6093d3d456a49aef2753f6"}, + {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, + {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, ] [[package]] @@ -838,4 +897,4 @@ gcp = ["google-auth", "requests"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "4b71116df8e3bfdf2d9d75058277f721dc8ac40f7a51b3f4d82b7a3e78a21706" +content-hash = "2d6b37c110c1e85e5beeb35bf03affb94d079bd3768e66e0f6dc9277238a483a" diff --git a/pyproject.toml b/pyproject.toml index 2fced3a9..7cd81a7a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,13 +24,13 @@ 
eval-type-backport = "^0.2.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" pydantic = "^2.9.0" -python-dateutil = "2.8.2" +python-dateutil = "^2.8.2" typing-inspect = "^0.9.0" google-auth = { version = "2.27.0", optional = true } requests = { version = "^2.32.3", optional = true } [tool.poetry.group.dev.dependencies] -mypy = "==1.10.1" +mypy = "==1.13.0" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" From 9d24a83c56effa51c1add47b96c53baa0a8d75c4 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Mon, 2 Dec 2024 14:27:07 +0000 Subject: [PATCH 085/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.440.1 --- .speakeasy/gen.lock | 25 ++- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +- README.md | 1 + RELEASES.md | 12 +- docs/models/apiendpoint.md | 3 +- docs/models/detailedjobout.md | 2 +- .../filesapiroutesgetsignedurlrequest.md | 9 + docs/models/filesignedurl.md | 8 + docs/models/finetuneablemodel.md | 14 -- docs/models/jobin.md | 2 +- docs/models/jobout.md | 2 +- docs/sdks/classifiers/README.md | 1 - docs/sdks/files/README.md | 41 +++++ docs/sdks/jobs/README.md | 2 +- pyproject.toml | 2 +- src/mistralai/_version.py | 2 +- src/mistralai/files.py | 166 ++++++++++++++++++ src/mistralai/jobs.py | 4 +- src/mistralai/models/__init__.py | 11 +- src/mistralai/models/apiendpoint.py | 14 +- src/mistralai/models/batchjobin.py | 6 +- src/mistralai/models/detailedjobout.py | 5 +- .../files_api_routes_get_signed_urlop.py | 25 +++ src/mistralai/models/filesignedurl.py | 13 ++ src/mistralai/models/finetuneablemodel.py | 14 -- src/mistralai/models/imageurlchunk.py | 19 +- src/mistralai/models/jobin.py | 5 +- src/mistralai/models/jobout.py | 5 +- src/mistralai/models/referencechunk.py | 14 +- src/mistralai/models/textchunk.py | 12 +- src/mistralai/sdkconfiguration.py | 4 +- 32 files changed, 353 insertions(+), 104 deletions(-) create mode 100644 docs/models/filesapiroutesgetsignedurlrequest.md create mode 100644 docs/models/filesignedurl.md delete 
mode 100644 docs/models/finetuneablemodel.md create mode 100644 src/mistralai/models/files_api_routes_get_signed_urlop.py create mode 100644 src/mistralai/models/filesignedurl.py delete mode 100644 src/mistralai/models/finetuneablemodel.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index d59c2911..7ab50a79 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: d066d98bdd0ef905d4126e0e69940946 + docChecksum: 36ad3563d9d2b3af47015100d060570b docVersion: 0.0.2 speakeasyVersion: 1.440.1 generationVersion: 2.460.1 - releaseVersion: 1.2.3 - configChecksum: 25fa33668bf14110dfe5bac267dcc8b4 + releaseVersion: 1.2.4 + configChecksum: 4fe789bac842073beb4e2d9c6c3f833d repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -107,16 +107,17 @@ generatedFiles: - docs/models/filepurpose.md - docs/models/filesapiroutesdeletefilerequest.md - docs/models/filesapiroutesdownloadfilerequest.md + - docs/models/filesapiroutesgetsignedurlrequest.md - docs/models/filesapirouteslistfilesrequest.md - docs/models/filesapiroutesretrievefilerequest.md - docs/models/filesapiroutesuploadfilemultipartbodyparams.md - docs/models/fileschema.md + - docs/models/filesignedurl.md - docs/models/fimcompletionrequest.md - docs/models/fimcompletionrequeststop.md - docs/models/fimcompletionresponse.md - docs/models/fimcompletionstreamrequest.md - docs/models/fimcompletionstreamrequeststop.md - - docs/models/finetuneablemodel.md - docs/models/finishreason.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md @@ -282,14 +283,15 @@ generatedFiles: - src/mistralai/models/filepurpose.py - src/mistralai/models/files_api_routes_delete_fileop.py - src/mistralai/models/files_api_routes_download_fileop.py + - 
src/mistralai/models/files_api_routes_get_signed_urlop.py - src/mistralai/models/files_api_routes_list_filesop.py - src/mistralai/models/files_api_routes_retrieve_fileop.py - src/mistralai/models/files_api_routes_upload_fileop.py - src/mistralai/models/fileschema.py + - src/mistralai/models/filesignedurl.py - src/mistralai/models/fimcompletionrequest.py - src/mistralai/models/fimcompletionresponse.py - src/mistralai/models/fimcompletionstreamrequest.py - - src/mistralai/models/finetuneablemodel.py - src/mistralai/models/ftmodelcapabilitiesout.py - src/mistralai/models/ftmodelcard.py - src/mistralai/models/ftmodelout.py @@ -454,7 +456,7 @@ examples: application/json: {"model": "codestral-latest"} responses: "200": - application/json: {"id": "a621cf02-1cd9-4cf5-8403-315211a509a3", "auto_start": false, "model": "open-mistral-7b", "status": "FAILED", "job_type": "", "created_at": 550483, "modified_at": 906537, "training_files": ["74c2becc-3769-4177-b5e0-24985613de0e"]} + application/json: {"id": "a621cf02-1cd9-4cf5-8403-315211a509a3", "auto_start": false, "model": "2", "status": "FAILED", "job_type": "", "created_at": 550483, "modified_at": 906537, "training_files": ["74c2becc-3769-4177-b5e0-24985613de0e"]} jobs_api_routes_fine_tuning_get_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: parameters: @@ -521,7 +523,7 @@ examples: application/json: {"input": ["Embed this sentence.", "As well as this one."], "model": "Wrangler"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": 
[{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} "422": {} files_api_routes_download_file: speakeasy-default-files-api-routes-download-file: @@ -572,3 +574,12 @@ examples: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} "422": {} + files_api_routes_get_signed_url: + speakeasy-default-files-api-routes-get-signed-url: + parameters: + path: + file_id: "" + query: {} + responses: + "200": + application/json: {"url": "https://scornful-daughter.com/"} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 68a158cd..dfee8409 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.3 + version: 1.2.4 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index e61ff013..07f3f6b8 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:9b6ad47076b570f4e23494bf744a7822547d0003e4b10985f26f1c3b5128e631 - sourceBlobDigest: sha256:150e3da2a6bfb74e86e2ce864e9a094fc796f3a08df91f6a6e8745b54b3e16bc + sourceRevisionDigest: sha256:f74c08bdc7ae39f5fe2394df8f31ae623ece30a7f65019ab6b7bcea352953f05 + sourceBlobDigest: sha256:5de08a038994ec94c0889341d434b598f541459d114f9935deb9ef3b3af90c5f tags: - latest - - speakeasy-sdk-regen-1731693697 + - speakeasy-sdk-regen-1733149559 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:9b6ad47076b570f4e23494bf744a7822547d0003e4b10985f26f1c3b5128e631 - sourceBlobDigest: sha256:150e3da2a6bfb74e86e2ce864e9a094fc796f3a08df91f6a6e8745b54b3e16bc + 
sourceRevisionDigest: sha256:f74c08bdc7ae39f5fe2394df8f31ae623ece30a7f65019ab6b7bcea352953f05 + sourceBlobDigest: sha256:5de08a038994ec94c0889341d434b598f541459d114f9935deb9ef3b3af90c5f codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:d1ea6603d96bdc063cc747cde1cadcbf443be580935e12438e7b915d0cd9a019 + codeSamplesRevisionDigest: sha256:800804bcf76f579fd76510126828c03be89bb4964a01da87f376148f86cc88dc workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/README.md b/README.md index 24c744ab..4a1fc830 100644 --- a/README.md +++ b/README.md @@ -401,6 +401,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [retrieve](docs/sdks/files/README.md#retrieve) - Retrieve File * [delete](docs/sdks/files/README.md#delete) - Delete File * [download](docs/sdks/files/README.md#download) - Download File +* [get_signed_url](docs/sdks/files/README.md#get_signed_url) - Get Signed Url ### [fim](docs/sdks/fim/README.md) diff --git a/RELEASES.md b/RELEASES.md index b8f671ba..845891e8 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -88,4 +88,14 @@ Based on: ### Generated - [python v1.2.3] . ### Releases -- [PyPI v1.2.3] https://pypi.org/project/mistralai/1.2.3 - . \ No newline at end of file +- [PyPI v1.2.3] https://pypi.org/project/mistralai/1.2.3 - . + +## 2024-12-02 14:25:56 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.440.1 (2.460.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.4] . +### Releases +- [PyPI v1.2.4] https://pypi.org/project/mistralai/1.2.4 - . 
\ No newline at end of file diff --git a/docs/models/apiendpoint.md b/docs/models/apiendpoint.md index 5dfa68ae..700b932f 100644 --- a/docs/models/apiendpoint.md +++ b/docs/models/apiendpoint.md @@ -8,4 +8,5 @@ | `ROOT_V1_CHAT_COMPLETIONS` | /v1/chat/completions | | `ROOT_V1_EMBEDDINGS` | /v1/embeddings | | `ROOT_V1_FIM_COMPLETIONS` | /v1/fim/completions | -| `ROOT_V1_MODERATIONS` | /v1/moderations | \ No newline at end of file +| `ROOT_V1_MODERATIONS` | /v1/moderations | +| `ROOT_V1_CHAT_MODERATIONS` | /v1/chat/moderations | \ No newline at end of file diff --git a/docs/models/detailedjobout.md b/docs/models/detailedjobout.md index 3eae6b30..f7470327 100644 --- a/docs/models/detailedjobout.md +++ b/docs/models/detailedjobout.md @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | N/A | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `hyperparameters` | [models.TrainingParameters](../models/trainingparameters.md) | :heavy_check_mark: | N/A | -| `model` | [models.FineTuneableModel](../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| | `status` | [models.DetailedJobOutStatus](../models/detailedjoboutstatus.md) | :heavy_check_mark: | N/A | | `job_type` | *str* | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | diff --git a/docs/models/filesapiroutesgetsignedurlrequest.md b/docs/models/filesapiroutesgetsignedurlrequest.md new file mode 100644 index 00000000..dbe3c801 --- /dev/null +++ b/docs/models/filesapiroutesgetsignedurlrequest.md @@ -0,0 +1,9 @@ +# FilesAPIRoutesGetSignedURLRequest + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------- | --------------------------------------------------------------- | --------------------------------------------------------------- | --------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `expiry` | *Optional[int]* | :heavy_minus_sign: | Number of hours before the url becomes invalid. Defaults to 24h | \ No newline at end of file diff --git a/docs/models/filesignedurl.md b/docs/models/filesignedurl.md new file mode 100644 index 00000000..52ce3f4f --- /dev/null +++ b/docs/models/filesignedurl.md @@ -0,0 +1,8 @@ +# FileSignedURL + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `url` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/finetuneablemodel.md b/docs/models/finetuneablemodel.md deleted file mode 100644 index cb429284..00000000 --- a/docs/models/finetuneablemodel.md +++ /dev/null @@ -1,14 +0,0 @@ -# FineTuneableModel - -The name of the model to fine-tune. 
- - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `OPEN_MISTRAL_7B` | open-mistral-7b | -| `MISTRAL_SMALL_LATEST` | mistral-small-latest | -| `CODESTRAL_LATEST` | codestral-latest | -| `MISTRAL_LARGE_LATEST` | mistral-large-latest | -| `OPEN_MISTRAL_NEMO` | open-mistral-nemo | \ No newline at end of file diff --git a/docs/models/jobin.md b/docs/models/jobin.md index ebaed9a9..6fd661cf 100644 --- a/docs/models/jobin.md +++ b/docs/models/jobin.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | 
[models.FineTuneableModel](../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | | `hyperparameters` | [models.TrainingParametersIn](../models/trainingparametersin.md) | :heavy_check_mark: | The fine-tuning hyperparameter settings used in a fine-tune job. | | `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | diff --git a/docs/models/jobout.md b/docs/models/jobout.md index 2fe60fd8..652c9d16 100644 --- a/docs/models/jobout.md +++ b/docs/models/jobout.md @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | The ID of the job. | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `hyperparameters` | [models.TrainingParameters](../models/trainingparameters.md) | :heavy_check_mark: | N/A | -| `model` | [models.FineTuneableModel](../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | | `status` | [models.Status](../models/status.md) | :heavy_check_mark: | The current status of the fine-tuning job. | | `job_type` | *str* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. 
| diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 7e59fa2c..05b8b7ca 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -73,7 +73,6 @@ res = s.classifiers.moderate_chat(inputs=[ "content": [ { "text": "", - "type": "text", }, ], }, diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index fc5784a4..886d57ed 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -12,6 +12,7 @@ Files API * [retrieve](#retrieve) - Retrieve File * [delete](#delete) - Delete File * [download](#download) - Download File +* [get_signed_url](#get_signed_url) - Get Signed Url ## upload @@ -217,6 +218,46 @@ if res is not None: ### Errors +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get_signed_url + +Get Signed Url + +### Example Usage + +```python +from mistralai import Mistral +import os + +s = Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) + +res = s.files.get_signed_url(file_id="") + +if res is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `expiry` | *Optional[int]* | :heavy_minus_sign: | Number of hours before the url becomes invalid. Defaults to 24h | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.FileSignedURL](../../models/filesignedurl.md)** + +### Errors + | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | | models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 6ecf6e51..05b8b428 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -84,7 +84,7 @@ if res is not None: | Parameter | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | 
[models.FineTuneableModel](../../models/finetuneablemodel.md) | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | | `hyperparameters` | [models.TrainingParametersIn](../../models/trainingparametersin.md) | :heavy_check_mark: | The fine-tuning hyperparameter settings used in a fine-tune job. | | `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | diff --git a/pyproject.toml b/pyproject.toml index 7cd81a7a..ddd41722 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.2.3" +version = "1.2.4" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 326a3e6e..41970f16 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.2.3" +__version__: str = "1.2.4" try: if __package__ is not None: diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 6cf0fcb7..e2977be2 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -891,3 +891,169 @@ async def download_async( http_res_text, http_res, ) + + def get_signed_url( + self, + *, + file_id: str, + expiry: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FileSignedURL]: + r"""Get Signed Url + + :param file_id: + :param expiry: Number of hours before the url becomes invalid. Defaults to 24h + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesGetSignedURLRequest( + file_id=file_id, + expiry=expiry, + ) + + req = self.build_request( + method="GET", + path="/v1/files/{file_id}/url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = 
self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="files_api_routes_get_signed_url", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FileSignedURL]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_signed_url_async( + self, + *, + file_id: str, + expiry: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + ) -> Optional[models.FileSignedURL]: + r"""Get Signed Url + + :param file_id: + :param expiry: Number of hours before the url becomes invalid. 
Defaults to 24h + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.FilesAPIRoutesGetSignedURLRequest( + file_id=file_id, + expiry=expiry, + ) + + req = self.build_request_async( + method="GET", + path="/v1/files/{file_id}/url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="files_api_routes_get_signed_url", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.FileSignedURL]) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await 
utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 9f472de1..17085b9d 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -225,7 +225,7 @@ async def list_async( def create( self, *, - model: models.FineTuneableModel, + model: str, hyperparameters: Union[ models.TrainingParametersIn, models.TrainingParametersInTypedDict ], @@ -354,7 +354,7 @@ def create( async def create_async( self, *, - model: models.FineTuneableModel, + model: str, hyperparameters: Union[ models.TrainingParametersIn, models.TrainingParametersInTypedDict ], diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index c62bebc1..4e7e4d12 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -133,6 +133,10 @@ FilesAPIRoutesDownloadFileRequest, FilesAPIRoutesDownloadFileRequestTypedDict, ) +from .files_api_routes_get_signed_urlop import ( + FilesAPIRoutesGetSignedURLRequest, + FilesAPIRoutesGetSignedURLRequestTypedDict, +) from .files_api_routes_list_filesop import ( FilesAPIRoutesListFilesRequest, FilesAPIRoutesListFilesRequestTypedDict, @@ -148,6 +152,7 @@ FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, ) from .fileschema import FileSchema, FileSchemaTypedDict +from .filesignedurl import FileSignedURL, FileSignedURLTypedDict from .fimcompletionrequest import ( FIMCompletionRequest, FIMCompletionRequestStop, @@ -161,7 +166,6 @@ FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict, ) -from .finetuneablemodel import FineTuneableModel from .ftmodelcapabilitiesout import ( FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict, @@ -468,18 +472,21 @@ "FilePurpose", "FileSchema", "FileSchemaTypedDict", + "FileSignedURL", + "FileSignedURLTypedDict", "FileTypedDict", 
"FilesAPIRoutesDeleteFileRequest", "FilesAPIRoutesDeleteFileRequestTypedDict", "FilesAPIRoutesDownloadFileRequest", "FilesAPIRoutesDownloadFileRequestTypedDict", + "FilesAPIRoutesGetSignedURLRequest", + "FilesAPIRoutesGetSignedURLRequestTypedDict", "FilesAPIRoutesListFilesRequest", "FilesAPIRoutesListFilesRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", - "FineTuneableModel", "FinishReason", "Function", "FunctionCall", diff --git a/src/mistralai/models/apiendpoint.py b/src/mistralai/models/apiendpoint.py index 00621eba..a1b42e88 100644 --- a/src/mistralai/models/apiendpoint.py +++ b/src/mistralai/models/apiendpoint.py @@ -1,9 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.types import UnrecognizedStr +from typing import Literal, Union -APIEndpoint = Literal[ - "/v1/chat/completions", "/v1/embeddings", "/v1/fim/completions", "/v1/moderations" +APIEndpoint = Union[ + Literal[ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/fim/completions", + "/v1/moderations", + "/v1/chat/moderations", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py index 20f054b8..e249e526 100644 --- a/src/mistralai/models/batchjobin.py +++ b/src/mistralai/models/batchjobin.py @@ -3,9 +3,11 @@ from __future__ import annotations from .apiendpoint import APIEndpoint from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_open_enum from pydantic import model_serializer +from pydantic.functional_validators import PlainValidator from typing import Dict, List, Optional -from typing_extensions import NotRequired, TypedDict +from typing_extensions import 
Annotated, NotRequired, TypedDict class BatchJobInTypedDict(TypedDict): @@ -19,7 +21,7 @@ class BatchJobInTypedDict(TypedDict): class BatchJobIn(BaseModel): input_files: List[str] - endpoint: APIEndpoint + endpoint: Annotated[APIEndpoint, PlainValidator(validate_open_enum(False))] model: str diff --git a/src/mistralai/models/detailedjobout.py b/src/mistralai/models/detailedjobout.py index a4be707d..b2a1c8d9 100644 --- a/src/mistralai/models/detailedjobout.py +++ b/src/mistralai/models/detailedjobout.py @@ -3,7 +3,6 @@ from __future__ import annotations from .checkpointout import CheckpointOut, CheckpointOutTypedDict from .eventout import EventOut, EventOutTypedDict -from .finetuneablemodel import FineTuneableModel from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict from .trainingparameters import TrainingParameters, TrainingParametersTypedDict @@ -48,7 +47,7 @@ class DetailedJobOutTypedDict(TypedDict): id: str auto_start: bool hyperparameters: TrainingParametersTypedDict - model: FineTuneableModel + model: str r"""The name of the model to fine-tune.""" status: DetailedJobOutStatus job_type: str @@ -75,7 +74,7 @@ class DetailedJobOut(BaseModel): hyperparameters: TrainingParameters - model: FineTuneableModel + model: str r"""The name of the model to fine-tune.""" status: DetailedJobOutStatus diff --git a/src/mistralai/models/files_api_routes_get_signed_urlop.py b/src/mistralai/models/files_api_routes_get_signed_urlop.py new file mode 100644 index 00000000..708d40ab --- /dev/null +++ b/src/mistralai/models/files_api_routes_get_signed_urlop.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict): + file_id: str + expiry: NotRequired[int] + r"""Number of hours before the url becomes invalid. Defaults to 24h""" + + +class FilesAPIRoutesGetSignedURLRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + expiry: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 24 + r"""Number of hours before the url becomes invalid. Defaults to 24h""" diff --git a/src/mistralai/models/filesignedurl.py b/src/mistralai/models/filesignedurl.py new file mode 100644 index 00000000..092be7f8 --- /dev/null +++ b/src/mistralai/models/filesignedurl.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing_extensions import TypedDict + + +class FileSignedURLTypedDict(TypedDict): + url: str + + +class FileSignedURL(BaseModel): + url: str diff --git a/src/mistralai/models/finetuneablemodel.py b/src/mistralai/models/finetuneablemodel.py deleted file mode 100644 index 947991c2..00000000 --- a/src/mistralai/models/finetuneablemodel.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -FineTuneableModel = Literal[ - "open-mistral-7b", - "mistral-small-latest", - "codestral-latest", - "mistral-large-latest", - "open-mistral-nemo", -] -r"""The name of the model to fine-tune.""" diff --git a/src/mistralai/models/imageurlchunk.py b/src/mistralai/models/imageurlchunk.py index 1c37fe3b..f18c63a1 100644 --- a/src/mistralai/models/imageurlchunk.py +++ b/src/mistralai/models/imageurlchunk.py @@ -3,26 +3,24 @@ from __future__ import annotations from .imageurl import ImageURL, ImageURLTypedDict from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import Annotated, TypedDict +from typing_extensions import NotRequired, TypedDict -ImageURLChunkType = Literal["image_url"] - ImageURLChunkImageURLTypedDict = Union[ImageURLTypedDict, str] ImageURLChunkImageURL = Union[ImageURL, str] +ImageURLChunkType = Literal["image_url"] + + class ImageURLChunkTypedDict(TypedDict): r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" image_url: ImageURLChunkImageURLTypedDict - type: ImageURLChunkType + type: NotRequired[ImageURLChunkType] class ImageURLChunk(BaseModel): @@ -30,9 +28,4 @@ class ImageURLChunk(BaseModel): image_url: ImageURLChunkImageURL - TYPE: Annotated[ - Annotated[ - Optional[ImageURLChunkType], AfterValidator(validate_const("image_url")) - ], - pydantic.Field(alias="type"), - ] = "image_url" + type: Optional[ImageURLChunkType] = "image_url" diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py index a294d292..0ef66da3 100644 --- a/src/mistralai/models/jobin.py +++ b/src/mistralai/models/jobin.py @@ -1,7 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .finetuneablemodel import FineTuneableModel from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict from .trainingfile import TrainingFile, TrainingFileTypedDict from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict @@ -25,7 +24,7 @@ class JobInTypedDict(TypedDict): - model: FineTuneableModel + model: str r"""The name of the model to fine-tune.""" hyperparameters: TrainingParametersInTypedDict r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" @@ -42,7 +41,7 @@ class JobInTypedDict(TypedDict): class JobIn(BaseModel): - model: FineTuneableModel + model: str r"""The name of the model to fine-tune.""" hyperparameters: TrainingParametersIn diff --git a/src/mistralai/models/jobout.py b/src/mistralai/models/jobout.py index 71edce01..c3ffb248 100644 --- a/src/mistralai/models/jobout.py +++ b/src/mistralai/models/jobout.py @@ -1,7 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .finetuneablemodel import FineTuneableModel from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict from .trainingparameters import TrainingParameters, TrainingParametersTypedDict @@ -49,7 +48,7 @@ class JobOutTypedDict(TypedDict): r"""The ID of the job.""" auto_start: bool hyperparameters: TrainingParametersTypedDict - model: FineTuneableModel + model: str r"""The name of the model to fine-tune.""" status: Status r"""The current status of the fine-tuning job.""" @@ -85,7 +84,7 @@ class JobOut(BaseModel): hyperparameters: TrainingParameters - model: FineTuneableModel + model: str r"""The name of the model to fine-tune.""" status: Status diff --git a/src/mistralai/models/referencechunk.py b/src/mistralai/models/referencechunk.py index 33acdb35..4a5503f2 100644 --- a/src/mistralai/models/referencechunk.py +++ b/src/mistralai/models/referencechunk.py @@ -2,11 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import Annotated, TypedDict +from typing_extensions import NotRequired, TypedDict ReferenceChunkType = Literal["reference"] @@ -14,15 +11,10 @@ class ReferenceChunkTypedDict(TypedDict): reference_ids: List[int] - type: ReferenceChunkType + type: NotRequired[ReferenceChunkType] class ReferenceChunk(BaseModel): reference_ids: List[int] - TYPE: Annotated[ - Annotated[ - Optional[ReferenceChunkType], AfterValidator(validate_const("reference")) - ], - pydantic.Field(alias="type"), - ] = "reference" + type: Optional[ReferenceChunkType] = "reference" diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py index 130a91c5..02b115f6 100644 --- a/src/mistralai/models/textchunk.py +++ 
b/src/mistralai/models/textchunk.py @@ -2,11 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict +from typing_extensions import NotRequired, TypedDict TextChunkType = Literal["text"] @@ -14,13 +11,10 @@ class TextChunkTypedDict(TypedDict): text: str - type: TextChunkType + type: NotRequired[TextChunkType] class TextChunk(BaseModel): text: str - TYPE: Annotated[ - Annotated[Optional[TextChunkType], AfterValidator(validate_const("text"))], - pydantic.Field(alias="type"), - ] = "text" + type: Optional[TextChunkType] = "text" diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index f4351c2e..9030b1be 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.3" + sdk_version: str = "1.2.4" gen_version: str = "2.460.1" - user_agent: str = "speakeasy-sdk/python 1.2.3 2.460.1 0.0.2 mistralai" + user_agent: str = "speakeasy-sdk/python 1.2.4 2.460.1 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From 6eb43e1237841b45447ed220f72579f146ade33e Mon Sep 17 00:00:00 2001 From: gaspardBT Date: Mon, 2 Dec 2024 18:04:10 +0100 Subject: [PATCH 086/223] fix gcp model parsing --- packages/mistralai_gcp/src/mistralai_gcp/sdk.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index 7e7adbdc..d2b41dcf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -26,11 +26,12 @@ 
"mistral-nemo-2407": "mistral-nemo@2407", } -def get_model_info(model: str) -> Tuple[str,str]: +def get_model_info(model: str) -> Tuple[str, str]: # if the model requiers the legacy fomat, use it, else do nothing. - model_id = LEGACY_MODEL_ID_FORMAT.get(model, model) - model = "-".join(model.split("-")[:-1]) - return model, model_id + if model in LEGACY_MODEL_ID_FORMAT: + return "-".join(model.split("-")[:-1]), LEGACY_MODEL_ID_FORMAT[model] + else: + return model, model From 2282e4aa2f4b83e9ca1f727e90a9ca12b1a7f383 Mon Sep 17 00:00:00 2001 From: alex-ac Date: Wed, 4 Dec 2024 14:10:32 +0100 Subject: [PATCH 087/223] regent main sdk - new speakeasy version --- .speakeasy/gen.lock | 12 +- .speakeasy/workflow.lock | 5 +- README.md | 352 +++++++++--------- USAGE.md | 172 ++++----- docs/sdks/agents/README.md | 55 ++- docs/sdks/batch/README.md | 6 +- docs/sdks/chat/README.md | 55 ++- docs/sdks/classifiers/README.md | 52 ++- docs/sdks/embeddings/README.md | 21 +- docs/sdks/files/README.md | 84 ++--- docs/sdks/fim/README.md | 31 +- docs/sdks/finetuning/README.md | 6 +- docs/sdks/jobs/README.md | 65 ++-- docs/sdks/mistral/README.md | 4 +- docs/sdks/mistraljobs/README.md | 56 ++- docs/sdks/models/README.md | 78 ++-- src/mistralai/agents.py | 10 +- src/mistralai/chat.py | 10 +- src/mistralai/fim.py | 10 +- src/mistralai/httpclient.py | 6 + .../models/agentscompletionrequest.py | 34 +- .../models/agentscompletionstreamrequest.py | 36 +- src/mistralai/models/assistantmessage.py | 10 +- .../models/chatclassificationrequest.py | 43 ++- src/mistralai/models/chatcompletionrequest.py | 30 +- .../models/chatcompletionstreamrequest.py | 36 +- src/mistralai/models/classificationrequest.py | 10 +- src/mistralai/models/contentchunk.py | 9 +- src/mistralai/models/deltamessage.py | 8 +- src/mistralai/models/embeddingrequest.py | 6 +- src/mistralai/models/fimcompletionrequest.py | 10 +- .../models/fimcompletionstreamrequest.py | 10 +- src/mistralai/models/functioncall.py | 6 +- 
src/mistralai/models/imageurlchunk.py | 8 +- ...es_fine_tuning_create_fine_tuning_jobop.py | 13 +- src/mistralai/models/modellist.py | 6 +- ...retrieve_model_v1_models_model_id_getop.py | 9 +- src/mistralai/models/systemmessage.py | 10 +- src/mistralai/models/toolmessage.py | 8 +- src/mistralai/models/usermessage.py | 8 +- src/mistralai/models/validationerror.py | 6 +- src/mistralai/sdk.py | 14 + src/mistralai/sdkconfiguration.py | 4 +- src/mistralai/utils/annotations.py | 59 ++- src/mistralai/utils/eventstreaming.py | 62 ++- 45 files changed, 861 insertions(+), 684 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 7ab50a79..e2422227 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -3,8 +3,8 @@ id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: docChecksum: 36ad3563d9d2b3af47015100d060570b docVersion: 0.0.2 - speakeasyVersion: 1.440.1 - generationVersion: 2.460.1 + speakeasyVersion: 1.451.1 + generationVersion: 2.470.1 releaseVersion: 1.2.4 configChecksum: 4fe789bac842073beb4e2d9c6c3f833d repoURL: https://github.com/mistralai/client-python.git @@ -14,7 +14,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.6.5 + core: 5.6.8 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 enumUnions: 0.1.0 @@ -34,11 +34,10 @@ features: responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.0.0 - serverEvents: 1.0.4 + serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - tests: 1.6.0 - unions: 3.0.3 + unions: 3.0.4 uploadStreams: 1.0.0 generatedFiles: - .gitattributes @@ -583,3 +582,4 @@ examples: responses: "200": application/json: {"url": "https://scornful-daughter.com/"} +generatedTests: {} diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 07f3f6b8..770090a2 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.440.1 +speakeasyVersion: 
1.451.1 sources: mistral-azure-source: sourceNamespace: mistral-azure-source @@ -18,7 +18,6 @@ sources: sourceBlobDigest: sha256:5de08a038994ec94c0889341d434b598f541459d114f9935deb9ef3b3af90c5f tags: - latest - - speakeasy-sdk-regen-1733149559 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -40,7 +39,7 @@ targets: sourceRevisionDigest: sha256:f74c08bdc7ae39f5fe2394df8f31ae623ece30a7f65019ab6b7bcea352953f05 sourceBlobDigest: sha256:5de08a038994ec94c0889341d434b598f541459d114f9935deb9ef3b3af90c5f codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:800804bcf76f579fd76510126828c03be89bb4964a01da87f376148f86cc88dc + codeSamplesRevisionDigest: sha256:09212fda8fc13e0f486f157495d028138bc9babedfba6dd85f7024575f30fd0e workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/README.md b/README.md index 4a1fc830..7a886085 100644 --- a/README.md +++ b/README.md @@ -27,19 +27,26 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create yo ## Table of Contents + +* [Mistral Python Client](#mistral-python-client) + * [Migration warning](#migration-warning) + * [API Key Setup](#api-key-setup) + * [SDK Installation](#sdk-installation) + * [SDK Example Usage](#sdk-example-usage) + * [Providers' SDKs Example Usage](#providers-sdks-example-usage) + * [Available Resources and Operations](#available-resources-and-operations) + * [Server-sent event streaming](#server-sent-event-streaming) + * [File uploads](#file-uploads) + * [Retries](#retries) + * [Error Handling](#error-handling) + * [Server Selection](#server-selection) + * [Custom HTTP Client](#custom-http-client) + * [Authentication](#authentication) + * [Debugging](#debugging) + * [IDE Support](#ide-support) +* [Development](#development) + * [Contributions](#contributions) -* [SDK Installation](#sdk-installation) -* [IDE Support](#ide-support) -* [SDK Example Usage](#sdk-example-usage) -* [Available Resources and Operations](#available-resources-and-operations) -* [Server-sent event streaming](#server-sent-event-streaming) -* [File uploads](#file-uploads) -* [Retries](#retries) -* [Error Handling](#error-handling) -* [Server Selection](#server-selection) -* [Custom HTTP Client](#custom-http-client) -* [Authentication](#authentication) -* [Debugging](#debugging) @@ -76,20 +83,19 @@ This example shows how to create chat completions. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.chat.complete(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -]) +) as s: + res = s.chat.complete(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ]) -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ```
@@ -102,18 +108,19 @@ from mistralai import Mistral import os async def main(): - s = Mistral( + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) - res = await s.chat.complete_async(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ]) - if res is not None: - # handle response - pass + ) as s: + res = await s.chat.complete_async(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ]) + + if res is not None: + # handle response + pass asyncio.run(main()) ``` @@ -127,18 +134,17 @@ This example shows how to upload a file. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.files.upload(file={ - "file_name": "example.file", - "content": open("example.file", "rb"), -}) +) as s: + res = s.files.upload(file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ```
@@ -151,16 +157,17 @@ from mistralai import Mistral import os async def main(): - s = Mistral( + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) - res = await s.files.upload_async(file={ - "file_name": "example.file", - "content": open("example.file", "rb"), - }) - if res is not None: - # handle response - pass + ) as s: + res = await s.files.upload_async(file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) + + if res is not None: + # handle response + pass asyncio.run(main()) ``` @@ -174,20 +181,19 @@ This example shows how to create agents completions. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.agents.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], agent_id="") +) as s: + res = s.agents.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], agent_id="") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ```
@@ -200,18 +206,19 @@ from mistralai import Mistral import os async def main(): - s = Mistral( + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) - res = await s.agents.complete_async(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], agent_id="") - if res is not None: - # handle response - pass + ) as s: + res = await s.agents.complete_async(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], agent_id="") + + if res is not None: + # handle response + pass asyncio.run(main()) ``` @@ -225,18 +232,17 @@ This example shows how to create embedding request. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.embeddings.create(inputs=[ - "Embed this sentence.", - "As well as this one.", -], model="Wrangler") +) as s: + res = s.embeddings.create(inputs=[ + "Embed this sentence.", + "As well as this one.", + ], model="Wrangler") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ```
@@ -249,16 +255,17 @@ from mistralai import Mistral import os async def main(): - s = Mistral( + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) - res = await s.embeddings.create_async(inputs=[ - "Embed this sentence.", - "As well as this one.", - ], model="Wrangler") - if res is not None: - # handle response - pass + ) as s: + res = await s.embeddings.create_async(inputs=[ + "Embed this sentence.", + "As well as this one.", + ], model="Wrangler") + + if res is not None: + # handle response + pass asyncio.run(main()) ``` @@ -439,32 +446,36 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA operations. These operations will expose the stream as [Generator][generator] that can be consumed using a simple `for` loop. The loop will terminate when the server no longer has any events to send and closes the -underlying connection. +underlying connection. + +The stream is also a [Context Manager][context-manager] and can be used with the `with` statement and will close the +underlying connection when the context is exited. ```python from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.chat.stream(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -]) +) as s: + res = s.chat.stream(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, + ]) -if res is not None: - for event in res: - # handle event - print(event, flush=True) + if res is not None: + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) ``` [mdn-sse]: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events -[generator]: https://wiki.python.org/moin/Generators +[generator]: https://book.pythontips.com/en/latest/generators.html +[context-manager]: https://book.pythontips.com/en/latest/context_managers.html @@ -481,18 +492,17 @@ Certain SDK methods accept file objects as part of a request body or multi-part from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.files.upload(file={ - "file_name": "example.file", - "content": open("example.file", "rb"), -}) +) as s: + res = s.files.upload(file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -508,16 +518,15 @@ from mistral.utils import BackoffStrategy, RetryConfig from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) +) as s: + res = s.models.list(, + RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) -res = s.models.list(, - RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) - -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -527,16 +536,15 @@ from mistral.utils import BackoffStrategy, RetryConfig from mistralai import Mistral import os -s = Mistral( +with Mistral( retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), 
api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.models.list() +) as s: + res = s.models.list() -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -568,24 +576,23 @@ When custom error responses are specified for an operation, the SDK may also rai from mistralai import Mistral, models import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = None -try: - res = s.models.list() - - if res is not None: - # handle response - pass - -except models.HTTPValidationError as e: - # handle e.data: models.HTTPValidationErrorData - raise(e) -except models.SDKError as e: - # handle exception - raise(e) +) as s: + res = None + try: + res = s.models.list() + + if res is not None: + # handle response + pass + + except models.HTTPValidationError as e: + # handle e.data: models.HTTPValidationErrorData + raise(e) + except models.SDKError as e: + # handle exception + raise(e) ``` @@ -606,16 +613,15 @@ You can override the default server globally by passing a server name to the `se from mistralai import Mistral import os -s = Mistral( +with Mistral( server="eu", api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.models.list() +) as s: + res = s.models.list() -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -626,16 +632,15 @@ The default server can also be overridden globally by passing a URL to the `serv from mistralai import Mistral import os -s = Mistral( +with Mistral( server_url="https://api.mistral.ai", api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.models.list() +) as s: + res = s.models.list() -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -737,15 +742,14 @@ To authenticate with the API the `api_key` parameter must be set when initializi from mistralai import Mistral import os -s = Mistral( +with Mistral( 
api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.models.list() +) as s: + res = s.models.list() -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` diff --git a/USAGE.md b/USAGE.md index 7d9d2ce2..e523aa92 100644 --- a/USAGE.md +++ b/USAGE.md @@ -8,20 +8,19 @@ This example shows how to create chat completions. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.chat.complete(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -]) - -if res is not None: - # handle response - pass +) as s: + res = s.chat.complete(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ]) + + if res is not None: + # handle response + pass ```
@@ -34,18 +33,19 @@ from mistralai import Mistral import os async def main(): - s = Mistral( + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) - res = await s.chat.complete_async(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ]) - if res is not None: - # handle response - pass + ) as s: + res = await s.chat.complete_async(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ]) + + if res is not None: + # handle response + pass asyncio.run(main()) ``` @@ -59,18 +59,17 @@ This example shows how to upload a file. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.files.upload(file={ - "file_name": "example.file", - "content": open("example.file", "rb"), -}) +) as s: + res = s.files.upload(file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ```
@@ -83,16 +82,17 @@ from mistralai import Mistral import os async def main(): - s = Mistral( + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) - res = await s.files.upload_async(file={ - "file_name": "example.file", - "content": open("example.file", "rb"), - }) - if res is not None: - # handle response - pass + ) as s: + res = await s.files.upload_async(file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) + + if res is not None: + # handle response + pass asyncio.run(main()) ``` @@ -106,20 +106,19 @@ This example shows how to create agents completions. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.agents.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], agent_id="") - -if res is not None: - # handle response - pass +) as s: + res = s.agents.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], agent_id="") + + if res is not None: + # handle response + pass ```
@@ -132,18 +131,19 @@ from mistralai import Mistral import os async def main(): - s = Mistral( + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) - res = await s.agents.complete_async(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], agent_id="") - if res is not None: - # handle response - pass + ) as s: + res = await s.agents.complete_async(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], agent_id="") + + if res is not None: + # handle response + pass asyncio.run(main()) ``` @@ -157,18 +157,17 @@ This example shows how to create embedding request. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.embeddings.create(inputs=[ - "Embed this sentence.", - "As well as this one.", -], model="Wrangler") +) as s: + res = s.embeddings.create(inputs=[ + "Embed this sentence.", + "As well as this one.", + ], model="Wrangler") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ```
@@ -181,16 +180,17 @@ from mistralai import Mistral import os async def main(): - s = Mistral( + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) - res = await s.embeddings.create_async(inputs=[ - "Embed this sentence.", - "As well as this one.", - ], model="Wrangler") - if res is not None: - # handle response - pass + ) as s: + res = await s.embeddings.create_async(inputs=[ + "Embed this sentence.", + "As well as this one.", + ], model="Wrangler") + + if res is not None: + # handle response + pass asyncio.run(main()) ``` diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 3eb946a8..792b796d 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -20,20 +20,19 @@ Agents Completion from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.agents.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], agent_id="") - -if res is not None: - # handle response - pass +) as s: + res = s.agents.complete(messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ], agent_id="") + + if res is not None: + # handle response + pass ``` @@ -76,21 +75,21 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.agents.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], agent_id="") - -if res is not None: - for event in res: - # handle event - print(event, flush=True) +) as s: + res = s.agents.stream(messages=[ + { + "content": "Who is the best French painter? 
Answer in one short sentence.", + "role": "user", + }, + ], agent_id="") + + if res is not None: + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) ``` @@ -114,7 +113,7 @@ if res is not None: ### Response -**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +**[Union[eventstreaming.EventStream[models.CompletionEvent], eventstreaming.EventStreamAsync[models.CompletionEvent]]](../../models/.md)** ### Errors diff --git a/docs/sdks/batch/README.md b/docs/sdks/batch/README.md index 55a9c135..ec7d8340 100644 --- a/docs/sdks/batch/README.md +++ b/docs/sdks/batch/README.md @@ -1,2 +1,6 @@ # Batch -(*batch*) \ No newline at end of file +(*batch*) + +## Overview + +### Available Operations diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index d6f4a768..6e00d3d2 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -20,20 +20,19 @@ Chat Completion from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.chat.complete(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -]) - -if res is not None: - # handle response - pass +) as s: + res = s.chat.complete(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ]) + + if res is not None: + # handle response + pass ``` @@ -79,21 +78,21 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.chat.stream(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? 
Answer in one short sentence.", - "role": "user", - }, -]) - -if res is not None: - for event in res: - # handle event - print(event, flush=True) +) as s: + res = s.chat.stream(model="mistral-small-latest", messages=[ + { + "content": "Who is the best French painter? Answer in one short sentence.", + "role": "user", + }, + ]) + + if res is not None: + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) ``` @@ -120,7 +119,7 @@ if res is not None: ### Response -**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +**[Union[eventstreaming.EventStream[models.CompletionEvent], eventstreaming.EventStreamAsync[models.CompletionEvent]]](../../models/.md)** ### Errors diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 05b8b7ca..da90019a 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -20,17 +20,16 @@ Moderations from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) +) as s: + res = s.classifiers.moderate(inputs=[ + "", + ]) -res = s.classifiers.moderate(inputs=[ - "", -]) - -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -63,25 +62,24 @@ Moderations Chat from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.classifiers.moderate_chat(inputs=[ - [ - { - "content": [ - { - "text": "", - }, - ], - }, - ], -], model="V90") - -if res is not None: - # handle response - pass +) as s: + res = s.classifiers.moderate_chat(inputs=[ + [ + { + "content": [ + { + "text": "", + }, + ], + }, + ], + ], model="V90") + + if res is not None: + # handle response + pass ``` diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 9f47e703..1f9f1956 100644 --- a/docs/sdks/embeddings/README.md +++ 
b/docs/sdks/embeddings/README.md @@ -19,18 +19,17 @@ Embeddings from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.embeddings.create(inputs=[ - "Embed this sentence.", - "As well as this one.", -], model="Wrangler") - -if res is not None: - # handle response - pass +) as s: + res = s.embeddings.create(inputs=[ + "Embed this sentence.", + "As well as this one.", + ], model="Wrangler") + + if res is not None: + # handle response + pass ``` diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 886d57ed..ad2e0f09 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -28,18 +28,17 @@ Please contact us if you need to increase these storage limits. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) +) as s: + res = s.files.upload(file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) -res = s.files.upload(file={ - "file_name": "example.file", - "content": open("example.file", "rb"), -}) - -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -71,15 +70,14 @@ Returns a list of files that belong to the user's organization. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.files.list() +) as s: + res = s.files.list() -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -115,15 +113,14 @@ Returns information about a specific file. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.files.retrieve(file_id="") +) as s: + res = s.files.retrieve(file_id="") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -154,15 +151,14 @@ Delete a file. 
from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) +) as s: + res = s.files.delete(file_id="") -res = s.files.delete(file_id="") - -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -193,15 +189,14 @@ Download a file from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.files.download(file_id="") +) as s: + res = s.files.download(file_id="") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -232,15 +227,14 @@ Get Signed Url from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.files.get_signed_url(file_id="") +) as s: + res = s.files.get_signed_url(file_id="") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index d9811521..eed1893e 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -20,15 +20,14 @@ FIM completion. 
from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) +) as s: + res = s.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") -res = s.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") - -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -69,16 +68,16 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") +) as s: + res = s.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") -if res is not None: - for event in res: - # handle event - print(event, flush=True) + if res is not None: + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) ``` @@ -100,7 +99,7 @@ if res is not None: ### Response -**[Union[Generator[models.CompletionEvent, None, None], AsyncGenerator[models.CompletionEvent, None]]](../../models/.md)** +**[Union[eventstreaming.EventStream[models.CompletionEvent], eventstreaming.EventStreamAsync[models.CompletionEvent]]](../../models/.md)** ### Errors diff --git a/docs/sdks/finetuning/README.md b/docs/sdks/finetuning/README.md index fdcbd62a..3e0f12ce 100644 --- a/docs/sdks/finetuning/README.md +++ b/docs/sdks/finetuning/README.md @@ -1,2 +1,6 @@ # FineTuning -(*fine_tuning*) \ No newline at end of file +(*fine_tuning*) + +## Overview + +### Available Operations diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 05b8b428..b4779580 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -21,15 +21,14 @@ Get a list of fine-tuning jobs for your organization and user. 
from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) +) as s: + res = s.fine_tuning.jobs.list() -res = s.fine_tuning.jobs.list() - -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -68,15 +67,14 @@ Create a new fine-tuning job, it will be queued for processing. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.fine_tuning.jobs.create(model="codestral-latest", hyperparameters={}) +) as s: + res = s.fine_tuning.jobs.create(model="codestral-latest", hyperparameters={}) -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -114,15 +112,14 @@ Get a fine-tuned job details by its UUID. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.fine_tuning.jobs.get(job_id="b18d8d81-fd7b-4764-a31e-475cb1f36591") +) as s: + res = s.fine_tuning.jobs.get(job_id="b18d8d81-fd7b-4764-a31e-475cb1f36591") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -153,15 +150,14 @@ Request the cancellation of a fine tuning job. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) +) as s: + res = s.fine_tuning.jobs.cancel(job_id="03fa7112-315a-4072-a9f2-43f3f1ec962e") -res = s.fine_tuning.jobs.cancel(job_id="03fa7112-315a-4072-a9f2-43f3f1ec962e") - -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -192,15 +188,14 @@ Request the start of a validated fine tuning job. 
from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.fine_tuning.jobs.start(job_id="0eb0f807-fb9f-4e46-9c13-4e257df6e1ba") +) as s: + res = s.fine_tuning.jobs.start(job_id="0eb0f807-fb9f-4e46-9c13-4e257df6e1ba") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` diff --git a/docs/sdks/mistral/README.md b/docs/sdks/mistral/README.md index 0189a6c4..4b9573d0 100644 --- a/docs/sdks/mistral/README.md +++ b/docs/sdks/mistral/README.md @@ -2,4 +2,6 @@ ## Overview -Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. \ No newline at end of file +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + +### Available Operations diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 5852c2cb..1880c83e 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -20,15 +20,14 @@ Get a list of batch jobs for your organization and user. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) +) as s: + res = s.batch.jobs.list() -res = s.batch.jobs.list() - -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -65,17 +64,16 @@ Create a new batch job, it will be queued for processing. 
from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.batch.jobs.create(input_files=[ - "a621cf02-1cd9-4cf5-8403-315211a509a3", -], endpoint="/v1/fim/completions", model="2") +) as s: + res = s.batch.jobs.create(input_files=[ + "a621cf02-1cd9-4cf5-8403-315211a509a3", + ], endpoint="/v1/fim/completions", model="2") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -110,15 +108,14 @@ Get a batch job details by its UUID. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) +) as s: + res = s.batch.jobs.get(job_id="b888f774-3e7c-4135-a18c-6b985523c4bc") -res = s.batch.jobs.get(job_id="b888f774-3e7c-4135-a18c-6b985523c4bc") - -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -149,15 +146,14 @@ Request the cancellation of a batch job. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.batch.jobs.cancel(job_id="0f713502-9233-41c6-9ebd-c570b7edb496") +) as s: + res = s.batch.jobs.cancel(job_id="0f713502-9233-41c6-9ebd-c570b7edb496") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 2ad489e0..78884947 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -24,15 +24,14 @@ List all models available to the user. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) +) as s: + res = s.models.list() -res = s.models.list() - -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -63,15 +62,14 @@ Retrieve a model information. 
from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") +) as s: + res = s.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -103,15 +101,14 @@ Delete a fine-tuned model. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.models.delete(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") +) as s: + res = s.models.delete(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -143,15 +140,14 @@ Update a model name or description. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) +) as s: + res = s.models.update(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") -res = s.models.update(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") - -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -184,15 +180,14 @@ Archive a fine-tuned model. from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.models.archive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") +) as s: + res = s.models.archive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` @@ -223,15 +218,14 @@ Un-archive a fine-tuned model. 
from mistralai import Mistral import os -s = Mistral( +with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) - -res = s.models.unarchive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") +) as s: + res = s.models.unarchive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") -if res is not None: - # handle response - pass + if res is not None: + # handle response + pass ``` diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index a45bcec3..246cab4e 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -5,7 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env -from typing import Any, AsyncGenerator, Generator, List, Optional, Union +from typing import Any, List, Optional, Union class Agents(BaseSDK): @@ -336,7 +336,7 @@ def stream( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[Generator[models.CompletionEvent, None, None]]: + ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream Agents completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -428,7 +428,7 @@ def stream( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events( + return eventstreaming.EventStream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", @@ -487,7 +487,7 @@ async def stream_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream Agents completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -579,7 +579,7 @@ async def stream_async( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async( + return eventstreaming.EventStreamAsync( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 53313ca7..4b7aad3b 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -5,7 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import Nullable, OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env -from typing import Any, AsyncGenerator, Generator, List, Optional, Union +from typing import Any, List, Optional, Union class Chat(BaseSDK): @@ -337,7 +337,7 @@ def stream( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[Generator[models.CompletionEvent, None, None]]: + ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -435,7 +435,7 @@ def stream( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events( + return eventstreaming.EventStream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", @@ -497,7 +497,7 @@ async def stream_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -595,7 +595,7 @@ async def stream_async( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async( + return eventstreaming.EventStreamAsync( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 8f8c8529..6f036311 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -5,7 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import Nullable, OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env -from typing import Any, AsyncGenerator, Generator, Optional, Union +from typing import Any, Optional, Union class Fim(BaseSDK): @@ -278,7 +278,7 @@ def stream( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[Generator[models.CompletionEvent, None, None]]: + ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream fim completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -360,7 +360,7 @@ def stream( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events( + return eventstreaming.EventStream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", @@ -405,7 +405,7 @@ async def stream_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream fim completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -487,7 +487,7 @@ async def stream_async( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async( + return eventstreaming.EventStreamAsync( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py index 36b642a0..167cea4e 100644 --- a/src/mistralai/httpclient.py +++ b/src/mistralai/httpclient.py @@ -41,6 +41,9 @@ def build_request( ) -> httpx.Request: pass + def close(self) -> None: + pass + @runtime_checkable class AsyncHttpClient(Protocol): @@ -76,3 +79,6 @@ def build_request( extensions: Optional[httpx._types.RequestExtensions] = None, ) -> httpx.Request: pass + + async def aclose(self) -> None: + pass diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index bce326a5..5f53dddb 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -13,23 +13,30 @@ from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentsCompletionRequestStopTypedDict = Union[str, List[str]] +AgentsCompletionRequestStopTypedDict = TypeAliasType( + "AgentsCompletionRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -AgentsCompletionRequestStop = Union[str, List[str]] +AgentsCompletionRequestStop = TypeAliasType( + "AgentsCompletionRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -AgentsCompletionRequestMessagesTypedDict = Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, -] +AgentsCompletionRequestMessagesTypedDict = TypeAliasType( + "AgentsCompletionRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) AgentsCompletionRequestMessages = Annotated[ @@ -43,10 +50,15 @@ ] -AgentsCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] +AgentsCompletionRequestToolChoiceTypedDict = TypeAliasType( + "AgentsCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) -AgentsCompletionRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] +AgentsCompletionRequestToolChoice = TypeAliasType( + "AgentsCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) class AgentsCompletionRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index 94cc983a..fdc15328 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -13,23 +13,30 @@ from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentsCompletionStreamRequestStopTypedDict = Union[str, List[str]] +AgentsCompletionStreamRequestStopTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -AgentsCompletionStreamRequestStop = Union[str, List[str]] +AgentsCompletionStreamRequestStop = TypeAliasType( + "AgentsCompletionStreamRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -AgentsCompletionStreamRequestMessagesTypedDict = Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, -] +AgentsCompletionStreamRequestMessagesTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) AgentsCompletionStreamRequestMessages = Annotated[ @@ -43,12 +50,15 @@ ] -AgentsCompletionStreamRequestToolChoiceTypedDict = Union[ - ToolChoiceTypedDict, ToolChoiceEnum -] +AgentsCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) -AgentsCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] +AgentsCompletionStreamRequestToolChoice = TypeAliasType( + "AgentsCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) class AgentsCompletionStreamRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py index d7b929bf..c9a28945 100644 --- a/src/mistralai/models/assistantmessage.py +++ b/src/mistralai/models/assistantmessage.py @@ -6,13 +6,17 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] 
+AssistantMessageContentTypedDict = TypeAliasType( + "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -AssistantMessageContent = Union[str, List[ContentChunk]] +AssistantMessageContent = TypeAliasType( + "AssistantMessageContent", Union[str, List[ContentChunk]] +) AssistantMessageRole = Literal["assistant"] diff --git a/src/mistralai/models/chatclassificationrequest.py b/src/mistralai/models/chatclassificationrequest.py index 6b4cc136..6f3967dc 100644 --- a/src/mistralai/models/chatclassificationrequest.py +++ b/src/mistralai/models/chatclassificationrequest.py @@ -10,15 +10,18 @@ import pydantic from pydantic import Discriminator, Tag, model_serializer from typing import List, Union -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, TypeAliasType, TypedDict -TwoTypedDict = Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, -] +TwoTypedDict = TypeAliasType( + "TwoTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) Two = Annotated[ @@ -32,12 +35,15 @@ ] -OneTypedDict = Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, -] +OneTypedDict = TypeAliasType( + "OneTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) One = Annotated[ @@ -51,13 +57,16 @@ ] -ChatClassificationRequestInputsTypedDict = Union[ - List[OneTypedDict], List[List[TwoTypedDict]] -] +ChatClassificationRequestInputsTypedDict = TypeAliasType( + "ChatClassificationRequestInputsTypedDict", + Union[List[OneTypedDict], List[List[TwoTypedDict]]], +) r"""Chat to classify""" -ChatClassificationRequestInputs = Union[List[One], List[List[Two]]] +ChatClassificationRequestInputs = TypeAliasType( + "ChatClassificationRequestInputs", Union[List[One], List[List[Two]]] +) 
r"""Chat to classify""" diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index b3435d52..195ea593 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -13,23 +13,26 @@ from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -StopTypedDict = Union[str, List[str]] +StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -Stop = Union[str, List[str]] +Stop = TypeAliasType("Stop", Union[str, List[str]]) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, -] +MessagesTypedDict = TypeAliasType( + "MessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) Messages = Annotated[ @@ -43,10 +46,15 @@ ] -ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] +ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) -ChatCompletionRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] +ChatCompletionRequestToolChoice = TypeAliasType( + "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) class ChatCompletionRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index a98eb335..fee65092 100644 --- 
a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -13,23 +13,30 @@ from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -ChatCompletionStreamRequestStopTypedDict = Union[str, List[str]] +ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -ChatCompletionStreamRequestStop = Union[str, List[str]] +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -ChatCompletionStreamRequestMessagesTypedDict = Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, -] +ChatCompletionStreamRequestMessagesTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) ChatCompletionStreamRequestMessages = Annotated[ @@ -43,12 +50,15 @@ ] -ChatCompletionStreamRequestToolChoiceTypedDict = Union[ - ToolChoiceTypedDict, ToolChoiceEnum -] +ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) -ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] +ChatCompletionStreamRequestToolChoice = TypeAliasType( + "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) class 
ChatCompletionStreamRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/classificationrequest.py b/src/mistralai/models/classificationrequest.py index d2426c4d..d18ffa61 100644 --- a/src/mistralai/models/classificationrequest.py +++ b/src/mistralai/models/classificationrequest.py @@ -5,14 +5,18 @@ import pydantic from pydantic import model_serializer from typing import List, Union -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -ClassificationRequestInputsTypedDict = Union[str, List[str]] +ClassificationRequestInputsTypedDict = TypeAliasType( + "ClassificationRequestInputsTypedDict", Union[str, List[str]] +) r"""Text to classify.""" -ClassificationRequestInputs = Union[str, List[str]] +ClassificationRequestInputs = TypeAliasType( + "ClassificationRequestInputs", Union[str, List[str]] +) r"""Text to classify.""" diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py index 717ba828..feeda7cd 100644 --- a/src/mistralai/models/contentchunk.py +++ b/src/mistralai/models/contentchunk.py @@ -7,12 +7,13 @@ from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag from typing import Union -from typing_extensions import Annotated +from typing_extensions import Annotated, TypeAliasType -ContentChunkTypedDict = Union[ - TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict -] +ContentChunkTypedDict = TypeAliasType( + "ContentChunkTypedDict", + Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict], +) ContentChunk = Annotated[ diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py index 7a966e09..b46cf641 100644 --- a/src/mistralai/models/deltamessage.py +++ b/src/mistralai/models/deltamessage.py @@ -6,13 +6,15 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer from 
typing import List, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -ContentTypedDict = Union[str, List[ContentChunkTypedDict]] +ContentTypedDict = TypeAliasType( + "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -Content = Union[str, List[ContentChunk]] +Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) class DeltaMessageTypedDict(TypedDict): diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 61e181ce..4de8c312 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -5,14 +5,14 @@ import pydantic from pydantic import model_serializer from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -InputsTypedDict = Union[str, List[str]] +InputsTypedDict = TypeAliasType("InputsTypedDict", Union[str, List[str]]) r"""Text to embed.""" -Inputs = Union[str, List[str]] +Inputs = TypeAliasType("Inputs", Union[str, List[str]]) r"""Text to embed.""" diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py index 409aa256..fb72ba41 100644 --- a/src/mistralai/models/fimcompletionrequest.py +++ b/src/mistralai/models/fimcompletionrequest.py @@ -4,14 +4,18 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer from typing import List, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -FIMCompletionRequestStopTypedDict = Union[str, List[str]] +FIMCompletionRequestStopTypedDict = TypeAliasType( + "FIMCompletionRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -FIMCompletionRequestStop = Union[str, List[str]] +FIMCompletionRequestStop = TypeAliasType( + "FIMCompletionRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py index 8f9c1dac..5e16a170 100644 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -4,14 +4,18 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer from typing import List, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] +FIMCompletionStreamRequestStopTypedDict = TypeAliasType( + "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -FIMCompletionStreamRequestStop = Union[str, List[str]] +FIMCompletionStreamRequestStop = TypeAliasType( + "FIMCompletionStreamRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" diff --git a/src/mistralai/models/functioncall.py b/src/mistralai/models/functioncall.py index a57d2350..0cce622a 100644 --- a/src/mistralai/models/functioncall.py +++ b/src/mistralai/models/functioncall.py @@ -3,13 +3,13 @@ from __future__ import annotations from mistralai.types import BaseModel from typing import Any, Dict, Union -from typing_extensions import TypedDict +from typing_extensions import TypeAliasType, TypedDict -ArgumentsTypedDict = Union[Dict[str, Any], str] +ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) -Arguments = Union[Dict[str, Any], str] +Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) class FunctionCallTypedDict(TypedDict): diff --git a/src/mistralai/models/imageurlchunk.py b/src/mistralai/models/imageurlchunk.py index f18c63a1..498690f5 100644 --- a/src/mistralai/models/imageurlchunk.py +++ b/src/mistralai/models/imageurlchunk.py @@ -4,13 +4,15 @@ from .imageurl import ImageURL, ImageURLTypedDict from mistralai.types import BaseModel from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -ImageURLChunkImageURLTypedDict = Union[ImageURLTypedDict, str] +ImageURLChunkImageURLTypedDict = TypeAliasType( + "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] +) -ImageURLChunkImageURL = Union[ImageURL, str] +ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) ImageURLChunkType = Literal["image_url"] diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py index 1925a1a6..d7a5d10d 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -4,13 +4,18 
@@ from .jobout import JobOut, JobOutTypedDict from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict from typing import Union +from typing_extensions import TypeAliasType -JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = Union[ - LegacyJobMetadataOutTypedDict, JobOutTypedDict -] +JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + Union[LegacyJobMetadataOutTypedDict, JobOutTypedDict], +) r"""OK""" -JobsAPIRoutesFineTuningCreateFineTuningJobResponse = Union[LegacyJobMetadataOut, JobOut] +JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + Union[LegacyJobMetadataOut, JobOut], +) r"""OK""" diff --git a/src/mistralai/models/modellist.py b/src/mistralai/models/modellist.py index 97ae4c38..394cb3fa 100644 --- a/src/mistralai/models/modellist.py +++ b/src/mistralai/models/modellist.py @@ -7,10 +7,12 @@ from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -DataTypedDict = Union[BaseModelCardTypedDict, FTModelCardTypedDict] +DataTypedDict = TypeAliasType( + "DataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] +) Data = Annotated[ diff --git a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py index dd4bcccc..bfe62474 100644 --- a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py +++ b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py @@ -7,7 +7,7 @@ from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator from pydantic import Discriminator, Tag from typing import Union -from typing_extensions import 
Annotated, TypedDict +from typing_extensions import Annotated, TypeAliasType, TypedDict class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): @@ -22,9 +22,10 @@ class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): r"""The ID of the model to retrieve.""" -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = Union[ - BaseModelCardTypedDict, FTModelCardTypedDict -] +RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", + Union[BaseModelCardTypedDict, FTModelCardTypedDict], +) r"""Successful Response""" diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py index f6f30743..7827ac4b 100644 --- a/src/mistralai/models/systemmessage.py +++ b/src/mistralai/models/systemmessage.py @@ -4,13 +4,17 @@ from .textchunk import TextChunk, TextChunkTypedDict from mistralai.types import BaseModel from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -SystemMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] +SystemMessageContentTypedDict = TypeAliasType( + "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] +) -SystemMessageContent = Union[str, List[TextChunk]] +SystemMessageContent = TypeAliasType( + "SystemMessageContent", Union[str, List[TextChunk]] +) Role = Literal["system"] diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py index c42f34e9..bee9c700 100644 --- a/src/mistralai/models/toolmessage.py +++ b/src/mistralai/models/toolmessage.py @@ -5,13 +5,15 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from 
typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] +ToolMessageContentTypedDict = TypeAliasType( + "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -ToolMessageContent = Union[str, List[ContentChunk]] +ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) ToolMessageRole = Literal["tool"] diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py index af698955..dac2618a 100644 --- a/src/mistralai/models/usermessage.py +++ b/src/mistralai/models/usermessage.py @@ -5,13 +5,15 @@ from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL from pydantic import model_serializer from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] +UserMessageContentTypedDict = TypeAliasType( + "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -UserMessageContent = Union[str, List[ContentChunk]] +UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) UserMessageRole = Literal["user"] diff --git a/src/mistralai/models/validationerror.py b/src/mistralai/models/validationerror.py index 03ce9845..e971e016 100644 --- a/src/mistralai/models/validationerror.py +++ b/src/mistralai/models/validationerror.py @@ -3,13 +3,13 @@ from __future__ import annotations from mistralai.types import BaseModel from typing import List, Union -from typing_extensions import TypedDict +from typing_extensions import TypeAliasType, TypedDict -LocTypedDict = Union[str, int] +LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) -Loc = Union[str, int] +Loc = TypeAliasType("Loc", Union[str, int]) class ValidationErrorTypedDict(TypedDict): diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py 
index 71c60fcf..7778560e 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -129,3 +129,17 @@ def _init_sdks(self): self.agents = Agents(self.sdk_configuration) self.embeddings = Embeddings(self.sdk_configuration) self.classifiers = Classifiers(self.sdk_configuration) + + def __enter__(self): + return self + + async def __aenter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.sdk_configuration.client is not None: + self.sdk_configuration.client.close() + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if self.sdk_configuration.async_client is not None: + await self.sdk_configuration.async_client.aclose() diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 9030b1be..1f8261ae 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -29,8 +29,8 @@ class SDKConfiguration: language: str = "python" openapi_doc_version: str = "0.0.2" sdk_version: str = "1.2.4" - gen_version: str = "2.460.1" - user_agent: str = "speakeasy-sdk/python 1.2.4 2.460.1 0.0.2 mistralai" + gen_version: str = "2.470.1" + user_agent: str = "speakeasy-sdk/python 1.2.4 2.470.1 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/src/mistralai/utils/annotations.py b/src/mistralai/utils/annotations.py index 5b3bbb02..387874ed 100644 --- a/src/mistralai/utils/annotations.py +++ b/src/mistralai/utils/annotations.py @@ -1,30 +1,55 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from enum import Enum -from typing import Any +from typing import Any, Optional def get_discriminator(model: Any, fieldname: str, key: str) -> str: - if isinstance(model, dict): - try: - return f'{model.get(key)}' - except AttributeError as e: - raise ValueError(f'Could not find discriminator key {key} in {model}') from e + """ + Recursively search for the discriminator attribute in a model. - if hasattr(model, fieldname): - attr = getattr(model, fieldname) + Args: + model (Any): The model to search within. + fieldname (str): The name of the field to search for. + key (str): The key to search for in dictionaries. - if isinstance(attr, Enum): - return f'{attr.value}' + Returns: + str: The name of the discriminator attribute. - return f'{attr}' + Raises: + ValueError: If the discriminator attribute is not found. + """ + upper_fieldname = fieldname.upper() - fieldname = fieldname.upper() - if hasattr(model, fieldname): - attr = getattr(model, fieldname) + def get_field_discriminator(field: Any) -> Optional[str]: + """Search for the discriminator attribute in a given field.""" - if isinstance(attr, Enum): - return f'{attr.value}' + if isinstance(field, dict): + if key in field: + return f'{field[key]}' - return f'{attr}' + if hasattr(field, fieldname): + attr = getattr(field, fieldname) + if isinstance(attr, Enum): + return f'{attr.value}' + return f'{attr}' + + if hasattr(field, upper_fieldname): + attr = getattr(field, upper_fieldname) + if isinstance(attr, Enum): + return f'{attr.value}' + return f'{attr}' + + return None + + + if isinstance(model, list): + for field in model: + discriminator = get_field_discriminator(field) + if discriminator is not None: + return discriminator + + discriminator = get_field_discriminator(model) + if discriminator is not None: + return discriminator raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/src/mistralai/utils/eventstreaming.py b/src/mistralai/utils/eventstreaming.py 
index 553b386b..74a63f75 100644 --- a/src/mistralai/utils/eventstreaming.py +++ b/src/mistralai/utils/eventstreaming.py @@ -2,12 +2,72 @@ import re import json -from typing import Callable, TypeVar, Optional, Generator, AsyncGenerator, Tuple +from typing import ( + Callable, + Generic, + TypeVar, + Optional, + Generator, + AsyncGenerator, + Tuple, +) import httpx T = TypeVar("T") +class EventStream(Generic[T]): + response: httpx.Response + generator: Generator[T, None, None] + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + ): + self.response = response + self.generator = stream_events(response, decoder, sentinel) + + def __iter__(self): + return self + + def __next__(self): + return next(self.generator) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.response.close() + + +class EventStreamAsync(Generic[T]): + response: httpx.Response + generator: AsyncGenerator[T, None] + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + ): + self.response = response + self.generator = stream_events_async(response, decoder, sentinel) + + def __aiter__(self): + return self + + async def __anext__(self): + return await self.generator.__anext__() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.response.aclose() + + class ServerEvent: id: Optional[str] = None event: Optional[str] = None From cd263917c3d88d2414c7960b26a708e077c035b7 Mon Sep 17 00:00:00 2001 From: alex-ac Date: Wed, 4 Dec 2024 14:11:44 +0100 Subject: [PATCH 088/223] gen sdk GCP --- .speakeasy/workflow.lock | 10 +-- packages/mistralai_gcp/.speakeasy/gen.lock | 14 ++--- packages/mistralai_gcp/pyproject.toml | 4 +- .../mistralai_gcp/src/mistralai_gcp/chat.py | 10 +-- .../mistralai_gcp/src/mistralai_gcp/fim.py | 10 +-- .../src/mistralai_gcp/httpclient.py | 6 ++ 
.../mistralai_gcp/models/assistantmessage.py | 10 ++- .../models/chatcompletionrequest.py | 34 ++++++---- .../models/chatcompletionstreamrequest.py | 32 ++++++---- .../src/mistralai_gcp/models/contentchunk.py | 6 +- .../src/mistralai_gcp/models/deltamessage.py | 8 ++- .../models/fimcompletionrequest.py | 10 ++- .../models/fimcompletionstreamrequest.py | 10 ++- .../src/mistralai_gcp/models/functioncall.py | 6 +- .../mistralai_gcp/models/referencechunk.py | 14 +---- .../src/mistralai_gcp/models/systemmessage.py | 10 ++- .../src/mistralai_gcp/models/textchunk.py | 12 +--- .../src/mistralai_gcp/models/toolmessage.py | 8 ++- .../src/mistralai_gcp/models/usermessage.py | 8 ++- .../mistralai_gcp/models/validationerror.py | 6 +- .../src/mistralai_gcp/sdkconfiguration.py | 4 +- .../src/mistralai_gcp/utils/annotations.py | 59 +++++++++++++----- .../src/mistralai_gcp/utils/eventstreaming.py | 62 ++++++++++++++++++- 23 files changed, 236 insertions(+), 117 deletions(-) diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 770090a2..c3ecf410 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -8,8 +8,8 @@ sources: - latest mistral-google-cloud-source: sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:d3e3d15303dcc1acb27b8895aa3064328bd5b8013ea635c2bce553b6e647b498 - sourceBlobDigest: sha256:db72004ee842a27c3e77980be28a727811e0581daa7a51ad34d142302f8ba2f3 + sourceRevisionDigest: sha256:e0fd58ce2dbba068f375d3a23d758b8678c2a68cf4fc7bc46ea7e1b37abe0647 + sourceBlobDigest: sha256:0707d8d2566a9ef4ef286bb0abe467f8696ccf83ba73091065d7caf627a06611 tags: - latest mistral-openapi: @@ -29,10 +29,10 @@ targets: mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:d3e3d15303dcc1acb27b8895aa3064328bd5b8013ea635c2bce553b6e647b498 - sourceBlobDigest: sha256:db72004ee842a27c3e77980be28a727811e0581daa7a51ad34d142302f8ba2f3 + sourceRevisionDigest: 
sha256:e0fd58ce2dbba068f375d3a23d758b8678c2a68cf4fc7bc46ea7e1b37abe0647 + sourceBlobDigest: sha256:0707d8d2566a9ef4ef286bb0abe467f8696ccf83ba73091065d7caf627a06611 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:7d95ba7aa230088b9975be341ba638e51cc574b6e863bd3a0f53e9c5ee261bba + codeSamplesRevisionDigest: sha256:0657ec41e473356a5a0eeaca3dff137e9ff16080ec1fb50e72553245aa86ffe5 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 4ac979ee..ee99e6bb 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,10 +1,10 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: e6c0a4254e61b1f171b409862f717867 + docChecksum: d50a06ac34844141709fa2e57cc940c5 docVersion: 0.0.2 - speakeasyVersion: 1.440.1 - generationVersion: 2.460.1 + speakeasyVersion: 1.451.1 + generationVersion: 2.470.1 releaseVersion: 1.2.3 configChecksum: 3fc99d7ec7ee057a323b593ebf8fdb8c published: true @@ -12,7 +12,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.6.5 + core: 5.6.8 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 @@ -29,11 +29,10 @@ features: responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.0.0 - serverEvents: 1.0.4 + serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - tests: 1.6.0 - unions: 3.0.3 + unions: 3.0.4 generatedFiles: - .gitattributes - .python-version @@ -187,3 +186,4 @@ examples: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} "422": {} +generatedTests: {} diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml 
index f20d9b17..670c1e3a 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -22,12 +22,12 @@ google-auth = "2.27.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" pydantic = "~2.9.2" -python-dateutil = "2.8.2" +python-dateutil = "^2.8.2" requests = "^2.32.3" typing-inspect = "^0.9.0" [tool.poetry.group.dev.dependencies] -mypy = "==1.10.1" +mypy = "==1.13.0" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index 17913668..19c92651 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -5,7 +5,7 @@ from mistralai_gcp._hooks import HookContext from mistralai_gcp.types import Nullable, OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming -from typing import Any, AsyncGenerator, Generator, List, Optional, Union +from typing import Any, List, Optional, Union class Chat(BaseSDK): @@ -40,7 +40,7 @@ def stream( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[Generator[models.CompletionEvent, None, None]]: + ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -132,7 +132,7 @@ def stream( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events( + return eventstreaming.EventStream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", @@ -185,7 +185,7 @@ async def stream_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -277,7 +277,7 @@ async def stream_async( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async( + return eventstreaming.EventStreamAsync( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index fb3bf902..bddc010f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -5,7 +5,7 @@ from mistralai_gcp._hooks import HookContext from mistralai_gcp.types import Nullable, OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming -from typing import Any, AsyncGenerator, Generator, Optional, Union +from typing import Any, Optional, Union class Fim(BaseSDK): @@ -32,7 +32,7 @@ def stream( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[Generator[models.CompletionEvent, None, None]]: + ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream fim completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -112,7 +112,7 @@ def stream( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events( + return eventstreaming.EventStream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", @@ -157,7 +157,7 @@ async def stream_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream fim completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -237,7 +237,7 @@ async def stream_async( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async( + return eventstreaming.EventStreamAsync( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py index 36b642a0..167cea4e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py @@ -41,6 +41,9 @@ def build_request( ) -> httpx.Request: pass + def close(self) -> None: + pass + @runtime_checkable class AsyncHttpClient(Protocol): @@ -76,3 +79,6 @@ def build_request( extensions: Optional[httpx._types.RequestExtensions] = None, ) -> httpx.Request: pass + + async def aclose(self) -> None: + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index f93a06cf..6a9b58f2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -12,13 +12,17 @@ ) from pydantic import model_serializer from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] +AssistantMessageContentTypedDict = TypeAliasType( + "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -AssistantMessageContent = Union[str, List[ContentChunk]] +AssistantMessageContent = TypeAliasType( + "AssistantMessageContent", Union[str, List[ContentChunk]] +) AssistantMessageRole = Literal["assistant"] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py 
b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index 1f956d0a..b8ebfc91 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -19,23 +19,30 @@ from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -ChatCompletionRequestStopTypedDict = Union[str, List[str]] +ChatCompletionRequestStopTypedDict = TypeAliasType( + "ChatCompletionRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestStop = Union[str, List[str]] +ChatCompletionRequestStop = TypeAliasType( + "ChatCompletionRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestMessagesTypedDict = Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, -] +ChatCompletionRequestMessagesTypedDict = TypeAliasType( + "ChatCompletionRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) ChatCompletionRequestMessages = Annotated[ @@ -49,10 +56,15 @@ ] -ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] +ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) -ChatCompletionRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] +ChatCompletionRequestToolChoice = TypeAliasType( + "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) class ChatCompletionRequestTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index f12a5477..b710a27d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -19,23 +19,26 @@ from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -StopTypedDict = Union[str, List[str]] +StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -Stop = Union[str, List[str]] +Stop = TypeAliasType("Stop", Union[str, List[str]]) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, -] +MessagesTypedDict = TypeAliasType( + "MessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) Messages = Annotated[ @@ -49,12 +52,15 @@ ] -ChatCompletionStreamRequestToolChoiceTypedDict = Union[ - ToolChoiceTypedDict, ToolChoiceEnum -] +ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) -ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] +ChatCompletionStreamRequestToolChoice = TypeAliasType( + "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) class ChatCompletionStreamRequestTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py index 1c882f7e..4da1153a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py @@ -6,10 +6,12 @@ from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag from typing import Union -from typing_extensions import Annotated +from typing_extensions import Annotated, TypeAliasType -ContentChunkTypedDict = Union[TextChunkTypedDict, ReferenceChunkTypedDict] +ContentChunkTypedDict = TypeAliasType( + "ContentChunkTypedDict", Union[TextChunkTypedDict, ReferenceChunkTypedDict] +) ContentChunk = Annotated[ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py 
b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py index bb540c96..f9f0868b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -12,13 +12,15 @@ ) from pydantic import model_serializer from typing import List, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -ContentTypedDict = Union[str, List[ContentChunkTypedDict]] +ContentTypedDict = TypeAliasType( + "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -Content = Union[str, List[ContentChunk]] +Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) class DeltaMessageTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 3a851768..81c87b7e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -10,14 +10,18 @@ ) from pydantic import model_serializer from typing import List, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -FIMCompletionRequestStopTypedDict = Union[str, List[str]] +FIMCompletionRequestStopTypedDict = TypeAliasType( + "FIMCompletionRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -FIMCompletionRequestStop = Union[str, List[str]] +FIMCompletionRequestStop = TypeAliasType( + "FIMCompletionRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index f47937b9..356758d3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -10,14 +10,18 @@ ) from pydantic import model_serializer from typing import List, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -FIMCompletionStreamRequestStopTypedDict = Union[str, List[str]] +FIMCompletionStreamRequestStopTypedDict = TypeAliasType( + "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -FIMCompletionStreamRequestStop = Union[str, List[str]] +FIMCompletionStreamRequestStop = TypeAliasType( + "FIMCompletionStreamRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py index 02da9bba..99554c88 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py @@ -3,13 +3,13 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel from typing import Any, Dict, Union -from typing_extensions import TypedDict +from typing_extensions import TypeAliasType, TypedDict -ArgumentsTypedDict = Union[Dict[str, Any], str] +ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) -Arguments = Union[Dict[str, Any], str] +Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) class FunctionCallTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py index d409a70d..c4fa3b8b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py @@ -2,11 +2,8 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import Annotated, TypedDict +from typing_extensions import NotRequired, TypedDict ReferenceChunkType = Literal["reference"] @@ -14,15 +11,10 @@ class ReferenceChunkTypedDict(TypedDict): reference_ids: List[int] - type: ReferenceChunkType + type: NotRequired[ReferenceChunkType] class ReferenceChunk(BaseModel): reference_ids: List[int] - TYPE: Annotated[ - Annotated[ - Optional[ReferenceChunkType], AfterValidator(validate_const("reference")) - ], - pydantic.Field(alias="type"), - ] = "reference" + 
type: Optional[ReferenceChunkType] = "reference" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py index 87798558..f14acf12 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py @@ -4,13 +4,17 @@ from .textchunk import TextChunk, TextChunkTypedDict from mistralai_gcp.types import BaseModel from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -SystemMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] +SystemMessageContentTypedDict = TypeAliasType( + "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] +) -SystemMessageContent = Union[str, List[TextChunk]] +SystemMessageContent = TypeAliasType( + "SystemMessageContent", Union[str, List[TextChunk]] +) Role = Literal["system"] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py index 48367e4e..12f666cd 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py @@ -2,11 +2,8 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict +from typing_extensions import NotRequired, TypedDict Type = Literal["text"] @@ -14,13 +11,10 @@ class TextChunkTypedDict(TypedDict): text: str - type: Type + type: NotRequired[Type] class TextChunk(BaseModel): text: str - TYPE: Annotated[ - Annotated[Optional[Type], AfterValidator(validate_const("text"))], - pydantic.Field(alias="type"), - ] = "text" + type: 
Optional[Type] = "text" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py index ad6b800c..886b6ff1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -11,13 +11,15 @@ ) from pydantic import model_serializer from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] +ToolMessageContentTypedDict = TypeAliasType( + "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -ToolMessageContent = Union[str, List[ContentChunk]] +ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) ToolMessageRole = Literal["tool"] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py index 229dbaf9..287bb1b4 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -5,13 +5,15 @@ from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL from pydantic import model_serializer from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] +UserMessageContentTypedDict = TypeAliasType( + "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -UserMessageContent = Union[str, List[ContentChunk]] +UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) UserMessageRole = Literal["user"] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py 
b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py index b8bd4345..033d4b63 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py @@ -3,13 +3,13 @@ from __future__ import annotations from mistralai_gcp.types import BaseModel from typing import List, Union -from typing_extensions import TypedDict +from typing_extensions import TypeAliasType, TypedDict -LocTypedDict = Union[str, int] +LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) -Loc = Union[str, int] +Loc = TypeAliasType("Loc", Union[str, int]) class ValidationErrorTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index 061467eb..b5800815 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -29,8 +29,8 @@ class SDKConfiguration: language: str = "python" openapi_doc_version: str = "0.0.2" sdk_version: str = "1.2.3" - gen_version: str = "2.460.1" - user_agent: str = "speakeasy-sdk/python 1.2.3 2.460.1 0.0.2 mistralai-gcp" + gen_version: str = "2.470.1" + user_agent: str = "speakeasy-sdk/python 1.2.3 2.470.1 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py index 5b3bbb02..387874ed 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py @@ -1,30 +1,55 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from enum import Enum -from typing import Any +from typing import Any, Optional def get_discriminator(model: Any, fieldname: str, key: str) -> str: - if isinstance(model, dict): - try: - return f'{model.get(key)}' - except AttributeError as e: - raise ValueError(f'Could not find discriminator key {key} in {model}') from e + """ + Recursively search for the discriminator attribute in a model. - if hasattr(model, fieldname): - attr = getattr(model, fieldname) + Args: + model (Any): The model to search within. + fieldname (str): The name of the field to search for. + key (str): The key to search for in dictionaries. - if isinstance(attr, Enum): - return f'{attr.value}' + Returns: + str: The name of the discriminator attribute. - return f'{attr}' + Raises: + ValueError: If the discriminator attribute is not found. + """ + upper_fieldname = fieldname.upper() - fieldname = fieldname.upper() - if hasattr(model, fieldname): - attr = getattr(model, fieldname) + def get_field_discriminator(field: Any) -> Optional[str]: + """Search for the discriminator attribute in a given field.""" - if isinstance(attr, Enum): - return f'{attr.value}' + if isinstance(field, dict): + if key in field: + return f'{field[key]}' - return f'{attr}' + if hasattr(field, fieldname): + attr = getattr(field, fieldname) + if isinstance(attr, Enum): + return f'{attr.value}' + return f'{attr}' + + if hasattr(field, upper_fieldname): + attr = getattr(field, upper_fieldname) + if isinstance(attr, Enum): + return f'{attr.value}' + return f'{attr}' + + return None + + + if isinstance(model, list): + for field in model: + discriminator = get_field_discriminator(field) + if discriminator is not None: + return discriminator + + discriminator = get_field_discriminator(model) + if discriminator is not None: + return discriminator raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py 
b/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py index 553b386b..74a63f75 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py @@ -2,12 +2,72 @@ import re import json -from typing import Callable, TypeVar, Optional, Generator, AsyncGenerator, Tuple +from typing import ( + Callable, + Generic, + TypeVar, + Optional, + Generator, + AsyncGenerator, + Tuple, +) import httpx T = TypeVar("T") +class EventStream(Generic[T]): + response: httpx.Response + generator: Generator[T, None, None] + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + ): + self.response = response + self.generator = stream_events(response, decoder, sentinel) + + def __iter__(self): + return self + + def __next__(self): + return next(self.generator) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.response.close() + + +class EventStreamAsync(Generic[T]): + response: httpx.Response + generator: AsyncGenerator[T, None] + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + ): + self.response = response + self.generator = stream_events_async(response, decoder, sentinel) + + def __aiter__(self): + return self + + async def __anext__(self): + return await self.generator.__anext__() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.response.aclose() + + class ServerEvent: id: Optional[str] = None event: Optional[str] = None From c27ad5410c83efa47b0c61f2bf991b06aa7a7b0d Mon Sep 17 00:00:00 2001 From: alex-ac Date: Wed, 4 Dec 2024 14:13:12 +0100 Subject: [PATCH 089/223] gen azure sdk --- .speakeasy/workflow.lock | 10 +-- packages/mistralai_azure/.speakeasy/gen.lock | 14 ++--- packages/mistralai_azure/pyproject.toml | 4 +- 
.../src/mistralai_azure/chat.py | 10 +-- .../src/mistralai_azure/httpclient.py | 6 ++ .../models/assistantmessage.py | 10 ++- .../models/chatcompletionrequest.py | 34 ++++++---- .../models/chatcompletionstreamrequest.py | 32 ++++++---- .../mistralai_azure/models/contentchunk.py | 6 +- .../mistralai_azure/models/deltamessage.py | 8 ++- .../mistralai_azure/models/functioncall.py | 6 +- .../mistralai_azure/models/referencechunk.py | 14 +---- .../mistralai_azure/models/systemmessage.py | 10 ++- .../src/mistralai_azure/models/textchunk.py | 12 +--- .../src/mistralai_azure/models/toolmessage.py | 8 ++- .../src/mistralai_azure/models/usermessage.py | 8 ++- .../mistralai_azure/models/validationerror.py | 6 +- .../src/mistralai_azure/sdkconfiguration.py | 4 +- .../src/mistralai_azure/utils/annotations.py | 59 +++++++++++++----- .../mistralai_azure/utils/eventstreaming.py | 62 ++++++++++++++++++- 20 files changed, 217 insertions(+), 106 deletions(-) diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index c3ecf410..4d2a32b8 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -2,8 +2,8 @@ speakeasyVersion: 1.451.1 sources: mistral-azure-source: sourceNamespace: mistral-azure-source - sourceRevisionDigest: sha256:c441f2d21e7879f5fb9d8d99e2ae242d1e5a84c0c06db971911eb578173e7f62 - sourceBlobDigest: sha256:de4af0f100f15fef89e093a6b5393302b2218fb154230594ec811aacdd4f2ec7 + sourceRevisionDigest: sha256:9c35eed0174f2d8165807bcd7c8e7b7111fa97c059a77ae7eeaa352ca7e83b4d + sourceBlobDigest: sha256:07283bfde08363f9f69b133888b482472c4bf12d2e5b59cb33c8993c517278e3 tags: - latest mistral-google-cloud-source: @@ -22,10 +22,10 @@ targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-azure-source - sourceRevisionDigest: sha256:c441f2d21e7879f5fb9d8d99e2ae242d1e5a84c0c06db971911eb578173e7f62 - sourceBlobDigest: sha256:de4af0f100f15fef89e093a6b5393302b2218fb154230594ec811aacdd4f2ec7 + sourceRevisionDigest: 
sha256:9c35eed0174f2d8165807bcd7c8e7b7111fa97c059a77ae7eeaa352ca7e83b4d + sourceBlobDigest: sha256:07283bfde08363f9f69b133888b482472c4bf12d2e5b59cb33c8993c517278e3 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:5db0b04cc2b3962de41cb07e87fe817dd5ec8bc5d8b0254245b26faf70ede027 + codeSamplesRevisionDigest: sha256:79a227720579444358a825b1a272c153f3d9dd48cd0913be6c988d7931a44241 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-google-cloud-source diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 518aba16..15388f4c 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,10 +1,10 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: d0000cbe03848bfe843794965cba332f + docChecksum: 26271aa279a7a7182f7af19df8b67038 docVersion: 0.0.2 - speakeasyVersion: 1.440.1 - generationVersion: 2.460.1 + speakeasyVersion: 1.451.1 + generationVersion: 2.470.1 releaseVersion: 1.2.3 configChecksum: 60295c765204eb0aa26205ec02e574fc published: true @@ -12,7 +12,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.6.5 + core: 5.6.8 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 @@ -29,11 +29,10 @@ features: responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.0.0 - serverEvents: 1.0.4 + serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - tests: 1.6.0 - unions: 3.0.3 + unions: 3.0.4 generatedFiles: - .gitattributes - .python-version @@ -166,3 +165,4 @@ examples: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} "422": {} +generatedTests: {} diff --git a/packages/mistralai_azure/pyproject.toml 
b/packages/mistralai_azure/pyproject.toml index 75b1d0c4..a7a0a374 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -21,11 +21,11 @@ eval-type-backport = "^0.2.0" httpx = "^0.27.0" jsonpath-python = "^1.0.6" pydantic = "~2.9.2" -python-dateutil = "2.8.2" +python-dateutil = "^2.8.2" typing-inspect = "^0.9.0" [tool.poetry.group.dev.dependencies] -mypy = "==1.10.1" +mypy = "==1.13.0" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index fb443e52..afab9ba4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -5,7 +5,7 @@ from mistralai_azure._hooks import HookContext from mistralai_azure.types import OptionalNullable, UNSET from mistralai_azure.utils import eventstreaming -from typing import Any, AsyncGenerator, Generator, List, Optional, Union +from typing import Any, List, Optional, Union class Chat(BaseSDK): @@ -41,7 +41,7 @@ def stream( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[Generator[models.CompletionEvent, None, None]]: + ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -135,7 +135,7 @@ def stream( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events( + return eventstreaming.EventStream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", @@ -189,7 +189,7 @@ async def stream_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, - ) -> Optional[AsyncGenerator[models.CompletionEvent, None]]: + ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -283,7 +283,7 @@ async def stream_async( data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.stream_events_async( + return eventstreaming.EventStreamAsync( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/mistralai_azure/src/mistralai_azure/httpclient.py index 36b642a0..167cea4e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/httpclient.py +++ b/packages/mistralai_azure/src/mistralai_azure/httpclient.py @@ -41,6 +41,9 @@ def build_request( ) -> httpx.Request: pass + def close(self) -> None: + pass + @runtime_checkable class AsyncHttpClient(Protocol): @@ -76,3 +79,6 @@ def build_request( extensions: Optional[httpx._types.RequestExtensions] = None, ) -> httpx.Request: pass + + async def aclose(self) -> None: + pass diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 5d978f01..031677cf 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -12,13 +12,17 @@ ) from pydantic import model_serializer from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -AssistantMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] +AssistantMessageContentTypedDict = TypeAliasType( + "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -AssistantMessageContent = Union[str, List[ContentChunk]] +AssistantMessageContent = TypeAliasType( + "AssistantMessageContent", Union[str, List[ContentChunk]] +) AssistantMessageRole = Literal["assistant"] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py 
b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index beedf520..3e4e9a3a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -19,23 +19,30 @@ from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -ChatCompletionRequestStopTypedDict = Union[str, List[str]] +ChatCompletionRequestStopTypedDict = TypeAliasType( + "ChatCompletionRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestStop = Union[str, List[str]] +ChatCompletionRequestStop = TypeAliasType( + "ChatCompletionRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestMessagesTypedDict = Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, -] +ChatCompletionRequestMessagesTypedDict = TypeAliasType( + "ChatCompletionRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) ChatCompletionRequestMessages = Annotated[ @@ -49,10 +56,15 @@ ] -ChatCompletionRequestToolChoiceTypedDict = Union[ToolChoiceTypedDict, ToolChoiceEnum] +ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) -ChatCompletionRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] +ChatCompletionRequestToolChoice = TypeAliasType( + "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) class ChatCompletionRequestTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 9d78371b..6d1f6bb7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -19,23 +19,26 @@ from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -StopTypedDict = Union[str, List[str]] +StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -Stop = Union[str, List[str]] +Stop = TypeAliasType("Stop", Union[str, List[str]]) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, -] +MessagesTypedDict = TypeAliasType( + "MessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) Messages = Annotated[ @@ -49,12 +52,15 @@ ] -ChatCompletionStreamRequestToolChoiceTypedDict = Union[ - ToolChoiceTypedDict, ToolChoiceEnum -] +ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) -ChatCompletionStreamRequestToolChoice = Union[ToolChoice, ToolChoiceEnum] +ChatCompletionStreamRequestToolChoice = TypeAliasType( + "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) class ChatCompletionStreamRequestTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py index e8013323..70c94e70 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py @@ -6,10 +6,12 @@ from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag from typing import Union -from typing_extensions import Annotated +from typing_extensions import Annotated, TypeAliasType -ContentChunkTypedDict = Union[TextChunkTypedDict, ReferenceChunkTypedDict] +ContentChunkTypedDict = TypeAliasType( + "ContentChunkTypedDict", Union[TextChunkTypedDict, ReferenceChunkTypedDict] +) ContentChunk = Annotated[ diff --git 
a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py index bb394494..112eb127 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py @@ -12,13 +12,15 @@ ) from pydantic import model_serializer from typing import List, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -ContentTypedDict = Union[str, List[ContentChunkTypedDict]] +ContentTypedDict = TypeAliasType( + "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -Content = Union[str, List[ContentChunk]] +Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) class DeltaMessageTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py index d2f136cd..dd93c462 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py @@ -3,13 +3,13 @@ from __future__ import annotations from mistralai_azure.types import BaseModel from typing import Any, Dict, Union -from typing_extensions import TypedDict +from typing_extensions import TypeAliasType, TypedDict -ArgumentsTypedDict = Union[Dict[str, Any], str] +ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) -Arguments = Union[Dict[str, Any], str] +Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) class FunctionCallTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py b/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py index ddc89195..4df3bfbc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py +++ 
b/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py @@ -2,11 +2,8 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import Annotated, TypedDict +from typing_extensions import NotRequired, TypedDict ReferenceChunkType = Literal["reference"] @@ -14,15 +11,10 @@ class ReferenceChunkTypedDict(TypedDict): reference_ids: List[int] - type: ReferenceChunkType + type: NotRequired[ReferenceChunkType] class ReferenceChunk(BaseModel): reference_ids: List[int] - TYPE: Annotated[ - Annotated[ - Optional[ReferenceChunkType], AfterValidator(validate_const("reference")) - ], - pydantic.Field(alias="type"), - ] = "reference" + type: Optional[ReferenceChunkType] = "reference" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py index 3c00a82b..b7d975b6 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py @@ -4,13 +4,17 @@ from .textchunk import TextChunk, TextChunkTypedDict from mistralai_azure.types import BaseModel from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -SystemMessageContentTypedDict = Union[str, List[TextChunkTypedDict]] +SystemMessageContentTypedDict = TypeAliasType( + "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] +) -SystemMessageContent = Union[str, List[TextChunk]] +SystemMessageContent = TypeAliasType( + "SystemMessageContent", Union[str, List[TextChunk]] +) Role = Literal["system"] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py 
b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py index 583ce18d..be60c8f9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py @@ -2,11 +2,8 @@ from __future__ import annotations from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict +from typing_extensions import NotRequired, TypedDict Type = Literal["text"] @@ -14,13 +11,10 @@ class TextChunkTypedDict(TypedDict): text: str - type: Type + type: NotRequired[Type] class TextChunk(BaseModel): text: str - TYPE: Annotated[ - Annotated[Optional[Type], AfterValidator(validate_const("text"))], - pydantic.Field(alias="type"), - ] = "text" + type: Optional[Type] = "text" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py index 1004c439..3e9aa3da 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py @@ -11,13 +11,15 @@ ) from pydantic import model_serializer from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] +ToolMessageContentTypedDict = TypeAliasType( + "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -ToolMessageContent = Union[str, List[ContentChunk]] +ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) ToolMessageRole = Literal["tool"] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py 
b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py index eddfb856..8cce1745 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py @@ -5,13 +5,15 @@ from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL from pydantic import model_serializer from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypedDict +from typing_extensions import NotRequired, TypeAliasType, TypedDict -UserMessageContentTypedDict = Union[str, List[ContentChunkTypedDict]] +UserMessageContentTypedDict = TypeAliasType( + "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) -UserMessageContent = Union[str, List[ContentChunk]] +UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) UserMessageRole = Literal["user"] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py index aa8eaff9..4caff4a6 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py @@ -3,13 +3,13 @@ from __future__ import annotations from mistralai_azure.types import BaseModel from typing import List, Union -from typing_extensions import TypedDict +from typing_extensions import TypeAliasType, TypedDict -LocTypedDict = Union[str, int] +LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) -Loc = Union[str, int] +Loc = TypeAliasType("Loc", Union[str, int]) class ValidationErrorTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 8f64c457..191aa320 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ 
b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -29,8 +29,8 @@ class SDKConfiguration: language: str = "python" openapi_doc_version: str = "0.0.2" sdk_version: str = "1.2.3" - gen_version: str = "2.460.1" - user_agent: str = "speakeasy-sdk/python 1.2.3 2.460.1 0.0.2 mistralai_azure" + gen_version: str = "2.470.1" + user_agent: str = "speakeasy-sdk/python 1.2.3 2.470.1 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py index 5b3bbb02..387874ed 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py @@ -1,30 +1,55 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from enum import Enum -from typing import Any +from typing import Any, Optional def get_discriminator(model: Any, fieldname: str, key: str) -> str: - if isinstance(model, dict): - try: - return f'{model.get(key)}' - except AttributeError as e: - raise ValueError(f'Could not find discriminator key {key} in {model}') from e + """ + Recursively search for the discriminator attribute in a model. - if hasattr(model, fieldname): - attr = getattr(model, fieldname) + Args: + model (Any): The model to search within. + fieldname (str): The name of the field to search for. + key (str): The key to search for in dictionaries. - if isinstance(attr, Enum): - return f'{attr.value}' + Returns: + str: The name of the discriminator attribute. - return f'{attr}' + Raises: + ValueError: If the discriminator attribute is not found. 
+ """ + upper_fieldname = fieldname.upper() - fieldname = fieldname.upper() - if hasattr(model, fieldname): - attr = getattr(model, fieldname) + def get_field_discriminator(field: Any) -> Optional[str]: + """Search for the discriminator attribute in a given field.""" - if isinstance(attr, Enum): - return f'{attr.value}' + if isinstance(field, dict): + if key in field: + return f'{field[key]}' - return f'{attr}' + if hasattr(field, fieldname): + attr = getattr(field, fieldname) + if isinstance(attr, Enum): + return f'{attr.value}' + return f'{attr}' + + if hasattr(field, upper_fieldname): + attr = getattr(field, upper_fieldname) + if isinstance(attr, Enum): + return f'{attr.value}' + return f'{attr}' + + return None + + + if isinstance(model, list): + for field in model: + discriminator = get_field_discriminator(field) + if discriminator is not None: + return discriminator + + discriminator = get_field_discriminator(model) + if discriminator is not None: + return discriminator raise ValueError(f'Could not find discriminator field {fieldname} in {model}') diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py b/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py index 553b386b..74a63f75 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py @@ -2,12 +2,72 @@ import re import json -from typing import Callable, TypeVar, Optional, Generator, AsyncGenerator, Tuple +from typing import ( + Callable, + Generic, + TypeVar, + Optional, + Generator, + AsyncGenerator, + Tuple, +) import httpx T = TypeVar("T") +class EventStream(Generic[T]): + response: httpx.Response + generator: Generator[T, None, None] + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + ): + self.response = response + self.generator = stream_events(response, decoder, sentinel) + + def 
__iter__(self): + return self + + def __next__(self): + return next(self.generator) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.response.close() + + +class EventStreamAsync(Generic[T]): + response: httpx.Response + generator: AsyncGenerator[T, None] + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + ): + self.response = response + self.generator = stream_events_async(response, decoder, sentinel) + + def __aiter__(self): + return self + + async def __anext__(self): + return await self.generator.__anext__() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.response.aclose() + + class ServerEvent: id: Optional[str] = None event: Optional[str] = None From e0e7886ff1586b0fd7c7f925431fd9edaa7bf33f Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Wed, 4 Dec 2024 14:42:48 +0100 Subject: [PATCH 090/223] fix: only replaced ua if needed (#163) * fix: don't replace ua when already modified * add eof * fix for providers --- .../src/mistralai_azure/_hooks/custom_user_agent.py | 10 ++++++++-- .../src/mistralai_gcp/_hooks/custom_user_agent.py | 8 +++++++- src/mistralai/_hooks/custom_user_agent.py | 8 +++++++- 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py index b03549c3..00bac959 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py @@ -5,12 +5,18 @@ from .types import BeforeRequestContext, BeforeRequestHook +prefix = "mistral-client-python/" class CustomUserAgentHook(BeforeRequestHook): def before_request( self, hook_ctx: BeforeRequestContext, request: httpx.Request ) -> 
Union[httpx.Request, Exception]: + current = request.headers["user-agent"] + if current.startswith(prefix): + return request + request.headers["user-agent"] = ( - "mistral-client-python/" + request.headers["user-agent"].split(" ")[1] + prefix + current.split(" ")[1] ) - return request + + return request \ No newline at end of file diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py index b03549c3..6a7083dc 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py @@ -5,12 +5,18 @@ from .types import BeforeRequestContext, BeforeRequestHook +prefix = "mistral-client-python/" class CustomUserAgentHook(BeforeRequestHook): def before_request( self, hook_ctx: BeforeRequestContext, request: httpx.Request ) -> Union[httpx.Request, Exception]: + current = request.headers["user-agent"] + if current.startswith(prefix): + return request + request.headers["user-agent"] = ( - "mistral-client-python/" + request.headers["user-agent"].split(" ")[1] + prefix + current.split(" ")[1] ) + return request diff --git a/src/mistralai/_hooks/custom_user_agent.py b/src/mistralai/_hooks/custom_user_agent.py index 59506ea2..29675c03 100644 --- a/src/mistralai/_hooks/custom_user_agent.py +++ b/src/mistralai/_hooks/custom_user_agent.py @@ -5,12 +5,18 @@ from .types import BeforeRequestContext, BeforeRequestHook +prefix = "mistral-client-python/" class CustomUserAgentHook(BeforeRequestHook): def before_request( self, hook_ctx: BeforeRequestContext, request: httpx.Request ) -> Union[httpx.Request, Exception]: + current = request.headers["user-agent"] + if current.startswith(prefix): + return request + request.headers["user-agent"] = ( - "mistral-client-python/" + request.headers["user-agent"].split(" ")[1] + prefix + current.split(" ")[1] ) + return request From 
a7fa1f007b74f20e7596050f50c3e802d9277175 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Wed, 4 Dec 2024 15:55:47 +0100 Subject: [PATCH 091/223] use to UPPER_CASE naming style (#164) --- .../src/mistralai_azure/_hooks/custom_user_agent.py | 6 +++--- .../src/mistralai_gcp/_hooks/custom_user_agent.py | 6 +++--- src/mistralai/_hooks/custom_user_agent.py | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py index 00bac959..2b71a96a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py @@ -5,18 +5,18 @@ from .types import BeforeRequestContext, BeforeRequestHook -prefix = "mistral-client-python/" +PREFIX = "mistral-client-python/" class CustomUserAgentHook(BeforeRequestHook): def before_request( self, hook_ctx: BeforeRequestContext, request: httpx.Request ) -> Union[httpx.Request, Exception]: current = request.headers["user-agent"] - if current.startswith(prefix): + if current.startswith(PREFIX): return request request.headers["user-agent"] = ( - prefix + current.split(" ")[1] + PREFIX + current.split(" ")[1] ) return request \ No newline at end of file diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py index 6a7083dc..77df6aef 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py @@ -5,18 +5,18 @@ from .types import BeforeRequestContext, BeforeRequestHook -prefix = "mistral-client-python/" +PREFIX = "mistral-client-python/" class CustomUserAgentHook(BeforeRequestHook): def before_request( self, hook_ctx: BeforeRequestContext, request: 
httpx.Request ) -> Union[httpx.Request, Exception]: current = request.headers["user-agent"] - if current.startswith(prefix): + if current.startswith(PREFIX): return request request.headers["user-agent"] = ( - prefix + current.split(" ")[1] + PREFIX + current.split(" ")[1] ) return request diff --git a/src/mistralai/_hooks/custom_user_agent.py b/src/mistralai/_hooks/custom_user_agent.py index 29675c03..212f2172 100644 --- a/src/mistralai/_hooks/custom_user_agent.py +++ b/src/mistralai/_hooks/custom_user_agent.py @@ -5,18 +5,18 @@ from .types import BeforeRequestContext, BeforeRequestHook -prefix = "mistral-client-python/" +PREFIX = "mistral-client-python/" class CustomUserAgentHook(BeforeRequestHook): def before_request( self, hook_ctx: BeforeRequestContext, request: httpx.Request ) -> Union[httpx.Request, Exception]: current = request.headers["user-agent"] - if current.startswith(prefix): + if current.startswith(PREFIX): return request request.headers["user-agent"] = ( - prefix + current.split(" ")[1] + PREFIX + current.split(" ")[1] ) return request From 329ce8caecb3ccfd4d25cc622782ba2f280377f3 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 4 Dec 2024 16:31:36 +0100 Subject: [PATCH 092/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.451.1 (#166) Co-authored-by: speakeasybot --- .speakeasy/gen.lock | 4 ++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 3 ++- RELEASES.md | 12 +++++++++++- pyproject.toml | 2 +- src/mistralai/_version.py | 2 +- src/mistralai/sdkconfiguration.py | 4 ++-- 7 files changed, 20 insertions(+), 9 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index e2422227..0b077597 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,8 +5,8 @@ management: docVersion: 0.0.2 speakeasyVersion: 1.451.1 generationVersion: 2.470.1 - releaseVersion: 1.2.4 - configChecksum: 4fe789bac842073beb4e2d9c6c3f833d + releaseVersion: 1.2.5 + 
configChecksum: 9dbada68b30d84dc9c102c143b926b3d repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index dfee8409..5c043c4c 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.4 + version: 1.2.5 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 4d2a32b8..593960c5 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -18,6 +18,7 @@ sources: sourceBlobDigest: sha256:5de08a038994ec94c0889341d434b598f541459d114f9935deb9ef3b3af90c5f tags: - latest + - speakeasy-sdk-regen-1733325251 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -39,7 +40,7 @@ targets: sourceRevisionDigest: sha256:f74c08bdc7ae39f5fe2394df8f31ae623ece30a7f65019ab6b7bcea352953f05 sourceBlobDigest: sha256:5de08a038994ec94c0889341d434b598f541459d114f9935deb9ef3b3af90c5f codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:09212fda8fc13e0f486f157495d028138bc9babedfba6dd85f7024575f30fd0e + codeSamplesRevisionDigest: sha256:9a22e0289ff4e72dd43c1b65ddb8c7870814176d36e1eb6352d06e93dec597ba workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/RELEASES.md b/RELEASES.md index 845891e8..a13990d2 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -98,4 +98,14 @@ Based on: ### Generated - [python v1.2.4] . ### Releases -- [PyPI v1.2.4] https://pypi.org/project/mistralai/1.2.4 - . \ No newline at end of file +- [PyPI v1.2.4] https://pypi.org/project/mistralai/1.2.4 - . 
+ +## 2024-12-04 15:14:08 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.451.1 (2.470.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.5] . +### Releases +- [PyPI v1.2.5] https://pypi.org/project/mistralai/1.2.5 - . \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index ddd41722..d3c6fb53 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.2.4" +version = "1.2.5" description = "Python Client SDK for the Mistral AI API." authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 41970f16..d1350966 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.2.4" +__version__: str = "1.2.5" try: if __package__ is not None: diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 1f8261ae..bb046ba0 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.4" + sdk_version: str = "1.2.5" gen_version: str = "2.470.1" - user_agent: str = "speakeasy-sdk/python 1.2.4 2.470.1 0.0.2 mistralai" + user_agent: str = "speakeasy-sdk/python 1.2.5 2.470.1 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From d14559af390123cbd2d923e1e05f780500ce5411 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 17:47:09 +0100 Subject: [PATCH 093/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= 
=?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.2.6=20(#169)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.462.2 * update gcp client 1.2.6 * update azure client 1.2.6 --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .gitignore | 1 + .speakeasy/gen.lock | 50 +++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 34 +-- README.md | 200 ++++++++++-------- RELEASES.md | 12 +- USAGE.md | 96 +++++---- docs/sdks/agents/README.md | 28 +-- docs/sdks/chat/README.md | 28 +-- docs/sdks/classifiers/README.md | 24 ++- docs/sdks/embeddings/README.md | 12 +- docs/sdks/files/README.md | 72 ++++--- docs/sdks/fim/README.md | 30 +-- docs/sdks/jobs/README.md | 60 +++--- docs/sdks/mistraljobs/README.md | 48 +++-- docs/sdks/models/README.md | 72 ++++--- packages/mistralai_azure/.gitignore | 1 + packages/mistralai_azure/.speakeasy/gen.lock | 22 +- packages/mistralai_azure/.speakeasy/gen.yaml | 2 +- packages/mistralai_azure/pyproject.toml | 6 +- .../src/mistralai_azure/_version.py | 2 +- .../src/mistralai_azure/basesdk.py | 34 ++- .../src/mistralai_azure/chat.py | 38 ++-- .../models/chatcompletionrequest.py | 4 +- .../models/chatcompletionstreamrequest.py | 4 +- .../src/mistralai_azure/sdkconfiguration.py | 6 +- .../src/mistralai_azure/utils/forms.py | 14 +- .../mistralai_azure/utils/requestbodies.py | 2 +- packages/mistralai_gcp/.gitignore | 1 + packages/mistralai_gcp/.speakeasy/gen.lock | 28 ++- packages/mistralai_gcp/.speakeasy/gen.yaml | 2 +- packages/mistralai_gcp/pyproject.toml | 6 +- .../src/mistralai_gcp/_version.py | 2 +- .../src/mistralai_gcp/basesdk.py | 34 ++- .../mistralai_gcp/src/mistralai_gcp/chat.py | 30 ++- .../mistralai_gcp/src/mistralai_gcp/fim.py | 22 +- .../models/chatcompletionrequest.py | 2 +- .../models/chatcompletionstreamrequest.py | 2 +- .../src/mistralai_gcp/sdkconfiguration.py | 6 +- .../src/mistralai_gcp/utils/forms.py | 14 +- 
.../src/mistralai_gcp/utils/requestbodies.py | 2 +- pyproject.toml | 2 +- src/mistralai/_version.py | 2 +- src/mistralai/agents.py | 22 +- src/mistralai/basesdk.py | 34 ++- src/mistralai/chat.py | 38 ++-- src/mistralai/classifiers.py | 22 +- src/mistralai/embeddings.py | 12 +- src/mistralai/files.py | 62 ++++-- src/mistralai/fim.py | 22 +- src/mistralai/jobs.py | 52 ++++- src/mistralai/mistral_jobs.py | 42 +++- src/mistralai/models/chatcompletionrequest.py | 4 +- .../models/chatcompletionstreamrequest.py | 4 +- .../models/files_api_routes_upload_fileop.py | 8 +- src/mistralai/models_.py | 62 ++++-- src/mistralai/sdkconfiguration.py | 6 +- src/mistralai/utils/forms.py | 14 +- src/mistralai/utils/requestbodies.py | 2 +- 59 files changed, 930 insertions(+), 535 deletions(-) diff --git a/.gitignore b/.gitignore index 965344c8..767c3b29 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.speakeasy/reports README-PYPI.md .venv/ pyrightconfig.json diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 0b077597..d8a77e97 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 36ad3563d9d2b3af47015100d060570b + docChecksum: 7ad277ed0527b2949ed9d503ce742fab docVersion: 0.0.2 - speakeasyVersion: 1.451.1 - generationVersion: 2.470.1 - releaseVersion: 1.2.5 - configChecksum: 9dbada68b30d84dc9c102c143b926b3d + speakeasyVersion: 1.462.2 + generationVersion: 2.486.1 + releaseVersion: 1.2.6 + configChecksum: 195a36c6a88eb19c3a487f1fe4a39bcc repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,7 +14,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.6.8 + core: 5.7.4 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 enumUnions: 0.1.0 @@ -25,10 +25,10 @@ 
features: globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.0.0 + globalServerURLs: 3.1.0 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 - nameOverrides: 3.0.0 + nameOverrides: 3.0.1 nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.1 @@ -373,7 +373,12 @@ generatedFiles: - src/mistralai/utils/values.py examples: list_models_v1_models_get: - speakeasy-default-list-models-v1-models-get: {} + speakeasy-default-list-models-v1-models-get: + responses: + "200": + application/json: {} + "422": + application/json: {} retrieve_model_v1_models__model_id__get: "": parameters: @@ -487,11 +492,15 @@ examples: responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} - "422": {} + "422": + application/json: {} stream_chat: speakeasy-default-stream-chat: requestBody: application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} + responses: + "422": + application/json: {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: @@ -499,11 +508,15 @@ examples: responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} - "422": {} + "422": + application/json: {} stream_fim: speakeasy-default-stream-fim: requestBody: application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + responses: + "422": + application/json: {} agents_completion_v1_agents_completions_post: speakeasy-default-agents-completion-v1-agents-completions-post: requestBody: @@ -511,11 +524,15 @@ examples: responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} - "422": {} + "422": + application/json: {} stream_agents: speakeasy-default-stream-agents: requestBody: application/json: {"messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "agent_id": ""} + responses: + "422": + application/json: {} embeddings_v1_embeddings_post: speakeasy-default-embeddings-v1-embeddings-post: requestBody: @@ -523,7 +540,8 @@ examples: responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} - "422": {} + "422": + application/json: {} files_api_routes_download_file: speakeasy-default-files-api-routes-download-file: parameters: @@ -564,7 +582,8 @@ examples: responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} - "422": {} + "422": + application/json: {} moderations_chat_v1_chat_moderations_post: speakeasy-default-moderations-chat-v1-chat-moderations-post: requestBody: @@ -572,7 +591,8 @@ examples: responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} - "422": {} + "422": + application/json: {} files_api_routes_get_signed_url: speakeasy-default-files-api-routes-get-signed-url: parameters: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 5c043c4c..8680a0e2 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.5 + version: 1.2.6 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 593960c5..aa87f0ae 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,46 +1,46 @@ -speakeasyVersion: 1.451.1 +speakeasyVersion: 1.462.2 sources: mistral-azure-source: sourceNamespace: mistral-azure-source - sourceRevisionDigest: sha256:9c35eed0174f2d8165807bcd7c8e7b7111fa97c059a77ae7eeaa352ca7e83b4d - 
sourceBlobDigest: sha256:07283bfde08363f9f69b133888b482472c4bf12d2e5b59cb33c8993c517278e3 + sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 + sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:e0fd58ce2dbba068f375d3a23d758b8678c2a68cf4fc7bc46ea7e1b37abe0647 - sourceBlobDigest: sha256:0707d8d2566a9ef4ef286bb0abe467f8696ccf83ba73091065d7caf627a06611 + sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 + sourceBlobDigest: sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:f74c08bdc7ae39f5fe2394df8f31ae623ece30a7f65019ab6b7bcea352953f05 - sourceBlobDigest: sha256:5de08a038994ec94c0889341d434b598f541459d114f9935deb9ef3b3af90c5f + sourceRevisionDigest: sha256:84bbc6f6011a31e21c8a674b01104446f986c7b5a6b002357800be8ef939b8da + sourceBlobDigest: sha256:ebc7c1bb20aa87873a255cebea1e451099d8949ea1bbff81ec5fd45a107e3a32 tags: - latest - - speakeasy-sdk-regen-1733325251 + - speakeasy-sdk-regen-1736155788 targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-azure-source - sourceRevisionDigest: sha256:9c35eed0174f2d8165807bcd7c8e7b7111fa97c059a77ae7eeaa352ca7e83b4d - sourceBlobDigest: sha256:07283bfde08363f9f69b133888b482472c4bf12d2e5b59cb33c8993c517278e3 + sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 + sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:79a227720579444358a825b1a272c153f3d9dd48cd0913be6c988d7931a44241 + codeSamplesRevisionDigest: 
sha256:28356dba7ea28436035e20182b8ce4d1951e19503b5accef6a128d860361e5c0 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:e0fd58ce2dbba068f375d3a23d758b8678c2a68cf4fc7bc46ea7e1b37abe0647 - sourceBlobDigest: sha256:0707d8d2566a9ef4ef286bb0abe467f8696ccf83ba73091065d7caf627a06611 + sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 + sourceBlobDigest: sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:0657ec41e473356a5a0eeaca3dff137e9ff16080ec1fb50e72553245aa86ffe5 + codeSamplesRevisionDigest: sha256:7de23f90d6543356f310f46375bef4db7f43eb22b2871ad4dfe1b7d0cc875bb4 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:f74c08bdc7ae39f5fe2394df8f31ae623ece30a7f65019ab6b7bcea352953f05 - sourceBlobDigest: sha256:5de08a038994ec94c0889341d434b598f541459d114f9935deb9ef3b3af90c5f + sourceRevisionDigest: sha256:84bbc6f6011a31e21c8a674b01104446f986c7b5a6b002357800be8ef939b8da + sourceBlobDigest: sha256:ebc7c1bb20aa87873a255cebea1e451099d8949ea1bbff81ec5fd45a107e3a32 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:9a22e0289ff4e72dd43c1b65ddb8c7870814176d36e1eb6352d06e93dec597ba + codeSamplesRevisionDigest: sha256:3d7ebf5043f98a2e9b07c66057c279f90272a813875b29bd9b75595f77caf0c4 workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/README.md b/README.md index 7a886085..501e0ba6 100644 --- a/README.md +++ b/README.md @@ -85,17 +85,19 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.chat.complete(model="mistral-small-latest", messages=[ +) as mistral: + + res = mistral.chat.complete(model="mistral-small-latest", messages=[ { "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, ]) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ```
@@ -110,17 +112,19 @@ import os async def main(): async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) as s: - res = await s.chat.complete_async(model="mistral-small-latest", messages=[ + ) as mistral: + + res = await mistral.chat.complete_async(model="mistral-small-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, ]) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) asyncio.run(main()) ``` @@ -136,15 +140,17 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.files.upload(file={ +) as mistral: + + res = mistral.files.upload(file={ "file_name": "example.file", "content": open("example.file", "rb"), }) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ```
@@ -159,15 +165,17 @@ import os async def main(): async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) as s: - res = await s.files.upload_async(file={ + ) as mistral: + + res = await mistral.files.upload_async(file={ "file_name": "example.file", "content": open("example.file", "rb"), }) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) asyncio.run(main()) ``` @@ -183,17 +191,19 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.agents.complete(messages=[ +) as mistral: + + res = mistral.agents.complete(messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, ], agent_id="") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ```
@@ -208,17 +218,19 @@ import os async def main(): async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) as s: - res = await s.agents.complete_async(messages=[ + ) as mistral: + + res = await mistral.agents.complete_async(messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, ], agent_id="") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) asyncio.run(main()) ``` @@ -234,15 +246,17 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.embeddings.create(inputs=[ +) as mistral: + + res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", ], model="Wrangler") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ```
@@ -257,15 +271,17 @@ import os async def main(): async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) as s: - res = await s.embeddings.create_async(inputs=[ + ) as mistral: + + res = await mistral.embeddings.create_async(inputs=[ "Embed this sentence.", "As well as this one.", ], model="Wrangler") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) asyncio.run(main()) ``` @@ -457,19 +473,21 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.chat.stream(model="mistral-small-latest", messages=[ +) as mistral: + + res = mistral.chat.stream(model="mistral-small-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, ]) - if res is not None: - with res as event_stream: - for event in event_stream: - # handle event - print(event, flush=True) + assert res is not None + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) ``` @@ -494,15 +512,17 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.files.upload(file={ +) as mistral: + + res = mistral.files.upload(file={ "file_name": "example.file", "content": open("example.file", "rb"), }) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -514,37 +534,41 @@ Some of the endpoints in this SDK support retries. 
If you use the SDK without an To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: ```python -from mistral.utils import BackoffStrategy, RetryConfig from mistralai import Mistral +from mistralai.utils import BackoffStrategy, RetryConfig import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.models.list(, +) as mistral: + + res = mistral.models.list(, RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: ```python -from mistral.utils import BackoffStrategy, RetryConfig from mistralai import Mistral +from mistralai.utils import BackoffStrategy, RetryConfig import os with Mistral( retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.models.list() +) as mistral: + + res = mistral.models.list() - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -578,14 +602,16 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: +) as mistral: res = None try: - res = s.models.list() - if res is not None: - # handle response - pass + res = mistral.models.list() + + assert res is not None + + # Handle response + print(res) except models.HTTPValidationError as e: # handle e.data: models.HTTPValidationErrorData @@ -616,12 +642,14 @@ import os with Mistral( server="eu", api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.models.list() +) as mistral: + + res = mistral.models.list() - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -635,12 +663,14 @@ import os with 
Mistral( server_url="https://api.mistral.ai", api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.models.list() +) as mistral: + + res = mistral.models.list() - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -744,12 +774,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.models.list() +) as mistral: + + res = mistral.models.list() - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` diff --git a/RELEASES.md b/RELEASES.md index a13990d2..6544f991 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -108,4 +108,14 @@ Based on: ### Generated - [python v1.2.5] . ### Releases -- [PyPI v1.2.5] https://pypi.org/project/mistralai/1.2.5 - . \ No newline at end of file +- [PyPI v1.2.5] https://pypi.org/project/mistralai/1.2.5 - . + +## 2025-01-06 09:57:47 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.462.2 (2.486.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.2.6] . +### Releases +- [PyPI v1.2.6] https://pypi.org/project/mistralai/1.2.6 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index e523aa92..88762965 100644 --- a/USAGE.md +++ b/USAGE.md @@ -10,17 +10,19 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.chat.complete(model="mistral-small-latest", messages=[ +) as mistral: + + res = mistral.chat.complete(model="mistral-small-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, ]) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ```
@@ -35,17 +37,19 @@ import os async def main(): async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) as s: - res = await s.chat.complete_async(model="mistral-small-latest", messages=[ + ) as mistral: + + res = await mistral.chat.complete_async(model="mistral-small-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, ]) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) asyncio.run(main()) ``` @@ -61,15 +65,17 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.files.upload(file={ +) as mistral: + + res = mistral.files.upload(file={ "file_name": "example.file", "content": open("example.file", "rb"), }) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ```
@@ -84,15 +90,17 @@ import os async def main(): async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) as s: - res = await s.files.upload_async(file={ + ) as mistral: + + res = await mistral.files.upload_async(file={ "file_name": "example.file", "content": open("example.file", "rb"), }) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) asyncio.run(main()) ``` @@ -108,17 +116,19 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.agents.complete(messages=[ +) as mistral: + + res = mistral.agents.complete(messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, ], agent_id="") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ```
@@ -133,17 +143,19 @@ import os async def main(): async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) as s: - res = await s.agents.complete_async(messages=[ + ) as mistral: + + res = await mistral.agents.complete_async(messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, ], agent_id="") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) asyncio.run(main()) ``` @@ -159,15 +171,17 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.embeddings.create(inputs=[ +) as mistral: + + res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", ], model="Wrangler") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ```
@@ -182,15 +196,17 @@ import os async def main(): async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), - ) as s: - res = await s.embeddings.create_async(inputs=[ + ) as mistral: + + res = await mistral.embeddings.create_async(inputs=[ "Embed this sentence.", "As well as this one.", ], model="Wrangler") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) asyncio.run(main()) ``` diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 792b796d..3675fad9 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -22,17 +22,19 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.agents.complete(messages=[ +) as mistral: + + res = mistral.agents.complete(messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, ], agent_id="") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -77,19 +79,21 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.agents.stream(messages=[ +) as mistral: + + res = mistral.agents.stream(messages=[ { "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, ], agent_id="") - if res is not None: - with res as event_stream: - for event in event_stream: - # handle event - print(event, flush=True) + assert res is not None + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) ``` diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 6e00d3d2..8ab0eb6b 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -22,17 +22,19 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.chat.complete(model="mistral-small-latest", messages=[ +) as mistral: + + res = mistral.chat.complete(model="mistral-small-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, ]) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -80,19 +82,21 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.chat.stream(model="mistral-small-latest", messages=[ +) as mistral: + + res = mistral.chat.stream(model="mistral-small-latest", messages=[ { "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, ]) - if res is not None: - with res as event_stream: - for event in event_stream: - # handle event - print(event, flush=True) + assert res is not None + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) ``` diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index da90019a..37ee3e10 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -22,14 +22,16 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.classifiers.moderate(inputs=[ +) as mistral: + + res = mistral.classifiers.moderate(inputs=[ "", ]) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -64,8 +66,9 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.classifiers.moderate_chat(inputs=[ +) as mistral: + + res = mistral.classifiers.moderate_chat(inputs=[ [ { "content": [ @@ -77,9 +80,10 @@ with Mistral( ], ], model="V90") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 1f9f1956..b3610f78 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -21,15 +21,17 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.embeddings.create(inputs=[ +) as mistral: + + res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", ], model="Wrangler") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index ad2e0f09..63a0023c 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -30,15 +30,17 @@ import os with Mistral( 
api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.files.upload(file={ +) as mistral: + + res = mistral.files.upload(file={ "file_name": "example.file", "content": open("example.file", "rb"), }) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -72,12 +74,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.files.list() +) as mistral: + + res = mistral.files.list() - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -115,12 +119,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.files.retrieve(file_id="") +) as mistral: + + res = mistral.files.retrieve(file_id="") + + assert res is not None - if res is not None: - # handle response - pass + # Handle response + print(res) ``` @@ -153,12 +159,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.files.delete(file_id="") +) as mistral: - if res is not None: - # handle response - pass + res = mistral.files.delete(file_id="") + + assert res is not None + + # Handle response + print(res) ``` @@ -191,12 +199,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.files.download(file_id="") +) as mistral: + + res = mistral.files.download(file_id="") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -229,12 +239,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.files.get_signed_url(file_id="") +) as mistral: + + res = mistral.files.get_signed_url(file_id="") + + assert res is not None - if res is not None: - # handle response - pass + # Handle response + print(res) ``` diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index eed1893e..06099974 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -22,12 
+22,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") +) as mistral: - if res is not None: - # handle response - pass + res = mistral.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") + + assert res is not None + + # Handle response + print(res) ``` @@ -70,14 +72,16 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") - - if res is not None: - with res as event_stream: - for event in event_stream: - # handle event - print(event, flush=True) +) as mistral: + + res = mistral.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") + + assert res is not None + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) ``` diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index b4779580..92406630 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -23,12 +23,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.fine_tuning.jobs.list() +) as mistral: - if res is not None: - # handle response - pass + res = mistral.fine_tuning.jobs.list() + + assert res is not None + + # Handle response + print(res) ``` @@ -69,12 +71,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.fine_tuning.jobs.create(model="codestral-latest", hyperparameters={}) +) as mistral: + + res = mistral.fine_tuning.jobs.create(model="codestral-latest", hyperparameters={}) - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -114,12 +118,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.fine_tuning.jobs.get(job_id="b18d8d81-fd7b-4764-a31e-475cb1f36591") +) as mistral: + + res = 
mistral.fine_tuning.jobs.get(job_id="b18d8d81-fd7b-4764-a31e-475cb1f36591") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -152,12 +158,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.fine_tuning.jobs.cancel(job_id="03fa7112-315a-4072-a9f2-43f3f1ec962e") +) as mistral: + + res = mistral.fine_tuning.jobs.cancel(job_id="03fa7112-315a-4072-a9f2-43f3f1ec962e") + + assert res is not None - if res is not None: - # handle response - pass + # Handle response + print(res) ``` @@ -190,12 +198,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.fine_tuning.jobs.start(job_id="0eb0f807-fb9f-4e46-9c13-4e257df6e1ba") +) as mistral: + + res = mistral.fine_tuning.jobs.start(job_id="0eb0f807-fb9f-4e46-9c13-4e257df6e1ba") + + assert res is not None - if res is not None: - # handle response - pass + # Handle response + print(res) ``` diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 1880c83e..118cfccd 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -22,12 +22,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.batch.jobs.list() +) as mistral: - if res is not None: - # handle response - pass + res = mistral.batch.jobs.list() + + assert res is not None + + # Handle response + print(res) ``` @@ -66,14 +68,16 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.batch.jobs.create(input_files=[ +) as mistral: + + res = mistral.batch.jobs.create(input_files=[ "a621cf02-1cd9-4cf5-8403-315211a509a3", ], endpoint="/v1/fim/completions", model="2") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -110,12 +114,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = 
s.batch.jobs.get(job_id="b888f774-3e7c-4135-a18c-6b985523c4bc") +) as mistral: - if res is not None: - # handle response - pass + res = mistral.batch.jobs.get(job_id="b888f774-3e7c-4135-a18c-6b985523c4bc") + + assert res is not None + + # Handle response + print(res) ``` @@ -148,12 +154,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.batch.jobs.cancel(job_id="0f713502-9233-41c6-9ebd-c570b7edb496") +) as mistral: + + res = mistral.batch.jobs.cancel(job_id="0f713502-9233-41c6-9ebd-c570b7edb496") + + assert res is not None - if res is not None: - # handle response - pass + # Handle response + print(res) ``` diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 78884947..080b355f 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -26,12 +26,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.models.list() +) as mistral: - if res is not None: - # handle response - pass + res = mistral.models.list() + + assert res is not None + + # Handle response + print(res) ``` @@ -64,12 +66,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") +) as mistral: + + res = mistral.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -103,12 +107,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.models.delete(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") +) as mistral: + + res = mistral.models.delete(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + + assert res is not None - if res is not None: - # handle response - pass + # Handle response + print(res) ``` @@ -142,12 +148,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res 
= s.models.update(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") +) as mistral: - if res is not None: - # handle response - pass + res = mistral.models.update(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + + assert res is not None + + # Handle response + print(res) ``` @@ -182,12 +190,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.models.archive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") +) as mistral: + + res = mistral.models.archive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") - if res is not None: - # handle response - pass + assert res is not None + + # Handle response + print(res) ``` @@ -220,12 +230,14 @@ import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), -) as s: - res = s.models.unarchive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") +) as mistral: + + res = mistral.models.unarchive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") + + assert res is not None - if res is not None: - # handle response - pass + # Handle response + print(res) ``` diff --git a/packages/mistralai_azure/.gitignore b/packages/mistralai_azure/.gitignore index 7755092b..5a82b069 100644 --- a/packages/mistralai_azure/.gitignore +++ b/packages/mistralai_azure/.gitignore @@ -1,3 +1,4 @@ +.speakeasy/reports README-PYPI.md .venv/ venv/ diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 15388f4c..16a5196b 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,18 +1,18 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 26271aa279a7a7182f7af19df8b67038 + docChecksum: 4da7c33f650ddf206c58fa6c941d347f docVersion: 0.0.2 - speakeasyVersion: 1.451.1 - generationVersion: 2.470.1 - releaseVersion: 1.2.3 - configChecksum: 60295c765204eb0aa26205ec02e574fc + speakeasyVersion: 1.462.2 + generationVersion: 2.486.1 + 
releaseVersion: 1.2.6 + configChecksum: cc2ac1769a87215774fce0075ff2e77d published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.6.8 + core: 5.7.4 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 @@ -21,9 +21,9 @@ features: globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.0.0 + globalServerURLs: 3.1.0 methodArguments: 1.0.2 - nameOverrides: 3.0.0 + nameOverrides: 3.0.1 nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.1 @@ -157,6 +157,9 @@ examples: speakeasy-default-stream-chat: requestBody: application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + responses: + "422": + application/json: {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: @@ -164,5 +167,6 @@ examples: responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} - "422": {} + "422": + application/json: {} generatedTests: {} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index aae8dd2a..17344d9b 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.3 + version: 1.2.6 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index a7a0a374..5c227f66 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai_azure" -version = 
"1.2.3" +version = "1.2.6" description = "Python Client SDK for the Mistral AI API in Azure." authors = ["Mistral",] readme = "README-PYPI.md" @@ -18,9 +18,9 @@ in-project = true [tool.poetry.dependencies] python = "^3.8" eval-type-backport = "^0.2.0" -httpx = "^0.27.0" +httpx = "^0.28.1" jsonpath-python = "^1.0.6" -pydantic = "~2.9.2" +pydantic = "~2.10.3" python-dateutil = "^2.8.2" typing-inspect = "^0.9.0" diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index 1e41e4f0..c7215b89 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai_azure" -__version__: str = "1.2.3" +__version__: str = "1.2.6" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 772b44c1..05c100d4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -9,7 +9,8 @@ BeforeRequestContext, ) from mistralai_azure.utils import RetryConfig, SerializedRequestBody, get_body_content -from typing import Callable, List, Optional, Tuple +from typing import Callable, List, Mapping, Optional, Tuple +from urllib.parse import parse_qs, urlparse class BaseSDK: @@ -18,7 +19,7 @@ class BaseSDK: def __init__(self, sdk_config: SDKConfiguration) -> None: self.sdk_configuration = sdk_config - def get_url(self, base_url, url_variables): + def _get_url(self, base_url, url_variables): sdk_url, sdk_variables = self.sdk_configuration.get_server_details() if base_url is None: @@ -29,7 +30,7 @@ def get_url(self, base_url, url_variables): return utils.template_url(base_url, url_variables) - def build_request_async( + def _build_request_async( self, method, path, @@ -48,9 +49,10 @@ def build_request_async( 
Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: client = self.sdk_configuration.async_client - return self.build_request_with_client( + return self._build_request_with_client( client, method, path, @@ -67,9 +69,10 @@ def build_request_async( timeout_ms, get_serialized_body, url_override, + http_headers, ) - def build_request( + def _build_request( self, method, path, @@ -88,9 +91,10 @@ def build_request( Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: client = self.sdk_configuration.client - return self.build_request_with_client( + return self._build_request_with_client( client, method, path, @@ -107,9 +111,10 @@ def build_request( timeout_ms, get_serialized_body, url_override, + http_headers, ) - def build_request_with_client( + def _build_request_with_client( self, client, method, @@ -129,13 +134,14 @@ def build_request_with_client( Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: query_params = {} url = url_override if url is None: url = utils.generate_url( - self.get_url(base_url, url_variables), + self._get_url(base_url, url_variables), path, request if request_has_path_params else None, _globals if request_has_path_params else None, @@ -145,6 +151,12 @@ def build_request_with_client( request if request_has_query_params else None, _globals if request_has_query_params else None, ) + else: + # Pick up the query parameter from the override so they can be + # preserved when building the request later on (necessary as of + # httpx 0.28). 
+ parsed_override = urlparse(str(url_override)) + query_params = parse_qs(parsed_override.query, keep_blank_values=True) headers = utils.get_headers(request, _globals) headers["Accept"] = accept_header_value @@ -159,7 +171,7 @@ def build_request_with_client( headers = {**headers, **security_headers} query_params = {**query_params, **security_query_params} - serialized_request_body = SerializedRequestBody("application/octet-stream") + serialized_request_body = SerializedRequestBody() if get_serialized_body is not None: rb = get_serialized_body() if request_body_required and rb is None: @@ -178,6 +190,10 @@ def build_request_with_client( ): headers["content-type"] = serialized_request_body.media_type + if http_headers is not None: + for header, value in http_headers.items(): + headers[header] = value + timeout = timeout_ms / 1000 if timeout_ms is not None else None return client.build_request( diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index afab9ba4..0ed464ba 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -5,7 +5,7 @@ from mistralai_azure._hooks import HookContext from mistralai_azure.types import OptionalNullable, UNSET from mistralai_azure.utils import eventstreaming -from typing import Any, List, Optional, Union +from typing import Any, List, Mapping, Optional, Union class Chat(BaseSDK): @@ -17,7 +17,7 @@ def stream( messages: Union[List[models.Messages], List[models.MessagesTypedDict]], model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, @@ -37,10 +37,11 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: 
OptionalNullable[int] = UNSET, - safe_prompt: Optional[bool] = False, + safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream chat completion @@ -64,6 +65,7 @@ def stream( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -95,7 +97,7 @@ def stream( safe_prompt=safe_prompt, ) - req = self.build_request( + req = self._build_request( method="POST", path="/chat/completions#stream", base_url=base_url, @@ -106,6 +108,7 @@ def stream( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest @@ -165,7 +168,7 @@ async def stream_async( messages: Union[List[models.Messages], List[models.MessagesTypedDict]], model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, @@ -185,10 +188,11 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - safe_prompt: Optional[bool] = False, + safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: 
Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream chat completion @@ -212,6 +216,7 @@ async def stream_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -243,7 +248,7 @@ async def stream_async( safe_prompt=safe_prompt, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/chat/completions#stream", base_url=base_url, @@ -254,6 +259,7 @@ async def stream_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest @@ -316,7 +322,7 @@ def complete( ], model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ @@ -341,10 +347,11 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - safe_prompt: Optional[bool] = False, + safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ChatCompletionResponse]: r"""Chat Completion @@ -366,6 +373,7 @@ def complete( :param retries: Override the default retry configuration 
for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -399,7 +407,7 @@ def complete( safe_prompt=safe_prompt, ) - req = self.build_request( + req = self._build_request( method="POST", path="/chat/completions", base_url=base_url, @@ -410,6 +418,7 @@ def complete( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest @@ -468,7 +477,7 @@ async def complete_async( ], model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ @@ -493,10 +502,11 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - safe_prompt: Optional[bool] = False, + safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ChatCompletionResponse]: r"""Chat Completion @@ -518,6 +528,7 @@ async def complete_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -551,7 +562,7 @@ async def complete_async( safe_prompt=safe_prompt, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/chat/completions", base_url=base_url, @@ -562,6 +573,7 @@ async def complete_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index 3e4e9a3a..67c91bba 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -107,7 +107,7 @@ class ChatCompletionRequest(BaseModel): temperature: OptionalNullable[float] = UNSET r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: Optional[float] = 1 + top_p: Optional[float] = None r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" max_tokens: OptionalNullable[int] = UNSET @@ -137,7 +137,7 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - safe_prompt: Optional[bool] = False + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @model_serializer(mode="wrap") diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 6d1f6bb7..465647eb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -102,7 +102,7 @@ class ChatCompletionStreamRequest(BaseModel): temperature: OptionalNullable[float] = UNSET r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: Optional[float] = 1 + top_p: Optional[float] = None r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" max_tokens: OptionalNullable[int] = UNSET @@ -131,7 +131,7 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - safe_prompt: Optional[bool] = False + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @model_serializer(mode="wrap") diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 191aa320..73b8d517 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.3" - gen_version: str = "2.470.1" - user_agent: str = "speakeasy-sdk/python 1.2.3 2.470.1 0.0.2 mistralai_azure" + sdk_version: str = "1.2.6" + gen_version: str = "2.486.1" + user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py index 9f5a731e..0472aba8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py @@ -109,13 +109,12 @@ def serialize_multipart_form( if not field_metadata: continue - f_name = field.alias if field.alias is not None else name + f_name = field.alias if field.alias else name if field_metadata.file: file_fields: Dict[str, FieldInfo] = val.__class__.model_fields file_name = "" - field_name = "" content = None content_type = None @@ -131,20 +130,15 @@ def 
serialize_multipart_form( elif file_field_name == "content_type": content_type = getattr(val, file_field_name, None) else: - field_name = ( - file_field.alias - if file_field.alias is not None - else file_field_name - ) file_name = getattr(val, file_field_name) - if field_name == "" or file_name == "" or content is None: + if file_name == "" or content is None: raise ValueError("invalid multipart/form-data file") if content_type is not None: - files[field_name] = (file_name, content, content_type) + files[f_name] = (file_name, content, content_type) else: - files[field_name] = (file_name, content) + files[f_name] = (file_name, content) elif field_metadata.json: files[f_name] = ( None, diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py b/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py index 4f586ae7..d5240dd5 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py @@ -23,7 +23,7 @@ @dataclass class SerializedRequestBody: - media_type: str + media_type: Optional[str] = None content: Optional[Any] = None data: Optional[Any] = None files: Optional[Any] = None diff --git a/packages/mistralai_gcp/.gitignore b/packages/mistralai_gcp/.gitignore index 7755092b..5a82b069 100644 --- a/packages/mistralai_gcp/.gitignore +++ b/packages/mistralai_gcp/.gitignore @@ -1,3 +1,4 @@ +.speakeasy/reports README-PYPI.md .venv/ venv/ diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index ee99e6bb..f74b9759 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,18 +1,18 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: d50a06ac34844141709fa2e57cc940c5 + docChecksum: 849dde0ef239604ca71711ffc1220b54 docVersion: 0.0.2 - speakeasyVersion: 1.451.1 - generationVersion: 2.470.1 - releaseVersion: 1.2.3 - 
configChecksum: 3fc99d7ec7ee057a323b593ebf8fdb8c + speakeasyVersion: 1.462.2 + generationVersion: 2.486.1 + releaseVersion: 1.2.6 + configChecksum: ba11718a5b49fb4a979ae9693a68b191 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.6.8 + core: 5.7.4 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 @@ -21,9 +21,9 @@ features: globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.0.0 + globalServerURLs: 3.1.0 methodArguments: 1.0.2 - nameOverrides: 3.0.0 + nameOverrides: 3.0.1 nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.1 @@ -166,6 +166,9 @@ examples: speakeasy-default-stream-chat: requestBody: application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + responses: + "422": + application/json: {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: @@ -173,11 +176,15 @@ examples: responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} - "422": {} + "422": + application/json: {} stream_fim: speakeasy-default-stream-fim: requestBody: application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + responses: + "422": + application/json: {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: @@ -185,5 +192,6 @@ examples: responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} - "422": {} + "422": 
+ application/json: {} generatedTests: {} diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index a77e2f5e..583aaf5b 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.3 + version: 1.2.6 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 670c1e3a..6692f1d5 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai-gcp" -version = "1.2.3" +version = "1.2.6" description = "Python Client SDK for the Mistral AI API in GCP." authors = ["Mistral",] readme = "README-PYPI.md" @@ -19,9 +19,9 @@ in-project = true python = "^3.8" eval-type-backport = "^0.2.0" google-auth = "2.27.0" -httpx = "^0.27.0" +httpx = "^0.28.1" jsonpath-python = "^1.0.6" -pydantic = "~2.9.2" +pydantic = "~2.10.3" python-dateutil = "^2.8.2" requests = "^2.32.3" typing-inspect = "^0.9.0" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index 5b65a1b5..30081f34 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "1.2.3" +__version__: str = "1.2.6" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index c647eba2..40620018 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -9,7 +9,8 @@ BeforeRequestContext, ) from mistralai_gcp.utils import RetryConfig, SerializedRequestBody, get_body_content -from typing 
import Callable, List, Optional, Tuple +from typing import Callable, List, Mapping, Optional, Tuple +from urllib.parse import parse_qs, urlparse class BaseSDK: @@ -18,7 +19,7 @@ class BaseSDK: def __init__(self, sdk_config: SDKConfiguration) -> None: self.sdk_configuration = sdk_config - def get_url(self, base_url, url_variables): + def _get_url(self, base_url, url_variables): sdk_url, sdk_variables = self.sdk_configuration.get_server_details() if base_url is None: @@ -29,7 +30,7 @@ def get_url(self, base_url, url_variables): return utils.template_url(base_url, url_variables) - def build_request_async( + def _build_request_async( self, method, path, @@ -48,9 +49,10 @@ def build_request_async( Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: client = self.sdk_configuration.async_client - return self.build_request_with_client( + return self._build_request_with_client( client, method, path, @@ -67,9 +69,10 @@ def build_request_async( timeout_ms, get_serialized_body, url_override, + http_headers, ) - def build_request( + def _build_request( self, method, path, @@ -88,9 +91,10 @@ def build_request( Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: client = self.sdk_configuration.client - return self.build_request_with_client( + return self._build_request_with_client( client, method, path, @@ -107,9 +111,10 @@ def build_request( timeout_ms, get_serialized_body, url_override, + http_headers, ) - def build_request_with_client( + def _build_request_with_client( self, client, method, @@ -129,13 +134,14 @@ def build_request_with_client( Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: query_params = {} url = url_override if url is None: 
url = utils.generate_url( - self.get_url(base_url, url_variables), + self._get_url(base_url, url_variables), path, request if request_has_path_params else None, _globals if request_has_path_params else None, @@ -145,6 +151,12 @@ def build_request_with_client( request if request_has_query_params else None, _globals if request_has_query_params else None, ) + else: + # Pick up the query parameter from the override so they can be + # preserved when building the request later on (necessary as of + # httpx 0.28). + parsed_override = urlparse(str(url_override)) + query_params = parse_qs(parsed_override.query, keep_blank_values=True) headers = utils.get_headers(request, _globals) headers["Accept"] = accept_header_value @@ -159,7 +171,7 @@ def build_request_with_client( headers = {**headers, **security_headers} query_params = {**query_params, **security_query_params} - serialized_request_body = SerializedRequestBody("application/octet-stream") + serialized_request_body = SerializedRequestBody() if get_serialized_body is not None: rb = get_serialized_body() if request_body_required and rb is None: @@ -178,6 +190,10 @@ def build_request_with_client( ): headers["content-type"] = serialized_request_body.media_type + if http_headers is not None: + for header, value in http_headers.items(): + headers[header] = value + timeout = timeout_ms / 1000 if timeout_ms is not None else None return client.build_request( diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index 19c92651..47e5b63a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -5,7 +5,7 @@ from mistralai_gcp._hooks import HookContext from mistralai_gcp.types import Nullable, OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming -from typing import Any, List, Optional, Union +from typing import Any, List, Mapping, Optional, Union class Chat(BaseSDK): @@ -17,7 +17,7 @@ def 
stream( model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, @@ -40,6 +40,7 @@ def stream( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream chat completion @@ -62,6 +63,7 @@ def stream( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -92,7 +94,7 @@ def stream( n=n, ) - req = self.build_request( + req = self._build_request( method="POST", path="/streamRawPredict", base_url=base_url, @@ -103,6 +105,7 @@ def stream( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest @@ -162,7 +165,7 @@ async def stream_async( model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, @@ -185,6 +188,7 @@ async def stream_async( retries: OptionalNullable[utils.RetryConfig] = 
UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream chat completion @@ -207,6 +211,7 @@ async def stream_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -237,7 +242,7 @@ async def stream_async( n=n, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/streamRawPredict", base_url=base_url, @@ -248,6 +253,7 @@ async def stream_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest @@ -310,7 +316,7 @@ def complete( List[models.ChatCompletionRequestMessagesTypedDict], ], temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ @@ -338,6 +344,7 @@ def complete( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ChatCompletionResponse]: r"""Chat Completion @@ -358,6 +365,7 @@ def complete( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param 
http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -390,7 +398,7 @@ def complete( n=n, ) - req = self.build_request( + req = self._build_request( method="POST", path="/rawPredict", base_url=base_url, @@ -401,6 +409,7 @@ def complete( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest @@ -459,7 +468,7 @@ async def complete_async( List[models.ChatCompletionRequestMessagesTypedDict], ], temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ @@ -487,6 +496,7 @@ async def complete_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ChatCompletionResponse]: r"""Chat Completion @@ -507,6 +517,7 @@ async def complete_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -539,7 +550,7 @@ async def complete_async( n=n, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/rawPredict", base_url=base_url, @@ -550,6 +561,7 @@ async def complete_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index bddc010f..89146a4a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -5,7 +5,7 @@ from mistralai_gcp._hooks import HookContext from mistralai_gcp.types import Nullable, OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming -from typing import Any, Optional, Union +from typing import Any, Mapping, Optional, Union class Fim(BaseSDK): @@ -32,6 +32,7 @@ def stream( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream fim completion @@ -50,6 +51,7 @@ def stream( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -72,7 +74,7 @@ def stream( min_tokens=min_tokens, ) - req = self.build_request( + req = self._build_request( method="POST", path="/streamRawPredict#fim", base_url=base_url, @@ -83,6 +85,7 @@ def stream( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest @@ -157,6 +160,7 @@ async def stream_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream fim completion @@ -175,6 +179,7 @@ async def stream_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -197,7 +202,7 @@ async def stream_async( min_tokens=min_tokens, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/streamRawPredict#fim", base_url=base_url, @@ -208,6 +213,7 @@ async def stream_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest @@ -282,6 +288,7 @@ def complete( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.FIMCompletionResponse]: r"""Fim Completion @@ -300,6 +307,7 @@ def complete( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -322,7 +330,7 @@ def complete( min_tokens=min_tokens, ) - req = self.build_request( + req = self._build_request( method="POST", path="/rawPredict#fim", base_url=base_url, @@ -333,6 +341,7 @@ def complete( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest @@ -403,6 +412,7 @@ async def complete_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.FIMCompletionResponse]: r"""Fim Completion @@ -421,6 +431,7 @@ async def complete_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -443,7 +454,7 @@ async def complete_async( min_tokens=min_tokens, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/rawPredict#fim", base_url=base_url, @@ -454,6 +465,7 @@ async def complete_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index b8ebfc91..ab97e52a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -105,7 +105,7 @@ class ChatCompletionRequest(BaseModel): temperature: OptionalNullable[float] = UNSET r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: Optional[float] = 1 + top_p: Optional[float] = None r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" max_tokens: OptionalNullable[int] = UNSET diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index b710a27d..e6c5429b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -100,7 +100,7 @@ class ChatCompletionStreamRequest(BaseModel): temperature: OptionalNullable[float] = UNSET r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: Optional[float] = 1 + top_p: Optional[float] = None r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" max_tokens: OptionalNullable[int] = UNSET diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index b5800815..3c149cc6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.3" - gen_version: str = "2.470.1" - user_agent: str = "speakeasy-sdk/python 1.2.3 2.470.1 0.0.2 mistralai-gcp" + sdk_version: str = "1.2.6" + gen_version: str = "2.486.1" + user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py index 9f5a731e..0472aba8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py @@ -109,13 +109,12 @@ def serialize_multipart_form( if not field_metadata: continue - f_name = field.alias if field.alias is not None else name + f_name = field.alias if field.alias else name if field_metadata.file: file_fields: Dict[str, FieldInfo] = val.__class__.model_fields file_name = "" - field_name = "" content = None content_type = None @@ -131,20 +130,15 @@ def serialize_multipart_form( elif file_field_name == "content_type": content_type = getattr(val, file_field_name, None) else: - field_name = ( - file_field.alias - if file_field.alias is not None - else file_field_name - ) file_name = getattr(val, file_field_name) - if field_name == "" or file_name == "" or content is None: + if file_name == "" or content is None: raise ValueError("invalid 
multipart/form-data file") if content_type is not None: - files[field_name] = (file_name, content, content_type) + files[f_name] = (file_name, content, content_type) else: - files[field_name] = (file_name, content) + files[f_name] = (file_name, content) elif field_metadata.json: files[f_name] = ( None, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py index 4f586ae7..d5240dd5 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py @@ -23,7 +23,7 @@ @dataclass class SerializedRequestBody: - media_type: str + media_type: Optional[str] = None content: Optional[Any] = None data: Optional[Any] = None files: Optional[Any] = None diff --git a/pyproject.toml b/pyproject.toml index d3c6fb53..9f5e9f16 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.2.5" +version = "1.2.6" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index d1350966..d4a21d0d 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.2.5" +__version__: str = "1.2.6" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 246cab4e..621224e0 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -5,7 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env -from typing import Any, List, Optional, Union +from typing import Any, List, Mapping, Optional, Union class Agents(BaseSDK): @@ -46,6 +46,7 @@ def complete( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ChatCompletionResponse]: r"""Agents Completion @@ -64,6 +65,7 @@ def complete( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -94,7 +96,7 @@ def complete( agent_id=agent_id, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/agents/completions", base_url=base_url, @@ -105,6 +107,7 @@ def complete( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionRequest @@ -191,6 +194,7 @@ async def complete_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ChatCompletionResponse]: r"""Agents Completion @@ -209,6 +213,7 @@ async def complete_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -239,7 +244,7 @@ async def complete_async( agent_id=agent_id, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/agents/completions", base_url=base_url, @@ -250,6 +255,7 @@ async def complete_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionRequest @@ -336,6 +342,7 @@ def stream( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream Agents completion @@ -356,6 +363,7 @@ def stream( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -386,7 +394,7 @@ def stream( agent_id=agent_id, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/agents/completions#stream", base_url=base_url, @@ -397,6 +405,7 @@ def stream( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionStreamRequest @@ -487,6 +496,7 @@ async def stream_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream Agents completion @@ -507,6 +517,7 @@ async def stream_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -537,7 +548,7 @@ async def stream_async( agent_id=agent_id, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/agents/completions#stream", base_url=base_url, @@ -548,6 +559,7 @@ async def stream_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionStreamRequest diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index 3fc2bdd4..cda8adda 100644 --- a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -9,7 +9,8 @@ BeforeRequestContext, ) from mistralai.utils import RetryConfig, SerializedRequestBody, get_body_content -from typing import Callable, List, Optional, Tuple +from typing import Callable, List, Mapping, Optional, Tuple +from urllib.parse import parse_qs, urlparse class BaseSDK: @@ -18,7 +19,7 @@ class BaseSDK: def __init__(self, sdk_config: SDKConfiguration) -> None: self.sdk_configuration = sdk_config - def get_url(self, base_url, url_variables): + def _get_url(self, base_url, url_variables): sdk_url, sdk_variables = self.sdk_configuration.get_server_details() if base_url is None: @@ -29,7 +30,7 @@ def get_url(self, base_url, url_variables): return utils.template_url(base_url, url_variables) - def build_request_async( + def _build_request_async( self, method, path, @@ -48,9 +49,10 @@ def build_request_async( Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: client = self.sdk_configuration.async_client - return self.build_request_with_client( + return self._build_request_with_client( client, method, path, @@ -67,9 +69,10 @@ def build_request_async( timeout_ms, get_serialized_body, url_override, + 
http_headers, ) - def build_request( + def _build_request( self, method, path, @@ -88,9 +91,10 @@ def build_request( Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: client = self.sdk_configuration.client - return self.build_request_with_client( + return self._build_request_with_client( client, method, path, @@ -107,9 +111,10 @@ def build_request( timeout_ms, get_serialized_body, url_override, + http_headers, ) - def build_request_with_client( + def _build_request_with_client( self, client, method, @@ -129,13 +134,14 @@ def build_request_with_client( Callable[[], Optional[SerializedRequestBody]] ] = None, url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> httpx.Request: query_params = {} url = url_override if url is None: url = utils.generate_url( - self.get_url(base_url, url_variables), + self._get_url(base_url, url_variables), path, request if request_has_path_params else None, _globals if request_has_path_params else None, @@ -145,6 +151,12 @@ def build_request_with_client( request if request_has_query_params else None, _globals if request_has_query_params else None, ) + else: + # Pick up the query parameter from the override so they can be + # preserved when building the request later on (necessary as of + # httpx 0.28). 
+ parsed_override = urlparse(str(url_override)) + query_params = parse_qs(parsed_override.query, keep_blank_values=True) headers = utils.get_headers(request, _globals) headers["Accept"] = accept_header_value @@ -159,7 +171,7 @@ def build_request_with_client( headers = {**headers, **security_headers} query_params = {**query_params, **security_query_params} - serialized_request_body = SerializedRequestBody("application/octet-stream") + serialized_request_body = SerializedRequestBody() if get_serialized_body is not None: rb = get_serialized_body() if request_body_required and rb is None: @@ -178,6 +190,10 @@ def build_request_with_client( ): headers["content-type"] = serialized_request_body.media_type + if http_headers is not None: + for header, value in http_headers.items(): + headers[header] = value + timeout = timeout_ms / 1000 if timeout_ms is not None else None return client.build_request( diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 4b7aad3b..9e07f784 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -5,7 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import Nullable, OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env -from typing import Any, List, Optional, Union +from typing import Any, List, Mapping, Optional, Union class Chat(BaseSDK): @@ -17,7 +17,7 @@ def complete( model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, @@ -37,10 +37,11 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - safe_prompt: Optional[bool] = False, + safe_prompt: Optional[bool] = None, retries: 
OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ChatCompletionResponse]: r"""Chat Completion @@ -62,6 +63,7 @@ def complete( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -93,7 +95,7 @@ def complete( safe_prompt=safe_prompt, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/chat/completions", base_url=base_url, @@ -104,6 +106,7 @@ def complete( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest @@ -161,7 +164,7 @@ async def complete_async( model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, @@ -181,10 +184,11 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - safe_prompt: Optional[bool] = False, + safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ChatCompletionResponse]: r"""Chat Completion @@ 
-206,6 +210,7 @@ async def complete_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -237,7 +242,7 @@ async def complete_async( safe_prompt=safe_prompt, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/chat/completions", base_url=base_url, @@ -248,6 +253,7 @@ async def complete_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest @@ -308,7 +314,7 @@ def stream( List[models.ChatCompletionStreamRequestMessagesTypedDict], ], temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ @@ -333,10 +339,11 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - safe_prompt: Optional[bool] = False, + safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream chat completion @@ -360,6 +367,7 @@ def stream( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in 
milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -393,7 +401,7 @@ def stream( safe_prompt=safe_prompt, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/chat/completions#stream", base_url=base_url, @@ -404,6 +412,7 @@ def stream( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest @@ -468,7 +477,7 @@ async def stream_async( List[models.ChatCompletionStreamRequestMessagesTypedDict], ], temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, + top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ @@ -493,10 +502,11 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - safe_prompt: Optional[bool] = False, + safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream chat completion @@ -520,6 +530,7 @@ async def stream_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -553,7 +564,7 @@ async def stream_async( safe_prompt=safe_prompt, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/chat/completions#stream", base_url=base_url, @@ -564,6 +575,7 @@ async def stream_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index 3a772068..55253a11 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -5,7 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import Nullable, OptionalNullable, UNSET from mistralai.utils import get_security_from_env -from typing import Any, Optional, Union +from typing import Any, Mapping, Optional, Union class Classifiers(BaseSDK): @@ -22,6 +22,7 @@ def moderate( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ClassificationResponse]: r"""Moderations @@ -30,6 +31,7 @@ def moderate( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -44,7 +46,7 @@ def moderate( model=model, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/moderations", base_url=base_url, @@ -55,6 +57,7 @@ def moderate( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest @@ -117,6 +120,7 @@ async def moderate_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ClassificationResponse]: r"""Moderations @@ -125,6 +129,7 @@ async def moderate_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -139,7 +144,7 @@ async def moderate_async( model=model, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/moderations", base_url=base_url, @@ -150,6 +155,7 @@ async def moderate_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest @@ -212,6 +218,7 @@ def moderate_chat( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ClassificationResponse]: r"""Moderations Chat @@ -220,6 +227,7 @@ def moderate_chat( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -236,7 +244,7 @@ def moderate_chat( model=model, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/chat/moderations", base_url=base_url, @@ -247,6 +255,7 @@ def moderate_chat( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatClassificationRequest @@ -309,6 +318,7 @@ async def moderate_chat_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ClassificationResponse]: r"""Moderations Chat @@ -317,6 +327,7 @@ async def moderate_chat_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -333,7 +344,7 @@ async def moderate_chat_async( model=model, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/chat/moderations", base_url=base_url, @@ -344,6 +355,7 @@ async def moderate_chat_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatClassificationRequest diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 2aa115c8..bf80861d 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -5,7 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env -from typing import Any, Optional, Union +from typing import Any, Mapping, Optional, Union class Embeddings(BaseSDK): @@ -20,6 +20,7 @@ def create( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.EmbeddingResponse]: r"""Embeddings @@ -31,6 +32,7 @@ def create( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -46,7 +48,7 @@ def create( encoding_format=encoding_format, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/embeddings", base_url=base_url, @@ -57,6 +59,7 @@ def create( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.EmbeddingRequest @@ -117,6 +120,7 @@ async def create_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.EmbeddingResponse]: r"""Embeddings @@ -128,6 +132,7 @@ async def create_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -143,7 +148,7 @@ async def create_async( encoding_format=encoding_format, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/embeddings", base_url=base_url, @@ -154,6 +159,7 @@ async def create_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.EmbeddingRequest diff --git a/src/mistralai/files.py b/src/mistralai/files.py index e2977be2..aefa025a 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -6,7 +6,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env -from typing import List, Optional, Union +from typing import List, Mapping, Optional, Union class Files(BaseSDK): @@ -20,6 +20,7 @@ def upload( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.UploadFileOut]: r"""Upload File @@ -34,6 +35,7 @@ def upload( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -48,7 +50,7 @@ def upload( purpose=purpose, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/files", base_url=base_url, @@ -59,6 +61,7 @@ def upload( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, @@ -116,6 +119,7 @@ async def upload_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.UploadFileOut]: r"""Upload File @@ -130,6 +134,7 @@ async def upload_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -144,7 +149,7 @@ async def upload_async( purpose=purpose, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/files", base_url=base_url, @@ -155,6 +160,7 @@ async def upload_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, @@ -216,6 +222,7 @@ def list( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ListFilesOut]: r"""List Files @@ -230,6 +237,7 @@ def list( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -248,7 +256,7 @@ def list( purpose=purpose, ) - req = self.build_request( + req = self._build_request( method="GET", path="/v1/files", base_url=base_url, @@ -259,6 +267,7 @@ def list( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -313,6 +322,7 @@ async def list_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ListFilesOut]: r"""List Files @@ -327,6 +337,7 @@ async def list_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -345,7 +356,7 @@ async def list_async( purpose=purpose, ) - req = self.build_request_async( + req = self._build_request_async( method="GET", path="/v1/files", base_url=base_url, @@ -356,6 +367,7 @@ async def list_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -405,6 +417,7 @@ def retrieve( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.RetrieveFileOut]: r"""Retrieve File @@ -414,6 +427,7 @@ def retrieve( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -427,7 +441,7 @@ def retrieve( file_id=file_id, ) - req = self.build_request( + req = self._build_request( method="GET", path="/v1/files/{file_id}", base_url=base_url, @@ -438,6 +452,7 @@ def retrieve( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -487,6 +502,7 @@ async def retrieve_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.RetrieveFileOut]: r"""Retrieve File @@ -496,6 +512,7 @@ async def retrieve_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -509,7 +526,7 @@ async def retrieve_async( file_id=file_id, ) - req = self.build_request_async( + req = self._build_request_async( method="GET", path="/v1/files/{file_id}", base_url=base_url, @@ -520,6 +537,7 @@ async def retrieve_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -569,6 +587,7 @@ def delete( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.DeleteFileOut]: r"""Delete File @@ -578,6 +597,7 @@ def delete( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -591,7 +611,7 @@ def delete( file_id=file_id, ) - req = self.build_request( + req = self._build_request( method="DELETE", path="/v1/files/{file_id}", base_url=base_url, @@ -602,6 +622,7 @@ def delete( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -651,6 +672,7 @@ async def delete_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.DeleteFileOut]: r"""Delete File @@ -660,6 +682,7 @@ async def delete_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -673,7 +696,7 @@ async def delete_async( file_id=file_id, ) - req = self.build_request_async( + req = self._build_request_async( method="DELETE", path="/v1/files/{file_id}", base_url=base_url, @@ -684,6 +707,7 @@ async def delete_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -733,6 +757,7 @@ def download( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[httpx.Response]: r"""Download File @@ -742,6 +767,7 @@ def download( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -755,7 +781,7 @@ def download( file_id=file_id, ) - req = self.build_request( + req = self._build_request( method="GET", path="/v1/files/{file_id}/content", base_url=base_url, @@ -766,6 +792,7 @@ def download( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/octet-stream", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -816,6 +843,7 @@ async def download_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[httpx.Response]: r"""Download File @@ -825,6 +853,7 @@ async def download_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -838,7 +867,7 @@ async def download_async( file_id=file_id, ) - req = self.build_request_async( + req = self._build_request_async( method="GET", path="/v1/files/{file_id}/content", base_url=base_url, @@ -849,6 +878,7 @@ async def download_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/octet-stream", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -900,6 +930,7 @@ def get_signed_url( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.FileSignedURL]: r"""Get Signed Url @@ -908,6 +939,7 @@ def get_signed_url( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -922,7 +954,7 @@ def get_signed_url( expiry=expiry, ) - req = self.build_request( + req = self._build_request( method="GET", path="/v1/files/{file_id}/url", base_url=base_url, @@ -933,6 +965,7 @@ def get_signed_url( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -983,6 +1016,7 @@ async def get_signed_url_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.FileSignedURL]: r"""Get Signed Url @@ -991,6 +1025,7 @@ async def get_signed_url_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -1005,7 +1040,7 @@ async def get_signed_url_async( expiry=expiry, ) - req = self.build_request_async( + req = self._build_request_async( method="GET", path="/v1/files/{file_id}/url", base_url=base_url, @@ -1016,6 +1051,7 @@ async def get_signed_url_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 6f036311..60a4fd6c 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -5,7 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import Nullable, OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env -from typing import Any, Optional, Union +from typing import Any, Mapping, Optional, Union class Fim(BaseSDK): @@ -32,6 +32,7 @@ def complete( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.FIMCompletionResponse]: r"""Fim Completion @@ -50,6 +51,7 @@ def complete( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -72,7 +74,7 @@ def complete( min_tokens=min_tokens, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/fim/completions", base_url=base_url, @@ -83,6 +85,7 @@ def complete( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest @@ -155,6 +158,7 @@ async def complete_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.FIMCompletionResponse]: r"""Fim Completion @@ -173,6 +177,7 @@ async def complete_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -195,7 +200,7 @@ async def complete_async( min_tokens=min_tokens, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/fim/completions", base_url=base_url, @@ -206,6 +211,7 @@ async def complete_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest @@ -278,6 +284,7 @@ def stream( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: r"""Stream fim completion @@ -296,6 +303,7 @@ def stream( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -318,7 +326,7 @@ def stream( min_tokens=min_tokens, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/fim/completions#stream", base_url=base_url, @@ -329,6 +337,7 @@ def stream( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest @@ -405,6 +414,7 @@ async def stream_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: r"""Stream fim completion @@ -423,6 +433,7 @@ async def stream_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -445,7 +456,7 @@ async def stream_async( min_tokens=min_tokens, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/fim/completions#stream", base_url=base_url, @@ -456,6 +467,7 @@ async def stream_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="text/event-stream", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 17085b9d..afa1ff44 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -6,7 +6,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env -from typing import List, Optional, Union +from typing import List, Mapping, Optional, Union class Jobs(BaseSDK): @@ -25,6 +25,7 @@ def list( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.JobsOut]: r"""Get Fine Tuning Jobs @@ -42,6 +43,7 @@ def list( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -63,7 +65,7 @@ def list( suffix=suffix, ) - req = self.build_request( + req = self._build_request( method="GET", path="/v1/fine_tuning/jobs", base_url=base_url, @@ -74,6 +76,7 @@ def list( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -131,6 +134,7 @@ async def list_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.JobsOut]: r"""Get Fine Tuning Jobs @@ -148,6 +152,7 @@ async def list_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -169,7 +174,7 @@ async def list_async( suffix=suffix, ) - req = self.build_request_async( + req = self._build_request_async( method="GET", path="/v1/fine_tuning/jobs", base_url=base_url, @@ -180,6 +185,7 @@ async def list_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -248,6 +254,7 @@ def create( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]: r"""Create Fine Tuning Job @@ -264,6 +271,7 @@ def create( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -292,7 +300,7 @@ def create( auto_start=auto_start, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/fine_tuning/jobs", base_url=base_url, @@ -303,6 +311,7 @@ def create( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.JobIn @@ -377,6 +386,7 @@ async def create_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]: r"""Create Fine Tuning Job @@ -393,6 +403,7 @@ async def create_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -421,7 +432,7 @@ async def create_async( auto_start=auto_start, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/fine_tuning/jobs", base_url=base_url, @@ -432,6 +443,7 @@ async def create_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.JobIn @@ -487,6 +499,7 @@ def get( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.DetailedJobOut]: r"""Get Fine Tuning Job @@ -496,6 +509,7 @@ def get( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -509,7 +523,7 @@ def get( job_id=job_id, ) - req = self.build_request( + req = self._build_request( method="GET", path="/v1/fine_tuning/jobs/{job_id}", base_url=base_url, @@ -520,6 +534,7 @@ def get( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -569,6 +584,7 @@ async def get_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.DetailedJobOut]: r"""Get Fine Tuning Job @@ -578,6 +594,7 @@ async def get_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -591,7 +608,7 @@ async def get_async( job_id=job_id, ) - req = self.build_request_async( + req = self._build_request_async( method="GET", path="/v1/fine_tuning/jobs/{job_id}", base_url=base_url, @@ -602,6 +619,7 @@ async def get_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -651,6 +669,7 @@ def cancel( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.DetailedJobOut]: r"""Cancel Fine Tuning Job @@ -660,6 +679,7 @@ def cancel( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -673,7 +693,7 @@ def cancel( job_id=job_id, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/fine_tuning/jobs/{job_id}/cancel", base_url=base_url, @@ -684,6 +704,7 @@ def cancel( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -733,6 +754,7 @@ async def cancel_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.DetailedJobOut]: r"""Cancel Fine Tuning Job @@ -742,6 +764,7 @@ async def cancel_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -755,7 +778,7 @@ async def cancel_async( job_id=job_id, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/fine_tuning/jobs/{job_id}/cancel", base_url=base_url, @@ -766,6 +789,7 @@ async def cancel_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -815,6 +839,7 @@ def start( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.DetailedJobOut]: r"""Start Fine Tuning Job @@ -824,6 +849,7 @@ def start( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -837,7 +863,7 @@ def start( job_id=job_id, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/fine_tuning/jobs/{job_id}/start", base_url=base_url, @@ -848,6 +874,7 @@ def start( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -897,6 +924,7 @@ async def start_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.DetailedJobOut]: r"""Start Fine Tuning Job @@ -906,6 +934,7 @@ async def start_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -919,7 +948,7 @@ async def start_async( job_id=job_id, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/fine_tuning/jobs/{job_id}/start", base_url=base_url, @@ -930,6 +959,7 @@ async def start_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index 59ea13fc..8642d9fa 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -6,7 +6,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Mapping, Optional class MistralJobs(BaseSDK): @@ -23,6 +23,7 @@ def list( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.BatchJobsOut]: r"""Get Batch Jobs @@ -38,6 +39,7 @@ def list( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -57,7 +59,7 @@ def list( status=status, ) - req = self.build_request( + req = self._build_request( method="GET", path="/v1/batch/jobs", base_url=base_url, @@ -68,6 +70,7 @@ def list( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -123,6 +126,7 @@ async def list_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.BatchJobsOut]: r"""Get Batch Jobs @@ -138,6 +142,7 @@ async def list_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -157,7 +162,7 @@ async def list_async( status=status, ) - req = self.build_request_async( + req = self._build_request_async( method="GET", path="/v1/batch/jobs", base_url=base_url, @@ -168,6 +173,7 @@ async def list_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -221,6 +227,7 @@ def create( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.BatchJobOut]: r"""Create Batch Job @@ -234,6 +241,7 @@ def create( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -251,7 +259,7 @@ def create( timeout_hours=timeout_hours, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/batch/jobs", base_url=base_url, @@ -262,6 +270,7 @@ def create( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.BatchJobIn @@ -318,6 +327,7 @@ async def create_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.BatchJobOut]: r"""Create Batch Job @@ -331,6 +341,7 @@ async def create_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -348,7 +359,7 @@ async def create_async( timeout_hours=timeout_hours, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/batch/jobs", base_url=base_url, @@ -359,6 +370,7 @@ async def create_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.BatchJobIn @@ -411,6 +423,7 @@ def get( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.BatchJobOut]: r"""Get Batch Job @@ -420,6 +433,7 @@ def get( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -433,7 +447,7 @@ def get( job_id=job_id, ) - req = self.build_request( + req = self._build_request( method="GET", path="/v1/batch/jobs/{job_id}", base_url=base_url, @@ -444,6 +458,7 @@ def get( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -493,6 +508,7 @@ async def get_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.BatchJobOut]: r"""Get Batch Job @@ -502,6 +518,7 @@ async def get_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -515,7 +532,7 @@ async def get_async( job_id=job_id, ) - req = self.build_request_async( + req = self._build_request_async( method="GET", path="/v1/batch/jobs/{job_id}", base_url=base_url, @@ -526,6 +543,7 @@ async def get_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -575,6 +593,7 @@ def cancel( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.BatchJobOut]: r"""Cancel Batch Job @@ -584,6 +603,7 @@ def cancel( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -597,7 +617,7 @@ def cancel( job_id=job_id, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/batch/jobs/{job_id}/cancel", base_url=base_url, @@ -608,6 +628,7 @@ def cancel( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -657,6 +678,7 @@ async def cancel_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.BatchJobOut]: r"""Cancel Batch Job @@ -666,6 +688,7 @@ async def cancel_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. """ base_url = None url_variables = None @@ -679,7 +702,7 @@ async def cancel_async( job_id=job_id, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/batch/jobs/{job_id}/cancel", base_url=base_url, @@ -690,6 +713,7 @@ async def cancel_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index 195ea593..4658324d 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -97,7 +97,7 @@ class ChatCompletionRequest(BaseModel): temperature: OptionalNullable[float] = UNSET r"""What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: Optional[float] = 1 + top_p: Optional[float] = None r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: OptionalNullable[int] = UNSET @@ -127,7 +127,7 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - safe_prompt: Optional[bool] = False + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @model_serializer(mode="wrap") diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index fee65092..243f0697 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -100,7 +100,7 @@ class ChatCompletionStreamRequest(BaseModel): temperature: OptionalNullable[float] = UNSET r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: Optional[float] = 1 + top_p: Optional[float] = None r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" max_tokens: OptionalNullable[int] = UNSET @@ -129,7 +129,7 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - safe_prompt: Optional[bool] = False + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @model_serializer(mode="wrap") diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py index 4f2bb0c2..e6d86877 100644 --- a/src/mistralai/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/models/files_api_routes_upload_fileop.py @@ -19,7 +19,7 @@ class FileTypedDict(TypedDict): class File(BaseModel): file_name: Annotated[ - str, pydantic.Field(alias="file"), FieldMetadata(multipart=True) + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) ] content: Annotated[ @@ -51,11 +51,7 @@ class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): - file: Annotated[ - File, - pydantic.Field(alias=""), - FieldMetadata(multipart=MultipartFormMetadata(file=True)), - ] + file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] r"""The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: ```bash diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index 44e95ce3..e01e8c91 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -5,7 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env -from typing import Any, Optional +from typing import Any, Mapping, Optional class Models(BaseSDK): @@ -17,6 +17,7 @@ def list( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ModelList]: r"""List Models @@ -25,6 +26,7 @@ def list( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -33,7 +35,7 @@ def list( if server_url is not None: base_url = server_url - req = self.build_request( + req = self._build_request( method="GET", path="/v1/models", base_url=base_url, @@ -44,6 +46,7 @@ def list( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -96,6 +99,7 @@ async def list_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ModelList]: r"""List Models @@ -104,6 +108,7 @@ async def list_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -112,7 +117,7 @@ async def list_async( if server_url is not None: base_url = server_url - req = self.build_request_async( + req = self._build_request_async( method="GET", path="/v1/models", base_url=base_url, @@ -123,6 +128,7 @@ async def list_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -176,6 +182,7 @@ def retrieve( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[ models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet ]: @@ -187,6 +194,7 @@ def retrieve( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -200,7 +208,7 @@ def retrieve( model_id=model_id, ) - req = self.build_request( + req = self._build_request( method="GET", path="/v1/models/{model_id}", base_url=base_url, @@ -211,6 +219,7 @@ def retrieve( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -269,6 +278,7 @@ async def retrieve_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[ models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet ]: @@ -280,6 +290,7 @@ async def retrieve_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -293,7 +304,7 @@ async def retrieve_async( model_id=model_id, ) - req = self.build_request_async( + req = self._build_request_async( method="GET", path="/v1/models/{model_id}", base_url=base_url, @@ -304,6 +315,7 @@ async def retrieve_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -362,6 +374,7 @@ def delete( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.DeleteModelOut]: r"""Delete Model @@ -371,6 +384,7 @@ def delete( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -384,7 +398,7 @@ def delete( model_id=model_id, ) - req = self.build_request( + req = self._build_request( method="DELETE", path="/v1/models/{model_id}", base_url=base_url, @@ -395,6 +409,7 @@ def delete( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -448,6 +463,7 @@ async def delete_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.DeleteModelOut]: r"""Delete Model @@ -457,6 +473,7 @@ async def delete_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -470,7 +487,7 @@ async def delete_async( model_id=model_id, ) - req = self.build_request_async( + req = self._build_request_async( method="DELETE", path="/v1/models/{model_id}", base_url=base_url, @@ -481,6 +498,7 @@ async def delete_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -536,6 +554,7 @@ def update( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.FTModelOut]: r"""Update Fine Tuned Model @@ -547,6 +566,7 @@ def update( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -564,7 +584,7 @@ def update( ), ) - req = self.build_request( + req = self._build_request( method="PATCH", path="/v1/fine_tuning/models/{model_id}", base_url=base_url, @@ -575,6 +595,7 @@ def update( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn @@ -629,6 +650,7 @@ async def update_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.FTModelOut]: r"""Update Fine Tuned Model @@ -640,6 +662,7 @@ async def update_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -657,7 +680,7 @@ async def update_async( ), ) - req = self.build_request_async( + req = self._build_request_async( method="PATCH", path="/v1/fine_tuning/models/{model_id}", base_url=base_url, @@ -668,6 +691,7 @@ async def update_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn @@ -720,6 +744,7 @@ def archive( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ArchiveFTModelOut]: r"""Archive Fine Tuned Model @@ -729,6 +754,7 @@ def archive( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -742,7 +768,7 @@ def archive( model_id=model_id, ) - req = self.build_request( + req = self._build_request( method="POST", path="/v1/fine_tuning/models/{model_id}/archive", base_url=base_url, @@ -753,6 +779,7 @@ def archive( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -804,6 +831,7 @@ async def archive_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.ArchiveFTModelOut]: r"""Archive Fine Tuned Model @@ -813,6 +841,7 @@ async def archive_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -826,7 +855,7 @@ async def archive_async( model_id=model_id, ) - req = self.build_request_async( + req = self._build_request_async( method="POST", path="/v1/fine_tuning/models/{model_id}/archive", base_url=base_url, @@ -837,6 +866,7 @@ async def archive_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -888,6 +918,7 @@ def unarchive( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.UnarchiveFTModelOut]: r"""Unarchive Fine Tuned Model @@ -897,6 +928,7 @@ def unarchive( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -910,7 +942,7 @@ def unarchive( model_id=model_id, ) - req = self.build_request( + req = self._build_request( method="DELETE", path="/v1/fine_tuning/models/{model_id}/archive", base_url=base_url, @@ -921,6 +953,7 @@ def unarchive( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) @@ -972,6 +1005,7 @@ async def unarchive_async( retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, ) -> Optional[models.UnarchiveFTModelOut]: r"""Unarchive Fine Tuned Model @@ -981,6 +1015,7 @@ async def unarchive_async( :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
""" base_url = None url_variables = None @@ -994,7 +1029,7 @@ async def unarchive_async( model_id=model_id, ) - req = self.build_request_async( + req = self._build_request_async( method="DELETE", path="/v1/fine_tuning/models/{model_id}/archive", base_url=base_url, @@ -1005,6 +1040,7 @@ async def unarchive_async( request_has_query_params=True, user_agent_header="user-agent", accept_header_value="application/json", + http_headers=http_headers, security=self.sdk_configuration.security, timeout_ms=timeout_ms, ) diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index bb046ba0..e7c07181 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.5" - gen_version: str = "2.470.1" - user_agent: str = "speakeasy-sdk/python 1.2.5 2.470.1 0.0.2 mistralai" + sdk_version: str = "1.2.6" + gen_version: str = "2.486.1" + user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/src/mistralai/utils/forms.py b/src/mistralai/utils/forms.py index 9f5a731e..0472aba8 100644 --- a/src/mistralai/utils/forms.py +++ b/src/mistralai/utils/forms.py @@ -109,13 +109,12 @@ def serialize_multipart_form( if not field_metadata: continue - f_name = field.alias if field.alias is not None else name + f_name = field.alias if field.alias else name if field_metadata.file: file_fields: Dict[str, FieldInfo] = val.__class__.model_fields file_name = "" - field_name = "" content = None content_type = None @@ -131,20 +130,15 @@ def serialize_multipart_form( elif file_field_name == "content_type": content_type = getattr(val, file_field_name, None) else: - field_name = ( - file_field.alias - if file_field.alias is not None - else file_field_name - ) file_name = 
getattr(val, file_field_name) - if field_name == "" or file_name == "" or content is None: + if file_name == "" or content is None: raise ValueError("invalid multipart/form-data file") if content_type is not None: - files[field_name] = (file_name, content, content_type) + files[f_name] = (file_name, content, content_type) else: - files[field_name] = (file_name, content) + files[f_name] = (file_name, content) elif field_metadata.json: files[f_name] = ( None, diff --git a/src/mistralai/utils/requestbodies.py b/src/mistralai/utils/requestbodies.py index 4f586ae7..d5240dd5 100644 --- a/src/mistralai/utils/requestbodies.py +++ b/src/mistralai/utils/requestbodies.py @@ -23,7 +23,7 @@ @dataclass class SerializedRequestBody: - media_type: str + media_type: Optional[str] = None content: Optional[Any] = None data: Optional[Any] = None files: Optional[Any] = None From a1c6933f2ccca5e63d7235f97676bc8eb2a04d05 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:15:36 +0100 Subject: [PATCH 094/223] fix Unpin dependencies (#172) --- pyproject.toml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9f5e9f16..5f2f4a65 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,21 +20,21 @@ in-project = true [tool.poetry.dependencies] python = "^3.8" -eval-type-backport = "^0.2.0" -httpx = "^0.27.0" -jsonpath-python = "^1.0.6" -pydantic = "^2.9.0" -python-dateutil = "^2.8.2" -typing-inspect = "^0.9.0" -google-auth = { version = "2.27.0", optional = true } -requests = { version = "^2.32.3", optional = true } +eval-type-backport = ">=0.2.0" +httpx = ">=0.27.0" +jsonpath-python = ">=1.0.6" +pydantic = ">=2.9.0" +python-dateutil = ">=2.8.2" +typing-inspect = ">=0.9.0" +google-auth = { version = ">=2.27.0", optional = true } +requests = { version = ">=2.32.3", optional = true } [tool.poetry.group.dev.dependencies] -mypy = "==1.13.0" -pylint = 
"==3.2.3" -pytest = "^8.2.2" -pytest-asyncio = "^0.23.7" -types-python-dateutil = "^2.9.0.20240316" +mypy = ">=1.13.0" +pylint = ">=3.2.3" +pytest = ">=8.2.2" +pytest-asyncio = ">=0.23.7" +types-python-dateutil = ">=2.9.0.20240316" [tool.poetry.extras] gcp = ["google-auth", "requests"] From a117be86efd6368acee8afac3a1ce03783763f96 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 11:06:22 +0100 Subject: [PATCH 095/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.3.0=20(#173)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.468.5 * regen with 1.462.2 --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .speakeasy/gen.lock | 22 ++++++++---- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 5 ++- .speakeasy/workflow.yaml | 2 +- RELEASES.md | 12 ++++++- poetry.lock | 59 +++++++++++++++++++++++++++++-- pyproject.toml | 2 +- src/mistralai/_version.py | 2 +- src/mistralai/sdkconfiguration.py | 4 +-- 9 files changed, 90 insertions(+), 20 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index d8a77e97..d568dc71 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,8 +5,8 @@ management: docVersion: 0.0.2 speakeasyVersion: 1.462.2 generationVersion: 2.486.1 - releaseVersion: 1.2.6 - configChecksum: 195a36c6a88eb19c3a487f1fe4a39bcc + releaseVersion: 1.3.0 + configChecksum: e3b0a94e8a244b9061b81373ae58d4ed repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -402,9 +402,11 @@ examples: parameters: path: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + requestBody: + application/json: {} responses: "200": - 
application/json: {"id": "", "created": 857478, "owned_by": "", "root": "", "archived": true, "job": "905bf4aa-77f2-404e-b754-c352acfe5407"} + application/json: {"id": "", "created": 857478, "owned_by": "", "root": "", "archived": true, "capabilities": {}, "job": "905bf4aa-77f2-404e-b754-c352acfe5407"} jobs_api_routes_fine_tuning_archive_fine_tuned_model: "": parameters: @@ -457,7 +459,7 @@ examples: jobs_api_routes_fine_tuning_create_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: requestBody: - application/json: {"model": "codestral-latest"} + application/json: {"model": "codestral-latest", "hyperparameters": {}} responses: "200": application/json: {"id": "a621cf02-1cd9-4cf5-8403-315211a509a3", "auto_start": false, "model": "2", "status": "FAILED", "job_type": "", "created_at": 550483, "modified_at": 906537, "training_files": ["74c2becc-3769-4177-b5e0-24985613de0e"]} @@ -468,7 +470,7 @@ examples: job_id: "b18d8d81-fd7b-4764-a31e-475cb1f36591" responses: "200": - application/json: {"id": "58ccc65b-c928-4154-952e-30c048b8c2b5", "auto_start": false, "model": "open-mistral-nemo", "status": "VALIDATED", "job_type": "", "created_at": 968091, "modified_at": 32069, "training_files": [], "checkpoints": []} + application/json: {"id": "58ccc65b-c928-4154-952e-30c048b8c2b5", "auto_start": false, "hyperparameters": {}, "model": "open-mistral-nemo", "status": "VALIDATED", "job_type": "", "created_at": 968091, "modified_at": 32069, "training_files": [], "checkpoints": []} jobs_api_routes_fine_tuning_cancel_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: parameters: @@ -476,7 +478,7 @@ examples: job_id: "03fa7112-315a-4072-a9f2-43f3f1ec962e" responses: "200": - application/json: {"id": "fb7dec95-f740-47b2-b8ee-d9b046936a67", "auto_start": true, "model": "mistral-large-latest", "status": "VALIDATED", "job_type": "", "created_at": 252151, "modified_at": 56775, "training_files": [], "checkpoints": []} + 
application/json: {"id": "fb7dec95-f740-47b2-b8ee-d9b046936a67", "auto_start": true, "hyperparameters": {}, "model": "mistral-large-latest", "status": "VALIDATED", "job_type": "", "created_at": 252151, "modified_at": 56775, "training_files": [], "checkpoints": []} jobs_api_routes_fine_tuning_start_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: parameters: @@ -484,7 +486,7 @@ examples: job_id: "0eb0f807-fb9f-4e46-9c13-4e257df6e1ba" responses: "200": - application/json: {"id": "bc3810ce-43e6-4fde-85a4-cd01d1f9cf8f", "auto_start": true, "model": "codestral-latest", "status": "RUNNING", "job_type": "", "created_at": 186591, "modified_at": 451468, "training_files": [], "checkpoints": []} + application/json: {"id": "bc3810ce-43e6-4fde-85a4-cd01d1f9cf8f", "auto_start": true, "hyperparameters": {}, "model": "codestral-latest", "status": "RUNNING", "job_type": "", "created_at": 186591, "modified_at": 451468, "training_files": [], "checkpoints": []} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: @@ -501,6 +503,7 @@ examples: responses: "422": application/json: {} + "200": {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: @@ -517,6 +520,7 @@ examples: responses: "422": application/json: {} + "200": {} agents_completion_v1_agents_completions_post: speakeasy-default-agents-completion-v1-agents-completions-post: requestBody: @@ -533,6 +537,7 @@ examples: responses: "422": application/json: {} + "200": {} embeddings_v1_embeddings_post: speakeasy-default-embeddings-v1-embeddings-post: requestBody: @@ -547,6 +552,9 @@ examples: parameters: path: file_id: "" + responses: + "200": + application/octet-stream: "x-file: example.file" jobs_api_routes_batch_get_batch_jobs: speakeasy-default-jobs-api-routes-batch-get-batch-jobs: responses: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 8680a0e2..6d47a6bf 
100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.6 + version: 1.3.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index aa87f0ae..2a246222 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -18,7 +18,6 @@ sources: sourceBlobDigest: sha256:ebc7c1bb20aa87873a255cebea1e451099d8949ea1bbff81ec5fd45a107e3a32 tags: - latest - - speakeasy-sdk-regen-1736155788 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -40,10 +39,10 @@ targets: sourceRevisionDigest: sha256:84bbc6f6011a31e21c8a674b01104446f986c7b5a6b002357800be8ef939b8da sourceBlobDigest: sha256:ebc7c1bb20aa87873a255cebea1e451099d8949ea1bbff81ec5fd45a107e3a32 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:3d7ebf5043f98a2e9b07c66057c279f90272a813875b29bd9b75595f77caf0c4 + codeSamplesRevisionDigest: sha256:3868602e7de978e42d8372711d9711af5338a2f2bbf7671a5998c6176f74f58f workflow: workflowVersion: 1.0.0 - speakeasyVersion: latest + speakeasyVersion: 1.462.2 sources: mistral-azure-source: inputs: diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 3b3c6d55..4dc74452 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,5 @@ workflowVersion: 1.0.0 -speakeasyVersion: latest +speakeasyVersion: 1.462.2 # # Pinned to unblock https://github.com/mistralai/client-python/pull/173 sources: mistral-azure-source: inputs: diff --git a/RELEASES.md b/RELEASES.md index 6544f991..c4fa9b9c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -118,4 +118,14 @@ Based on: ### Generated - [python v1.2.6] . ### Releases -- [PyPI v1.2.6] https://pypi.org/project/mistralai/1.2.6 - . 
\ No newline at end of file +- [PyPI v1.2.6] https://pypi.org/project/mistralai/1.2.6 - . + +## 2025-01-14 09:35:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.468.5 (2.493.11) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.3.0] . +### Releases +- [PyPI v1.3.0] https://pypi.org/project/mistralai/1.3.0 - . \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index ac37e529..fa5e0ef5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -6,6 +6,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -20,6 +21,7 @@ version = "4.5.2" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, @@ -42,6 +44,7 @@ version = "3.2.4" description = "An abstract syntax tree for Python with inference support." 
optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, @@ -56,6 +59,8 @@ version = "5.5.0" description = "Extensible memoizing collections and decorators" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, @@ -67,6 +72,7 @@ version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, @@ -78,6 +84,8 @@ version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = true python-versions = ">=3.7.0" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, @@ -192,6 +200,8 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +markers = "sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -203,6 +213,7 @@ version = "0.3.9" description = "serialize all of Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, @@ -218,6 +229,7 @@ version = "0.2.0" description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, @@ -232,6 +244,8 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -246,6 +260,8 @@ version = "2.27.0" description = "Google Authentication Library" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "google-auth-2.27.0.tar.gz", hash = 
"sha256:e863a56ccc2d8efa83df7a80272601e43487fa9a728a376205c86c26aaefa821"}, {file = "google_auth-2.27.0-py2.py3-none-any.whl", hash = "sha256:8e4bad367015430ff253fe49d500fdc3396c1a434db5740828c728e45bcce245"}, @@ -269,6 +285,7 @@ version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -280,6 +297,7 @@ version = "1.0.7" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, @@ -301,6 +319,7 @@ version = "0.27.2" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, @@ -326,6 +345,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -340,6 +360,7 @@ version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, @@ -351,6 +372,7 @@ version = "5.13.2" description = "A Python utility / library to sort Python imports." 
optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, @@ -365,6 +387,7 @@ version = "1.0.6" description = "A more powerful JSONPath implementation in modern python" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, @@ -376,6 +399,7 @@ version = "0.7.0" description = "McCabe checker, plugin for flake8" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, @@ -387,6 +411,7 @@ version = "1.13.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, @@ -440,6 +465,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.5" +groups = ["main", "dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -451,6 +477,7 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -462,6 +489,7 @@ version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, @@ -478,6 +506,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -493,6 +522,8 @@ version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, {file = 
"pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, @@ -504,6 +535,8 @@ version = "0.4.1" description = "A collection of ASN.1-based protocols modules" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, @@ -518,6 +551,7 @@ version = "2.10.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, @@ -538,6 +572,7 @@ version = "2.27.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, @@ -650,6 +685,7 @@ version = "3.2.3" description = "python code static checker" optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, @@ -680,6 +716,7 @@ version = "8.3.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" 
+groups = ["dev"] files = [ {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, @@ -702,6 +739,7 @@ version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, @@ -720,6 +758,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -734,6 +773,8 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -755,6 +796,8 @@ version = "4.9" description = "Pure-Python RSA implementation" optional = true python-versions = ">=3.6,<4" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, @@ -769,6 +812,7 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["main"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -780,6 +824,7 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -791,6 +836,8 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash 
= "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -832,6 +879,7 @@ version = "0.13.2" description = "Style preserving TOML library" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, @@ -843,6 +891,7 @@ version = "2.9.0.20241003" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, @@ -854,6 +903,7 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, @@ -865,6 +915,7 @@ version = "0.9.0" description = "Runtime inspection utilities for typing module." optional = false python-versions = "*" +groups = ["main"] files = [ {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, @@ -880,6 +931,8 @@ version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, @@ -895,6 +948,6 @@ zstd = ["zstandard (>=0.18.0)"] gcp = ["google-auth", "requests"] [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = "^3.8" -content-hash = "2d6b37c110c1e85e5beeb35bf03affb94d079bd3768e66e0f6dc9277238a483a" +content-hash = "e47806611b5611f5dd208a05ba3772561280b9a23756fdb7046a46cb78d4f8ad" diff --git a/pyproject.toml b/pyproject.toml index 5f2f4a65..d68f83ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.2.6" +version = "1.3.0" description = "Python Client SDK for the Mistral AI API." authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index d4a21d0d..dc0cc3b9 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.2.6" +__version__: str = "1.3.0" try: if __package__ is not None: diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index e7c07181..398725e1 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.6" + sdk_version: str = "1.3.0" gen_version: str = "2.486.1" - user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai" + user_agent: str = "speakeasy-sdk/python 1.3.0 2.486.1 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From 
2764471679725ea48847382bc13cd5b3f019d1e7 Mon Sep 17 00:00:00 2001 From: Marcelo Trylesinski Date: Wed, 15 Jan 2025 09:24:53 +0000 Subject: [PATCH 096/223] Update pyproject.toml (#175) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d68f83ac..a3c6e28e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ include = ["py.typed", "src/mistralai/py.typed"] in-project = true [tool.poetry.dependencies] -python = "^3.8" +python = ">=3.8" eval-type-backport = ">=0.2.0" httpx = ">=0.27.0" jsonpath-python = ">=1.0.6" From ba0561dcef2ca87187ddb1cb938b24de3537a784 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria Date: Wed, 15 Jan 2025 11:10:08 +0100 Subject: [PATCH 097/223] [Github Actions] Pin Poetry to 1.8.5 to avoid speakeasy run failure --- .github/workflows/sdk_generation_mistralai_azure_sdk.yaml | 3 +++ .github/workflows/sdk_generation_mistralai_gcp_sdk.yaml | 3 +++ .github/workflows/sdk_generation_mistralai_sdk.yaml | 3 +++ .github/workflows/sdk_publish_mistralai_sdk.yaml | 4 ++++ .speakeasy/workflow.yaml | 5 ++++- 5 files changed, 17 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml index 7ec5bb8d..a4c7e54b 100644 --- a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -20,6 +20,9 @@ jobs: with: force: ${{ github.event.inputs.force }} mode: pr + # We need poetry < 2.0 to avoid the speakeasy run failure. + # This was fixed in speakasy v1.467.0 but we're locked at 1.462.2 waiting for other speakeasy fixes. 
+ poetry_version: 1.8.5 set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest target: mistral-python-sdk-azure diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml index c4da64f7..c1af10b9 100644 --- a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -20,6 +20,9 @@ jobs: with: force: ${{ github.event.inputs.force }} mode: pr + # We need poetry < 2.0 to avoid the speakeasy run failure. + # This was fixed in speakasy v1.467.0 but we're locked at 1.462.2 waiting for other speakeasy fixes. + poetry_version: 1.8.5 set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest target: mistral-python-sdk-google-cloud diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index 7d0540e7..3f034dac 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -20,6 +20,9 @@ jobs: with: force: ${{ github.event.inputs.force }} mode: pr + # We need poetry < 2.0 to avoid the speakeasy run failure. + # This was fixed in speakasy v1.467.0 but we're locked at 1.462.2 waiting for other speakeasy fixes. + poetry_version: 1.8.5 set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest target: mistralai-sdk diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml index 87160243..a7f08998 100644 --- a/.github/workflows/sdk_publish_mistralai_sdk.yaml +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -14,6 +14,10 @@ permissions: jobs: publish: uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@v15 + with: + # We need poetry < 2.0 to avoid the speakeasy run failure. + # This was fixed in speakasy v1.467.0 but we're locked at 1.462.2 waiting for other speakeasy fixes. 
+ poetry_version: 1.8.5 secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 4dc74452..b3d2039f 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,8 @@ workflowVersion: 1.0.0 -speakeasyVersion: 1.462.2 # # Pinned to unblock https://github.com/mistralai/client-python/pull/173 +# speakeasyVersion is pinned to unblock https://github.com/mistralai/client-python/pull/173 +# The speakeasy run was appending `_` to some attributes to avoid conflicts with reserved keywords. +# This would have change the SDK APIs and break the existing clients which we don't want. +speakeasyVersion: 1.462.2 sources: mistral-azure-source: inputs: From feb471236a0432d8fbb9203ae24c39318b25641a Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 15 Jan 2025 10:45:18 +0000 Subject: [PATCH 098/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.462.2 --- .speakeasy/gen.lock | 4 +- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 3 +- RELEASES.md | 12 +++++- poetry.lock | 68 ++++--------------------------- pyproject.toml | 2 +- src/mistralai/_version.py | 2 +- src/mistralai/sdkconfiguration.py | 4 +- 8 files changed, 27 insertions(+), 70 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index d568dc71..117dba88 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,8 +5,8 @@ management: docVersion: 0.0.2 speakeasyVersion: 1.462.2 generationVersion: 2.486.1 - releaseVersion: 1.3.0 - configChecksum: e3b0a94e8a244b9061b81373ae58d4ed + releaseVersion: 1.3.1 + configChecksum: 8f82dea19ae1fcd5cea1d4f7837fc137 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git 
a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 6d47a6bf..40dee601 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.3.0 + version: 1.3.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 2a246222..063d062a 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -18,6 +18,7 @@ sources: sourceBlobDigest: sha256:ebc7c1bb20aa87873a255cebea1e451099d8949ea1bbff81ec5fd45a107e3a32 tags: - latest + - speakeasy-sdk-regen-1736937863 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -39,7 +40,7 @@ targets: sourceRevisionDigest: sha256:84bbc6f6011a31e21c8a674b01104446f986c7b5a6b002357800be8ef939b8da sourceBlobDigest: sha256:ebc7c1bb20aa87873a255cebea1e451099d8949ea1bbff81ec5fd45a107e3a32 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:3868602e7de978e42d8372711d9711af5338a2f2bbf7671a5998c6176f74f58f + codeSamplesRevisionDigest: sha256:7461afcdcac02dc78b61b234ee4c5e25abbaca9ad6cf5aab415e7c97b5638b49 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.462.2 diff --git a/RELEASES.md b/RELEASES.md index c4fa9b9c..7c74ac75 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -128,4 +128,14 @@ Based on: ### Generated - [python v1.3.0] . ### Releases -- [PyPI v1.3.0] https://pypi.org/project/mistralai/1.3.0 - . \ No newline at end of file +- [PyPI v1.3.0] https://pypi.org/project/mistralai/1.3.0 - . + +## 2025-01-15 10:44:07 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.462.2 (2.486.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.3.1] . +### Releases +- [PyPI v1.3.1] https://pypi.org/project/mistralai/1.3.1 - . 
\ No newline at end of file diff --git a/poetry.lock b/poetry.lock index fa5e0ef5..e93eb66b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. [[package]] name = "annotated-types" @@ -6,7 +6,6 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -21,7 +20,6 @@ version = "4.5.2" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, @@ -44,7 +42,6 @@ version = "3.2.4" description = "An abstract syntax tree for Python with inference support." 
optional = false python-versions = ">=3.8.0" -groups = ["dev"] files = [ {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, @@ -59,8 +56,6 @@ version = "5.5.0" description = "Extensible memoizing collections and decorators" optional = true python-versions = ">=3.7" -groups = ["main"] -markers = "extra == \"gcp\"" files = [ {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, @@ -72,7 +67,6 @@ version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, @@ -84,8 +78,6 @@ version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = true python-versions = ">=3.7.0" -groups = ["main"] -markers = "extra == \"gcp\"" files = [ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, @@ -200,8 +192,6 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["dev"] -markers = "sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -213,7 +203,6 @@ version = "0.3.9" description = "serialize all of Python" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, @@ -229,7 +218,6 @@ version = "0.2.0" description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, @@ -244,8 +232,6 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" -groups = ["main", "dev"] -markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -260,8 +246,6 @@ version = "2.27.0" description = "Google Authentication Library" optional = true python-versions = ">=3.7" -groups = ["main"] -markers = "extra == \"gcp\"" files = [ {file = "google-auth-2.27.0.tar.gz", hash = 
"sha256:e863a56ccc2d8efa83df7a80272601e43487fa9a728a376205c86c26aaefa821"}, {file = "google_auth-2.27.0-py2.py3-none-any.whl", hash = "sha256:8e4bad367015430ff253fe49d500fdc3396c1a434db5740828c728e45bcce245"}, @@ -285,7 +269,6 @@ version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -297,7 +280,6 @@ version = "1.0.7" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, @@ -319,7 +301,6 @@ version = "0.27.2" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, @@ -345,7 +326,6 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -360,7 +340,6 @@ version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" -groups = ["dev"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, @@ -372,7 +351,6 @@ version = "5.13.2" description = "A Python utility / library to sort Python imports." 
optional = false python-versions = ">=3.8.0" -groups = ["dev"] files = [ {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, @@ -387,7 +365,6 @@ version = "1.0.6" description = "A more powerful JSONPath implementation in modern python" optional = false python-versions = ">=3.6" -groups = ["main"] files = [ {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, @@ -399,7 +376,6 @@ version = "0.7.0" description = "McCabe checker, plugin for flake8" optional = false python-versions = ">=3.6" -groups = ["dev"] files = [ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, @@ -411,7 +387,6 @@ version = "1.13.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, @@ -465,7 +440,6 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.5" -groups = ["main", "dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -477,7 +451,6 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -489,7 +462,6 @@ version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, @@ -506,7 +478,6 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -522,8 +493,6 @@ version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = true python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"gcp\"" files = [ {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, {file = 
"pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, @@ -535,8 +504,6 @@ version = "0.4.1" description = "A collection of ASN.1-based protocols modules" optional = true python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"gcp\"" files = [ {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, @@ -551,7 +518,6 @@ version = "2.10.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, @@ -572,7 +538,6 @@ version = "2.27.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" -groups = ["main"] files = [ {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, @@ -685,7 +650,6 @@ version = "3.2.3" description = "python code static checker" optional = false python-versions = ">=3.8.0" -groups = ["dev"] files = [ {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, @@ -716,7 +680,6 @@ version = "8.3.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" 
-groups = ["dev"] files = [ {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, @@ -739,7 +702,6 @@ version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, @@ -758,7 +720,6 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -773,8 +734,6 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = true python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"gcp\"" files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -792,15 +751,12 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] [[package]] name = "rsa" -version = "4.9" +version = "4.2" description = "Pure-Python RSA implementation" optional = true -python-versions = ">=3.6,<4" -groups = ["main"] -markers = "extra == \"gcp\"" +python-versions = "*" files = [ - {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, - {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, + {file = "rsa-4.2.tar.gz", hash = "sha256:aaefa4b84752e3e99bd8333a2e1e3e7a7da64614042bd66f775573424370108a"}, ] [package.dependencies] @@ -812,7 +768,6 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -groups = ["main"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -824,7 +779,6 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" -groups = ["main"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -836,8 +790,6 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" -groups = 
["dev"] -markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -879,7 +831,6 @@ version = "0.13.2" description = "Style preserving TOML library" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, @@ -891,7 +842,6 @@ version = "2.9.0.20241003" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" -groups = ["dev"] files = [ {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, @@ -903,7 +853,6 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, @@ -915,7 +864,6 @@ version = "0.9.0" description = "Runtime inspection utilities for typing module." 
optional = false python-versions = "*" -groups = ["main"] files = [ {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, @@ -931,8 +879,6 @@ version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = true python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"gcp\"" files = [ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, @@ -948,6 +894,6 @@ zstd = ["zstandard (>=0.18.0)"] gcp = ["google-auth", "requests"] [metadata] -lock-version = "2.1" -python-versions = "^3.8" -content-hash = "e47806611b5611f5dd208a05ba3772561280b9a23756fdb7046a46cb78d4f8ad" +lock-version = "2.0" +python-versions = ">=3.8" +content-hash = "f0f19d81d36ebe966895f21a0a9dd33118783904418f4103189c475e5903b958" diff --git a/pyproject.toml b/pyproject.toml index a3c6e28e..782e372e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.3.0" +version = "1.3.1" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index dc0cc3b9..db6a2275 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,7 +3,7 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.3.0" +__version__: str = "1.3.1" try: if __package__ is not None: diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 398725e1..8a991703 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -28,9 +28,9 @@ class SDKConfiguration: server: Optional[str] = "" language: str = "python" openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.3.0" + sdk_version: str = "1.3.1" gen_version: str = "2.486.1" - user_agent: str = "speakeasy-sdk/python 1.3.0 2.486.1 0.0.2 mistralai" + user_agent: str = "speakeasy-sdk/python 1.3.1 2.486.1 0.0.2 mistralai" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From 97f10c19099f60265f6d36f7090a83e8cd63039c Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Mon, 20 Jan 2025 16:46:57 +0100 Subject: [PATCH 099/223] add fix flag (#179) --- .speakeasy/gen.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 40dee601..834cbc7f 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -26,7 +26,7 @@ python: enumFormat: union envVarPrefix: MISTRAL fixFlags: - responseRequiredSep2024: false + responseRequiredSep2024: true flattenGlobalSecurity: true flattenRequests: true flatteningOrder: parameters-first From 47cf89131b1b1ba3bfa695ef1649d940f99ea118 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Mon, 20 Jan 2025 17:59:21 +0100 Subject: [PATCH 100/223] fix multiple generation config + linting issue (#180) * repin latest * update gcp for 
proper type * proper formating * add newline in hook --- .speakeasy/workflow.yaml | 2 +- .../src/mistralai_azure/_hooks/custom_user_agent.py | 2 +- packages/mistralai_gcp/.speakeasy/gen.yaml | 2 +- packages/mistralai_gcp/src/mistralai_gcp/sdk.py | 3 +-- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index b3d2039f..db0b11d1 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -2,7 +2,7 @@ workflowVersion: 1.0.0 # speakeasyVersion is pinned to unblock https://github.com/mistralai/client-python/pull/173 # The speakeasy run was appending `_` to some attributes to avoid conflicts with reserved keywords. # This would have change the SDK APIs and break the existing clients which we don't want. -speakeasyVersion: 1.462.2 +speakeasyVersion: latest sources: mistral-azure-source: inputs: diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py index 2b71a96a..77df6aef 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py @@ -19,4 +19,4 @@ def before_request( PREFIX + current.split(" ")[1] ) - return request \ No newline at end of file + return request diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 583aaf5b..afa4d1d0 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -19,7 +19,7 @@ python: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 main: - google-auth: 2.27.0 + google-auth: ^2.31.0 requests: ^2.32.3 authors: - Mistral diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index d2b41dcf..abfea8db 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -30,8 +30,7 @@ def get_model_info(model: str) -> Tuple[str, str]: # if the model requiers the legacy fomat, use it, else do nothing. if model in LEGACY_MODEL_ID_FORMAT: return "-".join(model.split("-")[:-1]), LEGACY_MODEL_ID_FORMAT[model] - else: - return model, model + return model, model From d482b3cfd7f27d4f5a6a6a4b2a76b4c3d292a0f6 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Mon, 20 Jan 2025 18:11:46 +0100 Subject: [PATCH 101/223] Use poetry 2 in GH actions (#182) --- .github/workflows/sdk_generation_mistralai_azure_sdk.yaml | 3 --- .github/workflows/sdk_generation_mistralai_gcp_sdk.yaml | 3 --- .github/workflows/sdk_generation_mistralai_sdk.yaml | 3 --- .github/workflows/sdk_publish_mistralai_sdk.yaml | 6 +----- 4 files changed, 1 insertion(+), 14 deletions(-) diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml index a4c7e54b..7ec5bb8d 100644 --- a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -20,9 +20,6 @@ jobs: with: force: ${{ github.event.inputs.force }} mode: pr - # We need poetry < 2.0 to avoid the speakeasy run failure. - # This was fixed in speakasy v1.467.0 but we're locked at 1.462.2 waiting for other speakeasy fixes. - poetry_version: 1.8.5 set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest target: mistral-python-sdk-azure diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml index c1af10b9..c4da64f7 100644 --- a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -20,9 +20,6 @@ jobs: with: force: ${{ github.event.inputs.force }} mode: pr - # We need poetry < 2.0 to avoid the speakeasy run failure. 
- # This was fixed in speakasy v1.467.0 but we're locked at 1.462.2 waiting for other speakeasy fixes. - poetry_version: 1.8.5 set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest target: mistral-python-sdk-google-cloud diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index 3f034dac..7d0540e7 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -20,9 +20,6 @@ jobs: with: force: ${{ github.event.inputs.force }} mode: pr - # We need poetry < 2.0 to avoid the speakeasy run failure. - # This was fixed in speakasy v1.467.0 but we're locked at 1.462.2 waiting for other speakeasy fixes. - poetry_version: 1.8.5 set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest target: mistralai-sdk diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml index a7f08998..46af0ad3 100644 --- a/.github/workflows/sdk_publish_mistralai_sdk.yaml +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -10,14 +10,10 @@ permissions: - main paths: - RELEASES.md - - '*/RELEASES.md' + - "*/RELEASES.md" jobs: publish: uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@v15 - with: - # We need poetry < 2.0 to avoid the speakeasy run failure. - # This was fixed in speakasy v1.467.0 but we're locked at 1.462.2 waiting for other speakeasy fixes. 
- poetry_version: 1.8.5 secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} From 2611c827245060f1b226cde8cf2d17bb290e20ae Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 14:37:48 +0100 Subject: [PATCH 102/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.4.0=20(#183)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.469.11 * Add chat_prediction example * Git ignore .vscode/ --------- Co-authored-by: speakeasybot Co-authored-by: Alexandre Menasria --- .gitignore | 1 + .speakeasy/gen.lock | 188 ++++++++++-------- .speakeasy/gen.yaml | 3 +- .speakeasy/workflow.lock | 16 +- .speakeasy/workflow.yaml | 3 - README.md | 46 +---- RELEASES.md | 12 +- USAGE.md | 28 +-- docs/models/agentscompletionrequest.md | 3 +- docs/models/agentscompletionstreamrequest.md | 3 +- docs/models/chatcompletionrequest.md | 1 + docs/models/chatcompletionstreamrequest.md | 1 + docs/models/fileschema.md | 2 +- docs/models/function.md | 3 +- docs/models/prediction.md | 9 + docs/models/predictiontype.md | 8 + docs/models/retrievefileout.md | 2 +- docs/models/toolcall.md | 3 +- docs/models/uploadfileout.md | 2 +- docs/sdks/agents/README.md | 10 +- docs/sdks/chat/README.md | 10 +- docs/sdks/classifiers/README.md | 35 +++- docs/sdks/embeddings/README.md | 4 +- docs/sdks/files/README.md | 20 +- docs/sdks/fim/README.md | 8 +- docs/sdks/jobs/README.md | 22 +- docs/sdks/mistraljobs/README.md | 12 +- docs/sdks/models/README.md | 12 -- examples/chat_prediction.py | 38 ++++ poetry.lock | 57 +++++- pylintrc | 2 +- pyproject.toml | 2 +- src/mistralai/__init__.py | 11 +- src/mistralai/_version.py | 5 +- src/mistralai/agents.py | 72 +++++-- src/mistralai/chat.py | 72 +++++-- src/mistralai/classifiers.py | 52 +++-- 
src/mistralai/embeddings.py | 26 ++- src/mistralai/files.py | 128 ++++++++---- src/mistralai/fim.py | 44 ++-- src/mistralai/jobs.py | 112 ++++++++--- src/mistralai/mistral_jobs.py | 88 +++++--- src/mistralai/models/__init__.py | 5 + .../models/agentscompletionrequest.py | 5 + .../models/agentscompletionstreamrequest.py | 5 + src/mistralai/models/chatcompletionrequest.py | 5 + .../models/chatcompletionstreamrequest.py | 5 + src/mistralai/models/fileschema.py | 5 +- src/mistralai/models/function.py | 3 + src/mistralai/models/prediction.py | 26 +++ src/mistralai/models/retrievefileout.py | 5 +- src/mistralai/models/toolcall.py | 3 + src/mistralai/models/uploadfileout.py | 5 +- src/mistralai/models_.py | 140 ++++++++----- src/mistralai/sdk.py | 3 +- src/mistralai/sdkconfiguration.py | 14 +- 56 files changed, 927 insertions(+), 478 deletions(-) create mode 100644 docs/models/prediction.md create mode 100644 docs/models/predictiontype.md create mode 100644 examples/chat_prediction.py create mode 100644 src/mistralai/models/prediction.py diff --git a/.gitignore b/.gitignore index 767c3b29..ab3be6d0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.vscode/ .speakeasy/reports README-PYPI.md .venv/ diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 117dba88..722fd297 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 7ad277ed0527b2949ed9d503ce742fab + docChecksum: dbfa566129ede53f4e3b2c91e81f6f74 docVersion: 0.0.2 - speakeasyVersion: 1.462.2 - generationVersion: 2.486.1 - releaseVersion: 1.3.1 - configChecksum: 8f82dea19ae1fcd5cea1d4f7837fc137 + speakeasyVersion: 1.469.11 + generationVersion: 2.493.32 + releaseVersion: 1.4.0 + configChecksum: 46cde4e28fb5efba97051b54ac2e1c83 repoURL: https://github.com/mistralai/client-python.git installationURL: 
https://github.com/mistralai/client-python.git published: true @@ -14,12 +14,12 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.7.4 + core: 5.10.4 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.0 + examples: 3.0.1 flatRequests: 1.0.1 flattening: 3.1.0 globalSecurity: 3.0.2 @@ -165,6 +165,8 @@ generatedFiles: - docs/models/modellist.md - docs/models/object.md - docs/models/one.md + - docs/models/prediction.md + - docs/models/predictiontype.md - docs/models/queryparamstatus.md - docs/models/referencechunk.md - docs/models/referencechunktype.md @@ -322,6 +324,7 @@ generatedFiles: - src/mistralai/models/metricout.py - src/mistralai/models/modelcapabilities.py - src/mistralai/models/modellist.py + - src/mistralai/models/prediction.py - src/mistralai/models/referencechunk.py - src/mistralai/models/responseformat.py - src/mistralai/models/responseformats.py @@ -376,7 +379,7 @@ examples: speakeasy-default-list-models-v1-models-get: responses: "200": - application/json: {} + application/json: {"object": "list"} "422": application/json: {} retrieve_model_v1_models__model_id__get: @@ -386,7 +389,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": ""} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false}, "max_context_length": 32768} "422": {} delete_model_v1_models__model_id__delete: "": @@ -395,7 +398,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "deleted": true} + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} "422": {} 
jobs_api_routes_fine_tuning_update_fine_tuned_model: "": @@ -406,7 +409,7 @@ examples: application/json: {} responses: "200": - application/json: {"id": "", "created": 857478, "owned_by": "", "root": "", "archived": true, "capabilities": {}, "job": "905bf4aa-77f2-404e-b754-c352acfe5407"} + application/json: {"id": "", "created": 857478, "owned_by": "", "root": "", "archived": false, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false}, "max_context_length": 32768, "job": "5fa7f0e7-432c-4e47-acb6-0cc78135ddeb"} jobs_api_routes_fine_tuning_archive_fine_tuned_model: "": parameters: @@ -414,7 +417,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": ""} + application/json: {"id": "", "archived": true} jobs_api_routes_fine_tuning_unarchive_fine_tuned_model: "": parameters: @@ -422,167 +425,188 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": ""} + application/json: {"id": "", "archived": false} files_api_routes_upload_file: speakeasy-default-files-api-routes-upload-file: requestBody: - multipart/form-data: {"file": {}} + multipart/form-data: {"file": {"": "x-file: example.file"}} responses: "200": - application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "pretrain", "source": "upload"} + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_request", "source": "repository"} files_api_routes_list_files: speakeasy-default-files-api-routes-list-files: + parameters: + query: + page: 0 + page_size: 100 responses: "200": - application/json: {"data": [], "object": "", "total": 768578} + 
application/json: {"data": [{"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "batch_result", "source": "upload"}, {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "pretrain", "source": "repository"}, {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "pretrain", "source": "mistral"}], "object": "", "total": 86140} files_api_routes_retrieve_file: speakeasy-default-files-api-routes-retrieve-file: parameters: path: - file_id: "" + file_id: "" responses: "200": - application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "pretrain", "source": "repository", "deleted": false} + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_error", "source": "upload", "deleted": true} files_api_routes_delete_file: speakeasy-default-files-api-routes-delete-file: parameters: path: - file_id: "" + file_id: "" responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "deleted": false} + files_api_routes_download_file: + speakeasy-default-files-api-routes-download-file: + parameters: + path: + file_id: "" + responses: + "200": + application/octet-stream: "x-file: example.file" + files_api_routes_get_signed_url: + speakeasy-default-files-api-routes-get-signed-url: + parameters: + path: + file_id: "" + query: + expiry: 24 + responses: + "200": + application/json: {"url": 
"https://scornful-daughter.com/"} jobs_api_routes_fine_tuning_get_fine_tuning_jobs: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false responses: "200": application/json: {"total": 768578} jobs_api_routes_fine_tuning_create_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: requestBody: - application/json: {"model": "codestral-latest", "hyperparameters": {}} + application/json: {"model": "Fiesta", "hyperparameters": {"learning_rate": 0.0001}} responses: "200": - application/json: {"id": "a621cf02-1cd9-4cf5-8403-315211a509a3", "auto_start": false, "model": "2", "status": "FAILED", "job_type": "", "created_at": 550483, "modified_at": 906537, "training_files": ["74c2becc-3769-4177-b5e0-24985613de0e"]} + application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10} jobs_api_routes_fine_tuning_get_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: parameters: path: - job_id: "b18d8d81-fd7b-4764-a31e-475cb1f36591" + job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "58ccc65b-c928-4154-952e-30c048b8c2b5", "auto_start": false, "hyperparameters": {}, "model": "open-mistral-nemo", "status": "VALIDATED", "job_type": "", "created_at": 968091, "modified_at": 32069, "training_files": [], "checkpoints": []} + application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "checkpoints": 
[{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_cancel_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: parameters: path: - job_id: "03fa7112-315a-4072-a9f2-43f3f1ec962e" + job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "fb7dec95-f740-47b2-b8ee-d9b046936a67", "auto_start": true, "hyperparameters": {}, "model": "mistral-large-latest", "status": "VALIDATED", "job_type": "", "created_at": 252151, "modified_at": 56775, "training_files": [], "checkpoints": []} + application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, "modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_start_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: parameters: path: - job_id: "0eb0f807-fb9f-4e46-9c13-4e257df6e1ba" + job_id: "0bf0f9e6-c3e5-4d61-aac8-0e36dcac0dfc" + responses: + "200": + application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, {"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} + 
jobs_api_routes_batch_get_batch_jobs: + speakeasy-default-jobs-api-routes-batch-get-batch-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + responses: + "200": + application/json: {"total": 768578} + jobs_api_routes_batch_create_batch_job: + speakeasy-default-jobs-api-routes-batch-create-batch-job: + requestBody: + application/json: {"input_files": ["a621cf02-1cd9-4cf5-8403-315211a509a3"], "endpoint": "/v1/fim/completions", "model": "2", "timeout_hours": 24} + responses: + "200": + application/json: {"id": "", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} + jobs_api_routes_batch_get_batch_job: + speakeasy-default-jobs-api-routes-batch-get-batch-job: + parameters: + path: + job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" + responses: + "200": + application/json: {"id": "", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} + jobs_api_routes_batch_cancel_batch_job: + speakeasy-default-jobs-api-routes-batch-cancel-batch-job: + parameters: + path: + job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "bc3810ce-43e6-4fde-85a4-cd01d1f9cf8f", "auto_start": true, "hyperparameters": {}, "model": "codestral-latest", "status": "RUNNING", "job_type": "", "created_at": 186591, "modified_at": 451468, "training_files": [], "checkpoints": 
[]} + application/json: {"id": "", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "422": application/json: {} - "200": {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: - application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} stream_fim: speakeasy-default-stream-fim: requestBody: - application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} responses: "422": application/json: {} - "200": {} agents_completion_v1_agents_completions_post: speakeasy-default-agents-completion-v1-agents-completions-post: requestBody: - application/json: {"messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "agent_id": ""} + application/json: {"stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "agent_id": ""} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} stream_agents: speakeasy-default-stream-agents: requestBody: - application/json: {"messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "agent_id": ""} + application/json: {"stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "agent_id": ""} responses: "422": application/json: {} - "200": {} embeddings_v1_embeddings_post: speakeasy-default-embeddings-v1-embeddings-post: requestBody: - application/json: {"input": ["Embed this sentence.", "As well as this one."], "model": "Wrangler"} + application/json: {"input": ["Embed this sentence.", "As well as this one."], "model": "mistral-embed"} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} "422": application/json: {} - files_api_routes_download_file: - speakeasy-default-files-api-routes-download-file: - parameters: - path: - file_id: "" - responses: - "200": - application/octet-stream: "x-file: example.file" - jobs_api_routes_batch_get_batch_jobs: - speakeasy-default-jobs-api-routes-batch-get-batch-jobs: - responses: - "200": - application/json: {"total": 768578} - jobs_api_routes_batch_create_batch_job: - speakeasy-default-jobs-api-routes-batch-create-batch-job: - requestBody: - application/json: {"input_files": ["a621cf02-1cd9-4cf5-8403-315211a509a3"], "endpoint": "/v1/fim/completions", "model": "2"} - responses: - "200": - application/json: {"id": "", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": ""}, {"message": ""}, {"message": ""}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} - jobs_api_routes_batch_get_batch_job: - speakeasy-default-jobs-api-routes-batch-get-batch-job: - parameters: - path: - job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" - responses: - 
"200": - application/json: {"id": "", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": ""}, {"message": ""}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} - jobs_api_routes_batch_cancel_batch_job: - speakeasy-default-jobs-api-routes-batch-cancel-batch-job: - parameters: - path: - job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" - responses: - "200": - application/json: {"id": "", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": ""}, {"message": ""}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} moderations_v1_moderations_post: speakeasy-default-moderations-v1-moderations-post: requestBody: @@ -595,19 +619,11 @@ examples: moderations_chat_v1_chat_moderations_post: speakeasy-default-moderations-chat-v1-chat-moderations-post: requestBody: - application/json: {"input": [[{"content": ""}, {"content": ""}, {"content": ""}], [{"content": ""}]], "model": "V90"} + application/json: {"input": [[{"content": "", "role": "tool"}, {"content": "", "role": "tool"}, {"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}]], "model": "Roadster"} responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} "422": application/json: {} - files_api_routes_get_signed_url: - speakeasy-default-files-api-routes-get-signed-url: - parameters: - path: - file_id: "" - query: {} - responses: - "200": - application/json: {"url": 
"https://scornful-daughter.com/"} +examplesVersion: 1.0.0 generatedTests: {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 834cbc7f..30a10834 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.3.1 + version: 1.4.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -23,6 +23,7 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API. + enableCustomCodeRegions: false enumFormat: union envVarPrefix: MISTRAL fixFlags: diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 063d062a..36a74525 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.462.2 +speakeasyVersion: 1.469.11 sources: mistral-azure-source: sourceNamespace: mistral-azure-source @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:84bbc6f6011a31e21c8a674b01104446f986c7b5a6b002357800be8ef939b8da - sourceBlobDigest: sha256:ebc7c1bb20aa87873a255cebea1e451099d8949ea1bbff81ec5fd45a107e3a32 + sourceRevisionDigest: sha256:c414dd5eecca5f02fe9012a1d131f696e0257fe100c371609272dbc6c522ef07 + sourceBlobDigest: sha256:f48af039106d00de84345fd095fbf4831f18fbeeef07e9ff7bba70a0e07eda07 tags: - latest - - speakeasy-sdk-regen-1736937863 + - speakeasy-sdk-regen-1737393201 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,13 +37,13 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:84bbc6f6011a31e21c8a674b01104446f986c7b5a6b002357800be8ef939b8da - sourceBlobDigest: sha256:ebc7c1bb20aa87873a255cebea1e451099d8949ea1bbff81ec5fd45a107e3a32 + sourceRevisionDigest: sha256:c414dd5eecca5f02fe9012a1d131f696e0257fe100c371609272dbc6c522ef07 + sourceBlobDigest: 
sha256:f48af039106d00de84345fd095fbf4831f18fbeeef07e9ff7bba70a0e07eda07 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:7461afcdcac02dc78b61b234ee4c5e25abbaca9ad6cf5aab415e7c97b5638b49 + codeSamplesRevisionDigest: sha256:3f61d33c46733b24ecd422423900425b381529da038992e59bdb5a9b766bdf89 workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.462.2 + speakeasyVersion: latest sources: mistral-azure-source: inputs: diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index db0b11d1..3b3c6d55 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,7 +1,4 @@ workflowVersion: 1.0.0 -# speakeasyVersion is pinned to unblock https://github.com/mistralai/client-python/pull/173 -# The speakeasy run was appending `_` to some attributes to avoid conflicts with reserved keywords. -# This would have change the SDK APIs and break the existing clients which we don't want. speakeasyVersion: latest sources: mistral-azure-source: diff --git a/README.md b/README.md index 501e0ba6..dc492590 100644 --- a/README.md +++ b/README.md @@ -92,9 +92,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) - - assert res is not None + ], stream=False) # Handle response print(res) @@ -119,9 +117,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) - - assert res is not None + ], stream=False) # Handle response print(res) @@ -147,8 +143,6 @@ with Mistral( "content": open("example.file", "rb"), }) - assert res is not None - # Handle response print(res) ``` @@ -172,8 +166,6 @@ async def main(): "content": open("example.file", "rb"), }) - assert res is not None - # Handle response print(res) @@ -198,9 +190,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="") - - assert res is not None + ], agent_id="", stream=False) # Handle response print(res) @@ -225,9 +215,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") - - assert res is not None + ], agent_id="", stream=False) # Handle response print(res) @@ -251,9 +239,7 @@ with Mistral( res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", - ], model="Wrangler") - - assert res is not None + ], model="mistral-embed") # Handle response print(res) @@ -276,9 +262,7 @@ async def main(): res = await mistral.embeddings.create_async(inputs=[ "Embed this sentence.", "As well as this one.", - ], model="Wrangler") - - assert res is not None + ], model="mistral-embed") # Handle response print(res) @@ -480,9 +464,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) - - assert res is not None + ], stream=True) with res as event_stream: for event in event_stream: @@ -519,8 +501,6 @@ with Mistral( "content": open("example.file", "rb"), }) - assert res is not None - # Handle response print(res) @@ -545,8 +525,6 @@ with Mistral( res = mistral.models.list(, RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) - assert res is not None - # Handle response print(res) @@ -565,8 +543,6 @@ with Mistral( res = mistral.models.list() - assert res is not None - # Handle response print(res) @@ -608,8 +584,6 @@ with Mistral( res = mistral.models.list() - assert res is not None - # Handle response print(res) @@ -646,8 +620,6 @@ with Mistral( res = mistral.models.list() - assert res is not None - # Handle response print(res) @@ -667,8 +639,6 @@ with Mistral( res = mistral.models.list() - assert res is not None - # Handle response print(res) @@ -778,8 +748,6 @@ with Mistral( res = mistral.models.list() - assert res is not None - # Handle response 
print(res) diff --git a/RELEASES.md b/RELEASES.md index 7c74ac75..f441230f 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -138,4 +138,14 @@ Based on: ### Generated - [python v1.3.1] . ### Releases -- [PyPI v1.3.1] https://pypi.org/project/mistralai/1.3.1 - . \ No newline at end of file +- [PyPI v1.3.1] https://pypi.org/project/mistralai/1.3.1 - . + +## 2025-01-21 11:09:53 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.469.11 (2.493.32) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.4.0] . +### Releases +- [PyPI v1.4.0] https://pypi.org/project/mistralai/1.4.0 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index 88762965..3e1cae03 100644 --- a/USAGE.md +++ b/USAGE.md @@ -17,9 +17,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) - - assert res is not None + ], stream=False) # Handle response print(res) @@ -44,9 +42,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) - - assert res is not None + ], stream=False) # Handle response print(res) @@ -72,8 +68,6 @@ with Mistral( "content": open("example.file", "rb"), }) - assert res is not None - # Handle response print(res) ``` @@ -97,8 +91,6 @@ async def main(): "content": open("example.file", "rb"), }) - assert res is not None - # Handle response print(res) @@ -123,9 +115,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") - - assert res is not None + ], agent_id="", stream=False) # Handle response print(res) @@ -150,9 +140,7 @@ async def main(): "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="") - - assert res is not None + ], agent_id="", stream=False) # Handle response print(res) @@ -176,9 +164,7 @@ with Mistral( res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", - ], model="Wrangler") - - assert res is not None + ], model="mistral-embed") # Handle response print(res) @@ -201,9 +187,7 @@ async def main(): res = await mistral.embeddings.create_async(inputs=[ "Embed this sentence.", "As well as this one.", - ], model="Wrangler") - - assert res is not None + ], model="mistral-embed") # Handle response print(res) diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index c4259f2b..8976849d 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -16,4 +16,5 @@ | `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index 21e19b56..3a6c8a12 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -16,4 +16,5 @@ | `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index d458081d..ac743583 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -19,4 +19,5 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 63865c11..8ca0f21f 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -19,4 +19,5 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
| | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/fileschema.md b/docs/models/fileschema.md index 47fa4869..4fbcd718 100644 --- a/docs/models/fileschema.md +++ b/docs/models/fileschema.md @@ -7,7 +7,7 @@ | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | | `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | | `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | | `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. 
| files_upload.jsonl | | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | diff --git a/docs/models/function.md b/docs/models/function.md index 8af398f5..a166b7bb 100644 --- a/docs/models/function.md +++ b/docs/models/function.md @@ -7,4 +7,5 @@ | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | | `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/prediction.md b/docs/models/prediction.md new file mode 100644 index 00000000..578cdcee --- /dev/null +++ b/docs/models/prediction.md @@ -0,0 +1,9 @@ +# Prediction + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `type` | [Optional[models.PredictionType]](../models/predictiontype.md) | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/predictiontype.md b/docs/models/predictiontype.md new file mode 100644 index 00000000..67ff99e2 --- /dev/null +++ b/docs/models/predictiontype.md @@ -0,0 +1,8 @@ +# PredictionType + + +## Values + +| Name | Value | +| --------- | --------- | +| `CONTENT` | content | \ No newline at end of file diff --git a/docs/models/retrievefileout.md b/docs/models/retrievefileout.md index 93aa5026..30861f5c 100644 --- a/docs/models/retrievefileout.md +++ b/docs/models/retrievefileout.md @@ -7,7 +7,7 @@ | ---------------------------------------------- | 
---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | | `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | | `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | | `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md index 7aca5fc9..574be1ea 100644 --- a/docs/models/toolcall.md +++ b/docs/models/toolcall.md @@ -7,4 +7,5 @@ | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/uploadfileout.md b/docs/models/uploadfileout.md index c9974436..cf3c5994 100644 --- a/docs/models/uploadfileout.md +++ b/docs/models/uploadfileout.md @@ -7,7 +7,7 @@ | ---------------------------------------------- | 
---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | | `id` | *str* | :heavy_check_mark: | The unique identifier of the file. | 497f6eca-6276-4993-bfeb-53cbbbba6f09 | | `object` | *str* | :heavy_check_mark: | The object type, which is always "file". | file | -| `bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | +| `size_bytes` | *int* | :heavy_check_mark: | The size of the file, in bytes. | 13000 | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | 1716963433 | | `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 3675fad9..00ca33ac 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -29,9 +29,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") - - assert res is not None + ], agent_id="", stream=False) # Handle response print(res) @@ -54,6 +52,7 @@ with Mistral( | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
| | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -86,9 +85,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") - - assert res is not None + ], agent_id="", stream=True) with res as event_stream: for event in event_stream: @@ -113,6 +110,7 @@ with Mistral( | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 8ab0eb6b..dbdfba27 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -29,9 +29,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) - - assert res is not None + ], stream=False) # Handle response print(res) @@ -56,6 +54,7 @@ with Mistral( | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -89,9 +88,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ]) - - assert res is not None + ], stream=True) with res as event_stream: for event in event_stream: @@ -118,6 +115,7 @@ with Mistral( | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 37ee3e10..185711a7 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -28,8 +28,6 @@ with Mistral( "", ]) - assert res is not None - # Handle response print(res) @@ -71,16 +69,33 @@ with Mistral( res = mistral.classifiers.moderate_chat(inputs=[ [ { - "content": [ - { - "text": "", - }, - ], + "content": "", + "role": "tool", + }, + { + "content": "", + "role": "tool", + }, + { + "content": "", + "role": "tool", }, ], - ], model="V90") - - assert res is not None + [ + { + "prefix": False, + "role": "assistant", + }, + { + "content": "", + "role": "user", + }, + { + "prefix": False, + "role": "assistant", + }, + ], + ], model="Roadster") # Handle response print(res) diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index b3610f78..899c626f 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -26,9 +26,7 @@ with Mistral( res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", - ], model="Wrangler") - - assert res is not None + ], model="mistral-embed") # Handle response print(res) diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 63a0023c..8f01a668 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -37,8 +37,6 @@ with Mistral( "content": open("example.file", "rb"), }) - assert res is not None - # Handle response print(res) @@ -76,9 +74,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.list() - - assert res is not None + res = mistral.files.list(page=0, page_size=100) # Handle response print(res) @@ -121,9 +117,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.retrieve(file_id="") - - assert res is not None + res = mistral.files.retrieve(file_id="") # Handle response print(res) @@ -161,9 +155,7 @@ with Mistral( 
api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.delete(file_id="") - - assert res is not None + res = mistral.files.delete(file_id="") # Handle response print(res) @@ -203,8 +195,6 @@ with Mistral( res = mistral.files.download(file_id="") - assert res is not None - # Handle response print(res) @@ -241,9 +231,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.get_signed_url(file_id="") - - assert res is not None + res = mistral.files.get_signed_url(file_id="", expiry=24) # Handle response print(res) diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 06099974..0339c213 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -24,9 +24,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") - - assert res is not None + res = mistral.fim.complete(model="codestral-2405", prompt="def", top_p=1, stream=False, suffix="return a+b") # Handle response print(res) @@ -74,9 +72,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") - - assert res is not None + res = mistral.fim.stream(model="codestral-2405", prompt="def", top_p=1, stream=True, suffix="return a+b") with res as event_stream: for event in event_stream: diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 92406630..06605877 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -25,9 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.list() - - assert res is not None + res = mistral.fine_tuning.jobs.list(page=0, page_size=100, created_by_me=False) # Handle response print(res) @@ -73,9 +71,9 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = 
mistral.fine_tuning.jobs.create(model="codestral-latest", hyperparameters={}) - - assert res is not None + res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={ + "learning_rate": 0.0001, + }) # Handle response print(res) @@ -120,9 +118,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.get(job_id="b18d8d81-fd7b-4764-a31e-475cb1f36591") - - assert res is not None + res = mistral.fine_tuning.jobs.get(job_id="b888f774-3e7c-4135-a18c-6b985523c4bc") # Handle response print(res) @@ -160,9 +156,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.cancel(job_id="03fa7112-315a-4072-a9f2-43f3f1ec962e") - - assert res is not None + res = mistral.fine_tuning.jobs.cancel(job_id="0f713502-9233-41c6-9ebd-c570b7edb496") # Handle response print(res) @@ -200,9 +194,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.start(job_id="0eb0f807-fb9f-4e46-9c13-4e257df6e1ba") - - assert res is not None + res = mistral.fine_tuning.jobs.start(job_id="0bf0f9e6-c3e5-4d61-aac8-0e36dcac0dfc") # Handle response print(res) diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 118cfccd..56a7f60b 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -24,9 +24,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.list() - - assert res is not None + res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False) # Handle response print(res) @@ -72,9 +70,7 @@ with Mistral( res = mistral.batch.jobs.create(input_files=[ "a621cf02-1cd9-4cf5-8403-315211a509a3", - ], endpoint="/v1/fim/completions", model="2") - - assert res is not None + ], endpoint="/v1/fim/completions", model="2", timeout_hours=24) # Handle response print(res) @@ -118,8 +114,6 @@ with Mistral( res = 
mistral.batch.jobs.get(job_id="b888f774-3e7c-4135-a18c-6b985523c4bc") - assert res is not None - # Handle response print(res) @@ -158,8 +152,6 @@ with Mistral( res = mistral.batch.jobs.cancel(job_id="0f713502-9233-41c6-9ebd-c570b7edb496") - assert res is not None - # Handle response print(res) diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 080b355f..e048d20e 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -30,8 +30,6 @@ with Mistral( res = mistral.models.list() - assert res is not None - # Handle response print(res) @@ -70,8 +68,6 @@ with Mistral( res = mistral.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") - assert res is not None - # Handle response print(res) @@ -111,8 +107,6 @@ with Mistral( res = mistral.models.delete(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") - assert res is not None - # Handle response print(res) @@ -152,8 +146,6 @@ with Mistral( res = mistral.models.update(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") - assert res is not None - # Handle response print(res) @@ -194,8 +186,6 @@ with Mistral( res = mistral.models.archive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") - assert res is not None - # Handle response print(res) @@ -234,8 +224,6 @@ with Mistral( res = mistral.models.unarchive(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") - assert res is not None - # Handle response print(res) diff --git a/examples/chat_prediction.py b/examples/chat_prediction.py new file mode 100644 index 00000000..1ff87e3f --- /dev/null +++ b/examples/chat_prediction.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + +import os + +from mistralai import Mistral +from mistralai.models import UserMessage + + +def main(): + + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + code = """class Cheese(BaseModel): + name: str + type: str + country_of_origin: str + +my_cheese = Cheese(name="Brie", type="Soft", 
country_of_origin="France") +""" + refactor_prompt = 'Add a "price" property of type float to the Cheese class. Respond only with code, no explanation, no formatting.' + + chat_response = client.chat.complete( + model="codestral-latest", + messages=[ + UserMessage(content=refactor_prompt), + UserMessage(content=code) + ], + prediction= { + "type": "content", + "content": refactor_prompt, + } + ) + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + main() diff --git a/poetry.lock b/poetry.lock index e93eb66b..154485d2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -6,6 +6,7 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -20,6 +21,7 @@ version = "4.5.2" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, @@ -42,6 +44,7 @@ version = "3.2.4" description = "An abstract syntax tree for Python with inference support." 
optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, @@ -56,6 +59,8 @@ version = "5.5.0" description = "Extensible memoizing collections and decorators" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, @@ -67,6 +72,7 @@ version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, @@ -78,6 +84,8 @@ version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = true python-versions = ">=3.7.0" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, @@ -192,6 +200,8 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +markers = "sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -203,6 +213,7 @@ version = "0.3.9" description = "serialize all of Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, @@ -218,6 +229,7 @@ version = "0.2.0" description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, @@ -232,6 +244,8 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -246,6 +260,8 @@ version = "2.27.0" description = "Google Authentication Library" optional = true python-versions = ">=3.7" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "google-auth-2.27.0.tar.gz", hash = 
"sha256:e863a56ccc2d8efa83df7a80272601e43487fa9a728a376205c86c26aaefa821"}, {file = "google_auth-2.27.0-py2.py3-none-any.whl", hash = "sha256:8e4bad367015430ff253fe49d500fdc3396c1a434db5740828c728e45bcce245"}, @@ -269,6 +285,7 @@ version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -280,6 +297,7 @@ version = "1.0.7" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, @@ -301,6 +319,7 @@ version = "0.27.2" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, @@ -326,6 +345,7 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -340,6 +360,7 @@ version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, @@ -351,6 +372,7 @@ version = "5.13.2" description = "A Python utility / library to sort Python imports." 
optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, @@ -365,6 +387,7 @@ version = "1.0.6" description = "A more powerful JSONPath implementation in modern python" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, @@ -376,6 +399,7 @@ version = "0.7.0" description = "McCabe checker, plugin for flake8" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, @@ -387,6 +411,7 @@ version = "1.13.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, @@ -440,6 +465,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.5" +groups = ["main", "dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -451,6 +477,7 @@ version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, @@ -462,6 +489,7 @@ version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, @@ -478,6 +506,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -493,6 +522,8 @@ version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, {file = 
"pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, @@ -504,6 +535,8 @@ version = "0.4.1" description = "A collection of ASN.1-based protocols modules" optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, @@ -518,6 +551,7 @@ version = "2.10.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, @@ -538,6 +572,7 @@ version = "2.27.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, @@ -650,6 +685,7 @@ version = "3.2.3" description = "python code static checker" optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, @@ -680,6 +716,7 @@ version = "8.3.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" 
+groups = ["dev"] files = [ {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, @@ -702,6 +739,7 @@ version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, @@ -720,6 +758,7 @@ version = "2.9.0.post0" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] files = [ {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, @@ -734,6 +773,8 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -755,6 +796,8 @@ version = "4.2" description = "Pure-Python RSA implementation" optional = true python-versions = "*" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "rsa-4.2.tar.gz", hash = "sha256:aaefa4b84752e3e99bd8333a2e1e3e7a7da64614042bd66f775573424370108a"}, ] @@ -768,6 +811,7 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["main"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -779,6 +823,7 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -790,6 +835,8 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" +groups = ["dev"] +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, @@ -831,6 +878,7 @@ version = "0.13.2" 
description = "Style preserving TOML library" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, @@ -842,6 +890,7 @@ version = "2.9.0.20241003" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, @@ -853,6 +902,7 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, @@ -864,6 +914,7 @@ version = "0.9.0" description = "Runtime inspection utilities for typing module." optional = false python-versions = "*" +groups = ["main"] files = [ {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, @@ -879,6 +930,8 @@ version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = true python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"gcp\"" files = [ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, @@ -894,6 +947,6 @@ zstd = ["zstandard (>=0.18.0)"] gcp = ["google-auth", "requests"] [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = ">=3.8" content-hash = "f0f19d81d36ebe966895f21a0a9dd33118783904418f4103189c475e5903b958" diff --git a/pylintrc b/pylintrc index 393d0f70..9d193c42 100644 --- a/pylintrc +++ b/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.8 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no diff --git a/pyproject.toml b/pyproject.toml index 782e372e..79ebece2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.3.1" +version = "1.4.0" description = "Python Client SDK for the Mistral AI API." authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/__init__.py b/src/mistralai/__init__.py index a1b7f626..dd02e42e 100644 --- a/src/mistralai/__init__.py +++ b/src/mistralai/__init__.py @@ -1,9 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from ._version import __title__, __version__ +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) from .sdk import * from .sdkconfiguration import * from .models import * VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index db6a2275..b39db764 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,7 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.3.1" +__version__: str = "1.4.0" +__openapi_doc_version__: str = "0.0.2" +__gen_version__: str = "2.493.32" +__user_agent__: str = "speakeasy-sdk/python 1.4.0 2.493.32 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 621224e0..05fd165c 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -43,11 +43,14 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Agents Completion :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. @@ -62,6 +65,7 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
:param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -93,6 +97,9 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), agent_id=agent_id, ) @@ -138,13 +145,16 @@ def complete( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) + return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -191,11 +201,14 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, 
models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Agents Completion :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. @@ -210,6 +223,7 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
+ :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -241,6 +255,9 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), agent_id=agent_id, ) @@ -286,13 +303,16 @@ async def complete_async( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) + return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -339,11 +359,14 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: + ) -> eventstreaming.EventStream[models.CompletionEvent]: r"""Stream Agents 
completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -360,6 +383,7 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
+ :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -391,6 +415,9 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), agent_id=agent_id, ) @@ -446,7 +473,12 @@ def stream( http_res_text = utils.stream_to_text(http_res) data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -493,11 +525,14 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: r"""Stream Agents completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -514,6 +549,7 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -545,6 +581,9 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), agent_id=agent_id, ) @@ -600,7 +639,12 @@ async def stream_async( http_res_text = await utils.stream_to_text_async(http_res) data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, 
http_res_text, http_res diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 9e07f784..547cc043 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -37,12 +37,15 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Chat Completion :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. @@ -59,6 +62,7 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -92,6 +96,9 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -137,13 +144,16 @@ def complete( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) + return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -184,12 +194,15 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Chat Completion :param model: ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. @@ -206,6 +219,7 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -239,6 +253,9 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -284,13 +301,16 @@ async def complete_async( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] - ) + return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -339,12 +359,15 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: + ) -> eventstreaming.EventStream[models.CompletionEvent]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -363,6 +386,7 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
:param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -398,6 +422,9 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -453,7 +480,12 @@ def stream( http_res_text = utils.stream_to_text(http_res) data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -502,12 +534,15 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. 
Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -526,6 +561,7 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -561,6 +597,9 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -616,7 +655,12 @@ async def stream_async( http_res_text = await utils.stream_to_text_async(http_res) data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index 55253a11..af54e96e 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -23,7 +23,7 @@ def moderate( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ClassificationResponse]: + ) -> models.ClassificationResponse: r"""Moderations :param inputs: Text to classify. 
@@ -88,13 +88,16 @@ def moderate( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ClassificationResponse] - ) + return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -121,7 +124,7 @@ async def moderate_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ClassificationResponse]: + ) -> models.ClassificationResponse: r"""Moderations :param inputs: Text to classify. 
@@ -186,13 +189,16 @@ async def moderate_async( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ClassificationResponse] - ) + return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -219,7 +225,7 @@ def moderate_chat( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ClassificationResponse]: + ) -> models.ClassificationResponse: r"""Moderations Chat :param inputs: Chat to classify @@ -286,13 +292,16 @@ def moderate_chat( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ClassificationResponse] - ) + return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, 
"5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -319,7 +328,7 @@ async def moderate_chat_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ClassificationResponse]: + ) -> models.ClassificationResponse: r"""Moderations Chat :param inputs: Chat to classify @@ -386,13 +395,16 @@ async def moderate_chat_async( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ClassificationResponse] - ) + return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index bf80861d..524f09c7 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -21,7 +21,7 @@ def create( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.EmbeddingResponse]: + ) -> models.EmbeddingResponse: r"""Embeddings Embeddings @@ -90,13 +90,16 @@ def create( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, 
Optional[models.EmbeddingResponse] - ) + return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -121,7 +124,7 @@ async def create_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.EmbeddingResponse]: + ) -> models.EmbeddingResponse: r"""Embeddings Embeddings @@ -190,13 +193,16 @@ async def create_async( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.EmbeddingResponse] - ) + return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 
aefa025a..042e4aea 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -21,7 +21,7 @@ def upload( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.UploadFileOut]: + ) -> models.UploadFileOut: r"""Upload File Upload a file that can be used across various endpoints. @@ -95,8 +95,13 @@ def upload( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.UploadFileOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -120,7 +125,7 @@ async def upload_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.UploadFileOut]: + ) -> models.UploadFileOut: r"""Upload File Upload a file that can be used across various endpoints. 
@@ -194,8 +199,13 @@ async def upload_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.UploadFileOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.UploadFileOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -223,7 +233,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ListFilesOut]: + ) -> models.ListFilesOut: r"""List Files Returns a list of files that belong to the user's organization. @@ -294,8 +304,13 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.ListFilesOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -323,7 +338,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ListFilesOut]: + ) -> models.ListFilesOut: r"""List Files Returns a list of files that belong to the user's organization. 
@@ -394,8 +409,13 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ListFilesOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.ListFilesOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -418,7 +438,7 @@ def retrieve( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.RetrieveFileOut]: + ) -> models.RetrieveFileOut: r"""Retrieve File Returns information about a specific file. @@ -479,8 +499,13 @@ def retrieve( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.RetrieveFileOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -503,7 +528,7 @@ async def retrieve_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.RetrieveFileOut]: + ) -> models.RetrieveFileOut: r"""Retrieve File Returns information about a specific file. 
@@ -564,8 +589,13 @@ async def retrieve_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.RetrieveFileOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.RetrieveFileOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -588,7 +618,7 @@ def delete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.DeleteFileOut]: + ) -> models.DeleteFileOut: r"""Delete File Delete a file. @@ -649,8 +679,13 @@ def delete( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.DeleteFileOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -673,7 +708,7 @@ async def delete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.DeleteFileOut]: + ) -> models.DeleteFileOut: r"""Delete File Delete a file. 
@@ -734,8 +769,13 @@ async def delete_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.DeleteFileOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.DeleteFileOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -758,7 +798,7 @@ def download( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[httpx.Response]: + ) -> httpx.Response: r"""Download File Download a file @@ -821,7 +861,12 @@ def download( if utils.match_response(http_res, "200", "application/octet-stream"): return http_res - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -844,7 +889,7 @@ async def download_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[httpx.Response]: + ) -> httpx.Response: r"""Download File Download a file @@ -907,7 +952,12 @@ async def download_async( if utils.match_response(http_res, "200", "application/octet-stream"): return http_res - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -931,7 +981,7 @@ def get_signed_url( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.FileSignedURL]: + ) -> models.FileSignedURL: r"""Get Signed Url :param file_id: @@ -992,8 +1042,13 @@ def get_signed_url( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FileSignedURL]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.FileSignedURL) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -1017,7 +1072,7 @@ async def get_signed_url_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.FileSignedURL]: + ) -> models.FileSignedURL: r"""Get Signed Url :param file_id: @@ -1078,8 +1133,13 @@ async def get_signed_url_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FileSignedURL]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.FileSignedURL) + if utils.match_response(http_res, "4XX", 
"*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 60a4fd6c..40e596be 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -33,7 +33,7 @@ def complete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.FIMCompletionResponse]: + ) -> models.FIMCompletionResponse: r"""Fim Completion FIM completion. @@ -116,13 +116,16 @@ def complete( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.FIMCompletionResponse] - ) + return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -159,7 +162,7 @@ async def complete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.FIMCompletionResponse]: + ) -> models.FIMCompletionResponse: r"""Fim Completion FIM completion. 
@@ -242,13 +245,16 @@ async def complete_async( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.FIMCompletionResponse] - ) + return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -285,7 +291,7 @@ def stream( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: + ) -> eventstreaming.EventStream[models.CompletionEvent]: r"""Stream fim completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -378,7 +384,12 @@ def stream( http_res_text = utils.stream_to_text(http_res) data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -415,7 +426,7 @@ async def stream_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: r"""Stream fim completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. 
@@ -508,7 +519,12 @@ async def stream_async( http_res_text = await utils.stream_to_text_async(http_res) data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index afa1ff44..ea66bfc6 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -26,7 +26,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.JobsOut]: + ) -> models.JobsOut: r"""Get Fine Tuning Jobs Get a list of fine-tuning jobs for your organization and user. 
@@ -103,8 +103,13 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.JobsOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -135,7 +140,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.JobsOut]: + ) -> models.JobsOut: r"""Get Fine Tuning Jobs Get a list of fine-tuning jobs for your organization and user. @@ -212,8 +217,13 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.JobsOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.JobsOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -255,7 +265,7 @@ def create( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]: + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: r"""Create Fine Tuning Job 
Create a new fine-tuning job, it will be queued for processing. @@ -342,10 +352,14 @@ def create( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( - http_res.text, - Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse], + http_res.text, models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res ) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -387,7 +401,7 @@ async def create_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse]: + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: r"""Create Fine Tuning Job Create a new fine-tuning job, it will be queued for processing. 
@@ -474,10 +488,14 @@ async def create_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( - http_res.text, - Optional[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse], + http_res.text, models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res ) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -500,7 +518,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.DetailedJobOut]: + ) -> models.DetailedJobOut: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. 
@@ -561,8 +579,13 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -585,7 +608,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.DetailedJobOut]: + ) -> models.DetailedJobOut: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. @@ -646,8 +669,13 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -670,7 +698,7 @@ def cancel( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.DetailedJobOut]: + ) -> models.DetailedJobOut: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. 
@@ -731,8 +759,13 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -755,7 +788,7 @@ async def cancel_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.DetailedJobOut]: + ) -> models.DetailedJobOut: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. @@ -816,8 +849,13 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -840,7 +878,7 @@ def start( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.DetailedJobOut]: + ) -> models.DetailedJobOut: r"""Start Fine Tuning Job Request the start of a validated fine 
tuning job. @@ -901,8 +939,13 @@ def start( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -925,7 +968,7 @@ async def start_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.DetailedJobOut]: + ) -> models.DetailedJobOut: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. 
@@ -986,8 +1029,13 @@ async def start_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.DetailedJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index 8642d9fa..fe6b266a 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -24,7 +24,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.BatchJobsOut]: + ) -> models.BatchJobsOut: r"""Get Batch Jobs Get a list of batch jobs for your organization and user. 
@@ -97,8 +97,13 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.BatchJobsOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.BatchJobsOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -127,7 +132,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.BatchJobsOut]: + ) -> models.BatchJobsOut: r"""Get Batch Jobs Get a list of batch jobs for your organization and user. @@ -200,8 +205,13 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.BatchJobsOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.BatchJobsOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -228,7 +238,7 @@ def create( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.BatchJobOut]: + ) -> models.BatchJobOut: r"""Create Batch Job Create a new batch job, it will be queued for processing. 
@@ -300,8 +310,13 @@ def create( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.BatchJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -328,7 +343,7 @@ async def create_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.BatchJobOut]: + ) -> models.BatchJobOut: r"""Create Batch Job Create a new batch job, it will be queued for processing. @@ -400,8 +415,13 @@ async def create_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.BatchJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -424,7 +444,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.BatchJobOut]: + ) -> models.BatchJobOut: r"""Get Batch Job Get a batch job details by its UUID. 
@@ -485,8 +505,13 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.BatchJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -509,7 +534,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.BatchJobOut]: + ) -> models.BatchJobOut: r"""Get Batch Job Get a batch job details by its UUID. @@ -570,8 +595,13 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.BatchJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -594,7 +624,7 @@ def cancel( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.BatchJobOut]: + ) -> models.BatchJobOut: r"""Cancel Batch Job Request the cancellation of a batch job. 
@@ -655,8 +685,13 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.BatchJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -679,7 +714,7 @@ async def cancel_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.BatchJobOut]: + ) -> models.BatchJobOut: r"""Cancel Batch Job Request the cancellation of a batch job. @@ -740,8 +775,13 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.BatchJobOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.BatchJobOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 4e7e4d12..608edba0 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -273,6 +273,7 @@ from .metricout import MetricOut, MetricOutTypedDict from .modelcapabilities import ModelCapabilities, 
ModelCapabilitiesTypedDict from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict +from .prediction import Prediction, PredictionType, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -342,6 +343,7 @@ WandbIntegrationOutTypedDict, ) + __all__ = [ "APIEndpoint", "AgentsCompletionRequest", @@ -566,6 +568,9 @@ "Object", "One", "OneTypedDict", + "Prediction", + "PredictionType", + "PredictionTypedDict", "QueryParamStatus", "ReferenceChunk", "ReferenceChunkType", diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index 5f53dddb..7d806835 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -83,6 +84,7 @@ class AgentsCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] class AgentsCompletionRequest(BaseModel): @@ -119,6 +121,8 @@ class AgentsCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -132,6 +136,7 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", ] nullable_fields = ["max_tokens", "random_seed", "tools", "n"] null_default_fields = [] diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index fdc15328..0eac55a5 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -82,6 +83,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] class AgentsCompletionStreamRequest(BaseModel): @@ -117,6 +119,8 @@ class AgentsCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -130,6 +134,7 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", ] nullable_fields = ["max_tokens", "random_seed", "tools", "n"] null_default_fields = [] diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index 4658324d..a253ac4d 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -83,6 +84,7 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -127,6 +129,8 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -145,6 +149,7 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", "safe_prompt", ] nullable_fields = [ diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 243f0697..a1697d58 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -86,6 +87,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -129,6 +131,8 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -147,6 +151,7 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", "safe_prompt", ] nullable_fields = [ diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py index 952d23a0..4cf51c02 100644 --- a/src/mistralai/models/fileschema.py +++ b/src/mistralai/models/fileschema.py @@ -6,6 +6,7 @@ from .source import Source from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum +import pydantic from pydantic import model_serializer from pydantic.functional_validators import PlainValidator from typing_extensions import Annotated, NotRequired, TypedDict @@ -16,7 +17,7 @@ class FileSchemaTypedDict(TypedDict): r"""The unique identifier of the file.""" object: str r"""The object type, which is always \"file\".""" - bytes: int + size_bytes: int r"""The size of the file, in bytes.""" created_at: int r"""The UNIX timestamp (in seconds) of the event.""" @@ -35,7 +36,7 @@ class FileSchema(BaseModel): object: str r"""The object type, which is always \"file\".""" - bytes: int + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] r"""The size of the file, in 
bytes.""" created_at: int diff --git a/src/mistralai/models/function.py b/src/mistralai/models/function.py index 942b042f..2430fa4f 100644 --- a/src/mistralai/models/function.py +++ b/src/mistralai/models/function.py @@ -10,6 +10,7 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] + strict: NotRequired[bool] class Function(BaseModel): @@ -18,3 +19,5 @@ class Function(BaseModel): parameters: Dict[str, Any] description: Optional[str] = "" + + strict: Optional[bool] = False diff --git a/src/mistralai/models/prediction.py b/src/mistralai/models/prediction.py new file mode 100644 index 00000000..63593122 --- /dev/null +++ b/src/mistralai/models/prediction.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +PredictionType = Literal["content"] + + +class PredictionTypedDict(TypedDict): + type: PredictionType + content: NotRequired[str] + + +class Prediction(BaseModel): + TYPE: Annotated[ + Annotated[Optional[PredictionType], AfterValidator(validate_const("content"))], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py index 6bf4a5bf..70d688cc 100644 --- a/src/mistralai/models/retrievefileout.py +++ b/src/mistralai/models/retrievefileout.py @@ -6,6 +6,7 @@ from .source import Source from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum +import pydantic from pydantic import model_serializer from pydantic.functional_validators import 
PlainValidator from typing_extensions import Annotated, NotRequired, TypedDict @@ -16,7 +17,7 @@ class RetrieveFileOutTypedDict(TypedDict): r"""The unique identifier of the file.""" object: str r"""The object type, which is always \"file\".""" - bytes: int + size_bytes: int r"""The size of the file, in bytes.""" created_at: int r"""The UNIX timestamp (in seconds) of the event.""" @@ -36,7 +37,7 @@ class RetrieveFileOut(BaseModel): object: str r"""The object type, which is always \"file\".""" - bytes: int + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] r"""The size of the file, in bytes.""" created_at: int diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py index 827fd00d..92dbb4a9 100644 --- a/src/mistralai/models/toolcall.py +++ b/src/mistralai/models/toolcall.py @@ -14,6 +14,7 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] + index: NotRequired[int] class ToolCall(BaseModel): @@ -24,3 +25,5 @@ class ToolCall(BaseModel): type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( None ) + + index: Optional[int] = 0 diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py index 23e25d5b..cf783862 100644 --- a/src/mistralai/models/uploadfileout.py +++ b/src/mistralai/models/uploadfileout.py @@ -6,6 +6,7 @@ from .source import Source from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum +import pydantic from pydantic import model_serializer from pydantic.functional_validators import PlainValidator from typing_extensions import Annotated, NotRequired, TypedDict @@ -16,7 +17,7 @@ class UploadFileOutTypedDict(TypedDict): r"""The unique identifier of the file.""" object: str r"""The object type, which is always \"file\".""" - bytes: int + size_bytes: int r"""The size of the file, in bytes.""" created_at: int r"""The UNIX 
timestamp (in seconds) of the event.""" @@ -35,7 +36,7 @@ class UploadFileOut(BaseModel): object: str r"""The object type, which is always \"file\".""" - bytes: int + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] r"""The size of the file, in bytes.""" created_at: int diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index e01e8c91..ec45eb36 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -18,7 +18,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ModelList]: + ) -> models.ModelList: r"""List Models List all models available to the user. @@ -74,11 +74,16 @@ def list( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ModelList]) + return utils.unmarshal_json(http_res.text, models.ModelList) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -100,7 +105,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ModelList]: + ) -> models.ModelList: r"""List Models List all models available to the user. 
@@ -156,11 +161,16 @@ async def list_async( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.ModelList]) + return utils.unmarshal_json(http_res.text, models.ModelList) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -183,9 +193,7 @@ def retrieve( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[ - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet - ]: + ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model Retrieve a model information. 
@@ -249,14 +257,17 @@ def retrieve( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, - Optional[ - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet - ], + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -279,9 +290,7 @@ async def retrieve_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[ - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet - ]: + ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model Retrieve a model information. 
@@ -345,14 +354,17 @@ async def retrieve_async( if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, - Optional[ - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet - ], + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, ) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -375,7 +387,7 @@ def delete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.DeleteModelOut]: + ) -> models.DeleteModelOut: r"""Delete Model Delete a fine-tuned model. 
@@ -437,11 +449,16 @@ def delete( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.DeleteModelOut]) + return utils.unmarshal_json(http_res.text, models.DeleteModelOut) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -464,7 +481,7 @@ async def delete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.DeleteModelOut]: + ) -> models.DeleteModelOut: r"""Delete Model Delete a fine-tuned model. 
@@ -526,11 +543,16 @@ async def delete_async( data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.DeleteModelOut]) + return utils.unmarshal_json(http_res.text, models.DeleteModelOut) if utils.match_response(http_res, "422", "application/json"): data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -555,7 +577,7 @@ def update( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.FTModelOut]: + ) -> models.FTModelOut: r"""Update Fine Tuned Model Update a model name or description. 
@@ -625,8 +647,13 @@ def update( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.FTModelOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -651,7 +678,7 @@ async def update_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.FTModelOut]: + ) -> models.FTModelOut: r"""Update Fine Tuned Model Update a model name or description. @@ -721,8 +748,13 @@ async def update_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.FTModelOut]) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + return utils.unmarshal_json(http_res.text, models.FTModelOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -745,7 +777,7 @@ def archive( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ArchiveFTModelOut]: + ) -> models.ArchiveFTModelOut: r"""Archive Fine Tuned Model Archive a fine-tuned model. 
@@ -806,10 +838,13 @@ def archive( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ArchiveFTModelOut] + return utils.unmarshal_json(http_res.text, models.ArchiveFTModelOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res ) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -832,7 +867,7 @@ async def archive_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ArchiveFTModelOut]: + ) -> models.ArchiveFTModelOut: r"""Archive Fine Tuned Model Archive a fine-tuned model. @@ -893,10 +928,13 @@ async def archive_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ArchiveFTModelOut] + return utils.unmarshal_json(http_res.text, models.ArchiveFTModelOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res ) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -919,7 +957,7 @@ def unarchive( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.UnarchiveFTModelOut]: + ) -> models.UnarchiveFTModelOut: r"""Unarchive Fine Tuned Model Un-archive a 
fine-tuned model. @@ -980,10 +1018,13 @@ def unarchive( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.UnarchiveFTModelOut] + return utils.unmarshal_json(http_res.text, models.UnarchiveFTModelOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res ) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -1006,7 +1047,7 @@ async def unarchive_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.UnarchiveFTModelOut]: + ) -> models.UnarchiveFTModelOut: r"""Unarchive Fine Tuned Model Un-archive a fine-tuned model. 
@@ -1067,10 +1108,13 @@ async def unarchive_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.UnarchiveFTModelOut] + return utils.unmarshal_json(http_res.text, models.UnarchiveFTModelOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res ) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 7778560e..e0ff7acd 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -83,7 +83,8 @@ def __init__( security: Any = None if callable(api_key): - security = lambda: models.Security(api_key=api_key()) # pylint: disable=unnecessary-lambda-assignment + # pylint: disable=unnecessary-lambda-assignment + security = lambda: models.Security(api_key=api_key()) else: security = models.Security(api_key=api_key) diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 8a991703..2ccbcbe1 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -1,6 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from ._hooks import SDKHooks +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass @@ -27,10 +33,10 @@ class SDKConfiguration: server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" - openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.3.1" - gen_version: str = "2.486.1" - user_agent: str = "speakeasy-sdk/python 1.3.1 2.486.1 0.0.2 mistralai" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None From 77c722095f1fa1bef6bf2fef1177278895d73277 Mon Sep 17 00:00:00 2001 From: Lucas <63519673+WikiLucas00@users.noreply.github.com> Date: Tue, 21 Jan 2025 15:03:28 +0100 Subject: [PATCH 103/223] Fix typo in MIGRATION.md (#152) --- MIGRATION.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index 602146c5..7ccdf9c0 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -28,7 +28,7 @@ We have made significant changes to the `mistralai` library to improve its usabi | `client.chat` | `client.chat.complete` | | `client.chat_stream` | `client.chat.stream` | | `client.completions` | `client.fim.complete` | -| `client.completions_strem` | `client.fim.stream` | +| `client.completions_stream`| `client.fim.stream` | | `client.embeddings` | `client.embeddings.create` | | `client.list_models` | `client.models.list` | | `client.delete_model` | `client.models.delete` | @@ -49,7 +49,7 @@ We have made significant changes to the `mistralai` library to improve its usabi | `async_client.chat` | `client.chat.complete_async` | | `async_client.chat_stream` | `client.chat.stream_async` | | 
`async_client.completions` | `client.fim.complete_async` | -| `async_client.completions_strem` | `client.fim.stream_async` | +| `async_client.completions_stream`| `client.fim.stream_async` | | `async_client.embeddings` | `client.embeddings.create_async` | | `async_client.list_models` | `client.models.list_async` | | `async_client.delete_model` | `client.models.delete_async` | From bcc774830f8da226567054fc71ade4dd6cd5ebdd Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 16:48:47 +0100 Subject: [PATCH 104/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.5.0=20(#184)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.476.2 * [Feature] Structured Outputs * Add --no-root to CI's poetry install * Different CI fix * Fix CI * Rename schema_ to schema_definition * Set version to 1.5.0 --------- Co-authored-by: speakeasybot Co-authored-by: Alexandre Menasria --- .genignore | 3 +- .github/workflows/lint_custom_code.yaml | 32 ++++ .github/workflows/test_custom_code.yaml | 48 ++++++ .speakeasy/gen.lock | 18 +- .speakeasy/gen.yaml | 4 +- .speakeasy/workflow.lock | 13 +- .vscode/settings.json | 6 - README.md | 32 ++++ RELEASES.md | 12 +- docs/models/jsonschema.md | 11 ++ docs/models/responseformat.md | 3 +- docs/models/responseformats.md | 3 +- examples/async_agents_no_streaming.py | 3 +- .../async_chat_with_image_no_streaming.py | 3 +- examples/async_structured_outputs.py | 33 ++++ examples/chat_with_streaming.py | 4 +- examples/function_calling.py | 2 + examples/structured_outputs.py | 45 +++++ pyproject.toml | 2 +- src/mistralai/_version.py | 6 +- src/mistralai/chat.py | 82 +++++++++ src/mistralai/extra/README.md | 56 ++++++ src/mistralai/extra/__init__.py | 5 + src/mistralai/extra/struct_chat.py | 41 +++++ 
src/mistralai/extra/tests/__init__.py | 0 src/mistralai/extra/tests/test_struct_chat.py | 103 +++++++++++ src/mistralai/extra/tests/test_utils.py | 162 ++++++++++++++++++ src/mistralai/extra/utils/__init__.py | 3 + src/mistralai/extra/utils/_pydantic_helper.py | 20 +++ src/mistralai/extra/utils/response_format.py | 24 +++ src/mistralai/httpclient.py | 50 ++++++ src/mistralai/models/__init__.py | 3 + src/mistralai/models/jsonschema.py | 55 ++++++ src/mistralai/models/responseformat.py | 37 +++- src/mistralai/models/responseformats.py | 2 +- src/mistralai/sdk.py | 13 +- 36 files changed, 901 insertions(+), 38 deletions(-) create mode 100644 .github/workflows/lint_custom_code.yaml create mode 100644 .github/workflows/test_custom_code.yaml delete mode 100644 .vscode/settings.json create mode 100644 docs/models/jsonschema.md create mode 100644 examples/async_structured_outputs.py create mode 100644 examples/structured_outputs.py create mode 100644 src/mistralai/extra/README.md create mode 100644 src/mistralai/extra/__init__.py create mode 100644 src/mistralai/extra/struct_chat.py create mode 100644 src/mistralai/extra/tests/__init__.py create mode 100644 src/mistralai/extra/tests/test_struct_chat.py create mode 100644 src/mistralai/extra/tests/test_utils.py create mode 100644 src/mistralai/extra/utils/__init__.py create mode 100644 src/mistralai/extra/utils/_pydantic_helper.py create mode 100644 src/mistralai/extra/utils/response_format.py create mode 100644 src/mistralai/models/jsonschema.py diff --git a/.genignore b/.genignore index 1186de66..1ded5670 100644 --- a/.genignore +++ b/.genignore @@ -1,2 +1,3 @@ pyproject.toml -examples/* \ No newline at end of file +examples/* +src/mistral/extra/* diff --git a/.github/workflows/lint_custom_code.yaml b/.github/workflows/lint_custom_code.yaml new file mode 100644 index 00000000..f9289188 --- /dev/null +++ b/.github/workflows/lint_custom_code.yaml @@ -0,0 +1,32 @@ +name: Linting Python custom files + +on: + push: + branches: 
+ - main + pull_request: + branches: + - main + +jobs: + lint: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + + - name: Install ruff + run: pip install ruff + + - name: Lint with ruff + # No need to lint the automatically generated Speakeasy code + run: | + ruff check examples/ + ruff check src/mistralai/_hooks/ --exclude __init__.py --exclude sdkhooks.py --exclude types.py + ruff check src/mistralai/extra/ diff --git a/.github/workflows/test_custom_code.yaml b/.github/workflows/test_custom_code.yaml new file mode 100644 index 00000000..230066cb --- /dev/null +++ b/.github/workflows/test_custom_code.yaml @@ -0,0 +1,48 @@ +name: Testing Python custom files + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + lint: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Set up Python + id: setup-python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + + - name: Install Poetry + uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + with: + virtualenvs-create: true + virtualenvs-in-project: true + virtualenvs-path: .venv + installer-parallel: true + + - name: Load cached venv + id: cached-poetry-dependencies + uses: actions/cache@v4 + with: + path: .venv + key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} + + - name: Install dependencies + # Install dependencies if cache does not exist + if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' + run: poetry install --no-interaction --no-root + + - name: Run the 'src/mistralai/extra' package unit tests + run: | + source .venv/bin/activate + python3.12 -m unittest discover -s src/mistralai/extra/tests -t src diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 722fd297..ac3e0111 100644 --- 
a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: dbfa566129ede53f4e3b2c91e81f6f74 + docChecksum: 553c31591e8dc33a58cb75f348c3aa72 docVersion: 0.0.2 - speakeasyVersion: 1.469.11 - generationVersion: 2.493.32 - releaseVersion: 1.4.0 - configChecksum: 46cde4e28fb5efba97051b54ac2e1c83 + speakeasyVersion: 1.477.0 + generationVersion: 2.497.0 + releaseVersion: 1.5.0 + configChecksum: 9a5649c5c372dc5fd2fde38a0faee40e repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,7 +14,8 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.10.4 + core: 5.10.5 + customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 enumUnions: 0.1.0 @@ -155,6 +156,7 @@ generatedFiles: - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md - docs/models/jobsout.md - docs/models/jobsoutobject.md + - docs/models/jsonschema.md - docs/models/legacyjobmetadataout.md - docs/models/legacyjobmetadataoutobject.md - docs/models/listfilesout.md @@ -319,6 +321,7 @@ generatedFiles: - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py - src/mistralai/models/jobsout.py + - src/mistralai/models/jsonschema.py - src/mistralai/models/legacyjobmetadataout.py - src/mistralai/models/listfilesout.py - src/mistralai/models/metricout.py @@ -566,6 +569,7 @@ examples: responses: "422": application/json: {} + "200": {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: @@ -582,6 +586,7 @@ examples: responses: "422": application/json: {} + "200": {} agents_completion_v1_agents_completions_post: 
speakeasy-default-agents-completion-v1-agents-completions-post: requestBody: @@ -598,6 +603,7 @@ examples: responses: "422": application/json: {} + "200": {} embeddings_v1_embeddings_post: speakeasy-default-embeddings-v1-embeddings-post: requestBody: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 30a10834..069ce07d 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.4.0 + version: 1.5.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -23,7 +23,7 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API. - enableCustomCodeRegions: false + enableCustomCodeRegions: true enumFormat: union envVarPrefix: MISTRAL fixFlags: diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 36a74525..ea74f7d9 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.469.11 +speakeasyVersion: 1.477.0 sources: mistral-azure-source: sourceNamespace: mistral-azure-source @@ -14,11 +14,10 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:c414dd5eecca5f02fe9012a1d131f696e0257fe100c371609272dbc6c522ef07 - sourceBlobDigest: sha256:f48af039106d00de84345fd095fbf4831f18fbeeef07e9ff7bba70a0e07eda07 + sourceRevisionDigest: sha256:af4a2854e017abc0ec9e4b557186611dcd69468d82d5ac7f81bfbe49165fc18d + sourceBlobDigest: sha256:9f1bbc418fba3c7b5031bacdf9d431aff476fb4b2aa3838ed50fb3922563703c tags: - latest - - speakeasy-sdk-regen-1737393201 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +36,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:c414dd5eecca5f02fe9012a1d131f696e0257fe100c371609272dbc6c522ef07 - sourceBlobDigest: sha256:f48af039106d00de84345fd095fbf4831f18fbeeef07e9ff7bba70a0e07eda07 + 
sourceRevisionDigest: sha256:af4a2854e017abc0ec9e4b557186611dcd69468d82d5ac7f81bfbe49165fc18d + sourceBlobDigest: sha256:9f1bbc418fba3c7b5031bacdf9d431aff476fb4b2aa3838ed50fb3922563703c codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:3f61d33c46733b24ecd422423900425b381529da038992e59bdb5a9b766bdf89 + codeSamplesRevisionDigest: sha256:cbf9b277d16c47816fc5d63b4c69cf0fbd1fe99d424c34ab465d2b61fcc6e5e8 workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 8d79f0ab..00000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "python.testing.pytestArgs": ["tests", "-vv"], - "python.testing.unittestEnabled": false, - "python.testing.pytestEnabled": true, - "pylint.args": ["--rcfile=pylintrc"] -} diff --git a/README.md b/README.md index dc492590..617c6071 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,7 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create yo * [Server Selection](#server-selection) * [Custom HTTP Client](#custom-http-client) * [Authentication](#authentication) + * [Resource Management](#resource-management) * [Debugging](#debugging) * [IDE Support](#ide-support) * [Development](#development) @@ -52,6 +53,11 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create yo ## SDK Installation +> [!NOTE] +> **Python version upgrade policy** +> +> Once a Python version reaches its [official end of life date](https://devguide.python.org/versions/), a 3-month grace period is provided for users to upgrade. Following this grace period, the minimum python version supported in the SDK will be updated. + The SDK can be installed with either *pip* or *poetry* package managers. 
### PIP @@ -754,6 +760,32 @@ with Mistral( ``` + +## Resource Management + +The `Mistral` class implements the context manager protocol and registers a finalizer function to close the underlying sync and async HTTPX clients it uses under the hood. This will close HTTP connections, release memory and free up other resources held by the SDK. In short-lived Python programs and notebooks that make a few SDK method calls, resource management may not be a concern. However, in longer-lived programs, it is beneficial to create a single SDK instance via a [context manager][context-manager] and reuse it across the application. + +[context-manager]: https://docs.python.org/3/reference/datamodel.html#context-managers + +```python +from mistralai import Mistral +import os +def main(): + with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + # Rest of application here... + + +# Or when using async: +async def amain(): + async with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + # Rest of application here... +``` + + ## Debugging diff --git a/RELEASES.md b/RELEASES.md index f441230f..cc8c6c29 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -148,4 +148,14 @@ Based on: ### Generated - [python v1.4.0] . ### Releases -- [PyPI v1.4.0] https://pypi.org/project/mistralai/1.4.0 - . \ No newline at end of file +- [PyPI v1.4.0] https://pypi.org/project/mistralai/1.4.0 - . + +## 2025-01-27 13:57:39 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.476.2 (2.495.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.5.0] . +### Releases +- [PyPI v1.5.0] https://pypi.org/project/mistralai/1.5.0 - . 
\ No newline at end of file diff --git a/docs/models/jsonschema.md b/docs/models/jsonschema.md new file mode 100644 index 00000000..ae387867 --- /dev/null +++ b/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseformat.md b/docs/models/responseformat.md index 9c627f55..23a1641b 100644 --- a/docs/models/responseformat.md +++ b/docs/models/responseformat.md @@ -5,4 +5,5 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseformats.md b/docs/models/responseformats.md index ce35fbb3..06886afe 100644 --- a/docs/models/responseformats.md +++ b/docs/models/responseformats.md @@ -8,4 +8,5 @@ An object specifying the format that the model must output. 
Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | \ No newline at end of file +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/examples/async_agents_no_streaming.py b/examples/async_agents_no_streaming.py index 799333b4..45f300ac 100755 --- a/examples/async_agents_no_streaming.py +++ b/examples/async_agents_no_streaming.py @@ -9,11 +9,12 @@ async def main(): api_key = os.environ["MISTRAL_API_KEY"] + agent_id = os.environ["MISTRAL_AGENT_ID"] client = Mistral(api_key=api_key) chat_response = await client.agents.complete_async( - agent_id="", + agent_id=agent_id, messages=[UserMessage(content="What is the best French cheese?")], ) diff --git a/examples/async_chat_with_image_no_streaming.py b/examples/async_chat_with_image_no_streaming.py index 22f9adc6..7e415305 100755 --- a/examples/async_chat_with_image_no_streaming.py +++ b/examples/async_chat_with_image_no_streaming.py @@ -3,10 +3,9 @@ import asyncio import os -import httpx from mistralai import Mistral -from mistralai.models import ImageURLChunk, TextChunk, UserMessage +from mistralai.models import UserMessage async def main(): diff --git a/examples/async_structured_outputs.py b/examples/async_structured_outputs.py new file mode 100644 index 00000000..4fafc991 --- /dev/null +++ b/examples/async_structured_outputs.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +import asyncio +import os +from pydantic import BaseModel + +from mistralai import Mistral + +async def main(): + + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + chat_response = await client.chat.parse_async( + model="mistral-large-2411", + messages=[ + {"role": "system", "content": "You are a helpful math tutor. 
You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning."}, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format = MathDemonstration + ) + print(chat_response.choices[0].message.parsed) + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/chat_with_streaming.py b/examples/chat_with_streaming.py index 5fc75038..19d48a15 100755 --- a/examples/chat_with_streaming.py +++ b/examples/chat_with_streaming.py @@ -8,7 +8,7 @@ def main(): api_key = os.environ["MISTRAL_API_KEY"] - model = "mistral-tiny" + model = "mistral-large-latest" client = Mistral(api_key=api_key) @@ -17,7 +17,7 @@ def main(): messages=[UserMessage(content="What is the best French cheese?")], ): - print(chunk.data.choices[0].delta.content) + print(chunk.data.choices[0].delta.content, end="") if __name__ == "__main__": diff --git a/examples/function_calling.py b/examples/function_calling.py index 766a825b..e7eba594 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -117,6 +117,8 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: tool_call_id=tool_call.id, ) ) +print(messages) + response = client.chat.complete(model=model, messages=messages, tools=tools) print(f"{response.choices[0].message.content}") diff --git a/examples/structured_outputs.py b/examples/structured_outputs.py new file mode 100644 index 00000000..15dc1bff --- /dev/null +++ b/examples/structured_outputs.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python + +import os +from pydantic import BaseModel + +from mistralai import Mistral + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: 
str + + print("Using the .parse method to parse the response into a Pydantic model:\n") + chat_response = client.chat.parse( + model="mistral-large-latest", + messages=[ + {"role": "system", "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning."}, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format = MathDemonstration + ) + print(chat_response.choices[0].message.parsed) + + # Or with the streaming API + print("\nUsing the .parse_stream method to stream back the response into a JSON Schema:\n") + with client.chat.parse_stream( + model="mistral-large-latest", + messages=[ + {"role": "system", "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning."}, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format=MathDemonstration + ) as stream: + for chunk in stream: + print(chunk.data.choices[0].delta.content, end="") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 79ebece2..42f36f14 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.4.0" +version = "1.5.0" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index b39db764..7f36cf10 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.4.0" +__version__: str = "1.5.0" __openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.493.32" -__user_agent__: str = "speakeasy-sdk/python 1.4.0 2.493.32 0.0.2 mistralai" +__gen_version__: str = "2.497.0" +__user_agent__: str = "speakeasy-sdk/python 1.5.0 2.497.0 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 547cc043..55ad60a9 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -7,10 +7,92 @@ from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, List, Mapping, Optional, Union +# region imports +from typing import Type +from mistralai.extra import ( + convert_to_parsed_chat_completion_response, + response_format_from_pydantic_model, + CustomPydanticModel, + ParsedChatCompletionResponse, +) +# endregion imports + class Chat(BaseSDK): r"""Chat Completion API.""" + # region sdk-class-body + # Custom .parse methods for the Structure Outputs Feature. + + def parse( + self, response_format: Type[CustomPydanticModel], **kwargs: Any + ) -> ParsedChatCompletionResponse[CustomPydanticModel]: + """ + Parse the response using the provided response format. 
+ :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .complete method + :return: The parsed response + """ + # Convert the input Pydantic Model to a strict JSON ready to be passed to chat.complete + json_response_format = response_format_from_pydantic_model(response_format) + # Run the inference + response = self.complete(**kwargs, response_format=json_response_format) + # Parse response back to the input pydantic model + parsed_response = convert_to_parsed_chat_completion_response( + response, response_format + ) + return parsed_response + + async def parse_async( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> ParsedChatCompletionResponse[CustomPydanticModel]: + """ + Asynchronously parse the response using the provided response format. + :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .complete method + :return: The parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = await self.complete_async( # pylint: disable=E1125 + **kwargs, response_format=json_response_format + ) + parsed_response = convert_to_parsed_chat_completion_response( + response, response_format + ) + return parsed_response + + def parse_stream( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> eventstreaming.EventStream[models.CompletionEvent]: + """ + Parse the response using the provided response format. + For now the response will be in JSON format not in the input Pydantic model. 
+ :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .stream method + :return: The JSON parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = self.stream(**kwargs, response_format=json_response_format) + return response + + async def parse_stream_async( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + """ + Asynchronously parse the response using the provided response format. + For now the response will be in JSON format not in the input Pydantic model. + :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .stream method + :return: The JSON parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = await self.stream_async( # pylint: disable=E1125 + **kwargs, response_format=json_response_format + ) + return response + + # endregion sdk-class-body + def complete( self, *, diff --git a/src/mistralai/extra/README.md b/src/mistralai/extra/README.md new file mode 100644 index 00000000..dfce43b3 --- /dev/null +++ b/src/mistralai/extra/README.md @@ -0,0 +1,56 @@ +## Context + +The extra package contains the custom logic which is too complex to be generated by Speakeasy from the OpenAPI specs. It was introduced to add the Structured Outputs feature. + +## Development / Contributing + +To add custom code in the SDK, you need to use [Speakeasy custom code regions](https://www.speakeasy.com/docs/customize/code/code-regions/overview) as below. + +### Runbook of SDK customization + +1. Add the code you want to import in the `src/mistralai/extra/` package. 
To have it importable from the SDK, you need to add it in the `__init__.py` file: +```python +from .my_custom_file import my_custom_function + +__all__ = ["my_custom_function"] +``` + +2. Add a new custom code region in the SDK files, e.g in `src/mistralai/chat.py`: +```python +# region imports +from typing import Type +from mistralai.extra import my_custom_function +# endregion imports + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + # region sdk-class-body + def my_custom_method(self, param: str) -> Type[some_type]: + output = my_custom_function(param1) + return output + # endregion sdk-class-body +``` + +3. Now build the SDK with the custom code: +```bash +rm -rf dist; poetry build; python3 -m pip install ~/client-python/dist/mistralai-1.4.1-py3-none-any.whl --force-reinstall +``` + +4. And now you should be able to call the custom method: +```python +import os +from mistralai import Mistral + +api_key = os.environ["MISTRAL_API_KEY"] +client = Mistral(api_key=api_key) + +client.chat.my_custom_method(param="test") +``` + +### Run the unit tests + +To run the unit tests for the `extra` package, you can run the following command from the root of the repository: +```bash +python3.12 -m unittest discover -s src/mistralai/extra/tests -t src +``` diff --git a/src/mistralai/extra/__init__.py b/src/mistralai/extra/__init__.py new file mode 100644 index 00000000..d8f7a21a --- /dev/null +++ b/src/mistralai/extra/__init__.py @@ -0,0 +1,5 @@ +from .struct_chat import ParsedChatCompletionResponse, convert_to_parsed_chat_completion_response +from .utils import response_format_from_pydantic_model +from .utils.response_format import CustomPydanticModel + +__all__ = ["convert_to_parsed_chat_completion_response", "response_format_from_pydantic_model", "CustomPydanticModel", "ParsedChatCompletionResponse"] diff --git a/src/mistralai/extra/struct_chat.py b/src/mistralai/extra/struct_chat.py new file mode 100644 index 00000000..364b450f --- /dev/null +++ 
b/src/mistralai/extra/struct_chat.py @@ -0,0 +1,41 @@ +from ..models import ChatCompletionResponse, ChatCompletionChoice, AssistantMessage +from .utils.response_format import CustomPydanticModel, pydantic_model_from_json +from typing import List, Optional, Type, Generic +from pydantic import BaseModel +import json + +class ParsedAssistantMessage(AssistantMessage, Generic[CustomPydanticModel]): + parsed: Optional[CustomPydanticModel] + +class ParsedChatCompletionChoice(ChatCompletionChoice, Generic[CustomPydanticModel]): + message: Optional[ParsedAssistantMessage[CustomPydanticModel]] # type: ignore + +class ParsedChatCompletionResponse(ChatCompletionResponse, Generic[CustomPydanticModel]): + choices: Optional[List[ParsedChatCompletionChoice[CustomPydanticModel]]] # type: ignore + +def convert_to_parsed_chat_completion_response(response: ChatCompletionResponse, response_format: Type[BaseModel]) -> ParsedChatCompletionResponse: + parsed_choices = [] + + if response.choices: + for choice in response.choices: + if choice.message: + parsed_message: ParsedAssistantMessage = ParsedAssistantMessage( + **choice.message.model_dump(), + parsed=None + ) + if isinstance(parsed_message.content, str): + parsed_message.parsed = pydantic_model_from_json(json.loads(parsed_message.content), response_format) + elif parsed_message.content is None: + parsed_message.parsed = None + else: + raise TypeError(f"Unexpected type for message.content: {type(parsed_message.content)}") + choice_dict = choice.model_dump() + choice_dict["message"] = parsed_message + parsed_choice: ParsedChatCompletionChoice = ParsedChatCompletionChoice(**choice_dict) + parsed_choices.append(parsed_choice) + else: + parsed_choice = ParsedChatCompletionChoice(**choice.model_dump()) + parsed_choices.append(parsed_choice) + response_dict = response.model_dump() + response_dict["choices"] = parsed_choices + return ParsedChatCompletionResponse(**response_dict) diff --git a/src/mistralai/extra/tests/__init__.py 
b/src/mistralai/extra/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/mistralai/extra/tests/test_struct_chat.py b/src/mistralai/extra/tests/test_struct_chat.py new file mode 100644 index 00000000..fd93575e --- /dev/null +++ b/src/mistralai/extra/tests/test_struct_chat.py @@ -0,0 +1,103 @@ +import unittest +from ..struct_chat import ( + convert_to_parsed_chat_completion_response, + ParsedChatCompletionResponse, + ParsedChatCompletionChoice, + ParsedAssistantMessage, +) +from ...models import ( + ChatCompletionResponse, + UsageInfo, + ChatCompletionChoice, + AssistantMessage, +) +from pydantic import BaseModel + + +class Explanation(BaseModel): + explanation: str + output: str + + +class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + +mock_cc_response = ChatCompletionResponse( + id="c0271b2098954c6094231703875ca0bc", + object="chat.completion", + model="mistral-large-latest", + usage=UsageInfo(prompt_tokens=75, completion_tokens=220, total_tokens=295), + created=1737727558, + choices=[ + ChatCompletionChoice( + index=0, + message=AssistantMessage( + content='{\n "final_answer": "x = -4",\n "steps": [\n {\n "explanation": "Start with the given equation.",\n "output": "8x + 7 = -23"\n },\n {\n "explanation": "Subtract 7 from both sides to isolate the term with x.",\n "output": "8x = -23 - 7"\n },\n {\n "explanation": "Simplify the right side of the equation.",\n "output": "8x = -30"\n },\n {\n "explanation": "Divide both sides by 8 to solve for x.",\n "output": "x = -30 / 8"\n },\n {\n "explanation": "Simplify the fraction to get the final answer.",\n "output": "x = -4"\n }\n ]\n}', + tool_calls=None, + prefix=False, + role="assistant", + ), + finish_reason="stop", + ) + ], +) + + +expected_response = ParsedChatCompletionResponse( + choices=[ + ParsedChatCompletionChoice( + index=0, + message=ParsedAssistantMessage( + content='{\n "final_answer": "x = -4",\n "steps": [\n {\n "explanation": "Start with 
the given equation.",\n "output": "8x + 7 = -23"\n },\n {\n "explanation": "Subtract 7 from both sides to isolate the term with x.",\n "output": "8x = -23 - 7"\n },\n {\n "explanation": "Simplify the right side of the equation.",\n "output": "8x = -30"\n },\n {\n "explanation": "Divide both sides by 8 to solve for x.",\n "output": "x = -30 / 8"\n },\n {\n "explanation": "Simplify the fraction to get the final answer.",\n "output": "x = -4"\n }\n ]\n}', + tool_calls=None, + prefix=False, + role="assistant", + parsed=MathDemonstration( + steps=[ + Explanation( + explanation="Start with the given equation.", + output="8x + 7 = -23", + ), + Explanation( + explanation="Subtract 7 from both sides to isolate the term with x.", + output="8x = -23 - 7", + ), + Explanation( + explanation="Simplify the right side of the equation.", + output="8x = -30", + ), + Explanation( + explanation="Divide both sides by 8 to solve for x.", + output="x = -30 / 8", + ), + Explanation( + explanation="Simplify the fraction to get the final answer.", + output="x = -4", + ), + ], + final_answer="x = -4", + ), + ), + finish_reason="stop", + ) + ], + created=1737727558, + id="c0271b2098954c6094231703875ca0bc", + model="mistral-large-latest", + object="chat.completion", + usage=UsageInfo(prompt_tokens=75, completion_tokens=220, total_tokens=295), +) + + +class TestConvertToParsedChatCompletionResponse(unittest.TestCase): + def test_convert_to_parsed_chat_completion_response(self): + output = convert_to_parsed_chat_completion_response( + mock_cc_response, MathDemonstration + ) + self.assertEqual(output, expected_response) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/mistralai/extra/tests/test_utils.py b/src/mistralai/extra/tests/test_utils.py new file mode 100644 index 00000000..41fa53e3 --- /dev/null +++ b/src/mistralai/extra/tests/test_utils.py @@ -0,0 +1,162 @@ +from ..utils.response_format import ( + pydantic_model_from_json, + response_format_from_pydantic_model, + 
rec_strict_json_schema, +) +from pydantic import BaseModel, ValidationError + +from ...models import ResponseFormat, JSONSchema +from ...types.basemodel import Unset + +import unittest + + +class Student(BaseModel): + name: str + age: int + + +class Explanation(BaseModel): + explanation: str + output: str + + +class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + +mathdemo_schema = { + "$defs": { + "Explanation": { + "properties": { + "explanation": {"title": "Explanation", "type": "string"}, + "output": {"title": "Output", "type": "string"}, + }, + "required": ["explanation", "output"], + "title": "Explanation", + "type": "object", + } + }, + "properties": { + "steps": { + "items": {"$ref": "#/$defs/Explanation"}, + "title": "Steps", + "type": "array", + }, + "final_answer": {"title": "Final Answer", "type": "string"}, + }, + "required": ["steps", "final_answer"], + "title": "MathDemonstration", + "type": "object", +} + +mathdemo_strict_schema = mathdemo_schema.copy() +mathdemo_strict_schema["$defs"]["Explanation"]["additionalProperties"] = False # type: ignore +mathdemo_strict_schema["additionalProperties"] = False + +mathdemo_response_format = ResponseFormat( + type="json_schema", + json_schema=JSONSchema( + name="MathDemonstration", + schema_definition=mathdemo_strict_schema, + description=Unset(), + strict=True, + ), +) + + +class TestResponseFormat(unittest.TestCase): + def test_pydantic_model_from_json(self): + missing_json_data = {"name": "Jean Dupont"} + good_json_data = {"name": "Jean Dupont", "age": 25} + extra_json_data = { + "name": "Jean Dupont", + "age": 25, + "extra_field": "extra_value", + } + complex_json_data = { + "final_answer": "x = -4", + "steps": [ + { + "explanation": "Start with the given equation.", + "output": "8x + 7 = -23", + }, + { + "explanation": "Subtract 7 from both sides to isolate the term with x.", + "output": "8x = -23 - 7", + }, + { + "explanation": "Simplify the right side of the equation.", 
+ "output": "8x = -30", + }, + { + "explanation": "Divide both sides by 8 to solve for x.", + "output": "x = -30 / 8", + }, + { + "explanation": "Simplify the fraction to get the final answer.", + "output": "x = -4", + }, + ], + } + + self.assertEqual( + pydantic_model_from_json(good_json_data, Student), + Student(name="Jean Dupont", age=25), + ) + self.assertEqual( + pydantic_model_from_json(extra_json_data, Student), + Student(name="Jean Dupont", age=25), + ) + self.assertEqual( + pydantic_model_from_json(complex_json_data, MathDemonstration), + MathDemonstration( + steps=[ + Explanation( + explanation="Start with the given equation.", + output="8x + 7 = -23", + ), + Explanation( + explanation="Subtract 7 from both sides to isolate the term with x.", + output="8x = -23 - 7", + ), + Explanation( + explanation="Simplify the right side of the equation.", + output="8x = -30", + ), + Explanation( + explanation="Divide both sides by 8 to solve for x.", + output="x = -30 / 8", + ), + Explanation( + explanation="Simplify the fraction to get the final answer.", + output="x = -4", + ), + ], + final_answer="x = -4", + ), + ) + + # Check it raises a validation error + with self.assertRaises(ValidationError): + pydantic_model_from_json(missing_json_data, Student) # type: ignore + + def test_response_format_from_pydantic_model(self): + self.assertEqual( + response_format_from_pydantic_model(MathDemonstration), + mathdemo_response_format, + ) + + def test_rec_strict_json_schema(self): + invalid_schema = mathdemo_schema | {"wrong_value": 1} + self.assertEqual( + rec_strict_json_schema(mathdemo_schema), mathdemo_strict_schema + ) + + with self.assertRaises(ValueError): + rec_strict_json_schema(invalid_schema) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/mistralai/extra/utils/__init__.py b/src/mistralai/extra/utils/__init__.py new file mode 100644 index 00000000..5011f1a6 --- /dev/null +++ b/src/mistralai/extra/utils/__init__.py @@ -0,0 +1,3 @@ +from 
.response_format import response_format_from_pydantic_model + +__all__ = ["response_format_from_pydantic_model"] diff --git a/src/mistralai/extra/utils/_pydantic_helper.py b/src/mistralai/extra/utils/_pydantic_helper.py new file mode 100644 index 00000000..08523f41 --- /dev/null +++ b/src/mistralai/extra/utils/_pydantic_helper.py @@ -0,0 +1,20 @@ +from typing import Any + +def rec_strict_json_schema(schema_node: Any) -> Any: + """ + Recursively set the additionalProperties property to False for all objects in the JSON Schema. + This makes the JSON Schema strict (i.e. no additional properties are allowed). + """ + if isinstance(schema_node, (str, bool)): + return schema_node + if isinstance(schema_node, dict): + if "type" in schema_node and schema_node["type"] == "object": + schema_node["additionalProperties"] = False + for key, value in schema_node.items(): + schema_node[key] = rec_strict_json_schema(value) + elif isinstance(schema_node, list): + for i, value in enumerate(schema_node): + schema_node[i] = rec_strict_json_schema(value) + else: + raise ValueError(f"Unexpected type: {schema_node}") + return schema_node diff --git a/src/mistralai/extra/utils/response_format.py b/src/mistralai/extra/utils/response_format.py new file mode 100644 index 00000000..f9ded3ff --- /dev/null +++ b/src/mistralai/extra/utils/response_format.py @@ -0,0 +1,24 @@ +from pydantic import BaseModel +from typing import TypeVar, Any, Type +from ...models import JSONSchema, ResponseFormat +from ._pydantic_helper import rec_strict_json_schema + +CustomPydanticModel = TypeVar("CustomPydanticModel", bound=BaseModel) + + +def response_format_from_pydantic_model( + model: type[CustomPydanticModel], +) -> ResponseFormat: + """Generate a strict JSON schema from a pydantic model.""" + model_schema = rec_strict_json_schema(model.model_json_schema()) + json_schema = JSONSchema.model_validate( + {"name": model.__name__, "schema": model_schema, "strict": True} + ) + return 
ResponseFormat(type="json_schema", json_schema=json_schema) + + +def pydantic_model_from_json( + json_data: dict[str, Any], pydantic_model: Type[CustomPydanticModel] +) -> CustomPydanticModel: + """Parse a JSON schema into a pydantic model.""" + return pydantic_model.model_validate(json_data) diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py index 167cea4e..9dc43cb0 100644 --- a/src/mistralai/httpclient.py +++ b/src/mistralai/httpclient.py @@ -1,6 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" # pyright: reportReturnType = false +import asyncio +from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -82,3 +84,51 @@ def build_request( async def aclose(self) -> None: pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + async_client: Union[AsyncHttpClient, None], +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + + if sync_client is not None: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None: + is_async = False + try: + asyncio.get_running_loop() + is_async = True + except RuntimeError: + pass + + try: + # If this function is called in an async loop then start another + # loop in a separate thread to close the async http client. 
+ if is_async: + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(asyncio.run, async_client.aclose()) + future.result() + else: + asyncio.run(async_client.aclose()) + except Exception: + pass diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 608edba0..ee083f3a 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -264,6 +264,7 @@ JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, ) from .jobsout import JobsOut, JobsOutObject, JobsOutTypedDict +from .jsonschema import JSONSchema, JSONSchemaTypedDict from .legacyjobmetadataout import ( LegacyJobMetadataOut, LegacyJobMetadataOutObject, @@ -515,6 +516,8 @@ "InputsTypedDict", "Integrations", "IntegrationsTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", "JobIn", "JobInIntegrations", "JobInIntegrationsTypedDict", diff --git a/src/mistralai/models/jsonschema.py b/src/mistralai/models/jsonschema.py new file mode 100644 index 00000000..76e40330 --- /dev/null +++ b/src/mistralai/models/jsonschema.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "strict"] + nullable_fields = ["description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py index aa60ba5d..17424afb 100644 --- a/src/mistralai/models/responseformat.py +++ b/src/mistralai/models/responseformat.py @@ -1,8 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -10,8 +12,41 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "json_schema"] + nullable_fields = ["json_schema"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/responseformats.py b/src/mistralai/models/responseformats.py index 2c06b812..08c39951 100644 --- a/src/mistralai/models/responseformats.py +++ b/src/mistralai/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object"] +ResponseFormats = Literal["text", "json_object", "json_schema"] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index e0ff7acd..e24f1581 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, HttpClient +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients from .sdkconfiguration import SDKConfiguration from .utils.logger import Logger, get_default_logger from .utils.retries import RetryConfig @@ -18,7 +18,8 @@ from mistralai.fine_tuning import FineTuning from mistralai.models_ import Models from mistralai.types import OptionalNullable, UNSET -from typing import Any, Callable, Dict, Optional, Union +from typing import Any, Callable, Dict, Optional, Union, cast +import weakref class Mistral(BaseSDK): @@ -118,6 +119,14 @@ def __init__( # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.async_client, + ) + self._init_sdks() def _init_sdks(self): From 48bb2438ba4d64c9a8f056a5268ce80b5b2ae64d Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Thu, 6 Mar 2025 17:37:38 +0100 Subject: [PATCH 105/223] [Pre-Release] Pin Speakeasy to 1.477.0 (#187) --- .speakeasy/workflow.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 3b3c6d55..00aefc99 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,5 @@ workflowVersion: 1.0.0 -speakeasyVersion: latest +speakeasyVersion: 1.477.0 sources: mistral-azure-source: inputs: From 6f80af2f350412ed3346f739cc0836af9e31d178 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 6 Mar 2025 19:28:51 +0100 Subject: [PATCH 106/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.5.1=20(#188)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.477.0 * Add OCR example * Add OCR example with file upload * Even with TS example * Fix example url --------- Co-authored-by: speakeasybot Co-authored-by: Alexandre Menasria --- .speakeasy/gen.lock | 45 +++- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 13 +- README.md | 4 + RELEASES.md | 12 +- docs/models/assistantmessage.md | 12 +- docs/models/chatclassificationrequest.md | 9 - docs/models/chatcompletionrequest.md | 2 +- docs/models/chatcompletionstreamrequest.md | 2 +- docs/models/chatmoderationrequest.md | 10 + ...puts.md => chatmoderationrequestinputs.md} | 2 +- docs/models/classificationrequest.md | 4 +- docs/models/contentchunk.md | 6 + docs/models/document.md | 19 ++ docs/models/documenturlchunk.md | 10 + docs/models/embeddingrequest.md | 3 +- docs/models/fimcompletionrequest.md | 2 +- docs/models/fimcompletionstreamrequest.md | 2 +- docs/models/ocrimageobject.md | 13 + docs/models/ocrpagedimensions.md | 10 + docs/models/ocrpageobject.md | 11 + docs/models/ocrrequest.md | 14 ++ docs/models/ocrresponse.md | 10 + docs/models/ocrusageinfo.md | 9 + docs/models/prediction.md | 8 +- docs/models/predictiontype.md | 8 - docs/sdks/chat/README.md | 4 +- docs/sdks/classifiers/README.md | 19 +- docs/sdks/embeddings/README.md | 1 - docs/sdks/fim/README.md | 4 +- docs/sdks/ocr/README.md | 58 +++++ .../async_chat_with_image_no_streaming.py | 2 +- examples/ocr_process_from_file.py | 47 ++++ examples/ocr_process_from_url.py | 25 ++ poetry.lock | 12 +- pyproject.toml | 2 +- src/mistralai/_version.py | 4 +- src/mistralai/chat.py | 10 +- src/mistralai/classifiers.py | 52 ++-- src/mistralai/embeddings.py | 10 +- src/mistralai/fim.py | 10 +- src/mistralai/models/__init__.py | 54 ++-- src/mistralai/models/assistantmessage.py | 2 + .../models/chatclassificationrequest.py | 113 --------- src/mistralai/models/chatcompletionrequest.py | 13 +- .../models/chatcompletionstreamrequest.py | 13 +- 
src/mistralai/models/chatmoderationrequest.py | 86 +++++++ src/mistralai/models/classificationrequest.py | 43 +--- src/mistralai/models/contentchunk.py | 9 +- src/mistralai/models/documenturlchunk.py | 62 +++++ src/mistralai/models/embeddingrequest.py | 38 +-- src/mistralai/models/fimcompletionrequest.py | 5 +- .../models/fimcompletionstreamrequest.py | 5 +- src/mistralai/models/ocrimageobject.py | 77 ++++++ src/mistralai/models/ocrpagedimensions.py | 25 ++ src/mistralai/models/ocrpageobject.py | 64 +++++ src/mistralai/models/ocrrequest.py | 97 +++++++ src/mistralai/models/ocrresponse.py | 26 ++ src/mistralai/models/ocrusageinfo.py | 51 ++++ src/mistralai/models/prediction.py | 9 +- src/mistralai/ocr.py | 238 ++++++++++++++++++ src/mistralai/sdk.py | 4 + 62 files changed, 1181 insertions(+), 355 deletions(-) delete mode 100644 docs/models/chatclassificationrequest.md create mode 100644 docs/models/chatmoderationrequest.md rename docs/models/{chatclassificationrequestinputs.md => chatmoderationrequestinputs.md} (86%) create mode 100644 docs/models/document.md create mode 100644 docs/models/documenturlchunk.md create mode 100644 docs/models/ocrimageobject.md create mode 100644 docs/models/ocrpagedimensions.md create mode 100644 docs/models/ocrpageobject.md create mode 100644 docs/models/ocrrequest.md create mode 100644 docs/models/ocrresponse.md create mode 100644 docs/models/ocrusageinfo.md delete mode 100644 docs/models/predictiontype.md create mode 100644 docs/sdks/ocr/README.md create mode 100644 examples/ocr_process_from_file.py create mode 100644 examples/ocr_process_from_url.py delete mode 100644 src/mistralai/models/chatclassificationrequest.py create mode 100644 src/mistralai/models/chatmoderationrequest.py create mode 100644 src/mistralai/models/documenturlchunk.py create mode 100644 src/mistralai/models/ocrimageobject.py create mode 100644 src/mistralai/models/ocrpagedimensions.py create mode 100644 src/mistralai/models/ocrpageobject.py create mode 100644 
src/mistralai/models/ocrrequest.py create mode 100644 src/mistralai/models/ocrresponse.py create mode 100644 src/mistralai/models/ocrusageinfo.py create mode 100644 src/mistralai/ocr.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index ac3e0111..6eb1248e 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 553c31591e8dc33a58cb75f348c3aa72 + docChecksum: 81cc8be96362e2f1cb145b08a2e6c4fa docVersion: 0.0.2 speakeasyVersion: 1.477.0 generationVersion: 2.497.0 - releaseVersion: 1.5.0 - configChecksum: 9a5649c5c372dc5fd2fde38a0faee40e + releaseVersion: 1.5.1 + configChecksum: ef3439d915c5d16e7cfb88fe2bf94907 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -68,8 +68,6 @@ generatedFiles: - docs/models/batchjobsout.md - docs/models/batchjobsoutobject.md - docs/models/batchjobstatus.md - - docs/models/chatclassificationrequest.md - - docs/models/chatclassificationrequestinputs.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionrequest.md - docs/models/chatcompletionrequesttoolchoice.md @@ -78,6 +76,8 @@ generatedFiles: - docs/models/chatcompletionstreamrequestmessages.md - docs/models/chatcompletionstreamrequeststop.md - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/chatmoderationrequest.md + - docs/models/chatmoderationrequestinputs.md - docs/models/checkpointout.md - docs/models/classificationobject.md - docs/models/classificationrequest.md @@ -99,6 +99,8 @@ generatedFiles: - docs/models/detailedjoboutobject.md - docs/models/detailedjoboutrepositories.md - docs/models/detailedjoboutstatus.md + - docs/models/document.md + - docs/models/documenturlchunk.md - docs/models/embeddingrequest.md - docs/models/embeddingresponse.md - 
docs/models/embeddingresponsedata.md @@ -166,9 +168,14 @@ generatedFiles: - docs/models/modelcapabilities.md - docs/models/modellist.md - docs/models/object.md + - docs/models/ocrimageobject.md + - docs/models/ocrpagedimensions.md + - docs/models/ocrpageobject.md + - docs/models/ocrrequest.md + - docs/models/ocrresponse.md + - docs/models/ocrusageinfo.md - docs/models/one.md - docs/models/prediction.md - - docs/models/predictiontype.md - docs/models/queryparamstatus.md - docs/models/referencechunk.md - docs/models/referencechunktype.md @@ -227,6 +234,7 @@ generatedFiles: - docs/sdks/mistral/README.md - docs/sdks/mistraljobs/README.md - docs/sdks/models/README.md + - docs/sdks/ocr/README.md - poetry.toml - py.typed - pylintrc @@ -261,11 +269,11 @@ generatedFiles: - src/mistralai/models/batchjobout.py - src/mistralai/models/batchjobsout.py - src/mistralai/models/batchjobstatus.py - - src/mistralai/models/chatclassificationrequest.py - src/mistralai/models/chatcompletionchoice.py - src/mistralai/models/chatcompletionrequest.py - src/mistralai/models/chatcompletionresponse.py - src/mistralai/models/chatcompletionstreamrequest.py + - src/mistralai/models/chatmoderationrequest.py - src/mistralai/models/checkpointout.py - src/mistralai/models/classificationobject.py - src/mistralai/models/classificationrequest.py @@ -279,6 +287,7 @@ generatedFiles: - src/mistralai/models/deletemodelout.py - src/mistralai/models/deltamessage.py - src/mistralai/models/detailedjobout.py + - src/mistralai/models/documenturlchunk.py - src/mistralai/models/embeddingrequest.py - src/mistralai/models/embeddingresponse.py - src/mistralai/models/embeddingresponsedata.py @@ -327,6 +336,12 @@ generatedFiles: - src/mistralai/models/metricout.py - src/mistralai/models/modelcapabilities.py - src/mistralai/models/modellist.py + - src/mistralai/models/ocrimageobject.py + - src/mistralai/models/ocrpagedimensions.py + - src/mistralai/models/ocrpageobject.py + - src/mistralai/models/ocrrequest.py + - 
src/mistralai/models/ocrresponse.py + - src/mistralai/models/ocrusageinfo.py - src/mistralai/models/prediction.py - src/mistralai/models/referencechunk.py - src/mistralai/models/responseformat.py @@ -357,6 +372,7 @@ generatedFiles: - src/mistralai/models/wandbintegration.py - src/mistralai/models/wandbintegrationout.py - src/mistralai/models_.py + - src/mistralai/ocr.py - src/mistralai/py.typed - src/mistralai/sdk.py - src/mistralai/sdkconfiguration.py @@ -607,7 +623,7 @@ examples: embeddings_v1_embeddings_post: speakeasy-default-embeddings-v1-embeddings-post: requestBody: - application/json: {"input": ["Embed this sentence.", "As well as this one."], "model": "mistral-embed"} + application/json: {"model": "mistral-embed", "input": ["Embed this sentence.", "As well as this one."]} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} @@ -616,7 +632,7 @@ examples: moderations_v1_moderations_post: speakeasy-default-moderations-v1-moderations-post: requestBody: - application/json: {"input": [""]} + application/json: {"model": "V90", "input": [""]} responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} @@ -625,11 +641,20 @@ examples: moderations_chat_v1_chat_moderations_post: speakeasy-default-moderations-chat-v1-chat-moderations-post: requestBody: - application/json: {"input": [[{"content": "", "role": "tool"}, {"content": "", "role": "tool"}, {"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}]], "model": "Roadster"} + application/json: {"model": "Roadster", "input": [[{"content": "", "role": "tool"}, {"content": "", "role": "tool"}, 
{"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}]], "truncate_for_context_length": false} responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} "422": application/json: {} + ocr_v1_ocr_post: + speakeasy-default-ocr-v1-ocr-post: + requestBody: + application/json: {"model": "Focus", "document": {"document_url": "https://dutiful-horst.org"}} + responses: + "200": + application/json: {"pages": [], "model": "A4", "usage_info": {"pages_processed": 442675}} + "422": + application/json: {} examplesVersion: 1.0.0 generatedTests: {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 069ce07d..f020895b 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.5.0 + version: 1.5.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index ea74f7d9..21228dc5 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,10 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:af4a2854e017abc0ec9e4b557186611dcd69468d82d5ac7f81bfbe49165fc18d - sourceBlobDigest: sha256:9f1bbc418fba3c7b5031bacdf9d431aff476fb4b2aa3838ed50fb3922563703c + sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 + sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b tags: - latest + - speakeasy-sdk-regen-1741279153 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -36,13 +37,13 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:af4a2854e017abc0ec9e4b557186611dcd69468d82d5ac7f81bfbe49165fc18d - sourceBlobDigest: 
sha256:9f1bbc418fba3c7b5031bacdf9d431aff476fb4b2aa3838ed50fb3922563703c + sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 + sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:cbf9b277d16c47816fc5d63b4c69cf0fbd1fe99d424c34ab465d2b61fcc6e5e8 + codeSamplesRevisionDigest: sha256:ba10be893f3e6dae275eb8fb09a688f3652de81eebd314427f28c274800edc48 workflow: workflowVersion: 1.0.0 - speakeasyVersion: latest + speakeasyVersion: 1.477.0 sources: mistral-azure-source: inputs: diff --git a/README.md b/README.md index 617c6071..fd31bcd8 100644 --- a/README.md +++ b/README.md @@ -442,6 +442,10 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model * [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model +### [ocr](docs/sdks/ocr/README.md) + +* [process](docs/sdks/ocr/README.md#process) - OCR + diff --git a/RELEASES.md b/RELEASES.md index cc8c6c29..d7b657bb 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -158,4 +158,14 @@ Based on: ### Generated - [python v1.5.0] . ### Releases -- [PyPI v1.5.0] https://pypi.org/project/mistralai/1.5.0 - . \ No newline at end of file +- [PyPI v1.5.0] https://pypi.org/project/mistralai/1.5.0 - . + +## 2025-03-06 16:38:57 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.477.0 (2.497.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.5.1] . +### Releases +- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . 
\ No newline at end of file diff --git a/docs/models/assistantmessage.md b/docs/models/assistantmessage.md index 53f1cc76..3d0bd90b 100644 --- a/docs/models/assistantmessage.md +++ b/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | 
[OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatclassificationrequest.md b/docs/models/chatclassificationrequest.md deleted file mode 100644 index 990408b1..00000000 --- a/docs/models/chatclassificationrequest.md +++ /dev/null @@ -1,9 +0,0 @@ -# ChatClassificationRequest - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `inputs` | [models.ChatClassificationRequestInputs](../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index ac743583..714f4f5a 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | 
:heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 8ca0f21f..378ccd41 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/models/chatmoderationrequest.md b/docs/models/chatmoderationrequest.md new file mode 100644 index 00000000..2b8f46cb --- /dev/null +++ b/docs/models/chatmoderationrequest.md @@ -0,0 +1,10 @@ +# ChatModerationRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.ChatModerationRequestInputs](../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | +| `truncate_for_context_length` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatclassificationrequestinputs.md b/docs/models/chatmoderationrequestinputs.md similarity index 86% rename from docs/models/chatclassificationrequestinputs.md rename to docs/models/chatmoderationrequestinputs.md index 290c9ad2..cf775d60 100644 --- 
a/docs/models/chatclassificationrequestinputs.md +++ b/docs/models/chatmoderationrequestinputs.md @@ -1,4 +1,4 @@ -# ChatClassificationRequestInputs +# ChatModerationRequestInputs Chat to classify diff --git a/docs/models/classificationrequest.md b/docs/models/classificationrequest.md index e1556684..b9befc89 100644 --- a/docs/models/classificationrequest.md +++ b/docs/models/classificationrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | +| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. 
| \ No newline at end of file diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md index 22023e8b..8cf7fad1 100644 --- a/docs/models/contentchunk.md +++ b/docs/models/contentchunk.md @@ -9,6 +9,12 @@ value: models.ImageURLChunk = /* values here */ ``` +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + ### `models.TextChunk` ```python diff --git a/docs/models/document.md b/docs/models/document.md new file mode 100644 index 00000000..e2940355 --- /dev/null +++ b/docs/models/document.md @@ -0,0 +1,19 @@ +# Document + +Document to run OCR on + + +## Supported Types + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + diff --git a/docs/models/documenturlchunk.md b/docs/models/documenturlchunk.md new file mode 100644 index 00000000..33785c34 --- /dev/null +++ b/docs/models/documenturlchunk.md @@ -0,0 +1,10 @@ +# DocumentURLChunk + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 4d215c7b..07ab903a 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -6,5 +6,4 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | 
-------------------------------------------------- | | `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | -| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | | \ No newline at end of file +| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | \ No newline at end of file diff --git a/docs/models/fimcompletionrequest.md b/docs/models/fimcompletionrequest.md index 236d2d21..7507b90c 100644 --- a/docs/models/fimcompletionrequest.md +++ b/docs/models/fimcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/models/fimcompletionstreamrequest.md b/docs/models/fimcompletionstreamrequest.md index fa635932..6cc439c7 100644 --- a/docs/models/fimcompletionstreamrequest.md +++ b/docs/models/fimcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/models/ocrimageobject.md b/docs/models/ocrimageobject.md new file mode 100644 index 00000000..273cfa9a --- /dev/null +++ b/docs/models/ocrimageobject.md @@ -0,0 +1,13 @@ +# OCRImageObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Image ID for extracted image in a page | +| `top_left_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of top-left corner of the extracted image | +| `top_left_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of top-left corner of the extracted image | +| `bottom_right_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of bottom-right corner of the extracted image | +| `bottom_right_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of bottom-right corner of the extracted image | +| `image_base64` | *OptionalNullable[str]* | 
:heavy_minus_sign: | Base64 string of the extracted image | \ No newline at end of file diff --git a/docs/models/ocrpagedimensions.md b/docs/models/ocrpagedimensions.md new file mode 100644 index 00000000..c93ca64d --- /dev/null +++ b/docs/models/ocrpagedimensions.md @@ -0,0 +1,10 @@ +# OCRPageDimensions + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | +| `dpi` | *int* | :heavy_check_mark: | Dots per inch of the page-image | +| `height` | *int* | :heavy_check_mark: | Height of the image in pixels | +| `width` | *int* | :heavy_check_mark: | Width of the image in pixels | \ No newline at end of file diff --git a/docs/models/ocrpageobject.md b/docs/models/ocrpageobject.md new file mode 100644 index 00000000..9db3bb77 --- /dev/null +++ b/docs/models/ocrpageobject.md @@ -0,0 +1,11 @@ +# OCRPageObject + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | The page index in a pdf document starting from 0 | +| `markdown` | *str* | :heavy_check_mark: | The markdown string response of the page | +| `images` | List[[models.OCRImageObject](../models/ocrimageobject.md)] | :heavy_check_mark: | List of all extracted images in the page | +| `dimensions` | [Nullable[models.OCRPageDimensions]](../models/ocrpagedimensions.md) | :heavy_check_mark: | The dimensions of the PDF Page's screenshot image | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md new file mode 100644 index 00000000..dbc4dc80 --- /dev/null +++ b/docs/models/ocrrequest.md @@ -0,0 +1,14 @@ +# 
OCRRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | \ No newline at end of file diff --git a/docs/models/ocrresponse.md b/docs/models/ocrresponse.md new file mode 100644 index 00000000..690d992d --- /dev/null +++ b/docs/models/ocrresponse.md @@ -0,0 +1,10 @@ +# OCRResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | +| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. 
| +| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/ocrusageinfo.md b/docs/models/ocrusageinfo.md new file mode 100644 index 00000000..d9d79125 --- /dev/null +++ b/docs/models/ocrusageinfo.md @@ -0,0 +1,9 @@ +# OCRUsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `pages_processed` | *int* | :heavy_check_mark: | Number of pages processed | +| `doc_size_bytes` | *OptionalNullable[int]* | :heavy_minus_sign: | Document size in bytes | \ No newline at end of file diff --git a/docs/models/prediction.md b/docs/models/prediction.md index 578cdcee..86e9c396 100644 --- a/docs/models/prediction.md +++ b/docs/models/prediction.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -| `type` | [Optional[models.PredictionType]](../models/predictiontype.md) | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/predictiontype.md b/docs/models/predictiontype.md deleted file mode 100644 index 67ff99e2..00000000 --- a/docs/models/predictiontype.md +++ /dev/null @@ -1,8 +0,0 @@ -# PredictionType - - -## Values - -| Name | Value | -| --------- | --------- | -| 
`CONTENT` | content | \ No newline at end of file diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index dbdfba27..38e16adc 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -40,7 +40,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -101,7 +101,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 185711a7..6bcc68a9 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -24,7 +24,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.moderate(inputs=[ + res = mistral.classifiers.moderate(model="V90", inputs=[ "", ]) @@ -37,8 +37,8 @@ with Mistral( | Parameter | Type | Required | Description | | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | | `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. 
| -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -66,7 +66,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.moderate_chat(inputs=[ + res = mistral.classifiers.moderate_chat(model="Roadster", inputs=[ [ { "content": "", @@ -95,7 +95,7 @@ with Mistral( "role": "assistant", }, ], - ], model="Roadster") + ], truncate_for_context_length=False) # Handle response print(res) @@ -104,11 +104,12 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | -| `inputs` | [models.ChatClassificationRequestInputs](../../models/chatclassificationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.ChatModerationRequestInputs](../../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | +| `truncate_for_context_length` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 899c626f..44fae4ac 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -39,7 +39,6 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | | `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | -| `encoding_format` | *OptionalNullable[str]* | :heavy_minus_sign: | The format to return the embeddings in. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 0339c213..28de6c02 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -35,7 +35,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -85,7 +85,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md new file mode 100644 index 00000000..54f8af96 --- /dev/null +++ b/docs/sdks/ocr/README.md @@ -0,0 +1,58 @@ +# Ocr +(*ocr*) + +## Overview + +OCR API + +### Available Operations + +* [process](#process) - OCR + +## process + +OCR + +### Example Usage + +```python +from mistralai import Mistral +import os + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.ocr.process(model="Focus", document={ + "document_url": "https://dutiful-horst.org", + "type": "document_url", + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.OCRResponse](../../models/ocrresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/examples/async_chat_with_image_no_streaming.py b/examples/async_chat_with_image_no_streaming.py index 7e415305..ecb42257 100755 --- a/examples/async_chat_with_image_no_streaming.py +++ b/examples/async_chat_with_image_no_streaming.py @@ -21,7 +21,7 @@ async def main(): {"type": "text", "text": "What's in this image?"}, { "type": "image_url", - "image_url": "https://mistral.ai/images/news/codestral/FIM_table.png", + "image_url": "https://cms.mistral.ai/assets/af26a11d-0793-439f-a06e-7694b24b8270", }, ] ) diff --git a/examples/ocr_process_from_file.py 
b/examples/ocr_process_from_file.py new file mode 100644 index 00000000..70c9d4a8 --- /dev/null +++ b/examples/ocr_process_from_file.py @@ -0,0 +1,47 @@ +from mistralai import Mistral +import os +import json +from pathlib import Path +import urllib.request + +MIXTRAL_OF_EXPERTS_PDF_URL = "https://arxiv.org/pdf/2401.04088" +MOE_FILENAME = "mixtral_of_experts.pdf" + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + pdf_file = Path(MOE_FILENAME) + + # Download the file if it doesn't exist + if not pdf_file.is_file(): + urllib.request.urlretrieve(MIXTRAL_OF_EXPERTS_PDF_URL, MOE_FILENAME) + + # Upload the file + uploaded_file = client.files.upload( + file={ + "file_name": pdf_file.stem, + "content": pdf_file.read_bytes(), + }, + purpose="ocr", + ) + + signed_url = client.files.get_signed_url(file_id=uploaded_file.id, expiry=1) + + pdf_response = client.ocr.process(document={ + "document_url": signed_url.url, + "type": "document_url", + "document_name": "mistral-7b-pdf", + }, model="mistral-ocr-latest", include_image_base64=True) + + # Print the parsed PDF + response_dict = json.loads(pdf_response.model_dump_json()) + json_string = json.dumps(response_dict, indent=4) + print(json_string) + + # Remove the file + pdf_file.unlink() + + +if __name__ == "__main__": + main() diff --git a/examples/ocr_process_from_url.py b/examples/ocr_process_from_url.py new file mode 100644 index 00000000..4cb11835 --- /dev/null +++ b/examples/ocr_process_from_url.py @@ -0,0 +1,25 @@ +from mistralai import Mistral +import os +import json + +MISTRAL_7B_PDF_URL = "https://arxiv.org/pdf/2310.06825" + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + # Using an URL + pdf_response = client.ocr.process(document={ + "document_url": MISTRAL_7B_PDF_URL, + "type": "document_url", + "document_name": "mistral-7b-pdf", + }, 
model="mistral-ocr-latest", include_image_base64=True) + + # Print the parsed PDF + response_dict = json.loads(pdf_response.model_dump_json()) + json_string = json.dumps(response_dict, indent=4) + print(json_string) + + +if __name__ == "__main__": + main() diff --git a/poetry.lock b/poetry.lock index 154485d2..78003ff1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -35,7 +35,7 @@ typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21.0b1) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] trio = ["trio (>=0.26.1)"] [[package]] @@ -333,7 +333,7 @@ idna = "*" sniffio = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] @@ -564,7 +564,7 @@ typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" @@ 
-697,7 +697,7 @@ colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} dill = [ {version = ">=0.2", markers = "python_version < \"3.11\""}, {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, + {version = ">=0.3.6", markers = "python_version == \"3.11\""}, ] isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" mccabe = ">=0.6,<0.8" @@ -938,7 +938,7 @@ files = [ ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] diff --git a/pyproject.toml b/pyproject.toml index 42f36f14..8eec1a78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.5.0" +version = "1.5.1" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 7f36cf10..700c880e 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.5.0" +__version__: str = "1.5.1" __openapi_doc_version__: str = "0.0.2" __gen_version__: str = "2.497.0" -__user_agent__: str = "speakeasy-sdk/python 1.5.0 2.497.0 0.0.2 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.5.1 2.497.0 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 55ad60a9..67646ffe 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext -from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, List, Mapping, Optional, Union @@ -96,7 +96,7 @@ async def parse_stream_async( def complete( self, *, - model: Nullable[str], + model: str, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -253,7 +253,7 @@ def complete( async def complete_async( self, *, - model: Nullable[str], + model: str, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -410,7 +410,7 @@ async def complete_async( def stream( self, *, - model: Nullable[str], + model: str, messages: Union[ List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict], @@ -585,7 +585,7 @@ def stream( async def stream_async( self, *, - model: Nullable[str], + model: str, messages: Union[ 
List[models.ChatCompletionStreamRequestMessages], List[models.ChatCompletionStreamRequestMessagesTypedDict], diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index af54e96e..6ff1d6a8 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext -from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from typing import Any, Mapping, Optional, Union @@ -14,11 +14,11 @@ class Classifiers(BaseSDK): def moderate( self, *, + model: str, inputs: Union[ models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], - model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -26,8 +26,8 @@ def moderate( ) -> models.ClassificationResponse: r"""Moderations + :param model: ID of the model to use. :param inputs: Text to classify. 
- :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -42,8 +42,8 @@ def moderate( base_url = server_url request = models.ClassificationRequest( - inputs=inputs, model=model, + inputs=inputs, ) req = self._build_request( @@ -115,11 +115,11 @@ def moderate( async def moderate_async( self, *, + model: str, inputs: Union[ models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], - model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -127,8 +127,8 @@ async def moderate_async( ) -> models.ClassificationResponse: r"""Moderations + :param model: ID of the model to use. :param inputs: Text to classify. - :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -143,8 +143,8 @@ async def moderate_async( base_url = server_url request = models.ClassificationRequest( - inputs=inputs, model=model, + inputs=inputs, ) req = self._build_request_async( @@ -216,11 +216,12 @@ async def moderate_async( def moderate_chat( self, *, + model: str, inputs: Union[ - models.ChatClassificationRequestInputs, - models.ChatClassificationRequestInputsTypedDict, + models.ChatModerationRequestInputs, + models.ChatModerationRequestInputsTypedDict, ], - model: Nullable[str], + truncate_for_context_length: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -228,8 +229,9 @@ def moderate_chat( ) -> models.ClassificationResponse: r"""Moderations Chat - :param inputs: Chat 
to classify :param model: + :param inputs: Chat to classify + :param truncate_for_context_length: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -243,11 +245,10 @@ def moderate_chat( if server_url is not None: base_url = server_url - request = models.ChatClassificationRequest( - inputs=utils.get_pydantic_model( - inputs, models.ChatClassificationRequestInputs - ), + request = models.ChatModerationRequest( model=model, + inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + truncate_for_context_length=truncate_for_context_length, ) req = self._build_request( @@ -264,7 +265,7 @@ def moderate_chat( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatClassificationRequest + request, False, False, "json", models.ChatModerationRequest ), timeout_ms=timeout_ms, ) @@ -319,11 +320,12 @@ def moderate_chat( async def moderate_chat_async( self, *, + model: str, inputs: Union[ - models.ChatClassificationRequestInputs, - models.ChatClassificationRequestInputsTypedDict, + models.ChatModerationRequestInputs, + models.ChatModerationRequestInputsTypedDict, ], - model: Nullable[str], + truncate_for_context_length: Optional[bool] = False, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -331,8 +333,9 @@ async def moderate_chat_async( ) -> models.ClassificationResponse: r"""Moderations Chat - :param inputs: Chat to classify :param model: + :param inputs: Chat to classify + :param truncate_for_context_length: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override 
the default request timeout configuration for this method in milliseconds @@ -346,11 +349,10 @@ async def moderate_chat_async( if server_url is not None: base_url = server_url - request = models.ChatClassificationRequest( - inputs=utils.get_pydantic_model( - inputs, models.ChatClassificationRequestInputs - ), + request = models.ChatModerationRequest( model=model, + inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + truncate_for_context_length=truncate_for_context_length, ) req = self._build_request_async( @@ -367,7 +369,7 @@ async def moderate_chat_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatClassificationRequest + request, False, False, "json", models.ChatModerationRequest ), timeout_ms=timeout_ms, ) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 524f09c7..f6f558b8 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -16,7 +16,6 @@ def create( *, inputs: Union[models.Inputs, models.InputsTypedDict], model: Optional[str] = "mistral-embed", - encoding_format: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -28,7 +27,6 @@ def create( :param inputs: Text to embed. :param model: ID of the model to use. - :param encoding_format: The format to return the embeddings in. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -43,9 +41,8 @@ def create( base_url = server_url request = models.EmbeddingRequest( - inputs=inputs, model=model, - encoding_format=encoding_format, + inputs=inputs, ) req = self._build_request( @@ -119,7 +116,6 @@ async def create_async( *, inputs: Union[models.Inputs, models.InputsTypedDict], model: Optional[str] = "mistral-embed", - encoding_format: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -131,7 +127,6 @@ async def create_async( :param inputs: Text to embed. :param model: ID of the model to use. - :param encoding_format: The format to return the embeddings in. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -146,9 +141,8 @@ async def create_async( base_url = server_url request = models.EmbeddingRequest( - inputs=inputs, model=model, - encoding_format=encoding_format, + inputs=inputs, ) req = self._build_request_async( diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 40e596be..c11f6c99 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext -from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, Mapping, Optional, Union @@ -14,7 +14,7 @@ class Fim(BaseSDK): def complete( self, *, - model: Nullable[str], + model: str, prompt: str, 
temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -143,7 +143,7 @@ def complete( async def complete_async( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -272,7 +272,7 @@ async def complete_async( def stream( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -407,7 +407,7 @@ def stream( async def stream_async( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index ee083f3a..197f6e1f 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -39,16 +39,6 @@ from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict from .batchjobstatus import BatchJobStatus -from .chatclassificationrequest import ( - ChatClassificationRequest, - ChatClassificationRequestInputs, - ChatClassificationRequestInputsTypedDict, - ChatClassificationRequestTypedDict, - One, - OneTypedDict, - Two, - TwoTypedDict, -) from .chatcompletionchoice import ( ChatCompletionChoice, ChatCompletionChoiceTypedDict, @@ -78,6 +68,16 @@ ChatCompletionStreamRequestToolChoiceTypedDict, ChatCompletionStreamRequestTypedDict, ) +from .chatmoderationrequest import ( + ChatModerationRequest, + ChatModerationRequestInputs, + ChatModerationRequestInputsTypedDict, + ChatModerationRequestTypedDict, + One, + OneTypedDict, + Two, + TwoTypedDict, +) from .checkpointout import CheckpointOut, CheckpointOutTypedDict from .classificationobject import ClassificationObject, ClassificationObjectTypedDict from .classificationrequest import ( @@ -115,6 +115,7 @@ DetailedJobOutStatus, DetailedJobOutTypedDict, ) +from .documenturlchunk 
import DocumentURLChunk, DocumentURLChunkTypedDict from .embeddingrequest import ( EmbeddingRequest, EmbeddingRequestTypedDict, @@ -274,7 +275,13 @@ from .metricout import MetricOut, MetricOutTypedDict from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict -from .prediction import Prediction, PredictionType, PredictionTypedDict +from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict +from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict +from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict +from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict +from .ocrresponse import OCRResponse, OCRResponseTypedDict +from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -386,10 +393,6 @@ "BatchJobsOut", "BatchJobsOutObject", "BatchJobsOutTypedDict", - "ChatClassificationRequest", - "ChatClassificationRequestInputs", - "ChatClassificationRequestInputsTypedDict", - "ChatClassificationRequestTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", @@ -406,6 +409,10 @@ "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestToolChoiceTypedDict", "ChatCompletionStreamRequestTypedDict", + "ChatModerationRequest", + "ChatModerationRequestInputs", + "ChatModerationRequestInputsTypedDict", + "ChatModerationRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", "ClassificationObject", @@ -445,6 +452,10 @@ "DetailedJobOutRepositoriesTypedDict", "DetailedJobOutStatus", "DetailedJobOutTypedDict", + "Document", + "DocumentTypedDict", + "DocumentURLChunk", + "DocumentURLChunkTypedDict", 
"EmbeddingRequest", "EmbeddingRequestTypedDict", "EmbeddingResponse", @@ -568,11 +579,22 @@ "ModelCapabilitiesTypedDict", "ModelList", "ModelListTypedDict", + "OCRImageObject", + "OCRImageObjectTypedDict", + "OCRPageDimensions", + "OCRPageDimensionsTypedDict", + "OCRPageObject", + "OCRPageObjectTypedDict", + "OCRRequest", + "OCRRequestTypedDict", + "OCRResponse", + "OCRResponseTypedDict", + "OCRUsageInfo", + "OCRUsageInfoTypedDict", "Object", "One", "OneTypedDict", "Prediction", - "PredictionType", "PredictionTypedDict", "QueryParamStatus", "ReferenceChunk", diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py index c9a28945..fd540d99 100644 --- a/src/mistralai/models/assistantmessage.py +++ b/src/mistralai/models/assistantmessage.py @@ -26,6 +26,7 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -35,6 +36,7 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/src/mistralai/models/chatclassificationrequest.py b/src/mistralai/models/chatclassificationrequest.py deleted file mode 100644 index 6f3967dc..00000000 --- a/src/mistralai/models/chatclassificationrequest.py +++ /dev/null @@ -1,113 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL -from mistralai.utils import get_discriminator -import pydantic -from pydantic import Discriminator, Tag, model_serializer -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -TwoTypedDict = TypeAliasType( - "TwoTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -Two = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -OneTypedDict = TypeAliasType( - "OneTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -One = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatClassificationRequestInputsTypedDict = TypeAliasType( - "ChatClassificationRequestInputsTypedDict", - Union[List[OneTypedDict], List[List[TwoTypedDict]]], -) -r"""Chat to classify""" - - -ChatClassificationRequestInputs = TypeAliasType( - "ChatClassificationRequestInputs", Union[List[One], List[List[Two]]] -) -r"""Chat to classify""" - - -class ChatClassificationRequestTypedDict(TypedDict): - inputs: ChatClassificationRequestInputsTypedDict - r"""Chat to classify""" - model: Nullable[str] 
- - -class ChatClassificationRequest(BaseModel): - inputs: Annotated[ChatClassificationRequestInputs, pydantic.Field(alias="input")] - r"""Chat to classify""" - - model: Nullable[str] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["model"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index a253ac4d..eaed8435 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -59,7 +59,7 @@ class ChatCompletionRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -90,7 +90,7 @@ class ChatCompletionRequestTypedDict(TypedDict): class ChatCompletionRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[Messages] @@ -152,14 +152,7 @@ def serialize_model(self, handler): "prediction", "safe_prompt", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index a1697d58..4f593c01 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -63,7 +63,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionStreamRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -93,7 +93,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): class ChatCompletionStreamRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionStreamRequestMessages] @@ -154,14 +154,7 @@ def serialize_model(self, handler): "prediction", "safe_prompt", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/chatmoderationrequest.py b/src/mistralai/models/chatmoderationrequest.py new file mode 100644 index 00000000..5b25b877 --- /dev/null +++ b/src/mistralai/models/chatmoderationrequest.py @@ -0,0 +1,86 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel +from mistralai.utils import get_discriminator +import pydantic +from pydantic import Discriminator, Tag +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +TwoTypedDict = TypeAliasType( + "TwoTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +Two = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +OneTypedDict = TypeAliasType( + "OneTypedDict", + 
Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +One = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatModerationRequestInputsTypedDict = TypeAliasType( + "ChatModerationRequestInputsTypedDict", + Union[List[OneTypedDict], List[List[TwoTypedDict]]], +) +r"""Chat to classify""" + + +ChatModerationRequestInputs = TypeAliasType( + "ChatModerationRequestInputs", Union[List[One], List[List[Two]]] +) +r"""Chat to classify""" + + +class ChatModerationRequestTypedDict(TypedDict): + model: str + inputs: ChatModerationRequestInputsTypedDict + r"""Chat to classify""" + truncate_for_context_length: NotRequired[bool] + + +class ChatModerationRequest(BaseModel): + model: str + + inputs: Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")] + r"""Chat to classify""" + + truncate_for_context_length: Optional[bool] = False diff --git a/src/mistralai/models/classificationrequest.py b/src/mistralai/models/classificationrequest.py index d18ffa61..39e25390 100644 --- a/src/mistralai/models/classificationrequest.py +++ b/src/mistralai/models/classificationrequest.py @@ -1,11 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.types import BaseModel import pydantic -from pydantic import model_serializer from typing import List, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, TypeAliasType, TypedDict ClassificationRequestInputsTypedDict = TypeAliasType( @@ -21,43 +20,15 @@ class ClassificationRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use.""" inputs: ClassificationRequestInputsTypedDict r"""Text to classify.""" - model: NotRequired[Nullable[str]] class ClassificationRequest(BaseModel): + model: str + r"""ID of the model to use.""" + inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] r"""Text to classify.""" - - model: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["model"] - nullable_fields = ["model"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py index feeda7cd..ff7d9fcf 100644 --- a/src/mistralai/models/contentchunk.py +++ b/src/mistralai/models/contentchunk.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict @@ -12,13 +13,19 @@ ContentChunkTypedDict = TypeAliasType( "ContentChunkTypedDict", - Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict], + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + ReferenceChunkTypedDict, + DocumentURLChunkTypedDict, + ], ) ContentChunk = Annotated[ Union[ Annotated[ImageURLChunk, Tag("image_url")], + Annotated[DocumentURLChunk, Tag("document_url")], Annotated[TextChunk, Tag("text")], Annotated[ReferenceChunk, Tag("reference")], ], diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py new file mode 100644 index 00000000..23622335 --- /dev/null +++ b/src/mistralai/models/documenturlchunk.py @@ -0,0 +1,62 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class DocumentURLChunkTypedDict(TypedDict): + document_url: str + type: Literal["document_url"] + document_name: NotRequired[Nullable[str]] + r"""The filename of the document""" + + +class DocumentURLChunk(BaseModel): + document_url: str + + TYPE: Annotated[ + Annotated[ + Optional[Literal["document_url"]], + AfterValidator(validate_const("document_url")), + ], + pydantic.Field(alias="type"), + ] = "document_url" + + document_name: OptionalNullable[str] = UNSET + r"""The filename of the document""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "document_name"] + nullable_fields = ["document_name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 4de8c312..b5ccd26e 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -1,9 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.types import BaseModel import pydantic -from pydantic import model_serializer from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -21,8 +20,6 @@ class EmbeddingRequestTypedDict(TypedDict): r"""Text to embed.""" model: NotRequired[str] r"""ID of the model to use.""" - encoding_format: NotRequired[Nullable[str]] - r"""The format to return the embeddings in.""" class EmbeddingRequest(BaseModel): @@ -31,36 +28,3 @@ class EmbeddingRequest(BaseModel): model: Optional[str] = "mistral-embed" r"""ID of the model to use.""" - - encoding_format: OptionalNullable[str] = UNSET - r"""The format to return the embeddings in.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["model", "encoding_format"] - nullable_fields = ["encoding_format"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py index fb72ba41..01f8b2d1 100644 --- a/src/mistralai/models/fimcompletionrequest.py +++ b/src/mistralai/models/fimcompletionrequest.py @@ -20,7 +20,7 @@ class FIMCompletionRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. 
Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -46,7 +46,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -92,7 +92,6 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ - "model", "temperature", "max_tokens", "random_seed", diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py index 5e16a170..cc4cf6e8 100644 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -20,7 +20,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -45,7 +45,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -90,7 +90,6 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ - "model", "temperature", "max_tokens", "random_seed", diff --git a/src/mistralai/models/ocrimageobject.py b/src/mistralai/models/ocrimageobject.py new file mode 100644 index 00000000..16b41e6c --- /dev/null +++ b/src/mistralai/models/ocrimageobject.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRImageObjectTypedDict(TypedDict): + id: str + r"""Image ID for extracted image in a page""" + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + image_base64: NotRequired[Nullable[str]] + r"""Base64 string of the extracted image""" + + +class OCRImageObject(BaseModel): + id: str + r"""Image ID for extracted image in a page""" + + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + + image_base64: OptionalNullable[str] = UNSET + r"""Base64 string of the extracted image""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["image_base64"] + nullable_fields = [ + "top_left_x", + "top_left_y", + "bottom_right_x", + "bottom_right_y", + "image_base64", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member 
+ + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/ocrpagedimensions.py b/src/mistralai/models/ocrpagedimensions.py new file mode 100644 index 00000000..d1aeb54d --- /dev/null +++ b/src/mistralai/models/ocrpagedimensions.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing_extensions import TypedDict + + +class OCRPageDimensionsTypedDict(TypedDict): + dpi: int + r"""Dots per inch of the page-image""" + height: int + r"""Height of the image in pixels""" + width: int + r"""Width of the image in pixels""" + + +class OCRPageDimensions(BaseModel): + dpi: int + r"""Dots per inch of the page-image""" + + height: int + r"""Height of the image in pixels""" + + width: int + r"""Width of the image in pixels""" diff --git a/src/mistralai/models/ocrpageobject.py b/src/mistralai/models/ocrpageobject.py new file mode 100644 index 00000000..c3ef8916 --- /dev/null +++ b/src/mistralai/models/ocrpageobject.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict +from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List +from typing_extensions import TypedDict + + +class OCRPageObjectTypedDict(TypedDict): + index: int + r"""The page index in a pdf document starting from 0""" + markdown: str + r"""The markdown string response of the page""" + images: List[OCRImageObjectTypedDict] + r"""List of all extracted images in the page""" + dimensions: Nullable[OCRPageDimensionsTypedDict] + r"""The dimensions of the PDF Page's screenshot image""" + + +class OCRPageObject(BaseModel): + index: int + r"""The page index in a pdf document starting from 0""" + + markdown: str + r"""The markdown string response of the page""" + + images: List[OCRImageObject] + r"""List of all extracted images in the page""" + + dimensions: Nullable[OCRPageDimensions] + r"""The dimensions of the PDF Page's screenshot image""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["dimensions"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py new file mode 100644 index 00000000..54339e9e --- /dev/null +++ b/src/mistralai/models/ocrrequest.py 
@@ -0,0 +1,97 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DocumentTypedDict = TypeAliasType( + "DocumentTypedDict", Union[ImageURLChunkTypedDict, DocumentURLChunkTypedDict] +) +r"""Document to run OCR on""" + + +Document = TypeAliasType("Document", Union[ImageURLChunk, DocumentURLChunk]) +r"""Document to run OCR on""" + + +class OCRRequestTypedDict(TypedDict): + model: Nullable[str] + document: DocumentTypedDict + r"""Document to run OCR on""" + id: NotRequired[str] + pages: NotRequired[Nullable[List[int]]] + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + include_image_base64: NotRequired[Nullable[bool]] + r"""Include image URLs in response""" + image_limit: NotRequired[Nullable[int]] + r"""Max images to extract""" + image_min_size: NotRequired[Nullable[int]] + r"""Minimum height and width of image to extract""" + + +class OCRRequest(BaseModel): + model: Nullable[str] + + document: Document + r"""Document to run OCR on""" + + id: Optional[str] = None + + pages: OptionalNullable[List[int]] = UNSET + r"""Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0""" + + include_image_base64: OptionalNullable[bool] = UNSET + r"""Include image URLs in response""" + + image_limit: OptionalNullable[int] = UNSET + r"""Max images to extract""" + + image_min_size: OptionalNullable[int] = UNSET + r"""Minimum height and width of image to extract""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + ] + nullable_fields = [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/ocrresponse.py b/src/mistralai/models/ocrresponse.py new file mode 100644 index 00000000..45fb06e3 --- /dev/null +++ b/src/mistralai/models/ocrresponse.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict +from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from mistralai.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class OCRResponseTypedDict(TypedDict): + pages: List[OCRPageObjectTypedDict] + r"""List of OCR info for pages.""" + model: str + r"""The model used to generate the OCR.""" + usage_info: OCRUsageInfoTypedDict + + +class OCRResponse(BaseModel): + pages: List[OCRPageObject] + r"""List of OCR info for pages.""" + + model: str + r"""The model used to generate the OCR.""" + + usage_info: OCRUsageInfo diff --git a/src/mistralai/models/ocrusageinfo.py b/src/mistralai/models/ocrusageinfo.py new file mode 100644 index 00000000..9dced73b --- /dev/null +++ b/src/mistralai/models/ocrusageinfo.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRUsageInfoTypedDict(TypedDict): + pages_processed: int + r"""Number of pages processed""" + doc_size_bytes: NotRequired[Nullable[int]] + r"""Document size in bytes""" + + +class OCRUsageInfo(BaseModel): + pages_processed: int + r"""Number of pages processed""" + + doc_size_bytes: OptionalNullable[int] = UNSET + r"""Document size in bytes""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["doc_size_bytes"] + nullable_fields = ["doc_size_bytes"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + 
is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/prediction.py b/src/mistralai/models/prediction.py index 63593122..7937c9d1 100644 --- a/src/mistralai/models/prediction.py +++ b/src/mistralai/models/prediction.py @@ -9,17 +9,16 @@ from typing_extensions import Annotated, NotRequired, TypedDict -PredictionType = Literal["content"] - - class PredictionTypedDict(TypedDict): - type: PredictionType + type: Literal["content"] content: NotRequired[str] class Prediction(BaseModel): TYPE: Annotated[ - Annotated[Optional[PredictionType], AfterValidator(validate_const("content"))], + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], pydantic.Field(alias="type"), ] = "content" diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py new file mode 100644 index 00000000..56c1da51 --- /dev/null +++ b/src/mistralai/ocr.py @@ -0,0 +1,238 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import Nullable, OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, List, Mapping, Optional, Union + + +class Ocr(BaseSDK): + r"""OCR API""" + + def process( + self, + *, + model: Nullable[str], + document: Union[models.Document, models.DocumentTypedDict], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.Document), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + ) + + req = self._build_request( + method="POST", + path="/v1/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + operation_id="ocr_v1_ocr_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.OCRResponse) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", 
http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def process_async( + self, + *, + model: Nullable[str], + document: Union[models.Document, models.DocumentTypedDict], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.Document), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + ) + + req = self._build_request_async( + method="POST", + path="/v1/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + operation_id="ocr_v1_ocr_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.OCRResponse) + if utils.match_response(http_res, "422", "application/json"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index e24f1581..00d8370a 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -17,6 +17,7 @@ from mistralai.fim import Fim from mistralai.fine_tuning import FineTuning from mistralai.models_ import Models +from mistralai.ocr import Ocr from mistralai.types import OptionalNullable, UNSET from typing import Any, Callable, Dict, Optional, Union, cast import weakref @@ -41,6 +42,8 @@ class Mistral(BaseSDK): r"""Embeddings API.""" classifiers: Classifiers r"""Classifiers API.""" + ocr: Ocr + r"""OCR API""" def __init__( self, @@ -139,6 +142,7 @@ def _init_sdks(self): self.agents = Agents(self.sdk_configuration) self.embeddings = Embeddings(self.sdk_configuration) self.classifiers = Classifiers(self.sdk_configuration) + self.ocr = Ocr(self.sdk_configuration) def __enter__(self): return self From f8dd2539ec37f72ec80cd2f614643f59e03e19f1 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Fri, 7 Mar 2025 10:34:06 +0100 Subject: [PATCH 107/223] Fix OCR example (#190) * Add extension * cleaning --- examples/ocr_process_from_url.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/examples/ocr_process_from_url.py b/examples/ocr_process_from_url.py index 4cb11835..55f31282 100644 --- a/examples/ocr_process_from_url.py +++ 
b/examples/ocr_process_from_url.py @@ -1,19 +1,25 @@ -from mistralai import Mistral -import os import json +import os + +from mistralai import Mistral + +MISTRAL_7B_PDF_URL = "https://arxiv.org/pdf/2310.06825.pdf" -MISTRAL_7B_PDF_URL = "https://arxiv.org/pdf/2310.06825" def main(): api_key = os.environ["MISTRAL_API_KEY"] client = Mistral(api_key=api_key) # Using an URL - pdf_response = client.ocr.process(document={ - "document_url": MISTRAL_7B_PDF_URL, - "type": "document_url", - "document_name": "mistral-7b-pdf", - }, model="mistral-ocr-latest", include_image_base64=True) + pdf_response = client.ocr.process( + document={ + "document_url": MISTRAL_7B_PDF_URL, + "type": "document_url", + "document_name": "mistral-7b-pdf", + }, + model="mistral-ocr-latest", + include_image_base64=True, + ) # Print the parsed PDF response_dict = json.loads(pdf_response.model_dump_json()) From 9c37e232667047c8d7555add60bf7017e72e58d9 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Fri, 7 Mar 2025 15:23:11 +0100 Subject: [PATCH 108/223] [CI] Run examples in the CI to look for regressions (#193) --- .github/workflows/run_example_scripts.yaml | 64 ++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 .github/workflows/run_example_scripts.yaml diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml new file mode 100644 index 00000000..db0a30aa --- /dev/null +++ b/.github/workflows/run_example_scripts.yaml @@ -0,0 +1,64 @@ +name: Running the examples + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + run_examples: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version 
}} + uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Poetry + uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + with: + version: ${{ matrix.python-version == '3.8' && '1.8.5' || '2.1.1' }} + + - name: Build and install client + run: | + touch README-PYPI.md # Create this file since the client is not built from Speakeasy + poetry build + python3 -m pip install dist/mistralai-*.whl + + - name: Set VERSION + run: | + VERSION=$(echo ${{ matrix.python-version }} | tr -d .) + echo "VERSION=$VERSION" >> $GITHUB_ENV + + - name: Set MISTRAL_API_KEY using VERSION + run: | + echo "MISTRAL_API_KEY=${{ secrets[format('CI_MISTRAL_API_KEY_PYTHON_{0}', env.VERSION)] }}" >> $GITHUB_ENV + + - name: Run the example scripts + run: | + failed=0 + for file in examples/*.py; do + if [ -f "$file" ] && [ "$file" != "examples/chatbot_with_streaming.py" ]; then + echo "Running $file" + # Do not fail if the script fails, but save it in the failed variable + python3 "$file" > /dev/null || failed=1 + fi + done + # If one of the example script failed then exit + if [ $failed -ne 0 ]; then + exit 1 + fi + env: + MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} + MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} From c9592d6e10c153754ad56c472fc912e908702c7c Mon Sep 17 00:00:00 2001 From: Mohamed Bashar Touil Date: Fri, 7 Mar 2025 15:25:05 +0100 Subject: [PATCH 109/223] fix TypeError: 'type' object is not subscriptable when importing component from mistralai module (#192) --- src/mistralai/extra/utils/response_format.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/mistralai/extra/utils/response_format.py b/src/mistralai/extra/utils/response_format.py index f9ded3ff..67e15912 100644 --- a/src/mistralai/extra/utils/response_format.py +++ b/src/mistralai/extra/utils/response_format.py @@ -1,5 +1,5 @@ from pydantic import BaseModel -from typing import 
TypeVar, Any, Type +from typing import TypeVar, Any, Type, Dict from ...models import JSONSchema, ResponseFormat from ._pydantic_helper import rec_strict_json_schema @@ -7,7 +7,7 @@ def response_format_from_pydantic_model( - model: type[CustomPydanticModel], + model: Type[CustomPydanticModel], ) -> ResponseFormat: """Generate a strict JSON schema from a pydantic model.""" model_schema = rec_strict_json_schema(model.model_json_schema()) @@ -18,7 +18,7 @@ def response_format_from_pydantic_model( def pydantic_model_from_json( - json_data: dict[str, Any], pydantic_model: Type[CustomPydanticModel] + json_data: Dict[str, Any], pydantic_model: Type[CustomPydanticModel] ) -> CustomPydanticModel: """Parse a JSON schema into a pydantic model.""" return pydantic_model.model_validate(json_data) From 263e5f5ddb79414162f713848148e1003b2c10db Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Mon, 10 Mar 2025 11:53:53 +0100 Subject: [PATCH 110/223] [CI] Fix the CI for Python 3.8 (#198) * [CI] Fix the CI for Python 3.8 * Fix the non async example as well --- examples/async_structured_outputs.py | 13 +++++++++---- examples/structured_outputs.py | 26 +++++++++++++++++++------- 2 files changed, 28 insertions(+), 11 deletions(-) diff --git a/examples/async_structured_outputs.py b/examples/async_structured_outputs.py index 4fafc991..560934e9 100644 --- a/examples/async_structured_outputs.py +++ b/examples/async_structured_outputs.py @@ -5,9 +5,10 @@ from pydantic import BaseModel from mistralai import Mistral +from typing import List -async def main(): +async def main(): api_key = os.environ["MISTRAL_API_KEY"] client = Mistral(api_key=api_key) @@ -16,18 +17,22 @@ class Explanation(BaseModel): output: str class MathDemonstration(BaseModel): - steps: list[Explanation] + steps: List[Explanation] final_answer: str chat_response = await client.chat.parse_async( model="mistral-large-2411", messages=[ - {"role": "system", "content": "You are 
a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning."}, + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, {"role": "user", "content": "How can I solve 8x + 7 = -23"}, ], - response_format = MathDemonstration + response_format=MathDemonstration, ) print(chat_response.choices[0].message.parsed) + if __name__ == "__main__": asyncio.run(main()) diff --git a/examples/structured_outputs.py b/examples/structured_outputs.py index 15dc1bff..299f7509 100644 --- a/examples/structured_outputs.py +++ b/examples/structured_outputs.py @@ -5,6 +5,9 @@ from mistralai import Mistral +from typing import List + + def main(): api_key = os.environ["MISTRAL_API_KEY"] client = Mistral(api_key=api_key) @@ -14,32 +17,41 @@ class Explanation(BaseModel): output: str class MathDemonstration(BaseModel): - steps: list[Explanation] + steps: List[Explanation] final_answer: str print("Using the .parse method to parse the response into a Pydantic model:\n") chat_response = client.chat.parse( model="mistral-large-latest", messages=[ - {"role": "system", "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning."}, + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. 
For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, {"role": "user", "content": "How can I solve 8x + 7 = -23"}, ], - response_format = MathDemonstration + response_format=MathDemonstration, ) print(chat_response.choices[0].message.parsed) # Or with the streaming API - print("\nUsing the .parse_stream method to stream back the response into a JSON Schema:\n") + print( + "\nUsing the .parse_stream method to stream back the response into a JSON Schema:\n" + ) with client.chat.parse_stream( model="mistral-large-latest", messages=[ - {"role": "system", "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning."}, + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. 
For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, {"role": "user", "content": "How can I solve 8x + 7 = -23"}, ], - response_format=MathDemonstration + response_format=MathDemonstration, ) as stream: for chunk in stream: print(chunk.data.choices[0].delta.content, end="") + if __name__ == "__main__": - main() \ No newline at end of file + main() From 32ff73893b9156f290b447da2a191ae9be4e184d Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Fri, 14 Mar 2025 10:24:19 +0100 Subject: [PATCH 111/223] pin to latest (#200) --- .speakeasy/workflow.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 00aefc99..9f22c9f6 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,5 @@ workflowVersion: 1.0.0 -speakeasyVersion: 1.477.0 +speakeasyVersion: 1.517.3 sources: mistral-azure-source: inputs: From 81f02dd942fb404d1afb8614a2f21e764a0e9487 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Fri, 14 Mar 2025 15:55:55 +0100 Subject: [PATCH 112/223] chore: update pyproject (#201) * update pyproject * remove 3.8 --- .github/workflows/run_example_scripts.yaml | 4 +- pyproject.toml | 43 ++++++++++++---------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index db0a30aa..b3cc08ac 100644 --- a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -14,7 +14,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] + python-version: [ '3.9', '3.10', '3.11', '3.12', '3.13'] steps: - name: Checkout code @@ -27,8 +27,6 @@ jobs: - name: Install Poetry uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 - with: - 
version: ${{ matrix.python-version == '3.8' && '1.8.5' || '2.1.1' }} - name: Build and install client run: | diff --git a/pyproject.toml b/pyproject.toml index 8eec1a78..5e7ddb8c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,19 @@ -[tool.poetry] +[project] name = "mistralai" version = "1.5.1" description = "Python Client SDK for the Mistral AI API." -authors = ["Mistral"] +authors = [{ name = "Mistral" },] readme = "README-PYPI.md" +requires-python = ">=3.9" +dependencies = [ + "eval-type-backport >=0.2.0", + "httpx >=0.28.1", + "pydantic >=2.10.3", + "python-dateutil >=2.8.2", + "typing-inspection >=0.4.0", +] + +[tool.poetry] repository = "https://github.com/mistralai/client-python.git" packages = [ { include = "mistralai", from = "src" }, @@ -18,32 +28,25 @@ include = ["py.typed", "src/mistralai/py.typed"] [virtualenvs] in-project = true -[tool.poetry.dependencies] -python = ">=3.8" -eval-type-backport = ">=0.2.0" -httpx = ">=0.27.0" -jsonpath-python = ">=1.0.6" -pydantic = ">=2.9.0" -python-dateutil = ">=2.8.2" -typing-inspect = ">=0.9.0" -google-auth = { version = ">=2.27.0", optional = true } -requests = { version = ">=2.32.3", optional = true } - [tool.poetry.group.dev.dependencies] -mypy = ">=1.13.0" -pylint = ">=3.2.3" -pytest = ">=8.2.2" -pytest-asyncio = ">=0.23.7" -types-python-dateutil = ">=2.9.0.20240316" +mypy = "==1.14.1" +pylint = "==3.2.3" +pytest = "^8.2.2" +pytest-asyncio = "^0.23.7" +types-python-dateutil = "^2.9.0.20240316" -[tool.poetry.extras] -gcp = ["google-auth", "requests"] +[project.optional-dependencies] +gcp = [ + "google-auth >=2.27.0", + "requests >=2.32.3" +] [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] From 1741021792af56d2f940b76dfd286d8f386e8894 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" 
<41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 17 Mar 2025 13:22:56 +0100 Subject: [PATCH 113/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.5.2-rc.1=20(#202)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.517.3 * bump gcp * fix gcp .genignore * bump gcp * fix fix --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .speakeasy/gen.lock | 45 +-- .speakeasy/gen.yaml | 5 +- .speakeasy/workflow.lock | 44 +-- README.md | 73 +++- RELEASES.md | 12 +- USAGE.md | 24 +- docs/models/documenturlchunk.md | 10 +- docs/models/documenturlchunktype.md | 8 + docs/models/embeddingrequest.md | 4 +- docs/models/filepurpose.md | 3 +- docs/sdks/agents/README.md | 6 +- docs/sdks/chat/README.md | 6 +- docs/sdks/classifiers/README.md | 4 +- docs/sdks/embeddings/README.md | 7 +- docs/sdks/files/README.md | 10 +- docs/sdks/fim/README.md | 6 +- docs/sdks/jobs/README.md | 11 +- docs/sdks/mistraljobs/README.md | 8 +- docs/sdks/models/README.md | 6 + docs/sdks/ocr/README.md | 1 + packages/mistralai_azure/.speakeasy/gen.lock | 33 +- packages/mistralai_azure/.speakeasy/gen.yaml | 4 + .../docs/models/assistantmessage.md | 12 +- .../docs/models/chatcompletionrequest.md | 3 +- .../models/chatcompletionstreamrequest.md | 3 +- .../mistralai_azure/docs/models/function.md | 3 +- .../mistralai_azure/docs/models/jsonschema.md | 11 + .../mistralai_azure/docs/models/prediction.md | 9 + .../docs/models/responseformat.md | 3 +- .../docs/models/responseformats.md | 3 +- .../mistralai_azure/docs/models/toolcall.md | 3 +- packages/mistralai_azure/pylintrc | 7 +- packages/mistralai_azure/pyproject.toml | 26 +- .../{prepare-readme.py => prepare_readme.py} | 0 packages/mistralai_azure/scripts/publish.sh | 2 +- .../src/mistralai_azure/__init__.py | 11 +- .../src/mistralai_azure/_hooks/types.py | 18 +- 
.../src/mistralai_azure/_version.py | 3 + .../src/mistralai_azure/basesdk.py | 8 + .../src/mistralai_azure/chat.py | 108 ++++-- .../src/mistralai_azure/httpclient.py | 52 +++ .../src/mistralai_azure/models/__init__.py | 7 + .../models/assistantmessage.py | 2 + .../models/chatcompletionrequest.py | 18 +- .../models/chatcompletionstreamrequest.py | 18 +- .../src/mistralai_azure/models/function.py | 3 + .../src/mistralai_azure/models/jsonschema.py | 61 ++++ .../src/mistralai_azure/models/prediction.py | 25 ++ .../mistralai_azure/models/responseformat.py | 43 ++- .../mistralai_azure/models/responseformats.py | 2 +- .../src/mistralai_azure/models/toolcall.py | 3 + .../src/mistralai_azure/sdk.py | 70 +++- .../src/mistralai_azure/sdkconfiguration.py | 20 +- .../src/mistralai_azure/utils/__init__.py | 2 + .../src/mistralai_azure/utils/serializers.py | 16 +- .../src/mistralai_azure/utils/values.py | 5 +- packages/mistralai_gcp/.speakeasy/gen.lock | 40 ++- packages/mistralai_gcp/.speakeasy/gen.yaml | 4 + .../docs/models/assistantmessage.md | 12 +- .../docs/models/chatcompletionrequest.md | 5 +- .../models/chatcompletionstreamrequest.md | 5 +- .../docs/models/fimcompletionrequest.md | 2 +- .../docs/models/fimcompletionstreamrequest.md | 2 +- .../mistralai_gcp/docs/models/function.md | 3 +- .../mistralai_gcp/docs/models/jsonschema.md | 11 + .../mistralai_gcp/docs/models/prediction.md | 9 + .../docs/models/responseformat.md | 3 +- .../docs/models/responseformats.md | 3 +- .../mistralai_gcp/docs/models/toolcall.md | 3 +- packages/mistralai_gcp/pylintrc | 7 +- packages/mistralai_gcp/pyproject.toml | 30 +- .../{prepare-readme.py => prepare_readme.py} | 0 packages/mistralai_gcp/scripts/publish.sh | 2 +- .../src/mistralai_gcp/__init__.py | 11 +- .../src/mistralai_gcp/_hooks/types.py | 18 +- .../src/mistralai_gcp/_version.py | 3 + .../src/mistralai_gcp/basesdk.py | 8 + .../mistralai_gcp/src/mistralai_gcp/chat.py | 110 ++++-- .../mistralai_gcp/src/mistralai_gcp/fim.py | 82 +++-- 
.../src/mistralai_gcp/httpclient.py | 52 +++ .../src/mistralai_gcp/models/__init__.py | 7 + .../mistralai_gcp/models/assistantmessage.py | 2 + .../models/chatcompletionrequest.py | 18 +- .../models/chatcompletionstreamrequest.py | 18 +- .../models/fimcompletionrequest.py | 5 +- .../models/fimcompletionstreamrequest.py | 5 +- .../src/mistralai_gcp/models/function.py | 3 + .../src/mistralai_gcp/models/jsonschema.py | 61 ++++ .../src/mistralai_gcp/models/prediction.py | 25 ++ .../mistralai_gcp/models/responseformat.py | 43 ++- .../mistralai_gcp/models/responseformats.py | 2 +- .../src/mistralai_gcp/models/toolcall.py | 3 + .../mistralai_gcp/src/mistralai_gcp/sdk.py | 82 ++++- .../src/mistralai_gcp/sdkconfiguration.py | 20 +- .../src/mistralai_gcp/utils/__init__.py | 2 + .../src/mistralai_gcp/utils/serializers.py | 16 +- .../src/mistralai_gcp/utils/values.py | 5 +- poetry.lock | 329 +++++++++--------- pylintrc | 5 +- pyproject.toml | 2 +- .../{prepare-readme.py => prepare_readme.py} | 8 +- scripts/publish.sh | 2 +- src/mistralai/_hooks/types.py | 18 +- src/mistralai/_version.py | 6 +- src/mistralai/agents.py | 44 ++- src/mistralai/basesdk.py | 8 + src/mistralai/chat.py | 44 ++- src/mistralai/classifiers.py | 44 ++- src/mistralai/embeddings.py | 30 +- src/mistralai/files.py | 36 ++ src/mistralai/fim.py | 44 ++- src/mistralai/httpclient.py | 6 +- src/mistralai/jobs.py | 30 ++ src/mistralai/mistral_jobs.py | 24 ++ src/mistralai/models/__init__.py | 7 +- src/mistralai/models/documenturlchunk.py | 22 +- src/mistralai/models/embeddingrequest.py | 14 +- src/mistralai/models/filepurpose.py | 2 +- src/mistralai/models_.py | 84 ++++- src/mistralai/ocr.py | 22 +- src/mistralai/sdk.py | 22 +- src/mistralai/sdkconfiguration.py | 6 +- src/mistralai/utils/__init__.py | 2 + src/mistralai/utils/serializers.py | 16 +- src/mistralai/utils/values.py | 5 +- 125 files changed, 1872 insertions(+), 662 deletions(-) create mode 100644 docs/models/documenturlchunktype.md create mode 100644 
packages/mistralai_azure/docs/models/jsonschema.md create mode 100644 packages/mistralai_azure/docs/models/prediction.md rename packages/mistralai_azure/scripts/{prepare-readme.py => prepare_readme.py} (100%) create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/prediction.py create mode 100644 packages/mistralai_gcp/docs/models/jsonschema.md create mode 100644 packages/mistralai_gcp/docs/models/prediction.md rename packages/mistralai_gcp/scripts/{prepare-readme.py => prepare_readme.py} (100%) create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py rename scripts/{prepare-readme.py => prepare_readme.py} (84%) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 6eb1248e..59249dd4 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 81cc8be96362e2f1cb145b08a2e6c4fa + docChecksum: 406e00c323dba0db26d6994620926af4 docVersion: 0.0.2 - speakeasyVersion: 1.477.0 - generationVersion: 2.497.0 - releaseVersion: 1.5.1 - configChecksum: ef3439d915c5d16e7cfb88fe2bf94907 + speakeasyVersion: 1.517.3 + generationVersion: 2.548.6 + releaseVersion: 1.5.2-rc.1 + configChecksum: 321ba0a46c45c1b64b391afe2abe901c repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,7 +14,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.10.5 + core: 5.12.3 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 @@ -22,19 +22,19 @@ features: envVarSecurityUsage: 0.3.2 examples: 3.0.1 flatRequests: 1.0.1 - flattening: 3.1.0 - globalSecurity: 3.0.2 + 
flattening: 3.1.1 + globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 nameOverrides: 3.0.1 - nullables: 1.0.0 + nullables: 1.0.1 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.0 + sdkHooks: 1.0.1 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -101,6 +101,7 @@ generatedFiles: - docs/models/detailedjoboutstatus.md - docs/models/document.md - docs/models/documenturlchunk.md + - docs/models/documenturlchunktype.md - docs/models/embeddingrequest.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md @@ -238,7 +239,7 @@ generatedFiles: - poetry.toml - py.typed - pylintrc - - scripts/prepare-readme.py + - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai/__init__.py - src/mistralai/_hooks/__init__.py @@ -448,7 +449,7 @@ examples: files_api_routes_upload_file: speakeasy-default-files-api-routes-upload-file: requestBody: - multipart/form-data: {"file": {"": "x-file: example.file"}} + multipart/form-data: {"file": {}} responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_request", "source": "repository"} @@ -504,14 +505,14 @@ examples: created_by_me: false responses: "200": - application/json: {"total": 768578} + application/json: {"object": "list", "total": 768578} jobs_api_routes_fine_tuning_create_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: requestBody: application/json: {"model": "Fiesta", "hyperparameters": {"learning_rate": 0.0001}} responses: "200": - application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, 
"training_steps": 10} + application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10, "object": "job.metadata"} jobs_api_routes_fine_tuning_get_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: parameters: @@ -519,7 +520,7 @@ examples: job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} + application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_cancel_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: parameters: @@ -527,7 +528,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, "modified_at": 342605, "training_files": 
["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} + application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, "modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_start_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: parameters: @@ -535,7 +536,7 @@ examples: job_id: "0bf0f9e6-c3e5-4d61-aac8-0e36dcac0dfc" responses: "200": - application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, {"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} + application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, 
{"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} jobs_api_routes_batch_get_batch_jobs: speakeasy-default-jobs-api-routes-batch-get-batch-jobs: parameters: @@ -545,14 +546,14 @@ examples: created_by_me: false responses: "200": - application/json: {"total": 768578} + application/json: {"object": "list", "total": 768578} jobs_api_routes_batch_create_batch_job: speakeasy-default-jobs-api-routes-batch-create-batch-job: requestBody: application/json: {"input_files": ["a621cf02-1cd9-4cf5-8403-315211a509a3"], "endpoint": "/v1/fim/completions", "model": "2", "timeout_hours": 24} responses: "200": - application/json: {"id": "", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} + application/json: {"id": "", "object": "batch", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} jobs_api_routes_batch_get_batch_job: speakeasy-default-jobs-api-routes-batch-get-batch-job: parameters: @@ -560,7 +561,7 @@ examples: job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 
1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} + application/json: {"id": "", "object": "batch", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} jobs_api_routes_batch_cancel_batch_job: speakeasy-default-jobs-api-routes-batch-cancel-batch-job: parameters: @@ -568,7 +569,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} + application/json: {"id": "", "object": "batch", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: @@ -650,7 +651,7 @@ examples: ocr_v1_ocr_post: speakeasy-default-ocr-v1-ocr-post: requestBody: - application/json: {"model": "Focus", "document": {"document_url": 
"https://dutiful-horst.org"}} + application/json: {"model": "Focus", "document": {"document_url": "https://dutiful-horst.org", "type": "document_url"}} responses: "200": application/json: {"pages": [], "model": "A4", "usage_info": {"pages_processed": 442675}} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index f020895b..666982eb 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -7,13 +7,15 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true + nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true + securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.5.1 + version: 1.5.2-rc.1 additionalDependencies: dev: pytest: ^8.2.2 @@ -44,5 +46,6 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai + pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 21228dc5..ae45ec0e 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,49 +1,49 @@ -speakeasyVersion: 1.477.0 +speakeasyVersion: 1.517.3 sources: mistral-azure-source: - sourceNamespace: mistral-azure-source - sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 - sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 + sourceNamespace: mistral-openapi-azure + sourceRevisionDigest: sha256:b9be39effd24c50514ea00965c7b7089b6ae09d7aaacfb5f9eeafe465f131a62 + sourceBlobDigest: sha256:38505cbdf426ed228e4cce7667721237ddb32f72fb7df8f26c289082a568d7cb tags: - latest mistral-google-cloud-source: - sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 - sourceBlobDigest: 
sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 + sourceNamespace: mistral-openapi-google-cloud + sourceRevisionDigest: sha256:f4b8b9311a39f5c62deaed92c473eff44f139d784f122fa3c9e41d5236c96cd7 + sourceBlobDigest: sha256:cd5c242a598ba671e83fc7572ce7def4486cba035d2729d61cf7c64189a6fd89 tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 - sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b + sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 + sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 tags: - latest - - speakeasy-sdk-regen-1741279153 + - speakeasy-sdk-regen-1741964260 targets: mistralai-azure-sdk: source: mistral-azure-source - sourceNamespace: mistral-azure-source - sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 - sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 + sourceNamespace: mistral-openapi-azure + sourceRevisionDigest: sha256:b9be39effd24c50514ea00965c7b7089b6ae09d7aaacfb5f9eeafe465f131a62 + sourceBlobDigest: sha256:38505cbdf426ed228e4cce7667721237ddb32f72fb7df8f26c289082a568d7cb codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:28356dba7ea28436035e20182b8ce4d1951e19503b5accef6a128d860361e5c0 + codeSamplesRevisionDigest: sha256:6a21f785e0bc1861ae9bf237939c6252d4589f4b5ece596938bad54b3f5c1ac9 mistralai-gcp-sdk: source: mistral-google-cloud-source - sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 - sourceBlobDigest: sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 + sourceNamespace: mistral-openapi-google-cloud + sourceRevisionDigest: 
sha256:f4b8b9311a39f5c62deaed92c473eff44f139d784f122fa3c9e41d5236c96cd7 + sourceBlobDigest: sha256:cd5c242a598ba671e83fc7572ce7def4486cba035d2729d61cf7c64189a6fd89 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:7de23f90d6543356f310f46375bef4db7f43eb22b2871ad4dfe1b7d0cc875bb4 + codeSamplesRevisionDigest: sha256:a0d0890a8e87eac8ade9832883c7a129a749142696e01b1e611cf2d97fbeed9d mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 - sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b + sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 + sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:ba10be893f3e6dae275eb8fb09a688f3652de81eebd314427f28c274800edc48 + codeSamplesRevisionDigest: sha256:2a2f61e5c5e1eaad48e6a74164bd6249855c3ad7976ef83068199d57ebcdd055 workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.477.0 + speakeasyVersion: 1.517.3 sources: mistral-azure-source: inputs: diff --git a/README.md b/README.md index fd31bcd8..fd17da18 100644 --- a/README.md +++ b/README.md @@ -75,6 +75,37 @@ pip install mistralai ```bash poetry add mistralai ``` + +### Shell and script usage with `uv` + +You can use this SDK in a Python shell with [uv](https://docs.astral.sh/uv/) and the `uvx` command that comes with it like so: + +```shell +uvx --from mistralai python +``` + +It's also possible to write a standalone Python script without needing to set up a whole project like so: + +```python +#!/usr/bin/env -S uv run --script +# /// script +# requires-python = ">=3.9" +# dependencies = [ +# "mistralai", +# ] +# /// + +from mistralai import Mistral + +sdk = Mistral( + # SDK 
arguments +) + +# Rest of script here... +``` + +Once that is saved to a file, you can run it with `uv run script.py` where +`script.py` can be replaced with the actual file name. @@ -89,6 +120,7 @@ This example shows how to create chat completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -98,7 +130,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -114,6 +146,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -123,7 +156,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -140,6 +173,7 @@ This example shows how to upload a file. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -163,6 +197,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -187,6 +222,7 @@ This example shows how to create agents completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -196,7 +232,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -212,6 +248,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -221,7 +258,7 @@ async def main(): "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -238,14 +275,15 @@ This example shows how to create embedding request. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(inputs=[ + res = mistral.embeddings.create(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) @@ -261,14 +299,15 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.embeddings.create_async(inputs=[ + res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) @@ -465,6 +504,7 @@ underlying connection when the context is exited. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -474,7 +514,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], stream=True) + ]) with res as event_stream: for event in event_stream: @@ -502,6 +542,7 @@ Certain SDK methods accept file objects as part of a request body or multi-part from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -528,6 +569,7 @@ from mistralai import Mistral from mistralai.utils import BackoffStrategy, RetryConfig import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -546,6 +588,7 @@ from mistralai import Mistral from mistralai.utils import BackoffStrategy, RetryConfig import os + with Mistral( retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -586,6 +629,7 @@ When custom error responses are specified for an operation, the SDK may also rai from mistralai import Mistral, models import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -613,9 +657,9 @@ with Mistral( You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. 
This table lists the names associated with the available servers: -| Name | Server | -| ---- | ------------------------ | -| `eu` | `https://api.mistral.ai` | +| Name | Server | Description | +| ---- | ------------------------ | -------------------- | +| `eu` | `https://api.mistral.ai` | EU Production server | #### Example @@ -623,6 +667,7 @@ You can override the default server globally by passing a server name to the `se from mistralai import Mistral import os + with Mistral( server="eu", api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -642,6 +687,7 @@ The default server can also be overridden globally by passing a URL to the `serv from mistralai import Mistral import os + with Mistral( server_url="https://api.mistral.ai", api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -752,6 +798,7 @@ To authenticate with the API the `api_key` parameter must be set when initializi from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -775,6 +822,7 @@ The `Mistral` class implements the context manager protocol and registers a fina from mistralai import Mistral import os def main(): + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -783,6 +831,7 @@ def main(): # Or when using async: async def amain(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/RELEASES.md b/RELEASES.md index d7b657bb..0ad3429c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -168,4 +168,14 @@ Based on: ### Generated - [python v1.5.1] . ### Releases -- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . \ No newline at end of file +- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . 
+ +## 2025-03-14 15:08:57 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.5.2-rc.1] . +### Releases +- [PyPI v1.5.2-rc.1] https://pypi.org/project/mistralai/1.5.2-rc.1 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index 3e1cae03..fa3a77de 100644 --- a/USAGE.md +++ b/USAGE.md @@ -8,6 +8,7 @@ This example shows how to create chat completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -17,7 +18,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -33,6 +34,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -42,7 +44,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -59,6 +61,7 @@ This example shows how to upload a file. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -82,6 +85,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -106,6 +110,7 @@ This example shows how to create agents completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -115,7 +120,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -131,6 +136,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -140,7 +146,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -157,14 +163,15 @@ This example shows how to create embedding request. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(inputs=[ + res = mistral.embeddings.create(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) @@ -180,14 +187,15 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.embeddings.create_async(inputs=[ + res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) diff --git a/docs/models/documenturlchunk.md b/docs/models/documenturlchunk.md index 33785c34..6c9a5b4d 100644 --- a/docs/models/documenturlchunk.md +++ b/docs/models/documenturlchunk.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | -| `document_url` | *str* | :heavy_check_mark: | N/A | -| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | -| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file +| Field | Type | 
Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | +| `type` | [Optional[models.DocumentURLChunkType]](../models/documenturlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/documenturlchunktype.md b/docs/models/documenturlchunktype.md new file mode 100644 index 00000000..32e1fa9e --- /dev/null +++ b/docs/models/documenturlchunktype.md @@ -0,0 +1,8 @@ +# DocumentURLChunkType + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `DOCUMENT_URL` | document_url | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 07ab903a..3bdd79e8 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | +| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | \ No newline at end of file diff --git a/docs/models/filepurpose.md b/docs/models/filepurpose.md index 5152aeeb..14cab13e 100644 --- a/docs/models/filepurpose.md +++ b/docs/models/filepurpose.md @@ -6,4 +6,5 @@ | Name | Value | | ----------- | ----------- | | `FINE_TUNE` | fine-tune | -| `BATCH` | batch | \ No newline at end of file +| `BATCH` | batch | +| `OCR` | ocr | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 00ca33ac..1e6f9069 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -20,6 +20,7 @@ Agents Completion from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -29,7 +30,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -76,6 +77,7 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -85,7 +87,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=True) + ], agent_id="") with res as event_stream: for event in event_stream: diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 38e16adc..8cac6db4 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -20,6 +20,7 @@ Chat Completion from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -29,7 +30,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -79,6 +80,7 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -88,7 +90,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=True) + ]) with res as event_stream: for event in event_stream: diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 6bcc68a9..6c7127af 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -20,6 +20,7 @@ Moderations from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -62,6 +63,7 @@ Moderations Chat from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -95,7 +97,7 @@ with Mistral( "role": "assistant", }, ], - ], truncate_for_context_length=False) + ]) # Handle response print(res) diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 44fae4ac..8c386439 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -19,14 +19,15 @@ Embeddings from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(inputs=[ + res = mistral.embeddings.create(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) @@ -37,8 +38,8 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | 
------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | | `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 8f01a668..befa4d67 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -28,6 +28,7 @@ Please contact us if you need to increase these storage limits. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -70,11 +71,12 @@ Returns a list of files that belong to the user's organization. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.list(page=0, page_size=100) + res = mistral.files.list() # Handle response print(res) @@ -113,6 +115,7 @@ Returns information about a specific file. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -151,6 +154,7 @@ Delete a file. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -189,6 +193,7 @@ Download a file from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -227,11 +232,12 @@ Get Signed Url from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.get_signed_url(file_id="", expiry=24) + res = mistral.files.get_signed_url(file_id="") # Handle response print(res) diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 28de6c02..c70b3da4 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -20,11 +20,12 @@ FIM completion. 
from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.complete(model="codestral-2405", prompt="def", top_p=1, stream=False, suffix="return a+b") + res = mistral.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") # Handle response print(res) @@ -68,11 +69,12 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.stream(model="codestral-2405", prompt="def", top_p=1, stream=True, suffix="return a+b") + res = mistral.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") with res as event_stream: for event in event_stream: diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 06605877..ecb11def 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -21,11 +21,12 @@ Get a list of fine-tuning jobs for your organization and user. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.list(page=0, page_size=100, created_by_me=False) + res = mistral.fine_tuning.jobs.list() # Handle response print(res) @@ -67,13 +68,12 @@ Create a new fine-tuning job, it will be queued for processing. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={ - "learning_rate": 0.0001, - }) + res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={}) # Handle response print(res) @@ -114,6 +114,7 @@ Get a fine-tuned job details by its UUID. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -152,6 +153,7 @@ Request the cancellation of a fine tuning job. 
from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -190,6 +192,7 @@ Request the start of a validated fine tuning job. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 56a7f60b..5b80a45b 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -20,11 +20,12 @@ Get a list of batch jobs for your organization and user. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False) + res = mistral.batch.jobs.list() # Handle response print(res) @@ -64,13 +65,14 @@ Create a new batch job, it will be queued for processing. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: res = mistral.batch.jobs.create(input_files=[ "a621cf02-1cd9-4cf5-8403-315211a509a3", - ], endpoint="/v1/fim/completions", model="2", timeout_hours=24) + ], endpoint="/v1/fim/completions", model="2") # Handle response print(res) @@ -108,6 +110,7 @@ Get a batch job details by its UUID. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -146,6 +149,7 @@ Request the cancellation of a batch job. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index e048d20e..dd7baf50 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -24,6 +24,7 @@ List all models available to the user. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -62,6 +63,7 @@ Retrieve a model information. 
from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -101,6 +103,7 @@ Delete a fine-tuned model. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -140,6 +143,7 @@ Update a model name or description. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -180,6 +184,7 @@ Archive a fine-tuned model. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -218,6 +223,7 @@ Un-archive a fine-tuned model. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 54f8af96..61988ea6 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -19,6 +19,7 @@ OCR from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 16a5196b..8b8ef6ae 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,34 +1,34 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 4da7c33f650ddf206c58fa6c941d347f + docChecksum: 3b99cf44527c23ce3298616720b00a62 docVersion: 0.0.2 - speakeasyVersion: 1.462.2 - generationVersion: 2.486.1 + speakeasyVersion: 1.517.3 + generationVersion: 2.548.6 releaseVersion: 1.2.6 - configChecksum: cc2ac1769a87215774fce0075ff2e77d + configChecksum: 1a623455d46169b8a271df9cd9d58d86 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.7.4 + core: 5.12.3 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.0 + examples: 3.0.1 flatRequests: 1.0.1 - globalSecurity: 
3.0.2 + globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 - nullables: 1.0.0 + nullables: 1.0.1 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.0 + sdkHooks: 1.0.1 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -61,8 +61,10 @@ generatedFiles: - docs/models/functioncall.md - docs/models/functionname.md - docs/models/httpvalidationerror.md + - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md + - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/responseformat.md @@ -92,7 +94,7 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare-readme.py + - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai_azure/__init__.py - src/mistralai_azure/_hooks/__init__.py @@ -117,6 +119,8 @@ generatedFiles: - src/mistralai_azure/models/functioncall.py - src/mistralai_azure/models/functionname.py - src/mistralai_azure/models/httpvalidationerror.py + - src/mistralai_azure/models/jsonschema.py + - src/mistralai_azure/models/prediction.py - src/mistralai_azure/models/referencechunk.py - src/mistralai_azure/models/responseformat.py - src/mistralai_azure/models/responseformats.py @@ -134,6 +138,7 @@ generatedFiles: - src/mistralai_azure/models/usermessage.py - src/mistralai_azure/models/validationerror.py - src/mistralai_azure/py.typed + - src/mistralai_azure/sdk.py - src/mistralai_azure/sdkconfiguration.py - src/mistralai_azure/types/__init__.py - src/mistralai_azure/types/basemodel.py @@ -156,17 +161,19 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} responses: "422": application/json: {} + "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} +examplesVersion: 1.0.0 generatedTests: {} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index 17344d9b..04ed562b 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -7,8 +7,10 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true + nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true + securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -23,6 +25,7 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: 
SDKError description: Python Client SDK for the Mistral AI API in Azure. + enableCustomCodeRegions: false enumFormat: union fixFlags: responseRequiredSep2024: false @@ -42,5 +45,6 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai_azure + pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/mistralai_azure/docs/models/assistantmessage.md index 53f1cc76..3d0bd90b 100644 --- a/packages/mistralai_azure/docs/models/assistantmessage.md +++ b/packages/mistralai_azure/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index 68cef4a1..eb43a4da 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | @@ -19,4 +19,5 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
| | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index c9c5c87b..78442736 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | @@ -19,4 +19,5 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
| | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/mistralai_azure/docs/models/function.md index 8af398f5..a166b7bb 100644 --- a/packages/mistralai_azure/docs/models/function.md +++ b/packages/mistralai_azure/docs/models/function.md @@ -7,4 +7,5 @@ | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | | `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/jsonschema.md b/packages/mistralai_azure/docs/models/jsonschema.md new file mode 100644 index 00000000..ae387867 --- /dev/null +++ b/packages/mistralai_azure/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/prediction.md b/packages/mistralai_azure/docs/models/prediction.md new file mode 100644 index 00000000..86e9c396 --- /dev/null 
+++ b/packages/mistralai_azure/docs/models/prediction.md @@ -0,0 +1,9 @@ +# Prediction + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/mistralai_azure/docs/models/responseformat.md index 9c627f55..23a1641b 100644 --- a/packages/mistralai_azure/docs/models/responseformat.md +++ b/packages/mistralai_azure/docs/models/responseformat.md @@ -5,4 +5,5 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | 
[Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/mistralai_azure/docs/models/responseformats.md index ce35fbb3..06886afe 100644 --- a/packages/mistralai_azure/docs/models/responseformats.md +++ b/packages/mistralai_azure/docs/models/responseformats.md @@ -8,4 +8,5 @@ An object specifying the format that the model must output. 
Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | \ No newline at end of file +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/mistralai_azure/docs/models/toolcall.md index 7aca5fc9..574be1ea 100644 --- a/packages/mistralai_azure/docs/models/toolcall.md +++ b/packages/mistralai_azure/docs/models/toolcall.md @@ -7,4 +7,5 @@ | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index 393d0f70..266bc815 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.8 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no @@ -455,7 +455,10 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import # Enable the message, report, category or checker with the given id(s). 
You can # either give multiple identifier separated by comma (,) or put this option diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 5c227f66..bf120e67 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,9 +1,19 @@ -[tool.poetry] +[project] name = "mistralai_azure" version = "1.2.6" description = "Python Client SDK for the Mistral AI API in Azure." -authors = ["Mistral",] +authors = [{ name = "Mistral" },] readme = "README-PYPI.md" +requires-python = ">=3.9" +dependencies = [ + "eval-type-backport >=0.2.0", + "httpx >=0.28.1", + "pydantic >=2.10.3", + "python-dateutil >=2.8.2", + "typing-inspection >=0.4.0", +] + +[tool.poetry] packages = [ { include = "mistralai_azure", from = "src" } ] @@ -15,17 +25,8 @@ include = ["py.typed", "src/mistralai_azure/py.typed"] [virtualenvs] in-project = true -[tool.poetry.dependencies] -python = "^3.8" -eval-type-backport = "^0.2.0" -httpx = "^0.28.1" -jsonpath-python = "^1.0.6" -pydantic = "~2.10.3" -python-dateutil = "^2.8.2" -typing-inspect = "^0.9.0" - [tool.poetry.group.dev.dependencies] -mypy = "==1.13.0" +mypy = "==1.14.1" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" @@ -36,6 +37,7 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/packages/mistralai_azure/scripts/prepare-readme.py b/packages/mistralai_azure/scripts/prepare_readme.py similarity index 100% rename from packages/mistralai_azure/scripts/prepare-readme.py rename to packages/mistralai_azure/scripts/prepare_readme.py diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index ab45b1f9..f2f2cf2c 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -2,6 +2,6 @@ export 
POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare-readme.py +poetry run python scripts/prepare_readme.py poetry publish --build --skip-existing diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/mistralai_azure/src/mistralai_azure/__init__.py index a1b7f626..dd02e42e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/__init__.py @@ -1,9 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from ._version import __title__, __version__ +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) from .sdk import * from .sdkconfiguration import * from .models import * VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py index 5e34da26..297dfa2f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py @@ -7,16 +7,19 @@ class HookContext: + base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, + base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -25,21 +28,30 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + 
hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index c7215b89..23e2d1c2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -4,6 +4,9 @@ __title__: str = "mistralai_azure" __version__: str = "1.2.6" +__openapi_doc_version__: str = "0.0.2" +__gen_version__: str = "2.548.6" +__user_agent__: str = "speakeasy-sdk/python 1.2.6 2.548.6 0.0.2 mistralai_azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 05c100d4..24e4935e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -231,6 +231,10 @@ def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -303,6 +307,10 @@ async def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git 
a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index 0ed464ba..6f126a4b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -15,7 +15,7 @@ def stream( self, *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - model: OptionalNullable[str] = "azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -37,6 +37,9 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -61,6 +64,7 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -74,6 +78,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -94,6 +100,9 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -126,6 +135,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -136,7 +146,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -145,9 +155,16 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -166,7 +183,7 @@ async def stream_async( self, *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - model: OptionalNullable[str] = 
"azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -188,6 +205,9 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -212,6 +232,7 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -225,6 +246,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -245,6 +268,9 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -277,6 +303,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -287,7 +314,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -296,9 +323,16 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -320,7 +354,7 @@ def complete( 
List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - model: OptionalNullable[str] = "azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -347,6 +381,9 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -369,6 +406,7 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -382,6 +420,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -404,6 +444,9 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -436,6 +479,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -445,15 +489,22 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -475,7 +526,7 @@ async def complete_async( List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - model: 
OptionalNullable[str] = "azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -502,6 +553,9 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -524,6 +578,7 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -537,6 +592,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -559,6 +616,9 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), safe_prompt=safe_prompt, ) @@ -591,6 +651,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -600,15 +661,22 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py 
b/packages/mistralai_azure/src/mistralai_azure/httpclient.py index 167cea4e..1e426352 100644 --- a/packages/mistralai_azure/src/mistralai_azure/httpclient.py +++ b/packages/mistralai_azure/src/mistralai_azure/httpclient.py @@ -1,6 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" # pyright: reportReturnType = false +import asyncio +from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -82,3 +84,53 @@ def build_request( async def aclose(self) -> None: pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + is_async = False + try: + asyncio.get_running_loop() + is_async = True + except RuntimeError: + pass + + try: + # If this function is called in an async loop then start another + # loop in a separate thread to close the async http client. 
+ if is_async: + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(asyncio.run, async_client.aclose()) + future.result() + else: + asyncio.run(async_client.aclose()) + except Exception: + pass diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index 379a0dfe..ed9d9362 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -54,6 +54,8 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -94,6 +96,7 @@ ValidationErrorTypedDict, ) + __all__ = [ "Arguments", "ArgumentsTypedDict", @@ -140,10 +143,14 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "JSONSchema", + "JSONSchemaTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", + "Prediction", + "PredictionTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 031677cf..530b33df 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -32,6 +32,7 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] + r"""Set this to `true` when 
adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -41,6 +42,7 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index 67c91bba..08c66467 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -70,7 +71,7 @@ class ChatCompletionRequestTypedDict(TypedDict): messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[Nullable[str]] + model: NotRequired[str] r"""The ID of the model to use for this request.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -93,6 +94,7 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -101,7 +103,7 @@ class ChatCompletionRequest(BaseModel): messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: OptionalNullable[str] = "azureai" + model: Optional[str] = "azureai" r"""The ID of the model to use for this request.""" temperature: OptionalNullable[float] = UNSET @@ -137,6 +139,8 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -156,16 +160,10 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", "safe_prompt", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py 
b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 465647eb..a2eec92b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -66,7 +67,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[Nullable[str]] + model: NotRequired[str] r"""The ID of the model to use for this request.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -88,6 +89,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -96,7 +98,7 @@ class ChatCompletionStreamRequest(BaseModel): messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: OptionalNullable[str] = "azureai" + model: Optional[str] = "azureai" r"""The ID of the model to use for this request.""" temperature: OptionalNullable[float] = UNSET @@ -131,6 +133,8 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -150,16 +154,10 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", "safe_prompt", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py index 488cdcea..e6ea8495 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/function.py @@ -10,6 +10,7 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] + strict: NotRequired[bool] class 
Function(BaseModel): @@ -18,3 +19,5 @@ class Function(BaseModel): parameters: Dict[str, Any] description: Optional[str] = "" + + strict: Optional[bool] = False diff --git a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py new file mode 100644 index 00000000..210417c7 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "strict"] + nullable_fields = ["description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git 
a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py new file mode 100644 index 00000000..888337d3 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from mistralai_azure.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + TYPE: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index e4a9d7dd..cfd58dcf 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -1,8 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_azure.types import BaseModel +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -10,8 +18,41 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "json_schema"] + nullable_fields = ["json_schema"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py index 2c06b812..08c39951 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object"] +ResponseFormats = Literal["text", "json_object", "json_schema"] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index 69b47310..6ccdcaa2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -14,6 +14,7 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] + index: NotRequired[int] class ToolCall(BaseModel): @@ -24,3 +25,5 @@ class ToolCall(BaseModel): type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( None ) + + index: Optional[int] = 0 diff --git a/packages/mistralai_azure/src/mistralai_azure/sdk.py b/packages/mistralai_azure/src/mistralai_azure/sdk.py index a83faa7b..8379e55f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdk.py @@ -1,17 +1,19 @@ -"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from typing import Any, Callable, Dict, Optional, Union +import weakref +from typing import Any, Callable, Dict, Optional, Union, cast import httpx + from mistralai_azure import models, utils from mistralai_azure._hooks import SDKHooks from mistralai_azure.chat import Chat -from mistralai_azure.types import Nullable +from mistralai_azure.types import UNSET, OptionalNullable from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, HttpClient +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, NoOpLogger +from .utils.logger import Logger, get_default_logger from .utils.retries import RetryConfig @@ -19,7 +21,7 @@ class MistralAzure(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" chat: Chat - r"""Chat Completion API""" + r"""Chat Completion API.""" def __init__( self, @@ -28,7 +30,8 @@ def __init__( url_params: Optional[Dict[str, str]] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, - retry_config: Optional[Nullable[RetryConfig]] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. 
@@ -39,7 +42,9 @@ def __init__( :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ + # if azure_endpoint doesn't end with `/v1` add it if not azure_endpoint.endswith("/"): azure_endpoint += "/" @@ -47,28 +52,30 @@ def __init__( azure_endpoint += "v1/" server_url = azure_endpoint + client_supplied = True if client is None: client = httpx.Client() + client_supplied = False assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol." + async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() + async_client_supplied = False + + if debug_logger is None: + debug_logger = get_default_logger() assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." 
- if debug_logger is None: - debug_logger = NoOpLogger() - security: Any = None if callable(azure_api_key): - security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment - api_key=azure_api_key() - ) + security = lambda: models.Security(api_key=azure_api_key()) # pylint: disable=unnecessary-lambda-assignment else: security = models.Security(api_key=azure_api_key) @@ -80,11 +87,14 @@ def __init__( self, SDKConfiguration( client=client, + client_supplied=client_supplied, async_client=async_client, + async_client_supplied=async_client_supplied, security=security, server_url=server_url, server=None, retry_config=retry_config, + timeout_ms=timeout_ms, debug_logger=debug_logger, ), ) @@ -93,7 +103,7 @@ def __init__( current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, self.sdk_configuration.client + current_server_url, client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -101,7 +111,39 @@ def __init__( # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + self._init_sdks() def _init_sdks(self): self.chat = Chat(self.sdk_configuration) + + def __enter__(self): + return self + + async def __aenter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + 
await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 73b8d517..605e5d74 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -1,6 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from ._hooks import SDKHooks +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass @@ -20,17 +26,19 @@ @dataclass class SDKConfiguration: - client: HttpClient - async_client: AsyncHttpClient + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" - openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.6" - gen_version: str = "2.486.1" - user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai_azure" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 26d51ae8..3cded8fe 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ 
b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -42,6 +42,7 @@ match_content_type, match_status_codes, match_response, + cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -94,4 +95,5 @@ "validate_float", "validate_int", "validate_open_enum", + "cast_partial", ] diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index c5eb3659..baa41fbd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -7,14 +7,15 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspect import is_optional_type +from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - if is_optional_type(type(d)) and d is None: + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -42,7 +43,8 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - if is_optional_type(type(f)) and f is None: + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -70,7 +72,8 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - if is_optional_type(type(i)) and i is None: + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -118,7 +121,8 @@ def validate(e): def validate_const(v): def validate(c): - if is_optional_type(type(c)) and c is None: + # Optional[T] is a Union[T, None] + if is_union(type(c)) and 
type(None) in get_args(type(c)) and c is None: return None if v != c: @@ -163,7 +167,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + return json.dumps(d[next(iter(d))], separators=(",", ":")) def is_nullable(field): diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/values.py b/packages/mistralai_azure/src/mistralai_azure/utils/values.py index 2b4b6832..dae01a44 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/values.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/values.py @@ -3,8 +3,9 @@ from datetime import datetime from enum import Enum from email.message import Message +from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast from httpx import Response from pydantic import BaseModel @@ -51,6 +52,8 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") +def cast_partial(typ): + return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index f74b9759..eab663b0 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,34 +1,34 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 849dde0ef239604ca71711ffc1220b54 + docChecksum: 0e9c7ff94b487395628de6c99a5954ce docVersion: 0.0.2 - speakeasyVersion: 1.462.2 - generationVersion: 2.486.1 + speakeasyVersion: 1.517.3 + generationVersion: 2.548.6 releaseVersion: 1.2.6 - configChecksum: ba11718a5b49fb4a979ae9693a68b191 + configChecksum: 31db65297a20fe8af5b30effb1421b52 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 
5.7.4 + core: 5.12.3 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.0 + examples: 3.0.1 flatRequests: 1.0.1 - globalSecurity: 3.0.2 + globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 - nullables: 1.0.0 + nullables: 1.0.1 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.0 + sdkHooks: 1.0.1 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -66,8 +66,10 @@ generatedFiles: - docs/models/functioncall.md - docs/models/functionname.md - docs/models/httpvalidationerror.md + - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md + - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/responseformat.md @@ -97,7 +99,7 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare-readme.py + - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py - src/mistralai_gcp/_hooks/__init__.py @@ -126,6 +128,8 @@ generatedFiles: - src/mistralai_gcp/models/functioncall.py - src/mistralai_gcp/models/functionname.py - src/mistralai_gcp/models/httpvalidationerror.py + - src/mistralai_gcp/models/jsonschema.py + - src/mistralai_gcp/models/prediction.py - src/mistralai_gcp/models/referencechunk.py - src/mistralai_gcp/models/responseformat.py - src/mistralai_gcp/models/responseformats.py @@ -143,6 +147,7 @@ generatedFiles: - src/mistralai_gcp/models/usermessage.py - src/mistralai_gcp/models/validationerror.py - src/mistralai_gcp/py.typed + - src/mistralai_gcp/sdk.py - src/mistralai_gcp/sdkconfiguration.py - src/mistralai_gcp/types/__init__.py - src/mistralai_gcp/types/basemodel.py @@ -165,33 +170,36 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "422": application/json: {} + "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} stream_fim: speakeasy-default-stream-fim: requestBody: - application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} responses: "422": application/json: {} + "200": {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: - application/json: {"model": 
"codestral-2405", "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} +examplesVersion: 1.0.0 generatedTests: {} diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index afa4d1d0..572d3ed1 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -7,8 +7,10 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true + nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true + securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -26,6 +28,7 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in GCP. 
+ enableCustomCodeRegions: false enumFormat: union fixFlags: responseRequiredSep2024: false @@ -45,5 +48,6 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai-gcp + pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/mistralai_gcp/docs/models/assistantmessage.md index 53f1cc76..3d0bd90b 100644 --- a/packages/mistralai_gcp/docs/models/assistantmessage.md +++ b/packages/mistralai_gcp/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index abc83281..3e9de262 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -18,4 +18,5 @@ | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index 863c0229..8200f8a3 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -18,4 +18,5 @@ | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index 236d2d21..7507b90c 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index fa635932..6cc439c7 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md index 8af398f5..a166b7bb 100644 --- a/packages/mistralai_gcp/docs/models/function.md +++ b/packages/mistralai_gcp/docs/models/function.md @@ -7,4 +7,5 @@ | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | | `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/mistralai_gcp/docs/models/jsonschema.md new file mode 100644 index 00000000..ae387867 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | 
----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/prediction.md b/packages/mistralai_gcp/docs/models/prediction.md new file mode 100644 index 00000000..86e9c396 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/prediction.md @@ -0,0 +1,9 @@ +# Prediction + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md index 9c627f55..23a1641b 100644 --- a/packages/mistralai_gcp/docs/models/responseformat.md +++ b/packages/mistralai_gcp/docs/models/responseformat.md @@ -5,4 +5,5 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/mistralai_gcp/docs/models/responseformats.md index ce35fbb3..06886afe 100644 --- a/packages/mistralai_gcp/docs/models/responseformats.md +++ b/packages/mistralai_gcp/docs/models/responseformats.md @@ -8,4 +8,5 @@ An object specifying the format that the model must output. 
Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | \ No newline at end of file +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md index 7aca5fc9..574be1ea 100644 --- a/packages/mistralai_gcp/docs/models/toolcall.md +++ b/packages/mistralai_gcp/docs/models/toolcall.md @@ -7,4 +7,5 @@ | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index 393d0f70..266bc815 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.8 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no @@ -455,7 +455,10 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import # Enable the message, report, category or checker with the given id(s). 
You can # either give multiple identifier separated by comma (,) or put this option diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 6692f1d5..21cf7eb9 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,9 +1,21 @@ -[tool.poetry] +[project] name = "mistralai-gcp" version = "1.2.6" description = "Python Client SDK for the Mistral AI API in GCP." -authors = ["Mistral",] +authors = [{ name = "Mistral" },] readme = "README-PYPI.md" +requires-python = ">=3.9" +dependencies = [ + "eval-type-backport >=0.2.0", + "google-auth (>=2.31.0,<3.0.0)", + "httpx >=0.28.1", + "pydantic >=2.10.3", + "python-dateutil >=2.8.2", + "requests (>=2.32.3,<3.0.0)", + "typing-inspection >=0.4.0", +] + +[tool.poetry] packages = [ { include = "mistralai_gcp", from = "src" } ] @@ -15,19 +27,8 @@ include = ["py.typed", "src/mistralai_gcp/py.typed"] [virtualenvs] in-project = true -[tool.poetry.dependencies] -python = "^3.8" -eval-type-backport = "^0.2.0" -google-auth = "2.27.0" -httpx = "^0.28.1" -jsonpath-python = "^1.0.6" -pydantic = "~2.10.3" -python-dateutil = "^2.8.2" -requests = "^2.32.3" -typing-inspect = "^0.9.0" - [tool.poetry.group.dev.dependencies] -mypy = "==1.13.0" +mypy = "==1.14.1" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" @@ -38,6 +39,7 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/packages/mistralai_gcp/scripts/prepare-readme.py b/packages/mistralai_gcp/scripts/prepare_readme.py similarity index 100% rename from packages/mistralai_gcp/scripts/prepare-readme.py rename to packages/mistralai_gcp/scripts/prepare_readme.py diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index ab45b1f9..f2f2cf2c 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ 
b/packages/mistralai_gcp/scripts/publish.sh @@ -2,6 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare-readme.py +poetry run python scripts/prepare_readme.py poetry publish --build --skip-existing diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py index a1b7f626..dd02e42e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py @@ -1,9 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from ._version import __title__, __version__ +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) from .sdk import * from .sdkconfiguration import * from .models import * VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py index 417126fd..bb867b5b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -7,16 +7,19 @@ class HookContext: + base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, + base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -25,21 +28,30 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + 
hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index 30081f34..32be746c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -4,6 +4,9 @@ __title__: str = "mistralai-gcp" __version__: str = "1.2.6" +__openapi_doc_version__: str = "0.0.2" +__gen_version__: str = "2.548.6" +__user_agent__: str = "speakeasy-sdk/python 1.2.6 2.548.6 0.0.2 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index 40620018..bb0aab96 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -231,6 +231,10 @@ def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -303,6 +307,10 @@ async def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git 
a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index 47e5b63a..f162d2f7 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai_gcp import models, utils from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import Nullable, OptionalNullable, UNSET +from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from typing import Any, List, Mapping, Optional, Union @@ -14,7 +14,7 @@ class Chat(BaseSDK): def stream( self, *, - model: Nullable[str], + model: str, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -37,6 +37,9 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -60,6 +63,7 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
+ :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -72,6 +76,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -92,6 +98,9 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), ) req = self._build_request( @@ -123,6 +132,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -133,7 +143,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -142,9 +152,16 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -162,7 +179,7 @@ def stream( async 
def stream_async( self, *, - model: Nullable[str], + model: str, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -185,6 +202,9 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -208,6 +228,7 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
+ :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -220,6 +241,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -240,6 +263,9 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), ) req = self._build_request_async( @@ -271,6 +297,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -281,7 +308,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -290,9 +317,16 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise 
models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -310,7 +344,7 @@ async def stream_async( def complete( self, *, - model: Nullable[str], + model: str, messages: Union[ List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], @@ -341,6 +375,9 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -362,6 +399,7 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
+ :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -374,6 +412,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -396,6 +436,9 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), ) req = self._build_request( @@ -427,6 +470,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -436,15 +480,22 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -462,7 +513,7 @@ def complete( async def 
complete_async( self, *, - model: Nullable[str], + model: str, messages: Union[ List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], @@ -493,6 +544,9 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -514,6 +568,7 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
+ :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -526,6 +581,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -548,6 +605,9 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), ) req = self._build_request_async( @@ -579,6 +639,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -588,15 +649,22 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", 
http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 89146a4a..84821c6a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai_gcp import models, utils from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import Nullable, OptionalNullable, UNSET +from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from typing import Any, Mapping, Optional, Union @@ -14,7 +14,7 @@ class Fim(BaseSDK): def stream( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -60,6 +60,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -103,6 +105,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -113,7 +116,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -122,9 +125,16 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -142,7 +152,7 @@ def stream( async def stream_async( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -188,6 +198,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -231,6 +243,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -241,7 +254,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -250,9 +263,16 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise 
models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -270,7 +290,7 @@ async def stream_async( def complete( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -316,6 +336,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -359,6 +381,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -368,15 +391,22 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.FIMCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -394,7 +424,7 @@ def complete( async def complete_async( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -440,6 +470,8 @@ async def complete_async( if server_url is not 
None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -483,6 +515,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -492,15 +525,22 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.FIMCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py index 167cea4e..1e426352 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py @@ -1,6 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" # pyright: reportReturnType = false +import asyncio +from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -82,3 +84,53 @@ def build_request( async def aclose(self) -> None: pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + is_async = False + try: + asyncio.get_running_loop() + is_async = True + except RuntimeError: + pass + + try: + # If this function is called in an async loop then start another + # loop in a separate thread to close the async http client. 
+ if is_async: + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(asyncio.run, async_client.aclose()) + future.result() + else: + asyncio.run(async_client.aclose()) + except Exception: + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index f3c6ce7e..154777da 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -67,6 +67,8 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -107,6 +109,7 @@ ValidationErrorTypedDict, ) + __all__ = [ "Arguments", "ArgumentsTypedDict", @@ -163,10 +166,14 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "JSONSchema", + "JSONSchemaTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", + "Prediction", + "PredictionTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 6a9b58f2..9147f566 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -32,6 +32,7 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as 
prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -41,6 +42,7 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index ab97e52a..60a37f2f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -68,7 +69,7 @@ class ChatCompletionRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -93,10 +94,11 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] class ChatCompletionRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessages] @@ -135,6 +137,8 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -150,15 +154,9 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index e6c5429b..f2041426 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from 
.systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -64,7 +65,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -88,10 +89,11 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] class ChatCompletionStreamRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[Messages] @@ -129,6 +131,8 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -144,15 +148,9 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 81c87b7e..6dfb7373 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -26,7 +26,7 @@ class FIMCompletionRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -52,7 +52,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. 
Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -98,7 +98,6 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ - "model", "temperature", "max_tokens", "random_seed", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 356758d3..406749bb 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -26,7 +26,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -51,7 +51,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -96,7 +96,6 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ - "model", "temperature", "max_tokens", "random_seed", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py index c3168eec..3d61e624 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py @@ -10,6 +10,7 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] + strict: NotRequired[bool] class Function(BaseModel): @@ -18,3 +19,5 @@ class Function(BaseModel): parameters: Dict[str, Any] description: Optional[str] = "" + + strict: Optional[bool] = False diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py new file mode 100644 index 00000000..2c6bd478 --- 
/dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "strict"] + nullable_fields = ["description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py new file mode 100644 index 00000000..742aac0b --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from mistralai_gcp.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + TYPE: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index fde89862..5a24f644 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -1,8 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_gcp.types import BaseModel +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -10,8 +18,41 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "json_schema"] + nullable_fields = ["json_schema"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py index 2c06b812..08c39951 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object"] +ResponseFormats = Literal["text", "json_object", "json_schema"] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py index 5b4b217a..ecbac8d6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -14,6 +14,7 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] + index: NotRequired[int] class ToolCall(BaseModel): @@ -24,3 +25,5 @@ class ToolCall(BaseModel): type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( None ) + + index: Optional[int] = 0 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index abfea8db..dd93cc7f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -1,23 +1,25 @@ -"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" import json -from typing import Optional, Tuple, Union +import weakref +from typing import Any, Optional, cast import google.auth import google.auth.credentials import google.auth.transport import google.auth.transport.requests import httpx + from mistralai_gcp import models from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks from mistralai_gcp.chat import Chat from mistralai_gcp.fim import Fim -from mistralai_gcp.types import Nullable +from mistralai_gcp.types import UNSET, OptionalNullable from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, HttpClient +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, NoOpLogger +from .utils.logger import Logger, get_default_logger from .utils.retries import RetryConfig LEGACY_MODEL_ID_FORMAT = { @@ -26,20 +28,21 @@ "mistral-nemo-2407": "mistral-nemo@2407", } -def get_model_info(model: str) -> Tuple[str, str]: + +def get_model_info(model: str) -> tuple[str, str]: # if the model requiers the legacy fomat, use it, else do nothing. if model in LEGACY_MODEL_ID_FORMAT: return "-".join(model.split("-")[:-1]), LEGACY_MODEL_ID_FORMAT[model] return model, model - class MistralGoogleCloud(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" chat: Chat + r"""Chat Completion API.""" fim: Fim - r"""Chat Completion API""" + r"""Fill-in-the-middle API.""" def __init__( self, @@ -48,16 +51,20 @@ def __init__( access_token: Optional[str] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, - retry_config: Optional[Nullable[RetryConfig]] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. - :param region: The Google Cloud region to use for all methods - :param project_id: The project ID to use for all methods + :param api_key: The api_key required for authentication + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods + :param url_params: Parameters to optionally template the server URL with :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ if not access_token: @@ -72,36 +79,42 @@ def __init__( ) project_id = project_id or loaded_project_id + if project_id is None: raise models.SDKError("project_id must be provided") def auth_token() -> str: if access_token: return access_token + credentials.refresh(google.auth.transport.requests.Request()) token = credentials.token if not token: raise models.SDKError("Failed to get token from credentials") return token + client_supplied = True if client is None: client = httpx.Client() + client_supplied = False 
assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol." + async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() + async_client_supplied = False if debug_logger is None: - debug_logger = NoOpLogger() + debug_logger = get_default_logger() assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." - security = None + security: Any = None if callable(auth_token): security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment api_key=auth_token() @@ -113,23 +126,24 @@ def auth_token() -> str: self, SDKConfiguration( client=client, + client_supplied=client_supplied, async_client=async_client, + async_client_supplied=async_client_supplied, security=security, server_url=f"https://{region}-aiplatform.googleapis.com", server=None, retry_config=retry_config, + timeout_ms=timeout_ms, debug_logger=debug_logger, ), ) hooks = SDKHooks() - hook = GoogleCloudBeforeRequestHook(region, project_id) hooks.register_before_request_hook(hook) - current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, self.sdk_configuration.client + current_server_url, client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -137,22 +151,53 @@ def auth_token() -> str: # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + self._init_sdks() def _init_sdks(self): self.chat = Chat(self.sdk_configuration) self.fim = Fim(self.sdk_configuration) + def __enter__(self): + return self -class 
GoogleCloudBeforeRequestHook(BeforeRequestHook): + async def __aenter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + async def __aexit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None + + +class GoogleCloudBeforeRequestHook(BeforeRequestHook): def __init__(self, region: str, project_id: str): self.region = region self.project_id = project_id def before_request( self, hook_ctx, request: httpx.Request - ) -> Union[httpx.Request, Exception]: + ) -> httpx.Request | Exception: # The goal of this function is to template in the region, project and model into the URL path # We do this here so that the API remains more user-friendly model_id = None @@ -167,7 +212,6 @@ def before_request( if model_id == "": raise models.SDKError("model must be provided") - stream = "streamRawPredict" in request.url.path specifier = "streamRawPredict" if stream else "rawPredict" url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model_id}:{specifier}" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index 3c149cc6..c373d27d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -1,6 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from ._hooks import SDKHooks +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass @@ -20,17 +26,19 @@ @dataclass class SDKConfiguration: - client: HttpClient - async_client: AsyncHttpClient + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" - openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.6" - gen_version: str = "2.486.1" - user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai-gcp" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 26d51ae8..3cded8fe 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -42,6 +42,7 @@ match_content_type, match_status_codes, match_response, + cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -94,4 +95,5 @@ "validate_float", "validate_int", "validate_open_enum", + "cast_partial", ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index c5eb3659..baa41fbd 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -7,14 +7,15 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspect import is_optional_type +from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - if is_optional_type(type(d)) and d is None: + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -42,7 +43,8 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - if is_optional_type(type(f)) and f is None: + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -70,7 +72,8 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - if is_optional_type(type(i)) and i is None: + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -118,7 +121,8 @@ def validate(e): def validate_const(v): def validate(c): - if is_optional_type(type(c)) and c is None: + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: return None if v != c: @@ -163,7 +167,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + return json.dumps(d[next(iter(d))], separators=(",", ":")) def is_nullable(field): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py index 2b4b6832..dae01a44 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py @@ -3,8 +3,9 @@ from datetime import datetime from enum import Enum from email.message import Message +from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast from httpx import Response from pydantic import BaseModel @@ -51,6 +52,8 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") +def cast_partial(typ): + return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] diff --git a/poetry.lock b/poetry.lock index 78003ff1..3d36b94f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -12,9 +12,6 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - [[package]] name = "anyio" version = "4.5.2" @@ -315,14 +312,14 @@ trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" -version = "0.27.2" +version = "0.28.1" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, - {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] [package.dependencies] @@ -330,7 +327,6 @@ anyio = "*" certifi = "*" httpcore = "==1.*" idna = "*" -sniffio = "*" [package.extras] brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] @@ -381,18 +377,6 @@ files = [ [package.extras] colors = ["colorama (>=0.4.6)"] -[[package]] -name = "jsonpath-python" -version = "1.0.6" -description = "A more powerful JSONPath implementation in modern python" -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, - {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, -] - [[package]] name = "mccabe" version = "0.7.0" @@ -407,50 +391,56 @@ files = [ [[package]] name = "mypy" -version = "1.13.0" +version = "1.14.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, - {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, - {file = 
"mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, - {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, - {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, - {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, - {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, - {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, - {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, - {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, - {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, - {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, - {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, - {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, - {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, - {file = 
"mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, - {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, - {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, - {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, - {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, - {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, - {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, - {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, - {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, - {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, - {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, - {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, - {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, - {file = 
"mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, - {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, - {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, - {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, + {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, + {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, + {file = 
"mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, + {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, + {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, + {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, + {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, + {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, + {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, + {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, + {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, + {file = 
"mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, + {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, + {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, + {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, ] [package.dependencies] -mypy-extensions = ">=1.0.0" +mypy_extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.6.0" +typing_extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -465,7 +455,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -547,19 +537,19 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.10.2" +version = "2.10.6" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, - {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, + {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, + {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, ] 
[package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.27.1" +pydantic-core = "2.27.2" typing-extensions = ">=4.12.2" [package.extras] @@ -568,112 +558,112 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows [[package]] name = "pydantic-core" -version = "2.27.1" +version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, - {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"}, - {file = 
"pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"}, - {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"}, - {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"}, - {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"}, - {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"}, - {file = 
"pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"}, - {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"}, - {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"}, - {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"}, - {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"}, - {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"}, - {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"}, - {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"}, - {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"}, - {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"}, - {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"}, - {file = 
"pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"}, - {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"}, - {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"}, - {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"}, - {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"}, - {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"}, - {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"}, - {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"}, - {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"}, - {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"}, - {file = 
"pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"}, - {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"}, - {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"}, - {file = 
"pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"}, - {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, 
+ {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = 
"pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, 
+ {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", 
hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = 
"pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, ] [package.dependencies] @@ -909,20 +899,19 @@ files = [ ] [[package]] -name = "typing-inspect" -version = "0.9.0" -description = "Runtime inspection utilities for typing module." 
+name = "typing-inspection" +version = "0.4.0" +description = "Runtime typing introspection tools" optional = false -python-versions = "*" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, - {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, + {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, + {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, ] [package.dependencies] -mypy-extensions = ">=0.3.0" -typing-extensions = ">=3.7.4" +typing-extensions = ">=4.12.0" [[package]] name = "urllib3" @@ -948,5 +937,5 @@ gcp = ["google-auth", "requests"] [metadata] lock-version = "2.1" -python-versions = ">=3.8" -content-hash = "f0f19d81d36ebe966895f21a0a9dd33118783904418f4103189c475e5903b958" +python-versions = ">=3.9" +content-hash = "c3917a9114ca2a0c01aedf207fa1b59cc259bb07c4d2914fe2ed9a4cb3e1785e" diff --git a/pylintrc b/pylintrc index 9d193c42..266bc815 100644 --- a/pylintrc +++ b/pylintrc @@ -455,7 +455,10 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option diff --git a/pyproject.toml b/pyproject.toml index 5e7ddb8c..8edc7537 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.5.1" +version = "1.5.2-rc.1" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/scripts/prepare-readme.py b/scripts/prepare_readme.py similarity index 84% rename from scripts/prepare-readme.py rename to scripts/prepare_readme.py index 9111d6cb..16f6fc7e 100644 --- a/scripts/prepare-readme.py +++ b/scripts/prepare_readme.py @@ -4,7 +4,7 @@ import shutil try: - with open("README.md", "r") as rh: + with open("README.md", "r", encoding="utf-8") as rh: readme_contents = rh.read() GITHUB_URL = "https://github.com/mistralai/client-python.git" GITHUB_URL = ( @@ -21,13 +21,13 @@ readme_contents, ) - with open("README-PYPI.md", "w") as wh: + with open("README-PYPI.md", "w", encoding="utf-8") as wh: wh.write(readme_contents) except Exception as e: try: print("Failed to rewrite README.md to README-PYPI.md, copying original instead") print(e) shutil.copyfile("README.md", "README-PYPI.md") - except Exception as e: + except Exception as ie: print("Failed to copy README.md to README-PYPI.md") - print(e) + print(ie) diff --git a/scripts/publish.sh b/scripts/publish.sh index ab45b1f9..f2f2cf2c 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -2,6 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare-readme.py +poetry run python scripts/prepare_readme.py poetry publish --build --skip-existing diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py index fe448e94..ebc789ff 100644 --- a/src/mistralai/_hooks/types.py +++ b/src/mistralai/_hooks/types.py @@ -7,16 +7,19 @@ class HookContext: + base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, + base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source 
= security_source @@ -25,21 +28,30 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 700c880e..6b24498d 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.5.1" +__version__: str = "1.5.2-rc.1" __openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.497.0" -__user_agent__: str = "speakeasy-sdk/python 1.5.1 2.497.0 0.0.2 mistralai" +__gen_version__: str = "2.548.6" +__user_agent__: str = "speakeasy-sdk/python 1.5.2-rc.1 2.548.6 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 05fd165c..6d43b480 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -78,6 +78,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionRequest( max_tokens=max_tokens, @@ -132,6 +134,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", 
operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -143,12 +146,14 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -236,6 +241,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionRequest( max_tokens=max_tokens, @@ -290,6 +297,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -301,12 +309,14 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise 
models.SDKError( @@ -396,6 +406,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, @@ -450,6 +462,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env( @@ -462,7 +475,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -471,8 +484,10 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -562,6 +577,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, @@ -616,6 +633,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env( @@ -628,7 +646,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -637,8 +655,10 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await 
utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index cda8adda..512e3072 100644 --- a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -231,6 +231,10 @@ def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -303,6 +307,10 @@ async def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 67646ffe..558796d8 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -158,6 +158,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -213,6 +215,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -224,12 +227,14 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - 
data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -315,6 +320,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -370,6 +377,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -381,12 +389,14 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -482,6 +492,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -539,6 +551,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env( @@ -551,7 +564,7 @@ def stream( 
retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -560,8 +573,10 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -657,6 +672,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -714,6 +731,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env( @@ -726,7 +744,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -735,8 +753,10 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/classifiers.py 
b/src/mistralai/classifiers.py index 6ff1d6a8..7191df0c 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -40,6 +40,8 @@ def moderate( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ClassificationRequest( model=model, @@ -75,6 +77,7 @@ def moderate( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -86,12 +89,14 @@ def moderate( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -141,6 +146,8 @@ async def moderate_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ClassificationRequest( model=model, @@ -176,6 +183,7 @@ async def moderate_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -187,12 +195,14 @@ async def moderate_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", 
"application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -244,6 +254,8 @@ def moderate_chat( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( model=model, @@ -280,6 +292,7 @@ def moderate_chat( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="moderations_chat_v1_chat_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -291,12 +304,14 @@ def moderate_chat( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -348,6 +363,8 @@ async def moderate_chat_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( model=model, @@ -384,6 +401,7 @@ async def moderate_chat_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="moderations_chat_v1_chat_moderations_post", oauth2_scopes=[], 
security_source=get_security_from_env( @@ -395,12 +413,14 @@ async def moderate_chat_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index f6f558b8..b99ff0cf 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -14,8 +14,8 @@ class Embeddings(BaseSDK): def create( self, *, + model: str, inputs: Union[models.Inputs, models.InputsTypedDict], - model: Optional[str] = "mistral-embed", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -25,8 +25,8 @@ def create( Embeddings - :param inputs: Text to embed. :param model: ID of the model to use. + :param inputs: Text to embed. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -39,6 +39,8 @@ def create( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.EmbeddingRequest( model=model, @@ -74,6 +76,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -85,12 +88,14 @@ def create( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -114,8 +119,8 @@ def create( async def create_async( self, *, + model: str, inputs: Union[models.Inputs, models.InputsTypedDict], - model: Optional[str] = "mistral-embed", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -125,8 +130,8 @@ async def create_async( Embeddings - :param inputs: Text to embed. :param model: ID of the model to use. + :param inputs: Text to embed. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -139,6 +144,8 @@ async def create_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.EmbeddingRequest( model=model, @@ -174,6 +181,7 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -185,12 +193,14 @@ async def create_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 042e4aea..0ffc4857 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -44,6 +44,8 @@ def upload( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), @@ -83,6 +85,7 @@ def upload( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], 
security_source=get_security_from_env( @@ -148,6 +151,8 @@ async def upload_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), @@ -187,6 +192,7 @@ async def upload_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -256,6 +262,8 @@ def list( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesListFilesRequest( page=page, @@ -292,6 +300,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env( @@ -361,6 +370,8 @@ async def list_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesListFilesRequest( page=page, @@ -397,6 +408,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env( @@ -456,6 +468,8 @@ def retrieve( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, @@ -487,6 +501,7 @@ def retrieve( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -546,6 +561,8 @@ async def retrieve_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = 
models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, @@ -577,6 +594,7 @@ async def retrieve_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -636,6 +654,8 @@ def delete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, @@ -667,6 +687,7 @@ def delete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -726,6 +747,8 @@ async def delete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, @@ -757,6 +780,7 @@ async def delete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -816,6 +840,8 @@ def download( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, @@ -847,6 +873,7 @@ def download( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_download_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -907,6 +934,8 @@ async def download_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, @@ -938,6 +967,7 @@ async def download_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", 
operation_id="files_api_routes_download_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -998,6 +1028,8 @@ def get_signed_url( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, @@ -1030,6 +1062,7 @@ def get_signed_url( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], security_source=get_security_from_env( @@ -1089,6 +1122,8 @@ async def get_signed_url_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, @@ -1121,6 +1156,7 @@ async def get_signed_url_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index c11f6c99..032c722f 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -60,6 +60,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -103,6 +105,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -114,12 +117,14 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise 
models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -189,6 +194,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -232,6 +239,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -243,12 +251,14 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -318,6 +328,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -361,6 +373,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env( @@ -373,7 +386,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if 
utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -382,8 +395,10 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -453,6 +468,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -496,6 +513,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env( @@ -508,7 +526,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -517,8 +535,10 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py index 9dc43cb0..1e426352 100644 --- 
a/src/mistralai/httpclient.py +++ b/src/mistralai/httpclient.py @@ -94,7 +94,9 @@ class ClientOwner(Protocol): def close_clients( owner: ClientOwner, sync_client: Union[HttpClient, None], + sync_client_supplied: bool, async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, ) -> None: """ A finalizer function that is meant to be used with weakref.finalize to close @@ -107,13 +109,13 @@ def close_clients( owner.client = None owner.async_client = None - if sync_client is not None: + if sync_client is not None and not sync_client_supplied: try: sync_client.close() except Exception: pass - if async_client is not None: + if async_client is not None and not async_client_supplied: is_async = False try: asyncio.get_running_loop() diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index ea66bfc6..675ece0b 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -52,6 +52,8 @@ def list( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, @@ -91,6 +93,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -166,6 +169,8 @@ async def list_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, @@ -205,6 +210,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -290,6 +296,8 @@ def create( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobIn( model=model, @@ 
-339,6 +347,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -426,6 +435,8 @@ async def create_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobIn( model=model, @@ -475,6 +486,7 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -536,6 +548,8 @@ def get( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, @@ -567,6 +581,7 @@ def get( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -626,6 +641,8 @@ async def get_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, @@ -657,6 +674,7 @@ async def get_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -716,6 +734,8 @@ def cancel( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, @@ -747,6 +767,7 @@ def cancel( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", 
operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -806,6 +827,8 @@ async def cancel_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, @@ -837,6 +860,7 @@ async def cancel_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -896,6 +920,8 @@ def start( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, @@ -927,6 +953,7 @@ def start( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -986,6 +1013,8 @@ async def start_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, @@ -1017,6 +1046,7 @@ async def start_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index fe6b266a..e0d3c616 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -48,6 +48,8 @@ def list( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobsRequest( page=page, @@ -85,6 +87,7 @@ def list( http_res = 
self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -156,6 +159,8 @@ async def list_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobsRequest( page=page, @@ -193,6 +198,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -260,6 +266,8 @@ def create( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.BatchJobIn( input_files=input_files, @@ -298,6 +306,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -365,6 +374,8 @@ async def create_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.BatchJobIn( input_files=input_files, @@ -403,6 +414,7 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -462,6 +474,8 @@ def get( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, @@ -493,6 +507,7 @@ def get( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -552,6 +567,8 @@ async def get_async( if server_url is not None: 
base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, @@ -583,6 +600,7 @@ async def get_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -642,6 +660,8 @@ def cancel( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchCancelBatchJobRequest( job_id=job_id, @@ -673,6 +693,7 @@ def cancel( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -732,6 +753,8 @@ async def cancel_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchCancelBatchJobRequest( job_id=job_id, @@ -763,6 +786,7 @@ async def cancel_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 197f6e1f..0750906a 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -115,7 +115,11 @@ DetailedJobOutStatus, DetailedJobOutTypedDict, ) -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .documenturlchunk import ( + DocumentURLChunk, + DocumentURLChunkType, + DocumentURLChunkTypedDict, +) from .embeddingrequest import ( EmbeddingRequest, EmbeddingRequestTypedDict, @@ -455,6 +459,7 @@ "Document", "DocumentTypedDict", "DocumentURLChunk", + "DocumentURLChunkType", "DocumentURLChunkTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", 
diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py index 23622335..29945102 100644 --- a/src/mistralai/models/documenturlchunk.py +++ b/src/mistralai/models/documenturlchunk.py @@ -2,38 +2,32 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict + + +DocumentURLChunkType = Literal["document_url"] class DocumentURLChunkTypedDict(TypedDict): document_url: str - type: Literal["document_url"] document_name: NotRequired[Nullable[str]] r"""The filename of the document""" + type: NotRequired[DocumentURLChunkType] class DocumentURLChunk(BaseModel): document_url: str - TYPE: Annotated[ - Annotated[ - Optional[Literal["document_url"]], - AfterValidator(validate_const("document_url")), - ], - pydantic.Field(alias="type"), - ] = "document_url" - document_name: OptionalNullable[str] = UNSET r"""The filename of the document""" + type: Optional[DocumentURLChunkType] = "document_url" + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "document_name"] + optional_fields = ["document_name", "type"] nullable_fields = ["document_name"] null_default_fields = [] diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index b5ccd26e..5c37fd48 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel import pydantic -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict +from typing 
import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict InputsTypedDict = TypeAliasType("InputsTypedDict", Union[str, List[str]]) @@ -16,15 +16,15 @@ class EmbeddingRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use.""" inputs: InputsTypedDict r"""Text to embed.""" - model: NotRequired[str] - r"""ID of the model to use.""" class EmbeddingRequest(BaseModel): + model: str + r"""ID of the model to use.""" + inputs: Annotated[Inputs, pydantic.Field(alias="input")] r"""Text to embed.""" - - model: Optional[str] = "mistral-embed" - r"""ID of the model to use.""" diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py index 8628b308..8599192b 100644 --- a/src/mistralai/models/filepurpose.py +++ b/src/mistralai/models/filepurpose.py @@ -5,4 +5,4 @@ from typing import Literal, Union -FilePurpose = Union[Literal["fine-tune", "batch"], UnrecognizedStr] +FilePurpose = Union[Literal["fine-tune", "batch", "ocr"], UnrecognizedStr] diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index ec45eb36..0b04694d 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -35,6 +35,8 @@ def list( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) req = self._build_request( method="GET", path="/v1/models", @@ -61,6 +63,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env( @@ -72,12 +75,14 @@ def list( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ModelList) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + 
response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -122,6 +127,8 @@ async def list_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) req = self._build_request_async( method="GET", path="/v1/models", @@ -148,6 +155,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env( @@ -159,12 +167,14 @@ async def list_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ModelList) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -211,6 +221,8 @@ def retrieve( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, @@ -242,6 +254,7 @@ def retrieve( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env( @@ -253,15 +266,17 @@ def retrieve( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if 
utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -308,6 +323,8 @@ async def retrieve_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, @@ -339,6 +356,7 @@ async def retrieve_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env( @@ -350,15 +368,17 @@ async def retrieve_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -405,6 +425,8 @@ def delete( if server_url is not None: base_url = server_url + else: + base_url = 
self._get_url(base_url, url_variables) request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, @@ -436,6 +458,7 @@ def delete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env( @@ -447,12 +470,14 @@ def delete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.DeleteModelOut) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -499,6 +524,8 @@ async def delete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, @@ -530,6 +557,7 @@ async def delete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env( @@ -541,12 +569,14 @@ async def delete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.DeleteModelOut) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, 
models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -597,6 +627,8 @@ def update( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, @@ -635,6 +667,7 @@ def update( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -698,6 +731,8 @@ async def update_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, @@ -736,6 +771,7 @@ async def update_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -795,6 +831,8 @@ def archive( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, @@ -826,6 +864,7 @@ def archive( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -885,6 +924,8 @@ async def archive_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, @@ -916,6 +957,7 @@ async def archive_async( http_res = await self.do_request_async( hook_ctx=HookContext( + 
base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -975,6 +1017,8 @@ def unarchive( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, @@ -1006,6 +1050,7 @@ def unarchive( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -1065,6 +1110,8 @@ async def unarchive_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, @@ -1096,6 +1143,7 @@ async def unarchive_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index 56c1da51..5d0e2414 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -47,6 +47,8 @@ def process( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.OCRRequest( model=model, @@ -87,6 +89,7 @@ def process( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="ocr_v1_ocr_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -98,12 +101,14 @@ def process( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.OCRResponse) if utils.match_response(http_res, "422", "application/json"): - data = 
utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -160,6 +165,8 @@ async def process_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.OCRRequest( model=model, @@ -200,6 +207,7 @@ async def process_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="ocr_v1_ocr_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -211,12 +219,14 @@ async def process_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.OCRResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 00d8370a..e801eaf3 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -68,15 +68,19 @@ def __init__( :param retry_config: The retry configuration to use for all supported methods :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ + client_supplied = True if client is None: client = httpx.Client() + client_supplied = False assert issubclass( type(client), HttpClient ), "The 
provided client must implement the HttpClient protocol." + async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() + async_client_supplied = False if debug_logger is None: debug_logger = get_default_logger() @@ -100,7 +104,9 @@ def __init__( self, SDKConfiguration( client=client, + client_supplied=client_supplied, async_client=async_client, + async_client_supplied=async_client_supplied, security=security, server_url=server_url, server=server, @@ -114,7 +120,7 @@ def __init__( current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, self.sdk_configuration.client + current_server_url, client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -127,7 +133,9 @@ def __init__( close_clients, cast(ClientOwner, self.sdk_configuration), self.sdk_configuration.client, + self.sdk_configuration.client_supplied, self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, ) self._init_sdks() @@ -151,9 +159,17 @@ async def __aenter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): - if self.sdk_configuration.client is not None: + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): self.sdk_configuration.client.close() + self.sdk_configuration.client = None async def __aexit__(self, exc_type, exc_val, exc_tb): - if self.sdk_configuration.async_client is not None: + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 2ccbcbe1..257ff01d 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -26,8 +26,10 @@ @dataclass class SDKConfiguration: - client: 
HttpClient - async_client: AsyncHttpClient + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py index 151c87d4..d8b21128 100644 --- a/src/mistralai/utils/__init__.py +++ b/src/mistralai/utils/__init__.py @@ -43,6 +43,7 @@ match_content_type, match_status_codes, match_response, + cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -96,4 +97,5 @@ "validate_float", "validate_int", "validate_open_enum", + "cast_partial", ] diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py index c5eb3659..baa41fbd 100644 --- a/src/mistralai/utils/serializers.py +++ b/src/mistralai/utils/serializers.py @@ -7,14 +7,15 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspect import is_optional_type +from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - if is_optional_type(type(d)) and d is None: + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -42,7 +43,8 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - if is_optional_type(type(f)) and f is None: + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -70,7 +72,8 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - if is_optional_type(type(i)) and i is None: + # Optional[T] is a Union[T, None] + if 
is_union(type(i)) and type(None) in get_args(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -118,7 +121,8 @@ def validate(e): def validate_const(v): def validate(c): - if is_optional_type(type(c)) and c is None: + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: return None if v != c: @@ -163,7 +167,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + return json.dumps(d[next(iter(d))], separators=(",", ":")) def is_nullable(field): diff --git a/src/mistralai/utils/values.py b/src/mistralai/utils/values.py index 2b4b6832..dae01a44 100644 --- a/src/mistralai/utils/values.py +++ b/src/mistralai/utils/values.py @@ -3,8 +3,9 @@ from datetime import datetime from enum import Enum from email.message import Message +from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast from httpx import Response from pydantic import BaseModel @@ -51,6 +52,8 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") +def cast_partial(typ): + return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] From 0c0f209d1612924f76e949e74ebc3a2026ba3b06 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Wed, 19 Mar 2025 19:07:18 +0100 Subject: [PATCH 114/223] Revert to fix python 3.8 support (#204) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Revert "chore: 🐝 Update SDK - Generate MISTRALAI MISTRALAI-SDK 1.5.2-rc.1 (#202)" This reverts commit 1741021792af56d2f940b76dfd286d8f386e8894. * Revert "chore: update pyproject (#201)" This reverts commit 81f02dd942fb404d1afb8614a2f21e764a0e9487. 
* Revert "pin to latest (#200)" This reverts commit 32ff73893b9156f290b447da2a191ae9be4e184d. --- .github/workflows/run_example_scripts.yaml | 4 +- .speakeasy/gen.lock | 45 ++- .speakeasy/gen.yaml | 5 +- .speakeasy/workflow.lock | 44 +-- .speakeasy/workflow.yaml | 2 +- README.md | 73 +--- RELEASES.md | 12 +- USAGE.md | 24 +- docs/models/documenturlchunk.md | 10 +- docs/models/documenturlchunktype.md | 8 - docs/models/embeddingrequest.md | 4 +- docs/models/filepurpose.md | 3 +- docs/sdks/agents/README.md | 6 +- docs/sdks/chat/README.md | 6 +- docs/sdks/classifiers/README.md | 4 +- docs/sdks/embeddings/README.md | 7 +- docs/sdks/files/README.md | 10 +- docs/sdks/fim/README.md | 6 +- docs/sdks/jobs/README.md | 11 +- docs/sdks/mistraljobs/README.md | 8 +- docs/sdks/models/README.md | 6 - docs/sdks/ocr/README.md | 1 - packages/mistralai_azure/.speakeasy/gen.lock | 33 +- packages/mistralai_azure/.speakeasy/gen.yaml | 4 - .../docs/models/assistantmessage.md | 12 +- .../docs/models/chatcompletionrequest.md | 3 +- .../models/chatcompletionstreamrequest.md | 3 +- .../mistralai_azure/docs/models/function.md | 3 +- .../mistralai_azure/docs/models/jsonschema.md | 11 - .../mistralai_azure/docs/models/prediction.md | 9 - .../docs/models/responseformat.md | 3 +- .../docs/models/responseformats.md | 3 +- .../mistralai_azure/docs/models/toolcall.md | 3 +- packages/mistralai_azure/pylintrc | 7 +- packages/mistralai_azure/pyproject.toml | 26 +- .../{prepare_readme.py => prepare-readme.py} | 0 packages/mistralai_azure/scripts/publish.sh | 2 +- .../src/mistralai_azure/__init__.py | 11 +- .../src/mistralai_azure/_hooks/types.py | 18 +- .../src/mistralai_azure/_version.py | 3 - .../src/mistralai_azure/basesdk.py | 8 - .../src/mistralai_azure/chat.py | 108 ++---- .../src/mistralai_azure/httpclient.py | 52 --- .../src/mistralai_azure/models/__init__.py | 7 - .../models/assistantmessage.py | 2 - .../models/chatcompletionrequest.py | 18 +- .../models/chatcompletionstreamrequest.py | 18 +- 
.../src/mistralai_azure/models/function.py | 3 - .../src/mistralai_azure/models/jsonschema.py | 61 ---- .../src/mistralai_azure/models/prediction.py | 25 -- .../mistralai_azure/models/responseformat.py | 43 +-- .../mistralai_azure/models/responseformats.py | 2 +- .../src/mistralai_azure/models/toolcall.py | 3 - .../src/mistralai_azure/sdk.py | 70 +--- .../src/mistralai_azure/sdkconfiguration.py | 20 +- .../src/mistralai_azure/utils/__init__.py | 2 - .../src/mistralai_azure/utils/serializers.py | 16 +- .../src/mistralai_azure/utils/values.py | 5 +- packages/mistralai_gcp/.speakeasy/gen.lock | 40 +-- packages/mistralai_gcp/.speakeasy/gen.yaml | 4 - .../docs/models/assistantmessage.md | 12 +- .../docs/models/chatcompletionrequest.md | 5 +- .../models/chatcompletionstreamrequest.md | 5 +- .../docs/models/fimcompletionrequest.md | 2 +- .../docs/models/fimcompletionstreamrequest.md | 2 +- .../mistralai_gcp/docs/models/function.md | 3 +- .../mistralai_gcp/docs/models/jsonschema.md | 11 - .../mistralai_gcp/docs/models/prediction.md | 9 - .../docs/models/responseformat.md | 3 +- .../docs/models/responseformats.md | 3 +- .../mistralai_gcp/docs/models/toolcall.md | 3 +- packages/mistralai_gcp/pylintrc | 7 +- packages/mistralai_gcp/pyproject.toml | 30 +- .../{prepare_readme.py => prepare-readme.py} | 0 packages/mistralai_gcp/scripts/publish.sh | 2 +- .../src/mistralai_gcp/__init__.py | 11 +- .../src/mistralai_gcp/_hooks/types.py | 18 +- .../src/mistralai_gcp/_version.py | 3 - .../src/mistralai_gcp/basesdk.py | 8 - .../mistralai_gcp/src/mistralai_gcp/chat.py | 110 ++---- .../mistralai_gcp/src/mistralai_gcp/fim.py | 82 ++--- .../src/mistralai_gcp/httpclient.py | 52 --- .../src/mistralai_gcp/models/__init__.py | 7 - .../mistralai_gcp/models/assistantmessage.py | 2 - .../models/chatcompletionrequest.py | 18 +- .../models/chatcompletionstreamrequest.py | 18 +- .../models/fimcompletionrequest.py | 5 +- .../models/fimcompletionstreamrequest.py | 5 +- 
.../src/mistralai_gcp/models/function.py | 3 - .../src/mistralai_gcp/models/jsonschema.py | 61 ---- .../src/mistralai_gcp/models/prediction.py | 25 -- .../mistralai_gcp/models/responseformat.py | 43 +-- .../mistralai_gcp/models/responseformats.py | 2 +- .../src/mistralai_gcp/models/toolcall.py | 3 - .../mistralai_gcp/src/mistralai_gcp/sdk.py | 82 +---- .../src/mistralai_gcp/sdkconfiguration.py | 20 +- .../src/mistralai_gcp/utils/__init__.py | 2 - .../src/mistralai_gcp/utils/serializers.py | 16 +- .../src/mistralai_gcp/utils/values.py | 5 +- poetry.lock | 329 +++++++++--------- pylintrc | 5 +- pyproject.toml | 45 ++- .../{prepare_readme.py => prepare-readme.py} | 8 +- scripts/publish.sh | 2 +- src/mistralai/_hooks/types.py | 18 +- src/mistralai/_version.py | 6 +- src/mistralai/agents.py | 44 +-- src/mistralai/basesdk.py | 8 - src/mistralai/chat.py | 44 +-- src/mistralai/classifiers.py | 44 +-- src/mistralai/embeddings.py | 30 +- src/mistralai/files.py | 36 -- src/mistralai/fim.py | 44 +-- src/mistralai/httpclient.py | 6 +- src/mistralai/jobs.py | 30 -- src/mistralai/mistral_jobs.py | 24 -- src/mistralai/models/__init__.py | 7 +- src/mistralai/models/documenturlchunk.py | 22 +- src/mistralai/models/embeddingrequest.py | 14 +- src/mistralai/models/filepurpose.py | 2 +- src/mistralai/models_.py | 84 +---- src/mistralai/ocr.py | 22 +- src/mistralai/sdk.py | 22 +- src/mistralai/sdkconfiguration.py | 6 +- src/mistralai/utils/__init__.py | 2 - src/mistralai/utils/serializers.py | 16 +- src/mistralai/utils/values.py | 5 +- 127 files changed, 686 insertions(+), 1897 deletions(-) delete mode 100644 docs/models/documenturlchunktype.md delete mode 100644 packages/mistralai_azure/docs/models/jsonschema.md delete mode 100644 packages/mistralai_azure/docs/models/prediction.md rename packages/mistralai_azure/scripts/{prepare_readme.py => prepare-readme.py} (100%) delete mode 100644 packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py delete mode 100644 
packages/mistralai_azure/src/mistralai_azure/models/prediction.py delete mode 100644 packages/mistralai_gcp/docs/models/jsonschema.md delete mode 100644 packages/mistralai_gcp/docs/models/prediction.md rename packages/mistralai_gcp/scripts/{prepare_readme.py => prepare-readme.py} (100%) delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py rename scripts/{prepare_readme.py => prepare-readme.py} (84%) diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index b3cc08ac..db0a30aa 100644 --- a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -14,7 +14,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: [ '3.9', '3.10', '3.11', '3.12', '3.13'] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] steps: - name: Checkout code @@ -27,6 +27,8 @@ jobs: - name: Install Poetry uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + with: + version: ${{ matrix.python-version == '3.8' && '1.8.5' || '2.1.1' }} - name: Build and install client run: | diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 59249dd4..6eb1248e 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 406e00c323dba0db26d6994620926af4 + docChecksum: 81cc8be96362e2f1cb145b08a2e6c4fa docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 - releaseVersion: 1.5.2-rc.1 - configChecksum: 321ba0a46c45c1b64b391afe2abe901c + speakeasyVersion: 1.477.0 + generationVersion: 2.497.0 + releaseVersion: 1.5.1 + configChecksum: ef3439d915c5d16e7cfb88fe2bf94907 repoURL: https://github.com/mistralai/client-python.git installationURL: 
https://github.com/mistralai/client-python.git published: true @@ -14,7 +14,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.12.3 + core: 5.10.5 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 @@ -22,19 +22,19 @@ features: envVarSecurityUsage: 0.3.2 examples: 3.0.1 flatRequests: 1.0.1 - flattening: 3.1.1 - globalSecurity: 3.0.3 + flattening: 3.1.0 + globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 nameOverrides: 3.0.1 - nullables: 1.0.1 + nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 + sdkHooks: 1.0.0 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -101,7 +101,6 @@ generatedFiles: - docs/models/detailedjoboutstatus.md - docs/models/document.md - docs/models/documenturlchunk.md - - docs/models/documenturlchunktype.md - docs/models/embeddingrequest.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md @@ -239,7 +238,7 @@ generatedFiles: - poetry.toml - py.typed - pylintrc - - scripts/prepare_readme.py + - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai/__init__.py - src/mistralai/_hooks/__init__.py @@ -449,7 +448,7 @@ examples: files_api_routes_upload_file: speakeasy-default-files-api-routes-upload-file: requestBody: - multipart/form-data: {"file": {}} + multipart/form-data: {"file": {"": "x-file: example.file"}} responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_request", "source": "repository"} @@ -505,14 +504,14 @@ examples: created_by_me: false responses: "200": - application/json: {"object": "list", "total": 768578} + application/json: {"total": 768578} 
jobs_api_routes_fine_tuning_create_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: requestBody: application/json: {"model": "Fiesta", "hyperparameters": {"learning_rate": 0.0001}} responses: "200": - application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10, "object": "job.metadata"} + application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10} jobs_api_routes_fine_tuning_get_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: parameters: @@ -520,7 +519,7 @@ examples: job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} + application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} 
jobs_api_routes_fine_tuning_cancel_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: parameters: @@ -528,7 +527,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, "modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} + application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, "modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_start_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: parameters: @@ -536,7 +535,7 @@ examples: job_id: "0bf0f9e6-c3e5-4d61-aac8-0e36dcac0dfc" responses: "200": - application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 
1716963433}, {"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} + application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, {"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} jobs_api_routes_batch_get_batch_jobs: speakeasy-default-jobs-api-routes-batch-get-batch-jobs: parameters: @@ -546,14 +545,14 @@ examples: created_by_me: false responses: "200": - application/json: {"object": "list", "total": 768578} + application/json: {"total": 768578} jobs_api_routes_batch_create_batch_job: speakeasy-default-jobs-api-routes-batch-create-batch-job: requestBody: application/json: {"input_files": ["a621cf02-1cd9-4cf5-8403-315211a509a3"], "endpoint": "/v1/fim/completions", "model": "2", "timeout_hours": 24} responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} + application/json: {"id": "", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, 
"failed_requests": 617761} jobs_api_routes_batch_get_batch_job: speakeasy-default-jobs-api-routes-batch-get-batch-job: parameters: @@ -561,7 +560,7 @@ examples: job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} + application/json: {"id": "", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} jobs_api_routes_batch_cancel_batch_job: speakeasy-default-jobs-api-routes-batch-cancel-batch-job: parameters: @@ -569,7 +568,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} + application/json: {"id": "", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, 
{"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: @@ -651,7 +650,7 @@ examples: ocr_v1_ocr_post: speakeasy-default-ocr-v1-ocr-post: requestBody: - application/json: {"model": "Focus", "document": {"document_url": "https://dutiful-horst.org", "type": "document_url"}} + application/json: {"model": "Focus", "document": {"document_url": "https://dutiful-horst.org"}} responses: "200": application/json: {"pages": [], "model": "A4", "usage_info": {"pages_processed": 442675}} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 666982eb..f020895b 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -7,15 +7,13 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.5.2-rc.1 + version: 1.5.1 additionalDependencies: dev: pytest: ^8.2.2 @@ -46,6 +44,5 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai - pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index ae45ec0e..21228dc5 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,49 +1,49 @@ -speakeasyVersion: 1.517.3 +speakeasyVersion: 1.477.0 sources: mistral-azure-source: - sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:b9be39effd24c50514ea00965c7b7089b6ae09d7aaacfb5f9eeafe465f131a62 - sourceBlobDigest: 
sha256:38505cbdf426ed228e4cce7667721237ddb32f72fb7df8f26c289082a568d7cb + sourceNamespace: mistral-azure-source + sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 + sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 tags: - latest mistral-google-cloud-source: - sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:f4b8b9311a39f5c62deaed92c473eff44f139d784f122fa3c9e41d5236c96cd7 - sourceBlobDigest: sha256:cd5c242a598ba671e83fc7572ce7def4486cba035d2729d61cf7c64189a6fd89 + sourceNamespace: mistral-google-cloud-source + sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 + sourceBlobDigest: sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 - sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 + sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 + sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b tags: - latest - - speakeasy-sdk-regen-1741964260 + - speakeasy-sdk-regen-1741279153 targets: mistralai-azure-sdk: source: mistral-azure-source - sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:b9be39effd24c50514ea00965c7b7089b6ae09d7aaacfb5f9eeafe465f131a62 - sourceBlobDigest: sha256:38505cbdf426ed228e4cce7667721237ddb32f72fb7df8f26c289082a568d7cb + sourceNamespace: mistral-azure-source + sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 + sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: 
sha256:6a21f785e0bc1861ae9bf237939c6252d4589f4b5ece596938bad54b3f5c1ac9 + codeSamplesRevisionDigest: sha256:28356dba7ea28436035e20182b8ce4d1951e19503b5accef6a128d860361e5c0 mistralai-gcp-sdk: source: mistral-google-cloud-source - sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:f4b8b9311a39f5c62deaed92c473eff44f139d784f122fa3c9e41d5236c96cd7 - sourceBlobDigest: sha256:cd5c242a598ba671e83fc7572ce7def4486cba035d2729d61cf7c64189a6fd89 + sourceNamespace: mistral-google-cloud-source + sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 + sourceBlobDigest: sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:a0d0890a8e87eac8ade9832883c7a129a749142696e01b1e611cf2d97fbeed9d + codeSamplesRevisionDigest: sha256:7de23f90d6543356f310f46375bef4db7f43eb22b2871ad4dfe1b7d0cc875bb4 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 - sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 + sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 + sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:2a2f61e5c5e1eaad48e6a74164bd6249855c3ad7976ef83068199d57ebcdd055 + codeSamplesRevisionDigest: sha256:ba10be893f3e6dae275eb8fb09a688f3652de81eebd314427f28c274800edc48 workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.517.3 + speakeasyVersion: 1.477.0 sources: mistral-azure-source: inputs: diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 9f22c9f6..00aefc99 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,5 @@ workflowVersion: 1.0.0 
-speakeasyVersion: 1.517.3 +speakeasyVersion: 1.477.0 sources: mistral-azure-source: inputs: diff --git a/README.md b/README.md index fd17da18..fd31bcd8 100644 --- a/README.md +++ b/README.md @@ -75,37 +75,6 @@ pip install mistralai ```bash poetry add mistralai ``` - -### Shell and script usage with `uv` - -You can use this SDK in a Python shell with [uv](https://docs.astral.sh/uv/) and the `uvx` command that comes with it like so: - -```shell -uvx --from mistralai python -``` - -It's also possible to write a standalone Python script without needing to set up a whole project like so: - -```python -#!/usr/bin/env -S uv run --script -# /// script -# requires-python = ">=3.9" -# dependencies = [ -# "mistralai", -# ] -# /// - -from mistralai import Mistral - -sdk = Mistral( - # SDK arguments -) - -# Rest of script here... -``` - -Once that is saved to a file, you can run it with `uv run script.py` where -`script.py` can be replaced with the actual file name. @@ -120,7 +89,6 @@ This example shows how to create chat completions. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -130,7 +98,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -146,7 +114,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -156,7 +123,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -173,7 +140,6 @@ This example shows how to upload a file. 
from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -197,7 +163,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -222,7 +187,6 @@ This example shows how to create agents completions. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -232,7 +196,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -248,7 +212,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -258,7 +221,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -275,15 +238,14 @@ This example shows how to create embedding request. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(model="mistral-embed", inputs=[ + res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", - ]) + ], model="mistral-embed") # Handle response print(res) @@ -299,15 +261,14 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ + res = await mistral.embeddings.create_async(inputs=[ "Embed this sentence.", "As well as this one.", - ]) + ], model="mistral-embed") # Handle response print(res) @@ -504,7 +465,6 @@ underlying connection when the context is exited. 
from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -514,7 +474,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=True) with res as event_stream: for event in event_stream: @@ -542,7 +502,6 @@ Certain SDK methods accept file objects as part of a request body or multi-part from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -569,7 +528,6 @@ from mistralai import Mistral from mistralai.utils import BackoffStrategy, RetryConfig import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -588,7 +546,6 @@ from mistralai import Mistral from mistralai.utils import BackoffStrategy, RetryConfig import os - with Mistral( retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -629,7 +586,6 @@ When custom error responses are specified for an operation, the SDK may also rai from mistralai import Mistral, models import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -657,9 +613,9 @@ with Mistral( You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. 
This table lists the names associated with the available servers: -| Name | Server | Description | -| ---- | ------------------------ | -------------------- | -| `eu` | `https://api.mistral.ai` | EU Production server | +| Name | Server | +| ---- | ------------------------ | +| `eu` | `https://api.mistral.ai` | #### Example @@ -667,7 +623,6 @@ You can override the default server globally by passing a server name to the `se from mistralai import Mistral import os - with Mistral( server="eu", api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -687,7 +642,6 @@ The default server can also be overridden globally by passing a URL to the `serv from mistralai import Mistral import os - with Mistral( server_url="https://api.mistral.ai", api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -798,7 +752,6 @@ To authenticate with the API the `api_key` parameter must be set when initializi from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -822,7 +775,6 @@ The `Mistral` class implements the context manager protocol and registers a fina from mistralai import Mistral import os def main(): - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -831,7 +783,6 @@ def main(): # Or when using async: async def amain(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/RELEASES.md b/RELEASES.md index 0ad3429c..d7b657bb 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -168,14 +168,4 @@ Based on: ### Generated - [python v1.5.1] . ### Releases -- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . 
- -## 2025-03-14 15:08:57 -### Changes -Based on: -- OpenAPI Doc -- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy -### Generated -- [python v1.5.2-rc.1] . -### Releases -- [PyPI v1.5.2-rc.1] https://pypi.org/project/mistralai/1.5.2-rc.1 - . \ No newline at end of file +- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index fa3a77de..3e1cae03 100644 --- a/USAGE.md +++ b/USAGE.md @@ -8,7 +8,6 @@ This example shows how to create chat completions. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -18,7 +17,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -34,7 +33,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -44,7 +42,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -61,7 +59,6 @@ This example shows how to upload a file. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -85,7 +82,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -110,7 +106,6 @@ This example shows how to create agents completions. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -120,7 +115,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -136,7 +131,6 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -146,7 +140,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -163,15 +157,14 @@ This example shows how to create embedding request. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(model="mistral-embed", inputs=[ + res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", - ]) + ], model="mistral-embed") # Handle response print(res) @@ -187,15 +180,14 @@ from mistralai import Mistral import os async def main(): - async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ + res = await mistral.embeddings.create_async(inputs=[ "Embed this sentence.", "As well as this one.", - ]) + ], model="mistral-embed") # Handle response print(res) diff --git a/docs/models/documenturlchunk.md b/docs/models/documenturlchunk.md index 6c9a5b4d..33785c34 100644 --- a/docs/models/documenturlchunk.md +++ b/docs/models/documenturlchunk.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `document_url` | *str* | :heavy_check_mark: | N/A | -| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The 
filename of the document | -| `type` | [Optional[models.DocumentURLChunkType]](../models/documenturlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file diff --git a/docs/models/documenturlchunktype.md b/docs/models/documenturlchunktype.md deleted file mode 100644 index 32e1fa9e..00000000 --- a/docs/models/documenturlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# DocumentURLChunkType - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `DOCUMENT_URL` | document_url | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 3bdd79e8..07ab903a 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | \ No newline at end of file +| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | \ No newline at end of file diff --git a/docs/models/filepurpose.md b/docs/models/filepurpose.md index 14cab13e..5152aeeb 100644 --- a/docs/models/filepurpose.md +++ b/docs/models/filepurpose.md @@ -6,5 +6,4 @@ | Name | Value | | ----------- | ----------- | | `FINE_TUNE` | fine-tune | -| `BATCH` | batch | -| `OCR` | ocr | \ No newline at end of file +| `BATCH` | batch | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 1e6f9069..00ca33ac 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -20,7 +20,6 @@ Agents Completion from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -30,7 +29,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -77,7 +76,6 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -87,7 +85,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=True) with res as event_stream: for event in event_stream: diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 8cac6db4..38e16adc 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -20,7 +20,6 @@ Chat Completion from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -30,7 +29,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -80,7 +79,6 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -90,7 +88,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=True) with res as event_stream: for event in event_stream: diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 6c7127af..6bcc68a9 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -20,7 +20,6 @@ Moderations from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -63,7 +62,6 @@ Moderations Chat from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -97,7 +95,7 @@ with Mistral( "role": "assistant", }, ], - ]) + ], truncate_for_context_length=False) # Handle response print(res) diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 8c386439..44fae4ac 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -19,15 +19,14 @@ Embeddings from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(model="mistral-embed", inputs=[ + res = mistral.embeddings.create(inputs=[ "Embed this sentence.", "As well as this one.", - ]) + ], model="mistral-embed") # Handle response print(res) @@ -38,8 +37,8 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | 
------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | | `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index befa4d67..8f01a668 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -28,7 +28,6 @@ Please contact us if you need to increase these storage limits. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -71,12 +70,11 @@ Returns a list of files that belong to the user's organization. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.list() + res = mistral.files.list(page=0, page_size=100) # Handle response print(res) @@ -115,7 +113,6 @@ Returns information about a specific file. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -154,7 +151,6 @@ Delete a file. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -193,7 +189,6 @@ Download a file from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -232,12 +227,11 @@ Get Signed Url from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.get_signed_url(file_id="") + res = mistral.files.get_signed_url(file_id="", expiry=24) # Handle response print(res) diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index c70b3da4..28de6c02 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -20,12 +20,11 @@ FIM completion. 
from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") + res = mistral.fim.complete(model="codestral-2405", prompt="def", top_p=1, stream=False, suffix="return a+b") # Handle response print(res) @@ -69,12 +68,11 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") + res = mistral.fim.stream(model="codestral-2405", prompt="def", top_p=1, stream=True, suffix="return a+b") with res as event_stream: for event in event_stream: diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index ecb11def..06605877 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -21,12 +21,11 @@ Get a list of fine-tuning jobs for your organization and user. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.list() + res = mistral.fine_tuning.jobs.list(page=0, page_size=100, created_by_me=False) # Handle response print(res) @@ -68,12 +67,13 @@ Create a new fine-tuning job, it will be queued for processing. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={}) + res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={ + "learning_rate": 0.0001, + }) # Handle response print(res) @@ -114,7 +114,6 @@ Get a fine-tuned job details by its UUID. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -153,7 +152,6 @@ Request the cancellation of a fine tuning job. 
from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -192,7 +190,6 @@ Request the start of a validated fine tuning job. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 5b80a45b..56a7f60b 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -20,12 +20,11 @@ Get a list of batch jobs for your organization and user. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.list() + res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False) # Handle response print(res) @@ -65,14 +64,13 @@ Create a new batch job, it will be queued for processing. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: res = mistral.batch.jobs.create(input_files=[ "a621cf02-1cd9-4cf5-8403-315211a509a3", - ], endpoint="/v1/fim/completions", model="2") + ], endpoint="/v1/fim/completions", model="2", timeout_hours=24) # Handle response print(res) @@ -110,7 +108,6 @@ Get a batch job details by its UUID. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -149,7 +146,6 @@ Request the cancellation of a batch job. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index dd7baf50..e048d20e 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -24,7 +24,6 @@ List all models available to the user. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -63,7 +62,6 @@ Retrieve a model information. 
from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -103,7 +101,6 @@ Delete a fine-tuned model. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -143,7 +140,6 @@ Update a model name or description. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -184,7 +180,6 @@ Archive a fine-tuned model. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -223,7 +218,6 @@ Un-archive a fine-tuned model. from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 61988ea6..54f8af96 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -19,7 +19,6 @@ OCR from mistralai import Mistral import os - with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 8b8ef6ae..16a5196b 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,34 +1,34 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 3b99cf44527c23ce3298616720b00a62 + docChecksum: 4da7c33f650ddf206c58fa6c941d347f docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 + speakeasyVersion: 1.462.2 + generationVersion: 2.486.1 releaseVersion: 1.2.6 - configChecksum: 1a623455d46169b8a271df9cd9d58d86 + configChecksum: cc2ac1769a87215774fce0075ff2e77d published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.12.3 + core: 5.7.4 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.1 + examples: 3.0.0 flatRequests: 1.0.1 - globalSecurity: 
3.0.3 + globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 - nullables: 1.0.1 + nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 + sdkHooks: 1.0.0 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -61,10 +61,8 @@ generatedFiles: - docs/models/functioncall.md - docs/models/functionname.md - docs/models/httpvalidationerror.md - - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md - - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/responseformat.md @@ -94,7 +92,7 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare_readme.py + - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_azure/__init__.py - src/mistralai_azure/_hooks/__init__.py @@ -119,8 +117,6 @@ generatedFiles: - src/mistralai_azure/models/functioncall.py - src/mistralai_azure/models/functionname.py - src/mistralai_azure/models/httpvalidationerror.py - - src/mistralai_azure/models/jsonschema.py - - src/mistralai_azure/models/prediction.py - src/mistralai_azure/models/referencechunk.py - src/mistralai_azure/models/responseformat.py - src/mistralai_azure/models/responseformats.py @@ -138,7 +134,6 @@ generatedFiles: - src/mistralai_azure/models/usermessage.py - src/mistralai_azure/models/validationerror.py - src/mistralai_azure/py.typed - - src/mistralai_azure/sdk.py - src/mistralai_azure/sdkconfiguration.py - src/mistralai_azure/types/__init__.py - src/mistralai_azure/types/basemodel.py @@ -161,19 +156,17 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} responses: "422": application/json: {} - "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} "422": application/json: {} -examplesVersion: 1.0.0 generatedTests: {} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index 04ed562b..17344d9b 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -7,10 +7,8 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -25,7 +23,6 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: 
SDKError description: Python Client SDK for the Mistral AI API in Azure. - enableCustomCodeRegions: false enumFormat: union fixFlags: responseRequiredSep2024: false @@ -45,6 +42,5 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai_azure - pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/mistralai_azure/docs/models/assistantmessage.md index 3d0bd90b..53f1cc76 100644 --- a/packages/mistralai_azure/docs/models/assistantmessage.md +++ b/packages/mistralai_azure/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index eb43a4da..68cef4a1 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | @@ -19,5 +19,4 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
| | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index 78442736..c9c5c87b 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | @@ -19,5 +19,4 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
| | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/mistralai_azure/docs/models/function.md index a166b7bb..8af398f5 100644 --- a/packages/mistralai_azure/docs/models/function.md +++ b/packages/mistralai_azure/docs/models/function.md @@ -7,5 +7,4 @@ | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | | `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/jsonschema.md b/packages/mistralai_azure/docs/models/jsonschema.md deleted file mode 100644 index ae387867..00000000 --- a/packages/mistralai_azure/docs/models/jsonschema.md +++ /dev/null @@ -1,11 +0,0 @@ -# JSONSchema - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/prediction.md b/packages/mistralai_azure/docs/models/prediction.md deleted file mode 100644 index 86e9c396..00000000 --- 
a/packages/mistralai_azure/docs/models/prediction.md +++ /dev/null @@ -1,9 +0,0 @@ -# Prediction - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | -| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/mistralai_azure/docs/models/responseformat.md index 23a1641b..9c627f55 100644 --- a/packages/mistralai_azure/docs/models/responseformat.md +++ b/packages/mistralai_azure/docs/models/responseformat.md @@ -5,5 +5,4 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | 
[Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/mistralai_azure/docs/models/responseformats.md index 06886afe..ce35fbb3 100644 --- a/packages/mistralai_azure/docs/models/responseformats.md +++ b/packages/mistralai_azure/docs/models/responseformats.md @@ -8,5 +8,4 @@ An object specifying the format that the model must output. 
Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | -| `JSON_SCHEMA` | json_schema | \ No newline at end of file +| `JSON_OBJECT` | json_object | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/mistralai_azure/docs/models/toolcall.md index 574be1ea..7aca5fc9 100644 --- a/packages/mistralai_azure/docs/models/toolcall.md +++ b/packages/mistralai_azure/docs/models/toolcall.md @@ -7,5 +7,4 @@ | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index 266bc815..393d0f70 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.8 # Discover python modules and packages in the file system subtree. recursive=no @@ -455,10 +455,7 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level, - consider-using-with, - wildcard-import, - unused-wildcard-import + relative-beyond-top-level # Enable the message, report, category or checker with the given id(s). 
You can # either give multiple identifier separated by comma (,) or put this option diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index bf120e67..5c227f66 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,19 +1,9 @@ -[project] +[tool.poetry] name = "mistralai_azure" version = "1.2.6" description = "Python Client SDK for the Mistral AI API in Azure." -authors = [{ name = "Mistral" },] +authors = ["Mistral",] readme = "README-PYPI.md" -requires-python = ">=3.9" -dependencies = [ - "eval-type-backport >=0.2.0", - "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", - "typing-inspection >=0.4.0", -] - -[tool.poetry] packages = [ { include = "mistralai_azure", from = "src" } ] @@ -25,8 +15,17 @@ include = ["py.typed", "src/mistralai_azure/py.typed"] [virtualenvs] in-project = true +[tool.poetry.dependencies] +python = "^3.8" +eval-type-backport = "^0.2.0" +httpx = "^0.28.1" +jsonpath-python = "^1.0.6" +pydantic = "~2.10.3" +python-dateutil = "^2.8.2" +typing-inspect = "^0.9.0" + [tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" +mypy = "==1.13.0" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" @@ -37,7 +36,6 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] -asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/packages/mistralai_azure/scripts/prepare_readme.py b/packages/mistralai_azure/scripts/prepare-readme.py similarity index 100% rename from packages/mistralai_azure/scripts/prepare_readme.py rename to packages/mistralai_azure/scripts/prepare-readme.py diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index f2f2cf2c..ab45b1f9 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -2,6 +2,6 @@ export 
POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare_readme.py +poetry run python scripts/prepare-readme.py poetry publish --build --skip-existing diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/mistralai_azure/src/mistralai_azure/__init__.py index dd02e42e..a1b7f626 100644 --- a/packages/mistralai_azure/src/mistralai_azure/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/__init__.py @@ -1,18 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from ._version import ( - __title__, - __version__, - __openapi_doc_version__, - __gen_version__, - __user_agent__, -) +from ._version import __title__, __version__ from .sdk import * from .sdkconfiguration import * from .models import * VERSION: str = __version__ -OPENAPI_DOC_VERSION = __openapi_doc_version__ -SPEAKEASY_GENERATOR_VERSION = __gen_version__ -USER_AGENT = __user_agent__ diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py index 297dfa2f..5e34da26 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py @@ -7,19 +7,16 @@ class HookContext: - base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, - base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): - self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -28,30 +25,21 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + 
hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index 23e2d1c2..c7215b89 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -4,9 +4,6 @@ __title__: str = "mistralai_azure" __version__: str = "1.2.6" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.2.6 2.548.6 0.0.2 mistralai_azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 24e4935e..05c100d4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -231,10 +231,6 @@ def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -307,10 +303,6 @@ async def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git 
a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index 6f126a4b..0ed464ba 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -15,7 +15,7 @@ def stream( self, *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - model: Optional[str] = "azureai", + model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -37,9 +37,6 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -64,7 +61,6 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -78,8 +74,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -100,9 +94,6 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), safe_prompt=safe_prompt, ) @@ -135,7 +126,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -146,7 +136,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -155,16 +145,9 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -183,7 +166,7 @@ async def stream_async( self, *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - model: Optional[str] = "azureai", + 
model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -205,9 +188,6 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -232,7 +212,6 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -246,8 +225,6 @@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -268,9 +245,6 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), safe_prompt=safe_prompt, ) @@ -303,7 +277,6 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -314,7 +287,7 @@ async def stream_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -323,16 +296,9 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -354,7 +320,7 @@ def complete( 
List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - model: Optional[str] = "azureai", + model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -381,9 +347,6 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -406,7 +369,6 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -420,8 +382,6 @@ def complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -444,9 +404,6 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), safe_prompt=safe_prompt, ) @@ -479,7 +436,6 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -489,22 +445,15 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -526,7 +475,7 @@ async def complete_async( List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - model: Optional[str] = 
"azureai", + model: OptionalNullable[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -553,9 +502,6 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -578,7 +524,6 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -592,8 +537,6 @@ async def complete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -616,9 +559,6 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), safe_prompt=safe_prompt, ) @@ -651,7 +591,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -661,22 +600,15 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py 
b/packages/mistralai_azure/src/mistralai_azure/httpclient.py index 1e426352..167cea4e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/httpclient.py +++ b/packages/mistralai_azure/src/mistralai_azure/httpclient.py @@ -1,8 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" # pyright: reportReturnType = false -import asyncio -from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -84,53 +82,3 @@ def build_request( async def aclose(self) -> None: pass - - -class ClientOwner(Protocol): - client: Union[HttpClient, None] - async_client: Union[AsyncHttpClient, None] - - -def close_clients( - owner: ClientOwner, - sync_client: Union[HttpClient, None], - sync_client_supplied: bool, - async_client: Union[AsyncHttpClient, None], - async_client_supplied: bool, -) -> None: - """ - A finalizer function that is meant to be used with weakref.finalize to close - httpx clients used by an SDK so that underlying resources can be garbage - collected. - """ - - # Unset the client/async_client properties so there are no more references - # to them from the owning SDK instance and they can be reaped. - owner.client = None - owner.async_client = None - - if sync_client is not None and not sync_client_supplied: - try: - sync_client.close() - except Exception: - pass - - if async_client is not None and not async_client_supplied: - is_async = False - try: - asyncio.get_running_loop() - is_async = True - except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. 
- if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: - asyncio.run(async_client.aclose()) - except Exception: - pass diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index ed9d9362..379a0dfe 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -54,8 +54,6 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -96,7 +94,6 @@ ValidationErrorTypedDict, ) - __all__ = [ "Arguments", "ArgumentsTypedDict", @@ -143,14 +140,10 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", - "JSONSchema", - "JSONSchemaTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", - "Prediction", - "PredictionTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 530b33df..031677cf 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -32,7 +32,6 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] - r"""Set this to `true` when 
adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -42,7 +41,6 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index 08c66467..67c91bba 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -2,7 +2,6 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -71,7 +70,7 @@ class ChatCompletionRequestTypedDict(TypedDict): messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[str] + model: NotRequired[Nullable[str]] r"""The ID of the model to use for this request.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -94,7 +93,6 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -103,7 +101,7 @@ class ChatCompletionRequest(BaseModel): messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: Optional[str] = "azureai" + model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" temperature: OptionalNullable[float] = UNSET @@ -139,8 +137,6 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: Optional[Prediction] = None - safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -160,10 +156,16 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", - "prediction", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py 
b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index a2eec92b..465647eb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -2,7 +2,6 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -67,7 +66,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[str] + model: NotRequired[Nullable[str]] r"""The ID of the model to use for this request.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -89,7 +88,6 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -98,7 +96,7 @@ class ChatCompletionStreamRequest(BaseModel): messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: Optional[str] = "azureai" + model: OptionalNullable[str] = "azureai" r"""The ID of the model to use for this request.""" temperature: OptionalNullable[float] = UNSET @@ -133,8 +131,6 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: Optional[Prediction] = None - safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -154,10 +150,16 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", - "prediction", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py index e6ea8495..488cdcea 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/function.py @@ -10,7 +10,6 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] - strict: NotRequired[bool] class 
Function(BaseModel): @@ -19,5 +18,3 @@ class Function(BaseModel): parameters: Dict[str, Any] description: Optional[str] = "" - - strict: Optional[bool] = False diff --git a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py deleted file mode 100644 index 210417c7..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JSONSchemaTypedDict(TypedDict): - name: str - schema_definition: Dict[str, Any] - description: NotRequired[Nullable[str]] - strict: NotRequired[bool] - - -class JSONSchema(BaseModel): - name: str - - schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] - - description: OptionalNullable[str] = UNSET - - strict: Optional[bool] = False - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git 
a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py deleted file mode 100644 index 888337d3..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class PredictionTypedDict(TypedDict): - type: Literal["content"] - content: NotRequired[str] - - -class Prediction(BaseModel): - TYPE: Annotated[ - Annotated[ - Optional[Literal["content"]], AfterValidator(validate_const("content")) - ], - pydantic.Field(alias="type"), - ] = "content" - - content: Optional[str] = "" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index cfd58dcf..e4a9d7dd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -1,16 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_azure.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer +from mistralai_azure.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -18,41 +10,8 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - - json_schema: OptionalNullable[JSONSchema] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py index 08c39951..2c06b812 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object", "json_schema"] +ResponseFormats = Literal["text", "json_object"] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index 6ccdcaa2..69b47310 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -14,7 +14,6 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] - index: NotRequired[int] class ToolCall(BaseModel): @@ -25,5 +24,3 @@ class ToolCall(BaseModel): type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( None ) - - index: Optional[int] = 0 diff --git a/packages/mistralai_azure/src/mistralai_azure/sdk.py b/packages/mistralai_azure/src/mistralai_azure/sdk.py index 8379e55f..a83faa7b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdk.py @@ -1,19 +1,17 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" -import weakref -from typing import Any, Callable, Dict, Optional, Union, cast +from typing import Any, Callable, Dict, Optional, Union import httpx - from mistralai_azure import models, utils from mistralai_azure._hooks import SDKHooks from mistralai_azure.chat import Chat -from mistralai_azure.types import UNSET, OptionalNullable +from mistralai_azure.types import Nullable from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients +from .httpclient import AsyncHttpClient, HttpClient from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, get_default_logger +from .utils.logger import Logger, NoOpLogger from .utils.retries import RetryConfig @@ -21,7 +19,7 @@ class MistralAzure(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" chat: Chat - r"""Chat Completion API.""" + r"""Chat Completion API""" def __init__( self, @@ -30,8 +28,7 @@ def __init__( url_params: Optional[Dict[str, str]] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, - retry_config: OptionalNullable[RetryConfig] = UNSET, - timeout_ms: Optional[int] = None, + retry_config: Optional[Nullable[RetryConfig]] = None, debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. 
@@ -42,9 +39,7 @@ def __init__( :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods - :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ - # if azure_endpoint doesn't end with `/v1` add it if not azure_endpoint.endswith("/"): azure_endpoint += "/" @@ -52,30 +47,28 @@ def __init__( azure_endpoint += "v1/" server_url = azure_endpoint - client_supplied = True if client is None: client = httpx.Client() - client_supplied = False assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol." - async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() - async_client_supplied = False - - if debug_logger is None: - debug_logger = get_default_logger() assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." 
+ if debug_logger is None: + debug_logger = NoOpLogger() + security: Any = None if callable(azure_api_key): - security = lambda: models.Security(api_key=azure_api_key()) # pylint: disable=unnecessary-lambda-assignment + security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment + api_key=azure_api_key() + ) else: security = models.Security(api_key=azure_api_key) @@ -87,14 +80,11 @@ def __init__( self, SDKConfiguration( client=client, - client_supplied=client_supplied, async_client=async_client, - async_client_supplied=async_client_supplied, security=security, server_url=server_url, server=None, retry_config=retry_config, - timeout_ms=timeout_ms, debug_logger=debug_logger, ), ) @@ -103,7 +93,7 @@ def __init__( current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client + current_server_url, self.sdk_configuration.client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -111,39 +101,7 @@ def __init__( # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks - weakref.finalize( - self, - close_clients, - cast(ClientOwner, self.sdk_configuration), - self.sdk_configuration.client, - self.sdk_configuration.client_supplied, - self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, - ) - self._init_sdks() def _init_sdks(self): self.chat = Chat(self.sdk_configuration) - - def __enter__(self): - return self - - async def __aenter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): - self.sdk_configuration.client.close() - self.sdk_configuration.client = None - - async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): - 
await self.sdk_configuration.async_client.aclose() - self.sdk_configuration.async_client = None diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 605e5d74..73b8d517 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -1,12 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from ._hooks import SDKHooks -from ._version import ( - __gen_version__, - __openapi_doc_version__, - __user_agent__, - __version__, -) from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass @@ -26,19 +20,17 @@ @dataclass class SDKConfiguration: - client: Union[HttpClient, None] - client_supplied: bool - async_client: Union[AsyncHttpClient, None] - async_client_supplied: bool + client: HttpClient + async_client: AsyncHttpClient debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" - openapi_doc_version: str = __openapi_doc_version__ - sdk_version: str = __version__ - gen_version: str = __gen_version__ - user_agent: str = __user_agent__ + openapi_doc_version: str = "0.0.2" + sdk_version: str = "1.2.6" + gen_version: str = "2.486.1" + user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai_azure" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 3cded8fe..26d51ae8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ 
b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -42,7 +42,6 @@ match_content_type, match_status_codes, match_response, - cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -95,5 +94,4 @@ "validate_float", "validate_int", "validate_open_enum", - "cast_partial", ] diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index baa41fbd..c5eb3659 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -7,15 +7,14 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union +from typing_inspect import is_optional_type from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - # Optional[T] is a Union[T, None] - if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + if is_optional_type(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -43,8 +42,7 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - # Optional[T] is a Union[T, None] - if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + if is_optional_type(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -72,8 +70,7 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - # Optional[T] is a Union[T, None] - if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + if is_optional_type(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -121,8 +118,7 @@ def validate(e): def validate_const(v): def validate(c): - # Optional[T] is a Union[T, None] - if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + if 
is_optional_type(type(c)) and c is None: return None if v != c: @@ -167,7 +163,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - return json.dumps(d[next(iter(d))], separators=(",", ":")) + return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) def is_nullable(field): diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/values.py b/packages/mistralai_azure/src/mistralai_azure/utils/values.py index dae01a44..2b4b6832 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/values.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/values.py @@ -3,9 +3,8 @@ from datetime import datetime from enum import Enum from email.message import Message -from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union from httpx import Response from pydantic import BaseModel @@ -52,8 +51,6 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") -def cast_partial(typ): - return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index eab663b0..f74b9759 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,34 +1,34 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 0e9c7ff94b487395628de6c99a5954ce + docChecksum: 849dde0ef239604ca71711ffc1220b54 docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 + speakeasyVersion: 1.462.2 + generationVersion: 2.486.1 releaseVersion: 1.2.6 - configChecksum: 31db65297a20fe8af5b30effb1421b52 + configChecksum: ba11718a5b49fb4a979ae9693a68b191 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.12.3 + 
core: 5.7.4 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.1 + examples: 3.0.0 flatRequests: 1.0.1 - globalSecurity: 3.0.3 + globalSecurity: 3.0.2 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 - nullables: 1.0.1 + nullables: 1.0.0 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 + sdkHooks: 1.0.0 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -66,10 +66,8 @@ generatedFiles: - docs/models/functioncall.md - docs/models/functionname.md - docs/models/httpvalidationerror.md - - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md - - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/responseformat.md @@ -99,7 +97,7 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare_readme.py + - scripts/prepare-readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py - src/mistralai_gcp/_hooks/__init__.py @@ -128,8 +126,6 @@ generatedFiles: - src/mistralai_gcp/models/functioncall.py - src/mistralai_gcp/models/functionname.py - src/mistralai_gcp/models/httpvalidationerror.py - - src/mistralai_gcp/models/jsonschema.py - - src/mistralai_gcp/models/prediction.py - src/mistralai_gcp/models/referencechunk.py - src/mistralai_gcp/models/responseformat.py - src/mistralai_gcp/models/responseformats.py @@ -147,7 +143,6 @@ generatedFiles: - src/mistralai_gcp/models/usermessage.py - src/mistralai_gcp/models/validationerror.py - src/mistralai_gcp/py.typed - - src/mistralai_gcp/sdk.py - src/mistralai_gcp/sdkconfiguration.py - src/mistralai_gcp/types/__init__.py - src/mistralai_gcp/types/basemodel.py @@ -170,36 +165,33 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "422": application/json: {} - "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} "422": application/json: {} stream_fim: speakeasy-default-stream-fim: requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} responses: "422": application/json: {} - "200": {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: - application/json: {"model": "codestral-2405", 
"top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} "422": application/json: {} -examplesVersion: 1.0.0 generatedTests: {} diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 572d3ed1..afa4d1d0 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -7,10 +7,8 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -28,7 +26,6 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in GCP. 
- enableCustomCodeRegions: false enumFormat: union fixFlags: responseRequiredSep2024: false @@ -48,6 +45,5 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai-gcp - pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/mistralai_gcp/docs/models/assistantmessage.md index 3d0bd90b..53f1cc76 100644 --- a/packages/mistralai_gcp/docs/models/assistantmessage.md +++ b/packages/mistralai_gcp/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index 3e9de262..abc83281 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -18,5 +18,4 @@ | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index 8200f8a3..863c0229 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -18,5 +18,4 @@ | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index 7507b90c..236d2d21 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index 6cc439c7..fa635932 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md index a166b7bb..8af398f5 100644 --- a/packages/mistralai_gcp/docs/models/function.md +++ b/packages/mistralai_gcp/docs/models/function.md @@ -7,5 +7,4 @@ | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | | `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/mistralai_gcp/docs/models/jsonschema.md deleted file mode 100644 index ae387867..00000000 --- a/packages/mistralai_gcp/docs/models/jsonschema.md +++ /dev/null @@ -1,11 +0,0 @@ -# JSONSchema - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | 
----------------------- | -| `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/prediction.md b/packages/mistralai_gcp/docs/models/prediction.md deleted file mode 100644 index 86e9c396..00000000 --- a/packages/mistralai_gcp/docs/models/prediction.md +++ /dev/null @@ -1,9 +0,0 @@ -# Prediction - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | -| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | -| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md index 23a1641b..9c627f55 100644 --- a/packages/mistralai_gcp/docs/models/responseformat.md +++ b/packages/mistralai_gcp/docs/models/responseformat.md @@ -5,5 +5,4 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/mistralai_gcp/docs/models/responseformats.md index 06886afe..ce35fbb3 100644 --- a/packages/mistralai_gcp/docs/models/responseformats.md +++ b/packages/mistralai_gcp/docs/models/responseformats.md @@ -8,5 +8,4 @@ An object specifying the format that the model must output. 
Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | -| `JSON_SCHEMA` | json_schema | \ No newline at end of file +| `JSON_OBJECT` | json_object | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md index 574be1ea..7aca5fc9 100644 --- a/packages/mistralai_gcp/docs/models/toolcall.md +++ b/packages/mistralai_gcp/docs/models/toolcall.md @@ -7,5 +7,4 @@ | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index 266bc815..393d0f70 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.8 # Discover python modules and packages in the file system subtree. recursive=no @@ -455,10 +455,7 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level, - consider-using-with, - wildcard-import, - unused-wildcard-import + relative-beyond-top-level # Enable the message, report, category or checker with the given id(s). 
You can # either give multiple identifier separated by comma (,) or put this option diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 21cf7eb9..6692f1d5 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,21 +1,9 @@ -[project] +[tool.poetry] name = "mistralai-gcp" version = "1.2.6" description = "Python Client SDK for the Mistral AI API in GCP." -authors = [{ name = "Mistral" },] +authors = ["Mistral",] readme = "README-PYPI.md" -requires-python = ">=3.9" -dependencies = [ - "eval-type-backport >=0.2.0", - "google-auth (>=2.31.0,<3.0.0)", - "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", - "requests (>=2.32.3,<3.0.0)", - "typing-inspection >=0.4.0", -] - -[tool.poetry] packages = [ { include = "mistralai_gcp", from = "src" } ] @@ -27,8 +15,19 @@ include = ["py.typed", "src/mistralai_gcp/py.typed"] [virtualenvs] in-project = true +[tool.poetry.dependencies] +python = "^3.8" +eval-type-backport = "^0.2.0" +google-auth = "2.27.0" +httpx = "^0.28.1" +jsonpath-python = "^1.0.6" +pydantic = "~2.10.3" +python-dateutil = "^2.8.2" +requests = "^2.32.3" +typing-inspect = "^0.9.0" + [tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" +mypy = "==1.13.0" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" @@ -39,7 +38,6 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] -asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/packages/mistralai_gcp/scripts/prepare_readme.py b/packages/mistralai_gcp/scripts/prepare-readme.py similarity index 100% rename from packages/mistralai_gcp/scripts/prepare_readme.py rename to packages/mistralai_gcp/scripts/prepare-readme.py diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index f2f2cf2c..ab45b1f9 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ 
b/packages/mistralai_gcp/scripts/publish.sh @@ -2,6 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare_readme.py +poetry run python scripts/prepare-readme.py poetry publish --build --skip-existing diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py index dd02e42e..a1b7f626 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py @@ -1,18 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from ._version import ( - __title__, - __version__, - __openapi_doc_version__, - __gen_version__, - __user_agent__, -) +from ._version import __title__, __version__ from .sdk import * from .sdkconfiguration import * from .models import * VERSION: str = __version__ -OPENAPI_DOC_VERSION = __openapi_doc_version__ -SPEAKEASY_GENERATOR_VERSION = __gen_version__ -USER_AGENT = __user_agent__ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py index bb867b5b..417126fd 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -7,19 +7,16 @@ class HookContext: - base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, - base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): - self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -28,30 +25,21 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - 
hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index 32be746c..30081f34 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -4,9 +4,6 @@ __title__: str = "mistralai-gcp" __version__: str = "1.2.6" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.2.6 2.548.6 0.0.2 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index bb0aab96..40620018 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -231,10 +231,6 @@ def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -307,10 +303,6 @@ async def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git 
a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index f162d2f7..47e5b63a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai_gcp import models, utils from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import OptionalNullable, UNSET +from mistralai_gcp.types import Nullable, OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from typing import Any, List, Mapping, Optional, Union @@ -14,7 +14,7 @@ class Chat(BaseSDK): def stream( self, *, - model: str, + model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -37,9 +37,6 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -63,7 +60,6 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
- :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -76,8 +72,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -98,9 +92,6 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), ) req = self._build_request( @@ -132,7 +123,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -143,7 +133,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -152,16 +142,9 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -179,7 +162,7 @@ def stream( async 
def stream_async( self, *, - model: str, + model: Nullable[str], messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -202,9 +185,6 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -228,7 +208,6 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
- :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -241,8 +220,6 @@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -263,9 +240,6 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), ) req = self._build_request_async( @@ -297,7 +271,6 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -308,7 +281,7 @@ async def stream_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -317,16 +290,9 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise 
models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -344,7 +310,7 @@ async def stream_async( def complete( self, *, - model: str, + model: Nullable[str], messages: Union[ List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], @@ -375,9 +341,6 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -399,7 +362,6 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
- :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -412,8 +374,6 @@ def complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -436,9 +396,6 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), ) req = self._build_request( @@ -470,7 +427,6 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -480,22 +436,15 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -513,7 +462,7 @@ def complete( async def 
complete_async( self, *, - model: str, + model: Nullable[str], messages: Union[ List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], @@ -544,9 +493,6 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] - ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -568,7 +514,6 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
- :param prediction: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -581,8 +526,6 @@ async def complete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -605,9 +548,6 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), ) req = self._build_request_async( @@ -639,7 +579,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -649,22 +588,15 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", 
http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 84821c6a..89146a4a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai_gcp import models, utils from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import OptionalNullable, UNSET +from mistralai_gcp.types import Nullable, OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from typing import Any, Mapping, Optional, Union @@ -14,7 +14,7 @@ class Fim(BaseSDK): def stream( self, *, - model: str, + model: Nullable[str], prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -60,8 +60,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -105,7 +103,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -116,7 +113,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -125,16 +122,9 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = 
utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -152,7 +142,7 @@ def stream( async def stream_async( self, *, - model: str, + model: Nullable[str], prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -198,8 +188,6 @@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -243,7 +231,6 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -254,7 +241,7 @@ async def stream_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -263,16 +250,9 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise 
models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -290,7 +270,7 @@ async def stream_async( def complete( self, *, - model: str, + model: Nullable[str], prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -336,8 +316,6 @@ def complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -381,7 +359,6 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -391,22 +368,15 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.FIMCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -424,7 +394,7 @@ def complete( async def complete_async( self, *, - model: str, + model: Nullable[str], prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -470,8 +440,6 @@ async def complete_async( if server_url is not 
None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -515,7 +483,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -525,22 +492,15 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.FIMCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - if utils.match_response(http_res, "5XX", "*"): + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) + if utils.match_response(http_res, ["4XX", "5XX"], "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py index 1e426352..167cea4e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py @@ -1,8 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" # pyright: reportReturnType = false -import asyncio -from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -84,53 +82,3 @@ def build_request( async def aclose(self) -> None: pass - - -class ClientOwner(Protocol): - client: Union[HttpClient, None] - async_client: Union[AsyncHttpClient, None] - - -def close_clients( - owner: ClientOwner, - sync_client: Union[HttpClient, None], - sync_client_supplied: bool, - async_client: Union[AsyncHttpClient, None], - async_client_supplied: bool, -) -> None: - """ - A finalizer function that is meant to be used with weakref.finalize to close - httpx clients used by an SDK so that underlying resources can be garbage - collected. - """ - - # Unset the client/async_client properties so there are no more references - # to them from the owning SDK instance and they can be reaped. - owner.client = None - owner.async_client = None - - if sync_client is not None and not sync_client_supplied: - try: - sync_client.close() - except Exception: - pass - - if async_client is not None and not async_client_supplied: - is_async = False - try: - asyncio.get_running_loop() - is_async = True - except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. 
- if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: - asyncio.run(async_client.aclose()) - except Exception: - pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index 154777da..f3c6ce7e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -67,8 +67,6 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -109,7 +107,6 @@ ValidationErrorTypedDict, ) - __all__ = [ "Arguments", "ArgumentsTypedDict", @@ -166,14 +163,10 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", - "JSONSchema", - "JSONSchemaTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", - "Prediction", - "PredictionTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 9147f566..6a9b58f2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -32,7 +32,6 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as 
prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -42,7 +41,6 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index 60a37f2f..ab97e52a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -2,7 +2,6 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -69,7 +68,7 @@ class ChatCompletionRequestTypedDict(TypedDict): - model: str + model: Nullable[str] r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -94,11 +93,10 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] class ChatCompletionRequest(BaseModel): - model: str + model: Nullable[str] r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessages] @@ -137,8 +135,6 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: Optional[Prediction] = None - @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -154,9 +150,15 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", - "prediction", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index f2041426..e6c5429b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -2,7 +2,6 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from 
.systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -65,7 +64,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): - model: str + model: Nullable[str] r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -89,11 +88,10 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] class ChatCompletionStreamRequest(BaseModel): - model: str + model: Nullable[str] r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[Messages] @@ -131,8 +129,6 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: Optional[Prediction] = None - @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -148,9 +144,15 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", - "prediction", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "model", + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + ] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 6dfb7373..81c87b7e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -26,7 +26,7 @@ class FIMCompletionRequestTypedDict(TypedDict): - model: str + model: Nullable[str] r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -52,7 +52,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): - model: str + model: Nullable[str] r"""ID of the model to use. 
Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -98,6 +98,7 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ + "model", "temperature", "max_tokens", "random_seed", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 406749bb..356758d3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -26,7 +26,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): - model: str + model: Nullable[str] r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -51,7 +51,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): - model: str + model: Nullable[str] r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -96,6 +96,7 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ + "model", "temperature", "max_tokens", "random_seed", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py index 3d61e624..c3168eec 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py @@ -10,7 +10,6 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] - strict: NotRequired[bool] class Function(BaseModel): @@ -19,5 +18,3 @@ class Function(BaseModel): parameters: Dict[str, Any] description: Optional[str] = "" - - strict: Optional[bool] = False diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py deleted file mode 100644 index 2c6bd478..00000000 --- 
a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JSONSchemaTypedDict(TypedDict): - name: str - schema_definition: Dict[str, Any] - description: NotRequired[Nullable[str]] - strict: NotRequired[bool] - - -class JSONSchema(BaseModel): - name: str - - schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] - - description: OptionalNullable[str] = UNSET - - strict: Optional[bool] = False - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py deleted file mode 100644 index 742aac0b..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class PredictionTypedDict(TypedDict): - type: Literal["content"] - content: NotRequired[str] - - -class Prediction(BaseModel): - TYPE: Annotated[ - Annotated[ - Optional[Literal["content"]], AfterValidator(validate_const("content")) - ], - pydantic.Field(alias="type"), - ] = "content" - - content: Optional[str] = "" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index 5a24f644..fde89862 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -1,16 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_gcp.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer +from mistralai_gcp.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -18,41 +10,8 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" - - json_schema: OptionalNullable[JSONSchema] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py index 08c39951..2c06b812 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object", "json_schema"] +ResponseFormats = Literal["text", "json_object"] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py index ecbac8d6..5b4b217a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -14,7 +14,6 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] - index: NotRequired[int] class ToolCall(BaseModel): @@ -25,5 +24,3 @@ class ToolCall(BaseModel): type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( None ) - - index: Optional[int] = 0 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index dd93cc7f..abfea8db 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -1,25 +1,23 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" import json -import weakref -from typing import Any, Optional, cast +from typing import Optional, Tuple, Union import google.auth import google.auth.credentials import google.auth.transport import google.auth.transport.requests import httpx - from mistralai_gcp import models from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks from mistralai_gcp.chat import Chat from mistralai_gcp.fim import Fim -from mistralai_gcp.types import UNSET, OptionalNullable +from mistralai_gcp.types import Nullable from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients +from .httpclient import AsyncHttpClient, HttpClient from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, get_default_logger +from .utils.logger import Logger, NoOpLogger from .utils.retries import RetryConfig LEGACY_MODEL_ID_FORMAT = { @@ -28,21 +26,20 @@ "mistral-nemo-2407": "mistral-nemo@2407", } - -def get_model_info(model: str) -> tuple[str, str]: +def get_model_info(model: str) -> Tuple[str, str]: # if the model requiers the legacy fomat, use it, else do nothing. if model in LEGACY_MODEL_ID_FORMAT: return "-".join(model.split("-")[:-1]), LEGACY_MODEL_ID_FORMAT[model] return model, model + class MistralGoogleCloud(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" chat: Chat - r"""Chat Completion API.""" fim: Fim - r"""Fill-in-the-middle API.""" + r"""Chat Completion API""" def __init__( self, @@ -51,20 +48,16 @@ def __init__( access_token: Optional[str] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, - retry_config: OptionalNullable[RetryConfig] = UNSET, - timeout_ms: Optional[int] = None, + retry_config: Optional[Nullable[RetryConfig]] = None, debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. - :param api_key: The api_key required for authentication - :param server: The server by name to use for all methods - :param server_url: The server URL to use for all methods - :param url_params: Parameters to optionally template the server URL with + :param region: The Google Cloud region to use for all methods + :param project_id: The project ID to use for all methods :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods - :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ if not access_token: @@ -79,42 +72,36 @@ def __init__( ) project_id = project_id or loaded_project_id - if project_id is None: raise models.SDKError("project_id must be provided") def auth_token() -> str: if access_token: return access_token - credentials.refresh(google.auth.transport.requests.Request()) token = credentials.token if not token: raise models.SDKError("Failed to get token from credentials") return token - client_supplied = True if client is None: client = httpx.Client() - client_supplied = False 
assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol." - async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() - async_client_supplied = False if debug_logger is None: - debug_logger = get_default_logger() + debug_logger = NoOpLogger() assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." - security: Any = None + security = None if callable(auth_token): security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment api_key=auth_token() @@ -126,24 +113,23 @@ def auth_token() -> str: self, SDKConfiguration( client=client, - client_supplied=client_supplied, async_client=async_client, - async_client_supplied=async_client_supplied, security=security, server_url=f"https://{region}-aiplatform.googleapis.com", server=None, retry_config=retry_config, - timeout_ms=timeout_ms, debug_logger=debug_logger, ), ) hooks = SDKHooks() + hook = GoogleCloudBeforeRequestHook(region, project_id) hooks.register_before_request_hook(hook) + current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client + current_server_url, self.sdk_configuration.client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -151,53 +137,22 @@ def auth_token() -> str: # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks - weakref.finalize( - self, - close_clients, - cast(ClientOwner, self.sdk_configuration), - self.sdk_configuration.client, - self.sdk_configuration.client_supplied, - self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, - ) - self._init_sdks() def _init_sdks(self): self.chat = Chat(self.sdk_configuration) self.fim = Fim(self.sdk_configuration) - def __enter__(self): - return self - - async def __aenter__(self): - return 
self - - def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): - self.sdk_configuration.client.close() - self.sdk_configuration.client = None - - async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): - await self.sdk_configuration.async_client.aclose() - self.sdk_configuration.async_client = None - class GoogleCloudBeforeRequestHook(BeforeRequestHook): + def __init__(self, region: str, project_id: str): self.region = region self.project_id = project_id def before_request( self, hook_ctx, request: httpx.Request - ) -> httpx.Request | Exception: + ) -> Union[httpx.Request, Exception]: # The goal of this function is to template in the region, project and model into the URL path # We do this here so that the API remains more user-friendly model_id = None @@ -212,6 +167,7 @@ def before_request( if model_id == "": raise models.SDKError("model must be provided") + stream = "streamRawPredict" in request.url.path specifier = "streamRawPredict" if stream else "rawPredict" url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model_id}:{specifier}" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index c373d27d..3c149cc6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -1,12 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from ._hooks import SDKHooks -from ._version import ( - __gen_version__, - __openapi_doc_version__, - __user_agent__, - __version__, -) from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass @@ -26,19 +20,17 @@ @dataclass class SDKConfiguration: - client: Union[HttpClient, None] - client_supplied: bool - async_client: Union[AsyncHttpClient, None] - async_client_supplied: bool + client: HttpClient + async_client: AsyncHttpClient debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" - openapi_doc_version: str = __openapi_doc_version__ - sdk_version: str = __version__ - gen_version: str = __gen_version__ - user_agent: str = __user_agent__ + openapi_doc_version: str = "0.0.2" + sdk_version: str = "1.2.6" + gen_version: str = "2.486.1" + user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai-gcp" retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 3cded8fe..26d51ae8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -42,7 +42,6 @@ match_content_type, match_status_codes, match_response, - cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -95,5 +94,4 @@ "validate_float", "validate_int", "validate_open_enum", - "cast_partial", ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index baa41fbd..c5eb3659 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -7,15 +7,14 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union +from typing_inspect import is_optional_type from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - # Optional[T] is a Union[T, None] - if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + if is_optional_type(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -43,8 +42,7 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - # Optional[T] is a Union[T, None] - if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + if is_optional_type(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -72,8 +70,7 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - # Optional[T] is a Union[T, None] - if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + if is_optional_type(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -121,8 +118,7 @@ def validate(e): def validate_const(v): def validate(c): - # Optional[T] is a Union[T, None] - if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + if is_optional_type(type(c)) and c is None: return None if v != c: @@ -167,7 +163,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - return json.dumps(d[next(iter(d))], separators=(",", ":")) + return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) def is_nullable(field): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py index dae01a44..2b4b6832 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py @@ -3,9 +3,8 @@ from datetime import datetime from enum import Enum from email.message import Message -from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union from httpx import Response from pydantic import BaseModel @@ -52,8 +51,6 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") -def cast_partial(typ): - return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] diff --git a/poetry.lock b/poetry.lock index 3d36b94f..78003ff1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -12,6 +12,9 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] +[package.dependencies] +typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} + [[package]] name = "anyio" version = "4.5.2" @@ -312,14 +315,14 @@ trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" -version = "0.28.1" +version = "0.27.2" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, - {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, + {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, + {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, ] [package.dependencies] @@ -327,6 +330,7 @@ anyio = "*" certifi = "*" httpcore = "==1.*" idna = "*" +sniffio = "*" [package.extras] brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] @@ -377,6 +381,18 @@ files = [ [package.extras] colors = ["colorama (>=0.4.6)"] +[[package]] +name = "jsonpath-python" +version = "1.0.6" +description = "A more powerful JSONPath implementation in modern python" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, + {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, +] + [[package]] name = "mccabe" version = "0.7.0" @@ -391,56 +407,50 @@ files = [ [[package]] name = "mypy" -version = "1.14.1" +version = "1.13.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, - {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, - {file = 
"mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, - {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, - {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, - {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, - {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, - {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, - {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, - {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, - {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, - {file = 
"mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, - {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, - {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, - {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, - {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, - {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, - {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, + {file = 
"mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, + {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, + {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, + {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, + {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, + {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, + {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, + {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, + {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, + {file = 
"mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, + {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, + {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, + {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, + {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, + {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, + {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, + {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, + {file = 
"mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, + {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, + {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, + {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, ] [package.dependencies] -mypy_extensions = ">=1.0.0" +mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing_extensions = ">=4.6.0" +typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -455,7 +465,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" -groups = ["dev"] +groups = ["main", "dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -537,19 +547,19 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.10.6" +version = "2.10.2" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, - {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, + {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, + {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, ] 
[package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.27.2" +pydantic-core = "2.27.1" typing-extensions = ">=4.12.2" [package.extras] @@ -558,112 +568,112 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows [[package]] name = "pydantic-core" -version = "2.27.2" +version = "2.27.1" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, - {file = 
"pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, - {file = 
"pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, - {file = 
"pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, - {file = 
"pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, - {file = 
"pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, - {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, + {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, + {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"}, + {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"}, + {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"}, + {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"}, + {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"}, + {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"}, + {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"}, + {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"}, + {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"}, + 
{file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"}, + {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"}, + {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"}, + {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"}, + {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"}, + {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"}, + {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"}, + {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"}, + {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"}, + {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"}, + {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"}, + {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"}, + {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"}, + {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"}, + {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"}, + {file = 
"pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"}, + {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"}, + {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"}, + {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"}, + {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"}, + {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"}, + {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"}, + {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"}, + {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"}, + {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"}, + {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"}, + {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"}, + {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"}, + {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"}, + {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"}, + {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"}, + {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"}, 
+ {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"}, + {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"}, + {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"}, + {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"}, + {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"}, + {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"}, + {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", 
hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"}, + {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"}, + {file = 
"pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"}, + {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"}, + {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"}, ] [package.dependencies] @@ -899,19 +909,20 @@ files = [ ] [[package]] -name = "typing-inspection" -version = "0.4.0" -description = "Runtime typing introspection tools" +name = "typing-inspect" +version = "0.9.0" +description = "Runtime inspection utilities for typing module." 
optional = false -python-versions = ">=3.9" +python-versions = "*" groups = ["main"] files = [ - {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, - {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, + {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, + {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, ] [package.dependencies] -typing-extensions = ">=4.12.0" +mypy-extensions = ">=0.3.0" +typing-extensions = ">=3.7.4" [[package]] name = "urllib3" @@ -937,5 +948,5 @@ gcp = ["google-auth", "requests"] [metadata] lock-version = "2.1" -python-versions = ">=3.9" -content-hash = "c3917a9114ca2a0c01aedf207fa1b59cc259bb07c4d2914fe2ed9a4cb3e1785e" +python-versions = ">=3.8" +content-hash = "f0f19d81d36ebe966895f21a0a9dd33118783904418f4103189c475e5903b958" diff --git a/pylintrc b/pylintrc index 266bc815..9d193c42 100644 --- a/pylintrc +++ b/pylintrc @@ -455,10 +455,7 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level, - consider-using-with, - wildcard-import, - unused-wildcard-import + relative-beyond-top-level # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option diff --git a/pyproject.toml b/pyproject.toml index 8edc7537..8eec1a78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,19 +1,9 @@ -[project] +[tool.poetry] name = "mistralai" -version = "1.5.2-rc.1" +version = "1.5.1" description = "Python Client SDK for the Mistral AI API." 
-authors = [{ name = "Mistral" },] +authors = ["Mistral"] readme = "README-PYPI.md" -requires-python = ">=3.9" -dependencies = [ - "eval-type-backport >=0.2.0", - "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", - "typing-inspection >=0.4.0", -] - -[tool.poetry] repository = "https://github.com/mistralai/client-python.git" packages = [ { include = "mistralai", from = "src" }, @@ -28,25 +18,32 @@ include = ["py.typed", "src/mistralai/py.typed"] [virtualenvs] in-project = true +[tool.poetry.dependencies] +python = ">=3.8" +eval-type-backport = ">=0.2.0" +httpx = ">=0.27.0" +jsonpath-python = ">=1.0.6" +pydantic = ">=2.9.0" +python-dateutil = ">=2.8.2" +typing-inspect = ">=0.9.0" +google-auth = { version = ">=2.27.0", optional = true } +requests = { version = ">=2.32.3", optional = true } + [tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" -pylint = "==3.2.3" -pytest = "^8.2.2" -pytest-asyncio = "^0.23.7" -types-python-dateutil = "^2.9.0.20240316" +mypy = ">=1.13.0" +pylint = ">=3.2.3" +pytest = ">=8.2.2" +pytest-asyncio = ">=0.23.7" +types-python-dateutil = ">=2.9.0.20240316" -[project.optional-dependencies] -gcp = [ - "google-auth >=2.27.0", - "requests >=2.32.3" -] +[tool.poetry.extras] +gcp = ["google-auth", "requests"] [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] -asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/scripts/prepare_readme.py b/scripts/prepare-readme.py similarity index 84% rename from scripts/prepare_readme.py rename to scripts/prepare-readme.py index 16f6fc7e..9111d6cb 100644 --- a/scripts/prepare_readme.py +++ b/scripts/prepare-readme.py @@ -4,7 +4,7 @@ import shutil try: - with open("README.md", "r", encoding="utf-8") as rh: + with open("README.md", "r") as rh: readme_contents = rh.read() GITHUB_URL = 
"https://github.com/mistralai/client-python.git" GITHUB_URL = ( @@ -21,13 +21,13 @@ readme_contents, ) - with open("README-PYPI.md", "w", encoding="utf-8") as wh: + with open("README-PYPI.md", "w") as wh: wh.write(readme_contents) except Exception as e: try: print("Failed to rewrite README.md to README-PYPI.md, copying original instead") print(e) shutil.copyfile("README.md", "README-PYPI.md") - except Exception as ie: + except Exception as e: print("Failed to copy README.md to README-PYPI.md") - print(ie) + print(e) diff --git a/scripts/publish.sh b/scripts/publish.sh index f2f2cf2c..ab45b1f9 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -2,6 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare_readme.py +poetry run python scripts/prepare-readme.py poetry publish --build --skip-existing diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py index ebc789ff..fe448e94 100644 --- a/src/mistralai/_hooks/types.py +++ b/src/mistralai/_hooks/types.py @@ -7,19 +7,16 @@ class HookContext: - base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, - base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): - self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -28,30 +25,21 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - 
hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, + hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source ) diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 6b24498d..700c880e 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.5.2-rc.1" +__version__: str = "1.5.1" __openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.5.2-rc.1 2.548.6 0.0.2 mistralai" +__gen_version__: str = "2.497.0" +__user_agent__: str = "speakeasy-sdk/python 1.5.1 2.497.0 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 6d43b480..05fd165c 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -78,8 +78,6 @@ def complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionRequest( max_tokens=max_tokens, @@ -134,7 +132,6 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -146,14 +143,12 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise 
models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -241,8 +236,6 @@ async def complete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionRequest( max_tokens=max_tokens, @@ -297,7 +290,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -309,14 +301,12 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -406,8 +396,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, @@ -462,7 +450,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env( @@ -475,7 +462,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if 
utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -484,10 +471,8 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -577,8 +562,6 @@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, @@ -633,7 +616,6 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env( @@ -646,7 +628,7 @@ async def stream_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -655,10 +637,8 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index 512e3072..cda8adda 100644 --- 
a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -231,10 +231,6 @@ def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -307,10 +303,6 @@ async def do(): req.headers, get_body_content(req), ) - - if client is None: - raise ValueError("client is required") - http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 558796d8..67646ffe 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -158,8 +158,6 @@ def complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -215,7 +213,6 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -227,14 +224,12 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -320,8 +315,6 @@ async def complete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, 
url_variables) request = models.ChatCompletionRequest( model=model, @@ -377,7 +370,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -389,14 +381,12 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -492,8 +482,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -551,7 +539,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env( @@ -564,7 +551,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -573,10 +560,8 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res_text, 
models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -672,8 +657,6 @@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -731,7 +714,6 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env( @@ -744,7 +726,7 @@ async def stream_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -753,10 +735,8 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index 7191df0c..6ff1d6a8 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -40,8 +40,6 @@ def moderate( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ClassificationRequest( model=model, @@ -77,7 +75,6 @@ def moderate( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], 
security_source=get_security_from_env( @@ -89,14 +86,12 @@ def moderate( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -146,8 +141,6 @@ async def moderate_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ClassificationRequest( model=model, @@ -183,7 +176,6 @@ async def moderate_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -195,14 +187,12 @@ async def moderate_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -254,8 +244,6 @@ def moderate_chat( if server_url is not None: base_url = server_url - else: 
- base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( model=model, @@ -292,7 +280,6 @@ def moderate_chat( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="moderations_chat_v1_chat_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -304,14 +291,12 @@ def moderate_chat( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -363,8 +348,6 @@ async def moderate_chat_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( model=model, @@ -401,7 +384,6 @@ async def moderate_chat_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="moderations_chat_v1_chat_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -413,14 +395,12 @@ async def moderate_chat_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = 
utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index b99ff0cf..f6f558b8 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -14,8 +14,8 @@ class Embeddings(BaseSDK): def create( self, *, - model: str, inputs: Union[models.Inputs, models.InputsTypedDict], + model: Optional[str] = "mistral-embed", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -25,8 +25,8 @@ def create( Embeddings - :param model: ID of the model to use. :param inputs: Text to embed. + :param model: ID of the model to use. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -39,8 +39,6 @@ def create( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.EmbeddingRequest( model=model, @@ -76,7 +74,6 @@ def create( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -88,14 +85,12 @@ def create( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, 
models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -119,8 +114,8 @@ def create( async def create_async( self, *, - model: str, inputs: Union[models.Inputs, models.InputsTypedDict], + model: Optional[str] = "mistral-embed", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -130,8 +125,8 @@ async def create_async( Embeddings - :param model: ID of the model to use. :param inputs: Text to embed. + :param model: ID of the model to use. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -144,8 +139,6 @@ async def create_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.EmbeddingRequest( model=model, @@ -181,7 +174,6 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -193,14 +185,12 @@ async def create_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 0ffc4857..042e4aea 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -44,8 +44,6 @@ def upload( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), @@ -85,7 +83,6 @@ def upload( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -151,8 +148,6 @@ async def upload_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), @@ -192,7 +187,6 @@ async def upload_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -262,8 +256,6 @@ def list( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesListFilesRequest( page=page, @@ -300,7 +292,6 @@ def list( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env( @@ -370,8 +361,6 @@ async def list_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesListFilesRequest( page=page, @@ -408,7 +397,6 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], 
security_source=get_security_from_env( @@ -468,8 +456,6 @@ def retrieve( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, @@ -501,7 +487,6 @@ def retrieve( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -561,8 +546,6 @@ async def retrieve_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, @@ -594,7 +577,6 @@ async def retrieve_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -654,8 +636,6 @@ def delete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, @@ -687,7 +667,6 @@ def delete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -747,8 +726,6 @@ async def delete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, @@ -780,7 +757,6 @@ async def delete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -840,8 +816,6 @@ def download( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDownloadFileRequest( 
file_id=file_id, @@ -873,7 +847,6 @@ def download( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_download_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -934,8 +907,6 @@ async def download_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, @@ -967,7 +938,6 @@ async def download_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_download_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -1028,8 +998,6 @@ def get_signed_url( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, @@ -1062,7 +1030,6 @@ def get_signed_url( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], security_source=get_security_from_env( @@ -1122,8 +1089,6 @@ async def get_signed_url_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, @@ -1156,7 +1121,6 @@ async def get_signed_url_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 032c722f..c11f6c99 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -60,8 +60,6 @@ def complete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -105,7 +103,6 @@ def complete( http_res = 
self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -117,14 +114,12 @@ def complete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -194,8 +189,6 @@ async def complete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -239,7 +232,6 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -251,14 +243,12 @@ async def complete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) raise models.SDKError( @@ -328,8 +318,6 @@ def stream( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -373,7 +361,6 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env( @@ -386,7 +373,7 @@ def stream( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -395,10 +382,8 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -468,8 +453,6 @@ async def stream_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -513,7 +496,6 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env( @@ -526,7 +508,7 @@ async def stream_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -535,10 +517,8 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await 
utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py index 1e426352..9dc43cb0 100644 --- a/src/mistralai/httpclient.py +++ b/src/mistralai/httpclient.py @@ -94,9 +94,7 @@ class ClientOwner(Protocol): def close_clients( owner: ClientOwner, sync_client: Union[HttpClient, None], - sync_client_supplied: bool, async_client: Union[AsyncHttpClient, None], - async_client_supplied: bool, ) -> None: """ A finalizer function that is meant to be used with weakref.finalize to close @@ -109,13 +107,13 @@ def close_clients( owner.client = None owner.async_client = None - if sync_client is not None and not sync_client_supplied: + if sync_client is not None: try: sync_client.close() except Exception: pass - if async_client is not None and not async_client_supplied: + if async_client is not None: is_async = False try: asyncio.get_running_loop() diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 675ece0b..ea66bfc6 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -52,8 +52,6 @@ def list( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, @@ -93,7 +91,6 @@ def list( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -169,8 +166,6 @@ async def list_async( if server_url is not None: base_url = server_url - else: - base_url = 
self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, @@ -210,7 +205,6 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -296,8 +290,6 @@ def create( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobIn( model=model, @@ -347,7 +339,6 @@ def create( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -435,8 +426,6 @@ async def create_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobIn( model=model, @@ -486,7 +475,6 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -548,8 +536,6 @@ def get( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, @@ -581,7 +567,6 @@ def get( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -641,8 +626,6 @@ async def get_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, @@ -674,7 +657,6 @@ async def get_async( http_res = await self.do_request_async( 
hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -734,8 +716,6 @@ def cancel( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, @@ -767,7 +747,6 @@ def cancel( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -827,8 +806,6 @@ async def cancel_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, @@ -860,7 +837,6 @@ async def cancel_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -920,8 +896,6 @@ def start( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, @@ -953,7 +927,6 @@ def start( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -1013,8 +986,6 @@ async def start_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, @@ -1046,7 +1017,6 @@ async def start_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", 
operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index e0d3c616..fe6b266a 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -48,8 +48,6 @@ def list( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobsRequest( page=page, @@ -87,7 +85,6 @@ def list( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -159,8 +156,6 @@ async def list_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobsRequest( page=page, @@ -198,7 +193,6 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -266,8 +260,6 @@ def create( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.BatchJobIn( input_files=input_files, @@ -306,7 +298,6 @@ def create( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -374,8 +365,6 @@ async def create_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.BatchJobIn( input_files=input_files, @@ -414,7 +403,6 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", 
oauth2_scopes=[], security_source=get_security_from_env( @@ -474,8 +462,6 @@ def get( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, @@ -507,7 +493,6 @@ def get( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -567,8 +552,6 @@ async def get_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, @@ -600,7 +583,6 @@ async def get_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -660,8 +642,6 @@ def cancel( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchCancelBatchJobRequest( job_id=job_id, @@ -693,7 +673,6 @@ def cancel( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -753,8 +732,6 @@ async def cancel_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchCancelBatchJobRequest( job_id=job_id, @@ -786,7 +763,6 @@ async def cancel_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 0750906a..197f6e1f 100644 --- a/src/mistralai/models/__init__.py 
+++ b/src/mistralai/models/__init__.py @@ -115,11 +115,7 @@ DetailedJobOutStatus, DetailedJobOutTypedDict, ) -from .documenturlchunk import ( - DocumentURLChunk, - DocumentURLChunkType, - DocumentURLChunkTypedDict, -) +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .embeddingrequest import ( EmbeddingRequest, EmbeddingRequestTypedDict, @@ -459,7 +455,6 @@ "Document", "DocumentTypedDict", "DocumentURLChunk", - "DocumentURLChunkType", "DocumentURLChunkTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py index 29945102..23622335 100644 --- a/src/mistralai/models/documenturlchunk.py +++ b/src/mistralai/models/documenturlchunk.py @@ -2,32 +2,38 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentURLChunkType = Literal["document_url"] +from typing_extensions import Annotated, NotRequired, TypedDict class DocumentURLChunkTypedDict(TypedDict): document_url: str + type: Literal["document_url"] document_name: NotRequired[Nullable[str]] r"""The filename of the document""" - type: NotRequired[DocumentURLChunkType] class DocumentURLChunk(BaseModel): document_url: str + TYPE: Annotated[ + Annotated[ + Optional[Literal["document_url"]], + AfterValidator(validate_const("document_url")), + ], + pydantic.Field(alias="type"), + ] = "document_url" + document_name: OptionalNullable[str] = UNSET r"""The filename of the document""" - type: Optional[DocumentURLChunkType] = "document_url" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["document_name", "type"] + optional_fields = 
["type", "document_name"] nullable_fields = ["document_name"] null_default_fields = [] diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 5c37fd48..b5ccd26e 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel import pydantic -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict InputsTypedDict = TypeAliasType("InputsTypedDict", Union[str, List[str]]) @@ -16,15 +16,15 @@ class EmbeddingRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use.""" inputs: InputsTypedDict r"""Text to embed.""" + model: NotRequired[str] + r"""ID of the model to use.""" class EmbeddingRequest(BaseModel): - model: str - r"""ID of the model to use.""" - inputs: Annotated[Inputs, pydantic.Field(alias="input")] r"""Text to embed.""" + + model: Optional[str] = "mistral-embed" + r"""ID of the model to use.""" diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py index 8599192b..8628b308 100644 --- a/src/mistralai/models/filepurpose.py +++ b/src/mistralai/models/filepurpose.py @@ -5,4 +5,4 @@ from typing import Literal, Union -FilePurpose = Union[Literal["fine-tune", "batch", "ocr"], UnrecognizedStr] +FilePurpose = Union[Literal["fine-tune", "batch"], UnrecognizedStr] diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index 0b04694d..ec45eb36 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -35,8 +35,6 @@ def list( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) req = self._build_request( method="GET", path="/v1/models", @@ -63,7 +61,6 @@ def list( http_res = self.do_request( hook_ctx=HookContext( - 
base_url=base_url or "", operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env( @@ -75,14 +72,12 @@ def list( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ModelList) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -127,8 +122,6 @@ async def list_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) req = self._build_request_async( method="GET", path="/v1/models", @@ -155,7 +148,6 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env( @@ -167,14 +159,12 @@ async def list_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ModelList) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -221,8 +211,6 @@ def retrieve( if 
server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, @@ -254,7 +242,6 @@ def retrieve( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env( @@ -266,17 +253,15 @@ def retrieve( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -323,8 +308,6 @@ async def retrieve_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, @@ -356,7 +339,6 @@ async def retrieve_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env( @@ -368,17 +350,15 @@ async def retrieve_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, ) if utils.match_response(http_res, "422", 
"application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -425,8 +405,6 @@ def delete( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, @@ -458,7 +436,6 @@ def delete( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env( @@ -470,14 +447,12 @@ def delete( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.DeleteModelOut) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -524,8 +499,6 @@ async def delete_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, @@ -557,7 +530,6 @@ async def delete_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], 
security_source=get_security_from_env( @@ -569,14 +541,12 @@ async def delete_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.DeleteModelOut) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -627,8 +597,6 @@ def update( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, @@ -667,7 +635,6 @@ def update( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -731,8 +698,6 @@ async def update_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, @@ -771,7 +736,6 @@ async def update_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -831,8 +795,6 @@ def archive( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, @@ -864,7 +826,6 @@ def archive( http_res = 
self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -924,8 +885,6 @@ async def archive_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, @@ -957,7 +916,6 @@ async def archive_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -1017,8 +975,6 @@ def unarchive( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, @@ -1050,7 +1006,6 @@ def unarchive( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -1110,8 +1065,6 @@ async def unarchive_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, @@ -1143,7 +1096,6 @@ async def unarchive_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index 5d0e2414..56c1da51 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -47,8 +47,6 @@ def process( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.OCRRequest( 
model=model, @@ -89,7 +87,6 @@ def process( http_res = self.do_request( hook_ctx=HookContext( - base_url=base_url or "", operation_id="ocr_v1_ocr_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -101,14 +98,12 @@ def process( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.OCRResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -165,8 +160,6 @@ async def process_async( if server_url is not None: base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) request = models.OCRRequest( model=model, @@ -207,7 +200,6 @@ async def process_async( http_res = await self.do_request_async( hook_ctx=HookContext( - base_url=base_url or "", operation_id="ocr_v1_ocr_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -219,14 +211,12 @@ async def process_async( retry_config=retry_config, ) - response_data: Any = None + data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.OCRResponse) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData - ) - raise models.HTTPValidationError(data=response_data) + data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) + raise models.HTTPValidationError(data=data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) 
raise models.SDKError( diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index e801eaf3..00d8370a 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -68,19 +68,15 @@ def __init__( :param retry_config: The retry configuration to use for all supported methods :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ - client_supplied = True if client is None: client = httpx.Client() - client_supplied = False assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol." - async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() - async_client_supplied = False if debug_logger is None: debug_logger = get_default_logger() @@ -104,9 +100,7 @@ def __init__( self, SDKConfiguration( client=client, - client_supplied=client_supplied, async_client=async_client, - async_client_supplied=async_client_supplied, security=security, server_url=server_url, server=server, @@ -120,7 +114,7 @@ def __init__( current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client + current_server_url, self.sdk_configuration.client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -133,9 +127,7 @@ def __init__( close_clients, cast(ClientOwner, self.sdk_configuration), self.sdk_configuration.client, - self.sdk_configuration.client_supplied, self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, ) self._init_sdks() @@ -159,17 +151,9 @@ async def __aenter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): + if self.sdk_configuration.client is not None: self.sdk_configuration.client.close() - self.sdk_configuration.client = None async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - 
self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): + if self.sdk_configuration.async_client is not None: await self.sdk_configuration.async_client.aclose() - self.sdk_configuration.async_client = None diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 257ff01d..2ccbcbe1 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -26,10 +26,8 @@ @dataclass class SDKConfiguration: - client: Union[HttpClient, None] - client_supplied: bool - async_client: Union[AsyncHttpClient, None] - async_client_supplied: bool + client: HttpClient + async_client: AsyncHttpClient debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py index d8b21128..151c87d4 100644 --- a/src/mistralai/utils/__init__.py +++ b/src/mistralai/utils/__init__.py @@ -43,7 +43,6 @@ match_content_type, match_status_codes, match_response, - cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -97,5 +96,4 @@ "validate_float", "validate_int", "validate_open_enum", - "cast_partial", ] diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py index baa41fbd..c5eb3659 100644 --- a/src/mistralai/utils/serializers.py +++ b/src/mistralai/utils/serializers.py @@ -7,15 +7,14 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union +from typing_inspect import is_optional_type from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - # Optional[T] is a Union[T, None] - if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + if is_optional_type(type(d)) and d is None: return 
None if isinstance(d, Unset): return d @@ -43,8 +42,7 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - # Optional[T] is a Union[T, None] - if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + if is_optional_type(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -72,8 +70,7 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - # Optional[T] is a Union[T, None] - if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + if is_optional_type(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -121,8 +118,7 @@ def validate(e): def validate_const(v): def validate(c): - # Optional[T] is a Union[T, None] - if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + if is_optional_type(type(c)) and c is None: return None if v != c: @@ -167,7 +163,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - return json.dumps(d[next(iter(d))], separators=(",", ":")) + return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) def is_nullable(field): diff --git a/src/mistralai/utils/values.py b/src/mistralai/utils/values.py index dae01a44..2b4b6832 100644 --- a/src/mistralai/utils/values.py +++ b/src/mistralai/utils/values.py @@ -3,9 +3,8 @@ from datetime import datetime from enum import Enum from email.message import Message -from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union from httpx import Response from pydantic import BaseModel @@ -52,8 +51,6 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") -def cast_partial(typ): - return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] From f027a7d52b7372b6769e6738c2fb8f5090acf9dd Mon Sep 17 00:00:00 2001 From: 
"github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 19 Mar 2025 19:38:42 +0100 Subject: [PATCH 115/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.477.0 (#206) Co-authored-by: speakeasybot --- .speakeasy/gen.lock | 9 +++++---- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 ++++++------ README.md | 8 ++++---- RELEASES.md | 12 +++++++++++- USAGE.md | 8 ++++---- docs/models/documenturlchunk.md | 10 +++++----- docs/models/documenturlchunktype.md | 8 ++++++++ docs/models/embeddingrequest.md | 4 ++-- docs/models/filepurpose.md | 3 ++- docs/sdks/embeddings/README.md | 6 +++--- pyproject.toml | 2 +- src/mistralai/_version.py | 4 ++-- src/mistralai/embeddings.py | 8 ++++---- src/mistralai/models/__init__.py | 7 ++++++- src/mistralai/models/documenturlchunk.py | 22 ++++++++-------------- src/mistralai/models/embeddingrequest.py | 14 +++++++------- src/mistralai/models/filepurpose.py | 2 +- 18 files changed, 80 insertions(+), 61 deletions(-) create mode 100644 docs/models/documenturlchunktype.md diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 6eb1248e..038cb129 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 81cc8be96362e2f1cb145b08a2e6c4fa + docChecksum: 406e00c323dba0db26d6994620926af4 docVersion: 0.0.2 speakeasyVersion: 1.477.0 generationVersion: 2.497.0 - releaseVersion: 1.5.1 - configChecksum: ef3439d915c5d16e7cfb88fe2bf94907 + releaseVersion: 1.5.2 + configChecksum: 27786d5cae3bfccc8b434aba9bde02a3 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -101,6 +101,7 @@ generatedFiles: - docs/models/detailedjoboutstatus.md - docs/models/document.md - docs/models/documenturlchunk.md + - docs/models/documenturlchunktype.md - 
docs/models/embeddingrequest.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md @@ -650,7 +651,7 @@ examples: ocr_v1_ocr_post: speakeasy-default-ocr-v1-ocr-post: requestBody: - application/json: {"model": "Focus", "document": {"document_url": "https://dutiful-horst.org"}} + application/json: {"model": "Focus", "document": {"document_url": "https://dutiful-horst.org", "type": "document_url"}} responses: "200": application/json: {"pages": [], "model": "A4", "usage_info": {"pages_processed": 442675}} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index f020895b..50541983 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.5.1 + version: 1.5.2 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 21228dc5..255a803e 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 - sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b + sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 + sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 tags: - latest - - speakeasy-sdk-regen-1741279153 + - speakeasy-sdk-regen-1742407785 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:bdfe3bd4e867529e1821e0f195c2d5832083f7699315f4a42d6b5551bd7847a3 - sourceBlobDigest: sha256:7e8a475b75404d724fc7936bd6f585b8e5226d3dca00ab4b69807b53fb63151b + 
sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 + sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:ba10be893f3e6dae275eb8fb09a688f3652de81eebd314427f28c274800edc48 + codeSamplesRevisionDigest: sha256:d98cc101a0bdcb0666e965e71e7a472ea1e9ab6170aa7e2b4676987107704a58 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.477.0 diff --git a/README.md b/README.md index fd31bcd8..dcc26eac 100644 --- a/README.md +++ b/README.md @@ -242,10 +242,10 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(inputs=[ + res = mistral.embeddings.create(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) @@ -265,10 +265,10 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.embeddings.create_async(inputs=[ + res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) diff --git a/RELEASES.md b/RELEASES.md index d7b657bb..a08f1fbb 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -168,4 +168,14 @@ Based on: ### Generated - [python v1.5.1] . ### Releases -- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . \ No newline at end of file +- [PyPI v1.5.1] https://pypi.org/project/mistralai/1.5.1 - . + +## 2025-03-19 18:09:29 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.477.0 (2.497.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.5.2] . 
+### Releases +- [PyPI v1.5.2] https://pypi.org/project/mistralai/1.5.2 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index 3e1cae03..2e583dd5 100644 --- a/USAGE.md +++ b/USAGE.md @@ -161,10 +161,10 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(inputs=[ + res = mistral.embeddings.create(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) @@ -184,10 +184,10 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.embeddings.create_async(inputs=[ + res = await mistral.embeddings.create_async(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) diff --git a/docs/models/documenturlchunk.md b/docs/models/documenturlchunk.md index 33785c34..6c9a5b4d 100644 --- a/docs/models/documenturlchunk.md +++ b/docs/models/documenturlchunk.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | -| `document_url` | *str* | :heavy_check_mark: | N/A | -| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | -| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `document_name` | 
*OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | +| `type` | [Optional[models.DocumentURLChunkType]](../models/documenturlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/documenturlchunktype.md b/docs/models/documenturlchunktype.md new file mode 100644 index 00000000..32e1fa9e --- /dev/null +++ b/docs/models/documenturlchunktype.md @@ -0,0 +1,8 @@ +# DocumentURLChunkType + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `DOCUMENT_URL` | document_url | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 07ab903a..3bdd79e8 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | Example | | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | +| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | \ No newline at end of file diff --git a/docs/models/filepurpose.md b/docs/models/filepurpose.md index 5152aeeb..14cab13e 100644 --- a/docs/models/filepurpose.md +++ b/docs/models/filepurpose.md @@ -6,4 +6,5 @@ | Name | Value | | ----------- | ----------- | | `FINE_TUNE` | fine-tune | -| `BATCH` | batch | \ No newline at end of file +| `BATCH` | batch | +| `OCR` | ocr | \ No newline at end of file diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 44fae4ac..89e8e030 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -23,10 +23,10 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.embeddings.create(inputs=[ + res = mistral.embeddings.create(model="mistral-embed", inputs=[ "Embed this sentence.", "As well as this one.", - ], model="mistral-embed") + ]) # Handle response print(res) @@ -37,8 +37,8 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | | `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `model` | *Optional[str]* | :heavy_minus_sign: | ID of the model to use. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/pyproject.toml b/pyproject.toml index 8eec1a78..f75c36f0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.5.1" +version = "1.5.2" description = "Python Client SDK for the Mistral AI API." authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 700c880e..64883488 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.5.1" +__version__: str = "1.5.2" __openapi_doc_version__: str = "0.0.2" __gen_version__: str = "2.497.0" -__user_agent__: str = "speakeasy-sdk/python 1.5.1 2.497.0 0.0.2 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.5.2 2.497.0 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index f6f558b8..09101c4f 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -14,8 +14,8 @@ class Embeddings(BaseSDK): def create( self, *, + model: str, inputs: Union[models.Inputs, models.InputsTypedDict], - model: Optional[str] = "mistral-embed", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -25,8 +25,8 @@ def create( Embeddings - :param inputs: Text to embed. :param model: ID of the model to use. + :param inputs: Text to embed. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -114,8 +114,8 @@ def create( async def create_async( self, *, + model: str, inputs: Union[models.Inputs, models.InputsTypedDict], - model: Optional[str] = "mistral-embed", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -125,8 +125,8 @@ async def create_async( Embeddings - :param inputs: Text to embed. :param model: ID of the model to use. + :param inputs: Text to embed. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 197f6e1f..0750906a 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -115,7 +115,11 @@ DetailedJobOutStatus, DetailedJobOutTypedDict, ) -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .documenturlchunk import ( + DocumentURLChunk, + DocumentURLChunkType, + DocumentURLChunkTypedDict, +) from .embeddingrequest import ( EmbeddingRequest, EmbeddingRequestTypedDict, @@ -455,6 +459,7 @@ "Document", "DocumentTypedDict", "DocumentURLChunk", + "DocumentURLChunkType", "DocumentURLChunkTypedDict", "EmbeddingRequest", "EmbeddingRequestTypedDict", diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py index 23622335..29945102 100644 --- a/src/mistralai/models/documenturlchunk.py +++ b/src/mistralai/models/documenturlchunk.py @@ -2,38 +2,32 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, 
UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict + + +DocumentURLChunkType = Literal["document_url"] class DocumentURLChunkTypedDict(TypedDict): document_url: str - type: Literal["document_url"] document_name: NotRequired[Nullable[str]] r"""The filename of the document""" + type: NotRequired[DocumentURLChunkType] class DocumentURLChunk(BaseModel): document_url: str - TYPE: Annotated[ - Annotated[ - Optional[Literal["document_url"]], - AfterValidator(validate_const("document_url")), - ], - pydantic.Field(alias="type"), - ] = "document_url" - document_name: OptionalNullable[str] = UNSET r"""The filename of the document""" + type: Optional[DocumentURLChunkType] = "document_url" + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "document_name"] + optional_fields = ["document_name", "type"] nullable_fields = ["document_name"] null_default_fields = [] diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index b5ccd26e..5c37fd48 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -3,8 +3,8 @@ from __future__ import annotations from mistralai.types import BaseModel import pydantic -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict InputsTypedDict = TypeAliasType("InputsTypedDict", Union[str, List[str]]) @@ -16,15 +16,15 @@ class EmbeddingRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use.""" inputs: InputsTypedDict r"""Text to embed.""" - model: NotRequired[str] - r"""ID 
of the model to use.""" class EmbeddingRequest(BaseModel): + model: str + r"""ID of the model to use.""" + inputs: Annotated[Inputs, pydantic.Field(alias="input")] r"""Text to embed.""" - - model: Optional[str] = "mistral-embed" - r"""ID of the model to use.""" diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py index 8628b308..8599192b 100644 --- a/src/mistralai/models/filepurpose.py +++ b/src/mistralai/models/filepurpose.py @@ -5,4 +5,4 @@ from typing import Literal, Union -FilePurpose = Union[Literal["fine-tune", "batch"], UnrecognizedStr] +FilePurpose = Union[Literal["fine-tune", "batch", "ocr"], UnrecognizedStr] From b93ff4e51ef4bca6d74d483b37f4abed6cd7588d Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Thu, 20 Mar 2025 11:32:54 +0100 Subject: [PATCH 116/223] update (#208) --- .github/workflows/run_example_scripts.yaml | 4 +- .speakeasy/workflow.yaml | 2 +- pyproject.toml | 43 ++++++++++++---------- 3 files changed, 25 insertions(+), 24 deletions(-) diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index db0a30aa..e55ca08d 100644 --- a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -14,7 +14,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] steps: - name: Checkout code @@ -27,8 +27,6 @@ jobs: - name: Install Poetry uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 - with: - version: ${{ matrix.python-version == '3.8' && '1.8.5' || '2.1.1' }} - name: Build and install client run: | diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 00aefc99..d448108d 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,5 @@ workflowVersion: 1.0.0 -speakeasyVersion: 1.477.0 +speakeasyVersion: 1.517.3 
sources: mistral-azure-source: inputs: diff --git a/pyproject.toml b/pyproject.toml index f75c36f0..785997ab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,19 @@ -[tool.poetry] +[project] name = "mistralai" version = "1.5.2" description = "Python Client SDK for the Mistral AI API." -authors = ["Mistral"] +authors = [{ name = "Mistral" },] readme = "README-PYPI.md" +requires-python = ">=3.9" +dependencies = [ + "eval-type-backport >=0.2.0", + "httpx >=0.28.1", + "pydantic >=2.10.3", + "python-dateutil >=2.8.2", + "typing-inspection >=0.4.0", +] + +[tool.poetry] repository = "https://github.com/mistralai/client-python.git" packages = [ { include = "mistralai", from = "src" }, @@ -18,32 +28,25 @@ include = ["py.typed", "src/mistralai/py.typed"] [virtualenvs] in-project = true -[tool.poetry.dependencies] -python = ">=3.8" -eval-type-backport = ">=0.2.0" -httpx = ">=0.27.0" -jsonpath-python = ">=1.0.6" -pydantic = ">=2.9.0" -python-dateutil = ">=2.8.2" -typing-inspect = ">=0.9.0" -google-auth = { version = ">=2.27.0", optional = true } -requests = { version = ">=2.32.3", optional = true } - [tool.poetry.group.dev.dependencies] -mypy = ">=1.13.0" -pylint = ">=3.2.3" -pytest = ">=8.2.2" -pytest-asyncio = ">=0.23.7" -types-python-dateutil = ">=2.9.0.20240316" +mypy = "==1.14.1" +pylint = "==3.2.3" +pytest = "^8.2.2" +pytest-asyncio = "^0.23.7" +types-python-dateutil = "^2.9.0.20240316" -[tool.poetry.extras] -gcp = ["google-auth", "requests"] +[project.optional-dependencies] +gcp = [ + "google-auth >=2.27.0", + "requests >=2.32.3" +] [build-system] requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] From fde481b662f75135339f1bf21b2be81a205fe079 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 21 Mar 2025 10:32:43 +0100 
Subject: [PATCH 117/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.6.0=20(#209)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.517.3 * update azure * update gcp --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .speakeasy/gen.lock | 51 ++- .speakeasy/gen.yaml | 5 +- .speakeasy/workflow.lock | 44 +- README.md | 67 ++- RELEASES.md | 12 +- USAGE.md | 16 +- docs/models/agentscompletionrequest.md | 3 +- docs/models/agentscompletionstreamrequest.md | 3 +- docs/models/chatcompletionrequest.md | 1 + docs/models/chatcompletionstreamrequest.md | 1 + docs/sdks/agents/README.md | 8 +- docs/sdks/chat/README.md | 8 +- docs/sdks/classifiers/README.md | 39 +- docs/sdks/embeddings/README.md | 1 + docs/sdks/files/README.md | 10 +- docs/sdks/fim/README.md | 6 +- docs/sdks/jobs/README.md | 11 +- docs/sdks/mistraljobs/README.md | 8 +- docs/sdks/models/README.md | 6 + docs/sdks/ocr/README.md | 1 + packages/mistralai_azure/.speakeasy/gen.lock | 41 +- packages/mistralai_azure/.speakeasy/gen.yaml | 8 +- .../docs/models/assistantmessage.md | 12 +- .../docs/models/chatcompletionrequest.md | 4 +- .../models/chatcompletionstreamrequest.md | 4 +- .../docs/models/contentchunk.md | 6 + .../mistralai_azure/docs/models/function.md | 3 +- .../mistralai_azure/docs/models/imageurl.md | 9 + .../docs/models/imageurlchunk.md | 11 + .../docs/models/imageurlchunkimageurl.md | 17 + .../docs/models/imageurlchunktype.md | 8 + .../mistralai_azure/docs/models/jsonschema.md | 11 + .../mistralai_azure/docs/models/prediction.md | 9 + .../docs/models/responseformat.md | 3 +- .../docs/models/responseformats.md | 3 +- .../mistralai_azure/docs/models/toolcall.md | 3 +- packages/mistralai_azure/poetry.lock | 369 +++++++++------- packages/mistralai_azure/pylintrc | 7 +- packages/mistralai_azure/pyproject.toml | 28 +- .../{prepare-readme.py 
=> prepare_readme.py} | 0 packages/mistralai_azure/scripts/publish.sh | 2 +- .../src/mistralai_azure/__init__.py | 11 +- .../src/mistralai_azure/_hooks/types.py | 18 +- .../src/mistralai_azure/_version.py | 5 +- .../src/mistralai_azure/basesdk.py | 8 + .../src/mistralai_azure/chat.py | 120 +++++- .../src/mistralai_azure/httpclient.py | 52 +++ .../src/mistralai_azure/models/__init__.py | 22 + .../models/assistantmessage.py | 2 + .../models/chatcompletionrequest.py | 22 +- .../models/chatcompletionstreamrequest.py | 22 +- .../mistralai_azure/models/contentchunk.py | 8 +- .../src/mistralai_azure/models/function.py | 5 +- .../src/mistralai_azure/models/imageurl.py | 53 +++ .../mistralai_azure/models/imageurlchunk.py | 33 ++ .../src/mistralai_azure/models/jsonschema.py | 61 +++ .../src/mistralai_azure/models/prediction.py | 25 ++ .../mistralai_azure/models/responseformat.py | 43 +- .../mistralai_azure/models/responseformats.py | 2 +- .../src/mistralai_azure/models/toolcall.py | 3 + .../src/mistralai_azure/sdk.py | 70 ++- .../src/mistralai_azure/sdkconfiguration.py | 20 +- .../src/mistralai_azure/utils/__init__.py | 2 + .../src/mistralai_azure/utils/serializers.py | 16 +- .../src/mistralai_azure/utils/values.py | 5 +- packages/mistralai_gcp/.speakeasy/gen.lock | 48 ++- packages/mistralai_gcp/.speakeasy/gen.yaml | 8 +- .../docs/models/assistantmessage.md | 12 +- .../docs/models/chatcompletionrequest.md | 6 +- .../models/chatcompletionstreamrequest.md | 6 +- .../mistralai_gcp/docs/models/contentchunk.md | 6 + .../docs/models/fimcompletionrequest.md | 2 +- .../docs/models/fimcompletionstreamrequest.md | 2 +- .../mistralai_gcp/docs/models/function.md | 3 +- .../mistralai_gcp/docs/models/imageurl.md | 9 + .../docs/models/imageurlchunk.md | 11 + .../docs/models/imageurlchunkimageurl.md | 17 + .../docs/models/imageurlchunktype.md | 8 + .../mistralai_gcp/docs/models/jsonschema.md | 11 + .../mistralai_gcp/docs/models/prediction.md | 9 + .../docs/models/responseformat.md | 3 +- 
.../docs/models/responseformats.md | 3 +- .../mistralai_gcp/docs/models/toolcall.md | 3 +- packages/mistralai_gcp/poetry.lock | 404 ++++++++++-------- packages/mistralai_gcp/pylintrc | 7 +- packages/mistralai_gcp/pyproject.toml | 32 +- .../{prepare-readme.py => prepare_readme.py} | 0 packages/mistralai_gcp/scripts/publish.sh | 2 +- .../src/mistralai_gcp/__init__.py | 11 +- .../src/mistralai_gcp/_hooks/types.py | 18 +- .../src/mistralai_gcp/_version.py | 5 +- .../src/mistralai_gcp/basesdk.py | 8 + .../mistralai_gcp/src/mistralai_gcp/chat.py | 122 +++++- .../mistralai_gcp/src/mistralai_gcp/fim.py | 82 +++- .../src/mistralai_gcp/httpclient.py | 52 +++ .../src/mistralai_gcp/models/__init__.py | 22 + .../mistralai_gcp/models/assistantmessage.py | 2 + .../models/chatcompletionrequest.py | 22 +- .../models/chatcompletionstreamrequest.py | 22 +- .../src/mistralai_gcp/models/contentchunk.py | 8 +- .../models/fimcompletionrequest.py | 5 +- .../models/fimcompletionstreamrequest.py | 5 +- .../src/mistralai_gcp/models/function.py | 5 +- .../src/mistralai_gcp/models/imageurl.py | 53 +++ .../src/mistralai_gcp/models/imageurlchunk.py | 33 ++ .../src/mistralai_gcp/models/jsonschema.py | 61 +++ .../src/mistralai_gcp/models/prediction.py | 25 ++ .../mistralai_gcp/models/responseformat.py | 43 +- .../mistralai_gcp/models/responseformats.py | 2 +- .../src/mistralai_gcp/models/toolcall.py | 3 + .../mistralai_gcp/src/mistralai_gcp/sdk.py | 82 +++- .../src/mistralai_gcp/sdkconfiguration.py | 20 +- .../src/mistralai_gcp/utils/__init__.py | 2 + .../src/mistralai_gcp/utils/serializers.py | 16 +- .../src/mistralai_gcp/utils/values.py | 5 +- poetry.lock | 329 +++++++------- pylintrc | 5 +- pyproject.toml | 2 +- .../{prepare-readme.py => prepare_readme.py} | 8 +- scripts/publish.sh | 2 +- src/mistralai/_hooks/types.py | 18 +- src/mistralai/_version.py | 6 +- src/mistralai/agents.py | 56 ++- src/mistralai/basesdk.py | 8 + src/mistralai/chat.py | 56 ++- src/mistralai/classifiers.py | 52 ++- 
src/mistralai/embeddings.py | 22 +- src/mistralai/files.py | 36 ++ src/mistralai/fim.py | 44 +- src/mistralai/httpclient.py | 6 +- src/mistralai/jobs.py | 30 ++ src/mistralai/mistral_jobs.py | 24 ++ .../models/agentscompletionrequest.py | 4 + .../models/agentscompletionstreamrequest.py | 4 + src/mistralai/models/chatcompletionrequest.py | 4 + .../models/chatcompletionstreamrequest.py | 4 + src/mistralai/models/function.py | 4 +- src/mistralai/models/jsonschema.py | 2 +- src/mistralai/models_.py | 84 +++- src/mistralai/ocr.py | 22 +- src/mistralai/sdk.py | 22 +- src/mistralai/sdkconfiguration.py | 6 +- src/mistralai/utils/__init__.py | 2 + src/mistralai/utils/serializers.py | 16 +- src/mistralai/utils/values.py | 5 +- 145 files changed, 2733 insertions(+), 983 deletions(-) create mode 100644 packages/mistralai_azure/docs/models/imageurl.md create mode 100644 packages/mistralai_azure/docs/models/imageurlchunk.md create mode 100644 packages/mistralai_azure/docs/models/imageurlchunkimageurl.md create mode 100644 packages/mistralai_azure/docs/models/imageurlchunktype.md create mode 100644 packages/mistralai_azure/docs/models/jsonschema.md create mode 100644 packages/mistralai_azure/docs/models/prediction.md rename packages/mistralai_azure/scripts/{prepare-readme.py => prepare_readme.py} (100%) create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/imageurl.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/prediction.py create mode 100644 packages/mistralai_gcp/docs/models/imageurl.md create mode 100644 packages/mistralai_gcp/docs/models/imageurlchunk.md create mode 100644 packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md create mode 100644 packages/mistralai_gcp/docs/models/imageurlchunktype.md create mode 100644 
packages/mistralai_gcp/docs/models/jsonschema.md create mode 100644 packages/mistralai_gcp/docs/models/prediction.md rename packages/mistralai_gcp/scripts/{prepare-readme.py => prepare_readme.py} (100%) create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py rename scripts/{prepare-readme.py => prepare_readme.py} (84%) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 038cb129..fc748eb0 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 406e00c323dba0db26d6994620926af4 + docChecksum: 292a97e4dd465554d420c92d78d70c5f docVersion: 0.0.2 - speakeasyVersion: 1.477.0 - generationVersion: 2.497.0 - releaseVersion: 1.5.2 - configChecksum: 27786d5cae3bfccc8b434aba9bde02a3 + speakeasyVersion: 1.517.3 + generationVersion: 2.548.6 + releaseVersion: 1.6.0 + configChecksum: 1cfb4e3f53a140213b7b400e79811fe5 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,7 +14,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.10.5 + core: 5.12.3 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 @@ -22,19 +22,19 @@ features: envVarSecurityUsage: 0.3.2 examples: 3.0.1 flatRequests: 1.0.1 - flattening: 3.1.0 - globalSecurity: 3.0.2 + flattening: 3.1.1 + globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 nameOverrides: 3.0.1 - nullables: 1.0.0 + nullables: 1.0.1 
openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.0 + sdkHooks: 1.0.1 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -239,7 +239,7 @@ generatedFiles: - poetry.toml - py.typed - pylintrc - - scripts/prepare-readme.py + - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai/__init__.py - src/mistralai/_hooks/__init__.py @@ -449,7 +449,7 @@ examples: files_api_routes_upload_file: speakeasy-default-files-api-routes-upload-file: requestBody: - multipart/form-data: {"file": {"": "x-file: example.file"}} + multipart/form-data: {"file": {}} responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_request", "source": "repository"} @@ -505,14 +505,14 @@ examples: created_by_me: false responses: "200": - application/json: {"total": 768578} + application/json: {"object": "list", "total": 768578} jobs_api_routes_fine_tuning_create_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: requestBody: application/json: {"model": "Fiesta", "hyperparameters": {"learning_rate": 0.0001}} responses: "200": - application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10} + application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10, "object": "job.metadata"} jobs_api_routes_fine_tuning_get_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: parameters: @@ -520,7 +520,7 @@ examples: job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - 
application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} + application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_cancel_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: parameters: @@ -528,7 +528,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, "modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} + application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", 
"created_at": 251316, "modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_start_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: parameters: @@ -536,7 +536,7 @@ examples: job_id: "0bf0f9e6-c3e5-4d61-aac8-0e36dcac0dfc" responses: "200": - application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, {"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} + application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, {"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} jobs_api_routes_batch_get_batch_jobs: speakeasy-default-jobs-api-routes-batch-get-batch-jobs: parameters: @@ -546,14 +546,14 @@ examples: created_by_me: false responses: "200": - application/json: {"total": 768578} + application/json: {"object": "list", "total": 768578} jobs_api_routes_batch_create_batch_job: speakeasy-default-jobs-api-routes-batch-create-batch-job: requestBody: 
application/json: {"input_files": ["a621cf02-1cd9-4cf5-8403-315211a509a3"], "endpoint": "/v1/fim/completions", "model": "2", "timeout_hours": 24} responses: "200": - application/json: {"id": "", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} + application/json: {"id": "", "object": "batch", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} jobs_api_routes_batch_get_batch_job: speakeasy-default-jobs-api-routes-batch-get-batch-job: parameters: @@ -561,7 +561,7 @@ examples: job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} + application/json: {"id": "", "object": "batch", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, 
"completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} jobs_api_routes_batch_cancel_batch_job: speakeasy-default-jobs-api-routes-batch-cancel-batch-job: parameters: @@ -569,7 +569,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} + application/json: {"id": "", "object": "batch", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: @@ -657,5 +657,14 @@ examples: application/json: {"pages": [], "model": "A4", "usage_info": {"pages_processed": 442675}} "422": application/json: {} + chat_moderations_v1_chat_moderations_post: + speakeasy-default-chat-moderations-v1-chat-moderations-post: + requestBody: + application/json: {"model": "Model Y", "input": [[{"content": [], "role": "system"}, {"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}], [{"content": "", "role": "system"}, {"content": [{"image_url": "https://fatherly-colon.name", "type": "image_url"}], "role": "user"}, {"content": "", "role": "user"}]], 
"truncate_for_context_length": false} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} + "422": + application/json: {} examplesVersion: 1.0.0 generatedTests: {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 50541983..3bc90dff 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -7,13 +7,15 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true + nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true + securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.5.2 + version: 1.6.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -44,5 +46,6 @@ python: methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai + pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 255a803e..b4c6af57 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,49 +1,49 @@ -speakeasyVersion: 1.477.0 +speakeasyVersion: 1.517.3 sources: mistral-azure-source: - sourceNamespace: mistral-azure-source - sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 - sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 + sourceNamespace: mistral-openapi-azure + sourceRevisionDigest: sha256:c5931a7e0cc2db844149d71db57dfc2178665f0400bc26c90ee113795ea2872f + sourceBlobDigest: sha256:504fff788fdac8d781e33d85e3a04d35f6d9f7a3ef5ed40da8b4567074e94f03 tags: - latest mistral-google-cloud-source: - sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 - sourceBlobDigest: sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 + sourceNamespace: mistral-openapi-google-cloud + sourceRevisionDigest: 
sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 + sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 - sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 + sourceRevisionDigest: sha256:15e39124d61c30c69260e298a909e60996ac6e8623c202d1745b88fc3e67cb2f + sourceBlobDigest: sha256:d16b98efd9214ceb1c89beedc40e67dd09349d5122076f6e16d1a552ee5b3e63 tags: - latest - - speakeasy-sdk-regen-1742407785 + - speakeasy-sdk-regen-1742466858 targets: mistralai-azure-sdk: source: mistral-azure-source - sourceNamespace: mistral-azure-source - sourceRevisionDigest: sha256:ff181b1e0e3894a4925f7ae87415323058538a13bae9d9d508a8fe3b6ec0e333 - sourceBlobDigest: sha256:a2b4fd69298ebb9adb0d3c8dfb452db52defac512a7532203eebffb6a252df76 + sourceNamespace: mistral-openapi-azure + sourceRevisionDigest: sha256:c5931a7e0cc2db844149d71db57dfc2178665f0400bc26c90ee113795ea2872f + sourceBlobDigest: sha256:504fff788fdac8d781e33d85e3a04d35f6d9f7a3ef5ed40da8b4567074e94f03 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:28356dba7ea28436035e20182b8ce4d1951e19503b5accef6a128d860361e5c0 + codeSamplesRevisionDigest: sha256:e242a7fc42e44d2bbc8e5637d4a6455da7fb3d0307dc275ee4c64867f5c4be55 mistralai-gcp-sdk: source: mistral-google-cloud-source - sourceNamespace: mistral-google-cloud-source - sourceRevisionDigest: sha256:36c7de11e35023dc8fa5f3c0fb0e486d2a102275a2df808c08cfe9d43089be04 - sourceBlobDigest: sha256:dd65bfa5d0448ad1851ebb18b57aa675533cd3e166beb86a390b0ab51d16a1c1 + sourceNamespace: mistral-openapi-google-cloud + sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 + sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f 
codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:7de23f90d6543356f310f46375bef4db7f43eb22b2871ad4dfe1b7d0cc875bb4 + codeSamplesRevisionDigest: sha256:03b3e82c20d10faa8622f14696632b96b1a2e8d747b266fff345061298d5f3e4 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:8655bba5635f9f9bc3aa94690c26d6124c778e03478786074288cd63414a7a84 - sourceBlobDigest: sha256:cd9280b2b089ef5e0b06ba94ed2736b928f7e4e542d04f408df84e6912049ba6 + sourceRevisionDigest: sha256:15e39124d61c30c69260e298a909e60996ac6e8623c202d1745b88fc3e67cb2f + sourceBlobDigest: sha256:d16b98efd9214ceb1c89beedc40e67dd09349d5122076f6e16d1a552ee5b3e63 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:d98cc101a0bdcb0666e965e71e7a472ea1e9ab6170aa7e2b4676987107704a58 + codeSamplesRevisionDigest: sha256:4de7cac024939b19dfba3601531280e278d2d8188dc063827694bda3992666fc workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.477.0 + speakeasyVersion: 1.517.3 sources: mistral-azure-source: inputs: diff --git a/README.md b/README.md index dcc26eac..6bfeae52 100644 --- a/README.md +++ b/README.md @@ -75,6 +75,37 @@ pip install mistralai ```bash poetry add mistralai ``` + +### Shell and script usage with `uv` + +You can use this SDK in a Python shell with [uv](https://docs.astral.sh/uv/) and the `uvx` command that comes with it like so: + +```shell +uvx --from mistralai python +``` + +It's also possible to write a standalone Python script without needing to set up a whole project like so: + +```python +#!/usr/bin/env -S uv run --script +# /// script +# requires-python = ">=3.9" +# dependencies = [ +# "mistralai", +# ] +# /// + +from mistralai import Mistral + +sdk = Mistral( + # SDK arguments +) + +# Rest of script here... 
+``` + +Once that is saved to a file, you can run it with `uv run script.py` where +`script.py` can be replaced with the actual file name. @@ -89,6 +120,7 @@ This example shows how to create chat completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -98,7 +130,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -114,6 +146,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -123,7 +156,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -140,6 +173,7 @@ This example shows how to upload a file. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -163,6 +197,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -187,6 +222,7 @@ This example shows how to create agents completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -196,7 +232,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -212,6 +248,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -221,7 +258,7 @@ async def main(): "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -238,6 +275,7 @@ This example shows how to create embedding request. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -261,6 +299,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -401,7 +440,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA ### [classifiers](docs/sdks/classifiers/README.md) * [moderate](docs/sdks/classifiers/README.md#moderate) - Moderations -* [moderate_chat](docs/sdks/classifiers/README.md#moderate_chat) - Moderations Chat +* [moderate_chat](docs/sdks/classifiers/README.md#moderate_chat) - Chat Moderations ### [embeddings](docs/sdks/embeddings/README.md) @@ -465,6 +504,7 @@ underlying connection when the context is exited. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -474,7 +514,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], stream=True) + ]) with res as event_stream: for event in event_stream: @@ -502,6 +542,7 @@ Certain SDK methods accept file objects as part of a request body or multi-part from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -528,6 +569,7 @@ from mistralai import Mistral from mistralai.utils import BackoffStrategy, RetryConfig import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -546,6 +588,7 @@ from mistralai import Mistral from mistralai.utils import BackoffStrategy, RetryConfig import os + with Mistral( retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -586,6 +629,7 @@ When custom error responses are specified for an operation, the SDK may also rai from mistralai import Mistral, models import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -613,9 +657,9 @@ with Mistral( You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. 
This table lists the names associated with the available servers: -| Name | Server | -| ---- | ------------------------ | -| `eu` | `https://api.mistral.ai` | +| Name | Server | Description | +| ---- | ------------------------ | -------------------- | +| `eu` | `https://api.mistral.ai` | EU Production server | #### Example @@ -623,6 +667,7 @@ You can override the default server globally by passing a server name to the `se from mistralai import Mistral import os + with Mistral( server="eu", api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -642,6 +687,7 @@ The default server can also be overridden globally by passing a URL to the `serv from mistralai import Mistral import os + with Mistral( server_url="https://api.mistral.ai", api_key=os.getenv("MISTRAL_API_KEY", ""), @@ -752,6 +798,7 @@ To authenticate with the API the `api_key` parameter must be set when initializi from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -775,6 +822,7 @@ The `Mistral` class implements the context manager protocol and registers a fina from mistralai import Mistral import os def main(): + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -783,6 +831,7 @@ def main(): # Or when using async: async def amain(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/RELEASES.md b/RELEASES.md index a08f1fbb..4827ac51 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -178,4 +178,14 @@ Based on: ### Generated - [python v1.5.2] . ### Releases -- [PyPI v1.5.2] https://pypi.org/project/mistralai/1.5.2 - . \ No newline at end of file +- [PyPI v1.5.2] https://pypi.org/project/mistralai/1.5.2 - . 
+ +## 2025-03-20 10:34:02 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.6.0] . +### Releases +- [PyPI v1.6.0] https://pypi.org/project/mistralai/1.6.0 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index 2e583dd5..fa3a77de 100644 --- a/USAGE.md +++ b/USAGE.md @@ -8,6 +8,7 @@ This example shows how to create chat completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -17,7 +18,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -33,6 +34,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -42,7 +44,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -59,6 +61,7 @@ This example shows how to upload a file. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -82,6 +85,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -106,6 +110,7 @@ This example shows how to create agents completions. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -115,7 +120,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -131,6 +136,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -140,7 +146,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -157,6 +163,7 @@ This example shows how to create embedding request. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -180,6 +187,7 @@ from mistralai import Mistral import os async def main(): + async with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 8976849d..73a0f77a 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -17,4 +17,5 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index 3a6c8a12..4a3093dd 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -17,4 +17,5 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index 714f4f5a..904ad6c5 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -20,4 +20,5 @@ | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 378ccd41..f2cce68b 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -20,4 +20,5 @@ | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
| | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 00ca33ac..28e10497 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -20,6 +20,7 @@ Agents Completion from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -29,7 +30,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="") # Handle response print(res) @@ -53,6 +54,7 @@ with Mistral( | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response @@ -76,6 +78,7 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -85,7 +88,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=True) + ], agent_id="") with res as event_stream: for event in event_stream: @@ -111,6 +114,7 @@ with Mistral( | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 38e16adc..12d9feca 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -20,6 +20,7 @@ Chat Completion from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -29,7 +30,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], stream=False) + ]) # Handle response print(res) @@ -55,6 +56,7 @@ with Mistral( | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -79,6 +81,7 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -88,7 +91,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=True) + ]) with res as event_stream: for event in event_stream: @@ -116,6 +119,7 @@ with Mistral( | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
| | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 6bcc68a9..cbe409bb 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -8,7 +8,7 @@ Classifiers API. ### Available Operations * [moderate](#moderate) - Moderations -* [moderate_chat](#moderate_chat) - Moderations Chat +* [moderate_chat](#moderate_chat) - Chat Moderations ## moderate @@ -20,6 +20,7 @@ Moderations from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -54,7 +55,7 @@ with Mistral( ## moderate_chat -Moderations Chat +Chat Moderations ### Example Usage @@ -62,19 +63,18 @@ Moderations Chat from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.moderate_chat(model="Roadster", inputs=[ + res = mistral.classifiers.moderate_chat(model="Model Y", inputs=[ [ { - "content": "", - "role": "tool", - }, - { - "content": "", - "role": "tool", + "content": [ + + ], + "role": "system", }, { "content": "", @@ -95,7 +95,26 @@ with Mistral( "role": "assistant", }, ], - ], truncate_for_context_length=False) + [ + { + "content": "", + "role": "system", + }, + { + "content": [ + { + "image_url": "https://fatherly-colon.name", + "type": "image_url", + }, + ], 
+ "role": "user", + }, + { + "content": "", + "role": "user", + }, + ], + ]) # Handle response print(res) diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 89e8e030..8c386439 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -19,6 +19,7 @@ Embeddings from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 8f01a668..befa4d67 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -28,6 +28,7 @@ Please contact us if you need to increase these storage limits. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -70,11 +71,12 @@ Returns a list of files that belong to the user's organization. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.list(page=0, page_size=100) + res = mistral.files.list() # Handle response print(res) @@ -113,6 +115,7 @@ Returns information about a specific file. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -151,6 +154,7 @@ Delete a file. 
from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -189,6 +193,7 @@ Download a file from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -227,11 +232,12 @@ Get Signed Url from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.get_signed_url(file_id="", expiry=24) + res = mistral.files.get_signed_url(file_id="") # Handle response print(res) diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 28de6c02..c70b3da4 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -20,11 +20,12 @@ FIM completion. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.complete(model="codestral-2405", prompt="def", top_p=1, stream=False, suffix="return a+b") + res = mistral.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") # Handle response print(res) @@ -68,11 +69,12 @@ Mistral AI provides the ability to stream responses back to a client in order to from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.stream(model="codestral-2405", prompt="def", top_p=1, stream=True, suffix="return a+b") + res = mistral.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") with res as event_stream: for event in event_stream: diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 06605877..ecb11def 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -21,11 +21,12 @@ Get a list of fine-tuning jobs for your organization and user. 
from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.list(page=0, page_size=100, created_by_me=False) + res = mistral.fine_tuning.jobs.list() # Handle response print(res) @@ -67,13 +68,12 @@ Create a new fine-tuning job, it will be queued for processing. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={ - "learning_rate": 0.0001, - }) + res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={}) # Handle response print(res) @@ -114,6 +114,7 @@ Get a fine-tuned job details by its UUID. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -152,6 +153,7 @@ Request the cancellation of a fine tuning job. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -190,6 +192,7 @@ Request the start of a validated fine tuning job. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 56a7f60b..5b80a45b 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -20,11 +20,12 @@ Get a list of batch jobs for your organization and user. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False) + res = mistral.batch.jobs.list() # Handle response print(res) @@ -64,13 +65,14 @@ Create a new batch job, it will be queued for processing. 
from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: res = mistral.batch.jobs.create(input_files=[ "a621cf02-1cd9-4cf5-8403-315211a509a3", - ], endpoint="/v1/fim/completions", model="2", timeout_hours=24) + ], endpoint="/v1/fim/completions", model="2") # Handle response print(res) @@ -108,6 +110,7 @@ Get a batch job details by its UUID. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -146,6 +149,7 @@ Request the cancellation of a batch job. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index e048d20e..dd7baf50 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -24,6 +24,7 @@ List all models available to the user. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -62,6 +63,7 @@ Retrieve a model information. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -101,6 +103,7 @@ Delete a fine-tuned model. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -140,6 +143,7 @@ Update a model name or description. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -180,6 +184,7 @@ Archive a fine-tuned model. from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: @@ -218,6 +223,7 @@ Un-archive a fine-tuned model. 
from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 54f8af96..61988ea6 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -19,6 +19,7 @@ OCR from mistralai import Mistral import os + with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 16a5196b..b7d6f3ba 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,34 +1,34 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 4da7c33f650ddf206c58fa6c941d347f + docChecksum: 574e96caec9a63dbe3f39d646830f2c2 docVersion: 0.0.2 - speakeasyVersion: 1.462.2 - generationVersion: 2.486.1 - releaseVersion: 1.2.6 - configChecksum: cc2ac1769a87215774fce0075ff2e77d + speakeasyVersion: 1.517.3 + generationVersion: 2.548.6 + releaseVersion: 1.6.0 + configChecksum: 3a4d9b204b5731c461ed7279710d5ed6 published: true features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.7.4 + core: 5.12.3 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.0 + examples: 3.0.1 flatRequests: 1.0.1 - globalSecurity: 3.0.2 + globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 - nullables: 1.0.0 + nullables: 1.0.1 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.0 + sdkHooks: 1.0.1 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -61,8 +61,14 @@ generatedFiles: - docs/models/functioncall.md - docs/models/functionname.md - docs/models/httpvalidationerror.md + - docs/models/imageurl.md + - docs/models/imageurlchunk.md + - docs/models/imageurlchunkimageurl.md + - docs/models/imageurlchunktype.md 
+ - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md + - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/responseformat.md @@ -92,7 +98,7 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare-readme.py + - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai_azure/__init__.py - src/mistralai_azure/_hooks/__init__.py @@ -117,6 +123,10 @@ generatedFiles: - src/mistralai_azure/models/functioncall.py - src/mistralai_azure/models/functionname.py - src/mistralai_azure/models/httpvalidationerror.py + - src/mistralai_azure/models/imageurl.py + - src/mistralai_azure/models/imageurlchunk.py + - src/mistralai_azure/models/jsonschema.py + - src/mistralai_azure/models/prediction.py - src/mistralai_azure/models/referencechunk.py - src/mistralai_azure/models/responseformat.py - src/mistralai_azure/models/responseformats.py @@ -134,6 +144,7 @@ generatedFiles: - src/mistralai_azure/models/usermessage.py - src/mistralai_azure/models/validationerror.py - src/mistralai_azure/py.typed + - src/mistralai_azure/sdk.py - src/mistralai_azure/sdkconfiguration.py - src/mistralai_azure/types/__init__.py - src/mistralai_azure/types/basemodel.py @@ -156,17 +167,19 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "422": application/json: {} + "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "azureai", "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} +examplesVersion: 1.0.0 generatedTests: {} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index 17344d9b..be4a1781 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -7,13 +7,15 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true + nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true + securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.6 + version: 1.6.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -23,6 +25,7 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in Azure. 
+ enableCustomCodeRegions: false enumFormat: union fixFlags: responseRequiredSep2024: false @@ -38,9 +41,10 @@ python: shared: "" webhooks: "" inputModelSuffix: input - maxMethodParams: 4 + maxMethodParams: 15 methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai_azure + pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/mistralai_azure/docs/models/assistantmessage.md index 53f1cc76..3d0bd90b 100644 --- a/packages/mistralai_azure/docs/models/assistantmessage.md +++ b/packages/mistralai_azure/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index 68cef4a1..a9a174fb 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | @@ -19,4 +19,6 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
| | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index c9c5c87b..b3e06e7a 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | @@ -19,4 +19,6 @@ | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
| | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/contentchunk.md b/packages/mistralai_azure/docs/models/contentchunk.md index 98b86391..22023e8b 100644 --- a/packages/mistralai_azure/docs/models/contentchunk.md +++ b/packages/mistralai_azure/docs/models/contentchunk.md @@ -3,6 +3,12 @@ ## Supported Types +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + ### `models.TextChunk` ```python diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/mistralai_azure/docs/models/function.md index 8af398f5..a166b7bb 100644 --- a/packages/mistralai_azure/docs/models/function.md +++ b/packages/mistralai_azure/docs/models/function.md @@ -7,4 +7,5 @@ | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | | `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurl.md b/packages/mistralai_azure/docs/models/imageurl.md new file mode 100644 index 00000000..7c2bcbc3 --- /dev/null +++ b/packages/mistralai_azure/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | 
----------------------- | ----------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurlchunk.md b/packages/mistralai_azure/docs/models/imageurlchunk.md new file mode 100644 index 00000000..f1b926ef --- /dev/null +++ b/packages/mistralai_azure/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurlchunkimageurl.md b/packages/mistralai_azure/docs/models/imageurlchunkimageurl.md new file mode 100644 index 00000000..76738908 --- /dev/null +++ b/packages/mistralai_azure/docs/models/imageurlchunkimageurl.md @@ -0,0 +1,17 @@ +# ImageURLChunkImageURL + + +## Supported Types + +### `models.ImageURL` + +```python +value: models.ImageURL = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/imageurlchunktype.md b/packages/mistralai_azure/docs/models/imageurlchunktype.md new file mode 100644 index 00000000..2064a0b4 --- /dev/null +++ b/packages/mistralai_azure/docs/models/imageurlchunktype.md @@ -0,0 +1,8 @@ +# ImageURLChunkType + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `IMAGE_URL` 
| image_url | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/jsonschema.md b/packages/mistralai_azure/docs/models/jsonschema.md new file mode 100644 index 00000000..ae387867 --- /dev/null +++ b/packages/mistralai_azure/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/prediction.md b/packages/mistralai_azure/docs/models/prediction.md new file mode 100644 index 00000000..86e9c396 --- /dev/null +++ b/packages/mistralai_azure/docs/models/prediction.md @@ -0,0 +1,9 @@ +# Prediction + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/mistralai_azure/docs/models/responseformat.md index 9c627f55..23a1641b 100644 --- a/packages/mistralai_azure/docs/models/responseformat.md +++ b/packages/mistralai_azure/docs/models/responseformat.md @@ -5,4 +5,5 @@ | Field | Type | Required | Description | | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
| +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/mistralai_azure/docs/models/responseformats.md index ce35fbb3..06886afe 100644 --- a/packages/mistralai_azure/docs/models/responseformats.md +++ b/packages/mistralai_azure/docs/models/responseformats.md @@ -8,4 +8,5 @@ An object specifying the format that the model must output. Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | \ No newline at end of file +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/mistralai_azure/docs/models/toolcall.md index 7aca5fc9..574be1ea 100644 --- a/packages/mistralai_azure/docs/models/toolcall.md +++ b/packages/mistralai_azure/docs/models/toolcall.md @@ -7,4 +7,5 @@ | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/poetry.lock b/packages/mistralai_azure/poetry.lock index b9a32d7f..8b70ddcc 100644 --- a/packages/mistralai_azure/poetry.lock +++ b/packages/mistralai_azure/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by 
hand. +# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -6,20 +6,19 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - [[package]] name = "anyio" version = "4.4.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, @@ -33,7 +32,7 @@ typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] trio = ["trio (>=0.23)"] [[package]] @@ -42,6 +41,7 @@ version = "3.2.4" description = "An abstract syntax tree for Python with inference support." 
optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, @@ -56,6 +56,7 @@ version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, @@ -67,6 +68,8 @@ version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +markers = "sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -78,6 +81,7 @@ version = "0.3.8" description = "serialize all of Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, @@ -93,6 +97,7 @@ version = "0.2.0" description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, @@ -107,6 +112,8 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -121,6 +128,7 @@ version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -132,6 +140,7 @@ version = "1.0.5" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, @@ -149,13 +158,14 @@ trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" -version = "0.27.0" +version = "0.28.1" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, - {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] [package.dependencies] @@ -163,13 +173,13 @@ anyio = "*" certifi = "*" httpcore = "==1.*" idna = "*" -sniffio = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "idna" @@ -177,6 +187,7 @@ version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" +groups = ["main"] files = [ {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, @@ -188,6 +199,7 @@ version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, @@ -199,6 +211,7 @@ version = "5.13.2" description = "A Python utility / library to sort Python imports." 
optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, @@ -207,23 +220,13 @@ files = [ [package.extras] colors = ["colorama (>=0.4.6)"] -[[package]] -name = "jsonpath-python" -version = "1.0.6" -description = "A more powerful JSONPath implementation in modern python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, - {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, -] - [[package]] name = "mccabe" version = "0.7.0" description = "McCabe checker, plugin for flake8" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, @@ -231,47 +234,60 @@ files = [ [[package]] name = "mypy" -version = "1.10.1" +version = "1.14.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" -files = [ - {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, - {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, - {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, - {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, - {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, - {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, - {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, - {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, - {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, - {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, - {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, - {file = 
"mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, - {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, - {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, - {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, - {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, - {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, - {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, - {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, +groups = ["dev"] +files = [ + {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, + {file = 
"mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, + {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, + {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, + {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, + {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, + {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, + {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, + {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, + {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, + {file = 
"mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, + {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, + {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, + {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, + {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, + {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, ] [package.dependencies] -mypy-extensions = ">=1.0.0" +mypy_extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" +typing_extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] @@ -282,6 +298,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.5" +groups = ["dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -293,6 +310,7 @@ version = "24.1" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, @@ -304,6 +322,7 @@ version = "4.2.2" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, @@ -320,6 +339,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -331,123 +351,133 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pydantic" -version = "2.9.1" +version = "2.10.6" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "pydantic-2.9.1-py3-none-any.whl", hash = 
"sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, - {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, + {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, + {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.23.3" -typing-extensions = [ - {version = ">=4.6.1", markers = "python_version < \"3.13\""}, - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, -] +pydantic-core = "2.27.2" +typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" -version = "2.23.3" +version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" -files = [ - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, - {file = 
"pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, - {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, - {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, - {file = 
"pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, - {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, - {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, - {file = 
"pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, - {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, - {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, - {file = 
"pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, - {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, - {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, - {file = 
"pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, - {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, - {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, - {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, - {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, - {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, +groups = ["main"] +files = [ + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = 
"sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = 
"sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = 
"pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, ] [package.dependencies] @@ -459,6 +489,7 @@ version = "3.2.3" description = "python code static checker" optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, @@ -470,7 +501,7 @@ colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} dill = [ {version = ">=0.2", markers = "python_version < \"3.11\""}, {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, + {version = ">=0.3.6", markers = "python_version == \"3.11\""}, ] isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" mccabe = ">=0.6,<0.8" @@ -489,6 +520,7 @@ version = "8.3.2" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, @@ -511,6 +543,7 @@ version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, {file = "pytest_asyncio-0.23.8.tar.gz", hash = 
"sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, @@ -529,6 +562,7 @@ version = "2.8.2" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, @@ -543,6 +577,7 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["main"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -554,6 +589,7 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -565,6 +601,8 @@ version = "2.0.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, @@ -576,6 +614,7 @@ version = "0.13.0" description = "Style preserving TOML library" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = 
"tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, @@ -587,6 +626,7 @@ version = "2.9.0.20240316" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, @@ -598,27 +638,28 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] -name = "typing-inspect" -version = "0.9.0" -description = "Runtime inspection utilities for typing module." 
+name = "typing-inspection" +version = "0.4.0" +description = "Runtime typing introspection tools" optional = false -python-versions = "*" +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, - {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, + {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, + {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, ] [package.dependencies] -mypy-extensions = ">=0.3.0" -typing-extensions = ">=3.7.4" +typing-extensions = ">=4.12.0" [metadata] -lock-version = "2.0" -python-versions = "^3.8" -content-hash = "4dfa1b4612afda308a6d0df6d282f34b7020cf4639d6668ac7c63e40807d9e0b" +lock-version = "2.1" +python-versions = ">=3.9" +content-hash = "16a8e7bb56287babdd384870773880315911c8f1851d21314cf11ca92104c600" diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index 393d0f70..266bc815 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.8 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no @@ -455,7 +455,10 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import # Enable the message, report, category or checker with the given id(s). 
You can # either give multiple identifier separated by comma (,) or put this option diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 5c227f66..cca906a7 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,9 +1,19 @@ -[tool.poetry] +[project] name = "mistralai_azure" -version = "1.2.6" +version = "1.6.0" description = "Python Client SDK for the Mistral AI API in Azure." -authors = ["Mistral",] +authors = [{ name = "Mistral" },] readme = "README-PYPI.md" +requires-python = ">=3.9" +dependencies = [ + "eval-type-backport >=0.2.0", + "httpx >=0.28.1", + "pydantic >=2.10.3", + "python-dateutil >=2.8.2", + "typing-inspection >=0.4.0", +] + +[tool.poetry] packages = [ { include = "mistralai_azure", from = "src" } ] @@ -15,17 +25,8 @@ include = ["py.typed", "src/mistralai_azure/py.typed"] [virtualenvs] in-project = true -[tool.poetry.dependencies] -python = "^3.8" -eval-type-backport = "^0.2.0" -httpx = "^0.28.1" -jsonpath-python = "^1.0.6" -pydantic = "~2.10.3" -python-dateutil = "^2.8.2" -typing-inspect = "^0.9.0" - [tool.poetry.group.dev.dependencies] -mypy = "==1.13.0" +mypy = "==1.14.1" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" @@ -36,6 +37,7 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/packages/mistralai_azure/scripts/prepare-readme.py b/packages/mistralai_azure/scripts/prepare_readme.py similarity index 100% rename from packages/mistralai_azure/scripts/prepare-readme.py rename to packages/mistralai_azure/scripts/prepare_readme.py diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index ab45b1f9..f2f2cf2c 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -2,6 +2,6 @@ export 
POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare-readme.py +poetry run python scripts/prepare_readme.py poetry publish --build --skip-existing diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/mistralai_azure/src/mistralai_azure/__init__.py index a1b7f626..dd02e42e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/__init__.py @@ -1,9 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from ._version import __title__, __version__ +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) from .sdk import * from .sdkconfiguration import * from .models import * VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py index 5e34da26..297dfa2f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py @@ -7,16 +7,19 @@ class HookContext: + base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, + base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -25,21 +28,30 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + 
hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index c7215b89..65696610 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -3,7 +3,10 @@ import importlib.metadata __title__: str = "mistralai_azure" -__version__: str = "1.2.6" +__version__: str = "1.6.0" +__openapi_doc_version__: str = "0.0.2" +__gen_version__: str = "2.548.6" +__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai_azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 05c100d4..24e4935e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -231,6 +231,10 @@ def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -303,6 +307,10 @@ async def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = await client.send(req, stream=stream) except Exception as e: _, e = 
self.sdk_configuration.get_hooks().after_error( diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index 0ed464ba..cf3511fd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -15,7 +15,7 @@ def stream( self, *, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], - model: OptionalNullable[str] = "azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -37,6 +37,10 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -61,6 +65,8 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: + :param parallel_tool_calls: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -74,6 +80,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -94,6 +102,10 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, safe_prompt=safe_prompt, ) @@ -126,6 +138,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -136,7 +149,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -145,9 +158,16 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -166,7 +186,7 @@ async def stream_async( self, *, messages: Union[List[models.Messages], 
List[models.MessagesTypedDict]], - model: OptionalNullable[str] = "azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -188,6 +208,10 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -212,6 +236,8 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: + :param parallel_tool_calls: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -225,6 +251,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -245,6 +273,10 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, safe_prompt=safe_prompt, ) @@ -277,6 +309,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -287,7 +320,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -296,9 +329,16 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ 
-320,7 +360,7 @@ def complete( List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], ], - model: OptionalNullable[str] = "azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -347,6 +387,10 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -369,6 +413,8 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: + :param parallel_tool_calls: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -382,6 +428,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -404,6 +452,10 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, safe_prompt=safe_prompt, ) @@ -436,6 +488,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -445,15 +498,22 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -475,7 +535,7 @@ async def complete_async( List[models.ChatCompletionRequestMessages], 
List[models.ChatCompletionRequestMessagesTypedDict], ], - model: OptionalNullable[str] = "azureai", + model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, @@ -502,6 +562,10 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -524,6 +588,8 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: + :param parallel_tool_calls: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -537,6 +603,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -559,6 +627,10 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, safe_prompt=safe_prompt, ) @@ -591,6 +663,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -600,15 +673,22 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git 
a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/mistralai_azure/src/mistralai_azure/httpclient.py index 167cea4e..1e426352 100644 --- a/packages/mistralai_azure/src/mistralai_azure/httpclient.py +++ b/packages/mistralai_azure/src/mistralai_azure/httpclient.py @@ -1,6 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" # pyright: reportReturnType = false +import asyncio +from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -82,3 +84,53 @@ def build_request( async def aclose(self) -> None: pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + is_async = False + try: + asyncio.get_running_loop() + is_async = True + except RuntimeError: + pass + + try: + # If this function is called in an async loop then start another + # loop in a separate thread to close the async http client. 
+ if is_async: + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(asyncio.run, async_client.aclose()) + future.result() + else: + asyncio.run(async_client.aclose()) + except Exception: + pass diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index 379a0dfe..2229c469 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -54,6 +54,16 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .imageurl import ImageURL, ImageURLTypedDict +from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, +) +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -94,6 +104,7 @@ ValidationErrorTypedDict, ) + __all__ = [ "Arguments", "ArgumentsTypedDict", @@ -140,10 +151,21 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "ImageURL", + "ImageURLChunk", + "ImageURLChunkImageURL", + "ImageURLChunkImageURLTypedDict", + "ImageURLChunkType", + "ImageURLChunkTypedDict", + "ImageURLTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", + "Prediction", + "PredictionTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 031677cf..530b33df 100644 --- 
a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -32,6 +32,7 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -41,6 +42,7 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index 67c91bba..f48c1f50 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -70,7 +71,7 @@ class ChatCompletionRequestTypedDict(TypedDict): messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[Nullable[str]] + model: 
NotRequired[str] r"""The ID of the model to use for this request.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -93,6 +94,8 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + parallel_tool_calls: NotRequired[bool] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -101,7 +104,7 @@ class ChatCompletionRequest(BaseModel): messages: List[ChatCompletionRequestMessages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: OptionalNullable[str] = "azureai" + model: Optional[str] = "azureai" r"""The ID of the model to use for this request.""" temperature: OptionalNullable[float] = UNSET @@ -137,6 +140,10 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + + parallel_tool_calls: Optional[bool] = None + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -156,16 +163,11 @@ def serialize_model(self, handler): "presence_penalty", 
"frequency_penalty", "n", + "prediction", + "parallel_tool_calls", "safe_prompt", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 465647eb..50cf1f01 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -66,7 +67,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: NotRequired[Nullable[str]] + model: NotRequired[str] r"""The ID of the model to use for this request.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -88,6 +89,8 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + parallel_tool_calls: NotRequired[bool] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -96,7 +99,7 @@ class ChatCompletionStreamRequest(BaseModel): messages: List[Messages] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - model: OptionalNullable[str] = "azureai" + model: Optional[str] = "azureai" r"""The ID of the model to use for this request.""" temperature: OptionalNullable[float] = UNSET @@ -131,6 +134,10 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + + parallel_tool_calls: Optional[bool] = None + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -150,16 +157,11 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", + "parallel_tool_calls", "safe_prompt", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py index 70c94e70..e6a3e24a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py @@ -1,6 +1,7 @@ """Code generated by 
Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict from mistralai_azure.utils import get_discriminator @@ -10,13 +11,16 @@ ContentChunkTypedDict = TypeAliasType( - "ContentChunkTypedDict", Union[TextChunkTypedDict, ReferenceChunkTypedDict] + "ContentChunkTypedDict", + Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict], ) ContentChunk = Annotated[ Union[ - Annotated[TextChunk, Tag("text")], Annotated[ReferenceChunk, Tag("reference")] + Annotated[ImageURLChunk, Tag("image_url")], + Annotated[TextChunk, Tag("text")], + Annotated[ReferenceChunk, Tag("reference")], ], Discriminator(lambda m: get_discriminator(m, "type", "type")), ] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/mistralai_azure/src/mistralai_azure/models/function.py index 488cdcea..a4642f92 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/function.py @@ -10,6 +10,7 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] + strict: NotRequired[bool] class Function(BaseModel): @@ -17,4 +18,6 @@ class Function(BaseModel): parameters: Dict[str, Any] - description: Optional[str] = "" + description: Optional[str] = None + + strict: Optional[bool] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py b/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py new file mode 100644 index 00000000..8faa272b --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class ImageURLTypedDict(TypedDict): + url: str + detail: NotRequired[Nullable[str]] + + +class ImageURL(BaseModel): + url: str + + detail: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["detail"] + nullable_fields = ["detail"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py new file mode 100644 index 00000000..734d7f79 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai_azure.types import BaseModel +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ImageURLChunkImageURLTypedDict = TypeAliasType( + "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) + + +ImageURLChunkType = Literal["image_url"] + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLChunkImageURLTypedDict + type: NotRequired[ImageURLChunkType] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLChunkImageURL + + type: Optional[ImageURLChunkType] = "image_url" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py new file mode 100644 index 00000000..b2d07d3a --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "strict"] + nullable_fields = ["description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py new file mode 100644 index 00000000..888337d3 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from mistralai_azure.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + TYPE: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index e4a9d7dd..cfd58dcf 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -1,8 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_azure.types import BaseModel +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -10,8 +18,41 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "json_schema"] + nullable_fields = ["json_schema"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py index 2c06b812..08c39951 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object"] +ResponseFormats = Literal["text", "json_object", "json_schema"] r"""An object specifying the format that the model must output. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index 69b47310..6ccdcaa2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -14,6 +14,7 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] + index: NotRequired[int] class ToolCall(BaseModel): @@ -24,3 +25,5 @@ class ToolCall(BaseModel): type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( None ) + + index: Optional[int] = 0 diff --git a/packages/mistralai_azure/src/mistralai_azure/sdk.py b/packages/mistralai_azure/src/mistralai_azure/sdk.py index a83faa7b..8379e55f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdk.py @@ -1,17 +1,19 @@ -"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from typing import Any, Callable, Dict, Optional, Union +import weakref +from typing import Any, Callable, Dict, Optional, Union, cast import httpx + from mistralai_azure import models, utils from mistralai_azure._hooks import SDKHooks from mistralai_azure.chat import Chat -from mistralai_azure.types import Nullable +from mistralai_azure.types import UNSET, OptionalNullable from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, HttpClient +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, NoOpLogger +from .utils.logger import Logger, get_default_logger from .utils.retries import RetryConfig @@ -19,7 +21,7 @@ class MistralAzure(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" chat: Chat - r"""Chat Completion API""" + r"""Chat Completion API.""" def __init__( self, @@ -28,7 +30,8 @@ def __init__( url_params: Optional[Dict[str, str]] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, - retry_config: Optional[Nullable[RetryConfig]] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. 
@@ -39,7 +42,9 @@ def __init__( :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ + # if azure_endpoint doesn't end with `/v1` add it if not azure_endpoint.endswith("/"): azure_endpoint += "/" @@ -47,28 +52,30 @@ def __init__( azure_endpoint += "v1/" server_url = azure_endpoint + client_supplied = True if client is None: client = httpx.Client() + client_supplied = False assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol." + async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() + async_client_supplied = False + + if debug_logger is None: + debug_logger = get_default_logger() assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." 
- if debug_logger is None: - debug_logger = NoOpLogger() - security: Any = None if callable(azure_api_key): - security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment - api_key=azure_api_key() - ) + security = lambda: models.Security(api_key=azure_api_key()) # pylint: disable=unnecessary-lambda-assignment else: security = models.Security(api_key=azure_api_key) @@ -80,11 +87,14 @@ def __init__( self, SDKConfiguration( client=client, + client_supplied=client_supplied, async_client=async_client, + async_client_supplied=async_client_supplied, security=security, server_url=server_url, server=None, retry_config=retry_config, + timeout_ms=timeout_ms, debug_logger=debug_logger, ), ) @@ -93,7 +103,7 @@ def __init__( current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, self.sdk_configuration.client + current_server_url, client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -101,7 +111,39 @@ def __init__( # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + self._init_sdks() def _init_sdks(self): self.chat = Chat(self.sdk_configuration) + + def __enter__(self): + return self + + async def __aenter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + 
await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 73b8d517..605e5d74 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -1,6 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from ._hooks import SDKHooks +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass @@ -20,17 +26,19 @@ @dataclass class SDKConfiguration: - client: HttpClient - async_client: AsyncHttpClient + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" - openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.6" - gen_version: str = "2.486.1" - user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai_azure" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 26d51ae8..3cded8fe 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ 
b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -42,6 +42,7 @@ match_content_type, match_status_codes, match_response, + cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -94,4 +95,5 @@ "validate_float", "validate_int", "validate_open_enum", + "cast_partial", ] diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index c5eb3659..baa41fbd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -7,14 +7,15 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspect import is_optional_type +from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - if is_optional_type(type(d)) and d is None: + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -42,7 +43,8 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - if is_optional_type(type(f)) and f is None: + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -70,7 +72,8 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - if is_optional_type(type(i)) and i is None: + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -118,7 +121,8 @@ def validate(e): def validate_const(v): def validate(c): - if is_optional_type(type(c)) and c is None: + # Optional[T] is a Union[T, None] + if is_union(type(c)) and 
type(None) in get_args(type(c)) and c is None: return None if v != c: @@ -163,7 +167,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + return json.dumps(d[next(iter(d))], separators=(",", ":")) def is_nullable(field): diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/values.py b/packages/mistralai_azure/src/mistralai_azure/utils/values.py index 2b4b6832..dae01a44 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/values.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/values.py @@ -3,8 +3,9 @@ from datetime import datetime from enum import Enum from email.message import Message +from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast from httpx import Response from pydantic import BaseModel @@ -51,6 +52,8 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") +def cast_partial(typ): + return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index f74b9759..5e157235 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,34 +1,34 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 849dde0ef239604ca71711ffc1220b54 + docChecksum: 28fe1ab59b4dee005217f2dbbd836060 docVersion: 0.0.2 - speakeasyVersion: 1.462.2 - generationVersion: 2.486.1 - releaseVersion: 1.2.6 - configChecksum: ba11718a5b49fb4a979ae9693a68b191 + speakeasyVersion: 1.517.3 + generationVersion: 2.548.6 + releaseVersion: 1.6.0 + configChecksum: 66bf5911f59189922e03a75a72923b32 published: true features: python: additionalDependencies: 1.0.0 
constsAndDefaults: 1.0.5 - core: 5.7.4 + core: 5.12.3 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.0 + examples: 3.0.1 flatRequests: 1.0.1 - globalSecurity: 3.0.2 + globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 - nullables: 1.0.0 + nullables: 1.0.1 openEnums: 1.0.0 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.0 + sdkHooks: 1.0.1 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -66,8 +66,14 @@ generatedFiles: - docs/models/functioncall.md - docs/models/functionname.md - docs/models/httpvalidationerror.md + - docs/models/imageurl.md + - docs/models/imageurlchunk.md + - docs/models/imageurlchunkimageurl.md + - docs/models/imageurlchunktype.md + - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md + - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/responseformat.md @@ -97,7 +103,7 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare-readme.py + - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py - src/mistralai_gcp/_hooks/__init__.py @@ -126,6 +132,10 @@ generatedFiles: - src/mistralai_gcp/models/functioncall.py - src/mistralai_gcp/models/functionname.py - src/mistralai_gcp/models/httpvalidationerror.py + - src/mistralai_gcp/models/imageurl.py + - src/mistralai_gcp/models/imageurlchunk.py + - src/mistralai_gcp/models/jsonschema.py + - src/mistralai_gcp/models/prediction.py - src/mistralai_gcp/models/referencechunk.py - src/mistralai_gcp/models/responseformat.py - src/mistralai_gcp/models/responseformats.py @@ -143,6 +153,7 @@ generatedFiles: - src/mistralai_gcp/models/usermessage.py - src/mistralai_gcp/models/validationerror.py - src/mistralai_gcp/py.typed + - src/mistralai_gcp/sdk.py - src/mistralai_gcp/sdkconfiguration.py - 
src/mistralai_gcp/types/__init__.py - src/mistralai_gcp/types/basemodel.py @@ -165,33 +176,36 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "422": application/json: {} + "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} stream_fim: speakeasy-default-stream-fim: requestBody: - application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + application/json: {"model": 
"codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} responses: "422": application/json: {} + "200": {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: - application/json: {"model": "codestral-2405", "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} "422": application/json: {} +examplesVersion: 1.0.0 generatedTests: {} diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index afa4d1d0..d7be7fed 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -7,13 +7,15 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true + nameResolutionFeb2025: false parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true + securityFeb2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.2.6 + version: 1.6.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -26,6 +28,7 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: 
SDKError description: Python Client SDK for the Mistral AI API in GCP. + enableCustomCodeRegions: false enumFormat: union fixFlags: responseRequiredSep2024: false @@ -41,9 +44,10 @@ python: shared: "" webhooks: "" inputModelSuffix: input - maxMethodParams: 4 + maxMethodParams: 15 methodArguments: infer-optional-args outputModelSuffix: output packageName: mistralai-gcp + pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/mistralai_gcp/docs/models/assistantmessage.md index 53f1cc76..3d0bd90b 100644 --- a/packages/mistralai_gcp/docs/models/assistantmessage.md +++ b/packages/mistralai_gcp/docs/models/assistantmessage.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| +| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index abc83281..9d735d08 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -18,4 +18,6 @@ | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index 863c0229..827943cd 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -18,4 +18,6 @@ | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | \ No newline at end of file +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/contentchunk.md b/packages/mistralai_gcp/docs/models/contentchunk.md index 98b86391..22023e8b 100644 --- a/packages/mistralai_gcp/docs/models/contentchunk.md +++ b/packages/mistralai_gcp/docs/models/contentchunk.md @@ -3,6 +3,12 @@ ## Supported Types +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + ### `models.TextChunk` ```python diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index 236d2d21..7507b90c 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index fa635932..6cc439c7 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md index 8af398f5..a166b7bb 100644 --- a/packages/mistralai_gcp/docs/models/function.md +++ b/packages/mistralai_gcp/docs/models/function.md @@ -7,4 +7,5 @@ | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | | `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | -| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `description` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurl.md b/packages/mistralai_gcp/docs/models/imageurl.md new file mode 100644 index 00000000..7c2bcbc3 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | 
----------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurlchunk.md b/packages/mistralai_gcp/docs/models/imageurlchunk.md new file mode 100644 index 00000000..f1b926ef --- /dev/null +++ b/packages/mistralai_gcp/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md b/packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md new file mode 100644 index 00000000..76738908 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md @@ -0,0 +1,17 @@ +# ImageURLChunkImageURL + + +## Supported Types + +### `models.ImageURL` + +```python +value: models.ImageURL = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/imageurlchunktype.md b/packages/mistralai_gcp/docs/models/imageurlchunktype.md new file mode 100644 index 00000000..2064a0b4 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/imageurlchunktype.md @@ -0,0 +1,8 @@ +# ImageURLChunkType + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `IMAGE_URL` | image_url | \ No newline at end of file 
diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/mistralai_gcp/docs/models/jsonschema.md new file mode 100644 index 00000000..ae387867 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/prediction.md b/packages/mistralai_gcp/docs/models/prediction.md new file mode 100644 index 00000000..86e9c396 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/prediction.md @@ -0,0 +1,9 @@ +# Prediction + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------ | ------------------------------ | ------------------------------ | ------------------------------ | +| `type` | *Optional[Literal["content"]]* | :heavy_minus_sign: | N/A | +| `content` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md index 9c627f55..23a1641b 100644 --- a/packages/mistralai_gcp/docs/models/responseformat.md +++ b/packages/mistralai_gcp/docs/models/responseformat.md @@ -5,4 +5,5 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
| +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/mistralai_gcp/docs/models/responseformats.md index ce35fbb3..06886afe 100644 --- a/packages/mistralai_gcp/docs/models/responseformats.md +++ b/packages/mistralai_gcp/docs/models/responseformats.md @@ -8,4 +8,5 @@ An object specifying the format that the model must output. Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | \ No newline at end of file +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/mistralai_gcp/docs/models/toolcall.md index 7aca5fc9..574be1ea 100644 --- a/packages/mistralai_gcp/docs/models/toolcall.md +++ b/packages/mistralai_gcp/docs/models/toolcall.md @@ -7,4 +7,5 @@ | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock index 5483cc8f..5f710a98 100644 --- a/packages/mistralai_gcp/poetry.lock +++ b/packages/mistralai_gcp/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
+# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. [[package]] name = "annotated-types" @@ -6,20 +6,19 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - [[package]] name = "anyio" version = "4.4.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, @@ -33,7 +32,7 @@ typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] trio = ["trio (>=0.23)"] [[package]] @@ -42,6 +41,7 @@ version = "3.2.4" description = "An abstract syntax tree for Python with inference support." 
optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, @@ -56,6 +56,7 @@ version = "5.4.0" description = "Extensible memoizing collections and decorators" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"}, {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"}, @@ -67,6 +68,7 @@ version = "2024.7.4" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, @@ -78,6 +80,7 @@ version = "3.3.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false python-versions = ">=3.7.0" +groups = ["main"] files = [ {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, @@ -177,6 +180,8 @@ version = "0.4.6" description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +markers = "sys_platform == \"win32\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -188,6 +193,7 @@ version = "0.3.8" description = "serialize all of Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, @@ -203,6 +209,7 @@ version = "0.2.0" description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, @@ -217,6 +224,8 @@ version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] +markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, @@ -227,13 +236,14 @@ test = ["pytest (>=6)"] [[package]] name = "google-auth" -version = "2.27.0" +version = "2.38.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ - {file = 
"google-auth-2.27.0.tar.gz", hash = "sha256:e863a56ccc2d8efa83df7a80272601e43487fa9a728a376205c86c26aaefa821"}, - {file = "google_auth-2.27.0-py2.py3-none-any.whl", hash = "sha256:8e4bad367015430ff253fe49d500fdc3396c1a434db5740828c728e45bcce245"}, + {file = "google_auth-2.38.0-py2.py3-none-any.whl", hash = "sha256:e7dae6694313f434a2727bf2906f27ad259bae090d7aa896590d86feec3d9d4a"}, + {file = "google_auth-2.38.0.tar.gz", hash = "sha256:8285113607d3b80a3f1543b75962447ba8a09fe85783432a784fdeef6ac094c4"}, ] [package.dependencies] @@ -243,7 +253,8 @@ rsa = ">=3.1.4,<5" [package.extras] aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] -enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] +enterprise-cert = ["cryptography", "pyopenssl"] +pyjwt = ["cryptography (>=38.0.3)", "pyjwt (>=2.0)"] pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0.dev0)"] @@ -254,6 +265,7 @@ version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, @@ -265,6 +277,7 @@ version = "1.0.5" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, @@ -282,13 +295,14 @@ trio = ["trio (>=0.22.0,<0.26.0)"] [[package]] name = "httpx" -version = "0.27.0" +version = "0.28.1" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, - {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] [package.dependencies] @@ -296,13 +310,13 @@ anyio = "*" certifi = "*" httpcore = "==1.*" idna = "*" -sniffio = "*" [package.extras] -brotli = ["brotli", "brotlicffi"] +brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "idna" @@ -310,6 +324,7 @@ version = "3.7" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.5" +groups = ["main"] files = [ {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, @@ -321,6 +336,7 @@ version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, @@ -332,6 +348,7 @@ version = "5.13.2" description = "A Python utility / library to sort Python imports." 
optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, @@ -340,23 +357,13 @@ files = [ [package.extras] colors = ["colorama (>=0.4.6)"] -[[package]] -name = "jsonpath-python" -version = "1.0.6" -description = "A more powerful JSONPath implementation in modern python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, - {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, -] - [[package]] name = "mccabe" version = "0.7.0" description = "McCabe checker, plugin for flake8" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, @@ -364,47 +371,60 @@ files = [ [[package]] name = "mypy" -version = "1.10.1" +version = "1.14.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" -files = [ - {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, - {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, - {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, - {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, - {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, - {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, - {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, - {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, - {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, - {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, - {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, - {file = 
"mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, - {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, - {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, - {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, - {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, - {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, - {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, - {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, +groups = ["dev"] +files = [ + {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, + {file = 
"mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, + {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, + {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, + {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, + {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, + {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, + {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, + {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, + {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, + {file = 
"mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, + {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, + {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, + {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, + {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, + {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, ] [package.dependencies] -mypy-extensions = ">=1.0.0" +mypy_extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" +typing_extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] @@ -415,6 +435,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false python-versions = ">=3.5" +groups = ["dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -426,6 +447,7 @@ version = "24.1" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, @@ -437,6 +459,7 @@ version = "4.2.2" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, @@ -453,6 +476,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -468,6 +492,7 @@ version = "0.6.0" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, {file = "pyasn1-0.6.0.tar.gz", hash = 
"sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, @@ -479,6 +504,7 @@ version = "0.4.0" description = "A collection of ASN.1-based protocols modules" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, {file = "pyasn1_modules-0.4.0.tar.gz", hash = "sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, @@ -489,123 +515,133 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.9.1" +version = "2.10.6" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ - {file = "pydantic-2.9.1-py3-none-any.whl", hash = "sha256:7aff4db5fdf3cf573d4b3c30926a510a10e19a0774d38fc4967f78beb6deb612"}, - {file = "pydantic-2.9.1.tar.gz", hash = "sha256:1363c7d975c7036df0db2b4a61f2e062fbc0aa5ab5f2772e0ffc7191a4f4bce2"}, + {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, + {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.23.3" -typing-extensions = [ - {version = ">=4.6.1", markers = "python_version < \"3.13\""}, - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, -] +pydantic-core = "2.27.2" +typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" -version = "2.23.3" +version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" -files = [ - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:7f10a5d1b9281392f1bf507d16ac720e78285dfd635b05737c3911637601bae6"}, - {file = "pydantic_core-2.23.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c09a7885dd33ee8c65266e5aa7fb7e2f23d49d8043f089989726391dd7350c5"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6470b5a1ec4d1c2e9afe928c6cb37eb33381cab99292a708b8cb9aa89e62429b"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9172d2088e27d9a185ea0a6c8cebe227a9139fd90295221d7d495944d2367700"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:86fc6c762ca7ac8fbbdff80d61b2c59fb6b7d144aa46e2d54d9e1b7b0e780e01"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0cb80fd5c2df4898693aa841425ea1727b1b6d2167448253077d2a49003e0ed"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03667cec5daf43ac4995cefa8aaf58f99de036204a37b889c24a80927b629cec"}, - {file = "pydantic_core-2.23.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:047531242f8e9c2db733599f1c612925de095e93c9cc0e599e96cf536aaf56ba"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5499798317fff7f25dbef9347f4451b91ac2a4330c6669821c8202fd354c7bee"}, - {file = "pydantic_core-2.23.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bbb5e45eab7624440516ee3722a3044b83fff4c0372efe183fd6ba678ff681fe"}, - {file = "pydantic_core-2.23.3-cp310-none-win32.whl", hash = "sha256:8b5b3ed73abb147704a6e9f556d8c5cb078f8c095be4588e669d315e0d11893b"}, - {file = "pydantic_core-2.23.3-cp310-none-win_amd64.whl", hash = "sha256:2b603cde285322758a0279995b5796d64b63060bfbe214b50a3ca23b5cee3e83"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:c889fd87e1f1bbeb877c2ee56b63bb297de4636661cc9bbfcf4b34e5e925bc27"}, - {file = "pydantic_core-2.23.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ea85bda3189fb27503af4c45273735bcde3dd31c1ab17d11f37b04877859ef45"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7f7f72f721223f33d3dc98a791666ebc6a91fa023ce63733709f4894a7dc611"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b2b55b0448e9da68f56b696f313949cda1039e8ec7b5d294285335b53104b61"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24574c7e92e2c56379706b9a3f07c1e0c7f2f87a41b6ee86653100c4ce343e5"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2b05e6ccbee333a8f4b8f4d7c244fdb7a979e90977ad9c51ea31261e2085ce0"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2c409ce1c219c091e47cb03feb3c4ed8c2b8e004efc940da0166aaee8f9d6c8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d965e8b325f443ed3196db890d85dfebbb09f7384486a77461347f4adb1fa7f8"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f56af3a420fb1ffaf43ece3ea09c2d27c444e7c40dcb7c6e7cf57aae764f2b48"}, - {file = "pydantic_core-2.23.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5b01a078dd4f9a52494370af21aa52964e0a96d4862ac64ff7cea06e0f12d2c5"}, - {file = "pydantic_core-2.23.3-cp311-none-win32.whl", hash = "sha256:560e32f0df04ac69b3dd818f71339983f6d1f70eb99d4d1f8e9705fb6c34a5c1"}, - {file = "pydantic_core-2.23.3-cp311-none-win_amd64.whl", hash = "sha256:c744fa100fdea0d000d8bcddee95213d2de2e95b9c12be083370b2072333a0fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:e0ec50663feedf64d21bad0809f5857bac1ce91deded203efc4a84b31b2e4305"}, - {file = "pydantic_core-2.23.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db6e6afcb95edbe6b357786684b71008499836e91f2a4a1e55b840955b341dbb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98ccd69edcf49f0875d86942f4418a4e83eb3047f20eb897bffa62a5d419c8fa"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a678c1ac5c5ec5685af0133262103defb427114e62eafeda12f1357a12140162"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01491d8b4d8db9f3391d93b0df60701e644ff0894352947f31fff3e52bd5c801"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fcf31facf2796a2d3b7fe338fe8640aa0166e4e55b4cb108dbfd1058049bf4cb"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7200fd561fb3be06827340da066df4311d0b6b8eb0c2116a110be5245dceb326"}, - {file = "pydantic_core-2.23.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dc1636770a809dee2bd44dd74b89cc80eb41172bcad8af75dd0bc182c2666d4c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:67a5def279309f2e23014b608c4150b0c2d323bd7bccd27ff07b001c12c2415c"}, - {file = "pydantic_core-2.23.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:748bdf985014c6dd3e1e4cc3db90f1c3ecc7246ff5a3cd4ddab20c768b2f1dab"}, - {file = "pydantic_core-2.23.3-cp312-none-win32.whl", hash = "sha256:255ec6dcb899c115f1e2a64bc9ebc24cc0e3ab097775755244f77360d1f3c06c"}, - {file = "pydantic_core-2.23.3-cp312-none-win_amd64.whl", hash = "sha256:40b8441be16c1e940abebed83cd006ddb9e3737a279e339dbd6d31578b802f7b"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = 
"sha256:6daaf5b1ba1369a22c8b050b643250e3e5efc6a78366d323294aee54953a4d5f"}, - {file = "pydantic_core-2.23.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d015e63b985a78a3d4ccffd3bdf22b7c20b3bbd4b8227809b3e8e75bc37f9cb2"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3fc572d9b5b5cfe13f8e8a6e26271d5d13f80173724b738557a8c7f3a8a3791"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f6bd91345b5163ee7448bee201ed7dd601ca24f43f439109b0212e296eb5b423"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc379c73fd66606628b866f661e8785088afe2adaba78e6bbe80796baf708a63"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fbdce4b47592f9e296e19ac31667daed8753c8367ebb34b9a9bd89dacaa299c9"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc3cf31edf405a161a0adad83246568647c54404739b614b1ff43dad2b02e6d5"}, - {file = "pydantic_core-2.23.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8e22b477bf90db71c156f89a55bfe4d25177b81fce4aa09294d9e805eec13855"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:0a0137ddf462575d9bce863c4c95bac3493ba8e22f8c28ca94634b4a1d3e2bb4"}, - {file = "pydantic_core-2.23.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:203171e48946c3164fe7691fc349c79241ff8f28306abd4cad5f4f75ed80bc8d"}, - {file = "pydantic_core-2.23.3-cp313-none-win32.whl", hash = "sha256:76bdab0de4acb3f119c2a4bff740e0c7dc2e6de7692774620f7452ce11ca76c8"}, - {file = "pydantic_core-2.23.3-cp313-none-win_amd64.whl", hash = "sha256:37ba321ac2a46100c578a92e9a6aa33afe9ec99ffa084424291d84e456f490c1"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = 
"sha256:d063c6b9fed7d992bcbebfc9133f4c24b7a7f215d6b102f3e082b1117cddb72c"}, - {file = "pydantic_core-2.23.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6cb968da9a0746a0cf521b2b5ef25fc5a0bee9b9a1a8214e0a1cfaea5be7e8a4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edbefe079a520c5984e30e1f1f29325054b59534729c25b874a16a5048028d16"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbaaf2ef20d282659093913da9d402108203f7cb5955020bd8d1ae5a2325d1c4"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb539d7e5dc4aac345846f290cf504d2fd3c1be26ac4e8b5e4c2b688069ff4cf"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e6f33503c5495059148cc486867e1d24ca35df5fc064686e631e314d959ad5b"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:04b07490bc2f6f2717b10c3969e1b830f5720b632f8ae2f3b8b1542394c47a8e"}, - {file = "pydantic_core-2.23.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:03795b9e8a5d7fda05f3873efc3f59105e2dcff14231680296b87b80bb327295"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c483dab0f14b8d3f0df0c6c18d70b21b086f74c87ab03c59250dbf6d3c89baba"}, - {file = "pydantic_core-2.23.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8b2682038e255e94baf2c473dca914a7460069171ff5cdd4080be18ab8a7fd6e"}, - {file = "pydantic_core-2.23.3-cp38-none-win32.whl", hash = "sha256:f4a57db8966b3a1d1a350012839c6a0099f0898c56512dfade8a1fe5fb278710"}, - {file = "pydantic_core-2.23.3-cp38-none-win_amd64.whl", hash = "sha256:13dd45ba2561603681a2676ca56006d6dee94493f03d5cadc055d2055615c3ea"}, - {file = "pydantic_core-2.23.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82da2f4703894134a9f000e24965df73cc103e31e8c31906cc1ee89fde72cbd8"}, - 
{file = "pydantic_core-2.23.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dd9be0a42de08f4b58a3cc73a123f124f65c24698b95a54c1543065baca8cf0e"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89b731f25c80830c76fdb13705c68fef6a2b6dc494402987c7ea9584fe189f5d"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c6de1ec30c4bb94f3a69c9f5f2182baeda5b809f806676675e9ef6b8dc936f28"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb68b41c3fa64587412b104294b9cbb027509dc2f6958446c502638d481525ef"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c3980f2843de5184656aab58698011b42763ccba11c4a8c35936c8dd6c7068c"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94f85614f2cba13f62c3c6481716e4adeae48e1eaa7e8bac379b9d177d93947a"}, - {file = "pydantic_core-2.23.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:510b7fb0a86dc8f10a8bb43bd2f97beb63cffad1203071dc434dac26453955cd"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1eba2f7ce3e30ee2170410e2171867ea73dbd692433b81a93758ab2de6c64835"}, - {file = "pydantic_core-2.23.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4b259fd8409ab84b4041b7b3f24dcc41e4696f180b775961ca8142b5b21d0e70"}, - {file = "pydantic_core-2.23.3-cp39-none-win32.whl", hash = "sha256:40d9bd259538dba2f40963286009bf7caf18b5112b19d2b55b09c14dde6db6a7"}, - {file = "pydantic_core-2.23.3-cp39-none-win_amd64.whl", hash = "sha256:5a8cd3074a98ee70173a8633ad3c10e00dcb991ecec57263aacb4095c5efb958"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f399e8657c67313476a121a6944311fab377085ca7f490648c9af97fc732732d"}, - {file = 
"pydantic_core-2.23.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6b5547d098c76e1694ba85f05b595720d7c60d342f24d5aad32c3049131fa5c4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0dda0290a6f608504882d9f7650975b4651ff91c85673341789a476b1159f211"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b6e5da855e9c55a0c67f4db8a492bf13d8d3316a59999cfbaf98cc6e401961"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:09e926397f392059ce0afdcac920df29d9c833256354d0c55f1584b0b70cf07e"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:87cfa0ed6b8c5bd6ae8b66de941cece179281239d482f363814d2b986b79cedc"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e61328920154b6a44d98cabcb709f10e8b74276bc709c9a513a8c37a18786cc4"}, - {file = "pydantic_core-2.23.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce3317d155628301d649fe5e16a99528d5680af4ec7aa70b90b8dacd2d725c9b"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e89513f014c6be0d17b00a9a7c81b1c426f4eb9224b15433f3d98c1a071f8433"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4f62c1c953d7ee375df5eb2e44ad50ce2f5aff931723b398b8bc6f0ac159791a"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2718443bc671c7ac331de4eef9b673063b10af32a0bb385019ad61dcf2cc8f6c"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0d90e08b2727c5d01af1b5ef4121d2f0c99fbee692c762f4d9d0409c9da6541"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:2b676583fc459c64146debea14ba3af54e540b61762dfc0613dc4e98c3f66eeb"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:50e4661f3337977740fdbfbae084ae5693e505ca2b3130a6d4eb0f2281dc43b8"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:68f4cf373f0de6abfe599a38307f4417c1c867ca381c03df27c873a9069cda25"}, - {file = "pydantic_core-2.23.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:59d52cf01854cb26c46958552a21acb10dd78a52aa34c86f284e66b209db8cab"}, - {file = "pydantic_core-2.23.3.tar.gz", hash = "sha256:3cb0f65d8b4121c1b015c60104a685feb929a29d7cf204387c7f2688c7974690"}, +groups = ["main"] +files = [ + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = 
"pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = 
"pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = 
"pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, ] [package.dependencies] @@ -617,6 +653,7 @@ version = "3.2.3" description = "python code static checker" optional = false python-versions = ">=3.8.0" +groups = ["dev"] files = [ {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, @@ -628,7 +665,7 @@ colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} dill = [ {version = ">=0.2", markers = "python_version < \"3.11\""}, {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, + {version = ">=0.3.6", markers = "python_version == \"3.11\""}, ] isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" mccabe = ">=0.6,<0.8" @@ -647,6 +684,7 @@ version = "8.3.2" 
description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, @@ -669,6 +707,7 @@ version = "0.23.8" description = "Pytest support for asyncio" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, @@ -687,6 +726,7 @@ version = "2.8.2" description = "Extensions to the standard Python datetime module" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main"] files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, @@ -701,6 +741,7 @@ version = "2.32.3" description = "Python HTTP for Humans." 
optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -716,12 +757,29 @@ urllib3 = ">=1.21.1,<3" socks = ["PySocks (>=1.5.6,!=1.5.7)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] +[[package]] +name = "rsa" +version = "4.2" +description = "Pure-Python RSA implementation" +optional = false +python-versions = "*" +groups = ["main"] +markers = "python_version >= \"3.12\"" +files = [ + {file = "rsa-4.2.tar.gz", hash = "sha256:aaefa4b84752e3e99bd8333a2e1e3e7a7da64614042bd66f775573424370108a"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + [[package]] name = "rsa" version = "4.9" description = "Pure-Python RSA implementation" optional = false python-versions = ">=3.6,<4" +groups = ["main"] +markers = "python_version <= \"3.11\"" files = [ {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, @@ -736,6 +794,7 @@ version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +groups = ["main"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -747,6 +806,7 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" +groups = ["main"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = 
"sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -758,6 +818,8 @@ version = "2.0.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.7" +groups = ["dev"] +markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, @@ -769,6 +831,7 @@ version = "0.13.0" description = "Style preserving TOML library" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, @@ -780,6 +843,7 @@ version = "2.9.0.20240316" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, @@ -791,25 +855,26 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] -name = "typing-inspect" -version = "0.9.0" -description = "Runtime inspection utilities for typing module." 
+name = "typing-inspection" +version = "0.4.0" +description = "Runtime typing introspection tools" optional = false -python-versions = "*" +python-versions = ">=3.9" +groups = ["main"] files = [ - {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, - {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, + {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, + {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, ] [package.dependencies] -mypy-extensions = ">=0.3.0" -typing-extensions = ">=3.7.4" +typing-extensions = ">=4.12.0" [[package]] name = "urllib3" @@ -817,18 +882,19 @@ version = "2.2.2" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] [metadata] -lock-version = "2.0" -python-versions = "^3.8" -content-hash = "fc4716156ed5774ad5090ce141d42d8081750f92e5d1e3ef3192b5f13ef8e815" +lock-version = "2.1" +python-versions = ">=3.9" +content-hash = "e393da36a5d5edb020e739f40ff611854b9940e11a34a4e221f3f1513efeb9db" diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index 
393d0f70..266bc815 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.8 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no @@ -455,7 +455,10 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 6692f1d5..9763e417 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,9 +1,21 @@ -[tool.poetry] +[project] name = "mistralai-gcp" -version = "1.2.6" +version = "1.6.0" description = "Python Client SDK for the Mistral AI API in GCP." 
-authors = ["Mistral",] +authors = [{ name = "Mistral" },] readme = "README-PYPI.md" +requires-python = ">=3.9" +dependencies = [ + "eval-type-backport >=0.2.0", + "google-auth (>=2.31.0,<3.0.0)", + "httpx >=0.28.1", + "pydantic >=2.10.3", + "python-dateutil >=2.8.2", + "requests (>=2.32.3,<3.0.0)", + "typing-inspection >=0.4.0", +] + +[tool.poetry] packages = [ { include = "mistralai_gcp", from = "src" } ] @@ -15,19 +27,8 @@ include = ["py.typed", "src/mistralai_gcp/py.typed"] [virtualenvs] in-project = true -[tool.poetry.dependencies] -python = "^3.8" -eval-type-backport = "^0.2.0" -google-auth = "2.27.0" -httpx = "^0.28.1" -jsonpath-python = "^1.0.6" -pydantic = "~2.10.3" -python-dateutil = "^2.8.2" -requests = "^2.32.3" -typing-inspect = "^0.9.0" - [tool.poetry.group.dev.dependencies] -mypy = "==1.13.0" +mypy = "==1.14.1" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" @@ -38,6 +39,7 @@ requires = ["poetry-core"] build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] +asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/packages/mistralai_gcp/scripts/prepare-readme.py b/packages/mistralai_gcp/scripts/prepare_readme.py similarity index 100% rename from packages/mistralai_gcp/scripts/prepare-readme.py rename to packages/mistralai_gcp/scripts/prepare_readme.py diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index ab45b1f9..f2f2cf2c 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -2,6 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare-readme.py +poetry run python scripts/prepare_readme.py poetry publish --build --skip-existing diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py index a1b7f626..dd02e42e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/__init__.py @@ -1,9 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from ._version import __title__, __version__ +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) from .sdk import * from .sdkconfiguration import * from .models import * VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py index 417126fd..bb867b5b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -7,16 +7,19 @@ class HookContext: + base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, + base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source = security_source @@ -25,21 +28,30 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): 
super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index 30081f34..11f38b63 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -3,7 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "1.2.6" +__version__: str = "1.6.0" +__openapi_doc_version__: str = "0.0.2" +__gen_version__: str = "2.548.6" +__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index 40620018..bb0aab96 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -231,6 +231,10 @@ def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -303,6 +307,10 @@ async def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index 47e5b63a..dba369bf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai_gcp import models, utils from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import 
Nullable, OptionalNullable, UNSET +from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from typing import Any, List, Mapping, Optional, Union @@ -14,7 +14,7 @@ class Chat(BaseSDK): def stream( self, *, - model: Nullable[str], + model: str, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -37,6 +37,10 @@ def stream( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -60,6 +64,8 @@ def stream( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
+ :param prediction: + :param parallel_tool_calls: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -72,6 +78,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -92,6 +100,10 @@ def stream( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, ) req = self._build_request( @@ -123,6 +135,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -133,7 +146,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -142,9 +155,16 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", 
http_res.status_code, http_res_text, http_res @@ -162,7 +182,7 @@ def stream( async def stream_async( self, *, - model: Nullable[str], + model: str, messages: Union[List[models.Messages], List[models.MessagesTypedDict]], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -185,6 +205,10 @@ async def stream_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -208,6 +232,8 @@ async def stream_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
+ :param prediction: + :param parallel_tool_calls: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -220,6 +246,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -240,6 +268,10 @@ async def stream_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, ) req = self._build_request_async( @@ -271,6 +303,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -281,7 +314,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -290,9 +323,16 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", 
"*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -310,7 +350,7 @@ async def stream_async( def complete( self, *, - model: Nullable[str], + model: str, messages: Union[ List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], @@ -341,6 +381,10 @@ def complete( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -362,6 +406,8 @@ def complete( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
+ :param prediction: + :param parallel_tool_calls: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -374,6 +420,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -396,6 +444,10 @@ def complete( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, ) req = self._build_request( @@ -427,6 +479,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -436,15 +489,22 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", 
http_res.status_code, http_res_text, http_res @@ -462,7 +522,7 @@ def complete( async def complete_async( self, *, - model: Nullable[str], + model: str, messages: Union[ List[models.ChatCompletionRequestMessages], List[models.ChatCompletionRequestMessagesTypedDict], @@ -493,6 +553,10 @@ async def complete_async( presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models.Prediction, models.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -514,6 +578,8 @@ async def complete_async( :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
+ :param prediction: + :param parallel_tool_calls: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -526,6 +592,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -548,6 +616,10 @@ async def complete_async( presence_penalty=presence_penalty, frequency_penalty=frequency_penalty, n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, ) req = self._build_request_async( @@ -579,6 +651,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -588,15 +661,22 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.ChatCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 89146a4a..84821c6a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from mistralai_gcp import models, utils from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import Nullable, OptionalNullable, UNSET +from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from typing import Any, Mapping, Optional, Union @@ -14,7 +14,7 @@ class Fim(BaseSDK): def stream( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -60,6 +60,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -103,6 +105,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -113,7 +116,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -122,9 +125,16 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise 
models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -142,7 +152,7 @@ def stream( async def stream_async( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -188,6 +198,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -231,6 +243,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -241,7 +254,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -250,9 +263,16 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + 
if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -270,7 +290,7 @@ async def stream_async( def complete( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -316,6 +336,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -359,6 +381,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -368,15 +391,22 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.FIMCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res @@ -394,7 +424,7 @@ def complete( async def complete_async( self, *, - model: Nullable[str], + model: str, prompt: str, temperature: 
OptionalNullable[float] = UNSET, top_p: Optional[float] = 1, @@ -440,6 +470,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -483,6 +515,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=self.sdk_configuration.security, @@ -492,15 +525,22 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, Optional[models.FIMCompletionResponse] ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) - if utils.match_response(http_res, ["4XX", "5XX"], "*"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( "API error occurred", http_res.status_code, http_res_text, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py index 167cea4e..1e426352 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py @@ -1,6 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" # pyright: reportReturnType = false +import asyncio +from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -82,3 +84,53 @@ def build_request( async def aclose(self) -> None: pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + is_async = False + try: + asyncio.get_running_loop() + is_async = True + except RuntimeError: + pass + + try: + # If this function is called in an async loop then start another + # loop in a separate thread to close the async http client. 
+ if is_async: + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(asyncio.run, async_client.aclose()) + future.result() + else: + asyncio.run(async_client.aclose()) + except Exception: + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index f3c6ce7e..752e70e6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -67,6 +67,16 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .imageurl import ImageURL, ImageURLTypedDict +from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, +) +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats @@ -107,6 +117,7 @@ ValidationErrorTypedDict, ) + __all__ = [ "Arguments", "ArgumentsTypedDict", @@ -163,10 +174,21 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "ImageURL", + "ImageURLChunk", + "ImageURLChunkImageURL", + "ImageURLChunkImageURLTypedDict", + "ImageURLChunkType", + "ImageURLChunkTypedDict", + "ImageURLTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", "Loc", "LocTypedDict", "Messages", "MessagesTypedDict", + "Prediction", + "PredictionTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 6a9b58f2..9147f566 100644 --- 
a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -32,6 +32,7 @@ class AssistantMessageTypedDict(TypedDict): content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: NotRequired[AssistantMessageRole] @@ -41,6 +42,7 @@ class AssistantMessage(BaseModel): tool_calls: OptionalNullable[List[ToolCall]] = UNSET prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" role: Optional[AssistantMessageRole] = "assistant" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index ab97e52a..a0125c35 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -68,7 +69,7 @@ class ChatCompletionRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -93,10 +94,12 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + parallel_tool_calls: NotRequired[bool] class ChatCompletionRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[ChatCompletionRequestMessages] @@ -135,6 +138,10 @@ class ChatCompletionRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + + parallel_tool_calls: Optional[bool] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -150,15 +157,10 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", + "parallel_tool_calls", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index e6c5429b..656f1d58 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict from .tool import Tool, ToolTypedDict @@ -64,7 +65,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[MessagesTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" @@ -88,10 +89,12 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + parallel_tool_calls: NotRequired[bool] class ChatCompletionStreamRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" messages: List[Messages] @@ -129,6 +132,10 @@ class ChatCompletionStreamRequest(BaseModel): n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: Optional[Prediction] = None + + parallel_tool_calls: Optional[bool] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -144,15 +151,10 @@ def serialize_model(self, handler): "presence_penalty", "frequency_penalty", "n", + "prediction", + "parallel_tool_calls", ] - nullable_fields = [ - "model", - "temperature", - "max_tokens", - "random_seed", - "tools", - "n", - ] + nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py index 4da1153a..da5671e3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py @@ -1,6 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict from mistralai_gcp.utils import get_discriminator @@ -10,13 +11,16 @@ ContentChunkTypedDict = TypeAliasType( - "ContentChunkTypedDict", Union[TextChunkTypedDict, ReferenceChunkTypedDict] + "ContentChunkTypedDict", + Union[TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict], ) ContentChunk = Annotated[ Union[ - Annotated[TextChunk, Tag("text")], Annotated[ReferenceChunk, Tag("reference")] + Annotated[ImageURLChunk, Tag("image_url")], + Annotated[TextChunk, Tag("text")], + Annotated[ReferenceChunk, Tag("reference")], ], Discriminator(lambda m: get_discriminator(m, "type", "type")), ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 81c87b7e..6dfb7373 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -26,7 +26,7 @@ class FIMCompletionRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -52,7 +52,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. 
Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -98,7 +98,6 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ - "model", "temperature", "max_tokens", "random_seed", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 356758d3..406749bb 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -26,7 +26,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -51,7 +51,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): - model: Nullable[str] + model: str r"""ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` @@ -96,7 +96,6 @@ def serialize_model(self, handler): "min_tokens", ] nullable_fields = [ - "model", "temperature", "max_tokens", "random_seed", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py index c3168eec..7ad1ae64 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/function.py @@ -10,6 +10,7 @@ class FunctionTypedDict(TypedDict): name: str parameters: Dict[str, Any] description: NotRequired[str] + strict: NotRequired[bool] class Function(BaseModel): @@ -17,4 +18,6 @@ class Function(BaseModel): parameters: Dict[str, Any] - description: Optional[str] = "" + description: Optional[str] = None + + strict: Optional[bool] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py new file mode 100644 
index 00000000..e7aa11f0 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class ImageURLTypedDict(TypedDict): + url: str + detail: NotRequired[Nullable[str]] + + +class ImageURL(BaseModel): + url: str + + detail: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["detail"] + nullable_fields = ["detail"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py new file mode 100644 index 00000000..1fc0b808 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai_gcp.types import BaseModel +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ImageURLChunkImageURLTypedDict = TypeAliasType( + "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) + + +ImageURLChunkType = Literal["image_url"] + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLChunkImageURLTypedDict + type: NotRequired[ImageURLChunkType] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLChunkImageURL + + type: Optional[ImageURLChunkType] = "image_url" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py new file mode 100644 index 00000000..2529ce31 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "strict"] + nullable_fields = ["description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py new file mode 100644 index 00000000..742aac0b --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import BaseModel +from mistralai_gcp.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + TYPE: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index fde89862..5a24f644 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -1,8 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_gcp.types import BaseModel +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -10,8 +18,41 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "json_schema"] + nullable_fields = ["json_schema"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py index 2c06b812..08c39951 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object"] +ResponseFormats = Literal["text", "json_object", "json_schema"] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py index 5b4b217a..ecbac8d6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -14,6 +14,7 @@ class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] + index: NotRequired[int] class ToolCall(BaseModel): @@ -24,3 +25,5 @@ class ToolCall(BaseModel): type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( None ) + + index: Optional[int] = 0 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index abfea8db..dd93cc7f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -1,23 +1,25 @@ -"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" import json -from typing import Optional, Tuple, Union +import weakref +from typing import Any, Optional, cast import google.auth import google.auth.credentials import google.auth.transport import google.auth.transport.requests import httpx + from mistralai_gcp import models from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks from mistralai_gcp.chat import Chat from mistralai_gcp.fim import Fim -from mistralai_gcp.types import Nullable +from mistralai_gcp.types import UNSET, OptionalNullable from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, HttpClient +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, NoOpLogger +from .utils.logger import Logger, get_default_logger from .utils.retries import RetryConfig LEGACY_MODEL_ID_FORMAT = { @@ -26,20 +28,21 @@ "mistral-nemo-2407": "mistral-nemo@2407", } -def get_model_info(model: str) -> Tuple[str, str]: + +def get_model_info(model: str) -> tuple[str, str]: # if the model requiers the legacy fomat, use it, else do nothing. if model in LEGACY_MODEL_ID_FORMAT: return "-".join(model.split("-")[:-1]), LEGACY_MODEL_ID_FORMAT[model] return model, model - class MistralGoogleCloud(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" chat: Chat + r"""Chat Completion API.""" fim: Fim - r"""Chat Completion API""" + r"""Fill-in-the-middle API.""" def __init__( self, @@ -48,16 +51,20 @@ def __init__( access_token: Optional[str] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, - retry_config: Optional[Nullable[RetryConfig]] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, debug_logger: Optional[Logger] = None, ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. - :param region: The Google Cloud region to use for all methods - :param project_id: The project ID to use for all methods + :param api_key: The api_key required for authentication + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods + :param url_params: Parameters to optionally template the server URL with :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ if not access_token: @@ -72,36 +79,42 @@ def __init__( ) project_id = project_id or loaded_project_id + if project_id is None: raise models.SDKError("project_id must be provided") def auth_token() -> str: if access_token: return access_token + credentials.refresh(google.auth.transport.requests.Request()) token = credentials.token if not token: raise models.SDKError("Failed to get token from credentials") return token + client_supplied = True if client is None: client = httpx.Client() + client_supplied = False 
assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol." + async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() + async_client_supplied = False if debug_logger is None: - debug_logger = NoOpLogger() + debug_logger = get_default_logger() assert issubclass( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." - security = None + security: Any = None if callable(auth_token): security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment api_key=auth_token() @@ -113,23 +126,24 @@ def auth_token() -> str: self, SDKConfiguration( client=client, + client_supplied=client_supplied, async_client=async_client, + async_client_supplied=async_client_supplied, security=security, server_url=f"https://{region}-aiplatform.googleapis.com", server=None, retry_config=retry_config, + timeout_ms=timeout_ms, debug_logger=debug_logger, ), ) hooks = SDKHooks() - hook = GoogleCloudBeforeRequestHook(region, project_id) hooks.register_before_request_hook(hook) - current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, self.sdk_configuration.client + current_server_url, client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -137,22 +151,53 @@ def auth_token() -> str: # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + self._init_sdks() def _init_sdks(self): self.chat = Chat(self.sdk_configuration) self.fim = Fim(self.sdk_configuration) + def __enter__(self): + return self -class 
GoogleCloudBeforeRequestHook(BeforeRequestHook): + async def __aenter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + async def __aexit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None + + +class GoogleCloudBeforeRequestHook(BeforeRequestHook): def __init__(self, region: str, project_id: str): self.region = region self.project_id = project_id def before_request( self, hook_ctx, request: httpx.Request - ) -> Union[httpx.Request, Exception]: + ) -> httpx.Request | Exception: # The goal of this function is to template in the region, project and model into the URL path # We do this here so that the API remains more user-friendly model_id = None @@ -167,7 +212,6 @@ def before_request( if model_id == "": raise models.SDKError("model must be provided") - stream = "streamRawPredict" in request.url.path specifier = "streamRawPredict" if stream else "rawPredict" url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model_id}:{specifier}" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index 3c149cc6..c373d27d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -1,6 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from ._hooks import SDKHooks +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass @@ -20,17 +26,19 @@ @dataclass class SDKConfiguration: - client: HttpClient - async_client: AsyncHttpClient + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" server: Optional[str] = "" language: str = "python" - openapi_doc_version: str = "0.0.2" - sdk_version: str = "1.2.6" - gen_version: str = "2.486.1" - user_agent: str = "speakeasy-sdk/python 1.2.6 2.486.1 0.0.2 mistralai-gcp" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 26d51ae8..3cded8fe 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -42,6 +42,7 @@ match_content_type, match_status_codes, match_response, + cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -94,4 +95,5 @@ "validate_float", "validate_int", "validate_open_enum", + "cast_partial", ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index c5eb3659..baa41fbd 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -7,14 +7,15 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspect import is_optional_type +from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - if is_optional_type(type(d)) and d is None: + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -42,7 +43,8 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - if is_optional_type(type(f)) and f is None: + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -70,7 +72,8 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - if is_optional_type(type(i)) and i is None: + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: return None if isinstance(i, Unset): return i @@ -118,7 +121,8 @@ def validate(e): def validate_const(v): def validate(c): - if is_optional_type(type(c)) and c is None: + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: return None if v != c: @@ -163,7 +167,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + return json.dumps(d[next(iter(d))], separators=(",", ":")) def is_nullable(field): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py index 2b4b6832..dae01a44 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py @@ -3,8 +3,9 @@ from datetime import datetime from enum import Enum from email.message import Message +from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast from httpx import Response from pydantic import BaseModel @@ -51,6 +52,8 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") +def cast_partial(typ): + return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] diff --git a/poetry.lock b/poetry.lock index 78003ff1..3d36b94f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -12,9 +12,6 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - [[package]] name = "anyio" version = "4.5.2" @@ -315,14 +312,14 @@ trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" -version = "0.27.2" +version = "0.28.1" description = "The next generation HTTP client." 
optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "httpx-0.27.2-py3-none-any.whl", hash = "sha256:7bb2708e112d8fdd7829cd4243970f0c223274051cb35ee80c03301ee29a3df0"}, - {file = "httpx-0.27.2.tar.gz", hash = "sha256:f7c2be1d2f3c3c3160d441802406b206c2b76f5947b11115e6df10c6c65e66c2"}, + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] [package.dependencies] @@ -330,7 +327,6 @@ anyio = "*" certifi = "*" httpcore = "==1.*" idna = "*" -sniffio = "*" [package.extras] brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] @@ -381,18 +377,6 @@ files = [ [package.extras] colors = ["colorama (>=0.4.6)"] -[[package]] -name = "jsonpath-python" -version = "1.0.6" -description = "A more powerful JSONPath implementation in modern python" -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "jsonpath-python-1.0.6.tar.gz", hash = "sha256:dd5be4a72d8a2995c3f583cf82bf3cd1a9544cfdabf2d22595b67aff07349666"}, - {file = "jsonpath_python-1.0.6-py3-none-any.whl", hash = "sha256:1e3b78df579f5efc23565293612decee04214609208a2335884b3ee3f786b575"}, -] - [[package]] name = "mccabe" version = "0.7.0" @@ -407,50 +391,56 @@ files = [ [[package]] name = "mypy" -version = "1.13.0" +version = "1.14.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, - {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, - {file = 
"mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, - {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, - {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, - {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, - {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, - {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, - {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, - {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, - {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, - {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, - {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, - {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, - {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, - {file = 
"mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, - {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, - {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, - {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, - {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, - {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, - {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, - {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, - {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, - {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, - {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, - {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, - {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, - {file = 
"mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, - {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, - {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, - {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, + {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, + {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, + {file = 
"mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, + {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, + {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, + {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, + {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, + {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, + {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, + {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, + {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, + {file = 
"mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, + {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, + {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, + {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, ] [package.dependencies] -mypy-extensions = ">=1.0.0" +mypy_extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.6.0" +typing_extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -465,7 +455,7 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" -groups = ["main", "dev"] +groups = ["dev"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -547,19 +537,19 @@ pyasn1 = ">=0.4.6,<0.7.0" [[package]] name = "pydantic" -version = "2.10.2" +version = "2.10.6" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic-2.10.2-py3-none-any.whl", hash = "sha256:cfb96e45951117c3024e6b67b25cdc33a3cb7b2fa62e239f7af1378358a1d99e"}, - {file = "pydantic-2.10.2.tar.gz", hash = "sha256:2bc2d7f17232e0841cbba4641e65ba1eb6fafb3a08de3a091ff3ce14a197c4fa"}, + {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, + {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, ] 
[package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.27.1" +pydantic-core = "2.27.2" typing-extensions = ">=4.12.2" [package.extras] @@ -568,112 +558,112 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows [[package]] name = "pydantic-core" -version = "2.27.1" +version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"}, - {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"}, - {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"}, - {file = 
"pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"}, - {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"}, - {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"}, - {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"}, - {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"}, - {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"}, - {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"}, - {file = 
"pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"}, - {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"}, - {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"}, - {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"}, - {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"}, - {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"}, - {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"}, - {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"}, - {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"}, - {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"}, - {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"}, - {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"}, - {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"}, - {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"}, - {file = 
"pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"}, - {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"}, - {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"}, - {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"}, - {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"}, - {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"}, - {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"}, - {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"}, - {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"}, - {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"}, - {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"}, - {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"}, - {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"}, - {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"}, - {file = 
"pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"}, - {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"}, - {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"}, - {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"}, - {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"}, - {file = 
"pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"}, - {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"}, - {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"}, - {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, 
+ {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = 
"pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, 
+ {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", 
hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = 
"pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, ] [package.dependencies] @@ -909,20 +899,19 @@ files = [ ] [[package]] -name = "typing-inspect" -version = "0.9.0" -description = "Runtime inspection utilities for typing module." 
+name = "typing-inspection" +version = "0.4.0" +description = "Runtime typing introspection tools" optional = false -python-versions = "*" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f"}, - {file = "typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78"}, + {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, + {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, ] [package.dependencies] -mypy-extensions = ">=0.3.0" -typing-extensions = ">=3.7.4" +typing-extensions = ">=4.12.0" [[package]] name = "urllib3" @@ -948,5 +937,5 @@ gcp = ["google-auth", "requests"] [metadata] lock-version = "2.1" -python-versions = ">=3.8" -content-hash = "f0f19d81d36ebe966895f21a0a9dd33118783904418f4103189c475e5903b958" +python-versions = ">=3.9" +content-hash = "c3917a9114ca2a0c01aedf207fa1b59cc259bb07c4d2914fe2ed9a4cb3e1785e" diff --git a/pylintrc b/pylintrc index 9d193c42..266bc815 100644 --- a/pylintrc +++ b/pylintrc @@ -455,7 +455,10 @@ disable=raw-checker-failed, bare-except, broad-exception-caught, fixme, - relative-beyond-top-level + relative-beyond-top-level, + consider-using-with, + wildcard-import, + unused-wildcard-import # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option diff --git a/pyproject.toml b/pyproject.toml index 785997ab..6307fc75 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.5.2" +version = "1.6.0" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/scripts/prepare-readme.py b/scripts/prepare_readme.py similarity index 84% rename from scripts/prepare-readme.py rename to scripts/prepare_readme.py index 9111d6cb..16f6fc7e 100644 --- a/scripts/prepare-readme.py +++ b/scripts/prepare_readme.py @@ -4,7 +4,7 @@ import shutil try: - with open("README.md", "r") as rh: + with open("README.md", "r", encoding="utf-8") as rh: readme_contents = rh.read() GITHUB_URL = "https://github.com/mistralai/client-python.git" GITHUB_URL = ( @@ -21,13 +21,13 @@ readme_contents, ) - with open("README-PYPI.md", "w") as wh: + with open("README-PYPI.md", "w", encoding="utf-8") as wh: wh.write(readme_contents) except Exception as e: try: print("Failed to rewrite README.md to README-PYPI.md, copying original instead") print(e) shutil.copyfile("README.md", "README-PYPI.md") - except Exception as e: + except Exception as ie: print("Failed to copy README.md to README-PYPI.md") - print(e) + print(ie) diff --git a/scripts/publish.sh b/scripts/publish.sh index ab45b1f9..f2f2cf2c 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -2,6 +2,6 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare-readme.py +poetry run python scripts/prepare_readme.py poetry publish --build --skip-existing diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py index fe448e94..ebc789ff 100644 --- a/src/mistralai/_hooks/types.py +++ b/src/mistralai/_hooks/types.py @@ -7,16 +7,19 @@ class HookContext: + base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None security_source: Optional[Union[Any, Callable[[], Any]]] = None def __init__( self, + base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes self.security_source 
= security_source @@ -25,21 +28,30 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( - hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, ) diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 64883488..9507529b 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.5.2" +__version__: str = "1.6.0" __openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.497.0" -__user_agent__: str = "speakeasy-sdk/python 1.5.2 2.497.0 0.0.2 mistralai" +__gen_version__: str = "2.548.6" +__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 05fd165c..e81f01aa 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -46,6 +46,7 @@ def complete( prediction: Optional[ Union[models.Prediction, models.PredictionTypedDict] ] = None, + parallel_tool_calls: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -66,6 +67,7 @@ def complete( :param frequency_penalty: frequency_penalty penalizes the 
repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: + :param parallel_tool_calls: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -78,6 +80,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionRequest( max_tokens=max_tokens, @@ -100,6 +104,7 @@ def complete( prediction=utils.get_pydantic_model( prediction, Optional[models.Prediction] ), + parallel_tool_calls=parallel_tool_calls, agent_id=agent_id, ) @@ -132,6 +137,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -143,12 +149,14 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -204,6 +212,7 @@ async def complete_async( prediction: Optional[ 
Union[models.Prediction, models.PredictionTypedDict] ] = None, + parallel_tool_calls: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -224,6 +233,7 @@ async def complete_async( :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: + :param parallel_tool_calls: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -236,6 +246,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionRequest( max_tokens=max_tokens, @@ -258,6 +270,7 @@ async def complete_async( prediction=utils.get_pydantic_model( prediction, Optional[models.Prediction] ), + parallel_tool_calls=parallel_tool_calls, agent_id=agent_id, ) @@ -290,6 +303,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -301,12 +315,14 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, 
models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -362,6 +378,7 @@ def stream( prediction: Optional[ Union[models.Prediction, models.PredictionTypedDict] ] = None, + parallel_tool_calls: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -384,6 +401,7 @@ def stream( :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. 
:param prediction: + :param parallel_tool_calls: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -396,6 +414,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, @@ -418,6 +438,7 @@ def stream( prediction=utils.get_pydantic_model( prediction, Optional[models.Prediction] ), + parallel_tool_calls=parallel_tool_calls, agent_id=agent_id, ) @@ -450,6 +471,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env( @@ -462,7 +484,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -471,8 +493,10 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -528,6 +552,7 @@ async def stream_async( prediction: Optional[ Union[models.Prediction, models.PredictionTypedDict] ] = None, + parallel_tool_calls: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -550,6 +575,7 @@ async def stream_async( :param frequency_penalty: 
frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: + :param parallel_tool_calls: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -562,6 +588,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.AgentsCompletionStreamRequest( max_tokens=max_tokens, @@ -584,6 +612,7 @@ async def stream_async( prediction=utils.get_pydantic_model( prediction, Optional[models.Prediction] ), + parallel_tool_calls=parallel_tool_calls, agent_id=agent_id, ) @@ -616,6 +645,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], security_source=get_security_from_env( @@ -628,7 +658,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -637,8 +667,10 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", 
"*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index cda8adda..512e3072 100644 --- a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -231,6 +231,10 @@ def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( @@ -303,6 +307,10 @@ async def do(): req.headers, get_body_content(req), ) + + if client is None: + raise ValueError("client is required") + http_res = await client.send(req, stream=stream) except Exception as e: _, e = self.sdk_configuration.get_hooks().after_error( diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 67646ffe..0e7294f9 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -122,6 +122,7 @@ def complete( prediction: Optional[ Union[models.Prediction, models.PredictionTypedDict] ] = None, + parallel_tool_calls: Optional[bool] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -145,6 +146,7 @@ def complete( :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: + :param parallel_tool_calls: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -158,6 +160,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -181,6 +185,7 @@ def complete( prediction=utils.get_pydantic_model( prediction, Optional[models.Prediction] ), + parallel_tool_calls=parallel_tool_calls, safe_prompt=safe_prompt, ) @@ -213,6 +218,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -224,12 +230,14 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -279,6 +287,7 @@ async def complete_async( prediction: Optional[ Union[models.Prediction, models.PredictionTypedDict] ] = None, + parallel_tool_calls: Optional[bool] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -302,6 +311,7 @@ async def complete_async( :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: + :param parallel_tool_calls: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -315,6 +325,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionRequest( model=model, @@ -338,6 +350,7 @@ async def complete_async( prediction=utils.get_pydantic_model( prediction, Optional[models.Prediction] ), + parallel_tool_calls=parallel_tool_calls, safe_prompt=safe_prompt, ) @@ -370,6 +383,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -381,12 +395,14 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -444,6 +460,7 @@ def stream( prediction: Optional[ Union[models.Prediction, 
models.PredictionTypedDict] ] = None, + parallel_tool_calls: Optional[bool] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -469,6 +486,7 @@ def stream( :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: + :param parallel_tool_calls: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -482,6 +500,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -507,6 +527,7 @@ def stream( prediction=utils.get_pydantic_model( prediction, Optional[models.Prediction] ), + parallel_tool_calls=parallel_tool_calls, safe_prompt=safe_prompt, ) @@ -539,6 +560,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env( @@ -551,7 +573,7 @@ def stream( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -560,8 +582,10 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + 
http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -619,6 +643,7 @@ async def stream_async( prediction: Optional[ Union[models.Prediction, models.PredictionTypedDict] ] = None, + parallel_tool_calls: Optional[bool] = None, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -644,6 +669,7 @@ async def stream_async( :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: + :param parallel_tool_calls: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -657,6 +683,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatCompletionStreamRequest( model=model, @@ -682,6 +710,7 @@ async def stream_async( prediction=utils.get_pydantic_model( prediction, Optional[models.Prediction] ), + parallel_tool_calls=parallel_tool_calls, safe_prompt=safe_prompt, ) @@ -714,6 +743,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], security_source=get_security_from_env( @@ -726,7 +756,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -735,8 +765,10 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index 6ff1d6a8..d85961f3 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -40,6 +40,8 @@ def moderate( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ClassificationRequest( model=model, @@ -75,6 +77,7 @@ def moderate( http_res = self.do_request( 
hook_ctx=HookContext( + base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -86,12 +89,14 @@ def moderate( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -141,6 +146,8 @@ async def moderate_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ClassificationRequest( model=model, @@ -176,6 +183,7 @@ async def moderate_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -187,12 +195,14 @@ async def moderate_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) 
raise models.SDKError( @@ -227,7 +237,7 @@ def moderate_chat( timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> models.ClassificationResponse: - r"""Moderations Chat + r"""Chat Moderations :param model: :param inputs: Chat to classify @@ -244,6 +254,8 @@ def moderate_chat( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( model=model, @@ -280,7 +292,8 @@ def moderate_chat( http_res = self.do_request( hook_ctx=HookContext( - operation_id="moderations_chat_v1_chat_moderations_post", + base_url=base_url or "", + operation_id="chat_moderations_v1_chat_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -291,12 +304,14 @@ def moderate_chat( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -331,7 +346,7 @@ async def moderate_chat_async( timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> models.ClassificationResponse: - r"""Moderations Chat + r"""Chat Moderations :param model: :param inputs: Chat to classify @@ -348,6 +363,8 @@ async def moderate_chat_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( model=model, @@ 
-384,7 +401,8 @@ async def moderate_chat_async( http_res = await self.do_request_async( hook_ctx=HookContext( - operation_id="moderations_chat_v1_chat_moderations_post", + base_url=base_url or "", + operation_id="chat_moderations_v1_chat_moderations_post", oauth2_scopes=[], security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -395,12 +413,14 @@ async def moderate_chat_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 09101c4f..b99ff0cf 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -39,6 +39,8 @@ def create( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.EmbeddingRequest( model=model, @@ -74,6 +76,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -85,12 +88,14 @@ def create( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): - data = 
utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -139,6 +144,8 @@ async def create_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.EmbeddingRequest( model=model, @@ -174,6 +181,7 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -185,12 +193,14 @@ async def create_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 042e4aea..0ffc4857 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -44,6 +44,8 @@ def upload( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), @@ -83,6 +85,7 @@ def upload( http_res = self.do_request( 
hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -148,6 +151,8 @@ async def upload_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( file=utils.get_pydantic_model(file, models.File), @@ -187,6 +192,7 @@ async def upload_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -256,6 +262,8 @@ def list( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesListFilesRequest( page=page, @@ -292,6 +300,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env( @@ -361,6 +370,8 @@ async def list_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesListFilesRequest( page=page, @@ -397,6 +408,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], security_source=get_security_from_env( @@ -456,6 +468,8 @@ def retrieve( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, @@ -487,6 +501,7 @@ def retrieve( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -546,6 +561,8 @@ async def retrieve_async( if server_url is not 
None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, @@ -577,6 +594,7 @@ async def retrieve_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -636,6 +654,8 @@ def delete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, @@ -667,6 +687,7 @@ def delete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -726,6 +747,8 @@ async def delete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, @@ -757,6 +780,7 @@ async def delete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -816,6 +840,8 @@ def download( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, @@ -847,6 +873,7 @@ def download( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_download_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -907,6 +934,8 @@ async def download_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, @@ -938,6 +967,7 @@ async def download_async( http_res = await 
self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_download_file", oauth2_scopes=[], security_source=get_security_from_env( @@ -998,6 +1028,8 @@ def get_signed_url( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, @@ -1030,6 +1062,7 @@ def get_signed_url( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], security_source=get_security_from_env( @@ -1089,6 +1122,8 @@ async def get_signed_url_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, @@ -1121,6 +1156,7 @@ async def get_signed_url_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index c11f6c99..032c722f 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -60,6 +60,8 @@ def complete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -103,6 +105,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -114,12 +117,14 @@ def complete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = 
utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -189,6 +194,8 @@ async def complete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionRequest( model=model, @@ -232,6 +239,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -243,12 +251,14 @@ async def complete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -318,6 +328,8 @@ def stream( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -361,6 +373,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env( @@ -373,7 +386,7 @@ def stream( 
retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStream( http_res, @@ -382,8 +395,10 @@ def stream( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -453,6 +468,8 @@ async def stream_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.FIMCompletionStreamRequest( model=model, @@ -496,6 +513,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], security_source=get_security_from_env( @@ -508,7 +526,7 @@ async def stream_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "text/event-stream"): return eventstreaming.EventStreamAsync( http_res, @@ -517,8 +535,10 @@ async def stream_async( ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - data = utils.unmarshal_json(http_res_text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/httpclient.py 
b/src/mistralai/httpclient.py index 9dc43cb0..1e426352 100644 --- a/src/mistralai/httpclient.py +++ b/src/mistralai/httpclient.py @@ -94,7 +94,9 @@ class ClientOwner(Protocol): def close_clients( owner: ClientOwner, sync_client: Union[HttpClient, None], + sync_client_supplied: bool, async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, ) -> None: """ A finalizer function that is meant to be used with weakref.finalize to close @@ -107,13 +109,13 @@ def close_clients( owner.client = None owner.async_client = None - if sync_client is not None: + if sync_client is not None and not sync_client_supplied: try: sync_client.close() except Exception: pass - if async_client is not None: + if async_client is not None and not async_client_supplied: is_async = False try: asyncio.get_running_loop() diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index ea66bfc6..675ece0b 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -52,6 +52,8 @@ def list( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, @@ -91,6 +93,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -166,6 +169,8 @@ async def list_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, @@ -205,6 +210,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -290,6 +296,8 @@ def create( if server_url is not None: base_url = server_url + else: + base_url = 
self._get_url(base_url, url_variables) request = models.JobIn( model=model, @@ -339,6 +347,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -426,6 +435,8 @@ async def create_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobIn( model=model, @@ -475,6 +486,7 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -536,6 +548,8 @@ def get( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, @@ -567,6 +581,7 @@ def get( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -626,6 +641,8 @@ async def get_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, @@ -657,6 +674,7 @@ async def get_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -716,6 +734,8 @@ def cancel( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, @@ -747,6 +767,7 @@ def cancel( http_res = self.do_request( hook_ctx=HookContext( + 
base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -806,6 +827,8 @@ async def cancel_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, @@ -837,6 +860,7 @@ async def cancel_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -896,6 +920,8 @@ def start( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, @@ -927,6 +953,7 @@ def start( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -986,6 +1013,8 @@ async def start_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, @@ -1017,6 +1046,7 @@ async def start_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index fe6b266a..e0d3c616 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -48,6 +48,8 @@ def list( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobsRequest( page=page, @@ -85,6 +87,7 @@ def list( 
http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -156,6 +159,8 @@ async def list_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobsRequest( page=page, @@ -193,6 +198,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=[], security_source=get_security_from_env( @@ -260,6 +266,8 @@ def create( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.BatchJobIn( input_files=input_files, @@ -298,6 +306,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -365,6 +374,8 @@ async def create_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.BatchJobIn( input_files=input_files, @@ -403,6 +414,7 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -462,6 +474,8 @@ def get( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, @@ -493,6 +507,7 @@ def get( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -552,6 +567,8 @@ async def get_async( if server_url is not 
None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, @@ -583,6 +600,7 @@ async def get_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -642,6 +660,8 @@ def cancel( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchCancelBatchJobRequest( job_id=job_id, @@ -673,6 +693,7 @@ def cancel( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=[], security_source=get_security_from_env( @@ -732,6 +753,8 @@ async def cancel_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesBatchCancelBatchJobRequest( job_id=job_id, @@ -763,6 +786,7 @@ async def cancel_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index 7d806835..cd81393a 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -85,6 +85,7 @@ class AgentsCompletionRequestTypedDict(TypedDict): n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + parallel_tool_calls: NotRequired[bool] class AgentsCompletionRequest(BaseModel): @@ -123,6 +124,8 @@ class AgentsCompletionRequest(BaseModel): prediction: Optional[Prediction] = None + parallel_tool_calls: 
Optional[bool] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -137,6 +140,7 @@ def serialize_model(self, handler): "frequency_penalty", "n", "prediction", + "parallel_tool_calls", ] nullable_fields = ["max_tokens", "random_seed", "tools", "n"] null_default_fields = [] diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index 0eac55a5..ab6a307a 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -84,6 +84,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + parallel_tool_calls: NotRequired[bool] class AgentsCompletionStreamRequest(BaseModel): @@ -121,6 +122,8 @@ class AgentsCompletionStreamRequest(BaseModel): prediction: Optional[Prediction] = None + parallel_tool_calls: Optional[bool] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -135,6 +138,7 @@ def serialize_model(self, handler): "frequency_penalty", "n", "prediction", + "parallel_tool_calls", ] nullable_fields = ["max_tokens", "random_seed", "tools", "n"] null_default_fields = [] diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index eaed8435..a277db8f 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -85,6 +85,7 @@ class ChatCompletionRequestTypedDict(TypedDict): n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + parallel_tool_calls: NotRequired[bool] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ 
-131,6 +132,8 @@ class ChatCompletionRequest(BaseModel): prediction: Optional[Prediction] = None + parallel_tool_calls: Optional[bool] = None + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -150,6 +153,7 @@ def serialize_model(self, handler): "frequency_penalty", "n", "prediction", + "parallel_tool_calls", "safe_prompt", ] nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 4f593c01..9ed7b3f6 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -88,6 +88,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + parallel_tool_calls: NotRequired[bool] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -133,6 +134,8 @@ class ChatCompletionStreamRequest(BaseModel): prediction: Optional[Prediction] = None + parallel_tool_calls: Optional[bool] = None + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -152,6 +155,7 @@ def serialize_model(self, handler): "frequency_penalty", "n", "prediction", + "parallel_tool_calls", "safe_prompt", ] nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] diff --git a/src/mistralai/models/function.py b/src/mistralai/models/function.py index 2430fa4f..7d40cf75 100644 --- a/src/mistralai/models/function.py +++ b/src/mistralai/models/function.py @@ -18,6 +18,6 @@ class Function(BaseModel): parameters: Dict[str, Any] - description: Optional[str] = "" + description: Optional[str] = None - strict: Optional[bool] = False + strict: Optional[bool] = None diff --git 
a/src/mistralai/models/jsonschema.py b/src/mistralai/models/jsonschema.py index 76e40330..5d96d1fd 100644 --- a/src/mistralai/models/jsonschema.py +++ b/src/mistralai/models/jsonschema.py @@ -22,7 +22,7 @@ class JSONSchema(BaseModel): description: OptionalNullable[str] = UNSET - strict: Optional[bool] = False + strict: Optional[bool] = None @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index ec45eb36..0b04694d 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -35,6 +35,8 @@ def list( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) req = self._build_request( method="GET", path="/v1/models", @@ -61,6 +63,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env( @@ -72,12 +75,14 @@ def list( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ModelList) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -122,6 +127,8 @@ async def list_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) req = self._build_request_async( method="GET", path="/v1/models", @@ -148,6 +155,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", 
operation_id="list_models_v1_models_get", oauth2_scopes=[], security_source=get_security_from_env( @@ -159,12 +167,14 @@ async def list_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ModelList) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -211,6 +221,8 @@ def retrieve( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, @@ -242,6 +254,7 @@ def retrieve( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env( @@ -253,15 +266,17 @@ def retrieve( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ 
-308,6 +323,8 @@ async def retrieve_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, @@ -339,6 +356,7 @@ async def retrieve_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], security_source=get_security_from_env( @@ -350,15 +368,17 @@ async def retrieve_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json( http_res.text, models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, ) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -405,6 +425,8 @@ def delete( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, @@ -436,6 +458,7 @@ def delete( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env( @@ -447,12 +470,14 @@ def delete( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.DeleteModelOut) if utils.match_response(http_res, "422", "application/json"): - data = 
utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -499,6 +524,8 @@ async def delete_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, @@ -530,6 +557,7 @@ async def delete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], security_source=get_security_from_env( @@ -541,12 +569,14 @@ async def delete_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.DeleteModelOut) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -597,6 +627,8 @@ def update( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, @@ -635,6 +667,7 @@ def update( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], 
security_source=get_security_from_env( @@ -698,6 +731,8 @@ async def update_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, @@ -736,6 +771,7 @@ async def update_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -795,6 +831,8 @@ def archive( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, @@ -826,6 +864,7 @@ def archive( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -885,6 +924,8 @@ async def archive_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, @@ -916,6 +957,7 @@ async def archive_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -975,6 +1017,8 @@ def unarchive( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, @@ -1006,6 +1050,7 @@ def unarchive( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( @@ -1065,6 
+1110,8 @@ async def unarchive_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, @@ -1096,6 +1143,7 @@ async def unarchive_async( http_res = await self.do_request_async( hook_ctx=HookContext( + base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], security_source=get_security_from_env( diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index 56c1da51..5d0e2414 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -47,6 +47,8 @@ def process( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.OCRRequest( model=model, @@ -87,6 +89,7 @@ def process( http_res = self.do_request( hook_ctx=HookContext( + base_url=base_url or "", operation_id="ocr_v1_ocr_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -98,12 +101,14 @@ def process( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.OCRResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -160,6 +165,8 @@ async def process_async( if server_url is not None: base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) request = models.OCRRequest( model=model, @@ -200,6 +207,7 @@ async def process_async( http_res = await self.do_request_async( 
hook_ctx=HookContext( + base_url=base_url or "", operation_id="ocr_v1_ocr_post", oauth2_scopes=[], security_source=get_security_from_env( @@ -211,12 +219,14 @@ async def process_async( retry_config=retry_config, ) - data: Any = None + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.OCRResponse) if utils.match_response(http_res, "422", "application/json"): - data = utils.unmarshal_json(http_res.text, models.HTTPValidationErrorData) - raise models.HTTPValidationError(data=data) + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 00d8370a..e801eaf3 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -68,15 +68,19 @@ def __init__( :param retry_config: The retry configuration to use for all supported methods :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ + client_supplied = True if client is None: client = httpx.Client() + client_supplied = False assert issubclass( type(client), HttpClient ), "The provided client must implement the HttpClient protocol." 
+ async_client_supplied = True if async_client is None: async_client = httpx.AsyncClient() + async_client_supplied = False if debug_logger is None: debug_logger = get_default_logger() @@ -100,7 +104,9 @@ def __init__( self, SDKConfiguration( client=client, + client_supplied=client_supplied, async_client=async_client, + async_client_supplied=async_client_supplied, security=security, server_url=server_url, server=server, @@ -114,7 +120,7 @@ def __init__( current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, self.sdk_configuration.client + current_server_url, client ) if current_server_url != server_url: self.sdk_configuration.server_url = server_url @@ -127,7 +133,9 @@ def __init__( close_clients, cast(ClientOwner, self.sdk_configuration), self.sdk_configuration.client, + self.sdk_configuration.client_supplied, self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, ) self._init_sdks() @@ -151,9 +159,17 @@ async def __aenter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): - if self.sdk_configuration.client is not None: + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): self.sdk_configuration.client.close() + self.sdk_configuration.client = None async def __aexit__(self, exc_type, exc_val, exc_tb): - if self.sdk_configuration.async_client is not None: + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 2ccbcbe1..257ff01d 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -26,8 +26,10 @@ @dataclass class SDKConfiguration: - client: HttpClient - async_client: AsyncHttpClient + client: 
Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool debug_logger: Logger security: Optional[Union[models.Security, Callable[[], models.Security]]] = None server_url: Optional[str] = "" diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py index 151c87d4..d8b21128 100644 --- a/src/mistralai/utils/__init__.py +++ b/src/mistralai/utils/__init__.py @@ -43,6 +43,7 @@ match_content_type, match_status_codes, match_response, + cast_partial, ) from .logger import Logger, get_body_content, get_default_logger @@ -96,4 +97,5 @@ "validate_float", "validate_int", "validate_open_enum", + "cast_partial", ] diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py index c5eb3659..baa41fbd 100644 --- a/src/mistralai/utils/serializers.py +++ b/src/mistralai/utils/serializers.py @@ -7,14 +7,15 @@ from typing_extensions import get_origin from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspect import is_optional_type +from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset def serialize_decimal(as_str: bool): def serialize(d): - if is_optional_type(type(d)) and d is None: + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: return None if isinstance(d, Unset): return d @@ -42,7 +43,8 @@ def validate_decimal(d): def serialize_float(as_str: bool): def serialize(f): - if is_optional_type(type(f)) and f is None: + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: return None if isinstance(f, Unset): return f @@ -70,7 +72,8 @@ def validate_float(f): def serialize_int(as_str: bool): def serialize(i): - if is_optional_type(type(i)) and i is None: + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) 
and i is None: return None if isinstance(i, Unset): return i @@ -118,7 +121,8 @@ def validate(e): def validate_const(v): def validate(c): - if is_optional_type(type(c)) and c is None: + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: return None if v != c: @@ -163,7 +167,7 @@ def marshal_json(val, typ): if len(d) == 0: return "" - return json.dumps(d[next(iter(d))], separators=(",", ":"), sort_keys=True) + return json.dumps(d[next(iter(d))], separators=(",", ":")) def is_nullable(field): diff --git a/src/mistralai/utils/values.py b/src/mistralai/utils/values.py index 2b4b6832..dae01a44 100644 --- a/src/mistralai/utils/values.py +++ b/src/mistralai/utils/values.py @@ -3,8 +3,9 @@ from datetime import datetime from enum import Enum from email.message import Message +from functools import partial import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast from httpx import Response from pydantic import BaseModel @@ -51,6 +52,8 @@ def match_status_codes(status_codes: List[str], status_code: int) -> bool: T = TypeVar("T") +def cast_partial(typ): + return partial(cast, typ) def get_global_from_env( value: Optional[T], env_key: str, type_cast: Callable[[str], T] From a7d02c30c97cae55448044514031a5b10d235444 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Wed, 26 Mar 2025 16:19:37 +0100 Subject: [PATCH 118/223] Add examples for using raw json schema without pydantic (#210) --- .../structured_outputs_with_json_schema.py | 117 ++++++++++++++++++ ...py => structured_outputs_with_pydantic.py} | 0 2 files changed, 117 insertions(+) create mode 100644 examples/structured_outputs_with_json_schema.py rename examples/{structured_outputs.py => structured_outputs_with_pydantic.py} (100%) diff --git a/examples/structured_outputs_with_json_schema.py 
b/examples/structured_outputs_with_json_schema.py new file mode 100644 index 00000000..69ac9690 --- /dev/null +++ b/examples/structured_outputs_with_json_schema.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python + +import os + +from mistralai import Mistral + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + print("Using the .complete method to input a raw json schema to the API:\n") + # When providing raw JSON Schema to the SDK you need to have 'additionalProperties': False in the schema definition + # This is because the API is only accepting a strict JSON Schema + chat_response = client.chat.complete( + model="mistral-large-latest", + messages=[ + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format={ + "type": "json_schema", + "json_schema": { + "name": "MathDemonstration", + "schema_definition": { + "$defs": { + "Explanation": { + "properties": { + "explanation": { + "title": "Explanation", + "type": "string", + }, + "output": {"title": "Output", "type": "string"}, + }, + "required": ["explanation", "output"], + "title": "Explanation", + "type": "object", + "additionalProperties": False, + } + }, + "properties": { + "steps": { + "items": {"$ref": "#/$defs/Explanation"}, + "title": "Steps", + "type": "array", + }, + "final_answer": {"title": "Final Answer", "type": "string"}, + }, + "required": ["steps", "final_answer"], + "title": "MathDemonstration", + "type": "object", + "additionalProperties": False, + }, + "description": None, + "strict": True, + }, + }, + ) + print(chat_response.choices[0].message.content) + + # Or with the streaming API + with client.chat.stream( + 
model="mistral-large-latest", + messages=[ + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format={ + "type": "json_schema", + "json_schema": { + "name": "MathDemonstration", + "schema_definition": { + "$defs": { + "Explanation": { + "properties": { + "explanation": { + "title": "Explanation", + "type": "string", + }, + "output": {"title": "Output", "type": "string"}, + }, + "required": ["explanation", "output"], + "title": "Explanation", + "type": "object", + "additionalProperties": False, + } + }, + "properties": { + "steps": { + "items": {"$ref": "#/$defs/Explanation"}, + "title": "Steps", + "type": "array", + }, + "final_answer": {"title": "Final Answer", "type": "string"}, + }, + "required": ["steps", "final_answer"], + "title": "MathDemonstration", + "type": "object", + "additionalProperties": False, + }, + "description": None, + "strict": True, + }, + }, + ) as stream: + for chunk in stream: + print(chunk.data.choices[0].delta.content, end="") + + +if __name__ == "__main__": + main() diff --git a/examples/structured_outputs.py b/examples/structured_outputs_with_pydantic.py similarity index 100% rename from examples/structured_outputs.py rename to examples/structured_outputs_with_pydantic.py From bc4adf335968c8a272e1ab7da8461c9943d8e701 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 16 Apr 2025 21:41:07 +0200 Subject: [PATCH 119/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.7.0=20(#214)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: 
regenerated with OpenAPI Doc , Speakeasy CLI 1.517.3 * update examples * fix inputs --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .speakeasy/gen.lock | 129 +++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 11 +- README.md | 2 + RELEASES.md | 12 +- docs/models/chatclassificationrequest.md | 9 + docs/models/chatmoderationrequest.md | 3 +- docs/models/classificationobject.md | 9 - docs/models/classificationresponse.md | 10 +- docs/models/classificationtargetresult.md | 8 + ...djobout.md => classifierdetailedjobout.md} | 14 +- .../classifierdetailedjoboutintegrations.md | 11 + .../models/classifierdetailedjoboutjobtype.md | 8 + ...t.md => classifierdetailedjoboutobject.md} | 2 +- ...s.md => classifierdetailedjoboutstatus.md} | 2 +- docs/models/classifierftmodelout.md | 21 + docs/models/classifierftmodeloutmodeltype.md | 8 + ...bject.md => classifierftmodeloutobject.md} | 2 +- docs/models/classifierjobout.md | 23 + ...ons.md => classifierjoboutintegrations.md} | 2 +- docs/models/classifierjoboutjobtype.md | 10 + docs/models/classifierjoboutobject.md | 10 + docs/models/classifierjoboutstatus.md | 19 + docs/models/classifiertargetin.md | 11 + docs/models/classifiertargetout.md | 11 + docs/models/classifiertrainingparameters.md | 13 + docs/models/classifiertrainingparametersin.md | 15 + docs/models/completiondetailedjobout.md | 26 + .../completiondetailedjoboutintegrations.md | 11 + .../models/completiondetailedjoboutjobtype.md | 8 + docs/models/completiondetailedjoboutobject.md | 8 + ...> completiondetailedjoboutrepositories.md} | 2 +- docs/models/completiondetailedjoboutstatus.md | 17 + docs/models/completionftmodelout.md | 20 + docs/models/completionftmodeloutobject.md | 8 + .../models/{jobout.md => completionjobout.md} | 10 +- ...ers.md => completiontrainingparameters.md} | 6 +- ...n.md => completiontrainingparametersin.md} | 6 +- docs/models/embeddingrequest.md | 8 +- docs/models/embeddingrequestinputs.md | 19 + 
docs/models/finetuneablemodeltype.md | 9 + docs/models/ftclassifierlossfunction.md | 9 + docs/models/ftmodelcapabilitiesout.md | 3 +- docs/models/ftmodelout.md | 19 - docs/models/hyperparameters.md | 17 + docs/models/inputs.md | 10 +- docs/models/instructrequest.md | 8 + docs/models/instructrequestinputs.md | 8 + docs/models/instructrequestinputsmessages.md | 29 ++ docs/models/instructrequestmessages.md | 29 ++ docs/models/jobin.md | 7 +- .../jobsapiroutesbatchgetbatchjobsrequest.md | 18 +- ...esfinetuningcancelfinetuningjobresponse.md | 19 + ...esfinetuningcreatefinetuningjobresponse.md | 4 +- ...outesfinetuninggetfinetuningjobresponse.md | 19 + ...outesfinetuninggetfinetuningjobsrequest.md | 1 + ...tesfinetuningstartfinetuningjobresponse.md | 19 + ...sfinetuningupdatefinetunedmodelresponse.md | 19 + docs/models/jobsout.md | 2 +- docs/models/jobsoutdata.md | 17 + docs/models/jobtype.md | 10 + docs/models/modeltype.md | 8 + docs/models/moderationobject.md | 9 + docs/models/moderationresponse.md | 10 + docs/models/response1.md | 17 + docs/models/wandbintegrationout.md | 3 +- docs/sdks/classifiers/README.md | 107 ++++- docs/sdks/embeddings/README.md | 10 +- docs/sdks/jobs/README.md | 18 +- docs/sdks/mistraljobs/README.md | 20 +- docs/sdks/models/README.md | 2 +- examples/async_classifier.py | 98 ++++ examples/async_files.py | 2 +- examples/async_jobs.py | 8 +- examples/async_jobs_chat.py | 27 +- examples/dry_run_job.py | 6 +- examples/files.py | 2 +- examples/fixtures/classifier_sentiments.jsonl | 33 ++ .../ft_training_file.jsonl} | 0 .../ft_validation_file.jsonl} | 0 examples/jobs.py | 8 +- pyproject.toml | 2 +- src/mistralai/_version.py | 4 +- src/mistralai/classifiers.py | 450 +++++++++++++++++- src/mistralai/embeddings.py | 8 +- src/mistralai/jobs.py | 122 +++-- src/mistralai/mistral_jobs.py | 4 +- src/mistralai/models/__init__.py | 243 ++++++++-- src/mistralai/models/archiveftmodelout.py | 14 +- src/mistralai/models/batchjobout.py | 12 +- 
src/mistralai/models/batchjobsout.py | 12 +- .../models/chatclassificationrequest.py | 20 + src/mistralai/models/chatmoderationrequest.py | 11 +- .../models/classificationresponse.py | 21 +- .../models/classificationtargetresult.py | 14 + .../models/classifierdetailedjobout.py | 156 ++++++ src/mistralai/models/classifierftmodelout.py | 101 ++++ src/mistralai/models/classifierjobout.py | 165 +++++++ src/mistralai/models/classifiertargetin.py | 55 +++ src/mistralai/models/classifiertargetout.py | 24 + .../models/classifiertrainingparameters.py | 73 +++ .../models/classifiertrainingparametersin.py | 85 ++++ ...djobout.py => completiondetailedjobout.py} | 68 +-- ...{ftmodelout.py => completionftmodelout.py} | 24 +- .../models/{jobout.py => completionjobout.py} | 49 +- ...ers.py => completiontrainingparameters.py} | 14 +- ...n.py => completiontrainingparametersin.py} | 14 +- src/mistralai/models/embeddingrequest.py | 10 +- src/mistralai/models/finetuneablemodeltype.py | 7 + .../models/ftclassifierlossfunction.py | 7 + .../models/ftmodelcapabilitiesout.py | 3 + src/mistralai/models/githubrepositoryin.py | 14 +- src/mistralai/models/githubrepositoryout.py | 14 +- src/mistralai/models/inputs.py | 54 +++ src/mistralai/models/instructrequest.py | 42 ++ src/mistralai/models/jobin.py | 64 ++- .../jobs_api_routes_batch_get_batch_jobsop.py | 6 +- ...es_fine_tuning_cancel_fine_tuning_jobop.py | 31 +- ...es_fine_tuning_create_fine_tuning_jobop.py | 25 +- ...outes_fine_tuning_get_fine_tuning_jobop.py | 31 +- ...utes_fine_tuning_get_fine_tuning_jobsop.py | 8 + ...tes_fine_tuning_start_fine_tuning_jobop.py | 31 +- ...s_fine_tuning_update_fine_tuned_modelop.py | 30 +- src/mistralai/models/jobsout.py | 37 +- src/mistralai/models/legacyjobmetadataout.py | 15 +- ...ificationobject.py => moderationobject.py} | 12 +- src/mistralai/models/moderationresponse.py | 21 + src/mistralai/models/unarchiveftmodelout.py | 14 +- src/mistralai/models/wandbintegration.py | 14 +- 
src/mistralai/models/wandbintegrationout.py | 21 +- src/mistralai/models_.py | 14 +- 131 files changed, 2948 insertions(+), 532 deletions(-) create mode 100644 docs/models/chatclassificationrequest.md delete mode 100644 docs/models/classificationobject.md create mode 100644 docs/models/classificationtargetresult.md rename docs/models/{detailedjobout.md => classifierdetailedjobout.md} (92%) create mode 100644 docs/models/classifierdetailedjoboutintegrations.md create mode 100644 docs/models/classifierdetailedjoboutjobtype.md rename docs/models/{detailedjoboutobject.md => classifierdetailedjoboutobject.md} (66%) rename docs/models/{detailedjoboutstatus.md => classifierdetailedjoboutstatus.md} (95%) create mode 100644 docs/models/classifierftmodelout.md create mode 100644 docs/models/classifierftmodeloutmodeltype.md rename docs/models/{ftmodeloutobject.md => classifierftmodeloutobject.md} (72%) create mode 100644 docs/models/classifierjobout.md rename docs/models/{detailedjoboutintegrations.md => classifierjoboutintegrations.md} (80%) create mode 100644 docs/models/classifierjoboutjobtype.md create mode 100644 docs/models/classifierjoboutobject.md create mode 100644 docs/models/classifierjoboutstatus.md create mode 100644 docs/models/classifiertargetin.md create mode 100644 docs/models/classifiertargetout.md create mode 100644 docs/models/classifiertrainingparameters.md create mode 100644 docs/models/classifiertrainingparametersin.md create mode 100644 docs/models/completiondetailedjobout.md create mode 100644 docs/models/completiondetailedjoboutintegrations.md create mode 100644 docs/models/completiondetailedjoboutjobtype.md create mode 100644 docs/models/completiondetailedjoboutobject.md rename docs/models/{detailedjoboutrepositories.md => completiondetailedjoboutrepositories.md} (76%) create mode 100644 docs/models/completiondetailedjoboutstatus.md create mode 100644 docs/models/completionftmodelout.md create mode 100644 docs/models/completionftmodeloutobject.md 
rename docs/models/{jobout.md => completionjobout.md} (98%) rename docs/models/{trainingparameters.md => completiontrainingparameters.md} (97%) rename docs/models/{trainingparametersin.md => completiontrainingparametersin.md} (99%) create mode 100644 docs/models/embeddingrequestinputs.md create mode 100644 docs/models/finetuneablemodeltype.md create mode 100644 docs/models/ftclassifierlossfunction.md delete mode 100644 docs/models/ftmodelout.md create mode 100644 docs/models/hyperparameters.md create mode 100644 docs/models/instructrequest.md create mode 100644 docs/models/instructrequestinputs.md create mode 100644 docs/models/instructrequestinputsmessages.md create mode 100644 docs/models/instructrequestmessages.md create mode 100644 docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md create mode 100644 docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md create mode 100644 docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md create mode 100644 docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md create mode 100644 docs/models/jobsoutdata.md create mode 100644 docs/models/jobtype.md create mode 100644 docs/models/modeltype.md create mode 100644 docs/models/moderationobject.md create mode 100644 docs/models/moderationresponse.md create mode 100644 docs/models/response1.md create mode 100644 examples/async_classifier.py create mode 100644 examples/fixtures/classifier_sentiments.jsonl rename examples/{file.jsonl => fixtures/ft_training_file.jsonl} (100%) rename examples/{validation_file.jsonl => fixtures/ft_validation_file.jsonl} (100%) create mode 100644 src/mistralai/models/chatclassificationrequest.py create mode 100644 src/mistralai/models/classificationtargetresult.py create mode 100644 src/mistralai/models/classifierdetailedjobout.py create mode 100644 src/mistralai/models/classifierftmodelout.py create mode 100644 src/mistralai/models/classifierjobout.py create mode 100644 
src/mistralai/models/classifiertargetin.py create mode 100644 src/mistralai/models/classifiertargetout.py create mode 100644 src/mistralai/models/classifiertrainingparameters.py create mode 100644 src/mistralai/models/classifiertrainingparametersin.py rename src/mistralai/models/{detailedjobout.py => completiondetailedjobout.py} (69%) rename src/mistralai/models/{ftmodelout.py => completionftmodelout.py} (81%) rename src/mistralai/models/{jobout.py => completionjobout.py} (89%) rename src/mistralai/models/{trainingparameters.py => completiontrainingparameters.py} (95%) rename src/mistralai/models/{trainingparametersin.py => completiontrainingparametersin.py} (97%) create mode 100644 src/mistralai/models/finetuneablemodeltype.py create mode 100644 src/mistralai/models/ftclassifierlossfunction.py create mode 100644 src/mistralai/models/inputs.py create mode 100644 src/mistralai/models/instructrequest.py rename src/mistralai/models/{classificationobject.py => moderationobject.py} (65%) create mode 100644 src/mistralai/models/moderationresponse.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index fc748eb0..bc731e9a 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 292a97e4dd465554d420c92d78d70c5f + docChecksum: 63f1a973632e9afab0da3d2498994c1b docVersion: 0.0.2 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - releaseVersion: 1.6.0 - configChecksum: 1cfb4e3f53a140213b7b400e79811fe5 + releaseVersion: 1.7.0 + configChecksum: d52ab0a71ab9e0798da08262c59bf31d repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -68,6 +68,7 @@ generatedFiles: - docs/models/batchjobsout.md - docs/models/batchjobsoutobject.md - docs/models/batchjobstatus.md + - 
docs/models/chatclassificationrequest.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionrequest.md - docs/models/chatcompletionrequesttoolchoice.md @@ -79,14 +80,42 @@ generatedFiles: - docs/models/chatmoderationrequest.md - docs/models/chatmoderationrequestinputs.md - docs/models/checkpointout.md - - docs/models/classificationobject.md - docs/models/classificationrequest.md - docs/models/classificationrequestinputs.md - docs/models/classificationresponse.md + - docs/models/classificationtargetresult.md + - docs/models/classifierdetailedjobout.md + - docs/models/classifierdetailedjoboutintegrations.md + - docs/models/classifierdetailedjoboutjobtype.md + - docs/models/classifierdetailedjoboutobject.md + - docs/models/classifierdetailedjoboutstatus.md + - docs/models/classifierftmodelout.md + - docs/models/classifierftmodeloutmodeltype.md + - docs/models/classifierftmodeloutobject.md + - docs/models/classifierjobout.md + - docs/models/classifierjoboutintegrations.md + - docs/models/classifierjoboutjobtype.md + - docs/models/classifierjoboutobject.md + - docs/models/classifierjoboutstatus.md + - docs/models/classifiertargetin.md + - docs/models/classifiertargetout.md + - docs/models/classifiertrainingparameters.md + - docs/models/classifiertrainingparametersin.md - docs/models/completionchunk.md + - docs/models/completiondetailedjobout.md + - docs/models/completiondetailedjoboutintegrations.md + - docs/models/completiondetailedjoboutjobtype.md + - docs/models/completiondetailedjoboutobject.md + - docs/models/completiondetailedjoboutrepositories.md + - docs/models/completiondetailedjoboutstatus.md - docs/models/completionevent.md + - docs/models/completionftmodelout.md + - docs/models/completionftmodeloutobject.md + - docs/models/completionjobout.md - docs/models/completionresponsestreamchoice.md - docs/models/completionresponsestreamchoicefinishreason.md + - docs/models/completiontrainingparameters.md + - 
docs/models/completiontrainingparametersin.md - docs/models/content.md - docs/models/contentchunk.md - docs/models/data.md @@ -94,15 +123,11 @@ generatedFiles: - docs/models/deletemodelout.md - docs/models/deletemodelv1modelsmodeliddeleterequest.md - docs/models/deltamessage.md - - docs/models/detailedjobout.md - - docs/models/detailedjoboutintegrations.md - - docs/models/detailedjoboutobject.md - - docs/models/detailedjoboutrepositories.md - - docs/models/detailedjoboutstatus.md - docs/models/document.md - docs/models/documenturlchunk.md - docs/models/documenturlchunktype.md - docs/models/embeddingrequest.md + - docs/models/embeddingrequestinputs.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md - docs/models/eventout.md @@ -121,12 +146,12 @@ generatedFiles: - docs/models/fimcompletionresponse.md - docs/models/fimcompletionstreamrequest.md - docs/models/fimcompletionstreamrequeststop.md + - docs/models/finetuneablemodeltype.md - docs/models/finishreason.md + - docs/models/ftclassifierlossfunction.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md - docs/models/ftmodelcardtype.md - - docs/models/ftmodelout.md - - docs/models/ftmodeloutobject.md - docs/models/function.md - docs/models/functioncall.md - docs/models/functionname.md @@ -135,30 +160,40 @@ generatedFiles: - docs/models/githubrepositoryout.md - docs/models/githubrepositoryouttype.md - docs/models/httpvalidationerror.md + - docs/models/hyperparameters.md - docs/models/imageurl.md - docs/models/imageurlchunk.md - docs/models/imageurlchunkimageurl.md - docs/models/imageurlchunktype.md - docs/models/inputs.md + - docs/models/instructrequest.md + - docs/models/instructrequestinputs.md + - docs/models/instructrequestinputsmessages.md + - docs/models/instructrequestmessages.md - docs/models/integrations.md - docs/models/jobin.md - docs/models/jobinintegrations.md - docs/models/jobinrepositories.md - docs/models/jobmetadataout.md - - docs/models/jobout.md - 
docs/models/jobsapiroutesbatchcancelbatchjobrequest.md - docs/models/jobsapiroutesbatchgetbatchjobrequest.md - docs/models/jobsapiroutesbatchgetbatchjobsrequest.md - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md - docs/models/jobsout.md + - docs/models/jobsoutdata.md - docs/models/jobsoutobject.md + - docs/models/jobtype.md - docs/models/jsonschema.md - docs/models/legacyjobmetadataout.md - docs/models/legacyjobmetadataoutobject.md @@ -168,6 +203,9 @@ generatedFiles: - docs/models/metricout.md - docs/models/modelcapabilities.md - docs/models/modellist.md + - docs/models/modeltype.md + - docs/models/moderationobject.md + - docs/models/moderationresponse.md - docs/models/object.md - docs/models/ocrimageobject.md - docs/models/ocrpagedimensions.md @@ -181,6 +219,7 @@ generatedFiles: - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/repositories.md + - docs/models/response1.md - docs/models/responseformat.md - docs/models/responseformats.md - docs/models/retrievefileout.md @@ -205,8 +244,6 @@ generatedFiles: - docs/models/toolmessagerole.md - docs/models/tooltypes.md - docs/models/trainingfile.md - - docs/models/trainingparameters.md - - docs/models/trainingparametersin.md - 
docs/models/two.md - docs/models/type.md - docs/models/unarchiveftmodelout.md @@ -270,24 +307,36 @@ generatedFiles: - src/mistralai/models/batchjobout.py - src/mistralai/models/batchjobsout.py - src/mistralai/models/batchjobstatus.py + - src/mistralai/models/chatclassificationrequest.py - src/mistralai/models/chatcompletionchoice.py - src/mistralai/models/chatcompletionrequest.py - src/mistralai/models/chatcompletionresponse.py - src/mistralai/models/chatcompletionstreamrequest.py - src/mistralai/models/chatmoderationrequest.py - src/mistralai/models/checkpointout.py - - src/mistralai/models/classificationobject.py - src/mistralai/models/classificationrequest.py - src/mistralai/models/classificationresponse.py + - src/mistralai/models/classificationtargetresult.py + - src/mistralai/models/classifierdetailedjobout.py + - src/mistralai/models/classifierftmodelout.py + - src/mistralai/models/classifierjobout.py + - src/mistralai/models/classifiertargetin.py + - src/mistralai/models/classifiertargetout.py + - src/mistralai/models/classifiertrainingparameters.py + - src/mistralai/models/classifiertrainingparametersin.py - src/mistralai/models/completionchunk.py + - src/mistralai/models/completiondetailedjobout.py - src/mistralai/models/completionevent.py + - src/mistralai/models/completionftmodelout.py + - src/mistralai/models/completionjobout.py - src/mistralai/models/completionresponsestreamchoice.py + - src/mistralai/models/completiontrainingparameters.py + - src/mistralai/models/completiontrainingparametersin.py - src/mistralai/models/contentchunk.py - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py - src/mistralai/models/deletefileout.py - src/mistralai/models/deletemodelout.py - src/mistralai/models/deltamessage.py - - src/mistralai/models/detailedjobout.py - src/mistralai/models/documenturlchunk.py - src/mistralai/models/embeddingrequest.py - src/mistralai/models/embeddingresponse.py @@ -305,9 +354,10 @@ generatedFiles: - 
src/mistralai/models/fimcompletionrequest.py - src/mistralai/models/fimcompletionresponse.py - src/mistralai/models/fimcompletionstreamrequest.py + - src/mistralai/models/finetuneablemodeltype.py + - src/mistralai/models/ftclassifierlossfunction.py - src/mistralai/models/ftmodelcapabilitiesout.py - src/mistralai/models/ftmodelcard.py - - src/mistralai/models/ftmodelout.py - src/mistralai/models/function.py - src/mistralai/models/functioncall.py - src/mistralai/models/functionname.py @@ -316,9 +366,10 @@ generatedFiles: - src/mistralai/models/httpvalidationerror.py - src/mistralai/models/imageurl.py - src/mistralai/models/imageurlchunk.py + - src/mistralai/models/inputs.py + - src/mistralai/models/instructrequest.py - src/mistralai/models/jobin.py - src/mistralai/models/jobmetadataout.py - - src/mistralai/models/jobout.py - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py - src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -337,6 +388,8 @@ generatedFiles: - src/mistralai/models/metricout.py - src/mistralai/models/modelcapabilities.py - src/mistralai/models/modellist.py + - src/mistralai/models/moderationobject.py + - src/mistralai/models/moderationresponse.py - src/mistralai/models/ocrimageobject.py - src/mistralai/models/ocrpagedimensions.py - src/mistralai/models/ocrpageobject.py @@ -362,8 +415,6 @@ generatedFiles: - src/mistralai/models/toolmessage.py - src/mistralai/models/tooltypes.py - src/mistralai/models/trainingfile.py - - src/mistralai/models/trainingparameters.py - - src/mistralai/models/trainingparametersin.py - src/mistralai/models/unarchiveftmodelout.py - src/mistralai/models/updateftmodelin.py - src/mistralai/models/uploadfileout.py @@ -429,7 +480,7 @@ examples: application/json: {} responses: "200": - application/json: {"id": "", "created": 857478, "owned_by": "", "root": "", "archived": false, "capabilities": {"completion_chat": true, "completion_fim": 
false, "function_calling": false, "fine_tuning": false}, "max_context_length": 32768, "job": "5fa7f0e7-432c-4e47-acb6-0cc78135ddeb"} + application/json: {"id": "", "object": "model", "created": 597129, "owned_by": "", "root": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "fa7f0e74-32ce-447c-9b60-cc78135ddeb8", "model_type": "completion"} jobs_api_routes_fine_tuning_archive_fine_tuned_model: "": parameters: @@ -437,7 +488,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "archived": true} + application/json: {"id": "", "object": "model", "archived": true} jobs_api_routes_fine_tuning_unarchive_fine_tuned_model: "": parameters: @@ -445,7 +496,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "archived": false} + application/json: {"id": "", "object": "model", "archived": false} files_api_routes_upload_file: speakeasy-default-files-api-routes-upload-file: requestBody: @@ -509,7 +560,7 @@ examples: jobs_api_routes_fine_tuning_create_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: requestBody: - application/json: {"model": "Fiesta", "hyperparameters": {"learning_rate": 0.0001}} + application/json: {"model": "Fiesta", "invalid_sample_skip_percentage": 0, "hyperparameters": {"learning_rate": 0.0001}} responses: "200": application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10, "object": "job.metadata"} @@ -520,7 +571,7 @@ examples: job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" responses: "200": - application/json: {"id": "e50f7622-81da-484b-9c66-1c8a99c6b71b", 
"auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "Model Y", "status": "CANCELLED", "job_type": "", "created_at": 415305, "modified_at": 149108, "training_files": ["8f7112aa-f0ab-44e4-83b4-cca3716f6208", "7aa1f8cf-05d8-49d5-88ee-381f6b4b885c"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 856562, "created_at": 1716963433}, {"metrics": {}, "step_number": 328633, "created_at": 1716963433}]} + application/json: {"id": "888f7743-e7c1-4351-b8c6-b985523c4bcb", "auto_start": true, "model": "2", "status": "CANCELLATION_REQUESTED", "created_at": 444836, "modified_at": 424256, "training_files": [], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}, "checkpoints": [{"metrics": {}, "step_number": 550563, "created_at": 1716963433}]} jobs_api_routes_fine_tuning_cancel_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: parameters: @@ -528,7 +579,7 @@ examples: job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" responses: "200": - application/json: {"id": "d50fbe4e-3e32-4613-8574-4d82f3fd6b3c", "auto_start": true, "hyperparameters": {"learning_rate": 0.0001}, "model": "Taurus", "status": "SUCCESS", "job_type": "", "created_at": 251316, "modified_at": 342605, "training_files": ["247ac10c-dc31-412f-a7cc-924123024afa", "0f84bd49-4511-4689-9d07-a64aa724280b", "200aa489-3801-4d6e-a454-eb14cac448cb"], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 949854, "created_at": 1716963433}, {"metrics": {}, "step_number": 516599, "created_at": 1716963433}, {"metrics": {}, "step_number": 439590, "created_at": 1716963433}]} + application/json: {"id": "f7135029-2331-4c6e-bbdc-570b7edb4966", "auto_start": true, "model": "A4", "status": "CANCELLATION_REQUESTED", "created_at": 703131, "modified_at": 929437, "training_files": ["e3e32613-5744-4d82-8f3f-d6b3c11eb45e"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "checkpoints": 
[{"metrics": {}, "step_number": 466651, "created_at": 1716963433}], "classifier_targets": [{"name": "", "labels": ["", "", ""], "weight": 687.66, "loss_function": "single_class"}, {"name": "", "labels": ["", "", ""], "weight": 8470.22, "loss_function": "multi_class"}]} jobs_api_routes_fine_tuning_start_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: parameters: @@ -536,7 +587,7 @@ examples: job_id: "0bf0f9e6-c3e5-4d61-aac8-0e36dcac0dfc" responses: "200": - application/json: {"id": "b676fe58-2c47-483e-831e-c71dbed4c90a", "auto_start": false, "hyperparameters": {"learning_rate": 0.0001}, "model": "A4", "status": "CANCELLED", "job_type": "", "created_at": 874397, "modified_at": 483387, "training_files": [], "object": "job", "checkpoints": [{"metrics": {}, "step_number": 331375, "created_at": 1716963433}, {"metrics": {}, "step_number": 590686, "created_at": 1716963433}, {"metrics": {}, "step_number": 543177, "created_at": 1716963433}]} + application/json: {"id": "bf0f9e6c-3e5d-461a-ac80-e36dcac0dfc8", "auto_start": true, "model": "Explorer", "status": "RUNNING", "created_at": 961967, "modified_at": 914446, "training_files": ["82c4783e-31ec-471d-bbed-4c90a1b0dd73"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "checkpoints": [{"metrics": {}, "step_number": 590686, "created_at": 1716963433}], "classifier_targets": [{"name": "", "labels": [""], "weight": 5494.15, "loss_function": "single_class"}, {"name": "", "labels": ["", ""], "weight": 7945.15, "loss_function": "single_class"}]} jobs_api_routes_batch_get_batch_jobs: speakeasy-default-jobs-api-routes-batch-get-batch-jobs: parameters: @@ -636,7 +687,7 @@ examples: application/json: {"model": "V90", "input": [""]} responses: "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "V90", "results": [{}]} "422": application/json: {} 
moderations_chat_v1_chat_moderations_post: @@ -660,10 +711,28 @@ examples: chat_moderations_v1_chat_moderations_post: speakeasy-default-chat-moderations-v1-chat-moderations-post: requestBody: - application/json: {"model": "Model Y", "input": [[{"content": [], "role": "system"}, {"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}], [{"content": "", "role": "system"}, {"content": [{"image_url": "https://fatherly-colon.name", "type": "image_url"}], "role": "user"}, {"content": "", "role": "user"}]], "truncate_for_context_length": false} + application/json: {"input": [[{"content": [], "role": "system"}, {"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}], [{"content": "", "role": "system"}, {"content": [{"image_url": "https://fatherly-colon.name", "type": "image_url"}], "role": "user"}, {"content": "", "role": "user"}]], "model": "Model Y"} responses: "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Model Y", "results": [{}, {}]} + "422": + application/json: {} + classifications_v1_classifications_post: + speakeasy-default-classifications-v1-classifications-post: + requestBody: + application/json: {"model": "Altima", "input": ""} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Camaro", "results": [{"key": {"scores": {"key": 6063.42, "key1": 1739.44}}, "key1": {"scores": {}}}, {"key": {"scores": {"key": 2625.67}}, "key1": {"scores": {}}}]} + "422": + application/json: {} + chat_classifications_v1_chat_classifications_post: + speakeasy-default-chat-classifications-v1-chat-classifications-post: + requestBody: + application/json: {"model": "Fortwo", 
"input": [{"messages": [{"content": "", "role": "tool"}]}, {"messages": []}]} + responses: + "200": + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "CX-9", "results": [{"key": {"scores": {"key": 4386.53, "key1": 2974.85}}, "key1": {"scores": {"key": 7100.52, "key1": 480.47}}}]} "422": application/json: {} examplesVersion: 1.0.0 diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 3bc90dff..4bf0297c 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.6.0 + version: 1.7.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index b4c6af57..5c57d996 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,10 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:15e39124d61c30c69260e298a909e60996ac6e8623c202d1745b88fc3e67cb2f - sourceBlobDigest: sha256:d16b98efd9214ceb1c89beedc40e67dd09349d5122076f6e16d1a552ee5b3e63 + sourceRevisionDigest: sha256:e7953d0c99d1c036d6bfe223052f231a89626f7007a105a96258dad2eedab39e + sourceBlobDigest: sha256:66ed2d18b563f3350de4ab16c9e2ca6ce425c6376feb5fcf1511d5d074908091 tags: - latest - - speakeasy-sdk-regen-1742466858 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +36,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:15e39124d61c30c69260e298a909e60996ac6e8623c202d1745b88fc3e67cb2f - sourceBlobDigest: sha256:d16b98efd9214ceb1c89beedc40e67dd09349d5122076f6e16d1a552ee5b3e63 + sourceRevisionDigest: sha256:e7953d0c99d1c036d6bfe223052f231a89626f7007a105a96258dad2eedab39e + sourceBlobDigest: sha256:66ed2d18b563f3350de4ab16c9e2ca6ce425c6376feb5fcf1511d5d074908091 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: 
sha256:4de7cac024939b19dfba3601531280e278d2d8188dc063827694bda3992666fc + codeSamplesRevisionDigest: sha256:7c657301f482932fca0a3e914d3c25820ebb7e535e1887daea3cd9240eca0444 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/README.md b/README.md index 6bfeae52..d5b265b6 100644 --- a/README.md +++ b/README.md @@ -441,6 +441,8 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [moderate](docs/sdks/classifiers/README.md#moderate) - Moderations * [moderate_chat](docs/sdks/classifiers/README.md#moderate_chat) - Chat Moderations +* [classify](docs/sdks/classifiers/README.md#classify) - Classifications +* [classify_chat](docs/sdks/classifiers/README.md#classify_chat) - Chat Classifications ### [embeddings](docs/sdks/embeddings/README.md) diff --git a/RELEASES.md b/RELEASES.md index 4827ac51..629e92d9 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -188,4 +188,14 @@ Based on: ### Generated - [python v1.6.0] . ### Releases -- [PyPI v1.6.0] https://pypi.org/project/mistralai/1.6.0 - . \ No newline at end of file +- [PyPI v1.6.0] https://pypi.org/project/mistralai/1.6.0 - . + +## 2025-04-16 18:35:19 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.0] . +### Releases +- [PyPI v1.7.0] https://pypi.org/project/mistralai/1.7.0 - . 
\ No newline at end of file diff --git a/docs/models/chatclassificationrequest.md b/docs/models/chatclassificationrequest.md new file mode 100644 index 00000000..910d62ae --- /dev/null +++ b/docs/models/chatclassificationrequest.md @@ -0,0 +1,9 @@ +# ChatClassificationRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | +| `model` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Chat to classify | \ No newline at end of file diff --git a/docs/models/chatmoderationrequest.md b/docs/models/chatmoderationrequest.md index 2b8f46cb..69b6c1dc 100644 --- a/docs/models/chatmoderationrequest.md +++ b/docs/models/chatmoderationrequest.md @@ -5,6 +5,5 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `model` | *str* | :heavy_check_mark: | N/A | | `inputs` | [models.ChatModerationRequestInputs](../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `truncate_for_context_length` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classificationobject.md b/docs/models/classificationobject.md deleted file mode 100644 index 68f2e2b2..00000000 --- a/docs/models/classificationobject.md +++ /dev/null @@ -1,9 +0,0 @@ -# ClassificationObject - - -## Fields - -| Field | Type | Required | Description | -| ----------------------------- | ----------------------------- | 
----------------------------- | ----------------------------- | -| `categories` | Dict[str, *bool*] | :heavy_minus_sign: | Classifier result thresholded | -| `category_scores` | Dict[str, *float*] | :heavy_minus_sign: | Classifier result | \ No newline at end of file diff --git a/docs/models/classificationresponse.md b/docs/models/classificationresponse.md index 4765ff62..d1633ae7 100644 --- a/docs/models/classificationresponse.md +++ b/docs/models/classificationresponse.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | mod-e5cc70bb28c444948073e77776eb30ef | -| `model` | *Optional[str]* | :heavy_minus_sign: | N/A | | -| `results` | List[[models.ClassificationObject](../models/classificationobject.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | mod-e5cc70bb28c444948073e77776eb30ef | +| `model` | *str* | :heavy_check_mark: | N/A | | +| `results` | List[Dict[str, 
[models.ClassificationTargetResult](../models/classificationtargetresult.md)]] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/classificationtargetresult.md b/docs/models/classificationtargetresult.md new file mode 100644 index 00000000..f3b10727 --- /dev/null +++ b/docs/models/classificationtargetresult.md @@ -0,0 +1,8 @@ +# ClassificationTargetResult + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `scores` | Dict[str, *float*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/detailedjobout.md b/docs/models/classifierdetailedjobout.md similarity index 92% rename from docs/models/detailedjobout.md rename to docs/models/classifierdetailedjobout.md index f7470327..99227c01 100644 --- a/docs/models/detailedjobout.md +++ b/docs/models/classifierdetailedjobout.md @@ -1,4 +1,4 @@ -# DetailedJobOut +# ClassifierDetailedJobOut ## Fields @@ -7,20 +7,20 @@ | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `hyperparameters` | [models.TrainingParameters](../models/trainingparameters.md) | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| -| `status` | [models.DetailedJobOutStatus](../models/detailedjoboutstatus.md) | :heavy_check_mark: | N/A | -| `job_type` | *str* | :heavy_check_mark: | N/A | +| `status` | [models.ClassifierDetailedJobOutStatus](../models/classifierdetailedjoboutstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.DetailedJobOutObject]](../models/detailedjoboutobject.md) | :heavy_minus_sign: | N/A | +| `object` | [Optional[models.ClassifierDetailedJobOutObject]](../models/classifierdetailedjoboutobject.md) | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.DetailedJobOutIntegrations](../models/detailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.ClassifierDetailedJobOutIntegrations](../models/classifierdetailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `repositories` | List[[models.DetailedJobOutRepositories](../models/detailedjoboutrepositories.md)] | :heavy_minus_sign: | N/A | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `job_type` | [Optional[models.ClassifierDetailedJobOutJobType]](../models/classifierdetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | | `events` | List[[models.EventOut](../models/eventout.md)] | 
:heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | | `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifierdetailedjoboutintegrations.md b/docs/models/classifierdetailedjoboutintegrations.md new file mode 100644 index 00000000..5a09465e --- /dev/null +++ b/docs/models/classifierdetailedjoboutintegrations.md @@ -0,0 +1,11 @@ +# ClassifierDetailedJobOutIntegrations + + +## Supported Types + +### `models.WandbIntegrationOut` + +```python +value: models.WandbIntegrationOut = /* values here */ +``` + diff --git a/docs/models/classifierdetailedjoboutjobtype.md b/docs/models/classifierdetailedjoboutjobtype.md new file mode 100644 index 00000000..0d1c6573 --- /dev/null +++ b/docs/models/classifierdetailedjoboutjobtype.md @@ -0,0 +1,8 @@ +# ClassifierDetailedJobOutJobType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/detailedjoboutobject.md b/docs/models/classifierdetailedjoboutobject.md similarity index 66% rename from docs/models/detailedjoboutobject.md rename to docs/models/classifierdetailedjoboutobject.md index 3731b1f6..08cbcffc 100644 --- a/docs/models/detailedjoboutobject.md +++ b/docs/models/classifierdetailedjoboutobject.md @@ -1,4 +1,4 @@ -# DetailedJobOutObject +# ClassifierDetailedJobOutObject ## Values diff --git a/docs/models/detailedjoboutstatus.md b/docs/models/classifierdetailedjoboutstatus.md similarity index 95% rename from docs/models/detailedjoboutstatus.md rename to docs/models/classifierdetailedjoboutstatus.md index 955d5a26..c3118aaf 100644 --- a/docs/models/detailedjoboutstatus.md +++ b/docs/models/classifierdetailedjoboutstatus.md @@ -1,4 +1,4 @@ -# DetailedJobOutStatus +# ClassifierDetailedJobOutStatus ## Values diff --git 
a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md new file mode 100644 index 00000000..406102cf --- /dev/null +++ b/docs/models/classifierftmodelout.md @@ -0,0 +1,21 @@ +# ClassifierFTModelOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `model_type` | [Optional[models.ClassifierFTModelOutModelType]](../models/classifierftmodeloutmodeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifierftmodeloutmodeltype.md b/docs/models/classifierftmodeloutmodeltype.md new file mode 100644 index 00000000..e1e7e465 --- /dev/null +++ b/docs/models/classifierftmodeloutmodeltype.md @@ -0,0 +1,8 @@ +# 
ClassifierFTModelOutModelType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/ftmodeloutobject.md b/docs/models/classifierftmodeloutobject.md similarity index 72% rename from docs/models/ftmodeloutobject.md rename to docs/models/classifierftmodeloutobject.md index e12b214e..9fe05bcf 100644 --- a/docs/models/ftmodeloutobject.md +++ b/docs/models/classifierftmodeloutobject.md @@ -1,4 +1,4 @@ -# FTModelOutObject +# ClassifierFTModelOutObject ## Values diff --git a/docs/models/classifierjobout.md b/docs/models/classifierjobout.md new file mode 100644 index 00000000..5fa290c1 --- /dev/null +++ b/docs/models/classifierjobout.md @@ -0,0 +1,23 @@ +# ClassifierJobOut + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | The ID of the job. | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| +| `status` | [models.ClassifierJobOutStatus](../models/classifierjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | +| `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | +| `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | +| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | +| `object` | [Optional[models.ClassifierJobOutObject]](../models/classifierjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | +| `integrations` | List[[models.ClassifierJobOutIntegrations](../models/classifierjoboutintegrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. 
| +| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `job_type` | [Optional[models.ClassifierJobOutJobType]](../models/classifierjoboutjobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). | \ No newline at end of file diff --git a/docs/models/detailedjoboutintegrations.md b/docs/models/classifierjoboutintegrations.md similarity index 80% rename from docs/models/detailedjoboutintegrations.md rename to docs/models/classifierjoboutintegrations.md index 46beabc1..d938d0b9 100644 --- a/docs/models/detailedjoboutintegrations.md +++ b/docs/models/classifierjoboutintegrations.md @@ -1,4 +1,4 @@ -# DetailedJobOutIntegrations +# ClassifierJobOutIntegrations ## Supported Types diff --git a/docs/models/classifierjoboutjobtype.md b/docs/models/classifierjoboutjobtype.md new file mode 100644 index 00000000..7f5236fa --- /dev/null +++ b/docs/models/classifierjoboutjobtype.md @@ -0,0 +1,10 @@ +# ClassifierJobOutJobType + +The type of job (`FT` for fine-tuning). + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/classifierjoboutobject.md b/docs/models/classifierjoboutobject.md new file mode 100644 index 00000000..1b42d547 --- /dev/null +++ b/docs/models/classifierjoboutobject.md @@ -0,0 +1,10 @@ +# ClassifierJobOutObject + +The object type of the fine-tuning job. + + +## Values + +| Name | Value | +| ----- | ----- | +| `JOB` | job | \ No newline at end of file diff --git a/docs/models/classifierjoboutstatus.md b/docs/models/classifierjoboutstatus.md new file mode 100644 index 00000000..4520f164 --- /dev/null +++ b/docs/models/classifierjoboutstatus.md @@ -0,0 +1,19 @@ +# ClassifierJobOutStatus + +The current status of the fine-tuning job. 
+ + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/classifiertargetin.md b/docs/models/classifiertargetin.md new file mode 100644 index 00000000..78cab67b --- /dev/null +++ b/docs/models/classifiertargetin.md @@ -0,0 +1,11 @@ +# ClassifierTargetIn + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `labels` | List[*str*] | :heavy_check_mark: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `loss_function` | [OptionalNullable[models.FTClassifierLossFunction]](../models/ftclassifierlossfunction.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertargetout.md b/docs/models/classifiertargetout.md new file mode 100644 index 00000000..57535ae5 --- /dev/null +++ b/docs/models/classifiertargetout.md @@ -0,0 +1,11 @@ +# ClassifierTargetOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | 
------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | N/A | +| `labels` | List[*str*] | :heavy_check_mark: | N/A | +| `weight` | *float* | :heavy_check_mark: | N/A | +| `loss_function` | [models.FTClassifierLossFunction](../models/ftclassifierlossfunction.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertrainingparameters.md b/docs/models/classifiertrainingparameters.md new file mode 100644 index 00000000..3b6f3be6 --- /dev/null +++ b/docs/models/classifiertrainingparameters.md @@ -0,0 +1,13 @@ +# ClassifierTrainingParameters + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifiertrainingparametersin.md b/docs/models/classifiertrainingparametersin.md new file mode 100644 index 00000000..1287c973 --- /dev/null +++ b/docs/models/classifiertrainingparametersin.md @@ -0,0 +1,15 @@ +# ClassifierTrainingParametersIn + +The fine-tuning hyperparameter settings used in a classifier fine-tune job. 
+ + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. 
| +| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. | +| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | +| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | +| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completiondetailedjobout.md b/docs/models/completiondetailedjobout.md new file mode 100644 index 00000000..b42dd419 --- /dev/null +++ b/docs/models/completiondetailedjobout.md @@ -0,0 +1,26 @@ +# CompletionDetailedJobOut + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `auto_start` | *bool* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | +| `status` | [models.CompletionDetailedJobOutStatus](../models/completiondetailedjoboutstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `modified_at` | *int* | :heavy_check_mark: | N/A | +| `training_files` | List[*str*] | :heavy_check_mark: | N/A | +| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | +| `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | +| `object` | [Optional[models.CompletionDetailedJobOutObject]](../models/completiondetailedjoboutobject.md) | :heavy_minus_sign: | N/A | +| `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.CompletionDetailedJobOutIntegrations](../models/completiondetailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | +| `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `job_type` | [Optional[models.CompletionDetailedJobOutJobType]](../models/completiondetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.CompletionDetailedJobOutRepositories](../models/completiondetailedjoboutrepositories.md)] | :heavy_minus_sign: | N/A | +| `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| +| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completiondetailedjoboutintegrations.md b/docs/models/completiondetailedjoboutintegrations.md new file mode 100644 index 00000000..af6bbcc5 --- /dev/null +++ b/docs/models/completiondetailedjoboutintegrations.md @@ -0,0 +1,11 @@ +# CompletionDetailedJobOutIntegrations + + +## Supported Types + +### `models.WandbIntegrationOut` + +```python +value: models.WandbIntegrationOut = /* values here */ +``` + diff --git a/docs/models/completiondetailedjoboutjobtype.md b/docs/models/completiondetailedjoboutjobtype.md new file mode 100644 index 00000000..fb24db0c --- /dev/null +++ b/docs/models/completiondetailedjoboutjobtype.md @@ -0,0 +1,8 @@ +# CompletionDetailedJobOutJobType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/completiondetailedjoboutobject.md b/docs/models/completiondetailedjoboutobject.md new file mode 100644 index 00000000..1bec88e5 --- /dev/null +++ b/docs/models/completiondetailedjoboutobject.md @@ -0,0 +1,8 @@ +# CompletionDetailedJobOutObject + + +## Values + +| Name | Value | +| ----- | ----- | +| `JOB` | job | \ No newline at end of file diff --git a/docs/models/detailedjoboutrepositories.md b/docs/models/completiondetailedjoboutrepositories.md similarity index 76% rename from docs/models/detailedjoboutrepositories.md rename to docs/models/completiondetailedjoboutrepositories.md index 4b32079a..4f9727c3 100644 --- a/docs/models/detailedjoboutrepositories.md +++ b/docs/models/completiondetailedjoboutrepositories.md @@ -1,4 +1,4 @@ -# DetailedJobOutRepositories +# CompletionDetailedJobOutRepositories ## Supported Types diff --git a/docs/models/completiondetailedjoboutstatus.md b/docs/models/completiondetailedjoboutstatus.md new file mode 100644 index 00000000..b80525ba --- /dev/null +++ 
b/docs/models/completiondetailedjoboutstatus.md @@ -0,0 +1,17 @@ +# CompletionDetailedJobOutStatus + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `QUEUED` | QUEUED | +| `STARTED` | STARTED | +| `VALIDATING` | VALIDATING | +| `VALIDATED` | VALIDATED | +| `RUNNING` | RUNNING | +| `FAILED_VALIDATION` | FAILED_VALIDATION | +| `FAILED` | FAILED | +| `SUCCESS` | SUCCESS | +| `CANCELLED` | CANCELLED | +| `CANCELLATION_REQUESTED` | CANCELLATION_REQUESTED | \ No newline at end of file diff --git a/docs/models/completionftmodelout.md b/docs/models/completionftmodelout.md new file mode 100644 index 00000000..ca1c5289 --- /dev/null +++ b/docs/models/completionftmodelout.md @@ -0,0 +1,20 @@ +# CompletionFTModelOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.CompletionFTModelOutObject]](../models/completionftmodeloutobject.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `model_type` | 
[Optional[models.ModelType]](../models/modeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionftmodeloutobject.md b/docs/models/completionftmodeloutobject.md new file mode 100644 index 00000000..6f9d858c --- /dev/null +++ b/docs/models/completionftmodeloutobject.md @@ -0,0 +1,8 @@ +# CompletionFTModelOutObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/jobout.md b/docs/models/completionjobout.md similarity index 98% rename from docs/models/jobout.md rename to docs/models/completionjobout.md index 652c9d16..381aeb94 100644 --- a/docs/models/jobout.md +++ b/docs/models/completionjobout.md @@ -1,4 +1,4 @@ -# JobOut +# CompletionJobOut ## Fields @@ -7,18 +7,18 @@ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | The ID of the job. 
| | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `hyperparameters` | [models.TrainingParameters](../models/trainingparameters.md) | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | | `status` | [models.Status](../models/status.md) | :heavy_check_mark: | The current status of the fine-tuning job. | -| `job_type` | *str* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | +| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | | `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | | `integrations` | List[[models.Integrations](../models/integrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. 
| -| `repositories` | List[[models.Repositories](../models/repositories.md)] | :heavy_minus_sign: | N/A | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `job_type` | [Optional[models.JobType]](../models/jobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). | +| `repositories` | List[[models.Repositories](../models/repositories.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/trainingparameters.md b/docs/models/completiontrainingparameters.md similarity index 97% rename from docs/models/trainingparameters.md rename to docs/models/completiontrainingparameters.md index e56df8e9..4746a95d 100644 --- a/docs/models/trainingparameters.md +++ b/docs/models/completiontrainingparameters.md @@ -1,4 +1,4 @@ -# TrainingParameters +# CompletionTrainingParameters ## Fields @@ -10,5 +10,5 @@ | `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/trainingparametersin.md b/docs/models/completiontrainingparametersin.md similarity index 99% rename from docs/models/trainingparametersin.md rename to docs/models/completiontrainingparametersin.md index 64c31a44..9fcc714e 100644 --- a/docs/models/trainingparametersin.md +++ b/docs/models/completiontrainingparametersin.md @@ -1,4 +1,4 @@ -# 
TrainingParametersIn +# CompletionTrainingParametersIn The fine-tuning hyperparameter settings used in a fine-tune job. @@ -12,5 +12,5 @@ The fine-tuning hyperparameter settings used in a fine-tune job. | `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | | `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 3bdd79e8..242bb3e3 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | Example | -| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. 
| mistral-embed | -| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | \ No newline at end of file +| Field | Type | Required | Description | Example | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | \ No newline at end of file diff --git a/docs/models/embeddingrequestinputs.md b/docs/models/embeddingrequestinputs.md new file mode 100644 index 00000000..a3f82c1c --- /dev/null +++ b/docs/models/embeddingrequestinputs.md @@ -0,0 +1,19 @@ +# EmbeddingRequestInputs + +Text to embed. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/finetuneablemodeltype.md b/docs/models/finetuneablemodeltype.md new file mode 100644 index 00000000..34b24bd4 --- /dev/null +++ b/docs/models/finetuneablemodeltype.md @@ -0,0 +1,9 @@ +# FineTuneableModelType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `COMPLETION` | completion | +| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/ftclassifierlossfunction.md b/docs/models/ftclassifierlossfunction.md new file mode 100644 index 00000000..919cdd38 --- /dev/null +++ b/docs/models/ftclassifierlossfunction.md @@ -0,0 +1,9 @@ +# FTClassifierLossFunction + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `SINGLE_CLASS` | single_class | +| `MULTI_CLASS` | multi_class | \ No newline at end of file diff --git a/docs/models/ftmodelcapabilitiesout.md b/docs/models/ftmodelcapabilitiesout.md index 3cb52377..19690476 100644 --- a/docs/models/ftmodelcapabilitiesout.md +++ b/docs/models/ftmodelcapabilitiesout.md @@ -8,4 +8,5 @@ | `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelout.md 
b/docs/models/ftmodelout.md deleted file mode 100644 index 6dec7156..00000000 --- a/docs/models/ftmodelout.md +++ /dev/null @@ -1,19 +0,0 @@ -# FTModelOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.FTModelOutObject]](../models/ftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/hyperparameters.md b/docs/models/hyperparameters.md new file mode 100644 index 00000000..46a6dd6b --- /dev/null +++ b/docs/models/hyperparameters.md @@ -0,0 +1,17 @@ +# Hyperparameters + + +## Supported Types + +### `models.CompletionTrainingParametersIn` + +```python +value: models.CompletionTrainingParametersIn = /* values here */ +``` + +### `models.ClassifierTrainingParametersIn` + +```python +value: models.ClassifierTrainingParametersIn = /* values here */ +``` + diff --git a/docs/models/inputs.md b/docs/models/inputs.md index 45264f9e..0f62a7ce 100644 --- a/docs/models/inputs.md +++ b/docs/models/inputs.md @@ -1,19 +1,19 @@ # Inputs -Text to 
embed. +Chat to classify ## Supported Types -### `str` +### `models.InstructRequestInputs` ```python -value: str = /* values here */ +value: models.InstructRequestInputs = /* values here */ ``` -### `List[str]` +### `List[models.InstructRequest]` ```python -value: List[str] = /* values here */ +value: List[models.InstructRequest] = /* values here */ ``` diff --git a/docs/models/instructrequest.md b/docs/models/instructrequest.md new file mode 100644 index 00000000..9500cb58 --- /dev/null +++ b/docs/models/instructrequest.md @@ -0,0 +1,8 @@ +# InstructRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `messages` | List[[models.InstructRequestMessages](../models/instructrequestmessages.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/instructrequestinputs.md b/docs/models/instructrequestinputs.md new file mode 100644 index 00000000..4caa028f --- /dev/null +++ b/docs/models/instructrequestinputs.md @@ -0,0 +1,8 @@ +# InstructRequestInputs + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `messages` | List[[models.InstructRequestInputsMessages](../models/instructrequestinputsmessages.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/instructrequestinputsmessages.md 
b/docs/models/instructrequestinputsmessages.md new file mode 100644 index 00000000..237e131f --- /dev/null +++ b/docs/models/instructrequestinputsmessages.md @@ -0,0 +1,29 @@ +# InstructRequestInputsMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/instructrequestmessages.md b/docs/models/instructrequestmessages.md new file mode 100644 index 00000000..9c866a7d --- /dev/null +++ b/docs/models/instructrequestmessages.md @@ -0,0 +1,29 @@ +# InstructRequestMessages + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/jobin.md b/docs/models/jobin.md index 6fd661cf..d6cbd27a 100644 --- a/docs/models/jobin.md +++ b/docs/models/jobin.md @@ -6,10 +6,13 @@ | Field | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | -| `hyperparameters` | [models.TrainingParametersIn](../models/trainingparametersin.md) | :heavy_check_mark: | The fine-tuning hyperparameter settings used in a fine-tune job. | +| `hyperparameters` | [models.Hyperparameters](../models/hyperparameters.md) | :heavy_check_mark: | N/A | | `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. 
| | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | | `integrations` | List[[models.JobInIntegrations](../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | +| `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `job_type` | [OptionalNullable[models.FineTuneableModelType]](../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | | `repositories` | List[[models.JobInRepositories](../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | -| `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | \ No newline at end of file +| `classifier_targets` | List[[models.ClassifierTargetIn](../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md index 93de090e..f2a3bb78 100644 --- a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md +++ b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md @@ -3,12 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | 
:heavy_minus_sign: | N/A | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `status` | [OptionalNullable[models.BatchJobStatus]](../models/batchjobstatus.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `status` | List[[models.BatchJobStatus](../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md new file mode 100644 index 00000000..1b331662 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningCancelFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierDetailedJobOut` + +```python +value: models.ClassifierDetailedJobOut = /* values here */ +``` + +### `models.CompletionDetailedJobOut` + +```python +value: models.CompletionDetailedJobOut = /* values here */ +``` + diff --git 
a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md index dd12c71c..eeddc3cd 100644 --- a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md +++ b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md @@ -5,10 +5,10 @@ OK ## Supported Types -### `models.JobOut` +### `models.Response1` ```python -value: models.JobOut = /* values here */ +value: models.Response1 = /* values here */ ``` ### `models.LegacyJobMetadataOut` diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md new file mode 100644 index 00000000..e0d2e361 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierDetailedJobOut` + +```python +value: models.ClassifierDetailedJobOut = /* values here */ +``` + +### `models.CompletionDetailedJobOut` + +```python +value: models.CompletionDetailedJobOut = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md index 9d25d79c..3dca3cd8 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md @@ -9,6 +9,7 @@ | `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. 
| +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | | `status` | [OptionalNullable[models.QueryParamStatus]](../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | | `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md new file mode 100644 index 00000000..64f4cca6 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningStartFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierDetailedJobOut` + +```python +value: models.ClassifierDetailedJobOut = /* values here */ +``` + +### `models.CompletionDetailedJobOut` + +```python +value: models.CompletionDetailedJobOut = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md new file mode 100644 index 00000000..54f4c398 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningUpdateFineTunedModelResponse + +OK + + +## Supported Types + +### `models.ClassifierFTModelOut` + +```python +value: models.ClassifierFTModelOut = /* values here */ +``` + +### `models.CompletionFTModelOut` + +```python +value: models.CompletionFTModelOut = /* values here */ +``` + diff --git a/docs/models/jobsout.md b/docs/models/jobsout.md index 99ff75ec..d71793ef 100644 
--- a/docs/models/jobsout.md +++ b/docs/models/jobsout.md @@ -6,5 +6,5 @@ | Field | Type | Required | Description | | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | | `total` | *int* | :heavy_check_mark: | N/A | -| `data` | List[[models.JobOut](../models/jobout.md)] | :heavy_minus_sign: | N/A | +| `data` | List[[models.JobsOutData](../models/jobsoutdata.md)] | :heavy_minus_sign: | N/A | | `object` | [Optional[models.JobsOutObject]](../models/jobsoutobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsoutdata.md b/docs/models/jobsoutdata.md new file mode 100644 index 00000000..28cec311 --- /dev/null +++ b/docs/models/jobsoutdata.md @@ -0,0 +1,17 @@ +# JobsOutData + + +## Supported Types + +### `models.ClassifierJobOut` + +```python +value: models.ClassifierJobOut = /* values here */ +``` + +### `models.CompletionJobOut` + +```python +value: models.CompletionJobOut = /* values here */ +``` + diff --git a/docs/models/jobtype.md b/docs/models/jobtype.md new file mode 100644 index 00000000..847c6622 --- /dev/null +++ b/docs/models/jobtype.md @@ -0,0 +1,10 @@ +# JobType + +The type of job (`FT` for fine-tuning). 
+ + +## Values + +| Name | Value | +| ------------ | ------------ | +| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/modeltype.md b/docs/models/modeltype.md new file mode 100644 index 00000000..a31c3ca0 --- /dev/null +++ b/docs/models/modeltype.md @@ -0,0 +1,8 @@ +# ModelType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/moderationobject.md b/docs/models/moderationobject.md new file mode 100644 index 00000000..320b2ab4 --- /dev/null +++ b/docs/models/moderationobject.md @@ -0,0 +1,9 @@ +# ModerationObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `categories` | Dict[str, *bool*] | :heavy_minus_sign: | Moderation result thresholds | +| `category_scores` | Dict[str, *float*] | :heavy_minus_sign: | Moderation result | \ No newline at end of file diff --git a/docs/models/moderationresponse.md b/docs/models/moderationresponse.md new file mode 100644 index 00000000..75a5eec7 --- /dev/null +++ b/docs/models/moderationresponse.md @@ -0,0 +1,10 @@ +# ModerationResponse + + +## Fields + +| Field | Type | Required | Description | Example | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | mod-e5cc70bb28c444948073e77776eb30ef | +| `model` | *str* | :heavy_check_mark: | N/A | | +| `results` | List[[models.ModerationObject](../models/moderationobject.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/response1.md b/docs/models/response1.md 
new file mode 100644 index 00000000..2e73fdbb --- /dev/null +++ b/docs/models/response1.md @@ -0,0 +1,17 @@ +# Response1 + + +## Supported Types + +### `models.ClassifierJobOut` + +```python +value: models.ClassifierJobOut = /* values here */ +``` + +### `models.CompletionJobOut` + +```python +value: models.CompletionJobOut = /* values here */ +``` + diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationout.md index b9a3a86d..f924b636 100644 --- a/docs/models/wandbintegrationout.md +++ b/docs/models/wandbintegrationout.md @@ -8,4 +8,5 @@ | `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | | `type` | [Optional[models.WandbIntegrationOutType]](../models/wandbintegrationouttype.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | -| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index cbe409bb..19761046 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -9,6 +9,8 @@ Classifiers API. 
* [moderate](#moderate) - Moderations * [moderate_chat](#moderate_chat) - Chat Moderations +* [classify](#classify) - Classifications +* [classify_chat](#classify_chat) - Chat Classifications ## moderate @@ -44,7 +46,7 @@ with Mistral( ### Response -**[models.ClassificationResponse](../../models/classificationresponse.md)** +**[models.ModerationResponse](../../models/moderationresponse.md)** ### Errors @@ -68,7 +70,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.moderate_chat(model="Model Y", inputs=[ + res = mistral.classifiers.moderate_chat(inputs=[ [ { "content": [ @@ -114,7 +116,7 @@ with Mistral( "role": "user", }, ], - ]) + ], model="Model Y") # Handle response print(res) @@ -125,13 +127,108 @@ with Mistral( | Parameter | Type | Required | Description | | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | | `inputs` | [models.ChatModerationRequestInputs](../../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `truncate_for_context_length` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response +**[models.ModerationResponse](../../models/moderationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## classify + +Classifications + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.classifiers.classify(model="Altima", inputs="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ClassificationResponse](../../models/classificationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## classify_chat + +Chat Classifications + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.classifiers.classify_chat(model="Fortwo", inputs=[ + { + "messages": [ + { + "content": "", + "role": "tool", + }, + ], + }, + { + "messages": [ + + ], + }, + ]) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Chat to classify | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + **[models.ClassificationResponse](../../models/classificationresponse.md)** ### Errors diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 8c386439..d55b38fb 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -36,11 +36,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index ecb11def..75d3b57d 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -41,6 +41,7 @@ with Mistral( | `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | | `status` | [OptionalNullable[models.QueryParamStatus]](../../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | | `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. 
| @@ -73,7 +74,9 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={}) + res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={ + "learning_rate": 0.0001, + }) # Handle response print(res) @@ -85,13 +88,16 @@ with Mistral( | Parameter | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| -| `hyperparameters` | [models.TrainingParametersIn](../../models/trainingparametersin.md) | :heavy_check_mark: | The fine-tuning hyperparameter settings used in a fine-tune job. | +| `hyperparameters` | [models.Hyperparameters](../../models/hyperparameters.md) | :heavy_check_mark: | N/A | | `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | | `integrations` | List[[models.JobInIntegrations](../../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | -| `repositories` | List[[models.JobInRepositories](../../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. 
| +| `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `job_type` | [OptionalNullable[models.FineTuneableModelType]](../../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.JobInRepositories](../../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetIn](../../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -135,7 +141,7 @@ with Mistral( ### Response -**[models.DetailedJobOut](../../models/detailedjobout.md)** +**[models.JobsAPIRoutesFineTuningGetFineTuningJobResponse](../../models/jobsapiroutesfinetuninggetfinetuningjobresponse.md)** ### Errors @@ -174,7 +180,7 @@ with Mistral( ### Response -**[models.DetailedJobOut](../../models/detailedjobout.md)** +**[models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse](../../models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md)** ### Errors @@ -213,7 +219,7 @@ with Mistral( ### Response -**[models.DetailedJobOut](../../models/detailedjobout.md)** +**[models.JobsAPIRoutesFineTuningStartFineTuningJobResponse](../../models/jobsapiroutesfinetuningstartfinetuningjobresponse.md)** ### Errors diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 5b80a45b..e2dac8b4 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -34,16 +34,16 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | ------------------------------------------------------------------------- | -| `page` | 
*Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `status` | [OptionalNullable[models.BatchJobStatus]](../../models/batchjobstatus.md) | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `status` | List[[models.BatchJobStatus](../../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index dd7baf50..d7a5ed85 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -166,7 +166,7 @@ with Mistral( ### Response -**[models.FTModelOut](../../models/ftmodelout.md)** +**[models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse](../../models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md)** ### Errors diff --git a/examples/async_classifier.py b/examples/async_classifier.py new file mode 100644 index 00000000..10c8bb76 --- /dev/null +++ b/examples/async_classifier.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python + +from pprint import pprint +import asyncio +from mistralai import Mistral, TrainingFile, ClassifierTrainingParametersIn + +import os + + +async def upload_files(client: Mistral, file_names: list[str]) -> list[str]: + # Upload files + print("Uploading files...") + + file_ids = [] + for file_name in file_names: + with open(file_name, "rb") as file: + f = await client.files.upload_async( + file={ + "file_name": file_name, + "content": file.read(), + }, + purpose="fine-tune", + ) + file_ids.append(f.id) + print("Files uploaded...") + return file_ids + + +async def train_classifier(client: Mistral,training_file_ids: list[str]) -> str: + print("Creating job...") + job = await client.fine_tuning.jobs.create_async( + model="ministral-3b-latest", + job_type="classifier", + training_files=[ + TrainingFile(file_id=training_file_id) + for training_file_id in training_file_ids + ], + hyperparameters=ClassifierTrainingParametersIn( + learning_rate=0.0001, + ), + auto_start=True, + ) + + print(f"Job created ({job.id})") + + i = 1 + while True: + await asyncio.sleep(10) + detailed_job = await client.fine_tuning.jobs.get_async(job_id=job.id) + if detailed_job.status not in [ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + ]: + break + print(f"Still training after {i * 10} seconds") + i += 1 + + if detailed_job.status != "SUCCESS": + 
print("Training failed") + raise Exception(f"Job failed {detailed_job.status}") + + print(f"Training succeed: {detailed_job.fine_tuned_model}") + + return detailed_job.fine_tuned_model + + +async def main(): + training_files = ["./examples/fixtures/classifier_sentiments.jsonl"] + client = Mistral( + api_key=os.environ["MISTRAL_API_KEY"], + ) + + training_file_ids: list[str] = await upload_files(client=client, file_names=training_files) + model_name: str | None = await train_classifier(client=client,training_file_ids=training_file_ids) + + if model_name: + print("Calling inference...") + response = client.classifiers.classify( + model=model_name, + inputs=["It's nice", "It's terrible", "Why not"], + ) + print("Inference succeed !") + pprint(response) + + print("Calling inference (Chat)...") + response = client.classifiers.classify_chat( + model=model_name, + inputs={"messages": [{"role": "user", "content": "Lame..."}]}, + ) + print("Inference succeed (Chat)!") + pprint(response) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_files.py b/examples/async_files.py index 64c99484..4dc21542 100644 --- a/examples/async_files.py +++ b/examples/async_files.py @@ -16,7 +16,7 @@ async def main(): created_file = await client.files.upload_async( file=File( file_name="training_file.jsonl", - content=open("examples/file.jsonl", "rb").read(), + content=open("examples/fixtures/ft_training_file.jsonl", "rb").read(), ) ) print(created_file) diff --git a/examples/async_jobs.py b/examples/async_jobs.py index b1f9e3bf..44a58af1 100644 --- a/examples/async_jobs.py +++ b/examples/async_jobs.py @@ -4,7 +4,7 @@ import os from mistralai import Mistral -from mistralai.models import File, TrainingParametersIn +from mistralai.models import File, CompletionTrainingParametersIn async def main(): @@ -13,11 +13,11 @@ async def main(): client = Mistral(api_key=api_key) # Create new files - with open("examples/file.jsonl", "rb") as f: + with 
open("examples/fixtures/ft_training_file.jsonl", "rb") as f: training_file = await client.files.upload_async( file=File(file_name="file.jsonl", content=f) ) - with open("examples/validation_file.jsonl", "rb") as f: + with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: validation_file = await client.files.upload_async( file=File(file_name="validation_file.jsonl", content=f) ) @@ -27,7 +27,7 @@ async def main(): model="open-mistral-7b", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParametersIn( + hyperparameters=CompletionTrainingParametersIn( training_steps=1, learning_rate=0.0001, ), diff --git a/examples/async_jobs_chat.py b/examples/async_jobs_chat.py index 7e0d0577..84327b32 100644 --- a/examples/async_jobs_chat.py +++ b/examples/async_jobs_chat.py @@ -4,7 +4,10 @@ import os from mistralai import Mistral -from mistralai.models import File, TrainingParametersIn +from mistralai.models import ( + File, + CompletionTrainingParametersIn, +) POLLING_INTERVAL = 10 @@ -14,11 +17,11 @@ async def main(): client = Mistral(api_key=api_key) # Create new files - with open("examples/file.jsonl", "rb") as f: + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: training_file = await client.files.upload_async( file=File(file_name="file.jsonl", content=f) ) - with open("examples/validation_file.jsonl", "rb") as f: + with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: validation_file = await client.files.upload_async( file=File(file_name="validation_file.jsonl", content=f) ) @@ -27,22 +30,28 @@ async def main(): model="open-mistral-7b", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParametersIn( - training_steps=1, + hyperparameters=CompletionTrainingParametersIn( + training_steps=2, learning_rate=0.0001, ), ) print(created_job) - while created_job.status in ["RUNNING", 
"QUEUED"]: + while created_job.status in [ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + ]: created_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) print(f"Job is {created_job.status}, waiting {POLLING_INTERVAL} seconds") await asyncio.sleep(POLLING_INTERVAL) - if created_job.status == "FAILED": + if created_job.status != "SUCCESS": print("Job failed") - return - + raise Exception(f"Job failed with {created_job.status}") + print(created_job) # Chat with model response = await client.chat.complete_async( model=created_job.fine_tuned_model, diff --git a/examples/dry_run_job.py b/examples/dry_run_job.py index 3c2a6369..84a2d0ce 100644 --- a/examples/dry_run_job.py +++ b/examples/dry_run_job.py @@ -4,7 +4,7 @@ import os from mistralai import Mistral -from mistralai.models import TrainingParametersIn +from mistralai.models import CompletionTrainingParametersIn async def main(): @@ -13,7 +13,7 @@ async def main(): client = Mistral(api_key=api_key) # Create new files - with open("examples/file.jsonl", "rb") as f: + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: training_file = await client.files.upload_async( file={"file_name": "test-file.jsonl", "content": f} ) @@ -22,7 +22,7 @@ async def main(): dry_run_job = await client.fine_tuning.jobs.create_async( model="open-mistral-7b", training_files=[{"file_id": training_file.id, "weight": 1}], - hyperparameters=TrainingParametersIn( + hyperparameters=CompletionTrainingParametersIn( training_steps=1, learning_rate=0.0001, warmup_fraction=0.01, diff --git a/examples/files.py b/examples/files.py index a10fd031..5dce880b 100644 --- a/examples/files.py +++ b/examples/files.py @@ -15,7 +15,7 @@ def main(): created_file = client.files.upload( file=File( file_name="training_file.jsonl", - content=open("examples/file.jsonl", "rb").read(), + content=open("examples/fixtures/ft_training_file.jsonl", "rb").read(), ) ) print(created_file) diff --git 
a/examples/fixtures/classifier_sentiments.jsonl b/examples/fixtures/classifier_sentiments.jsonl new file mode 100644 index 00000000..e5507198 --- /dev/null +++ b/examples/fixtures/classifier_sentiments.jsonl @@ -0,0 +1,33 @@ +{"text": "I love this product!", "labels": {"sentiment": "positive"}} +{"text": "The game was amazing.", "labels": {"sentiment": "positive"}} +{"text": "The new policy is controversial.", "labels": {"sentiment": "neutral"}} +{"text": "I don't like the new design.", "labels": {"sentiment": "negative"}} +{"text": "The team won the championship.", "labels": {"sentiment": "positive"}} +{"text": "The economy is in a bad shape.", "labels": {"sentiment": "negative"}} +{"text": "The weather is nice today.", "labels": {"sentiment": "positive"}} +{"text": "The match ended in a draw.", "labels": {"sentiment": "neutral"}} +{"text": "The new law will be implemented soon.", "labels": {"sentiment": "neutral"}} +{"text": "I had a great time at the concert.", "labels": {"sentiment": "positive"}} +{"text": "This movie was fantastic!", "labels": {"sentiment": "positive"}} +{"text": "The service was terrible.", "labels": {"sentiment": "negative"}} +{"text": "The food was delicious.", "labels": {"sentiment": "positive"}} +{"text": "I'm not sure about this decision.", "labels": {"sentiment": "neutral"}} +{"text": "The book was boring.", "labels": {"sentiment": "negative"}} +{"text": "The view from the top was breathtaking.", "labels": {"sentiment": "positive"}} +{"text": "The traffic was awful today.", "labels": {"sentiment": "negative"}} +{"text": "The event was well-organized.", "labels": {"sentiment": "positive"}} +{"text": "The meeting went on for too long.", "labels": {"sentiment": "negative"}} +{"text": "The presentation was informative.", "labels": {"sentiment": "positive"}} +{"text": "The new software update is buggy.", "labels": {"sentiment": "negative"}} +{"text": "The concert was sold out.", "labels": {"sentiment": "positive"}} +{"text": "The weather 
forecast is unreliable.", "labels": {"sentiment": "negative"}} +{"text": "The new phone is expensive.", "labels": {"sentiment": "neutral"}} +{"text": "The customer service was excellent.", "labels": {"sentiment": "positive"}} +{"text": "The new restaurant opened today.", "labels": {"sentiment": "neutral"}} +{"text": "The movie had a surprising ending.", "labels": {"sentiment": "positive"}} +{"text": "The project deadline is approaching.", "labels": {"sentiment": "neutral"}} +{"text": "The team is working hard.", "labels": {"sentiment": "positive"}} +{"text": "The new product launch was successful.", "labels": {"sentiment": "positive"}} +{"text": "The conference was insightful.", "labels": {"sentiment": "positive"}} +{"text": "The flight was delayed.", "labels": {"sentiment": "negative"}} +{"text": "The vacation was relaxing.", "labels": {"sentiment": "positive"}} \ No newline at end of file diff --git a/examples/file.jsonl b/examples/fixtures/ft_training_file.jsonl similarity index 100% rename from examples/file.jsonl rename to examples/fixtures/ft_training_file.jsonl diff --git a/examples/validation_file.jsonl b/examples/fixtures/ft_validation_file.jsonl similarity index 100% rename from examples/validation_file.jsonl rename to examples/fixtures/ft_validation_file.jsonl diff --git a/examples/jobs.py b/examples/jobs.py index 246edace..f65fda8e 100644 --- a/examples/jobs.py +++ b/examples/jobs.py @@ -2,7 +2,7 @@ import os from mistralai import Mistral -from mistralai.models import File, TrainingParametersIn +from mistralai.models import File, CompletionTrainingParametersIn def main(): @@ -11,11 +11,11 @@ def main(): client = Mistral(api_key=api_key) # Create new files - with open("examples/file.jsonl", "rb") as f: + with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: training_file = client.files.upload( file=File(file_name="file.jsonl", content=f) ) - with open("examples/validation_file.jsonl", "rb") as f: + with 
open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: validation_file = client.files.upload( file=File(file_name="validation_file.jsonl", content=f) ) @@ -25,7 +25,7 @@ def main(): model="open-mistral-7b", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], - hyperparameters=TrainingParametersIn( + hyperparameters=CompletionTrainingParametersIn( training_steps=1, learning_rate=0.0001, ), diff --git a/pyproject.toml b/pyproject.toml index 6307fc75..2da8b5ea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.6.0" +version = "1.7.0" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 9507529b..7b151c78 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.6.0" +__version__: str = "1.7.0" __openapi_doc_version__: str = "0.0.2" __gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.7.0 2.548.6 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index d85961f3..e5b46f5d 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -23,7 +23,7 @@ def moderate( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: + ) -> models.ModerationResponse: r"""Moderations :param model: ID of the model to use. 
@@ -91,7 +91,7 @@ def moderate( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return utils.unmarshal_json(http_res.text, models.ModerationResponse) if utils.match_response(http_res, "422", "application/json"): response_data = utils.unmarshal_json( http_res.text, models.HTTPValidationErrorData @@ -129,7 +129,7 @@ async def moderate_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: + ) -> models.ModerationResponse: r"""Moderations :param model: ID of the model to use. @@ -197,7 +197,7 @@ async def moderate_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return utils.unmarshal_json(http_res.text, models.ModerationResponse) if utils.match_response(http_res, "422", "application/json"): response_data = utils.unmarshal_json( http_res.text, models.HTTPValidationErrorData @@ -226,22 +226,20 @@ async def moderate_async( def moderate_chat( self, *, - model: str, inputs: Union[ models.ChatModerationRequestInputs, models.ChatModerationRequestInputsTypedDict, ], - truncate_for_context_length: Optional[bool] = False, + model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: + ) -> models.ModerationResponse: r"""Chat Moderations - :param model: :param inputs: Chat to classify - :param truncate_for_context_length: + :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds 
@@ -258,9 +256,8 @@ def moderate_chat( base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( - model=model, inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), - truncate_for_context_length=truncate_for_context_length, + model=model, ) req = self._build_request( @@ -306,7 +303,7 @@ def moderate_chat( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return utils.unmarshal_json(http_res.text, models.ModerationResponse) if utils.match_response(http_res, "422", "application/json"): response_data = utils.unmarshal_json( http_res.text, models.HTTPValidationErrorData @@ -335,22 +332,20 @@ def moderate_chat( async def moderate_chat_async( self, *, - model: str, inputs: Union[ models.ChatModerationRequestInputs, models.ChatModerationRequestInputsTypedDict, ], - truncate_for_context_length: Optional[bool] = False, + model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: + ) -> models.ModerationResponse: r"""Chat Moderations - :param model: :param inputs: Chat to classify - :param truncate_for_context_length: + :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -367,9 +362,8 @@ async def moderate_chat_async( base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( - model=model, inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), - truncate_for_context_length=truncate_for_context_length, + model=model, ) req = self._build_request_async( @@ -413,6 +407,424 @@ async def 
moderate_chat_async( retry_config=retry_config, ) + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ModerationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def classify( + self, + *, + model: str, + inputs: Union[ + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, + ], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Classifications + + :param model: ID of the model to use. + :param inputs: Text to classify. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + inputs=inputs, + ) + + req = self._build_request( + method="POST", + path="/v1/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="classifications_v1_classifications_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error 
occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def classify_async( + self, + *, + model: str, + inputs: Union[ + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, + ], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Classifications + + :param model: ID of the model to use. + :param inputs: Text to classify. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + inputs=inputs, + ) + + req = self._build_request_async( + method="POST", + path="/v1/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="classifications_v1_classifications_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def classify_chat( + self, + *, + model: str, + inputs: Union[models.Inputs, models.InputsTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Chat Classifications + + :param model: + :param inputs: Chat to classify + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatClassificationRequest( + model=model, + inputs=utils.get_pydantic_model(inputs, models.Inputs), + ) + + req = self._build_request( + method="POST", + path="/v1/chat/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="chat_classifications_v1_chat_classifications_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def classify_chat_async( + self, + *, + model: str, + inputs: Union[models.Inputs, models.InputsTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Chat Classifications + + :param model: + :param inputs: Chat to classify + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatClassificationRequest( + model=model, + inputs=utils.get_pydantic_model(inputs, models.Inputs), + ) + + req = self._build_request_async( + method="POST", + path="/v1/chat/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatClassificationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="chat_classifications_v1_chat_classifications_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return utils.unmarshal_json(http_res.text, models.ClassificationResponse) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index b99ff0cf..b81a5e37 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -15,7 +15,9 @@ def create( self, *, model: str, - inputs: Union[models.Inputs, 
models.InputsTypedDict], + inputs: Union[ + models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict + ], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -120,7 +122,9 @@ async def create_async( self, *, model: str, - inputs: Union[models.Inputs, models.InputsTypedDict], + inputs: Union[ + models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict + ], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 675ece0b..76d9f41a 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -17,6 +17,7 @@ def list( page_size: Optional[int] = 100, model: OptionalNullable[str] = UNSET, created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[models.QueryParamStatus] = UNSET, wandb_project: OptionalNullable[str] = UNSET, @@ -35,6 +36,7 @@ def list( :param page_size: The number of items to return per page. :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_before: :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. :param status: The current job state to filter on. When set, the other results are not displayed. :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. 
@@ -60,6 +62,7 @@ def list( page_size=page_size, model=model, created_after=created_after, + created_before=created_before, created_by_me=created_by_me, status=status, wandb_project=wandb_project, @@ -134,6 +137,7 @@ async def list_async( page_size: Optional[int] = 100, model: OptionalNullable[str] = UNSET, created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[models.QueryParamStatus] = UNSET, wandb_project: OptionalNullable[str] = UNSET, @@ -152,6 +156,7 @@ async def list_async( :param page_size: The number of items to return per page. :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_before: :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. :param status: The current job state to filter on. When set, the other results are not displayed. :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. 
@@ -177,6 +182,7 @@ async def list_async( page_size=page_size, model=model, created_after=created_after, + created_before=created_before, created_by_me=created_by_me, status=status, wandb_project=wandb_project, @@ -248,9 +254,7 @@ def create( self, *, model: str, - hyperparameters: Union[ - models.TrainingParametersIn, models.TrainingParametersInTypedDict - ], + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], training_files: Optional[ Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] ] = None, @@ -261,12 +265,20 @@ def create( List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] ] ] = UNSET, - repositories: Optional[ + auto_start: Optional[bool] = None, + invalid_sample_skip_percentage: Optional[float] = 0, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, + repositories: OptionalNullable[ Union[ List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict] ] - ] = None, - auto_start: Optional[bool] = None, + ] = UNSET, + classifier_targets: OptionalNullable[ + Union[ + List[models.ClassifierTargetIn], + List[models.ClassifierTargetInTypedDict], + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -277,13 +289,16 @@ def create( Create a new fine-tuning job, it will be queued for processing. :param model: The name of the model to fine-tune. - :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. + :param hyperparameters: :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. 
:param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` :param integrations: A list of integrations to enable for your fine-tuning job. - :param repositories: :param auto_start: This field will be required in a future release. + :param invalid_sample_skip_percentage: + :param job_type: + :param repositories: + :param classifier_targets: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -305,17 +320,22 @@ def create( training_files, Optional[List[models.TrainingFile]] ), validation_files=validation_files, - hyperparameters=utils.get_pydantic_model( - hyperparameters, models.TrainingParametersIn - ), suffix=suffix, integrations=utils.get_pydantic_model( integrations, OptionalNullable[List[models.JobInIntegrations]] ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), repositories=utils.get_pydantic_model( - repositories, Optional[List[models.JobInRepositories]] + repositories, OptionalNullable[List[models.JobInRepositories]] + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] ), - auto_start=auto_start, ) req = self._build_request( @@ -387,9 +407,7 @@ async def create_async( self, *, model: str, - hyperparameters: Union[ - models.TrainingParametersIn, models.TrainingParametersInTypedDict - ], + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], training_files: Optional[ Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] ] = None, @@ -400,12 +418,20 @@ async def 
create_async( List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] ] ] = UNSET, - repositories: Optional[ + auto_start: Optional[bool] = None, + invalid_sample_skip_percentage: Optional[float] = 0, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, + repositories: OptionalNullable[ Union[ List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict] ] - ] = None, - auto_start: Optional[bool] = None, + ] = UNSET, + classifier_targets: OptionalNullable[ + Union[ + List[models.ClassifierTargetIn], + List[models.ClassifierTargetInTypedDict], + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -416,13 +442,16 @@ async def create_async( Create a new fine-tuning job, it will be queued for processing. :param model: The name of the model to fine-tune. - :param hyperparameters: The fine-tuning hyperparameter settings used in a fine-tune job. + :param hyperparameters: :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` :param integrations: A list of integrations to enable for your fine-tuning job. - :param repositories: :param auto_start: This field will be required in a future release. 
+ :param invalid_sample_skip_percentage: + :param job_type: + :param repositories: + :param classifier_targets: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -444,17 +473,22 @@ async def create_async( training_files, Optional[List[models.TrainingFile]] ), validation_files=validation_files, - hyperparameters=utils.get_pydantic_model( - hyperparameters, models.TrainingParametersIn - ), suffix=suffix, integrations=utils.get_pydantic_model( integrations, OptionalNullable[List[models.JobInIntegrations]] ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), repositories=utils.get_pydantic_model( - repositories, Optional[List[models.JobInRepositories]] + repositories, OptionalNullable[List[models.JobInRepositories]] + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] ), - auto_start=auto_start, ) req = self._build_request_async( @@ -530,7 +564,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. 
@@ -594,7 +628,9 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningGetFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -623,7 +659,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. @@ -687,7 +723,9 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningGetFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -716,7 +754,7 @@ def cancel( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. 
@@ -780,7 +818,9 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -809,7 +849,7 @@ async def cancel_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. @@ -873,7 +913,9 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( @@ -902,7 +944,7 @@ def start( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. 
@@ -966,7 +1008,9 @@ def start( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningStartFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -995,7 +1039,7 @@ async def start_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DetailedJobOut: + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. @@ -1059,7 +1103,9 @@ async def start_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DetailedJobOut) + return utils.unmarshal_json( + http_res.text, models.JobsAPIRoutesFineTuningStartFineTuningJobResponse + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index e0d3c616..32a40aa7 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -19,7 +19,7 @@ def list( metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, - status: OptionalNullable[models.BatchJobStatus] = UNSET, + status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -130,7 +130,7 @@ async def list_async( metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, - status: OptionalNullable[models.BatchJobStatus] = UNSET, + status: 
OptionalNullable[List[models.BatchJobStatus]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 0750906a..d56f5bf8 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -39,6 +39,10 @@ from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict from .batchjobstatus import BatchJobStatus +from .chatclassificationrequest import ( + ChatClassificationRequest, + ChatClassificationRequestTypedDict, +) from .chatcompletionchoice import ( ChatCompletionChoice, ChatCompletionChoiceTypedDict, @@ -79,7 +83,6 @@ TwoTypedDict, ) from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .classificationobject import ClassificationObject, ClassificationObjectTypedDict from .classificationrequest import ( ClassificationRequest, ClassificationRequestInputs, @@ -90,13 +93,87 @@ ClassificationResponse, ClassificationResponseTypedDict, ) +from .classificationtargetresult import ( + ClassificationTargetResult, + ClassificationTargetResultTypedDict, +) +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutIntegrations, + ClassifierDetailedJobOutIntegrationsTypedDict, + ClassifierDetailedJobOutJobType, + ClassifierDetailedJobOutObject, + ClassifierDetailedJobOutStatus, + ClassifierDetailedJobOutTypedDict, +) +from .classifierftmodelout import ( + ClassifierFTModelOut, + ClassifierFTModelOutModelType, + ClassifierFTModelOutObject, + ClassifierFTModelOutTypedDict, +) +from .classifierjobout import ( + ClassifierJobOut, + ClassifierJobOutIntegrations, + ClassifierJobOutIntegrationsTypedDict, + ClassifierJobOutJobType, + ClassifierJobOutObject, + ClassifierJobOutStatus, + ClassifierJobOutTypedDict, +) +from .classifiertargetin import 
ClassifierTargetIn, ClassifierTargetInTypedDict +from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .classifiertrainingparametersin import ( + ClassifierTrainingParametersIn, + ClassifierTrainingParametersInTypedDict, +) from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutIntegrations, + CompletionDetailedJobOutIntegrationsTypedDict, + CompletionDetailedJobOutJobType, + CompletionDetailedJobOutObject, + CompletionDetailedJobOutRepositories, + CompletionDetailedJobOutRepositoriesTypedDict, + CompletionDetailedJobOutStatus, + CompletionDetailedJobOutTypedDict, +) from .completionevent import CompletionEvent, CompletionEventTypedDict +from .completionftmodelout import ( + CompletionFTModelOut, + CompletionFTModelOutObject, + CompletionFTModelOutTypedDict, + ModelType, +) +from .completionjobout import ( + CompletionJobOut, + CompletionJobOutTypedDict, + Integrations, + IntegrationsTypedDict, + JobType, + Object, + Repositories, + RepositoriesTypedDict, + Status, +) from .completionresponsestreamchoice import ( CompletionResponseStreamChoice, CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict, ) +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .completiontrainingparametersin import ( + CompletionTrainingParametersIn, + CompletionTrainingParametersInTypedDict, +) from .contentchunk import ContentChunk, ContentChunkTypedDict from .delete_model_v1_models_model_id_deleteop import ( DeleteModelV1ModelsModelIDDeleteRequest, @@ -105,16 +182,6 @@ from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict from .deltamessage 
import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict -from .detailedjobout import ( - DetailedJobOut, - DetailedJobOutIntegrations, - DetailedJobOutIntegrationsTypedDict, - DetailedJobOutObject, - DetailedJobOutRepositories, - DetailedJobOutRepositoriesTypedDict, - DetailedJobOutStatus, - DetailedJobOutTypedDict, -) from .documenturlchunk import ( DocumentURLChunk, DocumentURLChunkType, @@ -122,9 +189,9 @@ ) from .embeddingrequest import ( EmbeddingRequest, + EmbeddingRequestInputs, + EmbeddingRequestInputsTypedDict, EmbeddingRequestTypedDict, - Inputs, - InputsTypedDict, ) from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict @@ -171,12 +238,13 @@ FIMCompletionStreamRequestStopTypedDict, FIMCompletionStreamRequestTypedDict, ) +from .finetuneablemodeltype import FineTuneableModelType +from .ftclassifierlossfunction import FTClassifierLossFunction from .ftmodelcapabilitiesout import ( FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict, ) from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict -from .ftmodelout import FTModelOut, FTModelOutObject, FTModelOutTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( Arguments, @@ -204,7 +272,23 @@ ImageURLChunkType, ImageURLChunkTypedDict, ) +from .inputs import ( + Inputs, + InputsTypedDict, + InstructRequestInputs, + InstructRequestInputsMessages, + InstructRequestInputsMessagesTypedDict, + InstructRequestInputsTypedDict, +) +from .instructrequest import ( + InstructRequest, + InstructRequestMessages, + InstructRequestMessagesTypedDict, + InstructRequestTypedDict, +) from .jobin import ( + Hyperparameters, + HyperparametersTypedDict, JobIn, JobInIntegrations, JobInIntegrationsTypedDict, @@ -213,16 +297,6 @@ JobInTypedDict, ) from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .jobout import ( - Integrations, - 
IntegrationsTypedDict, - JobOut, - JobOutTypedDict, - Object, - Repositories, - RepositoriesTypedDict, - Status, -) from .jobs_api_routes_batch_cancel_batch_jobop import ( JobsAPIRoutesBatchCancelBatchJobRequest, JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, @@ -242,14 +316,20 @@ from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( JobsAPIRoutesFineTuningCancelFineTuningJobRequest, JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningCancelFineTuningJobResponse, + JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, ) from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( JobsAPIRoutesFineTuningCreateFineTuningJobResponse, JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, + Response1, + Response1TypedDict, ) from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( JobsAPIRoutesFineTuningGetFineTuningJobRequest, JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobResponse, + JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, ) from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( JobsAPIRoutesFineTuningGetFineTuningJobsRequest, @@ -259,6 +339,8 @@ from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( JobsAPIRoutesFineTuningStartFineTuningJobRequest, JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningStartFineTuningJobResponse, + JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, ) from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, @@ -267,8 +349,16 @@ from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, +) +from .jobsout import ( + JobsOut, 
+ JobsOutData, + JobsOutDataTypedDict, + JobsOutObject, + JobsOutTypedDict, ) -from .jobsout import JobsOut, JobsOutObject, JobsOutTypedDict from .jsonschema import JSONSchema, JSONSchemaTypedDict from .legacyjobmetadataout import ( LegacyJobMetadataOut, @@ -279,6 +369,8 @@ from .metricout import MetricOut, MetricOutTypedDict from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict +from .moderationobject import ModerationObject, ModerationObjectTypedDict +from .moderationresponse import ModerationResponse, ModerationResponseTypedDict from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict @@ -321,8 +413,6 @@ ) from .tooltypes import ToolTypes from .trainingfile import TrainingFile, TrainingFileTypedDict -from .trainingparameters import TrainingParameters, TrainingParametersTypedDict -from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict from .unarchiveftmodelout import ( UnarchiveFTModelOut, UnarchiveFTModelOutObject, @@ -397,6 +487,8 @@ "BatchJobsOut", "BatchJobsOutObject", "BatchJobsOutTypedDict", + "ChatClassificationRequest", + "ChatClassificationRequestTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", @@ -419,21 +511,65 @@ "ChatModerationRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", - "ClassificationObject", - "ClassificationObjectTypedDict", "ClassificationRequest", "ClassificationRequestInputs", "ClassificationRequestInputsTypedDict", "ClassificationRequestTypedDict", "ClassificationResponse", "ClassificationResponseTypedDict", + "ClassificationTargetResult", + "ClassificationTargetResultTypedDict", + "ClassifierDetailedJobOut", + "ClassifierDetailedJobOutIntegrations", + "ClassifierDetailedJobOutIntegrationsTypedDict", + 
"ClassifierDetailedJobOutJobType", + "ClassifierDetailedJobOutObject", + "ClassifierDetailedJobOutStatus", + "ClassifierDetailedJobOutTypedDict", + "ClassifierFTModelOut", + "ClassifierFTModelOutModelType", + "ClassifierFTModelOutObject", + "ClassifierFTModelOutTypedDict", + "ClassifierJobOut", + "ClassifierJobOutIntegrations", + "ClassifierJobOutIntegrationsTypedDict", + "ClassifierJobOutJobType", + "ClassifierJobOutObject", + "ClassifierJobOutStatus", + "ClassifierJobOutTypedDict", + "ClassifierTargetIn", + "ClassifierTargetInTypedDict", + "ClassifierTargetOut", + "ClassifierTargetOutTypedDict", + "ClassifierTrainingParameters", + "ClassifierTrainingParametersIn", + "ClassifierTrainingParametersInTypedDict", + "ClassifierTrainingParametersTypedDict", "CompletionChunk", "CompletionChunkTypedDict", + "CompletionDetailedJobOut", + "CompletionDetailedJobOutIntegrations", + "CompletionDetailedJobOutIntegrationsTypedDict", + "CompletionDetailedJobOutJobType", + "CompletionDetailedJobOutObject", + "CompletionDetailedJobOutRepositories", + "CompletionDetailedJobOutRepositoriesTypedDict", + "CompletionDetailedJobOutStatus", + "CompletionDetailedJobOutTypedDict", "CompletionEvent", "CompletionEventTypedDict", + "CompletionFTModelOut", + "CompletionFTModelOutObject", + "CompletionFTModelOutTypedDict", + "CompletionJobOut", + "CompletionJobOutTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", + "CompletionTrainingParameters", + "CompletionTrainingParametersIn", + "CompletionTrainingParametersInTypedDict", + "CompletionTrainingParametersTypedDict", "Content", "ContentChunk", "ContentChunkTypedDict", @@ -448,20 +584,14 @@ "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageTypedDict", - "DetailedJobOut", - "DetailedJobOutIntegrations", - "DetailedJobOutIntegrationsTypedDict", - "DetailedJobOutObject", - "DetailedJobOutRepositories", - 
"DetailedJobOutRepositoriesTypedDict", - "DetailedJobOutStatus", - "DetailedJobOutTypedDict", "Document", "DocumentTypedDict", "DocumentURLChunk", "DocumentURLChunkType", "DocumentURLChunkTypedDict", "EmbeddingRequest", + "EmbeddingRequestInputs", + "EmbeddingRequestInputsTypedDict", "EmbeddingRequestTypedDict", "EmbeddingResponse", "EmbeddingResponseData", @@ -479,14 +609,12 @@ "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", + "FTClassifierLossFunction", "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelCard", "FTModelCardType", "FTModelCardTypedDict", - "FTModelOut", - "FTModelOutObject", - "FTModelOutTypedDict", "File", "FilePurpose", "FileSchema", @@ -506,6 +634,7 @@ "FilesAPIRoutesRetrieveFileRequestTypedDict", "FilesAPIRoutesUploadFileMultiPartBodyParams", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", + "FineTuneableModelType", "FinishReason", "Function", "FunctionCall", @@ -521,6 +650,8 @@ "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "Hyperparameters", + "HyperparametersTypedDict", "ImageURL", "ImageURLChunk", "ImageURLChunkImageURL", @@ -530,6 +661,14 @@ "ImageURLTypedDict", "Inputs", "InputsTypedDict", + "InstructRequest", + "InstructRequestInputs", + "InstructRequestInputsMessages", + "InstructRequestInputsMessagesTypedDict", + "InstructRequestInputsTypedDict", + "InstructRequestMessages", + "InstructRequestMessagesTypedDict", + "InstructRequestTypedDict", "Integrations", "IntegrationsTypedDict", "JSONSchema", @@ -542,8 +681,7 @@ "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", - "JobOut", - "JobOutTypedDict", + "JobType", "JobsAPIRoutesBatchCancelBatchJobRequest", "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", "JobsAPIRoutesBatchGetBatchJobRequest", @@ -554,19 +692,29 @@ "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", 
"JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobRequest", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", "JobsOut", + "JobsOutData", + "JobsOutDataTypedDict", "JobsOutObject", "JobsOutTypedDict", "LegacyJobMetadataOut", @@ -584,6 +732,11 @@ "ModelCapabilitiesTypedDict", "ModelList", "ModelListTypedDict", + "ModelType", + "ModerationObject", + "ModerationObjectTypedDict", + "ModerationResponse", + "ModerationResponseTypedDict", "OCRImageObject", "OCRImageObjectTypedDict", "OCRPageDimensions", @@ -607,6 +760,8 @@ "ReferenceChunkTypedDict", "Repositories", "RepositoriesTypedDict", + "Response1", + "Response1TypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", @@ -647,10 +802,6 @@ "ToolTypes", "TrainingFile", "TrainingFileTypedDict", - "TrainingParameters", - "TrainingParametersIn", - "TrainingParametersInTypedDict", - 
"TrainingParametersTypedDict", "Two", "TwoTypedDict", "Type", diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py index e78e98c4..cff27c4e 100644 --- a/src/mistralai/models/archiveftmodelout.py +++ b/src/mistralai/models/archiveftmodelout.py @@ -2,11 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict ArchiveFTModelOutObject = Literal["model"] @@ -14,18 +11,13 @@ class ArchiveFTModelOutTypedDict(TypedDict): id: str - object: ArchiveFTModelOutObject + object: NotRequired[ArchiveFTModelOutObject] archived: NotRequired[bool] class ArchiveFTModelOut(BaseModel): id: str - OBJECT: Annotated[ - Annotated[ - Optional[ArchiveFTModelOutObject], AfterValidator(validate_const("model")) - ], - pydantic.Field(alias="object"), - ] = "model" + object: Optional[ArchiveFTModelOutObject] = "model" archived: Optional[bool] = True diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py index 677284f2..bf873f41 100644 --- a/src/mistralai/models/batchjobout.py +++ b/src/mistralai/models/batchjobout.py @@ -4,12 +4,9 @@ from .batcherror import BatchError, BatchErrorTypedDict from .batchjobstatus import BatchJobStatus from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict BatchJobOutObject = Literal["batch"] @@ -27,7 +24,7 @@ class 
BatchJobOutTypedDict(TypedDict): completed_requests: int succeeded_requests: int failed_requests: int - object: BatchJobOutObject + object: NotRequired[BatchJobOutObject] metadata: NotRequired[Nullable[Dict[str, Any]]] output_file: NotRequired[Nullable[str]] error_file: NotRequired[Nullable[str]] @@ -58,10 +55,7 @@ class BatchJobOut(BaseModel): failed_requests: int - OBJECT: Annotated[ - Annotated[Optional[BatchJobOutObject], AfterValidator(validate_const("batch"))], - pydantic.Field(alias="object"), - ] = "batch" + object: Optional[BatchJobOutObject] = "batch" metadata: OptionalNullable[Dict[str, Any]] = UNSET diff --git a/src/mistralai/models/batchjobsout.py b/src/mistralai/models/batchjobsout.py index f8c63a33..8ce26f31 100644 --- a/src/mistralai/models/batchjobsout.py +++ b/src/mistralai/models/batchjobsout.py @@ -3,11 +3,8 @@ from __future__ import annotations from .batchjobout import BatchJobOut, BatchJobOutTypedDict from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict BatchJobsOutObject = Literal["list"] @@ -16,7 +13,7 @@ class BatchJobsOutTypedDict(TypedDict): total: int data: NotRequired[List[BatchJobOutTypedDict]] - object: BatchJobsOutObject + object: NotRequired[BatchJobsOutObject] class BatchJobsOut(BaseModel): @@ -24,7 +21,4 @@ class BatchJobsOut(BaseModel): data: Optional[List[BatchJobOut]] = None - OBJECT: Annotated[ - Annotated[Optional[BatchJobsOutObject], AfterValidator(validate_const("list"))], - pydantic.Field(alias="object"), - ] = "list" + object: Optional[BatchJobsOutObject] = "list" diff --git a/src/mistralai/models/chatclassificationrequest.py b/src/mistralai/models/chatclassificationrequest.py new file mode 100644 index 00000000..f06f4f34 --- /dev/null +++ 
b/src/mistralai/models/chatclassificationrequest.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .inputs import Inputs, InputsTypedDict +from mistralai.types import BaseModel +import pydantic +from typing_extensions import Annotated, TypedDict + + +class ChatClassificationRequestTypedDict(TypedDict): + model: str + inputs: InputsTypedDict + r"""Chat to classify""" + + +class ChatClassificationRequest(BaseModel): + model: str + + inputs: Annotated[Inputs, pydantic.Field(alias="input")] + r"""Chat to classify""" diff --git a/src/mistralai/models/chatmoderationrequest.py b/src/mistralai/models/chatmoderationrequest.py index 5b25b877..2f58d52f 100644 --- a/src/mistralai/models/chatmoderationrequest.py +++ b/src/mistralai/models/chatmoderationrequest.py @@ -9,8 +9,8 @@ from mistralai.utils import get_discriminator import pydantic from pydantic import Discriminator, Tag -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict TwoTypedDict = TypeAliasType( @@ -71,16 +71,13 @@ class ChatModerationRequestTypedDict(TypedDict): - model: str inputs: ChatModerationRequestInputsTypedDict r"""Chat to classify""" - truncate_for_context_length: NotRequired[bool] + model: str class ChatModerationRequest(BaseModel): - model: str - inputs: Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")] r"""Chat to classify""" - truncate_for_context_length: Optional[bool] = False + model: str diff --git a/src/mistralai/models/classificationresponse.py b/src/mistralai/models/classificationresponse.py index 5716db42..b7741f37 100644 --- a/src/mistralai/models/classificationresponse.py +++ b/src/mistralai/models/classificationresponse.py @@ -1,21 +1,24 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .classificationobject import ClassificationObject, ClassificationObjectTypedDict +from .classificationtargetresult import ( + ClassificationTargetResult, + ClassificationTargetResultTypedDict, +) from mistralai.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Dict, List +from typing_extensions import TypedDict class ClassificationResponseTypedDict(TypedDict): - id: NotRequired[str] - model: NotRequired[str] - results: NotRequired[List[ClassificationObjectTypedDict]] + id: str + model: str + results: List[Dict[str, ClassificationTargetResultTypedDict]] class ClassificationResponse(BaseModel): - id: Optional[str] = None + id: str - model: Optional[str] = None + model: str - results: Optional[List[ClassificationObject]] = None + results: List[Dict[str, ClassificationTargetResult]] diff --git a/src/mistralai/models/classificationtargetresult.py b/src/mistralai/models/classificationtargetresult.py new file mode 100644 index 00000000..60c5a51b --- /dev/null +++ b/src/mistralai/models/classificationtargetresult.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Dict +from typing_extensions import TypedDict + + +class ClassificationTargetResultTypedDict(TypedDict): + scores: Dict[str, float] + + +class ClassificationTargetResult(BaseModel): + scores: Dict[str, float] diff --git a/src/mistralai/models/classifierdetailedjobout.py b/src/mistralai/models/classifierdetailedjobout.py new file mode 100644 index 00000000..971d529f --- /dev/null +++ b/src/mistralai/models/classifierdetailedjobout.py @@ -0,0 +1,156 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .eventout import EventOut, EventOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierDetailedJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] + +ClassifierDetailedJobOutObject = Literal["job"] + +ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict + + +ClassifierDetailedJobOutIntegrations = WandbIntegrationOut + + +ClassifierDetailedJobOutJobType = Literal["classifier"] + + +class ClassifierDetailedJobOutTypedDict(TypedDict): + id: str + auto_start: bool + model: str + r"""The 
name of the model to fine-tune.""" + status: ClassifierDetailedJobOutStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: ClassifierTrainingParametersTypedDict + classifier_targets: List[ClassifierTargetOutTypedDict] + validation_files: NotRequired[Nullable[List[str]]] + object: NotRequired[ClassifierDetailedJobOutObject] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[ClassifierDetailedJobOutIntegrationsTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[ClassifierDetailedJobOutJobType] + events: NotRequired[List[EventOutTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointOutTypedDict]] + + +class ClassifierDetailedJobOut(BaseModel): + id: str + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: ClassifierDetailedJobOutStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: ClassifierTrainingParameters + + classifier_targets: List[ClassifierTargetOut] + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Optional[ClassifierDetailedJobOutObject] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegrations]] = UNSET + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[ClassifierDetailedJobOutJobType] = "classifier" + + events: Optional[List[EventOut]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[CheckpointOut]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + "events", + "checkpoints", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classifierftmodelout.py b/src/mistralai/models/classifierftmodelout.py new file mode 100644 index 00000000..846a20a2 --- /dev/null +++ b/src/mistralai/models/classifierftmodelout.py @@ -0,0 +1,101 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict +from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, +) +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierFTModelOutObject = Literal["model"] + +ClassifierFTModelOutModelType = Literal["classifier"] + + +class ClassifierFTModelOutTypedDict(TypedDict): + id: str + created: int + owned_by: str + root: str + archived: bool + capabilities: FTModelCapabilitiesOutTypedDict + job: str + classifier_targets: List[ClassifierTargetOutTypedDict] + object: NotRequired[ClassifierFTModelOutObject] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + model_type: NotRequired[ClassifierFTModelOutModelType] + + +class ClassifierFTModelOut(BaseModel): + id: str + + created: int + + owned_by: str + + root: str + + archived: bool + + capabilities: FTModelCapabilitiesOut + + job: str + + classifier_targets: List[ClassifierTargetOut] + + object: Optional[ClassifierFTModelOutObject] = "model" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + model_type: Optional[ClassifierFTModelOutModelType] = "classifier" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "name", + "description", + "max_context_length", + "aliases", + "model_type", + ] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + 
serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classifierjobout.py b/src/mistralai/models/classifierjobout.py new file mode 100644 index 00000000..66011b4a --- /dev/null +++ b/src/mistralai/models/classifierjobout.py @@ -0,0 +1,165 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] +r"""The current status of the fine-tuning job.""" + +ClassifierJobOutObject = Literal["job"] +r"""The object type of the fine-tuning job.""" + +ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict + + +ClassifierJobOutIntegrations = WandbIntegrationOut + + +ClassifierJobOutJobType = Literal["classifier"] +r"""The type of job (`FT` for fine-tuning).""" + + +class ClassifierJobOutTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + model: str + r"""The name of the 
model to fine-tune.""" + status: ClassifierJobOutStatus + r"""The current status of the fine-tuning job.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: ClassifierTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data.""" + object: NotRequired[ClassifierJobOutObject] + r"""The object type of the fine-tuning job.""" + fine_tuned_model: NotRequired[Nullable[str]] + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationsTypedDict]]] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[ClassifierJobOutJobType] + r"""The type of job (`FT` for fine-tuning).""" + + +class ClassifierJobOut(BaseModel): + id: str + r"""The ID of the job.""" + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: ClassifierJobOutStatus + r"""The current status of the fine-tuning job.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + + hyperparameters: ClassifierTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + + object: Optional[ClassifierJobOutObject] = "job" + r"""The object type of the fine-tuning job.""" + + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + integrations: OptionalNullable[List[ClassifierJobOutIntegrations]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[ClassifierJobOutJobType] = "classifier" + r"""The type of job (`FT` for fine-tuning).""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classifiertargetin.py b/src/mistralai/models/classifiertargetin.py new file mode 100644 index 00000000..c9e4b406 --- /dev/null +++ b/src/mistralai/models/classifiertargetin.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTargetInTypedDict(TypedDict): + name: str + labels: List[str] + weight: NotRequired[float] + loss_function: NotRequired[Nullable[FTClassifierLossFunction]] + + +class ClassifierTargetIn(BaseModel): + name: str + + labels: List[str] + + weight: Optional[float] = 1 + + loss_function: OptionalNullable[FTClassifierLossFunction] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["weight", "loss_function"] + nullable_fields = ["loss_function"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classifiertargetout.py b/src/mistralai/models/classifiertargetout.py new file mode 100644 index 00000000..ddc587f4 --- /dev/null +++ b/src/mistralai/models/classifiertargetout.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ClassifierTargetOutTypedDict(TypedDict): + name: str + labels: List[str] + weight: float + loss_function: FTClassifierLossFunction + + +class ClassifierTargetOut(BaseModel): + name: str + + labels: List[str] + + weight: float + + loss_function: FTClassifierLossFunction diff --git a/src/mistralai/models/classifiertrainingparameters.py b/src/mistralai/models/classifiertrainingparameters.py new file mode 100644 index 00000000..f0908e81 --- /dev/null +++ b/src/mistralai/models/classifiertrainingparameters.py @@ -0,0 +1,73 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + weight_decay: NotRequired[Nullable[float]] + warmup_fraction: NotRequired[Nullable[float]] + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + + +class ClassifierTrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + + learning_rate: Optional[float] = 0.0001 + + weight_decay: OptionalNullable[float] = UNSET + + warmup_fraction: OptionalNullable[float] = UNSET + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + nullable_fields = [ + 
"training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classifiertrainingparametersin.py b/src/mistralai/models/classifiertrainingparametersin.py new file mode 100644 index 00000000..f1f16cfb --- /dev/null +++ b/src/mistralai/models/classifiertrainingparametersin.py @@ -0,0 +1,85 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTrainingParametersInTypedDict(TypedDict): + r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" + + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: NotRequired[float] + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + weight_decay: NotRequired[Nullable[float]] + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" + warmup_fraction: NotRequired[Nullable[float]] + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + + +class ClassifierTrainingParametersIn(BaseModel): + r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + + learning_rate: Optional[float] = 0.0001 + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + + weight_decay: OptionalNullable[float] = UNSET + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. 
This term reduces the magnitude of the weights and prevents them from growing too large.""" + + warmup_fraction: OptionalNullable[float] = UNSET + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/detailedjobout.py b/src/mistralai/models/completiondetailedjobout.py similarity index 69% rename from src/mistralai/models/detailedjobout.py rename to src/mistralai/models/completiondetailedjobout.py index b2a1c8d9..598a5e20 100644 --- a/src/mistralai/models/detailedjobout.py +++ b/src/mistralai/models/completiondetailedjobout.py @@ -2,21 +2,21 @@ from __future__ import annotations from 
.checkpointout import CheckpointOut, CheckpointOutTypedDict +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) from .eventout import EventOut, EventOutTypedDict from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .trainingparameters import TrainingParameters, TrainingParametersTypedDict from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict -DetailedJobOutStatus = Literal[ +CompletionDetailedJobOutStatus = Literal[ "QUEUED", "STARTED", "VALIDATING", @@ -29,57 +29,57 @@ "CANCELLATION_REQUESTED", ] -DetailedJobOutObject = Literal["job"] +CompletionDetailedJobOutObject = Literal["job"] -DetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict +CompletionDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict -DetailedJobOutIntegrations = WandbIntegrationOut +CompletionDetailedJobOutIntegrations = WandbIntegrationOut -DetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict +CompletionDetailedJobOutJobType = Literal["completion"] +CompletionDetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict -DetailedJobOutRepositories = GithubRepositoryOut +CompletionDetailedJobOutRepositories = GithubRepositoryOut -class DetailedJobOutTypedDict(TypedDict): + +class CompletionDetailedJobOutTypedDict(TypedDict): id: str auto_start: bool - hyperparameters: TrainingParametersTypedDict model: str r"""The name of the model to fine-tune.""" - 
status: DetailedJobOutStatus - job_type: str + status: CompletionDetailedJobOutStatus created_at: int modified_at: int training_files: List[str] + hyperparameters: CompletionTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] - object: DetailedJobOutObject + object: NotRequired[CompletionDetailedJobOutObject] fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] - integrations: NotRequired[Nullable[List[DetailedJobOutIntegrationsTypedDict]]] + integrations: NotRequired[ + Nullable[List[CompletionDetailedJobOutIntegrationsTypedDict]] + ] trained_tokens: NotRequired[Nullable[int]] - repositories: NotRequired[List[DetailedJobOutRepositoriesTypedDict]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[CompletionDetailedJobOutJobType] + repositories: NotRequired[List[CompletionDetailedJobOutRepositoriesTypedDict]] events: NotRequired[List[EventOutTypedDict]] r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" checkpoints: NotRequired[List[CheckpointOutTypedDict]] -class DetailedJobOut(BaseModel): +class CompletionDetailedJobOut(BaseModel): id: str auto_start: bool - hyperparameters: TrainingParameters - model: str r"""The name of the model to fine-tune.""" - status: DetailedJobOutStatus - - job_type: str + status: CompletionDetailedJobOutStatus created_at: int @@ -87,27 +87,26 @@ class DetailedJobOut(BaseModel): training_files: List[str] + hyperparameters: CompletionTrainingParameters + validation_files: OptionalNullable[List[str]] = UNSET - OBJECT: Annotated[ - Annotated[ - Optional[DetailedJobOutObject], AfterValidator(validate_const("job")) - ], - pydantic.Field(alias="object"), - ] = "job" + object: Optional[CompletionDetailedJobOutObject] = "job" fine_tuned_model: OptionalNullable[str] = UNSET suffix: OptionalNullable[str] = UNSET - integrations: OptionalNullable[List[DetailedJobOutIntegrations]] = UNSET + integrations: OptionalNullable[List[CompletionDetailedJobOutIntegrations]] = UNSET trained_tokens: OptionalNullable[int] = UNSET - repositories: Optional[List[DetailedJobOutRepositories]] = None - metadata: OptionalNullable[JobMetadataOut] = UNSET + job_type: Optional[CompletionDetailedJobOutJobType] = "completion" + + repositories: Optional[List[CompletionDetailedJobOutRepositories]] = None + events: Optional[List[EventOut]] = None r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" @@ -122,8 +121,9 @@ def serialize_model(self, handler): "suffix", "integrations", "trained_tokens", - "repositories", "metadata", + "job_type", + "repositories", "events", "checkpoints", ] diff --git a/src/mistralai/models/ftmodelout.py b/src/mistralai/models/completionftmodelout.py similarity index 81% rename from src/mistralai/models/ftmodelout.py rename to src/mistralai/models/completionftmodelout.py index e8d6864c..71ab1a45 100644 --- a/src/mistralai/models/ftmodelout.py +++ b/src/mistralai/models/completionftmodelout.py @@ -6,18 +6,17 @@ FTModelCapabilitiesOutTypedDict, ) from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict -FTModelOutObject = Literal["model"] +CompletionFTModelOutObject = Literal["model"] +ModelType = Literal["completion"] -class FTModelOutTypedDict(TypedDict): + +class CompletionFTModelOutTypedDict(TypedDict): id: str created: int owned_by: str @@ -25,14 +24,15 @@ class FTModelOutTypedDict(TypedDict): archived: bool capabilities: FTModelCapabilitiesOutTypedDict job: str - object: FTModelOutObject + object: NotRequired[CompletionFTModelOutObject] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] aliases: NotRequired[List[str]] + model_type: NotRequired[ModelType] -class FTModelOut(BaseModel): +class CompletionFTModelOut(BaseModel): id: str created: int @@ -47,10 +47,7 @@ class FTModelOut(BaseModel): job: str - OBJECT: Annotated[ - Annotated[Optional[FTModelOutObject], AfterValidator(validate_const("model"))], - pydantic.Field(alias="object"), - ] = "model" + object: 
Optional[CompletionFTModelOutObject] = "model" name: OptionalNullable[str] = UNSET @@ -60,6 +57,8 @@ class FTModelOut(BaseModel): aliases: Optional[List[str]] = None + model_type: Optional[ModelType] = "completion" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -68,6 +67,7 @@ def serialize_model(self, handler): "description", "max_context_length", "aliases", + "model_type", ] nullable_fields = ["name", "description"] null_default_fields = [] diff --git a/src/mistralai/models/jobout.py b/src/mistralai/models/completionjobout.py similarity index 89% rename from src/mistralai/models/jobout.py rename to src/mistralai/models/completionjobout.py index c3ffb248..7f8bfd91 100644 --- a/src/mistralai/models/jobout.py +++ b/src/mistralai/models/completionjobout.py @@ -1,17 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .trainingparameters import TrainingParameters, TrainingParametersTypedDict from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict Status = Literal[ @@ -37,32 +37,33 @@ Integrations = WandbIntegrationOut +JobType = Literal["completion"] +r"""The type of job (`FT` for fine-tuning).""" + RepositoriesTypedDict 
= GithubRepositoryOutTypedDict Repositories = GithubRepositoryOut -class JobOutTypedDict(TypedDict): +class CompletionJobOutTypedDict(TypedDict): id: str r"""The ID of the job.""" auto_start: bool - hyperparameters: TrainingParametersTypedDict model: str r"""The name of the model to fine-tune.""" status: Status r"""The current status of the fine-tuning job.""" - job_type: str - r"""The type of job (`FT` for fine-tuning).""" created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" modified_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" training_files: List[str] r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: CompletionTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain validation data.""" - object: Object + object: NotRequired[Object] r"""The object type of the fine-tuning job.""" fine_tuned_model: NotRequired[Nullable[str]] r"""The name of the fine-tuned model that is being created. 
The value will be `null` if the fine-tuning job is still running.""" @@ -72,27 +73,24 @@ class JobOutTypedDict(TypedDict): r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: NotRequired[Nullable[int]] r"""Total number of tokens trained.""" - repositories: NotRequired[List[RepositoriesTypedDict]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[JobType] + r"""The type of job (`FT` for fine-tuning).""" + repositories: NotRequired[List[RepositoriesTypedDict]] -class JobOut(BaseModel): +class CompletionJobOut(BaseModel): id: str r"""The ID of the job.""" auto_start: bool - hyperparameters: TrainingParameters - model: str r"""The name of the model to fine-tune.""" status: Status r"""The current status of the fine-tuning job.""" - job_type: str - r"""The type of job (`FT` for fine-tuning).""" - created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" @@ -102,13 +100,12 @@ class JobOut(BaseModel): training_files: List[str] r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: CompletionTrainingParameters + validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - OBJECT: Annotated[ - Annotated[Optional[Object], AfterValidator(validate_const("job"))], - pydantic.Field(alias="object"), - ] = "job" + object: Optional[Object] = "job" r"""The object type of the fine-tuning job.""" fine_tuned_model: OptionalNullable[str] = UNSET @@ -123,10 +120,13 @@ class JobOut(BaseModel): trained_tokens: OptionalNullable[int] = UNSET r"""Total number of tokens trained.""" - repositories: Optional[List[Repositories]] = None - metadata: OptionalNullable[JobMetadataOut] = UNSET + job_type: Optional[JobType] = "completion" + r"""The type of job (`FT` for fine-tuning).""" + + repositories: Optional[List[Repositories]] = None + @model_serializer(mode="wrap") def 
serialize_model(self, handler): optional_fields = [ @@ -136,8 +136,9 @@ def serialize_model(self, handler): "suffix", "integrations", "trained_tokens", - "repositories", "metadata", + "job_type", + "repositories", ] nullable_fields = [ "validation_files", diff --git a/src/mistralai/models/trainingparameters.py b/src/mistralai/models/completiontrainingparameters.py similarity index 95% rename from src/mistralai/models/trainingparameters.py rename to src/mistralai/models/completiontrainingparameters.py index cc2b037a..33b21ec9 100644 --- a/src/mistralai/models/trainingparameters.py +++ b/src/mistralai/models/completiontrainingparameters.py @@ -7,17 +7,17 @@ from typing_extensions import NotRequired, TypedDict -class TrainingParametersTypedDict(TypedDict): +class CompletionTrainingParametersTypedDict(TypedDict): training_steps: NotRequired[Nullable[int]] learning_rate: NotRequired[float] weight_decay: NotRequired[Nullable[float]] warmup_fraction: NotRequired[Nullable[float]] epochs: NotRequired[Nullable[float]] - fim_ratio: NotRequired[Nullable[float]] seq_len: NotRequired[Nullable[int]] + fim_ratio: NotRequired[Nullable[float]] -class TrainingParameters(BaseModel): +class CompletionTrainingParameters(BaseModel): training_steps: OptionalNullable[int] = UNSET learning_rate: Optional[float] = 0.0001 @@ -28,10 +28,10 @@ class TrainingParameters(BaseModel): epochs: OptionalNullable[float] = UNSET - fim_ratio: OptionalNullable[float] = UNSET - seq_len: OptionalNullable[int] = UNSET + fim_ratio: OptionalNullable[float] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -40,16 +40,16 @@ def serialize_model(self, handler): "weight_decay", "warmup_fraction", "epochs", - "fim_ratio", "seq_len", + "fim_ratio", ] nullable_fields = [ "training_steps", "weight_decay", "warmup_fraction", "epochs", - "fim_ratio", "seq_len", + "fim_ratio", ] null_default_fields = [] diff --git a/src/mistralai/models/trainingparametersin.py 
b/src/mistralai/models/completiontrainingparametersin.py similarity index 97% rename from src/mistralai/models/trainingparametersin.py rename to src/mistralai/models/completiontrainingparametersin.py index 7d2e414b..92f8d99a 100644 --- a/src/mistralai/models/trainingparametersin.py +++ b/src/mistralai/models/completiontrainingparametersin.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -class TrainingParametersInTypedDict(TypedDict): +class CompletionTrainingParametersInTypedDict(TypedDict): r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" training_steps: NotRequired[Nullable[int]] @@ -19,11 +19,11 @@ class TrainingParametersInTypedDict(TypedDict): warmup_fraction: NotRequired[Nullable[float]] r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" epochs: NotRequired[Nullable[float]] - fim_ratio: NotRequired[Nullable[float]] seq_len: NotRequired[Nullable[int]] + fim_ratio: NotRequired[Nullable[float]] -class TrainingParametersIn(BaseModel): +class CompletionTrainingParametersIn(BaseModel): r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" training_steps: OptionalNullable[int] = UNSET @@ -40,10 +40,10 @@ class TrainingParametersIn(BaseModel): epochs: OptionalNullable[float] = UNSET - fim_ratio: OptionalNullable[float] = UNSET - seq_len: OptionalNullable[int] = UNSET + fim_ratio: OptionalNullable[float] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -52,16 +52,16 @@ def serialize_model(self, handler): "weight_decay", "warmup_fraction", "epochs", - "fim_ratio", "seq_len", + "fim_ratio", ] nullable_fields = [ "training_steps", "weight_decay", "warmup_fraction", "epochs", - "fim_ratio", "seq_len", + "fim_ratio", ] null_default_fields = [] diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 5c37fd48..bf9ce3ff 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -7,18 +7,20 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -InputsTypedDict = TypeAliasType("InputsTypedDict", Union[str, List[str]]) +EmbeddingRequestInputsTypedDict = TypeAliasType( + "EmbeddingRequestInputsTypedDict", Union[str, List[str]] +) r"""Text to embed.""" -Inputs = TypeAliasType("Inputs", Union[str, List[str]]) +EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) r"""Text to embed.""" class EmbeddingRequestTypedDict(TypedDict): model: str r"""ID of the model to use.""" - inputs: InputsTypedDict + inputs: EmbeddingRequestInputsTypedDict r"""Text to embed.""" @@ -26,5 +28,5 @@ 
class EmbeddingRequest(BaseModel): model: str r"""ID of the model to use.""" - inputs: Annotated[Inputs, pydantic.Field(alias="input")] + inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] r"""Text to embed.""" diff --git a/src/mistralai/models/finetuneablemodeltype.py b/src/mistralai/models/finetuneablemodeltype.py new file mode 100644 index 00000000..3507dc91 --- /dev/null +++ b/src/mistralai/models/finetuneablemodeltype.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +FineTuneableModelType = Literal["completion", "classifier"] diff --git a/src/mistralai/models/ftclassifierlossfunction.py b/src/mistralai/models/ftclassifierlossfunction.py new file mode 100644 index 00000000..df2d19ff --- /dev/null +++ b/src/mistralai/models/ftclassifierlossfunction.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +FTClassifierLossFunction = Literal["single_class", "multi_class"] diff --git a/src/mistralai/models/ftmodelcapabilitiesout.py b/src/mistralai/models/ftmodelcapabilitiesout.py index b5e1e521..7f3aa18b 100644 --- a/src/mistralai/models/ftmodelcapabilitiesout.py +++ b/src/mistralai/models/ftmodelcapabilitiesout.py @@ -11,6 +11,7 @@ class FTModelCapabilitiesOutTypedDict(TypedDict): completion_fim: NotRequired[bool] function_calling: NotRequired[bool] fine_tuning: NotRequired[bool] + classification: NotRequired[bool] class FTModelCapabilitiesOut(BaseModel): @@ -21,3 +22,5 @@ class FTModelCapabilitiesOut(BaseModel): function_calling: Optional[bool] = False fine_tuning: Optional[bool] = False + + classification: Optional[bool] = False diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py index 715db6b7..eda4ee0b 100644 --- a/src/mistralai/models/githubrepositoryin.py +++ b/src/mistralai/models/githubrepositoryin.py @@ -2,12 +2,9 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict GithubRepositoryInType = Literal["github"] @@ -17,7 +14,7 @@ class GithubRepositoryInTypedDict(TypedDict): name: str owner: str token: str - type: GithubRepositoryInType + type: NotRequired[GithubRepositoryInType] ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -29,12 +26,7 @@ class GithubRepositoryIn(BaseModel): token: str - TYPE: Annotated[ - Annotated[ - Optional[GithubRepositoryInType], AfterValidator(validate_const("github")) - ], - pydantic.Field(alias="type"), - 
] = "github" + type: Optional[GithubRepositoryInType] = "github" ref: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py index 5a0ce31a..72213b6f 100644 --- a/src/mistralai/models/githubrepositoryout.py +++ b/src/mistralai/models/githubrepositoryout.py @@ -2,12 +2,9 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict GithubRepositoryOutType = Literal["github"] @@ -17,7 +14,7 @@ class GithubRepositoryOutTypedDict(TypedDict): name: str owner: str commit_id: str - type: GithubRepositoryOutType + type: NotRequired[GithubRepositoryOutType] ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -29,12 +26,7 @@ class GithubRepositoryOut(BaseModel): commit_id: str - TYPE: Annotated[ - Annotated[ - Optional[GithubRepositoryOutType], AfterValidator(validate_const("github")) - ], - pydantic.Field(alias="type"), - ] = "github" + type: Optional[GithubRepositoryOutType] = "github" ref: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/inputs.py b/src/mistralai/models/inputs.py new file mode 100644 index 00000000..34d20f34 --- /dev/null +++ b/src/mistralai/models/inputs.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .instructrequest import InstructRequest, InstructRequestTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +InstructRequestInputsMessagesTypedDict = TypeAliasType( + "InstructRequestInputsMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +InstructRequestInputsMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +class InstructRequestInputsTypedDict(TypedDict): + messages: List[InstructRequestInputsMessagesTypedDict] + + +class InstructRequestInputs(BaseModel): + messages: List[InstructRequestInputsMessages] + + +InputsTypedDict = TypeAliasType( + "InputsTypedDict", + Union[InstructRequestInputsTypedDict, List[InstructRequestTypedDict]], +) +r"""Chat to classify""" + + +Inputs = TypeAliasType("Inputs", Union[InstructRequestInputs, List[InstructRequest]]) +r"""Chat to classify""" diff --git a/src/mistralai/models/instructrequest.py b/src/mistralai/models/instructrequest.py new file mode 100644 index 00000000..dddbda00 --- /dev/null +++ b/src/mistralai/models/instructrequest.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.types import BaseModel +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +InstructRequestMessagesTypedDict = TypeAliasType( + "InstructRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +InstructRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +class InstructRequestTypedDict(TypedDict): + messages: List[InstructRequestMessagesTypedDict] + + +class InstructRequest(BaseModel): + messages: List[InstructRequestMessages] diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py index 0ef66da3..cb535e46 100644 --- a/src/mistralai/models/jobin.py +++ b/src/mistralai/models/jobin.py @@ -1,14 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict +from .classifiertrainingparametersin import ( + ClassifierTrainingParametersIn, + ClassifierTrainingParametersInTypedDict, +) +from .completiontrainingparametersin import ( + CompletionTrainingParametersIn, + CompletionTrainingParametersInTypedDict, +) +from .finetuneablemodeltype import FineTuneableModelType from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict from .trainingfile import TrainingFile, TrainingFileTypedDict -from .trainingparametersin import TrainingParametersIn, TrainingParametersInTypedDict from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict JobInIntegrationsTypedDict = WandbIntegrationTypedDict @@ -17,6 +26,20 @@ JobInIntegrations = WandbIntegration +HyperparametersTypedDict = TypeAliasType( + "HyperparametersTypedDict", + Union[ + ClassifierTrainingParametersInTypedDict, CompletionTrainingParametersInTypedDict + ], +) + + +Hyperparameters = TypeAliasType( + "Hyperparameters", + Union[ClassifierTrainingParametersIn, CompletionTrainingParametersIn], +) + + JobInRepositoriesTypedDict = GithubRepositoryInTypedDict @@ -26,8 +49,7 @@ class JobInTypedDict(TypedDict): model: str r"""The name of the model to fine-tune.""" - hyperparameters: TrainingParametersInTypedDict - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + hyperparameters: HyperparametersTypedDict training_files: NotRequired[List[TrainingFileTypedDict]] validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain 
validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" @@ -35,17 +57,19 @@ class JobInTypedDict(TypedDict): r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" integrations: NotRequired[Nullable[List[JobInIntegrationsTypedDict]]] r"""A list of integrations to enable for your fine-tuning job.""" - repositories: NotRequired[List[JobInRepositoriesTypedDict]] auto_start: NotRequired[bool] r"""This field will be required in a future release.""" + invalid_sample_skip_percentage: NotRequired[float] + job_type: NotRequired[Nullable[FineTuneableModelType]] + repositories: NotRequired[Nullable[List[JobInRepositoriesTypedDict]]] + classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]] class JobIn(BaseModel): model: str r"""The name of the model to fine-tune.""" - hyperparameters: TrainingParametersIn - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + hyperparameters: Hyperparameters training_files: Optional[List[TrainingFile]] = None @@ -58,11 +82,17 @@ class JobIn(BaseModel): integrations: OptionalNullable[List[JobInIntegrations]] = UNSET r"""A list of integrations to enable for your fine-tuning job.""" - repositories: Optional[List[JobInRepositories]] = None - auto_start: Optional[bool] = None r"""This field will be required in a future release.""" + invalid_sample_skip_percentage: Optional[float] = 0 + + job_type: OptionalNullable[FineTuneableModelType] = UNSET + + repositories: OptionalNullable[List[JobInRepositories]] = UNSET + + classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET + @model_serializer(mode="wrap") def 
serialize_model(self, handler): optional_fields = [ @@ -70,10 +100,20 @@ def serialize_model(self, handler): "validation_files", "suffix", "integrations", - "repositories", "auto_start", + "invalid_sample_skip_percentage", + "job_type", + "repositories", + "classifier_targets", + ] + nullable_fields = [ + "validation_files", + "suffix", + "integrations", + "job_type", + "repositories", + "classifier_targets", ] - nullable_fields = ["validation_files", "suffix", "integrations"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py index 8f0c66ca..fa2c6ed3 100644 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -6,7 +6,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import FieldMetadata, QueryParamMetadata from pydantic import model_serializer -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -17,7 +17,7 @@ class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): metadata: NotRequired[Nullable[Dict[str, Any]]] created_after: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] - status: NotRequired[Nullable[BatchJobStatus]] + status: NotRequired[Nullable[List[BatchJobStatus]]] class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): @@ -52,7 +52,7 @@ class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): ] = False status: Annotated[ - OptionalNullable[BatchJobStatus], + OptionalNullable[List[BatchJobStatus]], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py 
index b72ff42f..ceb19a69 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -1,9 +1,19 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict +from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): @@ -16,3 +26,20 @@ class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] r"""The ID of the job to cancel.""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py index d7a5d10d..39af3ea6 100644 --- 
a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -1,21 +1,38 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from .jobout import JobOut, JobOutTypedDict +from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict +from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag from typing import Union -from typing_extensions import TypeAliasType +from typing_extensions import Annotated, TypeAliasType + + +Response1TypedDict = TypeAliasType( + "Response1TypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] +) + + +Response1 = Annotated[ + Union[ + Annotated[ClassifierJobOut, Tag("classifier")], + Annotated[CompletionJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - Union[LegacyJobMetadataOutTypedDict, JobOutTypedDict], + Union[LegacyJobMetadataOutTypedDict, Response1TypedDict], ) r"""OK""" JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - Union[LegacyJobMetadataOut, JobOut], + Union[LegacyJobMetadataOut, Response1], ) r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py index 896d34f5..be99dd2d 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ 
b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -1,9 +1,19 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict +from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): @@ -16,3 +26,20 @@ class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] r"""The ID of the job to analyse.""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py index b51b1958..710436c9 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -33,6 +33,7 @@ class 
JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" created_after: NotRequired[Nullable[datetime]] r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_before: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" status: NotRequired[Nullable[QueryParamStatus]] @@ -70,6 +71,11 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): ] = UNSET r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_before: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + created_by_me: Annotated[ Optional[bool], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -107,6 +113,7 @@ def serialize_model(self, handler): "page_size", "model", "created_after", + "created_before", "created_by_me", "status", "wandb_project", @@ -116,6 +123,7 @@ def serialize_model(self, handler): nullable_fields = [ "model", "created_after", + "created_before", "status", "wandb_project", "wandb_name", diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py index 3e7989a7..8103b67b 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -1,9 +1,19 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict +from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): @@ -14,3 +24,20 @@ class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + + +JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py index 11e23f8c..a10528ca 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -1,10 +1,19 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .classifierftmodelout import ClassifierFTModelOut, ClassifierFTModelOutTypedDict +from .completionftmodelout import CompletionFTModelOut, CompletionFTModelOutTypedDict from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict +from mistralai.utils import ( + FieldMetadata, + PathParamMetadata, + RequestMetadata, + get_discriminator, +) +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): @@ -23,3 +32,20 @@ class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): UpdateFTModelIn, FieldMetadata(request=RequestMetadata(media_type="application/json")), ] + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + Union[CompletionFTModelOutTypedDict, ClassifierFTModelOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ + Union[ + Annotated[ClassifierFTModelOut, Tag("classifier")], + Annotated[CompletionFTModelOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "model_type", "model_type")), +] +r"""OK""" diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py index 316bf89f..abdf18fd 100644 --- a/src/mistralai/models/jobsout.py +++ b/src/mistralai/models/jobsout.py @@ -1,13 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .jobout import JobOut, JobOutTypedDict +from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict +from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +JobsOutDataTypedDict = TypeAliasType( + "JobsOutDataTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] +) + + +JobsOutData = Annotated[ + Union[ + Annotated[ClassifierJobOut, Tag("classifier")], + Annotated[CompletionJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] JobsOutObject = Literal["list"] @@ -15,16 +29,13 @@ class JobsOutTypedDict(TypedDict): total: int - data: NotRequired[List[JobOutTypedDict]] - object: JobsOutObject + data: NotRequired[List[JobsOutDataTypedDict]] + object: NotRequired[JobsOutObject] class JobsOut(BaseModel): total: int - data: Optional[List[JobOut]] = None + data: Optional[List[JobsOutData]] = None - OBJECT: Annotated[ - Annotated[Optional[JobsOutObject], AfterValidator(validate_const("list"))], - pydantic.Field(alias="object"), - ] = "list" + object: Optional[JobsOutObject] = "list" diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py index df6b3d35..1741570e 100644 --- a/src/mistralai/models/legacyjobmetadataout.py +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -2,12 +2,9 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, 
UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict LegacyJobMetadataOutObject = Literal["job.metadata"] @@ -33,7 +30,7 @@ class LegacyJobMetadataOutTypedDict(TypedDict): r"""The number of complete passes through the entire training dataset.""" training_steps: NotRequired[Nullable[int]] r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - object: LegacyJobMetadataOutObject + object: NotRequired[LegacyJobMetadataOutObject] class LegacyJobMetadataOut(BaseModel): @@ -67,13 +64,7 @@ class LegacyJobMetadataOut(BaseModel): training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - OBJECT: Annotated[ - Annotated[ - Optional[LegacyJobMetadataOutObject], - AfterValidator(validate_const("job.metadata")), - ], - pydantic.Field(alias="object"), - ] = "job.metadata" + object: Optional[LegacyJobMetadataOutObject] = "job.metadata" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/classificationobject.py b/src/mistralai/models/moderationobject.py similarity index 65% rename from src/mistralai/models/classificationobject.py rename to src/mistralai/models/moderationobject.py index e4ee3624..5eff2d2a 100644 --- a/src/mistralai/models/classificationobject.py +++ b/src/mistralai/models/moderationobject.py @@ -6,16 +6,16 @@ from typing_extensions import NotRequired, TypedDict -class ClassificationObjectTypedDict(TypedDict): +class ModerationObjectTypedDict(TypedDict): categories: NotRequired[Dict[str, bool]] - r"""Classifier result thresholded""" + r"""Moderation result thresholds""" category_scores: NotRequired[Dict[str, float]] - r"""Classifier result""" + r"""Moderation result""" -class ClassificationObject(BaseModel): +class ModerationObject(BaseModel): categories: Optional[Dict[str, bool]] = None - r"""Classifier result thresholded""" + r"""Moderation result thresholds""" category_scores: Optional[Dict[str, float]] = None - r"""Classifier result""" + r"""Moderation result""" diff --git a/src/mistralai/models/moderationresponse.py b/src/mistralai/models/moderationresponse.py new file mode 100644 index 00000000..ed13cd6b --- /dev/null +++ b/src/mistralai/models/moderationresponse.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .moderationobject import ModerationObject, ModerationObjectTypedDict +from mistralai.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ModerationResponseTypedDict(TypedDict): + id: str + model: str + results: List[ModerationObjectTypedDict] + + +class ModerationResponse(BaseModel): + id: str + + model: str + + results: List[ModerationObject] diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py index 6540df1f..6b2f730d 100644 --- a/src/mistralai/models/unarchiveftmodelout.py +++ b/src/mistralai/models/unarchiveftmodelout.py @@ -2,11 +2,8 @@ from __future__ import annotations from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict UnarchiveFTModelOutObject = Literal["model"] @@ -14,18 +11,13 @@ class UnarchiveFTModelOutTypedDict(TypedDict): id: str - object: UnarchiveFTModelOutObject + object: NotRequired[UnarchiveFTModelOutObject] archived: NotRequired[bool] class UnarchiveFTModelOut(BaseModel): id: str - OBJECT: Annotated[ - Annotated[ - Optional[UnarchiveFTModelOutObject], AfterValidator(validate_const("model")) - ], - pydantic.Field(alias="object"), - ] = "model" + object: Optional[UnarchiveFTModelOutObject] = "model" archived: Optional[bool] = False diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py index d82f921a..2bafc035 100644 --- a/src/mistralai/models/wandbintegration.py +++ b/src/mistralai/models/wandbintegration.py @@ -2,12 +2,9 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const 
-import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict WandbIntegrationType = Literal["wandb"] @@ -18,7 +15,7 @@ class WandbIntegrationTypedDict(TypedDict): r"""The name of the project that the new run will be created under.""" api_key: str r"""The WandB API key to use for authentication.""" - type: WandbIntegrationType + type: NotRequired[WandbIntegrationType] name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] @@ -31,12 +28,7 @@ class WandbIntegration(BaseModel): api_key: str r"""The WandB API key to use for authentication.""" - TYPE: Annotated[ - Annotated[ - Optional[WandbIntegrationType], AfterValidator(validate_const("wandb")) - ], - pydantic.Field(alias="type"), - ] = "wandb" + type: Optional[WandbIntegrationType] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py index 5514b595..bb624bd8 100644 --- a/src/mistralai/models/wandbintegrationout.py +++ b/src/mistralai/models/wandbintegrationout.py @@ -2,12 +2,9 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict WandbIntegrationOutType = Literal["wandb"] @@ -16,32 +13,30 @@ class WandbIntegrationOutTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" - type: WandbIntegrationOutType + type: NotRequired[WandbIntegrationOutType] name: NotRequired[Nullable[str]] r"""A display name to set for the run. If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] + url: NotRequired[Nullable[str]] class WandbIntegrationOut(BaseModel): project: str r"""The name of the project that the new run will be created under.""" - TYPE: Annotated[ - Annotated[ - Optional[WandbIntegrationOutType], AfterValidator(validate_const("wandb")) - ], - pydantic.Field(alias="type"), - ] = "wandb" + type: Optional[WandbIntegrationOutType] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" run_name: OptionalNullable[str] = UNSET + url: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "name", "run_name"] - nullable_fields = ["name", "run_name"] + optional_fields = ["type", "name", "run_name", "url"] + nullable_fields = ["name", "run_name", "url"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index 0b04694d..96aab468 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -607,7 +607,7 @@ def update( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FTModelOut: + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: r"""Update Fine Tuned Model Update a model name or description. @@ -680,7 +680,10 @@ def update( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FTModelOut) + return utils.unmarshal_json( + http_res.text, + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError( @@ -711,7 +714,7 @@ async def update_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FTModelOut: + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: r"""Update Fine Tuned Model Update a model name or description. 
@@ -784,7 +787,10 @@ async def update_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FTModelOut) + return utils.unmarshal_json( + http_res.text, + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError( From be02ba7ffa4ce2e5d290424684eba8ce08f40c80 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 22 May 2025 16:21:55 +0200 Subject: [PATCH 120/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.7.1=20(#220)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.517.3 * Update RELEASES.md --------- Co-authored-by: speakeasybot Co-authored-by: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> --- .speakeasy/gen.lock | 8 +++--- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 11 +++---- RELEASES.md | 14 ++++++++- docs/models/ocrimageobject.md | 3 +- docs/models/ocrrequest.md | 20 +++++++------ docs/models/ocrresponse.md | 11 +++---- docs/sdks/ocr/README.md | 22 +++++++------- pyproject.toml | 2 +- src/mistralai/_version.py | 6 ++-- src/mistralai/models/ocrimageobject.py | 8 +++++- src/mistralai/models/ocrrequest.py | 15 ++++++++++ src/mistralai/models/ocrresponse.py | 40 ++++++++++++++++++++++++-- src/mistralai/ocr.py | 28 ++++++++++++++++++ 14 files changed, 147 insertions(+), 43 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index bc731e9a..1a9287fc 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 63f1a973632e9afab0da3d2498994c1b - docVersion: 0.0.2 + docChecksum: 
3589e9f1ea5775264c5c8e0887b4ea0e + docVersion: 1.0.0 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - releaseVersion: 1.7.0 - configChecksum: d52ab0a71ab9e0798da08262c59bf31d + releaseVersion: 1.7.1 + configChecksum: d35541d61057b11258d7d56bbc5c5260 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 4bf0297c..35d79fdf 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.7.0 + version: 1.7.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 5c57d996..eaadc1d2 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,10 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:e7953d0c99d1c036d6bfe223052f231a89626f7007a105a96258dad2eedab39e - sourceBlobDigest: sha256:66ed2d18b563f3350de4ab16c9e2ca6ce425c6376feb5fcf1511d5d074908091 + sourceRevisionDigest: sha256:14f5a88b723582e80ead33d129f287568eed05815a9437b7ff5c890ca4c93318 + sourceBlobDigest: sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 tags: - latest + - speakeasy-sdk-regen-1747922489 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -36,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:e7953d0c99d1c036d6bfe223052f231a89626f7007a105a96258dad2eedab39e - sourceBlobDigest: sha256:66ed2d18b563f3350de4ab16c9e2ca6ce425c6376feb5fcf1511d5d074908091 + sourceRevisionDigest: sha256:14f5a88b723582e80ead33d129f287568eed05815a9437b7ff5c890ca4c93318 + sourceBlobDigest: 
sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:7c657301f482932fca0a3e914d3c25820ebb7e535e1887daea3cd9240eca0444 + codeSamplesRevisionDigest: sha256:d75cede62f03f1040732d65da392f66fe75c725dab04cdfba5498ad334652c1e workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/RELEASES.md b/RELEASES.md index 629e92d9..791fb70b 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -198,4 +198,16 @@ Based on: ### Generated - [python v1.7.0] . ### Releases -- [PyPI v1.7.0] https://pypi.org/project/mistralai/1.7.0 - . \ No newline at end of file +- [PyPI v1.7.0] https://pypi.org/project/mistralai/1.7.0 - . + +## 2025-05-22 14:01:13 +### Changes +- Added support for `document_annotation_format` and `bbox_annotation_format` in `client.ocr.process` + +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.1] . +### Releases +- [PyPI v1.7.1] https://pypi.org/project/mistralai/1.7.1 - . 
diff --git a/docs/models/ocrimageobject.md b/docs/models/ocrimageobject.md index 273cfa9a..3c0d5544 100644 --- a/docs/models/ocrimageobject.md +++ b/docs/models/ocrimageobject.md @@ -10,4 +10,5 @@ | `top_left_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of top-left corner of the extracted image | | `bottom_right_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of bottom-right corner of the extracted image | | `bottom_right_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of bottom-right corner of the extracted image | -| `image_base64` | *OptionalNullable[str]* | :heavy_minus_sign: | Base64 string of the extracted image | \ No newline at end of file +| `image_base64` | *OptionalNullable[str]* | :heavy_minus_sign: | Base64 string of the extracted image | +| `image_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Annotation of the extracted image in json str | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md index dbc4dc80..0c8954a5 100644 --- a/docs/models/ocrrequest.md +++ b/docs/models/ocrrequest.md @@ -3,12 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. 
Only json_schema is valid for this field | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | \ No newline at end of file diff --git a/docs/models/ocrresponse.md b/docs/models/ocrresponse.md index 690d992d..7d6a58ae 100644 --- a/docs/models/ocrresponse.md +++ b/docs/models/ocrresponse.md @@ -3,8 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | -| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. | -| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | +| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. 
| +| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | +| `document_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Formatted response in the request_format if provided in json str | \ No newline at end of file diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 61988ea6..60d987b4 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -36,16 +36,18 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/pyproject.toml b/pyproject.toml index 2da8b5ea..1d7bd53d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.7.0" +version = "1.7.1" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 7b151c78..ade7e11c 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.7.0" -__openapi_doc_version__: str = "0.0.2" +__version__: str = "1.7.1" +__openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.7.0 2.548.6 0.0.2 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.7.1 2.548.6 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/models/ocrimageobject.py b/src/mistralai/models/ocrimageobject.py index 16b41e6c..78e37c1f 100644 --- a/src/mistralai/models/ocrimageobject.py +++ b/src/mistralai/models/ocrimageobject.py @@ -19,6 +19,8 @@ class OCRImageObjectTypedDict(TypedDict): r"""Y coordinate of bottom-right corner of the extracted image""" image_base64: NotRequired[Nullable[str]] r"""Base64 string of the extracted image""" + image_annotation: NotRequired[Nullable[str]] + r"""Annotation of the extracted image in json str""" class OCRImageObject(BaseModel): @@ -40,15 +42,19 @@ class OCRImageObject(BaseModel): image_base64: OptionalNullable[str] = UNSET r"""Base64 string of the extracted image""" + image_annotation: OptionalNullable[str] = UNSET + r"""Annotation of the extracted image in json str""" + @model_serializer(mode="wrap") def 
serialize_model(self, handler): - optional_fields = ["image_base64"] + optional_fields = ["image_base64", "image_annotation"] nullable_fields = [ "top_left_x", "top_left_y", "bottom_right_x", "bottom_right_y", "image_base64", + "image_annotation", ] null_default_fields = [] diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py index 54339e9e..4f9dfd47 100644 --- a/src/mistralai/models/ocrrequest.py +++ b/src/mistralai/models/ocrrequest.py @@ -3,6 +3,7 @@ from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer from typing import List, Optional, Union @@ -32,6 +33,10 @@ class OCRRequestTypedDict(TypedDict): r"""Max images to extract""" image_min_size: NotRequired[Nullable[int]] r"""Minimum height and width of image to extract""" + bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" class OCRRequest(BaseModel): @@ -54,6 +59,12 @@ class OCRRequest(BaseModel): image_min_size: OptionalNullable[int] = UNSET r"""Minimum height and width of image to extract""" + bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. 
Only json_schema is valid for this field""" + + document_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -62,6 +73,8 @@ def serialize_model(self, handler): "include_image_base64", "image_limit", "image_min_size", + "bbox_annotation_format", + "document_annotation_format", ] nullable_fields = [ "model", @@ -69,6 +82,8 @@ def serialize_model(self, handler): "include_image_base64", "image_limit", "image_min_size", + "bbox_annotation_format", + "document_annotation_format", ] null_default_fields = [] diff --git a/src/mistralai/models/ocrresponse.py b/src/mistralai/models/ocrresponse.py index 45fb06e3..df3b7d18 100644 --- a/src/mistralai/models/ocrresponse.py +++ b/src/mistralai/models/ocrresponse.py @@ -3,9 +3,10 @@ from __future__ import annotations from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer from typing import List -from typing_extensions import TypedDict +from typing_extensions import NotRequired, TypedDict class OCRResponseTypedDict(TypedDict): @@ -14,6 +15,8 @@ class OCRResponseTypedDict(TypedDict): model: str r"""The model used to generate the OCR.""" usage_info: OCRUsageInfoTypedDict + document_annotation: NotRequired[Nullable[str]] + r"""Formatted response in the request_format if provided in json str""" class OCRResponse(BaseModel): @@ -24,3 +27,36 @@ class OCRResponse(BaseModel): r"""The model used to generate the OCR.""" usage_info: OCRUsageInfo + + document_annotation: OptionalNullable[str] = UNSET + r"""Formatted response in the request_format if provided in json str""" + + 
@model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["document_annotation"] + nullable_fields = ["document_annotation"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index 5d0e2414..cdc56ae8 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -21,6 +21,12 @@ def process( include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -35,6 +41,8 @@ def process( :param include_image_base64: Include image URLs in response :param image_limit: Max images to extract :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -58,6 +66,12 @@ def process( include_image_base64=include_image_base64, image_limit=image_limit, image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), ) req = self._build_request( @@ -139,6 +153,12 @@ async def process_async( include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -153,6 +173,8 @@ async def process_async( :param include_image_base64: Include image URLs in response :param image_limit: Max images to extract :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -176,6 +198,12 @@ async def process_async( include_image_base64=include_image_base64, image_limit=image_limit, image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), ) req = self._build_request_async( From 192f0a00ae07f3a2eee5b79c5d6d6f6676656d51 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Thu, 22 May 2025 16:48:32 +0200 Subject: [PATCH 121/223] Fix `rec_strict_json_schema` for None default fields models (#219) --- src/mistralai/extra/utils/_pydantic_helper.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/mistralai/extra/utils/_pydantic_helper.py b/src/mistralai/extra/utils/_pydantic_helper.py index 08523f41..f042c394 100644 --- a/src/mistralai/extra/utils/_pydantic_helper.py +++ b/src/mistralai/extra/utils/_pydantic_helper.py @@ -1,11 +1,12 @@ from typing import Any + def rec_strict_json_schema(schema_node: Any) -> Any: """ Recursively set the additionalProperties property to False for all objects in the JSON Schema. This makes the JSON Schema strict (i.e. no additional properties are allowed). 
""" - if isinstance(schema_node, (str, bool)): + if isinstance(schema_node, (str, bool)) or schema_node is None: return schema_node if isinstance(schema_node, dict): if "type" in schema_node and schema_node["type"] == "object": From 18d0ba0c82245fefb4b1d0a57ad8ae60aa7f3a94 Mon Sep 17 00:00:00 2001 From: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> Date: Thu, 22 May 2025 16:57:08 +0200 Subject: [PATCH 122/223] =?UTF-8?q?Revert=20"chore:=20=F0=9F=90=9D=20Updat?= =?UTF-8?q?e=20SDK=20-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.7.1=20(#?= =?UTF-8?q?220)"=20(#222)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit be02ba7ffa4ce2e5d290424684eba8ce08f40c80. --- .speakeasy/gen.lock | 8 +++--- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 11 ++++--- RELEASES.md | 14 +-------- docs/models/ocrimageobject.md | 3 +- docs/models/ocrrequest.md | 20 ++++++------- docs/models/ocrresponse.md | 11 ++++--- docs/sdks/ocr/README.md | 22 +++++++------- pyproject.toml | 2 +- src/mistralai/_version.py | 6 ++-- src/mistralai/models/ocrimageobject.py | 8 +----- src/mistralai/models/ocrrequest.py | 15 ---------- src/mistralai/models/ocrresponse.py | 40 ++------------------------ src/mistralai/ocr.py | 28 ------------------ 14 files changed, 43 insertions(+), 147 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 1a9287fc..bc731e9a 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 3589e9f1ea5775264c5c8e0887b4ea0e - docVersion: 1.0.0 + docChecksum: 63f1a973632e9afab0da3d2498994c1b + docVersion: 0.0.2 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - releaseVersion: 1.7.1 - configChecksum: d35541d61057b11258d7d56bbc5c5260 + releaseVersion: 1.7.0 + configChecksum: d52ab0a71ab9e0798da08262c59bf31d repoURL: 
https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 35d79fdf..4bf0297c 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.7.1 + version: 1.7.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index eaadc1d2..5c57d996 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,10 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:14f5a88b723582e80ead33d129f287568eed05815a9437b7ff5c890ca4c93318 - sourceBlobDigest: sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 + sourceRevisionDigest: sha256:e7953d0c99d1c036d6bfe223052f231a89626f7007a105a96258dad2eedab39e + sourceBlobDigest: sha256:66ed2d18b563f3350de4ab16c9e2ca6ce425c6376feb5fcf1511d5d074908091 tags: - latest - - speakeasy-sdk-regen-1747922489 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +36,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:14f5a88b723582e80ead33d129f287568eed05815a9437b7ff5c890ca4c93318 - sourceBlobDigest: sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 + sourceRevisionDigest: sha256:e7953d0c99d1c036d6bfe223052f231a89626f7007a105a96258dad2eedab39e + sourceBlobDigest: sha256:66ed2d18b563f3350de4ab16c9e2ca6ce425c6376feb5fcf1511d5d074908091 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:d75cede62f03f1040732d65da392f66fe75c725dab04cdfba5498ad334652c1e + codeSamplesRevisionDigest: sha256:7c657301f482932fca0a3e914d3c25820ebb7e535e1887daea3cd9240eca0444 
workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/RELEASES.md b/RELEASES.md index 791fb70b..629e92d9 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -198,16 +198,4 @@ Based on: ### Generated - [python v1.7.0] . ### Releases -- [PyPI v1.7.0] https://pypi.org/project/mistralai/1.7.0 - . - -## 2025-05-22 14:01:13 -### Changes -- Added support for `document_annotation_format` and `bbox_annotation_format` in `client.ocr.process` - -Based on: -- OpenAPI Doc -- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy -### Generated -- [python v1.7.1] . -### Releases -- [PyPI v1.7.1] https://pypi.org/project/mistralai/1.7.1 - . +- [PyPI v1.7.0] https://pypi.org/project/mistralai/1.7.0 - . \ No newline at end of file diff --git a/docs/models/ocrimageobject.md b/docs/models/ocrimageobject.md index 3c0d5544..273cfa9a 100644 --- a/docs/models/ocrimageobject.md +++ b/docs/models/ocrimageobject.md @@ -10,5 +10,4 @@ | `top_left_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of top-left corner of the extracted image | | `bottom_right_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of bottom-right corner of the extracted image | | `bottom_right_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of bottom-right corner of the extracted image | -| `image_base64` | *OptionalNullable[str]* | :heavy_minus_sign: | Base64 string of the extracted image | -| `image_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Annotation of the extracted image in json str | \ No newline at end of file +| `image_base64` | *OptionalNullable[str]* | :heavy_minus_sign: | Base64 string of the extracted image | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md index 0c8954a5..dbc4dc80 100644 --- a/docs/models/ocrrequest.md 
+++ b/docs/models/ocrrequest.md @@ -3,14 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | \ No newline at end of file diff --git a/docs/models/ocrresponse.md b/docs/models/ocrresponse.md index 7d6a58ae..690d992d 100644 --- a/docs/models/ocrresponse.md +++ b/docs/models/ocrresponse.md @@ -3,9 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | -| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. 
| -| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | -| `document_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Formatted response in the request_format if provided in json str | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | +| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. | +| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 60d987b4..61988ea6 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -36,18 +36,16 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `pages` | 
List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/pyproject.toml b/pyproject.toml index 1d7bd53d..2da8b5ea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.7.1" +version = "1.7.0" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index ade7e11c..7b151c78 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.7.1" -__openapi_doc_version__: str = "1.0.0" +__version__: str = "1.7.0" +__openapi_doc_version__: str = "0.0.2" __gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.7.1 2.548.6 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.7.0 2.548.6 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/models/ocrimageobject.py b/src/mistralai/models/ocrimageobject.py index 78e37c1f..16b41e6c 100644 --- a/src/mistralai/models/ocrimageobject.py +++ b/src/mistralai/models/ocrimageobject.py @@ -19,8 +19,6 @@ class OCRImageObjectTypedDict(TypedDict): r"""Y coordinate of bottom-right corner of the extracted image""" image_base64: NotRequired[Nullable[str]] r"""Base64 string of the extracted image""" - image_annotation: NotRequired[Nullable[str]] - r"""Annotation of the extracted image in json str""" class OCRImageObject(BaseModel): @@ -42,19 +40,15 @@ class OCRImageObject(BaseModel): image_base64: OptionalNullable[str] = UNSET r"""Base64 string of the extracted image""" - image_annotation: OptionalNullable[str] = UNSET - r"""Annotation of the extracted image in json str""" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["image_base64", "image_annotation"] + optional_fields = ["image_base64"] nullable_fields = [ "top_left_x", "top_left_y", "bottom_right_x", "bottom_right_y", "image_base64", - "image_annotation", ] null_default_fields = [] diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py index 4f9dfd47..54339e9e 100644 --- a/src/mistralai/models/ocrrequest.py +++ b/src/mistralai/models/ocrrequest.py @@ -3,7 +3,6 @@ from __future__ 
import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer from typing import List, Optional, Union @@ -33,10 +32,6 @@ class OCRRequestTypedDict(TypedDict): r"""Max images to extract""" image_min_size: NotRequired[Nullable[int]] r"""Minimum height and width of image to extract""" - bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] - r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" - document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] - r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" class OCRRequest(BaseModel): @@ -59,12 +54,6 @@ class OCRRequest(BaseModel): image_min_size: OptionalNullable[int] = UNSET r"""Minimum height and width of image to extract""" - bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET - r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" - - document_annotation_format: OptionalNullable[ResponseFormat] = UNSET - r"""Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field""" - @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -73,8 +62,6 @@ def serialize_model(self, handler): "include_image_base64", "image_limit", "image_min_size", - "bbox_annotation_format", - "document_annotation_format", ] nullable_fields = [ "model", @@ -82,8 +69,6 @@ def serialize_model(self, handler): "include_image_base64", "image_limit", "image_min_size", - "bbox_annotation_format", - "document_annotation_format", ] null_default_fields = [] diff --git a/src/mistralai/models/ocrresponse.py b/src/mistralai/models/ocrresponse.py index df3b7d18..45fb06e3 100644 --- a/src/mistralai/models/ocrresponse.py +++ b/src/mistralai/models/ocrresponse.py @@ -3,10 +3,9 @@ from __future__ import annotations from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer +from mistralai.types import BaseModel from typing import List -from typing_extensions import NotRequired, TypedDict +from typing_extensions import TypedDict class OCRResponseTypedDict(TypedDict): @@ -15,8 +14,6 @@ class OCRResponseTypedDict(TypedDict): model: str r"""The model used to generate the OCR.""" usage_info: OCRUsageInfoTypedDict - document_annotation: NotRequired[Nullable[str]] - r"""Formatted response in the request_format if provided in json str""" class OCRResponse(BaseModel): @@ -27,36 +24,3 @@ class OCRResponse(BaseModel): r"""The model used to generate the OCR.""" usage_info: OCRUsageInfo - - document_annotation: OptionalNullable[str] = UNSET - r"""Formatted response in the request_format if provided in json str""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["document_annotation"] - nullable_fields = ["document_annotation"] - null_default_fields = [] - - serialized = 
handler(self) - - m = {} - - for n, f in self.model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index cdc56ae8..5d0e2414 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -21,12 +21,6 @@ def process( include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, - bbox_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = UNSET, - document_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -41,8 +35,6 @@ def process( :param include_image_base64: Include image URLs in response :param image_limit: Max images to extract :param image_min_size: Minimum height and width of image to extract - :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field - :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -66,12 +58,6 @@ def process( include_image_base64=include_image_base64, image_limit=image_limit, image_min_size=image_min_size, - bbox_annotation_format=utils.get_pydantic_model( - bbox_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_format=utils.get_pydantic_model( - document_annotation_format, OptionalNullable[models.ResponseFormat] - ), ) req = self._build_request( @@ -153,12 +139,6 @@ async def process_async( include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, - bbox_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = UNSET, - document_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] - ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -173,8 +153,6 @@ async def process_async( :param include_image_base64: Include image URLs in response :param image_limit: Max images to extract :param image_min_size: Minimum height and width of image to extract - :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field - :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -198,12 +176,6 @@ async def process_async( include_image_base64=include_image_base64, image_limit=image_limit, image_min_size=image_min_size, - bbox_annotation_format=utils.get_pydantic_model( - bbox_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_format=utils.get_pydantic_model( - document_annotation_format, OptionalNullable[models.ResponseFormat] - ), ) req = self._build_request_async( From 7ec1d2b0122c23f5eb01ef17de32ad8a5d657d5e Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Thu, 22 May 2025 16:59:06 +0200 Subject: [PATCH 123/223] Speakeasy created PR should trigger the test workflow (#223) --- .github/workflows/sdk_generation_mistralai_sdk.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index 7d0540e7..1d80cddc 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -24,6 +24,6 @@ jobs: speakeasy_version: latest target: mistralai-sdk secrets: - github_access_token: ${{ secrets.GITHUB_TOKEN }} + github_access_token: ${{ secrets.SPEAKEASY_WORKFLOW_GITHUB_PAT }} pypi_token: ${{ secrets.PYPI_TOKEN }} speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} From 3990f94b03c94c8698e0decaa5c45d235beb5b98 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Thu, 22 May 2025 17:06:29 +0200 Subject: [PATCH 124/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.517.3 (#224) Co-authored-by: speakeasybot --- .speakeasy/gen.lock | 8 +++--- .speakeasy/gen.yaml 
| 2 +- .speakeasy/workflow.lock | 11 +++---- RELEASES.md | 12 +++++++- docs/models/ocrimageobject.md | 3 +- docs/models/ocrrequest.md | 20 +++++++------ docs/models/ocrresponse.md | 11 +++---- docs/sdks/ocr/README.md | 22 +++++++------- pyproject.toml | 2 +- src/mistralai/_version.py | 6 ++-- src/mistralai/models/ocrimageobject.py | 8 +++++- src/mistralai/models/ocrrequest.py | 15 ++++++++++ src/mistralai/models/ocrresponse.py | 40 ++++++++++++++++++++++++-- src/mistralai/ocr.py | 28 ++++++++++++++++++ 14 files changed, 145 insertions(+), 43 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index bc731e9a..1a9287fc 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 63f1a973632e9afab0da3d2498994c1b - docVersion: 0.0.2 + docChecksum: 3589e9f1ea5775264c5c8e0887b4ea0e + docVersion: 1.0.0 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - releaseVersion: 1.7.0 - configChecksum: d52ab0a71ab9e0798da08262c59bf31d + releaseVersion: 1.7.1 + configChecksum: d35541d61057b11258d7d56bbc5c5260 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 4bf0297c..35d79fdf 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.7.0 + version: 1.7.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 5c57d996..1d3e05a8 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,10 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: 
sha256:e7953d0c99d1c036d6bfe223052f231a89626f7007a105a96258dad2eedab39e - sourceBlobDigest: sha256:66ed2d18b563f3350de4ab16c9e2ca6ce425c6376feb5fcf1511d5d074908091 + sourceRevisionDigest: sha256:14f5a88b723582e80ead33d129f287568eed05815a9437b7ff5c890ca4c93318 + sourceBlobDigest: sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 tags: - latest + - speakeasy-sdk-regen-1747926206 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -36,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:e7953d0c99d1c036d6bfe223052f231a89626f7007a105a96258dad2eedab39e - sourceBlobDigest: sha256:66ed2d18b563f3350de4ab16c9e2ca6ce425c6376feb5fcf1511d5d074908091 + sourceRevisionDigest: sha256:14f5a88b723582e80ead33d129f287568eed05815a9437b7ff5c890ca4c93318 + sourceBlobDigest: sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:7c657301f482932fca0a3e914d3c25820ebb7e535e1887daea3cd9240eca0444 + codeSamplesRevisionDigest: sha256:a130e7408a5dd6edaaba35712518861618aefc523eb03f15d7b53b9bfd085c5b workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/RELEASES.md b/RELEASES.md index 629e92d9..744e3312 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -198,4 +198,14 @@ Based on: ### Generated - [python v1.7.0] . ### Releases -- [PyPI v1.7.0] https://pypi.org/project/mistralai/1.7.0 - . \ No newline at end of file +- [PyPI v1.7.0] https://pypi.org/project/mistralai/1.7.0 - . + +## 2025-05-22 15:03:08 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.1] . 
+### Releases +- [PyPI v1.7.1] https://pypi.org/project/mistralai/1.7.1 - . \ No newline at end of file diff --git a/docs/models/ocrimageobject.md b/docs/models/ocrimageobject.md index 273cfa9a..3c0d5544 100644 --- a/docs/models/ocrimageobject.md +++ b/docs/models/ocrimageobject.md @@ -10,4 +10,5 @@ | `top_left_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of top-left corner of the extracted image | | `bottom_right_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of bottom-right corner of the extracted image | | `bottom_right_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of bottom-right corner of the extracted image | -| `image_base64` | *OptionalNullable[str]* | :heavy_minus_sign: | Base64 string of the extracted image | \ No newline at end of file +| `image_base64` | *OptionalNullable[str]* | :heavy_minus_sign: | Base64 string of the extracted image | +| `image_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Annotation of the extracted image in json str | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md index dbc4dc80..0c8954a5 100644 --- a/docs/models/ocrrequest.md +++ b/docs/models/ocrrequest.md @@ -3,12 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `pages` 
| List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | \ No newline at end of file diff --git a/docs/models/ocrresponse.md b/docs/models/ocrresponse.md index 690d992d..7d6a58ae 100644 --- a/docs/models/ocrresponse.md +++ b/docs/models/ocrresponse.md @@ -3,8 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | -| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. 
| -| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | +| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. | +| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | +| `document_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Formatted response in the request_format if provided in json str | \ No newline at end of file diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 61988ea6..60d987b4 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -36,16 +36,18 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/pyproject.toml b/pyproject.toml index 2da8b5ea..1d7bd53d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.7.0" +version = "1.7.1" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 7b151c78..ade7e11c 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.7.0" -__openapi_doc_version__: str = "0.0.2" +__version__: str = "1.7.1" +__openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.7.0 2.548.6 0.0.2 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.7.1 2.548.6 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/models/ocrimageobject.py b/src/mistralai/models/ocrimageobject.py index 16b41e6c..78e37c1f 100644 --- a/src/mistralai/models/ocrimageobject.py +++ b/src/mistralai/models/ocrimageobject.py @@ -19,6 +19,8 @@ class OCRImageObjectTypedDict(TypedDict): r"""Y coordinate of bottom-right corner of the extracted image""" image_base64: NotRequired[Nullable[str]] r"""Base64 string of the extracted image""" + image_annotation: NotRequired[Nullable[str]] + r"""Annotation of the extracted image in json str""" class OCRImageObject(BaseModel): @@ -40,15 +42,19 @@ class OCRImageObject(BaseModel): image_base64: OptionalNullable[str] = UNSET r"""Base64 string of the extracted image""" + image_annotation: OptionalNullable[str] = UNSET + r"""Annotation of the extracted image in json str""" + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["image_base64"] + optional_fields = ["image_base64", "image_annotation"] nullable_fields = [ "top_left_x", "top_left_y", "bottom_right_x", "bottom_right_y", "image_base64", + "image_annotation", ] null_default_fields = [] diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py index 54339e9e..4f9dfd47 100644 --- a/src/mistralai/models/ocrrequest.py +++ b/src/mistralai/models/ocrrequest.py @@ -3,6 +3,7 @@ from __future__ 
import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer from typing import List, Optional, Union @@ -32,6 +33,10 @@ class OCRRequestTypedDict(TypedDict): r"""Max images to extract""" image_min_size: NotRequired[Nullable[int]] r"""Minimum height and width of image to extract""" + bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" class OCRRequest(BaseModel): @@ -54,6 +59,12 @@ class OCRRequest(BaseModel): image_min_size: OptionalNullable[int] = UNSET r"""Minimum height and width of image to extract""" + bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + + document_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -62,6 +73,8 @@ def serialize_model(self, handler): "include_image_base64", "image_limit", "image_min_size", + "bbox_annotation_format", + "document_annotation_format", ] nullable_fields = [ "model", @@ -69,6 +82,8 @@ def serialize_model(self, handler): "include_image_base64", "image_limit", "image_min_size", + "bbox_annotation_format", + "document_annotation_format", ] null_default_fields = [] diff --git a/src/mistralai/models/ocrresponse.py b/src/mistralai/models/ocrresponse.py index 45fb06e3..df3b7d18 100644 --- a/src/mistralai/models/ocrresponse.py +++ b/src/mistralai/models/ocrresponse.py @@ -3,9 +3,10 @@ from __future__ import annotations from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer from typing import List -from typing_extensions import TypedDict +from typing_extensions import NotRequired, TypedDict class OCRResponseTypedDict(TypedDict): @@ -14,6 +15,8 @@ class OCRResponseTypedDict(TypedDict): model: str r"""The model used to generate the OCR.""" usage_info: OCRUsageInfoTypedDict + document_annotation: NotRequired[Nullable[str]] + r"""Formatted response in the request_format if provided in json str""" class OCRResponse(BaseModel): @@ -24,3 +27,36 @@ class OCRResponse(BaseModel): r"""The model used to generate the OCR.""" usage_info: OCRUsageInfo + + document_annotation: OptionalNullable[str] = UNSET + r"""Formatted response in the request_format if provided in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["document_annotation"] + nullable_fields = ["document_annotation"] + null_default_fields = [] + + serialized = 
handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index 5d0e2414..cdc56ae8 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -21,6 +21,12 @@ def process( include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -35,6 +41,8 @@ def process( :param include_image_base64: Include image URLs in response :param image_limit: Max images to extract :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -58,6 +66,12 @@ def process( include_image_base64=include_image_base64, image_limit=image_limit, image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), ) req = self._build_request( @@ -139,6 +153,12 @@ async def process_async( include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -153,6 +173,8 @@ async def process_async( :param include_image_base64: Include image URLs in response :param image_limit: Max images to extract :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -176,6 +198,12 @@ async def process_async( include_image_base64=include_image_base64, image_limit=image_limit, image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), ) req = self._build_request_async( From f999a0c710eef8de7512cdbee22ce689cf57a990 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Mon, 26 May 2025 11:06:17 +0000 Subject: [PATCH 125/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.517.3 --- .speakeasy/gen.lock | 4 ++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 4 ++-- RELEASES.md | 12 +++++++++++- pyproject.toml | 2 +- src/mistralai/_version.py | 4 ++-- 6 files changed, 19 insertions(+), 9 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 1a9287fc..dc68a842 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,8 +5,8 @@ management: docVersion: 1.0.0 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - releaseVersion: 1.7.1 - configChecksum: d35541d61057b11258d7d56bbc5c5260 + releaseVersion: 1.8.0 + configChecksum: 1f7adfac0b677cdca4c073a11cbcef02 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 35d79fdf..820ed567 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: 
false python: - version: 1.7.1 + version: 1.8.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 1d3e05a8..4c5198f5 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -18,7 +18,7 @@ sources: sourceBlobDigest: sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 tags: - latest - - speakeasy-sdk-regen-1747926206 + - speakeasy-sdk-regen-1748257524 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -40,7 +40,7 @@ targets: sourceRevisionDigest: sha256:14f5a88b723582e80ead33d129f287568eed05815a9437b7ff5c890ca4c93318 sourceBlobDigest: sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:a130e7408a5dd6edaaba35712518861618aefc523eb03f15d7b53b9bfd085c5b + codeSamplesRevisionDigest: sha256:ac98d7bc86d476552e342173fdcb5735ee9c6fc0fa41820fd73eef137fe07bac workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/RELEASES.md b/RELEASES.md index 744e3312..fc9229a9 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -208,4 +208,14 @@ Based on: ### Generated - [python v1.7.1] . ### Releases -- [PyPI v1.7.1] https://pypi.org/project/mistralai/1.7.1 - . \ No newline at end of file +- [PyPI v1.7.1] https://pypi.org/project/mistralai/1.7.1 - . + +## 2025-05-26 11:05:08 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.8.0] . +### Releases +- [PyPI v1.8.0] https://pypi.org/project/mistralai/1.8.0 - . 
\ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 1d7bd53d..4bd94805 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.7.1" +version = "1.8.0" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index ade7e11c..de6b8db8 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.7.1" +__version__: str = "1.8.0" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.7.1 2.548.6 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.8.0 2.548.6 1.0.0 mistralai" try: if __package__ is not None: From 53079a43197a20e76faad71db2174ca02785586d Mon Sep 17 00:00:00 2001 From: alex-ac Date: Mon, 26 May 2025 13:27:21 +0200 Subject: [PATCH 126/223] feat: run sdk generation --- .speakeasy/gen.lock | 345 ++- .speakeasy/workflow.lock | 11 +- README.md | 31 +- docs/models/agent.md | 19 + docs/models/agentconversation.md | 14 + docs/models/agentconversationobject.md | 8 + docs/models/agentcreationrequest.md | 14 + docs/models/agentcreationrequesttools.md | 41 + docs/models/agenthandoffdoneevent.md | 13 + docs/models/agenthandoffdoneeventtype.md | 8 + docs/models/agenthandoffentry.md | 16 + docs/models/agenthandoffentryobject.md | 8 + docs/models/agenthandoffentrytype.md | 8 + docs/models/agenthandoffstartedevent.md | 13 + docs/models/agenthandoffstartedeventtype.md | 8 + docs/models/agentobject.md | 8 + docs/models/agentsapiv1agentsgetrequest.md | 8 + docs/models/agentsapiv1agentslistrequest.md | 9 + docs/models/agentsapiv1agentsupdaterequest.md | 9 + .../agentsapiv1agentsupdateversionrequest.md | 9 + .../agentsapiv1conversationsappendrequest.md | 9 + 
...tsapiv1conversationsappendstreamrequest.md | 9 + .../agentsapiv1conversationsgetrequest.md | 8 + ...versationsgetresponsev1conversationsget.md | 19 + .../agentsapiv1conversationshistoryrequest.md | 8 + .../agentsapiv1conversationslistrequest.md | 9 + ...agentsapiv1conversationsmessagesrequest.md | 8 + .../agentsapiv1conversationsrestartrequest.md | 9 + ...sapiv1conversationsrestartstreamrequest.md | 9 + docs/models/agenttools.md | 41 + docs/models/agentupdaterequest.md | 14 + docs/models/agentupdaterequesttools.md | 41 + docs/models/builtinconnectors.md | 12 + docs/models/codeinterpretertool.md | 8 + docs/models/codeinterpretertooltype.md | 8 + docs/models/completionargs.md | 19 + docs/models/completionargsstop.md | 17 + docs/models/completionjobout.md | 2 +- docs/models/completionjoboutobject.md | 10 + docs/models/conversationappendrequest.md | 12 + ...nversationappendrequesthandoffexecution.md | 9 + .../models/conversationappendstreamrequest.md | 12 + ...tionappendstreamrequesthandoffexecution.md | 9 + docs/models/conversationevents.md | 9 + docs/models/conversationeventsdata.md | 59 + docs/models/conversationhistory.md | 12 + docs/models/conversationhistoryobject.md | 8 + docs/models/conversationinputs.md | 17 + docs/models/conversationmessages.md | 12 + docs/models/conversationmessagesobject.md | 8 + docs/models/conversationrequest.md | 18 + docs/models/conversationresponse.md | 13 + docs/models/conversationresponseobject.md | 8 + docs/models/conversationrestartrequest.md | 15 + ...versationrestartrequesthandoffexecution.md | 9 + .../conversationrestartstreamrequest.md | 15 + ...ionrestartstreamrequesthandoffexecution.md | 9 + docs/models/conversationstreamrequest.md | 18 + ...nversationstreamrequesthandoffexecution.md | 9 + docs/models/conversationstreamrequesttools.md | 41 + docs/models/conversationusageinfo.md | 12 + docs/models/documentlibrarytool.md | 9 + docs/models/documentlibrarytooltype.md | 8 + docs/models/entries.md | 41 + 
docs/models/functioncallentry.md | 15 + docs/models/functioncallentryarguments.md | 17 + docs/models/functioncallentryobject.md | 8 + docs/models/functioncallentrytype.md | 8 + docs/models/functioncallevent.md | 14 + docs/models/functioncalleventtype.md | 8 + docs/models/functionresultentry.md | 14 + docs/models/functionresultentryobject.md | 8 + docs/models/functionresultentrytype.md | 8 + docs/models/functiontool.md | 9 + docs/models/functiontooltype.md | 8 + docs/models/handoffexecution.md | 9 + docs/models/imagegenerationtool.md | 8 + docs/models/imagegenerationtooltype.md | 8 + docs/models/inputentries.md | 17 + docs/models/messageentries.md | 17 + docs/models/messageinputcontentchunks.md | 29 + docs/models/messageinputentry.md | 16 + docs/models/messageinputentrycontent.md | 17 + docs/models/messageinputentryrole.md | 9 + docs/models/messageinputentrytype.md | 8 + docs/models/messageoutputcontentchunks.md | 35 + docs/models/messageoutputentry.md | 16 + docs/models/messageoutputentrycontent.md | 17 + docs/models/messageoutputentryobject.md | 8 + docs/models/messageoutputentryrole.md | 8 + docs/models/messageoutputentrytype.md | 8 + docs/models/messageoutputevent.md | 16 + docs/models/messageoutputeventcontent.md | 17 + docs/models/messageoutputeventrole.md | 8 + docs/models/messageoutputeventtype.md | 8 + docs/models/modelconversation.md | 17 + docs/models/modelconversationobject.md | 8 + docs/models/modelconversationtools.md | 41 + docs/models/object.md | 8 +- docs/models/outputcontentchunks.md | 35 + docs/models/outputs.md | 29 + docs/models/responsebody.md | 17 + docs/models/responsedoneevent.md | 10 + docs/models/responsedoneeventtype.md | 8 + docs/models/responseerrorevent.md | 11 + docs/models/responseerroreventtype.md | 8 + docs/models/responsestartedevent.md | 10 + docs/models/responsestartedeventtype.md | 8 + docs/models/ssetypes.md | 18 + docs/models/toolexecutiondoneevent.md | 13 + docs/models/toolexecutiondoneeventtype.md | 8 + 
docs/models/toolexecutionentry.md | 14 + docs/models/toolexecutionentryobject.md | 8 + docs/models/toolexecutionentrytype.md | 8 + docs/models/toolexecutionstartedevent.md | 12 + docs/models/toolexecutionstartedeventtype.md | 8 + docs/models/toolfilechunk.md | 12 + docs/models/toolfilechunktype.md | 8 + docs/models/toolreferencechunk.md | 12 + docs/models/toolreferencechunktype.md | 8 + docs/models/tools.md | 41 + docs/models/websearchpremiumtool.md | 8 + docs/models/websearchpremiumtooltype.md | 8 + docs/models/websearchtool.md | 8 + docs/models/websearchtooltype.md | 8 + docs/sdks/beta/README.md | 6 + docs/sdks/conversations/README.md | 466 ++++ docs/sdks/mistralagents/README.md | 227 ++ src/mistralai/beta.py | 20 + src/mistralai/conversations.py | 2454 +++++++++++++++++ src/mistralai/mistral_agents.py | 1158 ++++++++ src/mistralai/models/__init__.py | 471 +++- src/mistralai/models/agent.py | 129 + src/mistralai/models/agentconversation.py | 71 + src/mistralai/models/agentcreationrequest.py | 109 + src/mistralai/models/agenthandoffdoneevent.py | 33 + src/mistralai/models/agenthandoffentry.py | 75 + .../models/agenthandoffstartedevent.py | 33 + .../models/agents_api_v1_agents_getop.py | 16 + .../models/agents_api_v1_agents_listop.py | 24 + .../agents_api_v1_agents_update_versionop.py | 21 + .../models/agents_api_v1_agents_updateop.py | 23 + ...ts_api_v1_conversations_append_streamop.py | 28 + .../agents_api_v1_conversations_appendop.py | 28 + .../agents_api_v1_conversations_getop.py | 33 + .../agents_api_v1_conversations_historyop.py | 16 + .../agents_api_v1_conversations_listop.py | 37 + .../agents_api_v1_conversations_messagesop.py | 16 + ...s_api_v1_conversations_restart_streamop.py | 26 + .../agents_api_v1_conversations_restartop.py | 26 + src/mistralai/models/agentupdaterequest.py | 111 + src/mistralai/models/builtinconnectors.py | 13 + src/mistralai/models/codeinterpretertool.py | 17 + src/mistralai/models/completionargs.py | 100 + 
src/mistralai/models/completionargsstop.py | 13 + src/mistralai/models/completionjobout.py | 6 +- .../models/conversationappendrequest.py | 35 + .../models/conversationappendstreamrequest.py | 37 + src/mistralai/models/conversationevents.py | 72 + src/mistralai/models/conversationhistory.py | 58 + src/mistralai/models/conversationinputs.py | 14 + src/mistralai/models/conversationmessages.py | 28 + src/mistralai/models/conversationrequest.py | 133 + src/mistralai/models/conversationresponse.py | 51 + .../models/conversationrestartrequest.py | 42 + .../conversationrestartstreamrequest.py | 44 + .../models/conversationstreamrequest.py | 135 + src/mistralai/models/conversationusageinfo.py | 63 + src/mistralai/models/documentlibrarytool.py | 22 + src/mistralai/models/functioncallentry.py | 76 + .../models/functioncallentryarguments.py | 15 + src/mistralai/models/functioncallevent.py | 36 + src/mistralai/models/functionresultentry.py | 69 + src/mistralai/models/functiontool.py | 21 + src/mistralai/models/imagegenerationtool.py | 17 + src/mistralai/models/inputentries.py | 18 + src/mistralai/models/messageentries.py | 18 + .../models/messageinputcontentchunks.py | 26 + src/mistralai/models/messageinputentry.py | 89 + .../models/messageoutputcontentchunks.py | 30 + src/mistralai/models/messageoutputentry.py | 100 + src/mistralai/models/messageoutputevent.py | 93 + src/mistralai/models/modelconversation.py | 127 + src/mistralai/models/outputcontentchunks.py | 30 + src/mistralai/models/responsedoneevent.py | 25 + src/mistralai/models/responseerrorevent.py | 27 + src/mistralai/models/responsestartedevent.py | 24 + src/mistralai/models/ssetypes.py | 18 + .../models/toolexecutiondoneevent.py | 34 + src/mistralai/models/toolexecutionentry.py | 70 + .../models/toolexecutionstartedevent.py | 31 + src/mistralai/models/toolfilechunk.py | 61 + src/mistralai/models/toolreferencechunk.py | 61 + src/mistralai/models/websearchpremiumtool.py | 17 + src/mistralai/models/websearchtool.py | 
17 + src/mistralai/sdk.py | 3 + 196 files changed, 9730 insertions(+), 23 deletions(-) create mode 100644 docs/models/agent.md create mode 100644 docs/models/agentconversation.md create mode 100644 docs/models/agentconversationobject.md create mode 100644 docs/models/agentcreationrequest.md create mode 100644 docs/models/agentcreationrequesttools.md create mode 100644 docs/models/agenthandoffdoneevent.md create mode 100644 docs/models/agenthandoffdoneeventtype.md create mode 100644 docs/models/agenthandoffentry.md create mode 100644 docs/models/agenthandoffentryobject.md create mode 100644 docs/models/agenthandoffentrytype.md create mode 100644 docs/models/agenthandoffstartedevent.md create mode 100644 docs/models/agenthandoffstartedeventtype.md create mode 100644 docs/models/agentobject.md create mode 100644 docs/models/agentsapiv1agentsgetrequest.md create mode 100644 docs/models/agentsapiv1agentslistrequest.md create mode 100644 docs/models/agentsapiv1agentsupdaterequest.md create mode 100644 docs/models/agentsapiv1agentsupdateversionrequest.md create mode 100644 docs/models/agentsapiv1conversationsappendrequest.md create mode 100644 docs/models/agentsapiv1conversationsappendstreamrequest.md create mode 100644 docs/models/agentsapiv1conversationsgetrequest.md create mode 100644 docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md create mode 100644 docs/models/agentsapiv1conversationshistoryrequest.md create mode 100644 docs/models/agentsapiv1conversationslistrequest.md create mode 100644 docs/models/agentsapiv1conversationsmessagesrequest.md create mode 100644 docs/models/agentsapiv1conversationsrestartrequest.md create mode 100644 docs/models/agentsapiv1conversationsrestartstreamrequest.md create mode 100644 docs/models/agenttools.md create mode 100644 docs/models/agentupdaterequest.md create mode 100644 docs/models/agentupdaterequesttools.md create mode 100644 docs/models/builtinconnectors.md create mode 100644 
docs/models/codeinterpretertool.md create mode 100644 docs/models/codeinterpretertooltype.md create mode 100644 docs/models/completionargs.md create mode 100644 docs/models/completionargsstop.md create mode 100644 docs/models/completionjoboutobject.md create mode 100644 docs/models/conversationappendrequest.md create mode 100644 docs/models/conversationappendrequesthandoffexecution.md create mode 100644 docs/models/conversationappendstreamrequest.md create mode 100644 docs/models/conversationappendstreamrequesthandoffexecution.md create mode 100644 docs/models/conversationevents.md create mode 100644 docs/models/conversationeventsdata.md create mode 100644 docs/models/conversationhistory.md create mode 100644 docs/models/conversationhistoryobject.md create mode 100644 docs/models/conversationinputs.md create mode 100644 docs/models/conversationmessages.md create mode 100644 docs/models/conversationmessagesobject.md create mode 100644 docs/models/conversationrequest.md create mode 100644 docs/models/conversationresponse.md create mode 100644 docs/models/conversationresponseobject.md create mode 100644 docs/models/conversationrestartrequest.md create mode 100644 docs/models/conversationrestartrequesthandoffexecution.md create mode 100644 docs/models/conversationrestartstreamrequest.md create mode 100644 docs/models/conversationrestartstreamrequesthandoffexecution.md create mode 100644 docs/models/conversationstreamrequest.md create mode 100644 docs/models/conversationstreamrequesthandoffexecution.md create mode 100644 docs/models/conversationstreamrequesttools.md create mode 100644 docs/models/conversationusageinfo.md create mode 100644 docs/models/documentlibrarytool.md create mode 100644 docs/models/documentlibrarytooltype.md create mode 100644 docs/models/entries.md create mode 100644 docs/models/functioncallentry.md create mode 100644 docs/models/functioncallentryarguments.md create mode 100644 docs/models/functioncallentryobject.md create mode 100644 
docs/models/functioncallentrytype.md create mode 100644 docs/models/functioncallevent.md create mode 100644 docs/models/functioncalleventtype.md create mode 100644 docs/models/functionresultentry.md create mode 100644 docs/models/functionresultentryobject.md create mode 100644 docs/models/functionresultentrytype.md create mode 100644 docs/models/functiontool.md create mode 100644 docs/models/functiontooltype.md create mode 100644 docs/models/handoffexecution.md create mode 100644 docs/models/imagegenerationtool.md create mode 100644 docs/models/imagegenerationtooltype.md create mode 100644 docs/models/inputentries.md create mode 100644 docs/models/messageentries.md create mode 100644 docs/models/messageinputcontentchunks.md create mode 100644 docs/models/messageinputentry.md create mode 100644 docs/models/messageinputentrycontent.md create mode 100644 docs/models/messageinputentryrole.md create mode 100644 docs/models/messageinputentrytype.md create mode 100644 docs/models/messageoutputcontentchunks.md create mode 100644 docs/models/messageoutputentry.md create mode 100644 docs/models/messageoutputentrycontent.md create mode 100644 docs/models/messageoutputentryobject.md create mode 100644 docs/models/messageoutputentryrole.md create mode 100644 docs/models/messageoutputentrytype.md create mode 100644 docs/models/messageoutputevent.md create mode 100644 docs/models/messageoutputeventcontent.md create mode 100644 docs/models/messageoutputeventrole.md create mode 100644 docs/models/messageoutputeventtype.md create mode 100644 docs/models/modelconversation.md create mode 100644 docs/models/modelconversationobject.md create mode 100644 docs/models/modelconversationtools.md create mode 100644 docs/models/outputcontentchunks.md create mode 100644 docs/models/outputs.md create mode 100644 docs/models/responsebody.md create mode 100644 docs/models/responsedoneevent.md create mode 100644 docs/models/responsedoneeventtype.md create mode 100644 
docs/models/responseerrorevent.md create mode 100644 docs/models/responseerroreventtype.md create mode 100644 docs/models/responsestartedevent.md create mode 100644 docs/models/responsestartedeventtype.md create mode 100644 docs/models/ssetypes.md create mode 100644 docs/models/toolexecutiondoneevent.md create mode 100644 docs/models/toolexecutiondoneeventtype.md create mode 100644 docs/models/toolexecutionentry.md create mode 100644 docs/models/toolexecutionentryobject.md create mode 100644 docs/models/toolexecutionentrytype.md create mode 100644 docs/models/toolexecutionstartedevent.md create mode 100644 docs/models/toolexecutionstartedeventtype.md create mode 100644 docs/models/toolfilechunk.md create mode 100644 docs/models/toolfilechunktype.md create mode 100644 docs/models/toolreferencechunk.md create mode 100644 docs/models/toolreferencechunktype.md create mode 100644 docs/models/tools.md create mode 100644 docs/models/websearchpremiumtool.md create mode 100644 docs/models/websearchpremiumtooltype.md create mode 100644 docs/models/websearchtool.md create mode 100644 docs/models/websearchtooltype.md create mode 100644 docs/sdks/beta/README.md create mode 100644 docs/sdks/conversations/README.md create mode 100644 docs/sdks/mistralagents/README.md create mode 100644 src/mistralai/beta.py create mode 100644 src/mistralai/conversations.py create mode 100644 src/mistralai/mistral_agents.py create mode 100644 src/mistralai/models/agent.py create mode 100644 src/mistralai/models/agentconversation.py create mode 100644 src/mistralai/models/agentcreationrequest.py create mode 100644 src/mistralai/models/agenthandoffdoneevent.py create mode 100644 src/mistralai/models/agenthandoffentry.py create mode 100644 src/mistralai/models/agenthandoffstartedevent.py create mode 100644 src/mistralai/models/agents_api_v1_agents_getop.py create mode 100644 src/mistralai/models/agents_api_v1_agents_listop.py create mode 100644 
src/mistralai/models/agents_api_v1_agents_update_versionop.py create mode 100644 src/mistralai/models/agents_api_v1_agents_updateop.py create mode 100644 src/mistralai/models/agents_api_v1_conversations_append_streamop.py create mode 100644 src/mistralai/models/agents_api_v1_conversations_appendop.py create mode 100644 src/mistralai/models/agents_api_v1_conversations_getop.py create mode 100644 src/mistralai/models/agents_api_v1_conversations_historyop.py create mode 100644 src/mistralai/models/agents_api_v1_conversations_listop.py create mode 100644 src/mistralai/models/agents_api_v1_conversations_messagesop.py create mode 100644 src/mistralai/models/agents_api_v1_conversations_restart_streamop.py create mode 100644 src/mistralai/models/agents_api_v1_conversations_restartop.py create mode 100644 src/mistralai/models/agentupdaterequest.py create mode 100644 src/mistralai/models/builtinconnectors.py create mode 100644 src/mistralai/models/codeinterpretertool.py create mode 100644 src/mistralai/models/completionargs.py create mode 100644 src/mistralai/models/completionargsstop.py create mode 100644 src/mistralai/models/conversationappendrequest.py create mode 100644 src/mistralai/models/conversationappendstreamrequest.py create mode 100644 src/mistralai/models/conversationevents.py create mode 100644 src/mistralai/models/conversationhistory.py create mode 100644 src/mistralai/models/conversationinputs.py create mode 100644 src/mistralai/models/conversationmessages.py create mode 100644 src/mistralai/models/conversationrequest.py create mode 100644 src/mistralai/models/conversationresponse.py create mode 100644 src/mistralai/models/conversationrestartrequest.py create mode 100644 src/mistralai/models/conversationrestartstreamrequest.py create mode 100644 src/mistralai/models/conversationstreamrequest.py create mode 100644 src/mistralai/models/conversationusageinfo.py create mode 100644 src/mistralai/models/documentlibrarytool.py create mode 100644 
src/mistralai/models/functioncallentry.py create mode 100644 src/mistralai/models/functioncallentryarguments.py create mode 100644 src/mistralai/models/functioncallevent.py create mode 100644 src/mistralai/models/functionresultentry.py create mode 100644 src/mistralai/models/functiontool.py create mode 100644 src/mistralai/models/imagegenerationtool.py create mode 100644 src/mistralai/models/inputentries.py create mode 100644 src/mistralai/models/messageentries.py create mode 100644 src/mistralai/models/messageinputcontentchunks.py create mode 100644 src/mistralai/models/messageinputentry.py create mode 100644 src/mistralai/models/messageoutputcontentchunks.py create mode 100644 src/mistralai/models/messageoutputentry.py create mode 100644 src/mistralai/models/messageoutputevent.py create mode 100644 src/mistralai/models/modelconversation.py create mode 100644 src/mistralai/models/outputcontentchunks.py create mode 100644 src/mistralai/models/responsedoneevent.py create mode 100644 src/mistralai/models/responseerrorevent.py create mode 100644 src/mistralai/models/responsestartedevent.py create mode 100644 src/mistralai/models/ssetypes.py create mode 100644 src/mistralai/models/toolexecutiondoneevent.py create mode 100644 src/mistralai/models/toolexecutionentry.py create mode 100644 src/mistralai/models/toolexecutionstartedevent.py create mode 100644 src/mistralai/models/toolfilechunk.py create mode 100644 src/mistralai/models/toolreferencechunk.py create mode 100644 src/mistralai/models/websearchpremiumtool.py create mode 100644 src/mistralai/models/websearchtool.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index dc68a842..80be7b20 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,7 +1,7 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 3589e9f1ea5775264c5c8e0887b4ea0e + docChecksum: e9c447db719018a5721988252c09c2dc docVersion: 1.0.0 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 @@ -45,6 
+45,32 @@ generatedFiles: - .python-version - .vscode/settings.json - USAGE.md + - docs/models/agent.md + - docs/models/agentconversation.md + - docs/models/agentconversationobject.md + - docs/models/agentcreationrequest.md + - docs/models/agentcreationrequesttools.md + - docs/models/agenthandoffdoneevent.md + - docs/models/agenthandoffdoneeventtype.md + - docs/models/agenthandoffentry.md + - docs/models/agenthandoffentryobject.md + - docs/models/agenthandoffentrytype.md + - docs/models/agenthandoffstartedevent.md + - docs/models/agenthandoffstartedeventtype.md + - docs/models/agentobject.md + - docs/models/agentsapiv1agentsgetrequest.md + - docs/models/agentsapiv1agentslistrequest.md + - docs/models/agentsapiv1agentsupdaterequest.md + - docs/models/agentsapiv1agentsupdateversionrequest.md + - docs/models/agentsapiv1conversationsappendrequest.md + - docs/models/agentsapiv1conversationsappendstreamrequest.md + - docs/models/agentsapiv1conversationsgetrequest.md + - docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md + - docs/models/agentsapiv1conversationshistoryrequest.md + - docs/models/agentsapiv1conversationslistrequest.md + - docs/models/agentsapiv1conversationsmessagesrequest.md + - docs/models/agentsapiv1conversationsrestartrequest.md + - docs/models/agentsapiv1conversationsrestartstreamrequest.md - docs/models/agentscompletionrequest.md - docs/models/agentscompletionrequestmessages.md - docs/models/agentscompletionrequeststop.md @@ -53,6 +79,9 @@ generatedFiles: - docs/models/agentscompletionstreamrequestmessages.md - docs/models/agentscompletionstreamrequeststop.md - docs/models/agentscompletionstreamrequesttoolchoice.md + - docs/models/agenttools.md + - docs/models/agentupdaterequest.md + - docs/models/agentupdaterequesttools.md - docs/models/apiendpoint.md - docs/models/archiveftmodelout.md - docs/models/archiveftmodeloutobject.md @@ -68,6 +97,7 @@ generatedFiles: - docs/models/batchjobsout.md - docs/models/batchjobsoutobject.md - 
docs/models/batchjobstatus.md + - docs/models/builtinconnectors.md - docs/models/chatclassificationrequest.md - docs/models/chatcompletionchoice.md - docs/models/chatcompletionrequest.md @@ -101,6 +131,10 @@ generatedFiles: - docs/models/classifiertargetout.md - docs/models/classifiertrainingparameters.md - docs/models/classifiertrainingparametersin.md + - docs/models/codeinterpretertool.md + - docs/models/codeinterpretertooltype.md + - docs/models/completionargs.md + - docs/models/completionargsstop.md - docs/models/completionchunk.md - docs/models/completiondetailedjobout.md - docs/models/completiondetailedjoboutintegrations.md @@ -112,24 +146,50 @@ generatedFiles: - docs/models/completionftmodelout.md - docs/models/completionftmodeloutobject.md - docs/models/completionjobout.md + - docs/models/completionjoboutobject.md - docs/models/completionresponsestreamchoice.md - docs/models/completionresponsestreamchoicefinishreason.md - docs/models/completiontrainingparameters.md - docs/models/completiontrainingparametersin.md - docs/models/content.md - docs/models/contentchunk.md + - docs/models/conversationappendrequest.md + - docs/models/conversationappendrequesthandoffexecution.md + - docs/models/conversationappendstreamrequest.md + - docs/models/conversationappendstreamrequesthandoffexecution.md + - docs/models/conversationevents.md + - docs/models/conversationeventsdata.md + - docs/models/conversationhistory.md + - docs/models/conversationhistoryobject.md + - docs/models/conversationinputs.md + - docs/models/conversationmessages.md + - docs/models/conversationmessagesobject.md + - docs/models/conversationrequest.md + - docs/models/conversationresponse.md + - docs/models/conversationresponseobject.md + - docs/models/conversationrestartrequest.md + - docs/models/conversationrestartrequesthandoffexecution.md + - docs/models/conversationrestartstreamrequest.md + - docs/models/conversationrestartstreamrequesthandoffexecution.md + - 
docs/models/conversationstreamrequest.md + - docs/models/conversationstreamrequesthandoffexecution.md + - docs/models/conversationstreamrequesttools.md + - docs/models/conversationusageinfo.md - docs/models/data.md - docs/models/deletefileout.md - docs/models/deletemodelout.md - docs/models/deletemodelv1modelsmodeliddeleterequest.md - docs/models/deltamessage.md - docs/models/document.md + - docs/models/documentlibrarytool.md + - docs/models/documentlibrarytooltype.md - docs/models/documenturlchunk.md - docs/models/documenturlchunktype.md - docs/models/embeddingrequest.md - docs/models/embeddingrequestinputs.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md + - docs/models/entries.md - docs/models/eventout.md - docs/models/file.md - docs/models/filepurpose.md @@ -154,17 +214,32 @@ generatedFiles: - docs/models/ftmodelcardtype.md - docs/models/function.md - docs/models/functioncall.md + - docs/models/functioncallentry.md + - docs/models/functioncallentryarguments.md + - docs/models/functioncallentryobject.md + - docs/models/functioncallentrytype.md + - docs/models/functioncallevent.md + - docs/models/functioncalleventtype.md - docs/models/functionname.md + - docs/models/functionresultentry.md + - docs/models/functionresultentryobject.md + - docs/models/functionresultentrytype.md + - docs/models/functiontool.md + - docs/models/functiontooltype.md - docs/models/githubrepositoryin.md - docs/models/githubrepositoryintype.md - docs/models/githubrepositoryout.md - docs/models/githubrepositoryouttype.md + - docs/models/handoffexecution.md - docs/models/httpvalidationerror.md - docs/models/hyperparameters.md + - docs/models/imagegenerationtool.md + - docs/models/imagegenerationtooltype.md - docs/models/imageurl.md - docs/models/imageurlchunk.md - docs/models/imageurlchunkimageurl.md - docs/models/imageurlchunktype.md + - docs/models/inputentries.md - docs/models/inputs.md - docs/models/instructrequest.md - docs/models/instructrequestinputs.md @@ 
-199,9 +274,28 @@ generatedFiles: - docs/models/legacyjobmetadataoutobject.md - docs/models/listfilesout.md - docs/models/loc.md + - docs/models/messageentries.md + - docs/models/messageinputcontentchunks.md + - docs/models/messageinputentry.md + - docs/models/messageinputentrycontent.md + - docs/models/messageinputentryrole.md + - docs/models/messageinputentrytype.md + - docs/models/messageoutputcontentchunks.md + - docs/models/messageoutputentry.md + - docs/models/messageoutputentrycontent.md + - docs/models/messageoutputentryobject.md + - docs/models/messageoutputentryrole.md + - docs/models/messageoutputentrytype.md + - docs/models/messageoutputevent.md + - docs/models/messageoutputeventcontent.md + - docs/models/messageoutputeventrole.md + - docs/models/messageoutputeventtype.md - docs/models/messages.md - docs/models/metricout.md - docs/models/modelcapabilities.md + - docs/models/modelconversation.md + - docs/models/modelconversationobject.md + - docs/models/modelconversationtools.md - docs/models/modellist.md - docs/models/modeltype.md - docs/models/moderationobject.md @@ -214,14 +308,23 @@ generatedFiles: - docs/models/ocrresponse.md - docs/models/ocrusageinfo.md - docs/models/one.md + - docs/models/outputcontentchunks.md + - docs/models/outputs.md - docs/models/prediction.md - docs/models/queryparamstatus.md - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/repositories.md - docs/models/response1.md + - docs/models/responsebody.md + - docs/models/responsedoneevent.md + - docs/models/responsedoneeventtype.md + - docs/models/responseerrorevent.md + - docs/models/responseerroreventtype.md - docs/models/responseformat.md - docs/models/responseformats.md + - docs/models/responsestartedevent.md + - docs/models/responsestartedeventtype.md - docs/models/retrievefileout.md - docs/models/retrievemodelv1modelsmodelidgetrequest.md - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md @@ -229,6 +332,7 @@ 
generatedFiles: - docs/models/sampletype.md - docs/models/security.md - docs/models/source.md + - docs/models/ssetypes.md - docs/models/status.md - docs/models/stop.md - docs/models/systemmessage.md @@ -239,9 +343,21 @@ generatedFiles: - docs/models/toolcall.md - docs/models/toolchoice.md - docs/models/toolchoiceenum.md + - docs/models/toolexecutiondoneevent.md + - docs/models/toolexecutiondoneeventtype.md + - docs/models/toolexecutionentry.md + - docs/models/toolexecutionentryobject.md + - docs/models/toolexecutionentrytype.md + - docs/models/toolexecutionstartedevent.md + - docs/models/toolexecutionstartedeventtype.md + - docs/models/toolfilechunk.md + - docs/models/toolfilechunktype.md - docs/models/toolmessage.md - docs/models/toolmessagecontent.md - docs/models/toolmessagerole.md + - docs/models/toolreferencechunk.md + - docs/models/toolreferencechunktype.md + - docs/models/tools.md - docs/models/tooltypes.md - docs/models/trainingfile.md - docs/models/two.md @@ -260,16 +376,23 @@ generatedFiles: - docs/models/wandbintegrationout.md - docs/models/wandbintegrationouttype.md - docs/models/wandbintegrationtype.md + - docs/models/websearchpremiumtool.md + - docs/models/websearchpremiumtooltype.md + - docs/models/websearchtool.md + - docs/models/websearchtooltype.md - docs/sdks/agents/README.md - docs/sdks/batch/README.md + - docs/sdks/beta/README.md - docs/sdks/chat/README.md - docs/sdks/classifiers/README.md + - docs/sdks/conversations/README.md - docs/sdks/embeddings/README.md - docs/sdks/files/README.md - docs/sdks/fim/README.md - docs/sdks/finetuning/README.md - docs/sdks/jobs/README.md - docs/sdks/mistral/README.md + - docs/sdks/mistralagents/README.md - docs/sdks/mistraljobs/README.md - docs/sdks/models/README.md - docs/sdks/ocr/README.md @@ -286,18 +409,40 @@ generatedFiles: - src/mistralai/agents.py - src/mistralai/basesdk.py - src/mistralai/batch.py + - src/mistralai/beta.py - src/mistralai/chat.py - src/mistralai/classifiers.py + - 
src/mistralai/conversations.py - src/mistralai/embeddings.py - src/mistralai/files.py - src/mistralai/fim.py - src/mistralai/fine_tuning.py - src/mistralai/httpclient.py - src/mistralai/jobs.py + - src/mistralai/mistral_agents.py - src/mistralai/mistral_jobs.py - src/mistralai/models/__init__.py + - src/mistralai/models/agent.py + - src/mistralai/models/agentconversation.py + - src/mistralai/models/agentcreationrequest.py + - src/mistralai/models/agenthandoffdoneevent.py + - src/mistralai/models/agenthandoffentry.py + - src/mistralai/models/agenthandoffstartedevent.py + - src/mistralai/models/agents_api_v1_agents_getop.py + - src/mistralai/models/agents_api_v1_agents_listop.py + - src/mistralai/models/agents_api_v1_agents_update_versionop.py + - src/mistralai/models/agents_api_v1_agents_updateop.py + - src/mistralai/models/agents_api_v1_conversations_append_streamop.py + - src/mistralai/models/agents_api_v1_conversations_appendop.py + - src/mistralai/models/agents_api_v1_conversations_getop.py + - src/mistralai/models/agents_api_v1_conversations_historyop.py + - src/mistralai/models/agents_api_v1_conversations_listop.py + - src/mistralai/models/agents_api_v1_conversations_messagesop.py + - src/mistralai/models/agents_api_v1_conversations_restart_streamop.py + - src/mistralai/models/agents_api_v1_conversations_restartop.py - src/mistralai/models/agentscompletionrequest.py - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/agentupdaterequest.py - src/mistralai/models/apiendpoint.py - src/mistralai/models/archiveftmodelout.py - src/mistralai/models/assistantmessage.py @@ -307,6 +452,7 @@ generatedFiles: - src/mistralai/models/batchjobout.py - src/mistralai/models/batchjobsout.py - src/mistralai/models/batchjobstatus.py + - src/mistralai/models/builtinconnectors.py - src/mistralai/models/chatclassificationrequest.py - src/mistralai/models/chatcompletionchoice.py - src/mistralai/models/chatcompletionrequest.py @@ -324,6 +470,9 @@ 
generatedFiles: - src/mistralai/models/classifiertargetout.py - src/mistralai/models/classifiertrainingparameters.py - src/mistralai/models/classifiertrainingparametersin.py + - src/mistralai/models/codeinterpretertool.py + - src/mistralai/models/completionargs.py + - src/mistralai/models/completionargsstop.py - src/mistralai/models/completionchunk.py - src/mistralai/models/completiondetailedjobout.py - src/mistralai/models/completionevent.py @@ -333,10 +482,23 @@ generatedFiles: - src/mistralai/models/completiontrainingparameters.py - src/mistralai/models/completiontrainingparametersin.py - src/mistralai/models/contentchunk.py + - src/mistralai/models/conversationappendrequest.py + - src/mistralai/models/conversationappendstreamrequest.py + - src/mistralai/models/conversationevents.py + - src/mistralai/models/conversationhistory.py + - src/mistralai/models/conversationinputs.py + - src/mistralai/models/conversationmessages.py + - src/mistralai/models/conversationrequest.py + - src/mistralai/models/conversationresponse.py + - src/mistralai/models/conversationrestartrequest.py + - src/mistralai/models/conversationrestartstreamrequest.py + - src/mistralai/models/conversationstreamrequest.py + - src/mistralai/models/conversationusageinfo.py - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py - src/mistralai/models/deletefileout.py - src/mistralai/models/deletemodelout.py - src/mistralai/models/deltamessage.py + - src/mistralai/models/documentlibrarytool.py - src/mistralai/models/documenturlchunk.py - src/mistralai/models/embeddingrequest.py - src/mistralai/models/embeddingresponse.py @@ -360,12 +522,19 @@ generatedFiles: - src/mistralai/models/ftmodelcard.py - src/mistralai/models/function.py - src/mistralai/models/functioncall.py + - src/mistralai/models/functioncallentry.py + - src/mistralai/models/functioncallentryarguments.py + - src/mistralai/models/functioncallevent.py - src/mistralai/models/functionname.py + - 
src/mistralai/models/functionresultentry.py + - src/mistralai/models/functiontool.py - src/mistralai/models/githubrepositoryin.py - src/mistralai/models/githubrepositoryout.py - src/mistralai/models/httpvalidationerror.py + - src/mistralai/models/imagegenerationtool.py - src/mistralai/models/imageurl.py - src/mistralai/models/imageurlchunk.py + - src/mistralai/models/inputentries.py - src/mistralai/models/inputs.py - src/mistralai/models/instructrequest.py - src/mistralai/models/jobin.py @@ -385,8 +554,15 @@ generatedFiles: - src/mistralai/models/jsonschema.py - src/mistralai/models/legacyjobmetadataout.py - src/mistralai/models/listfilesout.py + - src/mistralai/models/messageentries.py + - src/mistralai/models/messageinputcontentchunks.py + - src/mistralai/models/messageinputentry.py + - src/mistralai/models/messageoutputcontentchunks.py + - src/mistralai/models/messageoutputentry.py + - src/mistralai/models/messageoutputevent.py - src/mistralai/models/metricout.py - src/mistralai/models/modelcapabilities.py + - src/mistralai/models/modelconversation.py - src/mistralai/models/modellist.py - src/mistralai/models/moderationobject.py - src/mistralai/models/moderationresponse.py @@ -396,23 +572,33 @@ generatedFiles: - src/mistralai/models/ocrrequest.py - src/mistralai/models/ocrresponse.py - src/mistralai/models/ocrusageinfo.py + - src/mistralai/models/outputcontentchunks.py - src/mistralai/models/prediction.py - src/mistralai/models/referencechunk.py + - src/mistralai/models/responsedoneevent.py + - src/mistralai/models/responseerrorevent.py - src/mistralai/models/responseformat.py - src/mistralai/models/responseformats.py + - src/mistralai/models/responsestartedevent.py - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py - src/mistralai/models/retrievefileout.py - src/mistralai/models/sampletype.py - src/mistralai/models/sdkerror.py - src/mistralai/models/security.py - src/mistralai/models/source.py + - src/mistralai/models/ssetypes.py - 
src/mistralai/models/systemmessage.py - src/mistralai/models/textchunk.py - src/mistralai/models/tool.py - src/mistralai/models/toolcall.py - src/mistralai/models/toolchoice.py - src/mistralai/models/toolchoiceenum.py + - src/mistralai/models/toolexecutiondoneevent.py + - src/mistralai/models/toolexecutionentry.py + - src/mistralai/models/toolexecutionstartedevent.py + - src/mistralai/models/toolfilechunk.py - src/mistralai/models/toolmessage.py + - src/mistralai/models/toolreferencechunk.py - src/mistralai/models/tooltypes.py - src/mistralai/models/trainingfile.py - src/mistralai/models/unarchiveftmodelout.py @@ -423,6 +609,8 @@ generatedFiles: - src/mistralai/models/validationerror.py - src/mistralai/models/wandbintegration.py - src/mistralai/models/wandbintegrationout.py + - src/mistralai/models/websearchpremiumtool.py + - src/mistralai/models/websearchtool.py - src/mistralai/models_.py - src/mistralai/ocr.py - src/mistralai/py.typed @@ -735,5 +923,160 @@ examples: application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "CX-9", "results": [{"key": {"scores": {"key": 4386.53, "key1": 2974.85}}, "key1": {"scores": {"key": 7100.52, "key1": 480.47}}}]} "422": application/json: {} + agents_api_v1_conversations_start: + speakeasy-default-agents-api-v1-conversations-start: + requestBody: + application/json: {"inputs": "", "stream": false} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}, {"object": "entry", "type": "message.output", "role": "assistant", "content": [{"type": "tool_reference", "tool": "web_search_premium", "title": ""}, {"document_url": "https://unrealistic-fund.org/", "type": "document_url"}]}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + 
application/json: {} + agents_api_v1_conversations_list: + speakeasy-default-agents-api-v1-conversations-list: + parameters: + query: + page: 0 + page_size: 100 + responses: + "200": + application/json: [{"object": "conversation", "id": "", "created_at": "2025-01-13T10:26:00.433Z", "updated_at": "2023-07-14T18:23:27.528Z", "agent_id": ""}, {"object": "conversation", "id": "", "created_at": "2023-06-17T12:14:27.999Z", "updated_at": "2024-11-27T13:02:27.296Z", "model": "LeBaron"}, {"object": "conversation", "id": "", "created_at": "2025-02-26T06:14:46.641Z", "updated_at": "2023-04-05T09:49:38.010Z", "model": "A8"}] + "422": + application/json: {} + agents_api_v1_conversations_get: + speakeasy-default-agents-api-v1-conversations-get: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation", "id": "", "created_at": "2024-09-04T11:33:52.011Z", "updated_at": "2024-08-19T11:11:04.610Z", "agent_id": ""} + "422": + application/json: {} + agents_api_v1_conversations_append: + speakeasy-default-agents-api-v1-conversations-append: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server"} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}, {"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_history: + speakeasy-default-agents-api-v1-conversations-history: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.history", "conversation_id": "", "entries": [{"object": "entry", "type": 
"message.output", "role": "assistant", "content": [{"type": "tool_file", "tool": "web_search", "file_id": ""}]}]} + "422": + application/json: {} + agents_api_v1_conversations_messages: + speakeasy-default-agents-api-v1-conversations-messages: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.messages", "conversation_id": "", "messages": [{"object": "entry", "type": "message.input", "role": "assistant", "content": ""}, {"object": "entry", "type": "message.input", "role": "assistant", "content": [{"document_url": "https://black-and-white-sauerkraut.biz", "type": "document_url"}, {"type": "tool_file", "tool": "code_interpreter", "file_id": ""}, {"image_url": "https://emotional-couch.org", "type": "image_url"}]}, {"object": "entry", "type": "message.input", "role": "assistant", "content": ""}]} + "422": + application/json: {} + agents_api_v1_conversations_restart: + speakeasy-default-agents-api-v1-conversations-restart: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "from_entry_id": ""} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "tool.execution", "name": "image_generation"}, {"object": "entry", "type": "tool.execution", "name": "web_search_premium"}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_start_stream: + speakeasy-default-agents-api-v1-conversations-start-stream: + requestBody: + application/json: {"inputs": "", "stream": true} + responses: + "422": + application/json: {} + agents_api_v1_conversations_append_stream: + speakeasy-default-agents-api-v1-conversations-append-stream: + parameters: + path: + conversation_id: "" + 
requestBody: + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server"} + responses: + "422": + application/json: {} + agents_api_v1_conversations_restart_stream: + speakeasy-default-agents-api-v1-conversations-restart-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "from_entry_id": ""} + responses: + "422": + application/json: {} + agents_api_v1_agents_create: + speakeasy-default-agents-api-v1-agents-create: + requestBody: + application/json: {"model": "Fiesta", "name": ""} + responses: + "200": + application/json: {"model": "LeBaron", "name": "", "object": "agent", "id": "", "version": 417458, "created_at": "2023-05-28T06:20:22.766Z", "updated_at": "2023-03-17T15:39:20.911Z"} + "422": + application/json: {} + agents_api_v1_agents_list: + speakeasy-default-agents-api-v1-agents-list: + parameters: + query: + page: 0 + page_size: 20 + responses: + "200": + application/json: [{"model": "Golf", "name": "", "object": "agent", "id": "", "version": 678317, "created_at": "2023-07-14T18:23:27.528Z", "updated_at": "2023-09-09T18:28:08.953Z"}, {"model": "Aventador", "name": "", "object": "agent", "id": "", "version": 635532, "created_at": "2024-12-01T18:25:37.169Z", "updated_at": "2023-01-20T06:21:22.156Z"}, {"model": "Model T", "name": "", "object": "agent", "id": "", "version": 86140, "created_at": "2023-03-17T01:57:00.187Z", "updated_at": "2025-01-24T00:05:25.844Z"}] + "422": + application/json: {} + agents_api_v1_agents_get: + speakeasy-default-agents-api-v1-agents-get: + parameters: + path: + agent_id: "" + responses: + "200": + application/json: {"model": "Model S", "name": "", "object": "agent", "id": "", "version": 558834, "created_at": "2024-08-19T11:11:04.610Z", "updated_at": "2024-07-25T06:33:15.810Z"} + "422": + application/json: {} + agents_api_v1_agents_update: + speakeasy-default-agents-api-v1-agents-update: 
+ parameters: + path: + agent_id: "" + requestBody: + application/json: {} + responses: + "200": + application/json: {"model": "Sentra", "name": "", "object": "agent", "id": "", "version": 597129, "created_at": "2024-01-13T16:52:57.274Z", "updated_at": "2025-12-22T15:27:45.882Z"} + "422": + application/json: {} + agents_api_v1_agents_update_version: + speakeasy-default-agents-api-v1-agents-update-version: + parameters: + path: + agent_id: "" + query: + version: 193920 + responses: + "200": + application/json: {"model": "Mercielago", "name": "", "object": "agent", "id": "", "version": 253661, "created_at": "2023-02-14T22:44:06.703Z", "updated_at": "2025-12-15T06:22:04.120Z"} + "422": + application/json: {} examplesVersion: 1.0.0 generatedTests: {} diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 4c5198f5..d0361942 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,10 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:14f5a88b723582e80ead33d129f287568eed05815a9437b7ff5c890ca4c93318 - sourceBlobDigest: sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 + sourceRevisionDigest: sha256:f2590d9933e1e9208fa5b8e509b671e6a86907268bcd5dad41dc4179e20c5b69 + sourceBlobDigest: sha256:3026ed65da39c94e9787697305e7e059bec5cff09bceeddc6e68c289cfaeb592 tags: - latest - - speakeasy-sdk-regen-1748257524 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +36,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:14f5a88b723582e80ead33d129f287568eed05815a9437b7ff5c890ca4c93318 - sourceBlobDigest: sha256:230b4f22197e202aebd70f8628844a27fe70b9b27569dbc3338d3e7d5442cb88 + sourceRevisionDigest: sha256:f2590d9933e1e9208fa5b8e509b671e6a86907268bcd5dad41dc4179e20c5b69 + sourceBlobDigest: sha256:3026ed65da39c94e9787697305e7e059bec5cff09bceeddc6e68c289cfaeb592 codeSamplesNamespace: 
mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:ac98d7bc86d476552e342173fdcb5735ee9c6fc0fa41820fd73eef137fe07bac + codeSamplesRevisionDigest: sha256:bd4031e558c0426c02f2a4f3bb1642068047aa555e0f9cbbc70de74ff7ec04ec workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/README.md b/README.md index d5b265b6..9b8e3c4f 100644 --- a/README.md +++ b/README.md @@ -432,6 +432,30 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [get](docs/sdks/mistraljobs/README.md#get) - Get Batch Job * [cancel](docs/sdks/mistraljobs/README.md#cancel) - Cancel Batch Job +### [beta](docs/sdks/beta/README.md) + + +#### [beta.agents](docs/sdks/mistralagents/README.md) + +* [create](docs/sdks/mistralagents/README.md#create) - Create a agent that can be used within a conversation. +* [list](docs/sdks/mistralagents/README.md#list) - List agent entities. +* [get](docs/sdks/mistralagents/README.md#get) - Retrieve an agent entity. +* [update](docs/sdks/mistralagents/README.md#update) - Update an agent entity. +* [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. + +#### [beta.conversations](docs/sdks/conversations/README.md) + +* [start](docs/sdks/conversations/README.md#start) - Create a conversation and append entries to it. +* [list](docs/sdks/conversations/README.md#list) - List all created conversations. +* [get](docs/sdks/conversations/README.md#get) - Retrieve a conversation information. +* [append](docs/sdks/conversations/README.md#append) - Append new entries to an existing conversation. +* [get_history](docs/sdks/conversations/README.md#get_history) - Retrieve all entries in a conversation. +* [get_messages](docs/sdks/conversations/README.md#get_messages) - Retrieve all messages in a conversation. +* [restart](docs/sdks/conversations/README.md#restart) - Restart a conversation starting from a given entry. 
+* [start_stream](docs/sdks/conversations/README.md#start_stream) - Create a conversation and append entries to it. +* [append_stream](docs/sdks/conversations/README.md#append_stream) - Append new entries to an existing conversation. +* [restart_stream](docs/sdks/conversations/README.md#restart_stream) - Restart a conversation starting from a given entry. + ### [chat](docs/sdks/chat/README.md) * [complete](docs/sdks/chat/README.md#complete) - Chat Completion @@ -511,12 +535,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.stream(model="mistral-small-latest", messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ]) + res = mistral.beta.conversations.start_stream(inputs="") with res as event_stream: for event in event_stream: diff --git a/docs/models/agent.md b/docs/models/agent.md new file mode 100644 index 00000000..9a64fb68 --- /dev/null +++ b/docs/models/agent.md @@ -0,0 +1,19 @@ +# Agent + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during 
the conversation. | +| `tools` | List[[models.AgentTools](../models/agenttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `object` | [Optional[models.AgentObject]](../models/agentobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversation.md b/docs/models/agentconversation.md new file mode 100644 index 00000000..93dde728 --- /dev/null +++ b/docs/models/agentconversation.md @@ -0,0 +1,14 @@ +# AgentConversation + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. 
| +| `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversationobject.md b/docs/models/agentconversationobject.md new file mode 100644 index 00000000..ea7cc75c --- /dev/null +++ b/docs/models/agentconversationobject.md @@ -0,0 +1,8 @@ +# AgentConversationObject + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `CONVERSATION` | conversation | \ No newline at end of file diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md new file mode 100644 index 00000000..324ff25c --- /dev/null +++ b/docs/models/agentcreationrequest.md @@ -0,0 +1,14 @@ +# AgentCreationRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentCreationRequestTools](../models/agentcreationrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentcreationrequesttools.md b/docs/models/agentcreationrequesttools.md new file mode 100644 index 00000000..c2525850 --- /dev/null +++ b/docs/models/agentcreationrequesttools.md @@ -0,0 +1,41 @@ +# AgentCreationRequestTools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/agenthandoffdoneevent.md b/docs/models/agenthandoffdoneevent.md new file mode 100644 index 00000000..a8a74ec0 --- /dev/null +++ b/docs/models/agenthandoffdoneevent.md @@ -0,0 +1,13 @@ +# AgentHandoffDoneEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | 
:heavy_check_mark: | N/A | +| `type` | [Optional[models.AgentHandoffDoneEventType]](../models/agenthandoffdoneeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffdoneeventtype.md b/docs/models/agenthandoffdoneeventtype.md new file mode 100644 index 00000000..c864ce43 --- /dev/null +++ b/docs/models/agenthandoffdoneeventtype.md @@ -0,0 +1,8 @@ +# AgentHandoffDoneEventType + + +## Values + +| Name | Value | +| -------------------- | -------------------- | +| `AGENT_HANDOFF_DONE` | agent.handoff.done | \ No newline at end of file diff --git a/docs/models/agenthandoffentry.md b/docs/models/agenthandoffentry.md new file mode 100644 index 00000000..327f8048 --- /dev/null +++ b/docs/models/agenthandoffentry.md @@ -0,0 +1,16 @@ +# AgentHandoffEntry + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.AgentHandoffEntryObject]](../models/agenthandoffentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.AgentHandoffEntryType]](../models/agenthandoffentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffentryobject.md b/docs/models/agenthandoffentryobject.md new file mode 100644 index 00000000..4bb876fb --- /dev/null +++ b/docs/models/agenthandoffentryobject.md @@ -0,0 +1,8 @@ +# AgentHandoffEntryObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/agenthandoffentrytype.md b/docs/models/agenthandoffentrytype.md new file mode 100644 index 00000000..527ebceb --- /dev/null +++ b/docs/models/agenthandoffentrytype.md @@ -0,0 +1,8 @@ +# AgentHandoffEntryType + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `AGENT_HANDOFF` | agent.handoff | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedevent.md b/docs/models/agenthandoffstartedevent.md new file mode 100644 index 00000000..f99ed45d --- /dev/null +++ b/docs/models/agenthandoffstartedevent.md @@ -0,0 +1,13 @@ +# AgentHandoffStartedEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | +| `type` | 
[Optional[models.AgentHandoffStartedEventType]](../models/agenthandoffstartedeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedeventtype.md b/docs/models/agenthandoffstartedeventtype.md new file mode 100644 index 00000000..4ffaff15 --- /dev/null +++ b/docs/models/agenthandoffstartedeventtype.md @@ -0,0 +1,8 @@ +# AgentHandoffStartedEventType + + +## Values + +| Name | Value | +| ----------------------- | ----------------------- | +| `AGENT_HANDOFF_STARTED` | agent.handoff.started | \ No newline at end of file diff --git a/docs/models/agentobject.md b/docs/models/agentobject.md new file mode 100644 index 00000000..70e143b0 --- /dev/null +++ b/docs/models/agentobject.md @@ -0,0 +1,8 @@ +# AgentObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `AGENT` | agent | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md new file mode 100644 index 00000000..b46ac23d --- /dev/null +++ b/docs/models/agentsapiv1agentsgetrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1AgentsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistrequest.md b/docs/models/agentsapiv1agentslistrequest.md new file mode 100644 index 00000000..b5bcee62 --- /dev/null +++ b/docs/models/agentsapiv1agentslistrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsListRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| 
`page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsupdaterequest.md b/docs/models/agentsapiv1agentsupdaterequest.md new file mode 100644 index 00000000..f60f8e5b --- /dev/null +++ b/docs/models/agentsapiv1agentsupdaterequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsUpdateRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_update_request` | [models.AgentUpdateRequest](../models/agentupdaterequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsupdateversionrequest.md b/docs/models/agentsapiv1agentsupdateversionrequest.md new file mode 100644 index 00000000..e937acc9 --- /dev/null +++ b/docs/models/agentsapiv1agentsupdateversionrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsUpdateVersionRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsappendrequest.md b/docs/models/agentsapiv1conversationsappendrequest.md new file mode 100644 index 00000000..ac8a00ec --- /dev/null +++ b/docs/models/agentsapiv1conversationsappendrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsAppendRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | 
-------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | +| `conversation_append_request` | [models.ConversationAppendRequest](../models/conversationappendrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsappendstreamrequest.md b/docs/models/agentsapiv1conversationsappendstreamrequest.md new file mode 100644 index 00000000..dbc330f1 --- /dev/null +++ b/docs/models/agentsapiv1conversationsappendstreamrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsAppendStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. 
| +| `conversation_append_stream_request` | [models.ConversationAppendStreamRequest](../models/conversationappendstreamrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetrequest.md b/docs/models/agentsapiv1conversationsgetrequest.md new file mode 100644 index 00000000..0d2d7827 --- /dev/null +++ b/docs/models/agentsapiv1conversationsgetrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md b/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md new file mode 100644 index 00000000..4bc836f3 --- /dev/null +++ b/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md @@ -0,0 +1,19 @@ +# AgentsAPIV1ConversationsGetResponseV1ConversationsGet + +Successful Response + + +## Supported Types + +### `models.ModelConversation` + +```python +value: models.ModelConversation = /* values here */ +``` + +### `models.AgentConversation` + +```python +value: models.AgentConversation = /* values here */ +``` + diff --git a/docs/models/agentsapiv1conversationshistoryrequest.md b/docs/models/agentsapiv1conversationshistoryrequest.md new file mode 100644 index 00000000..f0d4f049 --- /dev/null +++ b/docs/models/agentsapiv1conversationshistoryrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsHistoryRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationslistrequest.md b/docs/models/agentsapiv1conversationslistrequest.md new file mode 100644 
index 00000000..528a055a --- /dev/null +++ b/docs/models/agentsapiv1conversationslistrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsListRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsmessagesrequest.md b/docs/models/agentsapiv1conversationsmessagesrequest.md new file mode 100644 index 00000000..b3189925 --- /dev/null +++ b/docs/models/agentsapiv1conversationsmessagesrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsMessagesRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartrequest.md b/docs/models/agentsapiv1conversationsrestartrequest.md new file mode 100644 index 00000000..11a2fe2e --- /dev/null +++ b/docs/models/agentsapiv1conversationsrestartrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsRestartRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_restart_request` | [models.ConversationRestartRequest](../models/conversationrestartrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartstreamrequest.md 
b/docs/models/agentsapiv1conversationsrestartstreamrequest.md new file mode 100644 index 00000000..4cbb9d6c --- /dev/null +++ b/docs/models/agentsapiv1conversationsrestartstreamrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1ConversationsRestartStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_restart_stream_request` | [models.ConversationRestartStreamRequest](../models/conversationrestartstreamrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenttools.md b/docs/models/agenttools.md new file mode 100644 index 00000000..15891f56 --- /dev/null +++ b/docs/models/agenttools.md @@ -0,0 +1,41 @@ +# AgentTools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/agentupdaterequest.md b/docs/models/agentupdaterequest.md new file mode 100644 index 00000000..9da03d03 --- /dev/null +++ b/docs/models/agentupdaterequest.md @@ -0,0 +1,14 @@ +# AgentUpdateRequest + + 
+## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentUpdateRequestTools](../models/agentupdaterequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentupdaterequesttools.md b/docs/models/agentupdaterequesttools.md new file mode 100644 index 00000000..1752ee68 --- /dev/null +++ b/docs/models/agentupdaterequesttools.md @@ -0,0 +1,41 @@ +# AgentUpdateRequestTools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: 
models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/builtinconnectors.md b/docs/models/builtinconnectors.md new file mode 100644 index 00000000..f96f5044 --- /dev/null +++ b/docs/models/builtinconnectors.md @@ -0,0 +1,12 @@ +# BuiltInConnectors + + +## Values + +| Name | Value | +| -------------------- | -------------------- | +| `WEB_SEARCH` | web_search | +| `WEB_SEARCH_PREMIUM` | web_search_premium | +| `CODE_INTERPRETER` | code_interpreter | +| `IMAGE_GENERATION` | image_generation | +| `DOCUMENT_LIBRARY` | document_library | \ No newline at end of file diff --git a/docs/models/codeinterpretertool.md b/docs/models/codeinterpretertool.md new file mode 100644 index 00000000..d5ad789e --- /dev/null +++ b/docs/models/codeinterpretertool.md @@ -0,0 +1,8 @@ +# CodeInterpreterTool + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | [Optional[models.CodeInterpreterToolType]](../models/codeinterpretertooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/codeinterpretertooltype.md b/docs/models/codeinterpretertooltype.md new file mode 100644 index 00000000..f704b65e --- /dev/null +++ b/docs/models/codeinterpretertooltype.md @@ -0,0 +1,8 @@ +# CodeInterpreterToolType + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `CODE_INTERPRETER` | code_interpreter | \ No newline at end of file diff --git a/docs/models/completionargs.md b/docs/models/completionargs.md new file mode 100644 index 00000000..5f07b673 --- /dev/null +++ b/docs/models/completionargs.md @@ -0,0 +1,19 @@ +# CompletionArgs + +White-listed arguments from the 
completion API + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | +| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `prediction` | [OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | +| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | +| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionargsstop.md b/docs/models/completionargsstop.md new file mode 100644 index 00000000..b93f993e --- /dev/null +++ b/docs/models/completionargsstop.md @@ -0,0 +1,17 @@ +# CompletionArgsStop + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + diff --git a/docs/models/completionjobout.md b/docs/models/completionjobout.md index 381aeb94..7f30f58c 100644 --- a/docs/models/completionjobout.md +++ b/docs/models/completionjobout.md @@ -14,7 +14,7 @@ | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the 
IDs of uploaded files that contain training data. | | `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | -| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `object` | [Optional[models.CompletionJobOutObject]](../models/completionjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | | `integrations` | List[[models.Integrations](../models/integrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | diff --git a/docs/models/completionjoboutobject.md b/docs/models/completionjoboutobject.md new file mode 100644 index 00000000..712b107d --- /dev/null +++ b/docs/models/completionjoboutobject.md @@ -0,0 +1,10 @@ +# CompletionJobOutObject + +The object type of the fine-tuning job. 
+ + +## Values + +| Name | Value | +| ----- | ----- | +| `JOB` | job | \ No newline at end of file diff --git a/docs/models/conversationappendrequest.md b/docs/models/conversationappendrequest.md new file mode 100644 index 00000000..1cdb584b --- /dev/null +++ b/docs/models/conversationappendrequest.md @@ -0,0 +1,12 @@ +# ConversationAppendRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| +| `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file diff --git a/docs/models/conversationappendrequesthandoffexecution.md b/docs/models/conversationappendrequesthandoffexecution.md new file mode 100644 index 00000000..7418b36a --- /dev/null +++ b/docs/models/conversationappendrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationAppendRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationappendstreamrequest.md b/docs/models/conversationappendstreamrequest.md new file mode 100644 index 00000000..a8516ea7 --- /dev/null +++ b/docs/models/conversationappendstreamrequest.md @@ -0,0 +1,12 @@ +# ConversationAppendStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| +| `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file diff --git a/docs/models/conversationappendstreamrequesthandoffexecution.md b/docs/models/conversationappendstreamrequesthandoffexecution.md new file mode 100644 index 00000000..1bbced3e --- /dev/null +++ b/docs/models/conversationappendstreamrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationAppendStreamRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationevents.md b/docs/models/conversationevents.md new file mode 100644 index 00000000..f1e2c4e9 --- /dev/null +++ b/docs/models/conversationevents.md @@ -0,0 +1,9 @@ +# ConversationEvents + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `event` | [models.SSETypes](../models/ssetypes.md) | :heavy_check_mark: | Server side events sent when streaming a conversation response. 
| +| `data` | [models.ConversationEventsData](../models/conversationeventsdata.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationeventsdata.md b/docs/models/conversationeventsdata.md new file mode 100644 index 00000000..81faf197 --- /dev/null +++ b/docs/models/conversationeventsdata.md @@ -0,0 +1,59 @@ +# ConversationEventsData + + +## Supported Types + +### `models.AgentHandoffDoneEvent` + +```python +value: models.AgentHandoffDoneEvent = /* values here */ +``` + +### `models.AgentHandoffStartedEvent` + +```python +value: models.AgentHandoffStartedEvent = /* values here */ +``` + +### `models.ResponseDoneEvent` + +```python +value: models.ResponseDoneEvent = /* values here */ +``` + +### `models.ResponseErrorEvent` + +```python +value: models.ResponseErrorEvent = /* values here */ +``` + +### `models.ResponseStartedEvent` + +```python +value: models.ResponseStartedEvent = /* values here */ +``` + +### `models.FunctionCallEvent` + +```python +value: models.FunctionCallEvent = /* values here */ +``` + +### `models.MessageOutputEvent` + +```python +value: models.MessageOutputEvent = /* values here */ +``` + +### `models.ToolExecutionDoneEvent` + +```python +value: models.ToolExecutionDoneEvent = /* values here */ +``` + +### `models.ToolExecutionStartedEvent` + +```python +value: models.ToolExecutionStartedEvent = /* values here */ +``` + diff --git a/docs/models/conversationhistory.md b/docs/models/conversationhistory.md new file mode 100644 index 00000000..8bcef1de --- /dev/null +++ b/docs/models/conversationhistory.md @@ -0,0 +1,12 @@ +# ConversationHistory + +Retrieve all entries in a conversation. 
+ + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `entries` | List[[models.Entries](../models/entries.md)] | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ConversationHistoryObject]](../models/conversationhistoryobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationhistoryobject.md b/docs/models/conversationhistoryobject.md new file mode 100644 index 00000000..a14e7f9c --- /dev/null +++ b/docs/models/conversationhistoryobject.md @@ -0,0 +1,8 @@ +# ConversationHistoryObject + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `CONVERSATION_HISTORY` | conversation.history | \ No newline at end of file diff --git a/docs/models/conversationinputs.md b/docs/models/conversationinputs.md new file mode 100644 index 00000000..86db40ea --- /dev/null +++ b/docs/models/conversationinputs.md @@ -0,0 +1,17 @@ +# ConversationInputs + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.InputEntries]` + +```python +value: List[models.InputEntries] = /* values here */ +``` + diff --git a/docs/models/conversationmessages.md b/docs/models/conversationmessages.md new file mode 100644 index 00000000..f6a5569f --- /dev/null +++ b/docs/models/conversationmessages.md @@ -0,0 +1,12 @@ +# ConversationMessages + +Similar to the conversation history but only keep the messages + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `messages` | List[[models.MessageEntries](../models/messageentries.md)] | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ConversationMessagesObject]](../models/conversationmessagesobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationmessagesobject.md b/docs/models/conversationmessagesobject.md new file mode 100644 index 00000000..db3a441b --- /dev/null +++ b/docs/models/conversationmessagesobject.md @@ -0,0 +1,8 @@ +# ConversationMessagesObject + + +## Values + +| Name | Value | +| ----------------------- | ----------------------- | +| `CONVERSATION_MESSAGES` | conversation.messages | \ No newline at end of file diff --git a/docs/models/conversationrequest.md b/docs/models/conversationrequest.md new file mode 100644 index 00000000..141533e7 --- /dev/null +++ b/docs/models/conversationrequest.md @@ -0,0 +1,18 @@ +# ConversationRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../models/handoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | 
*OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.Tools](../models/tools.md)] | :heavy_minus_sign: | N/A | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationresponse.md b/docs/models/conversationresponse.md new file mode 100644 index 00000000..3309a08b --- /dev/null +++ b/docs/models/conversationresponse.md @@ -0,0 +1,13 @@ +# ConversationResponse + +The response after appending new entries to the conversation. + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `outputs` | List[[models.Outputs](../models/outputs.md)] | :heavy_check_mark: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ConversationResponseObject]](../models/conversationresponseobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationresponseobject.md b/docs/models/conversationresponseobject.md new file mode 100644 index 00000000..bea66e52 --- /dev/null +++ b/docs/models/conversationresponseobject.md @@ -0,0 +1,8 @@ +# ConversationResponseObject + + +## Values + +| Name | Value | +| 
----------------------- | ----------------------- | +| `CONVERSATION_RESPONSE` | conversation.response | \ No newline at end of file diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md new file mode 100644 index 00000000..15a6ead4 --- /dev/null +++ b/docs/models/conversationrestartrequest.md @@ -0,0 +1,15 @@ +# ConversationRestartRequest + +Request to restart a new conversation from a given entry in the conversation. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| +| `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file diff --git a/docs/models/conversationrestartrequesthandoffexecution.md b/docs/models/conversationrestartrequesthandoffexecution.md new file mode 100644 index 00000000..5790624b --- /dev/null +++ b/docs/models/conversationrestartrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationRestartRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md new file mode 100644 index 00000000..30f3767c --- /dev/null +++ b/docs/models/conversationrestartstreamrequest.md @@ -0,0 +1,15 @@ +# ConversationRestartStreamRequest + +Request to restart a new conversation from a given entry in the conversation. 
+ + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequesthandoffexecution.md b/docs/models/conversationrestartstreamrequesthandoffexecution.md new file mode 100644 index 00000000..97266b43 --- /dev/null +++ b/docs/models/conversationrestartstreamrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationRestartStreamRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationstreamrequest.md b/docs/models/conversationstreamrequest.md new file mode 100644 index 00000000..a571e2af --- /dev/null +++ b/docs/models/conversationstreamrequest.md @@ -0,0 +1,18 @@ +# 
ConversationStreamRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationStreamRequestTools](../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | N/A | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationstreamrequesthandoffexecution.md b/docs/models/conversationstreamrequesthandoffexecution.md new file mode 100644 index 00000000..c98e194c --- /dev/null +++ b/docs/models/conversationstreamrequesthandoffexecution.md @@ -0,0 +1,9 @@ +# ConversationStreamRequestHandoffExecution + + +## Values + +| Name | Value | +| -------- | 
-------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/conversationstreamrequesttools.md b/docs/models/conversationstreamrequesttools.md new file mode 100644 index 00000000..700c8448 --- /dev/null +++ b/docs/models/conversationstreamrequesttools.md @@ -0,0 +1,41 @@ +# ConversationStreamRequestTools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/conversationusageinfo.md b/docs/models/conversationusageinfo.md new file mode 100644 index 00000000..57e26033 --- /dev/null +++ b/docs/models/conversationusageinfo.md @@ -0,0 +1,12 @@ +# ConversationUsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `connector_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `connectors` | Dict[str, *int*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/documentlibrarytool.md b/docs/models/documentlibrarytool.md new file mode 100644 index 00000000..bed4e2c5 --- /dev/null +++ b/docs/models/documentlibrarytool.md @@ -0,0 +1,9 @@ +# 
DocumentLibraryTool + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which to search. | +| `type` | [Optional[models.DocumentLibraryToolType]](../models/documentlibrarytooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/documentlibrarytooltype.md b/docs/models/documentlibrarytooltype.md new file mode 100644 index 00000000..ebd420f6 --- /dev/null +++ b/docs/models/documentlibrarytooltype.md @@ -0,0 +1,8 @@ +# DocumentLibraryToolType + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `DOCUMENT_LIBRARY` | document_library | \ No newline at end of file diff --git a/docs/models/entries.md b/docs/models/entries.md new file mode 100644 index 00000000..8e5a20d0 --- /dev/null +++ b/docs/models/entries.md @@ -0,0 +1,41 @@ +# Entries + + +## Supported Types + +### `models.MessageInputEntry` + +```python +value: models.MessageInputEntry = /* values here */ +``` + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + +### `models.FunctionResultEntry` + +```python +value: models.FunctionResultEntry = /* values here */ +``` + +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/functioncallentry.md b/docs/models/functioncallentry.md new file mode 100644 
index 00000000..55665bad --- /dev/null +++ b/docs/models/functioncallentry.md @@ -0,0 +1,15 @@ +# FunctionCallEntry + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.FunctionCallEntryArguments](../models/functioncallentryarguments.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.FunctionCallEntryObject]](../models/functioncallentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.FunctionCallEntryType]](../models/functioncallentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncallentryarguments.md b/docs/models/functioncallentryarguments.md new file mode 100644 index 00000000..f1f6e39e --- /dev/null +++ b/docs/models/functioncallentryarguments.md @@ -0,0 +1,17 @@ +# FunctionCallEntryArguments + + +## Supported Types + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/functioncallentryobject.md b/docs/models/functioncallentryobject.md new file mode 100644 index 00000000..3cf2e427 --- /dev/null +++ 
b/docs/models/functioncallentryobject.md @@ -0,0 +1,8 @@ +# FunctionCallEntryObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/functioncallentrytype.md b/docs/models/functioncallentrytype.md new file mode 100644 index 00000000..7ea34c52 --- /dev/null +++ b/docs/models/functioncallentrytype.md @@ -0,0 +1,8 @@ +# FunctionCallEntryType + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `FUNCTION_CALL` | function.call | \ No newline at end of file diff --git a/docs/models/functioncallevent.md b/docs/models/functioncallevent.md new file mode 100644 index 00000000..a5162090 --- /dev/null +++ b/docs/models/functioncallevent.md @@ -0,0 +1,14 @@ +# FunctionCallEvent + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.FunctionCallEventType]](../models/functioncalleventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncalleventtype.md b/docs/models/functioncalleventtype.md new file mode 100644 index 00000000..8cf3f038 --- /dev/null +++ b/docs/models/functioncalleventtype.md @@ -0,0 +1,8 @@ +# FunctionCallEventType + + +## Values + +| Name | Value | +| 
--------------------- | --------------------- | +| `FUNCTION_CALL_DELTA` | function.call.delta | \ No newline at end of file diff --git a/docs/models/functionresultentry.md b/docs/models/functionresultentry.md new file mode 100644 index 00000000..5cdcf3eb --- /dev/null +++ b/docs/models/functionresultentry.md @@ -0,0 +1,14 @@ +# FunctionResultEntry + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `result` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.FunctionResultEntryObject]](../models/functionresultentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.FunctionResultEntryType]](../models/functionresultentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functionresultentryobject.md b/docs/models/functionresultentryobject.md new file mode 100644 index 00000000..fe52e0a5 --- /dev/null +++ b/docs/models/functionresultentryobject.md @@ -0,0 +1,8 @@ +# FunctionResultEntryObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/functionresultentrytype.md b/docs/models/functionresultentrytype.md new file mode 100644 index 
00000000..35c94d8e --- /dev/null +++ b/docs/models/functionresultentrytype.md @@ -0,0 +1,8 @@ +# FunctionResultEntryType + + +## Values + +| Name | Value | +| ----------------- | ----------------- | +| `FUNCTION_RESULT` | function.result | \ No newline at end of file diff --git a/docs/models/functiontool.md b/docs/models/functiontool.md new file mode 100644 index 00000000..1332febe --- /dev/null +++ b/docs/models/functiontool.md @@ -0,0 +1,9 @@ +# FunctionTool + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.FunctionToolType]](../models/functiontooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functiontooltype.md b/docs/models/functiontooltype.md new file mode 100644 index 00000000..9c095625 --- /dev/null +++ b/docs/models/functiontooltype.md @@ -0,0 +1,8 @@ +# FunctionToolType + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `FUNCTION` | function | \ No newline at end of file diff --git a/docs/models/handoffexecution.md b/docs/models/handoffexecution.md new file mode 100644 index 00000000..61e7dade --- /dev/null +++ b/docs/models/handoffexecution.md @@ -0,0 +1,9 @@ +# HandoffExecution + + +## Values + +| Name | Value | +| -------- | -------- | +| `CLIENT` | client | +| `SERVER` | server | \ No newline at end of file diff --git a/docs/models/imagegenerationtool.md b/docs/models/imagegenerationtool.md new file mode 100644 index 00000000..b8fc9cf4 --- /dev/null +++ b/docs/models/imagegenerationtool.md @@ -0,0 +1,8 @@ +# ImageGenerationTool + + +## Fields + +| Field | Type | Required | Description | +| 
-------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | [Optional[models.ImageGenerationToolType]](../models/imagegenerationtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/imagegenerationtooltype.md b/docs/models/imagegenerationtooltype.md new file mode 100644 index 00000000..29681b58 --- /dev/null +++ b/docs/models/imagegenerationtooltype.md @@ -0,0 +1,8 @@ +# ImageGenerationToolType + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `IMAGE_GENERATION` | image_generation | \ No newline at end of file diff --git a/docs/models/inputentries.md b/docs/models/inputentries.md new file mode 100644 index 00000000..e1e48279 --- /dev/null +++ b/docs/models/inputentries.md @@ -0,0 +1,17 @@ +# InputEntries + + +## Supported Types + +### `models.MessageInputEntry` + +```python +value: models.MessageInputEntry = /* values here */ +``` + +### `models.FunctionResultEntry` + +```python +value: models.FunctionResultEntry = /* values here */ +``` + diff --git a/docs/models/messageentries.md b/docs/models/messageentries.md new file mode 100644 index 00000000..76256fb9 --- /dev/null +++ b/docs/models/messageentries.md @@ -0,0 +1,17 @@ +# MessageEntries + + +## Supported Types + +### `models.MessageInputEntry` + +```python +value: models.MessageInputEntry = /* values here */ +``` + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + diff --git a/docs/models/messageinputcontentchunks.md b/docs/models/messageinputcontentchunks.md new file mode 100644 index 00000000..50795f0e --- /dev/null +++ b/docs/models/messageinputcontentchunks.md @@ -0,0 +1,29 @@ +# 
MessageInputContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.ToolFileChunk` + +```python +value: models.ToolFileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + diff --git a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md new file mode 100644 index 00000000..a1573ed5 --- /dev/null +++ b/docs/models/messageinputentry.md @@ -0,0 +1,16 @@ +# MessageInputEntry + +Representation of an input message inside the conversation. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentrycontent.md 
b/docs/models/messageinputentrycontent.md new file mode 100644 index 00000000..65e55d97 --- /dev/null +++ b/docs/models/messageinputentrycontent.md @@ -0,0 +1,17 @@ +# MessageInputEntryContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.MessageInputContentChunks]` + +```python +value: List[models.MessageInputContentChunks] = /* values here */ +``` + diff --git a/docs/models/messageinputentryrole.md b/docs/models/messageinputentryrole.md new file mode 100644 index 00000000..f2fdc71d --- /dev/null +++ b/docs/models/messageinputentryrole.md @@ -0,0 +1,9 @@ +# MessageInputEntryRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | +| `USER` | user | \ No newline at end of file diff --git a/docs/models/messageinputentrytype.md b/docs/models/messageinputentrytype.md new file mode 100644 index 00000000..d3378124 --- /dev/null +++ b/docs/models/messageinputentrytype.md @@ -0,0 +1,8 @@ +# MessageInputEntryType + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `MESSAGE_INPUT` | message.input | \ No newline at end of file diff --git a/docs/models/messageoutputcontentchunks.md b/docs/models/messageoutputcontentchunks.md new file mode 100644 index 00000000..5dc74a89 --- /dev/null +++ b/docs/models/messageoutputcontentchunks.md @@ -0,0 +1,35 @@ +# MessageOutputContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.ToolFileChunk` + +```python +value: models.ToolFileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ToolReferenceChunk` + +```python +value: models.ToolReferenceChunk = /* values here */ +``` + diff --git a/docs/models/messageoutputentry.md 
b/docs/models/messageoutputentry.md new file mode 100644 index 00000000..224d043d --- /dev/null +++ b/docs/models/messageoutputentry.md @@ -0,0 +1,16 @@ +# MessageOutputEntry + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `content` | [models.MessageOutputEntryContent](../models/messageoutputentrycontent.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.MessageOutputEntryObject]](../models/messageoutputentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.MessageOutputEntryType]](../models/messageoutputentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.MessageOutputEntryRole]](../models/messageoutputentryrole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputentrycontent.md b/docs/models/messageoutputentrycontent.md new file mode 100644 index 00000000..5206e4eb --- /dev/null +++ b/docs/models/messageoutputentrycontent.md @@ -0,0 +1,17 @@ +# MessageOutputEntryContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `List[models.MessageOutputContentChunks]` + +```python 
+value: List[models.MessageOutputContentChunks] = /* values here */ +``` + diff --git a/docs/models/messageoutputentryobject.md b/docs/models/messageoutputentryobject.md new file mode 100644 index 00000000..bb254c82 --- /dev/null +++ b/docs/models/messageoutputentryobject.md @@ -0,0 +1,8 @@ +# MessageOutputEntryObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/messageoutputentryrole.md b/docs/models/messageoutputentryrole.md new file mode 100644 index 00000000..783ee0aa --- /dev/null +++ b/docs/models/messageoutputentryrole.md @@ -0,0 +1,8 @@ +# MessageOutputEntryRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/messageoutputentrytype.md b/docs/models/messageoutputentrytype.md new file mode 100644 index 00000000..cb4a7a1b --- /dev/null +++ b/docs/models/messageoutputentrytype.md @@ -0,0 +1,8 @@ +# MessageOutputEntryType + + +## Values + +| Name | Value | +| ---------------- | ---------------- | +| `MESSAGE_OUTPUT` | message.output | \ No newline at end of file diff --git a/docs/models/messageoutputevent.md b/docs/models/messageoutputevent.md new file mode 100644 index 00000000..3fe8ac49 --- /dev/null +++ b/docs/models/messageoutputevent.md @@ -0,0 +1,16 @@ +# MessageOutputEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `id` | *str* | :heavy_check_mark: | N/A | +| `content` | [models.MessageOutputEventContent](../models/messageoutputeventcontent.md) | :heavy_check_mark: | N/A | +| `type` | 
[Optional[models.MessageOutputEventType]](../models/messageoutputeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `content_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | [Optional[models.MessageOutputEventRole]](../models/messageoutputeventrole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputeventcontent.md b/docs/models/messageoutputeventcontent.md new file mode 100644 index 00000000..16d8d52f --- /dev/null +++ b/docs/models/messageoutputeventcontent.md @@ -0,0 +1,17 @@ +# MessageOutputEventContent + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `models.OutputContentChunks` + +```python +value: models.OutputContentChunks = /* values here */ +``` + diff --git a/docs/models/messageoutputeventrole.md b/docs/models/messageoutputeventrole.md new file mode 100644 index 00000000..e38c6472 --- /dev/null +++ b/docs/models/messageoutputeventrole.md @@ -0,0 +1,8 @@ +# MessageOutputEventRole + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/messageoutputeventtype.md b/docs/models/messageoutputeventtype.md new file mode 100644 index 00000000..1f43fdcc --- /dev/null +++ b/docs/models/messageoutputeventtype.md @@ -0,0 +1,8 @@ +# MessageOutputEventType + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `MESSAGE_OUTPUT_DELTA` | message.output.delta | \ No newline at end of file diff --git a/docs/models/modelconversation.md b/docs/models/modelconversation.md new file mode 100644 
index 00000000..ffedcc0f --- /dev/null +++ b/docs/models/modelconversation.md @@ -0,0 +1,17 @@ +# ModelConversation + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.ModelConversationTools](../models/modelconversationtools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of what the conversation is about. 
| +| `object` | [Optional[models.ModelConversationObject]](../models/modelconversationobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversationobject.md b/docs/models/modelconversationobject.md new file mode 100644 index 00000000..ead1fa26 --- /dev/null +++ b/docs/models/modelconversationobject.md @@ -0,0 +1,8 @@ +# ModelConversationObject + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `CONVERSATION` | conversation | \ No newline at end of file diff --git a/docs/models/modelconversationtools.md b/docs/models/modelconversationtools.md new file mode 100644 index 00000000..5cc97437 --- /dev/null +++ b/docs/models/modelconversationtools.md @@ -0,0 +1,41 @@ +# ModelConversationTools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/object.md b/docs/models/object.md index ab4c4588..0122c0db 100644 --- a/docs/models/object.md +++ b/docs/models/object.md @@ -1,10 +1,8 @@ # Object -The object type of the fine-tuning job. 
- ## Values -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file +| Name | Value | +| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/outputcontentchunks.md b/docs/models/outputcontentchunks.md new file mode 100644 index 00000000..2da475f7 --- /dev/null +++ b/docs/models/outputcontentchunks.md @@ -0,0 +1,35 @@ +# OutputContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + +### `models.ToolFileChunk` + +```python +value: models.ToolFileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ToolReferenceChunk` + +```python +value: models.ToolReferenceChunk = /* values here */ +``` + diff --git a/docs/models/outputs.md b/docs/models/outputs.md new file mode 100644 index 00000000..7756c627 --- /dev/null +++ b/docs/models/outputs.md @@ -0,0 +1,29 @@ +# Outputs + + +## Supported Types + +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/responsebody.md b/docs/models/responsebody.md new file mode 100644 index 00000000..8a218517 --- /dev/null +++ b/docs/models/responsebody.md @@ -0,0 +1,17 @@ +# ResponseBody + + +## Supported Types + +### `models.ModelConversation` + +```python +value: models.ModelConversation = /* values here */ +``` + +### `models.AgentConversation` + +```python +value: models.AgentConversation = /* values here */ +``` + diff 
--git a/docs/models/responsedoneevent.md b/docs/models/responsedoneevent.md new file mode 100644 index 00000000..b33fa52c --- /dev/null +++ b/docs/models/responsedoneevent.md @@ -0,0 +1,10 @@ +# ResponseDoneEvent + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ResponseDoneEventType]](../models/responsedoneeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responsedoneeventtype.md b/docs/models/responsedoneeventtype.md new file mode 100644 index 00000000..58f7f44d --- /dev/null +++ b/docs/models/responsedoneeventtype.md @@ -0,0 +1,8 @@ +# ResponseDoneEventType + + +## Values + +| Name | Value | +| ---------------------------- | ---------------------------- | +| `CONVERSATION_RESPONSE_DONE` | conversation.response.done | \ No newline at end of file diff --git a/docs/models/responseerrorevent.md b/docs/models/responseerrorevent.md new file mode 100644 index 00000000..e730b7c4 --- /dev/null +++ b/docs/models/responseerrorevent.md @@ -0,0 +1,11 @@ +# ResponseErrorEvent + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------ | +| `message` | *str* | :heavy_check_mark: | N/A | +| `code` | *int* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ResponseErrorEventType]](../models/responseerroreventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseerroreventtype.md b/docs/models/responseerroreventtype.md new file mode 100644 index 00000000..3b3fc303 --- /dev/null +++ b/docs/models/responseerroreventtype.md @@ -0,0 +1,8 @@ +# ResponseErrorEventType + + +## Values + +| Name | Value | +| ----------------------------- | ----------------------------- | +| `CONVERSATION_RESPONSE_ERROR` | conversation.response.error | \ No newline at end of file diff --git a/docs/models/responsestartedevent.md b/docs/models/responsestartedevent.md new file mode 100644 index 00000000..7bd02b3e --- /dev/null +++ b/docs/models/responsestartedevent.md @@ -0,0 +1,10 @@ +# ResponseStartedEvent + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ResponseStartedEventType]](../models/responsestartedeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responsestartedeventtype.md 
b/docs/models/responsestartedeventtype.md new file mode 100644 index 00000000..2d9273bd --- /dev/null +++ b/docs/models/responsestartedeventtype.md @@ -0,0 +1,8 @@ +# ResponseStartedEventType + + +## Values + +| Name | Value | +| ------------------------------- | ------------------------------- | +| `CONVERSATION_RESPONSE_STARTED` | conversation.response.started | \ No newline at end of file diff --git a/docs/models/ssetypes.md b/docs/models/ssetypes.md new file mode 100644 index 00000000..08d0f662 --- /dev/null +++ b/docs/models/ssetypes.md @@ -0,0 +1,18 @@ +# SSETypes + +Server side events sent when streaming a conversation response. + + +## Values + +| Name | Value | +| ------------------------------- | ------------------------------- | +| `CONVERSATION_RESPONSE_STARTED` | conversation.response.started | +| `CONVERSATION_RESPONSE_DONE` | conversation.response.done | +| `CONVERSATION_RESPONSE_ERROR` | conversation.response.error | +| `MESSAGE_OUTPUT_DELTA` | message.output.delta | +| `TOOL_EXECUTION_STARTED` | tool.execution.started | +| `TOOL_EXECUTION_DONE` | tool.execution.done | +| `AGENT_HANDOFF_STARTED` | agent.handoff.started | +| `AGENT_HANDOFF_DONE` | agent.handoff.done | +| `FUNCTION_CALL_DELTA` | function.call.delta | \ No newline at end of file diff --git a/docs/models/toolexecutiondoneevent.md b/docs/models/toolexecutiondoneevent.md new file mode 100644 index 00000000..d6d28ce2 --- /dev/null +++ b/docs/models/toolexecutiondoneevent.md @@ -0,0 +1,13 @@ +# ToolExecutionDoneEvent + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | 
[models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolExecutionDoneEventType]](../models/toolexecutiondoneeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondoneeventtype.md b/docs/models/toolexecutiondoneeventtype.md new file mode 100644 index 00000000..872624c1 --- /dev/null +++ b/docs/models/toolexecutiondoneeventtype.md @@ -0,0 +1,8 @@ +# ToolExecutionDoneEventType + + +## Values + +| Name | Value | +| --------------------- | --------------------- | +| `TOOL_EXECUTION_DONE` | tool.execution.done | \ No newline at end of file diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md new file mode 100644 index 00000000..8422a8fd --- /dev/null +++ b/docs/models/toolexecutionentry.md @@ -0,0 +1,14 @@ +# ToolExecutionEntry + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ToolExecutionEntryObject]](../models/toolexecutionentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolExecutionEntryType]](../models/toolexecutionentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionentryobject.md b/docs/models/toolexecutionentryobject.md new file mode 100644 index 00000000..0ca79af5 --- /dev/null +++ b/docs/models/toolexecutionentryobject.md @@ -0,0 +1,8 @@ +# ToolExecutionEntryObject + + +## Values + +| Name | Value | +| ------- | ------- | +| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/toolexecutionentrytype.md b/docs/models/toolexecutionentrytype.md new file mode 100644 index 00000000..a67629b8 --- /dev/null +++ b/docs/models/toolexecutionentrytype.md @@ -0,0 +1,8 @@ +# ToolExecutionEntryType + + +## Values + +| Name | Value | +| ---------------- | ---------------- | +| `TOOL_EXECUTION` | tool.execution | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md new file mode 100644 index 00000000..4b03f94c --- /dev/null +++ b/docs/models/toolexecutionstartedevent.md @@ -0,0 +1,12 @@ +# ToolExecutionStartedEvent + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | 
:heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolExecutionStartedEventType]](../models/toolexecutionstartedeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedeventtype.md b/docs/models/toolexecutionstartedeventtype.md new file mode 100644 index 00000000..56695d1f --- /dev/null +++ b/docs/models/toolexecutionstartedeventtype.md @@ -0,0 +1,8 @@ +# ToolExecutionStartedEventType + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `TOOL_EXECUTION_STARTED` | tool.execution.started | \ No newline at end of file diff --git a/docs/models/toolfilechunk.md b/docs/models/toolfilechunk.md new file mode 100644 index 00000000..236d2f41 --- /dev/null +++ b/docs/models/toolfilechunk.md @@ -0,0 +1,12 @@ +# ToolFileChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `tool` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolFileChunkType]](../models/toolfilechunktype.md) | :heavy_minus_sign: | N/A | +| `file_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `file_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolfilechunktype.md b/docs/models/toolfilechunktype.md new file mode 100644 index 00000000..7e99acef --- /dev/null +++ 
b/docs/models/toolfilechunktype.md @@ -0,0 +1,8 @@ +# ToolFileChunkType + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `TOOL_FILE` | tool_file | \ No newline at end of file diff --git a/docs/models/toolreferencechunk.md b/docs/models/toolreferencechunk.md new file mode 100644 index 00000000..fb4b46a6 --- /dev/null +++ b/docs/models/toolreferencechunk.md @@ -0,0 +1,12 @@ +# ToolReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `tool` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `title` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.ToolReferenceChunkType]](../models/toolreferencechunktype.md) | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `source` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolreferencechunktype.md b/docs/models/toolreferencechunktype.md new file mode 100644 index 00000000..bc57d277 --- /dev/null +++ b/docs/models/toolreferencechunktype.md @@ -0,0 +1,8 @@ +# ToolReferenceChunkType + + +## Values + +| Name | Value | +| ---------------- | ---------------- | +| `TOOL_REFERENCE` | tool_reference | \ No newline at end of file diff --git a/docs/models/tools.md b/docs/models/tools.md new file mode 100644 index 00000000..f308d732 --- /dev/null +++ b/docs/models/tools.md @@ -0,0 +1,41 @@ +# Tools + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = 
/* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/websearchpremiumtool.md b/docs/models/websearchpremiumtool.md new file mode 100644 index 00000000..941fc2b8 --- /dev/null +++ b/docs/models/websearchpremiumtool.md @@ -0,0 +1,8 @@ +# WebSearchPremiumTool + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `type` | [Optional[models.WebSearchPremiumToolType]](../models/websearchpremiumtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/websearchpremiumtooltype.md b/docs/models/websearchpremiumtooltype.md new file mode 100644 index 00000000..348bfe85 --- /dev/null +++ b/docs/models/websearchpremiumtooltype.md @@ -0,0 +1,8 @@ +# WebSearchPremiumToolType + + +## Values + +| Name | Value | +| -------------------- | -------------------- | +| `WEB_SEARCH_PREMIUM` | web_search_premium | \ No newline at end of file diff --git a/docs/models/websearchtool.md b/docs/models/websearchtool.md new file mode 100644 index 00000000..c8d708bd --- /dev/null +++ b/docs/models/websearchtool.md @@ -0,0 +1,8 @@ +# WebSearchTool + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | 
-------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | [Optional[models.WebSearchToolType]](../models/websearchtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/websearchtooltype.md b/docs/models/websearchtooltype.md new file mode 100644 index 00000000..57b6acbb --- /dev/null +++ b/docs/models/websearchtooltype.md @@ -0,0 +1,8 @@ +# WebSearchToolType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `WEB_SEARCH` | web_search | \ No newline at end of file diff --git a/docs/sdks/beta/README.md b/docs/sdks/beta/README.md new file mode 100644 index 00000000..f5b5f822 --- /dev/null +++ b/docs/sdks/beta/README.md @@ -0,0 +1,6 @@ +# Beta +(*beta*) + +## Overview + +### Available Operations diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md new file mode 100644 index 00000000..b5c12b24 --- /dev/null +++ b/docs/sdks/conversations/README.md @@ -0,0 +1,466 @@ +# Conversations +(*beta.conversations*) + +## Overview + +### Available Operations + +* [start](#start) - Create a conversation and append entries to it. +* [list](#list) - List all created conversations. +* [get](#get) - Retrieve a conversation information. +* [append](#append) - Append new entries to an existing conversation. +* [get_history](#get_history) - Retrieve all entries in a conversation. +* [get_messages](#get_messages) - Retrieve all messages in a conversation. +* [restart](#restart) - Restart a conversation starting from a given entry. +* [start_stream](#start_stream) - Create a conversation and append entries to it. +* [append_stream](#append_stream) - Append new entries to an existing conversation. +* [restart_stream](#restart_stream) - Restart a conversation starting from a given entry. 
+ +## start + +Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.start(inputs="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../../models/handoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.Tools](../../models/tools.md)] | :heavy_minus_sign: | N/A | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +### Response + +**[models.ConversationResponse](../../models/conversationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## list + +Retrieve a list of conversation entities sorted by creation time. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.list() + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.ResponseBody]](../../models/responsebody.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get + +Given a conversation_id retrieve a conversation entity with its attributes. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.get(conversation_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet](../../models/agentsapiv1conversationsgetresponsev1conversationsget.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## append + +Run completion on the history of the conversation and the user entries. Return the new created entries. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.append(conversation_id="", inputs="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ConversationResponse](../../models/conversationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get_history + +Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.get_history(conversation_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ConversationHistory](../../models/conversationhistory.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get_messages + +Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.get_messages(conversation_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ConversationMessages](../../models/conversationmessages.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## restart + +Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.restart(conversation_id="", inputs="", from_entry_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.ConversationResponse](../../models/conversationresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## start_stream + +Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.start_stream(inputs="") + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| 
`tools` | List[[models.ConversationStreamRequestTools](../../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | N/A | +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[Union[eventstreaming.EventStream[models.ConversationEvents], eventstreaming.EventStreamAsync[models.ConversationEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## append_stream + +Run completion on the history of the conversation and the user entries. Return the new created entries. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.append_stream(conversation_id="", inputs="") + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | +| `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[Union[eventstreaming.EventStream[models.ConversationEvents], eventstreaming.EventStreamAsync[models.ConversationEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## restart_stream + +Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.conversations.restart_stream(conversation_id="", inputs="", from_entry_id="") + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| +| `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[Union[eventstreaming.EventStream[models.ConversationEvents], eventstreaming.EventStreamAsync[models.ConversationEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md new file mode 100644 index 00000000..aeb2b917 --- /dev/null +++ b/docs/sdks/mistralagents/README.md @@ -0,0 +1,227 @@ +# MistralAgents +(*beta.agents*) + +## Overview + +### Available Operations + +* [create](#create) - Create a agent that can be used within a conversation. +* [list](#list) - List agent entities. +* [get](#get) - Retrieve an agent entity. +* [update](#update) - Update an agent entity. +* [update_version](#update_version) - Update an agent version. + +## create + +Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.create(model="Fiesta", name="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentCreationRequestTools](../../models/agentcreationrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## list + +Retrieve a list of agent entities sorted by creation time. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.list() + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.Agent]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get + +Given an agent retrieve an agent entity with its attributes. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.get(agent_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## update + +Update an agent attributes and create a new version. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.update(agent_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentUpdateRequestTools](../../models/agentupdaterequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## update_version + +Switch the version of an agent. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.update_version(agent_id="", version=193920) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py new file mode 100644 index 00000000..6858b0a8 --- /dev/null +++ b/src/mistralai/beta.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.conversations import Conversations +from mistralai.mistral_agents import MistralAgents + + +class Beta(BaseSDK): + conversations: Conversations + agents: MistralAgents + + def __init__(self, sdk_config: SDKConfiguration) -> None: + BaseSDK.__init__(self, sdk_config) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.conversations = Conversations(self.sdk_configuration) + self.agents = MistralAgents(self.sdk_configuration) diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py new file mode 100644 index 00000000..438e444d --- /dev/null +++ b/src/mistralai/conversations.py @@ -0,0 +1,2454 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import eventstreaming, get_security_from_env +from typing import Any, List, Mapping, Optional, Union + + +class Conversations(BaseSDK): + def start( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = False, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: 
Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: + :param completion_args: + :param name: + :param description: + :param agent_id: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + agent_id=agent_id, + model=model, + ) + + req = self._build_request( + method="POST", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + 
security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def start_async( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] 
= False, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: + :param completion_args: + :param name: + :param description: + :param agent_id: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + agent_id=agent_id, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
utils.unmarshal_json(http_res.text, models.ConversationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.ResponseBody]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, List[models.ResponseBody]) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.ResponseBody]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, List[models.ResponseBody]) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def get( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + r"""Retrieve a conversation information. + + Given a conversation_id retrieve a conversation entity with its attributes. + + :param conversation_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, + models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + 
if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + r"""Retrieve a conversation information. + + Given a conversation_id retrieve a conversation entity with its attributes. + + :param conversation_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json( + http_res.text, + models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, 
http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def append( + self, + *, + conversation_id: str, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendRequest( + conversation_id=conversation_id, + conversation_append_request=models.ConversationAppendRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_request, + False, + False, + "json", + models.ConversationAppendRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
utils.unmarshal_json(http_res.text, models.ConversationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def append_async( + self, + *, + conversation_id: str, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. 
+ :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendRequest( + conversation_id=conversation_id, + conversation_append_request=models.ConversationAppendRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_request, + False, + False, + "json", + models.ConversationAppendRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + 
hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def get_history( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationHistory: + r"""Retrieve all entries in a conversation. + + Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. 
+ + :param conversation_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsHistoryRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}/history", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_history", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationHistory) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_history_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationHistory: + r"""Retrieve all entries in a conversation. + + Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. + + :param conversation_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsHistoryRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}/history", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_history", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationHistory) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) 
+ if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def get_messages( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationMessages: + r"""Retrieve all messages in a conversation. + + Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. + + :param conversation_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsMessagesRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}/messages", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_messages", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationMessages) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_messages_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationMessages: + r"""Retrieve all messages in a conversation. + + Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. + + :param conversation_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsMessagesRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}/messages", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_messages", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationMessages) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res 
+ ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def restart( + self, + *, + conversation_id: str, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + from_entry_id: str, + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationRestartRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: + :param inputs: + :param from_entry_id: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartRequest( + conversation_id=conversation_id, + conversation_restart_request=models.ConversationRestartRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + from_entry_id=from_entry_id, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}/restart", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_request, + False, + False, + "json", + models.ConversationRestartRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", 
"application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def restart_async( + self, + *, + conversation_id: str, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + from_entry_id: str, + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationRestartRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. 
+ + :param conversation_id: + :param inputs: + :param from_entry_id: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartRequest( + conversation_id=conversation_id, + conversation_restart_request=models.ConversationRestartRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + from_entry_id=from_entry_id, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}/restart", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_request, + False, + False, + "json", + models.ConversationRestartRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = 
self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ConversationResponse) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def start_stream( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = True, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models.ConversationStreamRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[ + 
+ List[models.ConversationStreamRequestTools], + List[models.ConversationStreamRequestToolsTypedDict], + ] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: + :param completion_args: + :param name: + :param description: + :param agent_id: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests.
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, OptionalNullable[List[models.ConversationStreamRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + agent_id=agent_id, + model=model, + ) + + req = self._build_request( + method="POST", + path="/v1/conversations#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationStreamRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start_stream", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if 
utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def start_stream_async( + self, + *, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = True, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models.ConversationStreamRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[ + List[models.ConversationStreamRequestTools], + List[models.ConversationStreamRequestToolsTypedDict], + ] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + 
timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation. Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: + :param completion_args: + :param name: + :param description: + :param agent_id: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, OptionalNullable[List[models.ConversationStreamRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + agent_id=agent_id, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, +
user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationStreamRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start_stream", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def append_stream( + self, + *, + conversation_id: str, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendStreamRequest( + conversation_id=conversation_id, + conversation_append_stream_request=models.ConversationAppendStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_stream_request, + False, + False, + "json", + models.ConversationAppendStreamRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append_stream", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", 
"text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = utils.unmarshal_json( + http_res_text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def append_stream_async( + self, + *, + conversation_id: str, + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models.ConversationAppendStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. 
+ :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendStreamRequest( + conversation_id=conversation_id, + conversation_append_stream_request=models.ConversationAppendStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_stream_request, + False, + False, + "json", + models.ConversationAppendStreamRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, 
    def restart_stream(
        self,
        *,
        conversation_id: str,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        from_entry_id: str,
        stream: Optional[bool] = True,
        store: Optional[bool] = True,
        handoff_execution: Optional[
            models.ConversationRestartStreamRequestHandoffExecution
        ] = "server",
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStream[models.ConversationEvents]:
        r"""Restart a conversation starting from a given entry.

        Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned.

        :param conversation_id:
        :param inputs:
        :param from_entry_id:
        :param stream:
        :param store: Whether to store the results into our servers or not.
        :param handoff_execution:
        :param completion_args: White-listed arguments from the completion API
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout falls back to the SDK-wide default when not given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsRestartStreamRequest(
            conversation_id=conversation_id,
            conversation_restart_stream_request=models.ConversationRestartStreamRequest(
                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
                stream=stream,
                store=store,
                handoff_execution=handoff_execution,
                from_entry_id=from_entry_id,
                completion_args=utils.get_pydantic_model(
                    completion_args, Optional[models.CompletionArgs]
                ),
            ),
        )

        req = self._build_request(
            method="POST",
            path="/v1/conversations/{conversation_id}/restart#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Lazily serialized so request-mutating hooks see the final model.
            get_serialized_body=lambda: utils.serialize_request_body(
                request.conversation_restart_stream_request,
                False,
                False,
                "json",
                models.ConversationRestartStreamRequest,
            ),
            timeout_ms=timeout_ms,
        )

        # UNSET means "not provided": fall back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_restart_stream",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            # Wrap the raw SSE response; each event is unmarshalled lazily.
            return eventstreaming.EventStream(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
            )
        if utils.match_response(http_res, "422", "application/json"):
            http_res_text = utils.stream_to_text(http_res)
            response_data = utils.unmarshal_json(
                http_res_text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Anything else (unexpected status or content type) is a hard error.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
    async def restart_stream_async(
        self,
        *,
        conversation_id: str,
        inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict],
        from_entry_id: str,
        stream: Optional[bool] = True,
        store: Optional[bool] = True,
        handoff_execution: Optional[
            models.ConversationRestartStreamRequestHandoffExecution
        ] = "server",
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]:
        r"""Restart a conversation starting from a given entry.

        Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned.

        :param conversation_id:
        :param inputs:
        :param from_entry_id:
        :param stream:
        :param store: Whether to store the results into our servers or not.
        :param handoff_execution:
        :param completion_args: White-listed arguments from the completion API
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout falls back to the SDK-wide default when not given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1ConversationsRestartStreamRequest(
            conversation_id=conversation_id,
            conversation_restart_stream_request=models.ConversationRestartStreamRequest(
                inputs=utils.get_pydantic_model(inputs, models.ConversationInputs),
                stream=stream,
                store=store,
                handoff_execution=handoff_execution,
                from_entry_id=from_entry_id,
                completion_args=utils.get_pydantic_model(
                    completion_args, Optional[models.CompletionArgs]
                ),
            ),
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/conversations/{conversation_id}/restart#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Lazily serialized so request-mutating hooks see the final model.
            get_serialized_body=lambda: utils.serialize_request_body(
                request.conversation_restart_stream_request,
                False,
                False,
                "json",
                models.ConversationRestartStreamRequest,
            ),
            timeout_ms=timeout_ms,
        )

        # UNSET means "not provided": fall back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_conversations_restart_stream",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            # Wrap the raw SSE response; each event is unmarshalled lazily.
            return eventstreaming.EventStreamAsync(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.ConversationEvents),
            )
        if utils.match_response(http_res, "422", "application/json"):
            http_res_text = await utils.stream_to_text_async(http_res)
            response_data = utils.unmarshal_json(
                http_res_text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Anything else (unexpected status or content type) is a hard error.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
    def create(
        self,
        *,
        model: str,
        name: str,
        instructions: OptionalNullable[str] = UNSET,
        tools: Optional[
            Union[
                List[models.AgentCreationRequestTools],
                List[models.AgentCreationRequestToolsTypedDict],
            ]
        ] = None,
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        description: OptionalNullable[str] = UNSET,
        handoffs: OptionalNullable[List[str]] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.Agent:
        r"""Create a agent that can be used within a conversation.

        Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used.

        :param model:
        :param name:
        :param instructions: Instruction prompt the model will follow during the conversation.
        :param tools: List of tools which are available to the model during the conversation.
        :param completion_args: White-listed arguments from the completion API
        :param description:
        :param handoffs:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout falls back to the SDK-wide default when not given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentCreationRequest(
            instructions=instructions,
            tools=utils.get_pydantic_model(
                tools, Optional[List[models.AgentCreationRequestTools]]
            ),
            completion_args=utils.get_pydantic_model(
                completion_args, Optional[models.CompletionArgs]
            ),
            model=model,
            name=name,
            description=description,
            handoffs=handoffs,
        )

        req = self._build_request(
            method="POST",
            path="/v1/agents",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Lazily serialized so request-mutating hooks see the final model.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.AgentCreationRequest
            ),
            timeout_ms=timeout_ms,
        )

        # UNSET means "not provided": fall back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_create",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.Agent)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Anything else (unexpected status or content type) is a hard error.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
    async def create_async(
        self,
        *,
        model: str,
        name: str,
        instructions: OptionalNullable[str] = UNSET,
        tools: Optional[
            Union[
                List[models.AgentCreationRequestTools],
                List[models.AgentCreationRequestToolsTypedDict],
            ]
        ] = None,
        completion_args: Optional[
            Union[models.CompletionArgs, models.CompletionArgsTypedDict]
        ] = None,
        description: OptionalNullable[str] = UNSET,
        handoffs: OptionalNullable[List[str]] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.Agent:
        r"""Create a agent that can be used within a conversation.

        Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used.

        :param model:
        :param name:
        :param instructions: Instruction prompt the model will follow during the conversation.
        :param tools: List of tools which are available to the model during the conversation.
        :param completion_args: White-listed arguments from the completion API
        :param description:
        :param handoffs:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout falls back to the SDK-wide default when not given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentCreationRequest(
            instructions=instructions,
            tools=utils.get_pydantic_model(
                tools, Optional[List[models.AgentCreationRequestTools]]
            ),
            completion_args=utils.get_pydantic_model(
                completion_args, Optional[models.CompletionArgs]
            ),
            model=model,
            name=name,
            description=description,
            handoffs=handoffs,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/agents",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Lazily serialized so request-mutating hooks see the final model.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.AgentCreationRequest
            ),
            timeout_ms=timeout_ms,
        )

        # UNSET means "not provided": fall back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_create",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.Agent)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Anything else (unexpected status or content type) is a hard error.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
    def list(
        self,
        *,
        page: Optional[int] = 0,
        page_size: Optional[int] = 20,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> List[models.Agent]:
        r"""List agent entities.

        Retrieve a list of agent entities sorted by creation time.

        :param page:
        :param page_size:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout falls back to the SDK-wide default when not given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsListRequest(
            page=page,
            page_size=page_size,
        )

        req = self._build_request(
            method="GET",
            path="/v1/agents",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        # UNSET means "not provided": fall back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_list",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, List[models.Agent])
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Anything else (unexpected status or content type) is a hard error.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
    async def list_async(
        self,
        *,
        page: Optional[int] = 0,
        page_size: Optional[int] = 20,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> List[models.Agent]:
        r"""List agent entities.

        Retrieve a list of agent entities sorted by creation time.

        :param page:
        :param page_size:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout falls back to the SDK-wide default when not given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsListRequest(
            page=page,
            page_size=page_size,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/agents",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        # UNSET means "not provided": fall back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_list",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, List[models.Agent])
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Anything else (unexpected status or content type) is a hard error.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
    def get(
        self,
        *,
        agent_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.Agent:
        r"""Retrieve an agent entity.

        Given an agent retrieve an agent entity with its attributes.

        :param agent_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout falls back to the SDK-wide default when not given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsGetRequest(
            agent_id=agent_id,
        )

        req = self._build_request(
            method="GET",
            path="/v1/agents/{agent_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        # UNSET means "not provided": fall back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_get",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.Agent)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Anything else (unexpected status or content type) is a hard error.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
    async def get_async(
        self,
        *,
        agent_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.Agent:
        r"""Retrieve an agent entity.

        Given an agent retrieve an agent entity with its attributes.

        :param agent_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout falls back to the SDK-wide default when not given.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url overrides the configured base URL.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.AgentsAPIV1AgentsGetRequest(
            agent_id=agent_id,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/agents/{agent_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            timeout_ms=timeout_ms,
        )

        # UNSET means "not provided": fall back to the SDK-level retry config.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                base_url=base_url or "",
                operation_id="agents_api_v1_agents_get",
                oauth2_scopes=[],
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return utils.unmarshal_json(http_res.text, models.Agent)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = utils.unmarshal_json(
                http_res.text, models.HTTPValidationErrorData
            )
            raise models.HTTPValidationError(data=response_data)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError(
                "API error occurred", http_res.status_code, http_res_text, http_res
            )

        # Anything else (unexpected status or content type) is a hard error.
        content_type = http_res.headers.get("Content-Type")
        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError(
            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
            http_res.status_code,
            http_res_text,
            http_res,
        )
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateRequest( + agent_id=agent_id, + agent_update_request=models.AgentUpdateRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentUpdateRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + ), + ) + + req = self._build_request( + method="PATCH", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.agent_update_request, + False, + False, + "json", + models.AgentUpdateRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_update", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, 
models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def update_async( + self, + *, + agent_id: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models.AgentUpdateRequestTools], + List[models.AgentUpdateRequestToolsTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + model: OptionalNullable[str] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent entity. + + Update an agent attributes and create a new version. + + :param agent_id: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. 
+ :param completion_args: White-listed arguments from the completion API + :param model: + :param name: + :param description: + :param handoffs: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateRequest( + agent_id=agent_id, + agent_update_request=models.AgentUpdateRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentUpdateRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + ), + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.agent_update_request, + False, + False, + "json", + models.AgentUpdateRequest, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = 
await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_update", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def update_version( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. 
+ + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def update_version_async( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.Agent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index d56f5bf8..cf121986 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -1,5 +1,85 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from .agent import Agent, AgentObject, AgentTools, AgentToolsTypedDict, AgentTypedDict +from .agentconversation import ( + AgentConversation, + AgentConversationObject, + AgentConversationTypedDict, +) +from .agentcreationrequest import ( + AgentCreationRequest, + AgentCreationRequestTools, + AgentCreationRequestToolsTypedDict, + AgentCreationRequestTypedDict, +) +from .agenthandoffdoneevent import ( + AgentHandoffDoneEvent, + AgentHandoffDoneEventType, + AgentHandoffDoneEventTypedDict, +) +from .agenthandoffentry import ( + AgentHandoffEntry, + AgentHandoffEntryObject, + AgentHandoffEntryType, + AgentHandoffEntryTypedDict, +) +from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventType, + AgentHandoffStartedEventTypedDict, +) +from .agents_api_v1_agents_getop import ( + AgentsAPIV1AgentsGetRequest, + AgentsAPIV1AgentsGetRequestTypedDict, +) +from .agents_api_v1_agents_listop import ( + AgentsAPIV1AgentsListRequest, + AgentsAPIV1AgentsListRequestTypedDict, +) +from .agents_api_v1_agents_update_versionop import ( + AgentsAPIV1AgentsUpdateVersionRequest, + AgentsAPIV1AgentsUpdateVersionRequestTypedDict, +) +from 
.agents_api_v1_agents_updateop import ( + AgentsAPIV1AgentsUpdateRequest, + AgentsAPIV1AgentsUpdateRequestTypedDict, +) +from .agents_api_v1_conversations_append_streamop import ( + AgentsAPIV1ConversationsAppendStreamRequest, + AgentsAPIV1ConversationsAppendStreamRequestTypedDict, +) +from .agents_api_v1_conversations_appendop import ( + AgentsAPIV1ConversationsAppendRequest, + AgentsAPIV1ConversationsAppendRequestTypedDict, +) +from .agents_api_v1_conversations_getop import ( + AgentsAPIV1ConversationsGetRequest, + AgentsAPIV1ConversationsGetRequestTypedDict, + AgentsAPIV1ConversationsGetResponseV1ConversationsGet, + AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict, +) +from .agents_api_v1_conversations_historyop import ( + AgentsAPIV1ConversationsHistoryRequest, + AgentsAPIV1ConversationsHistoryRequestTypedDict, +) +from .agents_api_v1_conversations_listop import ( + AgentsAPIV1ConversationsListRequest, + AgentsAPIV1ConversationsListRequestTypedDict, + ResponseBody, + ResponseBodyTypedDict, +) +from .agents_api_v1_conversations_messagesop import ( + AgentsAPIV1ConversationsMessagesRequest, + AgentsAPIV1ConversationsMessagesRequestTypedDict, +) +from .agents_api_v1_conversations_restart_streamop import ( + AgentsAPIV1ConversationsRestartStreamRequest, + AgentsAPIV1ConversationsRestartStreamRequestTypedDict, +) +from .agents_api_v1_conversations_restartop import ( + AgentsAPIV1ConversationsRestartRequest, + AgentsAPIV1ConversationsRestartRequestTypedDict, +) from .agentscompletionrequest import ( AgentsCompletionRequest, AgentsCompletionRequestMessages, @@ -20,6 +100,12 @@ AgentsCompletionStreamRequestToolChoiceTypedDict, AgentsCompletionStreamRequestTypedDict, ) +from .agentupdaterequest import ( + AgentUpdateRequest, + AgentUpdateRequestTools, + AgentUpdateRequestToolsTypedDict, + AgentUpdateRequestTypedDict, +) from .apiendpoint import APIEndpoint from .archiveftmodelout import ( ArchiveFTModelOut, @@ -39,6 +125,7 @@ from .batchjobout import 
BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict from .batchjobstatus import BatchJobStatus +from .builtinconnectors import BuiltInConnectors from .chatclassificationrequest import ( ChatClassificationRequest, ChatClassificationRequestTypedDict, @@ -131,6 +218,13 @@ ClassifierTrainingParametersIn, ClassifierTrainingParametersInTypedDict, ) +from .codeinterpretertool import ( + CodeInterpreterTool, + CodeInterpreterToolType, + CodeInterpreterToolTypedDict, +) +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completiondetailedjobout import ( CompletionDetailedJobOut, @@ -152,11 +246,11 @@ ) from .completionjobout import ( CompletionJobOut, + CompletionJobOutObject, CompletionJobOutTypedDict, Integrations, IntegrationsTypedDict, JobType, - Object, Repositories, RepositoriesTypedDict, Status, @@ -175,6 +269,67 @@ CompletionTrainingParametersInTypedDict, ) from .contentchunk import ContentChunk, ContentChunkTypedDict +from .conversationappendrequest import ( + ConversationAppendRequest, + ConversationAppendRequestHandoffExecution, + ConversationAppendRequestTypedDict, +) +from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestHandoffExecution, + ConversationAppendStreamRequestTypedDict, +) +from .conversationevents import ( + ConversationEvents, + ConversationEventsData, + ConversationEventsDataTypedDict, + ConversationEventsTypedDict, +) +from .conversationhistory import ( + ConversationHistory, + ConversationHistoryObject, + ConversationHistoryTypedDict, + Entries, + EntriesTypedDict, +) +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .conversationmessages import ( + ConversationMessages, + 
ConversationMessagesObject, + ConversationMessagesTypedDict, +) +from .conversationrequest import ( + ConversationRequest, + ConversationRequestTypedDict, + HandoffExecution, + Tools, + ToolsTypedDict, +) +from .conversationresponse import ( + ConversationResponse, + ConversationResponseObject, + ConversationResponseTypedDict, + Outputs, + OutputsTypedDict, +) +from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestHandoffExecution, + ConversationRestartRequestTypedDict, +) +from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestHandoffExecution, + ConversationRestartStreamRequestTypedDict, +) +from .conversationstreamrequest import ( + ConversationStreamRequest, + ConversationStreamRequestHandoffExecution, + ConversationStreamRequestTools, + ConversationStreamRequestToolsTypedDict, + ConversationStreamRequestTypedDict, +) +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict from .delete_model_v1_models_model_id_deleteop import ( DeleteModelV1ModelsModelIDDeleteRequest, DeleteModelV1ModelsModelIDDeleteRequestTypedDict, @@ -182,6 +337,11 @@ from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict +from .documentlibrarytool import ( + DocumentLibraryTool, + DocumentLibraryToolType, + DocumentLibraryToolTypedDict, +) from .documenturlchunk import ( DocumentURLChunk, DocumentURLChunkType, @@ -252,7 +412,29 @@ FunctionCall, FunctionCallTypedDict, ) +from .functioncallentry import ( + FunctionCallEntry, + FunctionCallEntryObject, + FunctionCallEntryType, + FunctionCallEntryTypedDict, +) +from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, +) +from .functioncallevent import ( + FunctionCallEvent, + 
FunctionCallEventType, + FunctionCallEventTypedDict, +) from .functionname import FunctionName, FunctionNameTypedDict +from .functionresultentry import ( + FunctionResultEntry, + FunctionResultEntryObject, + FunctionResultEntryType, + FunctionResultEntryTypedDict, +) +from .functiontool import FunctionTool, FunctionToolType, FunctionToolTypedDict from .githubrepositoryin import ( GithubRepositoryIn, GithubRepositoryInType, @@ -264,6 +446,11 @@ GithubRepositoryOutTypedDict, ) from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData +from .imagegenerationtool import ( + ImageGenerationTool, + ImageGenerationToolType, + ImageGenerationToolTypedDict, +) from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( ImageURLChunk, @@ -272,6 +459,7 @@ ImageURLChunkType, ImageURLChunkTypedDict, ) +from .inputentries import InputEntries, InputEntriesTypedDict from .inputs import ( Inputs, InputsTypedDict, @@ -366,8 +554,50 @@ LegacyJobMetadataOutTypedDict, ) from .listfilesout import ListFilesOut, ListFilesOutTypedDict +from .messageentries import MessageEntries, MessageEntriesTypedDict +from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, +) +from .messageinputentry import ( + MessageInputEntry, + MessageInputEntryContent, + MessageInputEntryContentTypedDict, + MessageInputEntryRole, + MessageInputEntryType, + MessageInputEntryTypedDict, + Object, +) +from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, +) +from .messageoutputentry import ( + MessageOutputEntry, + MessageOutputEntryContent, + MessageOutputEntryContentTypedDict, + MessageOutputEntryObject, + MessageOutputEntryRole, + MessageOutputEntryType, + MessageOutputEntryTypedDict, +) +from .messageoutputevent import ( + MessageOutputEvent, + MessageOutputEventContent, + MessageOutputEventContentTypedDict, + MessageOutputEventRole, + MessageOutputEventType, + 
MessageOutputEventTypedDict, +) from .metricout import MetricOut, MetricOutTypedDict from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from .modelconversation import ( + ModelConversation, + ModelConversationObject, + ModelConversationTools, + ModelConversationToolsTypedDict, + ModelConversationTypedDict, +) from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict from .moderationobject import ModerationObject, ModerationObjectTypedDict from .moderationresponse import ModerationResponse, ModerationResponseTypedDict @@ -377,10 +607,26 @@ from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict from .ocrresponse import OCRResponse, OCRResponseTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict +from .responsedoneevent import ( + ResponseDoneEvent, + ResponseDoneEventType, + ResponseDoneEventTypedDict, +) +from .responseerrorevent import ( + ResponseErrorEvent, + ResponseErrorEventType, + ResponseErrorEventTypedDict, +) from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats +from .responsestartedevent import ( + ResponseStartedEvent, + ResponseStartedEventType, + ResponseStartedEventTypedDict, +) from .retrieve_model_v1_models_model_id_getop import ( RetrieveModelV1ModelsModelIDGetRequest, RetrieveModelV1ModelsModelIDGetRequestTypedDict, @@ -392,6 +638,7 @@ from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .source import Source +from .ssetypes import SSETypes from .systemmessage import ( Role, SystemMessage, @@ -404,6 +651,23 @@ from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import 
ToolChoiceEnum +from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventType, + ToolExecutionDoneEventTypedDict, +) +from .toolexecutionentry import ( + ToolExecutionEntry, + ToolExecutionEntryObject, + ToolExecutionEntryType, + ToolExecutionEntryTypedDict, +) +from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventType, + ToolExecutionStartedEventTypedDict, +) +from .toolfilechunk import ToolFileChunk, ToolFileChunkType, ToolFileChunkTypedDict from .toolmessage import ( ToolMessage, ToolMessageContent, @@ -411,6 +675,11 @@ ToolMessageRole, ToolMessageTypedDict, ) +from .toolreferencechunk import ( + ToolReferenceChunk, + ToolReferenceChunkType, + ToolReferenceChunkTypedDict, +) from .tooltypes import ToolTypes from .trainingfile import TrainingFile, TrainingFileTypedDict from .unarchiveftmodelout import ( @@ -444,10 +713,68 @@ WandbIntegrationOutType, WandbIntegrationOutTypedDict, ) +from .websearchpremiumtool import ( + WebSearchPremiumTool, + WebSearchPremiumToolType, + WebSearchPremiumToolTypedDict, +) +from .websearchtool import WebSearchTool, WebSearchToolType, WebSearchToolTypedDict __all__ = [ "APIEndpoint", + "Agent", + "AgentConversation", + "AgentConversationObject", + "AgentConversationTypedDict", + "AgentCreationRequest", + "AgentCreationRequestTools", + "AgentCreationRequestToolsTypedDict", + "AgentCreationRequestTypedDict", + "AgentHandoffDoneEvent", + "AgentHandoffDoneEventType", + "AgentHandoffDoneEventTypedDict", + "AgentHandoffEntry", + "AgentHandoffEntryObject", + "AgentHandoffEntryType", + "AgentHandoffEntryTypedDict", + "AgentHandoffStartedEvent", + "AgentHandoffStartedEventType", + "AgentHandoffStartedEventTypedDict", + "AgentObject", + "AgentTools", + "AgentToolsTypedDict", + "AgentTypedDict", + "AgentUpdateRequest", + "AgentUpdateRequestTools", + "AgentUpdateRequestToolsTypedDict", + "AgentUpdateRequestTypedDict", + "AgentsAPIV1AgentsGetRequest", + 
"AgentsAPIV1AgentsGetRequestTypedDict", + "AgentsAPIV1AgentsListRequest", + "AgentsAPIV1AgentsListRequestTypedDict", + "AgentsAPIV1AgentsUpdateRequest", + "AgentsAPIV1AgentsUpdateRequestTypedDict", + "AgentsAPIV1AgentsUpdateVersionRequest", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", + "AgentsAPIV1ConversationsAppendRequest", + "AgentsAPIV1ConversationsAppendRequestTypedDict", + "AgentsAPIV1ConversationsAppendStreamRequest", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", + "AgentsAPIV1ConversationsGetRequest", + "AgentsAPIV1ConversationsGetRequestTypedDict", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", + "AgentsAPIV1ConversationsHistoryRequest", + "AgentsAPIV1ConversationsHistoryRequestTypedDict", + "AgentsAPIV1ConversationsListRequest", + "AgentsAPIV1ConversationsListRequestTypedDict", + "AgentsAPIV1ConversationsMessagesRequest", + "AgentsAPIV1ConversationsMessagesRequestTypedDict", + "AgentsAPIV1ConversationsRestartRequest", + "AgentsAPIV1ConversationsRestartRequestTypedDict", + "AgentsAPIV1ConversationsRestartStreamRequest", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", "AgentsCompletionRequest", "AgentsCompletionRequestMessages", "AgentsCompletionRequestMessagesTypedDict", @@ -487,6 +814,7 @@ "BatchJobsOut", "BatchJobsOutObject", "BatchJobsOutTypedDict", + "BuiltInConnectors", "ChatClassificationRequest", "ChatClassificationRequestTypedDict", "ChatCompletionChoice", @@ -545,6 +873,13 @@ "ClassifierTrainingParametersIn", "ClassifierTrainingParametersInTypedDict", "ClassifierTrainingParametersTypedDict", + "CodeInterpreterTool", + "CodeInterpreterToolType", + "CodeInterpreterToolTypedDict", + "CompletionArgs", + "CompletionArgsStop", + "CompletionArgsStopTypedDict", + "CompletionArgsTypedDict", "CompletionChunk", "CompletionChunkTypedDict", "CompletionDetailedJobOut", @@ -562,6 +897,7 @@ "CompletionFTModelOutObject", "CompletionFTModelOutTypedDict", 
"CompletionJobOut", + "CompletionJobOutObject", "CompletionJobOutTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", @@ -574,6 +910,42 @@ "ContentChunk", "ContentChunkTypedDict", "ContentTypedDict", + "ConversationAppendRequest", + "ConversationAppendRequestHandoffExecution", + "ConversationAppendRequestTypedDict", + "ConversationAppendStreamRequest", + "ConversationAppendStreamRequestHandoffExecution", + "ConversationAppendStreamRequestTypedDict", + "ConversationEvents", + "ConversationEventsData", + "ConversationEventsDataTypedDict", + "ConversationEventsTypedDict", + "ConversationHistory", + "ConversationHistoryObject", + "ConversationHistoryTypedDict", + "ConversationInputs", + "ConversationInputsTypedDict", + "ConversationMessages", + "ConversationMessagesObject", + "ConversationMessagesTypedDict", + "ConversationRequest", + "ConversationRequestTypedDict", + "ConversationResponse", + "ConversationResponseObject", + "ConversationResponseTypedDict", + "ConversationRestartRequest", + "ConversationRestartRequestHandoffExecution", + "ConversationRestartRequestTypedDict", + "ConversationRestartStreamRequest", + "ConversationRestartStreamRequestHandoffExecution", + "ConversationRestartStreamRequestTypedDict", + "ConversationStreamRequest", + "ConversationStreamRequestHandoffExecution", + "ConversationStreamRequestTools", + "ConversationStreamRequestToolsTypedDict", + "ConversationStreamRequestTypedDict", + "ConversationUsageInfo", + "ConversationUsageInfoTypedDict", "Data", "DataTypedDict", "DeleteFileOut", @@ -585,6 +957,9 @@ "DeltaMessage", "DeltaMessageTypedDict", "Document", + "DocumentLibraryTool", + "DocumentLibraryToolType", + "DocumentLibraryToolTypedDict", "DocumentTypedDict", "DocumentURLChunk", "DocumentURLChunkType", @@ -597,6 +972,8 @@ "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", + "Entries", + "EntriesTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", 
@@ -638,9 +1015,25 @@ "FinishReason", "Function", "FunctionCall", + "FunctionCallEntry", + "FunctionCallEntryArguments", + "FunctionCallEntryArgumentsTypedDict", + "FunctionCallEntryObject", + "FunctionCallEntryType", + "FunctionCallEntryTypedDict", + "FunctionCallEvent", + "FunctionCallEventType", + "FunctionCallEventTypedDict", "FunctionCallTypedDict", "FunctionName", "FunctionNameTypedDict", + "FunctionResultEntry", + "FunctionResultEntryObject", + "FunctionResultEntryType", + "FunctionResultEntryTypedDict", + "FunctionTool", + "FunctionToolType", + "FunctionToolTypedDict", "FunctionTypedDict", "GithubRepositoryIn", "GithubRepositoryInType", @@ -650,8 +1043,12 @@ "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "HandoffExecution", "Hyperparameters", "HyperparametersTypedDict", + "ImageGenerationTool", + "ImageGenerationToolType", + "ImageGenerationToolTypedDict", "ImageURL", "ImageURLChunk", "ImageURLChunkImageURL", @@ -659,6 +1056,8 @@ "ImageURLChunkType", "ImageURLChunkTypedDict", "ImageURLTypedDict", + "InputEntries", + "InputEntriesTypedDict", "Inputs", "InputsTypedDict", "InstructRequest", @@ -724,12 +1123,42 @@ "ListFilesOutTypedDict", "Loc", "LocTypedDict", + "MessageEntries", + "MessageEntriesTypedDict", + "MessageInputContentChunks", + "MessageInputContentChunksTypedDict", + "MessageInputEntry", + "MessageInputEntryContent", + "MessageInputEntryContentTypedDict", + "MessageInputEntryRole", + "MessageInputEntryType", + "MessageInputEntryTypedDict", + "MessageOutputContentChunks", + "MessageOutputContentChunksTypedDict", + "MessageOutputEntry", + "MessageOutputEntryContent", + "MessageOutputEntryContentTypedDict", + "MessageOutputEntryObject", + "MessageOutputEntryRole", + "MessageOutputEntryType", + "MessageOutputEntryTypedDict", + "MessageOutputEvent", + "MessageOutputEventContent", + "MessageOutputEventContentTypedDict", + "MessageOutputEventRole", + "MessageOutputEventType", + "MessageOutputEventTypedDict", 
"Messages", "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "ModelCapabilities", "ModelCapabilitiesTypedDict", + "ModelConversation", + "ModelConversationObject", + "ModelConversationTools", + "ModelConversationToolsTypedDict", + "ModelConversationTypedDict", "ModelList", "ModelListTypedDict", "ModelType", @@ -752,6 +1181,10 @@ "Object", "One", "OneTypedDict", + "OutputContentChunks", + "OutputContentChunksTypedDict", + "Outputs", + "OutputsTypedDict", "Prediction", "PredictionTypedDict", "QueryParamStatus", @@ -762,9 +1195,20 @@ "RepositoriesTypedDict", "Response1", "Response1TypedDict", + "ResponseBody", + "ResponseBodyTypedDict", + "ResponseDoneEvent", + "ResponseDoneEventType", + "ResponseDoneEventTypedDict", + "ResponseErrorEvent", + "ResponseErrorEventType", + "ResponseErrorEventTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", + "ResponseStartedEvent", + "ResponseStartedEventType", + "ResponseStartedEventTypedDict", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", @@ -773,6 +1217,7 @@ "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", "Role", "SDKError", + "SSETypes", "SampleType", "Security", "SecurityTypedDict", @@ -793,13 +1238,31 @@ "ToolChoice", "ToolChoiceEnum", "ToolChoiceTypedDict", + "ToolExecutionDoneEvent", + "ToolExecutionDoneEventType", + "ToolExecutionDoneEventTypedDict", + "ToolExecutionEntry", + "ToolExecutionEntryObject", + "ToolExecutionEntryType", + "ToolExecutionEntryTypedDict", + "ToolExecutionStartedEvent", + "ToolExecutionStartedEventType", + "ToolExecutionStartedEventTypedDict", + "ToolFileChunk", + "ToolFileChunkType", + "ToolFileChunkTypedDict", "ToolMessage", "ToolMessageContent", "ToolMessageContentTypedDict", "ToolMessageRole", "ToolMessageTypedDict", + "ToolReferenceChunk", + "ToolReferenceChunkType", + "ToolReferenceChunkTypedDict", "ToolTypedDict", "ToolTypes", + "Tools", + "ToolsTypedDict", "TrainingFile", 
"TrainingFileTypedDict", "Two", @@ -827,4 +1290,10 @@ "WandbIntegrationOutTypedDict", "WandbIntegrationType", "WandbIntegrationTypedDict", + "WebSearchPremiumTool", + "WebSearchPremiumToolType", + "WebSearchPremiumToolTypedDict", + "WebSearchTool", + "WebSearchToolType", + "WebSearchToolTypedDict", ] diff --git a/src/mistralai/models/agent.py b/src/mistralai/models/agent.py new file mode 100644 index 00000000..ce750606 --- /dev/null +++ b/src/mistralai/models/agent.py @@ -0,0 +1,129 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentToolsTypedDict = TypeAliasType( + "AgentToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, 
Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +AgentObject = Literal["agent"] + + +class AgentTypedDict(TypedDict): + model: str + name: str + id: str + version: int + created_at: datetime + updated_at: datetime + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + object: NotRequired[AgentObject] + + +class Agent(BaseModel): + model: str + + name: str + + id: str + + version: int + + created_at: datetime + + updated_at: datetime + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + object: Optional[AgentObject] = "agent" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "object", + ] + nullable_fields = ["instructions", "description", "handoffs"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + 
optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py new file mode 100644 index 00000000..66d6d9f5 --- /dev/null +++ b/src/mistralai/models/agentconversation.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentConversationObject = Literal["conversation"] + + +class AgentConversationTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + agent_id: str + name: NotRequired[Nullable[str]] + r"""Name given to the conversation.""" + description: NotRequired[Nullable[str]] + r"""Description of the what the conversation is about.""" + object: NotRequired[AgentConversationObject] + + +class AgentConversation(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + agent_id: str + + name: OptionalNullable[str] = UNSET + r"""Name given to the conversation.""" + + description: OptionalNullable[str] = UNSET + r"""Description of the what the conversation is about.""" + + object: Optional[AgentConversationObject] = "conversation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "description", "object"] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = 
handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agentcreationrequest.py b/src/mistralai/models/agentcreationrequest.py new file mode 100644 index 00000000..7e0a1fa2 --- /dev/null +++ b/src/mistralai/models/agentcreationrequest.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentCreationRequestToolsTypedDict = TypeAliasType( + "AgentCreationRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + 
FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentCreationRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class AgentCreationRequestTypedDict(TypedDict): + model: str + name: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentCreationRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + + +class AgentCreationRequest(BaseModel): + model: str + + name: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentCreationRequestTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + ] + nullable_fields = ["instructions", "description", "handoffs"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) 
+ serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agenthandoffdoneevent.py b/src/mistralai/models/agenthandoffdoneevent.py new file mode 100644 index 00000000..fa545a02 --- /dev/null +++ b/src/mistralai/models/agenthandoffdoneevent.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffDoneEventType = Literal["agent.handoff.done"] + + +class AgentHandoffDoneEventTypedDict(TypedDict): + id: str + next_agent_id: str + next_agent_name: str + type: NotRequired[AgentHandoffDoneEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class AgentHandoffDoneEvent(BaseModel): + id: str + + next_agent_id: str + + next_agent_name: str + + type: Optional[AgentHandoffDoneEventType] = "agent.handoff.done" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/models/agenthandoffentry.py b/src/mistralai/models/agenthandoffentry.py new file mode 100644 index 00000000..b8e356c9 --- /dev/null +++ b/src/mistralai/models/agenthandoffentry.py @@ -0,0 +1,75 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffEntryObject = Literal["entry"] + +AgentHandoffEntryType = Literal["agent.handoff"] + + +class AgentHandoffEntryTypedDict(TypedDict): + previous_agent_id: str + previous_agent_name: str + next_agent_id: str + next_agent_name: str + object: NotRequired[AgentHandoffEntryObject] + type: NotRequired[AgentHandoffEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class AgentHandoffEntry(BaseModel): + previous_agent_id: str + + previous_agent_name: str + + next_agent_id: str + + next_agent_name: str + + object: Optional[AgentHandoffEntryObject] = "entry" + + type: Optional[AgentHandoffEntryType] = "agent.handoff" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agenthandoffstartedevent.py b/src/mistralai/models/agenthandoffstartedevent.py 
new file mode 100644 index 00000000..9033a0a9 --- /dev/null +++ b/src/mistralai/models/agenthandoffstartedevent.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffStartedEventType = Literal["agent.handoff.started"] + + +class AgentHandoffStartedEventTypedDict(TypedDict): + id: str + previous_agent_id: str + previous_agent_name: str + type: NotRequired[AgentHandoffStartedEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class AgentHandoffStartedEvent(BaseModel): + id: str + + previous_agent_id: str + + previous_agent_name: str + + type: Optional[AgentHandoffStartedEventType] = "agent.handoff.started" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/models/agents_api_v1_agents_getop.py b/src/mistralai/models/agents_api_v1_agents_getop.py new file mode 100644 index 00000000..5dbcecc1 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_getop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsGetRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/agents_api_v1_agents_listop.py b/src/mistralai/models/agents_api_v1_agents_listop.py new file mode 100644 index 00000000..25f48a62 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_listop.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, QueryParamMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + + +class AgentsAPIV1AgentsListRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 20 diff --git a/src/mistralai/models/agents_api_v1_agents_update_versionop.py b/src/mistralai/models/agents_api_v1_agents_update_versionop.py new file mode 100644 index 00000000..5e4b97b3 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_update_versionop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsUpdateVersionRequestTypedDict(TypedDict): + agent_id: str + version: int + + +class AgentsAPIV1AgentsUpdateVersionRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + version: Annotated[ + int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/models/agents_api_v1_agents_updateop.py b/src/mistralai/models/agents_api_v1_agents_updateop.py new file mode 100644 index 00000000..32696fbe --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_updateop.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agentupdaterequest import AgentUpdateRequest, AgentUpdateRequestTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): + agent_id: str + agent_update_request: AgentUpdateRequestTypedDict + + +class AgentsAPIV1AgentsUpdateRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + agent_update_request: Annotated[ + AgentUpdateRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_append_streamop.py b/src/mistralai/models/agents_api_v1_conversations_append_streamop.py new file mode 100644 index 00000000..d2489ffb --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_append_streamop.py @@ -0,0 +1,28 @@ +"""Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsAppendStreamRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation to which we append entries.""" + conversation_append_stream_request: ConversationAppendStreamRequestTypedDict + + +class AgentsAPIV1ConversationsAppendStreamRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation to which we append entries.""" + + conversation_append_stream_request: Annotated[ + ConversationAppendStreamRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_appendop.py b/src/mistralai/models/agents_api_v1_conversations_appendop.py new file mode 100644 index 00000000..ba37697e --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_appendop.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationappendrequest import ( + ConversationAppendRequest, + ConversationAppendRequestTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsAppendRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation to which we append entries.""" + conversation_append_request: ConversationAppendRequestTypedDict + + +class AgentsAPIV1ConversationsAppendRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation to which we append entries.""" + + conversation_append_request: Annotated[ + ConversationAppendRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_getop.py b/src/mistralai/models/agents_api_v1_conversations_getop.py new file mode 100644 index 00000000..4a800ad6 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_getop.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): + conversation_id: str + + +class AgentsAPIV1ConversationsGetRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + +AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType( + "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", + Union[AgentConversationTypedDict, ModelConversationTypedDict], +) +r"""Successful Response""" + + +AgentsAPIV1ConversationsGetResponseV1ConversationsGet = TypeAliasType( + "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", + Union[AgentConversation, ModelConversation], +) +r"""Successful Response""" diff --git a/src/mistralai/models/agents_api_v1_conversations_historyop.py b/src/mistralai/models/agents_api_v1_conversations_historyop.py new file mode 100644 index 00000000..09fb6081 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_historyop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): + conversation_id: str + + +class AgentsAPIV1ConversationsHistoryRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_listop.py b/src/mistralai/models/agents_api_v1_conversations_listop.py new file mode 100644 index 00000000..f1d3d579 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_listop.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, QueryParamMetadata +from typing import Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + + +class AgentsAPIV1ConversationsListRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + +ResponseBodyTypedDict = TypeAliasType( + "ResponseBodyTypedDict", + Union[AgentConversationTypedDict, ModelConversationTypedDict], +) + + +ResponseBody = TypeAliasType( + "ResponseBody", Union[AgentConversation, ModelConversation] +) diff --git 
a/src/mistralai/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/models/agents_api_v1_conversations_messagesop.py new file mode 100644 index 00000000..ade66e5e --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_messagesop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): + conversation_id: str + + +class AgentsAPIV1ConversationsMessagesRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py new file mode 100644 index 00000000..c8fd8475 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): + conversation_id: str + conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict + + +class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + conversation_restart_stream_request: Annotated[ + ConversationRestartStreamRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/agents_api_v1_conversations_restartop.py b/src/mistralai/models/agents_api_v1_conversations_restartop.py new file mode 100644 index 00000000..aa867aff --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_restartop.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): + conversation_id: str + conversation_restart_request: ConversationRestartRequestTypedDict + + +class AgentsAPIV1ConversationsRestartRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + conversation_restart_request: Annotated[ + ConversationRestartRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/agentupdaterequest.py b/src/mistralai/models/agentupdaterequest.py new file mode 100644 index 00000000..ebb656d6 --- /dev/null +++ b/src/mistralai/models/agentupdaterequest.py @@ -0,0 +1,111 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentUpdateRequestToolsTypedDict = TypeAliasType( + "AgentUpdateRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentUpdateRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class AgentUpdateRequestTypedDict(TypedDict): + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentUpdateRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: 
NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + model: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + + +class AgentUpdateRequest(BaseModel): + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentUpdateRequestTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + model: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "model", + "name", + "description", + "handoffs", + ] + nullable_fields = ["instructions", "model", "name", "description", "handoffs"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/builtinconnectors.py b/src/mistralai/models/builtinconnectors.py new file mode 100644 index 00000000..6a3b2476 --- /dev/null +++ b/src/mistralai/models/builtinconnectors.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +BuiltInConnectors = Literal[ + "web_search", + "web_search_premium", + "code_interpreter", + "image_generation", + "document_library", +] diff --git a/src/mistralai/models/codeinterpretertool.py b/src/mistralai/models/codeinterpretertool.py new file mode 100644 index 00000000..b0fc4d20 --- /dev/null +++ b/src/mistralai/models/codeinterpretertool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +CodeInterpreterToolType = Literal["code_interpreter"] + + +class CodeInterpreterToolTypedDict(TypedDict): + type: NotRequired[CodeInterpreterToolType] + + +class CodeInterpreterTool(BaseModel): + type: Optional[CodeInterpreterToolType] = "code_interpreter" diff --git a/src/mistralai/models/completionargs.py b/src/mistralai/models/completionargs.py new file mode 100644 index 00000000..2c5cf213 --- /dev/null +++ b/src/mistralai/models/completionargs.py @@ -0,0 +1,100 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .toolchoiceenum import ToolChoiceEnum +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionArgsTypedDict(TypedDict): + r"""White-listed arguments from the completion API""" + + stop: NotRequired[Nullable[CompletionArgsStopTypedDict]] + presence_penalty: NotRequired[Nullable[float]] + frequency_penalty: NotRequired[Nullable[float]] + temperature: NotRequired[float] + top_p: NotRequired[Nullable[float]] + max_tokens: NotRequired[Nullable[int]] + random_seed: NotRequired[Nullable[int]] + prediction: NotRequired[Nullable[PredictionTypedDict]] + response_format: NotRequired[Nullable[ResponseFormatTypedDict]] + tool_choice: NotRequired[ToolChoiceEnum] + + +class CompletionArgs(BaseModel): + r"""White-listed arguments from the completion API""" + + stop: OptionalNullable[CompletionArgsStop] = UNSET + + presence_penalty: OptionalNullable[float] = UNSET + + frequency_penalty: OptionalNullable[float] = UNSET + + temperature: Optional[float] = 0.3 + + top_p: OptionalNullable[float] = UNSET + + max_tokens: OptionalNullable[int] = UNSET + + random_seed: OptionalNullable[int] = UNSET + + prediction: OptionalNullable[Prediction] = UNSET + + response_format: OptionalNullable[ResponseFormat] = UNSET + + tool_choice: Optional[ToolChoiceEnum] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + "tool_choice", + ] + nullable_fields = [ + "stop", 
+ "presence_penalty", + "frequency_penalty", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/completionargsstop.py b/src/mistralai/models/completionargsstop.py new file mode 100644 index 00000000..de7a0956 --- /dev/null +++ b/src/mistralai/models/completionargsstop.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import List, Union +from typing_extensions import TypeAliasType + + +CompletionArgsStopTypedDict = TypeAliasType( + "CompletionArgsStopTypedDict", Union[str, List[str]] +) + + +CompletionArgsStop = TypeAliasType("CompletionArgsStop", Union[str, List[str]]) diff --git a/src/mistralai/models/completionjobout.py b/src/mistralai/models/completionjobout.py index 7f8bfd91..3932dae3 100644 --- a/src/mistralai/models/completionjobout.py +++ b/src/mistralai/models/completionjobout.py @@ -28,7 +28,7 @@ ] r"""The current status of the fine-tuning job.""" -Object = Literal["job"] +CompletionJobOutObject = Literal["job"] r"""The object type of the fine-tuning job.""" IntegrationsTypedDict = WandbIntegrationOutTypedDict @@ -63,7 +63,7 @@ class CompletionJobOutTypedDict(TypedDict): hyperparameters: CompletionTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain validation data.""" - object: NotRequired[Object] + object: NotRequired[CompletionJobOutObject] r"""The object type of the fine-tuning job.""" fine_tuned_model: NotRequired[Nullable[str]] r"""The name of the fine-tuned model that is being created. 
The value will be `null` if the fine-tuning job is still running.""" @@ -105,7 +105,7 @@ class CompletionJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - object: Optional[Object] = "job" + object: Optional[CompletionJobOutObject] = "job" r"""The object type of the fine-tuning job.""" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/conversationappendrequest.py b/src/mistralai/models/conversationappendrequest.py new file mode 100644 index 00000000..ecc47e45 --- /dev/null +++ b/src/mistralai/models/conversationappendrequest.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendRequestHandoffExecution = Literal["client", "server"] + + +class ConversationAppendRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationAppendRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationAppendRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion 
API""" diff --git a/src/mistralai/models/conversationappendstreamrequest.py b/src/mistralai/models/conversationappendstreamrequest.py new file mode 100644 index 00000000..25ffe5fb --- /dev/null +++ b/src/mistralai/models/conversationappendstreamrequest.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendStreamRequestHandoffExecution = Literal["client", "server"] + + +class ConversationAppendStreamRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationAppendStreamRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationAppendStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationevents.py b/src/mistralai/models/conversationevents.py new file mode 100644 index 00000000..8552edda --- /dev/null +++ b/src/mistralai/models/conversationevents.py @@ -0,0 +1,72 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict +from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventTypedDict, +) +from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict +from .messageoutputevent import MessageOutputEvent, MessageOutputEventTypedDict +from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict +from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict +from .responsestartedevent import ResponseStartedEvent, ResponseStartedEventTypedDict +from .ssetypes import SSETypes +from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventTypedDict, +) +from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ConversationEventsDataTypedDict = TypeAliasType( + "ConversationEventsDataTypedDict", + Union[ + ResponseStartedEventTypedDict, + ResponseDoneEventTypedDict, + ResponseErrorEventTypedDict, + ToolExecutionStartedEventTypedDict, + ToolExecutionDoneEventTypedDict, + AgentHandoffStartedEventTypedDict, + AgentHandoffDoneEventTypedDict, + FunctionCallEventTypedDict, + MessageOutputEventTypedDict, + ], +) + + +ConversationEventsData = Annotated[ + Union[ + Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], + Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], + Annotated[ResponseDoneEvent, Tag("conversation.response.done")], + Annotated[ResponseErrorEvent, Tag("conversation.response.error")], + Annotated[ResponseStartedEvent, Tag("conversation.response.started")], + Annotated[FunctionCallEvent, Tag("function.call.delta")], + 
Annotated[MessageOutputEvent, Tag("message.output.delta")], + Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], + Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class ConversationEventsTypedDict(TypedDict): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + data: ConversationEventsDataTypedDict + + +class ConversationEvents(BaseModel): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + + data: ConversationEventsData diff --git a/src/mistralai/models/conversationhistory.py b/src/mistralai/models/conversationhistory.py new file mode 100644 index 00000000..d07d7297 --- /dev/null +++ b/src/mistralai/models/conversationhistory.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationHistoryObject = Literal["conversation.history"] + +EntriesTypedDict = TypeAliasType( + "EntriesTypedDict", + Union[ + MessageInputEntryTypedDict, + FunctionResultEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], +) + + +Entries = TypeAliasType( + 
"Entries", + Union[ + MessageInputEntry, + FunctionResultEntry, + ToolExecutionEntry, + FunctionCallEntry, + MessageOutputEntry, + AgentHandoffEntry, + ], +) + + +class ConversationHistoryTypedDict(TypedDict): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + entries: List[EntriesTypedDict] + object: NotRequired[ConversationHistoryObject] + + +class ConversationHistory(BaseModel): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + + entries: List[Entries] + + object: Optional[ConversationHistoryObject] = "conversation.history" diff --git a/src/mistralai/models/conversationinputs.py b/src/mistralai/models/conversationinputs.py new file mode 100644 index 00000000..4d30cd76 --- /dev/null +++ b/src/mistralai/models/conversationinputs.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .inputentries import InputEntries, InputEntriesTypedDict +from typing import List, Union +from typing_extensions import TypeAliasType + + +ConversationInputsTypedDict = TypeAliasType( + "ConversationInputsTypedDict", Union[str, List[InputEntriesTypedDict]] +) + + +ConversationInputs = TypeAliasType("ConversationInputs", Union[str, List[InputEntries]]) diff --git a/src/mistralai/models/conversationmessages.py b/src/mistralai/models/conversationmessages.py new file mode 100644 index 00000000..9027045b --- /dev/null +++ b/src/mistralai/models/conversationmessages.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .messageentries import MessageEntries, MessageEntriesTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationMessagesObject = Literal["conversation.messages"] + + +class ConversationMessagesTypedDict(TypedDict): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + messages: List[MessageEntriesTypedDict] + object: NotRequired[ConversationMessagesObject] + + +class ConversationMessages(BaseModel): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + + messages: List[MessageEntries] + + object: Optional[ConversationMessagesObject] = "conversation.messages" diff --git a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py new file mode 100644 index 00000000..48cc6fe7 --- /dev/null +++ b/src/mistralai/models/conversationrequest.py @@ -0,0 +1,133 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +HandoffExecution = Literal["client", "server"] + +ToolsTypedDict = TypeAliasType( + "ToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +Tools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class ConversationRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[Nullable[bool]] + handoff_execution: NotRequired[Nullable[HandoffExecution]] + instructions: NotRequired[Nullable[str]] + 
tools: NotRequired[Nullable[List[ToolsTypedDict]]] + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + + +class ConversationRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = False + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[HandoffExecution] = UNSET + + instructions: OptionalNullable[str] = UNSET + + tools: OptionalNullable[List[Tools]] = UNSET + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "agent_id", + "model", + ] + nullable_fields = [ + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "agent_id", + "model", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/conversationresponse.py b/src/mistralai/models/conversationresponse.py new file mode 100644 index 00000000..61de8565 --- /dev/null +++ 
b/src/mistralai/models/conversationresponse.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationResponseObject = Literal["conversation.response"] + +OutputsTypedDict = TypeAliasType( + "OutputsTypedDict", + Union[ + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], +) + + +Outputs = TypeAliasType( + "Outputs", + Union[ToolExecutionEntry, FunctionCallEntry, MessageOutputEntry, AgentHandoffEntry], +) + + +class ConversationResponseTypedDict(TypedDict): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + outputs: List[OutputsTypedDict] + usage: ConversationUsageInfoTypedDict + object: NotRequired[ConversationResponseObject] + + +class ConversationResponse(BaseModel): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + + outputs: List[Outputs] + + usage: ConversationUsageInfo + + object: Optional[ConversationResponseObject] = "conversation.response" diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py new file mode 100644 index 00000000..58376140 --- /dev/null +++ b/src/mistralai/models/conversationrestartrequest.py @@ -0,0 +1,42 @@ +"""Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationRestartRequestHandoffExecution = Literal["client", "server"] + + +class ConversationRestartRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputsTypedDict + from_entry_id: str + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationRestartRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputs + + from_entry_id: str + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py new file mode 100644 index 00000000..f213aea3 --- /dev/null +++ b/src/mistralai/models/conversationrestartstreamrequest.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationRestartStreamRequestHandoffExecution = Literal["client", "server"] + + +class ConversationRestartStreamRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputsTypedDict + from_entry_id: str + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationRestartStreamRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputs + + from_entry_id: str + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py new file mode 100644 index 00000000..a1c21d9a --- /dev/null +++ b/src/mistralai/models/conversationstreamrequest.py @@ -0,0 +1,135 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationStreamRequestHandoffExecution = Literal["client", "server"] + +ConversationStreamRequestToolsTypedDict = TypeAliasType( + "ConversationStreamRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ConversationStreamRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class ConversationStreamRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[Nullable[bool]] 
+ handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] + instructions: NotRequired[Nullable[str]] + tools: NotRequired[Nullable[List[ConversationStreamRequestToolsTypedDict]]] + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + + +class ConversationStreamRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = True + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[ConversationStreamRequestHandoffExecution] = ( + UNSET + ) + + instructions: OptionalNullable[str] = UNSET + + tools: OptionalNullable[List[ConversationStreamRequestTools]] = UNSET + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "agent_id", + "model", + ] + nullable_fields = [ + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "agent_id", + "model", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + 
+ return m diff --git a/src/mistralai/models/conversationusageinfo.py b/src/mistralai/models/conversationusageinfo.py new file mode 100644 index 00000000..44ffd5e5 --- /dev/null +++ b/src/mistralai/models/conversationusageinfo.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ConversationUsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + connector_tokens: NotRequired[Nullable[int]] + connectors: NotRequired[Nullable[Dict[str, int]]] + + +class ConversationUsageInfo(BaseModel): + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + connector_tokens: OptionalNullable[int] = UNSET + + connectors: OptionalNullable[Dict[str, int]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "connector_tokens", + "connectors", + ] + nullable_fields = ["connector_tokens", "connectors"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git 
a/src/mistralai/models/documentlibrarytool.py b/src/mistralai/models/documentlibrarytool.py new file mode 100644 index 00000000..f36de710 --- /dev/null +++ b/src/mistralai/models/documentlibrarytool.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +DocumentLibraryToolType = Literal["document_library"] + + +class DocumentLibraryToolTypedDict(TypedDict): + library_ids: List[str] + r"""Ids of the library in which to search.""" + type: NotRequired[DocumentLibraryToolType] + + +class DocumentLibraryTool(BaseModel): + library_ids: List[str] + r"""Ids of the library in which to search.""" + + type: Optional[DocumentLibraryToolType] = "document_library" diff --git a/src/mistralai/models/functioncallentry.py b/src/mistralai/models/functioncallentry.py new file mode 100644 index 00000000..821e7c14 --- /dev/null +++ b/src/mistralai/models/functioncallentry.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, +) +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionCallEntryObject = Literal["entry"] + +FunctionCallEntryType = Literal["function.call"] + + +class FunctionCallEntryTypedDict(TypedDict): + tool_call_id: str + name: str + arguments: FunctionCallEntryArgumentsTypedDict + object: NotRequired[FunctionCallEntryObject] + type: NotRequired[FunctionCallEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class FunctionCallEntry(BaseModel): + tool_call_id: str + + name: str + + arguments: FunctionCallEntryArguments + + object: Optional[FunctionCallEntryObject] = "entry" + + type: Optional[FunctionCallEntryType] = "function.call" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git 
a/src/mistralai/models/functioncallentryarguments.py b/src/mistralai/models/functioncallentryarguments.py new file mode 100644 index 00000000..ac9e6227 --- /dev/null +++ b/src/mistralai/models/functioncallentryarguments.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType + + +FunctionCallEntryArgumentsTypedDict = TypeAliasType( + "FunctionCallEntryArgumentsTypedDict", Union[Dict[str, Any], str] +) + + +FunctionCallEntryArguments = TypeAliasType( + "FunctionCallEntryArguments", Union[Dict[str, Any], str] +) diff --git a/src/mistralai/models/functioncallevent.py b/src/mistralai/models/functioncallevent.py new file mode 100644 index 00000000..90b4b226 --- /dev/null +++ b/src/mistralai/models/functioncallevent.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionCallEventType = Literal["function.call.delta"] + + +class FunctionCallEventTypedDict(TypedDict): + id: str + name: str + tool_call_id: str + arguments: str + type: NotRequired[FunctionCallEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class FunctionCallEvent(BaseModel): + id: str + + name: str + + tool_call_id: str + + arguments: str + + type: Optional[FunctionCallEventType] = "function.call.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/models/functionresultentry.py b/src/mistralai/models/functionresultentry.py new file mode 100644 index 00000000..64040954 --- /dev/null +++ b/src/mistralai/models/functionresultentry.py @@ -0,0 +1,69 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionResultEntryObject = Literal["entry"] + +FunctionResultEntryType = Literal["function.result"] + + +class FunctionResultEntryTypedDict(TypedDict): + tool_call_id: str + result: str + object: NotRequired[FunctionResultEntryObject] + type: NotRequired[FunctionResultEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class FunctionResultEntry(BaseModel): + tool_call_id: str + + result: str + + object: Optional[FunctionResultEntryObject] = "entry" + + type: Optional[FunctionResultEntryType] = "function.result" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/functiontool.py b/src/mistralai/models/functiontool.py new file mode 100644 index 00000000..7ce5c464 --- /dev/null +++ b/src/mistralai/models/functiontool.py @@ -0,0 +1,21 @@ +"""Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionToolType = Literal["function"] + + +class FunctionToolTypedDict(TypedDict): + function: FunctionTypedDict + type: NotRequired[FunctionToolType] + + +class FunctionTool(BaseModel): + function: Function + + type: Optional[FunctionToolType] = "function" diff --git a/src/mistralai/models/imagegenerationtool.py b/src/mistralai/models/imagegenerationtool.py new file mode 100644 index 00000000..27bb2d12 --- /dev/null +++ b/src/mistralai/models/imagegenerationtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ImageGenerationToolType = Literal["image_generation"] + + +class ImageGenerationToolTypedDict(TypedDict): + type: NotRequired[ImageGenerationToolType] + + +class ImageGenerationTool(BaseModel): + type: Optional[ImageGenerationToolType] = "image_generation" diff --git a/src/mistralai/models/inputentries.py b/src/mistralai/models/inputentries.py new file mode 100644 index 00000000..9c0fea6e --- /dev/null +++ b/src/mistralai/models/inputentries.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +InputEntriesTypedDict = TypeAliasType( + "InputEntriesTypedDict", + Union[MessageInputEntryTypedDict, FunctionResultEntryTypedDict], +) + + +InputEntries = TypeAliasType( + "InputEntries", Union[MessageInputEntry, FunctionResultEntry] +) diff --git a/src/mistralai/models/messageentries.py b/src/mistralai/models/messageentries.py new file mode 100644 index 00000000..9b1706de --- /dev/null +++ b/src/mistralai/models/messageentries.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageEntriesTypedDict = TypeAliasType( + "MessageEntriesTypedDict", + Union[MessageInputEntryTypedDict, MessageOutputEntryTypedDict], +) + + +MessageEntries = TypeAliasType( + "MessageEntries", Union[MessageInputEntry, MessageOutputEntry] +) diff --git a/src/mistralai/models/messageinputcontentchunks.py b/src/mistralai/models/messageinputcontentchunks.py new file mode 100644 index 00000000..47704211 --- /dev/null +++ b/src/mistralai/models/messageinputcontentchunks.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageInputContentChunksTypedDict = TypeAliasType( + "MessageInputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ToolFileChunkTypedDict, + ], +) + + +MessageInputContentChunks = TypeAliasType( + "MessageInputContentChunks", + Union[TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk], +) diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py new file mode 100644 index 00000000..3d642cdf --- /dev/null +++ b/src/mistralai/models/messageinputentry.py @@ -0,0 +1,89 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +Object = Literal["entry"] + +MessageInputEntryType = Literal["message.input"] + +MessageInputEntryRole = Literal["assistant", "user"] + +MessageInputEntryContentTypedDict = TypeAliasType( + "MessageInputEntryContentTypedDict", + Union[str, List[MessageInputContentChunksTypedDict]], +) + + +MessageInputEntryContent = TypeAliasType( + "MessageInputEntryContent", Union[str, List[MessageInputContentChunks]] +) + + +class MessageInputEntryTypedDict(TypedDict): + r"""Representation of an input message inside the conversation.""" + + role: MessageInputEntryRole + content: MessageInputEntryContentTypedDict + object: NotRequired[Object] + type: NotRequired[MessageInputEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class MessageInputEntry(BaseModel): + r"""Representation of an input message inside the conversation.""" + + role: MessageInputEntryRole + + content: MessageInputEntryContent + + object: Optional[Object] = "entry" + + type: Optional[MessageInputEntryType] = "message.input" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + 
serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/messageoutputcontentchunks.py b/src/mistralai/models/messageoutputcontentchunks.py new file mode 100644 index 00000000..e83fb3a9 --- /dev/null +++ b/src/mistralai/models/messageoutputcontentchunks.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageOutputContentChunksTypedDict = TypeAliasType( + "MessageOutputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ToolFileChunkTypedDict, + ToolReferenceChunkTypedDict, + ], +) + + +MessageOutputContentChunks = TypeAliasType( + "MessageOutputContentChunks", + Union[ + TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk, ToolReferenceChunk + ], +) diff --git a/src/mistralai/models/messageoutputentry.py b/src/mistralai/models/messageoutputentry.py new file mode 100644 index 00000000..abb361e7 --- /dev/null +++ b/src/mistralai/models/messageoutputentry.py @@ -0,0 +1,100 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +MessageOutputEntryObject = Literal["entry"] + +MessageOutputEntryType = Literal["message.output"] + +MessageOutputEntryRole = Literal["assistant"] + +MessageOutputEntryContentTypedDict = TypeAliasType( + "MessageOutputEntryContentTypedDict", + Union[str, List[MessageOutputContentChunksTypedDict]], +) + + +MessageOutputEntryContent = TypeAliasType( + "MessageOutputEntryContent", Union[str, List[MessageOutputContentChunks]] +) + + +class MessageOutputEntryTypedDict(TypedDict): + content: MessageOutputEntryContentTypedDict + object: NotRequired[MessageOutputEntryObject] + type: NotRequired[MessageOutputEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + role: NotRequired[MessageOutputEntryRole] + + +class MessageOutputEntry(BaseModel): + content: MessageOutputEntryContent + + object: Optional[MessageOutputEntryObject] = "entry" + + type: Optional[MessageOutputEntryType] = "message.output" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + role: Optional[MessageOutputEntryRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "type", + "created_at", + "completed_at", + "id", + "agent_id", + "model", + "role", + ] + nullable_fields = ["completed_at", 
"agent_id", "model"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/messageoutputevent.py b/src/mistralai/models/messageoutputevent.py new file mode 100644 index 00000000..328874d6 --- /dev/null +++ b/src/mistralai/models/messageoutputevent.py @@ -0,0 +1,93 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +MessageOutputEventType = Literal["message.output.delta"] + +MessageOutputEventRole = Literal["assistant"] + +MessageOutputEventContentTypedDict = TypeAliasType( + "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] +) + + +MessageOutputEventContent = TypeAliasType( + "MessageOutputEventContent", Union[str, OutputContentChunks] +) + + +class MessageOutputEventTypedDict(TypedDict): + id: str + content: MessageOutputEventContentTypedDict + type: NotRequired[MessageOutputEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + content_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: 
NotRequired[Nullable[str]] + role: NotRequired[MessageOutputEventRole] + + +class MessageOutputEvent(BaseModel): + id: str + + content: MessageOutputEventContent + + type: Optional[MessageOutputEventType] = "message.output.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + content_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + role: Optional[MessageOutputEventRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "type", + "created_at", + "output_index", + "content_index", + "model", + "agent_id", + "role", + ] + nullable_fields = ["model", "agent_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/modelconversation.py b/src/mistralai/models/modelconversation.py new file mode 100644 index 00000000..3e927192 --- /dev/null +++ b/src/mistralai/models/modelconversation.py @@ -0,0 +1,127 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ModelConversationToolsTypedDict = TypeAliasType( + "ModelConversationToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ModelConversationTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +ModelConversationObject = Literal["conversation"] + + +class ModelConversationTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + model: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: 
NotRequired[List[ModelConversationToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + name: NotRequired[Nullable[str]] + r"""Name given to the conversation.""" + description: NotRequired[Nullable[str]] + r"""Description of the what the conversation is about.""" + object: NotRequired[ModelConversationObject] + + +class ModelConversation(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + model: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[ModelConversationTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + name: OptionalNullable[str] = UNSET + r"""Name given to the conversation.""" + + description: OptionalNullable[str] = UNSET + r"""Description of the what the conversation is about.""" + + object: Optional[ModelConversationObject] = "conversation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "name", + "description", + "object", + ] + nullable_fields = ["instructions", "name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + 
m[k] = val + + return m diff --git a/src/mistralai/models/outputcontentchunks.py b/src/mistralai/models/outputcontentchunks.py new file mode 100644 index 00000000..6b7e39ea --- /dev/null +++ b/src/mistralai/models/outputcontentchunks.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +OutputContentChunksTypedDict = TypeAliasType( + "OutputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ToolFileChunkTypedDict, + ToolReferenceChunkTypedDict, + ], +) + + +OutputContentChunks = TypeAliasType( + "OutputContentChunks", + Union[ + TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk, ToolReferenceChunk + ], +) diff --git a/src/mistralai/models/responsedoneevent.py b/src/mistralai/models/responsedoneevent.py new file mode 100644 index 00000000..296cb430 --- /dev/null +++ b/src/mistralai/models/responsedoneevent.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ResponseDoneEventType = Literal["conversation.response.done"] + + +class ResponseDoneEventTypedDict(TypedDict): + usage: ConversationUsageInfoTypedDict + type: NotRequired[ResponseDoneEventType] + created_at: NotRequired[datetime] + + +class ResponseDoneEvent(BaseModel): + usage: ConversationUsageInfo + + type: Optional[ResponseDoneEventType] = "conversation.response.done" + + created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responseerrorevent.py b/src/mistralai/models/responseerrorevent.py new file mode 100644 index 00000000..e4190d17 --- /dev/null +++ b/src/mistralai/models/responseerrorevent.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ResponseErrorEventType = Literal["conversation.response.error"] + + +class ResponseErrorEventTypedDict(TypedDict): + message: str + code: int + type: NotRequired[ResponseErrorEventType] + created_at: NotRequired[datetime] + + +class ResponseErrorEvent(BaseModel): + message: str + + code: int + + type: Optional[ResponseErrorEventType] = "conversation.response.error" + + created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responsestartedevent.py b/src/mistralai/models/responsestartedevent.py new file mode 100644 index 00000000..6acb483e --- /dev/null +++ b/src/mistralai/models/responsestartedevent.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ResponseStartedEventType = Literal["conversation.response.started"] + + +class ResponseStartedEventTypedDict(TypedDict): + conversation_id: str + type: NotRequired[ResponseStartedEventType] + created_at: NotRequired[datetime] + + +class ResponseStartedEvent(BaseModel): + conversation_id: str + + type: Optional[ResponseStartedEventType] = "conversation.response.started" + + created_at: Optional[datetime] = None diff --git a/src/mistralai/models/ssetypes.py b/src/mistralai/models/ssetypes.py new file mode 100644 index 00000000..4d15b4f1 --- /dev/null +++ b/src/mistralai/models/ssetypes.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +SSETypes = Literal[ + "conversation.response.started", + "conversation.response.done", + "conversation.response.error", + "message.output.delta", + "tool.execution.started", + "tool.execution.done", + "agent.handoff.started", + "agent.handoff.done", + "function.call.delta", +] +r"""Server side events sent when streaming a conversation response.""" diff --git a/src/mistralai/models/toolexecutiondoneevent.py b/src/mistralai/models/toolexecutiondoneevent.py new file mode 100644 index 00000000..c73d943a --- /dev/null +++ b/src/mistralai/models/toolexecutiondoneevent.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.types import BaseModel +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ToolExecutionDoneEventType = Literal["tool.execution.done"] + + +class ToolExecutionDoneEventTypedDict(TypedDict): + id: str + name: BuiltInConnectors + type: NotRequired[ToolExecutionDoneEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + info: NotRequired[Dict[str, Any]] + + +class ToolExecutionDoneEvent(BaseModel): + id: str + + name: BuiltInConnectors + + type: Optional[ToolExecutionDoneEventType] = "tool.execution.done" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + info: Optional[Dict[str, Any]] = None diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py new file mode 100644 index 00000000..20c9bf19 --- /dev/null +++ b/src/mistralai/models/toolexecutionentry.py @@ -0,0 +1,70 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ToolExecutionEntryObject = Literal["entry"] + +ToolExecutionEntryType = Literal["tool.execution"] + + +class ToolExecutionEntryTypedDict(TypedDict): + name: BuiltInConnectors + object: NotRequired[ToolExecutionEntryObject] + type: NotRequired[ToolExecutionEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + info: NotRequired[Dict[str, Any]] + + +class ToolExecutionEntry(BaseModel): + name: BuiltInConnectors + + object: Optional[ToolExecutionEntryObject] = "entry" + + type: Optional[ToolExecutionEntryType] = "tool.execution" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + info: Optional[Dict[str, Any]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id", "info"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/toolexecutionstartedevent.py 
b/src/mistralai/models/toolexecutionstartedevent.py new file mode 100644 index 00000000..e140665e --- /dev/null +++ b/src/mistralai/models/toolexecutionstartedevent.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ToolExecutionStartedEventType = Literal["tool.execution.started"] + + +class ToolExecutionStartedEventTypedDict(TypedDict): + id: str + name: BuiltInConnectors + type: NotRequired[ToolExecutionStartedEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class ToolExecutionStartedEvent(BaseModel): + id: str + + name: BuiltInConnectors + + type: Optional[ToolExecutionStartedEventType] = "tool.execution.started" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/models/toolfilechunk.py b/src/mistralai/models/toolfilechunk.py new file mode 100644 index 00000000..1d28e2db --- /dev/null +++ b/src/mistralai/models/toolfilechunk.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ToolFileChunkType = Literal["tool_file"] + + +class ToolFileChunkTypedDict(TypedDict): + tool: BuiltInConnectors + file_id: str + type: NotRequired[ToolFileChunkType] + file_name: NotRequired[Nullable[str]] + file_type: NotRequired[Nullable[str]] + + +class ToolFileChunk(BaseModel): + tool: BuiltInConnectors + + file_id: str + + type: Optional[ToolFileChunkType] = "tool_file" + + file_name: OptionalNullable[str] = UNSET + + file_type: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "file_name", "file_type"] + nullable_fields = ["file_name", "file_type"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/toolreferencechunk.py b/src/mistralai/models/toolreferencechunk.py new file mode 100644 index 00000000..84f72696 --- /dev/null +++ b/src/mistralai/models/toolreferencechunk.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ToolReferenceChunkType = Literal["tool_reference"] + + +class ToolReferenceChunkTypedDict(TypedDict): + tool: BuiltInConnectors + title: str + type: NotRequired[ToolReferenceChunkType] + url: NotRequired[Nullable[str]] + source: NotRequired[Nullable[str]] + + +class ToolReferenceChunk(BaseModel): + tool: BuiltInConnectors + + title: str + + type: Optional[ToolReferenceChunkType] = "tool_reference" + + url: OptionalNullable[str] = UNSET + + source: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "url", "source"] + nullable_fields = ["url", "source"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/websearchpremiumtool.py b/src/mistralai/models/websearchpremiumtool.py new file mode 100644 index 00000000..70fc5626 --- /dev/null +++ b/src/mistralai/models/websearchpremiumtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WebSearchPremiumToolType = Literal["web_search_premium"] + + +class WebSearchPremiumToolTypedDict(TypedDict): + type: NotRequired[WebSearchPremiumToolType] + + +class WebSearchPremiumTool(BaseModel): + type: Optional[WebSearchPremiumToolType] = "web_search_premium" diff --git a/src/mistralai/models/websearchtool.py b/src/mistralai/models/websearchtool.py new file mode 100644 index 00000000..3dfd1c53 --- /dev/null +++ b/src/mistralai/models/websearchtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WebSearchToolType = Literal["web_search"] + + +class WebSearchToolTypedDict(TypedDict): + type: NotRequired[WebSearchToolType] + + +class WebSearchTool(BaseModel): + type: Optional[WebSearchToolType] = "web_search" diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index e801eaf3..5414436d 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -10,6 +10,7 @@ from mistralai._hooks import SDKHooks from mistralai.agents import Agents from mistralai.batch import Batch +from mistralai.beta import Beta from mistralai.chat import Chat from mistralai.classifiers import Classifiers from mistralai.embeddings import Embeddings @@ -28,6 +29,7 @@ class Mistral(BaseSDK): models: Models r"""Model Management API""" + beta: Beta files: Files r"""Files API""" fine_tuning: FineTuning @@ -142,6 +144,7 @@ def __init__( def _init_sdks(self): self.models = Models(self.sdk_configuration) + self.beta = Beta(self.sdk_configuration) self.files = Files(self.sdk_configuration) self.fine_tuning = FineTuning(self.sdk_configuration) 
self.batch = Batch(self.sdk_configuration) From fd6fc04fc2df85d7b78fb64c5e3e3b9482c899d5 Mon Sep 17 00:00:00 2001 From: alex-ac Date: Mon, 26 May 2025 13:05:46 +0200 Subject: [PATCH 127/223] feat: update extra code --- .../async_chat_with_image_no_streaming.py | 4 +- examples/async_chat_with_streaming.py | 1 + examples/async_conversation_run.py | 57 +++ examples/async_conversation_run_mcp.py | 75 ++++ examples/async_conversation_run_mcp_remote.py | 49 ++ .../async_conversation_run_mcp_remote_auth.py | 133 ++++++ examples/async_conversation_run_stream.py | 82 ++++ examples/async_structured_outputs.py | 3 +- examples/azure/az_chat_no_streaming.py.py | 16 + examples/chat_with_streaming.py | 1 - examples/function_calling.py | 45 +- examples/gcp/gcp_async_chat_no_streaming.py | 24 + examples/mcp_servers/sse_server.py | 32 ++ examples/mcp_servers/stdio_server.py | 21 + examples/run_all.sh | 41 ++ examples/structured_outputs.py | 55 +++ poetry.lock | 418 +++++++++++++++++- pyproject.toml | 5 + src/mistralai/conversations.py | 203 +++++++++ src/mistralai/extra/__init__.py | 12 +- src/mistralai/extra/exceptions.py | 14 + src/mistralai/extra/mcp/__init__.py | 0 src/mistralai/extra/mcp/auth.py | 166 +++++++ src/mistralai/extra/mcp/base.py | 155 +++++++ src/mistralai/extra/mcp/sse.py | 165 +++++++ src/mistralai/extra/mcp/stdio.py | 22 + src/mistralai/extra/run/__init__.py | 0 src/mistralai/extra/run/context.py | 295 ++++++++++++ src/mistralai/extra/run/result.py | 212 +++++++++ src/mistralai/extra/run/tools.py | 225 ++++++++++ src/mistralai/extra/run/utils.py | 36 ++ 31 files changed, 2540 insertions(+), 27 deletions(-) create mode 100644 examples/async_conversation_run.py create mode 100644 examples/async_conversation_run_mcp.py create mode 100644 examples/async_conversation_run_mcp_remote.py create mode 100644 examples/async_conversation_run_mcp_remote_auth.py create mode 100644 examples/async_conversation_run_stream.py create mode 100644 
examples/azure/az_chat_no_streaming.py.py create mode 100755 examples/gcp/gcp_async_chat_no_streaming.py create mode 100644 examples/mcp_servers/sse_server.py create mode 100644 examples/mcp_servers/stdio_server.py create mode 100755 examples/run_all.sh create mode 100644 examples/structured_outputs.py create mode 100644 src/mistralai/extra/exceptions.py create mode 100644 src/mistralai/extra/mcp/__init__.py create mode 100644 src/mistralai/extra/mcp/auth.py create mode 100644 src/mistralai/extra/mcp/base.py create mode 100644 src/mistralai/extra/mcp/sse.py create mode 100644 src/mistralai/extra/mcp/stdio.py create mode 100644 src/mistralai/extra/run/__init__.py create mode 100644 src/mistralai/extra/run/context.py create mode 100644 src/mistralai/extra/run/result.py create mode 100644 src/mistralai/extra/run/tools.py create mode 100644 src/mistralai/extra/run/utils.py diff --git a/examples/async_chat_with_image_no_streaming.py b/examples/async_chat_with_image_no_streaming.py index ecb42257..efadff89 100755 --- a/examples/async_chat_with_image_no_streaming.py +++ b/examples/async_chat_with_image_no_streaming.py @@ -10,7 +10,7 @@ async def main(): api_key = os.environ["MISTRAL_API_KEY"] - model = "pixtral-12b" + model = "pixtral-12b-2409" client = Mistral(api_key=api_key) chat_response = await client.chat.complete_async( @@ -21,7 +21,7 @@ async def main(): {"type": "text", "text": "What's in this image?"}, { "type": "image_url", - "image_url": "https://cms.mistral.ai/assets/af26a11d-0793-439f-a06e-7694b24b8270", + "image_url": "https://cms.mistral.ai/assets/a64b3821-3a4c-4d4d-b718-d653f3eb7a5e.png?", }, ] ) diff --git a/examples/async_chat_with_streaming.py b/examples/async_chat_with_streaming.py index 736c47a0..1ef500ae 100755 --- a/examples/async_chat_with_streaming.py +++ b/examples/async_chat_with_streaming.py @@ -20,6 +20,7 @@ async def main(): UserMessage(content="What is the 
best French cheese?give the best 50") ], ) + assert response async for chunk in response: if chunk.data.choices[0].delta.content is not None: print(chunk.data.choices[0].delta.content, end="") diff --git a/examples/async_conversation_run.py b/examples/async_conversation_run.py new file mode 100644 index 00000000..9e118037 --- /dev/null +++ b/examples/async_conversation_run.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai import Mistral +from mistralai.extra.run.context import RunContext +from mistralai.types import BaseModel + +MODEL = "mistral-medium-latest" + + +def math_question_generator(question_num: int): + """Random generator of mathematical question + + Args: + question_num (int): the number of the question that will be returned, should be between 1-100 + """ + return ( + "solve the following differential equation: `y'' + 3y' + 2y = 0`" + if question_num % 2 == 0 + else "solve the following differential equation: `y'' - 4y' + 4y = e^x`" + ) + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + async with RunContext(model=MODEL, output_format=MathDemonstration) as run_ctx: + # register a new function that can be executed on the client side + run_ctx.register_func(math_question_generator) + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + instructions="Use the code interpreter to help you when asked mathematical questions.", + inputs=[ + {"role": "user", "content": "hey"}, + {"role": "assistant", "content": "hello"}, + {"role": "user", "content": "Request a math question and answer it."}, + ], + tools=[{"type": "code_interpreter"}], + ) + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print(f"Final model: {run_result.output_as_model}") + + +if 
__name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_conversation_run_mcp.py b/examples/async_conversation_run_mcp.py new file mode 100644 index 00000000..0e373715 --- /dev/null +++ b/examples/async_conversation_run_mcp.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +import asyncio +import os +import random + +from mistralai import Mistral +from mistralai.extra.run.context import RunContext +from mcp import StdioServerParameters +from mistralai.extra.mcp.stdio import ( + MCPClientSTDIO, +) +from pathlib import Path + +from mistralai.types import BaseModel + +cwd = Path(__file__).parent +MODEL = "mistral-medium-latest" + + +async def main() -> None: + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + # Create a mcp server has a tool to return the weather based on the location + server_params = StdioServerParameters( + command="python", + args=[str((cwd / "mcp_servers/stdio_server.py").resolve())], + env=None, + ) + + weather_agent = client.beta.agents.create( + model=MODEL, + name="weather teller", + instructions="You are able to tell the weather.", + description="", + ) + + class WeatherResult(BaseModel): + user: str + location: str + temperature: float + + async with RunContext( + agent_id=weather_agent.id, + output_format=WeatherResult, + continue_on_fn_error=True, + ) as run_ctx: + # Add location function to the run context + @run_ctx.register_func + def get_location(name: str) -> str: + """function to get location of a user. + + Args: + name: name of the user. 
+ """ + return random.choice(["New York", "London", "Paris", "Tokyo", "Sydney"]) + + # Add mcp client to the run context + mcp_client = MCPClientSTDIO(stdio_params=server_params) + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + inputs="Tell me the weather in John's location currently.", + ) + + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print() + print(f"Final model: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_conversation_run_mcp_remote.py b/examples/async_conversation_run_mcp_remote.py new file mode 100644 index 00000000..7b2f46a6 --- /dev/null +++ b/examples/async_conversation_run_mcp_remote.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai import Mistral +from mistralai.extra.run.context import RunContext + +from mistralai.extra.mcp.sse import ( + MCPClientSSE, + SSEServerParams, +) +from pathlib import Path + +cwd = Path(__file__).parent +MODEL = "mistral-medium-latest" + +# Use an official remote mcp server +# you can find some at: +# - https://mcpservers.org/remote-mcp-servers +# this one does not require auth: https://remote.mcpservers.org/edgeone-pages/mcp + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + server_url = "https://mcp.semgrep.ai/sse" + mcp_client = MCPClientSSE(sse_params=SSEServerParams(url=server_url, timeout=100)) + + async with RunContext( + model=MODEL, + ) as run_ctx: + # Add mcp client to the run context + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + inputs="Can you write a hello_world.py and check for security 
vulnerabilities", + ) + + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print() + print(f"Final Response: {run_result.output_as_text}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_conversation_run_mcp_remote_auth.py b/examples/async_conversation_run_mcp_remote_auth.py new file mode 100644 index 00000000..f69d8096 --- /dev/null +++ b/examples/async_conversation_run_mcp_remote_auth.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python +import asyncio +from http.server import BaseHTTPRequestHandler, HTTPServer +import os +import threading +import webbrowser + +from mistralai import Mistral +from mistralai.extra.run.context import RunContext + +from mistralai.extra.mcp.sse import ( + MCPClientSSE, + SSEServerParams, +) +from mistralai.extra.mcp.auth import build_oauth_params + +MODEL = "mistral-medium-latest" + +CALLBACK_PORT = 16010 + + +# Use an official remote mcp server +# you can find some at: +# - https://mcpservers.org/remote-mcp-servers +# - https://support.anthropic.com/en/articles/11176164-pre-built-integrations-using-remote-mcp +# this one has auth: https://mcp.linear.app/sse + + +def run_callback_server(callback_func): + auth_response: dict = {"url": ""} + + class OAuthCallbackHandler(BaseHTTPRequestHandler): + server_version = "HTTP" + code = None + + def do_GET(self): + if "/callback" in self.path: + try: + auth_response["url"] = self.path + self.send_response(200) + self.send_header("Content-type", "text/html") + self.end_headers() + callback_func() + response_html = "

You may now close this window.

" + self.wfile.write(response_html.encode()) + threading.Thread(target=httpd.shutdown).start() + except Exception: + self.send_response(500) + self.end_headers() + + server_address = ("localhost", CALLBACK_PORT) + httpd = HTTPServer(server_address, OAuthCallbackHandler) + threading.Thread(target=httpd.serve_forever).start() + redirect_url = f"http://localhost:{CALLBACK_PORT}/oauth/callback" + return httpd, redirect_url, auth_response + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + server_url = "https://mcp.linear.app/sse" + + # set-up the client + mcp_client = MCPClientSSE( + sse_params=SSEServerParams( + url=server_url, + ) + ) + + callback_event = asyncio.Event() + event_loop = asyncio.get_event_loop() + + # check if auth is required + if await mcp_client.requires_auth(): + # let's login + httpd, redirect_url, auth_response = run_callback_server( + callback_func=lambda: event_loop.call_soon_threadsafe(callback_event.set) + ) + try: + # First create the required oauth config, this means fetching the server metadata and registering a client + oauth_params = await build_oauth_params( + mcp_client.base_url, redirect_url=redirect_url + ) + mcp_client.set_oauth_params(oauth_params=oauth_params) + login_url, state = await mcp_client.get_auth_url_and_state(redirect_url) + + # The oauth params like client_id, client_secret would generally be saved in some persistent storage. + # The oauth state and token would be saved in a user session. 
+ + # wait for the user to complete the authentication process + print("Please go to this URL and authorize the application:", login_url) + webbrowser.open(login_url, new=2) + await callback_event.wait() + + # in a real app this would be your oauth2 callback route you would get the code from the query params, + # verify the state, and then get the token + # Here we recreate a new client with the saved params which and exchange the code for a token + mcp_client = MCPClientSSE( + sse_params=SSEServerParams( + url=server_url, + ), + oauth_params=oauth_params, + ) + + token = await mcp_client.get_token_from_auth_response( + auth_response["url"], redirect_url=redirect_url, state=state + ) + mcp_client.set_auth_token(token) + + except Exception as e: + print(f"Error during authentication: {e}") + finally: + httpd.shutdown() + httpd.server_close() + + # Now it's possible to make a query to the mcp server as we would do without authentication + async with RunContext( + model=MODEL, + ) as run_ctx: + # Add mcp client to the run context + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + inputs="Tell me which projects do I have in my workspace?", + ) + + print(f"Final Response: {run_result.output_as_text}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_conversation_run_stream.py b/examples/async_conversation_run_stream.py new file mode 100644 index 00000000..1e6ad87b --- /dev/null +++ b/examples/async_conversation_run_stream.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python +import asyncio +import os +import random + +from mistralai import Mistral +from mistralai.extra.run.context import RunContext +from mcp import StdioServerParameters +from mistralai.extra.mcp.stdio import MCPClientSTDIO +from pathlib import Path + +from mistralai.extra.run.result import RunResult +from mistralai.types import BaseModel + +cwd = Path(__file__).parent +MODEL = 
"mistral-medium-latest" + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + # Create a mcp server has a tool to return the weather based on the location + server_params = StdioServerParameters( + command="python", + args=[str((cwd / "mcp_servers/stdio_server.py").resolve())], + env=None, + ) + + weather_agent = client.beta.agents.create( + model=MODEL, + name="weather teller", + instructions="You are able to tell the weather.", + description="", + ) + + class WeatherResult(BaseModel): + user: str + location: str + temperature: float + + async with RunContext( + agent_id=weather_agent.id, + output_format=WeatherResult, + ) as run_ctx: + # Add location function to the run context + @run_ctx.register_func + def get_location(name: str) -> str: + """function to get location of a user. + + Args: + name: name of the user. + """ + return random.choice(["New York", "London", "Paris", "Tokyo", "Sydney"]) + + # Add mcp client to the run context + mcp_client = MCPClientSTDIO(stdio_params=server_params) + await run_ctx.register_mcp_client(mcp_client=mcp_client) + + events = await client.beta.conversations.run_stream_async( + run_ctx=run_ctx, + inputs="Tell me the weather in John's location currently.", + ) + + run_result = None + async for event in events: + if isinstance(event, RunResult): + run_result = event + else: + print(event) + + if not run_result: + raise RuntimeError("not run result found") + + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print(f"Final model: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/async_structured_outputs.py b/examples/async_structured_outputs.py index 560934e9..a512d38f 100644 --- a/examples/async_structured_outputs.py +++ b/examples/async_structured_outputs.py @@ -5,7 +5,6 @@ from pydantic import BaseModel from mistralai import Mistral -from typing import List async def main(): @@ -17,7 
+16,7 @@ class Explanation(BaseModel): output: str class MathDemonstration(BaseModel): - steps: List[Explanation] + steps: list[Explanation] final_answer: str chat_response = await client.chat.parse_async( diff --git a/examples/azure/az_chat_no_streaming.py.py b/examples/azure/az_chat_no_streaming.py.py new file mode 100644 index 00000000..485b594e --- /dev/null +++ b/examples/azure/az_chat_no_streaming.py.py @@ -0,0 +1,16 @@ +import os + +from mistralai_azure import MistralAzure + +client = MistralAzure( + azure_api_key=os.environ["AZURE_API_KEY"], + azure_endpoint=os.environ["AZURE_ENDPOINT"], +) + +res = client.chat.complete( + messages=[ + {"role": "user", "content": "What is the capital of France?"}, + ], + # you don't need model as it will always be "azureai" +) +print(res.choices[0].message.content) diff --git a/examples/chat_with_streaming.py b/examples/chat_with_streaming.py index 19d48a15..66b167f1 100755 --- a/examples/chat_with_streaming.py +++ b/examples/chat_with_streaming.py @@ -16,7 +16,6 @@ def main(): model=model, messages=[UserMessage(content="What is the best French cheese?")], ): - print(chunk.data.choices[0].delta.content, end="") diff --git a/examples/function_calling.py b/examples/function_calling.py index e7eba594..aba7d671 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -90,35 +90,46 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: messages = [UserMessage(content="What's the status of my transaction?")] -response = client.chat.complete(model=model, messages=messages, tools=tools) +response = client.chat.complete( + model=model, messages=messages, tools=tools, temperature=0 +) print(response.choices[0].message.content) messages.append(AssistantMessage(content=response.choices[0].message.content)) messages.append(UserMessage(content="My transaction ID is T1001.")) -response = client.chat.complete(model=model, messages=messages, tools=tools) 
-messages.append(response.choices[0].message) +response = client.chat.complete( + model=model, messages=messages, tools=tools, temperature=0 +) -for tool_call in response.choices[0].message.tool_calls: +tool_call = response.choices[0].message.tool_calls[0] +function_name = tool_call.function.name +function_params = json.loads(tool_call.function.arguments) - function_name = tool_call.function.name - function_params = json.loads(tool_call.function.arguments) +print( + f"calling function_name: {function_name}, with function_params: {function_params}" +) - print( - f"calling function_name: {function_name}, with function_params: {function_params}" - ) +function_result = names_to_functions[function_name](**function_params) - function_result =names_to_functions[function_name](**function_params) - messages.append( - ToolMessage( - name=function_name, - content=function_result, - tool_call_id=tool_call.id, - ) +messages.append( + AssistantMessage( + content=response.choices[0].message.content, + tool_calls=response.choices[0].message.tool_calls, + ) +) +messages.append( + ToolMessage( + name=function_name, + content=function_result, + tool_call_id=tool_call.id, ) +) print(messages) -response = client.chat.complete(model=model, messages=messages, tools=tools) +response = client.chat.complete( + model=model, messages=messages, tools=tools, temperature=0 +) print(f"{response.choices[0].message.content}") diff --git a/examples/gcp/gcp_async_chat_no_streaming.py b/examples/gcp/gcp_async_chat_no_streaming.py new file mode 100755 index 00000000..178f151c --- /dev/null +++ b/examples/gcp/gcp_async_chat_no_streaming.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python + +import asyncio +import os + +from mistralai_gcp import MistralGoogleCloud +from mistralai_gcp.models.usermessage import UserMessage + + +async def main(): + model = "mistral-large-2407" + + client = MistralGoogleCloud(project_id=os.environ["GCP_PROJECT_ID"]) + + chat_response = await client.chat.complete_async( + model=model, 
+ messages=[UserMessage(content="What is the best French cheese?")], + ) + + print(chat_response.choices[0].message.content) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mcp_servers/sse_server.py b/examples/mcp_servers/sse_server.py new file mode 100644 index 00000000..66edb98b --- /dev/null +++ b/examples/mcp_servers/sse_server.py @@ -0,0 +1,32 @@ +import random +import threading +from contextlib import contextmanager + +from mcp.server.fastmcp import FastMCP +import logging + +logging.basicConfig(level=logging.ERROR) + +# Initialize FastMCP server +mcp = FastMCP("weather") + + +@mcp.tool() +async def get_weather(location: str) -> float: + return random.random() * 30 + + +def run_sse_server(): + mcp.run(transport="sse") + + +@contextmanager +def run_sse_server_in_background(): + """start the server in a new thread""" + thread = threading.Thread(target=run_sse_server, daemon=True) + thread.start() + yield thread + + +if __name__ == "__main__": + run_sse_server() diff --git a/examples/mcp_servers/stdio_server.py b/examples/mcp_servers/stdio_server.py new file mode 100644 index 00000000..f95ac8dc --- /dev/null +++ b/examples/mcp_servers/stdio_server.py @@ -0,0 +1,21 @@ +import random +from mcp.server.fastmcp import FastMCP +import logging + +logging.basicConfig(level=logging.ERROR) + +# Initialize FastMCP server +mcp = FastMCP("weather") + + +@mcp.tool() +async def get_weather(location: str) -> float: + return random.random() * 30 + + +def run_stdio_server(): + mcp.run(transport="stdio") + + +if __name__ == "__main__": + run_stdio_server() diff --git a/examples/run_all.sh b/examples/run_all.sh new file mode 100755 index 00000000..a48d34af --- /dev/null +++ b/examples/run_all.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# List of files to exclude +exclude_files=( + "examples/chatbot_with_streaming.py" + "examples/async_conversation_run_mcp_remote_auth.py" +) + +# Check if the first argument is "no-extra-dep" then remove all the files that require 
the extra dependencies +if [ "$1" = "--no-extra-dep" ]; then + # Add more files to the exclude list + exclude_files+=( + "examples/async_conversation_run_mcp_remote.py" + "examples/async_conversation_run_mcp.py" + "examples/async_conversation_run_stream.py" + "examples/async_conversation_run.py" + ) +fi + +failed=0 + +for file in examples/*.py; do + # Check if the file is not in the exclude list + if [ -f "$file" ] && [[ ! " ${exclude_files[@]} " =~ " $file " ]]; then + echo "Running $file" + # Run the script and capture the exit status + if python3 "$file" > /dev/null; then + echo "Success" + else + echo "Failed" + failed=1 + fi + else + echo "Skipped $file" + fi +done + +# If one of the example scripts failed, then exit +if [ $failed -ne 0 ]; then + exit 1 +fi diff --git a/examples/structured_outputs.py b/examples/structured_outputs.py new file mode 100644 index 00000000..bc4a5e18 --- /dev/null +++ b/examples/structured_outputs.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python + +import os +from pydantic import BaseModel + +from mistralai import Mistral + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + print("Using the .parse method to parse the response into a Pydantic model:\n") + chat_response = client.chat.parse( + model="mistral-large-latest", + messages=[ + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. 
For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format=MathDemonstration, + ) + print(chat_response.choices[0].message.parsed) + + # Or with the streaming API + print( + "\nUsing the .parse_stream method to stream back the response into a JSON Schema:\n" + ) + with client.chat.parse_stream( + model="mistral-large-latest", + messages=[ + { + "role": "system", + "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation use the explanation field to detail the reasoning.", + }, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format=MathDemonstration, + ) as stream: + for chunk in stream: + print(chunk.data.choices[0].delta.content, end="") + + +if __name__ == "__main__": + main() diff --git a/poetry.lock b/poetry.lock index 3d36b94f..97d62d45 100644 --- a/poetry.lock +++ b/poetry.lock @@ -50,6 +50,22 @@ files = [ [package.dependencies] typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} +[[package]] +name = "authlib" +version = "1.6.0" +description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." 
+optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "extra == \"agents\"" +files = [ + {file = "authlib-1.6.0-py2.py3-none-any.whl", hash = "sha256:91685589498f79e8655e8a8947431ad6288831d643f11c55c2143ffcc738048d"}, + {file = "authlib-1.6.0.tar.gz", hash = "sha256:4367d32031b7af175ad3a323d571dc7257b7099d55978087ceae4a0d88cd3210"}, +] + +[package.dependencies] +cryptography = "*" + [[package]] name = "cachetools" version = "5.5.0" @@ -75,6 +91,87 @@ files = [ {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, ] +[[package]] +name = "cffi" +version = "1.17.1" +description = "Foreign Function Interface for Python calling C code." +optional = true +python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"agents\" and platform_python_implementation != \"PyPy\"" +files = [ + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash 
= "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash 
= "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = 
"sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = 
"cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, +] + +[package.dependencies] +pycparser = "*" + [[package]] name = "charset-normalizer" version = "3.4.0" @@ -191,18 +288,146 @@ files = [ {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] +[[package]] +name = "click" +version = "8.2.1" +description = "Composable command line interface toolkit" +optional = true +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, + {file = "click-8.2.1.tar.gz", hash = 
"sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + [[package]] name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["dev"] -markers = "sys_platform == \"win32\"" +groups = ["main", "dev"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +markers = {main = "extra == \"agents\"", dev = "sys_platform == \"win32\""} + +[[package]] +name = "cryptography" +version = "43.0.3" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = true +python-versions = ">=3.7" +groups = ["main"] +markers = "python_version < \"3.11\" and extra == \"agents\"" +files = [ + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = 
"sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "cryptography" +version = "45.0.3" +description = "cryptography is a package which provides cryptographic 
recipes and primitives to Python developers." +optional = true +python-versions = "!=3.9.0,!=3.9.1,>=3.7" +groups = ["main"] +markers = "python_version >= \"3.11\" and extra == \"agents\"" +files = [ + {file = "cryptography-45.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:7573d9eebaeceeb55285205dbbb8753ac1e962af3d9640791d12b36864065e71"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d377dde61c5d67eb4311eace661c3efda46c62113ff56bf05e2d679e02aebb5b"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fae1e637f527750811588e4582988932c222f8251f7b7ea93739acb624e1487f"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ca932e11218bcc9ef812aa497cdf669484870ecbcf2d99b765d6c27a86000942"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af3f92b1dc25621f5fad065288a44ac790c5798e986a34d393ab27d2b27fcff9"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2f8f8f0b73b885ddd7f3d8c2b2234a7d3ba49002b0223f58cfde1bedd9563c56"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9cc80ce69032ffa528b5e16d217fa4d8d4bb7d6ba8659c1b4d74a1b0f4235fca"}, + {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:c824c9281cb628015bfc3c59335163d4ca0540d49de4582d6c2637312907e4b1"}, + {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5833bb4355cb377ebd880457663a972cd044e7f49585aee39245c0d592904578"}, + {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:9bb5bf55dcb69f7067d80354d0a348368da907345a2c448b0babc4215ccd3497"}, + {file = "cryptography-45.0.3-cp311-abi3-win32.whl", hash = "sha256:3ad69eeb92a9de9421e1f6685e85a10fbcfb75c833b42cc9bc2ba9fb00da4710"}, + {file = "cryptography-45.0.3-cp311-abi3-win_amd64.whl", hash = 
"sha256:97787952246a77d77934d41b62fb1b6f3581d83f71b44796a4158d93b8f5c490"}, + {file = "cryptography-45.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:c92519d242703b675ccefd0f0562eb45e74d438e001f8ab52d628e885751fb06"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5edcb90da1843df85292ef3a313513766a78fbbb83f584a5a58fb001a5a9d57"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38deed72285c7ed699864f964a3f4cf11ab3fb38e8d39cfcd96710cd2b5bb716"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5555365a50efe1f486eed6ac7062c33b97ccef409f5970a0b6f205a7cfab59c8"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9e4253ed8f5948a3589b3caee7ad9a5bf218ffd16869c516535325fece163dcc"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cfd84777b4b6684955ce86156cfb5e08d75e80dc2585e10d69e47f014f0a5342"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:a2b56de3417fd5f48773ad8e91abaa700b678dc7fe1e0c757e1ae340779acf7b"}, + {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:57a6500d459e8035e813bd8b51b671977fb149a8c95ed814989da682314d0782"}, + {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f22af3c78abfbc7cbcdf2c55d23c3e022e1a462ee2481011d518c7fb9c9f3d65"}, + {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:232954730c362638544758a8160c4ee1b832dc011d2c41a306ad8f7cccc5bb0b"}, + {file = "cryptography-45.0.3-cp37-abi3-win32.whl", hash = "sha256:cb6ab89421bc90e0422aca911c69044c2912fc3debb19bb3c1bfe28ee3dff6ab"}, + {file = "cryptography-45.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:d54ae41e6bd70ea23707843021c778f151ca258081586f0cfa31d936ae43d1b2"}, + {file = 
"cryptography-45.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ed43d396f42028c1f47b5fec012e9e12631266e3825e95c00e3cf94d472dac49"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:fed5aaca1750e46db870874c9c273cd5182a9e9deb16f06f7bdffdb5c2bde4b9"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:00094838ecc7c6594171e8c8a9166124c1197b074cfca23645cee573910d76bc"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:92d5f428c1a0439b2040435a1d6bc1b26ebf0af88b093c3628913dd464d13fa1"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:ec64ee375b5aaa354b2b273c921144a660a511f9df8785e6d1c942967106438e"}, + {file = "cryptography-45.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:71320fbefd05454ef2d457c481ba9a5b0e540f3753354fff6f780927c25d19b0"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:edd6d51869beb7f0d472e902ef231a9b7689508e83880ea16ca3311a00bf5ce7"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:555e5e2d3a53b4fabeca32835878b2818b3f23966a4efb0d566689777c5a12c8"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:25286aacb947286620a31f78f2ed1a32cded7be5d8b729ba3fb2c988457639e4"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:050ce5209d5072472971e6efbfc8ec5a8f9a841de5a4db0ebd9c2e392cb81972"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:dc10ec1e9f21f33420cc05214989544727e776286c1c16697178978327b95c9c"}, + {file = "cryptography-45.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:9eda14f049d7f09c2e8fb411dda17dd6b16a3c76a1de5e249188a32aeb92de19"}, + {file = "cryptography-45.0.3.tar.gz", hash = 
"sha256:ec21313dd335c51d7877baf2972569f40a4291b76a0ce51391523ae358d05899"}, +] + +[package.dependencies] +cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""] +docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] +nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""] +pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] +sdist = ["build (>=1.0.0)"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi (>=2024)", "cryptography-vectors (==45.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test-randomorder = ["pytest-randomly"] [[package]] name = "dill" @@ -276,6 +501,22 @@ pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0.dev0)"] +[[package]] +name = "griffe" +version = "1.7.3" +description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
+optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "extra == \"agents\"" +files = [ + {file = "griffe-1.7.3-py3-none-any.whl", hash = "sha256:c6b3ee30c2f0f17f30bcdef5068d6ab7a2a4f1b8bf1a3e74b56fffd21e1c5f75"}, + {file = "griffe-1.7.3.tar.gz", hash = "sha256:52ee893c6a3a968b639ace8015bec9d36594961e156e23315c8e8e51401fa50b"}, +] + +[package.dependencies] +colorama = ">=0.4" + [[package]] name = "h11" version = "0.14.0" @@ -335,6 +576,19 @@ http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "httpx-sse" +version = "0.4.0" +description = "Consume Server-Sent Event (SSE) messages with HTTPX." +optional = true +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, + {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, +] + [[package]] name = "idna" version = "3.10" @@ -389,6 +643,35 @@ files = [ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, ] +[[package]] +name = "mcp" +version = "1.9.1" +description = "Model Context Protocol SDK" +optional = true +python-versions = ">=3.10" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "mcp-1.9.1-py3-none-any.whl", hash = "sha256:2900ded8ffafc3c8a7bfcfe8bc5204037e988e753ec398f371663e6a06ecd9a9"}, + {file = "mcp-1.9.1.tar.gz", hash = "sha256:19879cd6dde3d763297617242888c2f695a95dfa854386a6a68676a646ce75e4"}, +] + +[package.dependencies] +anyio = ">=4.5" +httpx = ">=0.27" +httpx-sse = ">=0.4" +pydantic = ">=2.7.2,<3.0.0" +pydantic-settings = ">=2.5.2" +python-multipart = ">=0.0.9" +sse-starlette = ">=1.6.1" +starlette = ">=0.27" +uvicorn = {version = ">=0.23.1", markers = "sys_platform 
!= \"emscripten\""} + +[package.extras] +cli = ["python-dotenv (>=1.0.0)", "typer (>=0.12.4)"] +rich = ["rich (>=13.9.4)"] +ws = ["websockets (>=15.0.1)"] + [[package]] name = "mypy" version = "1.14.1" @@ -535,6 +818,19 @@ files = [ [package.dependencies] pyasn1 = ">=0.4.6,<0.7.0" +[[package]] +name = "pycparser" +version = "2.22" +description = "C parser in Python" +optional = true +python-versions = ">=3.8" +groups = ["main"] +markers = "extra == \"agents\" and platform_python_implementation != \"PyPy\"" +files = [ + {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, + {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, +] + [[package]] name = "pydantic" version = "2.10.6" @@ -669,6 +965,31 @@ files = [ [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +[[package]] +name = "pydantic-settings" +version = "2.9.1" +description = "Settings management using Pydantic" +optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef"}, + {file = "pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268"}, +] + +[package.dependencies] +pydantic = ">=2.7.0" +python-dotenv = ">=0.21.0" +typing-inspection = ">=0.4.0" + +[package.extras] +aws-secrets-manager = ["boto3 (>=1.35.0)", "boto3-stubs[secretsmanager]"] +azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] +gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + [[package]] name = "pylint" version = "3.2.3" @@ -757,6 +1078,35 @@ files = [ [package.dependencies] six = ">=1.5" +[[package]] +name = 
"python-dotenv" +version = "1.1.0" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, + {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "python-multipart" +version = "0.0.20" +description = "A streaming multipart parser for Python" +optional = true +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104"}, + {file = "python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13"}, +] + [[package]] name = "requests" version = "2.32.3" @@ -819,6 +1169,46 @@ files = [ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] +[[package]] +name = "sse-starlette" +version = "2.1.3" +description = "SSE plugin for Starlette" +optional = true +python-versions = ">=3.8" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "sse_starlette-2.1.3-py3-none-any.whl", hash = "sha256:8ec846438b4665b9e8c560fcdea6bc8081a3abf7942faa95e5a744999d219772"}, + {file = "sse_starlette-2.1.3.tar.gz", hash = "sha256:9cd27eb35319e1414e3d2558ee7414487f9529ce3b3cf9b21434fd110e017169"}, +] + +[package.dependencies] +anyio = "*" +starlette = "*" +uvicorn = "*" + +[package.extras] +examples = ["fastapi"] + +[[package]] +name = "starlette" +version = "0.46.2" +description = "The little ASGI library 
that shines." +optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35"}, + {file = "starlette-0.46.2.tar.gz", hash = "sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5"}, +] + +[package.dependencies] +anyio = ">=3.6.2,<5" + +[package.extras] +full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] + [[package]] name = "tomli" version = "2.2.1" @@ -932,10 +1322,32 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "uvicorn" +version = "0.34.2" +description = "The lightning-fast ASGI server." +optional = true +python-versions = ">=3.9" +groups = ["main"] +markers = "python_version >= \"3.10\" and extra == \"agents\"" +files = [ + {file = "uvicorn-0.34.2-py3-none-any.whl", hash = "sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403"}, + {file = "uvicorn-0.34.2.tar.gz", hash = "sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328"}, +] + +[package.dependencies] +click = ">=7.0" +h11 = ">=0.8" +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} + +[package.extras] +standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] + [extras] +agents = ["authlib", "griffe", "mcp"] gcp = ["google-auth", "requests"] [metadata] lock-version = "2.1" python-versions = ">=3.9" -content-hash = "c3917a9114ca2a0c01aedf207fa1b59cc259bb07c4d2914fe2ed9a4cb3e1785e" +content-hash = 
"a4bab5aa103ed3c281b0e196f6f99537cd0ee121465841b6918c3cc71eae7274" diff --git a/pyproject.toml b/pyproject.toml index 4bd94805..45d9876d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,6 +40,11 @@ gcp = [ "google-auth >=2.27.0", "requests >=2.32.3" ] +agents = [ + "mcp >=1.0,<2.0; python_version >= '3.10'", + "griffe >=1.7.3,<2.0", + "authlib >=1.5.2,<2.0", +] [build-system] requires = ["poetry-core"] diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 438e444d..6e4b37ee 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -7,8 +7,211 @@ from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, List, Mapping, Optional, Union +# region imports +import typing +from typing import AsyncGenerator +import logging +from collections import defaultdict + +from mistralai.models import ( + ResponseStartedEvent, + ConversationEventsData, + InputEntries, +) +from mistralai.extra.run.result import ( + RunResult, + RunResultEvents, + FunctionResultEvent, + reconstitue_entries, +) +from mistralai.extra.run.utils import run_requirements + +logger = logging.getLogger(__name__) + +if typing.TYPE_CHECKING: + from mistralai.extra.run.context import RunContext + +# endregion imports + + class Conversations(BaseSDK): + # region sdk-class-body + # Custom run code allowing client side execution of code + + @run_requirements + async def run_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + 
timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> RunResult: + """Run a conversation with the given inputs and context. + + The execution of a run will only stop when no required local execution can be done.""" + from mistralai.beta import Beta + from mistralai.extra.run.context import _validate_run + from mistralai.extra.run.tools import get_function_calls + + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + while True: + if run_ctx.conversation_id is None: + res = await self.start_async( + inputs=input_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, + ) + run_result.conversation_id = res.conversation_id + run_ctx.conversation_id = res.conversation_id + logger.info( + f"Started Run with conversation with id {res.conversation_id}" + ) + else: + res = await self.append_async( + conversation_id=run_ctx.conversation_id, + inputs=input_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + run_ctx.request_count += 1 + run_result.output_entries.extend(res.outputs) + fcalls = get_function_calls(res.outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + else: + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + input_entries = typing.cast(list[InputEntries], fresults) + return run_result + + @run_requirements + async def run_stream_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] + ] = UNSET, + completion_args: OptionalNullable[ + 
Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: + """Similar to `run_async` but returns a generator which streams events. + + The last streamed object is the RunResult object which summarises what happened in the run.""" + from mistralai.beta import Beta + from mistralai.extra.run.context import _validate_run + from mistralai.extra.run.tools import get_function_calls + + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + async def run_generator() -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: + current_entries = input_entries + while True: + received_event_tracker: defaultdict[ + int, list[ConversationEventsData] + ] = defaultdict(list) + if run_ctx.conversation_id is None: + res = await self.start_stream_async( + inputs=current_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, + ) + else: + res = await self.append_stream_async( + conversation_id=run_ctx.conversation_id, + inputs=current_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + async for event in res: + if ( + isinstance(event.data, ResponseStartedEvent) + and run_ctx.conversation_id is None + ): + run_result.conversation_id = event.data.conversation_id + run_ctx.conversation_id = event.data.conversation_id + logger.info( + f"Started Run with conversation with id {run_ctx.conversation_id}" + ) + if ( + output_index := getattr(event.data, 
"output_index", None) + ) is not None: + received_event_tracker[output_index].append(event.data) + yield typing.cast(RunResultEvents, event) + run_ctx.request_count += 1 + outputs = reconstitue_entries(received_event_tracker) + run_result.output_entries.extend(outputs) + fcalls = get_function_calls(outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + else: + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + for fresult in fresults: + yield RunResultEvents( + event="function.result", + data=FunctionResultEvent( + type="function.result", + result=fresult.result, + tool_call_id=fresult.tool_call_id, + ), + ) + current_entries = typing.cast(list[InputEntries], fresults) + yield run_result + + return run_generator() + + # endregion sdk-class-body + def start( self, *, diff --git a/src/mistralai/extra/__init__.py b/src/mistralai/extra/__init__.py index d8f7a21a..d9a81d24 100644 --- a/src/mistralai/extra/__init__.py +++ b/src/mistralai/extra/__init__.py @@ -1,5 +1,13 @@ -from .struct_chat import ParsedChatCompletionResponse, convert_to_parsed_chat_completion_response +from .struct_chat import ( + ParsedChatCompletionResponse, + convert_to_parsed_chat_completion_response, +) from .utils import response_format_from_pydantic_model from .utils.response_format import CustomPydanticModel -__all__ = ["convert_to_parsed_chat_completion_response", "response_format_from_pydantic_model", "CustomPydanticModel", "ParsedChatCompletionResponse"] +__all__ = [ + "convert_to_parsed_chat_completion_response", + "response_format_from_pydantic_model", + "CustomPydanticModel", + "ParsedChatCompletionResponse", +] diff --git a/src/mistralai/extra/exceptions.py b/src/mistralai/extra/exceptions.py new file mode 100644 index 00000000..7853ddc2 --- /dev/null +++ b/src/mistralai/extra/exceptions.py @@ -0,0 +1,14 @@ +class MistralClientException(Exception): + """Base exception for all the client errors.""" + + 
+class RunException(MistralClientException): + """Exception raised for errors during a conversation run.""" + + +class MCPException(MistralClientException): + """Exception raised for errors related to MCP operations.""" + + +class MCPAuthException(MCPException): + """Exception raised for authentication errors with an MCP server.""" diff --git a/src/mistralai/extra/mcp/__init__.py b/src/mistralai/extra/mcp/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/mistralai/extra/mcp/auth.py b/src/mistralai/extra/mcp/auth.py new file mode 100644 index 00000000..909f5d4a --- /dev/null +++ b/src/mistralai/extra/mcp/auth.py @@ -0,0 +1,166 @@ +from typing import Optional + +from authlib.oauth2.rfc8414 import AuthorizationServerMetadata +from authlib.integrations.httpx_client import AsyncOAuth2Client as AsyncOAuth2ClientBase +import httpx +import logging + +from mistralai.types import BaseModel + +logger = logging.getLogger(__name__) + + +class Oauth2AuthorizationScheme(BaseModel): + """Information about the oauth flow to perform with the authorization server.""" + + authorization_url: str + token_url: str + scope: list[str] + description: Optional[str] = None + refresh_url: Optional[str] = None + + +class OAuthParams(BaseModel): + """Required params for authorization.""" + + scheme: Oauth2AuthorizationScheme + client_id: str + client_secret: str + + +class AsyncOAuth2Client(AsyncOAuth2ClientBase): + """Subclass of the Async httpx oauth client which provides a constructor from OAuthParams.""" + + @classmethod + def from_oauth_params(cls, oauth_params: OAuthParams) -> "AsyncOAuth2Client": + return cls( + client_id=oauth_params.client_id, + client_secret=oauth_params.client_secret, + scope=oauth_params.scheme.scope, + ) + + +async def get_well_known_authorization_server_metadata( + server_url: str, +) -> Optional[AuthorizationServerMetadata]: + """Fetch the metadata from the well-known location. 
+ + This should be available on MCP servers as described by the specification: + https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization#2-3-server-metadata-discovery. + """ + well_known_url = f"{server_url}/.well-known/oauth-authorization-server" + response = await httpx.AsyncClient().get(well_known_url) + if 200 <= response.status_code < 300: + try: + server_metadata = AuthorizationServerMetadata(**response.json()) + server_metadata.validate() + return server_metadata + except ValueError: + logger.exception("Failed to parse oauth well-known metadata") + return None + else: + logger.error(f"Failed to get oauth well-known metadata from {server_url}") + return None + + +async def get_oauth_server_metadata(server_url: str) -> AuthorizationServerMetadata: + """Fetch the metadata from the authorization server to perform the oauth flow.""" + # 1) attempt to get the metadata from the resource server at /.well-known/oauth-protected-resource + # TODO: new self-discovery protocol, not released yet + + # 2) attempt to get the metadata from the authorization server at /.well-known/oauth-authorization-server + metadata = await get_well_known_authorization_server_metadata(server_url=server_url) + if metadata is not None: + return metadata + + # 3) fallback on default endpoints + # https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization#2-3-3-fallbacks-for-servers-without-metadata-discovery + return AuthorizationServerMetadata( + issuer=server_url, + authorization_endpoint=f"{server_url}/authorize", + token_endpoint=f"{server_url}/token", + register_endpoint=f"{server_url}/register", + response_types_supported=["code"], + response_modes_supported=["query"], + grant_types_supported=["authorization_code", "refresh_token"], + token_endpoint_auth_methods_supported=["client_secret_basic"], + code_challenge_methods_supported=["S256", "plain"], + ) + + +async 
def dynamic_client_registration( + register_endpoint: str, + redirect_url: str, + async_client: httpx.AsyncClient, +) -> tuple[str, str]: + """Try to register the client dynamically with an MCP server. + + Returns a client_id and client_secret. + """ + # Construct the registration request payload + registration_payload = { + "client_name": "MistralSDKClient", + "grant_types": ["authorization_code", "refresh_token"], + "token_endpoint_auth_method": "client_secret_basic", + "response_types": ["code"], + "redirect_uris": [redirect_url], + } + + # Make the registration request + response = await async_client.post(register_endpoint, json=registration_payload) + try: + response.raise_for_status() + registration_info = response.json() + client_id = registration_info["client_id"] + client_secret = registration_info["client_secret"] + except Exception as e: + raise ValueError( + f"Client registration failed: status={response.status_code}, error={response.text}" + ) from e + return client_id, client_secret + + +async def build_oauth_params( + server_url: str, + redirect_url: str, + client_id: Optional[str] = None, + client_secret: Optional[str] = None, + scope: Optional[list[str]] = None, + async_client: Optional[httpx.AsyncClient] = None, +) -> OAuthParams: + """Get issuer metadata and build the oauth required params.""" + metadata = await get_oauth_server_metadata(server_url=server_url) + oauth_scheme = Oauth2AuthorizationScheme( + authorization_url=metadata.authorization_endpoint, + token_url=metadata.token_endpoint, + scope=scope or [], + refresh_url=metadata.token_endpoint + if "refresh_token" in metadata.grant_types_supported + else None, + ) + if client_id and client_secret: + return OAuthParams( + client_id=client_id, + client_secret=client_secret, + scheme=oauth_scheme, + ) + + # Try to dynamically register the client + if async_client: + reg_client_id, reg_client_secret = await dynamic_client_registration( + register_endpoint=metadata.registration_endpoint, + 
redirect_url=redirect_url, + async_client=async_client, + ) + else: + async with httpx.AsyncClient() as async_client: + reg_client_id, reg_client_secret = await dynamic_client_registration( + register_endpoint=metadata.registration_endpoint, + redirect_url=redirect_url, + async_client=async_client, + ) + return OAuthParams( + client_id=reg_client_id, + client_secret=reg_client_secret, + scheme=oauth_scheme, + ) diff --git a/src/mistralai/extra/mcp/base.py b/src/mistralai/extra/mcp/base.py new file mode 100644 index 00000000..8be5585c --- /dev/null +++ b/src/mistralai/extra/mcp/base.py @@ -0,0 +1,155 @@ +from typing import Optional, Union +import logging +import typing +from contextlib import AsyncExitStack +from typing import Protocol, Any + +from mcp import ClientSession +from mcp.types import ListPromptsResult, EmbeddedResource, ImageContent, TextContent + +from mistralai.extra.exceptions import MCPException +from mistralai.models import ( + FunctionTool, + Function, + SystemMessageTypedDict, + AssistantMessageTypedDict, + TextChunkTypedDict, +) + +logger = logging.getLogger(__name__) + + +class MCPSystemPrompt(typing.TypedDict): + description: Optional[str] + messages: list[Union[SystemMessageTypedDict, AssistantMessageTypedDict]] + + +class MCPClientProtocol(Protocol): + """MCP client that converts MCP artifacts to Mistral format.""" + + _name: str + + async def initialize(self, exit_stack: Optional[AsyncExitStack]) -> None: + ... + + async def aclose(self) -> None: + ... + + async def get_tools(self) -> list[FunctionTool]: + ... + + async def execute_tool( + self, name: str, arguments: dict + ) -> list[TextChunkTypedDict]: + ... + + async def get_system_prompt( + self, name: str, arguments: dict[str, Any] + ) -> MCPSystemPrompt: + ... + + async def list_system_prompts(self) -> ListPromptsResult: + ... 
+ + +class MCPClientBase(MCPClientProtocol): + """Base class to implement functionalities from an initialized MCP session.""" + + _session: ClientSession + + def __init__(self, name: Optional[str] = None): + self._name = name or self.__class__.__name__ + self._exit_stack: Optional[AsyncExitStack] = None + self._is_initialized = False + + def _convert_content( + self, mcp_content: Union[TextContent, ImageContent, EmbeddedResource] + ) -> TextChunkTypedDict: + if not mcp_content.type == "text": + raise MCPException("Only supporting text tool responses for now.") + return {"type": "text", "text": mcp_content.text} + + def _convert_content_list( + self, mcp_contents: list[Union[TextContent, ImageContent, EmbeddedResource]] + ) -> list[TextChunkTypedDict]: + content_chunks = [] + for mcp_content in mcp_contents: + content_chunks.append(self._convert_content(mcp_content)) + return content_chunks + + async def get_tools(self) -> list[FunctionTool]: + mcp_tools = await self._session.list_tools() + tools = [] + for mcp_tool in mcp_tools.tools: + tools.append( + FunctionTool( + type="function", + function=Function( + name=mcp_tool.name, + description=mcp_tool.description, + parameters=mcp_tool.inputSchema, + strict=True, + ), + ) + ) + return tools + + async def execute_tool( + self, name: str, arguments: dict[str, Any] + ) -> list[TextChunkTypedDict]: + contents = await self._session.call_tool(name=name, arguments=arguments) + return self._convert_content_list(contents.content) + + async def get_system_prompt( + self, name: str, arguments: dict[str, Any] + ) -> MCPSystemPrompt: + prompt_result = await self._session.get_prompt(name=name, arguments=arguments) + return { + "description": prompt_result.description, + "messages": [ + typing.cast( + Union[SystemMessageTypedDict, AssistantMessageTypedDict], + { + "role": message.role, + "content": self._convert_content(mcp_content=message.content), + }, + ) + for message in prompt_result.messages + ], + } + + async def 
list_system_prompts(self) -> ListPromptsResult: + return await self._session.list_prompts() + + async def initialize(self, exit_stack: Optional[AsyncExitStack] = None) -> None: + """Initialize the MCP session.""" + # client is already initialized so return + if self._is_initialized: + return + if exit_stack is None: + self._exit_stack = AsyncExitStack() + exit_stack = self._exit_stack + stdio_transport = await self._get_transport(exit_stack=exit_stack) + mcp_session = await exit_stack.enter_async_context( + ClientSession( + read_stream=stdio_transport[0], + write_stream=stdio_transport[1], + ) + ) + await mcp_session.initialize() + self._session = mcp_session + self._is_initialized = True + + async def aclose(self): + """Close the MCP session.""" + if self._exit_stack: + await self._exit_stack.aclose() + + def __repr__(self): + return f"<{self.__class__.__name__} name={self._name!r} id=0x{id(self):x}>" + + def __str__(self): + return f"{self.__class__.__name__}(name={self._name})" + + async def _get_transport(self, exit_stack: AsyncExitStack): + raise NotImplementedError diff --git a/src/mistralai/extra/mcp/sse.py b/src/mistralai/extra/mcp/sse.py new file mode 100644 index 00000000..2dfe7a2d --- /dev/null +++ b/src/mistralai/extra/mcp/sse.py @@ -0,0 +1,165 @@ +import http +import logging +import typing +from typing import Any, Optional +from contextlib import AsyncExitStack +from functools import cached_property + +import httpx + +from mistralai.extra.exceptions import MCPAuthException +from mistralai.extra.mcp.base import ( + MCPClientBase, +) +from mistralai.extra.mcp.auth import OAuthParams, AsyncOAuth2Client +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream + +from mcp.client.sse import sse_client +from mcp.shared.message import SessionMessage +from authlib.oauth2.rfc6749 import OAuth2Token + +from mistralai.types import BaseModel + +logger = logging.getLogger(__name__) + + +class SSEServerParams(BaseModel): + """Parameters 
required for a MCPClient with SSE transport""" + + url: str + headers: Optional[dict[str, Any]] = None + timeout: float = 5 + sse_read_timeout: float = 60 * 5 + + +class MCPClientSSE(MCPClientBase): + """MCP client that uses sse for communication. + + The client provides authentication for OAuth2 protocol following the current MCP authorization spec: + https://modelcontextprotocol.io/specification/2025-03-26/basic/authorization. + + This is possibly going to change in the future since the protocol has ongoing discussions. + """ + + _oauth_params: Optional[OAuthParams] + _sse_params: SSEServerParams + + def __init__( + self, + sse_params: SSEServerParams, + name: Optional[str] = None, + oauth_params: Optional[OAuthParams] = None, + auth_token: Optional[OAuth2Token] = None, + ): + super().__init__(name=name) + self._sse_params = sse_params + self._oauth_params: Optional[OAuthParams] = oauth_params + self._auth_token: Optional[OAuth2Token] = auth_token + + @cached_property + def base_url(self) -> str: + return self._sse_params.url.rstrip("/sse") + + def set_oauth_params(self, oauth_params: OAuthParams): + """Update the oauth params and client accordingly.""" + if self._oauth_params is not None: + logger.warning(f"Overriding current oauth params for {self._name}") + self._oauth_params = oauth_params + + async def get_auth_url_and_state(self, redirect_url: str) -> tuple[str, str]: + """Create the authorization url for client to start oauth flow.""" + if self._oauth_params is None: + raise MCPAuthException( + "Can't generate an authorization url without oauth_params being set, " + "make sure the oauth params have been set." 
+ ) + oauth_client = AsyncOAuth2Client.from_oauth_params(self._oauth_params) + auth_url, state = oauth_client.create_authorization_url( + self._oauth_params.scheme.authorization_url, redirect_uri=redirect_url + ) + return auth_url, state + + async def get_token_from_auth_response( + self, + authorization_response: str, + redirect_url: str, + state: str, + ) -> OAuth2Token: + """Fetch the authentication token from the server.""" + if self._oauth_params is None: + raise MCPAuthException( + "Can't fetch a token without oauth_params, make sure they have been set." + ) + oauth_client = AsyncOAuth2Client.from_oauth_params(self._oauth_params) + oauth_token = await oauth_client.fetch_token( + url=self._oauth_params.scheme.token_url, + authorization_response=authorization_response, + redirect_uri=redirect_url, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + state=state, + ) + return oauth_token + + async def refresh_auth_token(self): + """Refresh an expired token.""" + if self._oauth_params is None or self._oauth_params.scheme.refresh_url is None: + raise MCPAuthException( + "Can't refresh a token without a refresh url make sure the oauth params have been set." + ) + if self._auth_token is None: + raise MCPAuthException( + "Can't refresh a token without a refresh token, use the `set_auth_token` to add a OAuth2Token." 
+ ) + oauth_client = AsyncOAuth2Client.from_oauth_params(self._oauth_params) + oauth_token = await oauth_client.refresh_token( + url=self._oauth_params.scheme.refresh_url, + refresh_token=self._auth_token["refresh_token"], + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + self.set_auth_token(oauth_token) + + def set_auth_token(self, token: OAuth2Token) -> None: + """Register the authentication token with this client.""" + self._auth_token = token + + def _format_headers(self) -> dict[str, str]: + headers: dict[str, str] = {} + if self._sse_params.headers: + headers |= self._sse_params.headers + if self._auth_token: + headers["Authorization"] = f"Bearer {self._auth_token['access_token']}" + return headers + + async def requires_auth(self) -> bool: + """Check if the client requires authentication to communicate with the server.""" + response = httpx.get( + self._sse_params.url, + headers=self._format_headers(), + timeout=self._sse_params.timeout, + ) + return response.status_code == http.HTTPStatus.UNAUTHORIZED + + async def _get_transport( + self, exit_stack: AsyncExitStack + ) -> tuple[ + MemoryObjectReceiveStream[typing.Union[SessionMessage, Exception]], + MemoryObjectSendStream[SessionMessage], + ]: + try: + return await exit_stack.enter_async_context( + sse_client( + url=self._sse_params.url, + headers=self._format_headers(), + timeout=self._sse_params.timeout, + sse_read_timeout=self._sse_params.sse_read_timeout, + ) + ) + except Exception as e: + if isinstance(e, httpx.HTTPStatusError): + if e.response.status_code == http.HTTPStatus.UNAUTHORIZED: + if self._oauth_params is None: + raise MCPAuthException( + "Authentication required but no auth params provided." 
+ ) from e + raise MCPAuthException("Authentication required.") from e + raise diff --git a/src/mistralai/extra/mcp/stdio.py b/src/mistralai/extra/mcp/stdio.py new file mode 100644 index 00000000..28c3b8c5 --- /dev/null +++ b/src/mistralai/extra/mcp/stdio.py @@ -0,0 +1,22 @@ +from typing import Optional +import logging +from contextlib import AsyncExitStack + +from mistralai.extra.mcp.base import ( + MCPClientBase, +) + +from mcp import stdio_client, StdioServerParameters + +logger = logging.getLogger(__name__) + + +class MCPClientSTDIO(MCPClientBase): + """MCP client that uses stdio for communication.""" + + def __init__(self, stdio_params: StdioServerParameters, name: Optional[str] = None): + super().__init__(name=name) + self._stdio_params = stdio_params + + async def _get_transport(self, exit_stack: AsyncExitStack): + return await exit_stack.enter_async_context(stdio_client(self._stdio_params)) diff --git a/src/mistralai/extra/run/__init__.py b/src/mistralai/extra/run/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/mistralai/extra/run/context.py b/src/mistralai/extra/run/context.py new file mode 100644 index 00000000..a79fd59e --- /dev/null +++ b/src/mistralai/extra/run/context.py @@ -0,0 +1,295 @@ +import asyncio +import inspect +import typing +from contextlib import AsyncExitStack +from functools import wraps +from collections.abc import Callable + +from dataclasses import dataclass, field +from typing import Union, Optional + +import pydantic + +from mistralai.extra import ( + response_format_from_pydantic_model, +) +from mistralai.extra.exceptions import RunException +from mistralai.extra.mcp.base import MCPClientProtocol +from mistralai.extra.run.result import RunResult +from mistralai.types.basemodel import OptionalNullable, BaseModel, UNSET +from mistralai.models import ( + ResponseFormat, + FunctionCallEntry, + Tools, + ToolsTypedDict, + CompletionArgs, + CompletionArgsTypedDict, + FunctionResultEntry, + ConversationInputs, + 
ConversationInputsTypedDict, + FunctionTool, + MessageInputEntry, + InputEntries, + ResponseFormatTypedDict, +) + +from logging import getLogger + +from mistralai.extra.run.tools import ( + create_function_result, + RunFunction, + create_tool_call, + RunTool, + RunMCPTool, + RunCoroutine, +) + +if typing.TYPE_CHECKING: + from mistralai import Beta, OptionalNullable + +logger = getLogger(__name__) + + +class AgentRequestKwargs(typing.TypedDict): + agent_id: str + + +class ModelRequestKwargs(typing.TypedDict): + model: str + instructions: OptionalNullable[str] + tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] + completion_args: OptionalNullable[Union[CompletionArgs, CompletionArgsTypedDict]] + + +@dataclass +class RunContext: + """A context for running a conversation with an agent or a model. + + The context can be used to execute function calls, connect to MCP server, and keep track of information about + the run. + + Args: + conversation_id (Options[str]): The unique identifier for the conversation. This is + passed if the user wants to continue an existing conversation. + model (Options[str]): The model name to be used for the conversation. Can't be used along with 'agent_id'. + agent_id (Options[str]): The agent id to be used for the conversation. Can't be used along with 'model'. + output_format (Optional[type[BaseModel]]): The output format expected from the conversation. It represents + the `response_format` which is part of the `CompletionArgs`. + request_count (int): The number of requests made in the current `RunContext`. + continue_on_fn_error (bool): Flag to determine if the conversation should continue when function execution + resulted in an error. 
+ """ + + _exit_stack: AsyncExitStack = field(init=False) + _callable_tools: dict[str, RunTool] = field(init=False, default_factory=dict) + _mcp_clients: list[MCPClientProtocol] = field(init=False, default_factory=list) + + conversation_id: Optional[str] = field(default=None) + model: Optional[str] = field(default=None) + agent_id: Optional[str] = field(default=None) + output_format: Optional[type[BaseModel]] = field(default=None) + request_count: int = field(default=0) + continue_on_fn_error: bool = field(default=False) + + def __post_init__(self): + if self.model and self.agent_id: + raise RunException("Only one for model or agent_id should be set") + self._exit_stack = AsyncExitStack() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self._exit_stack.aclose() + for mcp_client in self._mcp_clients: + await mcp_client.aclose() + + def register_func(self, func: Callable): + """Add a function to the context.""" + if not inspect.isfunction(func): + raise RunException( + "Only object of type function can be registered at the moment." 
+ ) + + if inspect.iscoroutinefunction(func): + self._callable_tools[func.__name__] = RunCoroutine( + name=func.__name__, + awaitable=func, + tool=create_tool_call(func), + ) + else: + self._callable_tools[func.__name__] = RunFunction( + name=func.__name__, + callable=func, + tool=create_tool_call(func), + ) + + @wraps(func) + def wrapper(*args, **kwargs): + logger.info(f"Executing {func.__name__}") + return func(*args, **kwargs) + + return wrapper + + async def register_mcp_clients(self, mcp_clients: list[MCPClientProtocol]) -> None: + """Registering multiple MCP clients at the same time in the same asyncio.Task.""" + for mcp_client in mcp_clients: + await self.register_mcp_client(mcp_client) + + async def register_mcp_client(self, mcp_client: MCPClientProtocol) -> None: + """Add a MCP client to the context.""" + await mcp_client.initialize(exit_stack=self._exit_stack) + tools = await mcp_client.get_tools() + for tool in tools: + logger.info( + f"Adding tool {tool.function.name} from {mcp_client._name or 'mcp client'}" + ) + self._callable_tools[tool.function.name] = RunMCPTool( + name=tool.function.name, + tool=tool, + mcp_client=mcp_client, + ) + self._mcp_clients.append(mcp_client) + + async def execute_function_calls( + self, function_calls: list[FunctionCallEntry] + ) -> list[FunctionResultEntry]: + """Execute function calls and create function results from them.""" + if not all( + function_call.name in self._callable_tools + for function_call in function_calls + ): + logger.warning("Can't execute all functions, stopping run here") + return [] + function_result_tasks = [] + for function_call in function_calls: + function_result_tasks.append( + asyncio.create_task( + create_function_result( + function_call=function_call, + run_tool=self._callable_tools[function_call.name], + continue_on_fn_error=self.continue_on_fn_error, + ) + ) + ) + await asyncio.gather(*function_result_tasks) + return [task.result() for task in function_result_tasks] + + def 
get_tools(self) -> list[FunctionTool]: + """Get the tools that are part of the context.""" + callable_tools = [ + run_functions.tool for run_functions in self._callable_tools.values() + ] + return callable_tools + + async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs: + """Prepare an agent request with the functions added to the context. + + Update the agent definition before making the request. + """ + if self.agent_id is None: + raise RunException( + "Can't prepare an agent request, if no agent_id is provided" + ) + agent = await beta_client.agents.get_async(agent_id=self.agent_id) + agent_tools = agent.tools or [] + updated_tools = [] + for i in range(len(agent_tools)): + tool = agent_tools[i] + if tool.type != "function": + updated_tools.append(tool) + elif tool.function.name in self._callable_tools: + # function already exists in the agent, don't add it again + continue + else: + updated_tools.append(tool) + updated_tools += self.get_tools() + completion_args = ( + CompletionArgs(response_format=self.response_format) + if self.output_format + else None + ) + beta_client.agents.update( + agent_id=self.agent_id, tools=updated_tools, completion_args=completion_args + ) + return AgentRequestKwargs(agent_id=self.agent_id) + + async def prepare_model_request( + self, + tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] = UNSET, + completion_args: OptionalNullable[ + Union[CompletionArgs, CompletionArgsTypedDict] + ] = UNSET, + instructions: OptionalNullable[str] = None, + ) -> ModelRequestKwargs: + if self.model is None: + raise RunException("Can't prepare a model request, if no model is provided") + if not completion_args and self.output_format: + completion_args = CompletionArgs(response_format=self.response_format) + elif isinstance(completion_args, CompletionArgs) and self.output_format: + completion_args.response_format = self.response_format + elif isinstance(completion_args, dict) and self.output_format: + 
completion_args["response_format"] = typing.cast( + ResponseFormatTypedDict, self.response_format.model_dump() + ) + request_tools = [] + if isinstance(tools, list): + for tool in tools: + request_tools.append(typing.cast(Tools, tool)) + for tool in self.get_tools(): + request_tools.append(tool) + return ModelRequestKwargs( + model=self.model, + tools=request_tools, + instructions=instructions, + completion_args=completion_args, + ) + + @property + def response_format(self) -> ResponseFormat: + if not self.output_format: + raise RunException("No response format exist for the current RunContext.") + return response_format_from_pydantic_model(self.output_format) + + +async def _validate_run( + *, + beta_client: "Beta", + run_ctx: RunContext, + inputs: Union[ConversationInputs, ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] = UNSET, + completion_args: OptionalNullable[ + Union[CompletionArgs, CompletionArgsTypedDict] + ] = UNSET, +) -> tuple[ + Union[AgentRequestKwargs, ModelRequestKwargs], RunResult, list[InputEntries] +]: + input_entries: list[InputEntries] = [] + if isinstance(inputs, str): + input_entries.append(MessageInputEntry(role="user", content=inputs)) + else: + for input in inputs: + if isinstance(input, dict): + input_entries.append( + pydantic.TypeAdapter(InputEntries).validate_python(input) + ) + run_result = RunResult( + input_entries=input_entries, + output_model=run_ctx.output_format, + conversation_id=run_ctx.conversation_id, + ) + req: Union[AgentRequestKwargs, ModelRequestKwargs] + if run_ctx.agent_id: + if tools or completion_args: + raise RunException("Can't set tools or completion_args when using an agent") + req = await run_ctx.prepare_agent_request(beta_client=beta_client) + elif run_ctx.model: + req = await run_ctx.prepare_model_request( + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + else: + raise 
RunException("Either agent_id or model must be set in the run context") + return req, run_result, input_entries diff --git a/src/mistralai/extra/run/result.py b/src/mistralai/extra/run/result.py new file mode 100644 index 00000000..9592dccf --- /dev/null +++ b/src/mistralai/extra/run/result.py @@ -0,0 +1,212 @@ +import datetime +import json +import typing +from typing import Union, Annotated, Optional, Literal +from dataclasses import dataclass, field +from pydantic import Discriminator, Tag, BaseModel + +from mistralai.extra.utils.response_format import pydantic_model_from_json +from mistralai.models import ( + FunctionResultEntry, + FunctionCallEntry, + MessageOutputEntry, + AgentHandoffEntry, + ToolExecutionEntry, + MessageInputEntry, + AgentHandoffDoneEvent, + AgentHandoffStartedEvent, + ResponseDoneEvent, + ResponseErrorEvent, + ResponseStartedEvent, + FunctionCallEvent, + MessageOutputEvent, + ToolExecutionDoneEvent, + ToolExecutionStartedEvent, + ConversationEventsData, + MessageOutputEventContent, + MessageOutputEntryContent, + TextChunk, + MessageOutputContentChunks, + SSETypes, + InputEntries, + ToolFileChunk, + ToolReferenceChunk, + FunctionCallEntryArguments, +) +from mistralai.utils import get_discriminator + +RunOutputEntries = typing.Union[ + MessageOutputEntry, + FunctionCallEntry, + FunctionResultEntry, + AgentHandoffEntry, + ToolExecutionEntry, +] + +RunEntries = typing.Union[RunOutputEntries, MessageInputEntry] + + +def as_text(entry: RunOutputEntries) -> str: + """Keep only the messages and turn content into textual representation.""" + text = "" + if isinstance(entry, MessageOutputEntry): + if isinstance(entry.content, str): + text += entry.content + else: + for chunk in entry.content: + if isinstance(chunk, TextChunk): + text += chunk.text + elif isinstance(chunk, ToolFileChunk): + text += f"" + elif isinstance(chunk, ToolReferenceChunk): + text += f"" + return text + + +def reconstitute_message_content( + chunks: 
list[MessageOutputEventContent], +) -> MessageOutputEntryContent: + """Given a list of MessageOutputEventContent, recreate a normalised MessageOutputEntryContent.""" + if all(isinstance(chunk, str) for chunk in chunks): + return "".join(typing.cast(list[str], chunks)) + content: list[MessageOutputContentChunks] = [] + for chunk in chunks: + if isinstance(chunk, str): + chunk = TextChunk(text=chunk) + if isinstance(chunk, TextChunk): + if len(content) and isinstance(content[-1], TextChunk): + content[-1].text += chunk.text + else: + content.append(chunk) + else: + content.append(chunk) + return content + + +def reconstitute_function_call_args(chunks: list[str]) -> FunctionCallEntryArguments: + """Recreates function call arguments from stream""" + return typing.cast(FunctionCallEntryArguments, "".join(chunks)) + + +def reconstitue_entries( + received_event_tracker: dict[int, list[ConversationEventsData]], +) -> list[RunOutputEntries]: + """Given a list of events, recreate the corresponding entries.""" + run_entries: list[RunOutputEntries] = [] + for idx, events in sorted(received_event_tracker.items(), key=lambda x: x[0]): + first_event = events[0] + if isinstance(first_event, MessageOutputEvent): + message_events = typing.cast(list[MessageOutputEvent], events) + run_entries.append( + MessageOutputEntry( + content=reconstitute_message_content( + chunks=[ + message_event.content for message_event in message_events + ] + ), + created_at=first_event.created_at, + id=first_event.id, + agent_id=first_event.agent_id, + model=first_event.model, + role=first_event.role, + ) + ) + elif isinstance(first_event, FunctionCallEvent): + function_call_events = typing.cast(list[FunctionCallEvent], events) + run_entries.append( + FunctionCallEntry( + name=first_event.name, + arguments=reconstitute_function_call_args( + chunks=[ + function_call_event.arguments + for function_call_event in function_call_events + ] + ), + created_at=first_event.created_at, + id=first_event.id, + 
tool_call_id=first_event.tool_call_id, + ) + ) + return run_entries + + +@dataclass +class RunFiles: + id: str + name: str + content: bytes + + +@dataclass +class RunResult: + input_entries: list[InputEntries] + conversation_id: Optional[str] = field(default=None) + output_entries: list[RunOutputEntries] = field(default_factory=list) + files: dict[str, RunFiles] = field(default_factory=dict) + output_model: Optional[type[BaseModel]] = field(default=None) + + def get_file(self, file_id: str) -> Optional[RunFiles]: + return self.files.get(file_id) + + @property + def entries(self) -> list[RunEntries]: + return [*self.input_entries, *self.output_entries] + + @property + def output_as_text(self) -> str: + if not self.output_entries: + raise ValueError("No output entries were started.") + return "\n".join( + as_text(entry) + for entry in self.output_entries + if entry.type == "message.output" + ) + + @property + def output_as_model(self) -> BaseModel: + if self.output_model is None: + raise ValueError("No output format was not set.") + return pydantic_model_from_json( + json.loads(self.output_as_text), self.output_model + ) + + +class FunctionResultEvent(BaseModel): + id: Optional[str] = None + + type: Optional[Literal["function.result"]] = "function.result" + + result: str + + tool_call_id: str + + created_at: Optional[datetime.datetime] = datetime.datetime.now( + tz=datetime.timezone.utc + ) + + output_index: Optional[int] = 0 + + +RunResultEventsType = typing.Union[SSETypes, Literal["function.result"]] + +RunResultEventsData = typing.Annotated[ + Union[ + Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], + Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], + Annotated[ResponseDoneEvent, Tag("conversation.response.done")], + Annotated[ResponseErrorEvent, Tag("conversation.response.error")], + Annotated[ResponseStartedEvent, Tag("conversation.response.started")], + Annotated[FunctionCallEvent, Tag("function.call.delta")], + 
Annotated[MessageOutputEvent, Tag("message.output.delta")], + Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], + Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], + Annotated[FunctionResultEvent, Tag("function.result")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class RunResultEvents(BaseModel): + event: RunResultEventsType + + data: RunResultEventsData diff --git a/src/mistralai/extra/run/tools.py b/src/mistralai/extra/run/tools.py new file mode 100644 index 00000000..81fec665 --- /dev/null +++ b/src/mistralai/extra/run/tools.py @@ -0,0 +1,225 @@ +import itertools +import logging +from dataclasses import dataclass +import inspect + +from pydantic import Field, create_model +from pydantic.fields import FieldInfo +import json +from typing import cast, Callable, Sequence, Any, ForwardRef, get_type_hints, Union + +from griffe import ( + Docstring, + DocstringSectionKind, + DocstringSectionText, + DocstringParameter, + DocstringSection, +) + +from mistralai.extra.exceptions import RunException +from mistralai.extra.mcp.base import MCPClientProtocol +from mistralai.extra.run.result import RunOutputEntries +from mistralai.models import ( + FunctionResultEntry, + FunctionTool, + Function, + FunctionCallEntry, +) + + +logger = logging.getLogger(__name__) + + +@dataclass +class RunFunction: + name: str + callable: Callable + tool: FunctionTool + + +@dataclass +class RunCoroutine: + name: str + awaitable: Callable + tool: FunctionTool + + +@dataclass +class RunMCPTool: + name: str + tool: FunctionTool + mcp_client: MCPClientProtocol + + +RunTool = Union[RunFunction, RunCoroutine, RunMCPTool] + + +def _get_function_description(docstring_sections: list[DocstringSection]) -> str: + """Given a list of docstring sections create a description for the function.""" + text_sections: list[DocstringSectionText] = [] + for section in docstring_sections: + if section.kind == DocstringSectionKind.text: + 
text_sections.append(cast(DocstringSectionText, section)) + return "\n".join(text.value for text in text_sections) + + +def _get_function_parameters( + docstring_sections: list[DocstringSection], + params_from_sig: list[inspect.Parameter], + type_hints: dict[str, Any], +): + """Given a list of docstring sections and type annotations create the most accurate tool parameters""" + params_from_docstrings: list[DocstringParameter] = list( + itertools.chain.from_iterable( + section.value + for section in docstring_sections + if section.kind + in (DocstringSectionKind.parameters, DocstringSectionKind.other_parameters) + ) + ) + + # Extract all description and annotation + param_descriptions = {} + param_annotations = {} + + for param_doc in params_from_docstrings: + param_descriptions[param_doc.name] = param_doc.description + + for param in params_from_sig: + if param.name not in param_descriptions: + param_descriptions[param.name] = "" + param_annotations[param.name] = type_hints.get(param.name) + + # resolve all params into Field and create the parameters schema + fields: dict[str, tuple[type, FieldInfo]] = {} + for p in params_from_sig: + default = p.default if p.default is not inspect.Parameter.empty else ... + annotation = ( + p.annotation if p.annotation is not inspect.Parameter.empty else Any + ) + # handle forward ref with the help of get_type_hints + if isinstance(annotation, str): + annotation = type_hints[p.name] + + if isinstance(default, FieldInfo): + field_info = default + else: + # If the annotation is Annotated[..., Field(...)] extract the Field and annotation + # Otherwise, just use the annotation as-is + field_info = None + # If it's Annotated[..., SomeFieldMarker(...)], find it + if hasattr(annotation, "__metadata__") and hasattr(annotation, "__args__"): + # It's Annotated + # e.g. Annotated[str, Field(...)] + # Extract the first Field(...) 
or None if not found + for meta in annotation.__metadata__: # type: ignore + if isinstance(meta, FieldInfo): + field_info = meta + break + # The actual annotation is the first part of Annotated + annotation = annotation.__args__[0] # type: ignore + + # handle forward ref with the help of get_type_hints + if isinstance(annotation, ForwardRef): + annotation = param_annotations[p.name] + + # no Field + if field_info is None: + if default is ...: + field_info = Field() + else: + field_info = Field(default=default) + + field_info.description = param_descriptions[p.name] + fields[p.name] = (cast(type, annotation), field_info) + + schema = create_model("_", **fields).model_json_schema() # type: ignore[call-overload] + schema.pop("title", None) + for prop in schema.get("properties", {}).values(): + prop.pop("title", None) + return schema + + +def create_tool_call(func: Callable) -> FunctionTool: + """Parse a function docstring / type annotations to create a FunctionTool.""" + name = func.__name__ + + # Inspect and parse the docstring of the function + doc = inspect.getdoc(func) + docstring_sections: list[DocstringSection] + if not doc: + logger.warning( + f"Function '{name}' without a docstring is being parsed, add docstring for more accurate result." + ) + docstring_sections = [] + else: + docstring = Docstring(doc, parser="google") + docstring_sections = docstring.parse(warnings=False) + if len(docstring_sections) == 0: + logger.warning( + f"Function '{name}' has no relevant docstring sections, add docstring for more accurate result." 
+ ) + + # Extract the function's signature and type hints + sig = inspect.signature(func) + params_from_sig = list(sig.parameters.values()) + type_hints = get_type_hints(func, include_extras=True, localns=None, globalns=None) + + return FunctionTool( + type="function", + function=Function( + name=name, + description=_get_function_description(docstring_sections), + parameters=_get_function_parameters( + docstring_sections=docstring_sections, + params_from_sig=params_from_sig, + type_hints=type_hints, + ), + strict=True, + ), + ) + + +async def create_function_result( + function_call: FunctionCallEntry, + run_tool: RunTool, + continue_on_fn_error: bool = False, +) -> FunctionResultEntry: + """Run the function with arguments of a FunctionCallEntry.""" + arguments = ( + json.loads(function_call.arguments) + if isinstance(function_call.arguments, str) + else function_call.arguments + ) + try: + if isinstance(run_tool, RunFunction): + res = run_tool.callable(**arguments) + elif isinstance(run_tool, RunCoroutine): + res = await run_tool.awaitable(**arguments) + elif isinstance(run_tool, RunMCPTool): + res = await run_tool.mcp_client.execute_tool(function_call.name, arguments) + except Exception as e: + if continue_on_fn_error is True: + return FunctionResultEntry( + tool_call_id=function_call.tool_call_id, + result=f"Error while executing {function_call.name}: {str(e)}", + ) + raise RunException( + f"Failed to execute tool {function_call.name} with arguments '{function_call.arguments}'" + ) from e + + return FunctionResultEntry( + tool_call_id=function_call.tool_call_id, + result=res if isinstance(res, str) else json.dumps(res), + ) + + +def get_function_calls( + output_entries: Sequence[RunOutputEntries], +) -> list[FunctionCallEntry]: + """Extract all FunctionCallEntry from a conversation response""" + function_calls = [] + for entry in output_entries: + if isinstance(entry, FunctionCallEntry): + function_calls.append(entry) + return function_calls diff --git 
a/src/mistralai/extra/run/utils.py b/src/mistralai/extra/run/utils.py new file mode 100644 index 00000000..231c7131 --- /dev/null +++ b/src/mistralai/extra/run/utils.py @@ -0,0 +1,36 @@ +import importlib.util +import sys +from typing import Callable, TypeVar, Any, cast +from functools import wraps + +from mistralai.extra.exceptions import MistralClientException + +F = TypeVar("F", bound=Callable[..., Any]) + + +REQUIRED_PYTHON_VERSION = (3, 10) +REQUIRED_PYTHON_VERSION_STR = "3.10" +REQUIRED_PACKAGES = ["mcp"] + + +def is_module_installed(module_name: str) -> bool: + spec = importlib.util.find_spec(module_name) + return spec is not None + + +def run_requirements(func: F) -> F: + @wraps(func) + def wrapper(*args, **kwargs): + if sys.version_info < REQUIRED_PYTHON_VERSION: + raise MistralClientException( + f"{func.__name__} requires a Python version higher than {REQUIRED_PYTHON_VERSION_STR}." + f"You are using Python {sys.version_info.major}.{sys.version_info.minor}." + ) + for package in REQUIRED_PACKAGES: + if not is_module_installed(package): + raise MistralClientException( + f"{func.__name__} requires the sdk to be installed with 'agents' extra dependencies." + ) + return func(*args, **kwargs) + + return cast(F, wrapper) From 6efb794d3db3d52ec4e02eda9431093fbc813a10 Mon Sep 17 00:00:00 2001 From: alex-ac Date: Mon, 26 May 2025 13:28:31 +0200 Subject: [PATCH 128/223] docs: add README info --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 9b8e3c4f..1c2c1d1c 100644 --- a/README.md +++ b/README.md @@ -108,6 +108,15 @@ Once that is saved to a file, you can run it with `uv run script.py` where `script.py` can be replaced with the actual file name. +### Agents extra dependencies + +When using the agents related feature it is required to add the `agents` extra dependencies. 
This can be added when +installing the package: + +```bash +pip install "mistralai-private[agents]" +``` + ## SDK Example Usage From 1cfd45eb490669d1ea061f182afcb03bb28064d4 Mon Sep 17 00:00:00 2001 From: alex-ac Date: Mon, 26 May 2025 13:51:55 +0200 Subject: [PATCH 129/223] feat: add new example --- .github/workflows/run_example_scripts.yaml | 38 +++++++++---------- README.md | 3 ++ examples/async_conversation_agent.py | 43 ++++++++++++++++++++++ 3 files changed, 64 insertions(+), 20 deletions(-) create mode 100644 examples/async_conversation_agent.py diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index e55ca08d..e5205cb0 100644 --- a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -28,12 +28,6 @@ jobs: - name: Install Poetry uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 - - name: Build and install client - run: | - touch README-PYPI.md # Create this file since the client is not built from Speakeasy - poetry build - python3 -m pip install dist/mistralai-*.whl - - name: Set VERSION run: | VERSION=$(echo ${{ matrix.python-version }} | tr -d .) @@ -43,20 +37,24 @@ jobs: run: | echo "MISTRAL_API_KEY=${{ secrets[format('CI_MISTRAL_API_KEY_PYTHON_{0}', env.VERSION)] }}" >> $GITHUB_ENV - - name: Run the example scripts + - name: For python 3.9, build and install client. Run examples without extra dependencies. 
+ if: matrix.python-version == '3.9' run: | - failed=0 - for file in examples/*.py; do - if [ -f "$file" ] && [ "$file" != "examples/chatbot_with_streaming.py" ]; then - echo "Running $file" - # Do not fail if the script fails, but save it in the failed variable - python3 "$file" > /dev/null || failed=1 - fi - done - # If one of the example script failed then exit - if [ $failed -ne 0 ]; then - exit 1 - fi + poetry build + PACKAGE="dist/$(ls dist | grep whl | head -n 1)" + python3 -m pip install "$PACKAGE" + ./examples/run_all.sh --no-extra-dep + env: + MISTRAL_AGENT_ID: ${{ vars.CI_AGENT_ID }} + MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} + + - name: For python 3.10+, build and install client with extras. Run all examples. + if: matrix.python-version != '3.9' + run: | + poetry build + PACKAGE="dist/$(ls dist | grep whl | head -n 1)[agents]" + python3 -m pip install "$PACKAGE" + ./examples/run_all.sh env: - MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} + MISTRAL_AGENT_ID: ${{ vars.CI_AGENT_ID }} MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} diff --git a/README.md b/README.md index 1c2c1d1c..5d010776 100644 --- a/README.md +++ b/README.md @@ -117,6 +117,9 @@ installing the package: pip install "mistralai-private[agents]" ``` +> Note: Because of some of our dependencies, these features are only available for python version higher or equal to +> 3.10. 
+ ## SDK Example Usage diff --git a/examples/async_conversation_agent.py b/examples/async_conversation_agent.py new file mode 100644 index 00000000..0a8968e9 --- /dev/null +++ b/examples/async_conversation_agent.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai_private import MistralPrivate + +MODEL = "mistral-medium-latest" + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = MistralPrivate(api_key=api_key) + + agent = client.beta.agents.create( + model=MODEL, + name="WebSearch Agent", + instructions="Use your websearch abilities when answering requests you don't know.", + description="Agent able to fetch new information on the web.", + tools = [{"type": "web_search"}], + ) + + result = await client.beta.conversations.start_async( + agent_id=agent.id, + inputs="Who won the last Champions League?" + ) + + print("All result entries:") + for entry in result.outputs: + print(f"{entry}") + + result = await client.beta.conversations.append_async( + conversation_id=result.conversation_id, + inputs="And what about the previous year?" 
+ ) + + print("All result entries:") + for entry in result.outputs: + print(f"{entry}") + + + +if __name__ == "__main__": + asyncio.run(main()) From ce990e78f0dbe2cb0a4c8f7cd5fdc47aed700419 Mon Sep 17 00:00:00 2001 From: alex-ac Date: Mon, 26 May 2025 14:03:00 +0200 Subject: [PATCH 130/223] ci: update the run example workflow --- .github/workflows/run_example_scripts.yaml | 15 +++++++++------ examples/async_conversation_agent.py | 4 ++-- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index e5205cb0..329acdec 100644 --- a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -37,24 +37,27 @@ jobs: run: | echo "MISTRAL_API_KEY=${{ secrets[format('CI_MISTRAL_API_KEY_PYTHON_{0}', env.VERSION)] }}" >> $GITHUB_ENV - - name: For python 3.9, build and install client. Run examples without extra dependencies. - if: matrix.python-version == '3.9' + - name: Build the package run: | + touch README-PYPI.md # Create this file since the client is not built by Speakeasy poetry build + + - name: For python 3.9, install the client and run examples without extra dependencies. + if: matrix.python-version == '3.9' + run: | PACKAGE="dist/$(ls dist | grep whl | head -n 1)" python3 -m pip install "$PACKAGE" ./examples/run_all.sh --no-extra-dep env: - MISTRAL_AGENT_ID: ${{ vars.CI_AGENT_ID }} + MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} - - name: For python 3.10+, build and install client with extras. Run all examples. + - name: For python 3.10+, install client with extras and run all examples. 
if: matrix.python-version != '3.9' run: | - poetry build PACKAGE="dist/$(ls dist | grep whl | head -n 1)[agents]" python3 -m pip install "$PACKAGE" ./examples/run_all.sh env: - MISTRAL_AGENT_ID: ${{ vars.CI_AGENT_ID }} + MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} diff --git a/examples/async_conversation_agent.py b/examples/async_conversation_agent.py index 0a8968e9..54f002ac 100644 --- a/examples/async_conversation_agent.py +++ b/examples/async_conversation_agent.py @@ -2,14 +2,14 @@ import asyncio import os -from mistralai_private import MistralPrivate +from mistralai import Mistral MODEL = "mistral-medium-latest" async def main(): api_key = os.environ["MISTRAL_API_KEY"] - client = MistralPrivate(api_key=api_key) + client = Mistral(api_key=api_key) agent = client.beta.agents.create( model=MODEL, From d8315acdc2c4f03c0d0818b27a8d721c6e8c921e Mon Sep 17 00:00:00 2001 From: alex-ac Date: Mon, 26 May 2025 14:56:29 +0200 Subject: [PATCH 131/223] fix README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 5d010776..b8926d7b 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,7 @@ When using the agents related feature it is required to add the `agents` extra d installing the package: ```bash -pip install "mistralai-private[agents]" +pip install "mistralai[agents]" ``` > Note: Because of some of our dependencies, these features are only available for python version higher or equal to From 607b835be40c52121a04492f7d6181fc34aab0f7 Mon Sep 17 00:00:00 2001 From: alex-ac Date: Mon, 26 May 2025 15:14:30 +0200 Subject: [PATCH 132/223] fix: example async_jobs_chat not deterministic file --- examples/async_jobs_chat.py | 87 +++++++++++++++++++++++++++++-------- 1 file changed, 68 insertions(+), 19 deletions(-) diff --git a/examples/async_jobs_chat.py b/examples/async_jobs_chat.py index 84327b32..80e598c7 100644 --- a/examples/async_jobs_chat.py +++ 
b/examples/async_jobs_chat.py @@ -1,7 +1,9 @@ #!/usr/bin/env python - import asyncio +import json import os +import random +from pathlib import Path from mistralai import Mistral from mistralai.models import ( @@ -11,46 +13,93 @@ POLLING_INTERVAL = 10 +cwd = Path(__file__).parent + +user_contents = [ + "How far is the Moon from Earth?", + "What's the largest ocean on Earth?", + "How many continents are there?", + "What's the powerhouse of the cell?", + "What's the speed of light?", + "Can you solve a Rubik's Cube?", + "What is the tallest mountain in the world?", + "Who painted the Mona Lisa?", +] + +# List of assistant contents +assistant_contents = [ + "Around 384,400 kilometers. Give or take a few, like that really matters.", + "The Pacific Ocean. You know, the one that covers more than 60 million square miles. No big deal.", + "There are seven continents. I hope that wasn't too hard to count.", + "The mitochondria. Remember that from high school biology?", + "Approximately 299,792 kilometers per second. You know, faster than your internet speed.", + "I could if I had hands. What's your excuse?", + "Mount Everest, standing at 29,029 feet. You know, just a little hill.", + "Leonardo da Vinci. Just another guy who liked to doodle.", +] + +system_message = "Marv is a factual chatbot that is also sarcastic" + +def create_validation_file() -> bytes: + return json.dumps({ + "messages": [ + {"role": "user", "content": "How long does it take to travel around the Earth?"}, + {"role": "assistant", "content": "Around 24 hours if you're the Earth itself. 
For you, depends on your mode of transportation."} + ], + "temperature": random.random() + }).encode() async def main(): api_key = os.environ["MISTRAL_API_KEY"] client = Mistral(api_key=api_key) + requests = [] + for um, am in zip( + random.sample(user_contents, len(user_contents)), + random.sample(assistant_contents, len(assistant_contents)), + ): + requests.append(json.dumps({ + "messages": [ + {"role": "system", "content": system_message}, + {"role": "user", "content": um}, + {"role": "assistant", "content": am}, + ] + })) + # Create new files - with open("examples/fixtures/ft_training_file.jsonl", "rb") as f: - training_file = await client.files.upload_async( - file=File(file_name="file.jsonl", content=f) - ) - with open("examples/fixtures/ft_validation_file.jsonl", "rb") as f: - validation_file = await client.files.upload_async( - file=File(file_name="validation_file.jsonl", content=f) - ) + training_file = await client.files.upload_async( + file=File( + file_name="file.jsonl", content=("\n".join(requests)).encode() + ), + purpose="fine-tune", + ) + + validation_file = await client.files.upload_async( + file=File( + file_name="validation_file.jsonl", content=create_validation_file() + ), + purpose="fine-tune", + ) # Create a new job created_job = await client.fine_tuning.jobs.create_async( model="open-mistral-7b", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], hyperparameters=CompletionTrainingParametersIn( - training_steps=2, + training_steps=1, learning_rate=0.0001, ), ) - print(created_job) - while created_job.status in [ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - ]: + while created_job.status in ["RUNNING", "STARTED", "QUEUED", "VALIDATING", "VALIDATED"]: created_job = await client.fine_tuning.jobs.get_async(job_id=created_job.id) print(f"Job is {created_job.status}, waiting {POLLING_INTERVAL} seconds") await asyncio.sleep(POLLING_INTERVAL) - if created_job.status != 
"SUCCESS": + if created_job.status == "FAILED": print("Job failed") raise Exception(f"Job failed with {created_job.status}") + print(created_job) # Chat with model response = await client.chat.complete_async( From 69374e89bdc7696968e9bccf7209486a1c4945d1 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Mon, 26 May 2025 16:47:13 +0200 Subject: [PATCH 133/223] Lint with `ruff`, `pyright` and `mypy` the `examples` and `extra` folders (#226) --- .github/workflows/lint_custom_code.yaml | 16 +- .pre-commit-config.yaml | 20 +++ poetry.lock | 147 ++++++++++++------ pyproject.toml | 8 +- scripts/lint_custom_code.sh | 31 ++++ src/mistralai/extra/tests/test_struct_chat.py | 2 +- 6 files changed, 169 insertions(+), 55 deletions(-) create mode 100644 .pre-commit-config.yaml create mode 100755 scripts/lint_custom_code.sh diff --git a/.github/workflows/lint_custom_code.yaml b/.github/workflows/lint_custom_code.yaml index f9289188..c95f2c1b 100644 --- a/.github/workflows/lint_custom_code.yaml +++ b/.github/workflows/lint_custom_code.yaml @@ -21,12 +21,14 @@ jobs: with: python-version: '3.12' - - name: Install ruff - run: pip install ruff + - name: Install Poetry + uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 - - name: Lint with ruff - # No need to lint the automatically generated Speakeasy code + - name: Install dependencies run: | - ruff check examples/ - ruff check src/mistralai/_hooks/ --exclude __init__.py --exclude sdkhooks.py --exclude types.py - ruff check src/mistralai/extra/ + touch README-PYPI.md + poetry install + + # The init, sdkhooks.py and types.py files in the _hooks folders are generated by Speakeasy hence the exclusion + - name: Run all linters + run: scripts/lint_custom_code.sh diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..39e850eb --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,20 @@ +repos: +- repo: 
https://github.com/astral-sh/ruff-pre-commit + rev: v0.11.10 + hooks: + - id: ruff + args: [--fix] + files: ^(example/|src/mistralai/).*\.py$ + exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ +- repo: https://github.com/RobertCraigie/pyright-python + rev: v1.1.401 + hooks: + - id: pyright + files: ^(example/|src/mistralai/).*\.py$ + exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ +- repo: https://github.com/pre-commit/mirrors-mypy + rev: v1.15.0 + hooks: + - id: mypy + files: ^(example/|src/mistralai/).*\.py$ + exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ diff --git a/poetry.lock b/poetry.lock index 3d36b94f..3a645e5e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -391,50 +391,44 @@ files = [ [[package]] name = "mypy" -version = "1.14.1" +version = "1.15.0" description = "Optional static typing for Python" optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, - {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, - {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, - {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, - {file = 
"mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, - {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, - {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, - {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, - {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, - {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, - {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, - {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, - {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, - {file = 
"mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, - {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, - {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, - {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, - {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, +python-versions = ">=3.9" +groups = ["dev", "lint"] +files = [ + {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"}, + {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"}, + {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"}, + {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"}, + {file = "mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"}, + {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"}, + {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"}, + {file = "mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"}, + {file = "mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"}, + {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"}, + {file = 
"mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"}, + {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"}, + {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"}, + {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"}, + {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"}, + {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"}, + {file = "mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"}, + {file = "mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"}, + {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"}, + 
{file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"}, ] [package.dependencies] @@ -455,12 +449,24 @@ version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." optional = false python-versions = ">=3.5" -groups = ["dev"] +groups = ["dev", "lint"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["lint"] +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + [[package]] name = "packaging" version = "24.2" @@ -700,6 +706,27 @@ typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\"" spelling = ["pyenchant (>=3.2,<4.0)"] testutils = ["gitpython (>3)"] +[[package]] +name = "pyright" +version = "1.1.401" +description = "Command line wrapper for pyright" +optional = false +python-versions = ">=3.7" +groups = ["lint"] +files = [ + {file = "pyright-1.1.401-py3-none-any.whl", hash = "sha256:6fde30492ba5b0d7667c16ecaf6c699fab8d7a1263f6a18549e0b00bf7724c06"}, + {file = "pyright-1.1.401.tar.gz", hash = "sha256:788a82b6611fa5e34a326a921d86d898768cddf59edde8e93e56087d277cc6f1"}, +] + +[package.dependencies] +nodeenv = ">=1.6.0" +typing-extensions = ">=4.1" + +[package.extras] +all = ["nodejs-wheel-binaries", "twine (>=3.4.1)"] +dev = ["twine (>=3.4.1)"] +nodejs = ["nodejs-wheel-binaries"] + [[package]] name = "pytest" 
version = "8.3.4" @@ -795,6 +822,34 @@ files = [ [package.dependencies] pyasn1 = ">=0.1.3" +[[package]] +name = "ruff" +version = "0.11.11" +description = "An extremely fast Python linter and code formatter, written in Rust." +optional = false +python-versions = ">=3.7" +groups = ["lint"] +files = [ + {file = "ruff-0.11.11-py3-none-linux_armv6l.whl", hash = "sha256:9924e5ae54125ed8958a4f7de320dab7380f6e9fa3195e3dc3b137c6842a0092"}, + {file = "ruff-0.11.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:c8a93276393d91e952f790148eb226658dd275cddfde96c6ca304873f11d2ae4"}, + {file = "ruff-0.11.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d6e333dbe2e6ae84cdedefa943dfd6434753ad321764fd937eef9d6b62022bcd"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7885d9a5e4c77b24e8c88aba8c80be9255fa22ab326019dac2356cff42089fc6"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b5ab797fcc09121ed82e9b12b6f27e34859e4227080a42d090881be888755d4"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e231ff3132c1119ece836487a02785f099a43992b95c2f62847d29bace3c75ac"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a97c9babe1d4081037a90289986925726b802d180cca784ac8da2bbbc335f709"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8c4ddcbe8a19f59f57fd814b8b117d4fcea9bee7c0492e6cf5fdc22cfa563c8"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6224076c344a7694c6fbbb70d4f2a7b730f6d47d2a9dc1e7f9d9bb583faf390b"}, + {file = "ruff-0.11.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:882821fcdf7ae8db7a951df1903d9cb032bbe838852e5fc3c2b6c3ab54e39875"}, + {file = "ruff-0.11.11-py3-none-musllinux_1_2_aarch64.whl", hash = 
"sha256:dcec2d50756463d9df075a26a85a6affbc1b0148873da3997286caf1ce03cae1"}, + {file = "ruff-0.11.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:99c28505ecbaeb6594701a74e395b187ee083ee26478c1a795d35084d53ebd81"}, + {file = "ruff-0.11.11-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9263f9e5aa4ff1dec765e99810f1cc53f0c868c5329b69f13845f699fe74f639"}, + {file = "ruff-0.11.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:64ac6f885e3ecb2fdbb71de2701d4e34526651f1e8503af8fb30d4915a3fe345"}, + {file = "ruff-0.11.11-py3-none-win32.whl", hash = "sha256:1adcb9a18802268aaa891ffb67b1c94cd70578f126637118e8099b8e4adcf112"}, + {file = "ruff-0.11.11-py3-none-win_amd64.whl", hash = "sha256:748b4bb245f11e91a04a4ff0f96e386711df0a30412b9fe0c74d5bdc0e4a531f"}, + {file = "ruff-0.11.11-py3-none-win_arm64.whl", hash = "sha256:6c51f136c0364ab1b774767aa8b86331bd8e9d414e2d107db7a2189f35ea1f7b"}, + {file = "ruff-0.11.11.tar.gz", hash = "sha256:7774173cc7c1980e6bf67569ebb7085989a78a103922fb83ef3dfe230cd0687d"}, +] + [[package]] name = "six" version = "1.16.0" @@ -825,7 +880,7 @@ version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" -groups = ["dev"] +groups = ["dev", "lint"] markers = "python_version < \"3.11\"" files = [ {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, @@ -892,7 +947,7 @@ version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" -groups = ["main", "dev"] +groups = ["main", "dev", "lint"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, @@ -938,4 +993,4 @@ gcp = ["google-auth", "requests"] [metadata] lock-version = "2.1" python-versions = 
">=3.9" -content-hash = "c3917a9114ca2a0c01aedf207fa1b59cc259bb07c4d2914fe2ed9a4cb3e1785e" +content-hash = "54b1df325389d0398ad0ecb64575548aff40b97d220b0002780d48bee3c90846" diff --git a/pyproject.toml b/pyproject.toml index 1d7bd53d..f429e1e4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,12 +29,18 @@ include = ["py.typed", "src/mistralai/py.typed"] in-project = true [tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" +mypy = "==1.15.0" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" types-python-dateutil = "^2.9.0.20240316" +[tool.poetry.group.lint.dependencies] +ruff = "^0.11.10" +pyright = "^1.1.401" +mypy = "==1.15.0" + + [project.optional-dependencies] gcp = [ "google-auth >=2.27.0", diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh new file mode 100755 index 00000000..dca05562 --- /dev/null +++ b/scripts/lint_custom_code.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -e + +ERRORS=0 + +echo "Running mypy..." +# TODO: Uncomment once the examples are fixed +# poetry run mypy examples/ || ERRORS=1 +poetry run mypy src/mistralai/extra/ || ERRORS=1 +poetry run mypy src/mistralai/_hooks/ \ +--exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 + +echo "Running pyright..." +# TODO: Uncomment once the examples are fixed +# poetry run pyright examples/ || ERRORS=1 +poetry run pyright src/mistralai/extra/ || ERRORS=1 +poetry run pyright src/mistralai/_hooks/ || ERRORS=1 + +echo "Running ruff..." 
+poetry run ruff check examples/ || ERRORS=1 +poetry run ruff check src/mistralai/extra/ || ERRORS=1 +poetry run ruff check src/mistralai/_hooks/ \ +--exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 + +if [ "$ERRORS" -ne 0 ]; then +echo "❌ One or more linters failed" +exit 1 +else +echo "✅ All linters passed" +fi diff --git a/src/mistralai/extra/tests/test_struct_chat.py b/src/mistralai/extra/tests/test_struct_chat.py index fd93575e..dd529ba5 100644 --- a/src/mistralai/extra/tests/test_struct_chat.py +++ b/src/mistralai/extra/tests/test_struct_chat.py @@ -45,7 +45,7 @@ class MathDemonstration(BaseModel): ) -expected_response = ParsedChatCompletionResponse( +expected_response: ParsedChatCompletionResponse = ParsedChatCompletionResponse( choices=[ ParsedChatCompletionChoice( index=0, From 3390e69253132ce3c3eb4ef489fb4aa7383848cf Mon Sep 17 00:00:00 2001 From: alex-ac Date: Mon, 26 May 2025 18:44:59 +0200 Subject: [PATCH 134/223] add linting rules --- .github/workflows/lint_custom_code.yaml | 2 +- .github/workflows/run_example_scripts.yaml | 4 +- poetry.lock | 41 +++++++++++++------ pyproject.toml | 1 + scripts/lint_custom_code.sh | 7 ++++ .../run_all.sh => scripts/run_examples.sh | 0 6 files changed, 39 insertions(+), 16 deletions(-) rename examples/run_all.sh => scripts/run_examples.sh (100%) diff --git a/.github/workflows/lint_custom_code.yaml b/.github/workflows/lint_custom_code.yaml index c95f2c1b..0bbf7126 100644 --- a/.github/workflows/lint_custom_code.yaml +++ b/.github/workflows/lint_custom_code.yaml @@ -27,7 +27,7 @@ jobs: - name: Install dependencies run: | touch README-PYPI.md - poetry install + poetry install --all-extras # The init, sdkhooks.py and types.py files in the _hooks folders are generated by Speakeasy hence the exclusion - name: Run all linters diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index 329acdec..7d8eb792 100644 --- 
a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -47,7 +47,7 @@ jobs: run: | PACKAGE="dist/$(ls dist | grep whl | head -n 1)" python3 -m pip install "$PACKAGE" - ./examples/run_all.sh --no-extra-dep + ./scripts/run_examples.sh --no-extra-dep env: MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} @@ -57,7 +57,7 @@ jobs: run: | PACKAGE="dist/$(ls dist | grep whl | head -n 1)[agents]" python3 -m pip install "$PACKAGE" - ./examples/run_all.sh + ./scripts/run_examples.sh env: MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} diff --git a/poetry.lock b/poetry.lock index d733697b..9ffdc439 100644 --- a/poetry.lock +++ b/poetry.lock @@ -95,10 +95,9 @@ files = [ name = "cffi" version = "1.17.1" description = "Foreign Function Interface for Python calling C code." -optional = true +optional = false python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"agents\" and platform_python_implementation != \"PyPy\"" +groups = ["main", "dev"] files = [ {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, @@ -168,6 +167,7 @@ files = [ {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, ] +markers = {main = "extra == \"agents\" and platform_python_implementation != \"PyPy\"", dev = "platform_python_implementation != \"PyPy\""} [package.dependencies] pycparser = "*" @@ -321,10 +321,9 @@ markers = {main = "extra == \"agents\"", dev = "sys_platform == \"win32\""} name = "cryptography" version = "43.0.3" description = "cryptography is a 
package which provides cryptographic recipes and primitives to Python developers." -optional = true +optional = false python-versions = ">=3.7" -groups = ["main"] -markers = "python_version < \"3.11\" and extra == \"agents\"" +groups = ["main", "dev"] files = [ {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, @@ -354,6 +353,7 @@ files = [ {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, ] +markers = {main = "python_version < \"3.11\" and extra == \"agents\"", dev = "python_version < \"3.11\""} [package.dependencies] cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} @@ -372,10 +372,9 @@ test-randomorder = ["pytest-randomly"] name = "cryptography" version = "45.0.3" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-optional = true +optional = false python-versions = "!=3.9.0,!=3.9.1,>=3.7" -groups = ["main"] -markers = "python_version >= \"3.11\" and extra == \"agents\"" +groups = ["main", "dev"] files = [ {file = "cryptography-45.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:7573d9eebaeceeb55285205dbbb8753ac1e962af3d9640791d12b36864065e71"}, {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d377dde61c5d67eb4311eace661c3efda46c62113ff56bf05e2d679e02aebb5b"}, @@ -415,6 +414,7 @@ files = [ {file = "cryptography-45.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:9eda14f049d7f09c2e8fb411dda17dd6b16a3c76a1de5e249188a32aeb92de19"}, {file = "cryptography-45.0.3.tar.gz", hash = "sha256:ec21313dd335c51d7877baf2972569f40a4291b76a0ce51391523ae358d05899"}, ] +markers = {main = "python_version >= \"3.11\" and extra == \"agents\"", dev = "python_version >= \"3.11\""} [package.dependencies] cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""} @@ -828,14 +828,14 @@ pyasn1 = ">=0.4.6,<0.7.0" name = "pycparser" version = "2.22" description = "C parser in Python" -optional = true +optional = false python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"agents\" and platform_python_implementation != \"PyPy\"" +groups = ["main", "dev"] files = [ {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, ] +markers = {main = "extra == \"agents\" and platform_python_implementation != \"PyPy\"", dev = "platform_python_implementation != \"PyPy\""} [[package]] name = "pydantic" @@ -1319,6 +1319,21 @@ files = [ {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, ] +[[package]] +name = "types-authlib" +version = "1.5.0.20250516" +description = 
"Typing stubs for Authlib" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "types_authlib-1.5.0.20250516-py3-none-any.whl", hash = "sha256:c553659ba00b7e5f98d1bc183a47224a882de5d32c07917b1587a6a22ddd2583"}, + {file = "types_authlib-1.5.0.20250516.tar.gz", hash = "sha256:6d11b46622c4c338087d059e9036887408c788cf254f0fb11ff69f2a85ca7231"}, +] + +[package.dependencies] +cryptography = "*" + [[package]] name = "types-python-dateutil" version = "2.9.0.20241003" @@ -1405,4 +1420,4 @@ gcp = ["google-auth", "requests"] [metadata] lock-version = "2.1" python-versions = ">=3.9" -content-hash = "d2b70251e2fdbdebd5c58b53b1aed10f4d70838b3b8481559d9e0c1ac838130a" +content-hash = "f111068ee90dcada908f5064a1ed67f027a728ababa2bb6bd9e6957957fc5c6c" diff --git a/pyproject.toml b/pyproject.toml index 95b5744c..1050042e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,7 @@ pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" types-python-dateutil = "^2.9.0.20240316" +types-authlib = "^1.5.0.20250516" [tool.poetry.group.lint.dependencies] ruff = "^0.11.10" diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh index dca05562..163bb3a6 100755 --- a/scripts/lint_custom_code.sh +++ b/scripts/lint_custom_code.sh @@ -7,19 +7,26 @@ ERRORS=0 echo "Running mypy..." # TODO: Uncomment once the examples are fixed # poetry run mypy examples/ || ERRORS=1 +echo "-> running on extra" poetry run mypy src/mistralai/extra/ || ERRORS=1 +echo "-> running on hooks" poetry run mypy src/mistralai/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 echo "Running pyright..." # TODO: Uncomment once the examples are fixed # poetry run pyright examples/ || ERRORS=1 +echo "-> running on extra" poetry run pyright src/mistralai/extra/ || ERRORS=1 +echo "-> running on hooks" poetry run pyright src/mistralai/_hooks/ || ERRORS=1 echo "Running ruff..." 
+echo "-> running on examples" poetry run ruff check examples/ || ERRORS=1 +echo "-> running on extra" poetry run ruff check src/mistralai/extra/ || ERRORS=1 +echo "-> running on hooks" poetry run ruff check src/mistralai/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 diff --git a/examples/run_all.sh b/scripts/run_examples.sh similarity index 100% rename from examples/run_all.sh rename to scripts/run_examples.sh From 55e8c9fbbd313dc349a912cab8eeb5e6d17bfd08 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Wed, 28 May 2025 17:14:23 +0200 Subject: [PATCH 135/223] fix: extras dependencies related issues during compiling (#227) * fix: extras dependencies related issues during compiling * add jsonpath back * fix dumb typo --- .genignore | 1 + pylintrc | 3 ++- pyproject.toml | 8 +++++++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/.genignore b/.genignore index 1ded5670..be3ba87c 100644 --- a/.genignore +++ b/.genignore @@ -1,3 +1,4 @@ pyproject.toml examples/* src/mistral/extra/* +pylintrc \ No newline at end of file diff --git a/pylintrc b/pylintrc index 266bc815..dd962451 100644 --- a/pylintrc +++ b/pylintrc @@ -52,7 +52,8 @@ ignore=CVS # ignore-list. The regex matches against paths and can be in Posix or Windows # format. Because '\\' represents the directory delimiter on Windows systems, # it can't be used as an escape character. -ignore-paths= +ignore-paths=^src/mistralai_private/extra/.*$, + ^src/mistralai_private/conversations.py # Files or directories matching the regular expression patterns are skipped. # The regex matches against base names, not paths. 
The default value ignores diff --git a/pyproject.toml b/pyproject.toml index 1050042e..dc055d7b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,7 +69,13 @@ module = "typing_inspect" ignore_missing_imports = true [[tool.mypy.overrides]] -module = "jsonpath" +module = [ + "jsonpath.*", + "typing_inspect.*", + "authlib.*", + "mcp.*", + "griffe.*" +] ignore_missing_imports = true [tool.pyright] From 73f224460e42301d0395bb95b758eee8da96a7aa Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Wed, 28 May 2025 17:29:22 +0200 Subject: [PATCH 136/223] fix: typo in pylintrc ignore-paths (#228) --- pylintrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pylintrc b/pylintrc index dd962451..f27c59ff 100644 --- a/pylintrc +++ b/pylintrc @@ -52,8 +52,8 @@ ignore=CVS # ignore-list. The regex matches against paths and can be in Posix or Windows # format. Because '\\' represents the directory delimiter on Windows systems, # it can't be used as an escape character. -ignore-paths=^src/mistralai_private/extra/.*$, - ^src/mistralai_private/conversations.py +ignore-paths=^src/mistrala/extra/.*$, + ^src/mistrala/conversations.py # Files or directories matching the regular expression patterns are skipped. # The regex matches against base names, not paths. The default value ignores From cb79e0d1a4fcd54bea48ec63ae0d74f635f54687 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Wed, 28 May 2025 17:37:30 +0200 Subject: [PATCH 137/223] fix: incomplete company nam (#229) --- pylintrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pylintrc b/pylintrc index f27c59ff..29202a96 100644 --- a/pylintrc +++ b/pylintrc @@ -52,8 +52,8 @@ ignore=CVS # ignore-list. The regex matches against paths and can be in Posix or Windows # format. Because '\\' represents the directory delimiter on Windows systems, # it can't be used as an escape character. 
-ignore-paths=^src/mistrala/extra/.*$, - ^src/mistrala/conversations.py +ignore-paths=^src/mistralai/extra/.*$, + ^src/mistralai/conversations.py # Files or directories matching the regular expression patterns are skipped. # The regex matches against base names, not paths. The default value ignores From 49a72fb5bbdf0797da504ba3f6fb7e9f566fc938 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Wed, 28 May 2025 21:11:50 +0200 Subject: [PATCH 138/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.517.3 (#230) --- .speakeasy/gen.lock | 12 +++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 11 +++-- RELEASES.md | 12 ++++- docs/models/chatcompletionresponse.md | 4 +- docs/models/embeddingdtype.md | 12 +++++ docs/models/embeddingrequest.md | 4 +- docs/models/fimcompletionresponse.md | 4 +- docs/sdks/conversations/README.md | 2 + docs/sdks/embeddings/README.md | 2 + docs/sdks/mistralagents/README.md | 2 + pyproject.toml | 2 +- src/mistralai/_version.py | 4 +- src/mistralai/beta.py | 2 + src/mistralai/conversations.py | 7 ++- src/mistralai/embeddings.py | 12 +++++ src/mistralai/mistral_agents.py | 2 + src/mistralai/models/__init__.py | 2 + .../models/chatcompletionresponse.py | 12 ++--- src/mistralai/models/embeddingdtype.py | 7 +++ src/mistralai/models/embeddingrequest.py | 46 +++++++++++++++++-- src/mistralai/models/fimcompletionresponse.py | 12 ++--- 22 files changed, 139 insertions(+), 36 deletions(-) create mode 100644 docs/models/embeddingdtype.md create mode 100644 src/mistralai/models/embeddingdtype.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 80be7b20..ad83b298 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: e9c447db719018a5721988252c09c2dc + docChecksum: 12226a12b72a82af24e4b098c631ff42 docVersion: 1.0.0 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - 
releaseVersion: 1.8.0 - configChecksum: 1f7adfac0b677cdca4c073a11cbcef02 + releaseVersion: 1.8.1 + configChecksum: a47e6a59e54c30528cf829e1a6fcc310 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -185,6 +185,7 @@ generatedFiles: - docs/models/documentlibrarytooltype.md - docs/models/documenturlchunk.md - docs/models/documenturlchunktype.md + - docs/models/embeddingdtype.md - docs/models/embeddingrequest.md - docs/models/embeddingrequestinputs.md - docs/models/embeddingresponse.md @@ -398,7 +399,6 @@ generatedFiles: - docs/sdks/ocr/README.md - poetry.toml - py.typed - - pylintrc - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai/__init__.py @@ -500,6 +500,7 @@ generatedFiles: - src/mistralai/models/deltamessage.py - src/mistralai/models/documentlibrarytool.py - src/mistralai/models/documenturlchunk.py + - src/mistralai/models/embeddingdtype.py - src/mistralai/models/embeddingrequest.py - src/mistralai/models/embeddingresponse.py - src/mistralai/models/embeddingresponsedata.py @@ -1004,6 +1005,7 @@ examples: responses: "422": application/json: {} + "200": {} agents_api_v1_conversations_append_stream: speakeasy-default-agents-api-v1-conversations-append-stream: parameters: @@ -1014,6 +1016,7 @@ examples: responses: "422": application/json: {} + "200": {} agents_api_v1_conversations_restart_stream: speakeasy-default-agents-api-v1-conversations-restart-stream: parameters: @@ -1024,6 +1027,7 @@ examples: responses: "422": application/json: {} + "200": {} agents_api_v1_agents_create: speakeasy-default-agents-api-v1-agents-create: requestBody: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 820ed567..a8255953 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - 
version: 1.8.0 + version: 1.8.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index d0361942..d38d57a2 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,10 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:f2590d9933e1e9208fa5b8e509b671e6a86907268bcd5dad41dc4179e20c5b69 - sourceBlobDigest: sha256:3026ed65da39c94e9787697305e7e059bec5cff09bceeddc6e68c289cfaeb592 + sourceRevisionDigest: sha256:ab414b4936bea95f6713273bbcee90d66af9da18c6672d62ce885769c47edc34 + sourceBlobDigest: sha256:109235b09f0f6d400d146591dff959fac189a873fc95f4b32867d47d65779d28 tags: - latest + - speakeasy-sdk-regen-1748446717 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -36,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:f2590d9933e1e9208fa5b8e509b671e6a86907268bcd5dad41dc4179e20c5b69 - sourceBlobDigest: sha256:3026ed65da39c94e9787697305e7e059bec5cff09bceeddc6e68c289cfaeb592 + sourceRevisionDigest: sha256:ab414b4936bea95f6713273bbcee90d66af9da18c6672d62ce885769c47edc34 + sourceBlobDigest: sha256:109235b09f0f6d400d146591dff959fac189a873fc95f4b32867d47d65779d28 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:bd4031e558c0426c02f2a4f3bb1642068047aa555e0f9cbbc70de74ff7ec04ec + codeSamplesRevisionDigest: sha256:03b507fe6fdcabb21ec711d436300a3888b22fbfc970722bb3433db31c06047a workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/RELEASES.md b/RELEASES.md index fc9229a9..14663d6c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -218,4 +218,14 @@ Based on: ### Generated - [python v1.8.0] . ### Releases -- [PyPI v1.8.0] https://pypi.org/project/mistralai/1.8.0 - . 
\ No newline at end of file +- [PyPI v1.8.0] https://pypi.org/project/mistralai/1.8.0 - . + +## 2025-05-28 15:38:22 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.8.1] . +### Releases +- [PyPI v1.8.1] https://pypi.org/project/mistralai/1.8.1 - . \ No newline at end of file diff --git a/docs/models/chatcompletionresponse.md b/docs/models/chatcompletionresponse.md index ad376158..a0465ffb 100644 --- a/docs/models/chatcompletionresponse.md +++ b/docs/models/chatcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/embeddingdtype.md b/docs/models/embeddingdtype.md new file mode 100644 index 00000000..01656b0a --- /dev/null +++ b/docs/models/embeddingdtype.md @@ -0,0 +1,12 @@ +# EmbeddingDtype + + +## Values + +| Name | Value | +| --------- | --------- | +| `FLOAT` | float | +| `INT8` | int8 | +| `UINT8` | uint8 | +| `BINARY` | binary | +| `UBINARY` | ubinary | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 242bb3e3..3a778a6f 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -6,4 +6,6 @@ | Field | Type 
| Required | Description | Example | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | \ No newline at end of file +| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings. | | +| `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/fimcompletionresponse.md b/docs/models/fimcompletionresponse.md index da786a1f..cd62d034 100644 --- a/docs/models/fimcompletionresponse.md +++ b/docs/models/fimcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index b5c12b24..8b462c16 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -3,6 +3,8 @@ ## Overview +(beta) Converstations API + ### Available Operations * [start](#start) - Create a conversation and append entries to it. 
diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index d55b38fb..91e33138 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -40,6 +40,8 @@ with Mistral( | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | | `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | +| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings. | | +| `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index aeb2b917..496016c3 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -3,6 +3,8 @@ ## Overview +(beta) Agents API + ### Available Operations * [create](#create) - Create a agent that can be used within a conversation. diff --git a/pyproject.toml b/pyproject.toml index dc055d7b..961af49d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.8.0" +version = "1.8.1" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index de6b8db8..ddccfbfa 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.8.0" +__version__: str = "1.8.1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.8.0 2.548.6 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.8.1 2.548.6 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py index 6858b0a8..a0d45f67 100644 --- a/src/mistralai/beta.py +++ b/src/mistralai/beta.py @@ -8,7 +8,9 @@ class Beta(BaseSDK): conversations: Conversations + r"""(beta) Converstations API""" agents: MistralAgents + r"""(beta) Agents API""" def __init__(self, sdk_config: SDKConfiguration) -> None: BaseSDK.__init__(self, sdk_config) diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 6e4b37ee..320e3d09 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -34,8 +34,9 @@ # endregion imports - class Conversations(BaseSDK): + r"""(beta) Converstations API""" + # region sdk-class-body # Custom run code allowing client side execution of code @@ -146,7 +147,9 @@ async def run_stream_async( completion_args=completion_args, ) - async def run_generator() -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: + async def run_generator() -> ( + AsyncGenerator[Union[RunResultEvents, RunResult], None] + ): current_entries = input_entries while True: received_event_tracker: defaultdict[ diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index b81a5e37..fee30251 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -18,6 +18,8 @@ def create( inputs: Union[ models.EmbeddingRequestInputs, 
models.EmbeddingRequestInputsTypedDict ], + output_dimension: OptionalNullable[int] = UNSET, + output_dtype: Optional[models.EmbeddingDtype] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -29,6 +31,8 @@ def create( :param model: ID of the model to use. :param inputs: Text to embed. + :param output_dimension: The dimension of the output embeddings. + :param output_dtype: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -47,6 +51,8 @@ def create( request = models.EmbeddingRequest( model=model, inputs=inputs, + output_dimension=output_dimension, + output_dtype=output_dtype, ) req = self._build_request( @@ -125,6 +131,8 @@ async def create_async( inputs: Union[ models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict ], + output_dimension: OptionalNullable[int] = UNSET, + output_dtype: Optional[models.EmbeddingDtype] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -136,6 +144,8 @@ async def create_async( :param model: ID of the model to use. :param inputs: Text to embed. + :param output_dimension: The dimension of the output embeddings. 
+ :param output_dtype: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -154,6 +164,8 @@ async def create_async( request = models.EmbeddingRequest( model=model, inputs=inputs, + output_dimension=output_dimension, + output_dtype=output_dtype, ) req = self._build_request_async( diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py index 5fdd8f32..a22ce41d 100644 --- a/src/mistralai/mistral_agents.py +++ b/src/mistralai/mistral_agents.py @@ -9,6 +9,8 @@ class MistralAgents(BaseSDK): + r"""(beta) Agents API""" + def create( self, *, diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index cf121986..9ed85c07 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -347,6 +347,7 @@ DocumentURLChunkType, DocumentURLChunkTypedDict, ) +from .embeddingdtype import EmbeddingDtype from .embeddingrequest import ( EmbeddingRequest, EmbeddingRequestInputs, @@ -964,6 +965,7 @@ "DocumentURLChunk", "DocumentURLChunkType", "DocumentURLChunkTypedDict", + "EmbeddingDtype", "EmbeddingRequest", "EmbeddingRequestInputs", "EmbeddingRequestInputsTypedDict", diff --git a/src/mistralai/models/chatcompletionresponse.py b/src/mistralai/models/chatcompletionresponse.py index 67f19651..3d03b126 100644 --- a/src/mistralai/models/chatcompletionresponse.py +++ b/src/mistralai/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class ChatCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class 
ChatCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: List[ChatCompletionChoiceTypedDict] class ChatCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class ChatCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: List[ChatCompletionChoice] diff --git a/src/mistralai/models/embeddingdtype.py b/src/mistralai/models/embeddingdtype.py new file mode 100644 index 00000000..4f3c41bd --- /dev/null +++ b/src/mistralai/models/embeddingdtype.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +EmbeddingDtype = Literal["float", "int8", "uint8", "binary", "ubinary"] diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index bf9ce3ff..56cccc72 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -1,10 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel +from .embeddingdtype import EmbeddingDtype +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict +from pydantic import model_serializer +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict EmbeddingRequestInputsTypedDict = TypeAliasType( @@ -22,6 +24,9 @@ class EmbeddingRequestTypedDict(TypedDict): r"""ID of the model to use.""" inputs: EmbeddingRequestInputsTypedDict r"""Text to embed.""" + output_dimension: NotRequired[Nullable[int]] + r"""The dimension of the output embeddings.""" + output_dtype: NotRequired[EmbeddingDtype] class EmbeddingRequest(BaseModel): @@ -30,3 +35,38 @@ class EmbeddingRequest(BaseModel): inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] r"""Text to embed.""" + + output_dimension: OptionalNullable[int] = UNSET + r"""The dimension of the output embeddings.""" + + output_dtype: Optional[EmbeddingDtype] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["output_dimension", "output_dtype"] + nullable_fields = ["output_dimension"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/fimcompletionresponse.py 
b/src/mistralai/models/fimcompletionresponse.py index 9fe05820..f27972b9 100644 --- a/src/mistralai/models/fimcompletionresponse.py +++ b/src/mistralai/models/fimcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class FIMCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class FIMCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: List[ChatCompletionChoiceTypedDict] class FIMCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class FIMCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: List[ChatCompletionChoice] From 1a2f775ddec41e48384752267ffc07c4f8de59ff Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 10 Jun 2025 16:43:42 +0000 Subject: [PATCH 139/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.517.3 --- .speakeasy/gen.lock | 8 +++++--- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 ++++++------ RELEASES.md | 12 +++++++++++- docs/models/agentscompletionrequest.md | 3 ++- docs/models/agentscompletionstreamrequest.md | 3 ++- docs/models/chatcompletionrequest.md | 1 + docs/models/chatcompletionstreamrequest.md | 1 + docs/models/mistralpromptmode.md | 8 ++++++++ docs/sdks/agents/README.md | 2 ++ docs/sdks/chat/README.md | 2 ++ docs/sdks/conversations/README.md | 2 +- pyproject.toml | 2 +- src/mistralai/_version.py | 4 ++-- src/mistralai/agents.py | 12 ++++++++++++ src/mistralai/beta.py | 2 +- src/mistralai/chat.py | 12 ++++++++++++ src/mistralai/conversations.py | 2 +- 
src/mistralai/models/__init__.py | 2 ++ .../models/agentscompletionrequest.py | 12 ++++++++++-- .../models/agentscompletionstreamrequest.py | 12 ++++++++++-- src/mistralai/models/chatcompletionrequest.py | 19 +++++++++++++++++-- .../models/chatcompletionstreamrequest.py | 19 +++++++++++++++++-- src/mistralai/models/mistralpromptmode.py | 8 ++++++++ 24 files changed, 135 insertions(+), 27 deletions(-) create mode 100644 docs/models/mistralpromptmode.md create mode 100644 src/mistralai/models/mistralpromptmode.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index ad83b298..c568d4f3 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 12226a12b72a82af24e4b098c631ff42 + docChecksum: 9c8bd4d6bf675b159a80173b97c1265c docVersion: 1.0.0 speakeasyVersion: 1.517.3 generationVersion: 2.548.6 - releaseVersion: 1.8.1 - configChecksum: a47e6a59e54c30528cf829e1a6fcc310 + releaseVersion: 1.8.2 + configChecksum: 5024c28578f991eabb85310ad8df96b7 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -293,6 +293,7 @@ generatedFiles: - docs/models/messageoutputeventtype.md - docs/models/messages.md - docs/models/metricout.md + - docs/models/mistralpromptmode.md - docs/models/modelcapabilities.md - docs/models/modelconversation.md - docs/models/modelconversationobject.md @@ -562,6 +563,7 @@ generatedFiles: - src/mistralai/models/messageoutputentry.py - src/mistralai/models/messageoutputevent.py - src/mistralai/models/metricout.py + - src/mistralai/models/mistralpromptmode.py - src/mistralai/models/modelcapabilities.py - src/mistralai/models/modelconversation.py - src/mistralai/models/modellist.py diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index a8255953..77710816 100644 --- 
a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -15,7 +15,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.8.1 + version: 1.8.2 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index d38d57a2..c618ac1d 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:ab414b4936bea95f6713273bbcee90d66af9da18c6672d62ce885769c47edc34 - sourceBlobDigest: sha256:109235b09f0f6d400d146591dff959fac189a873fc95f4b32867d47d65779d28 + sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 + sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 tags: - latest - - speakeasy-sdk-regen-1748446717 + - speakeasy-sdk-regen-1749573609 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:ab414b4936bea95f6713273bbcee90d66af9da18c6672d62ce885769c47edc34 - sourceBlobDigest: sha256:109235b09f0f6d400d146591dff959fac189a873fc95f4b32867d47d65779d28 + sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 + sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:03b507fe6fdcabb21ec711d436300a3888b22fbfc970722bb3433db31c06047a + codeSamplesRevisionDigest: sha256:dc4396ba994048a9f31c008dced1a46a9e54d89973e9608039a7bc37b1052957 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.517.3 diff --git a/RELEASES.md b/RELEASES.md index 14663d6c..265eda73 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -228,4 +228,14 @@ Based on: ### Generated - [python v1.8.1] . 
### Releases -- [PyPI v1.8.1] https://pypi.org/project/mistralai/1.8.1 - . \ No newline at end of file +- [PyPI v1.8.1] https://pypi.org/project/mistralai/1.8.1 - . + +## 2025-06-10 16:42:28 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.517.3 (2.548.6) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.8.2] . +### Releases +- [PyPI v1.8.2] https://pypi.org/project/mistralai/1.8.2 - . \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 73a0f77a..8ace69d9 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -18,4 +18,5 @@ | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index 4a3093dd..0bab012c 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -18,4 +18,5 @@ | `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index 904ad6c5..a850b5b8 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -21,4 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index f2cce68b..cf286cda 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -21,4 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/mistralpromptmode.md b/docs/models/mistralpromptmode.md new file mode 100644 index 00000000..7416e203 --- /dev/null +++ b/docs/models/mistralpromptmode.md @@ -0,0 +1,8 @@ +# MistralPromptMode + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 28e10497..c7fdb687 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -55,6 +55,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -115,6 +116,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 12d9feca..3a8d57fa 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -57,6 +57,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -120,6 +121,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 8b462c16..6d6aaa2c 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -3,7 +3,7 @@ ## Overview -(beta) Converstations API +(beta) Conversations API ### Available Operations diff --git a/pyproject.toml b/pyproject.toml index 961af49d..c7cb9095 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.8.1" +version = "1.8.2" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index ddccfbfa..fc416fd3 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.8.1" +__version__: str = "1.8.2" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.8.1 2.548.6 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.8.2 2.548.6 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index e81f01aa..4fbb25dd 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -47,6 +47,7 @@ def complete( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -68,6 +69,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. 
:param prediction: :param parallel_tool_calls: + :param prompt_mode: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -105,6 +107,7 @@ def complete( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, agent_id=agent_id, ) @@ -213,6 +216,7 @@ async def complete_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -234,6 +238,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -271,6 +276,7 @@ async def complete_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, agent_id=agent_id, ) @@ -379,6 +385,7 @@ def stream( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -402,6 +409,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. 
:param prediction: :param parallel_tool_calls: + :param prompt_mode: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -439,6 +447,7 @@ def stream( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, agent_id=agent_id, ) @@ -553,6 +562,7 @@ async def stream_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -576,6 +586,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -613,6 +624,7 @@ async def stream_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, agent_id=agent_id, ) diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py index a0d45f67..04209d74 100644 --- a/src/mistralai/beta.py +++ b/src/mistralai/beta.py @@ -8,7 +8,7 @@ class Beta(BaseSDK): conversations: Conversations - r"""(beta) Converstations API""" + r"""(beta) Conversations API""" agents: MistralAgents r"""(beta) Agents API""" diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 0e7294f9..96fcf65d 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -123,6 +123,7 @@ def complete( Union[models.Prediction, 
models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -147,6 +148,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -186,6 +188,7 @@ def complete( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -288,6 +291,7 @@ async def complete_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -312,6 +316,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -351,6 +356,7 @@ async def complete_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -461,6 +467,7 @@ def stream( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -487,6 +494,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -528,6 +536,7 @@ def stream( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -644,6 +653,7 @@ async def stream_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -670,6 +680,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -711,6 +722,7 @@ async def stream_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 320e3d09..6ef02edd 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -35,7 +35,7 @@ class Conversations(BaseSDK): - r"""(beta) Converstations API""" + r"""(beta) Conversations API""" # region sdk-class-body # Custom run code allowing client side execution of code diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 9ed85c07..e6493e90 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -591,6 +591,7 @@ MessageOutputEventTypedDict, ) from .metricout import MetricOut, MetricOutTypedDict +from .mistralpromptmode import MistralPromptMode from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from .modelconversation import ( ModelConversation, @@ -1154,6 +1155,7 @@ "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", + "MistralPromptMode", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelConversation", diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index cd81393a..e99dcfc2 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -11,8 +12,9 @@ from 
.toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator +from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -86,6 +88,7 @@ class AgentsCompletionRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] class AgentsCompletionRequest(BaseModel): @@ -126,6 +129,10 @@ class AgentsCompletionRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -141,8 +148,9 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", ] - nullable_fields = ["max_tokens", "random_seed", "tools", "n"] + nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index ab6a307a..b4b423f5 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import 
Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -11,8 +12,9 @@ from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator +from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -85,6 +87,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] class AgentsCompletionStreamRequest(BaseModel): @@ -124,6 +127,10 @@ class AgentsCompletionStreamRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -139,8 +146,9 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", ] - nullable_fields = ["max_tokens", "random_seed", "tools", "n"] + nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index a277db8f..004cc011 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from 
__future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -11,8 +12,9 @@ from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator +from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -86,6 +88,7 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -134,6 +137,10 @@ class ChatCompletionRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -154,9 +161,17 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] 
serialized = handler(self) diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 9ed7b3f6..78a85bef 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -11,8 +12,9 @@ from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator +from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -89,6 +91,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -136,6 +139,10 @@ class ChatCompletionStreamRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -156,9 +163,17 @@ def 
serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/mistralpromptmode.py b/src/mistralai/models/mistralpromptmode.py new file mode 100644 index 00000000..0ffd6787 --- /dev/null +++ b/src/mistralai/models/mistralpromptmode.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] From b706d55b95fe16c2f31d3f02868dc5d526072150 Mon Sep 17 00:00:00 2001 From: alex-ac Date: Mon, 16 Jun 2025 16:58:06 +0200 Subject: [PATCH 140/223] fix: response format in run context badly set --- src/mistralai/extra/run/context.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/mistralai/extra/run/context.py b/src/mistralai/extra/run/context.py index a79fd59e..08350a84 100644 --- a/src/mistralai/extra/run/context.py +++ b/src/mistralai/extra/run/context.py @@ -30,7 +30,6 @@ FunctionTool, MessageInputEntry, InputEntries, - ResponseFormatTypedDict, ) from logging import getLogger @@ -229,9 +228,8 @@ async def prepare_model_request( elif isinstance(completion_args, CompletionArgs) and self.output_format: completion_args.response_format = self.response_format elif isinstance(completion_args, dict) and self.output_format: - completion_args["response_format"] = typing.cast( - ResponseFormatTypedDict, self.response_format.model_dump() - ) + completion_args = CompletionArgs.model_validate(completion_args) + completion_args.response_format = self.response_format request_tools = [] if 
isinstance(tools, list): for tool in tools: From f724d6203f73e73b95458ef59e86b47bd10efe91 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Mon, 30 Jun 2025 10:18:48 +0200 Subject: [PATCH 141/223] [Upgrade] Bump speakeasy to `1.568.2` (#241) --- .gitignore | 3 + .speakeasy/gen.lock | 442 ++-- .speakeasy/gen.yaml | 11 +- .speakeasy/workflow.lock | 13 +- .speakeasy/workflow.yaml | 8 +- README.md | 17 +- USAGE.md | 8 +- docs/models/agent.md | 14 +- docs/models/agentconversation.md | 8 +- docs/models/agentcreationrequest.md | 4 +- docs/models/agenthandoffdoneevent.md | 8 +- docs/models/agenthandoffentry.md | 10 +- docs/models/agenthandoffstartedevent.md | 8 +- docs/models/agentscompletionrequest.md | 6 +- docs/models/agentscompletionstreamrequest.md | 6 +- docs/models/basemodelcard.md | 2 +- docs/models/batchjobout.md | 8 +- docs/models/batchjobsout.md | 4 +- docs/models/chatcompletionrequest.md | 2 +- docs/models/chatcompletionstreamrequest.md | 2 +- docs/models/classifierdetailedjobout.md | 6 +- docs/models/classifierftmodelout.md | 8 +- docs/models/classifierjobout.md | 4 +- docs/models/completionchunk.md | 6 +- docs/models/completiondetailedjobout.md | 2 +- docs/models/completionftmodelout.md | 6 +- docs/models/completionjobout.md | 2 +- docs/models/conversationhistory.md | 4 +- docs/models/conversationmessages.md | 4 +- docs/models/conversationresponse.md | 4 +- docs/models/conversationrestartrequest.md | 2 +- .../conversationrestartstreamrequest.md | 2 +- docs/models/documentlibrarytool.md | 4 +- docs/models/eventout.md | 4 +- docs/models/fileschema.md | 4 +- docs/models/fimcompletionrequest.md | 2 +- docs/models/fimcompletionstreamrequest.md | 2 +- docs/models/ftmodelcard.md | 6 +- docs/models/function.md | 4 +- docs/models/functioncallentry.md | 8 +- docs/models/functioncallevent.md | 8 +- docs/models/functionresultentry.md | 6 +- docs/models/functiontool.md | 4 +- docs/models/githubrepositoryin.md | 6 +- 
docs/models/githubrepositoryout.md | 6 +- docs/models/jobin.md | 2 +- docs/models/jobsout.md | 4 +- docs/models/jsonschema.md | 2 +- docs/models/legacyjobmetadataout.md | 2 +- docs/models/messageinputentry.md | 6 +- docs/models/messageoutputentry.md | 4 +- docs/models/messageoutputevent.md | 6 +- docs/models/modelconversation.md | 10 +- docs/models/ocrrequest.md | 2 +- docs/models/ocrresponse.md | 4 +- docs/models/responsedoneevent.md | 4 +- docs/models/responseerrorevent.md | 6 +- docs/models/responsestartedevent.md | 4 +- docs/models/retrievefileout.md | 4 +- docs/models/tool.md | 4 +- docs/models/toolcall.md | 2 +- docs/models/toolchoice.md | 4 +- docs/models/toolexecutiondoneevent.md | 4 +- docs/models/toolexecutionentry.md | 2 +- docs/models/toolexecutionstartedevent.md | 6 +- docs/models/toolfilechunk.md | 2 +- docs/models/toolreferencechunk.md | 2 +- docs/models/uploadfileout.md | 4 +- docs/models/wandbintegration.md | 4 +- docs/models/wandbintegrationout.md | 2 +- docs/sdks/agents/README.md | 4 +- docs/sdks/chat/README.md | 4 +- docs/sdks/classifiers/README.md | 69 +- docs/sdks/conversations/README.md | 28 +- docs/sdks/files/README.md | 10 +- docs/sdks/fim/README.md | 4 +- docs/sdks/jobs/README.md | 12 +- docs/sdks/mistralagents/README.md | 6 +- docs/sdks/mistraljobs/README.md | 10 +- docs/sdks/ocr/README.md | 8 +- pyproject.toml | 2 +- src/mistralai/_hooks/types.py | 7 + src/mistralai/_version.py | 6 +- src/mistralai/agents.py | 4 + src/mistralai/basesdk.py | 32 +- src/mistralai/chat.py | 4 + src/mistralai/classifiers.py | 8 + src/mistralai/conversations.py | 20 + src/mistralai/embeddings.py | 2 + src/mistralai/files.py | 12 + src/mistralai/fim.py | 4 + src/mistralai/httpclient.py | 22 +- src/mistralai/jobs.py | 10 + src/mistralai/mistral_agents.py | 10 + src/mistralai/mistral_jobs.py | 8 + src/mistralai/models/__init__.py | 2074 +++++++++++------ src/mistralai/models/agent.py | 2 +- src/mistralai/models/agentconversation.py | 2 +- 
src/mistralai/models/agentcreationrequest.py | 2 +- src/mistralai/models/agenthandoffentry.py | 2 +- .../models/agentscompletionrequest.py | 2 +- .../models/agentscompletionstreamrequest.py | 2 +- src/mistralai/models/agentupdaterequest.py | 2 +- src/mistralai/models/assistantmessage.py | 2 +- src/mistralai/models/basemodelcard.py | 2 +- src/mistralai/models/batchjobin.py | 2 +- src/mistralai/models/batchjobout.py | 2 +- src/mistralai/models/chatcompletionrequest.py | 2 +- .../models/chatcompletionstreamrequest.py | 2 +- .../models/classifierdetailedjobout.py | 2 +- src/mistralai/models/classifierftmodelout.py | 2 +- src/mistralai/models/classifierjobout.py | 2 +- src/mistralai/models/classifiertargetin.py | 2 +- .../models/classifiertrainingparameters.py | 2 +- .../models/classifiertrainingparametersin.py | 2 +- src/mistralai/models/completionargs.py | 2 +- .../models/completiondetailedjobout.py | 2 +- src/mistralai/models/completionftmodelout.py | 2 +- src/mistralai/models/completionjobout.py | 2 +- .../models/completionresponsestreamchoice.py | 2 +- .../models/completiontrainingparameters.py | 2 +- .../models/completiontrainingparametersin.py | 2 +- src/mistralai/models/conversationrequest.py | 2 +- .../models/conversationstreamrequest.py | 2 +- src/mistralai/models/conversationusageinfo.py | 2 +- src/mistralai/models/deltamessage.py | 2 +- src/mistralai/models/documenturlchunk.py | 2 +- src/mistralai/models/embeddingrequest.py | 2 +- src/mistralai/models/eventout.py | 2 +- .../models/files_api_routes_list_filesop.py | 2 +- src/mistralai/models/fileschema.py | 2 +- src/mistralai/models/fimcompletionrequest.py | 2 +- .../models/fimcompletionstreamrequest.py | 2 +- src/mistralai/models/ftmodelcard.py | 2 +- src/mistralai/models/functioncallentry.py | 2 +- src/mistralai/models/functionresultentry.py | 2 +- src/mistralai/models/githubrepositoryin.py | 2 +- src/mistralai/models/githubrepositoryout.py | 2 +- src/mistralai/models/imageurl.py | 2 +- 
src/mistralai/models/jobin.py | 2 +- src/mistralai/models/jobmetadataout.py | 2 +- .../jobs_api_routes_batch_get_batch_jobsop.py | 2 +- ...utes_fine_tuning_get_fine_tuning_jobsop.py | 2 +- src/mistralai/models/jsonschema.py | 2 +- src/mistralai/models/legacyjobmetadataout.py | 2 +- src/mistralai/models/messageinputentry.py | 2 +- src/mistralai/models/messageoutputentry.py | 2 +- src/mistralai/models/messageoutputevent.py | 2 +- src/mistralai/models/metricout.py | 2 +- src/mistralai/models/modelconversation.py | 2 +- src/mistralai/models/ocrimageobject.py | 2 +- src/mistralai/models/ocrpageobject.py | 2 +- src/mistralai/models/ocrrequest.py | 2 +- src/mistralai/models/ocrresponse.py | 2 +- src/mistralai/models/ocrusageinfo.py | 2 +- src/mistralai/models/responseformat.py | 2 +- src/mistralai/models/retrievefileout.py | 2 +- src/mistralai/models/toolexecutionentry.py | 2 +- src/mistralai/models/toolfilechunk.py | 2 +- src/mistralai/models/toolmessage.py | 2 +- src/mistralai/models/toolreferencechunk.py | 2 +- src/mistralai/models/updateftmodelin.py | 2 +- src/mistralai/models/uploadfileout.py | 2 +- src/mistralai/models/usermessage.py | 2 +- src/mistralai/models/wandbintegration.py | 2 +- src/mistralai/models/wandbintegrationout.py | 2 +- src/mistralai/models_.py | 12 + src/mistralai/ocr.py | 2 + src/mistralai/sdk.py | 108 +- src/mistralai/sdkconfiguration.py | 7 - src/mistralai/types/basemodel.py | 6 +- src/mistralai/utils/__init__.py | 176 +- src/mistralai/utils/datetimes.py | 23 + src/mistralai/utils/enums.py | 94 +- src/mistralai/utils/forms.py | 77 +- src/mistralai/utils/serializers.py | 35 +- 176 files changed, 2385 insertions(+), 1469 deletions(-) create mode 100644 src/mistralai/utils/datetimes.py diff --git a/.gitignore b/.gitignore index ab3be6d0..954adb7c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +**/__pycache__/ +**/.speakeasy/temp/ +**/.speakeasy/logs/ .vscode/ .speakeasy/reports README-PYPI.md diff --git a/.speakeasy/gen.lock 
b/.speakeasy/gen.lock index c568d4f3..d5ae1e64 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -3,10 +3,10 @@ id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: docChecksum: 9c8bd4d6bf675b159a80173b97c1265c docVersion: 1.0.0 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 - releaseVersion: 1.8.2 - configChecksum: 5024c28578f991eabb85310ad8df96b7 + speakeasyVersion: 1.568.2 + generationVersion: 2.634.2 + releaseVersion: 1.9.0 + configChecksum: a67788bf50c3de92f0ef16f385b615b3 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,7 +14,7 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.12.3 + core: 5.19.3 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 @@ -26,15 +26,15 @@ features: globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.0 + globalServerURLs: 3.1.1 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 nameOverrides: 3.0.1 nullables: 1.0.1 - openEnums: 1.0.0 + openEnums: 1.0.1 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 + sdkHooks: 1.1.0 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -623,6 +623,7 @@ generatedFiles: - src/mistralai/types/basemodel.py - src/mistralai/utils/__init__.py - src/mistralai/utils/annotations.py + - src/mistralai/utils/datetimes.py - src/mistralai/utils/enums.py - src/mistralai/utils/eventstreaming.py - src/mistralai/utils/forms.py @@ -645,25 +646,27 @@ examples: "422": application/json: {} retrieve_model_v1_models__model_id__get: - "": + speakeasy-default-retrieve-model-v1-models-model-id-get: parameters: path: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": 
{"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false}, "max_context_length": 32768} - "422": {} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + "422": + application/json: {} delete_model_v1_models__model_id__delete: - "": + speakeasy-default-delete-model-v1-models-model-id-delete: parameters: path: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} - "422": {} + "422": + application/json: {} jobs_api_routes_fine_tuning_update_fine_tuned_model: - "": + speakeasy-default-jobs-api-routes-fine-tuning-update-fine-tuned-model: parameters: path: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" @@ -671,9 +674,9 @@ examples: application/json: {} responses: "200": - application/json: {"id": "", "object": "model", "created": 597129, "owned_by": "", "root": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "fa7f0e74-32ce-447c-9b60-cc78135ddeb8", "model_type": "completion"} + application/json: {"id": "", "object": "model", "created": 124166, "owned_by": "", "root": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "c4f8ef9a-6612-4f49-88fa-a80eb8116e46", "model_type": "completion"} jobs_api_routes_fine_tuning_archive_fine_tuned_model: - "": + 
speakeasy-default-jobs-api-routes-fine-tuning-archive-fine-tuned-model: parameters: path: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" @@ -681,20 +684,175 @@ examples: "200": application/json: {"id": "", "object": "model", "archived": true} jobs_api_routes_fine_tuning_unarchive_fine_tuned_model: - "": + speakeasy-default-jobs-api-routes-fine-tuning-unarchive-fine-tuned-model: parameters: path: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": application/json: {"id": "", "object": "model", "archived": false} + agents_api_v1_conversations_start: + speakeasy-default-agents-api-v1-conversations-start: + requestBody: + application/json: {"inputs": "", "stream": false} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_list: + speakeasy-default-agents-api-v1-conversations-list: + parameters: + query: + page: 0 + page_size: 100 + responses: + "200": + application/json: [{"object": "conversation", "id": "", "created_at": "2025-11-20T22:30:47.754Z", "updated_at": "2025-08-05T08:36:20.296Z", "agent_id": ""}] + "422": + application/json: {} + agents_api_v1_conversations_get: + speakeasy-default-agents-api-v1-conversations-get: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation", "id": "", "created_at": "2023-06-02T14:00:42.201Z", "updated_at": "2024-10-06T17:16:50.325Z", "agent_id": ""} + "422": + application/json: {} + agents_api_v1_conversations_append: + speakeasy-default-agents-api-v1-conversations-append: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": [], "stream": false, "store": true, 
"handoff_execution": "server"} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_history: + speakeasy-default-agents-api-v1-conversations-history: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.history", "conversation_id": "", "entries": [{"object": "entry", "type": "tool.execution", "name": "code_interpreter"}]} + "422": + application/json: {} + agents_api_v1_conversations_messages: + speakeasy-default-agents-api-v1-conversations-messages: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.messages", "conversation_id": "", "messages": []} + "422": + application/json: {} + agents_api_v1_conversations_restart: + speakeasy-default-agents-api-v1-conversations-restart: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "from_entry_id": ""} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + agents_api_v1_conversations_start_stream: + speakeasy-default-agents-api-v1-conversations-start-stream: + requestBody: + application/json: {"inputs": [{"object": "entry", "type": "function.result", "tool_call_id": "", "result": ""}], "stream": true} + responses: + "422": + application/json: {} + agents_api_v1_conversations_append_stream: + speakeasy-default-agents-api-v1-conversations-append-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server"} + responses: + "422": + 
application/json: {} + agents_api_v1_conversations_restart_stream: + speakeasy-default-agents-api-v1-conversations-restart-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": [{"object": "entry", "type": "message.input", "role": "assistant", "content": ""}], "stream": true, "store": true, "handoff_execution": "server", "from_entry_id": ""} + responses: + "422": + application/json: {} + agents_api_v1_agents_create: + speakeasy-default-agents-api-v1-agents-create: + requestBody: + application/json: {"model": "LeBaron", "name": ""} + responses: + "200": + application/json: {"model": "Ranchero", "name": "", "object": "agent", "id": "", "version": 316961, "created_at": "2025-03-26T19:00:51.430Z", "updated_at": "2023-04-28T15:08:02.110Z"} + "422": + application/json: {} + agents_api_v1_agents_list: + speakeasy-default-agents-api-v1-agents-list: + parameters: + query: + page: 0 + page_size: 20 + responses: + "200": + application/json: [{"model": "Impala", "name": "", "object": "agent", "id": "", "version": 43153, "created_at": "2024-04-26T15:54:09.954Z", "updated_at": "2024-02-11T18:27:55.607Z"}] + "422": + application/json: {} + agents_api_v1_agents_get: + speakeasy-default-agents-api-v1-agents-get: + parameters: + path: + agent_id: "" + responses: + "200": + application/json: {"model": "Silverado", "name": "", "object": "agent", "id": "", "version": 845972, "created_at": "2025-08-21T03:10:48.135Z", "updated_at": "2024-11-11T17:15:57.309Z"} + "422": + application/json: {} + agents_api_v1_agents_update: + speakeasy-default-agents-api-v1-agents-update: + parameters: + path: + agent_id: "" + requestBody: + application/json: {} + responses: + "200": + application/json: {"model": "Model X", "name": "", "object": "agent", "id": "", "version": 799821, "created_at": "2025-10-20T17:35:08.067Z", "updated_at": "2023-11-16T08:47:13.265Z"} + "422": + application/json: {} + agents_api_v1_agents_update_version: + 
speakeasy-default-agents-api-v1-agents-update-version: + parameters: + path: + agent_id: "" + query: + version: 157995 + responses: + "200": + application/json: {"model": "XTS", "name": "", "object": "agent", "id": "", "version": 310764, "created_at": "2023-05-08T23:29:06.216Z", "updated_at": "2023-05-16T19:20:05.735Z"} + "422": + application/json: {} files_api_routes_upload_file: speakeasy-default-files-api-routes-upload-file: requestBody: - multipart/form-data: {"file": {}} + multipart/form-data: {"file": "x-file: example.file"} responses: "200": - application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_request", "source": "repository"} + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "batch_result", "source": "upload"} files_api_routes_list_files: speakeasy-default-files-api-routes-list-files: parameters: @@ -703,20 +861,20 @@ examples: page_size: 100 responses: "200": - application/json: {"data": [{"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "batch_result", "source": "upload"}, {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "pretrain", "source": "repository"}, {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "pretrain", "source": "mistral"}], "object": "", "total": 86140} + application/json: {"data": [{"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, 
"created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_error", "source": "upload"}], "object": "", "total": 999335} files_api_routes_retrieve_file: speakeasy-default-files-api-routes-retrieve-file: parameters: path: - file_id: "" + file_id: "f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6" responses: "200": - application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_error", "source": "upload", "deleted": true} + application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "instruct", "source": "repository", "deleted": false} files_api_routes_delete_file: speakeasy-default-files-api-routes-delete-file: parameters: path: - file_id: "" + file_id: "3b6d45eb-e30b-416f-8019-f47e2e93d930" responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "deleted": false} @@ -724,7 +882,7 @@ examples: speakeasy-default-files-api-routes-download-file: parameters: path: - file_id: "" + file_id: "f8919994-a4a1-46b2-8b5b-06335a4300ce" responses: "200": application/octet-stream: "x-file: example.file" @@ -732,12 +890,12 @@ examples: speakeasy-default-files-api-routes-get-signed-url: parameters: path: - file_id: "" + file_id: "06a020ab-355c-49a6-b19d-304b7c01699f" query: expiry: 24 responses: "200": - application/json: {"url": "https://scornful-daughter.com/"} + application/json: {"url": "https://knotty-birdcage.net/"} jobs_api_routes_fine_tuning_get_fine_tuning_jobs: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-jobs: parameters: @@ -747,11 +905,11 @@ examples: created_by_me: false responses: "200": - application/json: {"object": 
"list", "total": 768578} + application/json: {"object": "list", "total": 843585} jobs_api_routes_fine_tuning_create_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-create-fine-tuning-job: requestBody: - application/json: {"model": "Fiesta", "invalid_sample_skip_percentage": 0, "hyperparameters": {"learning_rate": 0.0001}} + application/json: {"model": "Camaro", "invalid_sample_skip_percentage": 0, "hyperparameters": {"learning_rate": 0.0001}} responses: "200": application/json: {"expected_duration_seconds": 220, "cost": 10, "cost_currency": "EUR", "train_tokens_per_step": 131072, "train_tokens": 1310720, "data_tokens": 305375, "deprecated": true, "details": "", "epochs": 4.2922, "training_steps": 10, "object": "job.metadata"} @@ -759,26 +917,26 @@ examples: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-job: parameters: path: - job_id: "b888f774-3e7c-4135-a18c-6b985523c4bc" + job_id: "c167a961-ffca-4bcf-93ac-6169468dd389" responses: "200": - application/json: {"id": "888f7743-e7c1-4351-b8c6-b985523c4bcb", "auto_start": true, "model": "2", "status": "CANCELLATION_REQUESTED", "created_at": 444836, "modified_at": 424256, "training_files": [], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}, "checkpoints": [{"metrics": {}, "step_number": 550563, "created_at": 1716963433}]} + application/json: {"id": "babac92a-96fa-48c4-931c-f6f97e1bf24c", "auto_start": false, "model": "Spyder", "status": "FAILED", "created_at": 232438, "modified_at": 32259, "training_files": ["7a95c5a0-399d-4665-84c8-deab766d22dc"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [], "weight": 5651, "loss_function": "single_class"}]} jobs_api_routes_fine_tuning_cancel_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-cancel-fine-tuning-job: parameters: path: - job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" + job_id: 
"6188a2f6-7513-4e0f-89cc-3f8088523a49" responses: "200": - application/json: {"id": "f7135029-2331-4c6e-bbdc-570b7edb4966", "auto_start": true, "model": "A4", "status": "CANCELLATION_REQUESTED", "created_at": 703131, "modified_at": 929437, "training_files": ["e3e32613-5744-4d82-8f3f-d6b3c11eb45e"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "checkpoints": [{"metrics": {}, "step_number": 466651, "created_at": 1716963433}], "classifier_targets": [{"name": "", "labels": ["", "", ""], "weight": 687.66, "loss_function": "single_class"}, {"name": "", "labels": ["", "", ""], "weight": 8470.22, "loss_function": "multi_class"}]} + application/json: {"id": "770b9cc0-1ab6-44de-a816-67010644e9fb", "auto_start": false, "model": "Volt", "status": "CANCELLATION_REQUESTED", "created_at": 546404, "modified_at": 180081, "training_files": ["45e621c6-ac30-4133-b6d1-fc0d1fe24c9f"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [""], "weight": 1298.58, "loss_function": "multi_class"}]} jobs_api_routes_fine_tuning_start_fine_tuning_job: speakeasy-default-jobs-api-routes-fine-tuning-start-fine-tuning-job: parameters: path: - job_id: "0bf0f9e6-c3e5-4d61-aac8-0e36dcac0dfc" + job_id: "56553e4d-0679-471e-b9ac-59a77d671103" responses: "200": - application/json: {"id": "bf0f9e6c-3e5d-461a-ac80-e36dcac0dfc8", "auto_start": true, "model": "Explorer", "status": "RUNNING", "created_at": 961967, "modified_at": 914446, "training_files": ["82c4783e-31ec-471d-bbed-4c90a1b0dd73"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "checkpoints": [{"metrics": {}, "step_number": 590686, "created_at": 1716963433}], "classifier_targets": [{"name": "", "labels": [""], "weight": 5494.15, "loss_function": "single_class"}, {"name": "", "labels": ["", ""], "weight": 7945.15, "loss_function": "single_class"}]} + application/json: {"id": 
"68ad461a-676e-47fe-a07e-15e38f5082b5", "auto_start": false, "model": "Grand Cherokee", "status": "STARTED", "created_at": 134515, "modified_at": 192651, "training_files": ["39dabc3d-15eb-49ac-a549-69973f33acee"], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}} jobs_api_routes_batch_get_batch_jobs: speakeasy-default-jobs-api-routes-batch-get-batch-jobs: parameters: @@ -788,37 +946,37 @@ examples: created_by_me: false responses: "200": - application/json: {"object": "list", "total": 768578} + application/json: {"object": "list", "total": 186589} jobs_api_routes_batch_create_batch_job: speakeasy-default-jobs-api-routes-batch-create-batch-job: requestBody: - application/json: {"input_files": ["a621cf02-1cd9-4cf5-8403-315211a509a3"], "endpoint": "/v1/fim/completions", "model": "2", "timeout_hours": 24} + application/json: {"input_files": ["fe3343a2-3b8d-404b-ba32-a78dede2614a"], "endpoint": "/v1/moderations", "model": "Altima", "timeout_hours": 24} responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["8e774c2b-ecc3-4769-b177-5e024985613d", "0ee803d5-6a1d-4f94-836b-fd39494798bc"], "endpoint": "", "model": "Impala", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}, {"message": "", "count": 1}], "status": "RUNNING", "created_at": 770370, "total_requests": 350586, "completed_requests": 95214, "succeeded_requests": 930830, "failed_requests": 617761} + application/json: {"id": "", "object": "batch", "input_files": ["7b2553d8-e17f-4df5-a862-a1678f6b5271", "8c618d9f-7d82-42ba-a284-d57d84f50a58", "c042f996-e842-441d-ae47-4e0850334e41"], "endpoint": "", "model": "Taurus", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 395527, "total_requests": 166919, "completed_requests": 258552, "succeeded_requests": 480980, "failed_requests": 684176} jobs_api_routes_batch_get_batch_job: speakeasy-default-jobs-api-routes-batch-get-batch-job: parameters: path: - job_id: 
"b888f774-3e7c-4135-a18c-6b985523c4bc" + job_id: "4017dc9f-b629-42f4-9700-8c681b9e7f0f" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["50f76228-1da8-44bc-b661-c8a99c6b71b6", "cd62b8f7-112a-4af0-bab4-e43b4cca3716", "620807aa-1f8c-4f05-ad89-d58ee381f6b4"], "endpoint": "", "model": "Golf", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "SUCCESS", "created_at": 790898, "total_requests": 55097, "completed_requests": 578320, "succeeded_requests": 856562, "failed_requests": 328633} + application/json: {"id": "", "object": "batch", "input_files": ["11b83f16-f2f9-4de4-a81f-203fff419c99"], "endpoint": "", "model": "Accord", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 900958, "total_requests": 458292, "completed_requests": 184893, "succeeded_requests": 104800, "failed_requests": 836210} jobs_api_routes_batch_cancel_batch_job: speakeasy-default-jobs-api-routes-batch-cancel-batch-job: parameters: path: - job_id: "0f713502-9233-41c6-9ebd-c570b7edb496" + job_id: "4fb29d1c-535b-4f0a-a1cb-2167f86da569" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["50fbe4e3-e326-4135-8744-d82f3fd6b3c1", "eb45e247-ac10-4cdc-8311-2f7cc9241230", "4afaa0f8-4bd4-4945-9116-89d07a64aa72"], "endpoint": "", "model": "Alpine", "errors": [{"message": "", "count": 1}, {"message": "", "count": 1}], "status": "QUEUED", "created_at": 709109, "total_requests": 275794, "completed_requests": 158938, "succeeded_requests": 12381, "failed_requests": 11864} + application/json: {"id": "", "object": "batch", "input_files": ["8fd9d88a-66be-43fd-a816-ba509ca3ca85"], "endpoint": "", "model": "PT Cruiser", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 608251, "total_requests": 12693, "completed_requests": 203340, "succeeded_requests": 189291, "failed_requests": 969057} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: 
application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} "422": application/json: {} stream_chat: @@ -828,14 +986,13 @@ examples: responses: "422": application/json: {} - "200": {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, 
"completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} "422": application/json: {} stream_fim: @@ -845,14 +1002,13 @@ examples: responses: "422": application/json: {} - "200": {} agents_completion_v1_agents_completions_post: speakeasy-default-agents-completion-v1-agents-completions-post: requestBody: application/json: {"stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "agent_id": ""} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} "422": application/json: {} stream_agents: @@ -862,7 +1018,6 @@ examples: responses: "422": application/json: {} - "200": {} embeddings_v1_embeddings_post: speakeasy-default-embeddings-v1-embeddings-post: requestBody: @@ -875,214 +1030,47 @@ examples: moderations_v1_moderations_post: speakeasy-default-moderations-v1-moderations-post: requestBody: - application/json: {"model": "V90", "input": [""]} + application/json: {"model": "Durango", "input": ["", ""]} responses: "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "V90", "results": [{}]} - "422": - application/json: {} - moderations_chat_v1_chat_moderations_post: - speakeasy-default-moderations-chat-v1-chat-moderations-post: - 
requestBody: - application/json: {"model": "Roadster", "input": [[{"content": "", "role": "tool"}, {"content": "", "role": "tool"}, {"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}]], "truncate_for_context_length": false} - responses: - "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef"} - "422": - application/json: {} - ocr_v1_ocr_post: - speakeasy-default-ocr-v1-ocr-post: - requestBody: - application/json: {"model": "Focus", "document": {"document_url": "https://dutiful-horst.org", "type": "document_url"}} - responses: - "200": - application/json: {"pages": [], "model": "A4", "usage_info": {"pages_processed": 442675}} + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Corvette", "results": [{}]} "422": application/json: {} chat_moderations_v1_chat_moderations_post: speakeasy-default-chat-moderations-v1-chat-moderations-post: requestBody: - application/json: {"input": [[{"content": [], "role": "system"}, {"content": "", "role": "tool"}], [{"prefix": false, "role": "assistant"}, {"content": "", "role": "user"}, {"prefix": false, "role": "assistant"}], [{"content": "", "role": "system"}, {"content": [{"image_url": "https://fatherly-colon.name", "type": "image_url"}], "role": "user"}, {"content": "", "role": "user"}]], "model": "Model Y"} + application/json: {"input": [{"content": "", "role": "tool"}], "model": "LeBaron"} responses: "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Model Y", "results": [{}, {}]} + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Explorer", "results": [{}]} "422": application/json: {} classifications_v1_classifications_post: speakeasy-default-classifications-v1-classifications-post: requestBody: - application/json: {"model": "Altima", "input": ""} 
+ application/json: {"model": "Silverado", "input": [""]} responses: "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Camaro", "results": [{"key": {"scores": {"key": 6063.42, "key1": 1739.44}}, "key1": {"scores": {}}}, {"key": {"scores": {"key": 2625.67}}, "key1": {"scores": {}}}]} + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "ATS", "results": [{}, {"key": {"scores": {"key": 2080.19}}}]} "422": application/json: {} chat_classifications_v1_chat_classifications_post: speakeasy-default-chat-classifications-v1-chat-classifications-post: requestBody: - application/json: {"model": "Fortwo", "input": [{"messages": [{"content": "", "role": "tool"}]}, {"messages": []}]} - responses: - "200": - application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "CX-9", "results": [{"key": {"scores": {"key": 4386.53, "key1": 2974.85}}, "key1": {"scores": {"key": 7100.52, "key1": 480.47}}}]} - "422": - application/json: {} - agents_api_v1_conversations_start: - speakeasy-default-agents-api-v1-conversations-start: - requestBody: - application/json: {"inputs": "", "stream": false} - responses: - "200": - application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}, {"object": "entry", "type": "message.output", "role": "assistant", "content": [{"type": "tool_reference", "tool": "web_search_premium", "title": ""}, {"document_url": "https://unrealistic-fund.org/", "type": "document_url"}]}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} - "422": - application/json: {} - agents_api_v1_conversations_list: - speakeasy-default-agents-api-v1-conversations-list: - parameters: - query: - page: 0 - page_size: 100 - responses: - "200": - application/json: [{"object": "conversation", 
"id": "", "created_at": "2025-01-13T10:26:00.433Z", "updated_at": "2023-07-14T18:23:27.528Z", "agent_id": ""}, {"object": "conversation", "id": "", "created_at": "2023-06-17T12:14:27.999Z", "updated_at": "2024-11-27T13:02:27.296Z", "model": "LeBaron"}, {"object": "conversation", "id": "", "created_at": "2025-02-26T06:14:46.641Z", "updated_at": "2023-04-05T09:49:38.010Z", "model": "A8"}] - "422": - application/json: {} - agents_api_v1_conversations_get: - speakeasy-default-agents-api-v1-conversations-get: - parameters: - path: - conversation_id: "" - responses: - "200": - application/json: {"object": "conversation", "id": "", "created_at": "2024-09-04T11:33:52.011Z", "updated_at": "2024-08-19T11:11:04.610Z", "agent_id": ""} - "422": - application/json: {} - agents_api_v1_conversations_append: - speakeasy-default-agents-api-v1-conversations-append: - parameters: - path: - conversation_id: "" - requestBody: - application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server"} - responses: - "200": - application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}, {"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} - "422": - application/json: {} - agents_api_v1_conversations_history: - speakeasy-default-agents-api-v1-conversations-history: - parameters: - path: - conversation_id: "" - responses: - "200": - application/json: {"object": "conversation.history", "conversation_id": "", "entries": [{"object": "entry", "type": "message.output", "role": "assistant", "content": [{"type": "tool_file", "tool": "web_search", "file_id": ""}]}]} - "422": - application/json: {} - agents_api_v1_conversations_messages: - speakeasy-default-agents-api-v1-conversations-messages: - 
parameters: - path: - conversation_id: "" - responses: - "200": - application/json: {"object": "conversation.messages", "conversation_id": "", "messages": [{"object": "entry", "type": "message.input", "role": "assistant", "content": ""}, {"object": "entry", "type": "message.input", "role": "assistant", "content": [{"document_url": "https://black-and-white-sauerkraut.biz", "type": "document_url"}, {"type": "tool_file", "tool": "code_interpreter", "file_id": ""}, {"image_url": "https://emotional-couch.org", "type": "image_url"}]}, {"object": "entry", "type": "message.input", "role": "assistant", "content": ""}]} - "422": - application/json: {} - agents_api_v1_conversations_restart: - speakeasy-default-agents-api-v1-conversations-restart: - parameters: - path: - conversation_id: "" - requestBody: - application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "from_entry_id": ""} - responses: - "200": - application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "tool.execution", "name": "image_generation"}, {"object": "entry", "type": "tool.execution", "name": "web_search_premium"}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} - "422": - application/json: {} - agents_api_v1_conversations_start_stream: - speakeasy-default-agents-api-v1-conversations-start-stream: - requestBody: - application/json: {"inputs": "", "stream": true} - responses: - "422": - application/json: {} - "200": {} - agents_api_v1_conversations_append_stream: - speakeasy-default-agents-api-v1-conversations-append-stream: - parameters: - path: - conversation_id: "" - requestBody: - application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server"} - responses: - "422": - application/json: {} - "200": {} - agents_api_v1_conversations_restart_stream: - 
speakeasy-default-agents-api-v1-conversations-restart-stream: - parameters: - path: - conversation_id: "" - requestBody: - application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "from_entry_id": ""} - responses: - "422": - application/json: {} - "200": {} - agents_api_v1_agents_create: - speakeasy-default-agents-api-v1-agents-create: - requestBody: - application/json: {"model": "Fiesta", "name": ""} - responses: - "200": - application/json: {"model": "LeBaron", "name": "", "object": "agent", "id": "", "version": 417458, "created_at": "2023-05-28T06:20:22.766Z", "updated_at": "2023-03-17T15:39:20.911Z"} - "422": - application/json: {} - agents_api_v1_agents_list: - speakeasy-default-agents-api-v1-agents-list: - parameters: - query: - page: 0 - page_size: 20 - responses: - "200": - application/json: [{"model": "Golf", "name": "", "object": "agent", "id": "", "version": 678317, "created_at": "2023-07-14T18:23:27.528Z", "updated_at": "2023-09-09T18:28:08.953Z"}, {"model": "Aventador", "name": "", "object": "agent", "id": "", "version": 635532, "created_at": "2024-12-01T18:25:37.169Z", "updated_at": "2023-01-20T06:21:22.156Z"}, {"model": "Model T", "name": "", "object": "agent", "id": "", "version": 86140, "created_at": "2023-03-17T01:57:00.187Z", "updated_at": "2025-01-24T00:05:25.844Z"}] - "422": - application/json: {} - agents_api_v1_agents_get: - speakeasy-default-agents-api-v1-agents-get: - parameters: - path: - agent_id: "" + application/json: {"model": "Camry", "input": [{"messages": [{"content": "", "role": "system"}]}]} responses: "200": - application/json: {"model": "Model S", "name": "", "object": "agent", "id": "", "version": 558834, "created_at": "2024-08-19T11:11:04.610Z", "updated_at": "2024-07-25T06:33:15.810Z"} + application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Altima", "results": [{}, {"key": {"scores": {"key": 1360.53, "key1": 5946.42}}}, {"key": {"scores": {"key": 1360.53, "key1": 
5946.42}}}]} "422": application/json: {} - agents_api_v1_agents_update: - speakeasy-default-agents-api-v1-agents-update: - parameters: - path: - agent_id: "" + ocr_v1_ocr_post: + speakeasy-default-ocr-v1-ocr-post: requestBody: - application/json: {} - responses: - "200": - application/json: {"model": "Sentra", "name": "", "object": "agent", "id": "", "version": 597129, "created_at": "2024-01-13T16:52:57.274Z", "updated_at": "2025-12-22T15:27:45.882Z"} - "422": - application/json: {} - agents_api_v1_agents_update_version: - speakeasy-default-agents-api-v1-agents-update-version: - parameters: - path: - agent_id: "" - query: - version: 193920 + application/json: {"model": "CX-9", "document": {"image_url": {"url": "https://measly-scrap.com"}, "type": "image_url"}} responses: "200": - application/json: {"model": "Mercielago", "name": "", "object": "agent", "id": "", "version": 253661, "created_at": "2023-02-14T22:44:06.703Z", "updated_at": "2025-12-15T06:22:04.120Z"} + application/json: {"pages": [{"index": 944919, "markdown": "", "images": [], "dimensions": {"dpi": 984283, "height": 453411, "width": 398292}}], "model": "Wrangler", "usage_info": {"pages_processed": 47064}} "422": application/json: {} -examplesVersion: 1.0.0 +examplesVersion: 1.0.2 generatedTests: {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 77710816..2c46f3c0 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -4,6 +4,7 @@ generation: maintainOpenAPIOrder: true usageSnippets: optionalPropertyRendering: withExample + sdkInitStyle: constructor useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true @@ -11,17 +12,23 @@ generation: parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true securityFeb2025: false + sharedErrorComponentsApr2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + tests: + generateTests: true + generateNewTests: false + skipResponseBodyAssertions: 
false python: - version: 1.8.2 + version: 1.9.0 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 authors: - Mistral + baseErrorName: MistralError clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API. @@ -44,8 +51,10 @@ python: inputModelSuffix: input maxMethodParams: 15 methodArguments: infer-optional-args + moduleName: "" outputModelSuffix: output packageName: mistralai + pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index c618ac1d..12dfd152 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.517.3 +speakeasyVersion: 1.568.2 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure @@ -18,7 +18,6 @@ sources: sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 tags: - latest - - speakeasy-sdk-regen-1749573609 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -40,20 +39,20 @@ targets: sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:dc4396ba994048a9f31c008dced1a46a9e54d89973e9608039a7bc37b1052957 + codeSamplesRevisionDigest: sha256:b631243aae349ddebec1b984874a8e1d5b40e67d6229a199a3d5e63ba69d1538 workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.517.3 + speakeasyVersion: 1.568.2 sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:sha256:c5931a7e0cc2db844149d71db57dfc2178665f0400bc26c90ee113795ea2872f mistral-google-cloud-source: inputs: - - location: 
registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 targets: mistralai-azure-sdk: target: python diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index d448108d..fe32bb3f 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,15 +1,15 @@ workflowVersion: 1.0.0 -speakeasyVersion: 1.517.3 +speakeasyVersion: 1.568.2 sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main targets: mistralai-azure-sdk: target: python diff --git a/README.md b/README.md index b8926d7b..ee0b1d08 100644 --- a/README.md +++ b/README.md @@ -142,7 +142,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -168,7 +168,7 @@ async def main(): "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -244,7 +244,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -270,7 +270,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -547,7 +547,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start_stream(inputs="") + res = mistral.beta.conversations.start_stream(inputs=[ + { + "object": "entry", + "type": "function.result", + "tool_call_id": "", + "result": "", + }, + ], stream=True) with res as event_stream: for event in event_stream: diff --git a/USAGE.md b/USAGE.md index fa3a77de..d2bba404 100644 --- a/USAGE.md +++ b/USAGE.md @@ -18,7 +18,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -44,7 +44,7 @@ async def main(): "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -120,7 +120,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -146,7 +146,7 @@ async def main(): "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) diff --git a/docs/models/agent.md b/docs/models/agent.md index 9a64fb68..686fae75 100644 --- a/docs/models/agent.md +++ b/docs/models/agent.md @@ -5,15 +5,15 @@ | Field | Type | Required | Description | | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `version` | *int* | :heavy_check_mark: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | | `tools` | List[[models.AgentTools](../models/agenttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.AgentObject]](../models/agentobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `object` | [Optional[models.AgentObject]](../models/agentobject.md) | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversation.md b/docs/models/agentconversation.md index 93dde728..772cc80e 100644 --- a/docs/models/agentconversation.md +++ b/docs/models/agentconversation.md @@ -5,10 +5,10 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. 
| +| `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | -| `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md index 324ff25c..34060d9a 100644 --- a/docs/models/agentcreationrequest.md +++ b/docs/models/agentcreationrequest.md @@ -5,10 +5,10 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | | `tools` | List[[models.AgentCreationRequestTools](../models/agentcreationrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffdoneevent.md b/docs/models/agenthandoffdoneevent.md index a8a74ec0..c0039f41 100644 --- a/docs/models/agenthandoffdoneevent.md +++ b/docs/models/agenthandoffdoneevent.md @@ -5,9 +5,9 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_name` | *str* | :heavy_check_mark: | N/A | | `type` | [Optional[models.AgentHandoffDoneEventType]](../models/agenthandoffdoneeventtype.md) | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffentry.md b/docs/models/agenthandoffentry.md index 327f8048..8831b0eb 100644 --- a/docs/models/agenthandoffentry.md +++ 
b/docs/models/agenthandoffentry.md @@ -5,12 +5,12 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | -| `next_agent_id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_name` | *str* | :heavy_check_mark: | N/A | | `object` | [Optional[models.AgentHandoffEntryObject]](../models/agenthandoffentryobject.md) | :heavy_minus_sign: | N/A | | `type` | [Optional[models.AgentHandoffEntryType]](../models/agenthandoffentrytype.md) | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedevent.md b/docs/models/agenthandoffstartedevent.md index f99ed45d..035cd02a 100644 --- a/docs/models/agenthandoffstartedevent.md +++ b/docs/models/agenthandoffstartedevent.md @@ -5,9 +5,9 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | | `type` | [Optional[models.AgentHandoffStartedEventType]](../models/agenthandoffstartedeventtype.md) | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 8ace69d9..e4a3f849 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -5,12 +5,11 @@ | Field | Type | Required | Description | Example | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | @@ -19,4 +18,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index 0bab012c..bd55190b 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -5,12 +5,11 @@ | Field | Type | Required | Description | Example | | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | @@ -19,4 +18,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index 0bdbb65f..a2a19fcb 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -6,10 +6,10 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | -| `capabilities` | [models.ModelCapabilities](../models/modelcapabilities.md) | :heavy_check_mark: | N/A | | `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | `created` | *Optional[int]* | :heavy_minus_sign: | N/A | | `owned_by` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.ModelCapabilities](../models/modelcapabilities.md) | :heavy_check_mark: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/batchjobout.md b/docs/models/batchjobout.md index d79d9a27..16374467 100644 --- a/docs/models/batchjobout.md +++ b/docs/models/batchjobout.md @@ -6,9 +6,13 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.BatchJobOutObject]](../models/batchjoboutobject.md) | :heavy_minus_sign: | N/A | | `input_files` | List[*str*] | :heavy_check_mark: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `endpoint` | 
*str* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | +| `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | | `status` | [models.BatchJobStatus](../models/batchjobstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | @@ -16,9 +20,5 @@ | `completed_requests` | *int* | :heavy_check_mark: | N/A | | `succeeded_requests` | *int* | :heavy_check_mark: | N/A | | `failed_requests` | *int* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.BatchJobOutObject]](../models/batchjoboutobject.md) | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `started_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `completed_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobsout.md b/docs/models/batchjobsout.md index 3104118c..a76cfdcc 100644 --- a/docs/models/batchjobsout.md +++ b/docs/models/batchjobsout.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `total` | *int* | :heavy_check_mark: | N/A | | `data` | List[[models.BatchJobOut](../models/batchjobout.md)] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.BatchJobsOutObject]](../models/batchjobsoutobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `object` | 
[Optional[models.BatchJobsOutObject]](../models/batchjobsoutobject.md) | :heavy_minus_sign: | N/A | +| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index a850b5b8..ecbcad39 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -6,13 +6,13 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index cf286cda..7f73a269 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -6,13 +6,13 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | diff --git a/docs/models/classifierdetailedjobout.md b/docs/models/classifierdetailedjobout.md index 99227c01..ccc88f89 100644 --- a/docs/models/classifierdetailedjobout.md +++ b/docs/models/classifierdetailedjobout.md @@ -12,8 +12,6 @@ | `created_at` | *int* | :heavy_check_mark: | N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | -| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | | `object` | [Optional[models.ClassifierDetailedJobOutObject]](../models/classifierdetailedjoboutobject.md) | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | @@ -22,5 +20,7 @@ | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | | `job_type` | [Optional[models.ClassifierDetailedJobOutJobType]](../models/classifierdetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | +| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | | `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| -| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md index 406102cf..506af14e 100644 --- a/docs/models/classifierftmodelout.md +++ b/docs/models/classifierftmodelout.md @@ -6,16 +6,16 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | :heavy_minus_sign: | N/A | | `created` | *int* | :heavy_check_mark: | N/A | | `owned_by` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | | `archived` | *bool* | :heavy_check_mark: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | 
[models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | | `model_type` | [Optional[models.ClassifierFTModelOutModelType]](../models/classifierftmodeloutmodeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifierjobout.md b/docs/models/classifierjobout.md index 5fa290c1..aa1d3ca9 100644 --- a/docs/models/classifierjobout.md +++ b/docs/models/classifierjobout.md @@ -12,7 +12,6 @@ | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | -| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | | `object` | [Optional[models.ClassifierJobOutObject]](../models/classifierjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | @@ -20,4 +19,5 @@ | `integrations` | List[[models.ClassifierJobOutIntegrations](../models/classifierjoboutintegrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. 
| | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.ClassifierJobOutJobType]](../models/classifierjoboutjobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). | \ No newline at end of file +| `job_type` | [Optional[models.ClassifierJobOutJobType]](../models/classifierjoboutjobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). | +| `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionchunk.md b/docs/models/completionchunk.md index b8ae6a09..7f8ab5e6 100644 --- a/docs/models/completionchunk.md +++ b/docs/models/completionchunk.md @@ -6,8 +6,8 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | | `id` | *str* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | -| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | | `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | `created` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | 
List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completiondetailedjobout.md b/docs/models/completiondetailedjobout.md index b42dd419..84613080 100644 --- a/docs/models/completiondetailedjobout.md +++ b/docs/models/completiondetailedjobout.md @@ -12,7 +12,6 @@ | `created_at` | *int* | :heavy_check_mark: | N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | -| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | | `object` | [Optional[models.CompletionDetailedJobOutObject]](../models/completiondetailedjoboutobject.md) | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | @@ -21,6 +20,7 @@ | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | | `job_type` | [Optional[models.CompletionDetailedJobOutJobType]](../models/completiondetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | +| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | | `repositories` | List[[models.CompletionDetailedJobOutRepositories](../models/completiondetailedjoboutrepositories.md)] | :heavy_minus_sign: | N/A | | `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| | `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionftmodelout.md b/docs/models/completionftmodelout.md index ca1c5289..f1e22b88 100644 --- a/docs/models/completionftmodelout.md +++ b/docs/models/completionftmodelout.md @@ -6,15 +6,15 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | +| `object` | [Optional[models.CompletionFTModelOutObject]](../models/completionftmodeloutobject.md) | :heavy_minus_sign: | N/A | | `created` | *int* | :heavy_check_mark: | N/A | | `owned_by` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | | `archived` | *bool* | :heavy_check_mark: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.CompletionFTModelOutObject]](../models/completionftmodeloutobject.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | | `model_type` | [Optional[models.ModelType]](../models/modeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/completionjobout.md b/docs/models/completionjobout.md index 7f30f58c..cb471746 100644 --- a/docs/models/completionjobout.md +++ b/docs/models/completionjobout.md @@ -12,7 +12,6 @@ | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | -| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | | `object` | [Optional[models.CompletionJobOutObject]](../models/completionjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | @@ -21,4 +20,5 @@ | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | | `job_type` | [Optional[models.JobType]](../models/jobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). 
| +| `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | | `repositories` | List[[models.Repositories](../models/repositories.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationhistory.md b/docs/models/conversationhistory.md index 8bcef1de..ebb1d513 100644 --- a/docs/models/conversationhistory.md +++ b/docs/models/conversationhistory.md @@ -7,6 +7,6 @@ Retrieve all entries in a conversation. | Field | Type | Required | Description | | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| `object` | [Optional[models.ConversationHistoryObject]](../models/conversationhistoryobject.md) | :heavy_minus_sign: | N/A | | `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `entries` | List[[models.Entries](../models/entries.md)] | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ConversationHistoryObject]](../models/conversationhistoryobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `entries` | List[[models.Entries](../models/entries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationmessages.md b/docs/models/conversationmessages.md index f6a5569f..c3f00979 100644 --- a/docs/models/conversationmessages.md +++ b/docs/models/conversationmessages.md @@ -7,6 +7,6 @@ Similar to the conversation history but only keep the messages | Field | Type | Required | Description | | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `object` | [Optional[models.ConversationMessagesObject]](../models/conversationmessagesobject.md) | :heavy_minus_sign: | N/A | | `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `messages` | List[[models.MessageEntries](../models/messageentries.md)] | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ConversationMessagesObject]](../models/conversationmessagesobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `messages` | List[[models.MessageEntries](../models/messageentries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationresponse.md b/docs/models/conversationresponse.md index 3309a08b..38cdadd0 100644 --- a/docs/models/conversationresponse.md +++ b/docs/models/conversationresponse.md @@ -7,7 +7,7 @@ The response after appending new entries to the conversation. 
| Field | Type | Required | Description | | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `object` | [Optional[models.ConversationResponseObject]](../models/conversationresponseobject.md) | :heavy_minus_sign: | N/A | | `conversation_id` | *str* | :heavy_check_mark: | N/A | | `outputs` | List[[models.Outputs](../models/outputs.md)] | :heavy_check_mark: | N/A | -| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ConversationResponseObject]](../models/conversationresponseobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md index 15a6ead4..16786f6a 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -8,8 +8,8 @@ Request to restart a new conversation from a given entry in the conversation. 
| Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | | `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md index 30f3767c..23bf9851 100644 --- a/docs/models/conversationrestartstreamrequest.md +++ b/docs/models/conversationrestartstreamrequest.md @@ -8,8 +8,8 @@ Request to restart a new conversation from a given entry in the conversation. 
| Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | | `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file diff --git a/docs/models/documentlibrarytool.md b/docs/models/documentlibrarytool.md index bed4e2c5..82315f32 100644 --- a/docs/models/documentlibrarytool.md +++ b/docs/models/documentlibrarytool.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which 
to search. | -| `type` | [Optional[models.DocumentLibraryToolType]](../models/documentlibrarytooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.DocumentLibraryToolType]](../models/documentlibrarytooltype.md) | :heavy_minus_sign: | N/A | +| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which to search. | \ No newline at end of file diff --git a/docs/models/eventout.md b/docs/models/eventout.md index c6f69ada..d9202353 100644 --- a/docs/models/eventout.md +++ b/docs/models/eventout.md @@ -6,5 +6,5 @@ | Field | Type | Required | Description | | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | --------------------------------------------- | | `name` | *str* | :heavy_check_mark: | The name of the event. | -| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | -| `data` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `data` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) of the event. | \ No newline at end of file diff --git a/docs/models/fileschema.md b/docs/models/fileschema.md index 4fbcd718..9746a995 100644 --- a/docs/models/fileschema.md +++ b/docs/models/fileschema.md @@ -12,5 +12,5 @@ | `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. 
| files_upload.jsonl | | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | | `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/fimcompletionrequest.md b/docs/models/fimcompletionrequest.md index 7507b90c..7b785cf0 100644 --- a/docs/models/fimcompletionrequest.md +++ b/docs/models/fimcompletionrequest.md @@ -6,12 +6,12 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. 
| def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequest.md b/docs/models/fimcompletionstreamrequest.md index 6cc439c7..d49a6301 100644 --- a/docs/models/fimcompletionstreamrequest.md +++ b/docs/models/fimcompletionstreamrequest.md @@ -6,12 +6,12 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. 
| return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md index 1efeadb2..aaa5b401 100644 --- a/docs/models/ftmodelcard.md +++ b/docs/models/ftmodelcard.md @@ -8,12 +8,10 @@ Extra fields for fine-tuned models. | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | -| `capabilities` | [models.ModelCapabilities](../models/modelcapabilities.md) | :heavy_check_mark: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | | `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | `created` | *Optional[int]* | :heavy_minus_sign: | N/A | | `owned_by` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.ModelCapabilities](../models/modelcapabilities.md) | :heavy_check_mark: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | @@ -21,4 +19,6 @@ Extra fields for fine-tuned models. 
| `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `type` | [Optional[models.FTModelCardType]](../models/ftmodelcardtype.md) | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | | `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/function.md b/docs/models/function.md index a166b7bb..b2bdb3fe 100644 --- a/docs/models/function.md +++ b/docs/models/function.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | -| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functioncallentry.md b/docs/models/functioncallentry.md index 55665bad..fd3aa5c5 100644 --- a/docs/models/functioncallentry.md +++ b/docs/models/functioncallentry.md @@ -5,11 +5,11 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `tool_call_id` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `arguments` | 
[models.FunctionCallEntryArguments](../models/functioncallentryarguments.md) | :heavy_check_mark: | N/A | | `object` | [Optional[models.FunctionCallEntryObject]](../models/functioncallentryobject.md) | :heavy_minus_sign: | N/A | | `type` | [Optional[models.FunctionCallEntryType]](../models/functioncallentrytype.md) | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.FunctionCallEntryArguments](../models/functioncallentryarguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functioncallevent.md b/docs/models/functioncallevent.md index a5162090..c25679a5 100644 --- a/docs/models/functioncallevent.md +++ b/docs/models/functioncallevent.md @@ -5,10 +5,10 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `type` | [Optional[models.FunctionCallEventType]](../models/functioncalleventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | | `id` | *str* | 
:heavy_check_mark: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | | `tool_call_id` | *str* | :heavy_check_mark: | N/A | -| `arguments` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.FunctionCallEventType]](../models/functioncalleventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functionresultentry.md b/docs/models/functionresultentry.md index 5cdcf3eb..6df54d3d 100644 --- a/docs/models/functionresultentry.md +++ b/docs/models/functionresultentry.md @@ -5,10 +5,10 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `tool_call_id` | *str* | :heavy_check_mark: | N/A | -| `result` | *str* | :heavy_check_mark: | N/A | | `object` | [Optional[models.FunctionResultEntryObject]](../models/functionresultentryobject.md) | :heavy_minus_sign: | N/A | | `type` | [Optional[models.FunctionResultEntryType]](../models/functionresultentrytype.md) | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `id` 
| *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `result` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functiontool.md b/docs/models/functiontool.md index 1332febe..8c424593 100644 --- a/docs/models/functiontool.md +++ b/docs/models/functiontool.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.FunctionToolType]](../models/functiontooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.FunctionToolType]](../models/functiontooltype.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryin.md b/docs/models/githubrepositoryin.md index 7ae2fb4f..1584152b 100644 --- a/docs/models/githubrepositoryin.md +++ b/docs/models/githubrepositoryin.md @@ -5,9 +5,9 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `type` | [Optional[models.GithubRepositoryInType]](../models/githubrepositoryintype.md) | :heavy_minus_sign: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | | `owner` | *str* | :heavy_check_mark: | N/A | -| `token` | *str* | :heavy_check_mark: | N/A | -| `type` | 
[Optional[models.GithubRepositoryInType]](../models/githubrepositoryintype.md) | :heavy_minus_sign: | N/A | | `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `token` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryout.md b/docs/models/githubrepositoryout.md index 0f96736f..03f0b266 100644 --- a/docs/models/githubrepositoryout.md +++ b/docs/models/githubrepositoryout.md @@ -5,9 +5,9 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | [Optional[models.GithubRepositoryOutType]](../models/githubrepositoryouttype.md) | :heavy_minus_sign: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | | `owner` | *str* | :heavy_check_mark: | N/A | -| `commit_id` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.GithubRepositoryOutType]](../models/githubrepositoryouttype.md) | :heavy_minus_sign: | N/A | | `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `commit_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobin.md b/docs/models/jobin.md index d6cbd27a..b9651770 100644 --- a/docs/models/jobin.md +++ b/docs/models/jobin.md @@ -6,7 +6,6 @@ | Field | Type | Required | Description | | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | -| `hyperparameters` | [models.Hyperparameters](../models/hyperparameters.md) | :heavy_check_mark: | N/A | | `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. 
These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | @@ -14,5 +13,6 @@ | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | | `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | | `job_type` | [OptionalNullable[models.FineTuneableModelType]](../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | +| `hyperparameters` | [models.Hyperparameters](../models/hyperparameters.md) | :heavy_check_mark: | N/A | | `repositories` | List[[models.JobInRepositories](../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | | `classifier_targets` | List[[models.ClassifierTargetIn](../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobsout.md b/docs/models/jobsout.md index d71793ef..977013f7 100644 --- a/docs/models/jobsout.md +++ b/docs/models/jobsout.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `total` | *int* | :heavy_check_mark: | N/A | | `data` | List[[models.JobsOutData](../models/jobsoutdata.md)] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.JobsOutObject]](../models/jobsoutobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `object` | [Optional[models.JobsOutObject]](../models/jobsoutobject.md) | :heavy_minus_sign: | N/A | +| `total` | *int* | :heavy_check_mark: 
| N/A | \ No newline at end of file diff --git a/docs/models/jsonschema.md b/docs/models/jsonschema.md index ae387867..7ff7c070 100644 --- a/docs/models/jsonschema.md +++ b/docs/models/jsonschema.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ----------------------- | ----------------------- | ----------------------- | ----------------------- | | `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/legacyjobmetadataout.md b/docs/models/legacyjobmetadataout.md index 44d17e95..53a45485 100644 --- a/docs/models/legacyjobmetadataout.md +++ b/docs/models/legacyjobmetadataout.md @@ -5,7 +5,6 @@ | Field | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `details` | *str* | :heavy_check_mark: | N/A | | | `expected_duration_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | The approximated time (in seconds) for the fine-tuning process to complete. | 220 | | `cost` | *OptionalNullable[float]* | :heavy_minus_sign: | The cost of the fine-tuning job. | 10 | | `cost_currency` | *OptionalNullable[str]* | :heavy_minus_sign: | The currency used for the fine-tuning job cost. | EUR | @@ -14,6 +13,7 @@ | `data_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The total number of tokens in the training dataset. | 305375 | | `estimated_start_time` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | | `deprecated` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `details` | *str* | :heavy_check_mark: | N/A | | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | The number of complete passes through the entire training dataset. | 4.2922 | | `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. | 10 | | `object` | [Optional[models.LegacyJobMetadataOutObject]](../models/legacyjobmetadataoutobject.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md index a1573ed5..f5bb6c25 100644 --- a/docs/models/messageinputentry.md +++ b/docs/models/messageinputentry.md @@ -7,10 +7,10 @@ Representation of an input message inside the conversation. 
| Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | -| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | | `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | | `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputentry.md b/docs/models/messageoutputentry.md index 224d043d..5b42e20d 100644 --- a/docs/models/messageoutputentry.md +++ b/docs/models/messageoutputentry.md @@ -5,7 +5,6 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `content` | [models.MessageOutputEntryContent](../models/messageoutputentrycontent.md) | :heavy_check_mark: | N/A | | `object` | [Optional[models.MessageOutputEntryObject]](../models/messageoutputentryobject.md) | :heavy_minus_sign: | N/A | | `type` | [Optional[models.MessageOutputEntryType]](../models/messageoutputentrytype.md) | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | @@ -13,4 +12,5 @@ | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.MessageOutputEntryRole]](../models/messageoutputentryrole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | [Optional[models.MessageOutputEntryRole]](../models/messageoutputentryrole.md) | :heavy_minus_sign: | N/A | +| `content` | [models.MessageOutputEntryContent](../models/messageoutputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputevent.md b/docs/models/messageoutputevent.md index 3fe8ac49..92c1c615 100644 --- a/docs/models/messageoutputevent.md +++ b/docs/models/messageoutputevent.md @@ -5,12 +5,12 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | N/A | -| `content` | 
[models.MessageOutputEventContent](../models/messageoutputeventcontent.md) | :heavy_check_mark: | N/A | | `type` | [Optional[models.MessageOutputEventType]](../models/messageoutputeventtype.md) | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | | `content_index` | *Optional[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.MessageOutputEventRole]](../models/messageoutputeventrole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | [Optional[models.MessageOutputEventRole]](../models/messageoutputeventrole.md) | :heavy_minus_sign: | N/A | +| `content` | [models.MessageOutputEventContent](../models/messageoutputeventcontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversation.md b/docs/models/modelconversation.md index ffedcc0f..f7f61a79 100644 --- a/docs/models/modelconversation.md +++ b/docs/models/modelconversation.md @@ -5,13 +5,13 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `updated_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | | `tools` | List[[models.ModelConversationTools](../models/modelconversationtools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | -| `object` | [Optional[models.ModelConversationObject]](../models/modelconversationobject.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `object` | [Optional[models.ModelConversationObject]](../models/modelconversationobject.md) | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md index 0c8954a5..6a9c77ab 100644 --- a/docs/models/ocrrequest.md +++ b/docs/models/ocrrequest.md @@ -6,8 +6,8 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | diff --git a/docs/models/ocrresponse.md b/docs/models/ocrresponse.md index 7d6a58ae..0a309317 100644 --- a/docs/models/ocrresponse.md +++ b/docs/models/ocrresponse.md @@ -7,5 +7,5 @@ | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | | `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | | `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. 
| -| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | -| `document_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Formatted response in the request_format if provided in json str | \ No newline at end of file +| `document_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Formatted response in the request_format if provided in json str | +| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responsedoneevent.md b/docs/models/responsedoneevent.md index b33fa52c..ec25bd6d 100644 --- a/docs/models/responsedoneevent.md +++ b/docs/models/responsedoneevent.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | | `type` | [Optional[models.ResponseDoneEventType]](../models/responsedoneeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responseerrorevent.md b/docs/models/responseerrorevent.md index e730b7c4..2ea6a2e0 100644 --- a/docs/models/responseerrorevent.md +++ 
b/docs/models/responseerrorevent.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `message` | *str* | :heavy_check_mark: | N/A | -| `code` | *int* | :heavy_check_mark: | N/A | | `type` | [Optional[models.ResponseErrorEventType]](../models/responseerroreventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `message` | *str* | :heavy_check_mark: | N/A | +| `code` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responsestartedevent.md b/docs/models/responsestartedevent.md index 7bd02b3e..481bd5bb 100644 --- a/docs/models/responsestartedevent.md +++ b/docs/models/responsestartedevent.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | | `type` | [Optional[models.ResponseStartedEventType]](../models/responsestartedeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/retrievefileout.md b/docs/models/retrievefileout.md index 30861f5c..10f738b9 100644 --- a/docs/models/retrievefileout.md +++ b/docs/models/retrievefileout.md @@ -12,6 +12,6 @@ | `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | | `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | | `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `deleted` | *bool* | :heavy_check_mark: | N/A | | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `deleted` | *bool* | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/tool.md b/docs/models/tool.md index 822f86f8..fb661f72 100644 --- a/docs/models/tool.md +++ b/docs/models/tool.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: 
| N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md index 574be1ea..3819236b 100644 --- a/docs/models/toolcall.md +++ b/docs/models/toolcall.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolchoice.md b/docs/models/toolchoice.md index 792ebcd6..373046bb 100644 --- a/docs/models/toolchoice.md +++ b/docs/models/toolchoice.md @@ -7,5 +7,5 @@ ToolChoice is either a ToolChoiceEnum or a ToolChoice | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | 
[models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git a/docs/models/toolexecutiondoneevent.md b/docs/models/toolexecutiondoneevent.md index d6d28ce2..aa28df59 100644 --- a/docs/models/toolexecutiondoneevent.md +++ b/docs/models/toolexecutiondoneevent.md @@ -5,9 +5,9 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | | `type` | [Optional[models.ToolExecutionDoneEventType]](../models/toolexecutiondoneeventtype.md) | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | | `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md index 8422a8fd..2e58b517 100644 --- a/docs/models/toolexecutionentry.md +++ b/docs/models/toolexecutionentry.md @@ -5,10 +5,10 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | | `object` | [Optional[models.ToolExecutionEntryObject]](../models/toolexecutionentryobject.md) | :heavy_minus_sign: | N/A | | `type` | [Optional[models.ToolExecutionEntryType]](../models/toolexecutionentrytype.md) | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | | `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md index 4b03f94c..8fc20194 100644 --- a/docs/models/toolexecutionstartedevent.md +++ b/docs/models/toolexecutionstartedevent.md @@ -5,8 +5,8 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | | `type` | [Optional[models.ToolExecutionStartedEventType]](../models/toolexecutionstartedeventtype.md) | 
:heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolfilechunk.md b/docs/models/toolfilechunk.md index 236d2f41..f1b54c7c 100644 --- a/docs/models/toolfilechunk.md +++ b/docs/models/toolfilechunk.md @@ -5,8 +5,8 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | [Optional[models.ToolFileChunkType]](../models/toolfilechunktype.md) | :heavy_minus_sign: | N/A | | `tool` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | | `file_id` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolFileChunkType]](../models/toolfilechunktype.md) | :heavy_minus_sign: | N/A | | `file_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `file_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolreferencechunk.md b/docs/models/toolreferencechunk.md index fb4b46a6..d8bea0da 100644 --- a/docs/models/toolreferencechunk.md +++ b/docs/models/toolreferencechunk.md @@ -5,8 +5,8 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `type` | [Optional[models.ToolReferenceChunkType]](../models/toolreferencechunktype.md) | :heavy_minus_sign: | N/A | | `tool` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | | `title` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolReferenceChunkType]](../models/toolreferencechunktype.md) | :heavy_minus_sign: | N/A | | `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `source` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/uploadfileout.md b/docs/models/uploadfileout.md index cf3c5994..ef2ad212 100644 --- a/docs/models/uploadfileout.md +++ b/docs/models/uploadfileout.md @@ -12,5 +12,5 @@ | `filename` | *str* | :heavy_check_mark: | The name of the uploaded file. | files_upload.jsonl | | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | | `sample_type` | [models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | -| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/wandbintegration.md b/docs/models/wandbintegration.md index c003a46e..199d2edd 100644 --- a/docs/models/wandbintegration.md +++ b/docs/models/wandbintegration.md @@ -5,8 +5,8 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | -| `api_key` | *str* | :heavy_check_mark: | The WandB API key to use for authentication. | | `type` | [Optional[models.WandbIntegrationType]](../models/wandbintegrationtype.md) | :heavy_minus_sign: | N/A | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | +| `api_key` | *str* | :heavy_check_mark: | The WandB API key to use for authentication. | | `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationout.md index f924b636..cec02ed8 100644 --- a/docs/models/wandbintegrationout.md +++ b/docs/models/wandbintegrationout.md @@ -5,8 +5,8 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | | `type` | [Optional[models.WandbIntegrationOutType]](../models/wandbintegrationouttype.md) | :heavy_minus_sign: | N/A | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. 
| | `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index c7fdb687..d5014a36 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -30,7 +30,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=False) # Handle response print(res) @@ -89,7 +89,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="") + ], agent_id="", stream=True) with res as event_stream: for event in event_stream: diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 3a8d57fa..f8aca31f 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -30,7 +30,7 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ]) + ], stream=False) # Handle response print(res) @@ -92,7 +92,7 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ]) + ], stream=True) with res as event_stream: for event in event_stream: diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 19761046..3f9d3a3c 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -27,8 +27,9 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.moderate(model="V90", inputs=[ - "", + res = mistral.classifiers.moderate(model="Durango", inputs=[ + "", + "", ]) # Handle response @@ -71,52 +72,11 @@ with Mistral( ) as mistral: res = mistral.classifiers.moderate_chat(inputs=[ - [ - { - "content": [ - - ], - "role": "system", - }, - { - "content": "", - "role": "tool", - }, - ], - [ - { - "prefix": False, - "role": "assistant", - }, - { - "content": "", - "role": "user", - }, - { - "prefix": False, - "role": "assistant", - }, - ], - [ - { - "content": "", - "role": "system", - }, - { - "content": [ - { - "image_url": "https://fatherly-colon.name", - "type": "image_url", - }, - ], - "role": "user", - }, - { - "content": "", - "role": "user", - }, - ], - ], model="Model Y") + { + "content": "", + "role": "tool", + }, + ], model="LeBaron") # Handle response print(res) @@ -157,7 +117,9 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.classify(model="Altima", inputs="") + res = mistral.classifiers.classify(model="Silverado", inputs=[ + "", + ]) # Handle response print(res) @@ -198,20 +160,15 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.classify_chat(model="Fortwo", inputs=[ + res = mistral.classifiers.classify_chat(model="Camry", inputs=[ { "messages": [ { "content": "", - "role": "tool", + "role": "system", }, ], }, - { - "messages": [ - - ], - }, ]) # Handle response diff --git a/docs/sdks/conversations/README.md 
b/docs/sdks/conversations/README.md index 6d6aaa2c..25b1ab9c 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -33,7 +33,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start(inputs="") + res = mistral.beta.conversations.start(inputs="", stream=False) # Handle response print(res) @@ -83,7 +83,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.list() + res = mistral.beta.conversations.list(page=0, page_size=100) # Handle response print(res) @@ -164,7 +164,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append(conversation_id="", inputs="") + res = mistral.beta.conversations.append(conversation_id="", inputs=[], stream=False, store=True, handoff_execution="server") # Handle response print(res) @@ -289,7 +289,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart(conversation_id="", inputs="", from_entry_id="") + res = mistral.beta.conversations.restart(conversation_id="", inputs="", from_entry_id="", stream=False, store=True, handoff_execution="server") # Handle response print(res) @@ -335,7 +335,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start_stream(inputs="") + res = mistral.beta.conversations.start_stream(inputs=[ + { + "object": "entry", + "type": "function.result", + "tool_call_id": "", + "result": "", + }, + ], stream=True) with res as event_stream: for event in event_stream: @@ -387,7 +394,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append_stream(conversation_id="", inputs="") + res = mistral.beta.conversations.append_stream(conversation_id="", inputs="", stream=True, store=True, handoff_execution="server") with res as event_stream: for event in 
event_stream: @@ -434,7 +441,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart_stream(conversation_id="", inputs="", from_entry_id="") + res = mistral.beta.conversations.restart_stream(conversation_id="", inputs=[ + { + "object": "entry", + "type": "message.input", + "role": "assistant", + "content": "", + }, + ], from_entry_id="", stream=True, store=True, handoff_execution="server") with res as event_stream: for event in event_stream: diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index befa4d67..bc39f2e4 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -76,7 +76,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.list() + res = mistral.files.list(page=0, page_size=100) # Handle response print(res) @@ -120,7 +120,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.retrieve(file_id="") + res = mistral.files.retrieve(file_id="f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6") # Handle response print(res) @@ -159,7 +159,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.delete(file_id="") + res = mistral.files.delete(file_id="3b6d45eb-e30b-416f-8019-f47e2e93d930") # Handle response print(res) @@ -198,7 +198,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.download(file_id="") + res = mistral.files.download(file_id="f8919994-a4a1-46b2-8b5b-06335a4300ce") # Handle response print(res) @@ -237,7 +237,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.get_signed_url(file_id="") + res = mistral.files.get_signed_url(file_id="06a020ab-355c-49a6-b19d-304b7c01699f", expiry=24) # Handle response print(res) diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index c70b3da4..10e6255d 100644 --- a/docs/sdks/fim/README.md +++ 
b/docs/sdks/fim/README.md @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.complete(model="codestral-2405", prompt="def", suffix="return a+b") + res = mistral.fim.complete(model="codestral-2405", prompt="def", top_p=1, stream=False, suffix="return a+b") # Handle response print(res) @@ -74,7 +74,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.stream(model="codestral-2405", prompt="def", suffix="return a+b") + res = mistral.fim.stream(model="codestral-2405", prompt="def", top_p=1, stream=True, suffix="return a+b") with res as event_stream: for event in event_stream: diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 75d3b57d..1e240c33 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -26,7 +26,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.list() + res = mistral.fine_tuning.jobs.list(page=0, page_size=100, created_by_me=False) # Handle response print(res) @@ -74,9 +74,9 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Fiesta", hyperparameters={ + res = mistral.fine_tuning.jobs.create(model="Camaro", hyperparameters={ "learning_rate": 0.0001, - }) + }, invalid_sample_skip_percentage=0) # Handle response print(res) @@ -125,7 +125,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.get(job_id="b888f774-3e7c-4135-a18c-6b985523c4bc") + res = mistral.fine_tuning.jobs.get(job_id="c167a961-ffca-4bcf-93ac-6169468dd389") # Handle response print(res) @@ -164,7 +164,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.cancel(job_id="0f713502-9233-41c6-9ebd-c570b7edb496") + res = mistral.fine_tuning.jobs.cancel(job_id="6188a2f6-7513-4e0f-89cc-3f8088523a49") # Handle response print(res) @@ -203,7 
+203,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.start(job_id="0bf0f9e6-c3e5-4d61-aac8-0e36dcac0dfc") + res = mistral.fine_tuning.jobs.start(job_id="56553e4d-0679-471e-b9ac-59a77d671103") # Handle response print(res) diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index 496016c3..58082d21 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -28,7 +28,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.create(model="Fiesta", name="") + res = mistral.beta.agents.create(model="LeBaron", name="") # Handle response print(res) @@ -74,7 +74,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.list() + res = mistral.beta.agents.list(page=0, page_size=20) # Handle response print(res) @@ -202,7 +202,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.update_version(agent_id="", version=193920) + res = mistral.beta.agents.update_version(agent_id="", version=157995) # Handle response print(res) diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index e2dac8b4..0c0b5991 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.list() + res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False) # Handle response print(res) @@ -71,8 +71,8 @@ with Mistral( ) as mistral: res = mistral.batch.jobs.create(input_files=[ - "a621cf02-1cd9-4cf5-8403-315211a509a3", - ], endpoint="/v1/fim/completions", model="2") + "fe3343a2-3b8d-404b-ba32-a78dede2614a", + ], endpoint="/v1/moderations", model="Altima", timeout_hours=24) # Handle response print(res) @@ -115,7 +115,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as 
mistral: - res = mistral.batch.jobs.get(job_id="b888f774-3e7c-4135-a18c-6b985523c4bc") + res = mistral.batch.jobs.get(job_id="4017dc9f-b629-42f4-9700-8c681b9e7f0f") # Handle response print(res) @@ -154,7 +154,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.cancel(job_id="0f713502-9233-41c6-9ebd-c570b7edb496") + res = mistral.batch.jobs.cancel(job_id="4fb29d1c-535b-4f0a-a1cb-2167f86da569") # Handle response print(res) diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 60d987b4..2188f378 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -24,9 +24,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.ocr.process(model="Focus", document={ - "document_url": "https://dutiful-horst.org", - "type": "document_url", + res = mistral.ocr.process(model="CX-9", document={ + "image_url": { + "url": "https://measly-scrap.com", + }, + "type": "image_url", }) # Handle response diff --git a/pyproject.toml b/pyproject.toml index c7cb9095..f8cf20a9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.8.2" +version = "1.9.0" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py index ebc789ff..6d0f3e11 100644 --- a/src/mistralai/_hooks/types.py +++ b/src/mistralai/_hooks/types.py @@ -3,10 +3,12 @@ from abc import ABC, abstractmethod import httpx from mistralai.httpclient import HttpClient +from mistralai.sdkconfiguration import SDKConfiguration from typing import Any, Callable, List, Optional, Tuple, Union class HookContext: + config: SDKConfiguration base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None @@ -14,11 +16,13 @@ class HookContext: def __init__( self, + config: SDKConfiguration, base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.config = config self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes @@ -28,6 +32,7 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -38,6 +43,7 @@ def __init__(self, hook_ctx: HookContext): class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -48,6 +54,7 @@ def __init__(self, hook_ctx: HookContext): class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index fc416fd3..db4f08fe 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.8.2" +__version__: str = "1.9.0" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.548.6" 
-__user_agent__: str = "speakeasy-sdk/python 1.8.2 2.548.6 1.0.0 mistralai" +__gen_version__: str = "2.634.2" +__user_agent__: str = "speakeasy-sdk/python 1.9.0 2.634.2 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 4fbb25dd..febc3383 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -140,6 +140,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], @@ -309,6 +310,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", oauth2_scopes=[], @@ -480,6 +482,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], @@ -657,6 +660,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_agents", oauth2_scopes=[], diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index 512e3072..bb37a1ee 100644 --- a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -218,12 +218,12 @@ def do_request( client = self.sdk_configuration.client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -237,9 +237,7 @@ def do(): http_res = client.send(req, stream=stream) except Exception as e: - _, e = 
self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e @@ -257,7 +255,7 @@ def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -277,9 +275,7 @@ def do(): http_res = do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res @@ -294,12 +290,12 @@ async def do_request_async( client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + async def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -313,9 +309,7 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e @@ -333,7 +327,7 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -355,8 +349,6 @@ async def do(): http_res = await do() if not 
utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 96fcf65d..8556c5a0 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -221,6 +221,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], @@ -389,6 +390,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], @@ -569,6 +571,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], @@ -755,6 +758,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index e5b46f5d..6ebf7834 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -77,6 +77,7 @@ def moderate( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], @@ -183,6 +184,7 @@ async def moderate_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="moderations_v1_moderations_post", oauth2_scopes=[], @@ -289,6 +291,7 @@ def moderate_chat( http_res = self.do_request( hook_ctx=HookContext( + 
config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_moderations_v1_chat_moderations_post", oauth2_scopes=[], @@ -395,6 +398,7 @@ async def moderate_chat_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_moderations_v1_chat_moderations_post", oauth2_scopes=[], @@ -501,6 +505,7 @@ def classify( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="classifications_v1_classifications_post", oauth2_scopes=[], @@ -607,6 +612,7 @@ async def classify_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="classifications_v1_classifications_post", oauth2_scopes=[], @@ -710,6 +716,7 @@ def classify_chat( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_classifications_v1_chat_classifications_post", oauth2_scopes=[], @@ -813,6 +820,7 @@ async def classify_chat_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_classifications_v1_chat_classifications_post", oauth2_scopes=[], diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 6ef02edd..f8b6ec2c 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -313,6 +313,7 @@ def start( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start", oauth2_scopes=[], @@ -451,6 +452,7 @@ async def start_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start", oauth2_scopes=[], @@ -553,6 +555,7 @@ def list( http_res = self.do_request( 
hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_list", oauth2_scopes=[], @@ -655,6 +658,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_list", oauth2_scopes=[], @@ -754,6 +758,7 @@ def get( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_get", oauth2_scopes=[], @@ -856,6 +861,7 @@ async def get_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_get", oauth2_scopes=[], @@ -988,6 +994,7 @@ def append( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append", oauth2_scopes=[], @@ -1117,6 +1124,7 @@ async def append_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append", oauth2_scopes=[], @@ -1216,6 +1224,7 @@ def get_history( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_history", oauth2_scopes=[], @@ -1315,6 +1324,7 @@ async def get_history_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_history", oauth2_scopes=[], @@ -1414,6 +1424,7 @@ def get_messages( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_messages", oauth2_scopes=[], @@ -1513,6 +1524,7 @@ async def get_messages_async( http_res = await 
self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_messages", oauth2_scopes=[], @@ -1645,6 +1657,7 @@ def restart( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_restart", oauth2_scopes=[], @@ -1777,6 +1790,7 @@ async def restart_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_restart", oauth2_scopes=[], @@ -1922,6 +1936,7 @@ def start_stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start_stream", oauth2_scopes=[], @@ -2072,6 +2087,7 @@ async def start_stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start_stream", oauth2_scopes=[], @@ -2206,6 +2222,7 @@ def append_stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append_stream", oauth2_scopes=[], @@ -2340,6 +2357,7 @@ async def append_stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append_stream", oauth2_scopes=[], @@ -2477,6 +2495,7 @@ def restart_stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_restart_stream", oauth2_scopes=[], @@ -2614,6 +2633,7 @@ async def restart_stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", 
operation_id="agents_api_v1_conversations_restart_stream", oauth2_scopes=[], diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index fee30251..ef0699d1 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -84,6 +84,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], @@ -197,6 +198,7 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", oauth2_scopes=[], diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 0ffc4857..39f65dd4 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -85,6 +85,7 @@ def upload( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], @@ -192,6 +193,7 @@ async def upload_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_upload_file", oauth2_scopes=[], @@ -300,6 +302,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], @@ -408,6 +411,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_list_files", oauth2_scopes=[], @@ -501,6 +505,7 @@ def retrieve( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], @@ -594,6 +599,7 @@ async def retrieve_async( http_res = await self.do_request_async( hook_ctx=HookContext( + 
config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_retrieve_file", oauth2_scopes=[], @@ -687,6 +693,7 @@ def delete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], @@ -780,6 +787,7 @@ async def delete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_delete_file", oauth2_scopes=[], @@ -873,6 +881,7 @@ def download( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_download_file", oauth2_scopes=[], @@ -967,6 +976,7 @@ async def download_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_download_file", oauth2_scopes=[], @@ -1062,6 +1072,7 @@ def get_signed_url( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], @@ -1156,6 +1167,7 @@ async def get_signed_url_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_get_signed_url", oauth2_scopes=[], diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 032c722f..c57bc68e 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -105,6 +105,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], @@ -239,6 +240,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", 
operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], @@ -373,6 +375,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], @@ -513,6 +516,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py index 1e426352..47b052cb 100644 --- a/src/mistralai/httpclient.py +++ b/src/mistralai/httpclient.py @@ -2,7 +2,6 @@ # pyright: reportReturnType = false import asyncio -from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -116,21 +115,12 @@ def close_clients( pass if async_client is not None and not async_client_supplied: - is_async = False try: - asyncio.get_running_loop() - is_async = True + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. 
- if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: + try: asyncio.run(async_client.aclose()) - except Exception: - pass + except RuntimeError: + # best effort + pass diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 76d9f41a..020c40f0 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -96,6 +96,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], @@ -216,6 +217,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=[], @@ -367,6 +369,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], @@ -520,6 +523,7 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=[], @@ -615,6 +619,7 @@ def get( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], @@ -710,6 +715,7 @@ async def get_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=[], @@ -805,6 +811,7 @@ def cancel( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", 
operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], @@ -900,6 +907,7 @@ async def cancel_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=[], @@ -995,6 +1003,7 @@ def start( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], @@ -1090,6 +1099,7 @@ async def start_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=[], diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py index a22ce41d..f0d4be01 100644 --- a/src/mistralai/mistral_agents.py +++ b/src/mistralai/mistral_agents.py @@ -102,6 +102,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_create", oauth2_scopes=[], @@ -233,6 +234,7 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_create", oauth2_scopes=[], @@ -335,6 +337,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_list", oauth2_scopes=[], @@ -437,6 +440,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_list", oauth2_scopes=[], @@ -536,6 +540,7 @@ def get( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", 
operation_id="agents_api_v1_agents_get", oauth2_scopes=[], @@ -635,6 +640,7 @@ async def get_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_get", oauth2_scopes=[], @@ -775,6 +781,7 @@ def update( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update", oauth2_scopes=[], @@ -915,6 +922,7 @@ async def update_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update", oauth2_scopes=[], @@ -1017,6 +1025,7 @@ def update_version( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update_version", oauth2_scopes=[], @@ -1119,6 +1128,7 @@ async def update_version_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update_version", oauth2_scopes=[], diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index 32a40aa7..b824508a 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -87,6 +87,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=[], @@ -198,6 +199,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=[], @@ -306,6 +308,7 @@ def create( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", 
oauth2_scopes=[], @@ -414,6 +417,7 @@ async def create_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", oauth2_scopes=[], @@ -507,6 +511,7 @@ def get( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=[], @@ -600,6 +605,7 @@ async def get_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=[], @@ -693,6 +699,7 @@ def cancel( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=[], @@ -786,6 +793,7 @@ async def cancel_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=[], diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index e6493e90..1b7b9c6c 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -1,727 +1,754 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .agent import Agent, AgentObject, AgentTools, AgentToolsTypedDict, AgentTypedDict -from .agentconversation import ( - AgentConversation, - AgentConversationObject, - AgentConversationTypedDict, -) -from .agentcreationrequest import ( - AgentCreationRequest, - AgentCreationRequestTools, - AgentCreationRequestToolsTypedDict, - AgentCreationRequestTypedDict, -) -from .agenthandoffdoneevent import ( - AgentHandoffDoneEvent, - AgentHandoffDoneEventType, - AgentHandoffDoneEventTypedDict, -) -from .agenthandoffentry import ( - AgentHandoffEntry, - AgentHandoffEntryObject, - AgentHandoffEntryType, - AgentHandoffEntryTypedDict, -) -from .agenthandoffstartedevent import ( - AgentHandoffStartedEvent, - AgentHandoffStartedEventType, - AgentHandoffStartedEventTypedDict, -) -from .agents_api_v1_agents_getop import ( - AgentsAPIV1AgentsGetRequest, - AgentsAPIV1AgentsGetRequestTypedDict, -) -from .agents_api_v1_agents_listop import ( - AgentsAPIV1AgentsListRequest, - AgentsAPIV1AgentsListRequestTypedDict, -) -from .agents_api_v1_agents_update_versionop import ( - AgentsAPIV1AgentsUpdateVersionRequest, - AgentsAPIV1AgentsUpdateVersionRequestTypedDict, -) -from .agents_api_v1_agents_updateop import ( - AgentsAPIV1AgentsUpdateRequest, - AgentsAPIV1AgentsUpdateRequestTypedDict, -) -from .agents_api_v1_conversations_append_streamop import ( - AgentsAPIV1ConversationsAppendStreamRequest, - AgentsAPIV1ConversationsAppendStreamRequestTypedDict, -) -from .agents_api_v1_conversations_appendop import ( - AgentsAPIV1ConversationsAppendRequest, - AgentsAPIV1ConversationsAppendRequestTypedDict, -) -from .agents_api_v1_conversations_getop import ( - AgentsAPIV1ConversationsGetRequest, - AgentsAPIV1ConversationsGetRequestTypedDict, - AgentsAPIV1ConversationsGetResponseV1ConversationsGet, - AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict, -) -from .agents_api_v1_conversations_historyop import ( - AgentsAPIV1ConversationsHistoryRequest, - 
AgentsAPIV1ConversationsHistoryRequestTypedDict, -) -from .agents_api_v1_conversations_listop import ( - AgentsAPIV1ConversationsListRequest, - AgentsAPIV1ConversationsListRequestTypedDict, - ResponseBody, - ResponseBodyTypedDict, -) -from .agents_api_v1_conversations_messagesop import ( - AgentsAPIV1ConversationsMessagesRequest, - AgentsAPIV1ConversationsMessagesRequestTypedDict, -) -from .agents_api_v1_conversations_restart_streamop import ( - AgentsAPIV1ConversationsRestartStreamRequest, - AgentsAPIV1ConversationsRestartStreamRequestTypedDict, -) -from .agents_api_v1_conversations_restartop import ( - AgentsAPIV1ConversationsRestartRequest, - AgentsAPIV1ConversationsRestartRequestTypedDict, -) -from .agentscompletionrequest import ( - AgentsCompletionRequest, - AgentsCompletionRequestMessages, - AgentsCompletionRequestMessagesTypedDict, - AgentsCompletionRequestStop, - AgentsCompletionRequestStopTypedDict, - AgentsCompletionRequestToolChoice, - AgentsCompletionRequestToolChoiceTypedDict, - AgentsCompletionRequestTypedDict, -) -from .agentscompletionstreamrequest import ( - AgentsCompletionStreamRequest, - AgentsCompletionStreamRequestMessages, - AgentsCompletionStreamRequestMessagesTypedDict, - AgentsCompletionStreamRequestStop, - AgentsCompletionStreamRequestStopTypedDict, - AgentsCompletionStreamRequestToolChoice, - AgentsCompletionStreamRequestToolChoiceTypedDict, - AgentsCompletionStreamRequestTypedDict, -) -from .agentupdaterequest import ( - AgentUpdateRequest, - AgentUpdateRequestTools, - AgentUpdateRequestToolsTypedDict, - AgentUpdateRequestTypedDict, -) -from .apiendpoint import APIEndpoint -from .archiveftmodelout import ( - ArchiveFTModelOut, - ArchiveFTModelOutObject, - ArchiveFTModelOutTypedDict, -) -from .assistantmessage import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentTypedDict, - AssistantMessageRole, - AssistantMessageTypedDict, -) -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict, Type -from 
.batcherror import BatchError, BatchErrorTypedDict -from .batchjobin import BatchJobIn, BatchJobInTypedDict -from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict -from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict -from .batchjobstatus import BatchJobStatus -from .builtinconnectors import BuiltInConnectors -from .chatclassificationrequest import ( - ChatClassificationRequest, - ChatClassificationRequestTypedDict, -) -from .chatcompletionchoice import ( - ChatCompletionChoice, - ChatCompletionChoiceTypedDict, - FinishReason, -) -from .chatcompletionrequest import ( - ChatCompletionRequest, - ChatCompletionRequestToolChoice, - ChatCompletionRequestToolChoiceTypedDict, - ChatCompletionRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, -) -from .chatcompletionresponse import ( - ChatCompletionResponse, - ChatCompletionResponseTypedDict, -) -from .chatcompletionstreamrequest import ( - ChatCompletionStreamRequest, - ChatCompletionStreamRequestMessages, - ChatCompletionStreamRequestMessagesTypedDict, - ChatCompletionStreamRequestStop, - ChatCompletionStreamRequestStopTypedDict, - ChatCompletionStreamRequestToolChoice, - ChatCompletionStreamRequestToolChoiceTypedDict, - ChatCompletionStreamRequestTypedDict, -) -from .chatmoderationrequest import ( - ChatModerationRequest, - ChatModerationRequestInputs, - ChatModerationRequestInputsTypedDict, - ChatModerationRequestTypedDict, - One, - OneTypedDict, - Two, - TwoTypedDict, -) -from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .classificationrequest import ( - ClassificationRequest, - ClassificationRequestInputs, - ClassificationRequestInputsTypedDict, - ClassificationRequestTypedDict, -) -from .classificationresponse import ( - ClassificationResponse, - ClassificationResponseTypedDict, -) -from .classificationtargetresult import ( - ClassificationTargetResult, - ClassificationTargetResultTypedDict, -) -from 
.classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutIntegrations, - ClassifierDetailedJobOutIntegrationsTypedDict, - ClassifierDetailedJobOutJobType, - ClassifierDetailedJobOutObject, - ClassifierDetailedJobOutStatus, - ClassifierDetailedJobOutTypedDict, -) -from .classifierftmodelout import ( - ClassifierFTModelOut, - ClassifierFTModelOutModelType, - ClassifierFTModelOutObject, - ClassifierFTModelOutTypedDict, -) -from .classifierjobout import ( - ClassifierJobOut, - ClassifierJobOutIntegrations, - ClassifierJobOutIntegrationsTypedDict, - ClassifierJobOutJobType, - ClassifierJobOutObject, - ClassifierJobOutStatus, - ClassifierJobOutTypedDict, -) -from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict -from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict -from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, -) -from .classifiertrainingparametersin import ( - ClassifierTrainingParametersIn, - ClassifierTrainingParametersInTypedDict, -) -from .codeinterpretertool import ( - CodeInterpreterTool, - CodeInterpreterToolType, - CodeInterpreterToolTypedDict, -) -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutIntegrations, - CompletionDetailedJobOutIntegrationsTypedDict, - CompletionDetailedJobOutJobType, - CompletionDetailedJobOutObject, - CompletionDetailedJobOutRepositories, - CompletionDetailedJobOutRepositoriesTypedDict, - CompletionDetailedJobOutStatus, - CompletionDetailedJobOutTypedDict, -) -from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionftmodelout import ( - CompletionFTModelOut, - 
CompletionFTModelOutObject, - CompletionFTModelOutTypedDict, - ModelType, -) -from .completionjobout import ( - CompletionJobOut, - CompletionJobOutObject, - CompletionJobOutTypedDict, - Integrations, - IntegrationsTypedDict, - JobType, - Repositories, - RepositoriesTypedDict, - Status, -) -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceFinishReason, - CompletionResponseStreamChoiceTypedDict, -) -from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, -) -from .completiontrainingparametersin import ( - CompletionTrainingParametersIn, - CompletionTrainingParametersInTypedDict, -) -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .conversationappendrequest import ( - ConversationAppendRequest, - ConversationAppendRequestHandoffExecution, - ConversationAppendRequestTypedDict, -) -from .conversationappendstreamrequest import ( - ConversationAppendStreamRequest, - ConversationAppendStreamRequestHandoffExecution, - ConversationAppendStreamRequestTypedDict, -) -from .conversationevents import ( - ConversationEvents, - ConversationEventsData, - ConversationEventsDataTypedDict, - ConversationEventsTypedDict, -) -from .conversationhistory import ( - ConversationHistory, - ConversationHistoryObject, - ConversationHistoryTypedDict, - Entries, - EntriesTypedDict, -) -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from .conversationmessages import ( - ConversationMessages, - ConversationMessagesObject, - ConversationMessagesTypedDict, -) -from .conversationrequest import ( - ConversationRequest, - ConversationRequestTypedDict, - HandoffExecution, - Tools, - ToolsTypedDict, -) -from .conversationresponse import ( - ConversationResponse, - ConversationResponseObject, - ConversationResponseTypedDict, - Outputs, - OutputsTypedDict, -) -from .conversationrestartrequest import ( - ConversationRestartRequest, 
- ConversationRestartRequestHandoffExecution, - ConversationRestartRequestTypedDict, -) -from .conversationrestartstreamrequest import ( - ConversationRestartStreamRequest, - ConversationRestartStreamRequestHandoffExecution, - ConversationRestartStreamRequestTypedDict, -) -from .conversationstreamrequest import ( - ConversationStreamRequest, - ConversationStreamRequestHandoffExecution, - ConversationStreamRequestTools, - ConversationStreamRequestToolsTypedDict, - ConversationStreamRequestTypedDict, -) -from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict -from .delete_model_v1_models_model_id_deleteop import ( - DeleteModelV1ModelsModelIDDeleteRequest, - DeleteModelV1ModelsModelIDDeleteRequestTypedDict, -) -from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict -from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict -from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict -from .documentlibrarytool import ( - DocumentLibraryTool, - DocumentLibraryToolType, - DocumentLibraryToolTypedDict, -) -from .documenturlchunk import ( - DocumentURLChunk, - DocumentURLChunkType, - DocumentURLChunkTypedDict, -) -from .embeddingdtype import EmbeddingDtype -from .embeddingrequest import ( - EmbeddingRequest, - EmbeddingRequestInputs, - EmbeddingRequestInputsTypedDict, - EmbeddingRequestTypedDict, -) -from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict -from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict -from .eventout import EventOut, EventOutTypedDict -from .filepurpose import FilePurpose -from .files_api_routes_delete_fileop import ( - FilesAPIRoutesDeleteFileRequest, - FilesAPIRoutesDeleteFileRequestTypedDict, -) -from .files_api_routes_download_fileop import ( - FilesAPIRoutesDownloadFileRequest, - FilesAPIRoutesDownloadFileRequestTypedDict, -) -from .files_api_routes_get_signed_urlop import ( - 
FilesAPIRoutesGetSignedURLRequest, - FilesAPIRoutesGetSignedURLRequestTypedDict, -) -from .files_api_routes_list_filesop import ( - FilesAPIRoutesListFilesRequest, - FilesAPIRoutesListFilesRequestTypedDict, -) -from .files_api_routes_retrieve_fileop import ( - FilesAPIRoutesRetrieveFileRequest, - FilesAPIRoutesRetrieveFileRequestTypedDict, -) -from .files_api_routes_upload_fileop import ( - File, - FileTypedDict, - FilesAPIRoutesUploadFileMultiPartBodyParams, - FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, -) -from .fileschema import FileSchema, FileSchemaTypedDict -from .filesignedurl import FileSignedURL, FileSignedURLTypedDict -from .fimcompletionrequest import ( - FIMCompletionRequest, - FIMCompletionRequestStop, - FIMCompletionRequestStopTypedDict, - FIMCompletionRequestTypedDict, -) -from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict -from .fimcompletionstreamrequest import ( - FIMCompletionStreamRequest, - FIMCompletionStreamRequestStop, - FIMCompletionStreamRequestStopTypedDict, - FIMCompletionStreamRequestTypedDict, -) -from .finetuneablemodeltype import FineTuneableModelType -from .ftclassifierlossfunction import FTClassifierLossFunction -from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, -) -from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict -from .function import Function, FunctionTypedDict -from .functioncall import ( - Arguments, - ArgumentsTypedDict, - FunctionCall, - FunctionCallTypedDict, -) -from .functioncallentry import ( - FunctionCallEntry, - FunctionCallEntryObject, - FunctionCallEntryType, - FunctionCallEntryTypedDict, -) -from .functioncallentryarguments import ( - FunctionCallEntryArguments, - FunctionCallEntryArgumentsTypedDict, -) -from .functioncallevent import ( - FunctionCallEvent, - FunctionCallEventType, - FunctionCallEventTypedDict, -) -from .functionname import FunctionName, FunctionNameTypedDict -from 
.functionresultentry import ( - FunctionResultEntry, - FunctionResultEntryObject, - FunctionResultEntryType, - FunctionResultEntryTypedDict, -) -from .functiontool import FunctionTool, FunctionToolType, FunctionToolTypedDict -from .githubrepositoryin import ( - GithubRepositoryIn, - GithubRepositoryInType, - GithubRepositoryInTypedDict, -) -from .githubrepositoryout import ( - GithubRepositoryOut, - GithubRepositoryOutType, - GithubRepositoryOutTypedDict, -) -from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .imagegenerationtool import ( - ImageGenerationTool, - ImageGenerationToolType, - ImageGenerationToolTypedDict, -) -from .imageurl import ImageURL, ImageURLTypedDict -from .imageurlchunk import ( - ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, - ImageURLChunkTypedDict, -) -from .inputentries import InputEntries, InputEntriesTypedDict -from .inputs import ( - Inputs, - InputsTypedDict, - InstructRequestInputs, - InstructRequestInputsMessages, - InstructRequestInputsMessagesTypedDict, - InstructRequestInputsTypedDict, -) -from .instructrequest import ( - InstructRequest, - InstructRequestMessages, - InstructRequestMessagesTypedDict, - InstructRequestTypedDict, -) -from .jobin import ( - Hyperparameters, - HyperparametersTypedDict, - JobIn, - JobInIntegrations, - JobInIntegrationsTypedDict, - JobInRepositories, - JobInRepositoriesTypedDict, - JobInTypedDict, -) -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .jobs_api_routes_batch_cancel_batch_jobop import ( - JobsAPIRoutesBatchCancelBatchJobRequest, - JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, -) -from .jobs_api_routes_batch_get_batch_jobop import ( - JobsAPIRoutesBatchGetBatchJobRequest, - JobsAPIRoutesBatchGetBatchJobRequestTypedDict, -) -from .jobs_api_routes_batch_get_batch_jobsop import ( - JobsAPIRoutesBatchGetBatchJobsRequest, - JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, -) -from 
.jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, - JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, -) -from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningCancelFineTuningJobRequest, - JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningCancelFineTuningJobResponse, - JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, -) -from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningCreateFineTuningJobResponse, - JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, - Response1, - Response1TypedDict, -) -from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningGetFineTuningJobRequest, - JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningGetFineTuningJobResponse, - JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, -) -from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( - JobsAPIRoutesFineTuningGetFineTuningJobsRequest, - JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, - QueryParamStatus, -) -from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningStartFineTuningJobRequest, - JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningStartFineTuningJobResponse, - JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, -) -from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, - JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, -) -from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, - JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, - JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, - 
JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, -) -from .jobsout import ( - JobsOut, - JobsOutData, - JobsOutDataTypedDict, - JobsOutObject, - JobsOutTypedDict, -) -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .legacyjobmetadataout import ( - LegacyJobMetadataOut, - LegacyJobMetadataOutObject, - LegacyJobMetadataOutTypedDict, -) -from .listfilesout import ListFilesOut, ListFilesOutTypedDict -from .messageentries import MessageEntries, MessageEntriesTypedDict -from .messageinputcontentchunks import ( - MessageInputContentChunks, - MessageInputContentChunksTypedDict, -) -from .messageinputentry import ( - MessageInputEntry, - MessageInputEntryContent, - MessageInputEntryContentTypedDict, - MessageInputEntryRole, - MessageInputEntryType, - MessageInputEntryTypedDict, - Object, -) -from .messageoutputcontentchunks import ( - MessageOutputContentChunks, - MessageOutputContentChunksTypedDict, -) -from .messageoutputentry import ( - MessageOutputEntry, - MessageOutputEntryContent, - MessageOutputEntryContentTypedDict, - MessageOutputEntryObject, - MessageOutputEntryRole, - MessageOutputEntryType, - MessageOutputEntryTypedDict, -) -from .messageoutputevent import ( - MessageOutputEvent, - MessageOutputEventContent, - MessageOutputEventContentTypedDict, - MessageOutputEventRole, - MessageOutputEventType, - MessageOutputEventTypedDict, -) -from .metricout import MetricOut, MetricOutTypedDict -from .mistralpromptmode import MistralPromptMode -from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict -from .modelconversation import ( - ModelConversation, - ModelConversationObject, - ModelConversationTools, - ModelConversationToolsTypedDict, - ModelConversationTypedDict, -) -from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict -from .moderationobject import ModerationObject, ModerationObjectTypedDict -from .moderationresponse import ModerationResponse, ModerationResponseTypedDict -from .ocrimageobject 
import OCRImageObject, OCRImageObjectTypedDict -from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict -from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict -from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict -from .ocrresponse import OCRResponse, OCRResponseTypedDict -from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict -from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict -from .prediction import Prediction, PredictionTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict -from .responsedoneevent import ( - ResponseDoneEvent, - ResponseDoneEventType, - ResponseDoneEventTypedDict, -) -from .responseerrorevent import ( - ResponseErrorEvent, - ResponseErrorEventType, - ResponseErrorEventTypedDict, -) -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .responseformats import ResponseFormats -from .responsestartedevent import ( - ResponseStartedEvent, - ResponseStartedEventType, - ResponseStartedEventTypedDict, -) -from .retrieve_model_v1_models_model_id_getop import ( - RetrieveModelV1ModelsModelIDGetRequest, - RetrieveModelV1ModelsModelIDGetRequestTypedDict, - RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict, -) -from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict -from .sampletype import SampleType -from .sdkerror import SDKError -from .security import Security, SecurityTypedDict -from .source import Source -from .ssetypes import SSETypes -from .systemmessage import ( - Role, - SystemMessage, - SystemMessageContent, - SystemMessageContentTypedDict, - SystemMessageTypedDict, -) -from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from .toolchoice import ToolChoice, 
ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolexecutiondoneevent import ( - ToolExecutionDoneEvent, - ToolExecutionDoneEventType, - ToolExecutionDoneEventTypedDict, -) -from .toolexecutionentry import ( - ToolExecutionEntry, - ToolExecutionEntryObject, - ToolExecutionEntryType, - ToolExecutionEntryTypedDict, -) -from .toolexecutionstartedevent import ( - ToolExecutionStartedEvent, - ToolExecutionStartedEventType, - ToolExecutionStartedEventTypedDict, -) -from .toolfilechunk import ToolFileChunk, ToolFileChunkType, ToolFileChunkTypedDict -from .toolmessage import ( - ToolMessage, - ToolMessageContent, - ToolMessageContentTypedDict, - ToolMessageRole, - ToolMessageTypedDict, -) -from .toolreferencechunk import ( - ToolReferenceChunk, - ToolReferenceChunkType, - ToolReferenceChunkTypedDict, -) -from .tooltypes import ToolTypes -from .trainingfile import TrainingFile, TrainingFileTypedDict -from .unarchiveftmodelout import ( - UnarchiveFTModelOut, - UnarchiveFTModelOutObject, - UnarchiveFTModelOutTypedDict, -) -from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict -from .uploadfileout import UploadFileOut, UploadFileOutTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import ( - UserMessage, - UserMessageContent, - UserMessageContentTypedDict, - UserMessageRole, - UserMessageTypedDict, -) -from .validationerror import ( - Loc, - LocTypedDict, - ValidationError, - ValidationErrorTypedDict, -) -from .wandbintegration import ( - WandbIntegration, - WandbIntegrationType, - WandbIntegrationTypedDict, -) -from .wandbintegrationout import ( - WandbIntegrationOut, - WandbIntegrationOutType, - WandbIntegrationOutTypedDict, -) -from .websearchpremiumtool import ( - WebSearchPremiumTool, - WebSearchPremiumToolType, - WebSearchPremiumToolTypedDict, -) -from .websearchtool import WebSearchTool, WebSearchToolType, WebSearchToolTypedDict +from typing import TYPE_CHECKING +from importlib import 
import_module +if TYPE_CHECKING: + from .agent import ( + Agent, + AgentObject, + AgentTools, + AgentToolsTypedDict, + AgentTypedDict, + ) + from .agentconversation import ( + AgentConversation, + AgentConversationObject, + AgentConversationTypedDict, + ) + from .agentcreationrequest import ( + AgentCreationRequest, + AgentCreationRequestTools, + AgentCreationRequestToolsTypedDict, + AgentCreationRequestTypedDict, + ) + from .agenthandoffdoneevent import ( + AgentHandoffDoneEvent, + AgentHandoffDoneEventType, + AgentHandoffDoneEventTypedDict, + ) + from .agenthandoffentry import ( + AgentHandoffEntry, + AgentHandoffEntryObject, + AgentHandoffEntryType, + AgentHandoffEntryTypedDict, + ) + from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventType, + AgentHandoffStartedEventTypedDict, + ) + from .agents_api_v1_agents_getop import ( + AgentsAPIV1AgentsGetRequest, + AgentsAPIV1AgentsGetRequestTypedDict, + ) + from .agents_api_v1_agents_listop import ( + AgentsAPIV1AgentsListRequest, + AgentsAPIV1AgentsListRequestTypedDict, + ) + from .agents_api_v1_agents_update_versionop import ( + AgentsAPIV1AgentsUpdateVersionRequest, + AgentsAPIV1AgentsUpdateVersionRequestTypedDict, + ) + from .agents_api_v1_agents_updateop import ( + AgentsAPIV1AgentsUpdateRequest, + AgentsAPIV1AgentsUpdateRequestTypedDict, + ) + from .agents_api_v1_conversations_append_streamop import ( + AgentsAPIV1ConversationsAppendStreamRequest, + AgentsAPIV1ConversationsAppendStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_appendop import ( + AgentsAPIV1ConversationsAppendRequest, + AgentsAPIV1ConversationsAppendRequestTypedDict, + ) + from .agents_api_v1_conversations_getop import ( + AgentsAPIV1ConversationsGetRequest, + AgentsAPIV1ConversationsGetRequestTypedDict, + AgentsAPIV1ConversationsGetResponseV1ConversationsGet, + AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict, + ) + from .agents_api_v1_conversations_historyop import ( + 
AgentsAPIV1ConversationsHistoryRequest, + AgentsAPIV1ConversationsHistoryRequestTypedDict, + ) + from .agents_api_v1_conversations_listop import ( + AgentsAPIV1ConversationsListRequest, + AgentsAPIV1ConversationsListRequestTypedDict, + ResponseBody, + ResponseBodyTypedDict, + ) + from .agents_api_v1_conversations_messagesop import ( + AgentsAPIV1ConversationsMessagesRequest, + AgentsAPIV1ConversationsMessagesRequestTypedDict, + ) + from .agents_api_v1_conversations_restart_streamop import ( + AgentsAPIV1ConversationsRestartStreamRequest, + AgentsAPIV1ConversationsRestartStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_restartop import ( + AgentsAPIV1ConversationsRestartRequest, + AgentsAPIV1ConversationsRestartRequestTypedDict, + ) + from .agentscompletionrequest import ( + AgentsCompletionRequest, + AgentsCompletionRequestMessages, + AgentsCompletionRequestMessagesTypedDict, + AgentsCompletionRequestStop, + AgentsCompletionRequestStopTypedDict, + AgentsCompletionRequestToolChoice, + AgentsCompletionRequestToolChoiceTypedDict, + AgentsCompletionRequestTypedDict, + ) + from .agentscompletionstreamrequest import ( + AgentsCompletionStreamRequest, + AgentsCompletionStreamRequestMessages, + AgentsCompletionStreamRequestMessagesTypedDict, + AgentsCompletionStreamRequestStop, + AgentsCompletionStreamRequestStopTypedDict, + AgentsCompletionStreamRequestToolChoice, + AgentsCompletionStreamRequestToolChoiceTypedDict, + AgentsCompletionStreamRequestTypedDict, + ) + from .agentupdaterequest import ( + AgentUpdateRequest, + AgentUpdateRequestTools, + AgentUpdateRequestToolsTypedDict, + AgentUpdateRequestTypedDict, + ) + from .apiendpoint import APIEndpoint + from .archiveftmodelout import ( + ArchiveFTModelOut, + ArchiveFTModelOutObject, + ArchiveFTModelOutTypedDict, + ) + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageRole, + AssistantMessageTypedDict, + ) + from 
.basemodelcard import BaseModelCard, BaseModelCardTypedDict, Type + from .batcherror import BatchError, BatchErrorTypedDict + from .batchjobin import BatchJobIn, BatchJobInTypedDict + from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict + from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict + from .batchjobstatus import BatchJobStatus + from .builtinconnectors import BuiltInConnectors + from .chatclassificationrequest import ( + ChatClassificationRequest, + ChatClassificationRequestTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceTypedDict, + FinishReason, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessages, + ChatCompletionStreamRequestMessagesTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + ) + from .chatmoderationrequest import ( + ChatModerationRequest, + ChatModerationRequestInputs, + ChatModerationRequestInputsTypedDict, + ChatModerationRequestTypedDict, + One, + OneTypedDict, + Two, + TwoTypedDict, + ) + from .checkpointout import CheckpointOut, CheckpointOutTypedDict + from .classificationrequest import ( + ClassificationRequest, + ClassificationRequestInputs, + ClassificationRequestInputsTypedDict, + ClassificationRequestTypedDict, + ) + from .classificationresponse import ( + ClassificationResponse, + ClassificationResponseTypedDict, + ) + from .classificationtargetresult import ( 
+ ClassificationTargetResult, + ClassificationTargetResultTypedDict, + ) + from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutIntegrations, + ClassifierDetailedJobOutIntegrationsTypedDict, + ClassifierDetailedJobOutJobType, + ClassifierDetailedJobOutObject, + ClassifierDetailedJobOutStatus, + ClassifierDetailedJobOutTypedDict, + ) + from .classifierftmodelout import ( + ClassifierFTModelOut, + ClassifierFTModelOutModelType, + ClassifierFTModelOutObject, + ClassifierFTModelOutTypedDict, + ) + from .classifierjobout import ( + ClassifierJobOut, + ClassifierJobOutIntegrations, + ClassifierJobOutIntegrationsTypedDict, + ClassifierJobOutJobType, + ClassifierJobOutObject, + ClassifierJobOutStatus, + ClassifierJobOutTypedDict, + ) + from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict + from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict + from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, + ) + from .classifiertrainingparametersin import ( + ClassifierTrainingParametersIn, + ClassifierTrainingParametersInTypedDict, + ) + from .codeinterpretertool import ( + CodeInterpreterTool, + CodeInterpreterToolType, + CodeInterpreterToolTypedDict, + ) + from .completionargs import CompletionArgs, CompletionArgsTypedDict + from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutIntegrations, + CompletionDetailedJobOutIntegrationsTypedDict, + CompletionDetailedJobOutJobType, + CompletionDetailedJobOutObject, + CompletionDetailedJobOutRepositories, + CompletionDetailedJobOutRepositoriesTypedDict, + CompletionDetailedJobOutStatus, + CompletionDetailedJobOutTypedDict, + ) + from .completionevent import CompletionEvent, 
CompletionEventTypedDict + from .completionftmodelout import ( + CompletionFTModelOut, + CompletionFTModelOutObject, + CompletionFTModelOutTypedDict, + ModelType, + ) + from .completionjobout import ( + CompletionJobOut, + CompletionJobOutObject, + CompletionJobOutTypedDict, + Integrations, + IntegrationsTypedDict, + JobType, + Repositories, + RepositoriesTypedDict, + Status, + ) + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, + CompletionResponseStreamChoiceTypedDict, + ) + from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, + ) + from .completiontrainingparametersin import ( + CompletionTrainingParametersIn, + CompletionTrainingParametersInTypedDict, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict + from .conversationappendrequest import ( + ConversationAppendRequest, + ConversationAppendRequestHandoffExecution, + ConversationAppendRequestTypedDict, + ) + from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestHandoffExecution, + ConversationAppendStreamRequestTypedDict, + ) + from .conversationevents import ( + ConversationEvents, + ConversationEventsData, + ConversationEventsDataTypedDict, + ConversationEventsTypedDict, + ) + from .conversationhistory import ( + ConversationHistory, + ConversationHistoryObject, + ConversationHistoryTypedDict, + Entries, + EntriesTypedDict, + ) + from .conversationinputs import ConversationInputs, ConversationInputsTypedDict + from .conversationmessages import ( + ConversationMessages, + ConversationMessagesObject, + ConversationMessagesTypedDict, + ) + from .conversationrequest import ( + ConversationRequest, + ConversationRequestTypedDict, + HandoffExecution, + Tools, + ToolsTypedDict, + ) + from .conversationresponse import ( + ConversationResponse, + ConversationResponseObject, + 
ConversationResponseTypedDict, + Outputs, + OutputsTypedDict, + ) + from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestHandoffExecution, + ConversationRestartRequestTypedDict, + ) + from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestHandoffExecution, + ConversationRestartStreamRequestTypedDict, + ) + from .conversationstreamrequest import ( + ConversationStreamRequest, + ConversationStreamRequestHandoffExecution, + ConversationStreamRequestTools, + ConversationStreamRequestToolsTypedDict, + ConversationStreamRequestTypedDict, + ) + from .conversationusageinfo import ( + ConversationUsageInfo, + ConversationUsageInfoTypedDict, + ) + from .delete_model_v1_models_model_id_deleteop import ( + DeleteModelV1ModelsModelIDDeleteRequest, + DeleteModelV1ModelsModelIDDeleteRequestTypedDict, + ) + from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict + from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict + from .deltamessage import ( + Content, + ContentTypedDict, + DeltaMessage, + DeltaMessageTypedDict, + ) + from .documentlibrarytool import ( + DocumentLibraryTool, + DocumentLibraryToolType, + DocumentLibraryToolTypedDict, + ) + from .documenturlchunk import ( + DocumentURLChunk, + DocumentURLChunkType, + DocumentURLChunkTypedDict, + ) + from .embeddingdtype import EmbeddingDtype + from .embeddingrequest import ( + EmbeddingRequest, + EmbeddingRequestInputs, + EmbeddingRequestInputsTypedDict, + EmbeddingRequestTypedDict, + ) + from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict + from .embeddingresponsedata import ( + EmbeddingResponseData, + EmbeddingResponseDataTypedDict, + ) + from .eventout import EventOut, EventOutTypedDict + from .filepurpose import FilePurpose + from .files_api_routes_delete_fileop import ( + FilesAPIRoutesDeleteFileRequest, + FilesAPIRoutesDeleteFileRequestTypedDict, + ) + from 
.files_api_routes_download_fileop import ( + FilesAPIRoutesDownloadFileRequest, + FilesAPIRoutesDownloadFileRequestTypedDict, + ) + from .files_api_routes_get_signed_urlop import ( + FilesAPIRoutesGetSignedURLRequest, + FilesAPIRoutesGetSignedURLRequestTypedDict, + ) + from .files_api_routes_list_filesop import ( + FilesAPIRoutesListFilesRequest, + FilesAPIRoutesListFilesRequestTypedDict, + ) + from .files_api_routes_retrieve_fileop import ( + FilesAPIRoutesRetrieveFileRequest, + FilesAPIRoutesRetrieveFileRequestTypedDict, + ) + from .files_api_routes_upload_fileop import ( + File, + FileTypedDict, + FilesAPIRoutesUploadFileMultiPartBodyParams, + FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, + ) + from .fileschema import FileSchema, FileSchemaTypedDict + from .filesignedurl import FileSignedURL, FileSignedURLTypedDict + from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, + ) + from .fimcompletionresponse import ( + FIMCompletionResponse, + FIMCompletionResponseTypedDict, + ) + from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, + ) + from .finetuneablemodeltype import FineTuneableModelType + from .ftclassifierlossfunction import FTClassifierLossFunction + from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, + ) + from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functioncallentry import ( + FunctionCallEntry, + FunctionCallEntryObject, + FunctionCallEntryType, + FunctionCallEntryTypedDict, + ) + from .functioncallentryarguments import ( + FunctionCallEntryArguments, + 
FunctionCallEntryArgumentsTypedDict, + ) + from .functioncallevent import ( + FunctionCallEvent, + FunctionCallEventType, + FunctionCallEventTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .functionresultentry import ( + FunctionResultEntry, + FunctionResultEntryObject, + FunctionResultEntryType, + FunctionResultEntryTypedDict, + ) + from .functiontool import FunctionTool, FunctionToolType, FunctionToolTypedDict + from .githubrepositoryin import ( + GithubRepositoryIn, + GithubRepositoryInType, + GithubRepositoryInTypedDict, + ) + from .githubrepositoryout import ( + GithubRepositoryOut, + GithubRepositoryOutType, + GithubRepositoryOutTypedDict, + ) + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imagegenerationtool import ( + ImageGenerationTool, + ImageGenerationToolType, + ImageGenerationToolTypedDict, + ) + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, + ) + from .inputentries import InputEntries, InputEntriesTypedDict + from .inputs import ( + Inputs, + InputsTypedDict, + InstructRequestInputs, + InstructRequestInputsMessages, + InstructRequestInputsMessagesTypedDict, + InstructRequestInputsTypedDict, + ) + from .instructrequest import ( + InstructRequest, + InstructRequestMessages, + InstructRequestMessagesTypedDict, + InstructRequestTypedDict, + ) + from .jobin import ( + Hyperparameters, + HyperparametersTypedDict, + JobIn, + JobInIntegrations, + JobInIntegrationsTypedDict, + JobInRepositories, + JobInRepositoriesTypedDict, + JobInTypedDict, + ) + from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict + from .jobs_api_routes_batch_cancel_batch_jobop import ( + JobsAPIRoutesBatchCancelBatchJobRequest, + JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobop import ( + 
JobsAPIRoutesBatchGetBatchJobRequest, + JobsAPIRoutesBatchGetBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobsop import ( + JobsAPIRoutesBatchGetBatchJobsRequest, + JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCancelFineTuningJobRequest, + JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningCancelFineTuningJobResponse, + JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, + ) + from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCreateFineTuningJobResponse, + JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, + Response1, + Response1TypedDict, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningGetFineTuningJobRequest, + JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobResponse, + JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( + JobsAPIRoutesFineTuningGetFineTuningJobsRequest, + JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, + QueryParamStatus, + ) + from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningStartFineTuningJobRequest, + JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningStartFineTuningJobResponse, + JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, + ) + from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, + ) + from 
.jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, + JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, + ) + from .jobsout import ( + JobsOut, + JobsOutData, + JobsOutDataTypedDict, + JobsOutObject, + JobsOutTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .legacyjobmetadataout import ( + LegacyJobMetadataOut, + LegacyJobMetadataOutObject, + LegacyJobMetadataOutTypedDict, + ) + from .listfilesout import ListFilesOut, ListFilesOutTypedDict + from .messageentries import MessageEntries, MessageEntriesTypedDict + from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, + ) + from .messageinputentry import ( + MessageInputEntry, + MessageInputEntryContent, + MessageInputEntryContentTypedDict, + MessageInputEntryRole, + MessageInputEntryType, + MessageInputEntryTypedDict, + Object, + ) + from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, + ) + from .messageoutputentry import ( + MessageOutputEntry, + MessageOutputEntryContent, + MessageOutputEntryContentTypedDict, + MessageOutputEntryObject, + MessageOutputEntryRole, + MessageOutputEntryType, + MessageOutputEntryTypedDict, + ) + from .messageoutputevent import ( + MessageOutputEvent, + MessageOutputEventContent, + MessageOutputEventContentTypedDict, + MessageOutputEventRole, + MessageOutputEventType, + MessageOutputEventTypedDict, + ) + from .metricout import MetricOut, MetricOutTypedDict + from .mistralpromptmode import MistralPromptMode + from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict + from .modelconversation import ( + ModelConversation, + ModelConversationObject, + ModelConversationTools, + ModelConversationToolsTypedDict, + ModelConversationTypedDict, + 
) + from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict + from .moderationobject import ModerationObject, ModerationObjectTypedDict + from .moderationresponse import ModerationResponse, ModerationResponseTypedDict + from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict + from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict + from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict + from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict + from .ocrresponse import OCRResponse, OCRResponseTypedDict + from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict + from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict + from .prediction import Prediction, PredictionTypedDict + from .referencechunk import ( + ReferenceChunk, + ReferenceChunkType, + ReferenceChunkTypedDict, + ) + from .responsedoneevent import ( + ResponseDoneEvent, + ResponseDoneEventType, + ResponseDoneEventTypedDict, + ) + from .responseerrorevent import ( + ResponseErrorEvent, + ResponseErrorEventType, + ResponseErrorEventTypedDict, + ) + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .responsestartedevent import ( + ResponseStartedEvent, + ResponseStartedEventType, + ResponseStartedEventTypedDict, + ) + from .retrieve_model_v1_models_model_id_getop import ( + RetrieveModelV1ModelsModelIDGetRequest, + RetrieveModelV1ModelsModelIDGetRequestTypedDict, + RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict, + ) + from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict + from .sampletype import SampleType + from .sdkerror import SDKError + from .security import Security, SecurityTypedDict + from .source import Source + from .ssetypes import SSETypes + from .systemmessage import ( + Role, + 
SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventType, + ToolExecutionDoneEventTypedDict, + ) + from .toolexecutionentry import ( + ToolExecutionEntry, + ToolExecutionEntryObject, + ToolExecutionEntryType, + ToolExecutionEntryTypedDict, + ) + from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventType, + ToolExecutionStartedEventTypedDict, + ) + from .toolfilechunk import ToolFileChunk, ToolFileChunkType, ToolFileChunkTypedDict + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, + ) + from .toolreferencechunk import ( + ToolReferenceChunk, + ToolReferenceChunkType, + ToolReferenceChunkTypedDict, + ) + from .tooltypes import ToolTypes + from .trainingfile import TrainingFile, TrainingFileTypedDict + from .unarchiveftmodelout import ( + UnarchiveFTModelOut, + UnarchiveFTModelOutObject, + UnarchiveFTModelOutTypedDict, + ) + from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict + from .uploadfileout import UploadFileOut, UploadFileOutTypedDict + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) + from .wandbintegration import ( + WandbIntegration, + WandbIntegrationType, + WandbIntegrationTypedDict, + ) + from .wandbintegrationout import ( + WandbIntegrationOut, + 
WandbIntegrationOutType, + WandbIntegrationOutTypedDict, + ) + from .websearchpremiumtool import ( + WebSearchPremiumTool, + WebSearchPremiumToolType, + WebSearchPremiumToolTypedDict, + ) + from .websearchtool import WebSearchTool, WebSearchToolType, WebSearchToolTypedDict __all__ = [ "APIEndpoint", @@ -1301,3 +1328,608 @@ "WebSearchToolType", "WebSearchToolTypedDict", ] + +_dynamic_imports: dict[str, str] = { + "Agent": ".agent", + "AgentObject": ".agent", + "AgentTools": ".agent", + "AgentToolsTypedDict": ".agent", + "AgentTypedDict": ".agent", + "AgentConversation": ".agentconversation", + "AgentConversationObject": ".agentconversation", + "AgentConversationTypedDict": ".agentconversation", + "AgentCreationRequest": ".agentcreationrequest", + "AgentCreationRequestTools": ".agentcreationrequest", + "AgentCreationRequestToolsTypedDict": ".agentcreationrequest", + "AgentCreationRequestTypedDict": ".agentcreationrequest", + "AgentHandoffDoneEvent": ".agenthandoffdoneevent", + "AgentHandoffDoneEventType": ".agenthandoffdoneevent", + "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", + "AgentHandoffEntry": ".agenthandoffentry", + "AgentHandoffEntryObject": ".agenthandoffentry", + "AgentHandoffEntryType": ".agenthandoffentry", + "AgentHandoffEntryTypedDict": ".agenthandoffentry", + "AgentHandoffStartedEvent": ".agenthandoffstartedevent", + "AgentHandoffStartedEventType": ".agenthandoffstartedevent", + "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", + "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateRequest": 
".agents_api_v1_agents_updateop", + "AgentsAPIV1AgentsUpdateRequestTypedDict": ".agents_api_v1_agents_updateop", + "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", + "ResponseBody": ".agents_api_v1_conversations_listop", + "ResponseBodyTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", + "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", + "AgentsCompletionRequest": ".agentscompletionrequest", + 
"AgentsCompletionRequestMessages": ".agentscompletionrequest", + "AgentsCompletionRequestMessagesTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestStop": ".agentscompletionrequest", + "AgentsCompletionRequestStopTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestToolChoice": ".agentscompletionrequest", + "AgentsCompletionRequestToolChoiceTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestTypedDict": ".agentscompletionrequest", + "AgentsCompletionStreamRequest": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessages": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessagesTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestStop": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestStopTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", + "AgentUpdateRequest": ".agentupdaterequest", + "AgentUpdateRequestTools": ".agentupdaterequest", + "AgentUpdateRequestToolsTypedDict": ".agentupdaterequest", + "AgentUpdateRequestTypedDict": ".agentupdaterequest", + "APIEndpoint": ".apiendpoint", + "ArchiveFTModelOut": ".archiveftmodelout", + "ArchiveFTModelOutObject": ".archiveftmodelout", + "ArchiveFTModelOutTypedDict": ".archiveftmodelout", + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageRole": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "BaseModelCard": ".basemodelcard", + "BaseModelCardTypedDict": ".basemodelcard", + "Type": ".basemodelcard", + "BatchError": ".batcherror", + "BatchErrorTypedDict": ".batcherror", + "BatchJobIn": ".batchjobin", + "BatchJobInTypedDict": 
".batchjobin", + "BatchJobOut": ".batchjobout", + "BatchJobOutObject": ".batchjobout", + "BatchJobOutTypedDict": ".batchjobout", + "BatchJobsOut": ".batchjobsout", + "BatchJobsOutObject": ".batchjobsout", + "BatchJobsOutTypedDict": ".batchjobsout", + "BatchJobStatus": ".batchjobstatus", + "BuiltInConnectors": ".builtinconnectors", + "ChatClassificationRequest": ".chatclassificationrequest", + "ChatClassificationRequestTypedDict": ".chatclassificationrequest", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "FinishReason": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "Messages": ".chatcompletionrequest", + "MessagesTypedDict": ".chatcompletionrequest", + "Stop": ".chatcompletionrequest", + "StopTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessages": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessagesTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "ChatModerationRequest": ".chatmoderationrequest", + "ChatModerationRequestInputs": ".chatmoderationrequest", + "ChatModerationRequestInputsTypedDict": ".chatmoderationrequest", + "ChatModerationRequestTypedDict": ".chatmoderationrequest", 
+ "One": ".chatmoderationrequest", + "OneTypedDict": ".chatmoderationrequest", + "Two": ".chatmoderationrequest", + "TwoTypedDict": ".chatmoderationrequest", + "CheckpointOut": ".checkpointout", + "CheckpointOutTypedDict": ".checkpointout", + "ClassificationRequest": ".classificationrequest", + "ClassificationRequestInputs": ".classificationrequest", + "ClassificationRequestInputsTypedDict": ".classificationrequest", + "ClassificationRequestTypedDict": ".classificationrequest", + "ClassificationResponse": ".classificationresponse", + "ClassificationResponseTypedDict": ".classificationresponse", + "ClassificationTargetResult": ".classificationtargetresult", + "ClassificationTargetResultTypedDict": ".classificationtargetresult", + "ClassifierDetailedJobOut": ".classifierdetailedjobout", + "ClassifierDetailedJobOutIntegrations": ".classifierdetailedjobout", + "ClassifierDetailedJobOutIntegrationsTypedDict": ".classifierdetailedjobout", + "ClassifierDetailedJobOutJobType": ".classifierdetailedjobout", + "ClassifierDetailedJobOutObject": ".classifierdetailedjobout", + "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", + "ClassifierDetailedJobOutTypedDict": ".classifierdetailedjobout", + "ClassifierFTModelOut": ".classifierftmodelout", + "ClassifierFTModelOutModelType": ".classifierftmodelout", + "ClassifierFTModelOutObject": ".classifierftmodelout", + "ClassifierFTModelOutTypedDict": ".classifierftmodelout", + "ClassifierJobOut": ".classifierjobout", + "ClassifierJobOutIntegrations": ".classifierjobout", + "ClassifierJobOutIntegrationsTypedDict": ".classifierjobout", + "ClassifierJobOutJobType": ".classifierjobout", + "ClassifierJobOutObject": ".classifierjobout", + "ClassifierJobOutStatus": ".classifierjobout", + "ClassifierJobOutTypedDict": ".classifierjobout", + "ClassifierTargetIn": ".classifiertargetin", + "ClassifierTargetInTypedDict": ".classifiertargetin", + "ClassifierTargetOut": ".classifiertargetout", + "ClassifierTargetOutTypedDict": 
".classifiertargetout", + "ClassifierTrainingParameters": ".classifiertrainingparameters", + "ClassifierTrainingParametersTypedDict": ".classifiertrainingparameters", + "ClassifierTrainingParametersIn": ".classifiertrainingparametersin", + "ClassifierTrainingParametersInTypedDict": ".classifiertrainingparametersin", + "CodeInterpreterTool": ".codeinterpretertool", + "CodeInterpreterToolType": ".codeinterpretertool", + "CodeInterpreterToolTypedDict": ".codeinterpretertool", + "CompletionArgs": ".completionargs", + "CompletionArgsTypedDict": ".completionargs", + "CompletionArgsStop": ".completionargsstop", + "CompletionArgsStopTypedDict": ".completionargsstop", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionDetailedJobOut": ".completiondetailedjobout", + "CompletionDetailedJobOutIntegrations": ".completiondetailedjobout", + "CompletionDetailedJobOutIntegrationsTypedDict": ".completiondetailedjobout", + "CompletionDetailedJobOutJobType": ".completiondetailedjobout", + "CompletionDetailedJobOutObject": ".completiondetailedjobout", + "CompletionDetailedJobOutRepositories": ".completiondetailedjobout", + "CompletionDetailedJobOutRepositoriesTypedDict": ".completiondetailedjobout", + "CompletionDetailedJobOutStatus": ".completiondetailedjobout", + "CompletionDetailedJobOutTypedDict": ".completiondetailedjobout", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionFTModelOut": ".completionftmodelout", + "CompletionFTModelOutObject": ".completionftmodelout", + "CompletionFTModelOutTypedDict": ".completionftmodelout", + "ModelType": ".completionftmodelout", + "CompletionJobOut": ".completionjobout", + "CompletionJobOutObject": ".completionjobout", + "CompletionJobOutTypedDict": ".completionjobout", + "Integrations": ".completionjobout", + "IntegrationsTypedDict": ".completionjobout", + "JobType": ".completionjobout", + "Repositories": ".completionjobout", + 
"RepositoriesTypedDict": ".completionjobout", + "Status": ".completionjobout", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "CompletionTrainingParameters": ".completiontrainingparameters", + "CompletionTrainingParametersTypedDict": ".completiontrainingparameters", + "CompletionTrainingParametersIn": ".completiontrainingparametersin", + "CompletionTrainingParametersInTypedDict": ".completiontrainingparametersin", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "ConversationAppendRequest": ".conversationappendrequest", + "ConversationAppendRequestHandoffExecution": ".conversationappendrequest", + "ConversationAppendRequestTypedDict": ".conversationappendrequest", + "ConversationAppendStreamRequest": ".conversationappendstreamrequest", + "ConversationAppendStreamRequestHandoffExecution": ".conversationappendstreamrequest", + "ConversationAppendStreamRequestTypedDict": ".conversationappendstreamrequest", + "ConversationEvents": ".conversationevents", + "ConversationEventsData": ".conversationevents", + "ConversationEventsDataTypedDict": ".conversationevents", + "ConversationEventsTypedDict": ".conversationevents", + "ConversationHistory": ".conversationhistory", + "ConversationHistoryObject": ".conversationhistory", + "ConversationHistoryTypedDict": ".conversationhistory", + "Entries": ".conversationhistory", + "EntriesTypedDict": ".conversationhistory", + "ConversationInputs": ".conversationinputs", + "ConversationInputsTypedDict": ".conversationinputs", + "ConversationMessages": ".conversationmessages", + "ConversationMessagesObject": ".conversationmessages", + "ConversationMessagesTypedDict": ".conversationmessages", + "ConversationRequest": ".conversationrequest", + "ConversationRequestTypedDict": ".conversationrequest", + "HandoffExecution": 
".conversationrequest", + "Tools": ".conversationrequest", + "ToolsTypedDict": ".conversationrequest", + "ConversationResponse": ".conversationresponse", + "ConversationResponseObject": ".conversationresponse", + "ConversationResponseTypedDict": ".conversationresponse", + "Outputs": ".conversationresponse", + "OutputsTypedDict": ".conversationresponse", + "ConversationRestartRequest": ".conversationrestartrequest", + "ConversationRestartRequestHandoffExecution": ".conversationrestartrequest", + "ConversationRestartRequestTypedDict": ".conversationrestartrequest", + "ConversationRestartStreamRequest": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestHandoffExecution": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestTypedDict": ".conversationrestartstreamrequest", + "ConversationStreamRequest": ".conversationstreamrequest", + "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", + "ConversationStreamRequestTools": ".conversationstreamrequest", + "ConversationStreamRequestToolsTypedDict": ".conversationstreamrequest", + "ConversationStreamRequestTypedDict": ".conversationstreamrequest", + "ConversationUsageInfo": ".conversationusageinfo", + "ConversationUsageInfoTypedDict": ".conversationusageinfo", + "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", + "DeleteFileOut": ".deletefileout", + "DeleteFileOutTypedDict": ".deletefileout", + "DeleteModelOut": ".deletemodelout", + "DeleteModelOutTypedDict": ".deletemodelout", + "Content": ".deltamessage", + "ContentTypedDict": ".deltamessage", + "DeltaMessage": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "DocumentLibraryTool": ".documentlibrarytool", + "DocumentLibraryToolType": ".documentlibrarytool", + "DocumentLibraryToolTypedDict": ".documentlibrarytool", + "DocumentURLChunk": ".documenturlchunk", + 
"DocumentURLChunkType": ".documenturlchunk", + "DocumentURLChunkTypedDict": ".documenturlchunk", + "EmbeddingDtype": ".embeddingdtype", + "EmbeddingRequest": ".embeddingrequest", + "EmbeddingRequestInputs": ".embeddingrequest", + "EmbeddingRequestInputsTypedDict": ".embeddingrequest", + "EmbeddingRequestTypedDict": ".embeddingrequest", + "EmbeddingResponse": ".embeddingresponse", + "EmbeddingResponseTypedDict": ".embeddingresponse", + "EmbeddingResponseData": ".embeddingresponsedata", + "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", + "EventOut": ".eventout", + "EventOutTypedDict": ".eventout", + "FilePurpose": ".filepurpose", + "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", + "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", + "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", + "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", + "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", + "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", + "File": ".files_api_routes_upload_fileop", + "FileTypedDict": ".files_api_routes_upload_fileop", + "FilesAPIRoutesUploadFileMultiPartBodyParams": ".files_api_routes_upload_fileop", + "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", + "FileSchema": ".fileschema", + "FileSchemaTypedDict": ".fileschema", + "FileSignedURL": ".filesignedurl", + "FileSignedURLTypedDict": ".filesignedurl", + "FIMCompletionRequest": ".fimcompletionrequest", + "FIMCompletionRequestStop": ".fimcompletionrequest", + 
"FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", + "FIMCompletionRequestTypedDict": ".fimcompletionrequest", + "FIMCompletionResponse": ".fimcompletionresponse", + "FIMCompletionResponseTypedDict": ".fimcompletionresponse", + "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", + "FineTuneableModelType": ".finetuneablemodeltype", + "FTClassifierLossFunction": ".ftclassifierlossfunction", + "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", + "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", + "FTModelCard": ".ftmodelcard", + "FTModelCardType": ".ftmodelcard", + "FTModelCardTypedDict": ".ftmodelcard", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionCallEntry": ".functioncallentry", + "FunctionCallEntryObject": ".functioncallentry", + "FunctionCallEntryType": ".functioncallentry", + "FunctionCallEntryTypedDict": ".functioncallentry", + "FunctionCallEntryArguments": ".functioncallentryarguments", + "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", + "FunctionCallEvent": ".functioncallevent", + "FunctionCallEventType": ".functioncallevent", + "FunctionCallEventTypedDict": ".functioncallevent", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "FunctionResultEntry": ".functionresultentry", + "FunctionResultEntryObject": ".functionresultentry", + "FunctionResultEntryType": ".functionresultentry", + "FunctionResultEntryTypedDict": ".functionresultentry", + "FunctionTool": ".functiontool", + "FunctionToolType": ".functiontool", + "FunctionToolTypedDict": ".functiontool", + "GithubRepositoryIn": 
".githubrepositoryin", + "GithubRepositoryInType": ".githubrepositoryin", + "GithubRepositoryInTypedDict": ".githubrepositoryin", + "GithubRepositoryOut": ".githubrepositoryout", + "GithubRepositoryOutType": ".githubrepositoryout", + "GithubRepositoryOutTypedDict": ".githubrepositoryout", + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "ImageGenerationTool": ".imagegenerationtool", + "ImageGenerationToolType": ".imagegenerationtool", + "ImageGenerationToolTypedDict": ".imagegenerationtool", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkImageURL": ".imageurlchunk", + "ImageURLChunkImageURLTypedDict": ".imageurlchunk", + "ImageURLChunkType": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "InputEntries": ".inputentries", + "InputEntriesTypedDict": ".inputentries", + "Inputs": ".inputs", + "InputsTypedDict": ".inputs", + "InstructRequestInputs": ".inputs", + "InstructRequestInputsMessages": ".inputs", + "InstructRequestInputsMessagesTypedDict": ".inputs", + "InstructRequestInputsTypedDict": ".inputs", + "InstructRequest": ".instructrequest", + "InstructRequestMessages": ".instructrequest", + "InstructRequestMessagesTypedDict": ".instructrequest", + "InstructRequestTypedDict": ".instructrequest", + "Hyperparameters": ".jobin", + "HyperparametersTypedDict": ".jobin", + "JobIn": ".jobin", + "JobInIntegrations": ".jobin", + "JobInIntegrationsTypedDict": ".jobin", + "JobInRepositories": ".jobin", + "JobInRepositoriesTypedDict": ".jobin", + "JobInTypedDict": ".jobin", + "JobMetadataOut": ".jobmetadataout", + "JobMetadataOutTypedDict": ".jobmetadataout", + "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", + 
"JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "Response1": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "Response1TypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": 
".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "QueryParamStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsOut": ".jobsout", + "JobsOutData": ".jobsout", + "JobsOutDataTypedDict": ".jobsout", + "JobsOutObject": ".jobsout", + "JobsOutTypedDict": ".jobsout", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "LegacyJobMetadataOut": ".legacyjobmetadataout", + "LegacyJobMetadataOutObject": ".legacyjobmetadataout", + "LegacyJobMetadataOutTypedDict": ".legacyjobmetadataout", + "ListFilesOut": ".listfilesout", + "ListFilesOutTypedDict": ".listfilesout", + "MessageEntries": ".messageentries", + "MessageEntriesTypedDict": ".messageentries", + "MessageInputContentChunks": ".messageinputcontentchunks", + "MessageInputContentChunksTypedDict": 
".messageinputcontentchunks", + "MessageInputEntry": ".messageinputentry", + "MessageInputEntryContent": ".messageinputentry", + "MessageInputEntryContentTypedDict": ".messageinputentry", + "MessageInputEntryRole": ".messageinputentry", + "MessageInputEntryType": ".messageinputentry", + "MessageInputEntryTypedDict": ".messageinputentry", + "Object": ".messageinputentry", + "MessageOutputContentChunks": ".messageoutputcontentchunks", + "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", + "MessageOutputEntry": ".messageoutputentry", + "MessageOutputEntryContent": ".messageoutputentry", + "MessageOutputEntryContentTypedDict": ".messageoutputentry", + "MessageOutputEntryObject": ".messageoutputentry", + "MessageOutputEntryRole": ".messageoutputentry", + "MessageOutputEntryType": ".messageoutputentry", + "MessageOutputEntryTypedDict": ".messageoutputentry", + "MessageOutputEvent": ".messageoutputevent", + "MessageOutputEventContent": ".messageoutputevent", + "MessageOutputEventContentTypedDict": ".messageoutputevent", + "MessageOutputEventRole": ".messageoutputevent", + "MessageOutputEventType": ".messageoutputevent", + "MessageOutputEventTypedDict": ".messageoutputevent", + "MetricOut": ".metricout", + "MetricOutTypedDict": ".metricout", + "MistralPromptMode": ".mistralpromptmode", + "ModelCapabilities": ".modelcapabilities", + "ModelCapabilitiesTypedDict": ".modelcapabilities", + "ModelConversation": ".modelconversation", + "ModelConversationObject": ".modelconversation", + "ModelConversationTools": ".modelconversation", + "ModelConversationToolsTypedDict": ".modelconversation", + "ModelConversationTypedDict": ".modelconversation", + "Data": ".modellist", + "DataTypedDict": ".modellist", + "ModelList": ".modellist", + "ModelListTypedDict": ".modellist", + "ModerationObject": ".moderationobject", + "ModerationObjectTypedDict": ".moderationobject", + "ModerationResponse": ".moderationresponse", + "ModerationResponseTypedDict": ".moderationresponse", + 
"OCRImageObject": ".ocrimageobject", + "OCRImageObjectTypedDict": ".ocrimageobject", + "OCRPageDimensions": ".ocrpagedimensions", + "OCRPageDimensionsTypedDict": ".ocrpagedimensions", + "OCRPageObject": ".ocrpageobject", + "OCRPageObjectTypedDict": ".ocrpageobject", + "Document": ".ocrrequest", + "DocumentTypedDict": ".ocrrequest", + "OCRRequest": ".ocrrequest", + "OCRRequestTypedDict": ".ocrrequest", + "OCRResponse": ".ocrresponse", + "OCRResponseTypedDict": ".ocrresponse", + "OCRUsageInfo": ".ocrusageinfo", + "OCRUsageInfoTypedDict": ".ocrusageinfo", + "OutputContentChunks": ".outputcontentchunks", + "OutputContentChunksTypedDict": ".outputcontentchunks", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkType": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "ResponseDoneEvent": ".responsedoneevent", + "ResponseDoneEventType": ".responsedoneevent", + "ResponseDoneEventTypedDict": ".responsedoneevent", + "ResponseErrorEvent": ".responseerrorevent", + "ResponseErrorEventType": ".responseerrorevent", + "ResponseErrorEventTypedDict": ".responseerrorevent", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "ResponseStartedEvent": ".responsestartedevent", + "ResponseStartedEventType": ".responsestartedevent", + "ResponseStartedEventTypedDict": ".responsestartedevent", + "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RetrieveFileOut": ".retrievefileout", + "RetrieveFileOutTypedDict": ".retrievefileout", + 
"SampleType": ".sampletype", + "SDKError": ".sdkerror", + "Security": ".security", + "SecurityTypedDict": ".security", + "Source": ".source", + "SSETypes": ".ssetypes", + "Role": ".systemmessage", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "TextChunk": ".textchunk", + "TextChunkType": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolExecutionDoneEvent": ".toolexecutiondoneevent", + "ToolExecutionDoneEventType": ".toolexecutiondoneevent", + "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", + "ToolExecutionEntry": ".toolexecutionentry", + "ToolExecutionEntryObject": ".toolexecutionentry", + "ToolExecutionEntryType": ".toolexecutionentry", + "ToolExecutionEntryTypedDict": ".toolexecutionentry", + "ToolExecutionStartedEvent": ".toolexecutionstartedevent", + "ToolExecutionStartedEventType": ".toolexecutionstartedevent", + "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", + "ToolFileChunk": ".toolfilechunk", + "ToolFileChunkType": ".toolfilechunk", + "ToolFileChunkTypedDict": ".toolfilechunk", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageRole": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolReferenceChunk": ".toolreferencechunk", + "ToolReferenceChunkType": ".toolreferencechunk", + "ToolReferenceChunkTypedDict": ".toolreferencechunk", + "ToolTypes": ".tooltypes", + "TrainingFile": ".trainingfile", + "TrainingFileTypedDict": ".trainingfile", + "UnarchiveFTModelOut": ".unarchiveftmodelout", + "UnarchiveFTModelOutObject": ".unarchiveftmodelout", + 
"UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", + "UpdateFTModelIn": ".updateftmodelin", + "UpdateFTModelInTypedDict": ".updateftmodelin", + "UploadFileOut": ".uploadfileout", + "UploadFileOutTypedDict": ".uploadfileout", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageRole": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", + "WandbIntegration": ".wandbintegration", + "WandbIntegrationType": ".wandbintegration", + "WandbIntegrationTypedDict": ".wandbintegration", + "WandbIntegrationOut": ".wandbintegrationout", + "WandbIntegrationOutType": ".wandbintegrationout", + "WandbIntegrationOutTypedDict": ".wandbintegrationout", + "WebSearchPremiumTool": ".websearchpremiumtool", + "WebSearchPremiumToolType": ".websearchpremiumtool", + "WebSearchPremiumToolTypedDict": ".websearchpremiumtool", + "WebSearchTool": ".websearchtool", + "WebSearchToolType": ".websearchtool", + "WebSearchToolTypedDict": ".websearchtool", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " + ) + + try: + module = import_module(module_name, __package__) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) diff --git a/src/mistralai/models/agent.py b/src/mistralai/models/agent.py 
index ce750606..b6bf17ab 100644 --- a/src/mistralai/models/agent.py +++ b/src/mistralai/models/agent.py @@ -108,7 +108,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py index 66d6d9f5..42ab84f5 100644 --- a/src/mistralai/models/agentconversation.py +++ b/src/mistralai/models/agentconversation.py @@ -50,7 +50,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/agentcreationrequest.py b/src/mistralai/models/agentcreationrequest.py index 7e0a1fa2..83a27028 100644 --- a/src/mistralai/models/agentcreationrequest.py +++ b/src/mistralai/models/agentcreationrequest.py @@ -88,7 +88,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/agenthandoffentry.py b/src/mistralai/models/agenthandoffentry.py index b8e356c9..44bfe0f2 100644 --- a/src/mistralai/models/agenthandoffentry.py +++ b/src/mistralai/models/agenthandoffentry.py @@ -54,7 +54,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index e99dcfc2..2e3c35f8 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -157,7 +157,7 @@ def serialize_model(self, handler): m = {} - for n, f in 
self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index b4b423f5..a74842f6 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -155,7 +155,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/agentupdaterequest.py b/src/mistralai/models/agentupdaterequest.py index ebb656d6..f6fcb27a 100644 --- a/src/mistralai/models/agentupdaterequest.py +++ b/src/mistralai/models/agentupdaterequest.py @@ -90,7 +90,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py index fd540d99..18841a72 100644 --- a/src/mistralai/models/assistantmessage.py +++ b/src/mistralai/models/assistantmessage.py @@ -50,7 +50,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index edb81741..8a4e3710 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -84,7 +84,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/batchjobin.py 
b/src/mistralai/models/batchjobin.py index e249e526..6fcce0f0 100644 --- a/src/mistralai/models/batchjobin.py +++ b/src/mistralai/models/batchjobin.py @@ -39,7 +39,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py index bf873f41..2b49057b 100644 --- a/src/mistralai/models/batchjobout.py +++ b/src/mistralai/models/batchjobout.py @@ -90,7 +90,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index 004cc011..ac90de32 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -178,7 +178,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 78a85bef..00f57144 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -180,7 +180,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/classifierdetailedjobout.py b/src/mistralai/models/classifierdetailedjobout.py index 971d529f..da5bd281 100644 --- a/src/mistralai/models/classifierdetailedjobout.py +++ b/src/mistralai/models/classifierdetailedjobout.py @@ -135,7 +135,7 @@ 
def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/classifierftmodelout.py b/src/mistralai/models/classifierftmodelout.py index 846a20a2..4143d0e8 100644 --- a/src/mistralai/models/classifierftmodelout.py +++ b/src/mistralai/models/classifierftmodelout.py @@ -80,7 +80,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/classifierjobout.py b/src/mistralai/models/classifierjobout.py index 66011b4a..c8df6da3 100644 --- a/src/mistralai/models/classifierjobout.py +++ b/src/mistralai/models/classifierjobout.py @@ -144,7 +144,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/classifiertargetin.py b/src/mistralai/models/classifiertargetin.py index c9e4b406..d8a060e4 100644 --- a/src/mistralai/models/classifiertargetin.py +++ b/src/mistralai/models/classifiertargetin.py @@ -34,7 +34,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/classifiertrainingparameters.py b/src/mistralai/models/classifiertrainingparameters.py index f0908e81..718beeac 100644 --- a/src/mistralai/models/classifiertrainingparameters.py +++ b/src/mistralai/models/classifiertrainingparameters.py @@ -52,7 +52,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) 
serialized.pop(k, None) diff --git a/src/mistralai/models/classifiertrainingparametersin.py b/src/mistralai/models/classifiertrainingparametersin.py index f1f16cfb..9868843f 100644 --- a/src/mistralai/models/classifiertrainingparametersin.py +++ b/src/mistralai/models/classifiertrainingparametersin.py @@ -64,7 +64,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/completionargs.py b/src/mistralai/models/completionargs.py index 2c5cf213..04e032ac 100644 --- a/src/mistralai/models/completionargs.py +++ b/src/mistralai/models/completionargs.py @@ -79,7 +79,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/completiondetailedjobout.py b/src/mistralai/models/completiondetailedjobout.py index 598a5e20..8fb1b62a 100644 --- a/src/mistralai/models/completiondetailedjobout.py +++ b/src/mistralai/models/completiondetailedjobout.py @@ -141,7 +141,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/completionftmodelout.py b/src/mistralai/models/completionftmodelout.py index 71ab1a45..ad04d73e 100644 --- a/src/mistralai/models/completionftmodelout.py +++ b/src/mistralai/models/completionftmodelout.py @@ -76,7 +76,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/completionjobout.py b/src/mistralai/models/completionjobout.py index 3932dae3..bed67b50 100644 
--- a/src/mistralai/models/completionjobout.py +++ b/src/mistralai/models/completionjobout.py @@ -154,7 +154,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/completionresponsestreamchoice.py b/src/mistralai/models/completionresponsestreamchoice.py index b3b2a287..2426148c 100644 --- a/src/mistralai/models/completionresponsestreamchoice.py +++ b/src/mistralai/models/completionresponsestreamchoice.py @@ -41,7 +41,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/completiontrainingparameters.py b/src/mistralai/models/completiontrainingparameters.py index 33b21ec9..0200e81c 100644 --- a/src/mistralai/models/completiontrainingparameters.py +++ b/src/mistralai/models/completiontrainingparameters.py @@ -57,7 +57,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/completiontrainingparametersin.py b/src/mistralai/models/completiontrainingparametersin.py index 92f8d99a..1f74bb9d 100644 --- a/src/mistralai/models/completiontrainingparametersin.py +++ b/src/mistralai/models/completiontrainingparametersin.py @@ -69,7 +69,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py index 48cc6fe7..0fcca512 100644 --- a/src/mistralai/models/conversationrequest.py +++ 
b/src/mistralai/models/conversationrequest.py @@ -112,7 +112,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py index a1c21d9a..0880727e 100644 --- a/src/mistralai/models/conversationstreamrequest.py +++ b/src/mistralai/models/conversationstreamrequest.py @@ -114,7 +114,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/conversationusageinfo.py b/src/mistralai/models/conversationusageinfo.py index 44ffd5e5..9ae6f4fb 100644 --- a/src/mistralai/models/conversationusageinfo.py +++ b/src/mistralai/models/conversationusageinfo.py @@ -42,7 +42,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py index b46cf641..88aefe7f 100644 --- a/src/mistralai/models/deltamessage.py +++ b/src/mistralai/models/deltamessage.py @@ -40,7 +40,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py index 29945102..33f29ba8 100644 --- a/src/mistralai/models/documenturlchunk.py +++ b/src/mistralai/models/documenturlchunk.py @@ -35,7 +35,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias 
or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 56cccc72..4b5db550 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -51,7 +51,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/eventout.py b/src/mistralai/models/eventout.py index a9f22874..32819034 100644 --- a/src/mistralai/models/eventout.py +++ b/src/mistralai/models/eventout.py @@ -34,7 +34,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/files_api_routes_list_filesop.py b/src/mistralai/models/files_api_routes_list_filesop.py index 03a33af7..5060c3b8 100644 --- a/src/mistralai/models/files_api_routes_list_filesop.py +++ b/src/mistralai/models/files_api_routes_list_filesop.py @@ -75,7 +75,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py index 4cf51c02..d687f222 100644 --- a/src/mistralai/models/fileschema.py +++ b/src/mistralai/models/fileschema.py @@ -63,7 +63,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py index 01f8b2d1..06210139 100644 --- a/src/mistralai/models/fimcompletionrequest.py +++ 
b/src/mistralai/models/fimcompletionrequest.py @@ -104,7 +104,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py index cc4cf6e8..05cc345b 100644 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -102,7 +102,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py index 9a640a28..48937f48 100644 --- a/src/mistralai/models/ftmodelcard.py +++ b/src/mistralai/models/ftmodelcard.py @@ -100,7 +100,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/functioncallentry.py b/src/mistralai/models/functioncallentry.py index 821e7c14..1e47fda9 100644 --- a/src/mistralai/models/functioncallentry.py +++ b/src/mistralai/models/functioncallentry.py @@ -55,7 +55,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/functionresultentry.py b/src/mistralai/models/functionresultentry.py index 64040954..f09e11ae 100644 --- a/src/mistralai/models/functionresultentry.py +++ b/src/mistralai/models/functionresultentry.py @@ -48,7 +48,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias 
or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py index eda4ee0b..801c0540 100644 --- a/src/mistralai/models/githubrepositoryin.py +++ b/src/mistralai/models/githubrepositoryin.py @@ -42,7 +42,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py index 72213b6f..0d74c17a 100644 --- a/src/mistralai/models/githubrepositoryout.py +++ b/src/mistralai/models/githubrepositoryout.py @@ -42,7 +42,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/imageurl.py b/src/mistralai/models/imageurl.py index 1e8276ad..6f077b69 100644 --- a/src/mistralai/models/imageurl.py +++ b/src/mistralai/models/imageurl.py @@ -26,7 +26,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py index cb535e46..aa0cd06c 100644 --- a/src/mistralai/models/jobin.py +++ b/src/mistralai/models/jobin.py @@ -120,7 +120,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/jobmetadataout.py b/src/mistralai/models/jobmetadataout.py index d1eeb4f2..10ef781e 100644 --- a/src/mistralai/models/jobmetadataout.py +++ b/src/mistralai/models/jobmetadataout.py @@ -57,7 +57,7 @@ def 
serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py index fa2c6ed3..39261226 100644 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -74,7 +74,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py index 710436c9..9aec8eb2 100644 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -135,7 +135,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/jsonschema.py b/src/mistralai/models/jsonschema.py index 5d96d1fd..e2b6a45e 100644 --- a/src/mistralai/models/jsonschema.py +++ b/src/mistralai/models/jsonschema.py @@ -34,7 +34,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py index 1741570e..d878173b 100644 --- a/src/mistralai/models/legacyjobmetadataout.py +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -98,7 +98,7 @@ def serialize_model(self, handler): m 
= {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py index 3d642cdf..0ea6f24c 100644 --- a/src/mistralai/models/messageinputentry.py +++ b/src/mistralai/models/messageinputentry.py @@ -68,7 +68,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/messageoutputentry.py b/src/mistralai/models/messageoutputentry.py index abb361e7..1c2e4107 100644 --- a/src/mistralai/models/messageoutputentry.py +++ b/src/mistralai/models/messageoutputentry.py @@ -79,7 +79,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/messageoutputevent.py b/src/mistralai/models/messageoutputevent.py index 328874d6..474cb081 100644 --- a/src/mistralai/models/messageoutputevent.py +++ b/src/mistralai/models/messageoutputevent.py @@ -72,7 +72,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/metricout.py b/src/mistralai/models/metricout.py index 7583d927..930b5c21 100644 --- a/src/mistralai/models/metricout.py +++ b/src/mistralai/models/metricout.py @@ -33,7 +33,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/modelconversation.py b/src/mistralai/models/modelconversation.py 
index 3e927192..4ced79ea 100644 --- a/src/mistralai/models/modelconversation.py +++ b/src/mistralai/models/modelconversation.py @@ -106,7 +106,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/ocrimageobject.py b/src/mistralai/models/ocrimageobject.py index 78e37c1f..cec0acf4 100644 --- a/src/mistralai/models/ocrimageobject.py +++ b/src/mistralai/models/ocrimageobject.py @@ -62,7 +62,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/ocrpageobject.py b/src/mistralai/models/ocrpageobject.py index c3ef8916..94624a16 100644 --- a/src/mistralai/models/ocrpageobject.py +++ b/src/mistralai/models/ocrpageobject.py @@ -43,7 +43,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py index 4f9dfd47..53ad6111 100644 --- a/src/mistralai/models/ocrrequest.py +++ b/src/mistralai/models/ocrrequest.py @@ -91,7 +91,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/ocrresponse.py b/src/mistralai/models/ocrresponse.py index df3b7d18..7b65bee7 100644 --- a/src/mistralai/models/ocrresponse.py +++ b/src/mistralai/models/ocrresponse.py @@ -41,7 +41,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = 
serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/ocrusageinfo.py b/src/mistralai/models/ocrusageinfo.py index 9dced73b..36c9f826 100644 --- a/src/mistralai/models/ocrusageinfo.py +++ b/src/mistralai/models/ocrusageinfo.py @@ -30,7 +30,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py index 17424afb..c9319989 100644 --- a/src/mistralai/models/responseformat.py +++ b/src/mistralai/models/responseformat.py @@ -31,7 +31,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py index 70d688cc..e5f91449 100644 --- a/src/mistralai/models/retrievefileout.py +++ b/src/mistralai/models/retrievefileout.py @@ -66,7 +66,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py index 20c9bf19..533752ad 100644 --- a/src/mistralai/models/toolexecutionentry.py +++ b/src/mistralai/models/toolexecutionentry.py @@ -49,7 +49,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/toolfilechunk.py b/src/mistralai/models/toolfilechunk.py index 1d28e2db..77c07d6d 100644 --- a/src/mistralai/models/toolfilechunk.py +++ b/src/mistralai/models/toolfilechunk.py @@ -40,7 +40,7 @@ 
def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py index bee9c700..82f62e0f 100644 --- a/src/mistralai/models/toolmessage.py +++ b/src/mistralai/models/toolmessage.py @@ -45,7 +45,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/toolreferencechunk.py b/src/mistralai/models/toolreferencechunk.py index 84f72696..c052340d 100644 --- a/src/mistralai/models/toolreferencechunk.py +++ b/src/mistralai/models/toolreferencechunk.py @@ -40,7 +40,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/updateftmodelin.py b/src/mistralai/models/updateftmodelin.py index 603f031c..1bd0eaf2 100644 --- a/src/mistralai/models/updateftmodelin.py +++ b/src/mistralai/models/updateftmodelin.py @@ -26,7 +26,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py index cf783862..3a8b7337 100644 --- a/src/mistralai/models/uploadfileout.py +++ b/src/mistralai/models/uploadfileout.py @@ -63,7 +63,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/usermessage.py 
b/src/mistralai/models/usermessage.py index dac2618a..049bc755 100644 --- a/src/mistralai/models/usermessage.py +++ b/src/mistralai/models/usermessage.py @@ -39,7 +39,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py index 2bafc035..0789b648 100644 --- a/src/mistralai/models/wandbintegration.py +++ b/src/mistralai/models/wandbintegration.py @@ -45,7 +45,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py index bb624bd8..a1c2f570 100644 --- a/src/mistralai/models/wandbintegrationout.py +++ b/src/mistralai/models/wandbintegrationout.py @@ -43,7 +43,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index 96aab468..86259e17 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -63,6 +63,7 @@ def list( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="list_models_v1_models_get", oauth2_scopes=[], @@ -155,6 +156,7 @@ async def list_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="list_models_v1_models_get", oauth2_scopes=[], @@ -254,6 +256,7 @@ def retrieve( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", 
operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], @@ -356,6 +359,7 @@ async def retrieve_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=[], @@ -458,6 +462,7 @@ def delete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], @@ -557,6 +562,7 @@ async def delete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=[], @@ -667,6 +673,7 @@ def update( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], @@ -774,6 +781,7 @@ async def update_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=[], @@ -870,6 +878,7 @@ def archive( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], @@ -963,6 +972,7 @@ async def archive_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=[], @@ -1056,6 +1066,7 @@ def unarchive( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], @@ -1149,6 +1160,7 @@ async def 
unarchive_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=[], diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index cdc56ae8..a7b1d04a 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -103,6 +103,7 @@ def process( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="ocr_v1_ocr_post", oauth2_scopes=[], @@ -235,6 +236,7 @@ async def process_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="ocr_v1_ocr_post", oauth2_scopes=[], diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 5414436d..352c16a1 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -6,46 +6,62 @@ from .utils.logger import Logger, get_default_logger from .utils.retries import RetryConfig import httpx +import importlib from mistralai import models, utils from mistralai._hooks import SDKHooks -from mistralai.agents import Agents -from mistralai.batch import Batch -from mistralai.beta import Beta -from mistralai.chat import Chat -from mistralai.classifiers import Classifiers -from mistralai.embeddings import Embeddings -from mistralai.files import Files -from mistralai.fim import Fim -from mistralai.fine_tuning import FineTuning -from mistralai.models_ import Models -from mistralai.ocr import Ocr from mistralai.types import OptionalNullable, UNSET -from typing import Any, Callable, Dict, Optional, Union, cast +from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast import weakref +if TYPE_CHECKING: + from mistralai.agents import Agents + from mistralai.batch import Batch + from mistralai.beta import Beta + from mistralai.chat import Chat + from mistralai.classifiers import Classifiers + from mistralai.embeddings import 
Embeddings + from mistralai.files import Files + from mistralai.fim import Fim + from mistralai.fine_tuning import FineTuning + from mistralai.models_ import Models + from mistralai.ocr import Ocr + class Mistral(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" - models: Models + models: "Models" r"""Model Management API""" - beta: Beta - files: Files + beta: "Beta" + files: "Files" r"""Files API""" - fine_tuning: FineTuning - batch: Batch - chat: Chat + fine_tuning: "FineTuning" + batch: "Batch" + chat: "Chat" r"""Chat Completion API.""" - fim: Fim + fim: "Fim" r"""Fill-in-the-middle API.""" - agents: Agents + agents: "Agents" r"""Agents API.""" - embeddings: Embeddings + embeddings: "Embeddings" r"""Embeddings API.""" - classifiers: Classifiers + classifiers: "Classifiers" r"""Classifiers API.""" - ocr: Ocr + ocr: "Ocr" r"""OCR API""" + _sub_sdk_map = { + "models": ("mistralai.models_", "Models"), + "beta": ("mistralai.beta", "Beta"), + "files": ("mistralai.files", "Files"), + "fine_tuning": ("mistralai.fine_tuning", "FineTuning"), + "batch": ("mistralai.batch", "Batch"), + "chat": ("mistralai.chat", "Chat"), + "fim": ("mistralai.fim", "Fim"), + "agents": ("mistralai.agents", "Agents"), + "embeddings": ("mistralai.embeddings", "Embeddings"), + "classifiers": ("mistralai.classifiers", "Classifiers"), + "ocr": ("mistralai.ocr", "Ocr"), + } def __init__( self, @@ -120,6 +136,9 @@ def __init__( hooks = SDKHooks() + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( current_server_url, client @@ -127,9 +146,6 @@ def __init__( if 
current_server_url != server_url: self.sdk_configuration.server_url = server_url - # pylint: disable=protected-access - self.sdk_configuration.__dict__["_hooks"] = hooks - weakref.finalize( self, close_clients, @@ -140,20 +156,32 @@ def __init__( self.sdk_configuration.async_client_supplied, ) - self._init_sdks() - - def _init_sdks(self): - self.models = Models(self.sdk_configuration) - self.beta = Beta(self.sdk_configuration) - self.files = Files(self.sdk_configuration) - self.fine_tuning = FineTuning(self.sdk_configuration) - self.batch = Batch(self.sdk_configuration) - self.chat = Chat(self.sdk_configuration) - self.fim = Fim(self.sdk_configuration) - self.agents = Agents(self.sdk_configuration) - self.embeddings = Embeddings(self.sdk_configuration) - self.classifiers = Classifiers(self.sdk_configuration) - self.ocr = Ocr(self.sdk_configuration) + def __getattr__(self, name: str): + if name in self._sub_sdk_map: + module_path, class_name = self._sub_sdk_map[name] + try: + module = importlib.import_module(module_path) + klass = getattr(module, class_name) + instance = klass(self.sdk_configuration) + setattr(self, name, instance) + return instance + except ImportError as e: + raise AttributeError( + f"Failed to import module {module_path} for attribute {name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" + ) from e + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'" + ) + + def __dir__(self): + default_attrs = list(super().__dir__()) + lazy_attrs = list(self._sub_sdk_map.keys()) + return sorted(list(set(default_attrs + lazy_attrs))) def __enter__(self): return self diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py index 257ff01d..7e77925d 100644 --- a/src/mistralai/sdkconfiguration.py +++ b/src/mistralai/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" -from ._hooks import SDKHooks from ._version import ( __gen_version__, __openapi_doc_version__, @@ -42,9 +41,6 @@ class SDKConfiguration: retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None - def __post_init__(self): - self._hooks = SDKHooks() - def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} @@ -55,6 +51,3 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - - def get_hooks(self) -> SDKHooks: - return self._hooks diff --git a/src/mistralai/types/basemodel.py b/src/mistralai/types/basemodel.py index a6187efa..231c2e37 100644 --- a/src/mistralai/types/basemodel.py +++ b/src/mistralai/types/basemodel.py @@ -2,7 +2,7 @@ from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType +from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union from typing_extensions import TypeAliasType, TypeAlias @@ -35,5 +35,5 @@ def __bool__(self) -> Literal[False]: "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) ) -UnrecognizedInt = NewType("UnrecognizedInt", int) -UnrecognizedStr = NewType("UnrecognizedStr", str) +UnrecognizedInt: TypeAlias = int +UnrecognizedStr: TypeAlias = str diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py index d8b21128..3d078198 100644 --- a/src/mistralai/utils/__init__.py +++ b/src/mistralai/utils/__init__.py @@ -1,51 +1,56 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .annotations import get_discriminator -from .enums import OpenEnumMeta -from .headers import get_headers, get_response_headers -from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, -) -from .queryparams import get_query_params -from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig -from .requestbodies import serialize_request_body, SerializedRequestBody -from .security import get_security, get_security_from_env +from typing import TYPE_CHECKING +from importlib import import_module -from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - validate_open_enum, -) -from .url import generate_url, template_url, remove_suffix -from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, -) -from .logger import Logger, get_body_content, get_default_logger +if TYPE_CHECKING: + from .annotations import get_discriminator + from .datetimes import parse_datetime + from .enums import OpenEnumMeta + from .headers import get_headers, get_response_headers + from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, + ) + from .queryparams import get_query_params + from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig + from .requestbodies import serialize_request_body, SerializedRequestBody + from .security import get_security, get_security_from_env + + from .serializers import ( + get_pydantic_model, + marshal_json, + 
unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, + ) + from .url import generate_url, template_url, remove_suffix + from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, + cast_partial, + ) + from .logger import Logger, get_body_content, get_default_logger __all__ = [ "BackoffStrategy", @@ -56,6 +61,7 @@ "get_body_content", "get_default_logger", "get_discriminator", + "parse_datetime", "get_global_from_env", "get_headers", "get_pydantic_model", @@ -99,3 +105,83 @@ "validate_open_enum", "cast_partial", ] + +_dynamic_imports: dict[str, str] = { + "BackoffStrategy": ".retries", + "FieldMetadata": ".metadata", + "find_metadata": ".metadata", + "FormMetadata": ".metadata", + "generate_url": ".url", + "get_body_content": ".logger", + "get_default_logger": ".logger", + "get_discriminator": ".annotations", + "parse_datetime": ".datetimes", + "get_global_from_env": ".values", + "get_headers": ".headers", + "get_pydantic_model": ".serializers", + "get_query_params": ".queryparams", + "get_response_headers": ".headers", + "get_security": ".security", + "get_security_from_env": ".security", + "HeaderMetadata": ".metadata", + "Logger": ".logger", + "marshal_json": ".serializers", + "match_content_type": ".values", + "match_status_codes": ".values", + "match_response": ".values", + "MultipartFormMetadata": ".metadata", + "OpenEnumMeta": ".enums", + "PathParamMetadata": ".metadata", + "QueryParamMetadata": ".metadata", + "remove_suffix": ".url", + "Retries": ".retries", + "retry": ".retries", + "retry_async": ".retries", + "RetryConfig": ".retries", + "RequestMetadata": ".metadata", + "SecurityMetadata": ".metadata", + "serialize_decimal": ".serializers", + "serialize_float": ".serializers", + "serialize_int": 
".serializers", + "serialize_request_body": ".requestbodies", + "SerializedRequestBody": ".requestbodies", + "stream_to_text": ".serializers", + "stream_to_text_async": ".serializers", + "stream_to_bytes": ".serializers", + "stream_to_bytes_async": ".serializers", + "template_url": ".url", + "unmarshal": ".serializers", + "unmarshal_json": ".serializers", + "validate_decimal": ".serializers", + "validate_const": ".serializers", + "validate_float": ".serializers", + "validate_int": ".serializers", + "validate_open_enum": ".serializers", + "cast_partial": ".values", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " + ) + + try: + module = import_module(module_name, __package__) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) diff --git a/src/mistralai/utils/datetimes.py b/src/mistralai/utils/datetimes.py new file mode 100644 index 00000000..a6c52cd6 --- /dev/null +++ b/src/mistralai/utils/datetimes.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +import sys + + +def parse_datetime(datetime_string: str) -> datetime: + """ + Convert a RFC 3339 / ISO 8601 formatted string into a datetime object. + Python versions 3.11 and later support parsing RFC 3339 directly with + datetime.fromisoformat(), but for earlier versions, this function + encapsulates the necessary extra logic. 
+ """ + # Python 3.11 and later can parse RFC 3339 directly + if sys.version_info >= (3, 11): + return datetime.fromisoformat(datetime_string) + + # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix, + # so fix that upfront. + if datetime_string.endswith("Z"): + datetime_string = datetime_string[:-1] + "+00:00" + + return datetime.fromisoformat(datetime_string) diff --git a/src/mistralai/utils/enums.py b/src/mistralai/utils/enums.py index c650b10c..c3bc13cf 100644 --- a/src/mistralai/utils/enums.py +++ b/src/mistralai/utils/enums.py @@ -1,34 +1,74 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" import enum - +import sys class OpenEnumMeta(enum.EnumMeta): - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. - # pylint: disable=redefined-builtin + # The __call__ method `boundary` kwarg was added in 3.11 and must be present + # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 + # pylint: disable=unexpected-keyword-arg + # The __call__ method `values` varg must be named for pyright. + # pylint: disable=keyword-arg-before-vararg + + if sys.version_info >= (3, 11): + def __call__( + cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. 
+ # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + except ValueError: + return value + else: + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value diff --git a/src/mistralai/utils/forms.py b/src/mistralai/utils/forms.py index 0472aba8..e873495f 100644 --- a/src/mistralai/utils/forms.py +++ b/src/mistralai/utils/forms.py @@ -86,11 +86,39 @@ def _populate_form( return form +def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: + """Extract file name, content, and content type from a file object.""" + file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields + + file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = 
file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + def serialize_multipart_form( media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: form: Dict[str, Any] = {} - files: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] if not isinstance(request, BaseModel): raise TypeError("invalid request body type") @@ -112,39 +140,32 @@ def serialize_multipart_form( f_name = field.alias if field.alias else name if field_metadata.file: - file_fields: Dict[str, FieldInfo] = val.__class__.model_fields - - file_name = "" - content = None - content_type = None - - for file_field_name in file_fields: - file_field = file_fields[file_field_name] + if isinstance(val, List): + # Handle array of files + for file_obj in val: + if not _is_set(file_obj): + continue + + file_name, content, content_type = _extract_file_properties(file_obj) - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue + if content_type is not None: + files.append((f_name + "[]", (file_name, content, content_type))) + else: + files.append((f_name + "[]", (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) - if file_metadata.content: - content = getattr(val, file_field_name, None) - elif file_field_name == "content_type": - content_type = getattr(val, file_field_name, None) + if content_type is not None: + files.append((f_name, (file_name, 
content, content_type))) else: - file_name = getattr(val, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - if content_type is not None: - files[f_name] = (file_name, content, content_type) - else: - files[f_name] = (file_name, content) + files.append((f_name, (file_name, content))) elif field_metadata.json: - files[f_name] = ( + files.append((f_name, ( None, marshal_json(val, request_field_types[name]), "application/json", - ) + ))) else: if isinstance(val, List): values = [] diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py index baa41fbd..76e44d71 100644 --- a/src/mistralai/utils/serializers.py +++ b/src/mistralai/utils/serializers.py @@ -1,13 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from decimal import Decimal +import functools import json -from typing import Any, Dict, List, Union, get_args -import httpx +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions from typing_extensions import get_origin + +import httpx from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset @@ -185,6 +188,13 @@ def is_nullable(field): return False +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. 
+ """ + return any(obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union")) + + def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) @@ -217,3 +227,22 @@ def _contains_pydantic_model(data: Any) -> bool: return any(_contains_pydantic_model(value) for value in data.values()) return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. + Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result + From a5038e55dbe9e05f67c335c24f885b9e7ba7badd Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Mon, 30 Jun 2025 17:57:42 +0000 Subject: [PATCH 142/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.568.2 --- .speakeasy/gen.lock | 12 ++++---- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 17 +++++------ RELEASES.md | 12 +++++++- .../agentsapiv1conversationsgetrequest.md | 6 ++-- .../agentsapiv1conversationshistoryrequest.md | 6 ++-- ...agentsapiv1conversationsmessagesrequest.md | 6 ++-- .../agentsapiv1conversationsrestartrequest.md | 2 +- ...sapiv1conversationsrestartstreamrequest.md | 2 +- docs/models/agentscompletionrequest.md | 2 +- docs/models/agentscompletionstreamrequest.md | 2 +- docs/models/basemodelcard.md | 3 +- docs/models/chatcompletionrequest.md | 2 +- docs/models/chatcompletionstreamrequest.md | 2 +- docs/models/contentchunk.md | 6 ++++ docs/models/conversationrestartrequest.md | 4 +-- .../conversationrestartstreamrequest.md | 4 +-- docs/models/document.md | 6 ++++ docs/models/filechunk.md | 9 ++++++ docs/models/ftmodelcard.md | 3 +- docs/models/ftmodelcardtype.md | 8 ------ 
docs/models/inputentries.md | 24 ++++++++++++++++ docs/models/messageinputentry.md | 18 ++++++------ docs/models/messageinputentrytype.md | 8 ------ docs/models/modelcapabilities.md | 3 +- docs/models/type.md | 6 ++-- docs/sdks/agents/README.md | 4 +-- docs/sdks/chat/README.md | 4 +-- docs/sdks/conversations/README.md | 10 +++---- docs/sdks/models/README.md | 2 +- pyproject.toml | 2 +- src/mistralai/_version.py | 4 +-- src/mistralai/agents.py | 8 +++--- src/mistralai/chat.py | 8 +++--- src/mistralai/conversations.py | 28 +++++++++---------- src/mistralai/models/__init__.py | 17 +++++------ .../agents_api_v1_conversations_getop.py | 2 ++ .../agents_api_v1_conversations_historyop.py | 2 ++ .../agents_api_v1_conversations_messagesop.py | 2 ++ ...s_api_v1_conversations_restart_streamop.py | 2 ++ .../agents_api_v1_conversations_restartop.py | 2 ++ .../models/agentscompletionrequest.py | 2 ++ .../models/agentscompletionstreamrequest.py | 2 ++ src/mistralai/models/basemodelcard.py | 12 ++++---- src/mistralai/models/chatcompletionrequest.py | 2 ++ .../models/chatcompletionstreamrequest.py | 2 ++ src/mistralai/models/contentchunk.py | 3 ++ src/mistralai/models/filechunk.py | 23 +++++++++++++++ src/mistralai/models/ftmodelcard.py | 13 +++++---- src/mistralai/models/inputentries.py | 23 +++++++++++++-- src/mistralai/models/messageinputentry.py | 6 ++-- src/mistralai/models/modelcapabilities.py | 3 ++ src/mistralai/models/ocrrequest.py | 6 ++-- src/mistralai/models_.py | 4 +-- 54 files changed, 245 insertions(+), 128 deletions(-) create mode 100644 docs/models/filechunk.md delete mode 100644 docs/models/ftmodelcardtype.md delete mode 100644 docs/models/messageinputentrytype.md create mode 100644 src/mistralai/models/filechunk.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index d5ae1e64..3d37ffed 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 
9c8bd4d6bf675b159a80173b97c1265c + docChecksum: e0186c33d0269977e1790dfcc7d11aac docVersion: 1.0.0 speakeasyVersion: 1.568.2 generationVersion: 2.634.2 - releaseVersion: 1.9.0 - configChecksum: a67788bf50c3de92f0ef16f385b615b3 + releaseVersion: 1.9.1 + configChecksum: 5f97671226b9fdcc9adc3c7662003247 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -193,6 +193,7 @@ generatedFiles: - docs/models/entries.md - docs/models/eventout.md - docs/models/file.md + - docs/models/filechunk.md - docs/models/filepurpose.md - docs/models/filesapiroutesdeletefilerequest.md - docs/models/filesapiroutesdownloadfilerequest.md @@ -212,7 +213,6 @@ generatedFiles: - docs/models/ftclassifierlossfunction.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md - - docs/models/ftmodelcardtype.md - docs/models/function.md - docs/models/functioncall.md - docs/models/functioncallentry.md @@ -280,7 +280,6 @@ generatedFiles: - docs/models/messageinputentry.md - docs/models/messageinputentrycontent.md - docs/models/messageinputentryrole.md - - docs/models/messageinputentrytype.md - docs/models/messageoutputcontentchunks.md - docs/models/messageoutputentry.md - docs/models/messageoutputentrycontent.md @@ -506,6 +505,7 @@ generatedFiles: - src/mistralai/models/embeddingresponse.py - src/mistralai/models/embeddingresponsedata.py - src/mistralai/models/eventout.py + - src/mistralai/models/filechunk.py - src/mistralai/models/filepurpose.py - src/mistralai/models/files_api_routes_delete_fileop.py - src/mistralai/models/files_api_routes_download_fileop.py @@ -652,7 +652,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, 
"function_calling": true, "fine_tuning": false, "vision": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false, "classification": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} "422": application/json: {} delete_model_v1_models__model_id__delete: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 2c46f3c0..6eb63598 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.0 + version: 1.9.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 12dfd152..2e95e59c 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,10 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 - sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 + sourceRevisionDigest: sha256:e9fd379cd22f75a10ccc5b866f4de98f973c3c0f77cb15a7bebcb94bf10c82f2 + sourceBlobDigest: sha256:4bb656d10d1cfbe09f9b1b7734c79f1855eb27184590362d3747116f6abf69d1 tags: - latest + - speakeasy-sdk-regen-1751306196 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -36,23 +37,23 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 - sourceBlobDigest: sha256:74aeb6a2e0d466c206f983ce79581cc72d205cc7866826282c181207ebe841a2 + sourceRevisionDigest: 
sha256:e9fd379cd22f75a10ccc5b866f4de98f973c3c0f77cb15a7bebcb94bf10c82f2 + sourceBlobDigest: sha256:4bb656d10d1cfbe09f9b1b7734c79f1855eb27184590362d3747116f6abf69d1 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:b631243aae349ddebec1b984874a8e1d5b40e67d6229a199a3d5e63ba69d1538 + codeSamplesRevisionDigest: sha256:85311d42c06d86b38d49ed14b2485f45ad219ef76da40dfbec2592bb75dcaf00 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.568.2 sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:sha256:c5931a7e0cc2db844149d71db57dfc2178665f0400bc26c90ee113795ea2872f + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:sha256:21244d618cafcc163c3aa4acbc443ca16c63b8614632b65b87fbb2c4066987f3 + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main targets: mistralai-azure-sdk: target: python diff --git a/RELEASES.md b/RELEASES.md index 265eda73..3d7513fc 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -238,4 +238,14 @@ Based on: ### Generated - [python v1.8.2] . ### Releases -- [PyPI v1.8.2] https://pypi.org/project/mistralai/1.8.2 - . \ No newline at end of file +- [PyPI v1.8.2] https://pypi.org/project/mistralai/1.8.2 - . 
+ +## 2025-06-30 17:56:20 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.1] . +### Releases +- [PyPI v1.9.1] https://pypi.org/project/mistralai/1.9.1 - . \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetrequest.md b/docs/models/agentsapiv1conversationsgetrequest.md index 0d2d7827..67d450c8 100644 --- a/docs/models/agentsapiv1conversationsgetrequest.md +++ b/docs/models/agentsapiv1conversationsgetrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. 
| \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationshistoryrequest.md b/docs/models/agentsapiv1conversationshistoryrequest.md index f0d4f049..7e5d39e9 100644 --- a/docs/models/agentsapiv1conversationshistoryrequest.md +++ b/docs/models/agentsapiv1conversationshistoryrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching entries. | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsmessagesrequest.md b/docs/models/agentsapiv1conversationsmessagesrequest.md index b3189925..a91ab046 100644 --- a/docs/models/agentsapiv1conversationsmessagesrequest.md +++ b/docs/models/agentsapiv1conversationsmessagesrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching messages. 
| \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartrequest.md b/docs/models/agentsapiv1conversationsrestartrequest.md index 11a2fe2e..a18a41f5 100644 --- a/docs/models/agentsapiv1conversationsrestartrequest.md +++ b/docs/models/agentsapiv1conversationsrestartrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | | `conversation_restart_request` | [models.ConversationRestartRequest](../models/conversationrestartrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartstreamrequest.md b/docs/models/agentsapiv1conversationsrestartstreamrequest.md index 4cbb9d6c..7548286a 100644 --- a/docs/models/agentsapiv1conversationsrestartstreamrequest.md +++ b/docs/models/agentsapiv1conversationsrestartstreamrequest.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. 
| | `conversation_restart_stream_request` | [models.ConversationRestartStreamRequest](../models/conversationrestartstreamrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index e4a3f849..73615ed9 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -18,5 +18,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index bd55190b..b0aac6c1 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -18,5 +18,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | \ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index a2a19fcb..18cb9a27 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -15,5 +15,6 @@ | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | *Optional[Literal["base"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index ecbcad39..a9806a4d 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -21,5 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 7f73a269..6faeb411 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -21,5 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md index 8cf7fad1..a65cd054 100644 --- a/docs/models/contentchunk.md +++ b/docs/models/contentchunk.md @@ -27,3 +27,9 @@ value: models.TextChunk = /* values here */ value: models.ReferenceChunk = /* values here */ ``` +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md index 16786f6a..61679df6 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -11,5 +11,5 @@ Request to restart a new conversation from a given entry in the conversation. | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md index 23bf9851..9548b336 100644 --- a/docs/models/conversationrestartstreamrequest.md +++ b/docs/models/conversationrestartstreamrequest.md @@ -11,5 +11,5 @@ Request to restart a new conversation from a given entry in the conversation. 
| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/document.md b/docs/models/document.md index e2940355..509d43b7 100644 --- a/docs/models/document.md +++ b/docs/models/document.md @@ -5,6 +5,12 @@ Document to run OCR on ## Supported Types +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + ### `models.DocumentURLChunk` ```python diff --git a/docs/models/filechunk.md b/docs/models/filechunk.md new file mode 100644 index 00000000..18217114 --- /dev/null +++ b/docs/models/filechunk.md @@ -0,0 +1,9 @@ +# FileChunk + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `type` | *Optional[Literal["file"]]* | :heavy_minus_sign: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md index aaa5b401..a286f04e 100644 --- a/docs/models/ftmodelcard.md +++ b/docs/models/ftmodelcard.md @@ -17,8 +17,9 @@ Extra fields for fine-tuned models. 
| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.FTModelCardType]](../models/ftmodelcardtype.md) | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["fine-tuned"]]* | :heavy_minus_sign: | N/A | | `job` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | | `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcardtype.md b/docs/models/ftmodelcardtype.md deleted file mode 100644 index 0b38470b..00000000 --- a/docs/models/ftmodelcardtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FTModelCardType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `FINE_TUNED` | fine-tuned | \ No newline at end of file diff --git a/docs/models/inputentries.md b/docs/models/inputentries.md index e1e48279..b44a467d 100644 --- a/docs/models/inputentries.md +++ b/docs/models/inputentries.md @@ -9,9 +9,33 @@ value: models.MessageInputEntry = /* values here */ ``` +### `models.MessageOutputEntry` + +```python +value: models.MessageOutputEntry = /* values here */ +``` + ### `models.FunctionResultEntry` ```python value: models.FunctionResultEntry = /* values here */ ``` +### `models.FunctionCallEntry` + +```python +value: models.FunctionCallEntry = /* values here */ +``` + +### `models.ToolExecutionEntry` + +```python +value: models.ToolExecutionEntry = /* values here */ +``` + +### `models.AgentHandoffEntry` + +```python +value: models.AgentHandoffEntry = /* values here */ +``` + diff --git a/docs/models/messageinputentry.md 
b/docs/models/messageinputentry.md index f5bb6c25..d0168f6e 100644 --- a/docs/models/messageinputentry.md +++ b/docs/models/messageinputentry.md @@ -5,12 +5,12 @@ Representation of an input message inside the conversation. ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | -| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | +| `created_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentrytype.md b/docs/models/messageinputentrytype.md deleted file mode 100644 index d3378124..00000000 --- a/docs/models/messageinputentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageInputEntryType - - -## Values - -| Name | Value | -| --------------- | --------------- | -| `MESSAGE_INPUT` | message.input | \ No newline at end of file diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md index 2e399ab6..36b27938 100644 --- a/docs/models/modelcapabilities.md +++ b/docs/models/modelcapabilities.md @@ -9,4 +9,5 @@ | `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/type.md b/docs/models/type.md index 239a00f5..357acf0b 100644 --- a/docs/models/type.md +++ b/docs/models/type.md @@ -3,6 +3,6 @@ ## Values -| Name | Value | -| ------ | ------ | -| `BASE` | base | \ No newline at end of file +| Name | Value | +| --------------- | --------------- | +| `MESSAGE_INPUT` | message.input | \ No newline at 
end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index d5014a36..6bab08dd 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -55,7 +55,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -116,7 +116,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
| | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index f8aca31f..7b467b58 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -57,7 +57,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -121,7 +121,7 @@ with Mistral( | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | | `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 25b1ab9c..38d5915b 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -135,7 +135,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -220,7 +220,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching entries. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -260,7 +260,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching messages. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -300,7 +300,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | | `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | @@ -461,7 +461,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. 
| | `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index d7a5ed85..7dd5d1de 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -55,7 +55,7 @@ with Mistral( ## retrieve -Retrieve a model information. +Retrieve information about a model. ### Example Usage diff --git a/pyproject.toml b/pyproject.toml index f8cf20a9..5cf64972 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.0" +version = "1.9.1" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index db4f08fe..d22b4e90 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.0" +__version__: str = "1.9.1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.9.0 2.634.2 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.9.1 2.634.2 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index febc3383..48c06372 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -69,7 +69,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -239,7 +239,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -411,7 +411,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -589,7 +589,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 8556c5a0..1ed067e8 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -148,7 +148,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -317,7 +317,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -496,7 +496,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -683,7 +683,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: - :param prompt_mode: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index f8b6ec2c..009df94d 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -712,7 +712,7 @@ def get( Given a conversation_id retrieve a conversation entity with its attributes. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching metadata. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -815,7 +815,7 @@ async def get_async( Given a conversation_id retrieve a conversation entity with its attributes. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching metadata. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1178,7 +1178,7 @@ def get_history( Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching entries. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1278,7 +1278,7 @@ async def get_history_async( Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching entries. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1378,7 +1378,7 @@ def get_messages( Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching messages. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1478,7 +1478,7 @@ async def get_messages_async( Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. - :param conversation_id: + :param conversation_id: ID of the conversation from which we are fetching messages. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1588,7 +1588,7 @@ def restart( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. :param inputs: :param from_entry_id: :param stream: @@ -1617,10 +1617,10 @@ def restart( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) @@ -1721,7 +1721,7 @@ async def restart_async( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. 
:param inputs: :param from_entry_id: :param stream: @@ -1750,10 +1750,10 @@ async def restart_async( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) @@ -2426,7 +2426,7 @@ def restart_stream( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. :param inputs: :param from_entry_id: :param stream: @@ -2455,10 +2455,10 @@ def restart_stream( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) @@ -2564,7 +2564,7 @@ async def restart_stream_async( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - :param conversation_id: + :param conversation_id: ID of the original conversation which is being restarted. 
:param inputs: :param from_entry_id: :param stream: @@ -2593,10 +2593,10 @@ async def restart_stream_async( stream=stream, store=store, handoff_execution=handoff_execution, - from_entry_id=from_entry_id, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + from_entry_id=from_entry_id, ), ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 1b7b9c6c..a44de97c 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -129,7 +129,7 @@ AssistantMessageRole, AssistantMessageTypedDict, ) - from .basemodelcard import BaseModelCard, BaseModelCardTypedDict, Type + from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .batcherror import BatchError, BatchErrorTypedDict from .batchjobin import BatchJobIn, BatchJobInTypedDict from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict @@ -378,6 +378,7 @@ EmbeddingResponseDataTypedDict, ) from .eventout import EventOut, EventOutTypedDict + from .filechunk import FileChunk, FileChunkTypedDict from .filepurpose import FilePurpose from .files_api_routes_delete_fileop import ( FilesAPIRoutesDeleteFileRequest, @@ -429,7 +430,7 @@ FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict, ) - from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict + from .ftmodelcard import FTModelCard, FTModelCardTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( Arguments, @@ -589,9 +590,9 @@ MessageInputEntryContent, MessageInputEntryContentTypedDict, MessageInputEntryRole, - MessageInputEntryType, MessageInputEntryTypedDict, Object, + Type, ) from .messageoutputcontentchunks import ( MessageOutputContentChunks, @@ -1020,9 +1021,10 @@ "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelCard", - "FTModelCardType", "FTModelCardTypedDict", "File", + "FileChunk", + "FileChunkTypedDict", "FilePurpose", "FileSchema", "FileSchemaTypedDict", @@ 
-1161,7 +1163,6 @@ "MessageInputEntryContent", "MessageInputEntryContentTypedDict", "MessageInputEntryRole", - "MessageInputEntryType", "MessageInputEntryTypedDict", "MessageOutputContentChunks", "MessageOutputContentChunksTypedDict", @@ -1411,7 +1412,6 @@ "AssistantMessageTypedDict": ".assistantmessage", "BaseModelCard": ".basemodelcard", "BaseModelCardTypedDict": ".basemodelcard", - "Type": ".basemodelcard", "BatchError": ".batcherror", "BatchErrorTypedDict": ".batcherror", "BatchJobIn": ".batchjobin", @@ -1603,6 +1603,8 @@ "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", "EventOut": ".eventout", "EventOutTypedDict": ".eventout", + "FileChunk": ".filechunk", + "FileChunkTypedDict": ".filechunk", "FilePurpose": ".filepurpose", "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", @@ -1637,7 +1639,6 @@ "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", "FTModelCard": ".ftmodelcard", - "FTModelCardType": ".ftmodelcard", "FTModelCardTypedDict": ".ftmodelcard", "Function": ".function", "FunctionTypedDict": ".function", @@ -1756,9 +1757,9 @@ "MessageInputEntryContent": ".messageinputentry", "MessageInputEntryContentTypedDict": ".messageinputentry", "MessageInputEntryRole": ".messageinputentry", - "MessageInputEntryType": ".messageinputentry", "MessageInputEntryTypedDict": ".messageinputentry", "Object": ".messageinputentry", + "Type": ".messageinputentry", "MessageOutputContentChunks": ".messageoutputcontentchunks", "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", "MessageOutputEntry": ".messageoutputentry", diff --git a/src/mistralai/models/agents_api_v1_conversations_getop.py b/src/mistralai/models/agents_api_v1_conversations_getop.py index 4a800ad6..a37a61ba 100644 --- a/src/mistralai/models/agents_api_v1_conversations_getop.py +++ 
b/src/mistralai/models/agents_api_v1_conversations_getop.py @@ -11,12 +11,14 @@ class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" class AgentsAPIV1ConversationsGetRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the conversation from which we are fetching metadata.""" AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType( diff --git a/src/mistralai/models/agents_api_v1_conversations_historyop.py b/src/mistralai/models/agents_api_v1_conversations_historyop.py index 09fb6081..b8c33d1b 100644 --- a/src/mistralai/models/agents_api_v1_conversations_historyop.py +++ b/src/mistralai/models/agents_api_v1_conversations_historyop.py @@ -8,9 +8,11 @@ class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the conversation from which we are fetching entries.""" class AgentsAPIV1ConversationsHistoryRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the conversation from which we are fetching entries.""" diff --git a/src/mistralai/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/models/agents_api_v1_conversations_messagesop.py index ade66e5e..f0dac8bf 100644 --- a/src/mistralai/models/agents_api_v1_conversations_messagesop.py +++ b/src/mistralai/models/agents_api_v1_conversations_messagesop.py @@ -8,9 +8,11 @@ class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the conversation from which we are fetching messages.""" class AgentsAPIV1ConversationsMessagesRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the conversation from which we are fetching messages.""" diff --git 
a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py index c8fd8475..f39b74eb 100644 --- a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py +++ b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py @@ -12,6 +12,7 @@ class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the original conversation which is being restarted.""" conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict @@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the original conversation which is being restarted.""" conversation_restart_stream_request: Annotated[ ConversationRestartStreamRequest, diff --git a/src/mistralai/models/agents_api_v1_conversations_restartop.py b/src/mistralai/models/agents_api_v1_conversations_restartop.py index aa867aff..f706c066 100644 --- a/src/mistralai/models/agents_api_v1_conversations_restartop.py +++ b/src/mistralai/models/agents_api_v1_conversations_restartop.py @@ -12,6 +12,7 @@ class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): conversation_id: str + r"""ID of the original conversation which is being restarted.""" conversation_restart_request: ConversationRestartRequestTypedDict @@ -19,6 +20,7 @@ class AgentsAPIV1ConversationsRestartRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + r"""ID of the original conversation which is being restarted.""" conversation_restart_request: Annotated[ ConversationRestartRequest, diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index 2e3c35f8..c832edfd 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ 
b/src/mistralai/models/agentscompletionrequest.py @@ -89,6 +89,7 @@ class AgentsCompletionRequestTypedDict(TypedDict): prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" class AgentsCompletionRequest(BaseModel): @@ -132,6 +133,7 @@ class AgentsCompletionRequest(BaseModel): prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index a74842f6..6e619b77 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -88,6 +88,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" class AgentsCompletionStreamRequest(BaseModel): @@ -130,6 +131,7 @@ class AgentsCompletionStreamRequest(BaseModel): prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index 8a4e3710..fc852f4b 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -12,9 +12,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -Type = Literal["base"] - - class BaseModelCardTypedDict(TypedDict): id: str capabilities: ModelCapabilitiesTypedDict @@ -26,8 +23,9 @@ class BaseModelCardTypedDict(TypedDict): max_context_length: NotRequired[int] aliases: NotRequired[List[str]] deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: Type + type: Literal["base"] class BaseModelCard(BaseModel): @@ -51,10 +49,12 @@ class BaseModelCard(BaseModel): deprecation: OptionalNullable[datetime] = UNSET + deprecation_replacement_model: OptionalNullable[str] = UNSET + default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ - Annotated[Optional[Type], AfterValidator(validate_const("base"))], + Annotated[Optional[Literal["base"]], AfterValidator(validate_const("base"))], pydantic.Field(alias="type"), ] = "base" @@ -69,6 +69,7 @@ def serialize_model(self, handler): "max_context_length", "aliases", "deprecation", + "deprecation_replacement_model", "default_model_temperature", "type", ] @@ -76,6 +77,7 @@ def serialize_model(self, handler): "name", "description", "deprecation", + "deprecation_replacement_model", "default_model_temperature", ] null_default_fields = [] diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index ac90de32..6f195f13 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -89,6 +89,7 @@ class 
ChatCompletionRequestTypedDict(TypedDict): prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -140,6 +141,7 @@ class ChatCompletionRequest(BaseModel): prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 00f57144..0fa102e5 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -92,6 +92,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -142,6 +143,7 @@ class ChatCompletionStreamRequest(BaseModel): prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py index ff7d9fcf..4cb8ab6d 100644 --- a/src/mistralai/models/contentchunk.py +++ b/src/mistralai/models/contentchunk.py @@ -2,6 +2,7 @@ from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict @@ -17,6 +18,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, ReferenceChunkTypedDict, + FileChunkTypedDict, DocumentURLChunkTypedDict, ], ) @@ -28,6 +30,7 @@ Annotated[DocumentURLChunk, Tag("document_url")], Annotated[TextChunk, Tag("text")], Annotated[ReferenceChunk, Tag("reference")], + Annotated[FileChunk, Tag("file")], ], Discriminator(lambda m: get_discriminator(m, "type", "type")), ] diff --git a/src/mistralai/models/filechunk.py b/src/mistralai/models/filechunk.py new file mode 100644 index 00000000..83e60cef --- /dev/null +++ b/src/mistralai/models/filechunk.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class FileChunkTypedDict(TypedDict): + file_id: str + type: Literal["file"] + + +class FileChunk(BaseModel): + file_id: str + + TYPE: Annotated[ + Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], + pydantic.Field(alias="type"), + ] = "file" diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py index 48937f48..286357e7 100644 --- a/src/mistralai/models/ftmodelcard.py +++ b/src/mistralai/models/ftmodelcard.py @@ -12,9 +12,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -FTModelCardType = Literal["fine-tuned"] - - class FTModelCardTypedDict(TypedDict): r"""Extra fields for fine-tuned models.""" @@ -30,8 +27,9 @@ class FTModelCardTypedDict(TypedDict): max_context_length: NotRequired[int] aliases: NotRequired[List[str]] deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: FTModelCardType + type: Literal["fine-tuned"] archived: NotRequired[bool] @@ -62,11 +60,14 @@ class FTModelCard(BaseModel): deprecation: OptionalNullable[datetime] = UNSET + deprecation_replacement_model: OptionalNullable[str] = UNSET + default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ Annotated[ - Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) + Optional[Literal["fine-tuned"]], + AfterValidator(validate_const("fine-tuned")), ], pydantic.Field(alias="type"), ] = "fine-tuned" @@ -84,6 +85,7 @@ def serialize_model(self, handler): "max_context_length", "aliases", "deprecation", + "deprecation_replacement_model", "default_model_temperature", "type", 
"archived", @@ -92,6 +94,7 @@ def serialize_model(self, handler): "name", "description", "deprecation", + "deprecation_replacement_model", "default_model_temperature", ] null_default_fields = [] diff --git a/src/mistralai/models/inputentries.py b/src/mistralai/models/inputentries.py index 9c0fea6e..0221f968 100644 --- a/src/mistralai/models/inputentries.py +++ b/src/mistralai/models/inputentries.py @@ -1,18 +1,37 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict from typing import Union from typing_extensions import TypeAliasType InputEntriesTypedDict = TypeAliasType( "InputEntriesTypedDict", - Union[MessageInputEntryTypedDict, FunctionResultEntryTypedDict], + Union[ + MessageInputEntryTypedDict, + FunctionResultEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], ) InputEntries = TypeAliasType( - "InputEntries", Union[MessageInputEntry, FunctionResultEntry] + "InputEntries", + Union[ + MessageInputEntry, + FunctionResultEntry, + ToolExecutionEntry, + FunctionCallEntry, + MessageOutputEntry, + AgentHandoffEntry, + ], ) diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py index 0ea6f24c..6f1190c7 100644 --- a/src/mistralai/models/messageinputentry.py +++ b/src/mistralai/models/messageinputentry.py @@ -14,7 +14,7 @@ Object = Literal["entry"] -MessageInputEntryType 
= Literal["message.input"] +Type = Literal["message.input"] MessageInputEntryRole = Literal["assistant", "user"] @@ -35,7 +35,7 @@ class MessageInputEntryTypedDict(TypedDict): role: MessageInputEntryRole content: MessageInputEntryContentTypedDict object: NotRequired[Object] - type: NotRequired[MessageInputEntryType] + type: NotRequired[Type] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] id: NotRequired[str] @@ -50,7 +50,7 @@ class MessageInputEntry(BaseModel): object: Optional[Object] = "entry" - type: Optional[MessageInputEntryType] = "message.input" + type: Optional[Type] = "message.input" created_at: Optional[datetime] = None diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py index 961f8664..54c5f2a2 100644 --- a/src/mistralai/models/modelcapabilities.py +++ b/src/mistralai/models/modelcapabilities.py @@ -12,6 +12,7 @@ class ModelCapabilitiesTypedDict(TypedDict): function_calling: NotRequired[bool] fine_tuning: NotRequired[bool] vision: NotRequired[bool] + classification: NotRequired[bool] class ModelCapabilities(BaseModel): @@ -24,3 +25,5 @@ class ModelCapabilities(BaseModel): fine_tuning: Optional[bool] = False vision: Optional[bool] = False + + classification: Optional[bool] = False diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py index 53ad6111..df932c2a 100644 --- a/src/mistralai/models/ocrrequest.py +++ b/src/mistralai/models/ocrrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL @@ -11,12 +12,13 @@ DocumentTypedDict = TypeAliasType( - "DocumentTypedDict", Union[ImageURLChunkTypedDict, 
DocumentURLChunkTypedDict] + "DocumentTypedDict", + Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], ) r"""Document to run OCR on""" -Document = TypeAliasType("Document", Union[ImageURLChunk, DocumentURLChunk]) +Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk]) r"""Document to run OCR on""" diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index 86259e17..b712c545 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -208,7 +208,7 @@ def retrieve( ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model - Retrieve a model information. + Retrieve information about a model. :param model_id: The ID of the model to retrieve. :param retries: Override the default retry configuration for this method @@ -311,7 +311,7 @@ async def retrieve_async( ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model - Retrieve a model information. + Retrieve information about a model. :param model_id: The ID of the model to retrieve. 
:param retries: Override the default retry configuration for this method From d11dfba26fcdced2cb2b38e1f82fef5680e290e9 Mon Sep 17 00:00:00 2001 From: Nicolas Faurie Date: Tue, 1 Jul 2025 09:15:23 +0200 Subject: [PATCH 143/223] Adapt OCR example --- examples/ocr_process_from_file.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/examples/ocr_process_from_file.py b/examples/ocr_process_from_file.py index 70c9d4a8..84a7b4d8 100644 --- a/examples/ocr_process_from_file.py +++ b/examples/ocr_process_from_file.py @@ -26,12 +26,9 @@ def main(): purpose="ocr", ) - signed_url = client.files.get_signed_url(file_id=uploaded_file.id, expiry=1) - pdf_response = client.ocr.process(document={ - "document_url": signed_url.url, - "type": "document_url", - "document_name": "mistral-7b-pdf", + "type": "file", + "file_id": uploaded_file.id, }, model="mistral-ocr-latest", include_image_base64=True) # Print the parsed PDF From 348a1e33ec60339309d5cbe446adf898b48b3065 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Thu, 10 Jul 2025 12:24:10 +0000 Subject: [PATCH 144/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.568.2 --- .speakeasy/gen.lock | 280 ++- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +- README.md | 29 +- RELEASES.md | 12 +- docs/models/basemodelcard.md | 2 +- docs/models/conversationeventsdata.md | 6 + docs/models/documentout.md | 24 + docs/models/documenttextcontent.md | 8 + docs/models/documentupdatein.md | 8 + docs/models/entitytype.md | 12 + ...sapiroutesuploadfilemultipartbodyparams.md | 4 +- docs/models/ftmodelcard.md | 2 +- docs/models/ftmodelcardtype.md | 8 + docs/models/librariesdeletev1request.md | 8 + .../librariesdocumentsdeletev1request.md | 9 + ...mentsgetextractedtextsignedurlv1request.md | 9 + ...librariesdocumentsgetsignedurlv1request.md | 9 + .../librariesdocumentsgetstatusv1request.md | 9 + ...brariesdocumentsgettextcontentv1request.md | 9 + docs/models/librariesdocumentsgetv1request.md | 9 + 
.../models/librariesdocumentslistv1request.md | 13 + .../librariesdocumentsreprocessv1request.md | 9 + .../librariesdocumentsupdatev1request.md | 10 + ...ibrariesdocumentsuploadv1documentupload.md | 8 + .../librariesdocumentsuploadv1request.md | 9 + docs/models/librariesgetv1request.md | 8 + docs/models/librariessharecreatev1request.md | 9 + docs/models/librariessharedeletev1request.md | 9 + docs/models/librariessharelistv1request.md | 8 + docs/models/librariesupdatev1request.md | 9 + docs/models/libraryin.md | 10 + docs/models/libraryinupdate.md | 9 + docs/models/libraryout.md | 23 + docs/models/listdocumentout.md | 9 + docs/models/listlibraryout.md | 8 + docs/models/listsharingout.md | 8 + docs/models/messageinputentry.md | 19 +- docs/models/messageinputentrytype.md | 8 + docs/models/paginationinfo.md | 12 + docs/models/processingstatusout.md | 9 + docs/models/shareenum.md | 9 + docs/models/sharingdelete.md | 10 + docs/models/sharingin.md | 11 + docs/models/sharingout.md | 13 + docs/models/ssetypes.md | 1 + docs/models/toolexecutiondeltaevent.md | 13 + docs/models/toolexecutiondeltaeventtype.md | 8 + docs/models/toolexecutionentry.md | 1 + docs/models/toolexecutionstartedevent.md | 3 +- docs/models/toolreferencechunk.md | 3 +- docs/models/type.md | 6 +- docs/sdks/accesses/README.md | 137 ++ docs/sdks/conversations/README.md | 1 + docs/sdks/documents/README.md | 425 ++++ docs/sdks/libraries/README.md | 216 ++ pyproject.toml | 2 +- src/mistralai/_version.py | 4 +- src/mistralai/accesses.py | 672 ++++++ src/mistralai/beta.py | 4 + src/mistralai/documents.py | 2136 +++++++++++++++++ src/mistralai/files.py | 4 +- src/mistralai/libraries.py | 1041 ++++++++ src/mistralai/models/__init__.py | 240 +- src/mistralai/models/basemodelcard.py | 7 +- src/mistralai/models/conversationevents.py | 6 + src/mistralai/models/conversationhistory.py | 8 +- src/mistralai/models/documentout.py | 105 + src/mistralai/models/documenttextcontent.py | 13 + 
src/mistralai/models/documentupdatein.py | 44 + src/mistralai/models/entitytype.py | 9 + src/mistralai/models/file.py | 33 + .../models/files_api_routes_upload_fileop.py | 29 +- src/mistralai/models/ftmodelcard.py | 8 +- src/mistralai/models/inputentries.py | 8 +- src/mistralai/models/libraries_delete_v1op.py | 16 + .../models/libraries_documents_delete_v1op.py | 21 + ...ents_get_extracted_text_signed_url_v1op.py | 21 + ...libraries_documents_get_signed_url_v1op.py | 21 + .../libraries_documents_get_status_v1op.py | 21 + ...braries_documents_get_text_content_v1op.py | 21 + .../models/libraries_documents_get_v1op.py | 21 + .../models/libraries_documents_list_v1op.py | 78 + .../libraries_documents_reprocess_v1op.py | 21 + .../models/libraries_documents_update_v1op.py | 28 + .../models/libraries_documents_upload_v1op.py | 56 + src/mistralai/models/libraries_get_v1op.py | 16 + .../models/libraries_share_create_v1op.py | 22 + .../models/libraries_share_delete_v1op.py | 23 + .../models/libraries_share_list_v1op.py | 16 + src/mistralai/models/libraries_update_v1op.py | 23 + src/mistralai/models/libraryin.py | 50 + src/mistralai/models/libraryinupdate.py | 47 + src/mistralai/models/libraryout.py | 107 + src/mistralai/models/listdocumentout.py | 19 + src/mistralai/models/listlibraryout.py | 15 + src/mistralai/models/listsharingout.py | 15 + src/mistralai/models/messageinputentry.py | 18 +- src/mistralai/models/paginationinfo.py | 25 + src/mistralai/models/processingstatusout.py | 16 + src/mistralai/models/shareenum.py | 8 + src/mistralai/models/sharingdelete.py | 26 + src/mistralai/models/sharingin.py | 30 + src/mistralai/models/sharingout.py | 59 + src/mistralai/models/ssetypes.py | 1 + .../models/toolexecutiondeltaevent.py | 34 + src/mistralai/models/toolexecutionentry.py | 3 + .../models/toolexecutionstartedevent.py | 3 + src/mistralai/models/toolreferencechunk.py | 11 +- 109 files changed, 6727 insertions(+), 93 deletions(-) create mode 100644 docs/models/documentout.md 
create mode 100644 docs/models/documenttextcontent.md create mode 100644 docs/models/documentupdatein.md create mode 100644 docs/models/entitytype.md create mode 100644 docs/models/ftmodelcardtype.md create mode 100644 docs/models/librariesdeletev1request.md create mode 100644 docs/models/librariesdocumentsdeletev1request.md create mode 100644 docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md create mode 100644 docs/models/librariesdocumentsgetsignedurlv1request.md create mode 100644 docs/models/librariesdocumentsgetstatusv1request.md create mode 100644 docs/models/librariesdocumentsgettextcontentv1request.md create mode 100644 docs/models/librariesdocumentsgetv1request.md create mode 100644 docs/models/librariesdocumentslistv1request.md create mode 100644 docs/models/librariesdocumentsreprocessv1request.md create mode 100644 docs/models/librariesdocumentsupdatev1request.md create mode 100644 docs/models/librariesdocumentsuploadv1documentupload.md create mode 100644 docs/models/librariesdocumentsuploadv1request.md create mode 100644 docs/models/librariesgetv1request.md create mode 100644 docs/models/librariessharecreatev1request.md create mode 100644 docs/models/librariessharedeletev1request.md create mode 100644 docs/models/librariessharelistv1request.md create mode 100644 docs/models/librariesupdatev1request.md create mode 100644 docs/models/libraryin.md create mode 100644 docs/models/libraryinupdate.md create mode 100644 docs/models/libraryout.md create mode 100644 docs/models/listdocumentout.md create mode 100644 docs/models/listlibraryout.md create mode 100644 docs/models/listsharingout.md create mode 100644 docs/models/messageinputentrytype.md create mode 100644 docs/models/paginationinfo.md create mode 100644 docs/models/processingstatusout.md create mode 100644 docs/models/shareenum.md create mode 100644 docs/models/sharingdelete.md create mode 100644 docs/models/sharingin.md create mode 100644 docs/models/sharingout.md create mode 100644 
docs/models/toolexecutiondeltaevent.md create mode 100644 docs/models/toolexecutiondeltaeventtype.md create mode 100644 docs/sdks/accesses/README.md create mode 100644 docs/sdks/documents/README.md create mode 100644 docs/sdks/libraries/README.md create mode 100644 src/mistralai/accesses.py create mode 100644 src/mistralai/documents.py create mode 100644 src/mistralai/libraries.py create mode 100644 src/mistralai/models/documentout.py create mode 100644 src/mistralai/models/documenttextcontent.py create mode 100644 src/mistralai/models/documentupdatein.py create mode 100644 src/mistralai/models/entitytype.py create mode 100644 src/mistralai/models/file.py create mode 100644 src/mistralai/models/libraries_delete_v1op.py create mode 100644 src/mistralai/models/libraries_documents_delete_v1op.py create mode 100644 src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py create mode 100644 src/mistralai/models/libraries_documents_get_signed_url_v1op.py create mode 100644 src/mistralai/models/libraries_documents_get_status_v1op.py create mode 100644 src/mistralai/models/libraries_documents_get_text_content_v1op.py create mode 100644 src/mistralai/models/libraries_documents_get_v1op.py create mode 100644 src/mistralai/models/libraries_documents_list_v1op.py create mode 100644 src/mistralai/models/libraries_documents_reprocess_v1op.py create mode 100644 src/mistralai/models/libraries_documents_update_v1op.py create mode 100644 src/mistralai/models/libraries_documents_upload_v1op.py create mode 100644 src/mistralai/models/libraries_get_v1op.py create mode 100644 src/mistralai/models/libraries_share_create_v1op.py create mode 100644 src/mistralai/models/libraries_share_delete_v1op.py create mode 100644 src/mistralai/models/libraries_share_list_v1op.py create mode 100644 src/mistralai/models/libraries_update_v1op.py create mode 100644 src/mistralai/models/libraryin.py create mode 100644 src/mistralai/models/libraryinupdate.py create mode 100644 
src/mistralai/models/libraryout.py create mode 100644 src/mistralai/models/listdocumentout.py create mode 100644 src/mistralai/models/listlibraryout.py create mode 100644 src/mistralai/models/listsharingout.py create mode 100644 src/mistralai/models/paginationinfo.py create mode 100644 src/mistralai/models/processingstatusout.py create mode 100644 src/mistralai/models/shareenum.py create mode 100644 src/mistralai/models/sharingdelete.py create mode 100644 src/mistralai/models/sharingin.py create mode 100644 src/mistralai/models/sharingout.py create mode 100644 src/mistralai/models/toolexecutiondeltaevent.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 3d37ffed..6cf27f7a 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: e0186c33d0269977e1790dfcc7d11aac + docChecksum: 82cf79b2dee6811d91e2912113c21d3a docVersion: 1.0.0 speakeasyVersion: 1.568.2 generationVersion: 2.634.2 - releaseVersion: 1.9.1 - configChecksum: 5f97671226b9fdcc9adc3c7662003247 + releaseVersion: 1.9.2 + configChecksum: 1ca921f44508650d65ccf46783910ff3 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -183,6 +183,9 @@ generatedFiles: - docs/models/document.md - docs/models/documentlibrarytool.md - docs/models/documentlibrarytooltype.md + - docs/models/documentout.md + - docs/models/documenttextcontent.md + - docs/models/documentupdatein.md - docs/models/documenturlchunk.md - docs/models/documenturlchunktype.md - docs/models/embeddingdtype.md @@ -190,6 +193,7 @@ generatedFiles: - docs/models/embeddingrequestinputs.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md + - docs/models/entitytype.md - docs/models/entries.md - docs/models/eventout.md - docs/models/file.md @@ -213,6 
+217,7 @@ generatedFiles: - docs/models/ftclassifierlossfunction.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md + - docs/models/ftmodelcardtype.md - docs/models/function.md - docs/models/functioncall.md - docs/models/functioncallentry.md @@ -273,13 +278,37 @@ generatedFiles: - docs/models/jsonschema.md - docs/models/legacyjobmetadataout.md - docs/models/legacyjobmetadataoutobject.md + - docs/models/librariesdeletev1request.md + - docs/models/librariesdocumentsdeletev1request.md + - docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md + - docs/models/librariesdocumentsgetsignedurlv1request.md + - docs/models/librariesdocumentsgetstatusv1request.md + - docs/models/librariesdocumentsgettextcontentv1request.md + - docs/models/librariesdocumentsgetv1request.md + - docs/models/librariesdocumentslistv1request.md + - docs/models/librariesdocumentsreprocessv1request.md + - docs/models/librariesdocumentsupdatev1request.md + - docs/models/librariesdocumentsuploadv1documentupload.md + - docs/models/librariesdocumentsuploadv1request.md + - docs/models/librariesgetv1request.md + - docs/models/librariessharecreatev1request.md + - docs/models/librariessharedeletev1request.md + - docs/models/librariessharelistv1request.md + - docs/models/librariesupdatev1request.md + - docs/models/libraryin.md + - docs/models/libraryinupdate.md + - docs/models/libraryout.md + - docs/models/listdocumentout.md - docs/models/listfilesout.md + - docs/models/listlibraryout.md + - docs/models/listsharingout.md - docs/models/loc.md - docs/models/messageentries.md - docs/models/messageinputcontentchunks.md - docs/models/messageinputentry.md - docs/models/messageinputentrycontent.md - docs/models/messageinputentryrole.md + - docs/models/messageinputentrytype.md - docs/models/messageoutputcontentchunks.md - docs/models/messageoutputentry.md - docs/models/messageoutputentrycontent.md @@ -311,7 +340,9 @@ generatedFiles: - docs/models/one.md - 
docs/models/outputcontentchunks.md - docs/models/outputs.md + - docs/models/paginationinfo.md - docs/models/prediction.md + - docs/models/processingstatusout.md - docs/models/queryparamstatus.md - docs/models/referencechunk.md - docs/models/referencechunktype.md @@ -332,6 +363,10 @@ generatedFiles: - docs/models/role.md - docs/models/sampletype.md - docs/models/security.md + - docs/models/shareenum.md + - docs/models/sharingdelete.md + - docs/models/sharingin.md + - docs/models/sharingout.md - docs/models/source.md - docs/models/ssetypes.md - docs/models/status.md @@ -344,6 +379,8 @@ generatedFiles: - docs/models/toolcall.md - docs/models/toolchoice.md - docs/models/toolchoiceenum.md + - docs/models/toolexecutiondeltaevent.md + - docs/models/toolexecutiondeltaeventtype.md - docs/models/toolexecutiondoneevent.md - docs/models/toolexecutiondoneeventtype.md - docs/models/toolexecutionentry.md @@ -381,17 +418,20 @@ generatedFiles: - docs/models/websearchpremiumtooltype.md - docs/models/websearchtool.md - docs/models/websearchtooltype.md + - docs/sdks/accesses/README.md - docs/sdks/agents/README.md - docs/sdks/batch/README.md - docs/sdks/beta/README.md - docs/sdks/chat/README.md - docs/sdks/classifiers/README.md - docs/sdks/conversations/README.md + - docs/sdks/documents/README.md - docs/sdks/embeddings/README.md - docs/sdks/files/README.md - docs/sdks/fim/README.md - docs/sdks/finetuning/README.md - docs/sdks/jobs/README.md + - docs/sdks/libraries/README.md - docs/sdks/mistral/README.md - docs/sdks/mistralagents/README.md - docs/sdks/mistraljobs/README.md @@ -406,6 +446,7 @@ generatedFiles: - src/mistralai/_hooks/sdkhooks.py - src/mistralai/_hooks/types.py - src/mistralai/_version.py + - src/mistralai/accesses.py - src/mistralai/agents.py - src/mistralai/basesdk.py - src/mistralai/batch.py @@ -413,12 +454,14 @@ generatedFiles: - src/mistralai/chat.py - src/mistralai/classifiers.py - src/mistralai/conversations.py + - src/mistralai/documents.py - 
src/mistralai/embeddings.py - src/mistralai/files.py - src/mistralai/fim.py - src/mistralai/fine_tuning.py - src/mistralai/httpclient.py - src/mistralai/jobs.py + - src/mistralai/libraries.py - src/mistralai/mistral_agents.py - src/mistralai/mistral_jobs.py - src/mistralai/models/__init__.py @@ -499,12 +542,17 @@ generatedFiles: - src/mistralai/models/deletemodelout.py - src/mistralai/models/deltamessage.py - src/mistralai/models/documentlibrarytool.py + - src/mistralai/models/documentout.py + - src/mistralai/models/documenttextcontent.py + - src/mistralai/models/documentupdatein.py - src/mistralai/models/documenturlchunk.py - src/mistralai/models/embeddingdtype.py - src/mistralai/models/embeddingrequest.py - src/mistralai/models/embeddingresponse.py - src/mistralai/models/embeddingresponsedata.py + - src/mistralai/models/entitytype.py - src/mistralai/models/eventout.py + - src/mistralai/models/file.py - src/mistralai/models/filechunk.py - src/mistralai/models/filepurpose.py - src/mistralai/models/files_api_routes_delete_fileop.py @@ -555,7 +603,29 @@ generatedFiles: - src/mistralai/models/jobsout.py - src/mistralai/models/jsonschema.py - src/mistralai/models/legacyjobmetadataout.py + - src/mistralai/models/libraries_delete_v1op.py + - src/mistralai/models/libraries_documents_delete_v1op.py + - src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py + - src/mistralai/models/libraries_documents_get_signed_url_v1op.py + - src/mistralai/models/libraries_documents_get_status_v1op.py + - src/mistralai/models/libraries_documents_get_text_content_v1op.py + - src/mistralai/models/libraries_documents_get_v1op.py + - src/mistralai/models/libraries_documents_list_v1op.py + - src/mistralai/models/libraries_documents_reprocess_v1op.py + - src/mistralai/models/libraries_documents_update_v1op.py + - src/mistralai/models/libraries_documents_upload_v1op.py + - src/mistralai/models/libraries_get_v1op.py + - src/mistralai/models/libraries_share_create_v1op.py + 
- src/mistralai/models/libraries_share_delete_v1op.py + - src/mistralai/models/libraries_share_list_v1op.py + - src/mistralai/models/libraries_update_v1op.py + - src/mistralai/models/libraryin.py + - src/mistralai/models/libraryinupdate.py + - src/mistralai/models/libraryout.py + - src/mistralai/models/listdocumentout.py - src/mistralai/models/listfilesout.py + - src/mistralai/models/listlibraryout.py + - src/mistralai/models/listsharingout.py - src/mistralai/models/messageentries.py - src/mistralai/models/messageinputcontentchunks.py - src/mistralai/models/messageinputentry.py @@ -576,7 +646,9 @@ generatedFiles: - src/mistralai/models/ocrresponse.py - src/mistralai/models/ocrusageinfo.py - src/mistralai/models/outputcontentchunks.py + - src/mistralai/models/paginationinfo.py - src/mistralai/models/prediction.py + - src/mistralai/models/processingstatusout.py - src/mistralai/models/referencechunk.py - src/mistralai/models/responsedoneevent.py - src/mistralai/models/responseerrorevent.py @@ -588,6 +660,10 @@ generatedFiles: - src/mistralai/models/sampletype.py - src/mistralai/models/sdkerror.py - src/mistralai/models/security.py + - src/mistralai/models/shareenum.py + - src/mistralai/models/sharingdelete.py + - src/mistralai/models/sharingin.py + - src/mistralai/models/sharingout.py - src/mistralai/models/source.py - src/mistralai/models/ssetypes.py - src/mistralai/models/systemmessage.py @@ -596,6 +672,7 @@ generatedFiles: - src/mistralai/models/toolcall.py - src/mistralai/models/toolchoice.py - src/mistralai/models/toolchoiceenum.py + - src/mistralai/models/toolexecutiondeltaevent.py - src/mistralai/models/toolexecutiondoneevent.py - src/mistralai/models/toolexecutionentry.py - src/mistralai/models/toolexecutionstartedevent.py @@ -740,7 +817,7 @@ examples: conversation_id: "" responses: "200": - application/json: {"object": "conversation.history", "conversation_id": "", "entries": [{"object": "entry", "type": "tool.execution", "name": "code_interpreter"}]} + 
application/json: {"object": "conversation.history", "conversation_id": "", "entries": [{"object": "entry", "type": "tool.execution", "name": "image_generation", "arguments": ""}]} "422": application/json: {} agents_api_v1_conversations_messages: @@ -788,7 +865,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": [{"object": "entry", "type": "message.input", "role": "assistant", "content": ""}], "stream": true, "store": true, "handoff_execution": "server", "from_entry_id": ""} + application/json: {"inputs": [{"object": "entry", "type": "message.input", "role": "assistant", "content": "", "prefix": false}], "stream": true, "store": true, "handoff_execution": "server", "from_entry_id": ""} responses: "422": application/json: {} @@ -1072,5 +1149,198 @@ examples: application/json: {"pages": [{"index": 944919, "markdown": "", "images": [], "dimensions": {"dpi": 984283, "height": 453411, "width": 398292}}], "model": "Wrangler", "usage_info": {"pages_processed": 47064}} "422": application/json: {} + libraries_list_v1: + speakeasy-default-libraries-list-v1: + responses: + "200": + application/json: {"data": [{"id": "bfc452fd-4bcb-46ec-9f68-ceea101e924d", "name": "", "created_at": "2024-01-31T13:50:47.409Z", "updated_at": "2023-04-09T15:28:24.261Z", "owner_id": "3fb92cf9-0fea-44d0-958f-16963601a1f0", "owner_type": "", "total_size": 811051, "nb_documents": 634577, "chunk_size": 502060}]} + libraries_create_v1: + speakeasy-default-libraries-create-v1: + requestBody: + application/json: {"name": ""} + responses: + "201": + application/json: {"id": "7285d921-bbab-471e-a2df-600e096d8aca", "name": "", "created_at": "2025-12-10T18:12:15.618Z", "updated_at": "2023-12-29T15:14:03.343Z", "owner_id": "d5e2af8f-c98a-479e-aece-62d79ea6bab3", "owner_type": "", "total_size": 866940, "nb_documents": 123652, "chunk_size": 274694} + "422": + application/json: {} + libraries_get_v1: + speakeasy-default-libraries-get-v1: + parameters: + path: + library_id: 
"d0d23a1e-bfe5-45e7-b7bb-22a4ea78d47f" + responses: + "200": + application/json: {"id": "24e6ac5e-61cb-4f2c-b0c0-806dfd5d8dbf", "name": "", "created_at": "2023-01-19T09:20:07.756Z", "updated_at": "2023-05-28T00:39:57.656Z", "owner_id": "546a730e-7d06-4324-a4fd-2b7ff127978c", "owner_type": "", "total_size": 191122, "nb_documents": 932135, "chunk_size": null} + "422": + application/json: {} + libraries_delete_v1: + speakeasy-default-libraries-delete-v1: + parameters: + path: + library_id: "6cad0b6e-fd2e-4d11-a48b-21d30fb7c17a" + responses: + "200": + application/json: {"id": "04e83772-3f8e-41d3-a053-763ed9937e07", "name": "", "created_at": "2025-03-15T23:45:26.060Z", "updated_at": "2024-08-03T06:23:12.129Z", "owner_id": "f636aa46-e1d5-4df4-966b-de4af27da6db", "owner_type": "", "total_size": 268102, "nb_documents": 821714, "chunk_size": null} + "422": + application/json: {} + libraries_update_v1: + speakeasy-default-libraries-update-v1: + parameters: + path: + library_id: "e01880c3-d0b5-4a29-8b1b-abdb8ce917e4" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "c3bb20a7-df8c-4461-8cfb-9e2a978c00da", "name": "", "created_at": "2025-03-12T04:49:28.349Z", "updated_at": "2025-12-18T03:09:25.092Z", "owner_id": "734e66b8-ae70-4069-9ebb-7eb7ee3967d5", "owner_type": "", "total_size": 762363, "nb_documents": 896591, "chunk_size": 507889} + "422": + application/json: {} + libraries_documents_list_v1: + speakeasy-default-libraries-documents-list-v1: + parameters: + path: + library_id: "5c3ca4cd-62bc-4c71-ad8a-1531ae80d078" + query: + page_size: 100 + page: 0 + sort_by: "created_at" + sort_order: "desc" + responses: + "200": + application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, 
"name": "", "created_at": "2024-09-24T04:50:43.988Z", "processing_status": "", "uploaded_by_id": "7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "tokens_processing_total": 957230}]} + "422": + application/json: {} + libraries_documents_upload_v1: + speakeasy-default-libraries-documents-upload-v1: + parameters: + path: + library_id: "a02150d9-5ee0-4877-b62c-28b1fcdf3b76" + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "processing_status": "", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "tokens_processing_total": 61161} + "422": + application/json: {} + libraries_documents_get_v1: + speakeasy-default-libraries-documents-get-v1: + parameters: + path: + library_id: "03d908c8-90a1-44fd-bf3a-8490fb7c9a03" + document_id: "90973aec-0508-4375-8b00-91d732414745" + responses: + "200": + application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", "library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", "mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "processing_status": "", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", "uploaded_by_type": "", "tokens_processing_total": 793889} + "422": + application/json: {} + libraries_documents_update_v1: + speakeasy-default-libraries-documents-update-v1: + parameters: + path: + library_id: "3ddd8d93-dca5-4a6d-980d-173226c35742" + document_id: "2a25e44c-b160-40ca-b5c2-b65fb2fcae34" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", "hash": "", "mime_type": "", "extension": "m1v", "size": 802305, 
"name": "", "created_at": "2024-07-02T20:02:03.680Z", "processing_status": "", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "tokens_processing_total": 806683} + "422": + application/json: {} + libraries_documents_delete_v1: + speakeasy-default-libraries-documents-delete-v1: + parameters: + path: + library_id: "005daae9-d42e-407d-82d7-2261c6a1496c" + document_id: "edc236b0-baff-49a9-884b-4ca36a258da4" + responses: + "422": + application/json: {} + libraries_documents_get_text_content_v1: + speakeasy-default-libraries-documents-get-text-content-v1: + parameters: + path: + library_id: "1d177215-3b6b-45ba-9fa9-baf773223bec" + document_id: "60214c91-2aba-4692-a4e6-a53365de8caf" + responses: + "200": + application/json: {"text": ""} + "422": + application/json: {} + libraries_documents_get_status_v1: + speakeasy-default-libraries-documents-get-status-v1: + parameters: + path: + library_id: "e6906f70-368f-4155-80da-c1718f01bc43" + document_id: "2c904915-d831-4e9d-a345-8ce405bcef66" + responses: + "200": + application/json: {"document_id": "90473b79-1fd5-437f-bee0-6638bdf69c90", "processing_status": ""} + "422": + application/json: {} + libraries_documents_get_signed_url_v1: + speakeasy-default-libraries-documents-get-signed-url-v1: + parameters: + path: + library_id: "23cf6904-a602-4ee8-9f5b-8efc557c336d" + document_id: "48598486-df71-4994-acbb-1133c72efa8c" + responses: + "200": + application/json: "https://burdensome-jellyfish.name" + "422": + application/json: {} + libraries_documents_get_extracted_text_signed_url_v1: + speakeasy-default-libraries-documents-get-extracted-text-signed-url-v1: + parameters: + path: + library_id: "a6f15de3-1e82-4f95-af82-851499042ef8" + document_id: "9749d4f9-24e5-4ca2-99a3-a406863f805d" + responses: + "200": + application/json: "https://athletic-disadvantage.info" + "422": + application/json: {} + 
libraries_documents_reprocess_v1: + speakeasy-default-libraries-documents-reprocess-v1: + parameters: + path: + library_id: "51b29371-de8f-4ba4-932b-a0bafb3a7f64" + document_id: "3052422c-49ca-45ac-a918-cadb35d61fd8" + responses: + "422": + application/json: {} + libraries_share_list_v1: + speakeasy-default-libraries-share-list-v1: + parameters: + path: + library_id: "d2169833-d8e2-416e-a372-76518d3d99c2" + responses: + "200": + application/json: {"data": [{"library_id": "26c08a7a-d226-4d29-b4d8-c08f0ad41dd1", "org_id": "254e3633-51b9-47a9-bc14-466ecf29d167", "role": "", "share_with_type": "", "share_with_uuid": "815eb88e-1f97-4782-863f-5fd00d37268b"}]} + "422": + application/json: {} + libraries_share_create_v1: + speakeasy-default-libraries-share-create-v1: + parameters: + path: + library_id: "36de3a24-5b1c-4c8f-9d84-d5642205a976" + requestBody: + application/json: {"org_id": "aadd9ae1-f285-4437-884a-091c77efa6fd", "level": "Viewer", "share_with_uuid": "0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", "share_with_type": "User"} + responses: + "200": + application/json: {"library_id": "45b3a5b2-8b81-4453-9130-ded7f1e5a366", "org_id": "0fa6e542-f04b-431e-a1be-76a9a92b0e68", "role": "", "share_with_type": "", "share_with_uuid": "cdbcc0c5-e577-4880-8ed3-f919421d4fc5"} + "422": + application/json: {} + libraries_share_delete_v1: + speakeasy-default-libraries-share-delete-v1: + parameters: + path: + library_id: "709e3cad-9fb2-4f4e-bf88-143cf1808107" + requestBody: + application/json: {"org_id": "0814a235-c2d0-4814-875a-4b85f93d3dc7", "share_with_uuid": "b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", "share_with_type": "User"} + responses: + "200": + application/json: {"library_id": "7f9c6af4-e362-4cf1-9363-0409d51c2dfa", "org_id": "6b2cac3a-b29c-4d8f-bebb-0db06ec1bf97", "role": "", "share_with_type": "", "share_with_uuid": "618c78f1-41ca-45c3-8ef2-7d78898c7061"} + "422": + application/json: {} examplesVersion: 1.0.2 generatedTests: {} diff --git a/.speakeasy/gen.yaml 
b/.speakeasy/gen.yaml index 6eb63598..d3df5c35 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.1 + version: 1.9.2 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 2e95e59c..75541fbd 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:e9fd379cd22f75a10ccc5b866f4de98f973c3c0f77cb15a7bebcb94bf10c82f2 - sourceBlobDigest: sha256:4bb656d10d1cfbe09f9b1b7734c79f1855eb27184590362d3747116f6abf69d1 + sourceRevisionDigest: sha256:2ab1acc41424ca9be28ef867168aeb32af9fc7129b0a91494c0cd24d68c30345 + sourceBlobDigest: sha256:029ae17d555b02220397bba95308ba545c4733db81e65258be7baf9991d10c3a tags: - latest - - speakeasy-sdk-regen-1751306196 + - speakeasy-sdk-regen-1751557705 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:e9fd379cd22f75a10ccc5b866f4de98f973c3c0f77cb15a7bebcb94bf10c82f2 - sourceBlobDigest: sha256:4bb656d10d1cfbe09f9b1b7734c79f1855eb27184590362d3747116f6abf69d1 + sourceRevisionDigest: sha256:2ab1acc41424ca9be28ef867168aeb32af9fc7129b0a91494c0cd24d68c30345 + sourceBlobDigest: sha256:029ae17d555b02220397bba95308ba545c4733db81e65258be7baf9991d10c3a codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:85311d42c06d86b38d49ed14b2485f45ad219ef76da40dfbec2592bb75dcaf00 + codeSamplesRevisionDigest: sha256:b45de481b3d77689a76a406421d4625dc37cc17bf90bab2f7d6e78f3eec77a9c workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.568.2 diff --git a/README.md b/README.md index ee0b1d08..503c5128 100644 --- a/README.md +++ b/README.md @@ -468,6 +468,33 @@ The documentation for the GCP SDK is 
available [here](packages/mistralai_gcp/REA * [append_stream](docs/sdks/conversations/README.md#append_stream) - Append new entries to an existing conversation. * [restart_stream](docs/sdks/conversations/README.md#restart_stream) - Restart a conversation starting from a given entry. +#### [beta.libraries](docs/sdks/libraries/README.md) + +* [list](docs/sdks/libraries/README.md#list) - List all libraries you have access to. +* [create](docs/sdks/libraries/README.md#create) - Create a new Library. +* [get](docs/sdks/libraries/README.md#get) - Detailed information about a specific Library. +* [delete](docs/sdks/libraries/README.md#delete) - Delete a library and all of it's document. +* [update](docs/sdks/libraries/README.md#update) - Update a library. + +#### [beta.libraries.accesses](docs/sdks/accesses/README.md) + +* [list](docs/sdks/accesses/README.md#list) - List all of the access to this library. +* [update_or_create](docs/sdks/accesses/README.md#update_or_create) - Create or update an access level. +* [delete](docs/sdks/accesses/README.md#delete) - Delete an access level. + +#### [beta.libraries.documents](docs/sdks/documents/README.md) + +* [list](docs/sdks/documents/README.md#list) - List document in a given library. +* [upload](docs/sdks/documents/README.md#upload) - Upload a new document. +* [get](docs/sdks/documents/README.md#get) - Retrieve the metadata of a specific document. +* [update](docs/sdks/documents/README.md#update) - Update the metadata of a specific document. +* [delete](docs/sdks/documents/README.md#delete) - Delete a document. +* [text_content](docs/sdks/documents/README.md#text_content) - Retrieve the text content of a specific document. +* [status](docs/sdks/documents/README.md#status) - Retrieve the processing status of a specific document. +* [get_signed_url](docs/sdks/documents/README.md#get_signed_url) - Retrieve the signed URL of a specific document. 
+* [extracted_text_signed_url](docs/sdks/documents/README.md#extracted_text_signed_url) - Retrieve the signed URL of text extracted from a given document. +* [reprocess](docs/sdks/documents/README.md#reprocess) - Reprocess a document. + ### [chat](docs/sdks/chat/README.md) * [complete](docs/sdks/chat/README.md#complete) - Chat Completion @@ -587,7 +614,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.upload(file={ + res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={ "file_name": "example.file", "content": open("example.file", "rb"), }) diff --git a/RELEASES.md b/RELEASES.md index 3d7513fc..b66777ed 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -248,4 +248,14 @@ Based on: ### Generated - [python v1.9.1] . ### Releases -- [PyPI v1.9.1] https://pypi.org/project/mistralai/1.9.1 - . \ No newline at end of file +- [PyPI v1.9.1] https://pypi.org/project/mistralai/1.9.1 - . + +## 2025-07-10 12:22:52 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.2] . +### Releases +- [PyPI v1.9.2] https://pypi.org/project/mistralai/1.9.2 - . 
\ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index 18cb9a27..f5ce8c5e 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -17,4 +17,4 @@ | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | *Optional[Literal["base"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationeventsdata.md b/docs/models/conversationeventsdata.md index 81faf197..5452d7d5 100644 --- a/docs/models/conversationeventsdata.md +++ b/docs/models/conversationeventsdata.md @@ -45,6 +45,12 @@ value: models.FunctionCallEvent = /* values here */ value: models.MessageOutputEvent = /* values here */ ``` +### `models.ToolExecutionDeltaEvent` + +```python +value: models.ToolExecutionDeltaEvent = /* values here */ +``` + ### `models.ToolExecutionDoneEvent` ```python diff --git a/docs/models/documentout.md b/docs/models/documentout.md new file mode 100644 index 00000000..b9e7b212 --- /dev/null +++ b/docs/models/documentout.md @@ -0,0 +1,24 @@ +# DocumentOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `hash` | *str* | :heavy_check_mark: | N/A | +| `mime_type` | *str* | :heavy_check_mark: | 
N/A | +| `extension` | *str* | :heavy_check_mark: | N/A | +| `size` | *int* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `summary` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `processing_status` | *str* | :heavy_check_mark: | N/A | +| `uploaded_by_id` | *str* | :heavy_check_mark: | N/A | +| `uploaded_by_type` | *str* | :heavy_check_mark: | N/A | +| `tokens_processing_main_content` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `tokens_processing_summary` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `tokens_processing_total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/documenttextcontent.md b/docs/models/documenttextcontent.md new file mode 100644 index 00000000..989f49e9 --- /dev/null +++ b/docs/models/documenttextcontent.md @@ -0,0 +1,8 @@ +# DocumentTextContent + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/documentupdatein.md b/docs/models/documentupdatein.md new file mode 100644 index 00000000..215ae95f --- /dev/null +++ b/docs/models/documentupdatein.md @@ -0,0 +1,8 @@ +# DocumentUpdateIn + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/docs/models/entitytype.md b/docs/models/entitytype.md new file mode 100644 index 00000000..7c040b38 --- /dev/null +++ b/docs/models/entitytype.md @@ -0,0 +1,12 @@ +# EntityType + +The type of entity, used to share a library. + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `USER` | User | +| `WORKSPACE` | Workspace | +| `ORG` | Org | \ No newline at end of file diff --git a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md index 41631b28..a5dd1174 100644 --- a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md +++ b/docs/models/filesapiroutesuploadfilemultipartbodyparams.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `file` | 
[models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | -| `purpose` | [Optional[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `purpose` | [Optional[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | \ No newline at end of file diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md index a286f04e..35032775 100644 --- a/docs/models/ftmodelcard.md +++ b/docs/models/ftmodelcard.md @@ -19,7 +19,7 @@ Extra fields for fine-tuned models. | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | *Optional[Literal["fine-tuned"]]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.FTModelCardType]](../models/ftmodelcardtype.md) | :heavy_minus_sign: | N/A | | `job` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | | `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcardtype.md b/docs/models/ftmodelcardtype.md new file mode 100644 index 00000000..0b38470b --- /dev/null +++ b/docs/models/ftmodelcardtype.md @@ -0,0 +1,8 @@ +# FTModelCardType + + +## Values + +| Name | Value | +| ------------ | ------------ | +| `FINE_TUNED` | fine-tuned | \ No newline at end of file diff --git a/docs/models/librariesdeletev1request.md b/docs/models/librariesdeletev1request.md new file mode 100644 index 00000000..68d7e543 --- /dev/null +++ b/docs/models/librariesdeletev1request.md @@ -0,0 +1,8 @@ +# LibrariesDeleteV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsdeletev1request.md b/docs/models/librariesdocumentsdeletev1request.md new file mode 100644 index 00000000..efccdb1b --- /dev/null +++ b/docs/models/librariesdocumentsdeletev1request.md @@ -0,0 
+1,9 @@ +# LibrariesDocumentsDeleteV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md b/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md new file mode 100644 index 00000000..14ca66f7 --- /dev/null +++ b/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetExtractedTextSignedURLV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgetsignedurlv1request.md b/docs/models/librariesdocumentsgetsignedurlv1request.md new file mode 100644 index 00000000..7c08c180 --- /dev/null +++ b/docs/models/librariesdocumentsgetsignedurlv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetSignedURLV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgetstatusv1request.md b/docs/models/librariesdocumentsgetstatusv1request.md new file mode 100644 index 00000000..e6d41875 --- /dev/null +++ b/docs/models/librariesdocumentsgetstatusv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetStatusV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| 
`library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgettextcontentv1request.md b/docs/models/librariesdocumentsgettextcontentv1request.md new file mode 100644 index 00000000..2f58a446 --- /dev/null +++ b/docs/models/librariesdocumentsgettextcontentv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetTextContentV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgetv1request.md b/docs/models/librariesdocumentsgetv1request.md new file mode 100644 index 00000000..6febc058 --- /dev/null +++ b/docs/models/librariesdocumentsgetv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentslistv1request.md b/docs/models/librariesdocumentslistv1request.md new file mode 100644 index 00000000..1b4eb24d --- /dev/null +++ b/docs/models/librariesdocumentslistv1request.md @@ -0,0 +1,13 @@ +# LibrariesDocumentsListV1Request + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `sort_by` | *Optional[str]* | 
:heavy_minus_sign: | N/A | +| `sort_order` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsreprocessv1request.md b/docs/models/librariesdocumentsreprocessv1request.md new file mode 100644 index 00000000..196ba17b --- /dev/null +++ b/docs/models/librariesdocumentsreprocessv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsReprocessV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsupdatev1request.md b/docs/models/librariesdocumentsupdatev1request.md new file mode 100644 index 00000000..2f18b014 --- /dev/null +++ b/docs/models/librariesdocumentsupdatev1request.md @@ -0,0 +1,10 @@ +# LibrariesDocumentsUpdateV1Request + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `document_update_in` | [models.DocumentUpdateIn](../models/documentupdatein.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsuploadv1documentupload.md b/docs/models/librariesdocumentsuploadv1documentupload.md new file mode 100644 index 00000000..a0ba95da --- /dev/null +++ b/docs/models/librariesdocumentsuploadv1documentupload.md @@ -0,0 +1,8 @@ +# LibrariesDocumentsUploadV1DocumentUpload + + +## Fields + +| Field | Type | Required | Description | +| 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `file` | [models.File](../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | \ No newline at end of file diff --git a/docs/models/librariesdocumentsuploadv1request.md b/docs/models/librariesdocumentsuploadv1request.md new file mode 100644 index 00000000..7c91ca9b --- /dev/null +++ b/docs/models/librariesdocumentsuploadv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsUploadV1Request + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `request_body` | [models.LibrariesDocumentsUploadV1DocumentUpload](../models/librariesdocumentsuploadv1documentupload.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesgetv1request.md b/docs/models/librariesgetv1request.md new file mode 100644 index 00000000..6e1e04c3 --- /dev/null +++ b/docs/models/librariesgetv1request.md @@ -0,0 +1,8 @@ +# LibrariesGetV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariessharecreatev1request.md b/docs/models/librariessharecreatev1request.md new file mode 100644 index 00000000..4c05241d --- /dev/null +++ b/docs/models/librariessharecreatev1request.md @@ -0,0 +1,9 @@ +# LibrariesShareCreateV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | 
------------------------------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `sharing_in` | [models.SharingIn](../models/sharingin.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariessharedeletev1request.md b/docs/models/librariessharedeletev1request.md new file mode 100644 index 00000000..850e22ab --- /dev/null +++ b/docs/models/librariessharedeletev1request.md @@ -0,0 +1,9 @@ +# LibrariesShareDeleteV1Request + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `sharing_delete` | [models.SharingDelete](../models/sharingdelete.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariessharelistv1request.md b/docs/models/librariessharelistv1request.md new file mode 100644 index 00000000..98bf6d17 --- /dev/null +++ b/docs/models/librariessharelistv1request.md @@ -0,0 +1,8 @@ +# LibrariesShareListV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesupdatev1request.md b/docs/models/librariesupdatev1request.md new file mode 100644 index 00000000..a68ef7a8 --- /dev/null +++ b/docs/models/librariesupdatev1request.md @@ -0,0 +1,9 @@ +# LibrariesUpdateV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | +| `library_id` | *str* | 
:heavy_check_mark: | N/A | +| `library_in_update` | [models.LibraryInUpdate](../models/libraryinupdate.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/libraryin.md b/docs/models/libraryin.md new file mode 100644 index 00000000..d6b11914 --- /dev/null +++ b/docs/models/libraryin.md @@ -0,0 +1,10 @@ +# LibraryIn + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `chunk_size` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/libraryinupdate.md b/docs/models/libraryinupdate.md new file mode 100644 index 00000000..4aa169c7 --- /dev/null +++ b/docs/models/libraryinupdate.md @@ -0,0 +1,9 @@ +# LibraryInUpdate + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/libraryout.md b/docs/models/libraryout.md new file mode 100644 index 00000000..cf4de41b --- /dev/null +++ b/docs/models/libraryout.md @@ -0,0 +1,23 @@ +# LibraryOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `created_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `owner_id` | *str* | :heavy_check_mark: | N/A | +| `owner_type` | *str* | :heavy_check_mark: | N/A | +| `total_size` | *int* | :heavy_check_mark: | N/A | +| `nb_documents` | *int* | :heavy_check_mark: | N/A | +| `chunk_size` | *Nullable[int]* | :heavy_check_mark: | N/A | +| `emoji` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `generated_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `generated_description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `explicit_user_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `explicit_workspace_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `org_sharing_role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/listdocumentout.md b/docs/models/listdocumentout.md new file mode 100644 index 00000000..f14157b8 --- /dev/null +++ b/docs/models/listdocumentout.md @@ -0,0 +1,9 @@ +# ListDocumentOut + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `pagination` | [models.PaginationInfo](../models/paginationinfo.md) | :heavy_check_mark: | N/A | +| `data` | List[[models.DocumentOut](../models/documentout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listlibraryout.md b/docs/models/listlibraryout.md new file mode 100644 index 00000000..db76ffa1 --- 
/dev/null +++ b/docs/models/listlibraryout.md @@ -0,0 +1,8 @@ +# ListLibraryOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `data` | List[[models.LibraryOut](../models/libraryout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listsharingout.md b/docs/models/listsharingout.md new file mode 100644 index 00000000..bcac4834 --- /dev/null +++ b/docs/models/listsharingout.md @@ -0,0 +1,8 @@ +# ListSharingOut + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `data` | List[[models.SharingOut](../models/sharingout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md index d0168f6e..d55eb876 100644 --- a/docs/models/messageinputentry.md +++ b/docs/models/messageinputentry.md @@ -5,12 +5,13 @@ Representation of an input message inside the conversation. 
## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | -| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | -| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentrytype.md b/docs/models/messageinputentrytype.md new file mode 100644 index 00000000..d3378124 --- /dev/null +++ b/docs/models/messageinputentrytype.md @@ -0,0 +1,8 @@ +# MessageInputEntryType + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `MESSAGE_INPUT` | message.input | \ No newline at end of file diff --git a/docs/models/paginationinfo.md b/docs/models/paginationinfo.md new file mode 100644 index 00000000..ad1fbb86 --- /dev/null +++ b/docs/models/paginationinfo.md @@ -0,0 +1,12 @@ +# PaginationInfo + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `total_items` | *int* | :heavy_check_mark: | N/A | +| `total_pages` | *int* | :heavy_check_mark: | N/A | +| `current_page` | *int* | :heavy_check_mark: | N/A | +| `page_size` | *int* | :heavy_check_mark: | N/A | +| `has_more` | *bool* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/processingstatusout.md b/docs/models/processingstatusout.md new file mode 100644 index 00000000..7b67583f --- /dev/null +++ b/docs/models/processingstatusout.md @@ -0,0 +1,9 @@ +# ProcessingStatusOut + + +## Fields + +| Field | Type | Required | Description | +| ------------------- | ------------------- | ------------------- | ------------------- | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `processing_status` | *str* | 
:heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/shareenum.md b/docs/models/shareenum.md new file mode 100644 index 00000000..dc5d2b68 --- /dev/null +++ b/docs/models/shareenum.md @@ -0,0 +1,9 @@ +# ShareEnum + + +## Values + +| Name | Value | +| -------- | -------- | +| `VIEWER` | Viewer | +| `EDITOR` | Editor | \ No newline at end of file diff --git a/docs/models/sharingdelete.md b/docs/models/sharingdelete.md new file mode 100644 index 00000000..71cacab6 --- /dev/null +++ b/docs/models/sharingdelete.md @@ -0,0 +1,10 @@ +# SharingDelete + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `org_id` | *str* | :heavy_check_mark: | N/A | +| `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | +| `share_with_type` | [models.EntityType](../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. 
| \ No newline at end of file diff --git a/docs/models/sharingin.md b/docs/models/sharingin.md new file mode 100644 index 00000000..537ede03 --- /dev/null +++ b/docs/models/sharingin.md @@ -0,0 +1,11 @@ +# SharingIn + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `org_id` | *str* | :heavy_check_mark: | N/A | +| `level` | [models.ShareEnum](../models/shareenum.md) | :heavy_check_mark: | N/A | +| `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | +| `share_with_type` | [models.EntityType](../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | \ No newline at end of file diff --git a/docs/models/sharingout.md b/docs/models/sharingout.md new file mode 100644 index 00000000..5844fe64 --- /dev/null +++ b/docs/models/sharingout.md @@ -0,0 +1,13 @@ +# SharingOut + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `user_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `org_id` | *str* | :heavy_check_mark: | N/A | +| `role` | *str* | :heavy_check_mark: | N/A | +| `share_with_type` | *str* | :heavy_check_mark: | N/A | +| `share_with_uuid` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/ssetypes.md b/docs/models/ssetypes.md index 08d0f662..ae06b5e8 100644 --- a/docs/models/ssetypes.md +++ b/docs/models/ssetypes.md @@ -12,6 +12,7 @@ Server side events sent when streaming a conversation response. 
| `CONVERSATION_RESPONSE_ERROR` | conversation.response.error | | `MESSAGE_OUTPUT_DELTA` | message.output.delta | | `TOOL_EXECUTION_STARTED` | tool.execution.started | +| `TOOL_EXECUTION_DELTA` | tool.execution.delta | | `TOOL_EXECUTION_DONE` | tool.execution.done | | `AGENT_HANDOFF_STARTED` | agent.handoff.started | | `AGENT_HANDOFF_DONE` | agent.handoff.done | diff --git a/docs/models/toolexecutiondeltaevent.md b/docs/models/toolexecutiondeltaevent.md new file mode 100644 index 00000000..bfc9dc0e --- /dev/null +++ b/docs/models/toolexecutiondeltaevent.md @@ -0,0 +1,13 @@ +# ToolExecutionDeltaEvent + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `type` | [Optional[models.ToolExecutionDeltaEventType]](../models/toolexecutiondeltaeventtype.md) | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaeventtype.md b/docs/models/toolexecutiondeltaeventtype.md new file mode 100644 index 00000000..a4a2f8cc --- /dev/null +++ b/docs/models/toolexecutiondeltaeventtype.md @@ -0,0 +1,8 @@ +# ToolExecutionDeltaEventType + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `TOOL_EXECUTION_DELTA` | 
tool.execution.delta | \ No newline at end of file diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md index 2e58b517..174abdd1 100644 --- a/docs/models/toolexecutionentry.md +++ b/docs/models/toolexecutionentry.md @@ -11,4 +11,5 @@ | `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | | `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md index 8fc20194..82ea65e5 100644 --- a/docs/models/toolexecutionstartedevent.md +++ b/docs/models/toolexecutionstartedevent.md @@ -9,4 +9,5 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolreferencechunk.md b/docs/models/toolreferencechunk.md index d8bea0da..af447aee 100644 --- a/docs/models/toolreferencechunk.md +++ b/docs/models/toolreferencechunk.md @@ -9,4 +9,5 @@ | `tool` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | | `title` | *str* | :heavy_check_mark: | N/A | | `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `source` | *OptionalNullable[str]* | 
:heavy_minus_sign: | N/A | \ No newline at end of file +| `favicon` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/type.md b/docs/models/type.md index 357acf0b..239a00f5 100644 --- a/docs/models/type.md +++ b/docs/models/type.md @@ -3,6 +3,6 @@ ## Values -| Name | Value | -| --------------- | --------------- | -| `MESSAGE_INPUT` | message.input | \ No newline at end of file +| Name | Value | +| ------ | ------ | +| `BASE` | base | \ No newline at end of file diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md new file mode 100644 index 00000000..20484120 --- /dev/null +++ b/docs/sdks/accesses/README.md @@ -0,0 +1,137 @@ +# Accesses +(*beta.libraries.accesses*) + +## Overview + +### Available Operations + +* [list](#list) - List all of the accesses to this library. +* [update_or_create](#update_or_create) - Create or update an access level. +* [delete](#delete) - Delete an access level. + +## list + +Given a library, list all of the entities that have access and to what level.
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.accesses.list(library_id="d2169833-d8e2-416e-a372-76518d3d99c2") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ListSharingOut](../../models/listsharingout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## update_or_create + +Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", org_id="aadd9ae1-f285-4437-884a-091c77efa6fd", level="Viewer", share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `org_id` | *str* | :heavy_check_mark: | N/A | +| `level` | [models.ShareEnum](../../models/shareenum.md) | :heavy_check_mark: | N/A | +| `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | +| `share_with_type` | [models.EntityType](../../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.SharingOut](../../models/sharingout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Given a library id, you can delete the access level of an entity. An owner cannot delete its own access. You have to be the owner of the library to delete an access other than yours.
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", org_id="0814a235-c2d0-4814-875a-4b85f93d3dc7", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `org_id` | *str* | :heavy_check_mark: | N/A | +| `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | +| `share_with_type` | [models.EntityType](../../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.SharingOut](../../models/sharingout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 38d5915b..2eceb451 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -447,6 +447,7 @@ with Mistral( "type": "message.input", "role": "assistant", "content": "", + "prefix": False, }, ], from_entry_id="", stream=True, store=True, handoff_execution="server") diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md new file mode 100644 index 00000000..05ae6f74 --- /dev/null +++ b/docs/sdks/documents/README.md @@ -0,0 +1,425 @@ +# Documents +(*beta.libraries.documents*) + +## Overview + +### Available Operations + +* [list](#list) - List documents in a given library. +* [upload](#upload) - Upload a new document. +* [get](#get) - Retrieve the metadata of a specific document. +* [update](#update) - Update the metadata of a specific document. +* [delete](#delete) - Delete a document. +* [text_content](#text_content) - Retrieve the text content of a specific document. +* [status](#status) - Retrieve the processing status of a specific document. +* [get_signed_url](#get_signed_url) - Retrieve the signed URL of a specific document. +* [extracted_text_signed_url](#extracted_text_signed_url) - Retrieve the signed URL of text extracted from a given document. +* [reprocess](#reprocess) - Reprocess a document. + +## list + +Given a library, lists the documents that have been uploaded to that library.
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.list(library_id="5c3ca4cd-62bc-4c71-ad8a-1531ae80d078", page_size=100, page=0, sort_by="created_at", sort_order="desc") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `sort_by` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `sort_order` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ListDocumentOut](../../models/listdocumentout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## upload + +Given a library, upload a new document to that library. It is queued for processing; its status will change once it has been processed.
The processing has to be completed in order be discoverable for the library search + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={ + "file_name": "example.file", + "content": open("example.file", "rb"), + }) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `file` | [models.File](../../models/file.md) | :heavy_check_mark: | The File object (not file name) to be uploaded.
To upload a file and specify a custom file name you should format your request as such:
```bash
file=@path/to/your/file.jsonl;filename=custom_name.jsonl
```
Otherwise, you can just keep the original file name:
```bash
file=@path/to/your/file.jsonl
``` | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.DocumentOut](../../models/documentout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get + +Given a library and a document in this library, you can retrieve the metadata of that document. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.get(library_id="03d908c8-90a1-44fd-bf3a-8490fb7c9a03", document_id="90973aec-0508-4375-8b00-91d732414745") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Response + +**[models.DocumentOut](../../models/documentout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## update + +Given a library and a document in that library, update the name of that document. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.update(library_id="3ddd8d93-dca5-4a6d-980d-173226c35742", document_id="2a25e44c-b160-40ca-b5c2-b65fb2fcae34") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.DocumentOut](../../models/documentout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.libraries.documents.delete(library_id="005daae9-d42e-407d-82d7-2261c6a1496c", document_id="edc236b0-baff-49a9-884b-4ca36a258da4") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## text_content + +Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.text_content(library_id="1d177215-3b6b-45ba-9fa9-baf773223bec", document_id="60214c91-2aba-4692-a4e6-a53365de8caf") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.DocumentTextContent](../../models/documenttextcontent.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## status + +Given a library and a document in that library, retrieve the processing status of that document. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.status(library_id="e6906f70-368f-4155-80da-c1718f01bc43", document_id="2c904915-d831-4e9d-a345-8ce405bcef66") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ProcessingStatusOut](../../models/processingstatusout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get_signed_url + +Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.get_signed_url(library_id="23cf6904-a602-4ee8-9f5b-8efc557c336d", document_id="48598486-df71-4994-acbb-1133c72efa8c") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[str](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## extracted_text_signed_url + +Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.documents.extracted_text_signed_url(library_id="a6f15de3-1e82-4f95-af82-851499042ef8", document_id="9749d4f9-24e5-4ca2-99a3-a406863f805d") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[str](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## reprocess + +Given a library and a document in that library, reprocess that document, it will be billed again. + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.libraries.documents.reprocess(library_id="51b29371-de8f-4ba4-932b-a0bafb3a7f64", document_id="3052422c-49ca-45ac-a918-cadb35d61fd8") + + # Use the SDK ... 
+ +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md new file mode 100644 index 00000000..9c709d0b --- /dev/null +++ b/docs/sdks/libraries/README.md @@ -0,0 +1,216 @@ +# Libraries +(*beta.libraries*) + +## Overview + +(beta) Libraries API for indexing documents to enhance agent capabilities. + +### Available Operations + +* [list](#list) - List all libraries you have access to. +* [create](#create) - Create a new Library. +* [get](#get) - Detailed information about a specific Library. +* [delete](#delete) - Delete a library and all of it's document. +* [update](#update) - Update a library. + +## list + +List all libraries that you have created or have been shared with you. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.list() + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.ListLibraryOut](../../models/listlibraryout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## create + +Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.create(name="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `chunk_size` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.LibraryOut](../../models/libraryout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get + +Given a library id, details information about that Library. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.get(library_id="d0d23a1e-bfe5-45e7-b7bb-22a4ea78d47f") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.LibraryOut](../../models/libraryout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## delete + +Given a library id, deletes it together with all documents that have been uploaded to that library. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.delete(library_id="6cad0b6e-fd2e-4d11-a48b-21d30fb7c17a") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.LibraryOut](../../models/libraryout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## update + +Given a library id, you can update the name and description. 
+ +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.libraries.update(library_id="e01880c3-d0b5-4a29-8b1b-abdb8ce917e4") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.LibraryOut](../../models/libraryout.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 5cf64972..846b6ff2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.1" +version = "1.9.2" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index d22b4e90..8d003f9e 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.1" +__version__: str = "1.9.2" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.9.1 2.634.2 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.9.2 2.634.2 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/accesses.py b/src/mistralai/accesses.py new file mode 100644 index 00000000..67061b7e --- /dev/null +++ b/src/mistralai/accesses.py @@ -0,0 +1,672 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, Mapping, Optional + + +class Accesses(BaseSDK): + def list( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListSharingOut: + r"""List all of the access to this library. + + Given a library, list all of the Entity that have access and to what level. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareListV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_list_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ListSharingOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", 
"*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def list_async( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListSharingOut: + r"""List all of the access to this library. + + Given a library, list all of the Entity that have access and to what level. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareListV1Request( + library_id=library_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_list_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ListSharingOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def update_or_create( + self, + *, + library_id: str, + org_id: str, + level: models.ShareEnum, + share_with_uuid: str, + share_with_type: models.EntityType, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Create or update an access level. + + Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. + + :param library_id: + :param org_id: + :param level: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareCreateV1Request( + library_id=library_id, + sharing_in=models.SharingIn( + org_id=org_id, + level=level, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_in, False, False, "json", models.SharingIn + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_create_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.SharingOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise 
models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def update_or_create_async( + self, + *, + library_id: str, + org_id: str, + level: models.ShareEnum, + share_with_uuid: str, + share_with_type: models.EntityType, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Create or update an access level. + + Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. + + :param library_id: + :param org_id: + :param level: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareCreateV1Request( + library_id=library_id, + sharing_in=models.SharingIn( + org_id=org_id, + level=level, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_in, False, False, "json", models.SharingIn + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_create_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.SharingOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise 
models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def delete( + self, + *, + library_id: str, + org_id: str, + share_with_uuid: str, + share_with_type: models.EntityType, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Delete an access level. + + Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. + + :param library_id: + :param org_id: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareDeleteV1Request( + library_id=library_id, + sharing_delete=models.SharingDelete( + org_id=org_id, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_delete, False, False, "json", models.SharingDelete + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_delete_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.SharingOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise 
models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def delete_async( + self, + *, + library_id: str, + org_id: str, + share_with_uuid: str, + share_with_type: models.EntityType, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Delete an access level. + + Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. + + :param library_id: + :param org_id: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareDeleteV1Request( + library_id=library_id, + sharing_delete=models.SharingDelete( + org_id=org_id, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_delete, False, False, "json", models.SharingDelete + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_delete_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.SharingOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise 
models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py index 04209d74..3408d943 100644 --- a/src/mistralai/beta.py +++ b/src/mistralai/beta.py @@ -3,6 +3,7 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration from mistralai.conversations import Conversations +from mistralai.libraries import Libraries from mistralai.mistral_agents import MistralAgents @@ -11,6 +12,8 @@ class Beta(BaseSDK): r"""(beta) Conversations API""" agents: MistralAgents r"""(beta) Agents API""" + libraries: Libraries + r"""(beta) Libraries API for indexing documents to enhance agent capabilities.""" def __init__(self, sdk_config: SDKConfiguration) -> None: BaseSDK.__init__(self, sdk_config) @@ -20,3 +23,4 @@ def __init__(self, sdk_config: SDKConfiguration) -> None: def _init_sdks(self): self.conversations = Conversations(self.sdk_configuration) self.agents = MistralAgents(self.sdk_configuration) + self.libraries = Libraries(self.sdk_configuration) diff --git a/src/mistralai/documents.py b/src/mistralai/documents.py new file mode 100644 index 00000000..e43d3faf --- /dev/null +++ b/src/mistralai/documents.py @@ -0,0 +1,2136 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, Mapping, Optional, Union + + +class Documents(BaseSDK): + def list( + self, + *, + library_id: str, + search: OptionalNullable[str] = UNSET, + page_size: Optional[int] = 100, + page: Optional[int] = 0, + sort_by: Optional[str] = "created_at", + sort_order: Optional[str] = "desc", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListDocumentOut: + r"""List document in a given library. + + Given a library, lists the document that have been uploaded to that library. + + :param library_id: + :param search: + :param page_size: + :param page: + :param sort_by: + :param sort_order: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsListV1Request( + library_id=library_id, + search=search, + page_size=page_size, + page=page, + sort_by=sort_by, + sort_order=sort_order, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_list_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ListDocumentOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API 
error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def list_async( + self, + *, + library_id: str, + search: OptionalNullable[str] = UNSET, + page_size: Optional[int] = 100, + page: Optional[int] = 0, + sort_by: Optional[str] = "created_at", + sort_order: Optional[str] = "desc", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListDocumentOut: + r"""List document in a given library. + + Given a library, lists the document that have been uploaded to that library. + + :param library_id: + :param search: + :param page_size: + :param page: + :param sort_by: + :param sort_order: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsListV1Request( + library_id=library_id, + search=search, + page_size=page_size, + page=page, + sort_by=sort_by, + sort_order=sort_order, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_list_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ListDocumentOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + 
raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def upload( + self, + *, + library_id: str, + file: Union[models.File, models.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Upload a new document. + + Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search + + :param library_id: + :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUploadV1Request( + library_id=library_id, + request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + file=utils.get_pydantic_model(file, models.File), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.request_body, + False, + False, + "multipart", + models.LibrariesDocumentsUploadV1DocumentUpload, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_upload_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, ["200", "201"], "application/json"): + return utils.unmarshal_json(http_res.text, models.DocumentOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, 
models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def upload_async( + self, + *, + library_id: str, + file: Union[models.File, models.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Upload a new document. + + Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search + + :param library_id: + :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUploadV1Request( + library_id=library_id, + request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + file=utils.get_pydantic_model(file, models.File), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.request_body, + False, + False, + "multipart", + models.LibrariesDocumentsUploadV1DocumentUpload, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_upload_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, ["200", "201"], "application/json"): + return utils.unmarshal_json(http_res.text, models.DocumentOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, 
models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def get( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Retrieve the metadata of a specific document. + + Given a library and a document in this library, you can retrieve the metadata of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.DocumentOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + 
) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Retrieve the metadata of a specific document. + + Given a library and a document in this library, you can retrieve the metadata of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.DocumentOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", 
http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def update( + self, + *, + library_id: str, + document_id: str, + name: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Update the metadata of a specific document. + + Given a library and a document in that library, update the name of that document. + + :param library_id: + :param document_id: + :param name: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUpdateV1Request( + library_id=library_id, + document_id=document_id, + document_update_in=models.DocumentUpdateIn( + name=name, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.document_update_in, + False, + False, + "json", + models.DocumentUpdateIn, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_update_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.DocumentOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise 
models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def update_async( + self, + *, + library_id: str, + document_id: str, + name: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Update the metadata of a specific document. + + Given a library and a document in that library, update the name of that document. + + :param library_id: + :param document_id: + :param name: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUpdateV1Request( + library_id=library_id, + document_id=document_id, + document_update_in=models.DocumentUpdateIn( + name=name, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.document_update_in, + False, + False, + "json", + models.DocumentUpdateIn, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_update_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.DocumentOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise 
models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def delete( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a document. + + Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsDeleteV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_delete_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def delete_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a document. + + Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsDeleteV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_delete_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def text_content( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentTextContent: + r"""Retrieve the text content of a specific document. + + Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetTextContentV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/text_content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_text_content_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.DocumentTextContent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", 
http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def text_content_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentTextContent: + r"""Retrieve the text content of a specific document. + + Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetTextContentV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/text_content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_text_content_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.DocumentTextContent) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def status( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ProcessingStatusOut: + r"""Retrieve the processing status of a specific document. + + Given a library and a document in that library, retrieve the processing status of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetStatusV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/status", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_status_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ProcessingStatusOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", 
http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def status_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ProcessingStatusOut: + r"""Retrieve the processing status of a specific document. + + Given a library and a document in that library, retrieve the processing status of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetStatusV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/status", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_status_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ProcessingStatusOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error 
occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def get_signed_url( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of a specific document. + + Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_signed_url_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, str) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, 
http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_signed_url_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of a specific document. + + Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_signed_url_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, str) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", 
http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def extracted_text_signed_url( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of text extracted from a given document. + + Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, str) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error 
occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def extracted_text_signed_url_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of text extracted from a given document. + + Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, str) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def reprocess( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Reprocess a document. + + Given a library and a document in that library, reprocess that document, it will be billed again. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsReprocessV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="POST", + path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_reprocess_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", 
"*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def reprocess_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Reprocess a document. + + Given a library and a document in that library, reprocess that document, it will be billed again. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsReprocessV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_reprocess_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 39f65dd4..05739eeb 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -48,8 +48,8 @@ def upload( base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( - file=utils.get_pydantic_model(file, models.File), purpose=purpose, + file=utils.get_pydantic_model(file, models.File), ) req = self._build_request( @@ -156,8 +156,8 @@ async def upload_async( base_url = self._get_url(base_url, url_variables) request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( - file=utils.get_pydantic_model(file, models.File), purpose=purpose, + file=utils.get_pydantic_model(file, models.File), ) req = self._build_request_async( diff --git a/src/mistralai/libraries.py b/src/mistralai/libraries.py new file mode 100644 index 00000000..45bf0397 --- /dev/null +++ b/src/mistralai/libraries.py @@ -0,0 +1,1041 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.accesses import Accesses +from mistralai.documents import Documents +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import get_security_from_env +from typing import Any, Mapping, Optional + + +class Libraries(BaseSDK): + r"""(beta) Libraries API for indexing documents to enhance agent capabilities.""" + + documents: Documents + accesses: Accesses + + def __init__(self, sdk_config: SDKConfiguration) -> None: + BaseSDK.__init__(self, sdk_config) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.documents = Documents(self.sdk_configuration) + self.accesses = Accesses(self.sdk_configuration) + + def list( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListLibraryOut: + r"""List all libraries you have access to. + + List all libraries that you have created or have been shared with you. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request( + method="GET", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_list_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ListLibraryOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: 
{content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def list_async( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListLibraryOut: + r"""List all libraries you have access to. + + List all libraries that you have created or have been shared with you. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request_async( + method="GET", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_list_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.ListLibraryOut) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def create( + self, + *, + name: str, + description: OptionalNullable[str] = UNSET, + chunk_size: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Create a new Library. + + Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. + + :param name: + :param description: + :param chunk_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibraryIn( + name=name, + description=description, + chunk_size=chunk_size, + ) + + req = self._build_request( + method="POST", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.LibraryIn + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_create_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return utils.unmarshal_json(http_res.text, models.LibraryOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API 
error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def create_async( + self, + *, + name: str, + description: OptionalNullable[str] = UNSET, + chunk_size: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Create a new Library. + + Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. + + :param name: + :param description: + :param chunk_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibraryIn( + name=name, + description=description, + chunk_size=chunk_size, + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.LibraryIn + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_create_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return utils.unmarshal_json(http_res.text, models.LibraryOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + 
raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def get( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Detailed information about a specific Library. + + Given a library id, details information about that Library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesGetV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_get_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.LibraryOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def get_async( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Detailed information about a specific Library. + + Given a library id, details information about that Library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesGetV1Request( + library_id=library_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_get_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.LibraryOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, 
"5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def delete( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Delete a library and all of it's document. + + Given a library id, deletes it together with all documents that have been uploaded to that library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDeleteV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_delete_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.LibraryOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def delete_async( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Delete a library and all of it's document. + + Given a library id, deletes it together with all documents that have been uploaded to that library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDeleteV1Request( + library_id=library_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_delete_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.LibraryOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def update( + self, + *, + library_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Update a library. + + Given a library id, you can update the name and description. + + :param library_id: + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesUpdateV1Request( + library_id=library_id, + library_in_update=models.LibraryInUpdate( + name=name, + description=description, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.library_in_update, False, False, "json", models.LibraryInUpdate + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_update_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.LibraryOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def update_async( + self, + *, + library_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Update a library. + + Given a library id, you can update the name and description. + + :param library_id: + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesUpdateV1Request( + library_id=library_id, + library_in_update=models.LibraryInUpdate( + name=name, + description=description, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.library_in_update, False, False, "json", models.LibraryInUpdate + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_update_v1", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.LibraryOut) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index a44de97c..03965fde 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -129,7 +129,7 @@ AssistantMessageRole, AssistantMessageTypedDict, ) - from .basemodelcard import BaseModelCard, BaseModelCardTypedDict + from .basemodelcard import BaseModelCard, BaseModelCardTypedDict, Type from .batcherror import BatchError, BatchErrorTypedDict from .batchjobin import BatchJobIn, BatchJobInTypedDict from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict @@ -360,6 +360,9 @@ DocumentLibraryToolType, DocumentLibraryToolTypedDict, ) + from .documentout import DocumentOut, DocumentOutTypedDict + from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict + from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict from .documenturlchunk import ( DocumentURLChunk, DocumentURLChunkType, @@ -377,7 +380,9 @@ EmbeddingResponseData, EmbeddingResponseDataTypedDict, ) + from .entitytype import EntityType from .eventout import EventOut, EventOutTypedDict + from .file import File, FileTypedDict from .filechunk import FileChunk, FileChunkTypedDict from .filepurpose import FilePurpose from .files_api_routes_delete_fileop 
import ( @@ -401,8 +406,6 @@ FilesAPIRoutesRetrieveFileRequestTypedDict, ) from .files_api_routes_upload_fileop import ( - File, - FileTypedDict, FilesAPIRoutesUploadFileMultiPartBodyParams, FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, ) @@ -430,7 +433,7 @@ FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict, ) - from .ftmodelcard import FTModelCard, FTModelCardTypedDict + from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( Arguments, @@ -579,7 +582,79 @@ LegacyJobMetadataOutObject, LegacyJobMetadataOutTypedDict, ) + from .libraries_delete_v1op import ( + LibrariesDeleteV1Request, + LibrariesDeleteV1RequestTypedDict, + ) + from .libraries_documents_delete_v1op import ( + LibrariesDocumentsDeleteV1Request, + LibrariesDocumentsDeleteV1RequestTypedDict, + ) + from .libraries_documents_get_extracted_text_signed_url_v1op import ( + LibrariesDocumentsGetExtractedTextSignedURLV1Request, + LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_signed_url_v1op import ( + LibrariesDocumentsGetSignedURLV1Request, + LibrariesDocumentsGetSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_status_v1op import ( + LibrariesDocumentsGetStatusV1Request, + LibrariesDocumentsGetStatusV1RequestTypedDict, + ) + from .libraries_documents_get_text_content_v1op import ( + LibrariesDocumentsGetTextContentV1Request, + LibrariesDocumentsGetTextContentV1RequestTypedDict, + ) + from .libraries_documents_get_v1op import ( + LibrariesDocumentsGetV1Request, + LibrariesDocumentsGetV1RequestTypedDict, + ) + from .libraries_documents_list_v1op import ( + LibrariesDocumentsListV1Request, + LibrariesDocumentsListV1RequestTypedDict, + ) + from .libraries_documents_reprocess_v1op import ( + LibrariesDocumentsReprocessV1Request, + LibrariesDocumentsReprocessV1RequestTypedDict, + ) + from .libraries_documents_update_v1op import ( + 
LibrariesDocumentsUpdateV1Request, + LibrariesDocumentsUpdateV1RequestTypedDict, + ) + from .libraries_documents_upload_v1op import ( + LibrariesDocumentsUploadV1DocumentUpload, + LibrariesDocumentsUploadV1DocumentUploadTypedDict, + LibrariesDocumentsUploadV1Request, + LibrariesDocumentsUploadV1RequestTypedDict, + ) + from .libraries_get_v1op import ( + LibrariesGetV1Request, + LibrariesGetV1RequestTypedDict, + ) + from .libraries_share_create_v1op import ( + LibrariesShareCreateV1Request, + LibrariesShareCreateV1RequestTypedDict, + ) + from .libraries_share_delete_v1op import ( + LibrariesShareDeleteV1Request, + LibrariesShareDeleteV1RequestTypedDict, + ) + from .libraries_share_list_v1op import ( + LibrariesShareListV1Request, + LibrariesShareListV1RequestTypedDict, + ) + from .libraries_update_v1op import ( + LibrariesUpdateV1Request, + LibrariesUpdateV1RequestTypedDict, + ) + from .libraryin import LibraryIn, LibraryInTypedDict + from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict + from .libraryout import LibraryOut, LibraryOutTypedDict + from .listdocumentout import ListDocumentOut, ListDocumentOutTypedDict from .listfilesout import ListFilesOut, ListFilesOutTypedDict + from .listlibraryout import ListLibraryOut, ListLibraryOutTypedDict + from .listsharingout import ListSharingOut, ListSharingOutTypedDict from .messageentries import MessageEntries, MessageEntriesTypedDict from .messageinputcontentchunks import ( MessageInputContentChunks, @@ -590,9 +665,9 @@ MessageInputEntryContent, MessageInputEntryContentTypedDict, MessageInputEntryRole, + MessageInputEntryType, MessageInputEntryTypedDict, Object, - Type, ) from .messageoutputcontentchunks import ( MessageOutputContentChunks, @@ -635,7 +710,9 @@ from .ocrresponse import OCRResponse, OCRResponseTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict + from .paginationinfo import 
PaginationInfo, PaginationInfoTypedDict from .prediction import Prediction, PredictionTypedDict + from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict from .referencechunk import ( ReferenceChunk, ReferenceChunkType, @@ -668,6 +745,10 @@ from .sampletype import SampleType from .sdkerror import SDKError from .security import Security, SecurityTypedDict + from .shareenum import ShareEnum + from .sharingdelete import SharingDelete, SharingDeleteTypedDict + from .sharingin import SharingIn, SharingInTypedDict + from .sharingout import SharingOut, SharingOutTypedDict from .source import Source from .ssetypes import SSETypes from .systemmessage import ( @@ -682,6 +763,11 @@ from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum + from .toolexecutiondeltaevent import ( + ToolExecutionDeltaEvent, + ToolExecutionDeltaEventType, + ToolExecutionDeltaEventTypedDict, + ) from .toolexecutiondoneevent import ( ToolExecutionDoneEvent, ToolExecutionDoneEventType, @@ -990,10 +1076,16 @@ "DocumentLibraryTool", "DocumentLibraryToolType", "DocumentLibraryToolTypedDict", + "DocumentOut", + "DocumentOutTypedDict", + "DocumentTextContent", + "DocumentTextContentTypedDict", "DocumentTypedDict", "DocumentURLChunk", "DocumentURLChunkType", "DocumentURLChunkTypedDict", + "DocumentUpdateIn", + "DocumentUpdateInTypedDict", "EmbeddingDtype", "EmbeddingRequest", "EmbeddingRequestInputs", @@ -1003,6 +1095,7 @@ "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", + "EntityType", "Entries", "EntriesTypedDict", "EventOut", @@ -1021,6 +1114,7 @@ "FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelCard", + "FTModelCardType", "FTModelCardTypedDict", "File", "FileChunk", @@ -1151,8 +1245,54 @@ "LegacyJobMetadataOut", "LegacyJobMetadataOutObject", "LegacyJobMetadataOutTypedDict", + "LibrariesDeleteV1Request", + 
"LibrariesDeleteV1RequestTypedDict", + "LibrariesDocumentsDeleteV1Request", + "LibrariesDocumentsDeleteV1RequestTypedDict", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetSignedURLV1Request", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetStatusV1Request", + "LibrariesDocumentsGetStatusV1RequestTypedDict", + "LibrariesDocumentsGetTextContentV1Request", + "LibrariesDocumentsGetTextContentV1RequestTypedDict", + "LibrariesDocumentsGetV1Request", + "LibrariesDocumentsGetV1RequestTypedDict", + "LibrariesDocumentsListV1Request", + "LibrariesDocumentsListV1RequestTypedDict", + "LibrariesDocumentsReprocessV1Request", + "LibrariesDocumentsReprocessV1RequestTypedDict", + "LibrariesDocumentsUpdateV1Request", + "LibrariesDocumentsUpdateV1RequestTypedDict", + "LibrariesDocumentsUploadV1DocumentUpload", + "LibrariesDocumentsUploadV1DocumentUploadTypedDict", + "LibrariesDocumentsUploadV1Request", + "LibrariesDocumentsUploadV1RequestTypedDict", + "LibrariesGetV1Request", + "LibrariesGetV1RequestTypedDict", + "LibrariesShareCreateV1Request", + "LibrariesShareCreateV1RequestTypedDict", + "LibrariesShareDeleteV1Request", + "LibrariesShareDeleteV1RequestTypedDict", + "LibrariesShareListV1Request", + "LibrariesShareListV1RequestTypedDict", + "LibrariesUpdateV1Request", + "LibrariesUpdateV1RequestTypedDict", + "LibraryIn", + "LibraryInTypedDict", + "LibraryInUpdate", + "LibraryInUpdateTypedDict", + "LibraryOut", + "LibraryOutTypedDict", + "ListDocumentOut", + "ListDocumentOutTypedDict", "ListFilesOut", "ListFilesOutTypedDict", + "ListLibraryOut", + "ListLibraryOutTypedDict", + "ListSharingOut", + "ListSharingOutTypedDict", "Loc", "LocTypedDict", "MessageEntries", @@ -1163,6 +1303,7 @@ "MessageInputEntryContent", "MessageInputEntryContentTypedDict", "MessageInputEntryRole", + "MessageInputEntryType", "MessageInputEntryTypedDict", 
"MessageOutputContentChunks", "MessageOutputContentChunksTypedDict", @@ -1217,8 +1358,12 @@ "OutputContentChunksTypedDict", "Outputs", "OutputsTypedDict", + "PaginationInfo", + "PaginationInfoTypedDict", "Prediction", "PredictionTypedDict", + "ProcessingStatusOut", + "ProcessingStatusOutTypedDict", "QueryParamStatus", "ReferenceChunk", "ReferenceChunkType", @@ -1253,6 +1398,13 @@ "SampleType", "Security", "SecurityTypedDict", + "ShareEnum", + "SharingDelete", + "SharingDeleteTypedDict", + "SharingIn", + "SharingInTypedDict", + "SharingOut", + "SharingOutTypedDict", "Source", "Status", "Stop", @@ -1270,6 +1422,9 @@ "ToolChoice", "ToolChoiceEnum", "ToolChoiceTypedDict", + "ToolExecutionDeltaEvent", + "ToolExecutionDeltaEventType", + "ToolExecutionDeltaEventTypedDict", "ToolExecutionDoneEvent", "ToolExecutionDoneEventType", "ToolExecutionDoneEventTypedDict", @@ -1412,6 +1567,7 @@ "AssistantMessageTypedDict": ".assistantmessage", "BaseModelCard": ".basemodelcard", "BaseModelCardTypedDict": ".basemodelcard", + "Type": ".basemodelcard", "BatchError": ".batcherror", "BatchErrorTypedDict": ".batcherror", "BatchJobIn": ".batchjobin", @@ -1589,6 +1745,12 @@ "DocumentLibraryTool": ".documentlibrarytool", "DocumentLibraryToolType": ".documentlibrarytool", "DocumentLibraryToolTypedDict": ".documentlibrarytool", + "DocumentOut": ".documentout", + "DocumentOutTypedDict": ".documentout", + "DocumentTextContent": ".documenttextcontent", + "DocumentTextContentTypedDict": ".documenttextcontent", + "DocumentUpdateIn": ".documentupdatein", + "DocumentUpdateInTypedDict": ".documentupdatein", "DocumentURLChunk": ".documenturlchunk", "DocumentURLChunkType": ".documenturlchunk", "DocumentURLChunkTypedDict": ".documenturlchunk", @@ -1601,8 +1763,11 @@ "EmbeddingResponseTypedDict": ".embeddingresponse", "EmbeddingResponseData": ".embeddingresponsedata", "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", + "EntityType": ".entitytype", "EventOut": ".eventout", "EventOutTypedDict": 
".eventout", + "File": ".file", + "FileTypedDict": ".file", "FileChunk": ".filechunk", "FileChunkTypedDict": ".filechunk", "FilePurpose": ".filepurpose", @@ -1616,8 +1781,6 @@ "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", - "File": ".files_api_routes_upload_fileop", - "FileTypedDict": ".files_api_routes_upload_fileop", "FilesAPIRoutesUploadFileMultiPartBodyParams": ".files_api_routes_upload_fileop", "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", "FileSchema": ".fileschema", @@ -1639,6 +1802,7 @@ "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", "FTModelCard": ".ftmodelcard", + "FTModelCardType": ".ftmodelcard", "FTModelCardTypedDict": ".ftmodelcard", "Function": ".function", "FunctionTypedDict": ".function", @@ -1747,8 +1911,54 @@ "LegacyJobMetadataOut": ".legacyjobmetadataout", "LegacyJobMetadataOutObject": ".legacyjobmetadataout", "LegacyJobMetadataOutTypedDict": ".legacyjobmetadataout", + "LibrariesDeleteV1Request": ".libraries_delete_v1op", + "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", + "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", + "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", + 
"LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", + "LibrariesDocumentsGetV1RequestTypedDict": ".libraries_documents_get_v1op", + "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", + "LibrariesDocumentsListV1RequestTypedDict": ".libraries_documents_list_v1op", + "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", + "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", + "LibrariesDocumentsUploadV1DocumentUpload": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1DocumentUploadTypedDict": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", + "LibrariesGetV1Request": ".libraries_get_v1op", + "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", + "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", + "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", + "LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op", + "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", + "LibrariesShareListV1Request": ".libraries_share_list_v1op", + "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", + "LibrariesUpdateV1Request": ".libraries_update_v1op", + "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", + "LibraryIn": ".libraryin", + "LibraryInTypedDict": ".libraryin", + "LibraryInUpdate": 
".libraryinupdate", + "LibraryInUpdateTypedDict": ".libraryinupdate", + "LibraryOut": ".libraryout", + "LibraryOutTypedDict": ".libraryout", + "ListDocumentOut": ".listdocumentout", + "ListDocumentOutTypedDict": ".listdocumentout", "ListFilesOut": ".listfilesout", "ListFilesOutTypedDict": ".listfilesout", + "ListLibraryOut": ".listlibraryout", + "ListLibraryOutTypedDict": ".listlibraryout", + "ListSharingOut": ".listsharingout", + "ListSharingOutTypedDict": ".listsharingout", "MessageEntries": ".messageentries", "MessageEntriesTypedDict": ".messageentries", "MessageInputContentChunks": ".messageinputcontentchunks", @@ -1757,9 +1967,9 @@ "MessageInputEntryContent": ".messageinputentry", "MessageInputEntryContentTypedDict": ".messageinputentry", "MessageInputEntryRole": ".messageinputentry", + "MessageInputEntryType": ".messageinputentry", "MessageInputEntryTypedDict": ".messageinputentry", "Object": ".messageinputentry", - "Type": ".messageinputentry", "MessageOutputContentChunks": ".messageoutputcontentchunks", "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", "MessageOutputEntry": ".messageoutputentry", @@ -1809,8 +2019,12 @@ "OCRUsageInfoTypedDict": ".ocrusageinfo", "OutputContentChunks": ".outputcontentchunks", "OutputContentChunksTypedDict": ".outputcontentchunks", + "PaginationInfo": ".paginationinfo", + "PaginationInfoTypedDict": ".paginationinfo", "Prediction": ".prediction", "PredictionTypedDict": ".prediction", + "ProcessingStatusOut": ".processingstatusout", + "ProcessingStatusOutTypedDict": ".processingstatusout", "ReferenceChunk": ".referencechunk", "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", @@ -1836,6 +2050,13 @@ "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", + "ShareEnum": ".shareenum", + "SharingDelete": ".sharingdelete", + "SharingDeleteTypedDict": ".sharingdelete", + "SharingIn": ".sharingin", + "SharingInTypedDict": ".sharingin", + "SharingOut": 
".sharingout", + "SharingOutTypedDict": ".sharingout", "Source": ".source", "SSETypes": ".ssetypes", "Role": ".systemmessage", @@ -1853,6 +2074,9 @@ "ToolChoice": ".toolchoice", "ToolChoiceTypedDict": ".toolchoice", "ToolChoiceEnum": ".toolchoiceenum", + "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventType": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", "ToolExecutionDoneEvent": ".toolexecutiondoneevent", "ToolExecutionDoneEventType": ".toolexecutiondoneevent", "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index fc852f4b..7423a71b 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -12,6 +12,9 @@ from typing_extensions import Annotated, NotRequired, TypedDict +Type = Literal["base"] + + class BaseModelCardTypedDict(TypedDict): id: str capabilities: ModelCapabilitiesTypedDict @@ -25,7 +28,7 @@ class BaseModelCardTypedDict(TypedDict): deprecation: NotRequired[Nullable[datetime]] deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: Literal["base"] + type: Type class BaseModelCard(BaseModel): @@ -54,7 +57,7 @@ class BaseModelCard(BaseModel): default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ - Annotated[Optional[Literal["base"]], AfterValidator(validate_const("base"))], + Annotated[Optional[Type], AfterValidator(validate_const("base"))], pydantic.Field(alias="type"), ] = "base" diff --git a/src/mistralai/models/conversationevents.py b/src/mistralai/models/conversationevents.py index 8552edda..ba4c628c 100644 --- a/src/mistralai/models/conversationevents.py +++ b/src/mistralai/models/conversationevents.py @@ -12,6 +12,10 @@ from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict from .responsestartedevent import 
ResponseStartedEvent, ResponseStartedEventTypedDict from .ssetypes import SSETypes +from .toolexecutiondeltaevent import ( + ToolExecutionDeltaEvent, + ToolExecutionDeltaEventTypedDict, +) from .toolexecutiondoneevent import ( ToolExecutionDoneEvent, ToolExecutionDoneEventTypedDict, @@ -34,6 +38,7 @@ ResponseDoneEventTypedDict, ResponseErrorEventTypedDict, ToolExecutionStartedEventTypedDict, + ToolExecutionDeltaEventTypedDict, ToolExecutionDoneEventTypedDict, AgentHandoffStartedEventTypedDict, AgentHandoffDoneEventTypedDict, @@ -52,6 +57,7 @@ Annotated[ResponseStartedEvent, Tag("conversation.response.started")], Annotated[FunctionCallEvent, Tag("function.call.delta")], Annotated[MessageOutputEvent, Tag("message.output.delta")], + Annotated[ToolExecutionDeltaEvent, Tag("tool.execution.delta")], Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], ], diff --git a/src/mistralai/models/conversationhistory.py b/src/mistralai/models/conversationhistory.py index d07d7297..472915fe 100644 --- a/src/mistralai/models/conversationhistory.py +++ b/src/mistralai/models/conversationhistory.py @@ -17,10 +17,10 @@ EntriesTypedDict = TypeAliasType( "EntriesTypedDict", Union[ - MessageInputEntryTypedDict, FunctionResultEntryTypedDict, - ToolExecutionEntryTypedDict, + MessageInputEntryTypedDict, FunctionCallEntryTypedDict, + ToolExecutionEntryTypedDict, MessageOutputEntryTypedDict, AgentHandoffEntryTypedDict, ], @@ -30,10 +30,10 @@ Entries = TypeAliasType( "Entries", Union[ - MessageInputEntry, FunctionResultEntry, - ToolExecutionEntry, + MessageInputEntry, FunctionCallEntry, + ToolExecutionEntry, MessageOutputEntry, AgentHandoffEntry, ], diff --git a/src/mistralai/models/documentout.py b/src/mistralai/models/documentout.py new file mode 100644 index 00000000..65f1be80 --- /dev/null +++ b/src/mistralai/models/documentout.py @@ -0,0 +1,105 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class DocumentOutTypedDict(TypedDict): + id: str + library_id: str + hash: str + mime_type: str + extension: str + size: int + name: str + created_at: datetime + processing_status: str + uploaded_by_id: str + uploaded_by_type: str + tokens_processing_total: int + summary: NotRequired[Nullable[str]] + last_processed_at: NotRequired[Nullable[datetime]] + number_of_pages: NotRequired[Nullable[int]] + tokens_processing_main_content: NotRequired[Nullable[int]] + tokens_processing_summary: NotRequired[Nullable[int]] + + +class DocumentOut(BaseModel): + id: str + + library_id: str + + hash: str + + mime_type: str + + extension: str + + size: int + + name: str + + created_at: datetime + + processing_status: str + + uploaded_by_id: str + + uploaded_by_type: str + + tokens_processing_total: int + + summary: OptionalNullable[str] = UNSET + + last_processed_at: OptionalNullable[datetime] = UNSET + + number_of_pages: OptionalNullable[int] = UNSET + + tokens_processing_main_content: OptionalNullable[int] = UNSET + + tokens_processing_summary: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "summary", + "last_processed_at", + "number_of_pages", + "tokens_processing_main_content", + "tokens_processing_summary", + ] + nullable_fields = [ + "summary", + "last_processed_at", + "number_of_pages", + "tokens_processing_main_content", + "tokens_processing_summary", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable 
= k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/documenttextcontent.py b/src/mistralai/models/documenttextcontent.py new file mode 100644 index 00000000..c02528c2 --- /dev/null +++ b/src/mistralai/models/documenttextcontent.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing_extensions import TypedDict + + +class DocumentTextContentTypedDict(TypedDict): + text: str + + +class DocumentTextContent(BaseModel): + text: str diff --git a/src/mistralai/models/documentupdatein.py b/src/mistralai/models/documentupdatein.py new file mode 100644 index 00000000..0f6abd5b --- /dev/null +++ b/src/mistralai/models/documentupdatein.py @@ -0,0 +1,44 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class DocumentUpdateInTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + + +class DocumentUpdateIn(BaseModel): + name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name"] + nullable_fields = ["name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/entitytype.py b/src/mistralai/models/entitytype.py new file mode 100644 index 00000000..b5149c5f --- /dev/null +++ b/src/mistralai/models/entitytype.py @@ -0,0 +1,9 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import UnrecognizedStr +from typing import Literal, Union + + +EntityType = Union[Literal["User", "Workspace", "Org"], UnrecognizedStr] +r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/models/file.py b/src/mistralai/models/file.py new file mode 100644 index 00000000..682d7f6e --- /dev/null +++ b/src/mistralai/models/file.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +import io +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, MultipartFormMetadata +import pydantic +from typing import IO, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FileTypedDict(TypedDict): + file_name: str + content: Union[bytes, IO[bytes], io.BufferedReader] + content_type: NotRequired[str] + + +class File(BaseModel): + file_name: Annotated[ + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) + ] + + content: Annotated[ + Union[bytes, IO[bytes], io.BufferedReader], + pydantic.Field(alias=""), + FieldMetadata(multipart=MultipartFormMetadata(content=True)), + ] + + content_type: Annotated[ + Optional[str], + pydantic.Field(alias="Content-Type"), + FieldMetadata(multipart=True), + ] = None diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py index e6d86877..34321cf5 100644 --- a/src/mistralai/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/models/files_api_routes_upload_fileop.py @@ -1,40 +1,15 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .file import File, FileTypedDict from .filepurpose import FilePurpose -import io from mistralai.types import BaseModel from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_open_enum -import pydantic from pydantic.functional_validators import PlainValidator -from typing import IO, Optional, Union +from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict -class FileTypedDict(TypedDict): - file_name: str - content: Union[bytes, IO[bytes], io.BufferedReader] - content_type: NotRequired[str] - - -class File(BaseModel): - file_name: Annotated[ - str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) - ] - - content: Annotated[ - Union[bytes, IO[bytes], io.BufferedReader], - pydantic.Field(alias=""), - FieldMetadata(multipart=MultipartFormMetadata(content=True)), - ] - - content_type: Annotated[ - Optional[str], - pydantic.Field(alias="Content-Type"), - FieldMetadata(multipart=True), - ] = None - - class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): file: FileTypedDict r"""The File object (not file name) to be uploaded. 
diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py index 286357e7..7159ce00 100644 --- a/src/mistralai/models/ftmodelcard.py +++ b/src/mistralai/models/ftmodelcard.py @@ -12,6 +12,9 @@ from typing_extensions import Annotated, NotRequired, TypedDict +FTModelCardType = Literal["fine-tuned"] + + class FTModelCardTypedDict(TypedDict): r"""Extra fields for fine-tuned models.""" @@ -29,7 +32,7 @@ class FTModelCardTypedDict(TypedDict): deprecation: NotRequired[Nullable[datetime]] deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: Literal["fine-tuned"] + type: FTModelCardType archived: NotRequired[bool] @@ -66,8 +69,7 @@ class FTModelCard(BaseModel): TYPE: Annotated[ Annotated[ - Optional[Literal["fine-tuned"]], - AfterValidator(validate_const("fine-tuned")), + Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) ], pydantic.Field(alias="type"), ] = "fine-tuned" diff --git a/src/mistralai/models/inputentries.py b/src/mistralai/models/inputentries.py index 0221f968..8ae29837 100644 --- a/src/mistralai/models/inputentries.py +++ b/src/mistralai/models/inputentries.py @@ -14,10 +14,10 @@ InputEntriesTypedDict = TypeAliasType( "InputEntriesTypedDict", Union[ - MessageInputEntryTypedDict, FunctionResultEntryTypedDict, - ToolExecutionEntryTypedDict, + MessageInputEntryTypedDict, FunctionCallEntryTypedDict, + ToolExecutionEntryTypedDict, MessageOutputEntryTypedDict, AgentHandoffEntryTypedDict, ], @@ -27,10 +27,10 @@ InputEntries = TypeAliasType( "InputEntries", Union[ - MessageInputEntry, FunctionResultEntry, - ToolExecutionEntry, + MessageInputEntry, FunctionCallEntry, + ToolExecutionEntry, MessageOutputEntry, AgentHandoffEntry, ], diff --git a/src/mistralai/models/libraries_delete_v1op.py b/src/mistralai/models/libraries_delete_v1op.py new file mode 100644 index 00000000..56f8f8a8 --- /dev/null +++ b/src/mistralai/models/libraries_delete_v1op.py @@ -0,0 
+1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDeleteV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_documents_delete_v1op.py b/src/mistralai/models/libraries_documents_delete_v1op.py new file mode 100644 index 00000000..c33710b0 --- /dev/null +++ b/src/mistralai/models/libraries_documents_delete_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsDeleteV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py b/src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py new file mode 100644 index 00000000..e2459c1c --- /dev/null +++ b/src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetExtractedTextSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_documents_get_signed_url_v1op.py b/src/mistralai/models/libraries_documents_get_signed_url_v1op.py new file mode 100644 index 00000000..bc913ba5 --- /dev/null +++ b/src/mistralai/models/libraries_documents_get_signed_url_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_documents_get_status_v1op.py b/src/mistralai/models/libraries_documents_get_status_v1op.py new file mode 100644 index 00000000..08992d7c --- /dev/null +++ b/src/mistralai/models/libraries_documents_get_status_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetStatusV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetStatusV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_documents_get_text_content_v1op.py b/src/mistralai/models/libraries_documents_get_text_content_v1op.py new file mode 100644 index 00000000..21a131ad --- /dev/null +++ b/src/mistralai/models/libraries_documents_get_text_content_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetTextContentV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetTextContentV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_documents_get_v1op.py b/src/mistralai/models/libraries_documents_get_v1op.py new file mode 100644 index 00000000..ff2bdedb --- /dev/null +++ b/src/mistralai/models/libraries_documents_get_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_documents_list_v1op.py b/src/mistralai/models/libraries_documents_list_v1op.py new file mode 100644 index 00000000..04a3ed25 --- /dev/null +++ b/src/mistralai/models/libraries_documents_list_v1op.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class LibrariesDocumentsListV1RequestTypedDict(TypedDict): + library_id: str + search: NotRequired[Nullable[str]] + page_size: NotRequired[int] + page: NotRequired[int] + sort_by: NotRequired[str] + sort_order: NotRequired[str] + + +class LibrariesDocumentsListV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + page: Annotated[ + Optional[int], + 
FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + sort_by: Annotated[ + Optional[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "created_at" + + sort_order: Annotated[ + Optional[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "desc" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["search", "page_size", "page", "sort_by", "sort_order"] + nullable_fields = ["search"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/libraries_documents_reprocess_v1op.py b/src/mistralai/models/libraries_documents_reprocess_v1op.py new file mode 100644 index 00000000..861993e7 --- /dev/null +++ b/src/mistralai/models/libraries_documents_reprocess_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsReprocessV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsReprocessV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_documents_update_v1op.py b/src/mistralai/models/libraries_documents_update_v1op.py new file mode 100644 index 00000000..5551d5ee --- /dev/null +++ b/src/mistralai/models/libraries_documents_update_v1op.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + document_update_in: DocumentUpdateInTypedDict + + +class LibrariesDocumentsUpdateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_update_in: Annotated[ + DocumentUpdateIn, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/libraries_documents_upload_v1op.py b/src/mistralai/models/libraries_documents_upload_v1op.py new file mode 100644 index 00000000..51f536cc --- /dev/null +++ 
b/src/mistralai/models/libraries_documents_upload_v1op.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .file import File, FileTypedDict +from mistralai.types import BaseModel +from mistralai.utils import ( + FieldMetadata, + MultipartFormMetadata, + PathParamMetadata, + RequestMetadata, +) +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsUploadV1DocumentUploadTypedDict(TypedDict): + file: FileTypedDict + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + +class LibrariesDocumentsUploadV1DocumentUpload(BaseModel): + file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] + r"""The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + +class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): + library_id: str + request_body: LibrariesDocumentsUploadV1DocumentUploadTypedDict + + +class LibrariesDocumentsUploadV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + request_body: Annotated[ + LibrariesDocumentsUploadV1DocumentUpload, + FieldMetadata(request=RequestMetadata(media_type="multipart/form-data")), + ] diff --git a/src/mistralai/models/libraries_get_v1op.py b/src/mistralai/models/libraries_get_v1op.py new file mode 100644 index 00000000..b87090f6 --- /dev/null +++ b/src/mistralai/models/libraries_get_v1op.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesGetV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesGetV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_share_create_v1op.py b/src/mistralai/models/libraries_share_create_v1op.py new file mode 100644 index 00000000..a8b0e35d --- /dev/null +++ b/src/mistralai/models/libraries_share_create_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .sharingin import SharingIn, SharingInTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareCreateV1RequestTypedDict(TypedDict): + library_id: str + sharing_in: SharingInTypedDict + + +class LibrariesShareCreateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + sharing_in: Annotated[ + SharingIn, FieldMetadata(request=RequestMetadata(media_type="application/json")) + ] diff --git a/src/mistralai/models/libraries_share_delete_v1op.py b/src/mistralai/models/libraries_share_delete_v1op.py new file mode 100644 index 00000000..e29d556a --- /dev/null +++ b/src/mistralai/models/libraries_share_delete_v1op.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .sharingdelete import SharingDelete, SharingDeleteTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareDeleteV1RequestTypedDict(TypedDict): + library_id: str + sharing_delete: SharingDeleteTypedDict + + +class LibrariesShareDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + sharing_delete: Annotated[ + SharingDelete, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/libraries_share_list_v1op.py b/src/mistralai/models/libraries_share_list_v1op.py new file mode 100644 index 00000000..b276d756 --- /dev/null +++ b/src/mistralai/models/libraries_share_list_v1op.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareListV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesShareListV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/libraries_update_v1op.py b/src/mistralai/models/libraries_update_v1op.py new file mode 100644 index 00000000..c93895d9 --- /dev/null +++ b/src/mistralai/models/libraries_update_v1op.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesUpdateV1RequestTypedDict(TypedDict): + library_id: str + library_in_update: LibraryInUpdateTypedDict + + +class LibrariesUpdateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + library_in_update: Annotated[ + LibraryInUpdate, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/models/libraryin.py b/src/mistralai/models/libraryin.py new file mode 100644 index 00000000..872d494d --- /dev/null +++ b/src/mistralai/models/libraryin.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class LibraryInTypedDict(TypedDict): + name: str + description: NotRequired[Nullable[str]] + chunk_size: NotRequired[Nullable[int]] + + +class LibraryIn(BaseModel): + name: str + + description: OptionalNullable[str] = UNSET + + chunk_size: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "chunk_size"] + nullable_fields = ["description", "chunk_size"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + 
self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/libraryinupdate.py b/src/mistralai/models/libraryinupdate.py new file mode 100644 index 00000000..6e8ab81a --- /dev/null +++ b/src/mistralai/models/libraryinupdate.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class LibraryInUpdateTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class LibraryInUpdate(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "description"] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/libraryout.py b/src/mistralai/models/libraryout.py new file mode 100644 index 00000000..6a13130d --- /dev/null +++ 
b/src/mistralai/models/libraryout.py @@ -0,0 +1,107 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class LibraryOutTypedDict(TypedDict): + id: str + name: str + created_at: datetime + updated_at: datetime + owner_id: str + owner_type: str + total_size: int + nb_documents: int + chunk_size: Nullable[int] + emoji: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + generated_name: NotRequired[Nullable[str]] + generated_description: NotRequired[Nullable[str]] + explicit_user_members_count: NotRequired[Nullable[int]] + explicit_workspace_members_count: NotRequired[Nullable[int]] + org_sharing_role: NotRequired[Nullable[str]] + + +class LibraryOut(BaseModel): + id: str + + name: str + + created_at: datetime + + updated_at: datetime + + owner_id: str + + owner_type: str + + total_size: int + + nb_documents: int + + chunk_size: Nullable[int] + + emoji: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + generated_name: OptionalNullable[str] = UNSET + + generated_description: OptionalNullable[str] = UNSET + + explicit_user_members_count: OptionalNullable[int] = UNSET + + explicit_workspace_members_count: OptionalNullable[int] = UNSET + + org_sharing_role: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "emoji", + "description", + "generated_name", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + "org_sharing_role", + ] + nullable_fields = [ + "chunk_size", + "emoji", + "description", + "generated_name", + "generated_description", + "explicit_user_members_count", + 
"explicit_workspace_members_count", + "org_sharing_role", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/listdocumentout.py b/src/mistralai/models/listdocumentout.py new file mode 100644 index 00000000..9d39e087 --- /dev/null +++ b/src/mistralai/models/listdocumentout.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documentout import DocumentOut, DocumentOutTypedDict +from .paginationinfo import PaginationInfo, PaginationInfoTypedDict +from mistralai.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListDocumentOutTypedDict(TypedDict): + pagination: PaginationInfoTypedDict + data: List[DocumentOutTypedDict] + + +class ListDocumentOut(BaseModel): + pagination: PaginationInfo + + data: List[DocumentOut] diff --git a/src/mistralai/models/listlibraryout.py b/src/mistralai/models/listlibraryout.py new file mode 100644 index 00000000..1e647fe1 --- /dev/null +++ b/src/mistralai/models/listlibraryout.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .libraryout import LibraryOut, LibraryOutTypedDict +from mistralai.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListLibraryOutTypedDict(TypedDict): + data: List[LibraryOutTypedDict] + + +class ListLibraryOut(BaseModel): + data: List[LibraryOut] diff --git a/src/mistralai/models/listsharingout.py b/src/mistralai/models/listsharingout.py new file mode 100644 index 00000000..38c0dbe0 --- /dev/null +++ b/src/mistralai/models/listsharingout.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .sharingout import SharingOut, SharingOutTypedDict +from mistralai.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListSharingOutTypedDict(TypedDict): + data: List[SharingOutTypedDict] + + +class ListSharingOut(BaseModel): + data: List[SharingOut] diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py index 6f1190c7..c14ad5ae 100644 --- a/src/mistralai/models/messageinputentry.py +++ b/src/mistralai/models/messageinputentry.py @@ -14,7 +14,7 @@ Object = Literal["entry"] -Type = Literal["message.input"] +MessageInputEntryType = Literal["message.input"] MessageInputEntryRole = Literal["assistant", "user"] @@ -35,10 +35,11 @@ class MessageInputEntryTypedDict(TypedDict): role: MessageInputEntryRole content: MessageInputEntryContentTypedDict object: NotRequired[Object] - type: NotRequired[Type] + type: NotRequired[MessageInputEntryType] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] id: NotRequired[str] + prefix: NotRequired[bool] class MessageInputEntry(BaseModel): @@ -50,7 +51,7 @@ class MessageInputEntry(BaseModel): object: Optional[Object] = "entry" - type: Optional[Type] = "message.input" + type: 
Optional[MessageInputEntryType] = "message.input" created_at: Optional[datetime] = None @@ -58,9 +59,18 @@ class MessageInputEntry(BaseModel): id: Optional[str] = None + prefix: Optional[bool] = False + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] + optional_fields = [ + "object", + "type", + "created_at", + "completed_at", + "id", + "prefix", + ] nullable_fields = ["completed_at"] null_default_fields = [] diff --git a/src/mistralai/models/paginationinfo.py b/src/mistralai/models/paginationinfo.py new file mode 100644 index 00000000..00d4f1ec --- /dev/null +++ b/src/mistralai/models/paginationinfo.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing_extensions import TypedDict + + +class PaginationInfoTypedDict(TypedDict): + total_items: int + total_pages: int + current_page: int + page_size: int + has_more: bool + + +class PaginationInfo(BaseModel): + total_items: int + + total_pages: int + + current_page: int + + page_size: int + + has_more: bool diff --git a/src/mistralai/models/processingstatusout.py b/src/mistralai/models/processingstatusout.py new file mode 100644 index 00000000..e67bfa86 --- /dev/null +++ b/src/mistralai/models/processingstatusout.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing_extensions import TypedDict + + +class ProcessingStatusOutTypedDict(TypedDict): + document_id: str + processing_status: str + + +class ProcessingStatusOut(BaseModel): + document_id: str + + processing_status: str diff --git a/src/mistralai/models/shareenum.py b/src/mistralai/models/shareenum.py new file mode 100644 index 00000000..c2945514 --- /dev/null +++ b/src/mistralai/models/shareenum.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import UnrecognizedStr +from typing import Literal, Union + + +ShareEnum = Union[Literal["Viewer", "Editor"], UnrecognizedStr] diff --git a/src/mistralai/models/sharingdelete.py b/src/mistralai/models/sharingdelete.py new file mode 100644 index 00000000..b9df5f9d --- /dev/null +++ b/src/mistralai/models/sharingdelete.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .entitytype import EntityType +from mistralai.types import BaseModel +from mistralai.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing_extensions import Annotated, TypedDict + + +class SharingDeleteTypedDict(TypedDict): + org_id: str + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + + +class SharingDelete(BaseModel): + org_id: str + + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + + share_with_type: Annotated[EntityType, PlainValidator(validate_open_enum(False))] + r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/models/sharingin.py b/src/mistralai/models/sharingin.py new file mode 100644 index 00000000..af20fd14 --- /dev/null +++ b/src/mistralai/models/sharingin.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .entitytype import EntityType +from .shareenum import ShareEnum +from mistralai.types import BaseModel +from mistralai.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing_extensions import Annotated, TypedDict + + +class SharingInTypedDict(TypedDict): + org_id: str + level: ShareEnum + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + + +class SharingIn(BaseModel): + org_id: str + + level: Annotated[ShareEnum, PlainValidator(validate_open_enum(False))] + + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + + share_with_type: Annotated[EntityType, PlainValidator(validate_open_enum(False))] + r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/models/sharingout.py b/src/mistralai/models/sharingout.py new file mode 100644 index 00000000..a78a7764 --- /dev/null +++ b/src/mistralai/models/sharingout.py @@ -0,0 +1,59 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class SharingOutTypedDict(TypedDict): + library_id: str + org_id: str + role: str + share_with_type: str + share_with_uuid: str + user_id: NotRequired[Nullable[str]] + + +class SharingOut(BaseModel): + library_id: str + + org_id: str + + role: str + + share_with_type: str + + share_with_uuid: str + + user_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["user_id"] + nullable_fields = ["user_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/ssetypes.py b/src/mistralai/models/ssetypes.py index 4d15b4f1..796f0327 100644 --- a/src/mistralai/models/ssetypes.py +++ b/src/mistralai/models/ssetypes.py @@ -10,6 +10,7 @@ "conversation.response.error", "message.output.delta", "tool.execution.started", + "tool.execution.delta", "tool.execution.done", "agent.handoff.started", "agent.handoff.done", diff --git a/src/mistralai/models/toolexecutiondeltaevent.py b/src/mistralai/models/toolexecutiondeltaevent.py new file mode 100644 index 00000000..99b97e68 --- /dev/null +++ b/src/mistralai/models/toolexecutiondeltaevent.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .builtinconnectors import BuiltInConnectors +from datetime import datetime +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ToolExecutionDeltaEventType = Literal["tool.execution.delta"] + + +class ToolExecutionDeltaEventTypedDict(TypedDict): + id: str + name: BuiltInConnectors + arguments: str + type: NotRequired[ToolExecutionDeltaEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class ToolExecutionDeltaEvent(BaseModel): + id: str + + name: BuiltInConnectors + + arguments: str + + type: Optional[ToolExecutionDeltaEventType] = "tool.execution.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py index 533752ad..db503ea8 100644 --- a/src/mistralai/models/toolexecutionentry.py +++ b/src/mistralai/models/toolexecutionentry.py @@ -16,6 +16,7 @@ class ToolExecutionEntryTypedDict(TypedDict): name: BuiltInConnectors + arguments: str object: NotRequired[ToolExecutionEntryObject] type: NotRequired[ToolExecutionEntryType] created_at: NotRequired[datetime] @@ -27,6 +28,8 @@ class ToolExecutionEntryTypedDict(TypedDict): class ToolExecutionEntry(BaseModel): name: BuiltInConnectors + arguments: str + object: Optional[ToolExecutionEntryObject] = "entry" type: Optional[ToolExecutionEntryType] = "tool.execution" diff --git a/src/mistralai/models/toolexecutionstartedevent.py b/src/mistralai/models/toolexecutionstartedevent.py index e140665e..7a54058f 100644 --- a/src/mistralai/models/toolexecutionstartedevent.py +++ b/src/mistralai/models/toolexecutionstartedevent.py @@ -14,6 +14,7 @@ class ToolExecutionStartedEventTypedDict(TypedDict): id: str name: BuiltInConnectors + arguments: str type: 
NotRequired[ToolExecutionStartedEventType] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -24,6 +25,8 @@ class ToolExecutionStartedEvent(BaseModel): name: BuiltInConnectors + arguments: str + type: Optional[ToolExecutionStartedEventType] = "tool.execution.started" created_at: Optional[datetime] = None diff --git a/src/mistralai/models/toolreferencechunk.py b/src/mistralai/models/toolreferencechunk.py index c052340d..e50b8451 100644 --- a/src/mistralai/models/toolreferencechunk.py +++ b/src/mistralai/models/toolreferencechunk.py @@ -16,7 +16,8 @@ class ToolReferenceChunkTypedDict(TypedDict): title: str type: NotRequired[ToolReferenceChunkType] url: NotRequired[Nullable[str]] - source: NotRequired[Nullable[str]] + favicon: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] class ToolReferenceChunk(BaseModel): @@ -28,12 +29,14 @@ class ToolReferenceChunk(BaseModel): url: OptionalNullable[str] = UNSET - source: OptionalNullable[str] = UNSET + favicon: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "url", "source"] - nullable_fields = ["url", "source"] + optional_fields = ["type", "url", "favicon", "description"] + nullable_fields = ["url", "favicon", "description"] null_default_fields = [] serialized = handler(self) From 7176907c49e79aa1c9a5e7e5de6238839e2e2d34 Mon Sep 17 00:00:00 2001 From: fnareoh Date: Mon, 7 Jul 2025 15:26:29 +0200 Subject: [PATCH 145/223] add example for libraries --- examples/async_libraries.py | 57 ++++++++++++++++++++++++++++++++ examples/fixtures/lorem_ipsum.md | 11 ++++++ examples/libraries.py | 56 +++++++++++++++++++++++++++++++ 3 files changed, 124 insertions(+) create mode 100644 examples/async_libraries.py create mode 100644 examples/fixtures/lorem_ipsum.md create mode 100644 examples/libraries.py diff --git a/examples/async_libraries.py b/examples/async_libraries.py new file 
mode 100644 index 00000000..b2f9d4c4 --- /dev/null +++ b/examples/async_libraries.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python + +import os +import asyncio + +from mistralai import Mistral +from mistralai.models import File + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + # create new library + library = await client.beta.libraries.create_async(name="My API Library") + print(library) + + # Upload a new file + uploaded_file = await client.beta.libraries.documents.upload_async( + library_id=library.id, + file=File( + file_name="lorem_ipsum.md", + content=open("examples/fixtures/lorem_ipsum.md", "rb").read(), + ) + ) + print(uploaded_file) + + # List files + files = (await client.beta.libraries.documents.list_async(library_id=library.id)).data + print(files) + + # Retrieve a file + retrieved_file = await client.beta.libraries.documents.get_async(library_id=library.id, document_id=uploaded_file.id) + print(retrieved_file) + + # Retrieve a file content + retrieved_file_content = await client.beta.libraries.documents.text_content_async(library_id=library.id, document_id=uploaded_file.id) + print(retrieved_file_content) + + + # Rename a file + renamed_file = await client.beta.libraries.documents.update_async(library_id=library.id, document_id=uploaded_file.id, name="renamed_file.md") + print(renamed_file) + + # Delete a file + deleted_file = await client.beta.libraries.documents.delete_async(library_id=library.id, document_id=uploaded_file.id) + print(deleted_file) + + # Delete a library + deleted_library = await client.beta.libraries.delete_async(library_id=library.id) + print(deleted_library) + + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/fixtures/lorem_ipsum.md b/examples/fixtures/lorem_ipsum.md new file mode 100644 index 00000000..1b36f18c --- /dev/null +++ b/examples/fixtures/lorem_ipsum.md @@ -0,0 +1,11 @@ +# Lorem ipsum + +Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
Fusce interdum mi velit, ac tincidunt erat ullamcorper in. Cras imperdiet orci sit amet tincidunt feugiat. Vivamus imperdiet purus a elit varius interdum ultrices ac dui. Curabitur et bibendum ex. Maecenas diam nulla, tempus ultrices tincidunt non, blandit a neque. Vivamus consequat metus aliquet finibus pretium. Phasellus arcu magna, lacinia eget hendrerit eget, hendrerit vitae ipsum. + +Ut sodales enim purus, quis laoreet libero fermentum et. Sed id massa id turpis porttitor tristique. Cras sed eleifend ante. In hac habitasse platea dictumst. Mauris rutrum sodales varius. Vestibulum imperdiet urna ac aliquet auctor. Nulla ultrices nulla mauris, sed sodales lacus vestibulum sit amet. Phasellus mollis erat quis neque hendrerit accumsan. Curabitur imperdiet sapien diam, sit amet finibus turpis placerat at. Proin in euismod lorem, eget bibendum quam. + +Aenean sit amet elit ornare, laoreet elit eget, aliquet nunc. Integer quis fermentum metus. Donec venenatis eget quam gravida mollis. Sed a suscipit libero, eget aliquet nibh. Maecenas tempus est nisl, vel viverra metus pharetra faucibus. Cras sodales dignissim mi quis pellentesque. In non hendrerit enim. Aliquam pretium turpis nec tortor imperdiet, id cursus enim auctor. Praesent aliquam mollis quam, nec luctus orci finibus quis. Vivamus in nibh faucibus, efficitur libero ut, feugiat elit. Quisque volutpat velit at porttitor feugiat. Vivamus porta sapien eros, non ultricies arcu suscipit ac. Etiam elit lorem, condimentum convallis semper at, congue eu quam. In ultrices sed nunc ac rhoncus. Aenean ultrices lobortis ex, at malesuada libero. + +Etiam accumsan quam sit amet interdum bibendum. Pellentesque rutrum tellus vel nibh dapibus mollis. Vestibulum a dolor semper, semper turpis varius, lacinia tellus. Pellentesque placerat neque vitae elit porttitor tincidunt. Donec id pulvinar felis, eu aliquam ipsum. Praesent a sapien et justo gravida condimentum. Vivamus vitae posuere nisi, quis faucibus justo. 
Pellentesque eget metus mauris. Quisque in lobortis nisi. Suspendisse nec risus sagittis, tristique nulla sed, faucibus massa. Cras ullamcorper turpis nec sagittis mollis. Pellentesque mattis consectetur enim, at molestie justo molestie sit amet. + +In hac habitasse platea dictumst. Aenean sed turpis tellus. Proin tristique dignissim neque, quis interdum mauris aliquam non. Aenean vitae enim eu nulla consectetur vestibulum. Aliquam lacus massa, venenatis a consectetur nec, vulputate vitae urna. In ultricies pulvinar leo, vel tempus ante scelerisque a. Vivamus dictum sodales orci. Aliquam quis neque leo. Donec eu mauris accumsan, pretium magna quis, feugiat purus. Phasellus malesuada purus nunc, condimentum mollis ipsum sollicitudin vitae. Nam tempus sapien non nulla varius viverra. Cras vel felis volutpat, bibendum neque a, porttitor mauris. Ut eu dapibus justo. \ No newline at end of file diff --git a/examples/libraries.py b/examples/libraries.py new file mode 100644 index 00000000..88436540 --- /dev/null +++ b/examples/libraries.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python + +import os + +from mistralai import Mistral +from mistralai.models import File + + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + + client = Mistral(api_key=api_key) + + # create new library + library = client.beta.libraries.create(name="My API Library") + print(library) + + # Upload a new file + uploaded_file = client.beta.libraries.documents.upload( + library_id=library.id, + file=File( + file_name="lorem_ipsum.md", + content=open("examples/fixtures/lorem_ipsum.md", "rb").read(), + ) + ) + print(uploaded_file) + + # List files + files = client.beta.libraries.documents.list(library_id=library.id).data + print(files) + + # Retrieve a file + retrieved_file = client.beta.libraries.documents.get(library_id=library.id, document_id=uploaded_file.id) + print(retrieved_file) + + # Retrieve a file content + retrieved_file_content = 
client.beta.libraries.documents.text_content(library_id=library.id, document_id=uploaded_file.id) + print(retrieved_file_content) + + + # Rename a file + renamed_file = client.beta.libraries.documents.update(library_id=library.id, document_id=uploaded_file.id, name="renamed_file.md") + print(renamed_file) + + # Delete a file + deleted_file = client.beta.libraries.documents.delete(library_id=library.id, document_id=uploaded_file.id) + print(deleted_file) + + # Delete a library + deleted_library = client.beta.libraries.delete(library_id=library.id) + print(deleted_library) + + + +if __name__ == "__main__": + main() From d4c691dd7885630cd383a536e9f156cd71597aaf Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Wed, 23 Jul 2025 21:10:26 +0200 Subject: [PATCH 146/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.9.3=20(#251)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.568.2 * fix: add examples * chore: folder --------- Co-authored-by: speakeasybot Co-authored-by: jean-malo --- .speakeasy/gen.lock | 76 ++- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +- README.md | 8 + RELEASES.md | 12 +- docs/models/audiochunk.md | 9 + docs/models/audiochunktype.md | 8 + docs/models/audiotranscriptionrequest.md | 15 + .../models/audiotranscriptionrequeststream.md | 15 + docs/models/basemodelcard.md | 2 +- docs/models/basemodelcardtype.md | 8 + docs/models/batchjobin.md | 3 +- docs/models/batchjobout.md | 3 +- docs/models/classifierftmodelout.md | 2 + docs/models/completionargs.md | 2 +- docs/models/completionftmodelout.md | 2 + docs/models/contentchunk.md | 12 + docs/models/fileschema.md | 4 +- .../jobsapiroutesbatchgetbatchjobsrequest.md | 1 + docs/models/metadata.md | 7 + docs/models/retrievefileout.md | 2 + docs/models/thinkchunk.md | 10 + 
docs/models/thinkchunktype.md | 8 + docs/models/thinking.md | 17 + docs/models/timestampgranularity.md | 8 + docs/models/toolcall.md | 13 +- docs/models/transcriptionresponse.md | 13 + docs/models/transcriptionsegmentchunk.md | 12 + docs/models/transcriptionstreamdone.md | 14 + docs/models/transcriptionstreamdonetype.md | 8 + docs/models/transcriptionstreamevents.md | 9 + docs/models/transcriptionstreameventsdata.md | 29 ++ docs/models/transcriptionstreameventtypes.md | 11 + docs/models/transcriptionstreamlanguage.md | 10 + .../models/transcriptionstreamlanguagetype.md | 8 + .../models/transcriptionstreamsegmentdelta.md | 12 + .../transcriptionstreamsegmentdeltatype.md | 8 + docs/models/transcriptionstreamtextdelta.md | 10 + .../transcriptionstreamtextdeltatype.md | 8 + docs/models/type.md | 6 +- docs/models/uploadfileout.md | 4 +- docs/models/usageinfo.md | 12 +- docs/sdks/audio/README.md | 6 + docs/sdks/mistraljobs/README.md | 6 +- docs/sdks/transcriptions/README.md | 103 ++++ examples/fixtures/bcn_weather.mp3 | Bin 0 -> 353280 bytes .../agents}/async_agents_no_streaming.py | 0 .../agents}/async_conversation_agent.py | 0 .../agents}/async_conversation_run.py | 0 .../agents}/async_conversation_run_mcp.py | 0 .../async_conversation_run_mcp_remote.py | 0 .../async_conversation_run_mcp_remote_auth.py | 0 .../agents}/async_conversation_run_stream.py | 0 examples/mistral/audio/chat_base64.py | 31 ++ examples/mistral/audio/chat_no_streaming.py | 30 ++ examples/mistral/audio/chat_streaming.py | 37 ++ examples/mistral/audio/transcription_async.py | 22 + .../mistral/audio/transcription_segments.py | 23 + .../audio/transcription_segments_stream.py | 23 + .../audio/transcription_stream_async.py | 23 + examples/mistral/audio/transcription_url.py | 21 + .../chat}/async_chat_no_streaming.py | 0 .../async_chat_with_image_no_streaming.py | 0 .../chat}/async_chat_with_streaming.py | 0 .../chat}/async_structured_outputs.py | 0 .../{ => mistral/chat}/chat_no_streaming.py | 0 .../{ 
=> mistral/chat}/chat_prediction.py | 0 .../{ => mistral/chat}/chat_with_streaming.py | 0 .../chat}/chatbot_with_streaming.py | 0 .../chat}/completion_with_streaming.py | 0 .../{ => mistral/chat}/function_calling.py | 0 examples/{ => mistral/chat}/json_format.py | 0 .../{ => mistral/chat}/structured_outputs.py | 0 .../structured_outputs_with_json_schema.py | 0 .../chat}/structured_outputs_with_pydantic.py | 0 .../classifier}/async_classifier.py | 0 .../embeddings}/async_embeddings.py | 0 .../{ => mistral/embeddings}/embeddings.py | 0 .../fim}/async_code_completion.py | 0 examples/{ => mistral/fim}/code_completion.py | 0 examples/{ => mistral/jobs}/async_files.py | 0 examples/{ => mistral/jobs}/async_jobs.py | 0 .../{ => mistral/jobs}/async_jobs_chat.py | 0 examples/{ => mistral/jobs}/dry_run_job.py | 0 examples/{ => mistral/jobs}/files.py | 0 examples/{ => mistral/jobs}/jobs.py | 0 .../libraries}/async_libraries.py | 0 examples/{ => mistral/libraries}/libraries.py | 0 .../{ => mistral}/mcp_servers/sse_server.py | 0 .../{ => mistral}/mcp_servers/stdio_server.py | 0 .../{ => mistral/models}/async_list_models.py | 0 examples/{ => mistral/models}/list_models.py | 0 .../ocr}/ocr_process_from_file.py | 0 .../{ => mistral/ocr}/ocr_process_from_url.py | 0 pyproject.toml | 2 +- scripts/run_examples.sh | 19 +- src/mistralai/_version.py | 4 +- src/mistralai/audio.py | 18 + src/mistralai/mistral_jobs.py | 16 +- src/mistralai/models/__init__.py | 133 ++++- src/mistralai/models/audiochunk.py | 20 + .../models/audiotranscriptionrequest.py | 97 ++++ .../models/audiotranscriptionrequeststream.py | 97 ++++ src/mistralai/models/basemodelcard.py | 6 +- src/mistralai/models/batchjobin.py | 11 +- src/mistralai/models/batchjobout.py | 13 +- src/mistralai/models/classifierftmodelout.py | 6 + src/mistralai/models/completionargs.py | 5 +- src/mistralai/models/completionftmodelout.py | 6 + src/mistralai/models/contentchunk.py | 6 + src/mistralai/models/fileschema.py | 10 +- 
.../jobs_api_routes_batch_get_batch_jobsop.py | 9 +- src/mistralai/models/retrievefileout.py | 10 +- src/mistralai/models/thinkchunk.py | 35 ++ src/mistralai/models/timestampgranularity.py | 7 + src/mistralai/models/toolcall.py | 44 +- src/mistralai/models/transcriptionresponse.py | 79 +++ .../models/transcriptionsegmentchunk.py | 41 ++ .../models/transcriptionstreamdone.py | 85 ++++ .../models/transcriptionstreamevents.py | 58 +++ .../models/transcriptionstreameventtypes.py | 12 + .../models/transcriptionstreamlanguage.py | 35 ++ .../models/transcriptionstreamsegmentdelta.py | 41 ++ .../models/transcriptionstreamtextdelta.py | 35 ++ src/mistralai/models/uploadfileout.py | 10 +- src/mistralai/models/usageinfo.py | 73 ++- src/mistralai/sdk.py | 3 + src/mistralai/transcriptions.py | 480 ++++++++++++++++++ 128 files changed, 2201 insertions(+), 85 deletions(-) create mode 100644 docs/models/audiochunk.md create mode 100644 docs/models/audiochunktype.md create mode 100644 docs/models/audiotranscriptionrequest.md create mode 100644 docs/models/audiotranscriptionrequeststream.md create mode 100644 docs/models/basemodelcardtype.md create mode 100644 docs/models/metadata.md create mode 100644 docs/models/thinkchunk.md create mode 100644 docs/models/thinkchunktype.md create mode 100644 docs/models/thinking.md create mode 100644 docs/models/timestampgranularity.md create mode 100644 docs/models/transcriptionresponse.md create mode 100644 docs/models/transcriptionsegmentchunk.md create mode 100644 docs/models/transcriptionstreamdone.md create mode 100644 docs/models/transcriptionstreamdonetype.md create mode 100644 docs/models/transcriptionstreamevents.md create mode 100644 docs/models/transcriptionstreameventsdata.md create mode 100644 docs/models/transcriptionstreameventtypes.md create mode 100644 docs/models/transcriptionstreamlanguage.md create mode 100644 docs/models/transcriptionstreamlanguagetype.md create mode 100644 docs/models/transcriptionstreamsegmentdelta.md 
create mode 100644 docs/models/transcriptionstreamsegmentdeltatype.md create mode 100644 docs/models/transcriptionstreamtextdelta.md create mode 100644 docs/models/transcriptionstreamtextdeltatype.md create mode 100644 docs/sdks/audio/README.md create mode 100644 docs/sdks/transcriptions/README.md create mode 100644 examples/fixtures/bcn_weather.mp3 rename examples/{ => mistral/agents}/async_agents_no_streaming.py (100%) rename examples/{ => mistral/agents}/async_conversation_agent.py (100%) rename examples/{ => mistral/agents}/async_conversation_run.py (100%) rename examples/{ => mistral/agents}/async_conversation_run_mcp.py (100%) rename examples/{ => mistral/agents}/async_conversation_run_mcp_remote.py (100%) rename examples/{ => mistral/agents}/async_conversation_run_mcp_remote_auth.py (100%) rename examples/{ => mistral/agents}/async_conversation_run_stream.py (100%) create mode 100755 examples/mistral/audio/chat_base64.py create mode 100755 examples/mistral/audio/chat_no_streaming.py create mode 100755 examples/mistral/audio/chat_streaming.py create mode 100644 examples/mistral/audio/transcription_async.py create mode 100644 examples/mistral/audio/transcription_segments.py create mode 100644 examples/mistral/audio/transcription_segments_stream.py create mode 100644 examples/mistral/audio/transcription_stream_async.py create mode 100644 examples/mistral/audio/transcription_url.py rename examples/{ => mistral/chat}/async_chat_no_streaming.py (100%) rename examples/{ => mistral/chat}/async_chat_with_image_no_streaming.py (100%) rename examples/{ => mistral/chat}/async_chat_with_streaming.py (100%) rename examples/{ => mistral/chat}/async_structured_outputs.py (100%) rename examples/{ => mistral/chat}/chat_no_streaming.py (100%) rename examples/{ => mistral/chat}/chat_prediction.py (100%) rename examples/{ => mistral/chat}/chat_with_streaming.py (100%) rename examples/{ => mistral/chat}/chatbot_with_streaming.py (100%) rename examples/{ => 
mistral/chat}/completion_with_streaming.py (100%) rename examples/{ => mistral/chat}/function_calling.py (100%) rename examples/{ => mistral/chat}/json_format.py (100%) rename examples/{ => mistral/chat}/structured_outputs.py (100%) rename examples/{ => mistral/chat}/structured_outputs_with_json_schema.py (100%) rename examples/{ => mistral/chat}/structured_outputs_with_pydantic.py (100%) rename examples/{ => mistral/classifier}/async_classifier.py (100%) rename examples/{ => mistral/embeddings}/async_embeddings.py (100%) rename examples/{ => mistral/embeddings}/embeddings.py (100%) rename examples/{ => mistral/fim}/async_code_completion.py (100%) rename examples/{ => mistral/fim}/code_completion.py (100%) rename examples/{ => mistral/jobs}/async_files.py (100%) rename examples/{ => mistral/jobs}/async_jobs.py (100%) rename examples/{ => mistral/jobs}/async_jobs_chat.py (100%) rename examples/{ => mistral/jobs}/dry_run_job.py (100%) rename examples/{ => mistral/jobs}/files.py (100%) rename examples/{ => mistral/jobs}/jobs.py (100%) rename examples/{ => mistral/libraries}/async_libraries.py (100%) rename examples/{ => mistral/libraries}/libraries.py (100%) rename examples/{ => mistral}/mcp_servers/sse_server.py (100%) rename examples/{ => mistral}/mcp_servers/stdio_server.py (100%) rename examples/{ => mistral/models}/async_list_models.py (100%) rename examples/{ => mistral/models}/list_models.py (100%) rename examples/{ => mistral/ocr}/ocr_process_from_file.py (100%) rename examples/{ => mistral/ocr}/ocr_process_from_url.py (100%) create mode 100644 src/mistralai/audio.py create mode 100644 src/mistralai/models/audiochunk.py create mode 100644 src/mistralai/models/audiotranscriptionrequest.py create mode 100644 src/mistralai/models/audiotranscriptionrequeststream.py create mode 100644 src/mistralai/models/thinkchunk.py create mode 100644 src/mistralai/models/timestampgranularity.py create mode 100644 src/mistralai/models/transcriptionresponse.py create mode 100644 
src/mistralai/models/transcriptionsegmentchunk.py create mode 100644 src/mistralai/models/transcriptionstreamdone.py create mode 100644 src/mistralai/models/transcriptionstreamevents.py create mode 100644 src/mistralai/models/transcriptionstreameventtypes.py create mode 100644 src/mistralai/models/transcriptionstreamlanguage.py create mode 100644 src/mistralai/models/transcriptionstreamsegmentdelta.py create mode 100644 src/mistralai/models/transcriptionstreamtextdelta.py create mode 100644 src/mistralai/transcriptions.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 6cf27f7a..73686adb 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,18 +1,19 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 82cf79b2dee6811d91e2912113c21d3a + docChecksum: c33c788946fa446bfcf90b60f68abde9 docVersion: 1.0.0 speakeasyVersion: 1.568.2 generationVersion: 2.634.2 - releaseVersion: 1.9.2 - configChecksum: 1ca921f44508650d65ccf46783910ff3 + releaseVersion: 1.9.3 + configChecksum: 0f65a9bdd8df5ae03eaaaea3ab055bf1 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true features: python: additionalDependencies: 1.0.0 + additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 core: 5.19.3 customCodeRegions: 0.1.1 @@ -89,7 +90,12 @@ generatedFiles: - docs/models/assistantmessage.md - docs/models/assistantmessagecontent.md - docs/models/assistantmessagerole.md + - docs/models/audiochunk.md + - docs/models/audiochunktype.md + - docs/models/audiotranscriptionrequest.md + - docs/models/audiotranscriptionrequeststream.md - docs/models/basemodelcard.md + - docs/models/basemodelcardtype.md - docs/models/batcherror.md - docs/models/batchjobin.md - docs/models/batchjobout.md @@ -320,6 +326,7 @@ generatedFiles: - docs/models/messageoutputeventrole.md - 
docs/models/messageoutputeventtype.md - docs/models/messages.md + - docs/models/metadata.md - docs/models/metricout.md - docs/models/mistralpromptmode.md - docs/models/modelcapabilities.md @@ -375,6 +382,10 @@ generatedFiles: - docs/models/systemmessagecontent.md - docs/models/textchunk.md - docs/models/textchunktype.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md + - docs/models/timestampgranularity.md - docs/models/tool.md - docs/models/toolcall.md - docs/models/toolchoice.md @@ -398,6 +409,19 @@ generatedFiles: - docs/models/tools.md - docs/models/tooltypes.md - docs/models/trainingfile.md + - docs/models/transcriptionresponse.md + - docs/models/transcriptionsegmentchunk.md + - docs/models/transcriptionstreamdone.md + - docs/models/transcriptionstreamdonetype.md + - docs/models/transcriptionstreamevents.md + - docs/models/transcriptionstreameventsdata.md + - docs/models/transcriptionstreameventtypes.md + - docs/models/transcriptionstreamlanguage.md + - docs/models/transcriptionstreamlanguagetype.md + - docs/models/transcriptionstreamsegmentdelta.md + - docs/models/transcriptionstreamsegmentdeltatype.md + - docs/models/transcriptionstreamtextdelta.md + - docs/models/transcriptionstreamtextdeltatype.md - docs/models/two.md - docs/models/type.md - docs/models/unarchiveftmodelout.md @@ -420,6 +444,7 @@ generatedFiles: - docs/models/websearchtooltype.md - docs/sdks/accesses/README.md - docs/sdks/agents/README.md + - docs/sdks/audio/README.md - docs/sdks/batch/README.md - docs/sdks/beta/README.md - docs/sdks/chat/README.md @@ -437,6 +462,7 @@ generatedFiles: - docs/sdks/mistraljobs/README.md - docs/sdks/models/README.md - docs/sdks/ocr/README.md + - docs/sdks/transcriptions/README.md - poetry.toml - py.typed - scripts/prepare_readme.py @@ -448,6 +474,7 @@ generatedFiles: - src/mistralai/_version.py - src/mistralai/accesses.py - src/mistralai/agents.py + - src/mistralai/audio.py - src/mistralai/basesdk.py - 
src/mistralai/batch.py - src/mistralai/beta.py @@ -489,6 +516,9 @@ generatedFiles: - src/mistralai/models/apiendpoint.py - src/mistralai/models/archiveftmodelout.py - src/mistralai/models/assistantmessage.py + - src/mistralai/models/audiochunk.py + - src/mistralai/models/audiotranscriptionrequest.py + - src/mistralai/models/audiotranscriptionrequeststream.py - src/mistralai/models/basemodelcard.py - src/mistralai/models/batcherror.py - src/mistralai/models/batchjobin.py @@ -668,6 +698,8 @@ generatedFiles: - src/mistralai/models/ssetypes.py - src/mistralai/models/systemmessage.py - src/mistralai/models/textchunk.py + - src/mistralai/models/thinkchunk.py + - src/mistralai/models/timestampgranularity.py - src/mistralai/models/tool.py - src/mistralai/models/toolcall.py - src/mistralai/models/toolchoice.py @@ -681,6 +713,14 @@ generatedFiles: - src/mistralai/models/toolreferencechunk.py - src/mistralai/models/tooltypes.py - src/mistralai/models/trainingfile.py + - src/mistralai/models/transcriptionresponse.py + - src/mistralai/models/transcriptionsegmentchunk.py + - src/mistralai/models/transcriptionstreamdone.py + - src/mistralai/models/transcriptionstreamevents.py + - src/mistralai/models/transcriptionstreameventtypes.py + - src/mistralai/models/transcriptionstreamlanguage.py + - src/mistralai/models/transcriptionstreamsegmentdelta.py + - src/mistralai/models/transcriptionstreamtextdelta.py - src/mistralai/models/unarchiveftmodelout.py - src/mistralai/models/updateftmodelin.py - src/mistralai/models/uploadfileout.py @@ -696,6 +736,7 @@ generatedFiles: - src/mistralai/py.typed - src/mistralai/sdk.py - src/mistralai/sdkconfiguration.py + - src/mistralai/transcriptions.py - src/mistralai/types/__init__.py - src/mistralai/types/basemodel.py - src/mistralai/utils/__init__.py @@ -751,7 +792,7 @@ examples: application/json: {} responses: "200": - application/json: {"id": "", "object": "model", "created": 124166, "owned_by": "", "root": "", "archived": true, "capabilities": 
{"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "c4f8ef9a-6612-4f49-88fa-a80eb8116e46", "model_type": "completion"} + application/json: {"id": "", "object": "model", "created": 124166, "owned_by": "", "workspace_id": "", "root": "", "root_version": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "c4f8ef9a-6612-4f49-88fa-a80eb8116e46", "model_type": "completion"} jobs_api_routes_fine_tuning_archive_fine_tuned_model: speakeasy-default-jobs-api-routes-fine-tuning-archive-fine-tuned-model: parameters: @@ -1027,10 +1068,10 @@ examples: jobs_api_routes_batch_create_batch_job: speakeasy-default-jobs-api-routes-batch-create-batch-job: requestBody: - application/json: {"input_files": ["fe3343a2-3b8d-404b-ba32-a78dede2614a"], "endpoint": "/v1/moderations", "model": "Altima", "timeout_hours": 24} + application/json: {"input_files": ["fe3343a2-3b8d-404b-ba32-a78dede2614a"], "endpoint": "/v1/moderations", "timeout_hours": 24} responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["7b2553d8-e17f-4df5-a862-a1678f6b5271", "8c618d9f-7d82-42ba-a284-d57d84f50a58", "c042f996-e842-441d-ae47-4e0850334e41"], "endpoint": "", "model": "Taurus", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 395527, "total_requests": 166919, "completed_requests": 258552, "succeeded_requests": 480980, "failed_requests": 684176} + application/json: {"id": "", "object": "batch", "input_files": ["7b2553d8-e17f-4df5-a862-a1678f6b5271", "8c618d9f-7d82-42ba-a284-d57d84f50a58", "c042f996-e842-441d-ae47-4e0850334e41"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 395527, "total_requests": 166919, "completed_requests": 258552, "succeeded_requests": 
480980, "failed_requests": 684176} jobs_api_routes_batch_get_batch_job: speakeasy-default-jobs-api-routes-batch-get-batch-job: parameters: @@ -1038,7 +1079,7 @@ examples: job_id: "4017dc9f-b629-42f4-9700-8c681b9e7f0f" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["11b83f16-f2f9-4de4-a81f-203fff419c99"], "endpoint": "", "model": "Accord", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 900958, "total_requests": 458292, "completed_requests": 184893, "succeeded_requests": 104800, "failed_requests": 836210} + application/json: {"id": "", "object": "batch", "input_files": ["11b83f16-f2f9-4de4-a81f-203fff419c99"], "endpoint": "", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 900958, "total_requests": 458292, "completed_requests": 184893, "succeeded_requests": 104800, "failed_requests": 836210} jobs_api_routes_batch_cancel_batch_job: speakeasy-default-jobs-api-routes-batch-cancel-batch-job: parameters: @@ -1046,14 +1087,14 @@ examples: job_id: "4fb29d1c-535b-4f0a-a1cb-2167f86da569" responses: "200": - application/json: {"id": "", "object": "batch", "input_files": ["8fd9d88a-66be-43fd-a816-ba509ca3ca85"], "endpoint": "", "model": "PT Cruiser", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 608251, "total_requests": 12693, "completed_requests": 203340, "succeeded_requests": 189291, "failed_requests": 969057} + application/json: {"id": "", "object": "batch", "input_files": ["8fd9d88a-66be-43fd-a816-ba509ca3ca85"], "endpoint": "", "errors": [], "status": "TIMEOUT_EXCEEDED", "created_at": 608251, "total_requests": 12693, "completed_requests": 203340, "succeeded_requests": 189291, "failed_requests": 969057} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} stream_chat: @@ -1069,7 +1110,7 @@ examples: application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} stream_fim: @@ -1085,7 +1126,7 @@ examples: application/json: {"stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "agent_id": ""} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": []} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} stream_agents: @@ -1101,7 +1142,7 @@ examples: application/json: {"model": "mistral-embed", "input": ["Embed this sentence.", "As well as this one."]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} "422": application/json: {} moderations_v1_moderations_post: @@ -1342,5 +1383,16 @@ examples: application/json: {"library_id": "7f9c6af4-e362-4cf1-9363-0409d51c2dfa", "org_id": "6b2cac3a-b29c-4d8f-bebb-0db06ec1bf97", "role": "", "share_with_type": "", "share_with_uuid": "618c78f1-41ca-45c3-8ef2-7d78898c7061"} "422": application/json: {} + audio_api_v1_transcriptions_post: + speakeasy-default-audio-api-v1-transcriptions-post: + requestBody: + multipart/form-data: {"model": "Model X", "stream": false} + responses: + "200": + 
application/json: {"model": "Beetle", "text": "", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "language": ""} + audio_api_v1_transcriptions_post_stream: + speakeasy-default-audio-api-v1-transcriptions-post-stream: + requestBody: + multipart/form-data: {"model": "Camry", "stream": true} examplesVersion: 1.0.2 generatedTests: {} diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index d3df5c35..ffc6c827 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.2 + version: 1.9.3 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 75541fbd..45143669 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:2ab1acc41424ca9be28ef867168aeb32af9fc7129b0a91494c0cd24d68c30345 - sourceBlobDigest: sha256:029ae17d555b02220397bba95308ba545c4733db81e65258be7baf9991d10c3a + sourceRevisionDigest: sha256:22d8044215dc1331ba83f3d25598409bc82fdc04d68033fb05e0133a13cc4dad + sourceBlobDigest: sha256:f3322d8a44d0bf1515b5c1c078525dbf00ff90e6110644de4c03b0b0e9050350 tags: - latest - - speakeasy-sdk-regen-1751557705 + - speakeasy-sdk-regen-1753290410 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:2ab1acc41424ca9be28ef867168aeb32af9fc7129b0a91494c0cd24d68c30345 - sourceBlobDigest: sha256:029ae17d555b02220397bba95308ba545c4733db81e65258be7baf9991d10c3a + sourceRevisionDigest: sha256:22d8044215dc1331ba83f3d25598409bc82fdc04d68033fb05e0133a13cc4dad + sourceBlobDigest: sha256:f3322d8a44d0bf1515b5c1c078525dbf00ff90e6110644de4c03b0b0e9050350 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: 
sha256:b45de481b3d77689a76a406421d4625dc37cc17bf90bab2f7d6e78f3eec77a9c + codeSamplesRevisionDigest: sha256:1fd9897fdd851557c592b8fd46232518359401d15a6574933c43be63ec2edb53 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.568.2 diff --git a/README.md b/README.md index 503c5128..f71ccfcb 100644 --- a/README.md +++ b/README.md @@ -434,6 +434,14 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [complete](docs/sdks/agents/README.md#complete) - Agents Completion * [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion +### [audio](docs/sdks/audio/README.md) + + +#### [audio.transcriptions](docs/sdks/transcriptions/README.md) + +* [complete](docs/sdks/transcriptions/README.md#complete) - Create Transcription +* [stream](docs/sdks/transcriptions/README.md#stream) - Create streaming transcription (SSE) + ### [batch](docs/sdks/batch/README.md) diff --git a/RELEASES.md b/RELEASES.md index b66777ed..2089bb04 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -258,4 +258,14 @@ Based on: ### Generated - [python v1.9.2] . ### Releases -- [PyPI v1.9.2] https://pypi.org/project/mistralai/1.9.2 - . \ No newline at end of file +- [PyPI v1.9.2] https://pypi.org/project/mistralai/1.9.2 - . + +## 2025-07-23 17:06:32 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.3] . +### Releases +- [PyPI v1.9.3] https://pypi.org/project/mistralai/1.9.3 - . 
\ No newline at end of file diff --git a/docs/models/audiochunk.md b/docs/models/audiochunk.md new file mode 100644 index 00000000..c443e7ad --- /dev/null +++ b/docs/models/audiochunk.md @@ -0,0 +1,9 @@ +# AudioChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `input_audio` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.AudioChunkType]](../models/audiochunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/audiochunktype.md b/docs/models/audiochunktype.md new file mode 100644 index 00000000..46ebf372 --- /dev/null +++ b/docs/models/audiochunktype.md @@ -0,0 +1,8 @@ +# AudioChunkType + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `INPUT_AUDIO` | input_audio | \ No newline at end of file diff --git a/docs/models/audiotranscriptionrequest.md b/docs/models/audiotranscriptionrequest.md new file mode 100644 index 00000000..e876de18 --- /dev/null +++ b/docs/models/audiotranscriptionrequest.md @@ -0,0 +1,15 @@ +# AudioTranscriptionRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file 
uploaded to /v1/files | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `stream` | *Optional[Literal[False]]* | :heavy_minus_sign: | N/A | +| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | \ No newline at end of file diff --git a/docs/models/audiotranscriptionrequeststream.md b/docs/models/audiotranscriptionrequeststream.md new file mode 100644 index 00000000..975e437a --- /dev/null +++ b/docs/models/audiotranscriptionrequeststream.md @@ -0,0 +1,15 @@ +# AudioTranscriptionRequestStream + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `stream` | *Optional[Literal[True]]* | :heavy_minus_sign: | N/A | +| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. 
| \ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index f5ce8c5e..58ad5e25 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -17,4 +17,4 @@ | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.BaseModelCardType]](../models/basemodelcardtype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/basemodelcardtype.md b/docs/models/basemodelcardtype.md new file mode 100644 index 00000000..4a40ce76 --- /dev/null +++ b/docs/models/basemodelcardtype.md @@ -0,0 +1,8 @@ +# BaseModelCardType + + +## Values + +| Name | Value | +| ------ | ------ | +| `BASE` | base | \ No newline at end of file diff --git a/docs/models/batchjobin.md b/docs/models/batchjobin.md index 5203a520..b5b13786 100644 --- a/docs/models/batchjobin.md +++ b/docs/models/batchjobin.md @@ -7,6 +7,7 @@ | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | | `input_files` | List[*str*] | :heavy_check_mark: | N/A | | `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | | `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff 
--git a/docs/models/batchjobout.md b/docs/models/batchjobout.md index 16374467..b66fff08 100644 --- a/docs/models/batchjobout.md +++ b/docs/models/batchjobout.md @@ -10,7 +10,8 @@ | `input_files` | List[*str*] | :heavy_check_mark: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `endpoint` | *str* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | diff --git a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md index 506af14e..dd9e8bf9 100644 --- a/docs/models/classifierftmodelout.md +++ b/docs/models/classifierftmodelout.md @@ -9,7 +9,9 @@ | `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | :heavy_minus_sign: | N/A | | `created` | *int* | :heavy_check_mark: | N/A | | `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | | `archived` | *bool* | :heavy_check_mark: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/completionargs.md b/docs/models/completionargs.md index 5f07b673..0d108225 100644 --- a/docs/models/completionargs.md +++ b/docs/models/completionargs.md @@ -10,7 +10,7 @@ White-listed arguments from the completion API | `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | | `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | 
`frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `temperature` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/completionftmodelout.md b/docs/models/completionftmodelout.md index f1e22b88..cd085825 100644 --- a/docs/models/completionftmodelout.md +++ b/docs/models/completionftmodelout.md @@ -9,7 +9,9 @@ | `object` | [Optional[models.CompletionFTModelOutObject]](../models/completionftmodeloutobject.md) | :heavy_minus_sign: | N/A | | `created` | *int* | :heavy_check_mark: | N/A | | `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | | `archived` | *bool* | :heavy_check_mark: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/contentchunk.md b/docs/models/contentchunk.md index a65cd054..cb7e51d3 100644 --- a/docs/models/contentchunk.md +++ b/docs/models/contentchunk.md @@ -33,3 +33,15 @@ value: models.ReferenceChunk = /* values here */ value: models.FileChunk = /* values here */ ``` +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + +### `models.AudioChunk` + +```python +value: models.AudioChunk = /* values here */ +``` + diff --git a/docs/models/fileschema.md b/docs/models/fileschema.md index 9746a995..4f3e72db 100644 --- a/docs/models/fileschema.md +++ b/docs/models/fileschema.md @@ -13,4 +13,6 @@ | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | | `sample_type` | 
[models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | | `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | \ No newline at end of file +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md index f2a3bb78..b062b873 100644 --- a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md +++ b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md @@ -8,6 +8,7 @@ | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/metadata.md b/docs/models/metadata.md new file mode 100644 index 00000000..e655f580 --- /dev/null +++ b/docs/models/metadata.md @@ -0,0 +1,7 @@ +# Metadata + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/retrievefileout.md b/docs/models/retrievefileout.md index 10f738b9..28f97dd2 100644 --- a/docs/models/retrievefileout.md +++ b/docs/models/retrievefileout.md @@ -13,5 +13,7 @@ | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | | `sample_type` | 
[models.SampleType](../models/sampletype.md) | :heavy_check_mark: | N/A | | | `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | | `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | | `deleted` | *bool* | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/thinkchunk.md b/docs/models/thinkchunk.md new file mode 100644 index 00000000..66b2e0cd --- /dev/null +++ b/docs/models/thinkchunk.md @@ -0,0 +1,10 @@ +# ThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. 
| +| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/thinkchunktype.md b/docs/models/thinkchunktype.md new file mode 100644 index 00000000..baf6f755 --- /dev/null +++ b/docs/models/thinkchunktype.md @@ -0,0 +1,8 @@ +# ThinkChunkType + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `THINKING` | thinking | \ No newline at end of file diff --git a/docs/models/thinking.md b/docs/models/thinking.md new file mode 100644 index 00000000..c7a0d5c9 --- /dev/null +++ b/docs/models/thinking.md @@ -0,0 +1,17 @@ +# Thinking + + +## Supported Types + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + diff --git a/docs/models/timestampgranularity.md b/docs/models/timestampgranularity.md new file mode 100644 index 00000000..0d2a8054 --- /dev/null +++ b/docs/models/timestampgranularity.md @@ -0,0 +1,8 @@ +# TimestampGranularity + + +## Values + +| Name | Value | +| --------- | --------- | +| `SEGMENT` | segment | \ No newline at end of file diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md index 3819236b..43e09050 100644 --- a/docs/models/toolcall.md +++ b/docs/models/toolcall.md @@ -3,9 +3,10 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | 
Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.Metadata]](../models/metadata.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionresponse.md b/docs/models/transcriptionresponse.md new file mode 100644 index 00000000..1bc0189c --- /dev/null +++ b/docs/models/transcriptionresponse.md @@ -0,0 +1,13 @@ +# TranscriptionResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `segments` | List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | +| `language` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionsegmentchunk.md b/docs/models/transcriptionsegmentchunk.md new file mode 100644 index 00000000..bebc9f72 --- /dev/null +++ 
b/docs/models/transcriptionsegmentchunk.md @@ -0,0 +1,12 @@ +# TranscriptionSegmentChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` | *float* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamdone.md b/docs/models/transcriptionstreamdone.md new file mode 100644 index 00000000..9ecf7d9c --- /dev/null +++ b/docs/models/transcriptionstreamdone.md @@ -0,0 +1,14 @@ +# TranscriptionStreamDone + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `segments` | List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | +| `type` | [Optional[models.TranscriptionStreamDoneType]](../models/transcriptionstreamdonetype.md) | :heavy_minus_sign: | N/A | +| `language` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamdonetype.md 
b/docs/models/transcriptionstreamdonetype.md new file mode 100644 index 00000000..db092c4f --- /dev/null +++ b/docs/models/transcriptionstreamdonetype.md @@ -0,0 +1,8 @@ +# TranscriptionStreamDoneType + + +## Values + +| Name | Value | +| -------------------- | -------------------- | +| `TRANSCRIPTION_DONE` | transcription.done | \ No newline at end of file diff --git a/docs/models/transcriptionstreamevents.md b/docs/models/transcriptionstreamevents.md new file mode 100644 index 00000000..f760385d --- /dev/null +++ b/docs/models/transcriptionstreamevents.md @@ -0,0 +1,9 @@ +# TranscriptionStreamEvents + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `event` | [models.TranscriptionStreamEventTypes](../models/transcriptionstreameventtypes.md) | :heavy_check_mark: | N/A | +| `data` | [models.TranscriptionStreamEventsData](../models/transcriptionstreameventsdata.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreameventsdata.md b/docs/models/transcriptionstreameventsdata.md new file mode 100644 index 00000000..eea8e928 --- /dev/null +++ b/docs/models/transcriptionstreameventsdata.md @@ -0,0 +1,29 @@ +# TranscriptionStreamEventsData + + +## Supported Types + +### `models.TranscriptionStreamDone` + +```python +value: models.TranscriptionStreamDone = /* values here */ +``` + +### `models.TranscriptionStreamLanguage` + +```python +value: models.TranscriptionStreamLanguage = /* values here */ +``` + +### `models.TranscriptionStreamSegmentDelta` + +```python +value: models.TranscriptionStreamSegmentDelta = /* values here */ +``` + +### `models.TranscriptionStreamTextDelta` 
+ +```python +value: models.TranscriptionStreamTextDelta = /* values here */ +``` + diff --git a/docs/models/transcriptionstreameventtypes.md b/docs/models/transcriptionstreameventtypes.md new file mode 100644 index 00000000..e4eb25a6 --- /dev/null +++ b/docs/models/transcriptionstreameventtypes.md @@ -0,0 +1,11 @@ +# TranscriptionStreamEventTypes + + +## Values + +| Name | Value | +| -------------------------- | -------------------------- | +| `TRANSCRIPTION_LANGUAGE` | transcription.language | +| `TRANSCRIPTION_SEGMENT` | transcription.segment | +| `TRANSCRIPTION_TEXT_DELTA` | transcription.text.delta | +| `TRANSCRIPTION_DONE` | transcription.done | \ No newline at end of file diff --git a/docs/models/transcriptionstreamlanguage.md b/docs/models/transcriptionstreamlanguage.md new file mode 100644 index 00000000..e16c8fdc --- /dev/null +++ b/docs/models/transcriptionstreamlanguage.md @@ -0,0 +1,10 @@ +# TranscriptionStreamLanguage + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | +| `type` | [Optional[models.TranscriptionStreamLanguageType]](../models/transcriptionstreamlanguagetype.md) | :heavy_minus_sign: | N/A | +| `audio_language` | *str* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamlanguagetype.md b/docs/models/transcriptionstreamlanguagetype.md new file mode 100644 index 00000000..e93521e1 --- /dev/null +++ b/docs/models/transcriptionstreamlanguagetype.md @@ -0,0 +1,8 @@ +# TranscriptionStreamLanguageType + + +## Values + +| Name | 
Value | +| ------------------------ | ------------------------ | +| `TRANSCRIPTION_LANGUAGE` | transcription.language | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdelta.md b/docs/models/transcriptionstreamsegmentdelta.md new file mode 100644 index 00000000..3deeedf0 --- /dev/null +++ b/docs/models/transcriptionstreamsegmentdelta.md @@ -0,0 +1,12 @@ +# TranscriptionStreamSegmentDelta + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` | *float* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.TranscriptionStreamSegmentDeltaType]](../models/transcriptionstreamsegmentdeltatype.md) | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdeltatype.md b/docs/models/transcriptionstreamsegmentdeltatype.md new file mode 100644 index 00000000..03ff3e8b --- /dev/null +++ b/docs/models/transcriptionstreamsegmentdeltatype.md @@ -0,0 +1,8 @@ +# TranscriptionStreamSegmentDeltaType + + +## Values + +| Name | Value | +| ----------------------- | ----------------------- | +| `TRANSCRIPTION_SEGMENT` | transcription.segment | \ No newline at end of file diff --git a/docs/models/transcriptionstreamtextdelta.md b/docs/models/transcriptionstreamtextdelta.md new file mode 100644 index 00000000..adddfe18 --- /dev/null +++ b/docs/models/transcriptionstreamtextdelta.md @@ -0,0 +1,10 @@ +# 
TranscriptionStreamTextDelta + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | [Optional[models.TranscriptionStreamTextDeltaType]](../models/transcriptionstreamtextdeltatype.md) | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamtextdeltatype.md b/docs/models/transcriptionstreamtextdeltatype.md new file mode 100644 index 00000000..b7c9d675 --- /dev/null +++ b/docs/models/transcriptionstreamtextdeltatype.md @@ -0,0 +1,8 @@ +# TranscriptionStreamTextDeltaType + + +## Values + +| Name | Value | +| -------------------------- | -------------------------- | +| `TRANSCRIPTION_TEXT_DELTA` | transcription.text.delta | \ No newline at end of file diff --git a/docs/models/type.md b/docs/models/type.md index 239a00f5..d05ead75 100644 --- a/docs/models/type.md +++ b/docs/models/type.md @@ -3,6 +3,6 @@ ## Values -| Name | Value | -| ------ | ------ | -| `BASE` | base | \ No newline at end of file +| Name | Value | +| ----------------------- | ----------------------- | +| `TRANSCRIPTION_SEGMENT` | transcription_segment | \ No newline at end of file diff --git a/docs/models/uploadfileout.md b/docs/models/uploadfileout.md index ef2ad212..6f09c9a6 100644 --- a/docs/models/uploadfileout.md +++ b/docs/models/uploadfileout.md @@ -13,4 +13,6 @@ | `purpose` | [models.FilePurpose](../models/filepurpose.md) | :heavy_check_mark: | N/A | | | `sample_type` | [models.SampleType](../models/sampletype.md) | 
:heavy_check_mark: | N/A | | | `num_lines` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | \ No newline at end of file +| `mimetype` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | +| `source` | [models.Source](../models/source.md) | :heavy_check_mark: | N/A | | +| `signature` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/usageinfo.md b/docs/models/usageinfo.md index 9f56a3ae..f5204ac9 100644 --- a/docs/models/usageinfo.md +++ b/docs/models/usageinfo.md @@ -3,8 +3,10 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | -| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | -| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/audio/README.md b/docs/sdks/audio/README.md new file mode 100644 index 00000000..2101c266 --- /dev/null +++ b/docs/sdks/audio/README.md @@ -0,0 +1,6 @@ +# Audio +(*audio*) + +## Overview + +### Available Operations diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 0c0b5991..ef1e1549 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -39,6 
+39,7 @@ with Mistral( | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | @@ -72,7 +73,7 @@ with Mistral( res = mistral.batch.jobs.create(input_files=[ "fe3343a2-3b8d-404b-ba32-a78dede2614a", - ], endpoint="/v1/moderations", model="Altima", timeout_hours=24) + ], endpoint="/v1/moderations", timeout_hours=24) # Handle response print(res) @@ -85,7 +86,8 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `input_files` | List[*str*] | :heavy_check_mark: | N/A | | `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | | `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md new file mode 100644 index 00000000..fcac2467 --- /dev/null +++ b/docs/sdks/transcriptions/README.md @@ -0,0 +1,103 @@ +# Transcriptions +(*audio.transcriptions*) + +## Overview + +API for audio transcription. + +### Available Operations + +* [complete](#complete) - Create Transcription +* [stream](#stream) - Create streaming transcription (SSE) + +## complete + +Create Transcription + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.audio.transcriptions.complete(model="Model X") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. 
| +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.TranscriptionResponse](../../models/transcriptionresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | + +## stream + +Create streaming transcription (SSE) + +### Example Usage + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.audio.transcriptions.stream(model="Camry") + + with res as event_stream: + for event in event_stream: + # handle event + print(event, flush=True) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. 
| +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[Union[eventstreaming.EventStream[models.TranscriptionStreamEvents], eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/examples/fixtures/bcn_weather.mp3 b/examples/fixtures/bcn_weather.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..6a5152d178f2042357907aa557cc50f43bc8ba29 GIT binary patch literal 353280 zcmeFXXH=8hyY8KYB!mzkgch16A@mkPKtRBRW~hcx3{^stCISM2H3=;=r8hwV=>h@* z0;1A8NL2(2T|jYxy{`4Hebzo>@Be$=z0dpkj5EJHWjtS=Iezn+*SzoRub=Y+0G=KI zumS)8U;qFT3jhGyP;AY40DLe2`0tnhcl_TO_&;|B{`$}SE8b}pqP%=dBckoJZ2r(T ziQn7a=Ahr!|P&KCcmU3s?SXvJ(*TuHnFUk4i&Q%M%wboPqgc# z@v2OChyaF?)0q0)ehlNBR-_&gucTy+C=;IG$fljZO~@!Ca2gXO3TgX1wn!-AyUfHR zWyBd(8=MLNQkd$;&X0%rdjYfVmLFxmB%_o#bdeSE531Vu{98#7xQCBMmiDM^f6s3@3 z_=X|U-**~RBwa^<3|N zSBAtt`knwXY*p8ba})~y&^RwGt*0Q|)31%{nUz1X#_|)iH;t$tR>9~3G2Wb16+tRU z!^<=UrU~2M4>JYf%^1C6uy|Vrgglc?gtC;VVrYMbhd}rK8rVje&UjkTfq74&!kO@H zT~z(MOucadRRXRIbxx2wC83J}ycFnJr^ojlLgOF^s1z*#Q;J5r^U*+h>AbuEP!d|n zi<@MiYFEH1N`V4UNC52rT*m*qKl_LOkcE{g!PC&e5kqOqNoI^8u4a*x9hN8{V@S}W zCS&{~MicpyK)#+_3&Eywv96-ZA{puUVoci_r`VGWMZuagR`e^(h<@lY5izH1h|sYsm znJm0PijHK&xZ7?Pp}-r9Ghvrsm{Pk1y{j zOx)$Fl+S+pZbYq^TsTBnc4q3bw1}3?tgI#5E-#?K)m)31&~9h4q}UuS>3yDa?FLtA zw@f{&^+)4TCAQDmpOFqbu=n@ct_`7D-X>m8RVTa(2Oyiec1{7sU*6S%5KhbA4<0Ef zO4x8p6`Xoa>OObsv$lu)@YS4NRbc_`rGA9NE?~{vy0JO6nU~a>;r#Wc^eOm3ZsRve zzK|VT>~KR|2kJQ<{Ka(tg<)yBZ1%kS`<8(e`t8N~vrXYylpz9yYsWm<{7i{g?UD2% zQbtTnMZ^>cz&Ly~q+*+IDj|ww7H1C`xd2^SY*jLN9J>_6>{mI8OKBvFDfi@%WPe2_kD0#zDb!RYgn^+ zuHrO7&yQ7(#DY{)P@mYDTDEpCNRpEVfI1(01Xnz*TMs6@dpjKudAmhR7dYTuc(<3e`4 
zxHs#Az-6^hop|0GC`1(7JdC!=+~gY#X&oh(C?)#j-y5~l9MN0WtVnnC6HKtz5*f_i zHa3gV4?~akfr|~(LHz^?t zA;D?2)(v0lZPc!(L;5~SE)YeN%-v*ngUVml-fF({#CZ_DS5TN4|02>A4?$}1V0i>* ziO`P(Riva43MMv@YX}BhvR$dhST6FlGTa$9a3oGJUmKK@_|Z4TqWDHezvvh}Y1B8-AI5ed zFI4n6`pOrT5xx)R{rP}HFiV7hyx_yq#m1gGLR9(>?Y1q$!gDUOSWQ~rNQNw_&ed8= zSKJf&r{N2XGH{*!!mIw5yE8#wr7(WT&q3C;Hg@qtYsb~|qQveP1FQ3<5MI^=T-~Qk zIYz)q<1-(2yz2Kx+_5r3AreypW;L1F&yRUVx8R;H{jZ$=Pu~CE*MImAUftMI-~61g zYUmD}^rlIH=BuLXqT&9BJeenk{-}$stL9v~7-k_AZ?NyxY@x6%LDShM6EkzVGn7Gb zeT`7(dEZ*7uDidL1vJbnX~-}jz@+jC5G<>a_TjA5NbfX{)A9K+3A`(KSu^XD00V@* zA>g{CO^`)W$r-JAv> z=q?pPk_W<*uX|(I$=<4MZ|szbY@#<^xFN0IW)a1s%Mev|Z#>Fp=_F`T4FOE&-K^Sp z@38XAbgnA6Ih>X+M=B@;evy?M*cHh^Ej~DoPm#Ck4lSs!O2O3!d&uT65($U`{r7Hq!_Aw2^*lT{b)O=P(@rv;M7Zg3CKsl@4IbUX3nfBtQR=c%mio&pTe(Q6N?a7J915ZrDk1$_fDPb8 z1?}Prf7|4|1w|#e8bb8j@XV2In~rlR*z7}n9AYVASx`!=oU-UZf)h$7`}IVPDZw0Z zNeAj8jI{1>m>(5gw8DbCPy?t$K6AGUP0}G{Xl-1id4JA}{mJBWEm3(Dms`1lbXUFd8H+qE~a)E>b=y0L%R0l&H69=LIbX% zXB*>G+yhk}Sif_xOiQY9&D|NwgipPAzSy&XVc4OL=qCfD%BBG_Xe(Hy#mJP0vqi)D-{%=8O9zb z=xsXYt-?^N_64BKOG#$V&;m6A2)t|8`X?n3FR1S-WAd!^vxBoNB0|p4G`_J)RAiD4PJ<~S5k|v!Rsu+ zxK5n12Hm@5RolrJ$YWScFHc>pB>8il_kB6IvXcsaQA*@8UxS3+eJ!t`@Ppwwe>hb@ ztY`|YX7ZZtBfV!&5NMm(sfj)EFkjkanRf4VBHoqhRXIY)0=zNBAyol>>1sWFL#%PY z?Y`17vt|$st2Z8>6To$hL?RHb--V$f?UH{xT3&eYm`nbm5+A)f?y1o z*8vWG9RMigV~i9wHM!`kKCZCwFjzlt>Hr@8{NXI+HAcW8+g=W?~j=&VuwAu#y3*0+R%i_oz}e3R0iMSBZL?b zz)KDimd*C+fu`0ccj`D2Ml(U9T;-ZlFCZ2GVEqUE0Kh-`udckcCh(STu%!D%^2BNZ zac2FT!x3%Ms{vZ~=gO^hhD=LJ2^4L0qTb9D5A$M`Sk3W%$h#4VBS4$=IpSikyzU?5 zxHJHv)dz+j!|29cy~VkuGTj24gksi_ zgJDgRua-}Qc{n8vl`L8%CP>7%T4&S>zoa1ZdKp1c#I~BYTU#N|&Qi`E6%H$UNdkY- zp9#QL^wW}dFR!=$q)a1Zo@PA~|M9@OHm>`RwHGx%kIf;26T}j|rwydkf#k$8iv7maw_*MYb=+%Il}4$HP<5Y7P+Q_kE)IZq!d*2Q4al`suA-am zxc&$i^>D!5hIj{>xQa&Tb{Z`-Fh;Cw{Y2g%#q8ic5(ao@Zw`1587S(U&KQE{a;Al8QAP-t#AIqa;jZq6^?^) zt<)_lgiN|~@V*m(m7Ynh2M;nAty96It@a20rwUMn7aq~%k0Wk6@c-Hm{lmWsd-t8~ z8{UL>-R|a-%x%z*9;|SgXu0rpm>9woG|^PNngU9fG!;tp6$Rth&3Su4uv80HgaU`v 
z&-#K7&+E|B)+zmHOCEZ7q9=IZeOdDK2Cu$xAoD|yi2YAXb4;8a5(6H^8QePm^uZ}+ zOQi*zp)`u@Wqy9jhVWvD809YSa^Mw__(wY_ZUp3_DU24~vm_B-txz6`$-Ug@HDH2j zu;o7`ElCVJE|P9o%q#Ffk!1TSfQIsYFb3FdZBJV>r~ekYVd4H%xc8OI8Tm|cuQAaQ zrDBf@M^nm|kBFwRBxCT9KVSq}U^n;@Rm%V7=xiIOycQ-Vl_s*y8e0OHkn+O% zmUGHwljI^ZHiE#F6)!yX1&>83Grzy{){dNp1Sr3_>Kz#o_^p`ZAn*VuQA1 z{Nr%AlE1QeLG*1DS7ZL=w~BUai>a17nzf&3pW2Us$U60}52qP~>q(jQkDKa))1sNA z)Nj)xbLL7Dk5JmrW5#_sW!=LTbG$jG*Pz0Jl4+bk6XAZ*n1KSRb;yXB%%cA&vtS;y zavmPXQ-~Z!BRMkIB_IkVI{U!TG#B=TgoG?H91PB<=f}JQfJXq6W~TuG6BnqXHH+6Hqf(h@hvg1S8WdF;L}AvSN^q zPIM~}&9HA0vHnrlXKTBC<~7WUjrn0#rUfwH)#$Uo%|*T^sgFiJv-MZG-sF9ilyElM zsRj-T&V<3_qlsGn_#by{@?u*mi;4!3g_;iLv-{$|x6T)+89aAu`HLYQFd!vok?^=a zolOji3XRwKO3`MvZ@(XSo=}+-PuKo}FPNl^L{+Bx-FU1U;PcMB)--=L@rj_RrnN^9 zB1Patd%hTFJ~e)PxAR(d1b42QJ|1|@A?3Z9UxUXdda;yGs@WeVTWH+($fE)Zn5uF% z3mgvs{b%L>=l&o5-`VbN2%c3;80dZhe8|*O$(v;B5%(SIxfpD=shyu9G;Lz>yP?4I z`Qd95&kg74L$(uS_rV<7z%-=c8H55%FqN5Awr(-x`&u%F;Q~aOCrr|K*rI3%_DU9G z(&jDRI3rAsFCmS`!WA!P?Rd*HGuE7@zTdALJ#$j8X3a#==BUu)X7HGGltuTd^0$o| zgx~qb>fgzChsGWke^s`Yn!Tv{2I5vUkxWKCtTP?Q^18Q@X;EDjQYH@qvlI^f&@NOJ zxMr|}UUW3l7KQYtH-60u0>!%X&MZV1WoP{)=s*I~W7PPM(!YjV5W++(uiUvf+v>JS zo?VfIs9bDSN|_47R!Iwc+kQkjC!|f6zVVwre>2eVC+EYuOqwM_O=_$8zJlnk`Heui z##eeao3uV`Rjf*|T}5T*-G_}&OQaaF+u6oRg%jXckKdDo#jI;`pv@H@3JaEBO_dZK z0YUS14|ap1ntDzKPalvSS}n|S{I_{&+x}MaSjX_HZfof?N`a z34|0Nd$JXT*=&$^%;rKX*^4cgvEmdgdwxKH;?2sWRakecm}RLKZELikArl2`IO`AAMPzr z_wn<2;exl8Pe#fVns$4bQ>SWWP;v?hkzXR5(k)>oX$j95Qc*Q2Pu5TiOIzmAcaAHE z!uMiS^AB8~U*ri;Ez^4OLj05Eox6CgV1$R^)1bL?r=XrMOO+t!NYRf*-CnAH_8Uy@ zR;#`JFs-fleur|zLoa0=r97qb_PH@lO$n@8B|r175{{`eeqCD2S=uhHXfRX!6Q5uW zs$DfO#xcqo?WTPhlXo3)-&t)+d1Yfjbe={4WUv6_e^~_o@UOPHy(X};H(2=4eYAeH zMvO%SIE)wxyP)k?-y=#m*l*uC)N2hgGZaDaq7&ayfeT9gLE`bDd_|xtd&wfhfaj@o z#Gv#8xrovkh?atFwzitrP?EOy^*j-B*hCWXjjg1dS!r*QI8j}#enc4%&bDVyA^>J- zjZ7OON9xUE^!_thB3O9nw zXA!QcAvX+1PDo(26$$YOPO21X?nKEG7==Z@haKk2l2l$e%4&sAst)#ai_kBe=r8z= zqs{M9EhVVT_$yhhO|E=l9ki39;U<}f6YH6)aBe69=}4w7;=1w_cnP%w~;dkI-!m6K5WL)PW@=bWW9Iq*F5NJXls 
zm9$V>)&Y*|Lq5rC36CoTNgT+zuBWVFr8e9ei2ySmK~zQ-Zb@m zC^*qaC}df?v@+5ywf)a(lWJetkdWHK`>+z#Wu9ZYH&2W6ru58dxN)BO2~J9n{F<82 zGhiHNra%Mrg*@Y=zy4m&>Azb~husC(4IkDg&Ijup;&Up({}<%{C;bPgNp5ebzvUhH zY4|FzUg5jojgMvZ+Q4;VSD>jh53|Nd_B_+jCoOWv1(1u04)Bdjl5DG1!p51Mp~<<< zE4*^2*=rjEMmPj(o6i|kAE`ixxO@={*ZzLtGCuDkvX}T!+9GKNJudmb#Sr zCM0@U2Ws9_t%vB&Z=O?!OcyrJp2S@)W?n*1n)Cbo@%GWU zqDR%|T0PC*jD`i4f1c!rj?|-e6{x@NQRO>&?*0_MTzDzrC0j+C5BJeeh-dl8r7%%& z$s&4CT+V%CDg2yl#q&f#qNb8sFE7y*)r z2NTiifTo2%gjopKD*E|y#>Qes+Ne7p&>{5HBnS2(_|o`EtynCui2AlrNw! zON{`7Xdv@!9%^FLrx+{GXHD^$XXs~a_+!dbjtLzXX_2wE@SK${PiP6Nw;{aJHBMu3 zCM(!ZbTQRryo!tEI<-+h(<6eeENaW|wUiTh4#E)q`_!*^L`%N6#CETWbYqHfhD`cz ziT)RP|KC3RyZ^^-Z`(4z4*oK7=X>bq1?}V@x7k8L`lJal~< z2uj4e%ID`hF!1`i02zedCu-Bf+~z+u3*t2<;d_VJ7YoOf72^E-R_>G$)O6O=zV;f7 zx6fq(Tr9a6kk5`gFUF>0+U)bfDsHycaphZvyeBD|Qhm*epi@*ylX1;+#%7VpuIWYN zcI>)iS>oK0GR}A0HxB`QQ3uDCj(?cVG=vyOt8TXjR^caH3a6qz0PQXHh4Z^`Df6QQeB}I3z6?{D~zRqr*WJv#6NuFI&5lBwy z$0Yhfb>y^*-r65A3?OFQxUr6+5L(F4CzKRGb`N;&5U z;?x6)1C))TKs@5W4*Yy&dnYC(&?3k<=y=FocxJ_(b^Vzsg z>bNXneO!YUv0YA2#!vJOgfHS_^NE#O4hHp^_4*$^N4C2{o~~>ds)=EogS+bZrOG!V z^ZlYrH}PXL5hdTs&zU z;g?Vz51?O*r{API^VpdDC100YkfIKjQ?)*)>7b&C+Y2I%+>}5FxNDF_9FU!LHLP}- zXEfN*P3Nuw+K!Z0UE^GKSl0dUML& zA;L%(hg=PogrjbAmT++w!fDUJ1lwP4btbJgjz7Rg7n=AgdsZbrb0@5s z<*K$kJ9RP(c=a_^_!+lX{ff4&9jFH?4k@xBg>NLMW}2F7!w|MpgIer?zQKb4mwfUC z8_6Gjw8SW-`0oYO@I=`B&3+4`-Z|Jo>CgmTNP19OYfJkItYTZsL-K90B>SiIw>PlZ z76*|lQf5fv=ByRC1sv~j4ptR`MbM&h1l1k^>=#`^q!;NyhRL7;?xT1|3fmmF+Gj4g zzb`N?L+_$JdnY3q7JOa2i@t~?w5lfSj&dVkfMYBm%uAvlF3lP?_|33hwWr@tk9}N+ zubOqX_;N>O_lg6=D?8R{nN0N`AG~)J_0VFxXoDvy(B=gQ;2sB~aZvid>BAmSnuiJM zWy6s__>Z7a=-kHwFYDVOLRUPy8Uz#SJMPv^1qr_Eua&>v;^cEG;+ZQn!B8FOdiZ|A zGrt*^npD>y^_fUrrDAOqoB32xv@B`fP1ixmp<|#~|CPmVvm)lC)J^L6T*)=v^Y+gJ zLpiU)#8rYHzWu!Fd+if(JmTu&+c8TGCwcJ%b zv;yq43Vj2*Adq!Nrf|GZNk=<0IJdJIYEch^c&aSTyGorj4tkL+UzLKs8FsR6A^Bd| zY^L286@5~CNu4-DO$l7S$#jouoT;}~*ez^HuhIy)ZT2z+ZfEdg+1|jYK!@0!Rb;gh zi=ity3Zoa^O_msDx_2H8E=Gb=Y@_P6XIn=D?&&L~@tG1~69Kb+?1S>OL<^mw))8cf 
zmcC3sXB~~UupVODpcA0ptPV!ineAHULsC$6PJ%BmBh9<1pF$fZ$5s>1Bm!%ti9K>q z56gVO=~Tf=DQ||LAeoe#7?)7*;3^6NqbpEYv)NS=*u(~$;#2p6v^K6-TOerl79cG4 zO8le=Z2%{N7L}jWEY0AB&Lx(fgj(DjpKlL*k;rGWb~i)0-1=SMAL}OWS}7LB8<~_a%#?dktysad*Tc=l?eAOxUqDz_mX!53wCGoa zc3tWgSpp^mp!rqNZg=SMT^kgAZZtQG8(m4TDi*DGGUM++LbC4D1=Zf?#b&j=IZ%Bz z3rg)aaf}TNrRy06{8^CD=C%hfVTd>}@`GZBH{PvgsC-GhJ#*Y+f)NwAt~ooS5wUe1 zYvnp}9Ne}T&-@G~-4=jg-OlBWW)ST|G8GFawO=s2rz*%Kv3I_#ga( z)MU5TG|tMUD?Rimsb8UjLGZ*AN(J?>F!u9!c;R=;`cwe{cy>Wc;mmBZc43clCI*!t zT}rLZtUz&SbF2X#Kr-Z{{zu*8@_MIQ4vk-di?W^t#Dtrq@dM=mi6k3D77sOC#LtZ7 zh)y_$U?Az!fYNqZ0$U1qT%|~(LmSfKn$n!o@QLry?y_LnQ?LM3!!o&1ZGt z-&gX}b!KN?pGbJ;4pusUd)o-@jSLpbJbK3iWLZ;}RU==-!^&-}N?9==NV{biZ;mqS z_21Y;LvPjgC0g6w)87M{EN@)#92Hk{2}Q1W=wagPTWpc3c4%C=zDeHrhdfY>4s0j zM{GvUTFJDxOo#S|$%H=-YGG>|=qS1SnhW zpL=IYR9Mzg9U{|9*9?ByF!~M6lVo1%^~RARr6cyi-zYI5N@kA?zhV1vhr&yvo`8h4 z?A0^iF6T@j%D5M_z_~&NC@N{>jt9k+dJuS0%PA<=Ae$nS^Ac8!I%pzgxYg2UM{;CD z=ZM7*mEJdOI+!dPs#6L#$g%GZ2T%Hqt}1Vf(bi@tLX%PoIH&r-4{m*4g#aK(q&R9m zqlH&|-Ocr_xJ?1yV%PJHhb}U$Ww2R!14LW+RQ_ZuJ<(h5Z&@Q2>vD@5Wpa6h%^+qb zCZ>^0;H)(}gM$*sw4_e8FG@zyDbK9hz~>x6(mqOwzttSCxDj$9PSxGoQuL-j(_MJ< zxIXJ+gGL|W^NU;QwEir~NuJtekKHVABsOT&+s*YKl>+W4LP7wV3EXK-){+EhDTldequa z$KZ4auN|R)CcMC*3cB_MDknhv;Hngk7OI-QJLYk@VxkEOb0XG<|MG7)`~W!K`NV3|*o$n92bJ zsE75i6{R=5GK7nb&wy^Q6+*(RG^9wNu>20VmKP0~IZBgZTta7S!_+2^K>}b!ejbHU zo1CHE9%Y!aRx(%#Vle>Vq)bouL!7*N70S{CTB$skv>^y4gs@}?N+XFMy7`uo5jd0t z(lro(-B#LCn@@49Df$ag1vSZg41d>I&jF;Um6k5)>=@-;hUlp<#`1{2?JEGs3zlnc z38FYU9f2Y_1Ia4w=~mfJtbP^2q<0)6QQ!Cx>o`B6)ViF2u1k$*aPz@(No(tajrzT{y2EMwkx;vo5|`^arMgSr!%o*5vPXD@0#7p zmAU$6YdutI_0+WKw>{Eg?76w7oc=S1bcPY?jP5f*RL}N6-baB}1!K9F%yK;>^IKrJ zy_f6JOZ^(BtbBO!mq@vSZjC>V%Ek4 zT(Q1_a1n{jq?5hmdT2BmC4x6Em(v`NZvp4f+Gk|!C$BY%EoNa>j`s8l!%F!ML+;MN z7JUqS(zGh$cALbZX%36VW%{ zT~2H;`PLoWo_j?vZEw_xp#r`U`s61i(AGuES=E#C>@72CQ^3$dF|u2e2jf8*M0$8J z1HN^7w0crE+g+=St6a_frf)R{+2o~nJPOwVSZ!$<_e-3+p2m-1>@xcM)>IL-vT`{O z`O;7`6FfB+8*I9cjrL zYT13`bXu6JaeFDK3gN=Rak(EPg!yx_nlfr?{b|WWq@+o7Q4PnHUDN0vP4@oEjc1xB 
z?yKC+`n%mPCK6>P8m-GU<~B65OMasu>lKkfYM1%7OuyGwpX!eHO{;0~si~Ix7i_E~(M&`yP#0PrQTn5n zKHN}5xYmhatuq_-8b+@j<3?&KBcJ^?J9k9}-2c`k=K0%>u{G^BG1awO2|vT~FVx(n zzb=btxR_;Y{jmD>vMqhfmDQ7Quh+k2TYkY3;^=Rsw5Mj#aq0{8j+7a(F=N!M`me9C zS`Zyk1OSf#Api3&@Q?m0U*6rdc?-6H{9FBp83X;R&&}3+N?dSn7QENpds$$gIe|`Q z0ZR;6emH-m^I9w~NdsG`84gSLYH}y4BpIRkUvbL3@?qUv-~6e97DpzS(QIDymz=l$ zed5ms@2MJ7&BT-C{)!j06=v;p #L6;pVb+d;{HNJrVy;}wFub19dNo!$Hy(F5}g z*Mf#9&+d$khWgm+DlJURhc6nY-8~C$-#?S}t?}#3qjp@ojgkdfA5pMqaVmtp_65~w z9}0e#fW8A2VY&90JG-O#q_^a@QoumrPft2Xl1x zZ3jW86Jrh%ECeq(d4r6BF}x_Ii4=B7C-AzsFEkm%FzXceRm%k7**p@gM!rL&1e0=F z3^-TWrWe45xC8BVzPLlJL~=<0ESXs_qU@C|86TgU;?~qExhvF?J3(&Q!qbJz>>K!* zxdn^4AYehxW9!KDQgS^7nVO^IcY%BDoNlfN(<`M3j@9>?$7iwK%Xr8P8g001Rs<*l zrkH_dG#yXr7`FK(#LILET*6U3GqCyg)c&e$3)9*>7{+v59Sh(}iCRQW4+1T` zs=f_?Se9lFLFu+>zJV~y$EM<;{D@J{0My|CA?>hJ!2&W|g_KuOTKP@7FO}yuhXYWX z9B*5WKoqt^<4l0@T^ZpUqrCuzwqKakPg?gtp|4bLk+=7>>P1LwrM9|yg4v;h4TJm- z=V$O`9pSr|Z^}>op84;(zyCA;qujf@eTJDoWNGB~cmAOXqI-k&pei6n*|Jh8U=$)- zWV4mi+?fU@k&~nH=e_I`y+}=?DP$7^C8kKEqw_v0Y&~Tkaf#a#AudIgly*eO#H#@% zL6;r+=SS_4`A(V!CHP1JklXu`fJG*gIUJG5!%l^(LS1#{)~niS(4=jmS5O zA*~k1OyJJDQkOtP;ah+n1UisS$s@CijCqn_anG}TVmA2ruk15#MyKq;WPzSMmL zAHg0VOHKlUbKV+}8CFFV`hKF_R)EGrR@nh(QLZaiQ?%j=4PMaH!xxe~czn?RqUdCA zB3h+QOACX;i17t|t^5&a?db2wGOKl#pcYhMDX4ootlmUme3_2}G+s$e)6e>$vE;aj z%~N-e4u>q9^gtnXX##~kOoI+v0!sHZ;+kF26xk`jLWEUcZ#w%<47vvh2=74AkB#%n|0xLikN)4GY;V{yKTm!#avOZ; z+W~N5OPC_dZf!?Q_mjt{cgGSaaW^{N732@%v5&)QIIr^gLW&;G)#+8L{-LW z*stmpxlo^4#%&2YU7iZ8@axgDOX!OnJGNTwlfTIv zyNw^*{G~e>ypW^JYEmbu^6vOp6zz;U!Yt`FFKgYawbE}DWp;Mf6^}^b^lL|Cr~OBF z-qU@TZvdg}?``m-q6G(BarF9;N!@rz;%|0$R@!SUQ zV2>=NSOE~M>((v8CP%ey+GN95RT+UCkeMP2HEm%i*mIUC(FduhAZCtKG@D_SB{u&$p|D{9??}d# zQLdYmV7g$|G-1@!X1*k^Rl%$ zX(4nk{VGN9(q-+ry`sB`$EE$xr6u$ST9q)1eq3r+T2|*i$gm1%Q8xXoa(+3R>DJ&9 ztkg<+WN#th6H*w`ZUO7`Ur3SkwOpIH=|9rwxGW`1ev@AAAm_y;^1q9}=EaM9DrT43=a zyBeAWxo69ljzA>9lMY$=_7r3*)OaC@`dn}>N&+h`Y;=GFGXM;Qn(TspA*2jJ-Nqh+ zysfME#9Uuf;L|y0a-SU0goQr=MJ1>AW+H25XGg?6OewRl{TcBep|v)L9=zr)>AbHr 
zVX#}BT2Y<)MoApGFJjcnIA5t^7SEk>kJepn=Di8{aeeTgV8~!-@H<^;( z+M>O8C+PQD>Zka%)+fIKw^qoTlx@D)X)CYcqPg-^fE9x3fJ?h*CY8?0_O-If%pkRh zLrV{mK($j09zjRASuYDIYjE6M9Nu9UIcmCb+Q70)j&^At1=Q=Zw8;K zWrLo5AJezOFgJr!-vU8FBWhm25a+!Efc@Z@d6Yp=jmB=%h9YU+Mzi04|ik`O}Fa3vT>+ z!#A|u&O6ATZv@^}V~owZP#dep7B^I@{j}V;=58rKDZ{tXPNT@RMrS%VB$oK>!6xdG zZ@+1Y`+J*M3!1EkD__wR{fzWt*F}D$38*Yzxn?IB+SL!X>ig(0XHs+#kZJCjlSi(G z?1h06n9W?KZPu%z@V&_To`BG&d9!N{)gDZ|_Bz0^9lKO1`*%nU&Z${E_eMn zrde72N5n5b`|4LRp)DORv^6G>M9JH?ri3dcXAe>(B`0>?-zbWwg2B@-Ewd(+4NISw zfc*R9Vsr_-%-0MrN}+rr)e8VCFgDB}d}+>k(#6AqA;0B{4mm6~o3LOc-VX z7VtRBwk^W>THW*_XxkZ*k(h7j3?>gAsRVo~Q298_w)_gkN>|x-I1_<}1_ffkT**9o z;T@Y1>79r$C4KXLesBqJk|*qb<*gv@#Lc%2*I7mBL%7C)5BH$N$kZzO*7G>cw}F?; zczrS6J)x-s4~0;9IL>9&ti`(rTe#8zbPQj({I^tU&bo3D`7WsvV}-c`2c0PosfF@~ z+{t(h=IKcrwGm!D&TGM{3K&@ejce5PHFeV8U$0(VIJ~XSe16-2BWY4tN4@8&dG1?P z$~$rrwmaYAP+QOZ@|}=eb>M?q`tAuuPk#S{r#3Hdqy`HP(bWx~RS-s2?-1ud(fPqQ zugzsVDG^;b^#{*a*+h5A){7?*lXkO~eUZ}#D_(zmd?|ac=>H=I{qOvr`P=`z;hh{d zDz@xi!vLx%B2@6NXBKI!o}2-bf=~ne_)vGqr=friuIoTADG)+3`9>7xPyCMQI8hGJ z;MqY+p)v88_ZC)~&5M8;e`}tlYVTtLxu%F}bxw*?FL#WXv(Y#EHcohv7f$K z$de4t4#;E@iW1{J^Qo z2kKdGj(>?RJ^NPBH#_GE?!Z%JT=}vYz@d4D;ZK{rrrk+Lmv?p19NSLuedI|JC2YPT zOTW(Cx##?0an@V?1F^=JGC!}& z4LvL}$Ro9XSdTk~;^6Gurq{>h8jFu=-gH$Te4usw8X#SNuM*L9hm3Esg8P;vRzc7F zQKja6L1L>KcX}r=M%RxY>WKmSlUDc3o>RiO7(dJiLS%pF7BbKO5yTG&LZ+n23KXjr zM3r?Dz*I6-xbz)=0O?@LzO@uj)`oltU17@jqd~%Ioby`dmd@p;?3q0APEyUuOwGd# z0c@%b1g~4iy>!Rbjh0mGD^f8B41!m6Pw&^8dx;a9>O01U(iD*U^Rc)>Z*6>G*h{9V zu(Hd`PXJYRRY%Wml>xiTO1bldsO(|`s;DXmy^n&XN+WZy52Ce8uEzOxtwaIIm>+gjIF%mDY(&xU-YmlSB$BlEmOrTTO??Q@~t~T1(md2Mj9bc1ob)Qm|1YY^MIPYWk^%^0{Dy zWe{L=zD?Q@8L;;XZO@yoUcRUGEGTohM~%2wpSRIAQ!c~2yA)0tZ)@n;$G2UEt#N9@ zuw?lGEL9Q9K!4~`1AY%ZDysNHa|wsi`n9*;1`W-B)@blYb#wi zC6(k(h3A>76(=434i;w4hkW3MO0f@b2Vs7U2HdKXc=XGYl6FNKpD2q)$qSr$9YV;z zaJ%Wn8H>9;sILv5(=4_(v)k7XIxiXj())i{d(W^Y_itM?2}wvofY1X1h7ch1Kmr0H zhH8KSp?3(qs(^sllhCX5CTi$aq$3JSm%iwNqEZ9_Q4!H)UF)#_>zupyIrsechx^?3 
zL-OW%pL`pc8NWHl7}Hv0@p!{u`hpsFzn{zzD}m9(oZA)AEow09oP?6*(MqX!%Za}d zOVteY;r*GSGQ0lwflP6n8w4hg3e?ZV;%iw#85fZe>DHD6sxu#QFZ0pOxDc<`GC^mn zB>!`$TdZcR8>C7 z74Ba7`ceaV*QPp{*3_W<#pX|Cy+Mrcw8G6c$MCEQ?A3duZa1biF`k2V>GG1t4lE-c z>Zxp;HSC#xof@eHY-*+5fjXxJ)|{mxZCzF=V5hpYHXa5o*~(Q!0JIT3 zWH5kc@UGB-$K@Ip7&=D*&>1q?3Dp|4)}_OrYr{)T*6=}hFmJ1@nfjcrCgq^QMGG0t zw!&e~<69_c{q$aHj;_r%nn_oKkQr@?=NJIRY1sHy|58b#M0MUHWWVefm(z-!1Y{}e z3K#pnJ@!|w@r&=zq;fSDo5f=l+aIJxht3#HyJreS*?fKMIY_%V64C#Esr~8xnAvVADz39_w$R#4V&W?RA{8Y49%o|M;_qW&Ur&r`-I?NlIQb%FJM> z(ScIchTfLE%hl7FZ@9pX3*bu&-g<9ZoDFShc@CyR#$wcqbmVFr-sS@dxNo+Mw{mT3~@9l!^vMGl^AmPNCT2P9E( zO}l~(g~D!sJ>mX52h`!&2=JXk>aG*=y{Un_u>(A7mqdOJ_T61XH!0mNYWXs_QnDim zdkJD1-P)f(bO@hRkPWd=u8nr~o^kPeis1IrA6DryPEOFhxFExHwf>VXWM4csTX~Xy zQfEc>afaFRy0oI)`+g@3-ySrpza1!foSEA%iPPC?OwnA4w z!y`nwyl$#htbHpNe!rJ>eX7AaiDU2|junH23=Hk%>F9ydzQUsVX zYw$#s#dq=pJ5EVl8PhagR9d#*d6a%Jv_#4P1LJT=i23p8!$ynSfh>%%4A1m5dS|~j zA99HF)7j)TBHHhk-osbrZ3qR&@7QJS%g3s-Xg>|k0Pe>Y7Ax^R4jSxhJH#B2m(FO= z&kO(lLKCOC4fPufsb06%ra-8y$Z4K^T3u)^4k4Unw`QY|446A?YoKy?%f||?q-MXP zmTE|O9G-Jbj|rX-6`~ll9AC$l_X*}JoGl~mqbTtpV%%rv%58lXPH#7R?33M~Y3Mus z1{q@5<S)*J!(Tw=ToC3h|o427ha6xI7Wgb$?hswi2S;D9AJWFyx`VUmx+&!2)!vEMQ3FC&-^%xkq zfyXxj3;`T7r%U%m^sf3-b_h8jJdE!2&`Q|&cu^AC`9^lw>Z)6^?MsxetDV0^VqK2@J3sH2E-|H&5gyERF3sDw~+()l&_GvBqF; zz9Pf*ItQ}QSzm!MaN-!x@Ljt$%9)qCu1-MJ(z3}j3zQ^8e-fg@A-~+^vQez>RjQCb5SCxI?s zVD~#cNaFRXjf1EsOKkvo2-EpY;ER-V8UYsPI^o{Bx2EvHS63BWBy(RneA!bQtY?*8 z;n|zKe{)dJf2$^ya?v1f=S0hJL#IO{z1B6GcWxqlogr)e?UZ%_& zw5I&F@rBR(B`1J0zT?g%kL#P^4C@BKOmW0as>WqajT4mKR~-f`xlpAlV8#UnrqTHN zD6%=o=Juo=?YZIHkkD9bS@_kQ@nok2b8ymGAvGs@=He#?OcU{Ff`EPBTpozJc1>De z@EJ%0@7<{~t$)VoadCZ0zQK^)XQlQa6+gF+f7H~gPy%W%>ZLi18RoUSj&qsY9K_Kz zyxFQ|yfyHHz%q5#pDp|!@;8$NxPR!w&t>WMjG4~#`;z8M8BLyp-i}K~ASZS1%=KcO5DA1hO z@XyKHEx&P$=I6dd)v*h*+5QzeM5Qm^3aKDbqXlcrg5NCj@gE(t-}Q^km_nQO-3F@qv0E9a=#2F|_-7jofBf0DC5ac?$J zDk-Yc$VIxHca|VA==X=f@Uy^kBQl=?EkpaK>@p8e$z)AioIdC|JtNzE!cZ>aIdzVQ zDZ|^~%Jj9h>gBy9Rw$0uykM(!f|%*GRgU5z#tIx0pzgmR^{HjDZTK>{MZF)?B?I(} 
zS+Eo&j{JE6#z_rSv2y_j63H8qylQkJTp0+82dwgM3z4a3MOo@@Bx1s^L>9`fPmZWMu_JRg)t1lAlW@1>9n%NN#EKsWQq zEZ^7XEF@42CCRLNqbX=FOqJ;sS~`01K{>5LY91ajXqZx3`AR=Y+0Mlcm9wYDZwAfey~_OWnysZ*3eP@iRh0tG`xUZXl>2+&<2f>Dg?2L z(u}AKc_)BWJjaOtUs{_R5OBqF>3pEXG)A5k4>T}D=Bv{+7Hwu=>5JJ2nOy_fAD1(d z3%bRS_5iAV&Aq+^ijFSYgU*PfseHdg(FgToOtM=HXbAn)#B;?SwOsTj##_}mPD1o8 z=lYZBepj%zEC_$m{Z>>~`$++1ce!Q$AbPnd7l~} z7}0`8TxmRXOYNZbRuyATXkPALCff;oJg8ghx{P~_)oy!xAX~|&-T9n<=XLhu^!T2F zC#m|sFU#y$ga*O0r2uIX!j{EZ3cw6S0`D;lXTpM8@(C%P255ghJ2R6{fW>WW2(kgH zKY;N7xVa5T>T!L(7ohw|j0dTRF*|N(l}IMfaix!#LX`!R`g#Fc;hBKY=Lo8s(L_P| zkx3r`wF08Juo4Wyg{&mrv{cx0Y7A1%DCX*faT@Kac(F7<9mFBep(9NMsS?(v)wOnj zWGx!0b%UI`5ZkUk*R~kpi;=~O;l7GDc`k0X8#7Z&%s{1el6NT)5p#Q&EPv^CgZ=m^ z@SHhFi-zhuI50P0o)`wk21{4r{_G-+yIAv%QECgWp4VXD0f$;Gf zhe&9fy~^2~l^R?ZEOUQYIBhM~yl+tnD+>X4uzEu(zlE4Nt>L{tN0}FoxGzMc#hu|3 zx}G5A7|*ChWds!&mV;G`t1Y1eGM?)5^XKg*@8EPE1lB8us0!1`I15YRu+8#x;}gag zBQ*OL%jKf7WsXMWCYG} z1ucY7N9ikv1g}6LRr9u>u!Zyh)y0fnN8*n}nMWbX|4E1bIsYp5>}^>boqwH9Nz^vz zeFyj(;+B?PYg2fG40!?Ly0^4V{o+rQl75G--kZ#}@Ukx)&~*_wci}}?D;G30jtAw- z4x}@w00;0YX$(AIH?0ekuUgFnRr3kcahGdI3hK#k+UHY6=&l(jeL^_QheJc9?t z9cGwaV6E?}gjiU@cHp`8mtG!tWm>KnLSkPHRjnoi-l=52?HF|{pyf@!SDdBi6!|Su z@=89Hd0$a6MkLy8TD;UKkT%S0+KJfJB)tkslX_|MQl$zxxNcA;RY%g;#ID>83`2l8 zvbC&HaN}l9z^o|JYtk9skk_tC3TgZqvmiz^bugJY)QPrspL0D|l0}~zT45dE36&a8 z@Ck(srjM32y*c})@sWod+{N>oxlTa#Gx?RaiE`_=4(`VbD6b0<%=ZbRx6H3?d?RN< z1bk9Y&TKVHFkg4!u0IZox|m=HB^1?NWEp%m+ro5y9|>Fkss+FSjJ4jFAZ?pM5{q&m z2C{`gt^AH}X4;`RLRsn*da0jJRu+opLC~3i+*~bT$v%A!eIBvCX19B#gGL;H0axj7 z=^!K1%#)Z{P_7p|I~zRDm5xb?jflX+!yFu3xS0Bt=tWiTLii#syoTTLS_v08H)J zkrU(MlXO1WQYlkpo%9`O-)t}3ze48Y&lkcvRX}@R#PMgTLN`tZH<#Q+BeEmwD2;z? 
zbYEvr4Q<1m3th108@LZ)>ngYJY_VQM2)EoU-sg3}zn&;L%~RdlFkGA>G(~7xZ~8H+ zctYXLw6_9!T)igOV#w_X6K=$V^Rw0w zCyv2H1#Wvo*YBEm>Gu}s_PPt^ewn82_UJFn4L+HkzUqacgG-%^r)H4enZlDUu~BYG zdGf+g@hDrhZw%4pi(dH-Q?Kl24}>d-eMua;65vQDXb(Jc79x)ZrAiYCj3y@syPIYM;v$U5BA}!-H0D(ji557sgCoXnPJ4?QJ^xmAXkjp(!UGS$3Z-D#?QV3$q)Q^uH0UmDy z!<7=UI9V@`Tu3(0Tf77W{2=|E9Pomy`>Z$p=vGzvjulJL1Mt> zR~Nwm$3Q`Lidad_VKT}>UteqV!@Q#@!%89Jbh9ImOSkzJ4;9@fjeyo4k9No-JWxAc z{7bJOxo+Tv_h=D^tbfMC-ws$zv;Jyb;OvfJa-4eJs`%Tkv-R{+VTsRib$_1nD)54* zvfUR#fkJ5w#~Mqv4;rU4z@FRL{qVGOSkLv&C=Xj@6?q9m~z5#q$2DS_v zLR1d?7*MA;Tr?@^IBq9ds<8LFoggB{XFEYb5(Nfgxj<+>a%*iY0IpG(86&6*VC#Mj z&2Z%h^7JRX;!@&BYp~%Lol(~x29)sWDgo?{D6X5hz95so+$LD21coXL(_&9&>xdc) zMWswxUG1`cv=SIg9y^|>Et6{bQ?UviM*(?-+czu@G2=Mk9DwnK zWwQQkMO#z$1q~CwIL0ykAINN_VgXFQ;qq)qIa9Ju2`4y9fMJA^vcI`Ib4!PvE2#&+ zpYa#(O;YLawgi*afm`{g9KzjUQRxZlaS_x9%W8o!ebW=U(vd`~mZdJI(>EPjW5%s< z({OcPPe)Z`UZ_l)HsG}RK+Qz+_|ZTq669y3+^8d7ef_)UcQZE01x-=d-ZB=+X?WYR zJat#s$z7mAR!l7^uhH|azx)kbamv?kIsgO!gSmBf5R6khD7Q4hI;e%wXlf~m5;2#G zZx+@TTS5A>&`)C}$+du%(w6wxl$Edn3MDhljv;kadP=Xw8w{lI9mZITjT5>7y@6PQ zIG$7ztkg_G&w8o0o%dt36gY#jx^4*@SC5|eXt~-+?U=Ff22_rwD1i9qeGMy%kL1W2d&3^B&54$L`#qH=6PR%HYZIaA1 zP2+jdeX}gp7f+Qu#!9+Ed-+JJaD}^`WPtW*%!xOIv z>9Ak#YkWQ#>e2V?M|!GSW4+v_**vDt1lK(%!L!6&>-*?B)hLvyQ9Isrw%9 zpt+%~9TcM8+!#EW7?MQGZK~xP%0T$N5`JC(NH@COIIwMff&0Ln`eh1Xxm_J}H#+d- z|K~R7Km1=2{$|_a9b|I#NdGhG6$S{51X9~R4dWhs9h>{}GZb-fg|K*T?uJasmIGDyJ* zcuqZn#kM(lUv~Sqe6s(tSYH{ah?qe8I7iZ44$A*6>=vl0%B!d_T(hUUZ~t~m{ik8p z31yaE&0*7;QuC<4*RW4ugMQ z&+oc2ewaC-FIcj*n~*Uy;&uwzzH=2jXn;FtRdAG)7SB9)=lakyb(`lyc0Y4ee1B%vlL<~Cf|yg?xyYK*!UN=Wr^$VnYFP@HEg{Jv z*>-G`Fz$higP^2&0ADI8GJTxGve48t4ec&?O?`}2(bsQSFDQ)`wm0!HLuRBv(cQ5) z=(2PHy#<6rx8d~@NSR|g%}eV&{{DRG6@Ok69VBFrCbe5Hi>q>^nSl(GhJST!<4fPE zAwuPT@Gl4VxMf_a8>elvAK|pHH(jvGskE8bXQkujQ7G0cf~%rLl6Fv-de>S~@j6LXWa_(}19T z-h0@+hTYV1ChDUX3)_7YOHvCs%IRqh&Ayo5bMPmjvn5DENp~awsB*ZHaLec+&B5&c z1_I2wiH&|-C-^sMDB0e<mn88v?+=|Gf*KZ4Uh%_n#0g;TV66Wf0hiNW}g194ZC4}9v|Bn??S 
zRa_H!V>})w?5`@)v$@~jC3F3$O6%OYrWz-mCPQwGZ}H5}PO=J%3yo?uG;eM(mq18f^xGGb3%5r6r9v^0R?Fc&fY2^ZTwl z!#3MB<*=xCvd<5~t!DujY0&tPWITUfu%QKyHu$NMshr3>&;FU?CJ8?WxG()3Ll21N ziN#BFtrMneO2y4srDUT#FEP~aKr>(C`bbimR7!blm~VU(QY{oRa@@cVVgL?gRiu!B zG>p|^#_?OY+BID`SlBJe(828NBa0s@uG|DuxfF+Umdj55_Q8GaQo%y4UI ziy~DDPN3%ofH6)J+(}8L5d*xwhDLe346bh&QV`a+py+xwYQm`WB7Q(g>Opa6XDX~^ zF8R0(Chp)1o&f;;l+(fRf`dWam`J{yaZEsVxK_gZYdK{$LO_ma<6N0AL9D1$rkl)c zs!Fj)^I+W#u97aklJ3e|!H`$}WB6vissy?d5`I~-tV(t5|wt=FaAjOsRzCa^9)|k}mJ>n&3zG)lst`cax z=Q<+7yIR8SkBpI`Z7c`{O(NiyfBIc~G(INBOAOqz*vPvdHPz>t1ic>gkT$)T^<=+G z^;w$EIoJ29AsEna@>XHG}4Mc(hQNH|QlPdP-Z-^XhMwN}NESZ4j_;ntgPFOlQ-&$(w)RecJW0m)~(N@tW z3DVM?E`_v<+80%SPd{%6^*LK-EJAmNEF%Tf9Z2_r6yMjE79Px#9tZ2kNbSw8%5nS0QJv*PyaVV#lVmRVtZ@zz^lnF}eDuJqXnCi^CC=T*vC$?VA66vWQTRRylN zvHlNbBy%r4on&iexwm_Ke%B?yrjHlPtV~|Gdf^4~&&rO_Y zLiD}4za}`q3TcTJO9Nj%duo_bF3&v5u;kFE+MQ1neHdA{Z|~Ix?e;78B$p}=!%EgT zMRn?*W`FqM(^$5P)4lC%uWqK$Q@P|}^u#i8CP=niGsV<-=34IZu=OCuv-SqJYVQ4X zvr&*+uJE}LQF%Do`;JoS10zBMXd!3NeRySRP3^-HzM$Sx8KwLo+ z!;rv3i`X-7XOj|&~uAdOJT|`AyR9Dsb#=+Bke2?DfSqu~A z+K=B6|3wJHNM|l3O;>g2B=0xit79V#g=`a^`w%SS9^7`_Ym}e|Yu$#e7}-~>D>%XT z14f>25hI^odKI!))5Y`4!hUwai?)zcZRGs0?ORY2;hERnEaiw6QjzZt&Rys&(a2%7 zb_ogokSHp#v6JCq0&a>gzs%qBm)V;X|M5N)s&xovB?@_&S*1iBc?z;>dy(A*w?f4u zx(hlV2_I_k-|+tQ^|I7QM}!@J)EQhgOLDASb#pUpcv3@3>si^7$ih$$G8Nt~d)x8x zt2@FFv)jMlM)3%pCXtXMWmu4Q>|g?kCK`Ficb!?uQDwx1)=eO35Vq~mylUFh{bjx6 z`KR%Q*rRq@!wJlgdErSpKCcPT$w8x~UJ^o`KP6zk@N?e;>W(ZXVx%kpo`-^hOE#f$)*g!56|XL3n-$izCv~~%XVuxYfHYi( z*8byaeHWy?Ob=vY{`k>r5%7VsC~ZhYTMRIIfj^(@yYaa%O&# z8{>)GrxhL}x`6R!W_%P%Fbpa0(8r2#Y3_HU*B!R#9HU4<&Vul)sGoW)Tcr(RBkLgpR@Yu3l zZ-w7tHfsiyxN*#zHKqs%*)(*y=Wg5D6>&i$6l zxcyW~a*r3D5K$g@`_x?Nu}inIOF*JE)#Ccv(~0QRtR@N2Z9@j}y{>vJm|D!@bP0`_ zvZ~J{Q!iC8dw@++SM((cn_%%ARSH2ZXs&6@ns*{;-e8m)R~ie{DM9-9CV&&pjd*OM z6UYo%q8*3=q~G9Y7R4x zq~aou2R7>TX)J6{xo+24b!K%{8`vXa9B!YaMHLVs0S8-Cb1!224)aKV27|q)Hrn2d zyCCv)&OVn>X3AJ1&OcUS9=fAcsAppos8!f^U%o_Cm2^C8-$ZDQc$^7(=g0#&4oIf; zO(Sc;hbdI8nqe1bJ#UZ2wgfWHlMz{LS*dJ+f!ZSO53)B8}q_d^5) 
z?&8l_bW+k64D6W^M|1Nrl*+V7vM*mN;b=_>CQeanI(E)H0dR~rM$r0vHQBmmiWB%i zs!^Fe?KK}FMR5a;%POYjnO#=ch=W+=2NlSjjALn(jnfl-i`X1MkiwfubqO%1C?6oR zaz_YpTn{iHe+W_blR^8hg$XY>x%-J+wjTOBpboTDyjo+N+s-%msPX&N7d{|YkQgrm zoTwF6VW(p2i^@uFZ4v4;SgzXRnYSObzS+UDvKP{NxLmk)K6hu-VQ~XZ7Bj@-Hc1FSqdKOI`^;)^^Y%R)c-?z%)I5+UAMcEJ~G+pD+|Tpo{oasE_nviJid} zn?eQ48Zby5ZLC_?eg0WPcHr{!VFjSYj75VwR0|cK-Nio|oS)TDT_2}_m#{=3dL5?4 z({7(vygY1|AyA-_FyE{{yE70Wbg!q9@thT_y1B@d!N;cl>T&N_UiC~dji=u0_>}j= zha-fmGF$G2W}+=HlH|N~O(fcJ6LFza6ZG)*MG)yIfJ5&^%`Ku5&>;JZZ8`Q=#f{u( z!MLC2_ME8;rp66A1P}|q?;`?QF5nts-!w9vu6kT=*B;yy9t)PhNoaze9O){H-~Sk> z&3U+?wBU6^8GnTo6uKxAJKI`X{q^>pp-pA${%D1VD$*SzJ0GXq$}7@c#(D2Fo2NZ? zHl4IxD1SYFE7$LH#I1{JRSS=OBJN#YsBG)<()NOVT7*=n_gWmY9x@C9^zx#Jk|l#G z(c^(#6Ja^RHH2i0j}wLB%#=h09ytm9WWFc?9EuG0bPrqN091^A+1YR@aPlZ%aH2~; z>q0PWPT2cnV!`%)huIz)OD>Zo8Q8}6p5E)59rj3p6w+#?mtGy_b$5 z-i6@=A`kM2B6J7Kq&ct!TJ6gd3cw!u>L1pSke&Ki6u& zkw^hszMl&h{#f7PjCSNb*8g@w_w_$d|3V0f|kK&IAkRXC3Q{!zsdRuITt66 z=^M&p!YxjXAtdkY+wo)vg3qB)t7(&F*%pzF{=Z|f#G@$WGndZEO^4M)`_0Mkwh5Vt zfakiKj`l88@O!cGOxA80yiUKR=^4;B?F_fKRjZMHoUG@U5@Rf9Rm!U{=lTGLqd27= zpR^If?1hRq>dZ${RYAD;_Zbk>qPSpCTj{q+0$;Aua{OAl<=zMlov zD462PP)9#6s}2WytT4y)d*!$fSkR+bmv6v|KQIbmgdDSHkB)pLNWVH}rwZCG0y}-G z?QAc*ZA@3xpH-qbNqclz2eO?DIn!=o%8B%+1U5hbJW*3fnlC-GuZUf#nKspT zKKsZ<3@uKQM+);D%c~PrJz7EXO+1x&aXf5xzHi>t=!yjJGMADI%V4$}llM--E`hHU zbyMq?2E&wrNg3#n2D+BKi525+Y>JOp)wg%1F%Owv<-9_;_s@>&^M{^6rPCd_q%)zSSyvLX7=AH(9oG`yD@9P50 zeo>h*ZFcWfn*UeJv9n`!asS-};9vL$HC)>L${S!jG^P7$;e+3IwG+J3B5eceSE+;u z^+HidshwbYCiJ{txe2NmY9VxcLDS~V8?0X=1g0PaVzL3?SU?Ev9?=(U%+H{tLVh8U z`D_3SD|_Dg`<;uc;+#)`;ko2qF`~|xyY6>?PtE>Ej#)O5uE7ZBacaRYf3y@2(; zhCZVU*ko42CyZAXhb)pqrq#e#V$61_g$hl$DE0x1MkJPTEJ7jSt~Y?M zi((0<@?;4e_vj4p$Wqo#8>ArmG z>+`Zd9;+S8ebH>C)c>pQ$%THg!iDcUdJg5MYL|Do#j9m(|6TqM;63;5FWw8LLnYmG z`wxDX2x-Ctx3Evtc*@FU3|^=uBR>mHD-dviRv?ByOssGlE?ro1iEao8ZeR4Pcf!CR zfC_m6_nOzeQ|1naJMI@t#h8^ztAM_Wf#g{3%r>D;v4n)6t-d)2ecCoarx^PJ=63Ua z$B)piEmRnWtOa0Tz10OIag5{=u>iy^I(>BP6+%5}7x9Mxzvjk 
z!oKYR>-w;%X6r_Ui6GJI&agwHa8ZPDu-1=Ss))F?=(x{~u)$7G%o&iE<&sIH2 zY8&OBG4~is#Da!Uy1w;DSy^w$z&pPFbNf2C`qYQ<;~0FK$uoD;V#gn1)%Dm*8|ou^ z*TFFjJNHI>EZ=+)fBfqG$NkQ{A1=>Sf7dO3aQ^lAC#hrinuKfQuaBWOj^P zlLH=dYpue@y9em91JTW3`pN!0;loMs2g5SZFa~CN^0tbtOt>MyPNm5@W$H@;xKQZ) zFXxX|WOmt8RG|pDb8@spnUF;$LqY0Pr+2@#Zc&5?Yf4b9{l0COcgk?Sp+R0DNdK$3 z?_*CDxO$D{8GZV!#cHV8YN3I&wKE*Ht92!CAKzqPI^k}lQ0^2x(v|}fwHM88^afEH z24y*qyc~ad#toTAzD>+l8)3Z^@m6np{%fvOe5GflFxIQ4TrtNBhUUlbxIz6`Dj!F#(`Y$OXwFP}cXKw9OMp z+UA$Bze&9?Xwe?at9Xd?C`f)hP6@;T5a2bq0D~LK54ZBh8mifR@{TcGRs)-=fv=gd zhpR4OK@(~QUpV1>2uX2KFea14D$XIQnM^3fa+CU6a3+2?A+t(N`0QR=9{RkLnje^A z;lwQ4{M$Du17Y9=L~_}Lrtig|skOhaioFQ@JB`yWsqStdt$D$kK3#$mC(}Bto{zR0 z9H<_6h2{;M&m(@Q$p;yTV`YWJ-SYKj>}SH=+owJ^I8Ya@;h5L$4~rYVK2T zwP*pzIGi=4)>z@))UtP-O`NdlEmY~{Icv&ZCU9|GA%_88@vbu5LrJ8ZdaItQpZ zDOWW*t|gy(=9>Bl*xNRKhU}kU6m*I0Ce3f-ahH zQ~kYOSt*%;vy{BHs@XIlY8o~Hpb%)06LJ@LgcI!iHHAerg8iyXA@j>=MCyGceUdM$ zZOo{AH(bdW{EU3kI+7C<&+D3Sw0HXOn{n0VSB%X>2oYc9ciOsFvDb}iou`Fn`D zWhtWp0MeL^9~>tds%}4KdLsqR?+g|RAa6dF7sjGY##HjZtISW94vezn2DM|WA)3#S zr2;^f9t>cqY1P?4<{=R|_+++tIS6Nac_~PrG52+^|Gy|l| z%FEbKxc?Xpc;kTCovKhfR`3gJfBX^mlkXp1@>rFzG`>ruhR3wEMErf_`ngTrL(9kQ zSGwk?uKmMNr=!jf@5TSWsL((Be{XGv=l%uWp#fd2(911tFaXG+Xovx`zh~nwipl|+ z%QBDV;4nV2MWGfmNf5A{=Ay4$FuLXS!|J-|?gW6jz}(DQ4&G+aOQo`*3CBr=LQwi_ zcw$07Y6*k$4cE?~8&WUuW2o!-YBl>wZCD^4gW{Af^G=uP)W^C!WMiBeWGDHh-Zf^`2~$f~z+$>kd zilNu*PsLZ`t^x2;Ql<>Lw@9<2mK_t;gl3 z!|z7qqHJ|*D;LX%-NUXE?FA>)^6r1|eDM%A8Ik$oL(j1`=N|9Mn6{2R+xj3NrrW$mKN@hc0Mc= zO|C62n$RA#5KSU0G#P-J>i^2{MxRS@4s&wzjk*HJ?eD{?*`9!sIzN*rI@PV!o}?2* z3pt2r)!1yzF>C!cw%zErT>${34_HxQ2oRz%%e{z%auh#GOWe*Esp8Gv(uDyKgkEVB zS{6bCsd*X^?dCAvBZL74rZq%L5Y(;L9+vQD%DJ?_-)8dRYcOhv2+$V@I z)2=#}bSHUzM3N%mnl-8Z98I6R_9W~LwMB#HS(qfJI=Q!8p#x?rXFm*({e)L9Ca2gO z;BI|C=6)|)GR?V1D*xHF8kLAat?BK`{Vlc#^Lsq?@xISrAN!_AMG5Afe=7ehp#E`Y zb+mJ~yG%{BKV$N*5XlQ|2g%ox1v^qgBsVRK{;Tl$KZ^g+^)LJb-}AlX=lQ5MdAEB# z@BL;MAtvwCQ~{ArBLb8)W^bws_NtMT6$L*Zevfi{& zQ!AuS?F@$s4;Q`&i}$WIOmf4C{bR=CC2|`nZ4YE8q2guoRZGSfFhhhvzBa8Q+_ep}B~U2M$mY(%Ncf 
z9MkfghWgW#w0dJX7-jHwP!&H6WJ(a?F9B-?&ZYqRfTlO0u2QfmR8|__U{6q9)u))_ zZ9yLR=arZks)2>TR6(-1&QtD@Xz7Q&Yut-d7tOb_V4Ny#Feiw+V&`R$<5~OUaW)+C zAn0}YdShMCqx5TAo(ND$L!TQaUg5Q0Xo||0*E|-3Z>2_`Uk=!s5C3&5V(a$PwWl;n z!wIYhxB4T2R2@>-T=x%R?o2#!({XeuT6X372By>dM)!DU#K)5fb({4XRl&!bAWc(#-uuq@^=!OYOw5Jf3^rV^v1Q-WM zKx0Een>HL~TIxjl$iR_XgU#UYWdg*{1>*e+T|hSKm>Kl}PA%B9+A8aLUEt(jG~1eb z1uOuAqI~kPJtioLQ3DbcC>qm@bm_~hqy%FVj45nIWOv&t=DKLx;MzISxX|$5M*A&T zW{L~Xs;PfuE>$~?Ep&XWv}`>G24uj&AInD~jK^VGnGRh@7Sk*-+zZB(DP z)Gw{6l0s1Uz?u}n=Yxk=!RyI91YYR?4cT4%F6Ss-hM128Pox@7^wVa)l{=Z z_p-{kOO&$PG_bj6obAUon3CLpBCF4EJ`?-w-=!r=kK~IZeDg0&N%%GMg&KTJ7CvVp zI{5i0ebMAZNW?q)hQ2{i9=r%a&*K*ob^v3}8lx`&!h6p!qpCu`iKRpqgnvnGdDa+^ zF)zHv1|B~Hz)dHiF?NXFc{hTBc)&u+HDqq~GY>9s4|`s=bpdT8(%pwNLNV7sue%i8 zF3ty?3FtKe0r67jB{M0JZ?H5MyLyu~vOL0b(d=XSgSZG2%$uWSDpxF&u-`JI=U%;( ztTr8Yqm~g3K^@M+vw*bwI~}?13(D1rbEeZl!I;4yspn{o7g5I)Z{zONdcfXTG`oh~ zDu3Ac`m4UsonhtA8uHSD*4u6Q*)K_t{|NPQxb|0u`C8W~siWuC<$L*dOSEyKoFIdT zF32H!Z)nA9*~mMq%!~b30B>}J4wPvS)H^IG%#GmT3Q@}KBct5wNMvxDo&vOoq5 zvQ${dLmWv#$Bs)_^7_K_D&lXmC( zdzvy|b&y)nY4%rgaRt+&T;DUeY_G#ludYGuLUk=b>T0eo20-BlI@QJ%SdQm6eK-b4 zW#`>h4Boeu>GnM2&xNZ;-nM9aqx^ggn%QBbXa9yMN&y^d9(D@cwz_$sJv6-{ejg2moX}a21ox4vNbpqR+>VrmhyDUARkG zVW-j5*e00hT?T-oHVF@KKDdy;rMXG_EbFnUzqe65GgAP7P8UGpNCp@%h7-g* zV;45$Xa&!S+%f)f(h734fi%Ff+#b^WWISLWGV0{}DC+eY zufP2*PnllziysVx0!wn`9>(TJNP6xHrv35gc(o9p$dNpbNpP2?P#9`8`5tj{8ejEt zC=FeFd@r-m(h!EN8cOCPJ($DUq5gr_?{mRK%Y<|ptDnQ)ED4KuDo0*Idd)6^22w(^ zErL37%|(N6v_2TvR7j;%+_x0SoJf#Xu1vT45rkXW1ASOYr~exE>N2ih9=m70Uz_gn z`}yaR`iDya_kY_pN558CYYcE`%p;r>fBVM(JvY749rR!d$xA4GsWudwnrEu@@x_hU zIvRk)h4*@=_QFG%`Pu;k>uustF>GR3pN!?qW!s^8=n24cmxNC578w(kJBQoKXex7p(RVWf*_HEWS~vQToS4pKwY| znz-N}v0yQ*4U&$VZ&<#Z1W1RZpt$&Dkvwsjx2u^U&PdJ-K^16qh2nhxV z5UK$J(n&&Sf<6HP1Pn?SP%-qbfPmN&2t6R3(3IYhE~2QRN(sFvDhdcFii*9V&*6Qa z{grdRz4tli&$`zCHSW3QxaSyS#wPa<9`~bHH2-vKvi_#1wa;(n7O=9Gj9$@A?h+-x zvIyc`O&ui4#`)%axc&7yjeq#57g0Hz*U}SySMrMTmm0!3o!wUG#r&%qle4d5i4Ojn z#Nz4=!zbzP9H&Y5J7@I&ryk;;`v2y{Jg(`D&Unu4O5k1H6&Mph;7q0O?kQRv;25HZ 
zeA0Hrg+y7+^wE_hVI7YCPuiE_x8KEbf(&44F#&hF#ekM<+lP3!o(h z?Zsu1rO%&<=Slu><7CDuU5l#T_u;j$m*{O|J-HviJ3OR@^99Cw^buU7kh-^ezP!451S0&m#i;>r z35D04N4*Jkbr;$-n9r<(TgpRoknlTBN<$2$e0RQ0oDa_sMAHskKvl27%%bKU0bCh1My;7*P-oqyk66=+vYr z*F2o2BjO@q+usxoG*{7^Zm*V96b?hw@BiE{tO=0!168NM)`4kQ?8V7k5=jVVZCZx? z2nbs-Ev0at-VD4^{5I3o3NVTV0`xX+=k=c-*h}r!atNz7US5A79W+vN9Ftx-s~x>H zZr;AFJEl-IcTdm!FS%yPCSeAHUaOjme7bh6$^5-Th6zhfj-6H$5#K!b2C37e_3dFN zf>0=m{Qseb{x|*sujgOjIh#dk-+Nxnk8l0}z-f5sQ~gE&#}g~qLoe57JrIMU7D~#p zj;)Q|`n9xFcrvD*&I>_!sR{VsA*oDXZ=tH#2surNZ0?iX-=kHY&@pvpIWx9Sj+pSp z`d$#y@>C%f0vlIoOY(QeUfLK~nJf*vi9M5;ylpa4SLnS}@Pd&C!!it|cO)>HR5X+l zTLp`SCMlKyKK{9iMJI+RnxW|=!nCw~6pGl-3EiwfVmrnSw?&CU3mP5~Bj#@g0G0LW7=_@S#hB#iCD9SFetkFw-RAb_Qxey|z67`Aodl1nt~bKu@;-X$URHRxUNCgZ!6Ewj!kX>{L+Qd39^>Oy z0gZ=8*So%*-flQzOH=qy$nS0!^dIDs->J|8+BIr*my^E*aU3oeVdX*oq?;R)`K}p?&pSoDPW3kq zI!yxm3AMZt z=Pw(aq&kcxsQqf(QS!QN=bGhF#&rekX3M$0!r{t9dRZ{^bb&mMLV|*sG@vOm%o|U; zxx#bP&?W+G5+6|ZwTjzkrwsUQa)E2(i=wL8P=YBZOx)vc!rpE_@~%!L5s_utx6vDK z?@6g9{7{8?VbyFUFTPMqo6xWt6|-Qq=lzbmGrrn)59y_PSIRrn_gU562Y}k!`%6p3 z4h;?uC((}1ew&xC6oIe6wV6~|j8MOW+Ce3#uhV*yYj0oCW5b%io=0&Fciybmxo2{k zb$%3Chy$653`D|J#Ysd0z^omb$cMO#3%~%^f~8=o0BNFmm|#R3 zDTf9N!OUPAVlvzeisq#5xit+uvDAl8)m2aA|MVjOYuRSr+XXCMH5klQJw*k1GJWp~ zAW(qHh+VOg;aN6dn@hr%y5G& z<)kegn2_3z$A-^-&#s!4JN^dQ!xsXnH+q#**nM7=`j$sus|J0^S1Z+NKS>Oi>zm%Y zl04G+rQ_+CblUA_Ul1I}y8j&d|Kf4&8`4gGN1&FJzU;a4{>3qRv5Pf&=HX@0 zK(vUptHLL7v?^|wtL??pXC;8@!d1y^99Gq+7RtTVnwfo0yI$uz&IMq@DC@Q7&9M+ru^hrlT%5Rz36+)^_7a9r#0#J4gcy)5AI1~|&^pgjG zk$xsZImn?%VwkBs61Mv%5TSEsPAi6B?K-?geq`||s+re?c}!rj#fF?$0iEi%@`#P6 zdJxfg*+=Tdh~uTTm~uQIw~Daojm&uuAoY!xW%OH?8nw#$JqG8dYA`%i{oK!_HE!Gq$4$O5C)O5ESz^$qn0$hoVW%S5R zFA<-1za)@JkEZqE=|?g34)wLiK~FFMY&@L{qCk0u$sQ+M9E9P#LqAJW<+?RR-vxil z`YGjOgpZcc8@e_2M5ab3Y^CU?vcn~tPaKm)^V|7_YY#@n>W{YASOJicBdViySEHb4 z4O@gI7n`sTTC<;0IQ#)4AC-su?~TF#QU5a^?&9BQyzhXL=eX~80BjQg<)K8A2HZ>s zJd<8j!Bu;Txe=${{5p-8K6d77>E^9ZKf2!js>#)ObSb|(>KGDRpLD9-^@AOOf$1IM z*bz_P5(?CkDvFSm^#gG;5@7y0 
zBBc~pyIeZ>+0@+5*HEUl!R7>$rle?y0pt{9s!kS(un;8{&q$m6*jx`^uS7vYoI~Wo zH9}%9)s)7GQJhvgZ_)-^#j6R3pj6R$jRE?_$@QNr3#ZP?M;D3FH-4V;ERH^;cquQn zzPGrp)+@zI^{TE_{lm%m{JD0I^Zd1Y()o_-XV{u@>@)5`cTKYG9#)rvnL>c}_+C2GlUR#%BIjG`iwGbWoGpK^esg&j@lxyH5U>_( zS()0G?-BL}D-QXr=1`RXL2|Lr#&U0`qb<>2^}ExlSyy;VR8Rh9w0ngLrB_6^7Ey}~ z{G?4DNzm;;sX4N8rsN5z1VrK41n08YmrJZuXTw8<6I%k_{3qn^bJepx8 zkR@cb5t0Q|>|rpSw4vSNk`3cg@Z-{J!onBeMh*K2Hk#8D3eweKtc*-DAvrvLHZ9NW(?h=k;f7i$ZUaAB4as1Au}9e=<=T`gs2$<5fOe=$Ix=VO(#1S`ubK;w zBt*Ch-|I3&5{Or3bF4+zdVyw+IhG(mVuD9d>nW0?^Qlh#t+jrn$UVP|p2dctrnysF z;;HWCQBMtx!xPU9PFOkQ*J^VV!h7 zYe&hU?(a_9BU+;qf2F0~{r^S+{*8ZNFgO)Q3;o!Lr@TJUC9*=}C4>cbPN;YKnf-AYi-J~RD20=veK z5|;JQ#Z2_sgVJG`Eu>svdIN@ZS}NYOV4hjGQQKP`&9Gx0N8|(2NN|2282fZ^ukl@e zRk#z_8evj2U8#V|uCT$w#kt)0BGV*kS@PqsjM&G_b0i!DA|mDA%PyqA@y_0!5|)zi z4%JG3L(N^;ii8TVx0iEmN6@5nRTNapsq`^n#zlrr@}0pVq!Qtf{S^@A8_s-kuUyPi z{FvP!R4k)PAelzAix$5PIVuVX3^v1+=5A@yrp8KSxMoLF*j;=^bxx;3rUj-l(COQa z8o`w?hTm1d!D-1K8D>w4PX$4{jZF`6ydvzuw!TK6xC2$!bYDI4zxLDUJ zQp@xty})GmFdDa&+6vhwDLa@>md`|ji;psW__~Lfi4LG~;zX-+i6jILOfs?7f1!U& z&kC4y%*+8yJk?dADu)uHp8U8k$7caKgdGNeR1sxp#tZr+#q|2?>pn=V4&emAkQyqA z4}XPVs!F99@iP3Dk6r!9VsrLe+Qdj<;$z7I+5s(qRi2Z{`b*`~QVJ>bF=^x0K+ORq zcdS?QHc=@Z&`fe8+G;FMKx{I;07qgI$*JMAUVXC@^uuH8b9B8{`D4f={oFfNwNi8YO9S1@Jp&PwMx3?1E)mWV_o80DbGUq> zbZA{%{@mHT&b&+Q3C$l%PsFSIx9<1!6BBVsofME>N1_A>I;#PWjvW_G; z0g)@d_rMC5&|5Q>-|6=;}%B8D|d;u76(J*bgY~~T5zJ6(*#BWa8?N6 z78fX{)~?2q5qzc@p3vPTa)VJ{{wN=?ao~C(vRR~sA8tCtNiiH6Xg@NG1Uc|%rPy2J zna)qPR~JN_r>M0;(I+#@UC=XfJfW;{s!)QoI1+$3&t={+ug1Q~)NU7hic_-NM#E!E zSAb=#SoBorbhMhO-O);hM5bCOrt|l4iyC@lLy`7VZQ&EsSl>qF)wuwagZk74uFmnH z_T@X$EXR_}Iu%$Ub$Z{Ei14E`kNoYn_MCDQ%sjAX2l=H-^~D{DJJ`DiCTpI_{rNbg z*WiDxed68TYZ9c1PLIn`T|H506YC;xm}m(k{4nJnU+FB}OLiP@i##U5JSLa|j8WB1 zIJS+JABVF<8Pwhk_`p;K+&IyMyvn!l5Lel4pZ^xvtE(_<3fU(wU(m-_C~%8|X~v+@ zx-E^w_znJMCQ^ai^g2Oe1nb^@plZq11qQ?b^XzaBawO!v2h+4w^ZdraM+%|_Liu(g z_N#aX>kWeZ9g#GktGmiJ8z`vn+epADCA8kq^F{7o@>ZJ02bct0v~w~C8y!rwp->&c zP?lt*Wh78I%}FLH(;}8{Hlz#^g7ethj?;!T-y3%1j!F@6S&Zxb+QZhS47HXwed_|I 
zD$)_WO4H!S5#aohN8mD4`9alV-=}7B>@=%gzjnOvd5} zT5^Ig%dTI=#S!2$-9l|KVC*NKeSu@7X<(uI%bb13l6twe zq5&F@s9K(GoVrZXtxw4ilgJi}g2bP|;1+zG&+6{c;&xpcFL!=y^CD%abC^Q zQ+E7V*wI#h&?c-Yk#u)-^7DZ`PK8}sdGpuae|!DnYVe^Z-=*K$I6RnpQTckYWx-2~ z%yul+;XAMY)N}zn*&7tc|CtR6b5m=<`46Uu zli2)`QutB8bdE?7F>c%}E}Ern83z_kkCpdOx`^QjBDEK5r0ixaA%IlmfdGp}oH#J$ z$+EBh^#R33L$fuwBmkmVnC4ed4eA%H4Id8nIJ`OuQ6WvIOBGHNf&`txz^#g)fXez) z7zr1I==3d7gqP&}&dCN`&3m$@)tpWtGa>t}Hus#9i)OkkJ{EBWI}Js~}WnvaOIbxivK)eDb5lYiaU?5(j040i6c z^>}dlQNyYA`|q0k6A4R8e}9WP!+ml2?sH|inb`j(CHLR>2fuOa6&Bv@f8H6EvEFVP z5#rHC7nO zfQJa9kJLfxg^VJ8UJi~@NAVoYGjCx7n1kl#t-<|31jbv%B!DvzrW5CZF+}!Wo*0Ct zouwN9%j(T3L{R+7Ze*)p${!|}KqE}6t^?PE5J@oGY3jf@P;oF`kn5V^{=q|AamxNP z%g6Q{GUrkpi8inkRZZ&ByUVJ~u*L5b?w!yw5NX{rNv+fR=J4yGm(j&}=X)ffN%;v6};CuMtJDgjxYXy+!MCfm2uU#ZK{ z1IF-5!92=AXMh(gynPRh#ON2SBU}-4_cEH%1>s2)1&Ust9StQz2qQ!&4A;e)^C@g6 z@ilZ3`4i?bqs1F=tX34^X#Lp1P>)yam}ik_Nqi|8=K_-_kiNo1)$M=y$`#6(a4k2JV80> z_+8<3h~D6?f^jW)cj6|VdUN7M!dBfv!NsSd3to<)5klFsClWiO2j3lfL)-V7epa?d z{l&GY&C93m>9u!yoj%?fJrVUvP3h~F!?D(}hp)7L->RCSkhqo^e3=taYcQX>gz2rmvfb@Q|STj1X%q2FWbGw^kO1p1LnYP{bv;Ns!C~G+JsL+b&?*B(7 zswTJ#;mi&^WBbd1F&gPYux%l%2nBtbMBgo9x0{crtQP zZ9P0s{Ht%Mm&dH!L${H_vb-aUT}$gd20NqG=Ns}#Rii6Y%D-?81`v?M(4ja@&%CSE z(v>&86V!*8p%Vg_7j6a2Y9T^WXlbl9`~+G7)3BaYG|M8zjllW zacMRXsWf@cL7eV1wG3t=Ku9vl6ve=I?{&Ph?ZP~2?-wWiM4gk9M!|f8 zh`?vTBypkz<~l^ekcIRMQ&AiI8Za$BQyB&&S#yxU@fzdD4DNdvU=q`sUFZ!r0#~zah6%bk7;J@dcyRf8ig&lqwRGDUg(G%^B`3NCp+${HTQi;NbEJ>@Z#cfX(PE z#4GFAJK?=&3fRLiX}f}ep1_}OS%cMkRCU6_U3-BQf{CW)U_cHKzub3T4=%E?$BMgl z(bu~!({97rwQ${zG0GHl@@xYpnhwD*h9UAU@9y4l^()zvfxJDE=oPR4Q)_~cn8i7_ z_`oK4*Kk>XQi~<);HJCZ#SJe2q?ykB)N&d)XPn!VZTO;0rGC(zBlXrX3_T1pGjt6r z?e!W_y{Z7`LSmuvqF^ltiwZkD=&gZdfvU@sTccv3XU&$yf95t^3{;+aWEbfg2IR0~ z);z7q7^@CI-fxWDP0_g5@G+-Lm(;%f_d2347KHIp* z--JSk#rBM$uOdrlU-4A6IGf4Gm*Z=BK%Okn55Q2-6sAn6*T*GUP)6c_2^tV2oZm<$ zs*O4LB*8SsY@Lbr#EbU8?UQ0Ml_( zgOxX}*E;BPc*#&eQ*VLxOV64>|E7A0lZaEAOE4MPSj3sfHZ%SqB|nb%(+#0(cTQ@2 zUTnJ+wU9Hml<;cy;YHDP!xr$V#d2%rH`BSSn=H}We>`P>N#FVUqoi>0(}9&=4++16 
z@891LCtJz=mo)g__y?$-xGMYZy@e(5P8n<0{{;gm+USBT;>0t~s2|IgO;vJD%Fes< z0ss=G642;yH3o)jLO_QJ{vx8mB4`D)f;c(9f+a>~^+~V~z|~{PF1u?ja`?5n2!b6k zhjQM$aArt0kVOqU1pQdJ{^}Xyp;Q~P%4WTNrf~Fto=r_?l2c|xmNj8%efhQOk2rZ@ zv?gaF9=O+X6{k8ik?RIQoeK0UTc7$+F~cj%+GFx+Czjt`ruD5_W!QZtzN32hN6l32 zA-Kn?3hPp82>*J`+ea+Vu0Nsbvxg}g4_yq;quu+q)y(B-dH|6W0cmN?&Sz2O)y?mF zA6tLm1ssmZjW67HMlD7ZSVA5`ywEsSujzc@ufrOBU3u-A+J!|pimHft-^$Fh%aY2M zv2Cma?`FD)k-si0RO!=Ij=xt_?`xN`IR4B1rGeV_VEcQ!G37pFhM8b>^h$>h;rfPx z@=kzk7|e8L!K)9FAPkUY41p40%SajkzQ%9>z<>;dI}3+2^#(Cdl49AzpU?cZ2fUyC zD|6f@l0^I5m)MVzr0=gF8x6}@>qg}$&>w`AFHc2n(qZ}}fBEZ#RZiqpW04#vL?JSxtWQK!r3i#TBM z&E!{gL}uRP%pQHQ*H7fENJ)k6@SFP)bp=jx;QK;Njwdsol+?UDTXN2#Vdue*$%ZS1 zlX8FmmDhGZ!RSQaSLXi|`vCvpy#M?2AN;E&XJSsDRvW+5qgeNLeb#=eAY{n2-lP?Z{9$@mMz4~Z3h4r+8Z@~5mC?+unVf@U1Nvp|`?*)8b z4u!S6{z*-l*k2}MRELrtrP#4WCqf2Pv^{o1QC{F}6Ibg`?FQbplSzb1%KBVj$*DB6 zf~3FRMx3i2Z^@A+P%I!hE-*7+G($ybm=dElHIkQ!(7Y_`arKUU49*FEi)O8;M|W_p z0cVTEb~wDc^;$zgMI_9m{`}jUS9G{;wwHWv&+PYKQL8vz464>KliFARR2Fcr%&sVM zP99FN_7i$)Xvpw1_MS#0j~{%8uVc2U^!=cq zyZ%(pA;0-+1*r=^yYROk?bV^ZkFCa?Pd7fCIe|PRM;(mS-_g0cK<<7CTAWXgBu>7` z{q^^mJ@S@K=HY>EK;)fh)GY6zZvnp_&kFG8S&3-+xBfPcD{B z`Sj;#f$6+Oy+$bu=GNvW zv4l))a!$NuRm@|Zed3Q3<~BzKE(cfA+oIS(?s|Vb5`@V`=MjczUDQ!{EdS{scj9#! 
zg&?|K%?gnMv@oVH>khc&1;1@%YqXoQMdgOMcWI>?n#Oc9QySeR7g!4e{XP3ycx!tq zVby+TJU1k102)A)388R@^25nF`kHUa7UEV9MNFoA2V&%p3YNCJ$!I8QIkpuXdjleI zqb_!9AbJ)@W9nlXB#!7^2Wiqe#Kf-@`qJzDbDELguD>aIX8iZYzx?liPV96Yl_k{u zdMEYX_*V7*%YEnnj{SeV|I`0y&aVjToKYKO8<(Trt{4I6Fy8WmS2T|Bf^2WVB69E$ z_R(iM`d|H1yf8!}yCX+1HIhzM+)H>|3GCmSH4J5l^j0~l+*X!ryBE~%mL_Pq4<&_0 zdWMZ;TW|1C+~iR&@(Lr!heSwr_f|G7Og4W8&dA~eQ^?s*pO-TM*pZEPakiMp<+mx5 zkm@KK5pje5B&5kAFzW^tRBDJhm#Z;_fBg)(Wdv3M5R-=3bA7_7v2&8cqo$TeADQxE zdt1OjA@=0h)1;B0m;KEeu&lxLv~bpmWAt$yc|ClRB|N~eLQ9K2k{tAQzTW}r+4320 zC1Nd`fh7#%;MOBHt&MiQ6&h+ZA{$am z{_DoC@i%{e`*HS5Pk6Ix`lNN!@tU{%Uw)6^0P_SW#mlFBZ#AP2;B(W#GL@m?KX-Tt zSZ2ropkM%sJOZpME(pey*;tB#6_OJL0r7xIMdIup_5IIhn<#o=yw(wWl+ZZPX-OWX zPT1nwQf>_jacpDe%ZHxYMl}xj*~;ViV@R`%@FX5+syNZ=XMwBd;MGESQFnxCnWi3> zEydDvCYZ13Km&T`dbQeiJQ|f5&KEAexlvaWx=0P*OiJmKGuvrrBmHGBTo+gZBvnBh zB1vDBXH*d=9m$!?1_+u8N~c^1V}wyatJPY{0^ zU%Jxzu<7ml?yQcpV*aTG;7S?f7d z*%Uf^N*QJw?eO>L>J1P}^HJQ^7&c<(aFs#i+sV6xkomvVJ^kFy=sdfB;?LPH&CNgF zUukT=?^1PPo-}qe=J9{I_2Pe>f`8*5q>;Obo;)2t9%NKrw~K!efGNR%3#m}d$c&%; zmWm3lK?nC%>y+O(J6wZp499N()CKpd07q&YdU@GU5h9ME${Z2w+ANCj9DO3~0Fb&k zHx`!+V}c7%_Xd1Hh{ITJED7ZD$pVJNT_(ns&fvu|xD-oW%vEHu8i9w85nWt9?-mKz z?5Qdfg_JESWgi$mS-Om6DB#BMlHfY*FB#vA@zTeG7mFnzbNPe}1)YFtxH>?~ z4XtJ*{cCPhFTt!$YW4#<)7gF8qNWshu}z%vc1S4W2dO$@zIsW&RVuh;V6RxrNde5w zhAoI7AqfXUAGHlnHVUKb!mLZby8eA@YkpSC;n0suT(Qydfb#H#e&h_nxu<`#6oK6- zc+gu#@P7@vbjQ!(cSGJwlv>5mgRBD!VNx1@ODFHQU5ur)%65vU&f1}_!KFWc zzyI?0&ZO(vAJ=t;O6vnL3sTQ+kTd~ce3cQCtAw>+31iOaWDb2k0X5UM{H@>kk9 zS)tqa`iG)wCpe{MIp=L)zt44c{Z6^dQK7#)$|^~^o0#Xch`D=tM1U#gClvO(2@!SnoCs0ayTBP(>fJoi(tnChQtAhz}S&E0cERKyY3|D;NTmKAfap)OM%Pi zLC+u|qB`K+%@A1Jhukcwy2aRg#UgcOso0TeRkdPhUg*=%C%?wMJZ3WMJh=m9QREHn zE#6f#l~!JhOjzJ)Z$l4Mg00f7#BQtmnfQ&V)JSHe1|-%-GOi5B(-ykf`9zm?B!C|7 zuTM6MH_&@s-u&rL=``)qLLy$oulK=XbC>H@|A|w00F@H*8Lh&tC-g9^Yz$mrU7|I2 zb|p8et|$jAyu1~h{^-Jq3uO;OH-d@EA6uHe`ERH0gvsHB^puJiAFCb@iZ)p+VLm%T z*cp0NzR`tzIiken;?9o?j%fwFOZShQx3#|e`sGz+hrh(}A;MbHj5X0R2d-XNY5&2W 
z#`+93^RwWm#4K?~bjx+Z5Ht)9#gb*X`{5vFhXp8ccj_JoEMPOjlNDAnS#DVsYB^=5 z9{}gBv$|83KO}(clNGQZBo3@2*p%3wnK5Mwf2KDnHcP%yy6!@4v2%SOowC zq~(zq(b)np*xJ)xAsMwg>*BN6tp^vDm^vCJK44&N;Z8#h!S*=OBa_6`k1%QzmlVwR z>{sSPp<3V3K+%~e&Az7<;RUaJI6g8;H zy(fVY>K0`|4p)K9DJp9dLjWcPX--R<8FZgY>9`f-s6Sk!I}SR?*<6j@r*s49o;>fa zehko`-MnOJtC=z$tp)dQO**%eK3cS!Oq3TQ<8laI6L(Nx{qnKyiG|ViVAr5;1_{J1 zWilz{)AaYyooh*!>oe*9)!F!`|7o;-4*ysG)6-o#_-@@8U;*IvKH`j9pRgNaO&*5B zv6FM%^i%L*X_%!jQx*y+#hs9*OAEVf1L%W1Z!n8PH6YONNLk`J!gyK5xFWLp0FimI zSE*k*7SLyYte8%wL9NH}x&#(FlIEktCEys#o=#J~)oR@1X2jhi0H9#8J&2{e`{evL zvzFJGL8cU1A{;3#$fyZuzme2xb_mpWCRLF&GGM6rYIc1zv$fi5*RUE6*9)lxCQ*); zZebRCJ%m6#PALvXwo8vs+M<4)Kns2Ek7El#IvyglaOCzNzx)Un4f)H-J3mi{uP{c=e-Nq-#QXgs#pjmFu{H^9)9V1&oPUgEd%(P8Lp|V zcB~3)z3Gz`u+!b2Gjs1P!sU;}i_nQYx9b6KX56dpgXYp~PW^SY({=ut|J`?0hfm9f z{eJ&(;o`I7&tLv{^LlBhXRX>f=mCuJM;fNf5@!5{R!3dcKD{PVecI2Cp(x=ez*Av7 zg{`8M2yA(%K!2YI6JW|JWwbCE05k&~>nw~!sc>$eME308gTx`f?`6h%vR6bB7)FU^ zEBR*jX@001b@QN%#3B7dtet2CjDdpa4_ia6v3PO>fraK`?w2@Id>G$>oM7iWji7$A~ zlsijG5K|1<qSd zY`jG2es-j}aTb|40#_0Z%OLmy`ax5&MD#1w-IU470ZQ5OD@sn%1Y^}caS-ONfZc$f zJ8C-A>NhD`u;uC-w;i!Fj(m}w0e$3WA6DQzUqo&h#hCW}F z3iSxvE+}+TYb-uuBA}gdfc$=JG~VJ7Tq{7q6CD^($cMn{jf?>5JW8zT6|->w%v|bG ze{~~7SrXo(5;g=JZu5yn;f-K7m(}+v=)uyD9V$@tGC7vi*ekNWHwTS$T+JK7H`p(u z;aL3aDdE?Hc6uyRU9b}!qm9jK1RS&Ts&g6x!R&6f&EXck-Q6cXRrjt|;w0thqwKr1 z=#Wf_gpcHWlEa_HSS8KSV;ePptEsHpUpkdY3>@P-oD*5wW-hva4yDndI z-5x3K{v~;#WHa^myH5|!evAKod2e&qRXzT_uiIm;>sMqxBYTev^G~2c`d3mpL$rW# zS}#A{p26GE2t2YG%3?-tI~<){P;(aIOXeza98OCbM{8|_(`2EDdOJsL@%-Mj(RJm;~w2l zz8B0Vyb^{UDhf17xR%A&nT3lE_NVTqRTg!*62efcDsjTp z2Zsve2tlzWPi&u>dOC)qrPy($be^H)7{V2gA)1 zEYOT@%lo5sK@Xq`}TlY^kO07j#dYJQm{5N+1)EJr8hEX`EEa7 z2zavQP%9`)`D#ZsvZJ9{yz$pxfwxmE-dpDnU@*$*Tufp_rg-Y>tIHur%FE5j&UWNZ z3qjd*Mq+6OA=Pvkqn@JHP`X(jU*RR*dR{)Avptd)$I&5$b05oMbSUOP^Ep3{l;16S zdUgW9rd#9f>lj1_lk#@5QgN4ijw{{B+M%C_cYHFQZ3xuO%YCZxu*tx5 z@y@Y?jr#SHCq*^$KjA~s@28KRRIF z$C0v*_ld9iWE)MXnc@d=Gt~XfuM^eqMv~6f!l-qR(Jvw*Us(Brf@Y@>RVoGokWb3fNpc@EsugKvOU-U#FjMA`%&rC}}l#+1< 
z0MqY@dY<(S1SQ#csO>x&je6#~R@3j%J%~Kan1N_wD+H+~V%Z`;B3D+qR!b|M!yr|JxDxkNzLJ{6}XO|5L`- zlizv9061W3z(5ZRCGA$p=Y^qktSkHYcs@S1vM)f3qr_zPR>C<>Y;}wx1th>7#^R8C zJ3jFeJtb~~O~3NTGH2MguiV9)8qA-F8%qfE912myYgVuFs>yh&G*pxvmQBJDA=z=( zWD6!CS+z9Qd&~8x!o~7qRAO4PvHJG;cXJ1SHxO2(p4e*r>L`4pr2~jlq1dZT*Nkb{ zRENz>Cl@PxJX&6UGG6gS-}lN)_Zc5TMRnBD2e^~P{d`mS)Zd-+Se>B3!@;kq0P)C& zLo`aB;>@waUr?qxwZm4qdZD+{kpPsl`8@3+S%bG~*z)nX|s0Ck%Y6$ao7 zdZ>sqp>2PYOXO#fEd>M|SP7m%AZP<;A!sCl1Z>4Z5Q-$JoLESQqAVixF1URs0s8#2-b z%m7BLb!;LK#$%E)&t$y&V`m|nLfd+K*UTOlw*wpqv^6yk@5fhweHEneAD^=79*mxh@qUO=Hp9lM9JVlqPs9c)H& zqjXo^5_o+Wak)QbIG#}mu2en5Fl+PKZv(>NlEd2o44qE1=%_v=TT9*~s!}-DxrN{ezYIMi!1CXFLgFqW!Q88G;w6AH2Lz1SF8I zZfACe6!3kp58XDkbw{jRaL>J45+At`2Zp%FaFE~YU~nnSX`h01Hk-#}CHK-ZM}+_q z*da=pS2%&Xfa~9*&DjtjP%`?UXMo9U69owH-1FRYJ4LE0Bs=F6_mi})R>Dv+~1z<^Y{J8|&r_&1HtC(Aby4tfwt>WJ?*9jNrMg&tG3Im3B zXEA&B*P`3EBb5diJPoV<=Ky6>2%lcb5ceV&R? z;p~Q#^mP{j`+LV2=p=Nh`rDGZG+NaRoO2s;?%So|D<{rIwW+ErX+7wCcGs4ucBw@2 z+-L}KWZYr&n|033!k$>ZjMiC2mxv|ES@oOkb*J9zeSdT7S(9T|m4W8avS2}EyK#0- z*6rMgn;;H;g|(d`oe3yNC%lwOiD#u1Srl3(ux_WT?D`O}F|l!eQedv;DQ~eloa@Ys zWhut&TMC>xxLR6j!nZVsNFp^iMI8#O%I4jN!4Z;KA{{T+Zc%HwRq^X2mZ%dYJjc3` zcASu7Rc&SHg^2L!+G?*^x1gn^4*Lo1FU(YN^1k+1%A0P;m)fyhz&+wR5YJ6zkRo*RMy$8@Vx_NdK%lVk1^2CK(TcwS2%mHR7~m+IxO zexBg1MlJc2fM)FMdUr0S!4B<=J-y36DDiaQ9Gf!I0n%gi%IJTqP!f?AG*AI&c$lcZ zNMz_jGE9x~WD9;!@Seov__Vp5$?J5==U%vVti7!)E;=&HrEH}vjAVCSh8J)|*o=PM zc+V>p{4wEvFW=#PtcA)o$j4iJVu5;9+q$tXM8F9qMq9pEC1q;Q2@AG6)U9xOJBOa{ zOQ)+IOwK0P$?idooE%m(e_-@H^WLdnihd)}g=A=Y7rJB=|QOdd=RY{>?csckBDh=DCBVi#P6J zMs$lCKfdkg<0S}|e( zFN{z*a&r*@52{v@jsXp0q|dx(`C#fA#C{1{6F0=5Vu`b=g`hIEOZ*2gs}mA61qQJh zx6kSp3M=_)mBZwOZ6*I7*s6cD$>aOYWGwUKP8$`v!IqJ+oVYZ4&C&-(sibEze({`;&Gwq>B&;f za=lm%016-N3gGtxv+%fFO@llf*&Xy=&JOjA3B-K^1qdZkl#Xl2w+!%?L9=Zy&Pm4J zYZOKDy1vzxG#%gHkQ;>}^3CGEp_r;-_rjw?UF1H#^5$$T0P~M-t;^^b=9v?ByGW%KWX$p_7I8SQe$Rvqwj1gZ}*yj+sZ&HiZPcb-PWj>ff&l zQ?dc+cbj-hEZ!1sU5_yz=*xs?GxVuE|OkN-K##YE{W<_Y5Ko8 zY7l`^dn_`s8*67gf6hrI$L2y}>Q51KllP!+7w{WB_0=C1Y#E23Z4Tzg39b@hf9y_- 
zgue=%s?R#>hjEHgruo%78v7#8JFx|xzw#vDMvvckxdfQVZxAn$Zn>Y1SdBPmz}t9W3|6L$Llust=HD;U?0VC z16}OwmyA+LTCgB6(M)g3?{@ipkHABl7rHq?2eh*%Dr9D$c$vGqo zoCmJdgnpQ=GF#eZlux`HjB5OdLv4r#s!Eap(!x}rz&5+{B}8#ENNt=cB30(&K~t*| zXIm?KPB*{6+1?8(8N6MxI^2tvT4~k@uRdlBz+^&4Xxrw8 zI8%eT30g0pS`IW0o^GTSfu__7rDH)$5Rh%#Q9|eZHCXMrNhGF!J}CXj`zKhbg}g-PCw+54YSp>v!)0dOLuUTVSm?SPVR)NkD9r0(ju#ZG$ziH%F=kG2lA zXqZW6bu`|^@b>%Bf^kC$x2OP)UVk{VocrXsiLD92_f6HPp9|A|)Ia*nyGGsDkz4z% z{G$xuo_2!*4e`l1ozFY>ziN$+1H_R+mJI}e*Y-VGr;%_1?VD8oyhYY{a`#|W*sWyF zTZ4^&&nq(*dttgB(fbc?{XTk5ayHuc`0tY&_iGM6Ulrr$VRZ=?%q+XLjPBZUj2|p! zU|Ti9OgGZ%j3k)?>;!W!<`&c&<1ieC$r<^Hbz%);gcv566ND2OwBWv}eV+6EU^JyL zjjb%kC?6mfm6Oj=*b7@STFwXZkhI53*uZXdVR#+~Nye&XK%0#V^j96g@W91p+(F5v z!c=b7hcx5N&H8BvCPc`F^g>hV*81n$msx}>B!?x(X41M>b3I^x{QcU|9kwvt8%NK< zmUgW#%EYRKn5$2wGP?6X)sZD+mYl%weU*ETh76>jX)K_??zD+k#=TN%#n}~|nOh5S z(D_Ujz{13MAg2uA_>~1xRcDcM>RNB^zg9^dDnKj2<(H*&#SXBhHuZZAuQ1CP_LG!+ z$M`EMR|cT;Hra3+u)DrFT`WQdBvPh}9?&vz;;Cdt=-=}o71^8A=u5{ecWaD7 zt3K@EK^ZT!suHew40xK?fXtvShd0c+c1GJUs>@NYRphF^q|T%ehv^HTbJsJRqNU?b zTe=d;9_WvCbm)6XX4-rz%1^QM0H54En%B=a{itd)fAaf@cTNGfEy%YB%=bwRiKaMbyN zK@xOw_WOF7EYND`vWOx{k9}9Wx}&ib~pl$>gVw`t9Yb z>_bp`;3*&`FtgxeYv0%7QLp3iBeVBURkt+?r`@;{W05m`=;a2SI0DBqUH#xK&GE5> z7j-V}jc{Sm^=_ECx@5CbvUxo7pxb8WcP}||UCNX0axtc3nq1Jw;`!CqNSl%$cQ_@& z86Njr%a`x&`f2Y!R}?pWmCqS$G;3OP<9a;j)eGC##!Lo1SFZQDbvm)VPHW@UY3@Qx z&PCtpt=}eFvppqENRBMNiRg(}Bes7yJ&K^NjcOTpNc0lAIJ(_}Q6gTS0~VZsi2wvJ zbya7gV_qJi+sNcG2XhQmHtMm@8riQ6paHdj&Nr5qfkg!Sr9IpjLksQbm%F5u6LUkz zAF9V+veiN3F9(mIP|KHcc+l4z91z%dZKP(&?(Kc4zXekz`s|8pZykSl65Na}EMVl| z+8bhLj|uBz(qe-)#G)eW26l~oZwgK{P}!-H*`0LS))Dc?twT{I)XSTD`(`wRBn(Y_k`kh)kGt6?}Y? 
zxBB~c?}YRZ1upA>vpn?vn|_7F;`JCQN5Ol=yHXcxs)|;_7Bx+rqyPR;eJ$xq#}(w= zvky*vytUus;GVCS2h=|;{y)n9&k6YV`WL(Oeb*0GTK&DLvfoqOOXmE4h| zdbd+QwrfO()oy!TSH$I}K4LFu&=8SePruT7a$!67(k|+D!QGGMdb-IIQu9Tf!HwdY zMk8!_yOoshJ`A`Iih#-+TDa~u65g(j5o*&g=64D+y zBJXdw7D(5?9A7F3YXu9Ng%MX~LK|xBHA7wmKhqdM(auD8WaKOL&;%HzW>IRG*v_Sl z8w;&kSH$A_TNq$FKfPM(-7Iwi+UIJCYUjvAn=@^Pgw$0)khBrkR~VFNf4~UNd)Ms< z(O+!McHREq4)^2JTcQv;L->a)Euhv`p$6&UcZmAf~&KE3{sr{;du{LGY-l_E<-g+qw*13Qw!G?l7LL*!|Yi{?s=eom>|5VqmWQClTl zdR}A4iF=z#i^*LY_v6j}*i7mWC3e}OwJAKi=Pn*RrPc}qn6pxQ7sz;!IQDy$dj0g) z=JBr6pGtMA-1SV&ldhip@o>NK*Dr@}6aPgu{7?Mr99x7OIu)Mt*Z#{xTWjB-x@~|S zFb=3zEdvh6j_>nSnkYUf0;x`zF2@e*B@l-^HQlC*WYon7U~;*7BOn-w_mmM+2laev zvo^iY&k!bt$Yb!z&AGfLXjL_TmK|G@P&&dbiw za$f|Lasr$_^^GW77~cBSTy#V|xOMhK2Kv$|)6jRK;U6;VOf@AR97kf-vuI#zAzHqC zhe=tk#1|fc^RG!4jOBnIuGPcGCCg1^SJXhf= ztlw*}W&BEOt!vS7{fexp=2h3qZ@CJ=>@@ZOL zoNYPL@%j0erX765UyD)aD8rqA|EAsfA8F73dHpB;ueo=D4khdwYqLZHw>G~5G=jnL zXE0|D7-`&;!>$4ocBlWrK5)13Hwwk7%dE<&`l*UC(QqfsBh*seCDeK7H}0e`%BmnP zO%R9D0d(W*Q2H1WK|vm`0aBIaqNXT-Z_pa;eHgLEVRs|-rW`eMQ&IRR#8XmXOe@{= zq~$n$TjC1L(Fdas5YT~Ie45y!fKt8SG+vIqX4x~dOo{G3%z6e~Ra}k;iWbplGCU{W zKHX|=J7MJ#8WXR~?&XyBxI3Vl1_S*Ut%l|8lp#k89==VWhk41{Ff{UY?o1bcbG-}k zr5lz`JV<*_tNe0&?W$;nrkVf(xo>!Fv+Xgiym}CNZI<4#W3D7g8Vx(j9*#zSULcc$ z`C~Kf2rxo9Bsde9R#My2G#e#pX5i83ymLk>bZN~rbs%UJ0G#Nt6p(>Jz06j?QeG&8 zrlrh+fGNyXrwoyAErOv-)^Q9WKTfY-aZl!;KmqfOt;EAD9eBS2&UN54hiL@9->ZE$ z_uZ3%`qR$mE@xkhvq$Fy+z7?D0dEGlw~RexrHgjc-N|_MKMws|Cw=VQuoh z;01oftNji237uC3ikQJof7{z2bYA+Ki8GMckD_ zT!&6phvupj=uo->j{gYaS7%&0R+nZw1c5bcLvf*apnEJr(H;+;8ZV0x-n_Sos|bcT4?(C9r4by2i4d=O>D*(1 zk3_qhLao8#qKA)NyQqErMO(=c8BlaVDU9!%K{E6}s|jDpyrA@cd10kuyX90`x~}`g zSioCNXZh%a_Df^O6nFQh9<4bUd1&-{w5~HTA#-~8XqdfUe=rb&@|92Y+yDek=NUIu zIe0A>TEb_=*NlH3*p|kA$_lzsHOb%>NM?$(vIB3<$ER`e2&f|otr`)ri6;jBJ>0c2 z^|vu?aqzs{H_F^}#^Ys!Ylauvb%KolJXx&2Kj!K2$g$-?nzQ3M(6Pm=($<|`JAMoN z*P+s1op!`y?-WtbTe`w8nEZTxdi>qzwP!7lb-%z`?!e%HWBwF|M)LOYXu4L+#Z59| 
z>#X~eq`d>CwxK{aCXfPtnd}=ZY-_(n@)XNAZDq>A-I5qe&^(tQ7p(@+eRF-|Ws8XOX(^-I!yo>8B{-Nb_;`5oCN09fFc3i+(46@bRZW!W2qIN)gW4l0ufsZDFY0?gJ;k-My#IXTQ1srmSaB!SrLQR*yFvAdf05Jt zA94R*y!j{o8@@e|$$g_WwsNnkyX)ZmPW!VF0PtgL37q;Us(=vBjo`SAD7aQgULrZ! zSw;$u@6A-e2}6YxEKxIVY8rBIjSgr&de*MdJ}4}+(9bvYyk(0pK7S$oa9>?4t(8vN z<>9(-sAk`iiJq*1j)HsL%tBLlhAu+kl90T*<9lDd@0X1vm1!=^C!t1G^Jx7MnrhGA zWv@ynw53oeLVA*AW$n6mFkj%b1f=;YzrUr?hGEI7c2@acjh}fv1p&ItyM~< zF|vst9j(Jx$F6xr!UDiG;}CMg%4f}<_ZQDS!F_Z1Lh-SQ>H3SkHj-~>xOG@7_PHjY z=-{-6_I4>`f6=(&1&B-74wyVQaQyMwiGbOx){aG^qZjOOy}nQD4^=eZIB|zL_2IAc zDrnt;)FgIo%rW_ao-+AVYR&RMV~&^ud(sgPUlKvdJECKyFbn<+jU9Twk?q~U$ zGo`sJ$jCn^n3FmfsuU&OPo)}X{e>v%NBq!)cH#^%$M$8=S$b)u7)4$EyNW^I$t`h zo>T4W4u2qEzrJ+eQZX1FQsVy?C;M!^!Lq z)@}9MuN3C^ul8Sn-Vj!IOGNU<{<;@jd_CL~0Eh*MiyW|(Xh9GM0wW0ktZf_FIpw6~ zTHo;uQ-G-U`L@klKX}70-?%!cUUyrApSoDRC2)zjG&mp(2=}0iqTpI6?+lGt$BB~$ zDj>?NLM)(Qne5Gg*$vyXK~Z1)HehP3QVU{{r zRAGSVYPVRl|6Z0CJp|}YztpH9Hd-M<2)}c@c}Y`KhID1)TgL3G?*3!^ky5UC*PoM?YVZg(WptvARBzAaxNt3C|2C`_={D3Vbu*(B{;UUh?vACafkcX2davA&Jw=XPEe zpaL@3nn?lq011LJoM^_7P34hbV(GwNoV8Md*`bkIR=iLYtjJn598B zOYFPSq<*A_I{rYUf2VXMVNS;1#q8O_lL(J}nOZ?1dBGgrKJ#5`DNE@`PZ^|Xt7C`$ zVuyU$eY!RK@T+4_`8Nnw^8Bn*sF^rV!C=NOd%025UsqQ7D+fNGwlX&g4*KSOO_q{j zPkP}I-?b6&b>_c^fc~TZo6BpvC8Duor!1?No2V%Ozzcv_=7B0u72v8T%@IJdu6rKB zp$~eFQgu!;rfmOC8LnCzKGEhRJM*Hyvrf|I@vIqIo(1H-p7f}}%jsrw@f%(gC$1-3 zNfq8^?{!OfiQcLqe~UDJS+0#x#BuJi#d?Lz2c$gbjRa2b5J4cjU+y>Uq3?Yqo7RjQsM*Q}j86syRs{?^nv?jzzBI_n+I5Dn z{if{Qt@ODtt=>>q&GI{wP?dd>ZeDA5JHIH!yh=;;72ji(*Z2@?5Lu!kl;0Rpc-O_% zuw?Z@%+a2TzUw8W2{XO~#hM#O{!&x~4Lp*jeoqtCUcrM2jV~edQm%Bt&WsV%LQ!`8 z)n89O<6fS(3HHJ{>pbSQ9UPMK1*Kg~-7a*hx0umgd^Ds0#ya(`XcieI%OrhG8{7DXb zWh^LIiq_8;fQhh3p3?m!spT;cz<)%$(yhL^b-k$WO5xLER4M1=+2bd8hsP@%sN=YwkyXf# z*$Ee`2Q~k**ZIFnhyIEG4V`6lPUPQXTX(D7yACb@0AigPYy};48YfWZomrnI=9l!n zM~ioER5^OOq#(|puyHSsA&pdjh|wKMC?{{r;d8;Tu zu_T6)O-q+4LP4q47YjtHER;Eyt1rU$6L?&z0`n3C4h4W!C~iVvg*>OtDC@G6r5ZMr zR|8k0CWBF=B^G`)LIZ+Ofs0j8`oRzxz;(4T+|4=LP@2I9vV6)YmK+~zEtH0`D5N|@ 
z)F>%Opj5>rRbkB*K6MORtt?j*hQP?T{{Z-^(yy#F?;>a2OhZz@Yg~<~(7;#C}X%wVpQS6it-Et>V2l`_!GwN5v0aa<6ct0r@jxZYRB- zC;?QpOec~6S{*OLMbYb?=Ts1RWm2z8g?Q$1nJ{_2Xg3WTXAf(TiBe|>--(q4!0gAO z+dy!^EfvcntCpzoQV|41T1*WKa;B5q3>qgdU|B5a(_WTwccl+b11$yP3;=OEJ4evq z%W{IE*uk3Y9rA1CD9Ome@gaSm9j#9caZy8rvg{3n$Ksn;1Wg&N5CDLkuOwyq!G5GK z!>Q~!G~KLsxDPK6hv@E%`;f`ahsSn1f*J>k^0Rs-A4k+k-v*hwi7YQYK)$h<$tsn$ z(?}G!RC&iVKzDs!YRpPBX}*TW3MdCM1b!Em$h8ll4w+@BrJt=KHyaNxo*wk~nUz-2 z^+v7E zxICEr>q!#IHa<%O&qk}|3*WRS-iO6uPIl{~6Iw3ll`BMEl9gdhqT2Fu}>W;>!FsR`m~A>9Z8~Ys27vF4k1;Ngy_txNaZ8Pd;v;>HCU>(I9fp1 zhUO4$%BAJ8uSPp*#x!C$0uScw#G^Ir4$~Bk$-khl zALCf{*rQuj72q;zYv(IN-&-_EETN|IwVtFN96aP)f_^r?Ie7BiM_DB;l7HSJo8#}@bsO|y;r-o^&W223v&5kuPZiq`=MAZo8>OY! zi${;lJ#1462{ak(*UMNa3t8Kp0qt8Y@Y$`xdVZM^vjwiII{)^_4ZRT*Md`0=1w0cZ zTZCBCFJnlHMFykG#}Pp3pguYj&wF1NYYNEX4T~^@%VL*B$0Mr)@sV+FJ;H9Me>OTH zVc$WhobAZxE4KX@{c9GZV2Wyc&(cx?cBoXjZtDTx|FFWkti}Qj> ziD#n(-Nho}GAQx~HWMO`u1at17m+8e5&KBFh{LXBJ^ed^psWgiHp5ki7KXC+sl=C~ zA}n7Xmx0>y^J;et(pn$-@u~!OR>Vas+N*u`;?0*sj3nU5WV#ffhfx!e?>ZE5a~8<7 zcL=8R)Znf#P`K{vJ#}E`dwN!CQU>N8T|Xkl8&su=?=~xe5mm>^TpIe9ZyTyy(>uR- z@2y|yVO}loR!7knT&Is>Z4SQ0tl-Rmcg}r&=zFAubg!OK0{mIuGaKz6r_N~^xH+FL zd7yav+j-)@a1s4m`~zN_Ebf*FJur^Cx8mJ(uoA$91DvbU)_?^4(W*QQza!K8$8k<8 z*2mpRF=-Y4qSk-)@!f!~ND3aFeLJZAa?ujGZa5+vg5^)lw`bFnP;u+SO7|wpjF|2a zb*CJ!*oq5^4tY^FJ{q<=kTY50Nw;!kcHkuUUL=*iwVS%dcZTPkl#C%FLu^H>1FyzOkRWl$LS=}-yne2n z`9W6*=N7yP@igl<(Io4_&_Hh32WpevY?&@syt!5Lm}X?H*>(s}R>J6r5YaN~mv3UH z4~u^GX-le1^Mi(4Bc2}5uiHgBtu>Uoc|OZ*5Z{wNujhx1?a-FdfMpEyTDRK9#(i*h zk&P)yqW?&u!uA*$Y&bds3f0m_Gqh>TSmS{0?7)pZMJJ2SXqw&ilm{eB0a)BfxHqat z1Xc-CjUCOJ?$Lci$a?;me=_4o3wINfPwWgb);9PRg8jVJpaH667^vPcxih|mL5&c2 zEl^A=P?SQ;6g8>NfS_5V#1|tH0(&5V3GooGCig)wKT$bgh1(>SY;NdYY z>T|Ulpjs;LdARwbQJcKu)mq)EV=3UWB>_*?BLc}KYaOA>t_C}fU)nEyA;!9&tlg$y z+%1W;rjeZB0{ax_8~St}GHe`Z!%^QC%qqy-EzA_LMkrzi=^7GxU>w}Mr9#pV&R3U& z=)#dus`d8=I1z}tJn_RWt!vyh>UTQjf*~9Nf!t-|A6{b^Xj1LJpX<11}#91d_ z40Bmf?K>}3RGpxgW;o_6wqNZK$XWkQbtNEh*qsvX5QHpiz2j))3dG-%absT#z|~xP 
z^|ocXZ`Akhzuteb;l3s<5qeqTgB67^77hon0040rv%K!sF=9dx51uq zX$o}|FSAQ7)$ff3HTp6PdM6NxHa1JT-<4157PW9SIvi17IiNoQ2WD%D)_M+WqMFHT zj^2{+ZumoOF-F!dUAwsR-nvRXJ;E_?y^JH%W?oE%kdS76_U61&pCzp3WW0{XrvLIF zh!Nrf@wH97 zP@?WR&gT6F(dedc&Wi*98(+PIjxjSE0@Bzas-h*a$#c=Qgc2Yz!&*^WqU0O;<#I6E z-1z{=IzrQPvr!vtlXSs)3SRC50>VbAIYvB8@5+n;uDUP%S_DbA0kX>`PBf3;X$pS^ zFkj0i#T`PW3XJUI1k=@Kqy2Rj1A~p+%00i=jr}EPYL_-@cjR~qepNNkJ0g!RdKL39 zy#HbdKh~{wsMFvB(~%W>N8li$sGpf8B{CZji2{z=lh&{3GuyQ1{k$B-jfDV@Kg|s) z4KCQe$+T2UL<%zV-#W`7RmE(ow0-9~Uh3?!6@^{rqaka5xZl*9l2jLbJFdxLAdYwM9dkBa{ic#IV_;mDIKLo!fS=Wm- zTL9ojgl~%kV?Yof2OS3oM8wbYtl7rQR2T?=52xv*dH`%&1@J`S7Te1TzCp6qX;(-Q zl1Cb^m}JxP^Ll3CiP;41(aN?`z}C9Tuu@T(B5h7uHC;08h9+2E*0Ivhxmh@|S3O^% z`d(XqJx~@zb60QxHHRzUfy?Jt5DrW2;{BNuqMFm^cF(*lZA){$^R#O{30zgu=UKH6 zU}!bgEk022hVkj3TTE(@i7tJ$?S!p7tM%Zc+MX_@3+;6~_MX0Ku?|<4?Zthk;-5D> ze|VNVQ1Zp-uRmXWgBCC_L7nNS9=lGEcy-ih=xeW%_SjQbVo!}4N2Kp(H&HB;cD+at zE4c$zi&22si_nrND+qge@OzYK9pcr-So?0H(xe1lxXuwf&Db1udgq(8%VU}&WenN%nymX zL3bq5nkC>T$bxZ=g$J((;KJp8M#N&4*{DsqJZo*5DcVdOrwVznUIlO*4dTfz z((XY;-4c`WF$yi&<4QQm;u61!2>i16^T8S*HNzW+Hj)rg3$|Rys)DiddNmKe0Hfpg z%DAB*wm_aXp1*v=J8N2HU|r+hARy^Lm7UvMLyy%8SZoA2-+Lcamfcqqu@Jq8e# zXH9O^4vzR4e9ie(DnzLO#g&sj8sC88>(Z6eM9b9p=fX24S(OZztC~plDId@XhqJ3# zyyP-DYRaT<%%<{Azb&KPbw#b1pnrb8!0Z9+)7?$zEqtY1rbWTuhW^p8quNDmW|f=A zqtf?|^)rnbjZX~gA#%<+qhUV1*(^fQ^CNn%MH$A%vYTsI8CP}hYxVi9>mD5;V|q?v zEd5L~zJ{htUiZgK?w>-4uY1-cY4`Q=ZCqA*Co6%WD=<-b&jlq9(R>J+(*dnzRYmE6 zN}ly-+}=gqQWEp&bet943qi@K@A4E{s^IvQ-waPV(>s3W9h-m3@RU&SSK)5Z;QD2D;*@;S?t7>2jvJv?EcOGW!TCLyw~EqIZ3i(vY6%J#Wf=ECYo3P)rOeA0Pf6pSK+|n7#$-UItWnOn zIo(vFNwQ0-fmy5OYbZ_95ir-R9i6mqhYl>vP+CW?K4UE9F{mZPl~ieCOA6si9zv&f`8dQ0B z#ck>>^XB8_t*~8vI95iv*xI~nQquVP)`NVB2giRf8mSXN6_&fB#m z0L>#=pf655B1jZ)upG4=SHM{)F|^l~`+P^10>^~tKmiql@nZ`CIe3jQ7(1~q9J3VW ziJ~|ir*`x6d@Hr#b*(SK>1h=xfSt7#+tADEeiO07FJ4)*6hVU(3v~kf< z81hPvYtxmdd)36sT)r)a3;@VT1v{-O7#kRA-RHB?bMdp%RiC!4kFsH5PyOey9aQInF9q zb-I!lQajwBqw?mFukEJd!aDm%L-8iR$?AQgP$(P5$^ 
z;Nb;HQP)B)!97DR&DxHkP-HUt`h;%CXR+FvCmUc}2VteCT%?l>{fbs*f%_xi&%DZ7 zI1P}ME(Vhvy&FSBI(`wxc-*uj(*X1xjY<>|(PaeQ!39Gv+xIO|C?34+PnWY(23%G& z{rqzeN-UJuQXkR8x(k{TUpc<-lYc7Yrv_Lkc|45}f((xMFbLD!XoiJOiF=Xmb&u}I zxxFsegU#5rUsq@c4k1qC1=`yJp5~1cYRB$e7?86uv-WKUhPV`6$clKpSabgfKuRE_ z+re3d?8z)FJ^3J9%G^t*={{#`wd2`iEA`OAk%-@4;=b>B-?Dhj0t`9S^66icw*OVT z^FKZN_x$HWC2Vg(s6>0mGU4soGJvQ92z<136vpb+?&}gAEu($v6AE%oL1X{*(Lmg}}!X^;$13k>z z+`MG`%Q$Fg(@O~ISYLQFD8@`^&{wFWBD3{PTM1)~ukq%He$_rh8nx+(l~kH_u=}g$ zCcD{h-S$t2FTLx^8T#B}v;$6@z5Ju&d|1f#ZT)WUT+x(E^{qV*45Po?f1cmx`JA&s zZu|I$*#_{yWBt{par}53PSXZ?3XWN(8>uckV|frD%8zSAS7jzkqr~YtX*6sWaDlN7 zMGZ0#4Ejm#1z5A`C&Q?iCBR#9{p$*utYZ0bY(1jfj8|HnEk=_Xm1>^w%z;7DoSDvld#L;?7 z*NqaZ68)#|;ya(rrp$Y9uIsr(Xn2`KjWjJZ{d%5wX22x%%l_|O-?uE+Ozz%}ZVG<= z>wi8a|HQxEu`$WLC-zEMwozRd*B1apM4#i(j1ecsK5@X-XEf&_vhPw9k<$IFKSmty zP(%JC5(c2Fkdvfpg}NolA)sY?9;IA-Ko(L*h6rhe-KRR$`j{$d;bJK-ZFE?2_B&&& zOM5_Klx1O9a)>v+1QbeQfW@M*mdq2{`)vzN4;y!{s)pbuK;*~m<{n$-0=u0)n8hLo zm?b28)*_d@3&%3dYWMQx!B-ISz$+xLZi@d5#=ma0VbOE;Dkn=)MwAd+l7T4t<5geA zsD!~J2VKf;%4+O%yi0!lK3fu(5YPv&?2pNndV;?5F1)TGQ}j-8l-Hngt@UkL+JtC6 z_+Z575g}tcRvfwUH_tr3T{7RPEa#fEo4QJA?*#b$k||XDfs+B9n)kQA_N>f#Zi!V( zX+N*I?vt`=$qxBMal9#@4R<=_R}QSO>MWI zUoyWSDPQPe=?gTmF%HxXS>$6-u%Cqr0KcgFmQ)FOk`NtGU4i@owmRbKV?@0d+Swo` zkDU$30Udju->>N?PJwQrK~zC5LjiiFcZ>$i@547Lpq=ImV$q!%XDR+(N?!$>lS}Dy z8&-xVQAZ-woUybG|8M#w1%Z8A)>KBNX06S*X8_d@dkPLM{X5c2D&j%iGtSP=##5R6 zI%8v2x>G124|uMq%tNEPy-@r6Why3T;k*B8HCx!D&HlrnPtUe4g7z>^HN=_TTU|gh~i%!gEb-|{aTvS@n5W;8l}DttPjX~ zF?`;Ct?~4qKgGQO_Ma2u7vQ|fC5wN(;OIXo{{Q{ppZKpoTM0k@ZdckX3ss@TmHHk3 zPomd0PQTTdgf=w^H*G`hyEbpC<|P?5J{uzWH4jjw;!Je1@nG7 z@}ZVStwv}iZM=|`z76y|7PxBVgk7S=WqQGumOIli9K}KNhqGPD z_xSmB{cG(J`)%q{Y{WMNcy$Ue|MpiUDwW!xR0fh_0${J<1U+z?D7Aj>cb|VY>pD4E zRB=JzFi%H3K5bldPyd#R*T6YdnNH1`ilRH$v)#_F!tDK!=lR7~4m=J%-ljt+S^wI6 zb2RJ5Kwj;MYCX?ADd`IiW~;8}{iJ^QUkAG%`Z7r->p_z%VaGC!!aH39B=$R2rx%Ow z@`;dnN7wfpJC+SWJ9uT8LZRrT+i#~F05(Rm(JrqpevO>9FS*~$=?pa3?=hS@7ybJW z@7<-_);ffp87;b063SFsT46g1(p#}Gz1F_^z-`qM6FemueE-pE8hhJ~F|B|PMvAsc 
zXZM1J+k~LiM?4d5NiYM`jif}RrC4A99n+jaPCh`^M&ER#!K-{IPjI$=+*m*wTS9Yb zkPGuloTGatjvi&H)plBjN1}t7hR1NYSeowO$;AxKklwqsqHpq(X*{Z%MlyS2*?&K{gi$*E# zEo(k*0%(*Yj#KBa5NN_R2y$eeNv3gHIaC~kFjf#?GWjL0e&-&%Q~%}G#&5T0=y`7Dj?3nR5ZsmeJU+LxjvUbfo zlr(PkDxmD$@js`=5)PcY({gIRG&S7G|9?Th{SW^0X57e5{f$#%}%D}o& zj3y_7IXKE$8*P@mDTtdSKBCR^Ct7l(dd>(oK?GZAxC*D%0GA&!OjS4Lj2j;?K`C*6 zYD-EffEh2k&1e~DC^A!&Xc{J-`A(7!(>jQS$0Un_vS=JA3<>AKH6kZJ(6U|<(2^i) zntgyir!(I21;(PMZd*qT?&lMzN+eDbU&s|I6tk^SRR|hE4FP4QS0pS6xm~(mz%NzH z14#?Zg*esTE4cQ#I6H$W*1iLGt5iMV;jB^&1WTzx9Y1JT3p=z|p%E(2+V50l48URb zJNtWQXXJ#fV1d>a-kK{1dKu#)hD{q$4N`Vb_)zMjJt2=Q#rRHH`u?x%Z|1SfJ3z&+ z=+qmm`!)`;?3YK^@7)RfIY*h${Yf5iHhdhQu`j$pPr7k|eT`aZhXmWa+dO-&#)zp+ zU!A`hp(b*vX`ji*VaxjHQIeq|NFtib^~6sTASFb-U}Mj&5vvvcZ1t`h2X1g zc(**&u;3-VG8I#eD4|B&R z1~*xffz23SdsrCkWjFNrvaNcpvByPwsSr4kl2Oa5h`2fM1QROyl9pvZ@+~3mi4Pu? za-LXB6Tg^_Iu3A5@pvIc#`7n~7-NS)uY@w;05ad-}Lm1Ko-7XLPg!vS1 zt=<6zjv6SDpUMLX(Uh)=-n{^N==oZW@sQ6aI5`gf#dZSgm6|YcSHB(8dee7#kd~Y7 z88#zgp7s=5Y20Ia3+Sjnd7dw_Gl;BxlNZoB?P#rrdZup&ItKX64#7t3$SPVFga1?Jcii-oH{0gQXbctKZf4 zA&NgP?i#5wJNzwX)<5vp`2$~IwJGPa^-o?p_GkZ(N0$bM|GUHRZ~70?Pg@l;e52F9 zV7Z)sJp@iBW+zK7;H|s!@1Mwf7f3a;4&xmN)~*VEpG*0Qv(VR7b{EHB)@B-4#&Fg> z5(>12C(mpE#&s!YxYphH{$U##6&-ad#k1zN>lo))UfvrbjSkR^Aiqf_ zk?Kz@HxdvbLiL}LtTShyRB!PiMlcgO>i}~2a0n9Jk(oqA8VexYR=IWOA<=NcnU3IDanr`kOoikjvhFrYvxx8O5e(~pzBePHAv-fxW29S)R zB-hl9!LB4<+%Lx`AIbQ}ZeePJ@JALiO-e}RGGJq{Nk0s{`NAM&Pa##-!dS4=R-_gg_gs`_Q1}~;#7*O9s@EIdpjf&P>K#8kdP8U!CGUhS%){R#n~MMTR&w2C?(so2_%eh^8)9UB|6S4lfj1@upW zW0<;8v>FSYs@@wYRYyX7t~wNSV6KXkHf#aAl_UBl#Q50Xp&=o8{~uxR8P(MKcZ;r6 z0t5^_R1cj+Caca*9K76=^z(ghWz3xaM`?7H{my!*Xl zoO8zezc-)O$mhA9HJ|yLzd3`qMjI{d_%lB7rW}NZq|1I*2|pIuva@Qj#>i$pPUl&& zbz-XKrTD`ZGC<)z)&Z;9n8*9)Ss?&2T$@?;(G?~1TFkQNfQ8DksV`<`VTObJB_YV1 zEmS7xkE}ybWn6vGtL~N046^Y7R45(@8wpk>SW@y?s;V6nn+VBiVr)qB0}GsP8rZfv zX|l|ViJXMwQGtHPb+;@1jyLh44i@J`>G}XQTmUgyUKpDwLtF3gi%?tztILjtoOYy0 zo=~UQMT32r9Wwp|^=BXxOM}dv)6cIQw#EsFrF2J)sExXcBR(4lMS-)UzbN#d8ORmA 
zOz4GH8I1MQMcUo&Y8+WA2&K*Fzlr9*BoiE_a)%~{JjXKB(A!o$suGDii?PwrJ)=TS z&XgKjf+$_sMWt4>$a<^6F;L=Ffdg#K3)nHf0vy$3F1e zH*x&T;|(4u8XV?*0u?Hz>6buDQ~56?EGbzadtUfUOC@_^@?Ambekogf_AjzsH16X` z?RD92va-T6fqRU8q(KpQBU2(v;{7RLXe93wOPqv&`#4O|03C*R=^6lvQw9pXF3%SHw6R?Kv;=U2q=FW)7u# zrpBaC;i3MCJR6CL%Larh?%%*8R~{wSTbJ53fA9F;cv z{(l&P{tN%W;p4aHc0aE5VI9xvcHe#|0G&{wW(Y+am0W`lAwE)V7^*~S8Wm2!4Xr+$ zhA^J!@TaX2n>XA0)9Pk&mh&!7x=BRb)H})JcHFq2p!+;mLZq7&ufl3>LT>ct;yVc# zbQXI9<=$@Li-`JtfewT!4OZ_z)S3wGNK%(Tl)Zg_*?f=Yww=@XnV z&SJ-|dpORqF4NWOlxg1yLY;($akjZTn62DWjM4*fl>_67MdkCRtGFC(cU9*0Siymc zN}I*Idimd<`dh*uiqGMDbHAJTXQs41(d~L+2hmMa@t13?&EULh6|d3|@NU8RaAc~W zId3@NDNExB>d6~!rTyngO3TSbbzLhNO^^Yr4jRchkSEX~v3|HQfS2Cwy+jmWD^OE+ z);vS+e~vr#YC}2@m=>eMwE6qjrLxo2YoUft@xf0fwCWiGsn9b4>ZU9DehM_x@8Jucp&v0T-(VqEMwC4ME=t{RS27Kfc)Km{PU$-P@6nU zWIS+>$V|cKNSPQ<>RO!+XCwzoskd0cfU9U?7)5V8$WAa%(`xu=O=(1A{*p^-YA&1O zHH-5$?U%7XqRqaS8f;kDGf|*FSW${txnh40MGK0PHDc(>Jdz? z0s9U+n{A`d52N~pEKlrY9(!<9ak_bOv&LkG?Y;RQ2f(2=0x4@ zb-djTQGQUQD%73+pSaKex(WZwe~x+&x!b+mlg1wl7q-2N1}G5ci^u-T*U-ZlZO)PG z(i3=Q>l=12gZYRT-JUkW>sqZ+R|%h*Q_0^d{49brV&oqz>;6jL(Nb?-tpTq{hmDNk z?@PR=oSQ@DG1wjlY7UBGnajY)QW*oY65&dDUGTj$sWgOR6DMPUzU=eP0(9nnj}+*G z6jVev=0!GUuIKe=ynEVK@r1Dg3f&^e!Em+TEKtA`zxnj*cCg?a153K-r+P6ZaV6SZ zyQrKgh3z$^3YPq&k)IJuO+giUgR`-@A%J@d>Hy3)E88(Bu0b+C{>Z2Y%{+Db-WG)F z2!v_mLBNtoLdU&2Cuzv7>H;|zF3E`p?VRWm0M|uPG+-4+ma!G$Ww8Pqg-?Ory0^N- zISrj{8#UUlQC@(KAlJKRg5C$DGgo>Rs=q~8u312?dd!3sO*d7J5nt+biT6zTTtrgG zXhm$u5zl46y17HQWNcIGOaAaXj!Ck-Mv=#rpZP+k+mJL8P7v0x`n_om)p5$yWCjWR zdsAP;c(1$D0PqY)nR7p{i=CC2JWw;a6>u7)CJ?`8_1r4?!l!w#A<;k7GY#?B(dM_;@(6`rTbPbvLXm2H%*zosvOOb;UJ^ePNH9)EL#~#0d167J$zvF+(&! 
zEYLJ}3<}%S?W!Qzl*eg6*b_%70eSQ1OS=xl#pZ<=)dhRG^x-*=D#Dw@zR9qeV1t|$ zF_h{#J`Kz;zXhP-WvML?+=~GiiQX2F1liG_D08d^f*87oK(c#{v&n#Xg<0G6ZDI9i zm@elRAOk>#P!LIYS9aBN8EG5%(g)OZbSn^1iY0jgxl*Pndh#dBmLvIwJne=#cDI@} zWA5Kw6ZySf_S;Ih>i(D41y8RHZm#ggDMxlxgnCKdd=tD^v+$h> z>a%#2y)WEEv4EUBbgxaBD{fa|dTHHBPB!n8_x~^i{+It8%^x6Hyd+0EA5{&_c=thJ zT1n;(S6T!X_COrC`EjPyW$q;Y7oT)Hc?&{SCj`NUoUuPj_e2XWT`kBrRjkW=l-wof z)aFz-c?DG=Z8|6iOMO*{cN@fPc-g43pnEz&qJygXx^d4xfAzGKXbrBC{5xyErM_8y zt2lNL1Y4V|DlKDgMh}*d5&6tINU_ij62Y-JU5-&TGx_qxQH1h?O9@g z#&v=N&O_@-w?TzqVae2i!9^r{+HFUCRedK80#J$$2SXR;Iv-S@L1c)+6l|8|t#!P_ znQ7>tn0D63Q-3X_DgHdR-V6G$D+)nfF8TcF_}pEyu;hxnOai1E({Vx<@$waR<`&=T>uIjpptVi6&v_d6fPZkaHWb!OJJrL z+S>Z;i`c*Q)@SPB&T7u=a32fg7I2b)6NGQP)0lmQ7~dp8&GE02&}JuJ%-HHVC$>+{ z%=fQ}R7DSA;w`v1O<)+$9HuV~9R!3-hd)HNlnFcjA}%R&W>M48zQr3mq|Q!WV`X%c z6j8@xBlxlp@fJcKRYqevsBm#;O=6i=q^{{R-T^&aQx=c0Q zuktYC1frtEN?%SeRpYc1wP%h6Bk$!Ax|8uD?yk(0HC%gU@A$2~_xGW{ihCX5wMDB3 zPNIP!-Cn;wGO?V$PqLr-_PlUx^Y*wzoptjgp9TKacT4>S)_=RT{U4;~f0ID}VLVA_D43r? z&jyOuC1Hj-A5G_IkItp8rz=7VE-}BT8ER-o9OF331O>~P_ma1bFlycil&HQe3x85P z|CNCA!}e@er&b@uX^TJx3p@9^Cx)XTHe#HFjy)O#%GQKfkuVrpV#== zBmBB{IyDXPM06R`v=CdDcVE+tw_o|Hn(GWXrg`Q9*W;98HkT#rWpUwx zwv!vmY!WM4Egf6`NLII}pa8)6*on3+jnbkzw1j)c+lyc&^+SlqttHZ@DRFBgu`P3U z$0U=$+jL#HuFaE=1I;-{N~#-69@U%LtvhJ;saDUT4%~(?zY2p?44$}EMre}ld0qTx z%OlIg$mQ$oj*_7V%q(PD?5p&}nb%`=svb5mtq$ieKASV!yYf7k^v~}up{Hg=ovmJu zE015qhys{;Nm@ww(w#;iS253^Yz;f8RVqbJ0HuPdLr`GW`HQ7elVqZC=&NA{28R5) zRK>G}hD4%(wlE`;BC&3!E{>y6W36AW_G3oEE%vVcu(sOaX;z~fSvyW@@LJmdF%yd;1h_kKleGz=toRl8Gy8#L>Qb<+%8?JljIL2y>m?8 z_uVufd%Y2cP(P!N;Dg!zrN>F8%aQn`XB?(ZN*Zc@$uEuFGy<_YI_tYYaNZ5Z*6xo! 
z$oxHw;>LjyD~T4B!nOB162y`klwja0);bNvnuI0>b!d~;M7o?qh}(zqhoaF~&9P!z zJBuN@U|=a7xZ{%(elriMB7<}ar9E|%N^Y9V!NT~VKDAw6u9uf(46xn(Wqo_xH!ki6 z?|tk1kDr+;Gak!cZ)7NTB{l{{P{|)E=>G9w5Uk0?xck!~p==P({ne|;w0M&_h zMDt_^B;)Uu_+*!+`jc{v6)TiW>n3}F;`gq3nem{p1RxO%tL5p^N}jV>MQ8eRoo7OT zsmZJeA73DigCAyu*1W?Z%K(>=R??qIKF^nJ3ef~MScLrnf!iV&vvH~9ow=<9bAp^$ zCk&{s(#SWx|8DF>P~NkBrxU3*@o@iy_-P^EETOb9_8D`|49+Xd~h=+x>S{MrYpr&W3W0Khs-RfqYp$$Sk6*;Xt%{h-TdbCbh+sO0w4n8`CG2)C4bYsFN=Zq8@rxOmABRI2OHF zid$J!jJ>@auWQTxb=IlUy4kfs(DmMMk}`kcIrBXO<--0oKQI0_FP^_!v|d_?2*9>} zcz<9~GGpenu-fZfwk}r|cue0>*0ivn333C#gwWA&%7=)YWlqwF;;YHMS3{=z^v+ik z+p&X`o^hQ2I0IcndzgVfxHObNH^hJZbIWyO0-&RNCaJW+$wUxd87CqruUzQmFLtX* zGP7fK3^@n9n#$~aG|$7*FZ)i?V(`@n$UG-@m})&j5(OlWEf3#2*{G}ay)2HnCw#&C z{d&BVtIzEoTWV-VE3%n=SGCHgL@|ISwZC@f%@$QG2y^;5{=i&3Twmb z6dEfg1m&F4j$AHyZ?`di3IDSBzw7T&CyTZ3@>}-tQHr(0nMaR zeVt0^`1LHEw0BNzqTi?5i7w9~2h4VL3-?NBk;!4ca8NJK$~A1(>#F>^{Jxh7E9o5pg-kRu3GX-?Lpzm90p{ON1jWwSkNijj@P^hKvx=Ce)Y&w{AVTP-DSgx- zHMG7en0Y9NM>V1urku?)&z58Ep?ZIO0oT7v)|6d4V?Z!hfpsOT-rx;t$}HO)nRvn1 zQ@6z?Z%nSOk>;u!4t&hF@H=4k=NU%bcI;>!rO9;UdtDgEsi=9>why=AhB%)IZmw(M zi*46%{d)D0Ku?5U_=AH+%{E?YM?n3?fzeqWh}RImBl?5+$A4L{gtEp3z59V zmU{i%?_SSm%k7fI_U#t#;jceGlQ+D#+->Imr=08h?W^7P*Dsn4K+O3-$e;}t{o#T+ z#F;!*a!%@ZSuIo14e zC{8FVnq*9ZJZv-bT{3{OF%Jp@C9&atHjF6-b)%oTt1XbOiujI^ArUwuQkKy*{jM&^ zK#lvZ-7lXYi^_`0TojI+gB=fhj<5MkG@=&WVOy&YMYId!>~Wc&7PyZJu5zph$c|IR zX}LwOz}J78gU?y09fl60GfAw~l=6MDG6TV-xro9yHyyeOc>kch>XNPiHF%!+nje@C7}W+q_p*ba7ggTh8S{ z)wP5L9Z29I!~$Tk2w{Fzn24YcSPHb zumex_JAOPMSj(~5-Gl{Jxi z^RTO%II*^8XVu9O>VBFNDK+7SU#eYVpWL{eqJ1OZPG;Bs>yKaW7p*;yI*zE_GOBaH zX>IHGh02B&r+9%od~%^y)VxmKBG_7jaX1vUHP1B$8DqR-t z)U|y1cRl0zJWn=mnJ0`eQ=U(^E!A?gNf;0s3r-y)cn(Pq3M)PzG0j0iMU9DB#%;>H z4wnQF8j=gG(yaYM38}eoU12BNCdo{qWSY3802wT<1$aaDL_;sBG%!Y1w)KRK4&^2` zw0QB_vy%0w<+F_+^?8rW{6&BTSpZOS6nF;z2qex-Lh^Iznu=;AQ36!LE<0i}gW6iIW7uic7qeVT!ho5L;MEiKXL zyv{J|V!I!+LVh7VSrrpyHkX9L0s>xe$Y)OlKk!MREasiP?aS78gD>dh*3Ke`ikhiZ zh+JdgeW{U>N~s7)!o1PM36IRm%h~4Lem6RebUopY!tF~f> zL8vesi7_A2;g1ssacNS58P 
zF6ossljRrWvcgox3}^GUq9u)Q{QdyEL^_r`y2cla*OVa*u*@0={W{?O6+&2@$ z`IJRk^!JlIafs!CR*;=eM$fQKfKb`4a;+8Z$uGS{9#3_t`p?gpYccVK&b#A(ek5Q^ zRSaJx=i4r>G<^sWdT=rDmA#cwB>io4*6-#(qiuG{tjh)S@BV~rW>whyUoPl>@h_D- zaO(e&nECOjnq~bI{~d&@qt81&D)o#i+HTh3Mrp_6b2%kiICEhoz21>VI2`PTBe^u; z*hyo%-Q0@jrMk~(t9E&*Ybkku&L${Xdne-X3$I(fE#j~T_|hQyM}u?OM;0RE?BGFG z*C_7LCfnBzs;nyUI})SCaf}(c)UHs1A&#*lJ4B1aO6O^+a$+Nlds|lIBrQieVWK>t zSy?jK?HeIpFW1obC5%;N(rJiaoH>Qj2 zgPBU+UUK1fGNaB<_y^w2aUmI5rM?eDHmJezzQ8?TS4+#|X_w`d}|kx`L2agI_^)6scrg zL@4;Q(Gh@7W`>a{SSM<-yzOh1P8&@XHI-V3JX=DBb_uRDBfo_GVBaF4-5eyF8k=Tu zbaSqRkqrc)P^nNo1Myd%JdL)1VtDC`7*|wjoK z#cH%kC7k2U4piKs#>3>#y_J+dGL)tfRm+CAKi=WqEOLBK0F6ZWsIHqDN?zG0g14H3`~@?a2pdE#HCq+l6 zI1;dtJ48V+n z-6wEqze~)@VqsH^zV5)g5P~LTT}oFKt6Otrylpkuio{8z z3yO_>O+*XRQcwVumt_Sy-dpp9?4N<=vSWpfdy)er<-M3O201mMD$y}iy-_orpep}F zWXV)pnF8}_n^}Y9Pi+6OchGuG@7GRPoAL#-FFDN;Tg@ud?oGo#Od!UB2hT$!wEoe# z+ugGEhI`}S@uh__1L;KkmCt1r`R6~aHNkBcF=}Cbo)?00UMoh8)GX`#RUZ8&J!&WX z?lq4enKBEEr=55JFGAE?|3@Qi&-S451WrDt0b?*mMkU>xhZNy=VmR@(eyRaXgzwKT zBZTTAaxP7Yxbb-`C%(128q3}^<4up6LDbrqZO16vqL_5=QttEVckh4aVsRnnkb2$= zNZifG58*R0a{;41`yX_fAhsj^g_mt{TX{x>-b!!fa!2hXfGT0#0V4 zX4bk@{I-`l-|5sTe=tYQH&@XdYg736;F!ix0Yoi*_hl^SX!`>rZg_N6|E=jEq@KA> z+OWd#x?J1#+_T=4yS~hYWZ6_)sZECQ?(>v-`$myt=5r6jmqNaFT-!)xPv{&ksyDMy zSq@_UAA_I&TM+s${Da#!50R`vGNaWxSf3ek6bzcZr*fKsNw=$Qtwx42;t-&+c-;mD zWLFd0iO;tM;=@69ui7k}k{AtVBicG>nmO?`MA+RoTLZ(^^nr|G%2sM|S+tFHLZi`& zywys)8}jC~RI_?#bE)jsssOLH8L+SRbY{wgxrZ#hn~J>K$o4DvppR8EiMzd{7v}C$ zaw}1|xI+_jb&0X)Nq*jE4iUV za@gtb?kHw)-l1^)7W;ez(ZBtm2p|VaHZ<91WrJ6Ini#?tO)BaG11|QchBnBo`_&Cd zUUvB&V_4qey8ilY+6d``-Xyu_75k?_R7>OoJ(e-gFvpQGB_4a;^e5rx!g(H^ggQNj zoJ;20_DCCGpSoU1e*fsXXldYYm|UJU{%5j8kmDdAf-0~mxX+8C(0r*p=wLo1Ac;*L zGHFm@2$sj$0v$Z>zp2S(m2^v!YB0Ic0m2-PBwIx)g3HkxQ;gL#4(Y=2MwTA6#C;6tv+aBAKt#rCPJpp>B^Eb`bILwpy`XK|BBS4@e!^YzT2JnDQRo zv=C3Om$cdoypnB*$U-U2ydOUmu4KCl z@3fRSN`{`cJKr6D4;}l)p7qLl4@s#?QhjSA5%w*(?S+uH1FFr(TxWtg)s`;xM?UjL z+#gq&!gBRzE<5dEDf4DU-A5l5o31L0tsG#cy6-)n;1*$hkzv}ft?tA5DLrse{?IK# 
zsz$NQ!qms|qGz`~DR?hHUDe`Kxys7_&3*pYEATJ>k!3spv;M_An*NA1v+kp*QOU-npv;Xyb@-7p?IR1@RQ?z>XyY`&}wd7?l8Fg{uOejnFtUS1iD#T$)qm zh;Up_HOkF&1&~6LF%XC)MDZ-Ei6WH>0_8D4%yPIzz+fPSuOp|4`Mg4=mYZ!KoP${* zxem_Z^*sIxM(&MV-m!c9G)mH={*4lJj?C_W!+ zDYKnD5idjc!|I<3yw}Cga=#lv*VsN~YRL*OwtIlz+AglYA>vsmYg-aaq^eu(4&kLZ zoDVJy%L`YtEi=}oG1W1-l9*!$D@4td6G`dB?Il@C&Ycy$K_}Yz5uR9~qh=fbKjS4T z*{W zpX;6O|#P-xz)H6;6f#JkNlg+<%<6%;99a}}$5YUzFOwg#0yW3e1# zL$^0TgTvPJVO;8Pr~q1fNLmR$W85PFqLk$N3HBZ0B)#weEsf}aJc zx3I*PX_X1g6z)YViP~G5U-AbD0|KUUXS1*j$Bs!ABd!w1i5txgPOvs^1T-3zw#*>j ze<48+WzK?38*<56CfM3OeXlFLz-~3{IJ#nKC*UL>B#Wc@GS*NUmi!H>;#Eb#DqRqkH1bTkUhzyGsk#r1(FodBIYP73Fomt9RY zw^9H4-o^8)Po|=W!oz~yYo_gLh}EQ_x$(q|KIq3_i+<`=8ECWCv8mp;|>t+mFVzQ zLZyW*db6$d$N$f}3rd=o|)rZ=g7QYw7TyB}|gD(9u)R=W} zXpO5F%2%wVpFO^qP~;z2^^x}S!K8KoS38cl39|ti?;4zYK;bj|R*8d$r;6#{I4i)k z-TphT*>PGjH~3RrSqtLP`E`e`zu1JtLc+YzB3^nzX`B~K_zwWi&qdNcEgxokTDp>n z4=eaD^-o^oZY4OkAj&?7#N=h_M63JrMs)#{nPpP`!CXxJ*P<$vb)6eCZM^dshRj=CDgukdpcr>eg2+UOB8}8b4@@1$duey)%6 zuST|q1oio3sT6qz*gXx_Uqw4GT|pS5RM8B7GoPnm0?Aw>Y~CXMRNqDtUpqu z+$KBBN)1sW0^6J$k&4```JkMCa9r%&z{WamgISrG@DtaK-rl8Z3@#E$%DNCOH5j5i z9kfte+O&#;jtp97769z8z*iNkILH=CE1`KdF_z4{rO_tY?PZMQJSq#KHbFPKENGCH zkVrfo2k@9gs+I^grh}z(Z~$OS^4^<@_1lE&BlSU|#SYk4Q0{oG>-RY$p}O>^s?opJ&0Ac1UzF_HoJ)N9A4t%@{9jIp>Bx}* z$qAH<;V;t_XwZ=nPNXPAfJQIO{LR9NL8Rd~)AHvV#B<6E#Ds9GvMX2;MskR{Xaq^4 z!q3QpLCMPKDp6dfcdkm1OcZ6mYWl*wOu$Z3Y8MU40$hyPvnY*p`}T86m2Hn}OtPy!{CKS}}FV>9YcmC#{5L1CqOjkocgJbX#VUK61-*eE^mk}>ZR+CL;CRmeZnE~Q3&KT+i zk6*``afnD#=9LsNTidnrr%S+wJuW47k#X3tQo~IEK%tDGXe0!lImm%Sh$@=0pwnbv zCUAZ6_*)IysFFUo2qG19c^=0UnOvlY@TJ-hEd(MU9 z7L2PPx`6h=_nb4Ue=OErv(??RE{U_q zXqtCu(Ca-iJH=Rpi58X~tdE;#dU&#GF3>F)=^RM9DI5J9d7 zX0ib=gjd4Qlv#W0tmX8U3aZ??-1 z_a2y8QUcWFuC^@bS=VpEY_Gquf6rZ$?@{;u=u%wvrfkDNk%r?q-;ISAzPp;2Y(Gwp9kS%10FQDQ}HN^wcaBFS|ZtKeV zELARuE1S}_4V#a58A|83 zDEXMyqnXXdvy~Sv{P#oV+78j3_^bm`u)ClMqR^q@Wno|pW;anQr@)42I09$*?sH7_+r*x@S5TnZWBf^pg4$n_Vk~WM#;e`Zi5JA 
zPuZFgixS@MyDQ5hBql+5=PB;WP_CG~?<0Zg_CmQjh(xEF`3qrhX|48^OBSQ`u4E1$L2J1c>AP&BQK@R`#_Z{avm&S_{2jhbmxvQoJPsWbh zUh=3e(nYH0eQ{BlWU4-XfBHVl)M>{V3n!DL3IM#!Qsz@7J%=g)@@|EQ0JP5lfWHNS(*EPMxmJ~SpD0Ao;x%A;BtFCjMyT&gZ z@B+Wc?y_-fW@*=pne4dKlXzxr@A7FgExM0!zmWg?Jfi6XPCwOKJIXn(VBR4(EeoG} zG8ZN&yG9$V*125o>?{q>bN@tuR0A`3T+q&A_Km8v9cM8{|IKFrmJm>kCsH=V2l|t< zjxK#qH!|D1w<=xU5bxaKAK|Bj!qGsgeJ)@4DXaW<5|RC99m`jyc0Lui6c&W%vjo-; zl)P>E^UqI9vWZvB_^-B4uj77e-oAM|rb$PQf?=opq#}2Q1}6q>CnO5!MiV==BSX5Y z`n>s2LlL+yJ1{h~AeIhI7DkUbP~b6;hTxn;9AXX>kffnvK7N*&T`>)n=RI9mz#I=b z&7g%yffJdk)JEWfKtg(qa=geG0^iJKt!P3D>eGl{%Pa5~5>1Qe)njT_(*PH?Dr<(`T$V?AY{7vpKJ}md_8DB>B;z!D}&_O zWH5C53|M*maYgEgimePfjcpeMsv0l}`YdK;N~KtXA8`q4P<45Ff+BRz8XTkc&g~t* z^Nhg23(GJwqANahdWdF$4-)5P*oJr1kICy6spERNxh^mt&zFkr0<21r&1;D@4y2Ip zG6zsrnsf;gu6tEj3lAclIjA7XJ;iZ`{n1<2txj|7<_6WNefm}ytD0e){`~E$FLPB) zV{M^<94I~yZ%)Heq=We`JoixxHc+}Hpq1%IEn5XeHx=b1ENDl>D(x8R?h7|*(=4Ja z+L@nRP;)ymy^%2?w|rDQIvKIs_5&|`7hI%Yz!IUAU|>!htsOC-aQ`_m^Dz;4RPxCn zbl+$|VDZ+CqT5zCJY`H2Dw22EBCWWReNY^^Vg>FWxO_f6 zoXpqL}#^m18p$ExtppXnGL?RPJ$f->T;YO*-HIP}Vrgr;Y9f?1z zpwyR{iT)roou+`22|zr*W&(qX3S;0**fSt&;UY5u0zDrj?kz4>9bgye0I4=tMlC`3 zb8EuC+lX=$*U5u<^QA$^(z($3rvM6_|LIZPe%S;cDG~fpLodCk+F(w9dd{>bzd?T5 zk7W00(skp?E0{qRoNosm#ff&Zj+lRZp99lbL<=ruu+^Cj>CFrdlIju-)KXfR zJOa>?>`jo{?r(qmV#)#A@<5N}=b&rN`@@rd2a?k9HY~DFS${CuDdm`}emi@^TJA~c zNJYHFALDt2umWn$`r3B-VKG(E%-qGvdQW~xb6?*8#JJ*V+A^R!)^XTUh4HUwO^F}9 zr@LC4{G!|G3N)8{bc+cp8~&>3OOf!EI?4z{-JT?H)zK}ux=(n~-aqNK!~QjdNuL*? 
z#yf~~X-Xi%8ku@hI%O?_v5nZu+%WWJZ%aT^hZz*7h$~8~#C*m8CZUdd#%^hCLBhx` z8lV>r=gu60*WI847JJBgsdtCk7i)D(BD}0Gv|5W5HqBn`i#-Z-5gW*QSY}DEEer@U zt9}4C zAahaBP_?6mN>^9eLQ_wmF;~9;p2poQ)!+#?CheBmSVWr@bY@YCN^l1i8mJ+Fk5lYB z5~PLH#4vGOjRoloG(F2#1k=&O@v$#iR8G^E8?=}bo}*I@zY{=KGvKszA1O|dfg^7q zEg$7N*Ooy~B6d|@qqUT+RD6`Gz1!hgqa0bnQBe zmCcubn#-haA}O@wc*R+hIz?X2VeLtz<2Dqv1)}umm-|SNcL$*H=yw`o^XP`qhKI8x zVi+2qkOx!^5{>U0EL?`9IZ!86RywfIL4i5$Ld6PP(`4DOeK<*~|2Y=qCaLUbm##K@ zTeq2|J2{=JiYME-tLFXo0Yo~HF0IEDv$xgRFmCvMOo~4$!y>E++wVN@usfDvC@Fl8 zQ<-&6(I|dK;H1Tc>jA^~_KQ0a^-I=r@3J8`cfN?t?Y20@O8YhcC;tO*#J2zQ?xCo` z{3DFrjL$~1RNH#_AkeC(!9@IKX|QSM~DN`Q)L`hFp-@nw`6^I>~V1c1Ay z=dN0<9K_L)Ao()gzXH>zHyI^2X*XgI&1nOD7%cD&WJ&nK*fz) zZG8ddy&3`_!Mw9#+r~+V**!caVI??C(o*APOT;=xm(y;6Zuz%8`PKF8C2y=dQ^?GS zoncnMkX)EUXVTQW{$rWbnHoUQcf6>F4co30&`-qbu63XYw(yV4%>o z#DhdfTL*|doVZifHDwMg>+DAVwVv9)wTJq$c;xi=^v#klW`0CO&O0j(U z^_Htm5R|)d#Jz{_Dh7guJU?IaJeuY6ye!Z=7wzOClIL7{OJ9tQ_u;p2-1cwJlfqw( zw`*>0{5TAsiiy!;{1Ka%mnT^8%;hm3rPZymw!RY-@@G1-{@Ulp*UeY%{drskWs1E$ z1bC4@raLeYZAy>)(Y+v0rWy{Qa16DJgTE+5p&uqmlPP+4(N$1!(VCqb0#I)oAyZph zLvxJ*nW(biuLT8(V8?P7|69Om#4KtId5uN^;*o6I0)tb+zQ7b9;>En6q36cNll0g$IY!D_I{ziig^kb|3hig zzxUtwVuwuapD_k~kIA6Nj|3SH*RYXYU2XiK40-!T3NyyVuXkyskvz;CVpx@`HGl;0QHxyK0F$| zmNb1WvFuzt6OFQMXxP;a%YkR2F)br{N#ihK3>xZ8Mx@vVU<8~wb7;da*>LGc{`o3Z z5ZJOXn|sgR-#|Hv3c(nVouw!A4;XFz*1nUPg#A;+V(P6LVt3BTjBZ9&PSfpH9TaMZ zd~PPx)m6H z>LJa4RZ-eGV5%gW37DJ6XI(9rPBJP@twezAlvytEgUby`<39x9)OFQSOn7YsX6Nr% zp;6!B5|l3!%76{;Jyqj{&G_(GU`WrPIcSfdg{jKHF#B2cUV$nRt?t?oHUM?ZpgAAh;HF#9L5?|JY4bagLaOk zbI##)H8JXH;91+{rhb{iNpE^PQRcd=+19ey%fLdWF(4o&_&&nLK81N9IT=!&%6yM6 zu$}NOJr{Jit7ToPoZrVOmPrK-)eT+Bx0EL_9a>oNA#U3O`^1X!2AmvM)f0QqyhOc8 z+dtD4^-YiKm>>SR#_Z}Y-m^{wrz=Kwt)Km3ANOLK)%9*S(P@T|GF&9Y6R_sEg7Fca zt21@})`z>_Eg95)x(yc`pkmKRCPo{wUvg+Ok_$f{^ziSbVH8Bi&-BY>xsgK$WP5ZM zLT)R{%eu^emxts3^8Wm<8Nk2%r_JImcy?Q2=D9ZJH2>`(=u`kDf6ol8p-bwZ=k)}7 zqR@L{i987e$E4{YG-eQFQ=MMg!w8UGyznRreyb!Q=B2_4cEw!xko)-;k<>eVd;!G9 
zV5+XI3Re~evi)OjHOeHX4LW44=1m5rj-$X{$QBSg+%+JlWe`;+i8kyDiH#Tern<$b z%`VNFkVLxMAI?L+?_CRl3w1ipTp65C6#vy$|tbU1JO3zrxEU!=tb9tG6yM^0h?n2s=nUDW2%(YrLbde0z2^;O0x^ zFthlt7}Hu*oz)i-F|xzWz*|1u+CVUBW^d%@=vg)0cDj?}s?y-SapQ+zeeo8=D^-Pn z!-z4wb0_XgG&&T`eDq%JNz3Ip8PCkAn6)a2arDUN0Ew$!YNr(uW%taauZXf%sN1Xj zB7BSrl;JBYobs8Y>s;U+;Huv(3x48<^<^K-_Wf{KoPUeeo4eWfCN+~>1WRTclqGqC zL@6QKfCBBJoO;$Er?3wdWIX$XMjMMNA*+rS0Q|LK} zA%dRx>?M5x5|8}3nk($AP=J%48MtfvrMOi(hy`4iA;Cr|Nkw$6QBt_ASV=%)f-(`) z&sLg!D^aY*UyOqxtkayjk@*O@o;9E4V6JCsrK)GXWn=bfEPjf22ix6@#RqwlcekAM zgk{I|Q^jqY-M(4a=kRf9mn3LH17IjsY!qNHTjt4(NJFj}xIMr|X-duZQzuy2g~|L- zJf%2`1yjEt@^~>c3~QT`h(rt=?OK+_RLT{ulpXvO=ESgV*nYq))^ym2->4q{a$Aji z@C)98?N^YEC7tSa!JO$<$o6}YO95M|t&TYhGh^Z&*FK2;ZPfbArR!uu$f!u|2i3;!4XK}Up5@V(0{>$8u}f0%T(TQES4DzJha$pcC8@vq7vSXh#p@ zp{Q*2;9RuRNSo^jYJy-#Wtuy6wUY+LV};Ds$ixP|*GzPzsODD?V>O#FS(Q&yXF@Al z3A~ZEpAD-~tPUfyQ@9|;iHd?AYLwx#?4Vv5yLagk1A3->?aj2}@#|2{H6(mdCw;07uP0Hw6!&2*d@vis7rA4zn zJ}g&nH`KmU@@CiBXcT$3MrFQnizN;`J!)KOz3{n*+sS7(N-0nN+sCYyx|s8G^`V>$ zLbY*Ksc~k$-bMV#Bl2HYD~c;`sIR!3!$iImu=<%?f9o;y1`t(M68+`>VeCDlnrgeP z;hjPVAwUSlP}EQjJqaCA6Ka5Bg&QWU$Q z-nLs`&bgoWeBbkpbG~u*pFOgF>@jx6ywLIaFl~}!Q^VnTM@7xO} ztXJT_zBYVnvUb%$nynZidOV$M)JlB)&gcJKk%52bSP(#7(4)ISPTV?5^xV}4iweif zfpkGFqDW{AR>|BMS`2{pU_X$&p?QI;r0674kctq7Cz|kV_4S3pD{iH%6uNjKYuN6f zYp46SsdEZsGl^Zo)HXroUFGhIfEWhXi_$bR^T#Jt(?f66JEJZRLO>iG-jNDdRlF9) z$Op!fVBpfIgZ_QpS-mfW557Gow@^*;;5kTxE9>4}Zgv5w@%td5q7-Kn0SJzxGHhFT zgQp+^JZ%e*^UNF*Ei!A`UHaTYYu`w^0c*tlS*@#QO|Ec}UQVh-0Fs0xi{eLn4c{_B z|^OB@7L8L3|u!`>=FCbPG~AChm{+PlA80y&Myn; zg~QkNCaR*$v-%wnBGF0?@v^^7bXi+`7T>eP!n-k)wZ_gk_aSreT99v4TsvY?^WJK) z?J0Zw&!KhVR=J&QvTtd7wk+%6VcPVO$gJOzx0^kO&HmdCz(4W7Y5CP>u~^I08$D0U z-mY}x1F#aADJjG@+v+_7Zn`*7p9K3FDvxC(g{R03PWNGU7f5IG#m*Ha)mGScA8g@P5Mcb(!3-tlRLXAv;ikS9IVL5D6Z+8 zbRQAr{kL*J35hK=|iD0nYNbNs0J2QDxp zqF`w>(t#gJ9bU5R>qH_cMj~lo%k*SS3&$NAkb&%isV0T&+l&W`Kn?FyOKHlsNa_e@ zB&AAKs?0*MfvrG}ulwmohNVUsr<;$XIt9cXcWBhiSK+FkD}oSPnwC(J(v#C83AaMS 
z52AK@9smnRyx52JuYKL?Gwhx(LPm;@sKewx{?e1YS#swDSlKA7F{i%6Da-1h`q%CS%-<-pJ!Eo85#zW&%k1$HhUO zQ%ArvD)V}M8%^i!p$%xDoF{frwgv`x*Lhw(e`4$RzOsr!n9_7@#Jipu3qSdI$tfWa zS+{0)>{5MV`fX#`#gz6J=l+;OahA~8LcO_(Me32*TMtoJ#>+`8;L|ULBpmrefu?I-KWNyC3`S@&s|7+flPo-0eABS0Imfyoy?(v zoNsI6Gi_28W2~|z_uC#R&;>N1%9E_Xk*vwf#iqA~jT+2&&UoibfsR^t;kE>({i|c_ z%rhC5vSj*+S2xxX{iAyMaZfZ4CB7_a*-Z2c^4ptOVBmY|?wi#RHoQ8}totx;!74!5 z-k+IMU(`8xz4=yRT94Z4y5kl`n)=7^R#`<2h!IKWLZP&Jmn$LlF?R1RZz07pi+)kgNLovww5+O7CI_tGEsl-!!MTijl2qTGWTDf$%&kmYUubv5 z(=fTAobLvf`e|h*-|Q`}&^ELjiZQ&!=Px{887pM#J|1os5j7@ceF>`gEBaCHy^oh# zzegBKojLYhO7mAfB*SV!vP$MIS+9wBpHjN6at6)0kJc+edv4h&lQ0 zS(;l%A&%S9k%m^MQ{nV?djh@f~ZOpFUx1UWMHt` z>HYglWubo0xdG7t2N58D(4x2*2FHK|eJ49#-8cq+HKdJorjlrS;EKa-I4lwjcHU={ zE@_&5ZNcxi{go(20HSt+-){VE*TB&6H)*c*-0d z+duE|cD;MNu;%Bl{@^U$k@&@q4dFhjEHiN_innc-oFl)>?$h6^E20 zva_x2B+6b*OR@P&54^$KOUn+}RR#Nq5+`}Y}K)Q3dn5emggJE~yli^pYGlJ%Q zB*4NV&28857td$&Y@lXh6MFp~EWM+grt1Z8e%GRjXqM; zN8a-c5B)(bK6?H0`|m%u|MEu~F13z%&p-IytX~lyrn55{cx=zXY@2%xmn$MVy3@r! 
zlzOMDF%zhzri^cJMI@3(3)q7c1}XDq75U!gArIGKeDsroZ%D)Cz``?(W7A`e-&uOyMzdV1WGS2h5RNhiaz0eZ(bM$ukOv*}qdAM%d z@?fz)dsw{nZ^ucBMW5nHc{~Y&CG8R{)kDh*;zKlVy>0Z)O&&B?#A|%g17hd8yEUTl zlJr&aR7jt>69~qI?x+DlQb3GUN?l+tN+<<1lqfCj8OouPQkpeQRRlp6WQc$O9LRM( z$JwPIl%?6}QTrv53`!a>?~SuZFdq4xu70vVMVAsYcI#7hm$RUOx5Y>Cq%rMdU0N5K zgH|{qHPUOhjbdEFvYhPV?-^)K6wTzDXZz)UIZ-x|?Q^*y=f~Xsc)7f3a>u8i=uyv> zXY)f{fB$`Mu`vHzE z^7&D!>qdUr6oV-KdLa5w+TKg_CW^M8-J&KyM3%x=E(04gP9u-#(W;QJ+OcJ!tM0GSU%rbFK&Aw&)0Tc z`Tg77_u{{v_J`~_Qx?2aVeV4=>b_Vz+zb5V50?N#pB<0P*vE}zEFw=ZhIu_=P@tcl z3x$cSgwaWuO|(LWzohbJtdf~;kp3WYWrCiM9t4wA)s=LGXv6(Z{&x;b&EvTob(l`b zq<4brMV6(xaE+7$1~?crnXm%%0xMk>+E6r%XNMR&T5PoxfPVCt`Lys!@6Y6hQ-ONz z(N>vhE~;;S-CEkKQT%tNa-YvB8GadkG2-YO$Pv9uNk9k(lVQ}D`bQh$B?DXpB-L=X z3>QkFXwT1>s*`2;k0k*uDX4XyFo=Z&;~NCp6tjBkC9i`Z+)9jdJ(Mc#T(x!9bhoN- z0R*0`MCK4$5JHZV^+1Lhcvt1)?NhR6@9YD6dx$x9om&!oYAk43``1dQMt{os_}VM7eBlp5ED<`Ak6hWk^QA&83q_=*1ae(>+!%JzJ;grJiFIFt?*tk} zBnu7n!AA&uEOR=$Z?>&$w`u50}ouV%)*3ypfpcY)=7Du*c%1TRGZVY7Cjnox6nVxFj(m8)KA2 z#5H}@?~L+OlSu9TY|;z47G?C(%qR}0Q~+bQ4@8=E@B_k3b9H77epvh6-2SN9dsAmE ztmAZzr?CIW>1mQV`LMuB(R+W}HT9$|dRbS|z6Wl#51Zde{;^W49Do!>mdLp^ojADq z?4f#C6+fP5dw=SIfdHc*5#;et6_I9`5_K|=XL7sWO z`|bM7-Jv?;!Sh~@Hbwhq{}}Fa^l|l3O1d%}c1TSB3+cUok!IF=ExYaRQ?lVwA4cC9 zbQ|+`nkzwbS3J7UxF*^it;o;KTKn%xp8tjY|0n;60IdN>PN|I#m_8eM7vunNA!y=Z zgNw55NmK23-z5oJ%_+i3p>_d|ED{n6<_2OxzL!@cXko&Z#Y7#@hHjEpEl14$$0x$>@CYJ_lyQ->mT~+Lw zmAtqWEEy|d!TX5p#h>NjsaTlx%cae>y6z*f@wa0Kw%7WRSi3Y)C}fha_P#+gO-VX*`|ZP!C ztoMXRF`dWU>fgG)*nZ%6-Y0GA&3LeEk!Q~IT3R<>^~6HM$p5?y{)zvuqn|;KoDy1o z)^nBjZp(NF|6*n*^`&K$qT=TCG&7W-`G)v1!GaxKPyqnLk|hJkq%;x6AOKSpy~tyJ z&*HA;1{h~2gIG|Z%m6}LI`dJxO~If;iD-ASRdjYxV#dH!R%Od%6&+Cb>r(f{D$^$e zprX{1ax7pEwNsyZz&uBN57kA2dIKl2k4mjYGRX~luMR1+3~ajg8K+J^d{oZd*Y&(~ zGz9z=-2@AKtME1a;XB;sOx2CC_p(ooPQ3e7S5`6lEdG13F*Wa;dR7bX`ND~jPsi;s~cQUi?CYzEMZb;--mkAs(d>LNQkg|<79Ewy@P+p{?ON| z?XsA76_wkVLfXa^*6px|_~nQ%C4b+!^Q-%Lfu z<^483)n{rU#8@y2f1b&F$-^RTk{ot)0}3Gh{-TUztbnc#(+QxEtKW`$9Z`Rw^YbdJMRWn|-d(1^f 
z*q6fP4+dvBW8#+cE@(b}`y4YGG_JcGj2(76*+Yh#R& z{~-zbPyB;4-aZ5C?8N^EJu6#pA~dGMKBx#5s4U7rl_CY)%aWRqi;>`-qNJT+VlGzfFbA5c zLTzR|921*^zgQEo)SIMm0W+rRS;SE`UZMV>3|WC)2&x>0=4?5y42Z;qJZ6}Wh%!2Q z9Y^*}5E;*ztc+4^#gs}w*d4u#g6FWKS2JM9q>^`RO&~?p%rFC1PX;!R`?u8U`HJ|OXCKl*B2hrIaT0ug>WX_3>Gk*z zinK83U_V#L>X%CI>$I!$N9R@tT3QGCSvV&eb+A>&t3b2fiAMd{eHzGI#q(Y~lCt?MHuo>>VP!`|{&%iT>Y@{`}cK z80-E1@3a4Y;{CVu=b!oysHyTE>c$DNfo#(?hkN^0L4eT3niuz-uXq4w+*_~1V0MDl z$=V?(-R76XJgjj2M-Cx96$D;6oLMVWAY58WK0cU^G=+x<30cWkg(|N)C{K6-nrj0UxNJ7Xbl*;E|+}d@BAj7ev*I8z)gwG>RmAfIwcmXkF*Oe_UBhG3AOU zCf@lv5>%lK4j*RfD9YO$N-xhF#gEKf7nDOQ5FHncgk4OgUJjc(6tPAzSR5X)hid|w zL7Lrj9(ET|JH1GBHfr(esJ)ooHM38o}vscZ{D?jf| zY`n95&q5anev?1D7eEYw?KAr`d*q$~w=$-Gn?!1~7kzy}(8%^UI}2rt8|Mo><(!dQ zpiR=@z^E`%IfN#2Um8r7Z7Be^?Myd=1^S1qM2buwPa*|ULU1;wv(RKF+`;OIXlTz@ zxA7uAf0VBYEP=Bya8XzpRSWfeNaz|@QT0_o08a&8cNmHZ!Q=-is8TR=dYx%V7>&fh zB52!SC)JMVAuE0PUKmjkjhBUw?4zayHB9d&&air=w0=Sb36=pj@l_=9RMElY!kt-; z^r-HPl|I=ga@iXGhgHBkUVpN_BnGn_3tU?rA%j5X8Q@Zf9d%t1naK=LI>IOQ_g8pt zif|CF;;Y@XKC>E8V_7sS4J^s;igA8sxu6@}n_AR-#?k%cujALAeLXt0@P!iT1oFC2 zZQ6RGbL-vM=GKc>M<>FujxsF;&=B^t=e$AM374wMYN!q|RA1Gm`}$kgzwA_N>s9{$ z2><^M|9|nHw=!Q~b6!5l**xH%Hy)e;K&AmT8aX--DL2*7oP-Te_%-~Zl6bLXsP8uH`d(01QPD z48ep%26O6-o)S^wf_R%z zj(D+G84~~pOO@)G-?Sx{>JKgm6{nrKe>L(2wzZHVb~L#BlCGJ!s(oHQ*sJ;yb<%(k zO-f6-I5xEs(5O(xqg!x$OU>_dv}f`>`Ffz5ecef+9c7`Ho_{VA1YJwsBiNAH*3zo- zO`X`4AM)3My@OywA$<^Jf=-n;E^o?3IAEcw=mqx_bxq#lJx?&KS zw6E8SDCL2+cZE&tt$CcK{xWgocH;$NOMSk^NXukc)V_o(s!}p z=jT7!t?6oxdI^{iGE~!!9J;hE-G1cnc6dWj+P(co*`u1Y7-P%sT>tYMKT4;a8(ifJ zPyVk~!9VrS*Kc3%xe;nSkl3?Ud2gRF0)SEKp=vU9S-y3~F(!BdO-BdLI?BUng8E`^XtW**m1SS*!;>(J(g`pjar)XGTn&vWa zFq?{<3GI04FQJ^6iaA-=L+fteGwI3jk^7p^FXK5lg;~Wq%+OQRm356$$Q)dBSvh`l z7$wtMeK=&7bm^-+hy*+ij{#Dky9tHvfd0u0cNkPy&@wxynZkbFp^818e0H+u>nrxUlPvtF9sxor*iG`P?X?f_IFSmqy7GiJv@O9HBpX zO|w1xHrKIqhf}nnMBufIJKKMzj)E{{pe9@W-_r9x5QJLt6Hbp}c%b`Kv>rY~}A=5=baq!_Nc?_144yE-?iX=7g;@Q`vqe_iB=#f*;=Y zm>>e=PUJWxl#&Zo%j)L93bLGrEDN>f2b$t3rGQ#bn+vHkl&Eu4i}ef; 
zj>qK>oS}qIloUZHzR@soz5%zl{==tYAQJ)v?*YMfp97QnLAP{A5RZUk_Z*??3)rzQ{NSoOw~dtc07GL&rlQ zW{&<{pUV8E;%jeKU)vo_La}L+cmv_It5GD6v&k&TMZSf@5p6iS47yTDgSVnE3^JTm zvjms+%c-OKh0``gB@re>hAu`xT`>7@PS;9b$3jD6M!5hSUy9PkR^mgF4K)p=eP@6W z5{CR*HDp*wNI@^OUrt;()grY#ZN(4P%p8a!oE@<6mjW^ir~VQ3tT>L$Sz=iy7G9SBj*7N-u{&*UZn8B|%Pq^f?<9Pc6; z944ZfJR*N3G>n-Ooqoeg7wxWpGP3AJM8w=GtEx#MQv1@HZ&#DVfpbNlg~ZcBu3n)K zueR%JE&tl8o?V!6vQxQLi3-?{%OBm^dVaBKTg<^_PQUBQn7CJKV_2A7hda%-Y5bf3 z^0o-|znF0U;Q!6zWrXMno$Fsq5?^rqT-1OyALD@K4=0SI^AFdDl&)9r9_hSS0u6v2xi~JF&iGWAtpy5%*W11|hnAM{R+LvQYx(+VRE-bu z5_^>G92(&2rEnat%8Eut4U=YonGFjoco(DPV-%5wTQ9~^0xUAjWLn7SY)uP-wS5Sv zQq>3-OHboE)ME)o8A7Tw+~W&aYw7^vu&^^#Uv*NG4;HkuQ+K zm9Qwl03dOQ%H30Pl(1Yw^s(bV)zW#89Cn%y@zPhf{Uw^Iqk(qiNB@}Sey(S`T5=M~ zcLb|e43f(xas=RyG~g{yUI2I4F!c;{w{P(Xy4 zpW>B4a37@_gO3B3@2U2HgZvJoBn0*@f;t#`*?ty}fvSKYTZ?Q`Ch5_|-bp|T-iw#@ z@;@8{@)#2W;j*+W*IiRLP5p$IqbHR$CA3}VUUtaJ$9r=3CTb+L7+&4wQe=Ab&Kt(E zK__+1y7I@g4l7vS4tFWzG23DE_;R7ui;wGfP95W)SS@T!^U=OrEdL{{XG~81+5S_= z*rDPJZ+h!_@Cg13z5g#uMv@MLK>10^DTe>Y7kg&v)G z>&64lEsIN=D7^?ZoPf_5_~E7fS5?xnhVjLiXpH)n`HaW`aEc5gG#>o{jdlm{pEw${ zrq=8GU7HjHZew~1;_f%rHK?qZrq4-ScVl(Y_BucgM6m z|2!=7KQeLQ?;5rLq4xP7InaN`e~6al;~o7g!g$4gj?nT(H=fQqM3&P_=rh`$BGz}o zQ9~e7*I7UJESgS9GZ#Q|6=;&s5>%-)V^KJ=H(a$To~r-NH;ugT@{YHQDsfhpVx6J^Xt@<;oKm{j=5QQ>K0z+~@fB_C! z2gC?;4n9#FpO<+8X09C~t)$`ve_?vsK}(zi7MIYpjDm@BSu+lr79PW>m7ok*s2Q`! 
zQgq3f0raK_YAbs*)19X+r&sR7O zEOw7%#N5*`^427pJ+6)#6R5~q%G9-f6f>fHU|)Hjb>JFL$TP4Rb2?J0M?AKZ0+ zSNHB~9cI4r_xywS0%X*(!LfT6v`&~N29sQd0ij$XnJ*k#!N~kOj$B#?r+1l(sdisl zrC}i_kx&rp0L&`M0SoQ0^OXK`=yj!(&tw$w0Far+aLaMm!{AKyW(o%+%VrHVQ#vsl zL^mCOCDq`NDP##i*F!`GK&(8mu8#hk&6}dW468Wzc$Kt+?nPFx!+tiEh)O{~0Gi}S z^4-HpS`yIGB7aRBW{9_n2Sru&3V5N=)Up(zW}(U9R7t^9GXyHICKLeUdohnd2p6La zL8EbCM=6Ryg@!7I4EN5Ya%rGds5L`c0foq8#Hi!f;CP~yk7_>DQnE0rIRIWuO3JEM z|Kxf^A)ESK&F^La!c+j1Suc!(i$oe+oPnHKyc{EwCL=umV|qZUn{zBQ?#R#R#3z#R zknj`z#0EJf$L zR~O4%hsxI6N?)Au?z{E#Jn_HfKK~c~!5T%Y5S0%su@W> z_>Jtj=fzJ>KHY5|$)zdeL+jlhy4rG?xflc0TS6CczE>`zVV521<6}}l0tI@)sSpEc zTdwFB@N<|dil z4`)6+>$-lA@;X^2fBxeAcdv64lH#9Rs|8;CG7)S;`F@0aE4K6S`_-lG_xic*y#;a) zV*c9r>%l4QQ=;Ko+g;(>ZtY9Pt{;-6e zqfyQt2gyy~jFd4h8wAHVJJ<~sA{{8xV)j7X!yMN6%a7+oKA`m6#~-pjyh&tn=gm3P z)`B$XN+*70!Y?YeJ;}mTt(X?OhIormB&g5M@_N8d`j%v#72*)bFUSUuBd`?PkZQ@Q z_=m-ck@5{{NQ^m*V9ZkEwm89^;TB_Q0%CxenR{ouszM->QM%e!X(`;vV>7Vil!08P z4Gg_Q|BYf8p3Eg_Rx+cr?|XAysz&pa)sxT`>LCAQI7dN}Wa(9PC+bm?v&DMM`FW9X z$G;wDkeYmJhTMp2BPFB9N{A|s@F)&0Mk>g0s&;DGYAc+gn#tSnG*B(u$C})>IlbpAvh&BiZx07ux>r%lxhdC_vm;bBOTz3j&Q1EznXY@Ax+k`+ZYLHMr;1{pt<4|n34Oy$q9>G{ z&hBPs^kI&OE95+OlrhO-R*4fZq2RK-moH0g1=Lb;x}^;$-xPXa+11L>i}lAc>P0D& z+Ayy{NIzxj?K2@(+mBccOv!Me6FMC>#~R8qL8%%h&P zh=e7`D4l(O_pnp5x?(_5jsZjrv&>p%t(3yeynrS`yE6PD!I@;0ItDpQ!)kD_1JR;2 zA{J3lFF3}%4{&*Kl9U(rirAJRbBzHBs7luGb72<4P#0H)q@xpySU)bxdcMoZA`}s3 z_DCjtHWfy5HkJg5L|n!4-L)sNi@DHomBmyOZ;{E)AY#Cxmofxw=Iu| zyeJ8J)}0ak>}qkm;gOuzZFhDgMykq|FFpvqedyer?a|ez;fd2;Ph(^5PEpTz75>%q zYzPte{{sDg1pa?r|C#@QHRqOh^#4VckMCF5dKd5=ph^k+>Mu1*C0`z4g)>yY2GorO zRSLR?0R#)%TYmEjn~J!ncLeuv;IEm`g~Lz zj(Pbw8G;cCsaLGFD%A4`f2qkh)A~NZ&2?^;kpxb)LYx__O8w!D*hstydtmx`tU(Q3 znII&!W$^Q~OAO|@RP8AqE^W-TdR1RXE=YHKrXHNP>xV4Jzbk3`#IqIJS zzDNJ4B%Jz#JhZ=bdG1w0ik0T8L!X{_-^PUi0upLWnsE}Vuoo%LKbor7M^QzB3z0m@ zeL2{q!tF-?>>O5Pq#=(zPlSk@iQQrnX(0j!agItEYzs+MvI9mz{TQ@Q5t+eN0+I!b zsWl8;Qa=_ziP0n==>rf!%t)@wc;0}`j_Exo$cx%du5bW9v)l8bFVzWLm^uD=XIN@Q 
z7B6>1rpg$OsG-9zKcejxYep;Ry5C`7ox>cg)vku*9N(W0N+-9}jA}Ki8OFPh<<)W? zcKp@whRMk^3&`Msuzuw}DQRedgISf$*FZ*P2)M*q$)Zu~xu)*y>y+le-mLfSHf9|e zeruyD;xR%l2=+15fSo3*b{d@T4=C(T(6apbXdZXFq{)`yjhpq@X^ZX9yX@hpZhAph z)BPWIk)!oj2*8_o1b}{b#P17%$J>bvhV(_WfUf68>!e`JTJ$!x*x8 z#&tYe6!pKA|NJNZgEhab!gONoEzR#Lsx5Cc093FAc8u-Khxbkn^UtyhEfZ!QGmvwh zaSI#^e!jI4(gCKln&E#ClBUr2gC22(M}9=8|F+U(E`YSkTw@kqcMrZ&SYJPk36Z3W z)9RhaDQ(`&w8wRX-7+w z-Ep@({NRP=1S8|lb@jc27mHWlO;6trTIM(%8Ev>0?l3*lzo%`U^&c;xCfp5b!KwHYld4eDbMMofeWcZ8H4}}Iqdp1W9{*;+y zlAx|Yayx{)rZ!S-ZMPkW@b@TH@Y6=+8Kaja!NDf!n@WI0{@sHpYWFOE|r!d-|&4jm0 zY%ENoNM>@UFAq#UITW+rh$v!}W54KU?fas=nto17kQ|D@bytepmCZH&nmW8+?tdyo5w+W!2noxhVRN0=is)!rzf8TEjFa9%H&EAN2LO#$6_EZWLWultEr7yi)jC;QPXJQmJL_a5GrXR^zL*>ike?m|$lcuBcVQAlI6yDIIM z>4zo5tH=NRrL+0Rbo(79T+~0ebBE>s{=uKSrpu!d-OWu%Ayh1gJ3-tMtrl*nBw$e6 zhBRH1XL}3gKllT;l-#(n9yDD`WGuFC+c>iT>fe5^c)LSoKVAHIftmyci2*L$(B6DC z@#KXhrDN2JPok`Buo^FCAe;FCP_Uw>u0#}(Snbe8bmYDQtMvv=Vq0)L-GJgJch0rM z{K-SW_7r#ZGx!t?7o1MzpNv}w>HP(`S9@jYk&4)*SoZQSg zwf5#MYEgFD)8?9>n&*MeQ&R&mUc&G9(G16no%EG8Tvurd?9WAw-O8Gsdlp~2TGFFv zt>UN_46acgi?~pBhJLh(hf>7> z$7`4GWf89}8$2IgL#=70Ka~x3grfiqfVL<;8v?bG-32*`B3R%ad5b0gO#Bk8v7xg1 zNM$Hy`S#NX19W$jzZ*`Doa}kC(3>9<{de)>!5yQB?R&WwQX8im&q(HvO`C+x&b9q7 z2f+Tp|C`RoP_YyC_JJnPSMTXQ#^Z(BRX!RJ&cLfhT89YT>JpCJ)A-#=$#NV=dn^nq*JBFAo8|I`3i`cwl!EAma8m1s`kd1HYMJNT_Nfxgb2=oxRrkhVy@Q6V}UCubc^jwMO&9&n3RlWH(a%1 z?9xJw$^0vOy`nG3l5@)0*vF1qisA!gNO~nNuIfyjlf}2gg5CBsgsqA;8i<0#yScbL zk?nZFW*ZWR7@D~lY^Ry(<^S(qR#*(uQ(Jo+R%Ed{0rM*V8d0d*{ig-_`OQeP3j9v-6NkyLk4pE`LDfR9QPhF?G8WJ;CH=; zI3yCk?Af*USF1i2FR6Y_rT(h61s&3|Y% zg!JdpCti%94{+06e3UkoOp8e2{cTp#dxT(m#kz8ug8C&jk}!EyWn3VxwvI3r3zs6A zDYMY7zzMa;xgU}HGq`m^=Z_AZbvBA_W!G8kaGQ2t9c2OZ6~t&$p-+P~ zN9yS{;Y13AS;FSQ%+f-+ zDvV77tA%XPUz(n!>RE@%HGMUGp;q|buW9Yw_uBVu5Byt_hbaH+GwZ>>sVA~Do1jzZo1a%BaB@_$pO$@M+HsSOypcWuIh zb_$sw9T{DWy^NUbheFw-j=#;X&&4zIY2P^X5yZp5o#iLMv?MA6H+&2VT|LzyHa4lA zJ{_^v#I5>|2{uCJy74al7tN{Of$Q%rv8XzPAX+Hxz9dsEJX%EDA*{;Lt?JCZy>R=0 
zG|~L*6uhN#fZFza$GZlLBeF(BfYsTh)nJSs9D5YXsJ^3S^=Mu1(N?(Kr<(TMxb4rM zAFkxZ1qvX=3j_pwZ@FoDV+QBVk4YYX;+JIkf%Qpm>T*zQ;fCB57c3)M$gC_=Cmqsk zk&FKpD=i*yvE0x6b*+gt^Lb=yXux5oqkmJz?Gk7Pw{(45p!fIhx|ZGc*&Hpd#Vg2d zaCB3w$1T_xl|@aZA@x{sdeinKW^&aas;+d^38I0qt9>gQ=4Xt-x%|bFTcz#{C0S@Tc&t|vLV@a$;bgR{U)Q2j-$zHrIBkg5L+LO z5$yR=QWMi%l6=D!ZG6(@Ug!Ve>^;MpY`3k^G(rdg0|W@67^-v%fG6|}%qRFck58DNpMFg||e3ZwF96cqo#&fuu3>c&YoRpSULQ6ovTuWCLH7eEyk(xNg1 zOEUJm-GmQ)7x3xI*R!|eNeu|2U%@pp>SRe1oM$<*FXcm(-t#^#H(cEy6aj+6Ptd_G zK$Yv!BRD4=qG5tB@CX}%7v(cZUZYagL52Xp>p3$%Y$?H)oQE@Uj|VVR2!n~0N>4Ab zx(82MH$+CFns~7bKm=NZOioQ#_C5{FX{6Py;`HqR*5(MfOcANvp5b*w$G5~{fK{LE zlp~9(c;{|t-ngILO<>CV?HO8lcA0v}w%!Z%=BatFU{#=a_7kF``c_KAY)Ss`Yn$yz z898K8cw;92O?8I3N?O7^aN1l`5N0~2P+73hb0N~QR9K`0XQm+|q8#^nCFsCCjk`mlg;E{L_drBMKXk3Es2&ux{!#MM5>0D7tkDYOk+Ndj6&UvLdFqIW20 z8LHkXfpKB`ribry^(Pm|Ln#ZQ$P9Oduu2A=l9TwvJq2vM?>!cth&7PQPUY(6IqhrV zv4!reK;nvoE=vXq6^?$Ki;%vUX+vvG8SuXvEKV{9diEbkC%Poi3~nSl)WJ{WX{k zBtkS6P#>I`o@t-uI89=IbH@ms*M{( z=pP`Xt{+nV)96lpvK_a5D0h<(yu6IH1Q`)XXh{Rw*{gC4maZRcB7Lad;1$YBgo)fF z`=pkVjcIA<$P|}RiYu!@Q%nM(uCDDY9=Z->1F$1UhXkP%7e7%fz>FKKZjXdH`%O%J z6~tkJ2SkM+G>N@>FkIig{`fF3eKgX`sJ-$0NG8XX3XI3uDR4CR^04=jPlAx) zo93<0H{U&~bc^v*v{S+{KRR%6N;^&-8Tx}f&9_TTNTABC-G`m4)_QwrDkE6Y)^^nP z!%$6ft%Ava^L+l@bMT+|2fveB;pOtZG5n(EQ2+2;-^E;O{=SzlfMNh%bq1KD@|#xb zIbSb*w)6(4;mt$~fmahR5$eMi&X?CxqYA(O1?1t@6XYO$IA+It*pn#E(Jv#ts|wXh2_!r!RK8LX#{Bb+@6SwPV+4nsG(}`n8}D7G(paHI}Y~7HTLe z3MOiWf=vj!^Xyhyg2&6$OOWAG0j&cZQ)~TCA<6(L`)Plcjk1H2Y&e&S&ozf#tO=fp zXM{R5N7nn#4Vj4~?mf2KC*@T$@Zd(M<@C0t$Yj{O9 z^%9@`D2c7XNL{d>w4T4qL?Q<~?p_43&A||}N-m&!a5<7zZ#H0AEw!yRseE$%L1oGl z`8uS39LQib@UY0KQk6YQ(iJ^8WmqjsRI1Q%c;gyvxl|@ROxmSj>42V#-zdIUr zBl2tI_qK~)hbkw|N=jkIbOF?UX1LH$me7ZCS~?WiQk5(K*9QRP!i}Hp>^eHgdp7a} zoZ61C$p)JZ-ZZCsmSY@MUS? 
zk^DIYJe(ZJT3C`GO5cB~inKxnH84I58pVkvq;pz(<_A54OA&-}sj~oL ze|73OOb=hF#k-E)s(ZSrYA&UJnZ3|L?M7w6;MoYv3V*5@H{eIP|1Yq93On>9byX^X zj6q=HnYffs9c~~;m5<)yemGB#TmV{OFnJVY{ls8pY|QtB$evpC_DOeBm8v3N zu>ps(=fX>V?Lz)io;dGNJ?P%^?<<18@xSBn63!i@GP$Ws?qAt5j>xp0nqY#sJMJfp{LyLIO{ccnZ-S_=z<*(69cf zOqp>O;27|YCb3FC)QcgrQrB$ER`qeY@y5!$0TkHv(Rfuc>_DIk}3La5{;m^Ie^*aAIc;j5UwxaNjc4!z(juQaz>=&Zc!J$31U5JAC5arPim$t~uk{ zQTI4Mg?U z<>N$1vBB~n4!EE6w*lE4?_S=WJH_2z{*5Z)DdS#NbhI3e74x29OLq2@Xa1NBD>qBwO>$WBe71y+>96m2KQk zL-%p3lcs7S?UN}QLqKa#J%Xv9O~V>cSzZyjp_4RCFn|E>%PmJ68NKSI?0VUdaC}-2 zKrmWlga>e(R{$bqYV-sEXyBE?xuFOKfCeG%8uc9N;lHY9#EX8^rU7KZ3PBPw-bE0Z z)2h?dLj_WR@78+Bb}p2>SU?)!fF=l-6v!R2#HvgXWOS~*kLmhnl@Ua}F|FE`T7h!| zXG+4V=jg@N^65vyT-!V%-R|c9lzXk<(O6$QpSpd^q8~MT&z$EeiN^w6)e(&c`3m$( zj$ml-Y=3Ls4?I(7zxe9-UU#z8JhxHv6{zcT=b4gR2#k{c+$IvWGFW#_xf)PdH#bBMXb0rkWGO&g~P~(Qcc1@?x zgFC?FJ8ONs4{%sCj131fY6F>~^t-0rWyrO-jgpsmNp={?K+tJ}@R|5qeYMV6k1ivO z1L$dyxY)}9}@=<8Si|!I*#a-okan#z2js!dVWq3H3oX>3YUOzi!6DwHJaNl zKPo@;JSs9Sv25K8rf2=A3A#1lVY%YQ(P3Y9J?MSJkKJXmJhF68IDho%eAKr&jyON* zx!ketV8@G1Iqw-pN~ec%yJGL>d4iXV{Cg&Dcf6==VdfO3HAmK9vy*u<{TxbbptgSc zv~{CCSp^w^Joi%Z(e_aY4mOK|rz95Wvk$L0tiC*T%T_%_dd)Ksac)F1o6=85d-n~y zC$)R{=lyu5DcKEFfHhiQWspmDgbEUqc?!J} zvdFbY(lX5p>_)`Nubm$R{(ad0@2cRx;y?e!A^%Nfc%cUi z-19F!+``qecT@B6P`&dBnp-0m4;6$12?(5|-D`k?C{9mO)jk7PmktN8ktGWdI+^XB zqmgxGOVzG9LMqt=Ger?HkTBy-5eloF`#zVSH zM%EyuJd>L`Y)|h)x#$lS8T8!Im(xqXv;2()GrAiIo&~*KtJLyq`Y?T z;=J8TNo1l&?G4pxu-fJTzPdU?mL3dQzm5W_5nE(jTdk-Tq>+#+2OnR+w2=W2jp}JY z&K0N7r3J3Jv~hSe8duekcy~`u2T9WJUdsQ#mlHO1p*$3!IVI`X=xLfY{^qu4V^mn- z%8#B*%NOsDe|acR(*5@5`~BGHy_Q=IrN7T*DcJhk;40?bgogprMghml*POK3Dgym9ig`(Qy^pm+? 
zG?0@_^lSupc#id$S~mkd#~R4scte3u(`TlNTG>O=;-e5C4qQ@R!ZZx+#*+JJfzZbS z+SuwpWm!`E67h$;uE%ffA7q>;e3ZY4{b6m~5-Yx7<1tRvDULiYeiy^g8b?0-2C_)q+U z8lxtlRuP6=Io;ULQ?KLz05!mB3(f}&hBB3K62|e@o?9MK!!xXhjqHvkB%}@X=Rx39 z$%vpn`V8o};#{%5qgkdRBY^VwlyaUTp3u~lx_JI28X06vgeQjsI1SJ^dyIjA?JLH# zMU;f==P`LSga#xh`ILT~QCC2eg3UzH>>y0ZW@vFj?jJ=)ht3mONJ|`YZp7%JuhPQp9cWQQstltTiPk+s2mqN*^PCQMH3+$xtfn zZ=${nv1X;QJr8uaw*NguAj&J(*)})t)Wpr-FSWDQ?!3^Q*t_5`Am)8fJr)l#uaP)f z<_Yl{JL~i1uKyPiUy8rqH+vs_GsR&cvLsrWjq$>%xF<^pc*yCODLN+3a`TOs%3n3B zzW0Df_939UP;p1@0z1vR&D_d!dZ3q{CZ@oTVk^I^K4Y#E8}K4&b-0Fr93L%we!Zod z=p50|x`bc^e!Eb*lJ#O34@)G?r4UgC(mVhl`6Wfx@7sEU&=3=;tL6?6WJ(>9u*tY{ zjJx{m#w7$p-s+3-n^2YM;;Pk{xYcmwZ%=dkv~^^)-_sjIEDW#f9h- z@2u3thUB1`=-;Z&-{fq?EsA-O-PMSZ9R43FWB%p8FJJF%9nOEV4_iQnf|+1Sh8nktq{>C_`mHue2{30#23}MYdz~B@)Q_)gJT&@c z7@zfxZruJQfLK5*8t--#0K3<~3$>72~xkz}*gCGi|Ps6B(v~jqsZk#Pd z5Xm)HL^A!ApFivOH{eOK7lk@b*XgP$Q zl`r66ZM)mrwsVkF-x59`z=0)VBS~(1y2g*W^An+fRF+Me&XlIAVCLE1dN_^lh$ml1 z-gx*Y34AxU(+hv(Qu|WldFb!2u_1SJ_$Z;zcAuqgG5o;{L2iBes5fH<(qD7{Epdf! 
z=g4}M9IDA)rWb#!yl&=S;({vPKUe(31-+=vxN%x!f5aUb%7-EN;Hm5>*+X(5PI1Mf zJA<&;9Ch9li6^r1U~79Gz|b45HRhp$^%71X%K*ui^tegp6K+e90E;my|{VR9l(S{2nZmTpIRd@bUIsr zU;;}3U8E$n4=MZL7T^n^fmVyR{1ilQb{^%VbBjQfML2ki#;!T)qE~004+WA?WLPK1BriD|$QqIB!J>{*~`(|HtR)Ngu)u zKcq`wotSq@UuYo7SCNncJ_P`SfSHddLj+Hn^kc(iD{?b1i#yLNm)ftip$sKbHv2Q=gCh)}cnv1jznbYF zn9(h_xs&SG0A>nWcq@H0m4Be>^D{;bhYJC*!Z4yQ)d6@+P8~3T9}o zy@=d7r>1vmId$a$x$6D0PkG#{aCGA}j75y>02|p+Pir1EQ*Gj0>E=R|41M-pwa(Bj=-baS=HIWBPrdqt8TCv^SGFw}Y_MQ0k#J!BfMSuUP?XP_cvnS<2j8`t%^78;27eL2m}|pj#*5Dd%oPhh`h_vqN}p zsLN*P!}=)&G;Zx19j*NOs>gCJrQ4S=Wk~%_Z`q~N%oAK(lOE(sI31Cvhzrpy+0sym zMWiAS>2q3wNa3+zOxLG=6p%)-m0AZ?s2QhY)MdvqVd}sHf$6*q=G><2A-Grtx*9?D z>lM-lZv~W1)JGSNa2^ZU*Yp{a+r7~I?&EeJudIgfi?2D#?r6hg$E0^op=BMwX!5bm z=b%6Oc~-)vS1VP=B7zqy)LY*czIMxgeet+NeM7-9Y1h(UF$L;@?e``d=UwY#oZo1i z<1qd{cGy!7ltlxGghO7>KYjkjzY_4pw<8ynChzpT6x#Kz!$pYOft$r}f1s5_)Q&pQ ze*-)srn&ByBFgV5^UpA`LTpa7- zR(qeGD8cLN2gBgnJy!=oa`;G}Ic*M8=~gZOCXC)eO&Q>CaO_9H-CA@%`hQynqI zb@i1{b9uJHCwNJ~24QLRf$@}Qy zgmvxs*<<(f>kvF=M%6*hMLgCZK5Jh9+mgTkua-0E_*z@^?e(kqwqK?uoiO(P@h8NMF#?6L;TXbOh{luY5(bRrmP#lD|Qp; z#)S(*PO`JHbMPqf&}95CFluUZoM$g0WMf1U>2-26?xtIr1#=HxY7KymG0Fz~C%0;F zuKDRSwT$&SC+*cSmxF|mg9Mh^+G!kf2J0K^$kk zcak-yRa*ac@WYZrwfL@_cL66Bn2_(q0QnE0Tds0`@}5WJzfNoUxN!33n>8r%s?Z@z zv+da2E&d}`hG-tio6$k?F-t)MXymw`OFABKXe7+xB>mcvP`Bm{k**1kdfbs3F{riN zmA!L9;+&H5!qOMFjr&{y`%%>&-?f%?ny7g1|D2XO;#cUv&N7cuVNR7qztFrj-#H-= z7^L^}Z13*c^7HB!+u^;~-o!M9rQ>+WXU%A>e^U$pC;q{Tz3X2N@y`~d`!Zv1%M-vg z(}dGw=)lT5Or6jJUSru#`xqd|vm~^zxG13|UiLfsIE^Qs zwNf2u^Y7g*;?>ZVeW)^!S<*E5{OaBdZJ(CC`fK4&ji3JbNWU5T#y9&*WR86H>l0_f zrDdxbnJ@2Hp>BoDZ{AwVa4|2dWnTO465@e($xT|hd?CDQu{!4b^Lqd`K_P0f4B8|s zAbHic#6;rhoJ;wSu7H+)=Os>UYLa-zU}{Fce!8j615TT@cVRjDraPKYuU^#UP$B>g zfkleTJeus@dlAzJHctbeZ3ywRMK9FE!NbePU?c)?ZD5FHU68xqA=91}?EG^;TbM#3 zN_!>r2ls@ksRvJ)iu1w6uTZH97>QHL10FbmGBwc}Gz=}rwd4Z%&EPy?WxsJz^Qjn-YCSJxZ=@q zwYTy6M$y9F#m<%2z23=6zA*(K8UNbq;-4b_-^ai4f9y&bVDB9#msjsmi{};J0F-{E zUPeipsEif*BBu!N6?A?K19hY%L_;`5r^LsL0pMZac+i?wM>$R(iOV!Xpz^ccew0Dh 
za9Q_^Lp{g&zv=|_iC0*{ca?4Uvk|@X^^S11fD~Fi<2d#sFS<2;(uB9Qc<_^A>`9SZ zAWgP?v`g@i_*+JZxOFEiEUUHRSd@#@$+*R=oe`?ZDF1Gx~#OUb=`BBam+B6tZo z&gVXpEYKu>tcl&H(@jBD-=O0Ck>kRm-3p(ZtxWx1Bxi=)!?8 znx;D`@0g&?msNkbvx;g+mHbnK(f*D9F=l4 z&E*5B@O9}30k=KCwEstY1OtP@bArnG$52|aSC6AE!Ptf^u+6Q=qu`xjD0P7$$3c6< z`J!e#eYz$g>b~ww8$N$4Xl9f=`=nB6to{ExxA|WJ{+}-Y#((4P)<>%t!(m-LkBtA| z{|m%JY}4^euy{UeEY8MqZe!5!yuu)N%7MzVAn1PZcIJY@X%*nFH zvjO(efC4`l6v_s|GQSNnr~8<{CR*|riK@gt3LxF~gl%^I$vG};MAA@SpUwtpqr&M} zX6&M8KN*`OC8;NyB#xYVaoXakUl&0qtIgzrfgXZK6H!;qrRK#F8ZBuV7y4m%M8FO< z;gXCwc%*ysfPl>@WEOBFEm!{8Qht-#8NDrbZ}x7t+0d?yhGy6GomWOc=i+w}IS;em zR!1+2N(f6Bjs^%NY#;;Z89w^TKYm58PpTqcgtxb$!R;Q1-#3o)ou~9HJIY!YZK>1qBxN1-*9lI$u$gjIvfRzfvGCUSOcIH6z)tb$gz7UlY^v)O>Ymg*Fz? zU1>>KozTq5AOS+PrgsJ^6kxImBBXi8E2GJ01nlc8Q>f8WS>se1!1H|B$c5s{l`q?V za@gfI+hxZP_Z61(kzYd(Sg!-0)nAA{>D>6lGfU&njXM!4;}2Y4#Hc+M{vJTIGSgvVjP;^!g_g}sPRtE)u8}$GiPye}-k#BYg z&rt1iRra6fvCF=ZQ@)VdHNt2lV>G2{MBJK7@TQX)|8*~1aKANO5zCMeMT^K&6LQv` zjFS(&GOWA8nK+Q^=T~e9R)HvWD7Lx8vz`c4rJJ5&K{cuDLX>rJiuiS4*x9d9YE`N% zg}w{zV78u5GhfK|oc@`8cJa{|9*vs&%uT~wnZ4HQX@~pa!B!pVT-Cx?RNrcS?_i4( zh0-8C)L=7>NHl!T%R^{!T2A_9_Po(fQAlzc z-8}ZEhJ%q2oZn#JROpOCxht`yvX4HO23!>Hx~}b_AU;AY_%yM62JX_l()463-l)nn zuyx?Y&HHAS%Q)Lb^-4uP$<{w{$@5N`pM6y?d{23K-d>_8Ao@PSOvyI$FzSMicLyY- zmUq$Q(kOA+jQ!EjHRh2~2N~V`KdKB39T6q9$VS}0t{OlEjyEyOI~^yuVKjrk?FEF2 zxOsY0hU~DyYOR$Xf)dU!Mft>M%Dq2@<4+eAYiZ$l0KB9qW6=#O}Lk~1A+1+;9f2^${>@@r?tLE*55$#Xm zc?~-66H|S|{;Ixm1y2k2uVO5%=Tf4DhBLKtgE%m>r)D#iy9>i34TDOHtFU#c*#pr| z%n9rh6N$Bo)>9mCDOdK>VSUfYqvimqEdX;Em?o4YIwF?B!5vr_kcG&mWt|s4TBOs^ z+CeH$7kV9tHOfsx>?f29Ow{)3 z19S`Dzxnq3R_3Xs`xlfwmUNR&JU($X_Rp1<_>QEq|DmG&2YCOlhyTKVe+ww&{gKIl zyQ^}$hw(qz8Zq#R@?5QSXy0i<&S>!Jry23-kuu85g{_GM#^cty5ggMf1!N@&S?RA> zeK910B0)hA>M8}-E$h@IAMPVLG6ms2O?@&~FM7?4Vw#FQFJ{{uA!Mw@IVVL_%Ui|k zip5+yg(>ajtt~ibd;kbaL=-`a6GIkW6{PmllJ zyVd|C=$M=J? 
z)wf>}0h~k=m`*EXDbS|~6*Uin0Qw9T-Ec>Nl`__FHn4Vi0$ zByw9^2V)|==i$vQE#su#4t9>z&chPJPbt~oFVA!=qe!$AX+D1~E&bxbcnA<;;vbsa zb>XMGhqXxNdFcAyPcc8vDX}Y+uiJRTva4L&$7g8P?w=kfmA+Y^sE> zdOvR4qijlsNDJ)7Gr3Hb(lto@^A%HIZh!L!-=le`E=%n^M)h|5sK7&=PscilwIoQ= zmSCIauNe9vBQ?3Q33)f~7eQgXdCLq>POM`|SI ze{{6?Um^bwKlmH}N?fl#9l4;AFwsn|UwLhc%T&xW*;mBf2F2S+&!|})*^tE_SH-#- zy8G>@-c(U@0OsM(!R!FDqpC?ll8Cttyr$g&Z*u`20gdPAGsQ`?kzeC&sDjkwtl|l| z{sbNj1+Z?mScqu%XPA;&$xNdK))CQy5k9(q^Ub6vX;l7I=h-S z5FNW|4BsI4Eb$g>ZYt?A6p)GfxK(v-{fsyFjte@g+s4@2v>|rFm##&63o2>-@ScRv8Jwm(;$yZcG3n1OiDr0DTz0Q*`div_!}43YXEwF%$=b}p(9d{q&bYztH;zUDz$b@kG-gS_BK}b zAfDgJPCw*Gj6>?}W&I>fccEC7$U#po=j8>(T~A~mbv?`LB=SKy{@FrhMC^!9(fL9A zcnrCFb$Jl(qUhXPRT?(mwswE+fbYYnU&D|UNT$~n34VLTy}}HEgd}!Un%=Dab$d?$ zFgfJNyUZO$ThD3Bf%1Y+1kTGX%0!RC$X<}zak`V5;*cG;b?5McV-28wI-oRJEDQEw z!Lfnq^lmIyrp?+(AgP&dah9ocMtxzA@l8Tk(XRbZm~D=uosp)b;j-=+>8DJ%KyI(f9LGz}R2rhob{zv7B@RHW{@tAXV2cMU98 z-41M;{}is*Mq{x(CYT9o-D~Z}`XwbKt|s>T*oSdBU)(SmR zl&p`MPTwtNVWQ2UQ`}yujo!X&irRP{In&w44E)`(Y%3Y8ywtLdepV4mAwQ)qrvjCrkSH`DUq4AA zAz!IZM#A%nH}|Q-9iAV)Qzu6*Sr z3P`gVL9;VT5&WP87{X5iM^LnOBZv8;Bz74PowInd6d5k6lt4ra;`}L!Y?`CEHJYKv zV&+pm@C}-j_&{iw-}%Hml0XJ*{+AVd7If$yFo9~zciGtnKBDh;V&K?wS^h66z0KeY z5_ckcNTD1M+0iUw*p|idO8z2 zgn(+yX&6n~h#L?_5Pu+qO2Rf&J(}mY>kW}XM*S-N_+7o@WENYORn@qc77fCV&|_%L z%+kYbOlMA~*aUg)nIH4H(3JoU&^Ye(`g^fh=#p3H=c3u;r?p*`*3<%rpPk*Kxocan z&3?&@Q8;xmTlN zBK_1ss8^%9NaW-k6U#68vi0LU(dRX;QeVj~F$H(dF8iF;Ny+M4dVS#Ia7CO&xnLSU zXU5X2l`f83OHns{u36@w)3paIKkc2FsXISISKq1IwK~(hUO+rK0vsZx0z1exePX{jH3)2=8rhcK_?n#as86T7P!_ zwG((<@tBHx)ysuPn9E@UzcsgS&G#J=C3BaGsja~Kl_}b?xD-d#AwEF-h|cGvS6w6X zzb-8HeQA#TewV8(Z#%mUQye0>(2iNC z0j)63LMkPXa{LWwyW}jr#Pu7rJ>BzHykI6*pW8d*yv+9d&eYSj@p3MA7x_OqF6O=( zWObN`Z#E6(Y+n)dO+DEve_O#d5W$6??WPWz4;cwvv{@5#XumH#cvWVhix-cKqKrFdabUR9?>Z3XVtIF`p-^y|)g{Dn zfNRR>*Eo}-5gw*lYY{gyV`9q2d&|rIdWpZ~*sCFWQV zzY6}2|B8uQJ619J8Tq~LrhBgrNyvs=2R8&7)MBV=7U&2qas05>&I)W60^q`MFX?cZ zR(`P{wgr-G;E|9o8@GaTzQN_6Vqwc)IU@Ym-w%17D*AEl)P;%gAoX|LlOmBhJTL(x 
zk&uCs-g?Z6ca*cw1cgf$P=AE`tdGPu4fK?IW#y*>E z$iW%gg&zy$J9yuNDFPw|h>mu-2z&FXYq*bc!>^kUO398K?rBM@pF*(PR(Q%rk$wfR|cYbI@^VwD~~PKyT=10|g72c8oEdDWvYSRmFZ z7rY5Pf#-uW_VSK!$Uvyl>C`nFgZ;LFdM>ueWAj?~^J(u$7*@_by`9fe?#GR1FFEMm z>qJ-n6!e+gICZJ&-RH|j$M&QQsQTQ`j-6o5EL^KPxC2Iu2K1hjyYY161OAe8+K_VP zH}{ya)idv@gm>qq=Y@1ild=z{bUp;$tt}4ANm$0;r)C=StXn4r-m61%Xo)n#y!a?s zIzoaQ1m-@w=+%o+f_c>@Fnq-WI74yLeXc<13BQpTD(rdHl6{=F@K<2~j$& zJzCZBJMnoYi$vi4Udw;q$}M4NBW%oJWKbIw<~yzUxh}tV@B-QY=ObHC@RMoV{+z8# z#{{rDgG+h;Yqs=v{FiRo`f2HTe8lR6YyYn2769;>O|wlBS~r!MLc(HqCZ+ATxe;fi z)v4&0s|WHFIugc>VxYORhM45R_4pKm1f_k%J>x_gC4O7j%=sy2McPck{2f~!QqDtZ z<0BpPhTFAL1a=6)u^T!fJ?*j*6{6;|bWO+`=iu`=&=gbU$U&^b@a7qqb!Yol_XRJI zL4B4rk?-#Z<@}5ni?s_spAYwwpJk{6rQ@CQ&St69yVhQbtDAO{sxV!!4tyu->@R&j zF7@8jFX`#)mypx-^(L597#^#^N(V1`Y9c#xlm&jpPXY^}RAKxV*`-yo)y;1}Z6>C`2iidMM7j ze>R)NEO1U9luq|+E?rApP>|$vJf&HA=ry06iqAqyBFlU6onmd`W_bL4C2){UlByeW zkTY}_=gJ9&SS#Wjpag&nC>J##0;3-RG{4C^mB7UridNICLlWG=^2EpUl^mNSU7`&+A4ce{eEie@W3Rb1J7Ccu zGE7D?XwtT$h}dI1XLEqnr}~byx>wy&!qgs0G2_V2#(BhkfD4P?Cxu)i6aR-G_07Ke zvq$1Hm*CF_id}Eazn$Iu3`y){7vCC#7v>sj``!;T;|ZZZFrCnI_mvNBN)x3XGJ332 z&BNByy3)HlJvvP(_XKqMdlB{DBe8M9F=s6&xP!!plzz}8X_-i%26iaU&YDuBJ$;TP z2L<&hl~hNZNvEGp=ygeXUa1&nSkk03ob&o+ETx~w_GAClop=c+Kl>;4HXK&z?JMQS z|99N~-*1S&<9}nv)|T>n*^ISbH~kgQRW=028;g7}cQTrkg{P0E(4BZKD$FcTYi1OK zxX#LwoJf;NEL7>(2m;mg0ckc!uOm)xSsdG%B}~bthKYFI7gdtC9es!y6`=IMzOx_tG2QLhDw)= zAgHxljo&c(>Ya%vIy(6kYmrNNb0r~7Bm2+dj>fJ_bYFDey(;rndGY&DINBvmF?w%h znQv!yR>-)ZEJm#=@q))EIn(IX3`MzsI}+pR#%8MzK49L5VeCF0m@b%co#jSOZ}biIrs9p@ zQJB^k#zLO@7Z|%kIq0gLo;6LvGU5o|D+(@CP$7?FwgDO1TGxj!WEie-@ahz3R6ME7 zJ<}U(oY}2kVsJ?i3~(8*HyD2Y1`-$OfuMAe_5u0^5?GAc$gc&|azn1>&pf#DOyT7_WrImM>38GEktg0}shuXX zGOMSMwk?{mbJAWH8p+|3AAbI63wc@cFIl1gmH!OyyyAW@n>c-<)Of`=1?M!UDKDv= zU5ewsrJ=8{8mWNG(7{Qcmnbl4uDocVUdXh31H0vPJx zJlKt?Ib^83u{?N5VseE)Z#S=4`xoN!seU*y61d{_n z@jT@(tsdXz>gCgFIAyKF^!Y5RaRP2Lwl+!*cZFS>GCA{aN4vv9()N3!keQx0T04zP za7#j?ah@ZA6OCOAYA73V#ci&4cdM5VPM zAZ!u%!V>1$b*RhrW9KS%Dg2o5?T0nDJBCZQPGldrxb5nbTs}`F16HdR8l}EU#x4vr 
z58dk{kQn)OXu4L(1V&g{MM#ztNNW?E$%FQXV&$)~XWj)oki?va(~oHcK?lSE`8K>J zjVXVm(1kTNZ;UnxWNTmpHykGGCYh2)Y#x9dlObUFzYI8iPde0gi;sDdwrdqh^ho1f z!%KUIH0?*~ThTxgdjmB*P(krO?$i-M=~?rrEdvRTo6zx}(wCIa?TqD)q$e!5OGr#- zL>o&6-cBAc81V^ez_>m(GBq~xZh;cwt49dAXYZL#psis7LauOlCL5q)!dT@k{Lsmg zj+-{4E5Ic=h@+^LU#_N8IsWo*>J3^>O!YYACMJ1~aj!VdeSC3@`zUAJ`dsSl#=Y~Z zWgcyBWRQETe2oZeUQD6;4(SBhxu;*ZcqrGEOT z#10j^yN@qkDipr>bUF9nE?e4X?R|gIa@9{g5hW}~1^JN4;XHi%J6qMD+}r=JS=oQ# zzkFw%J4imUp|^Bw(i6*$WEpX2nCiWgXsiFhidR(K9evY37wbY7DbalLiltxq{Ye{8 zU*<<@i2-aNvM)XlJ-_L68iL)WOkqBPX~$JKWzKy$lqQac8W?()Y*i3uXvUcd2_O3D z;q_}9NXPfwSw_JON$HBhkG@><(I2BuZ6_ndUB*m}9xDjt9Q#oxKvh03XKzuUs5;e? zF)4PG>E0sfes#9GO(y2qdH;Zu=*p{0#h2wn0<;$^+d$g8Cnu0pvCb1!3aCS>yQw%v z5xQ5e@B3cUsU6`lvPT`0VN9)3G&%T+8aJnERHf2wUeRygahQlO*U7NRP>J}t|8!T&0t zyg5m4#t9zL5d;rj(|sE^O}!BnzOn(_c-0x$OdV{kS{ojY#*USa*n1GIYiv}PwLr^ zoc8ip+w2jiP@#t&6!8 z%@$c6Ps3@_n|x7=8TI>V!W@?z4>FUcpO;zGzTgjU9>e_N+*iv;obUZ-^d!fSPW}Eqw3Lw{J$uB@2I37 zzwaLq5ET^_6-f;@DjGP^tiY86%bhF5JxX(>?ZAP!$}O6jJ9q9JWw`fVWoC{v%h9mC zK0nX({odDopL1W=b?)C^y#M4JUcBC~^?W?B%J7ekTa2M>u*SXmPeRsTykuTD+#&Jm zs=hMzwi?_z!f^fW1AD|_rMsUFs#zO(f%EBgvU4(@e<>2P@}%~_Em9G~nqSn{=6xLe zy3UCB+bML(7Cw3ZOK$7S1<=pV8hyxsMEKeQ;-|rM6Csl&Oqrikf28FzUI81(IH0dAUpX-B-Fs*>WwGuS`{YF zRm9R&ZGX2%f|FfC_@*OCf(5l|t~IF-Hhmi)FGPc1e&f0(A7!uezK2T!W*|j>uJ+(e zY~efIz3DPlQL%Wl`A}=j`b7oIN4QG%$vwsPWKh>jYy_`!4X?_ms%wOb$fM&pOK_3h z>9wMkAJvIEf4nWf@KmZbZJl5I@@qt_Znq`w8d)H$U_{${INa*Ez3 z^E+WrEPhR@)E=0N{n*+^j=?GR#JER$6vfFF#TqJRatzmHV7PrSwDi1Z#wS9aur3Y6om8SYwu^o=}2=}lU;y@WlJDI0qH3w zP72l!K%1Th(34Mk4a9=P$UrbWI-1EI;e&FjE0g<^g5}Y1{hXng%X+ItCn!DzTV3x5 zDrKf{m%rzM5yKp*nSp*mR`C5?lXU~_ow~s_GYMI+ov|k(KfBCMRM!cYahhb{l6RZ% zP(8>BS$41R7Uq?Ty4vN3nvZJp@I!=xGotx?tEo|0y{Vs|bhL7oThHnlYtrtz8&GLP zQIPh;w5C}y>#Q`N4C9;L)cS=J{Iadwx5S+JgL78Q8D<-UnDQ@ItIgax_nIo!4@K_g zGNI#SHN9F>#f^`qLGJ_Vu5Lbj`bGPzV%MJ)m&yB?Nxs@)C*H*F;(Pfg29>;>BZ41J z!QcvT0Cc%0JT}P_gzGm%Q;Zp?fpWdGT`SGP_Qnp~_8yN#ph$OV_c4V5S(yOpm^FiC z^4lXu#FLx_h0>@pf(Rxrs)~ldgloWL-AP+mCD18#ZqIY0C-Ko9G3qpRAYPr^YgVD@ 
zWkc3hllFA5Rs)vsI}$>Kcp#?n6mpc?rCWelgN8@I`H_$}u3Wkpbq&ISMmQW6Nv*Vi zq87S-TP)Wt9NjG5A@YqRsIaaHspbXkP0Lt|35iHl_sa;jU;n5rNlh4#M!8cH&*I1R z0p(0SagBRLB_fIheOWGk)ABHr(E!MxYKlj+bUhW*&$<}m^LV8rc=PnyJBx0Vr#&v* z*5QuBl=l!%lpXeptfB8ay*bozoa?^f)sD8q0sHEx#3CJ(WkT}7gA21!;?f%_O@3(k z$t=f&+{H^yb92&z*hf{@-mkoS6L#|M%k@G8+nMv>W_X2Nj z{r@Lm{VV=!_Fn(wkF>A|X}>?bzxx~?EIo@U{6t`Lg>fAM9J8(#Ema?NJ5A2dac8lF z*J1#+DAz>OWIzUL=x%&GtyPRx{Pw^T*kt%4SGqk4P)Nk$F@ksp!;sT7+YKNWlsBpz zPJ>EwLA9a>1bU2R;*8~Eq255K2-wFG%0Qxw8`8@$EGRGrXqkub&ynTH>7f-&)0DY7 zR!IbN;-TUTlYcY=lBlH>3vRT66uNR;LtWi?#MPAVn8O@>8fsAwuY-YuPlyin@hGRGbquo)xKnREK$S6h%GHmVkzz4`zO4sR$}&|4@)22u;#Qo6;`MN!56^KXw( zQ(`_Kq`fwx1Py{7sBs(^2jU}|BHYzQXHPQ`cXNW7WO!FtATtcbvUtnUv$ z{LUy#IHU4&PT^|g2q)PHclPm7=*UY9qgOC;gCH;2CmODH*Q{Fp7V27~niitrL(o`! z*;C#VW7KyiKvwKyhTl{tFFafxm%sT&(?;ph$ERN#M)kgBE-SCOi7R_v`V~x9OHTcN zT!H;7{;QvF{XG`x4Vgc26tW+P08sF;DPzG$;r05meXDp1^A7$)%Zi0O6^>yiYZZ=2 z^b$BXAX6K*gTX>9Z$Xl;dsbv;9@WyAT3$3=?rMpVQv))sjcX0NxOpLc6e2Zed4WDQ z|3@mR#sRB_6eWN*)WKSQv+iGykA=yfP`8_vglTU=0x#nGzl#!^1$&;E6iPE%(&l;i zAoQe+i`*1$9RMpj-Z|r?=_N1KYI!%pyQW7Thqra-?gq#Aqr&DgLJVIhfe<0Zxh|~#i6vUHltkUbe&uEiYLQ9Y_jcJ`@ydQ?SG!Gc#kaqQ~mhT z#-I6Hr9N=5i_c??@+eOiPmrE`8yRy^wGDdtQcyr}Xt{B;mwO+#{84YgT;8Z|O#os~ zM<&~u?GlYj8AD8w^!YRhX<;-sXy;936Qvu#Nbq!QD(;Ivl&VVGkLJ1h?1DQc5FHs zbXrGPxS;p#9Tdi~bSG(YD9ieiNWm#l+S}K`FLg(gtxB=BQrgoWt|+QrSg?pVrxcOJ zbilH^8IYLtC8skL*x=E>e~PR6kNL@>GI#;Uny5}^m&h`|wA^L}MG{WiHR$XhY2?c& z{5GE0Yrqj}V;|zr&xy&w?=_X#1kbhHYuP^OGT~wb3VD-P`QUQls|=@`O8WWPOD>BZ zskLJx{t|wY3}s%6-Wevh*3Usnmap}d<=3Yaqp)ksAA0s3mof)}Cy$@2rI-hKpDnmL zKaqC-T;r3llM7k4|K;@OfAK%7inhK_Y-W%DIw8@$zx^FR0(E%g<9~Zp(7Y@Qyw?H#s8%1cw8s^3n#O5E0T?zLN6>i9 z^)A=@fCIvi=9Y(X$n$b|6$&7Zik<%c2crL(Uh za`dw;A{lm&|h%w&I)IKXBf^_gI^(wL|6#WgP%^<51` zh*Wnv3oPPJlIRD7qoaY|4~w^>`$#z$rBd(cY*-tV$opEbyD!!#CM7CAR^7d(tryeY zCoS^MIwk02)u}`i2Mpfa!BKW1)V=4AdCL17LpFD|62 zjeI-msBxKr#7pweiU_M~Y=Swihjpm&gTvGtN7dAEVVsr}O=%AN@_D?ijav8|K0{dz 
zK-IaDs{VYNP^y4;aezZ9K#+Z!tS|}k*li9c4YoAAFVD$@S~9w$@kmJskV*|M~5LoWY%!Io|Is*uB=6x%gl$6nic6zj+`0NB##@yKnDL9LV>-Iw9V@?~eeG00=4~ zfEv-`oo*QPP2Wd0Ai|*{k}eo*ekO#O%9tJqL8I(sJ*?`tX}?RD7k_ z!<3?=6{KvDI)x}<|M0QrFbbbzxN7*BGxdXZF(xt9P8Wxdm={8E*kvQHQHLYG{_oSL zmYu@Pj`g0u&DtiWgUcgiG`g(}GCfTrH)NzxH+2~;&Y zeX9QcB6BSn_a*l#g%Na1>_Mja(d;G1Yr8;f)&gEin1iFKNeiwVt=$+zcrP4w-Qzoc z%%+4ND6Vz_DkD54svgzWleVFB7j9vvzVrRXo`F8pjN8ZEtSYy`eVlUT)8dg(KOgE) z6c-Gl@SJY!(?bW}Lk?IF;sFT5-Z+ycK|fuc5IF27__=xsPphlZJX|!@wyaVpFgW{U zl4#sbuo8r-BbOLqiX45Gj znqZJOGzr+wD2I~#{C4>H?1tV@D(jlXWQKxF3&dDKWJmLnP@@rN*_*1_^bxi8bcR2l zHC?&km!noFYZZ_EuA#!I&w<$xhRVS~DcPTKd`@hP!&@@YrY!0hIoiO2NGdG$u-`1f zs_4mK#bZ5hq)E7GLV%$To^Lb~)YZVDmhyt4gl=Q~vdY51bz(*MDC6Z4T<)grEvLE5 z4^*x?`)#+u*)O3Yxt%%TU~z(czHmtQh$>Cr+9 z;g+j0ysFY^{b669FZW-y?+yJ^OGftU+jaRB>-RLX#njg>HgFWm1;tgm+UIizu3WMH z>doZvpX8pUm4+Yta=h0E+Qj>^>bc`r!CwNVmEmSb#+Q>Py5A#>myK;)kL7h}z3wka z&bPh zdy#`{=OV+qIfJNiyw82R_Lb+4T%ij4N7!F9N;_N4I8$Xdw^HPICC&6_mU2iihxpwW zBvu&8Nk;X}9;bEeq3cE+d+KUOalPH1K$dh$vdpx{rQ;;)n73y?^BRgm3Od$EGVd0> zNNlXM`GeyL-$_$Tb!IW&%$e?~O$_-j*+sbqAF?-#>g9bV>W`5s7NSaTzQo!K4!;$* zmg5_F{n6p(T(ts1jiej4N@FLjAmPP@4W z8RsA?ZUlPA!RnkoswFaSWud!b8yA}B=L$1$@(Bw2zSG>tOmB?y{(G%Z=dfIOw1`U6 zJDIYJ`f@yUVd7<)d$>+0*12nwIQd&$(Sz5$y?5qH_Q2A4WrwRJ6C3P#YR*8WQo+sz z9Q9ad*Yr_F#@E5ZMU}gc-(N_yC;ruUx*k+^aUXcgpZXT5^|JaDpI0ziK%hH`3e~7Q zG-~lo7MmDb9+MoiNMVmyq(C`g-*+fGUecDu-mM0C+@j)+rF>T<1UDH_KD=!qVBm{! 
z1H%$3V2rVVgo2WeVKCi@*yihke@KV|2yPI!Xgew1yqSGW?BZB2Z2)yv(o)(eb%qi= zW=Zkjq56WGPwT^_rtsYAFb+v>@ke{Q>KrxF7!>HxF4!yR$%9WmW!r%dy8>@P>DGTd zyMlNo7Nut*-`$MPmXHM;I=YK5T&(Nw85L%c$T0#`g8)x_38BQa_lk^ogVJoUWBqow zsfde{eEsqiw2!TD&xci1I#X+~wwlTYkQ95dQ9cw9z*5ZyQ^sl#*F8O;z#npA^cIx& zy=upjN;i{yji-EW#e1j|6|iFYS=*aWrU6in1}^BiN>VT7adOTFjWy^96>%=zST@Tn z)oC2)H2bMJ)YtOud|+VZ;jmX~+EuIlb>l#TSy`}?+d}-T*`~!bUI_8@j((4BVx!Cn z-2c@D{Hy=nv)h64`v2%}KOxnfe~KUn_1HV2fpIU6NitVgssT@{A+8w#*gNfdbJmh) z%POGw3c17=kk>WIVo_|IURzMrm+*O`I)Z%1gdxW9XDH1vSpKW{BUCSYj94*p?MS)o$<4PL3 zRILkc)r&!}oL*a}eQ6VTmV03GNVENcv**+^7rO-=-0=R#;gGDqAo*nZ z(R1X>h%<>awhzSwfN*ajQio(Yg!!~G7?wG&63Y0^GY}e2`Wy7YK`@vG0oub^awW7$ zkIBuNLlN|$CbY9hWMV(t$LNBO5&-CnsFHX2Pb1G)q_sABjhWm1k>`QJu<>tqHGpg{ zVkaUetXuy6%~#*1-6am!k|swa@QtEYf8V_e-G8VmShD}cP4b|p8FDYMbW3A~%dX4} zzjLlwG-UhJtye7`!M`iF`bE&sGkEeZheu5!nO?S;A^GJqN?*<-#C(3b1Lx)~ulw}b zT+Xtx{eqVxugc}eOxg3pbu&jC^vhHo@V&IcP@mmxlJz8=A|54=1U5q9R1WZY4%l*y zz*d3h@;ywqm5Gl>cMiVf!O;U)MV}g>KOQEQuAHK(BKwNEQ3!&*#A3g@hoLPUsq8{L z4I=}EIKe})KRGn zlupXYZzYH*^oa_C)~OsWuV=QPZDS1aaVw z>WiR1rc<9H$CVeTm{FdKKKs01S2g)VL!Vo;njXr;d2B z$442KVE6;G5paabuZi)37mM5Cart*yp)z3W`1TStMz)T0VTHE=LWILLI2CZ3RXV(l zFB~sc+o4FSUt;wL0hBdKe8b%+_hMuDFn$gNxV<~+VLd+dcNQCII_kd&q&=^y0wy3e z1`q`%AGqp~?lI9unt^z3r2-eKe-F7LIHiuolqwPpfJ^&Z8hw;L{ZUGYuwa{1gZI`o z;vkc(O)Qf~kRPSKTc!dHXt7Z+aAm54CkBbe016ddE?C0WL(v|w++#BX*e6APDH;?Q z*hJ7(I+w%xgFe7m@mj?N7l7zhcET9`0KwF8`smM9hQ2PjEfV`FX|j7yfN)+bygsDf z>KlZoC{S_w_Wd`{8wM3Oax*MjXKM?h_=7Pfx3Jr!OVHxn39HTUfmvQn?6eT?2(sURa*;G?K<{A#x_iQS;bQ#q#B=VJq7_Mng#d&_ zAEQ$}5iiOqoxMJXlCqIT$dZkD^!)*G0sz%nel9H#k5V_a9bAY%un^{O-2##P^p1d4W*OQd9ww59^zg^X zk>FFartO4azcIb1!v(cP?m&2vhTk_Z3Wm1=4(P&(jpfwJYZX*1-^rDM&123V(&M2W z<})67g)` zwN_^+-5rSr)BZM}ei`P>^UD63<_PU8`;%y4`1v2>4XvQM|Ko4)AMp>s0pEgoH%}O` zPaNHR+!SoNN+lv(D$r$HmJa?j>7F90gMV8^$Fy&4O-v^TXtZXOu^mIt9dq9abtTiA z&{$QmD0;c{H?!9hDru;~6gZOG25?m@NT)|SmFFRzZD}f%SRm%W6&IuM{BBgOXPC5l zdan>1z{#SaNMa-uD%KwrjIH-0qF~qEx%;F+_U1IJZ6P>>J6URkX*w;sqQ1x!{`mV(d@x 
zbsf>Gk&QkP^m-X+=)J}HyNAcYi^9wk!#%uWs*)`K_mR##2WJTuw*voU2ia+Q$esKH z`Z3KmEWOK!Vq14jRq?=jzxZZ_`I$P3>FdK<>8Gs~xUj9M>Furm^`cF+(-2y6oC5Al>xxvLTP2 z3i$Gc_JB*qZpE{tfK>Nkazu*5Rkh`;iZP_aOoKj0WE3q%Nfv;&y?1P*S!gGw0PislZU6c5JOAOKR-Q< zM<8*>jtkrr_^fPec)j6?VRiBjd02hE@q+z*j@?Rw<})p+cFkMdxz*qPud3&Nw*vpl z|2Vl#0N;DX@tnu2@yks*gk2NjI=32l5^w6Q1wX6l55j*w%MGL!!`Ss;5VST55#?z` zDluQvJVMt6C7*UA4AbpD+eONe9Pk4u-l^D2b#r~LMh((3wnf2nD^P))YlJMtdoPQ& zp=bx#GdU0z6^xOE!IU~Qe%xu#A~uF*Eg$)Dhhzl%loNQ7`&zgt+zuOV=t%RMMwa%s z%D>aKy7b8^)F@ z#@1(Ksm!t=K0^Cck=}(>VtEIQMo_Cc(+J~Ch+G^A%W79nK^4^kTbxZSU9471p&Z6y zQpPcbDoz4$!DyrP>$BYn^;_#3+6~X^otHIv=` zv0)FGSYmBMHJ|?@1FM0T?pX>}OM^Q3QdhBHPm_~SC{jEFfQJL{SSa3)mr$<{Rbr^a zw`5@qWR!PKh^M6u`-Ld2d#a{)RU9L2IFL2QNh$%d!Sxj3@ceRqntSkPc2piaxDS7e zFnG2l+L0Vy?58h+?bVwx<;a1Z2l*MGTmxdPLw5TzI7oQG(-nQAf_B8`2Dmh!GuJ%A z@$I^dKX1CJ4R^{LgCL+6lT_eKd7d?JucYPVsC^P=!i7XmyHHe#6pmW%wJ5R+gg*Qd zJo=*H#yxa3DB4}!B!S4?)Q%g!UoHsB4mB+0KB5gfkn`m}`Qz!9RC(SO;ce*aVh^E= zVdoc@0WONnXC-`wFFHmQU7oEE*!fARE_D1&Q2Q$K?40$h0GkHr)?QoiOYzsR+1I!y zVU^FnkKO0$3-nTX`R&!I_|BCJ=Pq#F*qyBEwdg*2zIE~G5!ny&kooNR=wdeb{{flu zul%n#y~A^6M{YpyglqS{UmF0Uj^KgVK+k&SoG!5)Fz{Uwf)@!{(0Yr51;Y(thO+%p zvIt?MtC5ywB!BKfyM_)hA3RhT;STOp=>SD>f(MkLq-VChl0Y$I6v(V~{8`7yQk46y zjpb`%{-Z6f2*Bdaq+Bv#Nt z_{dEMj&>YFe2AOV{cVDqz4Wl;6*;9%Gt_F^$2ajS;r<_@#TB@V2hR9dcLvON=2hi( zIlTYWWMez`_|1FVr;*xY-gD0*B`=A;doSTsp7gZ2dHm`F=ZA_dvwZAyA}?0PqJC9P zMCP#U%_e+;dtO(B+Hi$;AB9PVW$+(S-%O4Ngs3Z}0!h>Q2rOIzFb@*o1-oyLu^CY`j-uf< zpo!>iUjnCVvQ{x|R8Lk-!F}EJZ9+Y+GNQxcVvITnm)xwtK(1Ok-a>N0U4=QdJ2m^r8MlM`r|iuzD2BaP^baVs%Cr*^Lazq%Jft8`#0*#jssl4x0+ zY`1`C+4OBkXL1m?`0IKRllsQ~(*6fkM#t*4SbI{~_A}MJe67A$yBMCwc8~a!S_fSH z$|yUNs5ruvJQq&Ggbg;>k9HYs-OVo?3@~q63b~nZ~85l|K*d*>q=^ zF{X60GV7ne|GQVfzw*Cw?e^DWVMdTmJ=gxd?IPM3noF-|%+-&SW%Ot?Hcm-gaOWmQD~E$2)pR!?7D-Q`_}fw_q=y@!nI zeVY?vEv7yN>K4!66>>!ly-1)%DH;KC?&Wa6~OrMNSj4kVw@n2Pl4jfszEw{zIh{P?l6BLx zTDZ=>9rwt{lPkgE+|y4>XRK|u|4S;|f5blsC$ar~GLmErd~%+?=luboba>x4!Yh9o zr*v^_{r0PZu+Rjr9E|^kR8E+@xG=$<3q6{PgJP-o;GLWo5m7g@-i7um^F7E|fnK$7 
z#qf!8dtJO{Hgtb$ziLlYG0{t!CrQq-+uw3A+r-5wiFSAfSG3ChWCYWD{4AsAoTFLV zb`}JS<|c9#znoh({wwrD#1aOlRil)upn#l>gvQIoUrgW}p{aJnUl0F-fM zO%6byD(y)k{*ZL2Wy6|Mi#yVaA=7i&z!Gg>9K}BL1up)G69QiK=GWszGTzEsh8-OK z_VBN*c#I7~n83QGUumur6pze3)9jva_`of@?D^%w2N4Q7A3nEgw(Y&R@j4+aHnf$Q)<4=VbkJf!u&e<9&s9k3>bBH@0Nv?K3JMM-1=#Qrks9JJpYwpkbu}oe@E*Dw89FAQ zdGb{m8_#hX&UcR)vsZ4w&Bw=WVK({p+0TnG) zwdCH?j!t#5IM+SJieQr=?gm1dQo@cksa3Mo7Wf4wbziMv!#o9rkLkXU-mNpes?U2_ z-}@-Mf~OHFEX*#4M2jPk)RL{A13)#I!cX+rG_Z(Y0ut3c;K-%|6ys?fBo*G!kb0SC zq;324+vDewc`OXZzSE>cr}<=S60#EOpQY5k1hPv9;+>KLkI&T_y~wR3gm0WVC&-oA z>>cA-Jsw@XUK|MMa@UbA-h!YrCcypG;;iS{NRA>?5)?TxaMN>JMM|Pkq?G_g^aH z-S2ffy!}m2?hC*4S_RO6N$lqj(LYvFLYC#psoa=UGuZ%CFs!7W$teLg1BBPQ+w$pH z!9MiYZ#RK0Bducz;=VpJSQOBt;voidDW%q{&pdYfb+_3iM4qK36@!Sqs+?)ie*lQa zLJ=NH9_A_CSdp@~jK0lx1p*vt8Lx!IN0Vd$6+M7@8sg!%AgQwv9KO7v`7?ysVjopgF}O=W4H15*s*-ZBgJ0#uYJk5gu>D$tNd*v~5dX##1>4C3k zV(>#A%hVdKy5cB3!q7GS*`;1$R?9+6w~X1^l{4bP`Thz@7i1GI$bv3ZqXB5g z+hV!b$VwGmOXhg1&sm0o|A@wP8TO;8iar<)Gpv_x$xJ}y*SeKSBiZ+3`F#&A?oLP6 z^?kdTR94xM%JU>f+Dj@S_~&1bR#4Rfh!^^01vaO4geF-Eme?8*MFPqQ#6FCXNH}$dws{u z@F`RB;b;^XhCqUfq|mjP9_|OVZxG1uOm`blMeIl68@*GY`lxh-tpzvQdH(g&LpIxh zEjZ@0m?>u(^5Ig{p1zI6rRQcXIHbMCqx|yw%JEflKpS9T51$u5da2+xgLLt3Xtj#r zfkRaB1KoZ*fJr%kB*5-H?k5{+6lZsi_iiD}p^T;f`dn22pk;^pnaOC>@eg79*)$8Kva?+hJWm=7%$-@MF55LY0MqSMA@x zN^)tny=QB#Nd!x@8G0~I2$VA`>Ko(^pb5`1Y9GqRhvY_G$o?pa=;x{G%A#rwy)z(S z)BF837C)spz0=YM=x|5`xAn?0%?Pntrik3ej20F#_~39|S#ZUJ;L9U`{k=Q8NcoC` z(|sDYIx1{$oGsOhz`f=}j$^m34;ZWTdZ_25UjVdl-Sq*H;F zD@|QbU2aAYETSy%h{5A)Q`#}ZClW}S#=)mTcQt871h^9zP%ca_`_2R6C}brGy2`TS znT1l7w6$hhQNcrGx$&FQ|ZrejSXi|<* zw zSr}4A(dKH3efOP2kz~b)Hv3bwnh0FQP5x2LbOtz1_EVpg@vCiKCKJSHYhb-CL3rjXVn~HLBO*T8H z`#*?X{xjoZsLtio1_||)wubZ zp!Hoq!dL{7fJy=*@b!pxLs}CLWUNDpQ2F|OanpOBWDg=?q`6zV2vyie`(RbyFCBeE zDfu>p7?mmQi|6(a9iYZX^U0|szhtPegwSrj~+V&Yy0kz0Oqeau&>rU)kVC7fpH82dpKl%&_< ziOkF`e#3CjLj0MuFLw%9x$kubeie8Q%eH$0cq@{<`Gyi6b}TAbfrIl{Zf}KdbqCxB z=x~+mQe1061>lZ!qbHDCRzL=MUcpKhK`VMe6|2*L^_k`OI`{=XD|mw4Gj}vEv($r5 
z*at+&+tlq0l-W2;c>1jDbUhsF$v!m7{QcngC5`iML#dTH&+V!g0+uE(*KR!jF%tNq zxZu^BJ1v=-9gbDk=jOg{63r?vI+qtc{V(J%|JHx)Zq7rGg%Of-ju+DRH?IOHM+OlC z;(AF`Qt7C8?AQ(c$8+uv;SD3$URwGg{RqJx5~IMouds(8>~1_2#bFF!d-1{zKx`(M zDMcHKW8LQdm~*8ev1<&Qh<{G$Hx>fGEil=dW>a%-XA2;_k1^Z;CaK&)BKI@ZKeX~8 zXlvjsY9QvyTu}F4(}G)3gG;=sxjJVeuGYDXZc_tB@_WP{fq=c#F-P9x`bM*dcXU&o&z za=}Wm%3}WhKUZ5LSMWJLDcG6vq~yzSB3agVhE>!UhJG}+R*_%WcMLkjv20zQOL>-v zAUR+J%&U%;`GlIDe{S)s^+D(G(7qs}?{Tj6zSrck-lkEB)%I5b(*z~Ok<*QkONs{x zCg|5TaJBJorIjxO`nJ&3w|ao01fuh6?{1G2GD1Q*-LgvzT%v`{DTY0)%^mRAXY z8DfPT0kw)}<-*l6n3lZO?SZ&~2URwG=uZmznfc?lxcE4OA|!8dp9FZ;AD_MU<=FC| z-AeiEq9T1Em{Tx)AsY7}*233-p>yW6D{AbTCL&XF=Oc2|N2*_DMn$E}E9mX5{bPp5 zAuDnfq#kjkN@t5gPUrKAc_H$ND3FN1-n}kQ5KJ6_t!D=e-%)K~5J|g3aEurrG%2gE z_}cwek93wx`TLde2ZRS)cd-dc_=UUR9to4uJ~4H%q^VJvneF*@udZ9S?l|5Ffd{$$ zS$=ularPU$e$Z))xKjQ702emUe@`oTufSoJi>X*qNCip(0JQ&|_W$$gU-`ere-p|d zY1%JCC>me!C*l?OXO)8Q5q@yZLTQ3{Q$_Xj@?Qua~>k12mj$7A%h>ESA%DV5#%5Qs-fmCpN_47S_T=< z3>6eRq-#<9W#No-N$wn!T<2taGMuGE8cr?PAXj=$m#kI*X&P8tX%92afHd;}hQ>NB zKz>ItoHcw0?B#I}?_n?%`n&1Mn`FwDJGgj1`H#VH0CLp#47lq$&s@91t=p+$5^uw= zmF~=zsZ#0Gq57h2W&^cMFw7OuW`(eps>D>HM+9w!P1shMD-*&X;fy23oBmGYDt>0E zD1z86x-lHDw#5e_>%^dZ6S!c07hnE!`}|K|>k0fO&eiAWU4ztxr=khkiS4DK+RL4H zecry6OAq~)VAUylJ3>)Cdooh>(y_tEy_r8eo!|d+aJ$yM)^l>9rRuqDk~!hwmpmKp zqgzBKBN8=mgx?1?aXNQr;5GnyCfi31#Ndf#Xu();c~3yFhP=C#AP{DD6Pz>fOPr?9EWAUGEnX}` zo0V%U=5X#_?F6gt915?@VgS-PSE+~2Q7CTs9Uv442v}w@xM)y;r~xE!fEmk3%No!C zV%FeDtmx37U{Wx9&S5-n^%-!i#-$kwMc*Z#7J}O0%M1hsASJAxRq*fbaX9LkiL8w; z(_7|IP|I^~pRDEd2(5X{d@LW1HyF)vvI!+`Bl!N!(|;BR)XvgCKFo4tcXtzW(#N;* zV`I3;@OadYm@4(l z)VLqr%>c!zL0wEMXt4{_C>*^CK`9TCFPd}~xg_R!D&grw$s>`puGGqfu8bZ5%h%m@ zABYfta>Vr4%=f%c(;aRE9bG@JZloOPR{3;LdExzV&VfO5fRjG}k(SW|I6iiV<5}+w zBs>GMJ1Se6boce46A~X>iPqK|4g#`I$^Z{XJupdl34qOcc<5j$L;*%IiK=60@-wy{ zKShs>nnECxghAomOs;4UacC@^@D%>3L*snkilwJ1uDhy4xOglNPWTR+usArQOc@nX zcLvV}(oo?;DkZrZW1sl7I1TAIp@QEoBV%J?$?0HVX)iL)) zdm`fgTdb?>LzT5stjALBYw$=KMEYJQ^?r80?(VF$!T$B3DReKvSiUCX?zQ$%CJ@RC 
zL0f!Wt2Xh{-Q{K=i0X)Ow1;I8jWiq^Q+s)LvzPJ#E1nV@!$Og$jo%Z2Ne>#8=td&_ zC(t4Iss6$ly1^D5qjBUipFXz+E?is=mEp0A%AH)Ud=;7@e3dU53^r+umNiYPw_L9a zBU+ZVm5+V7Vi0`8sx{2j`!I{Qyt%x0OD0k~^pDK1nybJ6-V%ikwKPO?uDtJ}=l@sr z&Ho(z|JJ{7RJ*@|9*c~PeWHg+crpbztbgbZL-mK(B8S#kdJ+~Ad@;k4LKG&Iazr=j zAqSLp*uwS{%JyJ}-~yt0U?j>~TdF7?!)6!wBARcE{`7RRbszF_m&)=m$SIv?%meRH z4zH-M_89aVRVML6O|?+$EGmtwH_^vkSG=X}hHpdTun%u_&f^d6;I~Zng%#-)ov#7? zTLp1}OGfhO@>Wr6Mkj#w*55(R!yx1ONcr`a`@W9pH;5c=WY8=ZUFOlgq1zTG?+7Nx z==dhM*Ye`YRfTN_K=pj9aW_Y)Y!gls znjtrgRCo!m1Uld|QERoT2v=K<0YJ5}4HG7jsYp2V*Z|#B{j^eW!L=9? znzQS}BW!1N1`NQ0L0PKYAlkw>=1JaH-O^fYsZGokG6ulu0!UGXhP-`8O!|pmYfI7i z&2wDx4$)%1QxAI_axy5?IjiWXQ_so4Y_%^zdtPid1TN^5Q`JTZELOiv$rINwToA^B3s` z6!MI~wWcq;BV`(GBm(Cr-lP#vA%}o#HL+iq8wEG_-`{HZ=hvy17x?};XrKGz8QHPF zd+Ekr-hU1{f&a}P;a~Y*C5V$xY@fzkOIjN}&IS@LbJ(b{q_g`09SblCg5IGsVKw!x`ywFy?YeV>s^JH zSgE_`YuyNznK~=92xW#9LrSC7baQUW=O9jsv98hj4j^m}S$Z^z!U60SQWyzSH#;~T z@5x1WiY#tV>NN#`4@Tw3xBwIx$YS%xOlig*A>Cuo*Leug;+%f~A_*^i5ddUI@+x4c zO6gXQWvt^;$(ff`w5Lu%p>$7@#JB&CviFQ?DqOclQz3x>2{oZfH$Z^Ui>RSW2}+kv zLT@4xZ=U|wt*wt*4_nxW zMw*#NL7SipwYHajb9!gVMOza*K!2jHI(RMflP5WY7r9ATQDaFoh!u_Gq!Uk6XX3e_ zKoCCnY2d3r^F79%cU~ZD^)wpC0Y<9XV!wjG=pv#_p{myG;`Sxs(R0Bs)lZ;@1#`sp zQn{@KH!yl-o|gV|FRS^SlKRI*HL5COAdo&FZT5>&b2(8)jPp2q`mh1TdlH#!?NeRy zqmdIZ5EahLswf`IRDJlgi$dLI=fS3#wQre7Nw5uU9@Iw=K~R?X0qVJ&U7^(W8dsxD zCbc9%j&_^HFRkZKLg^x~3^!lJd|oyg5NH?6eCK_4QcW2EITCcl>=dV19fd`_D2Yo4 zv7rs+JSbK$VTe3&C`x>c!pX}4^ibD)D5cTVK2mSFS$BK?dV$O9KW}kM5I7I*d4#6m2>O^gUnA`Zn7y9CB@hQ!a9G&2c=6gQy63K>S3>x+K6&pfdA z!GihM6PrUzc$sU^s+nxC3@c#gFhR5UZC9SZ#Ak!VRFrGtCMS3Xeysf5qndz|_M7jb zuAO7Imp-@U`{!YMOa)fR0Gvp@a32nWj0wPtIqldM?;a0iio$`V05BRyBUAC0(VWS($z4zBH{+-= z-#pL-(B1lHK3Adz(^7qLc-iI4wwQfqU&ygrOuvgCI@x_Le*I)XCK(h$cmL=1wz0@j#s}QyL8LA|1nX_j1LQk$8+!AZSG46$gSkroix97 zAp1P=9)BrT`n{usSibPXAY|k#fOsJHNj4_L{l=Y+H?pc_tEp; ze|~E^5NzLR%5vWq9x5Kar~I?)lY4^pO4I6fa>ifHd;1MTL1F(1?Ehg8_%Hl}89oK;L2AhvP|%TF zEL8j4y2eNxBo^QU2g~5N;@Rr9@x#9R=pg`@C7)U)&rOTp&5Utic9H|bJ-!8wM 
zqe)G7J+UNTl9^9W5NVGsXzZFdpPJ`44ZkS2;DZJa0Mi@G2?q~S*IZvB{YWE2F%Ssy_*sI9tk~m^yXLjB6|?1S;kjRHg6&$i z?i($kN@AQ-s{oc|f2G+znfFW%B^yDx&n7_hg;;Fi#>-NEc)r_W1TA%6zjPjVuk7Tx z=e%mqaV9#Tu$r9T`n*hpRrFB^M+>mk2VNh4B{Z!Y9{gmAtiBYTUoDqX+S8ZTS3G(i zQ=W1%tUKZXD(1!2@OIp4MgH_x#NLgLCx33ZY>hoR=A`Pq79SDY4d6BnXF8l6ccU!! zuE`Kj#gYc50OJwQV*BW)5Q0Z1Y7XqY$V8StfEpdP{7Oa;kjI7ud~@L&0hur#o;@lY z$Y&*N<9M)bFG&QUVx*!nIR@?H8yP3l)v1=78+u*@!{IT&B?HDp07gC3W&9of{#yUQ zj0=s+_PCB;$r%rxV;}H#`wlecsOOogA&~45fB}~vamF->3*y2mczAB^WZmD>j>RQq zh>GvZYtbBGR|r$i{?*?*^QnkOFT66$uN9xyQ}fa)dlT4iudQ$y8kb+|BvPH&-&lMHvr%|!vE-r zV*~r!j(8J}861TR{}9^8t9MV$)%}&E*PgOC%99nqiDzL2gF{ew=#h~Jk_2S$PlSnp zX=_?dXb+N8 zuLv3S7s9@XWy)REMdp$u$es8_%Ww^T%Q2jajT#u8Wdo3oxyS0n4h!n39@FOoB4@;} zsL$I;y6`HprKlD_=}HlW%$$e-xw#c=ghhZkhsEesA1ez?+NqWr7R|9IwLyPpS2i_0 z;bRegH7nJ2#i|Q5V68%N<6qN@Uw{63f93k%pYKoCZ@LT#Zp$+@H8Y7gFn%l(G|sOF z7#K%$WYZ0&;%tp!xDY7>2i1?+4^A#YAb}9(DHf0<7>NRd^e9XqnunrIjWj}Is8O*! zwhVX!HTy#iLn>}PWl^-SA%>xqh4}zQ|M-QWex^IMW9qP&5+gcI8dHX0I9x6iq%tBs z=)vzWC$BOt84Jcg5S&abp2pB8ktxvLl3@qLDx$qhUsymIVNl1;?~QQtcy@1;C9*hZ z-Y0m@X%_YTNaSm&bqU=hL3o`g$L0F2GG6%b^*MW0)iWbOZ}V;R%zo=kkbl<|j!*os zv%7sEdDu3TSU}EI4w@9sXvE6NnIIG6rNGrH^$Eh20ZqIqRt2h%(T7 zV=`fd1+jU0SV1_j-a2Wlz&hfQ$sEGzr%%-SVac!ihwBSJwfH@5xE? zN8d;n{O(IqOevdP=?No6x;^bN-&4vG^&mk_>nUet>eYq33&XZhjZfpHg)*y%Vy9Ij zO$MrB5V@%@Y*ki-U-3%DXCQtYYFMvK%2VQMq>vEk!_%2aTV!j@i zeYRW0ocrL>OquJieCtObMkDG7Bi_(-=X)6XY~5D0`AzBKDw+_l;A7D%>Fi-14YZj{ zGG_XrN;xiy8h$v-t|i-~yc2BCD{h#_YVkj$DqS^f)aEog*5SS;{Wjw*2g%MZu9}C( zO}aCkHq5c*0C6;DB>^^}$~)~6!A7n?B>!AnV! 
zyIE!8w>Bo()>KT(w1ciFLxu3~q}k!yd~rUAxOknSct2JNfXPz0!$OYKOJL?41u#6{ zG=`EIEvc}?;j!EjFrlj|U+roXfN1Pwajy#db@1ut#7c)YF6sQ~p6P`=>T0F}-BLQn zjpE!^40&V>epgZv2QvQxQVz~~^0zKKT*H{eK5 zee}*!FRP9D$C2$siDLCz zh=tJg{AFS`fHt8X)u|HBMybJ=JS%3>L1G-89p+BqhV48k9^nPu6&cElle_h1uLlh( zQ?-b(OVi_5xjwjcDNm_0t6S)PYk{wMTHUwT6TV;HIk&ruy8m_)(6>autN!=H(SO!| zT>EPX*Oj<|YMqj#{q-miA~g9MCoqx8T*JoZJFL~zA~+q=@(BolLMjTjU>Q^MY%I3Pb>BgC<2_Y`T6wrQnE1s=&bH?L?ZpkZ!pQQtd%hI)6T!sPX8cUkCl>oGw&$^(4C zWtyBJ?mQ5nEEW|q_gqY0UruTrfEz9c3mi@8O6*0!%L$eq)uKaiXbGb4_fKv? zM)Fnz`K9Wjj&dO_XPUTK<>JO+JZykv!J)8M%{#wTF~yZ*Lb_FyGOO3q;=Bn$=db^K z5@!)u_{b)EC|`e{XeR%-W%Y#hZg|(ribUJyu*&hSm-Er{we_AEDvd8b9X#oN@GI)h zdqwh{y$@_)IwpQ9<696K15l=5t>_kz+Ci;CMqH79ngbFoN-L$-z|b(7Hg$(B2TCOX zm}@3IOaT6*a;%6deNju>^|aWa>J+~U_OYNcLXnqvQk5ueHSR=BRmE{=AQ2G=fL;wL z++!F-PDFSyU|ym6B-CT?Ryi*NevdtU{~sE@wU&V<+z1#L487`c6J4I!&D(tOOliL0 zn544buRG0O6^%?b6%#g`R;0hY(7H?#+<9v#ujX&TA88Q7n?&e>amry)%|*g0#qV<{ey?g|)r|m>>7P z@4Cb3bM}+b^TPtflLq+w)v9ezwbth32lY?d0~SJuZoT;W=uOd2*X_ECU4OMli^L^r z|35ll|HOZg`T*qI71@c=2V$m2^&bql1hj;!oMev?YnsB(L2OvFG&LRcz=Xb8Wdz6u z0q6j;SfXw5-)sQD1^`X~roZl?JE5ZcR628%jN2-c>hYH9>6xv8qiOnR=0)^3Da^r9 z#~#lx%tfnV(NaQi*tQ5w1`U%r3mUYgQKV7_sow^^1DI_k)4s@OeZ#{Z%uQjszKpFz z*xRgp1w+&D(!@Tp^M|W{h5YN?%p=aHyv1_Yo_aRe!gwU-5P~NKEh-l#^_qADy#Qv} zrt94`s0Fllu6`}`FqvzPHK%Lem^sR4OM%@f-~zbMXZaM-Bx{lc})LnV6B&Ehc)g`N=^J}eS22_8wvqyb(Q;t38p1x1o9 zU0@>UBQMv60^^1+BS1hzC9G%}j(OpPCTRk)b2!W|!9ab2s2KL=s`5FOyZytIVO#Md z_GIZd1cVHhsg0L5jr3OZ`RR^08!~MmJ)6wu{eY*G8ev0f97F{JkX-d4krd!Yoq->> zG`v#`*sgHPz3d6wm0~L^>r3MRoX>M^kA~YHKnEn%hk;(NDO_jZb6*v&3$arnC!hWT z?LXl_y$MYJ#H#lphNTe?Ostz5y+a(Ccerf!gjfBk(g#cF#0Ssqbax-@&^aLlbgfj0 zBBAGlZN~GYw)v23aiQ^Wq6)FiVsesb0i?+JKWTh7{4V+&>D z-+Oj_{_>UdpSJ8CJpVZg<6rfE>lNS#{{t^|MSJ$QB0&IT(;>a?mVY5V3{8S$Wp|Yp z5$|MW{%}M?09_QoG@a|Hrj*uZ5G>JJ_;7JN)`=z22bLm^01ioDdR3_R2(mSUyG7zm zWU{ibow6ZH1tWt`J}&W@wF1n~nl0|q%Y^lpxQtFQds;F_4nhX|KMWva7rwcS&7>|9 zAnWVH!i6vn(Y0G*JeK{_Z$$|Y`~V*B297yi{d?+xgur44tL1*i-$MHs 
zXln;VOgM+V(X8?4@RwB0Y4n+x-#Uki@W`}%ko)CRN6wFL&s!sIo-!w&r03YUd=LJf z5c+5f2`C&lTW8JbtW{(NyTrmKiH6mZP&DrN8`DyI;BU<{iui{iGIm)@qGGbLV1L&2aM* z&yfNqA=HGd#Y@t{P3}MmuV6iKc0@;S!!JfPwq*|cPx9Hrj(n*onQS~4yYiJgbT+Qy zbfwB=kN8b=Ng9aajSG31a&Ka%{Ge&FM%wT6$3m9FF*j?YIC~!{N`FHLM50FAR^_m+ zP}mW&b0AyCf%2_UOZV%hS{1)iYcY&V4>Bq1xYLA8^rEJK=yk!r?>FcU;2kDA+4#Hi zF1bdL3*W!>Il-OeD^p;6?@8D6{s6Ijy3?>KTBNw7J?uu|T#)2c-G}kZ`|XQ9D8$l} zQAL+qm9=pV7hmyq1l-=%Ug^8O#gCb*Y^~RLtTbl(WpDMr+kgM{{VPAW@$1|r_CfGN z*Ma@@NDykW28U7L(N65VgJ$q*ClL+>iMiUDR&nU?7;?e~1+*F{i6X{}Mo^|D=>!?F zB?+SMvDVdQ8z0dsGA$FBJ(WLbTIV-~N`P)Faxz?CzE^tcQ@4K-kG6Al05_71N`sH@zB2?B1-+VYRWO-!{$G)#@h>Cea8H8aO-WNkbZHqh^kfj zVo_D_;}9P)2_FleXm3iE4}#Q5D&*LPuWgd*_>5s|mVCg$_#74{6?RGzkg5u{_{{ng z;6_|bMyy$;+Q_GRar6Lp^Yk*CZfolbeo@0@OQPFO#Kg^TQz#+4qz~P`Vr`c5bPwY~ zm^ubqKo1ZQ1mNg@f`sI*qXgxfdc_QJAjwbS=h-YvgR;?AXG(9Vc6zroOXDwb*WOdB z_~C_ru9QD=>x%kwXO7FC@O*S$@0+(=xW<3%Tq=^L#|%k_yLWv2=<^g2$GbY$UAZ0s z^2pHZ0^z%dTfC^TCT(Q|n}QmVLp8Nx!g5N}dc!u>WwwI=2b^gP#aQha6D$iuy__WX zmjr)Bn#6sEx+WhV3C(7_x2Z)oc6Ps>k~I2fH(chkKtn$CKH! z!W^B7L|znEQ%W}fn=kTT_1|di!?{boaVrm9jrQMm0s%3sp<^n4cyp9Ui^`_(9BxY^ zWwMc{7)eXn7#72p1!IX9;V${`;g^XHjvKG#5-Z_3G}8NtWmuJQ&BQ$Yod1t##n8Vm0C`UQE&K4io@F+umA%p^TQAl72 zPEX=Mu?Rh7a=xuS8zpY7N-VQYpSTvH7riyY%ajOezj`{%xGyXM?5{k`&HqawGR((O zDZj@yyUXRm*A7jeJ10~+btG9%wRG98)UDi=#AIAMk6F6*`M1

%C_5JCQUaf9dmE zs-g9K^{2YGL;k!Hj`w-F^ZV3BYZL_Bd!NLVGDmTF9Jc!0|{gi%ZRs0J%fm2fu5|YN{;sdfeid$8h9~`vX&11Fhcd3 zzTC0FWp1w_gON%WaM|D$!+_Ck;Zwe?#@vF@ax~4HaFj-3Iqxw84>3MvyLWnQKs}zu zv`odVyFEgJ{+((TExV4jG)qo3AohHAbzalsa|VBvye{^IlKo~IM`Ysh&KcE@<4=9+ z4%#Y`HtN!O6mS6H@-=5%c_{sr4O&5=-~{(a4^+%w&9GSytBgUVJMqFrk zSyq~9R4GvU&A-*);=&kKa-x;ier$VQTs}m`&38{{<=;urzwnRQ-aq@vJMQH}SG|>1 zKVXRgf*Qk%73&}FA&(F9hWtg>{L1_!CrSL~ilC$5Efr_NguDYL@S>jD&r8p%V(A(7n6Vr(1gwS5H% zr|U?yasgowUeJi@%dKE{ZW-I!+x(z!!;)-uvv1@Bn&j$yI^Myv8^d=9TQ|0rtW0fu z((Xx}F_QR;H5g%QshYG~sffwq*OSYE~c*y;^U(s%+dij zu6FXFb`A2!%P4|nP&63~*qDVCNYKTy73B6>S~fxB`pS{xHYjVbJ{*k5_$1L|0Mc8t ze9Or-pa&L=w=wMLlca?(7cCJIEC7NSiHH-Sd+@_y>v|>38J9k*l~GG1Q*CFn1V1Pq z^4N$(6Oa!JkuL03`cbSnl*bspy)AQ4+X_lZiFY-2ij6f$ymxuHo1u0w&nE7P2qK^@ zJf~||qu)42RGG|>vIg4^T~_0@@Y31sQeuf8pZ*1DZkFv9A2;yA_o#B(gqESSq5vTz zfRRoAB9OhyHj%&*wy;Q$;|H#x{*jSUeG`b`bUPH zjN%gCjG@`&KVxPrPkiS zWHN&XAbsw>Z2St;J{0sg2&CEz>&mfx%N0Qe|fdKmvq0 z(K=AZyGh~cOAO-^fTT~FBjpm~dg~3tfLH(u9224pV6j#q(AG4;tT5#;u*{A&BWGY( z`O+rg8XrPrD>op{SE$EEY(4}*fqI|P5aP> z$_6qA6@ubpw`A=~@mIy8jzXuf-0|x2@u@5Tqjf_8H zCcK%^iVe+r^-No>nLxt)y;?EGpWWbxrchVf3C~88!n+I{E||l|4rER4iof;U-=B4JPPn zUOfTO->4M4xaIUQ_B1<4Y|v#v`7`0}gJD={^SLfL@ir~~kdD+xcWYZ7d&!O0qhphn zt^7vaC3-INp(1&EkC~`rZRQr_+F%6bGyKOoupzZ%B;U6rS9hgQ*FwL^+YJ5KE}jWS zisIPTr7lY+oUD7g4X2;S$CY~Pw(qoZA9g><-tfKF!8^5?19F#>lggaFa4B7`oMRF~ z7EjguZT&UqnLC@%Ilh%Se92|F$58twh~lFf11{+ztda00q(~Ak1PI8A)l<$ftgTwr z-m!A5BaUQ=ccrf9An?P1_(&6VaYsC9P6b_zuQ|=hOuH!z17Z6WV{+C_xOx}4L*0=6zON)xU+O&zIRJyICLBIUQZahK&?$8JW6WLQf&3aV_&LRNDI}c zVSyk`NP^d}D_>+L@lo!CD%y%9!Gmr>dDl==bOg&NGsftf&S3myUjdJO8;>S|)}W%G zt%o=LL@Gdy=l#ERm@f{f^LiM(Wz?H}AX;gN@r=!;=H{01WSJ0pre?RasUN93Yaz`O zh#;vXww15hd1>RrSQ^j9kL=}}KX1=|Bc$#+@3N1a@0Lm@3sz76Pu;G6Uj+Z+KUO;* z&aR&@s(U0d@DKh&0t-MS+vRN(IAipaJO!t144H&_TJg^YQE(WLYDybv49quNl2Na zo~ss+jt0<^rRZM(6gx?UTb>+!mvqjk9@GB>?NM_fWK-&+yJ8$XaGLz*AkQA;3OJ43 zq#>$dBa)W zvLHGAt>wcNakaN;#N|KoJ>;bm<3DxZ&w6;gZLwfY?)S6;in0}P#}8oYei<{gEklBu 
zg5vCvY(*-Ha;&0OwicP$aKeXdiQf2;Y_B3a#U`>HL0{usg&(f08;1sq`kCYXEn&TC zu!&3GCZNJgM&ZLy;&Z3TcVWy#Q~Yq8@tX=4G;vrW2fkQQ4VQL$ z^rvcA88{}@OVlyZzT-ffZvgep-+;BFkg#O z&5IeGIdSaW@4R+t(15!^QAvjP{|v6k+T06ptr>gA84H?oFrR-y(d1v8BL;;Xa^|~b zuVjKK3rp8Ek3XtxBoMfeE~GUo{AlCg#iP-DVZSy%H+THrcIO<32)}JE)umJ}9)H`X zcdsgY4@9|j?^9mt_}#HMrR*o;HME9ho>GS?zal;9;oVFAnL2pwpsG^|g9$PQt-eDU zT0F<{CAr7;C3M4%pW|Zp3lNn+KD8i1+P^f@t?}=&x%_ohzFXa%MUZOkpSb}{>G7Jm{l>QE3NK(isDi!U@B3coUthz z#{|+yHChlDC-=2377$Rs#Q(y$zZh}II&JuBnv5>uNbaTyLf{aw`i%4~$2cE&?QuA* z6v9UOaF%2Oay~<3BBh#+(}O|u&GUl19_aY2!iXYzJSS$Dgs#D{gIU1fB#-W6$NeXL zxmH+SooXDlK=h%8sGrV`rdR43|2jLNj+?#9;+sD)ihiswU!zLt)F z&_?F6iax}1D))1p8ZazV;O*V}Wt@r*6{v~`r3P*tRz5=0ad|dz4Xl1ZzOazQF{xN& zi36QF4FFuD!0tF+R{qpFJAOiv6bn|>Q;yA1JBgyA9VnTmmJnMYKGgwsbapRhwpakw z=aI70sNf;sgu#^;fj!$e6-jROX$i5s$e7`mD|W%1C4MUv_faB`%~|qIYBT1QOW^1#F5ErBa@861fmdzLRySPg ziQZ7jqw~GiGnl8hV1f;gVab|uJ|S77eI|JmE}5TM7*v4~E=Nz5yG?IaN#^VG@n#At z(FnYBQa`iFheJH9^5~VD4wL`#o4yIUC&!Kc2?R>u{3+~M$M>I&FnQxu%Z7D?GCpk#nDeX8L{NmPRPJo%( z0KV>F6&mdb`ga2VZ;1Z?cjLeC55l?b9GqP*GRiqA>bP>;52zQzl998~`|2&L0enuC zC#Sp;Eos667U`7~kL!aGUO}iKC0Zo0qd6O6nJPfK92D&$0zt<+>B#K1%4C$XTn5Gq zP^&oL%-+}mOYG$5&0&w7(KR_Y7zDuvFraFX#Np$tus*mdXjGDO=92fz9DJsycY>#$ zb(YMy7TQ#UcVT__dbIS9zxmA6X#v%TS8zvDBbqJ0)eJ zWmfeNMRm6sZI??Ne_%~-4Zr;(IhtJRWIA}?S3;#!;mn!L{yB*gC*W0D$F*Q;S3GLM z&Pa(WTHi6OFamT1f<#-6^koqIVKOZP9$nfcSK%bjCK%4^V4VBqMUj`7`?ElR^nS;GcpoSx$?-s z_6Lw3zTpbr$WA#dR*kYb;m;Aenb$S`ur&#*E%ue&@nT=FT$>F0UyV8ok=<)0(9aC& z0GpIdJdJqT0`6QfFDMkpAEFmyZp@}-{XR8L#OkDS%C&sUvdMcpwD&}r{o!MSx8LQ%c{mJoiyEVY|1}ng%MWiA562$IR*F1yv^GdnvJshD3Pk?(anfpM3+%C zyU$woy4odrRK!iN?pO)q%lC%pf5Mpaq$_maI}+5~#|`G8xO(z{V&Jq^ z9Giiv9)+8T<;IBW(EtZzQA@Y#9RvnM2)D-{xsYJCDh(~Fy7S6j@q&`0C2Wd`097dM z6EC$~%@Qo7-v`qBxo|ze*-jJ5m zmh64&WA}%Zw4o>oiM;E2qvLf8dvjyfJxSst;%E9v*;)!#gXC)B%S*2|?<&XdB?1J3-^|cdk8apH# zChkxOtTH0L!6`(tzje$TCK?FylKySL_msk$8Db}^hYp(xM#Qr^;wG3)WsPJ!BQ)$e z62JmADR4+!yI{CL(jTV}6-fI}MvwQ?Rf4*~GgeVN8Up@Cw`^5Nui#xx59wJ=cQ-s# z`?@crztE3k?Lc?N20=3z@!EM 
z*xOq!o=EgkVBt+AJ;x*a-~K93(;652!g&oA7cU@AM_v=>D$>RJuZTfTd1g+LJRB#* zAT>a733MX2trQ*I;1SyVls4G)rHBFW{@|EtL)bxQwe^uUYoYM0_OCwsz4j_`qE2_m z)@FqslqBPfw`6#(Y25bZ8nnD|5$F!_EOuYyp zam#)7MM|M|*7h_M3^8jfZEv=<6jf(1W_40faIRKoY_&tr5$%cc%C>{1v@brV9;GHX zM>pbz1wmEerJdaaHzj!ODnmQP_b!*d{{Dmhls(1heSg)lsMmkWFS-G)7bmVX5BMwn zViz0u(Tjgh)YJ@pQW~cz1KToNb^er*b5*BZRBmzV-@gm~S^s5rcYazN;otX>FmRt7 z3E%`$Z+1wv@e$NM@hDuullpEcXl79c^~sAQrhysrj2;`6NO|;B&XSFnVS{177|1$% z=Cg3o;*!!C82`nb?^5m?TWqn%rGU_rY;>jp0%_Yr>-RDizWw9{PFU9F@2GUSV19l3 zX7^F(ci+h4&O2To%@65!r_&0+iH7ZMk10J$YXtVh27kfZal_<+G6i|89!lBW9&RpZ zRFNO^%FLJbiTO7FGizvM3y;ctBEDCp$p^p)(djD#S@kcO>s+78^@4K}rnKmLUe?ECO!j~87-Hp(Edd9SOEt;VLr8#TvtQ@O>O$G`!Ms^;-Gt$vZQX02>5;3GU zei>V=kyM!R12Aj3TH=HSJY}W+8vjADHTLePNm$UEoBMxaYUd@ABPOsnubSycv=Io= z9&B1ZS>6vl=o0Z5ni(nJl(8szob1KQ0`8e7hXZ|}Yad9luqrW5bf&O}6*)t%v=1zh zLt*w+^Q5B;OlMG3gqAdzH`NTT3XocSBB=&QbpwN9QU&FX7<9>|I4c0I2fYM>qzXc$ zP#AA7FI#dvq2_=QFZKYiX2b;p)oS@Yk=J;aw;_?GgL zvF7X2#vdm-hQ*bX%(s^))*{W1cheio#ApvNHkafrs=TV_Q{m0k*6_^h7kH#^-{%kI zxALE&6a@G^3F@tlwAU#i_MdTVD8!~uK8IyC~A<$E4w7GRT zb^p|0Nx|pDl;$y?kr-$Xd~*OJ_t!g}#Wc0b-)q6zs93j=gPHU1xc@&a>7Vr-An(YX@lIB4tj{;c?FgbuZm2{-|$LPrU0r-kh&>myKt|XfLTgc_Tu7@!*|P&?y6! 
z26>aW*FTxr$DPAmENV03B28Y^qO5jGYQb;em%L7;2Ne&yG@j(KJIih#o9GY`l;!93 z5H)g%zg9B$I>rh=$ZkSfCt@aiiOZ7w?(e1?mT`cWwpIQUt2R|aS*eawfzQBcsdI{WU9pZa`m^31Bt=G&ifQF#y3ojqgT z(^#x{<4ZqoXB5WY!OGl(8Uy3Ex#Nwk5@a+XKI~whmtD637tF;&awsa+V$zh}P9SK) zI)+Y|?Ns6AaFE1WJ`+D0iJ<7owyEqFA}L1-3XK_v0Er_3^t(pnxIB@Y$o> zmR^_Gb`wLM(qv!*C!}-j_2cBQpAS;54_w=9t~8f2(d&NrKwC(d z86DHlGLrirmcYO8@4C0C_+BdR=}BSWyUiK|LPvW1da8x6(Kyx!5o*ox=osSa;{4bM zb^LM-?+w4Wsz4gg+1_(o<8xF8AiI#mOvCeE-myxS;rB}i~yfn`&f;6I@_gDo2cJ8T@x$e751cL zr{8shH`*BBvwHo<%mzzJ#HJX|mr0V){@KRc70+q(Ks&PQ+?0NY;H3`Y+ZLALd)2Fa zA*;^iPY_1S8+UvWDrvTButy7j0M&kt^TE&{B<{K9o$Gh54Cc*~%nx3T?w+Vk|K4X^ z9UYN(^TD}R&w^6|P7uiZdnT3l4}V9mE*kcpd8{(O*(cJmJ007{p#$*>{@L*79zSii zQ0P?f*_3Lv*P3wW;pcZvG}5so4OSl|(=Q%|u7f(vAD)J+gFNL@QED=&I)Rsbj>Jwq zYp)JgP{3Q+%GKh4qIdxH+%|yo!a2{&8QeFM{fSX`ueL$_zH6)U3=bCrOVpi6Oj6>6 zDk7VuiQVBTMMdaA5u|E8)&%B>Et@(0F?>SCn-juxRW^Xha=4o0w(B$Gj@KhJ4&>v+ z7V@J9T?5lZXEZKs5$9HZ4+)e^CZ%pyH?U&EWU{P6`-P2Wo8+ah8}*hCI=7Z8i)BpMY`@>0 z;$BV9#Wg;+U8c%AMd`ZHl$DZQo0!;%k`B!K8#NwNJJ%k?^qB81>8Mr)#Pm3x@kp8C zD#@J*CT1n;&hc}EOLHeFuxV(5YKcI;>3zS{AMopoU}#vSYMo!Z(}^*GU<6;6ZW0`1 zpfalIX0q}jf20_*MGh@&Nyl-R)}TTjnf@mk=>J>)H#pzG;}lN1PK>L3!7%6%4dat|ESF z;n9EbR8Td{TOR>aNql7v0?UNPu)zF}qBM@l`pxO7ojq>Q^vT}fDR;#>?16Orq@Xw{ z6<_vEdQ`8ft?TTIxCHm4FRvg+2b#`Yb&rDUYW0olmF#eMd^JIgo(QHxaa z?DT5N4PgD z!htV*vzz@SyCYi}p=B0#zvgHCh#{+B*c&F^S>n6rk6ojS74auYqgJ?|P zq{^eqRKo0ziKX3G>XF_S%Oi!#rDE~r`(+~*^SrJCyzv`jgf9|q`risoplE&jkPqfM znT@-?&dK$4YJ*fyToT~o^K3J_MQQ(lnDb}FRMYCMadRD^GkHkv9t$B%1igseC9U4k zMQ&wuk$uJOzQa2+Xv-W597_U%DIK}nv zI~FKW37y!-7ph?+9|JbJg`q~xr}X#w?}A1`qdmdP%ipz@`#vpBxTY!8t?n2v3yYi> z`A@L^KlJ8*{rwmIEBDqo1K~y~k6cE^*Rjfrzjlo;p>)zmEwo&I%?HdV2vG)Ad6f*s zF2RMBC&eh3DMMA2QusEB@nPJ5h!OD`mhD<+U>HY84SIEo0E%5^Hx5li0a)!8Q;uJy zC2D|%S)|%27h#}4A#!lA317J%ifW&LQTNPzem{rpH}C)gd>FJf%+Qk%8^`6{lk>%K zoGG5qp0G!`-`yNIll_c2lG5&3ffeu%16G>~3w~t-~ZZ{m7Fo-&D^7Vz0hZlELA@?2{6YZ*LlXV ztT4k$ghBD=o6d>Lw_u{;@FqzpMXy=E;{#bX=o4G9%D~-TCmRQT3Cm=K3VD*PRqvKS 
zws%ZPROCy0z-j?u*d~wcQP=-Ccf?wsLRIo>veJ4IgdxtHl`5;CLGGR^e+4jC0aYXq zS+dTep-}2DCEmXS+8T#LW~lI^`^Vi-p=eHhZ>|tM=FNFV#Ejwv?+(Zfstsy0Pr~`d zps3xYGbXnu6p&(_%GF0+$@5nURQZ-d@96`r3(2MeRPj!}6vMY>#|x!-?^=ISAJ(wm zFfJ`LDKZMOy8Og(IoEYiFRwsQYV1x__q&Ly+yX zB|>$*0BGd6t{K50piP@QO9J+-BFmMO3YcX%n(PD^y)oW|G6{E?6>lsJvep4sW|tWMJQToF_PV(_-XiK#j3^! zd{@`cKB#CB@0PP?TdmDa#Rz>NHkaN%IOg1Vxl^a%4p5+cPz^Odh(obdLjX!^5%2DUX1iTsQDm1y)gH&@1&kApqG{z za?LRqCydY3uB79+h2i*}@V+$zL)2FTs^3%mhfB8lW?r~}4L}f*SGNZ#C?Sb(nXNOa zeDQQJiLM8b1P{pHa~Qj9)UIg$12)=4Csfp4wRQ#aM?Fz{p|)3fw^g~q&;FVU~wr9((c9WXx8dLWKR|s~;k(p50QBw=Ii4;qa6kHSF!n?Mewk=*C{aS(v1i9x2mr-m zSP(;~6IQf;Zu!t~Bs&49S0QG2Af@ON$d^+8+R$R8gk7mFY>_X~mC%>+YVW^sH z5PS~oTlfqn;TcqTtO!=Whu#EhT8CwJAmlvdz@e+8QjE+*Kd%%jt^xuT9Fr5= zpJ2=|5acTTTd6AUh!k}(<$&IYR%<32&vGV?Xq~V+y$q{NN{S%YI4Dxh43wgSoGuDz z?E45if)9kuyh+S<6F6rA*$LJA_#cnuHv`34f4o+&ar?!ThA#1sZMtjze>{~jm92=D z;yi8M4l6mX7rSh;Y5{;gd={JV0S(Z!T(v!+y{o9mk6&t27qSg!6XK8epG%R9F~bC8 z$ebqt$%jm7>nB5ky!NMl(|_z0_2<$iI?=RT{~=e#5TT} zz{g)pH-Kt+JMnL@sfz%xs+3HDjxxszO}`i4g~Jaa2ir*__N|}9Mfs5ceStcO(kxbj zbTzyG_-C2j?~><=5vA&)y%_ilYHc%M1Ae()0#1=W3simJ+Vdp@6P%a`Xuh^$QgL4R zYFeWI4h!(ct`F)?j;1LjesGKrq@dk(YAG7AVmc68GYfGEPu{lyt9ZSG0f!5+&2#o= zgIW4M=UdOH12}@03}$h7xYW42hQV6=K@W`=?SGLi=>Mpq;kT2;1L$iJYc?L zEtn>$-giWWKP^!#fHKA4V-oDeSYG1lMCZ5^i~C3>7B4&;u%y6J1t2n*10b} z0oms1^Wpp9v+8rQ{fSn)Bf?VO147Oe>jj)|$eL(<8ROHexS>y{mpn& z9B}w@NjK~mVSonOKDzB6D*`tXi6G`s*S=|`>Id(vFlsCtSWp-1pqfK|zmy>AD!i+T zJP5eBsKb|ixeOknp)J5eIMvH2erlOj+&2^-k56_~dVmGMgPK*psDDnW2rg858ey#7 zyh2hE0M#qCq~N5^I^8?SYes$|5-n^h>^>My;+l-3bmnFeL0W_SX0J6MdZx@f<-f;P zPbL$ybq{+dsT%5SJF%*s#QaUzyet!>CP7*^aaQ|TjSK#JY(7UbqnoeswK3ti)cq*` zmhan!7XGtSb5;c{>L*_|sBNs?oG=7FPAB-o4SQ_#fQts<`+$+q}7Z&FJEUr6R6{57282x?@#% zR=zxwk6%e9;-NG{S(vaD=FTs3sk{SYHYuoP=mSY%z;BCx_LhQ7LM(Drcvc8syMh?K zX%GlhKt$=!xf|kFV22oL+lCtP<-JWHkskZM(Dd=F76ZGW$m&7&)zJ4xcsIE)a(t~1 zN2CZiNGfC5jm-woeMm0|+Wur-DMgqLQuViHZO8f!yhPX`T{*1`kz+kOF~K&Mc7#7a zl5!jwMxQn2(dGb=FjA1c{0qjPymboOObT*H-(xl^8fEyw3;GeAGw!}vf6w`e!9nLm 
zi?qAVdmUeLP1du_BZ$^k;olRKU-!f;&K(0B%}N%1;9Pj)$gM@QHfq_)`Bf)oS;6+R zi<&N>GSTnsm(MKrUte^$z_Dr@t?kDezx+?}*Z<%jp!%7OuR@VZlN0w2NNqDzw2e`1 zOmR12xOAHYR};@VGz+1Z=&0`7YNvzPt6-4Gra7cJ0RRL7Sy?FDB2&qZwP8k+LA5LY zkZd+V$RT1$FbWDK>)}(z%8oV1ZyXb&n*F$A1fWr!d*?Yd$}myAt>dqHEEbCM4DsWA zlh>CCRpkU`IVM3+w7dc6FU0Iz0(u)kb3N{lL#qw`I#`}}u)lx! z`AJ#Cc=)4(&L*px34j(kx9*{@5$(^60Atv>)(c-s?s;9Q5`-9-dP!fMaPYWX9F%hI zt(*T3o%-zTgU4(`7Z=L|bhT1vLysMiZu2(Ge?Pdj5%5bN1BdyfWh&dM6)^UsVeBzM zUr4;N6r&%BRfYplbX5?9b(k&zSi!@cuevS{vT!6|0xu?;E&)%#${(dyL(7ce^+~6C zAF~6L5!!1kUnGjOX;vbjbjz@bmtjdVxyEhXpCKS3go&D0)=y@iCEY&oi-E+fzR?cT z#eF3rF80ML2K!(@mVy^8)zVO3<>wyJNHzt{}tlj1@J`#lb}CMrZIO4(e0S9^c^PUp4`kLjyRs7RGi+Sb0+ z1LG3c$usRH43VV&5De^O*j=zkouEt*7w-YEk9!}rP3;mS`3CHozl~Hpn=sm6tb3(` zxJTGJ``6I8eWCi~3wr_kN5{SMgM&S-trN$)bPwJsx#%TDB~o?&)l2XX{!vTb_+@+A zl={4d{^jKw8GzY$B?A}>f(`T_P_U>^kM1Fx0ioHG_&>s*Q>1k$qRJQ%9rcE<3P9t* zKs$W194s1l3OGCy5dW^lDW|bF#pV;Q0|`D~d^lA4+gGnAT0|EW!+ABubcrQ6w9{|u zVdRTZ>f@z@7&C<1R-Msl9^p=4x57nVVX&-;y>!%^PMh`6xM^-D_Ar)t*mrN^0m*Lz zZpx3iy!>$b@4UXotxp^o3&+zpPy9{!`zT>#5%ldQR?3)B%T-+!TXc)r9kgt&I3<2f zUc0*Nr-EwEj+0;+6D1=fCVZAK(u1vFjeLTG=;cTz!lP=Qq^mF)#Xll1Qj&r*(UD2h z?+zO>yP-LoGO*k=v$~D4HCx7Apgq|Mk*E7%N_WbXGfY%|yE^4yFJ&t!$i6kdG?Pyp z>4515s{2~p{;gpd@z5as!z<^Y&yO;mymEig_-R|Wr#m>`Ezo(}Bji=xGZtXb)N&~5 zL_x9MwMBKHr7urV%SbFpn#Vo_#=_r*%;s62d*J`ZBUR-{-iQO8w^!0Gi$qyzmEcRk zEy-hh{R&X$0f`E@lf$o`aGU~2am!g(JHCcaDRwK$mmIZYzRwOIv{%&TK@89npQm0C zSdzPL{$CR84F+s>h!vPPrr!ZAIRqsrp#bGr<}Khq7G~8tiXnV58mnE^Ee!GU3VWv! 
zy&O`lvjeiU@uLt1!6tD*BCK~&lSe~2ZN<}=!7b)HSgB2%MQ4=b@7tw^w#4T9*oBwi z$s#)ZIJ7Qu(H#0 z?w4coyMt#XUq~5Wqg*MG__QF*BZuxMDfidqag~taW5=IEqCFG8^4Ll=Nob4(L2x{V zaO{v9FAMU!#(lg=D|xgOt?G2@g4D_IKUZ9gQVk5*@=u7sk?MErFr^jE+@P;P zL6k+;^OL4mzjus7rl;EK&+;K&)pz6F_5F&s_}rHjeENj}gXuPo#DF9O*{>kP|FMfg zdFqZN1SB%LnEy5Y&)vP5Hk_PX0v&DO1Wi2SRHwjw@&2R5n#jB9k@aN9@*-`X#l-b# zc!)%-bO+HEcGe}r!9w_fOk(3`opQ)xit)E~{O>u2+?91n{|?#0?@uh={N;(urCAb^ z152_%5PZ2DQ6Sb_`?cJx+@puYP+x+PH33T_h{1sbZ8)e6=FrT>kpXj4-3gGgR{KC$ zkwY)07Rtv4U$G@9Do>KIG%kpy4&j2-Qw5cAJ;D$ZStym8B_9XKe_le;LbIqF4$MZ&)*2tMm`fR)bS~6w)zWSB+s&nB2iE$wQ#GopW)pt4wdR#Ti zjZ7HAuZjQkPKRpoo-%_WGi}-_cp$$G%~VIK7s9RM^!qFgBa^bA`(h?K^j60QCgA%rB}-J`_mmqW6j6B(aRL6Hx&m}nGA*$> zGKtrWLa)F5*J7Z5uYZvGy^Rk-kraUocV5d*`2E0#@>gIuAMhJ8IZ`#qqplZw1BH!Y zhs~Qwsucu}0jRjlWM3brZ5sY!Hl}E7o z-un(Da@Gvux_oiIb;_?<`cM$k9IMSn-%P`WEtC65{ju4CO2kzvHC(Fmuj26)p1^MZ2nWDs_J&v&q& zJrb9A7y2$*t~%`(Ipi$%OErTho=2s8g-ZFm!4HZ|@IN&lfEjmp(D(TeB`VE+{o{7m zO3+%2|I-TFN`-@>SGHd9i0_?~O9l@hsKW=J4X*cC?a0;#lxXCctst$jvC7K7bVkK| z*z54bvTM&?rMxJ~f1TacZ2F@|I)>0{&b*4?h8_+Pdmhp@`JFm-k_d1duEd00TrMYN z0vzK8t$i3*{sKn7q8p(vSukp^z4?B zDoc*Mn!UW0xWbw8o=WG2@j}dMrb2Ip8^-hxendxI8SqEDR`48ci(RPi3?J zZE-bE-<}?)uOH6r?VAX4h-rj*oT2aA-JruZ;k=-pJb>sCk&~>>>yqNC&;+ws04Z9* z!qMida$kSHrkEo?&K*fRrMnc^{Lbsj%fu&(IUNq_E+%apfTSx#q6a8&)i{nl#(jAl zlmY7XSs{Rxjz$BrlR-OZ4A*s%N%~1#)sc!Esam?BpcxC?xPXrgV*;4_fo&)2Z;Ln+l=ar(IdfU)R(o|S;=0XDs$BN{s7z3}-OP}MmS z-{x}iy4O<`^mw{ZtBP;rm@e-b1wxn4m$MeV=G}^%YKC^fZtZd#F+U;x9bZ*B=Dl1Y z$S!l-jn?F3XZBq3)ch5uf(c&uhJod`|GEwEPyN3?v+=jU`X7NJqZ`1xZ3X}z3_$yn zv9>$GbhU_qM+`{HNeIZPMvt&Xv$XOqAQEtM)8RM8&*r4QZ*Tf3r2F;$Q>D=~MTpX2 z!T_cD%hY`i*tJzAfy94#iUwn-z>tt?Ah?%BgR?IqqI5*e$I>oBLVOF>F2@|DAE`Q@P3h7FnrZ657wSe7%fl9&O^+$@9HgQQN^J z*e6BCM8Cq<9zNyD{B+uNU}Rg3nN3MUE#V3siGqslWeO6)S5?oD1WYV)nLyGbKq;uf zocr#f4WEtIIzKL*DaF#y3bWwza*yA|?Thh(LxHkm8SgM9ag3_W=jz8jzw$@wrQ4*eYQ2 zW_-lf8p`f{@VIcAOo16>G8gTH^_)_~d3Xho#DX9czzo@B8D!O(cLkAGXLmns@++G6 zmO99iS}qiJx@mY&@>33%fntWnn9RNnE<&oJ$iUq>W#lNbi69 
zKtM@LR?=cRtt<1z*~D$#L!Z}96esw%&1#&Rx%8UC;7yw5p3wia@=3Q` z=F)|sI&tjD`(pOOQ{phHA2{R zCZYXd{ei&k@QZ4qU)MUme4A(7I9-4KSX?>wd?j5#ihXlIyFs_V)F+Jp{BMwxmfA$D zx{CV~>ui$f+lgLhuI3y33U-`aD!r0vQxbrB&+OoWEAK7%ToFC_0MYT=#@+nf&rrPK z-k|F>b!HM6&jQP3kyJr&^5F*#I;2ZJTk6+c_I)3^s`@hVyp0D9oNUD^n4`$wm)!mY zpSiE*p?i%5@-gp&ec)4IR5HyZDrml3={GIO>nPc~y`d^DqR2q0_BHyr~IFi3``;vk2FfGwBGnZK~{IJNiXu#}0rQIH1s z;DsDCj5aj>kO#k}80)xv zW8_JO90uNrSpsWnn9qU|C8F;8Ke6&z9(RA$sKU}94$HI=qn;N1eR{=3qc6-|ud$Dl zj0`n19F`K;?8WpsmjxB{6dBxXpL|;Pi#uLcN7+4hy6od&rl-Hlfk%kMeZbwYs$<2TBe)V*e^R1x&8wB+4^$%`%@&cfBTIJ&Db5N;Iz2Uvh@6VWDx9ESRmC{%o&)xuJh`|WU-%^n7F1^ zSY7SZY>8e{vKsl=!nl@sXObhzMA}`@wcYAFer?yk0q*H^|BiD*`)%hJt>X_<>8H*e z;V&MW&-SSnG1JakDvWFvP_{jMIK5|PR7uBi0R6i zk84L%m`U3!?l@_b7Q;zr$uDF%`!L3=|Gfb01co_^zaJnuzNG4q%SaOQ@yb^>_iOc2 zR#`CtI0kPj@rb=*o0?LdRs?&A0t#-vcr_`X_eFJDz=;Cn7jy-TkXC5`se0|n@Jig+ zlJIahAdd1ql0L<~8xqn)P8=J@>9PWqzM1#ac|@In{9|(cxsV|fmY)g9l>|Gw@hsr5 z79T|?$^{I+&bQcg;EPmD?kk3Dm}gD+gC=rnm)?E(gt!iVFV8i{uwBl}uFIYg8H)`0 zk|UZGZBJYq6W+Jloo5VAzWo}pp7FAzEuqQwMfE+^2hkr^eXnZFTndT#Y}jr8;>Oz6 ze-#V;Q~$V*EZFJOW{klZZS`BqmwL!2&9-8~4r2`Yis_ z?Rha-t;*$x{k^c*!NTC$hZMj;wJ(YpvB_#`u8!I~KLT5$=S4=uvms$FbD$QHNft4H z>P4PvRMHmOU3lICYv`6nQ5!++uDa?atv#qBAv98kJR$M!lb?60)%Ef4H1k=W ze*6}pJNwM=9NtX4w&_=ByVIA-keTys&hJdUHxl0J2qRaX<+D}o>u%JWdRplJZMX6= ze=d~v@i8Ik=-T#O!-&a`bk0}%M+eOA^VhO!108gFK@5-5#M8F@Cp*yz!!s*>Y#=LD z3125{d?>q3`@EtkU#T+w6Kb(`09!O6Rtwvm{7yRZj{7%-IIs8!E`e((+6?dk_vIID z7%ZVQUj_KF6UgIBBcMv;RY8t$FbPOX5R#P&;*)Si&sF<4EjODmOX;lh5%q3+ub7#> z%Csl4S>?}H&{8`5jSaw0W0tOG%AcO9cRjh2%c1R>*Dqe ztFES^-3yfSV)~CTnp^8fRb`E7poGKpnkX>%4bpZ@Pqau;{uHoR6_Zw_u}#y9m=el{8%4Nf>8aLCZXm{`4^4bmJMT- zd+O)!T!1!8IPhHt1cy%aG`YK3kIjlSI7)YUO@v;ymhBJNtDfkb``e!w@!eWNPtB=~ zs8%`l(#F>5Uuqxz4F&yE|7z@w?^>sM7az6w2)y#60;qPgy^n^%CiJpuN0jE%&WvEj z1Vkr==1`+LifNR=q~0hDNvSMDekc(HQS>8Jes&-jIU3(@18adG6D1Qbd4=BZeJ<<; zR-^v%mk&lZGM^{WNpYv+K;Vf2nG-s0%t@0hog|f1-NVz-mo9glF_AaXpDXxm!+whz zJx6l;i(5H~3C~Gr-+dKX_AI#b&Z$dFHja1gpsDCkHPd#M*LM{UcUSKF!+qsR-26sy 
zkQPzfdjxjk>5Klq6QEyZq|Yd3mDZ>oaDB~v5)9E!s5~rLx`{I#^1LPGtVOO3`QojS z{WX4&&m6lM^OtsI3>c>aP=#6KE55_goTU&G^}30kVe3z?N%8-!bt7@Jv5Wb9^K$-T z$4=mfUpnN&jv9L6QW>M)zdmY_EINMb_l+cK&(yZ2gY)0fKgM+~jXm-Hy^|K`yu zDC>u-18x>&0S?3)1uCZ2;u4#PLl>-Tg$90gr*&YVu(94X;T z^1091ZCyp|nZ^6^?rZ)JlJoy)iInN&xvD%durp(3*;aOVG{3d@c@r#g)iPJgC-7*^ z+){FI#C%64EKzbwy|DJe~F&o|2 zcgXAnSX?-fh_h-lC_94}0XV?ODEtmjudjmy{h2i}ViMD@Me?tSx9CHJn#AUpB*K8p z>!HCzMP4KhraVC>{2cOj*{mR0>C7#u?06lnb znp=^Etl-Vw>e2yNKCy_X!l^WoqfERd3K|8BI@xg^Z$GZpGse^<)7?ZQOgd6CKGpI^ zkS#=)rl04CT*>PxPwGnc7!g@7Ylyt;S$P*Z=AKC*w9bJKZ;t&^i9O^$Zfe(3Q#Bi7 z=S=ZF!($!&6HQgpLw28u>!%VKF1w{-Xv)}DA+i=}xlpsYsN!Vwq%h@lq4(tEd*i|| zNN2e^b$(Fln(Nt#@<9dEn$)t!+H(){ zq&|6S5HllA(z+P4B=yGsT4|w!nI1?bA#TjLh5GepY?!dW`pN@6IiC&yG;eQ0k#O6H5B4 z@jWvo7Y*d$_5U;~`=|cZxA*5?Ap)i$Mz4~$>05X;V5|e#T`vPsAsx|(SL2D4 zh8YuDRUpS;9DbkaIE`y4^dE^n2Dgf7l+R@xX5cP!S*d-7YB2l8j>y8Z+ zV;v2^D=lrMRknWwhlTp2nCxd57^@CAre`imUrOL@X4l;da>xJs3Kdddq>k z(tWZizgy$ZxHD#U3)B6@Dq88&g|H`BT_N&^yG8QasES*`TDAe1xHLxI-8)O?&ly?a z+Cxs8blSQZ#{PR8ucJD&(>!WnMu$l6^i$P36fJ*PMPa~h1r0;Wv zmx~#=cSJrfAOM)2H7Jrn?Oyhl0N^ezsKL4YsdG>-UTk2pROGrO)bW85r?m|S>M=K) z{?oSgWm`dJObVKQcD%jSNFscZjOqz~< z>|)SI)ik^bN~=_7V8tWL>{+x1j!!nZBZh9dB3tY06(v1SYptS&O1Y#eDNV5#UBup9 zhS{2^;63?FNxjmJww~)bX^nxVZbf5H(pmO+<2pHpLpfH7{Hhwpp1AJ~YE}qK#V9IU z$C4Ux`j&V*FV62u{)057-+7Okuqq`t^9z4@!@uwci1NB`_2CnpjEg4roNgI?{G$fE z=$(4ANgb?t>FPvJg2}hpr)?!g1|NTKVRNG2o<1D%_0)%n%&g>-LyfAhua4EJ&V1dd z-qHxYzp(}RuQccXctZSB|L@)YZT@xKy6>JR`-6`@UJKbq%>o_%2pl-b+8`WMo|SkJ zjz55JQ^uSlE=*`+d?Lw3yvmLWAU?Co*U}m>C*xbVoZfvt8LM7Oh{QY=1RsN>*+}TG zBPy1d;k;s(@CPSxAcdR4i4a%Keb+x?*>Hc^AZ5Bx!s<6`nVSGXO&uz&S1JN>Z`QkY zL2nAOL)9-;{?>19=mC1FKdU!xeD)=L%ew_>Vi{z&>C<)7rkshN{NlG+L|ogUOOGQ-MB~PYt~sC9e#de4po1J{QwyhgX6=g4L!0n#)s=~h56>-U8lSC-6sW8>c6(Zx z;hN!`W_8ZqKr4Hl|H>=oeWc*lfgd57Rdwi^reo@9qnH?JzMO9-8g2!scy4Sbc`Bd5 zm=E2SRML6eSng)1R+Qy>DM0PXjW)y(`NE^T$BnQ$qG40<`;&SWfeP&u=l9uyV!i#P@egV7Z5k-rF_qkPS4icoto&^UCPTcLr@XSkx z%N+`D(|;%Ro!PKm)bBklHCkIAso>bdM;Ucw3gOv6g@w~#C_X){5)=*etjw0v{BpPB 
z*%E8)kFMCZP0=2NmY_LdPp)X(2<=%i>oR*WTD@;uZol@Ltt5hq(A6yJUU8p`--|tP z+uFtEh+OM_$4{ReM`@V#_rI=2cer2ye~q7daH}$uqo^x9r}KL;)9l)jh2XvNQ*UEi z=sWKNWb;REj2WmKrelM(CA;@_i;TtC{;vNM5&G}-4^-25yK5UwkJmGNUAMhy4&>$t z_#f2C_HKLd(YL0}*VD7@l7ndFi&bbW!M?)uD1IsI;f69Be6i#3r&T2aA!s05NQU?D zZi^~$PerdxI1fw*>NyM$taPT^4kziBgOU470#ytUqHko{C7luW0g_gtKKhVE8G0X- zqq!m?l^T$#Qw8!{z`d+*Zo=f!1Za4;kf##0GWlh|ZR)o4Y1XE~8u^pp#MJz_K8+n!RE zt>g{*cF#S(G`v}AftYg|?hs42419yNlX|Nr?Ed&^XOo$gu7j%T38L%c;jOy&q+;yMHA?z+Jb?I8zA)R}Q*yS@%(MVBrUk zQCfw&ebSgDW(CGv#;8{aje^62e4xAqFJosXafH8$=n3#=+EL@GXgC$yj&O~MRZ>E9 zxs5LW5Nka%!%ySN`s!o4`% zncsezq*6bP8bF`-C^+%^K^&*Y41kW-#RUZmvLjE(2ReD$VpRgG>`)&)qdhMXpZ&P- z_N%t|XL(H{C*egun;84`MneDgF$zU4ro^QEIJO53b%WG0@B)hbZ?VZ|D+x?a+^9Q? zCF~yu7z3ghLAkxz@iReV5Ug${7+4v*Gx&7RH0R-Rp0}WN|7K9-)gOqXGb1L#0jR{r zdx9R)_J4{dlfQQWPv<)P@VUPI*PR4sgZpnn3ViRMmU}wx96hl;_qcf6g)le&hWq~n zbpD4E;Gh0~(0c8A=sO;p3r9SFfBD1!4ILWQDTTUUN@DI`KiXd;aRN2(QYfNO5Q8p& zk&rknU%D2@D#cW{ua}4FIweIP9OO{1`Z!kcNYN&3z@7eW<$gu>wX0=<+1k7czpT#a za=Z*4HH&K=ueCI)Pw~a0bx-+DcV8QoyHb8K}ljN>g_a&7+H7L)sJ@L5D^;cn}%mh`tlj{dW`ovs5nWY)@urA|C>53LIIzY+2# zBClwo`mZy(FU|zrzN`7P>s#ZS+y{L8-j_pDMa%#I!(k(PMdfy6->{s1^ael5049jM zv>|U!89sp12>?v-4+CQOO6dpyOS-7J)ECLVnWQ|iFV1uK?J8`TqR1P>4Us8+)7gQ?!?C4{FS;qe%;Mm~JR zd{19okcYQR9gP#cOw;d!VEL5l`oX;wVn`Nmf*TT7wEEMhrw>@360{`lMtc5}7ocLg z8FO#EmR?EW1GxwuX#sccyNh()|~5xI0L=z@BdJlHeLJ($wscal4SwftkgepCZWUB4MKp0KOrQKFj< zKI&~#`6X(DoX?&+Bil8UfA9|WGCDF<+Opd3k5<)35e_{dDlTaLFQx z9j&%!+_w*q77xyC=1Y)i_h;6)Jc(cu+rF4(u4q(A zW$31}AQ%l`9r!HFV-YI|v3z3FU!36(82`ebliYf0CK7q z7`|-Y>hoCIg$G>)x~l1dVcswIE;YxRr>mzmXHN6A z-+NT`;`WOa+jri?5^L+FS`dbJB0&#PM4f0*m z$O@045KYpxD@8%PW{kXQNS2unj~Ns+33=x#P zNV^r(8ZIjM{#ov!f=p2iaTdglP(f8w6_i8AuTpRvgD#3446YyoVhaEPbfO+vPSDVS zX@I2?BQuOjIo9!U07QJTP_=`+aA7h6-W^vx#!WC5q{gsPlQNq#)KP`UZSU{M z6rhC_5>*`@-9A&N^lfwLsmlFx62P^drnI5ooHgwWg!6MhW{zbsua~6#4h;3Ux27B7 zu73yH7CkB6yu4!4LWqW{FLPv2t9#*=Lv0^Cd+R=Q_!8#eo)@Qhi)%57pQ*3CB|mom?B@&q@Wuf z)jb5{-4DOonly#Pe2AFr1FaHQ$DE2boBw0TH~(0emr 
zY3)~O7jxZix7Jo3LX}=wZ&A{A+<9}VFE7Z@B|=wegzjTbIws6>PL)^jc~Z=ekg&Vq zl!5J^k^DDa#;4@m{CEV{P*ylTtmM})el@cG&q}+rZt>Qy--k{s*5f$<*U#D{>wQN& zc_cyz5XXty@_B#nVX&R!byrD>&Gh1y;?J^23WW%enJfUpiU~9KEn5p<*rwS0NENPQ6R3iVn?l)n0YP6P}H=EXy` z{g|8&Wp@~tT+xfkR=(T8`aLMli#w)5eT83*H4~RrhCqGTU;}f3!Iq`oO24})TP7bR z)g^6Ze$LlN{aPP_9kecZvJ!tP!@lv^1(^^2e-Z0E-c8+VlKJt{?e*`Ce{l);_x=xP zAg@E3HlIz-7`25iu}z~7vz_c@=?63VJL}J!sB+NEW?E^g z`wb%55`!Z9mQQqgO%KR-e#Rz&CD=cXu> zQamFPzBHMP_e(>~%opy{gEO7g!WAqkR?Gx+mv-(HwHJT8r80?3P&E-liWs;E3xNQ9 zJXA#j0HiOEFBNNqg59^z+6A zpTWWt_!w>T%EpsG2(hx$st$`m@hijb=w@j!ZU#=jRtA>gbBzMR=`;}lF-vK}9CbJZ z8AJ1-n>RC2o&O=2WR4m z+mFKWUcyWA5MUE~w~v&lfx^efvV@l*7671t5Ev~;&n)0BC!x1QO3O*Se!k*-mb^#e zQrR~7txALi-~+;is5@69M(6NeA&&;yLF9)GebO1k(-(RlA- z+}RhIifn_P*}2p_S#%4vapBXMgjq zPh@j=gpcDfV6;Yle%811S-JPpDw*ABNjv0d)#}qc5Y-Uh{?vX6zL0FFNO8NF6S#SV znk8x)D+A!gU39i9N-n|c@Vxx!6m>!FOidyWDZf-<=}q!iCr6c#?Pt_Qr^LXHC!NZG zu-Eq{EA3rf&Rq@HjQLE1UpF8>iMukC%9)sBKSC*?W%VI0VlgV z?hXCY{Oo=_PU1LLzb~Wzn=&scOPpL9r9g6F#%6gd#w&~*B-fevWZ<#;F+%2mh%^;E zus4}crvSxWO5*HCNPF2j6=iFlG30mJ`&|3HFa#|!05kWXSncZvlpOtTjjM%TU7JeQ zVV~|wfA4wIAp{ZKkRkOkI2;u4cmCJm!(JB+N(;h}^7-1Aw{$1zFccxk=L;{WPN-wB>bX1B$f5y`)AwC6Cm~5{MC{)FO@t__d{;Jnjjrrb z>RO)#OTqg+O4^Ir_c;J)3jpL3fl|KsUY2&IpaeF1{WVn1Cf*T|=RcGU(Y!sTBf;H< zCkue^Wt3VIvN?2<-t!Rrb>WcQkQ)9gnBPsxxE+jk7JheHD`+zmp%qQ1^Mh5A}uDflocAn@*W zpzPOILH||u@So-X&-@qExcvMtq0?%U1BM6Ix7W|%b0Dkqh+Pq48<6Zz{AC&mc;lv9 zBb*~k%fd^-_j!L)JOUI6;A+beM8t^a9eTy&b0wbPeRfAu1yivC1XK(8@d0rrsEGrp z;Bx>3mH3h$wW7Gqqh;j@C~{DP6f2jddfeIa^x!ZY&s;JCh{;<(ktkO(n+GbGR*Y0t zl@PUQH@Cnym)Vgh8EK?XzKT*$Ek-RR&|Ii}E?iv=ZQ~zvPj1ihu%4cWO~js%;N6!H zNlM2}Kv=Cl#t%}x2ghH#=>dc*wOe*`BSWj+B%u_pN$MU-0EKWSNl4j=RQ2 zRVMTy4ua|@R%-E0JhN`B8Kcpe^;M?}9>R3~X)x4T(;kA6*;DkW*pXw&j|#qV?`e>%ln^Dxj;^^xR63Qkw?1D`P2aa)20$!H%WC?De9y_j_FiOi=KFAf{jf zDUh_M&qRBq!IUG6HX@SQXXFcV#6$_e925W)4^QC*AecH`8%L8sO0cv9{v_yfyo%r@ zamCsZnt`LSa;jZ>oaGz;BU9~heibkYo}+}cDTDsyn{QhLxiW+VVnmBoe_bdFh$xO2 zRzC;d%8h^9*df<$W_Nr&w!SdQPKUzAt%zh>^+YPgwH7mPuhP) 
zt?Ad!G!K<|k@|A8SA3w|%0U3Li;18S)dK%EiZWotBYNBFn&BzvKqS}ji>c?Igy8Wi zr)8Q9-*{z8@Ei4OR?F`zE6=Hfr{0aabIIj}`UTaSc?XVprJ%0f2|FdH7j1D`=Hq9r zH)^NdTn7-h{9i5hS9^P1>-rJGMLGQoLEJz5Ps5kB-$JKVhQ{xCL-+eX-Wor92rNeE z=kvwlujqJYC=#Bw6NZ5Hemn@C0We)t$3I6;7k>y;R(CDy05>Zhl2O9HPY!?+EI4J= z_A)U;BvS{73JDb_CgFH$H}+)!S}%Z8mKp%M9DDlFt7 z2x$ILk_ecNCncw38=C;)qT?`Ht&c0}od-UM!0kSDl38{k6J#;KQ|V>jv6f`25w{kr zY&8CbS*wzwoj^O*UMq)s5P#e1YeDu%%Fir8bLqpR4ceUIvNOboQ^YU96k}a2&tH&i z403L+*jA6z>uIKEyIM~B3L3U{N;SUCt3C|2-J%1LwN{=|NYrlQ>rh~oZl3QVc()$~24jLbihThn${@`z6o6E0NyF2l$Yw?}` z;eI^zIUIs?%6Ll zFPU>b%%}1HbNt`;jWNhU3vdBqFv#v@7|*m~hUjPqf!H%KK0=@+a;v!DAqcu!;_i6r zpAH_t_Kz7z>_jv;79c0c38{9v$`>&gCzFxh=8~njjtd*t9n>$xT`;Q<@T>rE7mvCU zLVB?baqcYQCgZQN2|y&nu$!AWa^yjxuxmmQ1E9G)6PziB9s)7~NMe9|3i6aHSbQ4U zPIx|pYN@HZM`Gp2tU$IT_W~a+G`=svyb2buYv1G>TzQes&NxD%!$RqwvV9B6Tn|it z4pL1SUS=5g$hL(-Hn+;1oQ(moG^bEq?RPE=88mK*&0TM3F52uufG|iQS+%Bi+6V8n zx%!*o))C2=wYo^NGEtTU-iK>Nx-g_~KK|KE@X6FGx&YmewqJd=0DO=Z+1@plWQhpi02i^_wDgp;_F)mLaW z<<+`OK_$nUTA)nQ6xZOQp{LlTQ95HL_#@d}HM^nV=6P6Da(z~=_!FX>1 zPs^`8FvkIzZ#A6%j|Jr$q(@!f3+6(T%`~j=Uhj^YZXb=2B7NlD5=Li-3trWq z?cN4`Z*R)`^QA9!6UhhI!T`LW!_2J};Si1f|#`ATau%OHfqNGO{kDI98S@2K-KgU7LB)Tj#ep? 
zIdu3c zuSA+0D$e42+jC5hbCB%2wSKQ0L*O;{NdfyYHCv1I!KYn)TYc474|<$~@p~|~?<+FU z%L4k-ru+U`!0m2ZYAU|y&Vzqd@Al4Vg~BS06Kdbt(OTbU&qi6Hf;_VbbKXPL;!NUxfz@&j(T$L_ zC#*@n+{rDh;SO@`LMOZI@N-{7{kI9a+E~Uj~K{(u_TqRgd1XEukVoZM{~~s@($Kk z-E)#GDOmY9q(S-i@W3jKI0JLDwa#xRnr{||k_LiBRm+u4mx2k-ZPb}uA3XO4E=McG zW94Z|vsF(iWv}6C9#_9EUkcDW(7v7!a2$f}&bFX!Wz%)sD)3#XZoe^R^>w3NKUFM*VjWTsB`P=d9cm!fe=_-Ns zHdj;QsVNgAy=f@>8kpfVEV|eAtFz)KFG&c(UDIl6^1OB-fiJ{isb}iSHa1{8L+$Ba z_WrTjIc5u0E3v56^Xp!T#+b(7vLdT_pm zdux5?vcKT5P)T_D?c06U7WMsOIm+*5{N7q%wCFt4$<=ypj$tq!ZXrMkTMq(`lCY1KWNsGX{5H0$(^9 z+#x9E`?QrrqF&A^ozd?5MI1s+zqXbtX7cfrSrN4pA8I{*gbNj~e7a}orjpsDxe$ppph*rA>N6CK_#IJ7#Vq%6 z+#c3|BcF-qD9(91T5_BIEPdJ;Nk6sNe>Z;hqUq71@7wr_z}m={@nQxVPs1Z~|0Mb!xfr+L>rohWvof@bz2IQdCjpx}%jT@-IwpSSu{z zR89>i-&(}G3Q5g6esIeMl{Hn@Q+8-oUclsUXhwnQWe}8Z@`aAwS;CBK}GTHUZWs12`$OISm zvVy(J^_b8nW1JiCy{1#w4VE)v_VNwo^>ma&NTV}Y`^g1SuL%Be;kNXv^)9Ns+X%73 zMAWY%oBa8BQ3-{h8n`x~TGpuwjdP(p_v>WV(%(vT7gux>^N4poWhQsk7@Vta26CMkj+&r=XGy;8PQ8#X<^u zhp>Fb50e=<*dINlf@mqA4_E$&4a{ko1>zQf>o{R%67WKos=eD_ab7tbb=F$ zLcqgDofM>EcWN+bJ59yMkf=zl8y42)Z7MNr2L!Z7TM@r%dY%r>CQ$$dwP3VFfV7R+ ze3dJy7IzkqKZPzvtR4sVasQm7p84(W2Q7FZT$=B)*1c4je%4MC&V8>@bJqQ6wI1>P z1pka9uWe~&k@@^=gel^UK3my1St?$B)anZF+~VDJ`ECQ)$wMpma$w67-;J9E4+9M4)FIs2F5W&#SxXl z)pA0@p6xn^m&&@l>tZ>hlO?Ba)Sd13yu*h>PFAz6%lz4r`KV3Sui7-RY(4i4)W<6X zWU&zo)YDaQ|$&+6!9h$yI20y3c$wT3Q4Ree{(@DHe~NoxEnMzOw$=ED|8 z70o+_EJlBB)o)PLyUriEv>U2-wo%4nBRm3oKAVO+9P#Y+c~=eSJ!D#&KrJi(47w&(1H7a8|M&((+mbM50C)4rLpkQ<&Z z{>-h19vw0_>2J;zIKIIMmQ9!!vP8dMwrR}TOw??Y`y$FnSU>0y59LhM-|cj2a=XkH zGdB3$?D;|JTJku$SL|h4q28NGnDB9*5UuaO=z9IvDd3;@|N3#CK1eIBUYU#P^NypW z0gv=O=eJTr4(&a`w1`%!1(VPo#5dUmhT})F`>1i>Aam7>f$x%PVZ-=xbWfsVy)a)D z2t(3MAEL zdT^a2QFTl+u@Egb1JK5q{C&pHF2Pub?CKkXaU}Q}lze#o=ysNOQxQ-HI$x2C=yqi< zTDz{D-~W792-P_rX4!uHrhT95dclL9l*YAd#;TODd)M6LSPDm`nku{R@18m}CXIND z@E263irL&%h3mZ`ZDZqEBUo3PxWqc3N?e?|9Si1uWEtu#ILor6Mdx=4^A})U+J%Y3*gfUixfBw?T0fP^Wx`Ud5}Nxgh7^v~Iv_e~;c+b0p!tE%-#L9({S>wHpd{|w 
zp8oDDTipQGsqI+E!pd1!OH)X*)_tSz((x)^6{T$4!@tMC8e78%{PW#!NAl=z)15zV zPOutiC+!x|_rjbV^*Cl(sAbSNoosvfKT!|USb*qKEPO&dhwd(I(F&eTh3JD zlM|;mcnzY;M_5zg(tM5%NwQSQk@#!I% z!}F6;ehuxLN2gUzQa{twI3IFpe}g#*?~V+`4^4Ar4W+Vk&oXY+D1mrs$=80#Ud3kl z?uM`-v-7j6@wxs0gukOf`L^pP8;uhc@IfRQCax>o%v@&ir@GEH(dF!uw4T!yKdSg@ zk*@H+sALX01jV;3-=?s|v>-j(`=1Lvv_02Xc0$&Cj%MJ;gKgh;7h`1)aW_N?kQVDR zHSq9w(E3L8cY67D)4~g`KAE03XKCRsHL_uwO7cBQiORH5AN}Uz^N%!Fr+oh2^-G(+ z$_6>J^xQ$`(hFR7Lx(Z;;x_7?Tz0ULA1eZ~7CCtf(R$c0`NCS~QlCm_i8r)M7@AH| zbmux*)HA?yr$k?Zzg!tc1dyBp@xvqbCo~d^eJIHpc%Y>370U-GXA>Y82{KA?8beW) z-smxmnSn(~nK~W>$vT*wV}H+Nbrz5bOX{Ekg@N$$F<(+-$7l#NmY_TCCZ;;<;02y8GON%g@2Z@##`Ws0Y_@0$$LE_P ztdBHpm`us;Sd}$}N^P*5$tE!Sn)H2~St$;|W*Jf|WE-71=;y|X+uTzTH%l9sgvH1G z@zU27EwxNt%dATC|#Yajz zN%5U_X;T;ZCoet1?#$8x$-<*QDn=2}Z`_jC`-(e8{(QR`He&5+QKlmxb1jgWp~K1V zSNgcMfh4DS_ssWySq}OK{+opM_auTe;t0x|Q5%i{pi~y1$wchpjPl(wqOvj=4nj>F z*UtP1VquBW(lV!E=aciFOte6p<^GUHPn&Y%Mu;!tqg!HhGC@2>bZAmamsKba8tAB^ z!n0Snee;?Y#6y@83V^r*2^v&DMhpyW*Wsv_+?aG%L2QnZz3K;o)q#h4YC}LUg_h24 z{Apor?JW6N?n(E-8}WjTh`T=+LT6XxoLrFKK9Mz!{yY@F6LJ+hQHZ#)zwNUcBb4G0 zV&N@sEA;J?(v_V7$d^Uv;ga;jRt}mgEpYjWE;8A^;Um0<@lp-MNLJpjc}xb;-^IpJ zf*+K~_Up_$r?ySvn<(o8H8(V)by+NOSk%V&d>w}$JDT19nlrw(;NmpbeJklWeO_3r zA2-MmvwvWtOe*qV8)AC`cM=V7lgrQ)p-7WXeF9~#xSjGDImXjk{gU+7E7Ig`yx|yi z*U^X%S5q5XVe-Hn>ys}Nt-erBqV;c|66;sgVD@|T20_GUuQK*ZrgiwzKB%T2%7I_8 z7{K8~7i4N0h}IFAk&WSV=EwX%DKCNvj_lh6Jd4nRb`&7cKOreG*`Gt68331tK@tJ6 z*1`;?i699$-%uVOg#VYSs#!d@>H=Z@1|t;(SPHm6wz)8m9zmI!b*QNGJ+rSTzuPHr z_x$(W35^y?vr@^g??hBdHNv^$J_%ssb9Linv)t1m>)Z+4ZYpV#&Giksu15u~9q82E z>o&vGRn|5I3PD%)M{q0GpUL%JZ#z_mDFN{$1W#ZRgwU@>!vkr{?SWgt-5BDX4 zcr|AF(X{KEgCMXoKy+FuAdTv(0RpVHpk;!oiuV zQ_E1x{t6z72k;F^w;TyT3HYAVbqrkBS)GJ%&54l>BBWnMr#$T}(EtrtN*1Bxd_WSB zzuStf)V2HEK68Af#bvdtP@m9q;V{r%t>AS&wqtw3bw}@^XUecP#EqUtUUlE`w*KV- znO6ToB&Xx(x1Ix7Q1o||T#}(%xpLG#g`bl-%fh{9XTMGIO(g#veM0$l@}!nOM}_Ve z(2Z(k!77%&JM#LTK+z$)1p5Qg7rZIb!S^E8R=LrL_lif7#TxU2)6uMj_p`~q7zQu9 z)PIwkpQYvpT?tvY0JK5^OWBq}`eSXJtoQ_Cz`3#Z=`X7oYxQ?*DY7>QI}@I=X-SB9 
zx2%JHsT1e(MHmyxsCxrHh<$&0;rQy^gLV0gFTe8pr~gVVoxBSgDQ=kjO3M8H{}&bT zKkzR?ws&kCq@C6`=s@)!{ts0P6z!~%{5+K+(M6JDs_HuzLP#&FZ3^$SfUZD zECRELH`$pGY}xN+5(E$#tq@m1?7lR1I1nu^3RULJIM?sFH-qMEMc>6>)<)f4i8KQQ z$s+7xF0qoP&&b`Q6SWl&qy9u%#de+fTLof7H_|(?WQ5#-2BB#_0a0337>kz4E6(n? zt%UNLw=`ZvD@IXHe7RIoTP@nkStn|BfQnYEyEJRzjcZ^Z#Yu@?X`Ma4;$2G|Uo*QR z<+GH@y~{(Yx z^y5BMEPm^y>q0IWn-Wk}e5A1ryfCOlA5S7WR|dHLk7I z_ltRPxn?p|ebJwVfDO99aXDmJu4 z?Fajxbc?S$n8((8sk1++N&BD-(EaYltDEP|=9)A2pslA-aaEbQ&$MJiR9;u0Z0KK% zJN(zd_y4{BD}OweIIFQxQM{Y>*=H4yO4cHks?z2PzE_rM04y5;Z%VeOrhT&_TC1uX zPy=qHev*)dK;P+Y`_N76J(mC3vD#wpA&##*qfR0MV7k&kOpY8P-%+wKRi}K6AyZ^% zL7Tn2a#K~sApni=U<#bK=QVg}LwtK96-7^kNylpdDCsz9MD#x2JtdFfc0c%hqUVX$ zwYMIF+u-PIbDa^h2naCw=X%BHxBRg}-G{K!?xF50w|s;g)in}ly}-I6&VrkJ4eiEW zHw611T^pmpy`a;+IwuI^wiO+H;4hfER&dfCvVLI4xr<~CepOYMnrK-1-TkwMREsb7 zy!oBc?)z2)hGjUB<^{((>6i9i-Ja@WZ)GzGSIq&r)0&XaA+U~J@5R52@nxUxA&H}P zeY%Er-oH8;Q8{CZ`T-Fi9=e=JL$3%n@Hv#_gjbh!eTy4s*X#70&;OI`7N5Zmu8z;xiO& z6jp1M2L~Tp>;<~eeoAjHm2F#M9~k^|-)3i_aeJsjb?Ub@K~2;ODtou!ZI*iFYsbBo zu~kC(B0rndTqQ4*lF89GFG2(qm;e>XE+Wg{m1oW#+eP?<2g-(2vfbs5;~h#Q^*TB` zKaz13e;SVM3s)slEaDLj3#hOO9GhjL&yd7`nw{k@x}s{4~j~CLXS` z5JQxLMDY7771^sAt(wUHsVaezWbm@QXDr30v}KMi-!JnFx$mcSHhE;5I75M%%1nwbybij=Hv$In2o~ zB2UYjrU}q?kR~puAoyiMbRg4U{#*O65q}Y#?$%Qu9s~;A6Quf&5un22Q|O<1 z&AG|!nLxP>J+a9f&IPXQpQjHyZ60NC3b=GPO*OdQ+jmTUz#_Tv^TF$yPsddJ{UfA~ zT#`9#LGlP|sZ9u}jA@DjRk$BD0G$7(f|^>a91D1`-Ovqe|pU z?+v+|UC9vrGcl=u4LZCJ7?FU)5TpewXaQ=Q-*VtpP?l+9_E>L?(SjGsCxSO|-Y*+O zQDL9mVb@sO9xs|ICm7C_d3YC-`Ou2x)xNhA%1Ok}M>s545vb_9Sa3(}WL;ZYLarUT zmVTe(149Efb@>K$d5Ut|+fHXsR+CqyT7I$ z4}{Fz`w`LcFFRTP4cPy~+kfIeYVZ8s<7mSx{-gWncm)8^#Ryxwv7~==o_!MUHqqxq zO@Z^;C+~2{C38Rr#r+8^S^A zP-X!#I%x2sdmwia1I1iM>7f_I9!RGVr5ZIt8H=ytEg2--Utag^yAed%Ct{baCjDdG1M_ z{dBkB`mE5xocB#aL?p3bm#C8DV+y;Tu3#8aX?``}bdzkj^vVN<^N#LU*=oPvC3RJ= zae~5cT1B2bgI4A(SPn)WNAiE~^?S5qT6^1MyW3XuZD$BF|ZHw(YL zV)28#_Rg4U=M8UEhbZLur2v5f#rjVk&SKX(HE$6hCH2(1FPmqy00yC!2hxT5)6Bv7 z*SOMEL`)4RfKZYG{I~C`jW6@gl1`n8 
zqq^hk^{UE&=Y%dVG42=o(5ipqdj4OF;~)4Jj=J*yxgS5esboL#XICERHbM!!8ov@% zuZWJ3q*RKDyeWM<@AbB4%yq;8hdEq3MvJNamX0(HNE!&NR9kSiirG$QYrn3oQcGS2 z07zm~;piL_VGPL0v^*+H%?+wh$fa>_N(V$hL-Zg3RSaMp8SQ2seyU#M0>Bcp-TZB) zEd|WJ^!w|5Ep%dlrsbZFadz;GSLrQ zv$XW4c$@k=IRb5&tYS8?v3-wFfAw5XDLgw|#QQrI7K99}DnA)=>U-?OtyUMvX0$^e zzfL<#X>lF@twJ*y{xbOPc(}wQK9L%|kPJ!_y(THmo zhO~!Pnhc>F;}c*GFyQ5i-;VpdeHE*TvthC)8-Wf(-U~9@Km}>Y8QlP+DLQxzL70Y} zM;+_%1Y2VA^d}Q%;w{pold@eHm>isH#Y=P|d1WYIlrEX3Enx7DzLZ(6#q}dXGBlY| zY%P6wRivi_=2O0%CT|HvMTFdI8;uvwWdR_%?bjT}%QhwB$n-OHsFiv=c|^FPF{++M zDY2j1*JOLSGNgNA<~zi-(4i#L)7ps0WYfIvs?#ul3ox7iOepas{jX9I%wBCc!spF&1tZd>3wxgBp`#)TEDvI(6m(RQC zQycETx6d8t$-Nw;HMoj0`PG^nZt#|&md9pw`Cs&W{|o*9secr4-9M%ORZUhoaBcd} zmOO$1Fov*;m6Hm@ryxo}ic*!-2;}nogiNC72w$M7B zC6$GowX(tyPsq&yn{pI@vFn%}ZuuBEL-eLFDhhBq0nOFNhBXu(S{5nXD?zXU65HN) z4x8+Z0<0G1@e7WNKs~oQ zQ8d16Zj^MRja(mR%PNc&9C$N^RWphLxEo&DeKJ+$Km9N`m2XiIE#!vQ_!^ui{3&ey zFOBH$I=u0(7h<6s%Vgzt<@jTFjuRNMUHP~6XyKD$w; z?{+&v)vJ2FW;=T_LT54J$`yO9=Z~e^Zf;v|qW%I{Mm9=DU-t*$Ze2Vh&V;3S`Dnp% z*qWxW;PWNDc&AwbyL&)cLuka&fP`b7K|O(&(P~#lIZ3$`!09E0mLX9>Z^W9ps$v4s zDoOj!R?PEWQsn@bY*HNB7I29^!`woPN4L3=w74Z6x=OO5Xfvo#sZkDTT;x{Fo2d*4 z4~qMg7#Tf2a;O3{3UkJ)aVDO`bX{$MHPd9s^WN>a0YofZvMvT`50dIh0106-!C>l{ zn>J|yYfWi-Zr=cU8lNq`t&P+tc5S?T9o9fnce9qxg^u@j4$hR)vLUQ;i2))0AP zMTw(HZx#Xhzv)dIz5Jg;s~Nn)Evqx9`IPxRs&)%P;Fsh!yqssP(vqV|Q)yHg&#= zA){egFRUAd6%`wfz6$^cr_Z@RXFdP)neJ)Yh;bgxfq20n&8=QRU@xnMQ!?;Q^hLbY z2!o6FT&t5e$lzu%harq z%@K>A^gnc5e*8W&3P}sYKQv7x)4Y-a$b&^d$dN;Ouu(jivX;DS#L3BrRvixqP=SVj zar9;5@0BS?Ch=iWCxqMZ&WdO}mntEgR10Rvuqp0hZs&U=mj6FZ(U@hQxMC^W)8c~LmM`COY z`nJKA(P(7q<9QGxpz4yx>q+NYHr)#;4pe(Ha_nAS-8YqYv#(SU`o5=iHlpfMn!#%X z4^Ma3Z>KM4a>ln!kTae z`0mgEM`V{y_4zMa;5OcA7CX3fS=ybYO?h=5geX%0rHbW!vW-wtzE8vvo+UBQ%gBnw z`$8Y!Fetk)r>oJ*k|-5dJYOrN-ZLvLda~QOenx&i7Pl-1kKDcAsht z=*ee?gyvL!Ph8h2ri)v6aSzpeEx@CG-Xh5Uo3E~$gx4P9r^xk)U9sNCungDfsGHl! zWzXxqlNm9F+3#oU@9+Mo+3laZbU7}{z4J(H>ZXu$aM$(%11xVhH;cV*y}H0?R{x{E zf8lfb?Bb+8h$SdoL46zPAT?CBHDjKbM_ychk0X##N*W`IL?P+Vye6kSqBD7-Flx@! 
zx^>yX%m#>~eucZ#fPS9(T&bpGk!0qyPfg23c8U7t;fhu!WIFFn>VrvcXcQ*k6g1m@ z@EmFen}-*0--iT5tJae{fbf|u$U8b+bg!~ZK*5hwgrWjz@Ke{231StuG*QI?kt*sqaB#dA{^3c>$gNLc#M|&K;#rp!APkVWRhZDh z{XeN@{y&lZKfC@1{%`2+!j2rj&!{SG=>Bo61@Ou4t3(O` zl60vX;2$xtMDzg`Mt6&xB(Lr3WKtdD$EyHNvb-GvRNV?XZ;9d! z4xR}GLkh;pH3e3yD!hbStbTS?)JG3CXGaN0i)V-I31t*$L&(L}yRz^TptVM`QbhN$RY9$`b?`$Fk(~3eAxRos)HU z45EgtERtQiAda%!20ItFapUjpGEHUppHb}>W#;k+tHq5MNEU3#vWd8UTIq@Y`nl!D zxzBJUM@YmsaroeahFg(7!w-L5-StF&6L0+SZ3t89kI$uq4q=$&Cg6;RBMwyHp-UR% z(!Ky{?_6|tM3<~%=#s1jzd@dgPaq}Y$0#U=5zLqoRp!M}Hd2qDmz5w&RjqB3H~@hc zOrjHyW1+*2L-~{>XEl=KJc6?_joi!=iV$w@*+RJ*L`ni*B~Kz)!$>IyJ~kF(kWWmf z5>U(l7k#9pqFYLFNY$n8W3rcYr;KjG#|m&5!)#(C8Pq7%m}xxv|C5eUu|9ASMZV{=%%5z}ZOS$n%1aeCDl0*emqTWp1E6+A zFaz#vh&H((F35V!3{)rgpvF zcn~PJ{qs!w09*X_yIeL42z?Gdh!U%IbZ#N32`JBpd>av)v!X@$2aV{2**b(6D)f7$;l`~t3$N0k^U=?J=jdZi7-T4i`0NVUvBUbDBusuL`)-eKYG6!0E(neaj&8X zUe}^|#7mNCUo=>hDtJtNrPY5oVW5>xPBt!tqtfLkCxAzoeis6pM#9srD0fxrn_OT!W0&fC>aneaf z^vt7Mr6uEINa4H;q{ExMTu+EssSSU1=)`qX`ArSBnZS>j0GOY%rkfc5U@Gr=sg)G` z=31K029=0%u55e9bYvd9g^$dj0-iqrSVT>7u3}`Z*s@@uFz6{++mTkJ{HgBL-J861u?I1*M^5bKzp?NT{`Zj$%gMYE?KXq-&(%rS<7>iE%f!J7YF9^n5RcT z`iq>SPc?LM_ecusLUeqy6#SLATl&FnBOHN({OPJURl;H1?EDj+_Jn(9ySUem!3#E~dts-Op zbsHvFmdKvcjv10w;N)|L6SM`yL73=T>qJtR&roqbtzg+~dsYk?ySsHOZ;^RcWgFH* zwK^#~t3t{duG{jSdK60eURbE&VWL6VcG32tugMDza#eOc;Cg=r`)dhCpX9(xuj?t! z7e&{{QWynSoaNo`2r-X4?pHkWc)Hu0nN@znGu-Aj*?JiHu-wBG?6bUh(~Gz#2|vTo z%}=kD??mYTkU@guD$$`527$cf2U{$R{x=13i68man9FW6)!6(bD=qt(9;{@m{zQIQ zZk8=(YpHJipy=gb3s{tm7smdgZYoL`<0XGZVE`<6-@4Vb&gn zJFcC>Ayl|L5N8G`!G+l+L#Fih}ShMo;+U~;EOdtl* z!j=9?{8fORG%$s47V4XP!PYkJz&819vR`J`b!A>8>3DhUB6+JM$uO-65sO>~FxFguC*;m!-! 
z74P}UJ3Jgd{ySvQb_9kDrb2X~kW6DsX*v!2-XHTz&f_9}7BPRX?RTV0Z|eqZUq;9&D+idVB%yGmLbz)5-*Psz;Z1Q+P-gbsYyeXCl$$ByI=4#e$=uV=1|6X&aE z9iJ`8>HU^K#XI?`p!7Zz`r9OH@dbs1Ej#~GW-v2mRBB_*e%jzhhP#7c*&QE&Omf<# zn$4FAH=xjxSaEB8P@gx+=TXNbOPNohZa0k~`6~&Y`#k0?1)purGzBBwdL74O-b>bG zuM+~FW^v@02Pm5LOI{Gt)cxQj4hHf5#OwzRtW zcvrs{mFft7f;}g9WfTnq5z?|Bb9NJ+O6E6P5{b1PPNxge7mA?$3p4!wMF9EhM^#hlmyHY$wp1q77Q zp|xZMB!_GqFJOBz7?3-M=$>ZELzg~=couFrnwe&g4Q_)TXW651%yu4wksAUzA6CW6 zFg_}GV@|~T2er(pvADq)Jo8Q-%!HB635fnFNmRR$5okD&{(PiI-ceZ(2_a`q`vm?= zLis4KMTcxHPh?{k4$wkF7`JY=1fcNS2YGpQ;&PG7nNq)L`0pAf&oe<4Z@58}eDiNq zbRie7cSWD(zjat%8g3a3{^-f(%Mra3=pYw4>5mz@aaI6BF`P#m=cbuco`F1de-1!i z`$XJs0-ad#KTI}m*Nb9KuCz{j-z5`kYM=a$6-p9FALB~HK>yld&9jXCwWsLr7dMFt zb&(Y@+e}lwHvL z-@YGGClth={KB;RBx!toPg)`xMpP7dU|X>fSRFv&d2T^L5(*#OnNhqL-X+fARA*?! zJTp**|Lqw45B!Vd?e81k6Nq~|XqWWaV-^TkX}N|KNRP@!dq8!lxRX3)3$sL5QyJ2s zj0vgn`$HukNtpIMl7PpB=_L_C#^&TAcpduG>00Z zk7P6vEMtq%K+;2f4PwgdKlnTM;eHa1H}O?+*XWxjiJR`cQSzjhhEJhQA<6^-tC&}9&9^=4q}%6y7E{d>tj+~jW2O4JS^4*Eb{gUYfK&iFNiv18^KiOiU7ogCf zlLKuN-6O!9kwz4te(F?41cIGf@k(~U0I+C>I50Pc?{hE&eavp47#NNN1KBZ?2ABO% zJU)fgXGqoaENfG1%{P=)s}_K zhrEgBO4WHWxxX?!ElyvY{LpyMYfs2n3Tuqc&a0k!nz8w&l{!E)q?I8~R(-62%xKV@ zkEG5JTdoSU3u1j8``l#sLA@L?7;X8igT`+1Elc-r$+qWa6#fn|BSK~3#yr-?Mb8Hg z{*zPC|ET}9k9NgfJ`8{C;RIgd9|D8nJBISN$m3CJqmeRG3hG(|YRl6P#-W3KFi2#6 zKa_*a4KP43ltUuS9H5YBP$8!)!h}?m`XQGWXi9Ie_JF?MW%l4t@}@nHx*(gt#sbX9 zOwN8F;tGxeh93_GN`RCx*BaFL80F}t+*6mNO{))iRFslo8=0x}J?=q1A4F#u27Y+a z8Uv!PMd6@m%Aq_iBG19Kl$gR%E{)Ghtbir6XK`umv3~xn23BNghBso{nFLi{&QyM( zub|IT)&_L3S`QO3UoOMhzLyA4Z>Hp`ZeihjWgttN=nAUYwIZy^gzDDTy4->ec2!j` zWi!b^Y>SKMKs6`5Q!>kOyI37Eh(^nq)26S)6u z+gw`8BrW)P@|b$j_*b5z^n!fQ@sus?Wc+;!AUyuMmY@=Enex=d&TY}Cl^5k-pW)#1 zMc*SIrHJ5i$#7D4-{%c-1;}~q7(nz_I~e_yHytz(G6JQ7Eo4gh;}--9{q0|67!z2g zHL|5OwA3j3*?s9M1rH`r1c!ZhZQ21z^h@}ejm7b{6gP2|WN-%Fm1M|~yGj~EV+SL$ zfm&tv(D+8wp#4lVcsyEwTPCg^ZGwyyW_6@J@H%>y0 zm6BOxVyZ}A`P050GI83;T6zAlJ4A6q665C58G^=szBs63u_fy=Z>U{Zgg7`#?;jnt zQ0g>4$&ied2P%cr6fcD&mN 
zPImUNI4^%PjHcDoq;_j?{97!VcvLIwtlxSun_6UxO1WXJ`5{`k+-SwhM3PRMmAf>Z zPEg~LC*Uah!?h~SIwxLa6jvK8i2dKVa{u7}c=l+Ue(%HZ%j=xL4VOafSgh1vzEU6@ z7M3lQh>!|hR#1B@1y+}UjbOd`C0fL=$TY|`*Ld!tv7AzQp;1f7Zbbv&`l}1waoDl} zpF_DDfV*4cGaFNqk?#gK&%7X(={@HN(OLC!`a{7l6C*C@PUN1H;}kU6;-yM-sqcF3 zDT?beht6dkV9OIT$`ZFPB(6vbFc&T``R=f9wyB6*8WdaS>|2Cg*}sIORfDHr`)W&hh7wyj?wmC1q9XDt}AEw@a&^=xh%#=w2g(~ zz}%dj_O?rHBxH_-E_qj+yj5*s&NToCE*_hZD?|}@`z<0Pr8M#*tM;gej`_$Mj-Ow8 z@Yjss-x?-S;-IMgu;>I;1W!lT)u9f}bl9;*mHm3Wr@E@Et_YYxv=dL93Bi7Jy|#$>xHlI^M}gC09@rNislTjRsaz zpb^g$2fpj#TmVj$1|MZud&Jm6@IpCjy!6(LE^%`uai-v!m1YZ{={RzI$7Dm7WyEj% zlR(-h?z2j5X)oxUiF#p5j?~d@CJ%`=W!1fTi;w#jCE3{3t)G0S$r6}T#U8LdXoLdm zB+T|3$WMNlwLwR7)icDzU{bZZi8>3bYqqs-u7o;`ZQLuv8*pRpI?k#04H(O{&zz=9 zEp-jrRN^#U41)_NGa8fm6zW`#oZr_~h*_*0E-`#bepV(Clap52Y0MmJzFw(jo>Fojx(IRxF6n||1uT+cl?8d7LWGz z_C7?#T(^C)VSfSe0N6uZCU`t(Fy81sxNH7#JtyO(TeC_uZ8*HL#i4IS${M>a%g0Z2 zCWq?M=?#S42&dIorgjD4c_;7FXM_AQ``D8`qTys`!_RjjVj%vA5FGgO0fYK;VahMs z?P!n8FR0Xd>Z^!&C@LNffncK&=erx8dPlT62Nfpyz0svicM1!ULhK@T#L%iNMI^ES zK!g#weC+|4l(0}>vN9|_E#0MnrH zcB5lInRxtmjTrshcm|W(GI!Su+Jz|0wu5Vu_9O(ee;5kIDMzs3wNv;V9#)q2i^qfE zKTWcA+^DKczn?AzmsuCfpvyi;-7?WWsP_MQkLF0!f7eO20s8%s7`S2AWJrL8c6O-3 zJbyUyL@7CnNm;-K{p=Pl=i@f_Y_+5^cA=%pdF+j!2q8`PmIBn@62%Cdauu0UP1jy>u?=iPQH||VaJw4{TK37JI{evZt|B-bn%aI>l?1>?Di1lMzk4!Ak)WfA zX5x-_O0B;NL0{#0w71Ye`gm_XyC+daR);GLSgb*VGs%Q~=!)`fQmv!Iq8Y$97O$h7I*V}* z3X^vqA}6@KCCxf#OnE3;H5dxA_e_@N>i|BXy5N`1cmbh%Bv>t8`w?0txA3tH5X3$R z6`7;}V~icrJxmXJTl&cH(=nC0kG$)-o$R$3SpuB~W-e}djU!{u0iG#McH0YVJM=!f zMQ2$OUYs(a`!npSHmN;9n%}Xj|+(f0UWh#*=PI4=C6n zu8_~6OuHx*+*wBJP<5!>U*7+?$Z*Qu!!lbUdd2Vs-1#7VTB4M=wi6B9!)_<0;JZNT z8t47VMvgoY<`fADXqG^h^eH8pI9Fv<85*Jf4rl~LYysvS>d=5$nyHh_d)s=+Td$Mh z;3Y2t&+~En$2#ijgg+TDSgT-LE%j!$_ck=xy7@Rt2mZD><7LU%!Y6^r_$MFPC*r!U z>}VUs_5^vy>UYct4P`a#?gy0;ahYm+Vgn0i(kfY>;{9s~=jEx=Uw1{=`z6)a=5`M2 ze=5||#{NCG8Y?2@*Y1JyP(5M3n|``~cV>Vxv5vKM{yytq_8^Lezco&}rNgUcgyJu& zpKD9kmXt;AnI-Vvz816SEb*0Z#`ds%iOZM$r~iZRRLxZCM}9@${nMMHOY(OR(FYt{ 
zii+u2_7G#7H_a+YA+s=s&cmSFbo;m0`H77Cr->)AEs4YCbihJ9Gh>9mHVB(9o_$TK zpOSon58YMyP4lTUl}_-48`iEvJj2e$pMHjPEjyq! z!AAmMLsQ4&stKoQ%A__k_H2C`?cB&gNC_C+TqVoXf_|LAW2Q`rB{AN~XXwwLwKC!XP_J)AjHTUzkM=mjyio3#5Z zk2SPuM$(^ZWFT)3X`)J9Kk#{$BIG0vW*AVatI^{@k5T%J^Hr9>L>5UVo zOSDIWi1sT0CF4;%%>%w549a36WFiEbLp|c&h>69=DG}{OmoZ|KdAWyvh8Vvmb<)FNY_&@Ty>MviYoYJ}#k%Jr7V!nw zoN3|R=EWm$PDY?!nPI^D54c7MjbG?Dt1B1c^3x8oJX^RI0m zY2VCcb{8P;>`gspdD^%i0qUxCaBb&{yrr@EvEwsm&Pw2|tRCHx&%%%XHkO^@lck#P zCw%0*!#SdJ($Y4&9d@-NHLq@2XQ*hJWWySk(VW#--tmc6?14$De`RiZoten%j>krI zciGnG4ILUV-&Hc^me-~v#yRD32dfU3Iu4{+oDUTlyOu%$gi0(hWV4F3qK+Ofjld(46dWgcM^bO~L--F!u}g1!|m&IokvH zJvQ2M=4=T_Qbl}IR@G}e6b=q0ke}FFDfW#i&+EUt_oQ&AuZ+m+!+QNrsr^@4B`&iz z1hDAT*@yV`YNXB~I_1wme=4V(m_$`HwgGO>rpEgWUd9MmW1s_0xP~PUe6t*nB(;Hs z5J@pLRWpTX3E5qDm>Iw}mR99{Lc8>v3>e%5`Y26^M=Ho76Hruq-?0k7`ZfsQ<6V_d zR1!YSz|~czLRX@aBf=qIw(^`ClKVrHf>=eZN>SJq(Nx~iVL#pLAL5tS`^>3!TPrsW zVlPIkYld02%`HPi;trR8F6a;(7pKw==rU9+F5FWD833M$Vx?B2~T&8OaD(P1Mndb%Jpk)_ zOd9MiUmH*pyce6G1XNh310e!2TVZe|#QmmlfaaLAt||ndf)KjrPsJHc&-P&U2x82) zY0VNMq`ZrQGr1s`NST}rLwpD%yI6*I+(#!ORAE^ve~MCu(<<(YT!-U2U|1n;0TND1 zL`ztv+!=lm4l(1#1m(PBV^+GQo;gAYenpV#B7tvZ^o5Buq!@AJ0AVUiJKKS(#+9JzwR8x=#_!_0JN%hIVWMhdt=CM>`1f5K!( zgiDZ`2q7rd0^sfWjbYRA4BTD#ST8_biCFl}D^}$mnP4kBQ3n${o%>blhuh zdB6H)rty+LpW_zQ3%y8(gN&7SZCU9nM6Ifw2M_!&x^MEt+1=eci48y|oZ*ZJ;*krW zf2rXAH#q;dZ~kNbPdwVKccZ18@38?++g|`FQxanL6nlqM{dC0od@M%A4x@(gWp*)uXjuv4G2}zp0*Xz z!cGA}Ilt)OF}xuLFZ{Yf%Z2!mUGBa?@|S5U2}L& z7Yf1I9wUqU^xm9O0=~&P$<9dF%QUbiR!eU@j8B70a2W5dkrSZ710d6AU4vZE^{Ml1 zYK9Rg{#;k~YdZZEPP&EnLSRQ2I8sqCN-BkSkt#wtL0&Ac&A9$dM`Z$GV;* zb?A6}b2yy~9`IyW#xmbzK$+-#_-`Us2)r29Sjq}UIKRi!6kF2TX8-)?@+bPu zM=#SExmjJmS>AUH?VG=qd3Uscsw{5Q^k)=(jsKZ-^85`i>f)dJyw2FFY~!!*40SBu z>w930=^bM`K_5mvh^;o|X<`PZ@q2`SJp}!C{8Nbh+xd?de^lOeo4BbhcdTRb<^q#( z&tn=Tsr9~oNhgkSSb@G&fG!OR)3>KV4+t1!k<&0a0)Szn*?kmR0<)1ut|Ip$Ij@Us ziQ|f6iPRAt>?E!=uqNdOaCL%fvP0)vyo zm^sT!0P+F~g<@oultj)BJ%KIhjLGn_9a8a`zrELo)LJtxx|=nC!+Kuk?gbl$6pbjN 
zkOsJkSs_7sq+=twvp@!tT~%mWKAZhq%BiIP8I>Koeux8Ee7LYBa{|F%GSx5}Vohs6 z%#N>=l8v*-8t|wX6b>p9Zp3p%8l<4m0H|5+(J1x3vScOYoXeqOYWu{SA2m`6pUrce zjfP3?)rZ`U`lpVnr$6tM>~FBoYdo?swdV9no)`S<>0;peqUG9vO?ff}`l)l{gvH#9 zE77z)&r^>OY#{wjxWH6XgY(-Qt!c7#c+092ASc>P>GonxhP|gVR&ZrDIDjXQSbZHg zUbzTU4aRa9oZ^MD28%Z+kpphQ8!q? z=s0}jQhi-~Ih_hM^l=KFXRtWR#&d&*6@Nc*KGq7D_#eX1Japh>*+l#U3(#sUBKn!Jcqk<{q~2N){T10buM8 zOVOb$5kiR#vULllk0n*t2KbxQS+p8>_F^Zji%ro^>M^#XKN4$X$-1wC>)L26zrNVW znT~ZCvC=Im{@q6Pdo*VMgHHbUfS-A1E{)2~w`+VB@>*WlYEpKeW_4wJy?yxR-Jj>< z1QACpC)H9XY^cB^(8!QI!O}}#?Fy>zi=M@*G=Ld{EGsQx5d1K)E{2A zjhl9e1(3!3Bwl*Ibv&~V(>?8x{ZRT=LXxtM*ye;yuABj$2_}(04{oAP>ZthueQ>o` z>)(SIr9me32t5f!#jVPMKb{nf(eX!vd&*qReky z*fYqq8%Q%jJ6k#5p`n=l)$WxKQg~|s=A$I>0mCUlW*P^139f)J##K~S8l{)$^;Mh6 zZ;sacC=r?B02_ZQ0K5Gp z>(?+B*8r`Sw$R{nT^gn0BiFz2AI9`jK0c9ul((xcdB@Wund_R z$|<2wny`A`_Gysb#7pPR(mIPmlJ}G4(5?1a(_2}yZ=LVZegnKNxVyGa|7T{&MQ@RL zQVCog2C#WG#3V*Dq~2`OHkF2xBm7$3bfRQ{$^3=&J02d4pnf`)%b{2j08C6!L4 z#_kAItGtk2CQ%ly$I?IE_M}7#?BJPgI`({Gz~p!x?dRf6s}ZFo_EUkK&~aB-;sAY5kJ)cO%&Ma zzKDohEygjnQhyseYRd?CKo*5vZxM3<2jpd=&CU-$G%LN1pHu2Zd031Nhan6Cifm!!7QwfNCJt`Sve zBmSpiOGfEZQT#W-TZykL!nWc#rPdu&hhpX-0> zAA!!J^AB6{!!tcx)azTb00G3DA7gkfVeHJJ@}wszM`a8Tj0QGI%BA5XKL-LyENqM4 z=M54uph!);F-|TyTMZB;f((SyfXWJILbwJAExbtplo-g%aA7Ck{a~pvN1~?tcth>Y zS6F)T@eK^x74Cvd=~>n~xA0GexyRSB-G=98C(3me13T*Gq!D&?O?HtAmK=_V8O{ed z`%iq5wXgepDds*upF9cSGwH#y6D5vYazeeX`md;jXiw2Bon_{9Fw*0uNtwm!{1@@S z)qJU1Z)BTJWx=Zew$N)@rCs0H_6zU313yP|n*|9!O&+$VTC+2;_;KALF;db4!BR>` zBl7(Aa3I}uIwl=rD6z&pH69>vqeQ0#6$Z)k7}_)bVJ0bX z2}C7=KEc4~*fAOI7y@xATzO~(aV~;70B#5g7dWx$BL|=$lmH072r!(nm`ZMSg-$$( z!=O=>64blE%?es9JzIghgn*mJ92dt#c)Y(#gS%-ZK4^E^x`1D@sIeko)}&HP)bna;7!6f~ySpT{%97d7aw+^qJ~bFHvRQrpfLAcZ5eU*c zscyn32@{h&F3Eq9+zt8>wpA?zTvE?Lj{-pQV?7PV!`!bYzc$PCx8*PpT% zWw9`sbJp7DTRkv)ybIoq{B)b8>8sR~;o9ht;r#<#)WQ5>d$BG>_pYIA5Ujt(ylFA% zNp=HNS&(UL#!}B)iaKUv`+mbODupDC9&RsSnIVyn;vd_ff;V zz#W5HFd`eC@^ys^A5rdrr_BIBt+I$Qe9n0%nxvA*rVgPO@sl3}V*8P23IH0!AW1w_ 
zkKC_YD-DEGYB~o3naxYfkp5y7^OKY~^9@TYchYQ4eaKoVr__R3sF8Q#g0Eu_Lp&d% zbL%s7@`pP{ozGrA_G8k)$LRN;`|yVy@h2wK$zSWgE4CsE%)R9-bXK)waPETg ziElj7f zP)=+7|4&z}f9f9r>Vu0rclbusdY&l%IsObFt&Q~!y&Q!+hsn|Zc-UpgXR$5T!oxEV z(;`)6aY*{q_%-nF8wq(3LKr?`tz`VWlhuH^_d7EkXFN^#vh^ zn%1-gSg#v%<4;r9f$r3@%(v}Sue<(MuhuE*TD53qEzU_9d=TiqxAb9;VD*r_o>rgkxTtVj;_~pb$U)324 zW2h7mM0KDlH8?q5!B)HHv)B1;vQq}Q&g)$>@HheQ{{7LqG zkqtAtH|b++wN>{NQZLek(&Y48x!eBMv@`)96W%C(_~}B{;NX#axozhB?7=gE46R?( zbPXd4n>WVpCXE<*s`bpBtOhyx?kX~qt{z$3pOU1!p(hPnuv{;BFaKae-Zs{xYBFwQ z`&8f=Z(!o;_A~LgL74QX{HnWE|8@Qyg^20?$19iqZ_7QlaUYM}aUi^EGvyW8Rz!)x z_tz4Ky(Na(d6L@fg$x!+*hKs568#3#Wvbj_uKh=K1S~tc>I;H7_s5HY^Dw@Qb4SbGqZwhC&wgT<8dLJ9(Hsyyyu8S@x=1 z1HNuG0U|tc5z154Hi<@B1MN!y@65q;Ds^!$o3feO4z+Mk$U|i(ch4H>=O?y0cJrF< zd(I$&(ZbUg;hQ&zv#+!SZ1Z=T#jHiz>F4zbdp`3&)};A5d9q1fH_i-2-1+l$LR<~6 zcAg&pfi?O_isrs`GD)ebqT`+L=t}AOxXXVKScw87b=q+?HScojTEa#pTR5p5@(7h| zh>dJAO;{_pz&I0T!ih?i8!_U9Qw@kJWl~R3g$!dGl;~M8S0qbzXc#E~<(-~km~uIP zKLL%vbyh*FL9x9P4cL;8hb5{BSW>*fQw5c~bXQExE5v(Va8h2SqXP4DJDWy?35xUI zV#tV!ujoJq27-q)2Q(j7uu!{bnyJNB&T(7C1Hl~9oo^PP@) zGvGyZ(e(VtPRlt%h>e{mgS_d7^uzZt&iYc1j|OBy9)`<=v=7|+yIEiGIax@Y;m+3R z-bhyWOqT`OihJ2kkT!i*qp9m}W5R~cvb{x}@cT+WWgq^V{}*2uB?! 
z%zkSAVZ7z;8vzN#pZ1XF(aor@krSIO25JCz^#b>;Q6u^HBPvT-gbJiRR2yb!F1=4v z@_xLbyFn?n{wUpx?nvy2KO6(?cI-)Yg7xklK0As)2`D?2$u9;zeoGeI|ws>uCcP3v9HUDxce`w2ayg);B@A zU1Uf8v{}Zf@2%%NJT)e>+!liLrh(D1VGEpPt4>~N&@-{Eh)_~QMDgaLsj^Afon(r6 z0CvhDLhkf43@89+tj7;brLS1V#?qn7K+H6e8)T>0c&MC!uuyV>h{+{q|4CP^3TZJB zJnc7_IUK8OQwMvIcyUtu#9c~VYkEYJmj_9ctE7Jf)^qNU!tSxyc(S(;azBV6o|@A# zcRx(AtWx;w{6v>C7Cv#^0T1P`u{4dMq!qK-r@I!AqAr(u!mow~7FoD$Xv~EfJ&tS_ zO^qb&|0U>JG|(IG{8XE>JnqJLq3NmZJ?;%|=ee(d5oDTNAT==gS%RPJNs+3b`w$)M zCl$%@6OW_AN_K{-rGdGSh|d*od^fls$D%8a(wu!k;I!1;NS5N;Cs06yM8Nc z+Ck|p*N4rb%hp&bg10@yK7PiLt<8UKDDZo**L^dhIs{ULf546`Yzzk|$iPUXKAwKS z`LHh-#V_4Rlij5OKgpW_Wt%v?<;M7=zEdPwOUTJt(7BUho{S zlqe~8MCRT3_)#t;^GiKzD{`vP5s&}y5c$qn5uoSEWE#2ttQYXt^miJGrMuoQtM~JqCWAD` z4Ra(9Nyo6PH)1#b@}J?W5{_`z6%>4$^p}h7?(Ubi(7Lx0>q}Q0AFs8xp0ac2Kbp)t zL08LALsK_uvD&N=ARwdkA37%<6rH4&1qG~7LDFuXTVFPL9t6P|0{k(KV7eRw%S|J8 zAriEeIt*a8pWjmGiYX}>5_%8{l0u{HQh;BAvcs9C-2}Xdt~Axuq1udWNlu!aa3Ch@ zDz|Bla~$6jJq6pr<6jz#waT@cz6rglh$ZqQYd{eI zFG`XP3P*U+PAlOB-2z3@aQ0EIXCHj-TfV;wVz|`5>0N8JnyR`+MLFZspg@YE4A-QO_~4h-+y`}gj%_s$ zA5@=NZtSyJKIyqMJ8_9_s*?7p|Gap8dO|*wZT-eb(0`d;@vrAO|J^75^uGcpo4-EY zp`QM&^al9ncojgxqv?^rNL*51KrIx%(~EFt*WUI^6ujLVk|U}_ z^8vrmc3Da)BFW9KWnAtCCxs+`TR3e{LM0U zN&ZZv-m6`FZ{zQv$Ina#*`K<7aEKiQ5$Wbxlz1XU9xz4F74kE&&nh*;Now<0HDL^w}1xkI6Ymw#3-%s_coAw$?nF)p7PbA-0vGC0D?6(}j8h?zS=Eib2<)Cgr{YB#Oge!Rt{HQ0P(X?ZTI zM<=XSjuhhtxCRAnAolv)x6}O00>y3Y++h4Kx;8XwktvI7SkY zfX7!wtH(r<)%d85v^U2MYXu;SYWQzt1Vd!0!Y5On{f6ing^&jG#9j+`LG|r3{hyOy zqdtjjcC`Tn2{w~r zyA?iELTEUjxO#bO2RU~kJ~uV`QBU^QL&v#}p~vdt=xtckC zxP8s|p4uTcCBaHbWcp&>J8>_%3Agh>I@{W+AZJ}scYI&2?!#hid$9le)@a!J{+y|n zV)c5ga8(POrckXE#s?C*W;s!4XLjCI9hV}Db~e~eo#iXyH`cTiMZ<8LgzP+>Qk&x+ zd_%T!_v%&ea8@JlIrwR+NIIGE`~RDg$klF@AP86*>UV_C5%<7WdGwV6p& zivQ3%^7M@G>SgAB!_RM0k^fhX`+v13{Dc46oTGE`yU|hW z*RN9k`8)$4vqNAr4JlJ%pkD(S)UfB;zPoJRbQ~|d=S!c!*!k&fSYY3!h?11kU;;T; z;w5dwOD#PK26#W-~eINDo-kj8BF zf!>0+WrOWeZmEW;E(Er-bkMOaGgW*p?#67z>Ttx`T9tH6s?is*gQ9!3060UtFeE2C 
zRtfKB=0x|#;wvsLLlw|cB`MP?8ew#SVnjkLWj)@$_H)N@E;N}d(#airf_(Gd@$ood z88yfeX{KfMt(&&3GBy)^6+#y;XqIZ#Hz6dm>cQGTo^IWHXGZ6~Zn%X}ZI+g&*t3^8 z0qW26A=~d}uYZPJVapiXZW=y%_hGU$N$cq0b?xACkHfuQ*w9aH#AV9h2y|xItT=23iV?U{D z8&1RG$NH5Qx}kKGLnsu6ijEFM8i0Sd_jZzkN7~CFB(#%H)dLtJ06eKq-;)~O5gL%4 zMEg0053R0S>Sc%*VRz`5PnHV`!nIvcd#$mr(ZT;sz<2qrT@lBQJ^u;0V(fcE#8n zQa^rE+s%XohRD$n%3SgH|Eh}hrMarAb_aj@gYO{@z`$pU;VatEn`IV`B-K}E_FhSC zDvpm)r8V^&q_-WtDh*F=xexWLkZ6~l^q#%%ZX+#Erx5f)oX}k{OCp1J(hwaw&wX%K!n&HG1;ORKnc|*raVbR0 zUgPT){r|_N@DKilXAjQB14KyX*=0K$d%!mRy<_(*|jkOFsJ`a9t2tLP>U_5-A|Z(QDx1I6xyshQNfu~qOL z2>C)P3>v3n1^%cWgi3lzjSYiPuF3@B%q1D^lctIvL9zmj7x|A{3A%|k&AUyO@z#Wi zc|^dOl$_t60h^cRKd9%>uVTXbV>l=<4?v`fdg|%&T3Q9WNjjiHwFbnajuL|!1@h#s zx!c{fXRXt(&pI)x{^B57H&&u=LN9%HFMDQ)M_#VO+-oPil z6VTcZp4XcyzR|1Q(fK&RZ8YZhT+u?;DI;IRx@>)6aZ3D7+b1279aY?;R;!1$8P?Aq zH|JMO3`stza>28S%8_XZ2E*jUsd$jqbge9`C{^y1Zk#TOb7(qnlPEL3ImGD%YgKFL z2dE@ASlu{W7!}_%b%Zvws3Ut3kz50Lr^Bc zbs2?mr1d#WfPu-B786bo%PGaj5ta>7_BJ#Ht$L~Yh*Q|Qd1sV%O;xRtOx9@C(NW9> zBU6DGh3wH=tEM827vzzv%z_E6eBNt?ykL9hmO@;vu=JxXAI@n=t&VkTbmie(^FH=i z060L+W5CBbLaX#_e$l<1g*|ug9Mk1e6{=@3w$#pGBd`!9vCyhh+;!FU3I(oQVEhec zyimE%tN*}&S1v%%m|-J%GI_Y6f>`&y)IyUv!?q4bF4h!$QYR99c*9&*x+?EVKD3fW z%BoZo`sgF$kE{*kN6W1&Y5xM(vKQ}y-5!>7l3z9yU%uGk)#@3tt`Er_-{0}_ojyI6Ks=`<4?JqH(0-sqXWQU31w=t?VZ++<#47ZgCtTPW z122V=kE1afIODA(!+jfjE@@&&*lGnZ+6|R+8%z!6pO|qI0{m_?%l|B$_50l;_7aj3 zGdHWn>FaW~k=u~AeG1y5fZgAHO&y+kpA99VqvZA_-<_x<8bhQ;057nQ-oD%Tv7jN3 ziEYZR*&JXuhTP#FHPmS-_qfd^`PRJYUVF?Mqm#gpBMpWwUrrqJ_{MCubw}*f;dETT zcYkn+GaxU5AdIC(FRacbYQN_Ga%J4I^m06)ZBvooefx$s#r&j*(L|w#`ptpbqvNUK zL2lnYQ*SwHTeFQt#Vcs{SeG{5mp9vx zuIj9kVM4ro>mpl*RR?I}S_Z1IF8z10>=Aibu7Na(>Dd)*;$RncFp3AOGmwGZ9DIah z9E9MQl+~~beSKI%fIXHTe-k7xj5j=30bnU(lf+LXH+NlM_Nk;4MLXldpg#LFT)|(QewxQpS4YPRPN(% zj^ALpLXI4y0>P=%luPigJTpaKM@3q`+IR-S_#rR$MQy3#`WLt(mEMK=s^X}!gTCPCix zW>_1kb8&$hTl1HO4_I}OVi)(`{Plk1mfU^uwCE*dfa=}CF61M}ps}pM6lBfmo^j9O zyZvpk*S!RqJ5Tqo!zR3+Ggzw860BYmdei;GJ(gdpKH7OJUeT$F$W6RCm*e6s=yD?P zn=?~XU_w_kH)bO~H8eTr#n2%oKbE 
zez-Hi9Zt|K=@pLvKJI0STH!O!;#nL)h+DX!MVf;RuVepK_so=LQ9epd;|diZnx{`{ z5-X6b)dXURFmS!X9szd1qXBFWYR)KWsG=SG>IK^N7m;>X)E5^K(L{K;Bc zA@p^XYVkE2dwR=@;e&b$3)*$vGiR)Ks|vK{Vkjw%nWnJeh>Q!-{j$g%g6l@CtKEB1 z6E_T?L=vL)0yr#nJr^9CE5HHQ?q!GTo=x@ zQEog?gvtaBU~u-ZQmFWmo@}CYZx9~^-p1Oe&;2|9 zSalM#HSKJD{H^sF^HS|%-f!eRjS+ikSy0EAxfJw4oo_4F3o7Bcm@lH5TQHhYJ-gZq zRO4683&U@I$D^C9Yg`i?#PKF=d1(k-muFyhD)=Zcx0tD+WQ@5BXgZIBV$X?o#}$3 zzWNo9dsi5=+4rw@$(9_*aT6W(rUBaN;jCoWU1^qwPwv;1+<|Ji7Tn?mLKtgBZv>5b ztWGTUZi=AZIbx2w1luy4Na(S_&6gq_xBU``mtzUF*4pJ(Fn{WC^aA-&&X7yoaf&U0 zzpt0paPrY+uvO0zvZoL~%E3vEO&LKzDOdH|wo9sXBA0KMN%F*o7pU`;x6m%O=IjEE z48ePxq^%1@3Cg<6zf47}fWzk%V=7_53*^la$=#9H+%(${pSEp?);<6PgH0C*) zcR#hmmM1Y$(`{WYL$Qa0XEsJgI>PDtCmDGP1taWdiLZ3b=nDh9kecO8gCQB21Eh|sAiS{k6y&l#9+xTXZ#3Oamzyj&wV|)K=GQlyvq)2%%&QI`RQB@2k_ojdc zB5_S2zO47&8iivt>h5DCxsA$|BcjM}+=la~;<2T0CJ+K7_2DDm`(CU3Y2$TJK4(_@ zzWm%uC%s$G;$BgQPGafcWkI=c%hLKrh1cn!qsOA9V?w zl@6VgZ?(ZXve$uH6(YL&3y(f%`}8x-ij-zgY^Qd{dRjZ|XUV2kvt87_7+Q~ZlvVj? z9qpGnnDX7EO_=?eq9*vR(0%#X-6UbdixETULiiLNXY^rZJuPsud}@Fv^1Wlo$imRy zjLBmu`JWl(7AVFBaNct%$bp8Cp;Mj>$7AETR57B71stw@Vh@3t!@FvWqx=wr75 z5SaG;3f_-u4hgZ5nZU>NM!@AsR%^0RN2Ck%|g8QJch36f8tKEInY!A` z#=Tu4KJd>&-rk3Se|`IM;Pqdj z?S%fx|B3%a z2tNhl;b9JJ6pE{F(%cnma1d}WRhlr3ZpmVvyX`1`H_u$m%r~RGeub&2$@|pXb`G zwv6D}18)i*6OOTTW=4ihH{<1j*v(C4bIkFu1ZUJ$BJa<6gK!2-dXm%km_GUGRI)^5 zYsuynRchXuGgxMQ272CPPzvOXF+17lsoS8p_}Y1AH&vZ(3hEH>jCmg7W*yvkJkPh+Q3@zp7+7Qt-=id z8K*eYMTqz)L+zY#;^;t4(PyEYX|_F!n-nMET=$yvq2T_{w$#^z@!E$5E(>kL<5%4- zvTOkImQqH-DxYXwf;Lq|@c6J51gQ8%y~r@$3trB4EG>Q;I8uS3RtH|rUl{OP`k!^r z|2O^*9rpK6^bd(|b%pzn*6T3U5rdYIr!g_-bM9K_scA@Ta?kFb{puhgK9hdd7RP%* zQ(^0ewb~Q}E-sHotM`+$whkqgnVDgWP=5}u@)Xj75nR5cqQ*1VCOcES!MhCVkI}5p zX5(WeT|Q-CWJ}WKD)-}FgNreg1Umn=50zJH8*v+OPmw^0fPSd#>~}CgFhk=qddm&X ziMv?27%KVh_b8Iaw;yq@6LmuK2qAS{G9E&C*qPsthq_psm1*^6*+<&h($+^BCUzD| zV&M)L5iz(6Vq^$lKmxO>M9jP>3$q`4e{t2*V(K_FIPX<(CgvS!NxvtiVrAOE8XPqg zQZZw%v-?EF^_xV^JzarFDa>06_J@hQ>HFT0<{k47PDyu`*}{H{hi2Rg9_|W$57vj? 
zPSMQCIu-Ujc1v&K=0($wJ+jJ5FX&SWUPW<7z4TYhRVH8JD?=-Pppq^Th(Lg?xzU?% zf5uXtSZ}^EAm=qSiKr%CEd{?SsWk`YWaj^gsy$|7+B z6A|X9Lu8%$B=0G$W`s=^Ih{f6=1J=b7BA%Aic6U}pSb@A$&EGKYT))wr9eLCyu%0nIRS$^eSLe6hjWHEJx zkBLip#+y?r2t~s156sOsm}bgGSjwcUdp%eXJB(zZ{PLbN-`slK*+d$YS?D`rrM>(2Z*GMz&wifeenn z%ch_c1)0#|TIedfc|=*AkP%Y2%)8L}z})YT;FgDZmYt zn-wFC3ek{OZN5s)flASvY*@6QPzEEsCKwIAiXY@};)e77)@L@g4quPmTkU3ZxMW`c zm;0!#GqcMO9>AP%>eEig^)uCc;_vhO|GN5`rAP+c&VRuE&T*-zSAh8e8vgjRDfwjv zCWV(1q_KSS<~$%7sSc&+&pl{ZzxL=T-8Ih!!=qZGKC;bZ`#Lf|uCOpAuVF^t@DnC>^|S;cd$EMy0}>fF^aIXK%8OtJt!CAmyM;XdXoI0zyINLoyLN>r$vVR9S$;EsLbrMXSf^cLf>fy(_iz7 z2X8`X9p4|Sbi6k#4AwV{i17IRGL!~#w-vlppK|(6>&Jk&8ZRXE9K7Kgdnf#??8K|U zKakX$aSYhPk?Xl4OE#)fT^>_(oNwn&}%su}v{s$DIZ z_RYdM2i$<8)NGN12o^UrPcn0Q0JC^VQXw$aKA9(*lm+nYN8}O#6pdPI`viR@A+aPS z5a1=-g@`iVlQDsej^-$-s762BS}3Kk4<#S;Iq$*D2Er=Uhl)MXpMI{*H#;;)2b#J3 za=9fq&y}a)pnN}}_4AF3zVCtEY@&{JxhH`#?exKslHq=jb*5}?V;f9m;-q3K^o5rlA|Uls2+`z+gkls=ul zgMV*}6CCUpvzFQ@I#BEf+?e|lpr`Po3d6`HYb`DC`F^^x89Dpa#y4g71?!DhvD+UZ zkY75HNAiPz!`mFY-m8t=3V6|<-|^$R-bj_71;;Hh#jL1)G?>}))oTL317AW9TJ+u# zitBQV^x|r=9lSPU1j8@SM1de7q>vq1SOJ$Q{kyQfQiFeE;I$`-2j<{iztp;fm*!Gt z`9TVj4{@*aWOyV#N<`sDC)L+O#;1G%;p|FWH4Hh=`Hi z^5-aFLASbyH&i|yFv7VVI#Q%1)ex2m)@CE(*@+(@6mft&H@r3BZIB_!L!^euvEiK( zX0yOuoi4ig_Ki^?r*w%;jG`mHpHY1G$Ax?%0(?{XOM_#OI`ZRac7djs7F)1ITA!FQ)6Lv ztWN}rV{P~Ea=(km0#G@|2*4z(1A19XQxG{SOoT!>r2XkAusj+tGiX|A#S5oHR82Zg z3Xg)D*~_l0rr3{H9y3w&upi1apEQpOotm~6;VQ-fC~XL#YtzAyXX!~?z8BBn>h5Us zGoKl`eUbya!PvW@$vW*SwW(74Or@=i8@%N=<}70NHO?1ydG4g6*1~R&I?B~|-(UH5 zz|Wklfz>ABF$M_%FhFSF%mvoFQzQzD<|KJ^M6fNdvuY;^Fl1KUuU7^EB?arsjwPTc z&z7k*z$uA8lV9CY66BAyF7{8f0Xn$tkSYl?nN!~a)hNdWl@3I!m~z27_xF@c}Oj-v`mr5TzMct1mk0@tmF)Z`wUp0+LPB7o{hlF6<3T`2c|W{riPyx zsLArOIBr*oaMx|w|G&Pj~Yo7T|4FeAohyc?0Sruw)_Lq797lIEcNGqU} z=clIg=Ppl4u`T%LLh!dkZU@Vz{_!;2x>69RThU=_>)MfUEs{H3XdiMcdD_7CgRc2MQ z2Yf%SCbr~@lX)&BZY&pL9k_CpEx)F+(9p%uZE=2n-x@zv``zGX*dC@BWcNQAApe1X zP^0e7U(MU+ZZAK&DA?N`b`moTP>ruHj9t&97gY7JO*+D#B}{rca`K-fN~VCsBg#S# 
z1vy{|#I?HMQWaNFRKZIiS0W?Kg!~W*IG3z{ho<4%e|gq#iqteVOlvv9XDJl!g{LtK zQUz1l8CO*^^8t@&@JMCFZarzxtS?l`Xe8e|$zE2NN@Eh|J}dwi569bDtvDdi2Cx(% z^0T%Z0+CO+!mKw_)6W5uq1h5Z0Fya3Tx_uBmsT)r9K!xkzUn4O7CZ&(7LWrTsX8D{ z73zM4VP_2{B6IW>&Q%U);pn?-cukAFue=U=W@sc=&g z)C*K6l;#*1f)Pz^3t4oGObk^J*^a&aU~Vp=o=hA7CIJT2(;n!Cu|F9Ono`7v zj1W3R3c241#Bu`(-IQ%y_6ppISq>0eYsk@)P(2Zs@M2Uq1(;>!!6HnrrnOGo0z_(0 zjS%>=pnTC}Q~9i=mnRoYdXx=}S0$S56WmX)u01f(l*JRt@iGS)MR*PqN&~7PJ&fQj zfb>@m)-fp8rTcL@vEm;YTD{$5&H>4dH_8pI70i9v{5&|@29}E zH7txVp#E6{lyJmuB2vw-S$N`7-bed?ktU$*pcyKpj~@+DV0NldI79rNL)S@DQX(+P z1rs+R#ADJU&dUo%PQC~N=Ht1|ouT^6zRcb-sRtqUQ*&@z1SqO#cs` zkAL7F*krTyU1|GV`hd>s?J3__0M5rKvnF=<5=XRfuK+v)+2`p=)muYod+40c;sU)A z^g#=1a?5**!4mEwW($&;&#LDl;^Ud&VyLP97gqs1hZ$oDHH{reQ!k61+(<94M4Neb zK_(EN71hL$Mjq*_%y9rh`BceoBiYLA22IQMN~?h!zRVNYWIO9t8S6S_GIVn&8QyCb zDT`ONfu`7(guuCNFNr~&RgnV#>*2^JQ!X$S??c1I(%Bcbd6+FEAE1s&&8fzgG(a*e zGmqp_n5BWqHCX}Gec}@j3gtP>>1*ZQgQE5j)f(&?cv8(mY9_H;T<;P!!S22$5o?P& zDXi}|+Jsh}BlQRDtGAzcwI4*4J$;wg^OSI{{HklQI_*LXIyhpqJitZ4(uM{0m3jeF zCqFrKdoA7Vs?SI0c{2|OLBKtAc4^rnHKkluXI2<8N8(dRyQGR(sz1y;`AeXTvth=0 zHny(uRP8VjupiY9gHH=vq1OSz!KzufLA@MsBE%4f>$6tiqq*|qT-EOEY4yo*Sc7Xo zBqj+Cu&l0mK~RGyhJ{p`!#)dFVXG9t& zSQF%0B>545nP)`9%oBEDD>qNMV;S(Q$c}-6Ww;O=AL*RU7A&0&kweB!U zAYETK`NeuRD7lWcp}{0UEyh-YFBu!5B3m)T9ZToRu1mmDT7f7uR~+9Qfg{z6?%|lL zq>5S(ODD}CAM_JaO{L|89o*$BNadwhz!Yl;g`lRGd(l6!H*|CZ6)W;Wvj?)W-e295 zCuUzLlvN~}^LPdr!4s-plF>tYn#hPp3k>7Yd%~^NokL&J2CWUm zsx4Up*<$Kdho@>^5!Q_IAmCEg0_s)og zwl8UM|85EVbN|zDwDZ-%OV?sm*PYz^#2)}?P4qkcprZW`4wF@@?P?)fF6yjG)1yYg<&qz$bQSGdFqF&skVbFbzf&OXP7B307(%q^CVOe{JGDgcuj5qC! 
zeo#p@s?M&W7RH>+>k@Pw;VHmON!0eLth5+YF@93S$PPcsj|{Uohum){!0SqTlt)nwHs@pYr`Qt@0G(C zO7{D5z1z{qR#?XN_FVh|v}mmoE1S{kQM<3x-0*r=ywd0zax02l?QM0JKf6Qh`E$u| zSKpcsgIC_G<$s<&ugIDu6A{p$eXe>^&ukVM(mz z_Z`@okgw7@`hF0~GUQQ_Z^GK3#%M;3uq?e2g+d^>pmisOzN&<$%0ORI72RQQI-P$C057t>I>?5U zcg}?f0`NM-bG5?Ogqm@D*sOH6bby(jx){tppl&#i=Q9iRDZWn=>>r>q`?D#eyIrtRSi;#a$1J1r$Q5!;aSeWkWnLTDmAoB$mXkAPl`vIp);8x71!t= z_U{DAvDW7uNM++?obeDP^{^!G=bF!***|da|@1;iLlNG%B zt?OURayeOvqd2Z~f1&09Gst0)$=wapEDd(8VV~#|_1{;2-Dc%Gbsck)7W&d;{Km&C zwpCkZJZjJ6{wF!aKk?s`zWYHrNN%cCS0wQjaUEbzG{1oASlaNJ1ElX?*62q(-z58D zX_{a-lI+P193>f=<2PBE=^ZT!?JnkugdU<8ULVAiA;b^Whr^*ZAMc8j?2ue$PqEV# zE-;%L@@Pt!zh@Xpp9kO>4|q#62g(L`v(VkTr51&B{K5QUT$-sTV-AO>qnT2%{)c=Kd{qo+k39^E2A0yF$dkrNS2&aMo97(jq=mo5PFJIi)~rvvrKHB z<$dKjaHP|dm`HbLrr9H1((%t#V#a1)L41?<_x{(ouai$jwi|Ibb zGWXw z#nvo+W{@6kB=Z735r-@>6`CKHa769Ge}EGWx{H2{`e&OQN}A$1l^+&jKT7kiS*5oR z@(L3+WH&w!ns4E{hl}B81X?-j9G#OR{XI^TAp6R^4-6jvATI<30{ch+mb!@F1fnKu7Sof^bZwpXs>@KrXV@Ktm~6l7 zbKup7=DIXOkn#aVoXc6j?sM&)pu)B)eDHn6L3lC#p!aVLS?=(6o64lyuhqMK&Q_;B zJC`n4Jm%VLV&H!360tpkaY4q9_s^`U`*NrddHY$_U6*;Uu^XW(XE_G%H9KuU^9=8# z_~I9CzO4GofL>VeSA3=JI_{S;uKl#6U=Yv&{WP0mK{>jlU36NC@rf8y(vUaTH za_Z&$JmRXCRfPb@*Jr*@oCz|ws}u@eM+kd@uU(Q3Q?Tao3w`>aoGnT6$s0RO`Vjlf zgu{v1P_foQPcI2;Q!VPy_*oOtK&oBh&Y%ux6##!m78Q5~Vj)u4uNav5 zf5tdv=ngqgo2TGYa)SKO#p-R#lA{dEqOri)O+`_Tn@$h;i_6Ki{;s+Sn@^sCV0mX_ zrF)xy#=hK*c6^0QlbuI6#>(@V`9U&=eV5%w@|`>;<&_fCc;~tNAF)XBWOaAPy^Q?y zYw1qm$`9c$&QdFx3lFt^J%^f&K#X>Y7@3-9kn>*fJV=T-$w!`D+=PWF>@^R;oO{_f ziO*#`9~vde!fo}S2HZsW3N%#-%p-n2tpa4Yng^w6R9K@waWiZaNh!RdD9~=Z0TQ6| zLZR>AaDJTqvzLMZ6t%*GvZlP3u%t+{KUm2UYIa;a?0^>`z)52cCcMs*6*CdjG5;by%#6wT5kMFa3VN z&Q`~X4Kmqzfn7cp<-+1@HSp`KR5@eyajym|)urf`6u+F1ed~`_S&ICt@o0D=?xI<1 z@dBnX-|ZoHY;I4x*)89b0f<~(nI}*T03iJX<^TTYANU6;teo6`9pe(4bVZ!sZDj$d zXwSoxW#3|Ow7(OC;$sM*c@{Cn2Iqx+0F1ADN-7SAFmZ9xxaPU3#L=uuq%*UzQ8|yk zP=)|+o6YZYnh4vg^aQB300;%i_}C6zB@nQ)WzT+oa}=w=O6-TCj5tgokue#CVxI0J z_F;7Ia&VCKr(ZI?T+G8lFOdO3(?YKAg~dLQU9x|Ahux@%!#c$)1;bVU-2IImuS#7_ 
zUjMvL&{Rt5XcBSE0u0Jozk~qk@Z?~r$GGj!WeOj^DYNES<`uIDLim{s>>|cfN9niLjlwSK9)LsJ6Xj*p7)JN&IX%Kn zp?DS*NlWzK^;I|Wp7TKK6$LK~o;kzLB^fUs@SI2Cv^r5ua;5@>Vs8Ut6M2-prU4F> zQnDdYTz((#YGhcz2GBF3j+$DApcY;n6;KX09IDr{b0=ODym2Zy0aBCB9`LBoV4jyD zK>o{xR=D9i8Sbv-fAcl}tU=Kgiw9|{9S*?(Ju9R^vG&kT!kVBjZkETwY+mqgCAtrN z@M)_vnd`9H&vJv<9+mW%1LMWRqv|YB#7zj|%vLfyJW!Od!0I~!^@Ebo2G!DSzuL<0 zG8x|>992!DP-uQ_1}xUARR~{ZZIu(P+@m8oFc=SU_x&RV?ryfRv8pf=4l5kW+f0_L ze=Ycc!)WZ)a!&7#T8yRkxUw08ZBW?*F_^L4)oeJG;~W(Ci2X@dDHUWBA)~YOdW9$e zyShGsDR`~9w(^8#1k4$&KW+McvZ9);Q>2;e)>`yyY2AM__5ZK+@K64i3qR@q@;w+) z))hAVLQ5nQ@C;=UP3m9TW(Ss_lF?AGQ3tGi62&#jr12lJ-wEwSI%S$ z_pvsfaAMvXAP*lLN50;Ret|GhA_lTAWUtkUq^>O+Dkn}7!2tku!`x~~*;re+ZFk9| z;$6zuG6^62eP9V2c=-n3z(61%`-(W;S($4CSE-en?_(Y#y!|{?_KO@7YHlpFJ(ac{ zcuvewE6Q%P?ZvFUoRvtrXQ?p!t~mwdVez@ih=pdd^VO-$;9&-)ni)1c!wGAOfV>|CpI~ z0Lu*S*JUc{d&wW%{}_l6hJBYUec8Wt^hcRSZ8!Qoi&B3-sl)x2 z|G`+&IZM{EI4Fozs8^K?P!*7#JPG(?!jQEs94+?s#yH+x<7IVMo&(!gs*}QphGg{# z++bM{UZyhI1c(Lo6{7ABKe=ey33(A@SQ}>7ynHEBM|-;i{LEYSvSP7OH2va4a9g7I zvQE^On~aS_95;+!)wbuGIRW&0MaaB(0i zhhU_rNgE4YHQUNg)y^AJC-0RlTAM*0Pp)4V(-`rJ!2e8VO4MVmFL#3s+w6rEj4dP; zwzs`7UBx|i&%lMhi7Zopl*A)agkJji(zfA}2}xtiKUoMt)AKRq)ZY=^l5_d$OH)4a z!7ZAqe)m- zUIBzj+EyZ@GDg|{I=?%Y{#=ZKr-uVc1H*~XYo6f} z9zt>EoooR-w8FdN8#9A{3)AR1^u<%XFGbB%@YBPb1pfEGl_afJ)>Er z^J{#s7~D0)qv7`*kOVvzlE(nBnQ{@*lZ)_Vn0Z_f)TxO+Q{b4Xych%2q99Jy|DNPSWeQSy&n@d05 zRpsyW(3vlLKJcp}W^b8NA+SXkQBrH>Y5HX%$-l@vOO0lA`tn9mu|vdurz^EMPD&z-X&s++LAVJxlX0~Lk|D|rC{oQr2+{j6 z(})T56^^I8eFLaDz>L;D1}n|!zzknk#N%&TYd%e{xMNDPY)EZ01?6#wnHs$Di9`h^|VYf4M#bpdlTvES7gq6T=NGIW(_QuxyB)|Bj3p{_wIj_ zJLf|+_}qFnZayY(e&|i|89CikrB4*Y>z^8D^<>rdV!8mu?56A)I?9cH*O2NiMe6Sj ztfit~cs!Kbdw)Mm_UrA{;>BXt9P=S>`_CaS3_0{#f;s&ngcz;9D*NWAC$=ULpPuCk zw@iMx%D#aQ8zQQ+9DZp@cdP}{5P z*g=|__{TQstka&hd{rmyXD!e9K-v~(2x(=2(D-U^w}EIc+9 zz7qs~KeXH)&mYWIUDfpYDY4n_XMxzm6i)LCT_X1>Asyk~mnZf&rc}?$H7%!24Xpo= zWYK=??;u&h?(u%8Ys)D5%Zo2jR@cvQ|8#%Vm>j|AHuGC-^s>0=>~il#?mN5sBL8WP z19#{D*ImuO$NRs&`A__R+27$jkWL(UR5JG076UEFVuMyRi#^-RzNH#4s-{Ni=RqIo 
zMWBlS{Ag9c@|iOhe>2eu^6Jm-0j!z4ZS1ONL@B1UUMRI_jSFjsBb0VB9ce473rONd z75jpDTPgUIbr3d(!`fjqVGXa>--N+P>kYJ)x|o-BBlP=1TzgUq$0t*o;-5toq|r(y+ZGY9caAe6aArBelf;x(z6ovvdWczsxRh>Yh zo(N5-brnw;%*^a&SCS~lx;bKzOk(#%a{IO z^1Mr#nZC$vC)$r?-;{^~Dv!DU!`-=!{jl4UdlC0P{5%XP<3A9Jn0fTv1$J}EkUiAY zpL`hLTR)fU>OgQo$$F=%bXJT526XOD@~0kZym2^lZHDo(oy#0D@}{Z?Sdju##zk*+uI|Y5W5P}=tJ6Z) zh#|0HTZ`c}5e^o`p`PD8GHh-0YXWi(@?$P@G_$fowy(Q=AE?00o{Z1klqZ*ewita% zf5gca6Nwaw`PK2^c<5F610laR|AQ3tpZHhQ*xj+ZBAx#9B>(xy%O6B%V)uTCar`zI zH$9*!o_ zmHU$**!<&F5u&x}+;0HIQT-XALbYS0Co7n2oG%V@-HFVVZ& z2*v-<^slxcbX~M)aWJQ-tVO|fvjpvX8-xAkN2?XL#P(hPMfUl>KLh{7f7726{*Uoa zY}0icez!|UTgtbK3|`Z8;2Rt`cF}G6f)-;xi{I2}3Q?tbskVA*XYrVD^Ruz$R)i&> zm03OGHxO$H68vzq-bd(c5;=czc8V4bpUla;I%C?K(lQiJtCCdYmels#G~$=_l2qDD zhw!hC8JZXB!D{r+^D~*!%h((*p(Fa#vaF+u4wb)sMJ?9sX()SH`fmOH=Aa-$skK3z_K>kKzIz(?l+wyZ7?<<3>}v*8^nT871N7`@v|q zxT-DOH`MM(K6U*gp;!N9vkJLzu=-Sl%9AT*eASPYQWosq?e`XFar&6}D(=hP&Q#>s zXc*;pR`8ns?SXz{cKf@zqZIz9vv<@|THB*C9l`9Quq+STez*61vZ_=!&V8Pj>gE09q;tEcgdfk9q!ygdBIUOdWc zh~bHs^?ruLUvA~nKNRX4Vb~P-3t)nP@G|whl$yGLQlVVpQ#!xj0J(=%%)t}$asKVc zm4$bK4*?ZGDohB9Y^cA%2SD&$6CX&>ocMrVZsEHGO1DGG!4&f}r82Knb9ALMlRKQY zJk@yFz$de!Up6}rIc0U+B@1%Hsj5qPR=iBt_mHkhKOb`lZ3I~~Hq{1Izwze%Fl^s{ zl^=HRAx4rO(3T7Yw4MEvr|-nlFVC-q2Q2Ir z*QH{Fv>WCpd#8F90>dWM9zp}G&F>2OSXm;}a!!o!u)-kIhCSstgK3g~^q=@|G&$-24LF#ZIO+dA+F}5F4M1z&YKJCvn%}%IOu9^Z2qIb!5f*00 z08#46eS*lKv>Cb%gAE9EGu1>&C%M;WLOIl-Vd5yCC9oV+{*D(pl>}x=ucJQ)k>P~T z6)e0!KoBb!#7&Q{e51{`t4SYBIXB9VTI1m=r*4AIH}8MEe7g2(1(8BJP#zEzmP%Og zQ4++h{(!I1kmYi0_$XG-VSfE<%}slngKRv{4zhMHV*2nDW=4e_9ErQbPftj!QwheWw)SOj4%sBTxHl7Y zStW#n3dTB`g_V3e=2bb>P4M@Gi+l`sp1G#~I%;5PUM5)=Qkf_a^(eFjh(ZVm)z_ zR>v>VD7*;?n`-)8)=0SU%#hqJIsS3ERARBHeH?RG2+o|Yj_ON5gF(!QSCLBHX){Y= z;DSem?R1`CJ&Uxf;wln+e)O)M4GGrjOPhw8_-4g9uL3b;myLYtm>@v-JMs)t96Z1* zCM7v=VIPwbSg49LV2R>~un}S`tWdFN@L^@*3 zx#C(Gv&~t@;BEEVS%P1F4yd{!vxT3e$Sq&D26bFpc4nie`!>ccaEQ1}K6>RFvN1!M zvoT+&2=Q&&VlA^L}=g78#4Nu1V z=3Z}4{ywK1-_X8ksjmHPN<$%Z2t3QCS+6=V`P0RZ~nr2l_B{3riA@9uD3(YIKCRAP9vOIMZQir0|7uHK|| 
zT)BqRD8$XosHt$n8N+9OP2H=^?qus*qgBjA-74{mN7KO;6<_=d}-H~_0SlEA?VgWM|^NIf?Q zFiZx}wIUlXqUSF@fOT^LtXWqh)C+CE^_SG*L zZH9u$425r&$E2-z{WK6aRG+z8jP<00fP+)nV|q~KRuO9|=567r16A;zNv!JGg+`O|wh%SGW*wXXOid%OC z3rcn2*ifG-WUkdvUE^w0q5TC^o&ptC+;ihr8H5w)$RK+zOR#XX=P0B+40n;Kz<$=a zYA|_d*Uop02-QhB-{zFE5_UYIYPmJ2_}Bawxl#zbxIgjTx1EOuzoT(QD47^El|${B zsQSy6C|G(jaudkTwIKb-d~Xv{H8+hRPI+P35M^qtkpqn^JS;(~0W?`PU&6SiDwf%v zp`Xm#O_TK+02m=pB$aBOsgx0>+B#-}TU|%Cm_&j}3YgQoAHbHd5L)m88Iq0bBjXsy zcMs2{^(SMk;t*@nKVp`#*EeV%RHKcsR6Q;p7lGOID|!JH$$+w9>4GCIivd90Ijb0V zXx)cuVC=H@tT)W|-fUrL?Yu%kSqk9r)el--j-{s=pfn7j7yaX6llUY5G4(v*!U7IDM{ORf^X)$nNjz(JU*q1xP=(xw`3eKW)ttca{lV59zZYUm zmNz||(r?u19Qv;MD6Xduh>u^?SyPATbl6Ot#LvNRk6JO$tyE1~aQJ|c-pti>Cc(>k=v3txsJA$AUw_XDk+E23 zoMa7<;yC8W<653p%rf@BD&ewL0Vx~_f{2zAS-8RhH~?8s0t#pnOThS|)aN{*a9-A@ zl2Z@D1ii=*R(?~dk%Bt5g>6Y%_`NgYU)8#kOz#t$-iZ_iaS@m)>YYO+DRrr1zbC}G z5}PW*`KyY!PohnE-v$b-(>$cKLr+=e+2!$^9Wgj1z=8*f ze`VuR=iZ1jvwnTP3qsv%C5u%={H@$lmmq25jJZ}L$Yme*fkiWcOm-#Z(7<9%BEriR z&G@K{q7QbBIqxZ@Dgkc*=oMmPHkrWA-4MUW^1v%Im20w`cqD-qKG$S;nCesE0mSg z23a=ztzdmF7JTS0r9f8PaQI?xctK}U_NLwBw0xqX7VgkkczhPpEp+u%$6`o-`T3^v z7NDMcBK0Z3>VIu~ohU=He~eukntb_DoH}}6O@?w&Ygp*2J2k!W-|hGR59jbUCg86lqp{{0N|QF);xz#rbv6M8l}q(3b#n>c>ght95-4Z#g&6WM*5xh zZSG^j>EmBd^f>ZgZ?Zsob6zzxlZNkhq!{u|-_38U$3zpv6`%oriOF0pj>4S*14+A3 zUz!MXTVXgwhh2`+=;Ps)@o=$ll*btSl$Z+sBh?}Fx+A$PWprN*G~_fmKoR}=mbtS1 zW_wlrB)#F!C-dlV&U*9KvWLw5l$;RfQ{VcZIeszCu=wzWDL7P6CR|RGY-TT)wBf92 z1`8u~r#N`YD~P3h=(iKFQF8_Nxpyf&vZl%og35K}EZUc=WVU)0ES;p)v*r5NfBF_D z^K#?6)j9ILmv1(t!%j|hcHt=&ToJaY1J|)Vjf0J6ndW~Vs0cp9*u5F5>r4s+{4S?B zC4CDK#a;8imh$CI5>K#TjZUET)y9FOb$-%jiq{#7saXjHorO~(I=8Y(j)n2HM?Rq8p24o|VEf2;5JcT(L~wv@#sgSpRL{MjFME|TzyK5u6eo~x zi|VEFmG}$m2g;ObuH#1$PbIs@tj%G?{RB48N8yt^gC7o0jj$qtu2dfE*T{9 zR#XY&wOfJP?^)Y`%s1emedIbEjEp6ek!UmXxeUZwpMGMdzBRqNkr0`u^3ck_ahcB+ zSYXYk22h2CQWR=ZiFqkJc*BKKAgMr31efmDV*5*8D07mX=}v-34D!Xb3!&P(R{4~T z*2rv2-Vu|CBbjqR|16@Wk&WI8(P@duds6s9tj~?Rq+*yZbL(YVSblq#Y+ir3UAbM^ zFGJ;P1Gk;h*W? 
z%YXELNe{2;y7s^F^+ZY*%YMB{NcnnahlVPokAw=uqMHInOKn(~1@DFtVTJk-oXMbK zq#&~2`+@MQ(~`*muW4dOzz7>bwhU4!>^VX9V)JnJjKK3>@8K1Glz^gSaLA4mW`LYz zV~sGFc@+{0?M$}$PzS(P;v2e|AlA&PH0(i^_R961B8zd^<$@sp=dODO+ni_livBo7 zZYZ-oFxDa}*~|pR4pW`P^Lhbj5|y z8{&KZVpQ(EU6)gaQBE#}b{(3!^XAc>hc~ca@D=% zBVRRDVz_YJEX!S(tYf8L6JPe#0l%wOC&WLrqA+*dUN4~^+hBT!KR_a;>a&8hWM5^- zXUxwwq^R_}*CB744V0Lh4dxspzu=3%20yv>rp14LVP$wIy zq9ixyunyy-s2xzQH9%}@@z4CJGkJmS)o&AlkKeo+pp)+;T4-W^tGTg{xGZ*(mQHWu zo_Gw4_uJV-KY2HipU65=nl49L3QjN(a27-!M)^Nh?2KuYAF;QI`gB*QQOmqq)$LHE zyd$xdTZmu2&DGS{FzYN{WIRZY@c}IRHXG~0+81ossfEFGg)8*;ZD$T7C~k}I7WqX6ZWaFg zNSG4*WV&>Q`1eCNUrl@)^P)N+kSKU^|z?PFe!Wsy{;K znRRX1OI31*fUtiv8}OhNloDRxacu}o+torDr!o>@gkHP@qYrk zHT_T+0=n{;1D_DI64nVs&>*^6SF?@b{rBTv3#FFPR-r(v{c9}T5zw%gezC95Dec!1 zAVV)d$tk3d3FE5?;kUQG8K?$cN8hmAvIN%aau5p4f(&$emuil?U6`j=R|89(LRDo4 z&Z&yttUgO-;$Shr-L`wX?c-@<5D6XBg2%)DAIjb{s>y!e7EMA3Ap{H|Kmv$KD25&& zARuUf(2I2GCP3&Sz1UuA2vvGV>C&YtMU*aGx}d0Z0YOEv>+9j}wZ<7|t-JO<_kMf+ z-{&*Ougy6DF~}iHHA74IC{R%T9Q%ot7Wc;Tn4lOU5Z z_zUCbsjp1)wNptI5O0%t=-TdES%G zQ6`faji=>h_XXQ;8eE3Ma3NNeMM!J z0f+fHXX||Y)$xlhWHsXYwsE@w03TFv*m3z&y0DXMhTFaNQ5|dQyMcxqPw4t0w&LZW zdeAqoM8a07Sh|JAe$BXxOJA#4@}uW2-O2T%@Dx;K}tP1E=IxVGc|can|;(zUrpL=d8inp3W}MgvT%Guq`W5dyrbajxPH zq*)&pE^b9XYM2Xgb7}uaLG#A(w$E8!YqfkyfPWRq}6e-fY2C@xN zsyWHX7hynT36woaZ9;3H#9@BTQ@T{#+UJ`zyL;08%Awr=6vnBcgsfl6^0;=`dDC2f z%kRc3m+9j(xAx4=dNG$`Rii@{rjf9)diP9Uf)Z-i;y7Q?VpL#X$Kad9%0)x75-)-8 zHP1ilcpcsHnbg=a5)a=X-p|TKwyD!cy#x$nO`T+rmVoNa1{n`&O+7)5fUCLEvuY}` zN4k!Kg++*ac|KsflHGcZn#GGe5gQkHJ6-d}>mFU$Y=I?eK6u@c zHRn|6n^IsvxrSB;`O0gq%slD*%{sQ7>w}rtr#DqYZ9Qqg*^)|~cJhMKz zqPsbZE?>`Py?e8A=#zEeCw%t_hrXCDnR0|%Y9W{w_yglwQ1Iz6j!uWG9?PXVa_vrt zo3}gO9XlZ1$ZX|kR ziF#RNsLPl(L$426YE&y$3GI%EnsMCRSJDmKk_zFJZeUuHzFZUZxS%0$zvr;~La>c^ zQF?d%?>QRjb6rbrfbE8rn;LI1@6<(+j|+M!8bPm#gHbu^xr8pu(*V43!~|)TC7~jD&xID<2WCDj_O4{0Nr}|-_x?2t z4zqn_#G<8Rr1%zzrVw4rDeq%Ib)dYV(k(==$071vI~A9d61dtbRQZ?R-PGSeumzEM zOrg|{R9E7`1t9!pq$BdvN-jr79J}hE7~+cw&V5c5hOG2wfY!3GB3b=NZO5CitZ(V^ 
zu|l0Z$dsLX3XPi4oQK0_?5uRDVH^@Na+=Y$*{qT0DAXMdYeV3#EDM=jyaeABmGXGp zsv_^23M!wkmC)X~Crd|GU{gYe@=%CyqT=EYsi#MSC zFYwsiugU+ST7fBg~~TVIz_ z4}HZ1k*&!cIKy=z*&mlmwe=|CTz{lN194-0Xzh!Ao7x|gS_pA}yWdFSR`_+^m_5^om~F7@G84Xjl&$s-HBot#GF9H%)_=Op z2X@;O?k()1@J*tn{fCVheZkLotCPem=gU-ko=p5wH0u!KVJnUu^;%($#Xe4n=+8al z?)ih9TgJ7~$*S)g@PE0L_$U6C_t$0Kvc}y!_CM*Ne&+>cFdo$Vs6d8Rytdw$3I!62 zbqpF^ON9WC(X}jcaGtKoEcSB-$!_|hJY{*)4=%^owElRA z2(gf1Tz}@v&(6u)kChLRHPEj;-(I$HxArT;o_0O=nv4Bgc}Jx$X)kG8rc=}^FAg1X zzcq^8@DcN#F6v(15evCOWdQV z%QeqyA2&@|+)z)_%x-tT-dFtf#*b*1FFDUYR{B|{+q0&wDYQ3jE27!GMo)$&@Oz{y zm9#CNb*vHdPY)O_x?C7)u^e_nGi%g#rec4lEv%5kJ~BsfaA2XQLg{;=kqK_WBB@oL zzYIC;7YDOa)fV90sWtDfM~ zXmFoq=N6$ zP)>AjlQL{{!*Biibj`K4-MJR`jQlY4!%jv`-TdabT})(KK4s%8xJPF6SF&B;N|IlN zJN-pfrp}upW~hdVlFiB6(Jr+&zQ(&3sg!@^8g#Ok^oFlDDTy}n3M4)evb!O@5xsZz zNog=#`mK=sblKWm z&H{#&!Y6owOpCpfh2rVO$CzYR~NBRIaAW=R7Na$5D*KT-XE z-MHG3xO@T^1hO}kyw%15?=qdvnn{wbcKHl8G*_y2oSzh||Eaqx#6QN8+j*5yJRJDy za*7V6Ig0jQV%=_rKw< z-EP%qryDvXa}(06%=$BmCT+jXWf0~+`ES2w@=jKPhuNlTu#$TqQ=)>JMQjssxjQxw zm)@e0Uu0fXh5o35ox2}o$)ktN@%Jh~xL15Yq}Mh2aI&^W(=gQ10Bk99*j*5oO9Upn zYTv9-Q*bOTiB!#0bD03g6O7#kkleWp_(`sNOF&zYuP#{YK5O4V+#9_?hSRJ0z0=A8%-`tEUMc9fxTLOjO^hd*&CV&p{}h_ zm=M7hbV`(RF19kTF>gL9Am^nkR_S%TWwHE*EC<4twJa~u{(dPUP(3569>L3AlQ)?8 z?u4~U43DV?sv4*zc55P9IN+EcsykVc*(I^5=(slr0 zF({m>jP{J&IrA8)V6>-_D}YyCv6V+z7up*!hrvoo47)TzI@aB|^GFtI#vn)`9Hj4( z2}pqb22Vj(M@qoU(Iw~-Ao(<7f(;Ea{uw#gd}cC{L;eCTAV9A&=4N@?*-JJ$(m~I! 
zPT~tLiQ;{6^CKMN;6cmu?%sDT1H?D-sZ#?K)icPt^h*lTkP$ z&P&B1cyEi5!c0LbeZuRlrs%5BsBB7sEhkt0!c931-sF~1k(DF$wE3>vt<5qO1vZ$whvPA?zecC5 z2NxQp`1xTNTPA%WUoTwy!~$NWSb~)m94O_ zi^KxBxRTvWSsYo|3NkiIhd}6>ZQh;5Qf`;osJeV&FBaDi2M`C#jUd9|)yxe;X?QPw z50&Mebd;5lEYQDJZ>2B2jv*r7sEoR04z24vSy0f2OBd9-x7m+ ztRrVXvpMAm6pg_KEH#?B&0C|hzIMRrS6cGKvG1gK{MPHXQgIwYT9@iRc;+kJpGlKf zj$Fzq)2Oh|6aq0CKX1+zu!vc~^Ix0M$L+>S{fJVUS3P-d80KW<2Z{K+h7grL&hwW` z8R(`S;Jdm5r6k5`$+zMnkd4HE(ZqH!FFWU6ID$= zZc>8(B)7LC0xxbj*p;&+iCmMV4E-7fius*T+cW>tMmU30#c(hOp1-h-5|H@IQt2e> zKCTJdD!n#nwF}ynsQuic181?PU>F| z(HeN8rzQQ#{zB)SHshL6-^6F#NnAXNq%(#~qa%pzEPw^v#fR88y*kRH6A4e_QaBbY zm++9HCtUatY)mb&aTv(B?SgzCaTsLDEt5jHC7-C$S9&zWb(0{pk*IJdwTMTu=ye?Y z97@4{+AD}i9lp(-9iuswOvaa*4Z2+f4r-zjrfw_5Bqu-Wb&aHO0#U`Ts9j(kmSili#nNbODlttCt%K3 zc0LttrY*VW>L_guR(V)*0_#uXIDk=?hxrd{x~!#tQ~KX0Jtqs#{C4SJ5AHUrbXbn? zcFJY{rfV&Q=wMl6$S6O-IqAGGvrZKdI~idit?$VwC~;+l8rs$l;EhSn7Tf@I(}?%I zli{?vaOXS)j!O!X`t9eQ&F?Lkv6Ezp`Tjp;E>vR*HPeCz@XmpY?o(7jSxW~`Nos%9 zqAXG4>;-=pIk2wZxOw7)qQ{V}gIYu#&-^nxt%;->N3r6L%eX=Jgb119hV1{@8}}dh zuRFf~H(ARc`rcCne7FGsNMxc;lhA!{=Ct}G!Fh&EKF^V3^=hQq0DvV%9Hqbq2Jixa zI{NW2PPobhWhj2OEg^UY3W6svj_p;Uk%00(b~0Fo*c6H25qItdggC16-H9D8tMHbk z2oC}vo~#ANVI^9Wy{zmM8h&RuGg=$Pe#E)!E8Ks`np$iaXD;jhd(;GV;jXFY&mY}D z1Ed;>#P=}Gr*zm&!X@S~E7-@hYf`rYA|={WXsgNbVXZmDAM2X)x*<6Erh`?S+)uLo z`?L4>!%nrUh1qSaww2D$1z8?&r3K+F2qHHx;%o9@oi@BGs7L()xqq{J%k7Khjx7hB z3B2f#u2z`Rnu$z2{zG*tuRLTwi_27(xtye&@r53$eWHUHbk6Uc*x8RAP7ee&ffCt! 
zlgS*8ddb}9bls4tPu4xROHB+wpy;KQ3fuL)5*2p%ttepcX`xul^XFOUZJS4Cjv$B( z!e_vp=6FTt3A=?WQ1_RAkehgmpPNCSy9l0}NZUYx71vtvZX_Cyq^g?zhRb<{P9C5< zv7H*UTgSkBz%!xqmSm)sFNLAN7hOW`?Q1gY00`Af4*(~uN+u-X8n)xnriYvjT#=KJ z7YS;t;f?_%sA|r)^bkk(DuBMqlZl6#8YdLE8Uk4uM3hz0b>8PKa?m&fQN#&yV$wlXO*i9aqBN!fpI)YJ=K5b|i4C zw6x-o5|Jy7xbaC_1QCbvCm#X};`5_a>#*ow;Y|HSfF8RxFgXny5Y3Gq7|P zEQ;A(GfNhd&p5rF7!#?HXg-KkJwEoYSJyoU0o=JVXIhpYQ!wkPY9^pyLB97Zy+OUU z@lS8fW^YdB9sNf`nqvLWFHUiAI>=b{+!NXhMg1U`gAZR-n56%I1J(@b|ZqgYhHd>{h*Z{N!*~i^TTkuCJWUg6i7{u|{3uF*u4E^Eefka^fsA z-@tB!Trg&lvK)Iyw({Ye_NI20P3isc^G0O`o~?IG41%JqgJiBht5dsXWXWEq(KrsT%!QpiAtM8A^=VN1TJjW{-~T zbI8J!ok2o*S@&7NT)f}^GD{;s7XzcIGa9V@k<}mPwAwn|lS;j`4I3MF-^~RX{BGFJWiCTm1@Z=jHhexM1Yujw$?}h94ZDJ zEf3Id6O(h6+KPYSWt{4zEy#lM_mn`1J3z1x`=_nLoLc%zl-pXdHsH@K&noMclT*=A zLW{PAheEF(6yR$oh6WO!lr0AM_%(2buI{MSo2L(D$sF%PuB$)3t;hb}$BHxJJM(8h-Z}o3N{&NBJ&-y1}d(Y&!{yFwPVSjj@2#}bT z`E-r>*_^ZkkWS{{ajBj$7U)HffT{;h@S{PY?)~Ul_ZjX?CYvWU158kq)nm!iqJKe( zgAh=GXIkrsjD~&kSPU1)IU8=N@P}_uVlbXNI0uT(-Z|+!u3GIFcPN( z%;~D0S~?jno?btFH!-0>eA!ttCNxX{-zlWoR?cpw!+L=ZMGI6|^C3%pK!tl{i{8?1 z?DfL3*Wpbv^eDB#B_GLi49_LqJjK`Qrrw*mEp%zeYWd9GxKB#6H&eBFWM8n^Hc8$ZNds<@)U) zx#Y6!NS)NUYI&D?YPpb~MB9&uo-mA6MG(`}S=&AFVYga#lHtx9qv(vCYi#}hVk`Q2Xkh&RK%gTC$zU14_!72Cv}Z`<6^ zjwaeA6rhhoeJM}ToJDQ2A7sP5C=hXJFc5&gPcXm50$>;m31KIiur9bwyk{()#UctD zFdci2_`z*ee$xW99;L!98x1d)f!fg*1b$*ettD}FmE=fZRNi&TGt#>I&b)Gg=0hlU zrejWjZndFaCBISFf=SY}+9J$^S~I;+-yOgvqn#hT-}QSz(%$&wxtCkQ7btCF$U4#R zKtwSMmb;)qRG$To!v9E)?KD2AD)908qmduC$9g0eW*u>(Y&I@aCc!FIYp_IJ>WHLa zf-BzpeMYq*qkDetZG>*|p!oTbrOLgfab25dInZtx#le`G5zBnckAqosumxl=xIp<{ z43K7)D46-|x&!MFMT8OEtTu7W)90eXsj7A9f;!Wg%jd1rFee6pT6MmLxz~EE%hJbE zT16&Fby^3um&{tXg9WdiTl_2de_VO}%LwS7_+Q)GGg;*E;IUYdiCm@0z|YzQ#5$IH?NvYn$;4U&5{1{mGyYfIS7pU zJfzm5*Y$R@o*EPp7E^&*%uScEH?;^L3>dMZhcP(< zX!d9}w3GeBBCg$tmC$cTj)P>kwa@=6COzz{?h z#Br{EH+Y}IFYyQaJ_dgBsEJhul7Z#JdX1X0;AOL|QVguz2`M$Y8F*1}R8RjP2k5phHmwVsqAYc?7Zg9Z5RB>3laCFF^=wDdAk8?rrI&(^xV% zH4BMmrdH~|>+G9rl&U9+=5;R%AC((s+rAZRxvt1ymY75&w8w?MC+Y|~HRTNi)1Nws 
z^%EAvE?*`7KI0(&bsZYC;_-LNc*=cSN#nty#HXJETQxM*M(!A143$slPrP8n9Rm`r!Q8PWFS;NR0ze`F(0{@F|M~a_{!i03Kbp`~-#Ka& zMon(1=+bHZ0z`Da3D2s zy_%yU<%6CA&{&h_nW#*k$6Eq1+Of46AqLU!i$o*uG^&-GvE$4}d|z6<<1nPZ%l49% zMB#k*WSs`T7=GbOksgciF*hDxcD1?kdP_m(lX8IddXV_aP318wfr!8J-PotfYHr@1 z3KSaeA^o~%4Y!}!K2*vX(4$`l%4KF}%eoJj?&SR#Fx9G%b4+Pz9j+D4HP7w5V50p)8&;AuV?Toag$X_dCi zJv+4nGIzfZT`74s%PZ@)Z`aXnyfLLOQ!Xy1lvtSG=N4K+q1#H3U^=5uDS84b={P}U zzr5mG*n@21bKe)#Pgvq)pMgBP@$6A+$>WJUM@8~rU+kv|31VdHS-3JHQ}hOlG8=2c zUtF4ZuJo0-6d8R}`!Mc0sG^HR+}buy_NrLj;1N)j4~QXJss?0=v2vH@9a}l&VeBNb zN(-=ub!_>2quHhG1t<8}<>9#MB?8K`DZvUDc{$K2I~>?-F&SfmOTE@h$qmL6*}Yar z`v$>Wa87t6Ocm%aB}pbzu~wdx^ib|Y-CWBcueeb>tG)dmTLLJ5^r+0#Vvb9KOXnkf zhuse>p5?g`uZv-2Pd(y_T^LEcGD5V)2)@AoF(>yi<1D_+>(7o$XlWu~-Z{67O}=}0 zod8PDCu)A}e2xS9G^2tLkURodyTdAMo_gk`1jTLnFNcogw$v24Ht!@^-R+|B(o{93 z`hj>EOiG|wZPfmVAl<_`k4bH(NSV>EwqNWXt5bP3*WUi{*|HF9H27Fc)D7!RoMh?e8K>x;#WY0Q%4sDNKJUL0QwK_dDa4Oi8rsIhuvTV_j z1o;6V;X_o=NpUp;MS)Waie=GMTln3I^g<(q@k3WdBesbz&?dToNf2AXF+6orVQ_e?lE_ay-=eTrA7b zN-lcM&4(+N`diA_#c@%F=@m%RE295q(h>;w$nL_;KNv`t@oJ7W4XYw>ky!uLkVZ>} zW*D^pI9U71+FsWAZJ9khC&$)O5tR~a?vBZYYps$`aMK#M}gs@lvP=a{;WZg zK}(OVbxY#zL68&~U|I#&y7ee!%miXikila5vl{RUoB~YT^UGM%LFWo@gXf&#tO2}$7jf6rr z1Lgb3`M7x#)l~}~(U4HVCih@!4$)r|kQTMq(S<-G0b=OG%upyQ7#*Rtn+8NnM#^5} z)bC%%9!6rR!x9<&xy$hc7ivunLxFVTT%@mbGu?9Mp-wJ{Tf;s-Bx>0j+(0HcmAz8r zWoB?Iq=5kPH~C5&bu!XN#%E=@ zy94+W*tJ~KEDYB}w?NcbD7~^d0(GqQxq8D$AD)h)mTcL#@f>7J)WBcMpyGyM!k-+2 zQw4p2oTWB4O`Udo1-UaRHEe5Q6f(^R_6}DNUcn+8Q&27w&a--{{@=gz!ILC-@^sie`yqS5XZcDbo${;LSZ$H$V?zS}3u(NJA~ zS)3CXX7CYAi%PcD$-)s_`f~hX*A1hPQP6enVGIC;$t&_20RyWE+QYtJpghz{!;Y^* zRIj1^Lfa%8AOvOzHx`2Zb+;2ybJQY;3?-B}@_V&3_WIYPHjEEKk(=7pqInpiZ1=>; z1v_t@+NYhnqXo;37FCc*N7Na=TN%?A-$^coiybV44j|g28Rp+pRXKsRb)-4YoX3-X z1Iuo<;UG}xOkdYomp&27ZG3Tk;vPv2on@Ab+NI z+b{1FwG4?9<7Lx==k9pC8>m>^kla(1&1ioTkm)w$dB^CAI$r<7eSNiCk?`u{g7>J4 zP7CPI9G2n90|>jwd!>T5vF!O1lA?xeWCXQOTLcaQGqz8kM@Ndf991?u^oslUnoak@>21>{CX%c!-! 
zJEQn3ox>^TJf}pyy6+?BAN*C>j20h%4!@PcHa{WTX@`+5iIp;oahkl%U5v-{uhKC~ zN$ATLrx|OJC4b!9g{{*Ikra+)4;USCpIyGUiVn`eoVF{`1E1X|G60YFZ=_f zL-vmGe;WMe?CryPK@C>E=E~fHvwdTFKp|s$^y(D+*F>L^9I!0UQ%k^-^*|9~9OJsK z^1PH!G+?+u=Sh#dqi-dCd>{CM>6*78!MjZ_3%;ML*_Qye3+Aqrv;tk07eD)(IIUf9 z9ZnQusV>|_-5Ul#ARh-t;GWd}L@60nd3UVA(K;*^QLbQXIzr_g=`ra%?vX3@r zhxRV)N#V>9(2k*tepOG7b(-@pf_ZxqOaSE=xYW965-x$WMeM@7kb z1{Jzq?}eN}&9}@YF9VI=v03tk;X=8chQ0J*ndj599$oT%D+ba$`k*%yhBDGg{aeN7 zl|p~EuIzf_-<6*(WlL0@csmC?gT@@(HKVUQ%%5{g(+<$RyQJ|dwne;i*mTN+UUzOh z1?zXXB!C+56#cjzJX@L_p}T9@LJ)@pw z1Fz@Yj{uP&a&Oc82ZcxoW7eQxt9b~5i!0VWxkbDHgi6`R}h zaxw4jv`p)x7SD*fnr@gG>Smxb-~MxAD6%4|RA#hkf99q8@l90Sx&h@qVN*SQ;)dO- zcyPL*+uZYMDC~QWsRySK0>TvO4Vj_NzRjA}a*;#U%D@}K{OT3U?u~H?t zr|D5%|5?cU@5KK9@%<0}-#6J`=X@*0`m{%|@8P-~pycHG4hXv@oIDTjk4;#KWZgcc z>{`aKUlI?c#t{JH$+ShS}%=>FV9nqa*an3XD!hPe?X8W=UjU0VABX0 z8wVg!5hQ+s!A5WMy-7=BLu1hRIVwXdu%ADeSsG=Ye^HISVx0q!UE|9N=&tWU48{WS z@yab5%^~VvrrX+5<4f{ZN6Nw4O#j##zP6_oO&`M)JPux(KxL(n;jvoh5_Puq=_-LM z;jvMbEeWGn7wOjGj%P3}%240~gO^HT;&Xh3dGA&Bo=F9r$}8|}+p6z-vB0Uzgm%xR zm=Ioyx2-Jfy-QZCl#g#%@bP`;093qq)p;U6D0ydaRq3pU*}_@Z@Zym<=P0`un5#lL zds|&nQ8&+XcSz(=-}L7s8D`9ywJW!LQ%RMqy;t=zZRlmS#EX=ct4WXYeJ4A|?b6St zFfuL)_wD39{pKXfMqLTX%deF)9dcd{y#OMoxQv!9E(>5^}x8sQ!^)y#?R!|pg!Ku50lCdQHkPVxo%$|go=dh@e|dh) zic1``k*Y-4oZ*(tmRhN-mw4rx`4pK$Ku)^7D<69$t_G@v4RKUirJT%uC{cs?OI(EM zzSH8rI>>!e&LQk12|t#uQ>^P3zZ<(@tQHXRA6-!Xfq%(_ogI^{!zoNp;n1WD78n{u z%{*bp8X~Z=TV8z}T~T)mTN=a1DIdqsPmJu}hAU9ogupJ;DlDI0xj0qVwS4t@KMzp{ z3-9iaEFWR=+_nru1onChDk3kNPZzYQWOD@1JK=>9rII$0kb;@~h#L-H{OXaS*G((x z9TT)wVt4xUo7~bvka;~YkhH*0CxgwqSLV-1*{$O>Pw<(aVdoG&CyWA$Ci&mfiwi@f z5$wA(M%jIdDVhxL)=bgrNBs2!wAKp#$@c;w3g8U;($XHlxE(~|JbWw8_N^cuAd%c} zX2VWk2$ThYB11BLoR@7hFDTxt zy|uxu-{92itCrb%*2A>SoNZnH1~U6LCq`(vHv6<-ZUnU|n~=yioU<+VMUORZEbEga z#YxZfl;n_i2+XGL#Kv0sy%Px!YsL9`aT%rgG;yJGc}>^7+iJQ-+&xRsb~i7hRf}SV z5fX{HAYR>bu)L*402;&Y;@+GHn8agAJKy?G!+$6NtZ+|m9;luxp~fAUR)#^8 z1hOdbvQ3Im`|rxBW;nV|K$D=(@Vin3DgzohY>Y;2NCDo7fA@<_FsU-MJA$xC8hs?j7w|~hP6ed z{+0^zj;efVkt2N@mcGmt5HzEC@ 
zGc``HDh;OjR5geVQbw&azrPIroC0K;9dech#*hyja(G+p37=rNE1q^ z?5rn{#$=};E^Z6LB8{d0QdTh$i?|+KD<~V>c7q9AO3nnx?UkC5ysbEl$(nOQG{gM@gGLcl)c;w!6AnNpa-)m$gkfjLBCFwN$PbC3(p;=?Y!? zK!ChD8fe;$nF8Utp2&O0yCR`Dxl@Eq*6$B2&4i;d%k|0pH z&y$nf>|K$WZu(#qDbgHBr8e233CHbTH!%Ua5RifAW*rbl%uFZC0Al@*19^th0F`4~QM#ZW?*y9_3?@ynxAT+QCQ!Ib}=~+VFAEpmtX-4x%4eO55pA zh>Edj_16N@`B$G}6reL%s+{0{y`zVfEs8^@OBcCV?NWRbz$t`YB~Z`^+PUlsY?5<) z@1P|sNE>B~DADDoNP@*;O7e9+-C|8{8Z#TzDR3Ix;FGjZJV!h0JUC-8KLbRytXoC7 zChKG!w2JS8rdCf!*>hY-{AE*>F;PE|E*aX?zQ9q^g1?eP*R0xNQMl`LOX04!{<7uF z<7>CQj_SPvdB2P!%ul>hGX6X#UYj#Koh@M{oAE0k{ax`jZBF?yhsHX7`^zW8S`$Z3 zIPU$wR=)pI8U2I*C4L{z|KG6bA~lNGC+RAzkJAV$s%}s9dRI%6^vI%+KZ@%L-6rvo z(}HH0nZ#5s3Aq#kxq?;jybnGP!0rQ7NMzb=Y$R_`tJ9KsxqDj<1|Rgr`Fe5)mn(9M zLP^WIq;M^JN=;`VZ_0cG%B`H_61W7kAd3sscMDz*&P9uS^S!x5BzdM(+7F zBKB%6W}C6=QbXR?vnq2G&&bc|DV_Ti4fyq7YU9`E^TtXJubrzf3ZtogE`}~A--L8D zZ3CL57jkPq=d_&!vfI_^=bP%rpGYTkT7X?HvabQk_q^I{9Hau49vY<8xEf_PKS*eN zV6XcZsH=7=_D4q6y{J217yd9@Ne2nf0%&gnGIZQhAdtrG9&Y=_c|#w9AJFU8O0BAi z#GRx$t<~>^^1JU=3NkM|zT$QU|5xoB9YKb>*o}C?r6+&0Cuzb$xGoGJf(yH{MzBTA zj{x-y2qz14+fPKG-QQD0e!@bxP&PljmiJWNAk@7!MFcW<`3oFlWG5iX)UhR5Bls>2A)N_j z&h(6QLn;txM)2~>SB>SEO4BYA$#d0NL8=HPn76R%X8JKCw2agg@PCHQI`5?+TGzMw z9^H}d6BcmjEs#`UK0T?{jB}N0iZjg&Z?Q?S>H2BR&`{hXU6YiN67Wwd+H7QK2No^Z9F0zkPA6o*|awXj3NHBejk@@zH|cb^L13o zvV4Z<@(Vm5@@sB7KOLBU=I~IqFyTM94gbJDA%A<{g!XxIx2K4`hvv!hXVXQ{*F6^V ztRfL`l?z&$R%!0O1Zyeu_b*$6YH@K|#|?_=*u7Fo$6^zOhxpj{`Lvqf3ENGJO z%#l1xP!pM!A=SI3+MDrrmeg^GP{a%FTz*&@TWJ}R-S!}$i>MUd_e$BX>0NE1rDh_S zqy{$1ENd{RM>xZ|fZlhpu`jiZ ztlZ#NFA!=TG|i`phgW!~55be4$qEm82l#q~1~hkH`Aghc_L7Fj*b8B66XrF^jEl;l zus%mX64?a`X8HIbqwBo1+{qEkt;EMg9pza-wZ6 zuovy`)tUAKZMMWJQi$HGLKeK#eV@-$0m7e);~YzcgFa9Y ziu~hYmuIhA;ViHb?n&Eblvy{43MXrb&2TuUT5cKGNp%R-TM0o_eY~q_Cd1;UUZ;vEV13HG?8t~PeW=lN=B?$I?``7?rirJ2pnMJ=dQM*6GFrYrYW>bs441Vl)iwW6hL%(3VaV1ZG3EV zP}W;sRpXU|wdxbox8W#qpOpd;N>P7A&`r*TuA?FD(c*K!CM`Zx^E_;XNxsRtqFIlYHs}r4AhM!Rh>lI-_dhIg_ zdUERX2j1XoCGiIjlKPfZf-@%r!GDQ>fH%MSiw+fWo~&MS9@XV~m^^csOjI9kRMp}{ 
zx?SS{i7NNc+dl0u`gF!jdPM;`zRWFYHLXS^<<6iep={TcKmL3*~XBe^rp1q(tF5}Vq# z#%w+ir|nM76_*YWRv+iPI5xM*A;Ha}t6H(R7>#(FSa%L{74E;Z=HG+g%i76LZ;EIk z(?_Hs!c9ruY7TVqY}Q%R-hS!WTcT)!T+IId2{FAn?t3E9qLp1A47O|IflFsvix1nJFlwGD zZgylR>XW&emv*y_{wJeSt_j4zr__N@LHDm62kTI-Jf2F{>Fl!@^?4llQuNGC$;ooA zlMl<3T+OQoY#H>$|JdmMEA0RG>%aIPAfo#Gx3T)QMr94hlpeR$$OJD{Q9sgyNOO!? zw7sHhi;l6RC|pmctT|(C&qr2kPixEmmY<@Cady?5rGx_yt^tQ9HO|TZ4&NWW6PA^y z8Q5ZzT+6bPSvKZWR&6bBUpbgg+EH4-$E73de|Ees`$j?qLSIg z^(o66h^VA$@zj>xtQCnnye3Po49yiEi9_noUnHNNaQStmcxy9+J<||zX?%A~NGmi1 zS&kuS>3u(MGsgWj(ZCV{xYy1i!2wcTRf|=5_<#{rp+EH9UvA6wBQ#lbu*oI0jH5E+ zrgk!qes=-K)J1Xqs%m5X`DJC*4>bxo(+dhH%f=V9&O7NtIlBFhV%16hK6V&}r?<~3 zPZjlv=;L%qGjLXVNeO0QOz0YS?}Pvk``2MWjHy56wTlYb6lN(U4IgRpr-0WaH7ZkQ z%gKQKEImal5u2u_s&;HPC;=oyB15n-B{Pu&7`6f!00nBKkP+cf+K&b@JaB(dccfsPCauTW(Ln?*h0Ve^$jQR zk#?X!O!O`Xpc-r`N>x3agX`$Fdq8lz-|AiJq7tc>#W)7Fn(?P+4rpL<^r=(C*mXJn zZ#eEvW07*GUh~3sN zma<7L1z|Usj~l!7lUq9mHA1_rbYCh(rEB{0JUt!e8vNz|m(c5fBmQ4M{!#x(7w@fe zZhd8~JNCcrar;0l%RHOGYJ_<=nqztUYUyzJDQCV{_-jB;uNFiAe z;7ee$uQk}2<*HbQds^>Mc}8kuZ9O=ALAL|6Q4&Kcwr<{zIcY+K8_qLo*_!QnVPY8$4;<+2OsT!YAkXd z7Fd}eP+IK`Y9i?nM}?a+l2Ek_Zq$WMwh?R0%O?np-RK7`WaJrHQxZzGRM>>L`oW=T^7sDa6qo2ar z*B`8Zt7!R@R%r0uQ5N$uKQL)rBSq_7JL;P>ly;q8B4;_ZS)Rxn8##O?l^%R?xbJph zFk%3p*(ysuZ>gw%?TmS=CQ$LZC5jV7Im0lC7;yKW1(86UW71#-sVtyL3G(r!qCDnKi50Qf2(H_Y~8{|fBtuE9QvQFTTHLy~x{gR3Ybc-$AL~wA+jxD%q#+;UEPdiW^G0_`5UU4+c zU0O}E({2{YyZJIcP~KY`(YF%yS)w54Q4`P9>hlo0eVHj&UjYNz&}W4co%Wm1p*7RF zOZiEf@|{fW;Aq^=Ij+3R!hCzL-7h~@k)nub>3n!332%)#oGzlF7B=o9uLhl`v(3?m{d@UiN6M72>N^c?^ z1VjylPEfj__(~T96~!*<>yr?fSH6Sjb}Z2P&JF~t~_CYQoq0eSut>%A!# zqSD|%6yF~7+WFIxRE$R{Oi=wpvR*rng*|<>9iu12f#7OOD_mu5eZmIxoxDaloGWcI z=Xo~pCGbfB*fZxr`_Nil^;=Bu_mZ|=RCz|$D3YO<7l>cA1W_0D5S-FO7m6-P@*IH} z)eLRwhUREkGxkj%`sSLODM(s3JL+x8ZYB$mDmlQWWB7j7wtZwK@h;4Oq3{@7>p zxqE-YW-;@uy;enXsPA#-!LLBfHDNl7(bx`5+k`$(sr<>Uo`{=`@bKFqHX{Khch9(TD!G=wL(wQ@Y$jgHT&_=P1;*Sa)Ovn3 z%29R5QB?&?z?n0p>z>|KcC|c`Hn=2Jx?;`L-uMgbkhOBV-mwy4tX0*BJW?4+J$TKh 
zFHLI`Uujkzh2$2Icx5BXDAi-S`za|(NnZ*TCekspQ4^eO(fR!Vx)s*8y%~wn2Gq+P zb?{K^4vlV0tn1k)b2oJW2~X!LV8S0OEm)$MyDx5I!6FyHRb1s1C0^wKNiJrIILQGU zak0hJmgmB)&v;hP7PFC&!oHbGOW_triS_oTu+q1Fel*w6DbM6fQ<6UgI6GSnmRuC~ zRc@iO#cL%_>0zV=1J(BSQ>`{WD5GDtoM3ouv(uBz;f%0;nx*NZqPuOXl6PQnw@taT zG@cG<>Tv%2wv=DSCG5ceUZUP{#_W%ggngjmUyYur*ay9N@wf3W!f}tj-RrHN5?YP- z`26Pm7Ku09A(v-4RXn%g+`Zt)0qf|G%lP||QJRfMpKG2s^~a393J7~kG?J3h`uFSK zfA#-z-}koJ4y3GN9(u71Zg>MYO07Q7QDw)PIoV~=r7OIG9c8>i7LtkIcf;X_=-fx< zvSA6VK=Cl&i$eChrQZv^q1+J$%m67_wlu-h$ermlJgV&piA7f9qv_whS1;x-TLlW3mV1ugV5JzCWlKC0~tdK~)952@_M zffIz>pT+XL?KHeBhP=7l0oVJ- z173|ee=xr{d^UbARL6dM?6Tx4OylE&^Ve^*e7OZ0P#~H6zh9`|upZxPsW+4?Qu%K2 zZd*m_+@Q8>7%3@hm{^%0{!y^yFMJS$gK%)SyA>Rg-GgAb{}= z7{4GMvIXYK|H-voMg&+;nDtM>)`i4;h%^W=Y)S|KFaYoIlhX0lV=9)5rMOQn7xv9B zs9>$0Z5UE+6+g0|H?URwZObBCoMu^+JbUGmnxuK&L9mCD(<8$epUr@l#eh5muc%#R z6OofabRz7v`2LsL+aMLgYThP;mlY70P)-K*!swI2y9qZTW0nz*99a-Bnq^N)^sK?4 zlW{0!9Vs|n-6z&kN^{vEm7E@Mrr9X|=(mm;36+CB&Z}$&FF$qcXLv!nU6TI7zmyDq zo@CYfO;(XFehZ_+v^jP_m5=Sy2-9sFYF}{7d^l5al-8XVxn8s()R0o6@-(w^PuCHx zNxdpmB<1xD+^lT}QFDFlH@lc2QP|9!IrZue!ONuhg$lEf3o>UabiCo}{{`s(t>*vt z$A8xU=C|8>r~K#4bKP?A;3fhfS%45z7c*pMQZGEVbz`wS&t2ns;rzTDX36#!=B*dT z%@tHRXN8)PV9RCV;Ijn?ULDoIjDwzBVz0B{Wwt8|gY{I^R|9w`rl~#_ z9_#v&cj&lWs zEfKRfw`PoqSNj@mW}=daWtp5F7NRD0a)V9iIB&W!xe)$@pKZf){ckHom>%>}vz}`f z<sKlD__EWKOt_(B#Egr86Gp)`UHZ9@$8)NE0pSfZcaCb zS2A|DhlFJyj83lm z#3u2#o9$gLcGgbj15q9~5zJNzvaPat_r#|Od#ZiY%<;~dQ!^sPqpCW0(3A1X-#!jM zY<{~?h}(G=!_Y0fgb6gGrjj=6yBp~ZB8ZV9bgPsW85o$3T=hn$uhdA#SlHWn7Nc~8BjZfWq6MnCpa8-T zBlrBoj7!gj?xncXewR(0d$d~iz&|S(jBuVr$ubMs7o8lm)h^`dbhxs`q66J}2RknM zZ|KSL*x8N&4j)GK#^C@rlVYMX2grPkAl8vZ_PgvRz*e0nNd?2!Bls#J+=u^YOSH0G zlQM}v5wHJN#u>-G(s<5BoFO64@{U#!eiEq}VBIJ}FOVPl$muC;bI>I-=O+?z+afTS z#j$*N_JNdm-+sqf+})|u(ti>^tTriUccPf4Ig!w`AWld-UlK0)eXqA9+t1u4OMDZU zzw^$n#eSk_84t&CPb#zZs~A zNr;?t)qt+tM@@@r@d?E&nRW+Wtw_4=ImBq~JoUhVh1;%_Y83Sct9rFOSNXB^WVuA~ zEF};|H*%L?4A1mNRL+hhyG`3%m!&ZYn~2W4w_Z>u5cotWvW44`9T(3B~Jp_G)0540H8eM+Kp80%>ZV>tzJ 
zBbvZepb?ECDQd{*Wo&4hs|T=UtEBHTF~c1J_@sy^ai!#uo6FJ~rkoR83w{|N;1u8J z)zD;P2b_Reb~)P{C7m!{+#JE(rz|mDLDpodsEpm;IuvUq;p~v0g_J2|c+tO5iQs3D zec(d;UvCBBYNh<&nRxc=b@9RW;ZFfmCAJbrHV<*LH{Q zZB%CC;M^CTwbJIzSUcT;S&jC6M}dc-x*t_#Bkt5WBnWrTl%Vf7sjPYotX*t=-}R$b zC}en{G=bN*!&wbw16liftBU{uzmyp&dvy=eAzO_0VKjedF2Pu@S<5UKp0|%8r zci|XGkjKibx|<;jq@CXX z<<3Abf=a8T9rQn9tpJQB)rHyz1cw0y6AFhyBu~zFDJhY&cM2xk^vUm>pKU$Sjk7(m)4jxRva%j$VfhPt zMXquZD zwQ^#F4uZ)g&5xJLLvr?>B_fQZ{K-C zxY8RW4%aE<;eY#AE(43cGm|9ur#$>Ctr$3`EsOwyU@cbM0-3`Qk!=BtGRnkQz@cW>z8{= zsqa)OLsbU!V^_}^iyo3uL!ScwD&MT-Qt|wkAhK>uzm;H`%A%H%V`J_0HP;*VM_QQa zRq>%;S-S6%m<6jJz3$$fTMY~+iI+TC(9T`>qe#i8=57^mD18?vs*P>NZeK<>3y1Sd zDEg@v{CLxomSoLTFk~;=Mn^eCS%qKqwftxWo2~`qc=wr2lK3m>sfnW-K3lHIs`~9W zcN1z}q?xE03vED?`Ehk$c&{z?YWTn-otd{?4H>EPDulmLi+BBIp@b5m2{<@_p#gD? zVNk!46=`lc>fjuN;ezM(rAY@3=gZqFcD{4VP%vIx8IsEZKm_C#i$~ezHvq7Dps^`y zM1J)PgQya{O=5T>N&?AatXCA$n6rrlaQ4o=A*8vh)PH2G@V{^oyqOm9QBpZ(Psjp@ z>ye9gv2PGjF-ow|QDQZ^XVNZ|4la-d;e~ckNdPrB%NQ{yqR$6$f8WkV+9Lt^Y1L;-n)&AkPT#$F_9WBuDGhw;|bD!@( z>IUiU%l7dN5+Fp&nlh_%(hB`cKZ@j4W^cP5JDPvbRapuSk|;LXjhAvTUK`*AqHSTL zBC+0nU-MtmTv!|{%uyN2(`K@it-cnSwL~ROyc`o}R5b^Rzf%|C>~5Ge^7M=`JXIjF zSobm>$(+c}ZTHP8aZQ7!V@f*ABdTkB_c3L1IL4R16el4Fq=>I<20AwEQBM#08-Y2n4<`d`Dq{wnD$e9(}HEK){I$MzGGKq#^dam zk-k5S%VdeLl1Fi+`)LXO7875KWk*$=^K_W|!~&;k@!fz&R{H=~i$ZGTD}zMlYQz|_ zvY>t%TMUg^6bF-}-ybphmVVKNSGXOY%R&|TZhJo!>K&T26hpAsMmmI)>j|=1!YiW} zv@r8*L&kR3$M$h6kpL;UX4zY?? 
z%9o6$o#b>}A{W~vLn8Fjes^3PbDtcCh{--Mt*k7QiQpu_SrYVJwZtF{)61FhUg2xQ z9Ad<~b+voqtd4|WOA^#OKcvi{X{q|G0wZJaeiTv7eJKC>;Q*oMTk{OqaEUWw$2J-5 zHsw5eHV!c$gZ!i(oB^pg)M9fd`zNSNM}~DYeOZdGdGk5cTzs#?DLcg%7#uwNL;+Jo z{@4>|KU`-uS2Ym!?u8bCGQSxxUHQ0kH{{hL*mqS#fIZFUZNhTe{Qp$J|7-s3KlML( z&0;*I>uEN){*9@5F$r6gi|ZIZL#Z&!uLnyKq%uxs^|J$>M%+#mpI!s`C3yFnlmiK+ zd!$nfq!wS%fI%gUsjE$X1KvO3LtL-kE%|DgX99D#pHYXp2PK~Mui$y!r7mV$>7z{p2ZKa?k8LH&3 zM~n+iz3>?A3zwZL;P+Q>cjC zC>5GYH)I`pfELHJzH7>euJU1PEwL;-SY^z_<a5XThO7a4} z(SPIOnD=@8v4|_M;C$4C#A(zl+tYjd*E8Zub!bRT+8MQ9=TxGe<&|nJ*woTBHPtp~ zvtbS_jghZP6P<;ml&|W;6Pgr%wmg5$p`;Pw0dUWX{8iWfbAIP?6KnDbT`qHX{E1J(jL=G~QppMI7?_aV93^dqPNrU2^5thn_Nzw4Ib&aR4rU1Y7hzU;< z@4o`NOxiCGHv^gYAZCd~gKl~M+8k$qHi1$8Lm#A7z8#+^t{-qN%c7@&tP2T33m+vgP1$rHnr+@MD`G?~-{ZqBp|f%iR8+*PW3?1Z9;#Sm2~301Q=WJX}*|jgeZ(;j44^`T@8xLNcyrSTG`*HBB5@ z+!e5$suHZH-1l&?yT@WETGswe9YpqW^^`w@dAg0v5M)`|-QD<&%P=X0)w~k6b3)+v zC~V4SDk__69gj?v6ASBqwOtsT=?^X)=;-Q)YMf8#%U_;*AZHPKa{$J+b?+)7%_`_&y5Wi9g9?>PZI z!n@`Vo(*WVOR@70yBmD|qrhOvI`3jTgZd~-_1_pm|4gzL=}B-It8 zrtZVP@XLV5XOCz7oKadFVl*MSyM(fFBE-_%e&_-Mam z&cSrd6uBs#1gLm1LC41L8xoYE2_A4-y3bJ*AZ zR$)DQw~z6z9FZ|}tD5Osf4U4?MQ?LtQANHav$;!;3K|u`qBNe0uvNG|&B<~zn&nBf z8r!@Qf*LVST1b(;<|EF8hB7;>0ffQ09aF`l zpPZF8Mz<%wu;YY5VpCjqBM=TaQ)P*C#xWI66&pt9_~vUG-VDC&uCoOZkcNKg@KF8> zi$9$jpx*0LMDb8He7CA!rfPy@@38ZR-Ej;C=vg1O`#vR!n73o&%)LGBIAM$+en3+F zk1dA+4c@{fIvEA2;}A2GZDI4yXWR_F`FI%e8Z~lF`J&yC8i%1X2!dmY5^m>W_$&Y) zOg?5V3nE_N?dRvM?A+{7^2wV)OiyKV{A5*v8GnH8Q&b!=@OfB93K2mZ-be&qx@f@9 zn940mQRc#v#1yD9V$!BYBAbA-N{z`aChORiMW%K#NV9?HBPgyp$&CV*ffOJRq0sEy zky>IA#N=E_)0eWlXPz5TX~^~^Uvs|I=eqtof#iSnKijAEKl$L)|8oW(1gIr(1S;Lxa| z1xZJHNu;Xa<_>P7MUg$x%UKFkpcJrAu6t-57eKegYgCQP1R>=) z;F96ywmd8|tYeI^aCbN=lT}fu&Bao`rO7VL@rPW4Z{MF1E2rGf-r#C>cWWt7`lc;& z@QqH0-OW3}xOJ}YA=u*lvYGD05EV^W-MY8E#Ibai*ye}Kg=d8WN2PKhBPfULq+WRR zMGe%eG5GZQhK2hGqolu$*vN0%lM2(9o7dWU_7bM;lV_(|2H9&a__KUqGp_Jp_vHOG z_`XY)c=;ZagG9~kK~9*4Q~5A88dgLiq{%eg2ytuILBi${_dnamjzi6w`8)bR{v zj9O{7kC-8+2fhv^j#~f$K2(y7&AgF;L7nr+E(7}N<;r9h_7xY3st-{5FM_9p 
zW_XUu!-56|TAt{M^V0MCsIZE`Yy80W?r+{u$IL{x1Fg_0_EknolAo3RQk+V_EV_s6 zJ{x-{?V9I)Ilt0$DAc(d`Xr2}+^C(?TP-MU=Nmx25+!z+l7YF%WI4(8b}cNYi@I14 zfy?>%r6dTLYz!dM75EuS-lRYC3cdqPIa_CNq8{rMDj$5V#$v-ox_~{LZZ$2b<)_|c z$Jo`Qeld8ZsbKU2{%x1Rb>1eA7ib^<#=FTeIcyT~o%L7lwOmm9P&EjsMA!2!K}i>I z=1%I!7pKz2q=%ovAH7mSaVqRAPZafxRm}VkKj?qrza`<#ru=Kx)BI~s;LP?4fPiL7 z^S*=|!-fYPGUe{YB-b+#uShi#{9YqMd92qP8S8+5DFQ%r$o zwTx4rp^{DSgGfI6b7~t8ps);ET)}v<^{yU=UPOtO+TFs-MGtn4^bFa(%^Y88w=(cD z11m8y(J@{@K&xPnM1JeMow{3oPNzZSEhgZQ1q-#+aOy0O0=_)`+`S~(j#KrgdDG1yohKIzL)nB2B;H21hNBq`=Xw^A@yu?ZRz zcMw)#+bYAj0S*?~0AcHQ*xW3~$z+O%9>mAnn}k4X6d*j!dQ>*m93hClz z`>hNjC0GhpV{q;P|2z2bi_$IkaO-@k%!BOpX%FSy;vDY! zEB4n{{t_1RxG7m0`keH{{O1jB<2%#Ng=V{v!G-;`HJf;SzF^8_mu{AYk6kFxTtHzxS~YB{KJ(v4nC@xy0!6bM0c%Jh*vo2wcc0762_ zUzd~)%e-&RuoxW+;3t8(7}v`6t*jrImN}O_vv-vA^cdcDA%-D=p&&=Pfu3W&JeR=% zFVvtV*2o8WQ{qWkW@pb=RAvK$(@GSG%#$JdN~2Y0HIeyeASqYUnr*fKZ z*jd+@lW|5VQm-j5?Dld3u2h=EJGXxIvxBO?Z&Q7vuZ;NOaH@3*VDrA+7ewW3?y#D@ z?Uot{o*=yaJ7qdi`~nQ%*7iN7;+cNvBes~Ne7IndRA)pp%h-C#du6x4^p7u>%(A98 zUZ-0;+je_*$=jdzF|YsEQsq$6qu)A~pKCOZKYSzg!>rEFo;zh}^*J=rz$RO*%b$8) zq|>raK8Ix2f^qMrl;T$oysDyHC5q|<(k47as?T`7%aS{A$LFi2!xH!LR#z_^8j=X< zwWB>p-^YWNZY;j;T65RKVt6qvuNWCqz@&OMU_Ff0JNK~Mxl=-rHPPJ~MP(KY0~6TE zeBMg5>RMl0A6q(rh10L$+b$P%jsXyA)T~?nv!oMx>S_|_84B@ALMS?dUL-TYL7a&f zif~be&euGg!j1KUb74T~iNcMHMsi~Goc6S!8*wPi5-F4UMAPWLA2liRh3_Hf{R5bL z)QOy~QcSHZX4McmPYG_!I7u=*Wy>%ir4PKz> z^r%9{tUP^YlFyk<&cY|Jb^hT+v?S*nDFdH}TN3iaZ*1y207~6P@qz~<^W3vnLb5JQ zulVfPsM{5ry81MZzbcU-j$u9L1`>LWR`UDxo|pIcn|szr&ED}}es6T@=K04bdbY1c zfBVLjt+6E@-56xK+g;Zlt=}|see7yn1qJvz`@Sji?G@jy&M*n~&cj0Ul0oiguYP|2 zpY!g2-~Il_^FQ%_=zqA&aanxyZ`}&u@336VQw%Lo1gV^UqwB7cj1nv2nDe8kx(rE1 z7U(P#no$3&0yhDwmA>F(mbx*{kkUCeM&*Fkh*E@&;TAluzRqCu5c-9Y?24nfRGCI{ zHrAk^6j&yO)RURt5!$2xAt^9V8azp#n$H9R0UlBC(QY1xW4c*dO-cJ=9ylw!|BFCz zoXO)`ve#QREJvM69WMH)t#4b^I%XFf@B~NkE=DyJE1jm45Q@*RUF9QA6?itCx$uQ) z=2pz>uvuQ-&ksZMYD`)8W*o41{x?@tcjwgSb*H^9m#tdwOuSdSa%`NZF}oEXY}pYv zRTJh#d(20?5NJG|;(O53rE2;cGjfLaYxcy3&FQVn35{$vm5OZ0$QF~n9U6Y=t`Zcd 
zYsEQigV*QLA57fy??{L~s%fh+2$aWvZl4y8+mOlByL-WqX`<2%1l`toZzKyTF+rwv zNs(VGlR3CkZn3tHh%a(KfrG{0-?BwLFTD`t3oi)}hEu2!@DTiMA%3dgTtYfn@=32q z@E7m6bnOq%Awpbuq%@?3&Q4xynM|~F)u5Z|4UaLc_z&A}Qe}c+LA*obU<;6By|qzD zbpRUW$cattVqmZ1Oenue2S(I-4xt=~bDozBIF3LM>3fJo>Hcm%W*;S5~ipe44T7-e*?# zbffAHx6Jn~8qQUK0ePrjW7Kxw!~qkCp}DAvbfJNuk6!YanG5Q zRl~QZ2Ln-$2WVB3x_?4^}?4wBcjQXBy6 zcS6D}h3t`!k6`||xtbUr;Fy=uSNoArtEI}5e3ka~t2|nHE$X;%F{Mxau%fR45!30G zABQm80_ExZ_OQug6^5M*rq5$`zKq@NP@)I=*k@5$0s1jvsll(daP> zGne4pUNqT_V7;#@)nSYpT1b2wsPLo=3%0Vg*nqv=31j=Q8_4_3V58ysui@Jc6%wKn z7W4a#1}7|XZCH3);a#Cm-)MQygYLaE4%8l)xxtcdHQMAXn2HlyZOH}d-|5s}EfgmL zGpu>Ik_*q~6(YGZs*%ZC-z>jfdhQ$3r92)K zzp9F_tqhhEbem5?5~5~@QG;M2Ceo?XmpnD3VK?$7Fg4Oo{9e}wHwM;K=BKQmoHx|Z z0Q|6cQ3zzzw65EIaNU#+0VY%6j0wUP<8+8gYIcoXqIi@mdr=U&7%_D8*DYAU)s(ZP zBvBvEIX!Xq;6tm0OEON$SM;Jx6~v!+IZo)CoNL*GGD5l}O`sqP@gx8w7XZ{>ttn&& zaCG6UJDOngc3Q7HEi{7OI8)&Bo>0{XA{_xhV7j>~Mu z^Me;y-tIJE9#xASvu5d_a~PKd|6u1MquKIPu+A)o;D`hmH%-;{J$6&N3wRWT5#ZnA zL#SdPj0<1ks?Lz%U8J0u`T zoYNb8t(|0v(OinAh+EOS3Ak84qi8ov0!j>l1D>`JfC zk!j$a`vI>}-34=|AC~i$NZ!t|O4|dC)(#rrDg{4hKPBfHVpNy-L8@?Jq@zho&WY5OL zIEh$>KCAKjsjp(r6#lzd4;yX1bbP94JjK7bb$0swWP6-c!ybNG)%$9QR9%^ z;RoV0ZA~1)E$w7`C&7BZrgLvcyz9`Y=HnT=lS9=rZ6Pkc5uYg!@0m*C-_97l{>Gq} zVjPQ!XL4cIe$siPc4Dj>Hxh8pEG1&N1wRO70&c|kyNXGs`Ih@KDb*_?TJhr2wZJ(4 zugLT<1{DD?L=ZE|17rm7m!&5@$`{0aAd>-3iHu`W-~6d#6J>ZjqQRGi75l*46O_;a`2x;}`lkT_j=9_Js97Oi;`77y4~hcdTiCclN@XSMAwh4)(r3M@a*C-K zEh6$q3o~qN!iQE{xCl0{CzDh7 zln!0Y8+(Wwx%}EP_*p2*BXLYg&nt}`H|lH_fO%-!!|F!+DVTa&&AA<2XXUc)#YGzB zQdxoYYtN~kb!=t{Zha_nz0h>+YsG5fo!6z~9++ zz7DYl|GweiL=Cd}Dueo%7MJtR{^_e>W#>V9|L?-*9jbA6-}6i@-NPU8x%r)BEIluO zl5MMR^uL#5|LT7}?|$QmmP=mOJv;O+xDoS|)9^mlKy62k(X;)JI%mAE^Dcwxt`^CT zW{E9_DVzA9mFdtem>v9_!^*rtmqIc9TNGFWzK8IGaX=X@^LZdrf-NULKs&-P6CW79 z4)p<;rGa)$7=);efkdv$mKwa=vg|ytcA$;@DjWd{C`cb!t#2|Z|C-9(yfryv6!`b| z(#9K#^Qv)ai>9-I~bij0j%OO~t zj4;I8*SZCoez26)$C8ttKB-NCnqS-pulVsbuVouXA;(y5M_B333P zEPo*o2~zT5Mc7nFyB=5b9mP=_khl8iO9b2CAD{+Wc=3_hmB(AkK2!K 
z_bQ$IG&AEYc3iTouRHbo5!Fv(xn24(Su;D*?$#~vTvnrN>2kq`BR0N^ zwvi(U6pAd_xC1W}bcbxDA*?xDE!}y6tPItRybLgQF_;yfQGJ9oMUUI-H28%`YRYB@ zQX;;E&^D-~T_&zl5nH@b;+<7*kl7tpCM9WrMZLag@=8jD3cO$}$+bNxDfd;%b^;E& zBN6(6b@|I_I?Vc^;TtQ~s^@Z_iobsP_zc(+%j@+c*X=}MW-Q}Q+o{Jc>-D+uo>gociAm50=kE73 zNllp!uAvM~OwS&#GDGHB7$2UntQeKKxA@5wT?)c8ik*rGg4*JZOAYb49 zqa}W**y{0d#Gtt7^znQ2>TUvl;N-1kY&6^Qe{X{RHUIW>G1**}Kh3{Gvb^1BWQcHJ z8m_?jo~6O4Ysy*NTfD>3m}fG_$k7ZHcWE)J`FPBzs4_1%3^O6y#m)s(vgX!9MBWKm zmv5iavmET6fB>Yt^9&h~sD&f%YR03&pu^2pwLcKpcR)jb{^9--SZo55zMj z$LLFm$(?g>`b|GW`E;z3MO$<}d}`-lg8T|a{c%b^-*?Y>7c&~!OiM!WK{(AX9>WQJ zk{d+k(N1~4soU|68K#1xBL2yA5I<=wAppZ#B%eD9%>~qdG;5X#y$|uW(}v~ZxFE9V znzkthd%>U*a^E|#_uOfZ=HU7C8>lxMi5<;xyN~%jW3_Y3dJ2c0m1t$J6<_4*DQx|s zzTb4Xlvu~BX=j1#^HIM6O4)RYv|qG#<2INu;XpMqCQ{5m+f765@z6vG_a5e{Z)*758Nf_S6 zQoMySITlTEfSIZeDXV*Ot1E3qpMf#+mDH7`jkxAc93_UmYRK{Td_y980p?uj#uHC? ztkAe6R?F|MOBUphVRK6?;GvPpfsMJ*miV%FKkxYlZxY=)q337#o>=st^^~7+oI8$v ztw^(pAb`z^J0t{&Po3kW2haEut|KRER1=XciWf-ek3Ta(=iiR66;tY-Lh)dN5rDek z3q(PaC{$c77V~YEg_H_{(xAO|GX7ad8hn>jV|i?x3lAjU$jU@D_vLikeTlE6XkB=3 zHR^6aVg$@%Ts7{a?PQyBS8J){Gj^gG+K!h{sMX3~?dbA0^wr%}?%$5>#YT_wc3ya4 zwtgw#JDAH@(k=W8Tpb@)T+I*7cjZ1q2t@9MT>LBc?oN3A`}cqTc>Ve1t1bJ${s;u| zdkBr{()0GhgbY`Pbz*$0`27ThA|ua5Jtl|3D=#^igyG?#6T6q98bMI2a9#Q*IFR_E z-G+%tA~6d(f0^k+UMiJyg9tQvc(rbTLd>rkHsndB6ZzFjKdndD!3j@m7rfv@$YU-D z3>KwIk)BEcqEs@z(62a1Ll=%|Wf6@Rv`?x>M^UVUuCoU2A44B&Pd*A{s{swa$?Dn3 z_&s{u#b_2f-5H5ueYqEpV(nN8E0zdd{et^nRb~0FYVTAfvf45LkTn8phxn5U#U%T;BFKsdAKa{?V#| zxZ5fI-|B1_KA-{s2mpY`XNKf9?|VLjM@?Uu)zyJ#^wjG4_&nek^PDDZW=KemG$&{{*HTAim}Q1-yI$R@J2aT z5D+9dH#Qc4KRu|{Yh^Wau>WemWQ>4-BuWha{^4r%0B9$mAjhQoHA`c<{R2jAvk3R5 zu4BbWXrYoSxZ=hRH%kJBO4cr4DD|(;h=Jb zGgF`Xzk-o*^ZdPN39Z4CG*vU1aZD5ge-PEgJvl!6Gm5*d?B2drNJR3N=5K= z<~nV&^u{DX8I1U{#pMSx#|N)H-r{yC zY@R>q`S9Dert!lu-;Qtma;And@d18H%%flPY^;2XrkyUoETLOS#Qk*lGR(R8{`v67 z|1}2xtN)pN!LIP0OZ25qRq|Sw1}BWg+cJkSYfvETD<7FCMYax@@oKTOgs@}SvLJF~ z9#Km{Fd5A_l$4%0j?b=kqaf#MnM!k>L{>?e^@h3K(-0!Cd`LTuV9P3}+p+QT=0a$r 
z^hVw{P|K*oJrgSeb)H7opl;~r6lpBKVk3$yjMHhZvwZJ=6Rq&Y?LwT3ea7v&zQrP2Z=?WnuWp<2=CO!RF&M!yzb1%_S&Hcz^73J;&kl=tK%E9MN_WRu_hp_4nF(|ja3 z3zTX8sz3sz-wHo%#ss^V{|9U!fo{ips{*WI`)Jj&&8;P}(x|Hhy|9h+hUe@Eogjnu zDKNJnuxXKExwBb3Yk;jU9>u?I7tCW@kD2Jc+GN3Ufg4Bb;B(tlWz8{JryHG}f-dUZ zfBl!}IaT$@XVnSxKcAxT!&23I%~ABpHFrzMJUW&x;-+y#~~cG&NnFUirDQw@fd zQ7mL8Eu=97x)EuYs4pqR07#{-Q#e6B*u*@mF;7*>pkE*mmqhW&-%ldYbtYWd#BQwZ zl&EG++_+jNC2#pyrDR|a`SPzMe!B@hT^C6}Y@A z_if*axRcixTVvT(q!iNnld@dOYUVm!l_LGddoVqzsu2|Y)RWn5DUNTR$AT&!aol<= z76C`GjdHp6-W5!0H7*U_Ak^6n;1=nN_B9n{EW=&7AtWo0@BwDdn6<*+LzC{+r{q_H zk|2=zd&(@D@*9+x+PDHgBPf^>nhmce3=wQ~DN_VYt~)G?T3{Xt21LY4`8%3M-n3I_4j`65?P=InTr|($ zWHHjugfN#i&=?{mY>Y;KqU!*S5xDJNK4vN`?9@@lHmAcm58)c8&b!w>aT!d#Wd86e zqlMVWlxrz|C8VUgN8LBv^4>3b{9i?NSfcXoG?m+iaHb0x|j z<*CXufx&E6_U&WRR~8W|c#=ITTW{+_t08aU+%%W#WsN8g_K}_0C6^yAU*#O^X_4h+ z@=dYm&J5xWr)X)dnRlL*UByl}h5mB<(NNXX@AJQ^|9|2iU9`cju%n$ku7fmQdmIFZ znR#YC;~W8$X8AW|z75j4r+yQqDviOWxne90Fy3m|K@2Y&-w>(NOmEjaO##XT(#Kgw%bNMRk4G!uc;J=}>=Ruqv}0ffNo1I2 zliqa$RKqU31)i^gS}>{rc`%Gz2r!ZYM%$~ryy)h5({|19{p-I3Q-h<$@@tr&q|ncm z=P!G1Q5K{yw{P;%_=2E47qI;1??#UZZI2^#&I=?|262sEtDRT zNiMfhjxdiqvoJ>mZPWr!qzCGjRk&2M99t+4tZ%X7BZM(&Z7QEHqWxb*R;G>#r!b{S z9`OMQm?5KTNl&#IUM>KMffKZhXapBO%uN~DF?6%21Yg1eJu27pE`12YUcrA!_qE}8 zad;S_TAGKTUeO_3Th*pr`bacnNYta!p1+EHbi?DO^0%=$({lwu-o8Po8)*@$c5tB# zq4fvgK3~)_$(C_}4LH@d9Y41hEd9o>?4n!PkF;)GV|NFCOoDfLYxH?EOX z#)n#{cC{2)j4KTzkHmQ1Qu)TG>~;>jH;lFQ#MCNd4plKRL8>Fms^xqdVOc7=4cLfO zY($=_hJjXU`Lz0+i zrpm9k)gv;%X5!Qx)9FkC$g5DxCR%}TX4i6eazF6-&q5+-2F-puZTInlowUJbgG{`! 
zsh#~n4g-`kS6?CV60wAV4DZOm9laB4ez|IqHo$FlhM2^^AzA#FR;s5fKBQ{IF^@Jo zN<^X{2^UNle)lO5uc}Tyi#kx&7N}BKxcjEY=&D&-ukh#_RzW4c&9z9RsWOjNg+`;% zt=HGN&4X7(ip(XjCwPI)PeE-X&dL&t^xKkcXFl5B&ee$v*dKfZZWH>q60m>Pe|gcD zObW+xGaU~QX7BtpIbn*8Pa^fnt1{$#lbJxZY-SaDfWQolF>w-6nc>I*W9K15E~8f> z(ZSh%N4XODe#4x@mJ@i=xk`4q8|oU6wMs))4swL4ARO+43UE$;C=eK)S=Pwla}4lo z%rvryaCBYBVoVgV?l)8!p;Ix@^;}lvhFIx!kWh(Hk<~;X_)WyKP?->puCL)`%2ILL z3{+$W@+q$F-I55{230*Kqkdkxw8RkoQjrpMaQ*IiY@E~TwL|t3J)YdpAb${MxKVXC zZFP|kXN<~!c2C7YH|Tyu1 zvul$bkiw*%_$eis>PO!;#oD#1@4Ib=c33&eKfZD%fIP$Nbz2%mtubO~EOm%38b6!* zP8$`)|9=>J&!8r|cJ2R4C4>M00wfqPfqKqmg(6Y{(nRbO!GZ-uz3unkz3*qgv*(>>@Bf=GnaQVXGQV|R$2yO7tW&wlIIJ?p zOeL=BmC824G1hkRI5U&o$21$2bQP-4HSNy!96xxy_3&RozO7@i4?26!8Qt`}f)Jx? z$fRWk^No2aNlQV`&>>p=dI35|>W#M@WBH?bNI@@~2D8xzjAqE0*~b(qe21wT19cP! zvAm^IL}yX4^npwR`-X|;ZmJn&Jg}gO?(s8tX=HBtg=aDi<8WG%+DHd$f{PuhakLKS zr-)7uneoIL8Z-S4qEjB{n3L0oMm&8X{URfkRCV;h#SN*>ZA;)*o1*4tsH2+CpZIl$ z1%4qq?fZrrw?u6myfpoREN9|0fH79dV=jEUwkbnC*J~I4IHyj>Oe?Oc_n4>X{4Cdk z##5{KScZ}e-+r&ULg9u$42w6jT6U-w=L$AgT?o|!Q19b+07vV)%DQ*L;v}CjS6;VY z5t-Qq>y{M_EB!DlwJ$s+ez98^t0p~b;Ui=F^RPC+2}+N6Gn;Ctvr=yu?l7CsLka2l z`S2z-jqqJOZtR_f*u-E+o!eA-{+my(@4e~pl40)t-nsHeBK^BZvD95^ifYy zLE_n*h5*y&I#-n5-FmBgo#weL7mQNQ(dZSopJuPya=CB-S;`TzETLcvP>l?vA2vuY zhJ-rc#8bPDDL(17xmO<7)F1!aqx-AfYL8NhzTy{d=Al-5Ht*ct9Wv(v-V&cRpVNBM z)|`)+S(%8`Wpjh|T?#faZMvFn|p z?wHSioxf;0+*hV68HlPKY+RPyWm_cl8xwiVH4f4}EQwDBv!M!3w^I(k3wx;d4ISh`YB#@s06e>s48q zsw81lV}>4!L2MZej0{vvb|BVL@(F{1189ar9>T$urL|o<{S+I#Kr-#kFY_~gKKY76 zvNC{JQH)aOU}CENndr&=Hr=G0?X#US+Y`;URNt#{SR}f3xFu^Ilj-pu0%emiN9|c9 zodG+gnx^@kyvl;6b*!Xnt#j9aU-v$bj=UO9Ne~$^nJ!HYpbmy+hyu2Ul6FOHXrV^V zZ!5c^j_R_WKA!bm&|1dMFJnC8y&5T9>yg&#^grXd8K*ntd@O2?1qF4>r@5A%I(y&I za5>k$R-&(Pd;7L49%8Z4h+Qv??8!$TIypCHb8qX--PRrTh&11geA1a(_M2_sT0VZ^ z!O8`_mt&85zWlk8_J7g;I^qkz_r_}8nY%%dy7{O7BXC1?4b!5PHf`7+3?6N@R9i}? 
zE`uzI1O?g**iyQUP9;L*GxVkuy|5TW)|C?gZ=1y|L4)2VQnQkY9-*$v$}a*QtkgN&AkUKMpp6wt5y#c%#A@ zrDO6g8hCkPLRDfr*=hUc9(6y*r(~$dK0;;m;^Ub8JhRY1Dd!{W7gOf2zvbHp zR37~(?)bSmF?3?=%)P5Q4s?J4qQF%x!_Z7x1t!+;<35`57Bwx4`_yMxH8J2lWv_G% z1Zbh01_)>jz{!^tsalYP6tGC!LLh7*=lD+W=xspKCnxz#d<0)R0Wtx5eCbh&8Vixd!OQ<1X4N1Tssn6>~v4)CMC; z8qp&`fB<9w_C z1*IL>$Nl?`+^8Km`Fyt1%zw7J#Q)#k1M#2rAJ8q4N8#RcBx|n=(yj|Zlqm@{+_ELD z1L;O0w$KWt2T+5hlznhDkPu3FAXWog081~?6hY+ePGMKidti%lK4Z>0&WvH!u zJcths#bSq(a)^1J^iM!`!9cBIXBBv9wJ=E+9{EoTA< zGI~3{&;4iRn2u^JMyWG)1f&ptG49kw-D&}^>cWkCDjw^DK^-HK9z{IOC*OBXzDw!V zcTFD1YkcS9pv>Jpn{=dcM^Udz3Rtl&P*?Vgqd^UX9`NR}fU*@j&#US+?m7?_Q}FI= zM~gz>=Al!^Z!Qp&zvsDnS38TP9}|^}_pjCq4(~sgXC6-vln)6rcMpW1j{TXvyJ>Ya=;x7y z+z2c@ltT{UNU;DOYf>k$t(lD|aKK_4Rm#i(K9gC-c@%Hn#J>)2yR^jv&bF{9PY$!A zvgo5i1I#56U13MEb7Pju?PM{ynru3RCoh53g@$;F7dWPAox;wNV_eOW(}%(!2)Zq| z(jZ769@u_;{#pI)lp%+2j)fipb@UVsbEGV~n69HdbVljL&#bc8kM%_XKL{8tm>$IE z`tFdQFT@U)OC&FS#3tesW+DdM!X{?Whht2Lt+&u`2ezl}Z`e3>kq5tE*Nwv_^3a^= z*D_cwq=`I33X}@s0RZ0+%%VGU6iP=Z4XB&LiTlT#e($|6k%YSXt_Fey6EOH~6sCha zN(aO=6#5@d)yDq0BUAhDl4E?Y@mgsZNzBuOQ8J(l{xkruj|E&k`kN$a^^AswzDI@uIhrr@}2#(h4u>;g% zdM$Rxu$HN^nc>!tCT-tUCdL!t0~rIbzBp@YEs4V4M$vJnkatjOw*zQFk@<=>}8_QvU)(-Pj>A%=jqqkcD=womddMOb#cbBhv3PTvfl_yasPK z*a962tF9`A1p`N)!aF@e7z+9kr_cKKH-x@#=ncG?(&xqB;D1YIMbU%|H44tLWR&%A?+6wM(@f8+y*2*45|@3}JrY@5C}*1M;_b zBNC50TJRay#Cqg?!|#dU(3eXA9|vFR1A)`Q;w?=zJ(AIBqVYzpk5N9B2UzbTHTG+x zntYBOwwv7JQ6;Sr@!P9)D#JV~^4b&swU(r!-iev$a3_1w67BQCEw6|P=Tr?oGDJu) z@9sTGdBOAlX|FYVzWQQI_^SYCiC>Ag&GtMto9zxo?W%iZ&$5Pug{P>&g918&-vBml z^rgVj!y&dKeymsUAtI-rl@16OQYbPY4jqO}&gXi|z#-vDuN<(_$v&y^hB!NrOr^It z=PBHghkIHQ1e9ftfRIHAw5gNhA(k=kMEQDD%}>LOWtp5LU=z(jwFRqza9QxfKQ^wV z@S?Sp2px77w!sCq3QIpU4ju9t`2FriH_orH`DR;te_)a@b@1>T)cx{`w{=cWjNhF! 
zAf92R!YvTqcVF@O9&>$qb;YhD>>nFpCN9>y`fQ+##M4#fnC|(aCyy6*!|v%$e^6C= z_{W^c@*U+P2mOXc6>2oKYr^oieR+MUDcj2}0=x>66}tNvm|I#UhTs$0hYUWQJKP^~ ztGTB#IR$lc6cv|`M!t*c6h!Gxse9^-!t{*FQyi`kS`IBwFC2?r%@UrS(F}=nIoqhU zTRW0@Vwqan{p=Krd}`&8V7_yD$+{w|PdkbI=IoD)AoFPCwBjvY&8E}$PQ-4Bk66?E zZ&84M@qhF`_}3m^G!rDf4>zOu(#$rjw_AUqc|SL`+T8xo)a=}}S=2)+GZYP8#Y;@~ zW$g(_X>*PuD*#2Pa&42}VSi0^cFyM36DZnN!z4}*MJQC01C92Fm?=QR9e(yxo@V1c zC?jM&SqeIPs1^dvaqulS#QM$28|s%;<<{uy!!}lj zIQ=yR=>}w2#O=I{$GMfBs)8dUCuARjwTth0>9mbNwR$o-fBH_TO@;4fzxd?v#ci8k z*2gV}rBo}WpiL>P+VB(nm{c|2y*X&R06Ff+przj7KY`sJyMosTO>do3XxH=fyfOO# zDj{((JF;y8)r&i-oDlp)n)bpysRGqpV6A*T^2(v-6A`D~uJX&zUB?7H zjz6~lezly#Djo{`TYByJSb2Mhj(dK{3J4 zXrtqWtJj}}+mEeiXM-qRBM;3Q;`TNbqLgJa; zhU5kL_&L`-q=@9jFQ?lM9WsD%2>B)wA`}>JeZ6`gSR$1o?5mSHk!&hbfMO{($MV}s zUYIr|0G0*6(l5<6J2j%2h_)gQvoCLi%#G7l(ZM@(EV?f7JP;#)M&`Jz>R?U3wtdoe z{$y#_B`FJ@mHA5X?Yrs&TZ{X3HGCo4{BmJ+uPUqbAJoovIXBk%X zmz$YMG>j_Ud0OaOzuN38v_!tZ|6eNTKluk~p6Zcy3VJw!-XVa!4!ExGA9}@^Wx{a1IbgJL z-F2v;#`UY)*Ls(xjL2=i6XhLB+Z33lSKo#i9b^tg`PQAxvD~9Lva4LQ{I+=n^B`|r z`JtoQ&PZx#?)CEz9uzvBrT@`e^^Gmg2A8fepnnd%J>C{R(a$$Fe!qSt&k9|WGht>k z>1qb&Rv@fYS>8;u?^!ktkG+5VeRGsQfQ-QnuEqKu-XfSQ|M=yA11p7N%bApQ$7*dz zd55e1x)z?z5fB34gSM#|-ln>-;lj`(H|O297ayHU^sldc$IW;|0xPB1*q?lQGSea6 zJ4p-^*SeYQM8JCR#4o06l*eX2EXMJ%OA?FccZlKiD%h!$<=6mTe#)?XAq2-^iSuPR zc~Br3 zc!HD`Hj3dV%grdx8YDy+ARZkFTy4lXz17h4tC>nAx6gVD=Xv6Bc2`O}qNUdbACmt` zuA!I92>ugCMR8auDUGMzyN~$&x*QU77KX&{)eE9E`zYOY?{x9bL*k1qV^N;@5sq2I8jg=VBGO22M z`YH)i^3?&X!BiSZWlHLSSRN5ef%!ms%~mnc zs2j1~VbvUYJ)6;CJVm+JM-Wzg?A=O6YW$^`2OZq!ZSCf{Vu$@ac?l!)5hI)DZKRFy zU`7}0Y!a=X4yRe;-c*FSFvjyl%JN=)_niS_L0e!Q>fRuK`e8pCQhlaG!pq|i9?5t7 z6@7_>3IoH)66wx4j%%z8(!CtI#?nqA5l?{Oz38)LP6FY^6`7!Fr6K432aU8W%eUR{ zLLBy&c4AvcFo&*Ncr-IV3|_i_@yA$6+qiTs;=dM^WFy#26GTX0L;}r+-tMqhx_#!t~7;6C3AxWprBN@ zhlg6ComV(q#@TN))qbk5K!@Fmo;9e4K_JjFSi?S_?{C#(EhpeTB=BgT8*kQj%=ev^*mUnN>Ax1-qcbe^qy!lkmWQs z@y$N)(xr|IE7G)kJOAFiaK7!`v%?P-McsedSY3?&`d85Z!u@|OfPeUJsTz})T-hxd z-4mF9qhS%i0w6HCJVX=KvnGd22h~cKvCM++=4vWrpKOuW=i7}q-%a88jU?IMllL@X 
zILi2EAuiga&ZcjCR8#1y(`jG$e9(?6q_X8evifaN$tK16GFWE{rhm`@i$HWS6atJ_ zjTj-xJSUPQ$q=-O&z4vyyWXCPDr0IkmzN7Rcq$N3(wJZ+7D_hY4v4E!+LDrlFAL?x zrL<8p`TW2#coy%egPbTPtw`5JUj1&U+Drq7&=`kT-G>mD71z4c3HuZBMqQNKwHNvV zUfnIc+k*&OuT|R}5Lsoqhe0CQaF&+GN}}%I^~HzM1>*e^d2#aBWA|63bgWdH?@;&4 z?*F(;vw#AXnv~hUyobZoCGMMZjiU`g=fV_}_id5m{Jb4jzoGHyDkJy^Y1ZOyZD166b$ zof5UYat4rOR@IOOu%c0`r!MuJySlFyy;0*X>ldP{RJ!-Q|KEG80V@Wbc)U!Ruj;Ni z9^ZECv;g45Jz%dJfLdy7z_LNrtb#za6va}YIj>bTYbP>=9VwJdAE2wQhGj=-3_U;x z%-3I`1sl}(4LJZTi+V5~s}!DQL(qX??b&${w1ihZFvvsn1D9g1m}8qQppE@UvYfDt zCMZ33M6hoL6*uDK>?$!O;13w8&qZO`SZgrq>@Flbp{mgkJPH;w>N`cJ!*Vg`!Mp|1 z2nW`;2ku-wc}}zRz6vu9gbBt=>iV_z_mPk3Csx+paQ8MIDz`NRWWouLVvRl1WWC;< z(#9XnUQ+ypdH5ZXzfn!|m{{w)s{h@{SXb|KG~83s+vz)J?O)&bBvC^1Tz<@u0J{BJVc$3Qi3|@)JGg^Mi9W~dOD49=EBj|7k}@VwI`fBfiVlvXF#78m_^v+iqI2FVgl(aqM^&v+$~KO3 z4(6mdAQ&tb$yUIYgONbg-HXn~Lbhdw%>f!AIcd9EJ%GKX7&RlZmYhn7?@~tq`U@mE zovyt{-Jej~qTo2|Rd?G&DU{EjTMjb9Jc$tlIH*EhkOr#2XUZLl=G>xqG4f6|+QYGu zZ9y*@^=L}BGV{yTm;LtAm!5i8@EZ<^N91Fy^8Dn}$Js?uiLN^@b>G#@v03^sqsM9+ zOgeLAPVFMY`=RfX;GZSK!B>K&%^TNmn(PrT{>T&bCD-49a)?x$eVJ#2uP3Id9cr}8 zE<~XIyueeT7&km=3%m?4RgLdY&Or=bO}1u3*ND62k+-sLIlCX$4CmY+Ok$%=>&`ya zF`%l{ml{nA+9p2JFj%FnPo7fw z>k+H)3L-9?nyX$W04Zf8pT#p>u0cVSSfinkyy|x1>Nv4I@y}nTa|cDO9+d{y@RR!M z?48q+Tm=@YbZ)IAb5j+q#C#x4M1as3|L*iG1ecozx(Ko13R2}#QPfy#naf#{N;EK8 z0Tq;27PxAq=>vhgmuse=a|4(t=UIP)EL1{_dyO7Sf$)aQwanxPtx^+8l&tZXAWG&% zJ~6lrWqcv44>^Z*mkl?ltEaKy_I7w&$+lN*9U@KD4Xu#GHcYP`BdE88etf^^Emd(U}hP)Sj{y0ZIg~DZJ6jP_AtoVjzjf>ez0r|e_1v&hBur%q_HiL9t?*wWR_0nNX z{)8PGc#N&qYdH5RLVcNWDySuR5NNT9Yjt{)P^b(Wxo`)yUr)QdH75AY)8M$NNXdkC ztH+YMVdjp-YcWmiyU!v#c3SIy`#!t3>&1D34@B4j{d3tR7-veqIG7aZJiL(N`0h2* z+F?A_{;~C08a>Um{=%cAD1af3w5Xw9Qhkcb#%bD$XDP`8Cof#NDe-GIld8~se&rcw zUgCJds)o*vH1BdMP5r?_nJh z--2<0vM}{ff_Njb8dbmi(~$EZ!-SnP>K1*?9PJ6JjENc)-{LjIjG#h~qO3@7N6OG* zQnD~oDH8ztHh^j zS?-0%*NqdqE??Z3Sh4$bD7>lGBi+s~qG&_@!L5>UZIKXBYsg7(WK4O%rObxOp>F413-8vZ>@CjE1hCmRye*pWu1`iT z8?xyL(X~w_9|z99!;U*MUsS3mOhqB=vkrw!%{Af#QI*2l)aM1FB4p#zL>{BV 
z5~A`DhQ7Kbuf<%=J3A1k2KKilUR94iQ%cLEquNsjRfGC(?+v_23RbT0a`i|F)O&mv zcDv*W`A66bD^J{+_c66jxw|2bUIh`ig91tdXvmu&T7SV&3tonm{Y{y|*raJ+D)5Mk zl(11{SrQqttKw2bwx>tEkT?t=;uT5QolwbTksZTtc{gR#FHNvKWgbT=kKqw&EHvBI69 z(pwAw9dDU$rh)#-cdOn@^38`HP3&@q{mBk51k4y|*$r$r(FYgvSr3adl9<`f#$raQ z;2&6K6DKE~p65_kk5>ucz_*h_7>!VhL0l9g25)&CMc@){+8B~X`0-l|G6jFU`0Z-( z!u3AG1q{ZvBty4;fZjHuB`%OncoY8iL`1CUI6MHa3e$J2>)W3kK(=a2Mxo4)lH66C zhoFh@Cq(O&L+WLLyPRr&p>Q$z1d;pGPocJRD<&8>$EFy*(S2pBJ8^k$C4JG+7cGkH zTThK1$QoBg|8?N>l2`B%QH1uR(pon!vT}U%jm)w4wC}c~a~H>s%uLuHnR(nDW082? zK!3>Ia<4t~ak1xZt&6t%w5_-Nufdmpt$}~(pVvSHh7G2Pve5zU~sk3WtG;wpST)_0pImkwUL=T;O?>hP`@)4ae`*6b-x?cXz$8!?hvdVLj*IpIf>2x~hYc1EMHe^5<>-Oi1HDZ1cfW-hotY;Hyf)`TKNy%U?A0*h9ECuTUs$~cX6cC3 z?%%K!3HL`liBzKA_5~<&f(D)>?PBN1{+U(pU`bS42v|h`Kt3nx0ugbJiS7}sg2W0v zsA0|-%1Ksa2dq`Etfg!bg5GXx;jDDSLA4FxxIgT)sO$$bvp*;jNSXh+Y+7uQ*p{Qoot@*n=+T0F$(yx!?pcfI=lf9M|&7BH-$Q=CER@YcI* z_2xYvd0`!=!kS%Mh$j?hjlWt_Gau~S8zc`>(3L{EaKCJK9K0fJ@cXvR2GYUV;)93B z$*zK(Dx+xu0~#DC>+X=lYybw{k_kRYekp4~n;r-|0@+$N^{}j!2BfCJh>!4&4s+F1 znQYdE=CJ>vyL>ku#=Fy5mfpx(NFio0Ipus}za>*#q1JoByPtAt2nq*C2Zb;RMMGwv z;0fg=M@H*pv`Z36ra^jEuLYWHM9RD&7c5)R@AChjtDm+Rk?tJ!XPMg zs%*+5K8!A_;eE|=S;Irl-AGO4{yJ0A#XKFNfnZzlP!_d_t|X!p6m>_X#CQ__dG^UX z^5iAY0?{HR?q==2@YlCKJS*E$zNW z{uTR9lC)>$l+70Vz$Nz=1%)Pd)eT}2x(&qe>kHOLMd_~@U)EOpEm4}+fqO>?E4oka2u{b-P{0mzDC zo2uHLh6$cXM&Q-~xq7_Pqzs1A=fIk@t#JlM(g9gn^$H9TOXU8$A zEX_QO8I{u$GOvVY&`q%v?t3swrM3u|3_*yuQ{1&eV*nnGp=LAgD|{bl!OY{!4%--B z(;;*RPCp$o3{JXS=ZV1kIx}r}fslc~fw?!$w)e~No^`E^%|59uCSD&NuR*#!`J#nw zxKsy9`Y}aW-b&2sGRkc4^Fg)FqjmRhSvb#Fwb5Z93XEd7O$!3QoLXHyiW zSMuH&rG|R3k#{o-WwIFyjc^a$U}rf9P5a)RJlU6ZmYNsDHn!$vQwf?_!-Rw!utNIA zWOYx36{~MREV--=++z8?EN+RBIaNjsnQX84dE#t^Km)MIiwPR!_>eWvAL9}<<-BW^ zScB*EIf#4?-~kTH0^3y-4z;;(ok^%Z3tfm96+w5%_T}8Ep0w{m9zkPqg0fT#bsbTQ{I zY?HfpZyaXvuxfqeW5J6Q_2~1LIB(WizHjfBDl>BiXT)9Kw{pYRqF3DZoHpBeoBm5s zgncp;?30YL`R>2vIyw;9_fS&fscNR=9bWKruMOUDa`tt&z5};nbt|-t`5NIjvyVkF zbo_bdfK@0hNe1nb$u(5ef;1ePG#r3tL%r{&iOl^*bEl{@&yxg(h8;LiST{h11Csnk 
z@#I#1^=qzTmI(;Reno%IMe!S}ooBfygmXXK;S=S-m}nk9%H*jy_@ZXPFqox`sLND1 zjKWwgLK*Ze%-OuvrP>s%Ll38BY71tRA}OAcI#@vooE<^6(G3=y2T1cht3ZA-HXodZ zVlH$R2b~mwU7bT@IL=KXjo^OeSi;QRL*C>zh8s#@ms(r|4>uJ6jE3RY7xC0p(6;PX z3&&+&@~44tdeE>Fg>I8s79b>Y1dXeY1S5&1WLA1y3P3nzVyuvkjEHHPbX3FCXYUVqW#oYkcg|y=UDav9FtU z*!r5;k9h|c{eIatu%)v7$I}x=2ibQg|J|1OpZc$za9hr4O+V+p@nY(`m2W^E@lS8s z5{d{U*jVaw0>xlzfU;bP!T|X!h}cDtJ)mFhsmEOx2XU%v?{XKg3fQA8^s8NuFLv*d z?#p3A*>1!@4MReHmlu1wSz{1(-ohB$}gL=U0!>)SIr^`8JaAGRrWi4pxj17 z(z~6Ch{a;#xya;#$`JTo3nca#zX4_!E(T`XrjFZD-`gr6qBwM3)(}yrtx5BRtJM~P za-jM8l>CWUy~jIHDOX4YS$vLAP~Pm+!LtiZ_qjuw<4#<&N~#l0ozJzIp-H1h z!u>zbJ{J~!Y|yIM4pW^fkD^Om-*M*I9Qx~S$L8W|t+EAT+^VdDx(@{x5}u3S{B?e% z#O%##{E;jAvH17L-YboL%l!~mcu75W->-upt<j&( zHzFwekzRA+0T4hI{xNnz^jCdFq-=_i-k&EWGD3{)k)XE~Ke;}+)GP~1opkVsy3Y_= z+4T}ca>dv9;x5S~oFw}!1Dol>18ddxo{GCYlE@~qQv|hO`Z5>-z_9TN8Q{*jHwPfn zP&6q6kdUbSriJgbP(v*D*W1y3I!&D@Rn!7-5@%qUhNe(hLnveVFf72J3@^WvqlHt? zJ-=ZDzI6`M$k(_pQ_9!1&SD{WdAD zMf4Dxa##qO@!OxPil8a#(VZU{DmArzdpj>vC>8*ca+fkhxq}DNT@?*+9Dof~CzmhP zEt~5W00Z@6#&5CW3L~Uct7z}(REZ=7d3n@4PRkO6!uc*AdK7~6d^~>nqsU^u@Iw7u4FWxVGTx3P=94dT$ z(dOW3`S$0q3)PdLHes!SXpfblXx0RU1he`ZtsKueca7`Hi_|S% z2!kYH90Up&>{GxaLZEchKPJ3HTMl&_Dpo0P7qx1N91D^q>vSx(-7%6$>gB02LJ5nK zti{yPzOrr&m?U@e5H7r z7Wf8$1ZtY@V3jqTAa%s(T)i}E*2mtFit@CuNKX?}@n2l@d>E0bV&A07s}oG>ok&x5 zKl@`JXEveI+M8Q`L%;WJnop(M5mEcE9M@5`&5p2I6U|yq-L&{#jk^2i5h*JZYo7Wp z9_LEzU%%El-CldS=N$X?&(}viot?khZ+~-&u=Cvi*arHC|CXv5r1TNp@s69sKlv9^ z$~Ku!xv8TRS=0bT*h$ls-PD76)1KJ|A_RF9)Sp63LW1(W4X_fRKSuJo{Wmdq9x53drFNeMlt8 zlV!3VRpci1gUbUbENDbgUWl9)*)%gg$J;UHq?~#BWt@@pX8e~L+mmUTmM^2lTTw_f z2%sE{?`dSa;|Us+wnmsPh*j%}C*^Bs1Z1lb1^K1)OWCpd(&j;V!w<(=6rdgBopuq2 zGE^$K2XwtnSs|a>bsAN#JDz&!|I$G0^rwR83MwK*kx+7N{lrF&{5AJmH|E}0Up#R0 zgxGtNK##u?3w@q^?bb6}^eXf;Nqv47wL5Xn=fky^f44^ZKK(tgqeB(A1JlvbUS-f< zu?)AVrOXCWf;Ci{KCY)bnlcJ#o;MDWG0B5u-XB$HsK~=tF{gFZ1Ktu@h#TTY-@#w^pVyw z?xsCAS@M2z!Ol*bI%3s;jA8l6CUwuQ?SC^}f6c_k@5^vyFY=xUq!^F0Du2>(-b-qJ z;=bL92kWa?|G!sSPj9)m`Qi6BQFm*sckG4n(C05t95{Kk{NE%W|7igI!+-PVmnf;% 
zrFR~gFT&o3El@DGjfRCgOzjLsS|+cc=A!f+2Yyq$=c(BvlyqeZN0BnAOa_67{7Evq zcRN~HY2FcALc`jj`JPa$sbt;cIu3mj$Hz#>K)}EaLb58z+2%a$!6Az~rRjSS1a2Ka zv#-BTfLXL7SyM-7G%V*?itk!^xQFCtmuuy6s&3%`T}Ef6HEpDO8KJ`q zjoqRDO4s((m1?^i8wdL2&+oqwvG>F0FAqwN7}(GC*!=q2o6*N*4_glO*Fv z{*Xos_>y&fz_3n8ST>e?5N$_^FY2ob6qj|-Fs61x2U-Gu?8*+Pq|LwWOJ;`$aI?KX zo9ELPG+ajRL{RC-x&#f!D{2@DR0QOG6>}&%-x8=*E|Z~9-_?1r*H{XyWpzh}tfGY) zt)(SdLy{d0+rW?%78HtMLix=IY91jhu*T|WZWe>OP;XNzDMhVvh=QIsSS*ifau$wy z%cD^63Kk*kG#mh-Ynp!@)F{k*v%R2r`c(S zdt>)V3VR8u?_VtfKqrKrLyb2IM6j1BuD}3+x}El0!SaACjJAdeR2vn^9zv;LYs1*+ zvh-xQy==L6V}wH4H632h9vsPu4X~`*tAV=la=g3-7=El=YKqc~UVUW{X37zm=9i9Y zdf(3ihA}9q1Inp=pOxd;&sFjBo^4QqDe@|sqtv0OM4x0vCv9gZ%lz4|_JM7f4NP$# zv`Zclu=nJ0T)^NE77!)7s4!{zm5lT^7%s3f_Du0sYHewXtxlE;N5Rxrnp`jY^%T{% zl{a-|IVPy1vzZcyGr2>vb@m5d=~j|#c~aveMRHq^MSk`+QelX4Ul`R2_72qIS^iij zwEfl57-OjPg>2LWw`0RVc zwvL;_sw&QJygelgO3rLQp}5)ZczK{`pc`#^HTMT^*ZXEPMmUqwhmdhNor|7&;^M$T z{Ol9V=Rmx4W=UOQ7oc;+js1b?{phZt#n~`3Dm0k-)sjZKV(@62QRQ9ZPduu(1Qp7l zToy3`jWKxVW;(y@Y^B)xskNMMZhO8C!tr2vMjqUr$kz(K0>B{0uvW0VBP+4%J6=(C zUgnulIW-}T1YCYC$r~90;+Mcl;^j2YTC1oLX$TmviZ1F#zUZ`}`LGD$xHhQ|B`%t% zsA*Y_P+Hl{nnmWb3t^XF=u)l$ zQ)?NPmVxs=8eE?Nvd6O6Jd_XB!@9H$3?_iPh9C_B=$123RN6sJQ^ZhKW+ccbj7UlD%fMzhr+t z!HHVGz;5rxRQ}j;Kl0lCtAX7+(thI~{N8 znx*DJ+2yR@UwSXzncP1-ozK!VzdDaA1@4J#14cU0VFlNS!aH{@$>%?s4!@%QrA*`kx3HP`rhz6IECx!vKzmwV)f#@w zB?fyTU*;_N1}7DJtzHW7VudEe( zcE&Pt83Yh`XD{ZtT7n4%;+3XB1Kf1j%%1@p1^sm(8II|AQl}EfP&19pvc<`c`uP;d zRLzd2TBTs+h>V5hY8Q9Wb=4~OcVd0ERT>TvT8c+UA}>ZC@4MW)Hw&q^b~?p+G?=p# zFlcRgL;KwhI}`+7D5?7u8xhdLm)EH@HV+$Cf`q^}lAHpd*o7ng>)b-0dkUC-d5HR9 zr!8@w+kNV^Y@sd|veqeB-A(a=9+!-kXIIsKp37a@wAm+ntm{u`X)P#x@`KKzeogCj z_nFUHjv@1_+w^{Rbek6(EAi-od$VJAojUP&^wQU;d){yqRc7%f#f$)YdW>$>)h zqG&Sp{B(9a&%s-utBbJCqU^oiE#HgE)V0z*+ptzJw}*d6(R{#`whoh*B6Vat!X-vR;xp!ugzSWAeLO~sdy!yf9vuI zzc7S2BL1TbX^>S)vb$Q^^Aw#lqHDWju(m2pO)pY>ZxAE$;}EcFqyBE^5sKnrNb=a% zw71dnl^D%9`OTf7Q#`wo#Z-mFCx2-rT;3&^)!BH{om6v^7?2Itdtp3!>4NU`AZG4a z&%sAl4Uct;JLLP<4qr`3z5Vy^(+A=W<*V|u@65V9(9}>QBZ5teG^}5|2>UV!_l?zv 
zX5HXLfX@f^%MC$OxGYs*6~aN|#g~MuEWU&WoD~$bPAs}Wd zvxZ2hj6{R*YaD_*kPX0a*U6gU&_G4y%iD0kL2FcTcCWsbc_Bx@A7e$j#L35!b_JH*VI;vnx-LAqLG*g7@Co@ZtJwCIyN-pr9myCyc>FB%N3858D zPmUfsaQw4@MC*k}kNf4X|BV;$5C59^&%sBIXpHxmU$A{2v;c6xQT-UlZ0Xq-6T?*~ z`vLS|e7K00Gk^a@l}OPteNI)9rDLTOv_!lssT(0rjVsNw&2x?bRkTUI` ziFc0*sEZNT5sVWT3$I!X!R!nMrAZ9$Q~Cp9rxvdL+{|n&m$e!}^0E~Sv81S+>7yr0 zZlD7_luQR?aAR{^guMBqg?5pQR-HquZ+fz8QDwmEI8(DfnJM*Y&ylf@q9*4pN{skC@ocuxL;;zy@K^SiGkzTN$XchP@MHovgFzT)^uy)2|Mr&Hm z^~zB{#`;`@GfPKzD(NNXoz-zt4!#_uC}rSTy+PFM?XDk>F!ITuXi7vSsCaogPn-{{ zL@(~Cb}`F%w&&7b`&pNnl4-RY-2I73oA-LAp8Ve2a`NU8{O$v}?H~TC_<8%r%HO~J zR+XOX1*@dc4^Z|%2hh+TX4yD5rNtrYL%5|8@tx_Wb&bOMh5`A;;8HRvbsQl>F%@Fu zWRk%{f$ByRt~lDliZNbTrO~fmf5i1D$dX=7R9tEV)9t6GDpM2@tBG zN+*HP!4*Q05_&+YCWMY4MG$4Fp@oigLJ{d51p%pQ=pAVyDkz93g0d{w+vV)F&vWj5 z&fe$Te_y`88Q(j{oO8TmxXp0DZ#%t<5q=k2>$vXuI%OP+o=7IkQ04d~p>l_}C!`!7 zC^0+|&ylP?xDOI5lZ68PjXCGNIAv&8FZ)EeF6CHCG@nnHQolQ^?`LgZy>~}5@nWS_ z&05X#zK^ldhvOZ;CsDm0w^1&U-@lE`LgmjLD?0YxU-7caV^)XPza8}aKdk4!e*6Rf z0F|U?-_*~T&-(OlNUZxj1dvgO+?7>t>&kI3SR*nkIUaVe3o@%5w>&a&Jf#|e?D08Y zC$!*H2%F)gJ}=24zqKzuPsu&7aI6%HmWI&coKk?ST0o6jNqDQ!f`Zee(I`TIFrS9gwPT$ytHuDp!woZ$P=YlU81H#(QR+jK(xde(%z>CUZ&qh zBR$o7)YGSJq_6Y&!JPf{Z7uKUkGD^4d&J(4{T%A>!SkN-!wCH3)UEA;*H$_T`Of<- zu4Qi?cYtK=Wg8*V56kag!-OYes`eN?X|U=rK>z1`)v$cp8L>1ThBjG5t03S69?dfspBI zh0v=iU+g~zYPf4y3w@bLyoa3WEza;x!)|2QkHDHiA55`r)I7j!wT7Fl!iPzLJ|w`E zQ>lP!#KAAW!%>zRzg{7zR_q;)eI^IC64cV?o*8;roi!o#ajxF#z}!)s!~@E%wRI}I zm&O5xrW{|Ed2}NNPo0 z_vail?w8t}^%nsEHYfn`wAF61wKP1v_{TFk(4ivUk0Mb#Vo$^B7c_fI6udVRvgk|6 z^^pZ45MOk0aBR+8=XiB>o}xQ&hJOZk!UGIp(zAT)&gydErm|nB%p*sz;!>g9m6A{F zK|{LGAC8Fp#jb|@ZC>-y)6}%ba_8*Sz3lepUNo^b&`2KzKN%3h+}<}rdMbH_ZSJo~ zU#{Q+mSb`16@R+R$a&SYrRQ<=}G7n@qvx{ zI`767j97@uh>7BI%blTR|1*83YQ3%7!VnWD_2V>ifP=LjOROjv@%ks<&Av8Cda;+7#wyn0~deN_&iZv{zG)`ntk z5lV$0%$tssAhH>x?{OzYEhCf`oSh~wHi+rNCB%3tvUg| z#)t#3lU~B&O|-s{dnUz1Jfk0=dE}6EZ~}l|@HE)8wCXPyhm&AiqcKTI#=>#f53dAh znqdtvRs0fQ6E*xfXMZLQ+vo;#w3LT+8AsZe`H0dp5-KW52_oGR${1>V7C!%SeEfv? 
zt(ZmRSHq$&iBM&8*#{?Que!QVd0i2hAB(dgymP|Hp^BvWHQC02XM0ItgrGY@lbI30 zTHFmL8jC6HkVG;niGz%SSZDfc7X16YX2w`Eq zCIB#qBxh2#naij449@}twxn~hy?~evIP3yzl$aT#n}*DRDPaAN3tC;b=hxyrXI;T7 zK0QWG$bcmaVw>mP#^TBg3*QR`GBEEf@|TB(_-i^8RtHbUglfdkl%UK%Dp0AIv{}mD zyZ5ebwO_vAEF=;;Bfi zid~)K)cgu0bk^wX-8{Yzs@H65xB23kl})3@WAacgArNf^$8W#gY7>c;LwK8gHLc6- zqfA~`|Dv@Rb4}4^&oR}gM;?}D>T}~wkA9)CvEG>z&s|^6oJ%rN_4r=t8h}uIYlG`1x~I67tv~RXqylrB=UwB`dAMf*V84 zgq(^J1-3;ELr#gGbxQgj-xu#J1!6jl4uZOyb@TGP4?c5X1@=e{o5G0JatXPeSfEbd z<#sr5argZcUF3Ey&m80ZHlnS6SFO+8_)vG)1Zpth{BZ20T$3%Ik_-TWR9q4feps9( zWY|<;FK{3?NI9%3_F_?ff*)>eR=1}6 z?`ogC+|n$JKZ+Q1iyN#KBhSW*;U@AYrmJPqb6tG69!`%GMthruEL*li1D|oguf)i> zbQ_4Yp?cyrEC!!%ILW9CK%lZyNKgVFgu@(HZ;=L_C}Gq=9HWrU9BvwI2xNx~8eqQ? zkNyM9T0~H1KnQOVW;O|V1!1bfBP=w`bawz^!`z;vI6RU9E%5^7d=U<>=rYK>!fwp<)UnG+TDlw%kEjIkB5Y z8V`AE&MTcZy6?J%`~`G@&43oqAdA%*BpU%hLMlHw+@uwS9XtimwXdUAl;IWTbhV_H z_@RsXUl{Mm3K!Ih+^*(&RJrF-vNXCS%Y3q5^?HAPe*43aHHzxjte zSu)JUVbYV-s`7`VWBN79LiR^obWC&-$HNNQn(b-DmVv$lT}kflVuT|3(8s-rb#KaP zv%XBDrp^CF0QYzQ+Z6Z`ZnN=l^taI|7&5`oTj(RS+y-v+^09!3MZe1111$I1b$) zpsz#&Nmmi+n}epJCtv@tQOV}2RWUiM3l&i2_f|a5|G*I&&zOBgx)u}wvy3>1BVJ4W zDS+Y3AXPqeAEc9Zi=JNvJHR3jkEpk4RuMA zh(00OB{*}C6XaGkT^|S7QfNVP2CeoD<%gJw^&g?u#%V;8J>>h^jHyKqIw;O55+twG z1-c}5L1Lv^i1Ex0ahsidH8!M;G~Z}}m{t&F%Cg7HIR4Z0oo%i#RviIkD9n+pgYtsndmKU=1hK4PGU##zfX`T4v&Fc89b zG*MX98$N6Gj+*gS!PJMp3WEH|#RRCD`ocgA;wYKDuti0}Be|8iuK}%r{F-oUwucFy z?pCgD)S#8kP5?GrBuV#bovsDcs*$N_CR5Tj3V;ZLQWZ)ifc7E+N|^OrOEb7-5Nz%) zsj|=`3R#H9g1~hp?s7pcOd)^}KUf9oNc91Z2{>S%4*k{QI4=!I7WN+nci4Wk*p<^9Oz#k3sEtPWMNw5Wb>s^Pc6z1rq) z1#~*^#bd3M?czSMpZ~51`~&|WrG-t9?O*bl0UA~Iy0(&G+qN%7~ovG)MB89@w=CDOoP^n`_ zPEJw$-Z?RT1t1t%$R7~t#~&BQW(&2xfUDqYMvQ%5@%}O0+R-W-85$VSC&1rCRCw`L zwdMpl^??LftY6GGNKZVl==qF5yuz+?k@Z&C&qse6IEgoZR^5s<{JeJe@|Vb%y~}j3 zt~)gUfvzW?^&e7Rg=Lpo&QRSG!{TmU0G65Hkg^S@M;O*=xf+T?I8i}5SA1EB)=K)wUK zDld|!92-eVul;U0zbO)#0>@b0ihr;IhelvKNs$kNSIURSS{;{)lKOOfI5LHJ4eco} zDRMWYc;GoRPbuSuZ2|@A2STnbBY`fI&6ez;k{u)%4WPsg3xFEZIGOI1{`t9tDwzs} z^I&--DOo=4XdmEN 
zJbB5uqafiKP$s_W_2>kuE+Mn-hOS%2eE?YY&<_HIgA)cTorIV(er{S{WwB~Sc*k!= zmrTD84+E;f=q#aRPMEhOtsyX@qA^`6-^kWU*hx62u4TkAc<%ierD{;P@u<1EwJ_K8 zlPyDAErXx@@zcqe%p&ImLZM576epC?^JCI6XSU*0Gi;Ewp=9Rb6>R2g0S*p&ek%ms z|JI;$ruquWM=r=%1*_~VBX1Y@$!7!3K5iN!kO)y&Xn1{%6KM*Hs3-uQ`QG`lQ#j;) z^2-t@?1{jcK8$q9n`7e0HpSgr{DK#ZGH&K9m>qQ4e^zX1MXy6Z43~wY$riPC+e~Yp z7lC^jcLH9*^Bk?Q@F|@24K}&StZeqpl&?_ili%vnf;o%qJ0l~nRWn~1%=7@j7=yz| zzKJZ67N$rcdm8f{Osn;d5K(W>Nr>qz=4S9ScBC2ckqFMMBY=*hX?a5TKaflFvUs-w zC|ILj8t?b(nh4u66kSSj7XmkEO9!>o>ov zd^=EzYsJTJX7Dg`6YXhF-N(Qkuh-=-v`*M@Pf8m*C`(1>rScb%=yy|hX7UR7^2|g* zm85`pKs8kM50e0o3dxWO5NN+C-P}GJe$^PLo;xZbYD_RSxLY?-=q3&)3Mzs5M*+dA zPDe}H)oW%H+>Zkj`zk}<f@sG6c2QsMw6IG>mRW0C`3GV8Eb&jPdSVnW{<@3asEMje@18hJE^ z^3OP8$ko71aMA6{$9z|88%B80jLP`W@nBEpt}ckzRTQ8Eyk0wyru0)?G=`lWj^cC- ze2|;XqD_E;Yk#|^Cl$%2)r37r&?loWg$a?O>nj*N0S{?Q=sWWVExnc(_JzP4%)vP5 zciS-(%t_|qIWj=kToeZJZ{X(QkS=X%lAG{Q9NJXYJQ0pcGQQvLBwZ?D%TJYsD2O#& zb^J0gL-@`fs47|<6^X8Qfetw-2D>#%w3-=5AKW+mR4d@#EgNnv&@G?kl*D?j3^xsi zuXX*Z8%uMRX#V0m{p0qvpsm8+9!0d+J-YQ1&;L91?!U^x-~F%h)fYe1Badb}8e)U` zD}Vs9TEhJ5js#D++5A0Z|BT9&p3#~O@^y>XgDe5wFXDz_^An^5V8QY1x-enZfR#Ts zCu8u%h@D9Ikl)l>JzFZ^Ye|BS3HX>8;-tEUp67bwk~9RKLU$DP$H8_$Tz~cvw`Pu| z^Ir88d61*Q;h?oRR^U3G@Z3c-c!qtL-FvTTZ84CywAvVKk%&?uqVvB1)NL( zlB}ziPu`!}zO4#$-4NWUwK!)(;RK>o!_PZUg{!$(+E=9hdP^@`e*X@x{Ks&3hF$1^58vJV4 zLLz(J?)D44YFTbq8rwG*3i8)4dS0}Bx$kGbf8&*!#2ahRl-jLmNA40$XE z-)?ulh|cH!^eO!Otn=(nf=uJ{+52^(HW3?2+m{GX)}(~aYX%qCei$R1M^`ogR2smlMFB&m~cs3_UGG)<8+p z+PI-^6QA4{X}0=LoG7KXOy*eaX-uL_XR^F^&TL$I`-zV}_0l~Od|(jsn)v~_2bSx{Rq)=@AXsDul;}grTKE}%bA#SAJ${9 zwP;O$e!tSS{+H_IIq$RoYP0)a82_K1{(*mhD(DEr?G1ou!tlxP?Ka@y;QG+)x$vRp zeLj?=oFT3qJ>NT0tO7fqI$r|@tc`6*j{=rQGek$@GS~-j1O$j?hw&k}*sMW>7y?L4 zR2@LZ+_Uf<>ZHJ@1`G^Ft{{GEW743k6mw`p?zkshMOTL3wYgn7zbjOxS?Nl2fxP<6 zNC^-}E^QIyRSks9*-Q$pxGo4e8}{V$Y+a4_Rj=+Tt3Gr=M`bV@_*|P|^$5M`Q!{tC zOkaMILKtvE2vEPHXyo+b{rMPbi$A7HvHmcNH)#@{H%6bOl_{0FN0bB`IKI4UT|BH= z>v}ZDuD)c?eAE(jM`a&$;`Hr0nFvggPqz{xeuhinyV;R4tnPv1aXU>1{cJu&_qp5) 
zKXjn?CZr1v!>U&We=^JbAR}(Ud!m>79oSEV``3auBaeA>pLzFB&)YAd>Sr9&6eLdt zMSLO@=!m`f=lkDvHOyYWlDa2HeShYUG&VNY;t&Q~R(VeiswBa}+4nSOtz}F;$!c+l zR*T!`;*H@>;CUOIGNipi3ugfQ0M0?-oI!C|3*0eK9)#$vu_)8TsYjqXWa};u8}o?o zyl|Omn|b&Z9+nLfrDsqFfdY=M&8;R;pG{KIf=s zg$cOB2Z?5WAORZDbWDxXh6bJ5r1=_FWoCdRuTd+sVw#|xR7G(B49d-lvBzl1)bMri zTJ616Rr=yN@perJ(Z7eZU<5cjwghxI24vzr+_=A#T#+8C+B(Gz$mU33EdO}+$u?Sa z+VMVGAitj$f7;>r6xbY?}f0hT}pIwp( z-ScBeJ=hHr?KU{+pj=*`*lYVdLKV%B5`3XDYj3|~|K`z6nC+gz?8rc+ z+pKRd038W%9JVX{kO4&*+tR_#O9sXCJUBKY(YFUp#bT~(ENsa7H4;-D+Xdp_x5DB} z&P|xo4a3mq09Z(jDF8s!5x9Usl z-F=HxML*xIGp`F^x*epuIzEU`hjLmBzlZ}0(KvZ_gYTjmtchZgZb~^Zj07n1QN>lS zDZzG{VD0!hTH(O7`?E0l4}d5VLzwmuXQuuLi%Af205CCfH^^Gzj^mjH{qJ5G!iLws z+bqzItiU`*!=?qKN{)`4XQ{|6yF=!a1C46%W}2#1LC4+J4|8S>vcU%R6=my&*H7?G z%jK75PVc+j&|A}}$+xw7`jtcO?6;biAI@(xU%h(uW|7kV@lA3(G#!xv+yA3&W}6!< zkIt-j)v5@Xr=id=w4!%fg8hgo76eSrxuQ%!kRMCsRWg928}O9MubH^KK~ve|z-i;t zP`C(nXwbvQTv_QVm$-^C&OU~c4cKhLW~CtL^s2GsQ~~0a=>&rD?7}g0Mq9R@CIxa2 zilYlt?R5pQ_V3LyW#P6;gkzf%F?%u>)NXo`j=+X!RvH*j--n{f57YKO!E;WRlyv}T zNMdIaJb=5b2{a_N4h%dTd6|lakI|?^V`CH4S5&T4wp2nDkz1dfkE|QEF;>c`ktRq5 zG3!_LAb(;JqY|)%H{LoRZ@Afct`S#_>Du>3e3JB=gjEWA8&XVsp5&$mP@r56y9uB~|4Go!Cc48GV5 zUn`p09N5;b!mR~)FDn=95hJzL9`Czd`f%gSUlDiyxNP9I`$^`1l7aq>|CaNcT-X0n zo$VReNLu$^g#ZARE`Io|2!R*W3-Ug?OlpZJHH^WFEq}Wqv70=7fs@0&(l~oriAaTD zsUwrPidqHSyxg)Er2vqcVw~*8hd)?MCby}G7c%$1rlzSyjj2e|h?7jWVVH}AEM;?W zOcVs;cc#IC$qB5OZ(1S~4oy-FtBgc8hAN2g&w3XJ<43WqL^gQ{q<0FH%&0U6RF{7jUI@2CIz5dc{%l>UN25=QogL|J`ov%y>Ejj#EaMy?ysroxMtN%BdAZ;1g zR)}8>wwzmv>x&F*NXn@lB*L9wX>o(!6cPYtHORv}V#K=*{)90Qb68p`Zc{JOWPxA9 zf~&uV(RUjHWJ2STj1^`mtVu@%0?NgOI83&ztk5+M^gt8;xi7N_y(WAkMl+%a1T_^99hvm`!9Rn z|KLS`^B)zjZHU%7Lt@mFHg3=c0@$2vyfV>f8|T2YmoxW`M1DGE^NgD;fXYySl7%=C zFbK*~8VNEn2LhxRz{E=a`f`y5%N~Qo`(neK2^qwhjk|SlI96C%SfA}=)}x_8V;gTx zaN5L?(qP^@N^Fs1{xcQ87u+gL=b;-8{&7`sA~(Z3*!SlN%N7#AP(D8T1(gAP7p%0| zR{sp10Z|w?M#W$i22xf?hw~5Y7YvmIXo9~{IlgLDcY#9lBFJf$;~(l7YKCeJs463YejeJu?jOTZpI8gLmI z)G 
zI0BUDaEY*z#Uh|eMAc?44oG2FXFdDQR6u(gM?u#m5u89hsp7BbFcTmlG_{8{`vBk8&mw{ua1=q zPrfJF{F_@af8$?SaFkc%2>?1~cP_V|2L&D_qm7eNiWx^XQli$o=h=Tkj5s8+{L6 zU2l@L7#zu8>_HXXz1C|Z81xDEGI;yvq4NAc>Av`#x9B5b&I!XurT{SDEO6Pg4GRFk zaf5GFXRIl!GtaXSks0hE@;x$FB4LOsKV!uZXCIOVIKxco@bWt(fcw$60h}zT16-kr zgMcHue&~Fye1k)<3yUkt`6L?l*s@Ij1`ahtT2<0#RhDc%#L;_5Ud_kPk12qmS3Bg% zAvLn&uxDi~3S-B3Z>>li3c=4gtq#alU{Lj;lb^*dkEA|a+D(I-K9dfTVZ~WqhH+-I zy!@IJ&_sbb$Iwu4;PeyIn`JxI7nT-&>b3l#wx<{`vCQ*pvC&Ga?fxZCL>s4t?tI)6`81o+_WWmXN7wN> zY+Y3x1Cj^;{hKEEANU8WNWJ*N7ip4|I3PLshNg!@qZ6dN!t}a~t3g1T!JE}(U2u2V zoGBNOoHim?h=5kGOa;k{71?7O)BwU#W`~j}+0_o0F_|jfmeV1QP*9lxL!dhMPUH~Z zCsu6iiV`3stohlL!8AWwDXYoc6Yi=sd}o;CQti>gCdQ>P02LBpVKhwD4^DOe6zAf2 zp@8992>DKWtKQ&HMx#`HVyhEnl*Pf#70Ci%p#f8E~=SJPc$%EeE=L zvgxN8t>9+vtBI>JY-7gY=vW*f@k;2N*j+mrn~`qUg&ItVpqut`1<|cAqd-n5bUyEh z29;~5<;2B{GA8ma-LDFIT0VYCYYCrM9<_1zaif+hFbt0kQa_5vRV!8vN+J5*s5C`9 zm9}|s^nzEYUtvr0iKB1sZ+}%yEtbVctm#ft_g0!8+>7$X_`58daIgeWRE*$N?A$vH|YH-{yVBQ9%U42y3I%ijV3JVjr| zDsbi|`9eJyp5Sgr5zQ)juzli9`!=(>@-)OXP<%vbhhx0`PROH7GENyWs>qS0x#^iL zOe^$FohmVpl6_>VpaObYm9P55S?43lGk@&V?i=!9f`~Eyaapqwz3 zL`uJP>I2{RQ34LG7mc$AO~|7sxSn_+pJ6YYE49k2lfq&^vLvF?jLSS(hBRrZJp-{S zNC3%ZH-qc0`KVDt0cncjlgQj{kg4k152~^%7k5mUn`8odEje$t&1_wcp1U0U!4b~? 
zG$hFETCb;by9wUIaW(q&tXh=v$(-Q6?UIsiOZRI^Juc;ie|&nW<#$bC?oMZ2%gs<` zdF|RX;NP6~|NSHQ8~-hChxiXxp6wfWYWW}e7gii}m5&hwVR0w869K|2!mR9N^~G5- zCa^47Km-68M>aPrIK0~{jCRe&IW)Hg?3Eo>*7g*57i)FnEDs#vn<@r->6t=p2UL^R zh7+|xami56%rjb!8Pa%F^HlrIh_4vU=Z#r}%KCU&*v2Mj&Zb#DQiaS=*fL;Pyt=vT zkOGnjFa9GUZT7vS@Bud*A|>pL^VV z{r6p*exw=2R|%8*AktmbgRt_fbZXnK0?#=qP`5W2*p=O;aEW!rfq~64sVyM#;sym1 zW#h7u_UI3ADcA(bdfDSLc7>SFxpK44zi5$@JA2@f82Nm5_Up-qvWD~Wn^T?*PscAl zJRI45Ry}#?C00pPZ{5l9r7_SPKb5qc`O?z6`%=rD{d4j+JZkEzGPgw=b^LC-i+z2( z9lLt5BqtX2e!BDhx>c)>0ymxynIJDMscj>ar|j-U$ibt81uhFnSA!X1F0=}DSr9MC zlxQkL0U_NK2*cPsq$Cf4seRUB$1WkwDAUyxZ=Wh+T=^E~GePfgePy>~r$f%zQeFzW zYx8JbN3`tmWCBSP;VFP0rp}EyiGmTil5$+l+kg6kIO2gJgqN8gBAc;;GU5~(Xe<}| zKn95j4V8@sTySW=Y!ah0dW|a&UQUmfo<2*RqSisAC05UfzCG*wF=J!ym7nvsa;Jt) z?N{BdN}OJ{Aa5meMlI5*sXB{WH1S5gfRX+$cGglj<8eAVLJ$}Q!I}e~(!n`Qaj;Sdo0%-HZGrZz-cE55$9g<<{Ngc8QOn$^~L4Zt#kT3W_slwzutdp zzmeIxJgmf;`t>_@=6y|fZsWhW`SSO-@K5|3?R?`qXF1wDa1FZu!U4A>)N#dWM>|1; zE+l-IKWbdfs|m*)I}Bwb!tJ4PDyu2n3TUiFm?;>;1vP^}!ZVX9%LAld1&XZeyt-c7 z&}Qx?wLwwi!(2a7Oj;^_FI`riC?Yg3a%hP1MVTdE!BQIpx+FxBsx3}{ZAKCh4l)Fb z1dc&u5D_OtO~z?10)P%w8pG7`tSJQICt=$e9M`ZWrao0>c}nz6)i&28_zy{UPRNY- zt)9Y$QmN9`{3;ziZ`_JUSuT(N>33^&kuUDYy&U+2U%rCF2I=v7-|DS>T7MQ(^K3D2 zHvCcNp{F5AUtTEDGJf)11imm)7CjIU}QPa7Oo8!|iQN--E9fKByC_bk*t0pRU_DncnccL{}|8UU1>oxu8+G zr#GVaU8m&=TX%1G3o7c&+3l`G-U{D7-H7WLX#B3$go16|!+pm&A*)4oPU?Lq1Fqqm zq){9Wco5@l+hIH)$g*b9uR0H_$|Zx5i;Ih!K(V+J7Z@>rVl-0~S-RdOVwsb1*~%g~ z9?U1jc<=}H@nHk=!zbD z{hS8aD}$MOaigy!5B6w(bu_&mMmzRd1+2aFf6?%ENXHJy4$n}m{GQWPw6~o!<#F)2 zf703N{MRPiS@P?gi;)V<4o?u_6~-sxC?#tL0VZ*&yfBbvac4fYZsDfe3n0C;kx26v>2`dn zMJVgc4IFJV4oPPPAY>2a@HX>ib*95f9UN|)0YwQZ233PA08Xjep5=Ejz%IsC{(^9q zKcVxR{KxvC{xeO1CG9aQydJ4h(=jrVwKMPT`3m^|WM6Wd*3RhY{NaMWdzwPd6`@-0 zzHiX1`eJ5nvEm3{fKPn9pYwnZ<>DH%HS^w_UK?zwKG4_}RKy*P|=lkGHnOr)KQv=IqRw zoomV7&&bp}cs~&~g__N=F)xc#1WLiV;B18es9;-{AE4qGCuk5GhI0p@AlcJ6GxQ++ zi^iVP2P4f&+yNK8NZ2O#1`e1%dz?{28L5@g8%F&2s{iLR>a^q_QbDDdUaMWd9Gu z&5F+<`XM%->})qX4*j{fMP-}ABwhEET~qj7zT0B{X-> 
zF0(#y`{7=<#<=eO;lI?8-${9@T>2V65N!Q5)HyWQEn2K@YL=Yzzc8V{&wr)3olkZl zAIyAjOU~{Gv;pj^Io_+|6QsbqvUB*MAQ>lA8ZHc(+lIl3g8dWOXaHyfUm6HrXh6~Qzw3qaOPoyjEU)#_ z!{dBgWD||aM*^fu>s7&G`lGJT4cw}guT|blkGCjILE2_)Dx8{gbUagiMT6u?R=WBE zIf}RLOjp_cC>0wBOYz|QB6U`32xD!j`#~&GFzl3gddDY$i?}2{kgY?3(ubs!rU2>G z)DLjKiGDMs$t3}SOTE1&l?tj;5nc^SW{)38yEfU6`7yeZlx6BPVA>D8MYc0N+XPeal0L8Z5^cZR#Is~$ z?$EROqfNH|<`)}uRwr|&wtm5Heapei{q}_mmL5i%p)IqjJgU$Oi^ZzDo{^bP?_Sxw zly)yl>FfRyUG26((A@tbaQ^R<|NnXOPyFk>IIxSP&j}5b4IbvdgZh@>mA^GhJ+^{L zMuY|tz}w#b6!R-?tOE$IS^>C<@eWHw^O3ee3^Lv0UXKP~k2q2TAa(E!UTu6;SDZa=jP|!*0d2bz*{b*UgE@?FU zp=M7>D+n_i;^-VBYM>W%MH!m@G3G`=OYfSW=PwoRtT`;}QwYA42w`(lJbdk>!-|F> z6ujQ-!5mvR2H zQ;#}PrG1aOzEa>{l#Ng(Vd6)P-(I3SvP|F9iD*yAoGE<=bB(9xy|XaAW{0g1vr+Mxx(|DvDCTDfW+{xh8YA@CD(nf~3t!NFr- zHGaU%Nm;zR?sC;uRN4%@UV8+6xUsU}ga zKOG@!RjTAn2|^m6vIr?w-3-#DllNCEc@KQ^MvtQJ*=U(PknS$bJP3TvIc@Fy?1@Ph z_*R2;OU-iSD`dX?vxl&JU3~$|bRi@U2FuozO76_hrj3rOiul{-3Q4`?5aI$*H3Ue{ z2sKf>Jjr!nFAI1pA*649TabCWVtH~t)KB+ zymqtbI@68AE#&ErGs?BPIO?I3;DwEVCwA4!(MLU9EVZAf{);Z)Z~QB6zBo8T_sas@ zmPq{%{ip7vDaKTu7>D$6jfUm&?b4t7_yttyQ*X(dgRr_*!&7{=1jgzxb3__H21DOxv4v-$S1&W-lXl>9CkZa$)PY<$`(U_uXf(Fsg9P#VrSV^kXH>!e$; zrOZvhy_fODvi7GnE_xO4*wq(5$*mxe`D@=r3yYrEh+2!5cPrO z1E%*3K7VlXqt##~jPs0y8hu5(r4`fh)YoHWtmu^U9D3~2f3 ze;?py6+E#H3xC`%OfpfWz6*#r(i?2!+ry;jY)2PPV@ zqhJMq7YI-s4-jJsF@Q-MY$}C>V+l9`Stgb{9RrpqzN%q@j0`<9FLlyM^b|cvRuj65 zY)hdkgz`dXFaRr10>~5zg!pB*2}@cg(E)K=yp#4X8WuI}7r^ip4#+rgf<0InOyA~VmbBoPkvbq)N zcic92Tf?!95eN<0W44?qY}>xU&AM)s&8>(xy-0FYfRpZ_TXJ-FTl!gh!ubwIOW(3Q z(nhc6P3~x+PA_uataoQ(ubL|b$acM;Ly8*>@w5m{`jFj!k6ZK1)yE6(YHQ@klBtgr zF3InkEcV_Z;d7og!?r+P*%MTYOXAOs6kap!+<7t$9CQyi7bf=vgR<8x)4BeqA?V-u z56*nP&lj~f+hOE2`Ri>P?#)TfqczFL3d5@7i?0F6IiA1t#Jd zKI#vX0P=&^T96Bkg-pF-#Ic-I0&mYZ3Vj1kWjGQ93v1Z()S->UA!OUrsxM=2mcK@n zEi%FuU>if(=%uJHGvI^k!P@XU__vC@8tEV>98SE^Bu)M#9CJYO&3|q3RH?i;uX3nu0jMgzt2W__HwC=&%;=p+SRIxAM*yKBv?riQh;jl8=A zHwr>!e(L|6KK=Zc?iEVq*I3Uw%FJda+5S%dm3zaEin8C2_;U;TE3LSkgeH=U%&e|W zKz|n)DGQpq$?yfg`m-YaC$YrTA!87OT3thHf(@Xg+l 
zDt^HU{oL<(P^}yJ*teMzHv}p!?;7M-+z~kOyZGMS_4s$qHUc%N+WLk~CLQ=<%C_zQ zLjDh#(0`u)&)?g9Hp@~n@{0R?wgET~2KOj;h}&DzmD?^xV#<6_ws032NVclx>VlIZ zG!npeU_hfVNN54`I%mx`XK`vy`atpBD!oP-%PT{@r3#Z-ajAK?8?)3{@8fCYbh*>! zXn_Wn%XmX-5hx;|B>NeVnPA)w2@W{Y`hu-jxMtcmTgt)VJ5%h;()Z~~J_!uJ)`S1< zybmcy$ws(3-6dro$x}P6?&CN>*u|R{>%mr-p{AAmicU&#r&5+nW>JI9Q5%`I>*+VY zBW!fUiWLN}Pi+KpGk-S6cB8ww(>r&az0XX48gVLd$ZHSfYTKKcHQ_77&q!ZitCI<` zRAnf{Y97fj^ZgK+-=4mxSA60xucSY0rk(!U)622mNQ=>T#)mrVDEuGJ-ZQGnwd)qX zQwbqp2munB2?PvCNkTwG(9j|!Xs7~$hF(OZ2x8p{y#%Ba5H(b(N*A!~P^BtW!Aey` zbOZLb_u)Oyd&YR%`OX+$e%-$_)^%NT&AHZ`7Z+0Mci1r>Ki_rn@a(O9_br3!UX$J1!9y9<~*ptp?Kh+A^yVX{^J+MK6&%qB}S*e z2CW5_{3`y`56lmS&sy7PU>s&+m8ovAlyCk-ENZ_B3onN&96SHbkcZ_-z4AY2z&2j+ztl7P+{?SRv=fUHtj*zwtSm1Y8zJI|YYqZEn=K0r z4Zv2TgAR09FWx$#mY(AbB=h=Kd`{8LY!}*29ZkI~=Y_1UlaE{At>}(?*Xn;F45PhW zEuqg9(^h>Yo0kuEh)3>uzYM-#t{+M6nYZ_|WEfBTQ=-&7Gnd}w9fE{!gi)NP`=8Ny z``kEmv2&DyBhT7G3J!!g>BfbQ_GW#2SHd)VsvP6_*>P0#`IwZ|ieq&_C0lm)gTvq{ zBZ|Fp#eZ9r`B(j~Nx%NIBVy~J+T9o3|9GFJENjT@C+E?96F!x^pltG9{ zfd*H>B1sBmDT!0SQB-`VBCbj*)N!7k$>&hmi=jR@>&#> zs~&~ws8QSBdTVMoh`nS24QY}2cC;5qAWcDpNDH7|Yy&O607MF4LUEh|k2Ag_0TrB# zpy5u#q!ZoN@T~mo>>QRLtdN?9F)PZ-C-TQ#rP1zjMtH$tFg_j_7c?e&r)6acvXipY zDDHyYP&4lAK0zI5BGnpKmp>e$rCTz}r z3bD`3|0iP~cDINuZ3*6%gzkZ?3Z`1Br^HIueXgxlioKtd)m4rNGTwB37;qCDob)*a|StI&)&Os z;aH%0>b{$w>Ft#F4+3jeuEy4Ne{7TX4wNl=dA2ll&siS}r2+=sxlxzW*Oe2wv;10; z_Hfn9bv3kh+Fq$Iy}buVHG)HJd>U7#|9c1Uulf%yUH!D<^HzVIsY>GC_X!j;9-i_F zWUX)4cYtc17E9;$+EDmlB;}S_JcTY&gl*4<0i|THA7iI9pv4WhhE{D8LIJ=x2{;IW zsRDm93_yq!n3!r(fa>_E0f5ZZ5ZjP}h(H1bZNaK0-lVz6c6;ol^Xl4ZMWc)&R3yIL zwb%qw{3RT}z1y0)yX0XLUbV5Qq*-%{;> zZy1RWS~GNgb=Dn1)*D~%lO7lH0$sJvn{6JsQM;xXmN(M0^@lqZy#RUFxyx8+0(j@TmSS^7(YlS>pZJM~Qno4j(#gp)E#F%Z7S6 zoOsi$l5_BO%?{E}U4nkMkL$HGgY?^KOCAAZizY{mwi5{jKQ28yCF3LrxUJta$~=>S zwBBtR*NK%6{tcD?6^n_;Qv_)h(Q2JJ4O(M38qNDh7*uUuy7f;SPBIWALqZW zD}jGLxB%JA0ZJ3qsx?jYyZ2C}uzOmCMp_$2+D{E<579HUso(8so0vTeFtzoU!PHfH zLl>1WMYHszmQbl88YKu5p&}oP2GnpO%$>sc^EGyJK33QJ%w4P}XAgPhEIdCt5_e7} 
z?d;uU%R;Nj*2ak<+ry5l^<6a2`ch7Jt*w-yuZc)Xh%xU-PBRCvylbpHMuSY$C+5Lw z7Q91;xdqx7k!f|XJWWuN(Ue_qXd|2z9-A9lEt67};5uPp?rL5co zYDLu7v41O??D?@?ZzolHuHE52{JCs{qeaRJ?Egao_*ebcXRm$Hi>mCeHr)yP;2#U+ zOCrgSiklCOxqN#ydHqoF#Kr^9mctU^6o+5o+g-h?G#6MmC=~!I&WZw-s?;CFq4R;y z@e8-b5HnY)=%6ix<}9ZhVPqRn);XXcma&cXj$DbcE9xr{*oNYU*?5GpAd@JFp%+i) z;HqkhL$^n!WlTXW+E(G{r&r%~wm`TQ(Lu0PkIbJ%z)026TE|`5G2lCnVG=RrjXEij z*DBwBegoAXj@|ovpb90mj&nK? z-29CW;q98Q=T#0$skj^*vNqkmn}ZL%%XZx*n=aB;MgMJZ+k>#&SLvJ)pd%Y|=YZs$ z6Trt-2Lf=Xf5mAy4SKzz^WfYwc5U0UCvPhrdZbvm+;VUDKEg@)bxgUc()Vz>!ll)6 zx%#7Rq@Mb+qgsDzw&+z|K7ssNnfG`tw1Jgo2mx~-2u=WKO7VhkIWe3?K1d*n-2YW z#2C&!(b!y%L%?a$Sr1bo${;>iooZ$87I99mF(goFaqQ1A&$~RcT%e5$0V9GFr*i+? z3}`>vB%D`Qt{u``dSC9O*M&CvP{IuBTl^m(<58PV2_Nmqjo3dA`-Vt7zGVFJx_El+l-iw;EQwi=}bNCBo zI#kDa?Zy$~uHp}hZC@xWYk9X<@(MMnw*w2-*)di}|9HrL{y@{}|Kj}fAM{^P18fO+ zbZSR`>OOCm7mIV-|1F&JQjsY!q!NIN9XRx$*>6J@zGMET_cDYtZ(;uVW{*Rv*4zjt z0#hFZX4NqIUuOgX{rGNGLY9FaFCD}*LIx%Vm=AgzC1dlvD@h_tZnx!<|30{?Aksa~ zxo9MKF&AI$z}uD`btC_RrJ!H{vpR44ZkyvV#=kd^mTpPJ67dH803X5Ymh7xBKHfm>QON8h5$E)qQUUbt_~jZTgZ znCtcQdUy48toOG|d-fdGs*wrZmv`xUSMKksUQ~4Qo@auEAHAC+#Xm2+fA;jBS0Dep zyz=VNZg((5T;$7;_}GN#Rd>=q4lM^+EHCLUPG!F?wEwh|e7AJ1QeFwhQUdpIOaGX0 zg*;8(-3w#*I80A&t5iY*B1;CW=mZn_LSWSK)4^277CF4~0sSqiH!086LJY5;wJE+= zLUW*|WSFr0J!Pn9NxUn&&@FAgM@q86M1;z~bNUfuh}jV57xokPn&cZ1&LLLa{g7@r zK!HnBa;K=fk5ugy&JpcWqHXg1Ug~{6H#g;X5PrPp1Y+(`DdZs(j!6+GYF!-Q($s=_ z>a-gxfjo?PbV|Y8V4fUqdRlZxBHwh;*?$c59LctwiR`5~XB-YFyLGgA;G{=!`o1!l za!E1aP8=bDyQp*UcZ{KxuS-mm`*YibjuWT$0Vj99J^A6#+KONWlV;pe`7_ympCdGH zUzWP$sXH}AzYY}CzjL{5LvAX#L9OV$IHSI6-~FX)_%Hvw^Y};5RR3B#v=RK@0ndM8 z|NqbY^Lx{`XsJ_r{V67k+xhSF(0G(K*Epwfs?3QG?=$C>Zj~l>Mj-gj1;^8kinlc* zt)@~v|8Myp;cIZ43w{r%e%6g@|5K`dT!0p;Jme zBNUA02Kaf;A9x@B&Y9Z*1*$4ce5`>+Um2P~Sv<%Ts#By;Q?7;I?5yBtCE$t_{dCcd z!Ixoi03#)|Qpvtnq_{nAYu>HcP`tAHF|BKiC_p#adR*=9#)<84JXJi1d}uxcQ`Hu5 z9p0Wh#yG>yrNns->RF&0&FN!=xT+Kh32b6m+TLzN5FI2VbQtrNO%ZgXg97E`fDWx6 zWmmVMg|u@|^=C6^azM%k7VvZREJ-m=juw8rl_aWEWT+@6#Ug-T$~=)LhcP9C@iU-{;F+xN6%opN2*Uaz}Vdwky8|NZBcUq7F| 
zdiL>2Rqb$%?u3)`>zH-`oCVzfK<4omOPoi8v1X?-l@MI zbFJ=i-xE9+gSgyhrq@L2`GXH@45vP|uB3S-GDqpSQjFunZK4zD2O{m+=w(&w?{UKD zQ_X!BPtv;`6Iue%bmAs%^C}m|k-kjI7atwOjrH-}Q8=!VH#nO9$set%)1Tb??b%5sUbBM8L@t5(}&QufEn!9qhj_&wT z^@mWbIHY|u(^QyE82Ee=fxeM8Wa%v~_p24!srNQ{kEEey?deL;dHh*vqIqyZU2z3* zaY_&oTm-?CPZ?$!c>3sBHV^7DO?QdJ1(J4C2!Ipf97LT>Yh8 ztU!Zf4MQhRI%j$Ezj5%0VGj$Bm! zx)y`~W7?E8J$I^Um#ogy$QP&Ic&+NfGwwI|V184RV&C0GS7QASIT@W&lYDqC$H4B( zn`oU~MvsZ{8P$h8O^F^v=ak2OsV(8#OZ(0zk48G4etjgT!~bl3V8Au5IIx&_>GDMq zE?hebAFC2{zw&f;&~o(eOYeGbmY17`v|cf(40o%UH8(IWy8GeBEHgsrugo~dEs#KI zGM<3q#GDcL`r+_MN&tMI1mz1}QwoTGO;M4>(DTD5AmU^tIV!AWuavSH5w@Q;7An&X z{LnT=q8Q~qd{|H~R5j9XTsbAFWZwDhy~ZeTd@ee{djV!<{;bw>hk|}ta-Ke@TRcZ? z+qw11dR&HAPqVz0Dx;^0pxv`eQqEkcgyW`=Z44v@m!12nXmfkA4o38tIjQ#?FBpXq zCxL|AtR>l81Z6$XC||Zp9u;drg#M{_7Rg*#B(YK28$%44xTUO2LF#dZ2>-tv2*cX( ziJvdmm<)0~(s?f;v%Cr);LBw~MrQuH!8HRFltMvpJFkI|rM6kj-jm<*BF(h(9RuyG z^wn=2YyBk7FZvmwf_N^Wb@%7f{3*K@!9YQK)Te^ZfW49ndU0<9uU~rT-k_QOH1q#a z`Twc@P2Va?zTKD6WU4xGw`CrfM*{&>wv90hr6TZy#_4wG8Fj$}&B}ZEt|g8MZ*LuN zbP6o$-I^x|R?~W`eivJ}JkDe+H4vU5DaY_D2ib2=Q=xK=qu&>Mi|h37>dsyZE# z<{{K9ux;mz3^#MeXpM zaglo5(|j1T75kE(O|UQ%&*K_aVk@+Rnin8>W8u0{+v|VqPL0za#qFZVE;^&^1aFG3 zHJZd@oQ}F~kM&0dR0bI=@AWS4tw&cVBweSg`!Qoq zPwYpFG=k(juLkBFuT6EGJJ(zMM`go3u#Nqnk^U1}Ba)7L=sOI5Dgo*QvWr#Uyl+JVQ%)N!r(0p_kk_T%B(8p|!PMT~D{6w9(2^&)d1NqOt*bV^6p$ltMt48hc1Pd`*e}^WIBe#V-HZt-R>F{W z-Ek&&7)4c3kloQd4UJaAU~Cbo3=)STy|iYlA3_1zn&;Ag+`$Qbg0Z%k%j(}5K{hAS zZWiD4gZE3XU6O$eTvtEkWuq}N<*l;tb%J}}+k5(LxrKh?ud|Cqg_iGTcxp_rBg7_a z=yl)emG0k@oY7w$bN*2ee*KzQQcEnT*mBv?xX%7(dbDG4<{!ELH|hT$Fwp<*e}T1$ z6};ri0m*SwRoS~O3jl|(P3uU26=xmd@^&L%pYytL=(twO7Yn_%Fr$Iv{rKz)A%Wzd zKNip+3BnF&lV@%J^%q83KduS{!eNTmcsPbge15tPK*Kx)DWv@X^W;E+dPO z%%U)303*4*24;)uQx=A_@WzOs#n>TpLZ$>RC^bPG$GLM<79s|gN))_@;$SRQ2Ymkr zr|uU3%r%cAr9`A^%2@YGZy<27S=Qul)_4IiTtCM;rk06wr$Y461rOo~- zGd!NIg&UuGdy^p%KreaxemKbMtff?8>hfmmkRNTeQ&L0h#6;4`S^jip<=G3&v=X}t zBW+C=O4p#Ys{1gJk8noApL<|M9CEFim)VSi5PiYynY0Mo=b2n4weBO za2~bWwRW+YiIqRpv;doq=dx$K~C 
z^_0ev**(du>c)NupJ83~@vdNO;E(XSA`!!Nj)?8c6Km<|N#k}yCYDA(8rK!%R( z)$7SBC~4fx?d+WMU<%dxi+5xqOhyKUmSX~~+t90e>`doYRP;EN3ppeL<2hjpua8m^ zkMQ!gTr{>$>Q_D3eKO_;yG%yM<3pK}n)W?{fBjr?kFbgt-STAkO6w=NBci8@y9G33 zPFD5vs_qHNLCdZF)cvR52Jd zH~779d1pc-j_s;}0uVXnBq#~fv2W{(9QJif!CWEz%wv!D-VV_tjy-m}bH_x~?(var zqZTwFV)BHLD%hjCgbR1H4q!{ceJ#~RgB-G>MF2L7Y+WX%WYB#J9{A5yKT%TBO7>_9 z(eczP@jyd{J-MlDz|Fha4ewYR2oTNfIa3#e=#JoeHi)gCivG#)EO^_Dyms8R3+$Ov z6{l2(uoac5+8Hr!CP@WaO;CFdo2gq#s)CLVLZom6^M531UY0IFWzs#B3E((sOixcX zwqYnUOb$*CY6C1erLSL@4{Y~YBF3n5KjrR9nHUJ`t^V}b@<8>KY-c8h89{uT-6^`^z%jV)J`G)p%R+c zEh3$YA>NoNHw#NJeBssi94$V;qsG406;|%W+pf|6_+Di()d=*HnLn8-wv%MfzpsjKuNm} zF^my&_M)kwD5p6IZp0KaIor<$nL#nP5aWShgM#T`$55Cy&6V^tGd_i%2eCs!#da$& zy}2N+fY9iW;pl7#rU$vk_rjpCK7>n>@{tN=LYnYuN@L$Z?Pcrw&O|uOMn=pZBpRV{ zwS1P;9AURA_G*@AdhEL1WRR|tcYD^L9ot+vb_sX)$KElxwB;9(WOk>mpSIPv{0WCI zN^j5P1!;BHi-9Ug=DEhWlad5|PU945(jTn)#JaA;!}+h(XZZU4W^+OFQ#IjET@&U(R(d$ir(E%aPz$f@kyD%+s2P8NLrD?oeTPhlbKbqMWzKcB1A* z3gnM&5V`r0hK}BJw*P47aD@aeY`<*y94h4_N$3q5qtzfWB%h`i5-I>oYugpPu;*kd z2iGGN$A+W4XOY%*IG@TUss=@2|3g@-!7#2k4JTA0m*f^{nuu@7!}QIxZ+;T)<-s*N z!85s-2jI(E5~pLKn}blzJ=&rXf5k_xZ3^=DjwwuI%;nr|XCHg(OAlOEn&e zEC_@BIFE}wuP|M(LjTfOCbM4wG#LVT}9kqUPx?$^4R6ho<||O*Tf@VxUqo3U_5d zPQ~Bn@J~oZfmE6to1kO6P`J+aF9tQ(WIMCNaag7$N*)frSu`*Gic!?$-uDVG-6YHv zY*l}pxF*2I4HXm!G^I;-$~uT38k24=R{Bw@==fMi>Kn$g1=NT}>D42|u+VuAonL!8 zfbdHyQg>mYp%sRh)!}s#^<91tdVWMSpeIy;pSROSt#wcbpn!LRBou-#=8`#)@e*7p zNomo*;$b&Rf3$Tbh2peVpTz+MtiD!@+bjMRek=@(h<(U-XIa!n_h&*y>UMr`3b>IS zt%p%>*@5)3JbvY{+&(2Sskyktvgg=#jLp}hX+fQ;kzy`4DYJ0eXRWdqk@EcJi!V>; z?e3cIv>&$DZ3&q9d;jDID_?_KPumjh55K&VU!tCW@>q|=aL(^z%riIL>LOh%&aXdz z)cW|$AKyc={Qj!|{J-lTygv38d^6b8YVPiuerp71L_2P$3Or?6=LXDHM5yT zKrRL|KnHnJSq;!KDwj3ajhq*i$GJNsD~Xdrd8L|)xxxompos{dqbq~*fIN~MO18X5 z5XCbFGZ68#vh~VAbm>`3!sK&`V<0z!x?t%;SY!^g7Ev_Nd=|<&hI(7>R2&b?#T9q= z0xY4(f597W8f(TK^ygYj9R}ecQaY(njU=Z-D0|E|F|or|anbUTTBU@$RQ2J>Hi-L; zz=))^5&!YuLnjIvP>&&*4<3JlXx;tTSE={@hr2&t!w3759f-NomTGv{J$FK*BJwug 
z3e{iVVq3STulEdD`y6{V)XR*QYyF0A&Qjfn3k7pFHn~>SalL}Z1b}Zd#_+6uH`!NO z_QE(5B5%g!=3ZLt)&PTX>#h$D@@lO-_oT7fN7f{g%tQ|3XOofwpwP?G@`?au5GUM; zqlkx&fFV1R_PEkLi%bF^gQ0x49;{Tekz*x+LFT9u2d}_g92Qlf+#>-idf9Ct zCAxe_bKaa98dsM|h;ns%n9E#d+4(|oP&cVQS?xY3)v}w&7!=vq!5Nv`u3JRE0q5`p zx0G!<{1L!ZXfhW{JpGgpk*~%nYw^nC6r^PIKf#Iw-V1S<`JftSEuZTS0Ss@%;wnh)%_w8r`Z>e3fO#d+6u32LkjvKFiGcJbB&}HjD8Y-R*6e%td-Y@*G;k z5`Anwx&Dkj?;?Y?Jxrchq{1n_HUC)*`xpLMpLqRu!&_7P;d{r;?)iU%0pK11evqLy1WJ{&_YpFA3jB<>ufV zc*)xOxW#f^n=E?K>kHZC&OX^Zg24ZEY(09vvM~^h;=qI?yg5u#ha*b=4vtgONO7Si zGH52XP2QunGP+r%9<)SWp^!MaM(s)9uu}B3iDdBvzO)@-0hs6Hg2qRdC25{&2xu(Q z5X2BzB>9nz6pE!O#t@dEF|x6@cEH5*&H~TVTS!=0sSBwol-gG(dttYXEtQxvk+{1` zk+JRAYI)in{66&2ulq_xsiL8HHLr0$N9(7vNlI2f_u#J91iX|Bs#Fk^R$JaB_^3M3 zM%(J-qo64>j-Z2R(ERj@}{vrVe-{4sM=P*@RRAaG&J#IhqV(sKWS)R<4+?}WH zgZs~{PuYIgy5nl!mACO*9ozodu&VY?jOLtu!+&p${~iDQUi>8)=$X-Z?>O)38$^_O zx+EF0h6^f6Q2{{{Dx0OGNMP$#<41}T{t>1^sZrjP#j#*1=g}sX-@`KdJZ6SHS|*`K+LNfUkdM&(H)wGNg555l#VJ^q||n z3D$0y^|EVweOwzuxPJ2LiQ1S&!G;9cX9sxW*k@$~tM7nj=ivi+=32iezl1e(UI>x) zDOf*dronrwp_JiO=;$BuDaC4MmvL)On(j(o|1WZI*u`gGJ|BAjXzDR`V+S_uhnR^r zVVio{i093UYrmazKCw0Ef2w>bI{b~}P0#M9?-Gg**>>rRF>FU8?P$+YJu>U)P5)6S z^kH;jDy5eSEL++g1Q6h3@pNyyUQ^RO=*DIuH&qtdpNxWD5b*|!`Dmp_^Q=|eZKA|} zW&rFlxFW)^bbgj+k;IS-a+Y37h^Jf8fJ3rdM~PsyR8(Gk^{0V>hIE42Wpo;e8jVp_ zU^9K=jD4k!Xh=hz7F+EwCdxASaU&@@9;LeEeJ+Sl^Lxpd2st^dN8&VsYwhH{e z(VLc|SSAvgK~s0vSH+-!#|~%v;P`{`vjTLyqQBDdluNnwDaGj53Gr@en8nIFkLfbt zC|P62w*J*G6xfh?^Sld)S0H_&{bpKkm(lErsK3@vIHbzFqCFe(J^a_vZ}}$2PR_@8 z+^JqO*`5OKEYZSU(l`%uId`VIq-)>MJ*JOi>cNTLuU9+gJnzS?-~ZF;e@a9B_xoS^ zEeLf&!Rn>yb>7uL2C$ugB#oGo={;yZ1}EH-Wx!I&(xJub+(cWYNMqsZhO_t4Y@crFo_M`)dUc+dkLvCY zn~~_aP@=bE)M+1!Q*MOPY5ALjk-Jgw2!nW zp7u681?NM15X1W0_IiHxMn*ARWx6Kr|9E}URps71`1w^JF`UqgqI8fYw%5W)mVufN zeNvuuTZ$OM$K|m4KeN5=F~|wo-D8QUvtu&c{S{{Emm+5HEga|qH2G%!kt(#OcczMG z2k}HS#*N-IZhO3+!cqL;;zejZd#`g#v>C1%{& zVc2Vs0E5OkEI||aaAXqXPzuwq2oAoX&C|;%HLhI1<)Y+5+k0itjZm#S*Td3B) zWzje_JmcNAH8;EOkeA)PizE8>^zVNZo&WU1ShML+FHj1F} 
z#fz7*T7(RUkU%I+lNXE|l^a?fD00q4fGi4Qqs4Y=e_Qn{fBYg0p9jxHUrg#VJwDiisO8XBL369t)CY%tR6A6m|KD6#izZ zx19BEbh0*@%}Y3WIUmd2w+vU^tl%9f$XFp-WA(o0$)R;y57BLuIc)jb@2EZeD;u! zF4Xv^A^9EmZwpK5}*mB>feSUM#FgUe`XQM<>P*OpmJkg8xYNMsbFh;hyn3>`ZRj7w;=LM{C zQIPaop?sVIMw3%;I|3F z8&=bxqXHtFiwM96*f3-J`xUic3q|xkq8&ey8$hg%0IMb2;ZI{%>*`AlM{<-pD*(^% z$i;4AY99EWlw^7RX?yH|Sn#0bt4YdZpK*z6@w-vUX>E2#?D(DP zo3P1Ym7pJ0k$#l-CWeoXwYR_z&?#%+<5mR0yM|P)K=0j-ohSO|ZwrOHCN!q3_Lsg< zrz-S$CD)zn59RmoEBqLK>iUK9V^zbKfMb(L@xwCz`4#y0{x^E{7wVmYlpJv@i+YUOi}MS#R0scWA) zZ~X`w*|k%>c(Wg08j-%`HIXnva|0QoRC9A8d@|6Ks~yl_59G zTE{)QPjANnfDvUP)vQ6RCITr;kV`jmhYkYSt<0I}-^4o6J9Vd{p_NXC&Vw0M8;-n$ zL#MZOmZU+q;0?NZ$u&P;H1UQ22Oi8Dy?hu_ff*#9z8sTq&9M}0k0yi$b9t>uHn!?2 zWLFB8ncUc|t}bBvZ+dcjgo!n!M}NFB6bn8^;I}@acss{yI@i=2Vo!RC=WFSjhp91~ zkmUQ)}TcG0TI5=EopAwisF?qIh6rbn0_)`gBH1iV&A*N8z!^~r1YKaG~P0)E?;_D>$ero~Je$>0zq>K#osKsgiS z!G(`_w=-HPh9ojVQxI39y>)6wczzrOCYMFAa|=09K_V;|t^4nZSGj7xH26^~*F(&- zK&~~f+fi-3K`cE8CfMs3%KikHlO(aIY4J(rCQZ@R&l(;p*!{(2_mZ}r98NjGL{x|L!9yL9}U z_i*~2U6Mx$w2)_>!QkZ~7=if3E2OcOc1<2hmHKTgj^bhH6jy%<(c9 z6pV!j#Ye^apX?g*8+kp`Z>r*?m83YL_3xvWIvD=#C_Z_&!sJWZtEC z0&Z~!AsB%gw0l&b&uDzh4F%7ujgMGi^j{w<)T(xOEEU6vZ^=R3toUH2d2xuYmQyjh zOYv@P+pT@h4?odZTl;0C9?jBv3|FPa_8yYb>lEUmH*kySmd*4v%&Ii>pU{4fsGPEG2lSyiqjNhPN|4z;s#2=L*urTarDH7pFZhkJ zJG2XnJ4fN^n7pOEj?8t^UR}SEw75^}p(RVuVTXH8SpRz_!=3x3G_!|q&_|@7UUS&_ zVON!)Y~ZRIYk?*n2+T*dIENS4r1xxd$7V!AyVk=}Z_%?H$C^QcNf6g~&U<7-h>VWs zwHlH+aX8SRE(cXYugd5~gMZ0wpc%OjwE-h?)W#>|JgGDj*axJsC~|~jjj90%xRn`_ z%hWHxAwDzy2{&1W2&U&335j45ZFuO@}t*O(y@67uUgQI&t^8uKlio;VnmzAyWdZt{0K$B;DIY&21dnm&UVAXi&?q^{d-9O9~;Tb{~5_UY(9vUEEG- z8p&*P31XcUK+Z)u| zK3*LWQ%&?A^iZbQ_$GKGf}sQsYl`giw<0RlJiG)vcd4-j%CkMd`^x|dGk|)F(*PVO z-q9_}#@Q?ad=s^e1d0dZz}dCR#6MPO)nuj0v#4g~bx36hQFBV36Ku^;^T3CnwOhqn zsI`U{F`Rzu9egm8B(DrOb~9 z?>s7e>w4Z4DHPsvn@OX(ym1dB(E0NC*<5P-Ge%9{H=CmL>M6~dD|I)Q?rjc=Hw|(+ zDqml?Ba98$qj08vKsX5Rbx*On<(0HorFQLQRB4gLJu|hmmsc}~f_MGgo&U>}EJ(Gk zRIJ&VOsxK`Bl%gx0m{t=_S?)L{e^saMmFqYgyr8L>MlvmcqD*AapRI8TnTl`Vq(E) 
zT#_a{2}^R{S}Ch3z)|QeK4=8QEFNC{u;5}EtR`qMi;De{NA8L`W^=@#a-WT=nid|& zCOebi*&(@p50E${X?sZ>F;t+UU4jyjvuw-yn&rdICxMsJ;wMJkpowlvn#I5^Dz~Q& zVr3N(<~)~L?s&e<;8W=W{Ib;1@qwp+<;vr8FXZyBr5pg?2-;F__gMuW>UpGI>s#-q zPelYnphgl1-YS29B^)*qgXdIR%K&}WO0;DO1vwMIQ;Kc8gi+QTvIV$gO$&{ntRMTx zT`yuZvTJrtVFlNH>l_Gr^ubo=4!IH++e-dirk<-4t1tuMrR+b%hD;8rgXz& z?TmVIScGhf?^3th#wFq=w)%u(zka)c>SSBHQWUX!%*5|AG0GLc$+0-)9H7MX%X9ix zTs7XW2>Qn*YDQ#Oi!PXIsE9W{zLYD5ZKQ5z=CWyg3vqYBu)yC63P=VXsJsKsa1{YP z>WhW(6^zN@-4~44Na+K9Hu(%=Is1JNinsF6Y~o<%$g|zyIr_1%icIZ0>er5dftsK^ z8H1LF_O#Rxvs+}$1-1C$Yvn5#0 z_b0@C<^rDTFMciaSviQ6jel9NPdrUyA2Xvar0(t_^GGnVYzRt&ttX56N`oV~PXn#0 zT?xxD*P`k(X1ya!>r|~y<%uFEur09HrGFl#8M&jvoaAJA9dHdf{bG5o;hp4I(1^>{BM0YYGhQ{uyc*l!v53ON|d8*!Um3N-FY!TKv% zAH31spxV#JjR)yZcgzinRl1zFb3c7^&aw1hfM;!)Xgi>t39>3+IkT;n99xSA2g}kz z4K6i08KWJHCb^C2-^Rl&D5Yf@V#euT@cf-?}7| z(Tk9V;L3OfZ+{$Wo-Ye^G1HVLDhOIW)uuO?JkQ1)<2)>6yJu{*x<<}4lM$wc(!W!U zXnXu??RFmdfJ^T@Y;Z6`*xn>KXRyQX2`&gclwKim5b<5G&9nR#(?^ z2-`QYGP7^&C{+`dbgaO+A)TKEdtIV=9%>r0a4bW$HEtlz+_keWDG)nq<9y`G;3_3R z^;fAWURF!&Ms~2Y0n}#r?#RooLW~8yjpNYGnU*qK({iaw^MexBgW7#9WwZ2=$vlQhzr+xE@6CI?RdvJ;sgU9ZWP%>xCEja&cl((9+5oC6h4S8I5# z*Bnte%u3V}M_7?;(j-+_FP^GN#5V^V%^U6zyJCDu;fjWgx7)C-+Jn9FMK1Zq{Ct_I z`Y)En2wT`WV&qYdWoLEHNO@;<;?B(%!5%+!a{qJB`ybEBzwm#3^y;e4DZ2fNsb}Kd zB?Q1nI0a%KlTqD_{i7e0uy;&mwv6<)<3)Vu!L3 z0ar=Z^$Qd2@dMRKWbXTGS&<0fTK(5Vquc51|ygQ~G_iM%Ry-#kqnQcd^B{uV+_-i)JC-#l?3dmY!rUS>OPBba1y zDbnPX3g2iP)d-jKxAr@wLzLdrD~ujc`b{|K6FezjXKuG*{mE_WY_R=Z60rxk2BXTTjJVs4gA(E&(^wQ%o z*s2vdBw>DD7q9X`d!@#vEunvWYZ80Mo<(cDr zsu&{1!*VAHN_V^j(-;+H??irp%*d3~ubBqRuK*PlVzfIvlCoyWM>R$bLlUJeR-w}w z!Dftfr{F*Wb4+n8X4qvbF+-iG*wquySh15?VA>+2C^^cJK?LhA?F3FWT`HsiFwd_3 z@x-% zs^>y)`fiNve9|;~okQQfC1$-#_U}vh6L3Fv%G#4-4q`Jarx$i${7t7@FBFVhJp8#Y zgB&Fszmi}z?8YR&>b&{f2X~G0VL0U8Uy<5PxcIx9Q_LslSAPz;xAgopxN21rsItfQ zKaWNK3;*n*u70-nF6>`1y=s5YzXNDypnWqdFG(o`+ZGwmSBQ8TSiGCi@${I265NFM z_T?iKrD<^yfW0t3|7%co6NsG*ySp8y?#^vB!0yNrF7)hEN6G~r>K@-|ZJ#Q*;=BT5a~aTE 
zy>#JM-IyzeU%5#ssTE*IUKMf&>-o%c<7;)R&SNx1$7yf-@n-}orHETv`sKHkAji<) z@k`*?=z%_xwyJO=eF{Uys-9=FgfS$+_QzYRK z2){wRrJ7}NC%ah($A-%?K2A89Jz_=p=7jg4EazI!TT_B|Rc?zfUyhmqChx5**&AAR zYdM*lCkrM3jT-eSt@@(f4aO5INQqB$EnJz=-te9QsAH}sE&d8IJpJ#yy6F)P!c}0Rn{H zH3_{MddGwwnt&)^I|(&Z>0J#~x^zKNLzOOF!3HRZ6f1gc$Mf;t_x+6V^!FX#{C~R&jgTDK;|Lmmio`iI=y9Z4_npm1c7NMLmtO@x zSApkstzY%*1pdCV1^+RiYvvb2RJ_q6vm75WmNn=!-xyx@wepXeTT9v|>ZloNa!w^CDyK?M`MNufhme0Yk^2Qf>ZCWl2l|)m&w7Nl zDc@2|t5krHZyV^uSzLy!{rDdFZiMzd=1I$mSOCI>*!^6ze$=W9dHqHOH5HY@p6eg*wSLyX;IM0Ooqwzqf`Y(P zMw6(Z8h!IZ%cxDIZ74${Zpk?w>PJkLAdgj&bxt%SrwE9W;&+At5TuJVR~dkVkY-xe zP?ZRO!%^#OU}{K9-n7SS`93T@d4|#*?oqBBql__DK+PQ;T}T$nmHc(B z)9f2azcwURn>{J=+<)xl50~wIm6gz==9VPW*e4A0)HDnC)M8yUD#xO!*S02aN`X|Y z;X-JYiR82XbF$dIq15^bU#ts$zBS(xXRTB8G_`5+mB;RH-qN*d+iQubYRVqMj%o${ zWl@b6o}O6zpJ~qjI0*h(|CQ(WcV_muN3u0B+AW&eU@#m32O}^QmU>#Rhq$H_u}|KZ z(SeGHZ+Hq8Po%KdH!lsxJ6a>xQo30N{f@z^Q&J(405GeZHFIds8OcH!$|$d|BL4pJ`k9 z`v92lZNU(0s|q#Z<3QQW43xa%-~F)2WTHdk&^^+HJZnzJUDFaUqOIt&!aa|z68z&( zi2L62QXEfZjSXz{G|$xKIODnQlr)D9WAaBk_ayGL#lh^s7ImRYvmjAvgz=*yz& zpjgiDNb=<6?1)~nJht7BK+O>Ug@1i<5VVoDAeo_oqP?c^c>}Q*>OSdx#$}&uZ{&IR zQ+xcUckc$C!P9%XgETF17YQ zeYfG)p_eJv?ss|9ZC~D-Ls?nxeqCES^9`djMo^@sKy%HC)cB)IOiQJ?#WJ2lq=|;A z7CQ9?#PO?H-0Ct&Ha;|EN{AVNV%PhC0RhJVI@ZkOj50yL@^z}bJ2AZ4Y&zywMqrU$ zfSV&dh+$?XH`FL=BSiGM{c+THU){M}ES7>;yMe6B$BHR;-mv>rQum0(U2==HbqpTL z{jJZ7N6OXH-L%bjW89%!^fX+afAtHV{X|(P0Bb`k8`lnGU+q@M#dukMDFiadHs!+% z1NUBt{UP9!v~(BY9rjg_DFwrEUnu!tA!LfH$Bv@zG-xn0|47_&x3-D$EF1DiohTUR zQgGGc16IRgVn92Qa%##NGU>7(^~xXrZRT5kX-!gOhyUIp19e)Kt}F%SxNuNldT-*iSE9hu$k;vH9>FkWrkz;LG*J>pzKxsX z4^e^(ISZL8HM0FZIFqeGFf zgP$RanI8@-amqEKIZ|P|Ce@Elx4$~)96j_-KYNDi(hIfkc4c@j84B8M>BO$)MTyuP zm3?4^dSK;D8j#$l{fxE)LL+0_0oy|&E&;!AoHHvDdyz`mqNdIvw(j{Op$^sI05VaU ze$%m8{cG~b)0?Ncz1Z`|pP-u*oV3oC2DpfxHs4(+E-<>d_A_+Av*NUSkn=mE=DL#G zaU0)q)u0i@#_h4bz>28A=AwEd4R?73fbL=T1?iRA$e-3s&y=atwbqa(F;489tbpDGTl5Jp~_zyW0R*0TZtF6%Y!c=YhWD_OEWBRR!EF&q&u zcHWi+jIr&pX+OJ37mxt?nP(Fn8{{CW0h}4kM{B#+l~6Bo8OPI5p>d|dddYIBsVjzA 
zwoabG6W~#)4w=Un4f(h|m&=;x7G*XQZ|}lJSpgCDZaqgLp>;RAYu=rfmU#$L95azN zI!3d$%UABnaRKJbE$Cs~#{t8oA+kg|gQMmLwYGf1bmRs3VK%@IBf2wD#|l>5$)Cl@ z`biT>(sRtmF3PVUJ8_=-f!4^?6uC=^<5hGVa-5T>dp6#z>_2coUP_aC?9gEFsoqX32oKu{+;4+s-kaPcv+S(W zZC<`6J-+ykPYPGqE=n5y7<9ZZXpjes1 z-KNFiMJ)1wDN9Ou5c!IDx~+2W$umJ}j$nc<<;#=THKBiH-_%@s-#P(LX_m)OSMSE} zKWJ)e6p(Z}e(px#1(M*dm*X7XHvyrC^E=T*7Ldx6=jm(bK?kyFU+X#LS}jriHjP~t zk87ktnS;OcuU<~e zAH+Op>v`Y9-kIxyixTb^j>kyPO0EcvQXDL^ZgZKfo?9z)bcp4A4{VvWT(m(Pa!UjR zPL#G#nDT-S*_>f6GIZsT*r-Ng_>qt$3+&0l1lDI)~+L` zPD$WxWW#F9pp@C78vhNd_A$>V zI-x&RL$)fbtc)9YRaxv~da)uJ&B9z7V!XhpM{zS}h@}kd4fZD)ZZo?k!VyBKWJIS=R2(CiIe9 zi%@sDoKHPH=j0r_?^u4COV?ixe!G8qEI&`9I0%=_d(N{W@rAnZbz9x&PFc~lFl+d? z+F=O`dCHDF$?!yuQ{pwgztj^PHaJ}i`L0>3R)uoDt{M88g8jtj_?&+d*_ZCviMXCO zK`F|Fpz|_n#T97W@}Lxy438NPqUk7H)0teFetl@+uZ^~*y{(>T=ZRULg|YDK4qc{A z?m3$&<+R%nP5+AA&!t}#W+r4`Yaw%!T$Ej`0`&g~dF`nk!_v($lfGuuy)>KUI22!^ z@suYKb5}e2G5?iXjTiZ&yo+R~%updQX}heFg7I{$OF0U^ZL0fdmf)#*itq-;Zxkn4 z+BAln&$OhH_!7Ygpn<4$Td!^=}?{Q7MV z-*SO4@R?=CPZ*29Hy;R~E zkvn-lN(bTE)z7;Z52mGJMhe^H(92V0)O`wdSLBmpFsA?`E4fTY zDXrqHpveZe8sGBk4$7|9JnviKg`m?57DLQ6yJpxZa*iPf?sS)L5;wQt`KrBpqqz&~ z?zDS$DAD?sO**Kf-&_~TRn}&o?fe&tPuuwa@r>Y~_zzdvIja9$Gf(eTLhkvEtDDF; zvebu?lC-!0<@02@?kD!Ap1w-Ny>P;tB(c)OOckAmSu+G|*x1>RbAK^sMb#G?(Ftx#9(Tb!vhT&O)J7R!R-n znN@2-ztO{dzD#s%)=nm;26>s!>v;Nq&4{vlxSDW6*>f?vP+vWG<;cce;@wH!A1sH2 z-QsJya!H+EUV^mhsz762~RagG#p zcA4+5{aIKRZf961SDZ>TCy68AR6llA*KtmbJQGy~yg!0P>xIFjm=1#@L8SnH(6*l` z)npTxnu@t+zCv19>1F*BZveD8m2g+W=jYRgu z+T+v3J;xqLHP5qDOuWwT*DG5UG_L(9>8}5))q&B3;3))?)*E+U=ti6?I}Z%hex7D%}U3 z7uw#;A9`6Fex3WR`E{Ij2iub8;>)pBWmWum;sM>#d`85eck)Gj?14&&!K43XO#B1? 
zAZ*?CN86(u)(=PZ|6Tz7NJ1ea7b)dnO9B;gz_cwi{I%{>o+I5XGf9)bXN%#{Fl{=U z&*z^HLEKmQk(DN{rJgHQ&#yL|tqwuV*D*6GxLkNl0+VEQXzOMLV!v>GZQb+EI;fK5 zvj3P+s?8lFAc#G``aQ0sKiUyt{h|AtYw`(crM0V*d4q)K`CAbBBTCd+nay;vUPOy9 zUzg(nz8HfDk+gNuxT3fO@mdRq2vBK3pZf#1sp0*0%)~Ns=)>TwzV0s-V#e0mOL!si zkl7@8SFU*-2g04b)>kQAg?pLO^OyX>m`_}>54i{{C2t9(yY&(C<~pZu`_$ZQ*9x;Q zJK@m{sNi>#KS_F}ft|jfdz|(znM)wzoS9l|jlJREu?LT<+rJfNa3z}#WbYTiI9^Q% z=<{Mlg`^1X0f&m}mSv6DJMyA_8s4wgE`2#?o)kv`Bu`26I`2<_0GdHfA%Q}v?2{+7 zTMc%!ZPmx|i73ph>36*7lc=D#+s_+UWF0CS^aL+T*7m$oD#1o*YMDq4UoaQ-)x?{i zx|evA7_@pUfGor(c|}5nn9BsSq(W@1KfSHq@C<`;ntxBMPEO_8eXPM}4a$ zxbW8F>CCY^QK{;3bkSedktg{tP? zE$xX$p481jVef}4sCY`S0r}qMG38bC<-n}32mNOc|9tr3ZLHvnhqwN#&gDPzo_~A) z7yobE-{jrnPH8f(Out9@hKDSos&rutp?=1rGLQ5EPEKCY-*~9)7b(0P-zyeLojfd? zMUxSMN4txDFHh!&x@>&2KJq@ADBc;6#~tZT$c(G#x;GMZU%))2Gi!|Q#1xha6!>th!fb(0UEQ zMmdYQ4@C|gqwQ;>II2g4e$>^39vC;ZH+-r~sB8?Q-v2c%_jG5rTzCyup=0r&S;Ede zpxi+q=9);=HE@^dq5aKq|}%UHu2w8g=~ zPH1JtB0KVe^8({VUOOYnTvPs(jm;chqk-an#%Sh8In}S}SA>58E4#DG4G}~1p zT+8gzF@SbIJYOsj?;Kr!C#N|=#Ig9V_kW;j5>%OYA~apZdq+WIVhOnH&mm!ezqNLn zABiNamwwv08DRfuG#32@SGPxN9o=mCrG9fV^h!*z?1Dl?fqUfo=bGZx za!%97iVDTp^MpMs5L(FDWhD9bmZMcUqVH96bOO5I&_`Cn209tU`bAR)GYA(bdb?{) z;bj_+lURdKGs2p;u!D5jg3j4glgL|wpMDkVA5&KK>27ry3DFVj7EJx1^yXAu4s){J z!%R5rSj^fjfMSzgU;9$AsLisvYI*bIe%H;nd*1)gQaB*q-OU zoZja8rF8gh(-Udh(!D*EhuN(vu9Y(^$nif)n@>~unubhe_wV}v%wEP(f1E9QDn@G? 
zPTacxfdd{cg4RskRBJGPc~){at?Gb?-$I7rWzGiAlht|#EFXPNR~hiP~sg5GfE-FA+xc9 zin>AgDk2zLp$>HfiN-wxJXv)=+FJk4+UO%DA>ESD&fPGQFS77h%#k3GXGNnfD?OX8 ztxOD%PNIJtkn(AOm82wx8{WAZ&}9|CFDszOnJ%2^{rAcA?G}3}Bsaj@6@(xypbA?s zJu*c1sq615DO8URtE{!L#Tk`{1YuqPDty_k?xKw(g=LYCyogbwVWWn!jNDar2d|>7=xprM+&XBbRAHxl1Lp|GY|0He z)E@4n@Ezacdf!2HnQsh>A6YOc0zt#nb8^d!w=C$L=5%H+Sv6=XC=wOLi+HU#2Nnm3 zl!ysI?;?J8eSc`Y^X1b#h}$&wjWZX)9$A}tcSNp92*h@0UV)zS=?s8c37Bobht$ z@CU&=ybdLm#KluqU}v>#WBF^MJ{})gj4Bk;7VN+I)k zJ{w{4>)=jSzUbGT_a{4of$a0oZwIYyKfNW8J-*&s20uz1O)s)<-&z*FAj<>&@!*#8 z%eKeF*B6gtb-WCD4Uo*kYU*O_F7@P`V?+b;7m#U?lrxOMK$-a0nwpiGey?27_!=h& zw_<1tX)@y|0I&8TOie?Lxo5K}uz} zqJFf1EO7dfo{Y83^vC7(!T0U+kC7NmZi(g30RNZ?!oG0TEgj!;Zyg%I~((w$sb>J8$R}B)sA;HJWMaf z=&ykf5@OdTdoKE-i@dAPu!8elBp3;Nx(2Y;(9zPR#Am4ovJtneu6y`Mlx(7+tHWNdt1CQ_@%x%Up_MRBnU;%>Y>6i?z#q^t^aU7ygw`&!fQbpOX z<@G}}&q6Rkm}t?!fvz%Y_neEyla$M@bQDVlJ~$#F##I_5I+Hc1v^=jl48J->(<0}4 zu=iwRxYK>JY|-(iphS5nmbor&Mzw`(KFGT#wLGAQJ9n0_4@qemyM!@WF|c4dxA9l% zO=dT9LjI_2Rtx5?^fS`8hb#l~PPy#R6}A$kR(T8fp3)mU<5@1BbE+wC8is;o z%8RtlZ?3EH1KScI$~OVdQv>F;nHsz>$5HQ_PA}j}5>r`nb?)?P0fkA#$z3oX58tWR z+by}_tACzD#k!Ap*f`h=4p^jJ(P>dv^6a4X3}qDV&nH-HEy$Ih_Ha9b9ytnfP6@sCic<)%C4@ z)Sd-%6thF?G~)mBuit-2&;sRrfsaERC{LX1J;_VQ#`BUU;3#!@(`&ZzbQ)`pgMkIi znP5>s095dPveZ>5SLv(QrNc&nFJQk02u`M+Xtnweb$&i+C|syH9Hn1Z;xp?&S2-t< zi|?NVi&G3%e%vIKGUe~fJ%Ivq&=ekqwAE_}m*s6`L2^R|mh=}!pG)98S*|XH6XyG9 z7*sAj7vbP?H6NH}|4{1diBkUwYw)>9>)O*t=GUC}5ypb#8TZ1$ke)B5X?`gUYwpOX z2br*I(Rwg2^eHnvuTBM|-B#zE3)kjxm;TiJrOLW#ymGrM(Dp|nh=vD1I-QS}6OM|- zh?}FP#UFM%Sl4{}CfAJ2o_sT~J%DX~%0HKRGeM?1p6jk72j-2zBX6g+0LE$PsJ6M^DH4P8=gETVM-3l(ouPRbSg zAz}}y`xHt4A$M?-QZvWAePCg?jZ!8Pi3L2>vw9n;IvG#(H_{+ z<`jaOTKY(?XV9A0p`D*11&`V02(8RV-3+I3X+5|QLA=r1fOV58 z)TDL!PQp2rk>`#oso(8?mDDZ-eyQy_wrQCxLV8W&`52|n-QoCcSIe=6m4Z0W(Z1*= znPORcKSD9k=AIS4DBk7!l zgC96a{#mXbO`E@PjjSjK#|0@&JR26aG^BDnY8nKPrOUZ~Lr1DQ=deVEbo;XWvTMN)Nu{>D$q)%av_|<>_Qd! 
z&B~t<;;`VQ%|1CUdY@FW+K&zAM}5-3Jg*Q89h}u}$A;v8Q@G;|gqVKo=~=ktyyE?U zzsd#s((F1blpAYX%AHudWGEZNA9TN8q8{3RI_+{>7ninv@cNS=1);WObdt(hx27m! zy9} z$7_>bmPl&bD8BZ~`(HT!zn=j9h5x$!P1P{&l%9K)Rs$O$+z{aQu1P7yKH!U0YUTDZ zqhr~?#2T(fqT8_V>p2Cey9^eM5(OGr-9p6#F|gi^Cc|3JV}`l}yWEt0X+d6YdA&9` zm4C5}w5{ETb*M}=>%B_iPGbiX{$}m@#PEkGL_g&sMh@gAayJy_S6bW;MdRiSGKY zO)9EqI*~JEh5tULxh8$Qwt)L~(Jkj>Y6O~sd~$DsNsvn+o%M`B80kwpIn{2ZiiZ9^ zFJhX3Ut0U=JDR0f5a_vP^W8N1mQ*IYH1y!Ic8}~#g!Ypd-;eD&Wx`jU2m9QQt1!F) zO+9^-Xf_Z;=nBOgup@9TLI6uuuji~@^tb26zv&BXIn-8W4kLcclUEKrlQ$Eddn7K! zV^4fo%3(n0A{KW}X)G{5+vf+t7iW4Mz*U1vsqT7c`S>wPDIggPtZh7#6`h!0nl3CZ z49qesb{z`S7?*QQtmGpZ318yOpF>$|x$Gpf846w6#NZ%NGDxd9T+eb}uSZEJ7h$<1 z<3ab!B?1UuOoF*JY&axuCoX_G7!*z)A~?EV@JrJg*9V%EYDmdz__Qj_pR3yRy_pe# znid9XU9)IIms!(yyLJX?!Wk!PH2JN1f&%Q!B68`d5zd>kl9kqW-^p5^eR4^q!b_?U zWer^C))()sk|2V#s0UnxAM?<;@KWE*b3q_SHfA@rr+x}P7*}nxi(5f&DRV`#GgN|O@yhe2pKQ^XWy%T8w3BqKTuh2ciZj_ms$J0%CeUA z58&cNHu7#|8EbXDQ~)5p<4}9EJf^SgO$T;PgUROfJd*E|mp{aQMn$zfOdfT#Jj(1D zQi-nGr3z}fn8zI_Kw;?YfWVz6g9x}bn06LYn30&wGtWEG-VUkccK|xqQ6KNVb?6B- z|JwbxihSw~Twgilm3>s)DwL@>2?pu4(ux~OK9)oaP{u-2O4i4AyjL{Z1Xnl1TGI7y z;Zk4t2!~~%jlC-Jo3cLioq}72;&hErIp;=I?ORAPf^N3yh%RhPE>ZscO z;l!i&FnM7_6*vaFRF58ruBA~+m=`|y1=X@%k7VJL;6xz^Qgc;f10k+PT<+SO%g~o( z*xP9&HGrHFXAxZnC~6pyA742UyYDx87$EmScJN}EHOQWm5(2?SNpa7Ga8u;+)tYPJ zP#3Hn4kJZ)g{?T{7QW#z-%T>rkTz6waa+z6fwKbdg>~ksbxH#Tv<-n~O0b=1wo;&y zU+Gme$5YaJexA+aL|@GjE8#vYJa5UKDO6Ss;>AR*d&1^%w&_-5_dL)si~Gz8b8WMM z%B^P*DQ0KcvXUe^%2ZhxaSo}^>r1ypDiTet>_(s6$xNh#xn~WZ$@HZizCR1Ua{Bi7 zp~Rw2NQvIk+#%pom%dl`D|Qb{?BO6pvKQ;CFaD>~|5G>IKkL78+0Or2|Jrrj6Xzb- zXyou93|KLcWv{I0&SD7(B{`+PqN3vQr4s-+zf4V|~c3)TX; zwC&n2>oQmxyabppuhndJOBVqE)?=q0vEuUrs47)C=b^ zOvGn7$=#A|3@vmSwFAog*tnr>ob^?OhlPn!5$1_~l1Gwj z&&hY3MW?#zy1T2Q_`kbz#w=C129)AS!nanOOT$50Kc7cGFH2L}yYNMyXU+8S`j=lX z?rIoV4kyl_#TPl_R(9=%n1WigM(Yn<>R+2#sl&J^o#03N@dU$S^pkXV|#RI{mq&74B zj%mWDhy`WEnT499;Zo(cm<0jOL8r>{-ImlLp&C3=nfB28W4EaZ1vygx7Dba$rPF!n z2*CULKHr_7tXrbeP}EHBfB|GlGBrOU_AYHVO}_AYhK3pPddayTf+R1I5%{6KMTb}b 
z_6ZdKip?PnlNV-PPIM^uhkjHj9 z=GKb@_P+Bm=8m$>&zHo|2e7|q*2k=KVo-&ym}^-_n_bh(%djt&+ybP?$j_dsRofTZ zd8~>?{Z;H9THpREap8JtavQBg&dcj^e0?N`VZ@E%*q^t9KEd)doZnxm{!qH8xAyQH z3$3+x3lYrY=p`M-vt7Bba|&1{%`F6U;0Oo!s_{~+u?n*}hWqemX z-*w-7rcjo9t4GA-PHmEVg-sT5!r*2R9I-K&bYGDVx?h6kv>|la!{vKZ{FIqH) zKKDc#U77PZ*HB&f{3psc|6}@g*9Bj#`Zqw7v<04Kz!(Q!%|Vhb`p!}BcH3Uj&&OA>Tbm)`L(O9_dym5Ta+&5 z)V8nBy`*bSSR0|xvJPPQ1UHeWG2iy3dtvW{8b1+NOvmwzy<^?E=W*4mZ_25t<2Lr3 zzloo;CncJ12%ywvejmsG*5VUMbc#aFQ)pz$eukQy@AeKRLm3w-32KuE|1RT;H zix^yN5PR2}5wBOPoM{{e)>2;1#XBeanbpVUuPTV0!+U?|7fo$H+;X%*8VY@`;iCLU z<;y=A&_C)w0H?pZEi^!fydCki&~68)ek>0~4>^>UfIS6bn7}g2|;2Ltx$x`ACN}1A**#J5PhRq*L z$@b&`x&-ALkEnFkL>IFu`{~Dq7^&dIbNSZNBGOIk_@s*kDnQumV1+9aSCu1RY&{B- z3MBFvDH(_p&2z~Je0g$&ZgZV6&{Cw8ES;q9-uBCdd+yxGIkTGw2t@4t@sKAzje+CZ z1tv49r#W;3@3)VCyIw;CW%DFvv7}ZzKc)&9>f81XOmFTgee6lmK23gFd3^@0BswiT zSeYzC7pUzC)6E&3$CvsyIL zo>tycZLnn+DI!vlvr?D+(<9}f_=@jHeVK_`QpwYFtH7>^FYXq_5qS?zWuB2Q>J)at zgtb6Yy|VQl73LQK0ALP7jzQBIkV83VD=jN1_{cUW$RjG5LHBbN;vlj(Gm@bipb>%; z`O{Yj14iGLa6gHz~sew?-&6x1tp4`HI7swULxm%x!}!->CtK{K7*K zV8k3BfQM6BL#(cmGVuJh?Y{UtFT*Tv4$slBW-y%tWS|Lpa)yG3;CC&W<)pg?&-dZZvxe0e-am12HLE$l9_qpnVn=IhcbjiI{0Let42VLG%*S^ zN0(neU;T@atpQ9s&_6T0xv;|h({-G8`^ug!*NbkF<1I|wmsjC?_X?_?I;NEc*!`%h z>VTgtrq$*CMS~b?QeK@a>;LG5T3+jfTaeN6M(6D`~f?xTq#6%YztxG6hmI7mSi$Sph&G) zNV|??;|VD0^E8(}xUe5k8gE(z1EwQODY;-^O^@?gJQ)F?6QJ&(V^+?+>3R*_-{c*= z3>%ZTPAP&`e?0it&)Ev{ zL2XZ--M``xC$@06`>^4JKU^i`%4k2R92Q``jyEf?1UOLEpDyn|$W7FkXEMx_kmPuM z;z|?-QuS34nHB2WyWq3As$UkfrPS&RlRFbSj{e!_Zu#qX34M$b0FGSkJsn>oyfQw8B^xZ%eYqZ<$G5lZaW_|e6u67=$$tv*E#hy>^W=8t87*W+8#V189?`w-56quP`iO)3pR~+0JT1lWt>+sIPz(~HtAlZ+QJdK9xDp-C}>H#ny+kv;n7GU=y z2f&vw{o4+u(Qc*TKO!nN=BckP+6QhY}^DDwThLdUri7tE?ikyW}8ai z)f15JnKFN~-Sm{5$EW)ZPG5|S8cOos#65~TpH7(e=y!l;7CaojaK5{%6i$Vc&`O4YE^tQ@e z#hN7ektqwm6&f&3i6WCH32-`mj6@)DD&->UspB4%8!PJjWwjs2^zk4LFi-gyy4n!y zEI$4dDy@w=jzs~~34~NY^&o&xt$n_>fx+O$AQYJ)6t)7}My8zslAGk#pd;WEYjfCF z40IA0tYx}vd3%TDnqXava|zI#&gZ4Q{}qY6{R8a3Q@RkCqxGZLu&J!uyQvu$Q@rb~ 
zRmAKk=l|6a7;!C;Ce3G*S}sFfA^Jk_ps}~X>eCRXGG%hnU41@BD zf{2d!0v&+?<-qp^1aZ8nci-;LYj2m{k;3pYd&bbB_eY8-6k&CJg`j(nNTx^^;(N%2_=83m>9{Yg`$w!vc<*h z__~p+$N8DjT7KupggrYyVkrK$^b0%3N~=@4HLdO8?U;4$Xk=7hUT!Xx!8tTYbY?wR zCx#C6hrT9(A5>3?{$SL3rbr{aH4`5?#r8^%muSGG`k3MxsoWqf96^qVkp*Rgi5j-?|&*!kH`#flY$KjlF4MhUopRjibdbsbi zaT|wir)Rb_iI*pWH{0y4q1_5SjjNHD2AnBfqE3asC+oJHK96#I4`{#EH=9{If7nl& zfUn=_FDd%3RsP?0JOAtbAN3!o{C4MX`t3=`&N+22IvCq!3wmc1@1g6x*mD-}%t5tEPIj0`(lonm753}OQ9k|m}3fboGMRX?i-PmfX*#mwKMr7=Qx8-Q@E2LI|OzywsaF<>05F0C^rs92CA zPeghGutNOmt&nwb@u1wQmqudb`D}GMPbw$}-OdZ26$ODzVj5Pf$N=dv%4!WJR|)P_ z!kXkcB^v}q(Z7Qlp-D-Z_<%q0(heO|XExjjujC-3p9SaSs)T(K%uw3<$d zfXZ5n2@-gDvsWjW3!P>FC;*V9epU<|2D};DK30g{;tS11M-h|fJR(`UY01L;T4eVT z5ID>3HLsN`X)&{to3GG+7)Y81)Cw~Y3HEERyvqcE*1lhL{`8`sUYfQjVq+!c8CE~o z1s)N-=l9?WjbsiCp0$j@_h|vLG{6A*RIijN)_E*R8#v8$_?%c}OX8Pdo0-mLWfVxx z4W7|F$l7!~ichJB1N7!&;cEzj(+PJ`oRsBK$D#<|g zyjeBgR%4Tcxdc7MXZUp?)irJy)~xXVvKc?g9Lj7qL^ zMBU#%7WN6ZjqUpSn%&5ES8Rtel{~=N#X1yW*QWb5OW4)yzrVw=_1D*z4t@e^nDtDG9 zq!HoQ6;H^LYDO6H^*sFl;{@gdkv>%4vGvKwUT3F+8v;Os^Yig{O!v z*te0sLP6Sbr?`NfGSlZLV>%vxPa={*y=Le`d}obA)8`aGo-YlPOhKQ`Sr9YWc5Jj^ z-qZ>q?aEeLl~99quP%DH2#4gfq$N6bh<{NxQa2M#O!QzW zA4;)h>o3!V>6~i) zfbA(@F8^Qd!v)E>o0Ry`OFrkK%JbCOFX>L}U<;t{R*P(Q=rKVpBuA1$k}y8ab!AKn z3C*?}*O5Q0Bg&G*HR~<-=gXvkHT)hNKxh%Cx00VhdWK)9Y-p}0X{6*5h>*+`Z5Fhw zBzy96y6f|bLxcK!B7oO{r9QQq(&R=XPPyXYu`t<|yVMQPJO~iZ0wUm!?N!Fvc}j@# z5YVHJ2&IRFtJZ;W{%T9rz65j;Y=AiFO_ zZlMh@gr1rKR9Aue^9QE%;pL&Z$D&028dL#0&m2n1;mqlDJ}YNlkc*5%%cLQ2!1{@) ztf4Z=PEp5w1aY}3KjmsGYVG3PV_vE*Zpbbc&6;G5$K|sx9aLwqJxv4#^X@N3QJI6$amcurRq+z}a2y8V({wYXU zG&#h{(r2A)0#bDG&j{_T2wu%KQpwoE{~`5o`Ob-3VQ1s3BL(c4COqXFA#++=GR{lc zlW7%k)X_}5h2m)Bsh@7)nyZ~$53$j^7aOG__Zo$Vb>iC{iti%(IZT`nKYaE|l4weB z*5$%AGGC5w9h0y*_Wx1#o>5J%Z`ODcLI@#X0s%q@n1l`iLg)yZK?P73M7U`f~qJ#6TVd*4#VF z;}~ueu;28|eAgw19TY(BoWE~TlC2CJLY?9<{_1Q5)SOpUb(%+X_(u9uj_FNb+64&^ zUF}DfzCOvnu9P3!?p;VnjL{jhA~swkN*YC0mSHP|9Ru660i>J&1mrQTXIkG8%CBvT zuD_cBP38ufNLF+*sr4T`1fYAZsUCV;$_vAoW@9QXlFTn3pa@DF}#2 
zmMw1cowCrLyYynG^TWn77g3+u@v`bAMV*=TH?#BC^HWso6cVb`(iUH5Ot>hVXJlWD z>oR|Cv8wTR&+iKCsn^w@)xw^uS6-jLQg2=q^mVlQDCBOp>7!uxXLn^vU0}cAiZ77A zC;!SetE+Q0x>_R7Sy`9$-yDd4$3H+}Z1=#@OJ=HF&vpBF{%ZO&0|Jj@dQH}SYWC+a zDuF!@Hg#-$h2rp(^L@bmt}FSkxI8Tt)$_kZph@})H6b%nU38P@CKNN@oX?GVnlyh5 zo2C05${C#L-AOX!lr+Ji^{x71i{eQA^OXTOeHk(88|J(mL)(Kb0+_jL!p0Nu&PkzJ zhZM}?jQeW*d3(dgC~IH+NuiqpvFYxhGCm<%ZHvk7AQGG*<>0_s4d*t`>2r1t%n#6+ z_}sf()&3^NnXuy>oIxI~as$dve#(n{mQ6NG6$mrtI%o0&h{hew{z8~! zfMqq-hSQOjq0ET-q*Xesf=+>zOW0z0bv!wkpaaj3lWVyNXm`uykdNV9aWVCiTLsO1 zD$%K!&MouPM5#RsfjeHda;g7HE@= z!qTM0p|j%aT)h*>B(C_ag3TqhE@|hnNWlpKe5@7Ngm+8+>`>)u&QTGXRvPy_x)?2g zpO?x|`)MD(1mtDK+L6HTF1=PiViOEd=3$0mI}ga;RdP|Z9nY~bp?Hde!h(9Pim*kL zMLNpu1&`Iq^Am2HE+(d)0oS^-IJpmiQWFF=bgOkGg@bP6fy00uuaK>)YC9aW!iUys zxZfLpmd|S|{8Axs(|OPx7QLNv4Rdv4vPpMs{8g8MN9rRH-?O$KzV3R3QML+O@!tj z`iaEP9;JXt?f#a0c2AkntC|c)Dsc2yW=gJ0c|BsM!Q0MTt?9Nv*^ZT=oq5$g8T&CG z3wO#QVirGdub7JtiFmlraW!&Xxre%Kx@F^&oO!OI z|DOHawkF+St*LOc_9@1zwSTd{?eydj$Sl*}@V>IfjUKGMT+Exi@|Lq?Q~p2Wn~Vwb zGnaQi+MMsZ-CXYs`dV?N?-rQ8t>P4yt@v7L8I=2fg3|&FxMP(ev^0LE<;~r9s^etot3fgx9Y5@_p z$i`)rRj|qf@^muF`=*qUu+hiqr1*L_d^s+zRQ8maXVL{0>$7qWY(tk<+b$tvD;9XX zqa?d9b#iW76oKT^8IA`Lqe1eOrng2kT*1}}v&d06_Z74_pQA- zxH*|vFd0JiQZzd@+d|F4k+bnqF5Ix8Q%s5|=sK55JIr2mv^qB>JOJ>fzDI|?h7_{- zQNQ*3nRljfh9X60t6qe~eJ;ob?j-CTB|IbbM z-|-K;uz0Y0e^YUB@Ci2V3F9Le0F0@`0Dwsvn7~J1|z>K5&V1C z*yJ;Xvf7f)NWfYI zeTjA!s8R1B4%w~@(-QeWU^OM|Mx(zFnliWmPHlUZOpcr3LqlZ|Lh_u@A?_GZ+;N_D z$ni27y;&Yv-cnDH$*UWMV0_3=qt9^}WnIdFEf+qf{z_8Yh0(wD1;c(?Bm|VUE!EG8 zU$xA5NryD=54jX~0q&!tCL97YehnC;eTCe5Q4z+t64c?x-J=Te0(Ar!N?WgnG3DH) zWxLORTLD;odj3V`@kxpN)<<8e=cma#I?eDd<-*T5|6o0UR{SPuzEg5IhN3G8xP@k~ z+*gmP7D(}qefmARl1tjcn}u^A^@f;fci1MW#B_joM9Ql(anf?<11cC&$-}Nlpk=_VvH$bLzh*jF3MGT39bUmNTW-B{Nl`*V~++ETrsBg?>F{^H(@~*r;teMO)TohYTP`uGG0tRD3 z@F3lC?nbNFqukLsFaE`IK8;bBec7t=29h)l-zk8TK$`T(ZXAF+b(r26E1b4WkG}9& zvt(R6o2V}p=Ee^s_&fDhTjExeQXbg+%@2M&oprrZ-AQB>}aI304iCWPJD#Vf0i(p zhil1pyRTZ%&P`JMZ#1F*)c=FBgDqZX$vPsg~MP5yu9fOE>JtN 
zL};W+MvhfO%ejSM-s1F8A+myCI}Oan7yyzvxrMRJ$i8_roRN44VB=|{*uE6v`Oo~4 zDlCrzsC+#8@jk#Q6PX)$`Jr8)vg&IYx-Wxe9z7ncFb2A$emg3_y_5lZ8jGz;}R#9I8eXs4X}IgTGcgYPWU+EQK@5 znBqji3ZgghHd58u4KNOKQ;qBow}L7Dl@o@P>9$%SKr%r^m{zvm2HJ znVr@4@urug5M%Zo+*J8HT=s6wA~uY?Cl|(SN$q>5S>0D0)o3$C-pQ`XiPv_RO>Q0C z)h4)z3_gA1Bp?}ti*rO3`AWlYT;-s1IIhxVM*$M8T$upJ{%V^{1z~$V=RbCH>-S`6 zG#A-5XTMC7`}se}xtsD4FV%in``8o*h5Dw$uPp>kCY@agPGhQ!^F0vKRU1VNDKB5h z;|j}ncB$8j8gr+UTPn6C#JtCwK1v4O>2TQEXyNosDD{prLxDTqmMhJ@2(YmBVQ^h(-i7&FMVWBy=SQ5nJHW!b=z>$u4?{Y*w8=tzwmB%jrR@r(9>i8hsWz75IAro~aS{I`N$N-+G*8d4nQYFN`&Ip0(T!iKgKa3ytJEEz@JXg7_c~UP)tzT&E$y%~y zA5B=&Gd|`COujR>9fy~snt}o%RubEIaRfkLcC?y@zWm5&GDty?2uu`i@3fqT&~4Ir zxXA)GJ}X8!X%^q8pNZYqWPe5$#5^m|#z|4r9Iu^|qKV9U1OyHg#JD&6+Pmd(#MhyU z8sd2?$^#pZ&4!b7SqjB_Os5kJPncC65w{RLd1RK~*ppxFQ*x zl1cBXGz}>pdST=9I&w`rH=^-^Su$tn%bh%Sps7*i3Hf}s9qN4#Tqi9^)83KYHvNaO zS@rN-aidxVOlx-_ZN=Ur@6SvVpLcq$4|%v_{&*<(Lc(2WRrn^VV((xHV>^Y)ZbFM> z^e9z)Z@$<7XH_(m$;7{k5S4IK42W)aRG)FOg%-V(3VL~Fy(ZyhL6i(qRg^!o22kio zhkc)gr(;XDm!`GU#-|;yBi3j440OnGJmLNni_Y7?XCe{>E)ph8Y6KK{rXKC>9Xm+3 zT}$SvRh4BYa@&=yozS_%`NqMERNi}>c2q8Uw%t`lgb&BhzJ|hW%=lRtDp3|-ab{E{ z;(WCvBB(H~8YZqTM_fak)#+#U%c!Zkm>ibxp&AhH=T<1}dxquJ7*mZ~G4Hm3C;Dfw ze-QKEJ2s}7Few@866ob}hzwiy9fV6c`IN|DYb?k)OG>UQ63F)L!??-^mV8h}oNW$R zisCVaGHu3FfIY&deF-vK=I~q<@)Yekyu_r*_I=X`P`BVhS^*Dl%vqU~jl7AdL>skg zrGA%)FX62kxR);~G|v&;s)f5%;J8gz^TF8cb@Wz~{$XdMN?8x_X0??%I_rYD*uqZy zMaBHIR=L>aE*JkJQzKvJ&7!CFk14_d-BfcM&cIae14rriyR@>O*Z;ftuYdgiIU2im z*A(A!Oik#y^gZ#=0`u}>KNy`wEg#zRf&l4kz)0D30?7n`m`qW{`45+9QB&7r)l7Ov zwANzN*mcP5se>idJe+a4`p-a3&uPGNKwB$bSxpIdg2!L=MUfU)51V>d<`_KDxDp|A~-fSrl%U?I=VLCbP!>#IZS_BhO@eA{!$9(ZGFQ~E6 zyKbI`4nrjl!cH^?SIB-emr zMun9Vx;)V>iuu!Or5ekk91;Gxm;Z$2KPe8F(+~-58GZ2;vj0d+kkyI~7KH1ZFfjK# z%ZufeD;fRM>W25TmZ?(Wq*1@D6oyB54~WO(yvd6#(sx!r#l5c*W*%$QGZ#Y57bRSI{VS_c_rX;re#v>oIQ7)UKE^ zhS3&glwBc5LB=pQc+@ekhj@Ym4g_&vj=JNxa@o!Bvud-}{)IP+VEP5agV{q8!|?Nl zH8C1$pLHZQ#AdCVNeLq00J~ua&%e!c`wOaGozKh)t#qzv(uE+-FOJh4#|&l)N@51{ 
zRmhgYquN0rzR|g2)t_s`MDJcfY%IblTluTSlDO%j$$(pjhlIC$roZv)5r|m8%&E0n z?D0CwOZ(R+8hyLRT#})S1K&TWwWo55J!lcJo#&IXu_NNcO1QX3Jh@+;ci7WmE@l(F z6}Er)!^d)-z`9~*#tId}Z7vAn*wx$@d)puiruc@(K3)$y_QXd74EHaYPDxGl9?7#1Z=9{MJYp^1 zv)HmAeE$7z-$;DXPUv7z$!MyLV~EAq?2_>KqJic9~g06tw3G z5%owvS%hVYYJ$?@wJLfoG$?~vtd5KIVoB^Kp$u8Groy=UT!N0JG=8D4j`0%&7$?4p z{+qzY$nD%Xydl6lP@>HPQJ`Qa{k@DP9k4OrU9}LDl*LwDymjCB%c4`xM49D+Uj3lX z%3}XtN~(HcSn%ibHo{SY=LW_%<*r?BmRUr-#i)b6DQ2P;#l#*=pJPdJ*dv{lzCe$38_>b2h7! zM;sfuDn2}xu_t3Au8g^*Xnkpb(> z8jGo}4<d9yJ-c^Ngub^R_n-*2`*r!D$ zf!FIJi6`)5V+ZO3TP(0F*#`d{q`Y5kX=f|O-FXM=-r<{gq3n4WWBYr8(Ar|I4gjUy z)$i2eXu38zZ>BMf{22V0ck!NM@CSi7fO~LO-Dm)Bm8G@ua(Fi&X!6N0%eKE7mv=;Z z>_iCp?T}a%@H9KL<16jMBTiC^jn*vMuJub~PBrEx1|FQb;~TuS`ocYr&iF7!uzB-} z%Jm|?N}=>c#`AuBlDFi)!u|hTd;jm}KlpES+uu;!{u0-#SHd~vg9d^5MiQR}^$?KR zW*61a9F#=uTejN<0Eyy{Ny(BJdSRScL1&^MR^Qbd3`v(pJ7&f+K$13WCI{Jn0LVl4 z1{4%{FzD;C6`VLFd1^L@oAe&7|GdhIup*Ok#5zHc;GkL9Oek5;EVy~fYs{+W;$@+< zPjc4|t!DbL`Joa8fwb}-Zi<7sTz4bE=u{f-H}CBZckY8(EaWBg{zz7X-i>pfg2V_$ z)#q{VgKq!JfkA-Aq$owVcfgYBWEeNv6Z9udbRgF;DyFhgvNo&G;&fVgw4M~z!JS_I zN?NjgU(s+$uC=`g_W&C3IRG3!s)3UDqaUL)&{lgHOWZ)l@K{vn6>7Jdp_q_H)I9grm-fHXuf@yj4H=u44VO z%fe9Fuh?|S_0Cc>bh9#2^W;vZlI~1SKx?+kRghQIfLXlT6%5Y7r@gR%G7Qkf;N=6e z(UR`K3!%wHt)8i=1{JWwezu1H@3EHy7uuwI%Ta^EvF8buKB_+jqP8x0%8^IGOA3_xQD~T+f20Wec9y&? 
z@yBI~3)t+73;vw<>H#*PE7FH&?WWMitMeuv2dC_$<(9q&-O0vj$Cc!j?)4=rJI*IK zOU9=PT*xdUm7XhFq+ zI}3bCn7M;C`QlVtd1}^ZCGJ=cQIi-Yg7=mtSe|xFAIJo_3+HoYe$i;I>}STjK{j7y zGwWtfs0AHgXti)4n9wf%dx&=n7=y^*BiN;*!hjU1^ZreeCEC}) z5%khbeIr>rn-t0noZstiveRBpie))h z&6NEM?8!z%W_R_YLd0YB z3;FeBLGu7ZOOk*EP0Z;nB8C765lw?2fMm(Z-Ll@%4o_AMQx18-4E$UcR~*pHDBo06 zHReax&VGzjZt1J$53iQb>vk}PtvVmwsq$1;vxZ(cV}4N{r{A0Nq)`T)IC|KHPDVW@ zsj6fsg9Xj1(FtHph5^MS1nHP*uV~Nd676G1&)*knu#G!PH)u2eO8|3~fsy z8R4G3#lK$M6Nu_Ow@_QO?dx~&&|H9OPLTIsn9-+&YecKV!L&jbRk-t+U5b0}5 zX{P_0zc32FkzEg|kLPZ7{-i^3onQ@9PI6P~Q2?zZTO7?=*{QssHu;Aerz@P)ua2Lh zAxHVm+xzWerj6~ekX|(vlFv^^`f-Qug}x_rLfRA490SHV6P0^UeOI1)kw+Yit_H*F zsXK{FLXI?ZUppIU2H^alin2lLnBB!OY5cA1RRLonjv5BaaozgE$^)1JjGiD6xS3Oi zM)v!5%w-e1ECQJ$iB|5jaAo2zw$zfJNtTdPXr}fz2U$per{Pw;pTeJN=jsa;ed?b% z7u_yHSthG};4=GZGhZ%o-S9de85&|09sc5A zuPuHApw|xW7nb_sXr9iG#VnB0d0y7_$iHe*@lguATxS+TmbrhmqvUjFtI^|f7s3K9wPV7rnx}02ch@ZDSjwKYd@-0EH zRG%ub!v(SHOi>a5fNi@OFxV;it|i zaESKZST6hfa(Dx|NP2-}7)ln=EZ6@k4@l4rv1S)wLd}O0l&^={N@-lhZUJ#=cF*PL z@-)Di=pjjVB6#PPaBNnMeqY^Zrh-aiv*wzk#sdzy@KfYHDV-$yAIbLYzH0dP@P}{HJfBapYyM=e>c+wPpzopR2~Xa*M`0oy z?8o*&W<6Z&pW_xXPsc00KG?Nt0#4B{_s$FJ5e%O+PG6}cg}=_H^7UlR=)9Blmirbj zCI>zxcJ1V-dfCIPFProA?v-B<>bfg+>b!h-YaVbZV9G~JwDtLA|MvgNgZ|0slfst=|+-1hv97*tgUz{f8QZYg|zV6;d)&(`p_{X3Fr(S>C|Z6C6bY`Or<^X(8vF21Hs;pkbUj(SSBC zD&?#s`HwqGQorAx=$y*@DQ=$@)t;ITXuN>bb;*i<>R%?|ATQ+p{)7=n2tOERA8lek zla|CcGsih21VldD;J5!v7CnU$c`()f`SX=LvmnwJ5MJKdkOAD#PSrk<0j|2?OtkKj z=+;rK=ep8yFMK24hn}!EI`cE-#&Ex%k%Y5uY~G- zW)L7jLkH3C=}C~R;6JC5JufA$68(vrmxs-~+XwP(eouUe>4pLhT}iF$l@vo6Lrt?7 z%E$t-5QvQKHz8Dx*D+__KbNi4PW7Od!nq;k$cI3>f4O zJ!|tw(|FI)+0#;+sjcKZBmdUTKdV4*-DkS(!!1Asm#R*|!H4Icm*+#5Oe=!ft zu+|)_(IM$a%BcxjNOv!~tI1T;e zK86;W{qha6i5k47NUMFF%Mn((v-7M$!@>WV`4hOgi*bLq>(kcS;^q-gSm~9?1d$<$ zud5}7CnB<(I#Lx}wc))MeNTOOz>8iH&z<|#R$S7|3AZW~tVFJ^7awl9i~94;Zw($V z-{tk?O{DSuMsB#lX@io&m`VLQjnImvNeH6q2){Bf%kn?tA}5Y%B*#ch&1U{;TW2h+ z>NnFrMnlZJ8z7G>NC1@mD#Y|bI`cy{laXGKs4{4xuXq5yQ{Px9)n!*-|E^;GF463Z 
z7mQETAaOGKEWz_!t^T7pD6n2MO{A#RUNB57n-hgi{doXge?jVn9XN3XhevD#d~*5{HjZQZ(RfrTJb`Xp#Zem?|-^#g^CO=074 zS>>kg=O>E{gy@=bF}9UI8b>Y7*B;aK4UT+pKlyc5$A}qrQON%-jYp=inlJU>x||mjRwivU)*ygPQ`Cjx5n=CC)fOH6y?Uin-L=G>aLf$OwZ196$+XG+ z{gKSCsUb@mXb2Fv>n1mLbErF3#CfBEd9) zka&Pg(C3mpSdcxp(3)>jG<#W^>Yl4>TzjiwK=w?6^_r+5fYn1VP#3?wU>bWPhXwa) z<}!^?74T42oHl%gX;yFr=D5wm166*yn0q(|JSt)3FqusW4`hb-G&z3aC+`Yu3`%g6 zX$48A?nvsnx=r*Z+)>lxyFin2z~zUg*UH3~F|Oqsm{I?-&Cf~fQxoP|sJ7eo2w(2f z)Hy8j19qq7Won7_(C&9Yi>Rxm`Gvrl}a+81zaGg#prV}-=GVB9j)vpQXVOyjxcfv!Flp^>#AN++c59a?=DmOY z-dNDl<4tRyt*T%<-Hpbfg8hb?(`x1L!shmltogZ*Ev>@#ZS_%Re*|;aE-#5iJ*r$`DCqYW2lmt*7 zdrDH>Yf81B-`6U{Y3z5M28-jq!zWL`)SQL^uRo~52+44um`jue_DO=lSZzUkUbG28 zF(NLJQo)B1*ygXyWa|j9(p4zbLw%^J6|GSb^Cm@0QAH}t`dmJ`Te9zzA~aD7DiRQztCsj(<&MLx1hh(N4Aa*#QB1#7yJpX~nEX{Cmr&z|-Cnit3T z3l?c)%F0Lo3b^z#mJ4E$Tu!c^9DcJY#6NZRG(c|(>!WTl)mzchJeW_Y4VNv9!36}o zCpUY47wonFn0zZEUb3Z>?v1^pMB19pv}L=F&+$7jR-(;{1SCddw8?^6{i>D#m}Q8Y zgjP11uCf2?`js_SbpLdJ@WySIb5Tvot>0TA2Ks*s zOGQ@L{=Pa^$K0&9A}MYdW9-Ayviot=zGIbM9+)QTzG+ux%@xO z&VHXinq81;B>Eh!7PGcMulg1QyZ+@dne{BNo&1{6j1ePuy=Ga*kglDXro@0J}rVWT0XC8(JwX|IpHy@ zMN?Vf*9tu}k?QBtMHgv+p*;w3f99PnN$E&09#fz+6!y-FE(90xW@kY0j4Q8RC`8>5 zazc#0M{|U`_Qz-??Yh=x8vK&1Aht+SG9+m5TZyJ<`BflYYUWFC={sqY2@ifv(<6+< z^jtKh`FXFlt}+WIc2N%Y^jib z_g$ooKEJB7XT%l?S&D;wYsgqaZa^(W7Pn9>h5B}{HC}M`xrr^6^k+_JqNenE1QC);Adw&u>!GNXX@Qtb-k&mhz{ zuNgYF1~ZADQ!)op#xpH*DxZ*CxkWGdJkf$`Yrv0#dj-zPOpaHoHJI!M8ziCNi8ZPW z#NhMeEoZ%)Xw6zc%oNAbx>$9BjY@-3B4f+P&JP;!haKNA0~Ny|5*F)fQT7Q+n^ z47G!#$I1(%?GXBvPV9{{yw)m;4|2ur0(QEVlSO;wa8nW%Zi3OWj>eI!R>=OCHJpjs z&KKk-NpChkp@%2=Og?u1!l`^C%>=K-ICz&^Y5t>pt+HQ+aLwmw%EPd+kCZS-TR{owO)m5F-Lsh>dlcJW^K;Seg=b**(*WZpk{vUHkv zrNe*wX^PvM>#jm(KeaCa%hY#QjVi7ttV}rHemUBIV}3z9Q5 zdg6Ask^G$uNF5b93hJvn57AS`70i1g>Jo=zZBRcA3uV}kd>3X{_`FOKQO&lkB|mdj zi143rnus{@ohS!bodz-0#+JPBTc5rq)WAwRTss~CfiY((90;#<3fQBTqKkl5j7R`O zxiX^7LqMScOi1A9uQbC~P{F9g@c=$&huZ6I@jgk7Zxyz#dmX_hseKHgrE49M1P`tE z1cSJ~7?Gr4p|`tRvK2^5ki0V#y={aK>wy0K;H%lv+@ERc4;wh^g-)oAxjFkEwRLkh 
z=EC!6F(2WJ4|OU6D_@PcvPPUzLj9Br>PWng$$cVi88sFnrqEv7E1fUvUgb4c2M6}I zeJMCltxsQ2KcPJO!n4Tj>|8VJH0jzlen9-l$j6 z1UHj^w8?s?3pYFf_NI3qdB~bEi`&8&Y8QSqJvDRQ)4n*DRMShhT6C(_YmxrV{zr^^ zlKL0ci`E4*)*+3Dq#j^N*-X8VxR>ZH)D+g27Np84FG4B{89sIn;m;ohKv;PGdf8Ac zgmEGxQ+byOwlvkSMle-;+98NB3n)cfbI1%5l_;mQE#$FdWb?8ZD~-ya>94YPSsxl& zOYajM2P`4Hh#5e#y!58R$4#oB=n~F(vA3umn@dFq#W56~i?FJ*Vh43NL);%`NtP!O zMP$!~Ex1}8=~WkROgk1ZoMI;jQ;2u!b?=U~7Ec{_BYWF;#y>RV5{}pYpO1%@Z%R{o zlYguKQWe+x?g5I=l_^Vp80VIX=w7Q-@u8GVRx)`yA4ap3qInQAPZuxLzy^A0Mtrb6 zC2PCodVk$(M@)4c8PiV#y}x{Ac*YH*O^|vTLLkH3qgUN~b@CiK&}(;E0=)whVu5pECP?AK+_Gk1@ z6F4=vzH1$OkIAI=F_oij!4NaL6!YCcjiwbqh`@XuNt&Sy)Ri-s<%5YnU=yeZq3FgU)a9)q^%Cm8u zA9)A0ZWj&}YFu;WDYy*|aILUQ>3pmTTt46bl;Lc4?KIDTe;g>%_;-%)pOiK$V;~u8 zGF9L6cpN=*V`t9WfgHQDVik2g%k%hWUaWFe9b3hct@2lAj>Q`%BJA5^lQj(hwE-s6 ze6=PVJe^WmAoypl#0zy+PL*r+A-Fnz-M}@=uQXl#TMwetNX(5jBo?f4GXY|Uel+Uq z2*;s1lsK#ufK7?LKkC>d!!1QLKt4@@3vw^5KB%65;=cM}oP5}$IV|DQK2C;|!4m|k ztB?uuPwo(w^O$PYXR9L!%7;2;=;jSUUUdaWJSpDjZj*UO<3v_KUz9Dk)dcI)bnPbv z;~cT`a4!Dus;#SXYM;Wek{l1gei$=PVG~oIYDz~;oZ)NzoO#P7c%xPFKzW#R5}c8y zm)6i8=dpjESC4PwvH8O93^!EKqJ++?@S!HcRjg*JQWD|=wjUT&a zr}h>iD=gM;VDm;=1%S`awJ^)QmN6DjIG$smsQ`WYLoFm-o^$t3Y_BcKK?P^65Or+o z=1xjuc|Vr-pI-GA;o>}LR9k1K?J~ceOgfB}onyw*b4{dtft4(&f&-K6c^&r z43|Qu!X1axdEu4Q7F2%Aq*$!oPCEHb@{UNg$6%Vlnk=R7L~z zlgxlr7Rx9kzMvHu0br_EtF^>%x_hA!@i;Z6eRlXv608!megB>3;Y#jm(L{brdo#s zb*#3u{?<9sF6FP`;GALBTlQ{0aC{jR6t=ULc2V2YU@ZKp5O-A1#I!ZH_l*2_nn6DH z>&E*YP9KY!uj+`hmWyQujafDYZ==f_!GxlKpf>3nwUwDQN4oQQoC(+wqic=XP`LB0^0iq; z1*ae)-u z@a{SF>N_UGn!tLR(rl1gFy$Zj)^kn9A<8ILnl zZ~SyJc=uHVxwOh5h}&6){=3+i?$g#29iK{E1_roQ!UV{{I~McrOg9Iwdap-%RI~=1 z$>=JR`IwdHBITTzCtLCiPC!W3+1)0hqAI$!xO*aUr!RZ?%Bjuai`dtOb7n@pZaI#>z?CDs zf7j*k)7y(4uqa&XBB2(o#B?EvbID}SRR3~`9m+~E79H9Hsw%-tC;F>}q2l==$smA* zWDcs6K3iSs>f;pHTie4a;a$%uJT6Wywl}FZTaQ0y?^Qw^Q?E1TPhu5voAZV=r^=`I=p*d!SWf za0L`P_hrhq!)BsDuT!t*i@Tcfx`j06-RJF4TSa#BGv2V@D?TA@jaZE~3KlJ~T2psl z`|_j*yot#F6msd_sM(@>nomt@{NnZAcYaU!8QLGEP z=s6}k 
zjvqqa&3c;b;T)~b!*`yO*M+hKaIHUFpS_cITaYWj7h>OnPORQSN2x#Ye5zRzRTL>e7= zZh~?N4Tt=eC;0P*IC#!qVJfo+xr54^oE-@axp+ESo{@87XVMaPV+IR_q$YI^9!xTq zWp9b;U__cc3Iq1bzvz16874yvoe>U-OB;5Rq5r(lT67Ykbpm_?aM?f_%&Z!J_yU;e zX;6KM>DZO5!$d+X%Az?SC-6qJz{$5Yq-?K5=8Iq!@Y{-a1s?nXT>j{d&xsEZ8W>)&}OaXm&^u`UB!3fM!~jBe?+#x;hi=d^&T{d4vPuRx#f4;h88ePdDWqb>+6%!{ zDS0YK0)VO0D@QF`n0%;DO;s{WvY;5u&FFhX4A;7q4?ps=6gSr@^LCC{v?s-?-A$Zs zL8(ij(a}4AIBXGb>l_$ZSiRP4HV=4MK7)VB03#6Gc(J`}0{nY@M&20PP^J{jWMHf- zw4|y(E&oY)BGVcCw`qGGh)Mw)#>D~R`p3!?v%_zfy+M{&&3&hay=9wX^Bg5(lZ+NQ ztcPNPWQjwV?D+h2ILsFoI?$L1->9R{#?*MjRt2e7G~LNxN0-E_iL3NsO{%QVhhKd2 z&kY3Hn?F2}@9zn3`s1!xSQQMq-*7`+h%teQZWwMLeYM&fKYz7Q`IR(mLHgv^Htt+K z^26UbD?TaTScq`^?UjWDFEZo*YwSAPn!wg=5|Yp{^bnCrXok>22t`0Agb->lRB0li zNtbG2bWRc=gr*Q6kN^q@2nf;@unb*5q7bSGBUQzMA{J23oXgz#aOXLa1OcxuEyj5vW7$QsqDD|{6)a}gb6-qyv}F?;2wcc&`t=JbOIC@ zF9X8I%~>3jst2J*S`TmS3V8ODLuZw&Cl{(-Dw%VqsZlQ^Q~<`u*$BOC@s8MVhbsE8 z>AhnAFyEo40|drZsPA2MeW`o7G0O?%d@)RDxp4`qywPGPU~KsQ5?2=5I{hvb1-64K zY~JzKqHRbY4LTeZ`*PNabSnw*!!@(PZm+sAd2qA&f(2jQ*@*njbhdaQ)NI5d%dmuh z!`;M?LGAwjk7pdIZuhK(9n0;`*GoPK}t(h zkF&9&MISM0x{p z3g&WTy@iilbYT73$Kwggtt&5&$1xmQHG}(kE!w>83%#|y{f`o&$#C^rTex%=aP+ns zWpGGMR#XO^2UR^H_NkjR;Q*w)X}Mj6)cF7)SA-Bppymc~3clzEIW^b`?mVJ>O98~x z8Wei2wROUDbf#z2^bJ0Bs7Wnyg^HD_B|bId_qhGdv@Ee9IFBVU9~xbGqpE$rJjOxH zX<=ErtUIyoAu>(Gf?lB2+N`DdAln-!Ys6#nbZ#y>ABjb2ZC+H|hCv)l(8-Qp>>tdo z&8QY|7?k9oGrFTT+;Ac+(_oG;MzVATTwy^VMBTUv}xi$ag(_GeyuP zg(Ep_!&bY7cT$ekPI{@WJwi_%eebc=-_qc<{^~dV%;ym;BF*xxf6qbr3;*x-_0RqO z$MpOA`2Umti-VmjLIl^OR6i}bQ>s6uz#fg~6ni|9lroa-vb#t}M55tepwmy;(6gzf zf#hB)kuVlUor1{b%4Pr%_>gRBa;gf7<v;?Yu65HR!s=C*9DIJ)qt;;Sce+I6p^O z`@q4pE`n@H_0Gr+QK&MOZ|MTwqfmHcGlY`d`v3GplUOW^QR301b6QR6R7tiR) z7vE8`vJgY2P(408tbamS&kT7@zrwE*&|S>dW^?vL)tTa892`77bIrt_@@oZc?HDPU zeG|YYC9}3$Mw5Is;*HA2-F1*!LIYeG;O;u!~0q zUbt)CKLn4<9?uL;^QeGm=?pimefTY-R!QT+!2&~%MyET-2&p6zBqaYgeH9DU6EDh> zv=Ai^wtgvOLhwPMTzA|oy{iE7JzG2Lqzy$sLtIN?EZvc-BrI8YZmWGVlv~B$i7(#y 
zW2yY}rhZg*WwmufIltDa88D0*7BOTL%)(QgZs(49ZT#GvLa!751;^qMr%p#QOgf#Nv5!qjT{Qwz}9WNF{5@tM8&9cC?IWPQ(a~?*5kawf~tR*vji$$f$M`{Os zVw;SX-5)myF8G^19)Z8^(>HI|4h*0D!=XZ(H=Qa$90OTj%LAPrg+_)@MLt0rhq(LdwmqvBAs4_aSiMuKF(~%iyJA)kbu36 z4?D_89EgtUzo)>6JZ|7re@oF$oNaC|wm^{G6goceD6jK&aLHq}X$d}l)IFHV<`++D z>u1SbMu%rtA%!ye9eIH)Lq5`yiql(itKg0Jp;+ZdU&5|f6p7=)Gp%fp0cRg zm%Yk70bvM-<`ZEBiTtC0NPO#iQSzN**A~L*XraIvAN_ECeEu`aIjL*t$+&k}KNym+ zcs*6kNNWd;Ry?XJp_UBF;D;y3sQ_#$^Gh;yCE;g;K~)ywS@9Q18Zul3spveUI+4H3 zt$Qpb$I-21r-$uckQ>QX3$N~zW#6R>v*Xe=8TzZwDwh34KdbkqHhbM0KU|#WubzRR zT}mgqCR4(FLbeHF_$mTsOfp*Fdi1@b6wI=E3}%AmD2h$npNE#!N~vtcyD5rzVYE$C zRx31uXM>pYHR5j@vo<37*81eK(scW=UGob^xjqVIAxZA`N#S7)Gs8B88m00!H!m(e z%aO{}1wV3&cosCWI<%T4v4x$}>7-;Bh{l}k%t_ZZy4ZOhX+9?)ie&h^c01mnj!Jme z-7_tvXfbg?UkpvhTVgDvsk7l9kgrcU4^^BSVsUaBhl3}Ad<>A21~|8@F7xQc3v+B zPzUr7Nl{lJIM-2Mhfw1tU`$E<32Yt~o74eRh$MKxz!qmj*a%<}iU8KE@!eXNS)^9V zQeApv1;(2^z6<)9a|qT=vvXFBTc0yVO_mqp5JvWf?G|2x6RbdVd@fEWIDi8G^yv5B zrji%jMLYcsUO@Ex)&Xs-^4DN zK&H3KGAuw>Bcnfe4M=Xc)M{Y(M3sbR{yJBGv}Q0E`nwmJY@}m90bTlqyg;6Md-Lh~ z#`cZC=$w_h`<{B1(ZJnms4WROdB0%O+a}=&}auE>g~V} zJQhH^wGa+Nt5WB#il$&A#!KBj0-H8p2AtMVIs2lO+QHdmWwA7mB*$>3)y@SuLCzIs%_FEI2LXJm`)=p)cX5b&-Afi)? zg(FGP(mIjtx5>)pu7)jG=^-OtO0ar@Knh`3BvVz>a)~Po@%@3T(Cclfwj6l9;^bEm zH>-GZs7NA(ZKBe*w+ZWb9sgPh{CEu&B*#iCvxj<*U#L*p+onX04f2kc65jZE zT=~T^_VMT?USivVB^6(Dy64H81GeXs4n1i4B(>@?_nhTTuYtr~nUc-NYrCJM$i4Ow zltVn+N;u7~9V0=Js{b0R`1qxB9o85g;XG5%cfhoDoH)CfsS3ScelX&{RMG#$KM4K# z#cKhrSn$VDzfnP`H_*uJ8(1PpneVxA)wnJf$O zD>!MO*f$K$u0y#>YoesgkrE_x#GrXajwbljPK}YU zPxW-Zn|KR$gYNOB=_IZ;AJj??Xc|jmdTlK>HR0%08! 
zN>3xNMU`7~!RC4Mbv_r2_^V_jFB5aUwj^7ovE?<(10vnrPH`qS1|?Fu?C;Woq^ z=Vm-SJ=GUkaZLw6!dN*jar?I@6kDfFKtAU=ddEbyvlG}}A#VKJA z?Bsy?ljgpv`=JHf+tETmQGeI3=ZR|ME#5vA&J-1Wt=GZTi*5{CI`Fs?KJp_DM?7X| zWiPk1(8Eduj<_`EUDhDw{dhOsg1OiQ>VRu@#e*FN`)bh&2f^#1$a)s1w!yl{;l(9W zcr+#=D6?Ea#w+{)f}}nG+5kH}zSWy^dc>M97ihz41DN+dKM3VtXrPgX7{s6$k2dBYRCD|&P6GZTrYd{H(&b(%V1b}G}7(dDh2%1KNOzZ2| zB$3MAA$9fKh9Z2#i3Q1=Y?)$3CX+%6qb?RI6xwZAtMrISe##UCMyf=d%TBsk%$y31 z+GPiQVAF}=6_P7>*&+o161$;`1r_IpH7nDZF#0%fv`GEv8`5fPA%MDXztNHa+WWb5 z1B_};1m$5=ll-}Hf(g%C*tCDepA@bX$X(N6smdayC)Ls=1FM0!`N3MZ2hZm>&v_Fg z^iQjlw+$tI?LW2GfVt>r(HPh==C8rYZK=CQtM9y9g$+BcfSP9g-f08p-WnO$Ez;XA zt@dB^S!|n~Gxt`Oxa*pr@#f@4gsb-C%86EP{~XOIW2LKiUi<$s4E_uMx|1(fDKDj^ zZLOAS8V?@@gxTnbOJPj%<(1ebgemJ}i}?bl2ooHv(x_Gz(Sj<$ND>)NG9rvVu{4uG zlb#!f_7_0Ia4_OJodt2p9r;02at=3Km0KOjQ2ybN+)x#aC&y7wqt2M=_4XUQ0%h#KNEoh~ur^myC;b`GsQP-&;cl+`iIIYW(sOYm5;yDTvAeLjBs1 zORlZ}$>paMW763fmnl<^(vRF%W60VF!>oFD`i=n`q%B;{P3GLxwfCbO;yidg@yN&b zdIE|yxJO(*UB8rq*nyqn>T!7AV~tp5A|`jYMT_z+1&MHz-Le~jPj1K)MT+{S3`_l! 
zm*4|o0lU4&b0V2{y)SUpB(l5OPpEN=yvoNJ8)q~t+2-Fb1-^W3%cs%}W}Fg|i6!e{ z7fa9p8z4QfDV=D2UJE7*aOg{v-(+S(0pd1{A|+=p15H%}g-Wjo#Cr%4$jDH0$aB5H zu0R~se1)~F`lwNc-NY}STi`eV*%^J~3cBmQ9AHT>2^ThIfuiXN-&YS1VJk8>FQzrf zVPW;D?8Qf8oHv`=N|PEzx#e1^jjZVMLRfB82`HFZXQY@$Y+Gc>1Z0q5ez@)S*{#m$ zv5&8hdr1g=cuC4t|AH!_CS0>@IAqo-+>4qIeKRRz&gi0Ltec6LmpFxMd@?L&hHeJ& z^qNgzpyPUU+{OcH-8hv%``XXsGNt8wx&A4Uv}UzrZl6jpw^%sugm$+-@-#8XV9S}Y zef(?D-MiGpj_HF+9!Mhd~H}$UPwIZtxx?BN54MMivD2D@iLe(4>z}j?3E!9+_ zieAYC12i;W^gr*#qWgY z_^|Jl%4Z5|V9?SBS;NQbN8+ng3gEyG32Ube^Szwg4#{ajE!z%hO<^3 zS?~j{gBbcx?RtJ2TblXJ2he(SxS*2Y5aC!!5Hojvn=}|fI@jlQg99oaG!jLY7d3yL zeR)7%nEJ|9UordloDFyFEUS(=9{7C+_Uqg)8aDm;23++WZ5zFd`IL6_y3c+C*G;+8 zj#NKh+AHUzV)nDV;orTqzf{sNPx&pDSz^9y+1lddKUj;iQIO8swi^@&Yn3YF=DCg4 zXB#sQ?`vDWUrfa*AwPe6`iX#WrYP(|fI6FKe z38_{pcapLcti!Jgjn0j{@yNvft5^ON)X~Loy??sQWdV>OPh^3b1$lSai%W_YgJ<{i z_X?YLKFrBZ(o~nJvMcne{mH}aMQxN+OQv3O_pSO83BZM!DYL8@e?j}$rX^*ji#Wjto(@ovE93gkIN)OSI!@5@NrJb3X!RzT~9HZbID3gK_Nv0Uf5y!T+9 z_2?B|yBJ#=Mht|5*Rh*)yYb+vUJ%2+P_u{wC)M!PP4{PUs;^1wdxVL#Y+G&tRDfG`78O-2Jv=Yu+jb3TPwB>B?nj9E|Sf zs#_F~db>Zh)Da_(Oh@EDaF`e$uE>p2w9Ih1C(2snba$K{3U%qU2=HhIcpLD4Hr$QS zoUBI$i-qK)FUCs{Zfet#6nm9Z6{Tz$AqRtd)6>GWZI_Y`uLL+c2e;M`1{<;2bhffv z#ZTcTwD34H(L7E@CBl?d@?DEGl3^!L_Oxic`m=SNhfFbMKCUpsPkiPynW>eonG=>% z=-GP5e(4a#pAX~A$l&E)I&Nkqoe7YaAK@PRwI(Y6UGZVZSY6|_?Q%v^bF{tgiNr$= zU+iY&#NZy-KnAZhi&cGsXA*E0i{g*g?kBHDk1J7etEBns^3<9k@fW8MC9*Fn<2%$C zy>TyN9BPsr&Es7!TP0S))d6raeuB_`)QDQ5F^q<8@SsFS)E}0Ft0}CB+XiL7yxv}BC(TcC$f~fy(i=VNU@>zBu;q8 z*gdKueFO}!7B+bftKzSA#s8l3M9j1J9BHEDMjmIAtF)X{6<+T*-h*CqdU?Y`Y+pl zqr$uLiq^5U`1f)bDvd<4_oOYKw7oKXip?83WL-hpU@w2X1SP-0`ZqZ3PiV;f8PXu82t5e7=PZ+AU! 
rnQPmA%N9cNL=)tHyux#nYJ+PYJe>cSbMw None: + BaseSDK.__init__(self, sdk_config) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.transcriptions = Transcriptions(self.sdk_configuration) diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index b824508a..c51d64a7 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -16,6 +16,7 @@ def list( page: Optional[int] = 0, page_size: Optional[int] = 100, model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, @@ -32,6 +33,7 @@ def list( :param page: :param page_size: :param model: + :param agent_id: :param metadata: :param created_after: :param created_by_me: @@ -55,6 +57,7 @@ def list( page=page, page_size=page_size, model=model, + agent_id=agent_id, metadata=metadata, created_after=created_after, created_by_me=created_by_me, @@ -128,6 +131,7 @@ async def list_async( page: Optional[int] = 0, page_size: Optional[int] = 100, model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, @@ -144,6 +148,7 @@ async def list_async( :param page: :param page_size: :param model: + :param agent_id: :param metadata: :param created_after: :param created_by_me: @@ -167,6 +172,7 @@ async def list_async( page=page, page_size=page_size, model=model, + agent_id=agent_id, metadata=metadata, created_after=created_after, created_by_me=created_by_me, @@ -239,7 +245,8 @@ def create( *, input_files: List[str], endpoint: models.APIEndpoint, - model: str, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, str]] = UNSET, timeout_hours: Optional[int] = 24, retries: 
OptionalNullable[utils.RetryConfig] = UNSET, @@ -254,6 +261,7 @@ def create( :param input_files: :param endpoint: :param model: + :param agent_id: :param metadata: :param timeout_hours: :param retries: Override the default retry configuration for this method @@ -275,6 +283,7 @@ def create( input_files=input_files, endpoint=endpoint, model=model, + agent_id=agent_id, metadata=metadata, timeout_hours=timeout_hours, ) @@ -348,7 +357,8 @@ async def create_async( *, input_files: List[str], endpoint: models.APIEndpoint, - model: str, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, str]] = UNSET, timeout_hours: Optional[int] = 24, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -363,6 +373,7 @@ async def create_async( :param input_files: :param endpoint: :param model: + :param agent_id: :param metadata: :param timeout_hours: :param retries: Override the default retry configuration for this method @@ -384,6 +395,7 @@ async def create_async( input_files=input_files, endpoint=endpoint, model=model, + agent_id=agent_id, metadata=metadata, timeout_hours=timeout_hours, ) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 03965fde..2039c2b6 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -129,7 +129,16 @@ AssistantMessageRole, AssistantMessageTypedDict, ) - from .basemodelcard import BaseModelCard, BaseModelCardTypedDict, Type + from .audiochunk import AudioChunk, AudioChunkType, AudioChunkTypedDict + from .audiotranscriptionrequest import ( + AudioTranscriptionRequest, + AudioTranscriptionRequestTypedDict, + ) + from .audiotranscriptionrequeststream import ( + AudioTranscriptionRequestStream, + AudioTranscriptionRequestStreamTypedDict, + ) + from .basemodelcard import BaseModelCard, BaseModelCardType, BaseModelCardTypedDict from .batcherror import BatchError, BatchErrorTypedDict from .batchjobin import BatchJobIn, 
BatchJobInTypedDict from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict @@ -759,8 +768,16 @@ SystemMessageTypedDict, ) from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict + from .thinkchunk import ( + ThinkChunk, + ThinkChunkType, + ThinkChunkTypedDict, + Thinking, + ThinkingTypedDict, + ) + from .timestampgranularity import TimestampGranularity from .tool import Tool, ToolTypedDict - from .toolcall import ToolCall, ToolCallTypedDict + from .toolcall import Metadata, MetadataTypedDict, ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum from .toolexecutiondeltaevent import ( @@ -799,6 +816,42 @@ ) from .tooltypes import ToolTypes from .trainingfile import TrainingFile, TrainingFileTypedDict + from .transcriptionresponse import ( + TranscriptionResponse, + TranscriptionResponseTypedDict, + ) + from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, + Type, + ) + from .transcriptionstreamdone import ( + TranscriptionStreamDone, + TranscriptionStreamDoneType, + TranscriptionStreamDoneTypedDict, + ) + from .transcriptionstreamevents import ( + TranscriptionStreamEvents, + TranscriptionStreamEventsData, + TranscriptionStreamEventsDataTypedDict, + TranscriptionStreamEventsTypedDict, + ) + from .transcriptionstreameventtypes import TranscriptionStreamEventTypes + from .transcriptionstreamlanguage import ( + TranscriptionStreamLanguage, + TranscriptionStreamLanguageType, + TranscriptionStreamLanguageTypedDict, + ) + from .transcriptionstreamsegmentdelta import ( + TranscriptionStreamSegmentDelta, + TranscriptionStreamSegmentDeltaType, + TranscriptionStreamSegmentDeltaTypedDict, + ) + from .transcriptionstreamtextdelta import ( + TranscriptionStreamTextDelta, + TranscriptionStreamTextDeltaType, + TranscriptionStreamTextDeltaTypedDict, + ) from .unarchiveftmodelout import ( UnarchiveFTModelOut, 
UnarchiveFTModelOutObject, @@ -917,7 +970,15 @@ "AssistantMessageContentTypedDict", "AssistantMessageRole", "AssistantMessageTypedDict", + "AudioChunk", + "AudioChunkType", + "AudioChunkTypedDict", + "AudioTranscriptionRequest", + "AudioTranscriptionRequestStream", + "AudioTranscriptionRequestStreamTypedDict", + "AudioTranscriptionRequestTypedDict", "BaseModelCard", + "BaseModelCardType", "BaseModelCardTypedDict", "BatchError", "BatchErrorTypedDict", @@ -1322,6 +1383,8 @@ "MessageOutputEventTypedDict", "Messages", "MessagesTypedDict", + "Metadata", + "MetadataTypedDict", "MetricOut", "MetricOutTypedDict", "MistralPromptMode", @@ -1416,6 +1479,12 @@ "TextChunk", "TextChunkType", "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkType", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", + "TimestampGranularity", "Tool", "ToolCall", "ToolCallTypedDict", @@ -1452,6 +1521,27 @@ "ToolsTypedDict", "TrainingFile", "TrainingFileTypedDict", + "TranscriptionResponse", + "TranscriptionResponseTypedDict", + "TranscriptionSegmentChunk", + "TranscriptionSegmentChunkTypedDict", + "TranscriptionStreamDone", + "TranscriptionStreamDoneType", + "TranscriptionStreamDoneTypedDict", + "TranscriptionStreamEventTypes", + "TranscriptionStreamEvents", + "TranscriptionStreamEventsData", + "TranscriptionStreamEventsDataTypedDict", + "TranscriptionStreamEventsTypedDict", + "TranscriptionStreamLanguage", + "TranscriptionStreamLanguageType", + "TranscriptionStreamLanguageTypedDict", + "TranscriptionStreamSegmentDelta", + "TranscriptionStreamSegmentDeltaType", + "TranscriptionStreamSegmentDeltaTypedDict", + "TranscriptionStreamTextDelta", + "TranscriptionStreamTextDeltaType", + "TranscriptionStreamTextDeltaTypedDict", "Two", "TwoTypedDict", "Type", @@ -1565,9 +1655,16 @@ "AssistantMessageContentTypedDict": ".assistantmessage", "AssistantMessageRole": ".assistantmessage", "AssistantMessageTypedDict": ".assistantmessage", + "AudioChunk": ".audiochunk", + "AudioChunkType": ".audiochunk", + 
"AudioChunkTypedDict": ".audiochunk", + "AudioTranscriptionRequest": ".audiotranscriptionrequest", + "AudioTranscriptionRequestTypedDict": ".audiotranscriptionrequest", + "AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", + "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", "BaseModelCard": ".basemodelcard", + "BaseModelCardType": ".basemodelcard", "BaseModelCardTypedDict": ".basemodelcard", - "Type": ".basemodelcard", "BatchError": ".batcherror", "BatchErrorTypedDict": ".batcherror", "BatchJobIn": ".batchjobin", @@ -2067,8 +2164,16 @@ "TextChunk": ".textchunk", "TextChunkType": ".textchunk", "TextChunkTypedDict": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkType": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", + "TimestampGranularity": ".timestampgranularity", "Tool": ".tool", "ToolTypedDict": ".tool", + "Metadata": ".toolcall", + "MetadataTypedDict": ".toolcall", "ToolCall": ".toolcall", "ToolCallTypedDict": ".toolcall", "ToolChoice": ".toolchoice", @@ -2101,6 +2206,28 @@ "ToolTypes": ".tooltypes", "TrainingFile": ".trainingfile", "TrainingFileTypedDict": ".trainingfile", + "TranscriptionResponse": ".transcriptionresponse", + "TranscriptionResponseTypedDict": ".transcriptionresponse", + "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", + "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", + "Type": ".transcriptionsegmentchunk", + "TranscriptionStreamDone": ".transcriptionstreamdone", + "TranscriptionStreamDoneType": ".transcriptionstreamdone", + "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", + "TranscriptionStreamEvents": ".transcriptionstreamevents", + "TranscriptionStreamEventsData": ".transcriptionstreamevents", + "TranscriptionStreamEventsDataTypedDict": ".transcriptionstreamevents", + "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", + 
"TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", + "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", + "TranscriptionStreamLanguageType": ".transcriptionstreamlanguage", + "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", + "TranscriptionStreamSegmentDelta": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamSegmentDeltaType": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", + "TranscriptionStreamTextDeltaType": ".transcriptionstreamtextdelta", + "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", "UnarchiveFTModelOut": ".unarchiveftmodelout", "UnarchiveFTModelOutObject": ".unarchiveftmodelout", "UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", diff --git a/src/mistralai/models/audiochunk.py b/src/mistralai/models/audiochunk.py new file mode 100644 index 00000000..2780570a --- /dev/null +++ b/src/mistralai/models/audiochunk.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AudioChunkType = Literal["input_audio"] + + +class AudioChunkTypedDict(TypedDict): + input_audio: str + type: NotRequired[AudioChunkType] + + +class AudioChunk(BaseModel): + input_audio: str + + type: Optional[AudioChunkType] = "input_audio" diff --git a/src/mistralai/models/audiotranscriptionrequest.py b/src/mistralai/models/audiotranscriptionrequest.py new file mode 100644 index 00000000..371d3ecc --- /dev/null +++ b/src/mistralai/models/audiotranscriptionrequest.py @@ -0,0 +1,97 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .file import File, FileTypedDict +from .timestampgranularity import TimestampGranularity +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AudioTranscriptionRequestTypedDict(TypedDict): + model: str + file: NotRequired[FileTypedDict] + file_url: NotRequired[Nullable[str]] + r"""Url of a file to be transcribed""" + file_id: NotRequired[Nullable[str]] + r"""ID of a file uploaded to /v1/files""" + language: NotRequired[Nullable[str]] + r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" + temperature: NotRequired[Nullable[float]] + stream: Literal[False] + timestamp_granularities: NotRequired[List[TimestampGranularity]] + r"""Granularities of timestamps to include in the response.""" + + +class AudioTranscriptionRequest(BaseModel): + model: Annotated[str, FieldMetadata(multipart=True)] + + file: Annotated[ + Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) + ] = None + + file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Url of a file to be transcribed""" + + file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""ID of a file uploaded to /v1/files""" + + language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" + + temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( + UNSET + ) + + STREAM: Annotated[ + Annotated[Optional[Literal[False]], AfterValidator(validate_const(False))], + pydantic.Field(alias="stream"), + FieldMetadata(multipart=True), + ] = False + + timestamp_granularities: Annotated[ + Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) + ] = None + r"""Granularities of timestamps to include in the response.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "timestamp_granularities", + ] + nullable_fields = ["file_url", "file_id", "language", "temperature"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/audiotranscriptionrequeststream.py b/src/mistralai/models/audiotranscriptionrequeststream.py new file mode 100644 index 00000000..04374503 --- /dev/null +++ b/src/mistralai/models/audiotranscriptionrequeststream.py @@ -0,0 +1,97 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .file import File, FileTypedDict +from .timestampgranularity import TimestampGranularity +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AudioTranscriptionRequestStreamTypedDict(TypedDict): + model: str + file: NotRequired[FileTypedDict] + file_url: NotRequired[Nullable[str]] + r"""Url of a file to be transcribed""" + file_id: NotRequired[Nullable[str]] + r"""ID of a file uploaded to /v1/files""" + language: NotRequired[Nullable[str]] + r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" + temperature: NotRequired[Nullable[float]] + stream: Literal[True] + timestamp_granularities: NotRequired[List[TimestampGranularity]] + r"""Granularities of timestamps to include in the response.""" + + +class AudioTranscriptionRequestStream(BaseModel): + model: Annotated[str, FieldMetadata(multipart=True)] + + file: Annotated[ + Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) + ] = None + + file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Url of a file to be transcribed""" + + file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""ID of a file uploaded to /v1/files""" + + language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" + + temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( + UNSET + ) + + STREAM: Annotated[ + Annotated[Optional[Literal[True]], AfterValidator(validate_const(True))], + pydantic.Field(alias="stream"), + FieldMetadata(multipart=True), + ] = True + + timestamp_granularities: Annotated[ + Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) + ] = None + r"""Granularities of timestamps to include in the response.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "timestamp_granularities", + ] + nullable_fields = ["file_url", "file_id", "language", "temperature"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index 7423a71b..a4a061ff 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -12,7 +12,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -Type = Literal["base"] +BaseModelCardType = Literal["base"] class BaseModelCardTypedDict(TypedDict): @@ -28,7 +28,7 @@ class BaseModelCardTypedDict(TypedDict): deprecation: NotRequired[Nullable[datetime]] deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: Type + type: 
BaseModelCardType class BaseModelCard(BaseModel): @@ -57,7 +57,7 @@ class BaseModelCard(BaseModel): default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ - Annotated[Optional[Type], AfterValidator(validate_const("base"))], + Annotated[Optional[BaseModelCardType], AfterValidator(validate_const("base"))], pydantic.Field(alias="type"), ] = "base" diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py index 6fcce0f0..aa0bb5be 100644 --- a/src/mistralai/models/batchjobin.py +++ b/src/mistralai/models/batchjobin.py @@ -13,7 +13,8 @@ class BatchJobInTypedDict(TypedDict): input_files: List[str] endpoint: APIEndpoint - model: str + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, str]]] timeout_hours: NotRequired[int] @@ -23,7 +24,9 @@ class BatchJobIn(BaseModel): endpoint: Annotated[APIEndpoint, PlainValidator(validate_open_enum(False))] - model: str + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET metadata: OptionalNullable[Dict[str, str]] = UNSET @@ -31,8 +34,8 @@ class BatchJobIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["metadata", "timeout_hours"] - nullable_fields = ["metadata"] + optional_fields = ["model", "agent_id", "metadata", "timeout_hours"] + nullable_fields = ["model", "agent_id", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py index 2b49057b..88304313 100644 --- a/src/mistralai/models/batchjobout.py +++ b/src/mistralai/models/batchjobout.py @@ -16,7 +16,6 @@ class BatchJobOutTypedDict(TypedDict): id: str input_files: List[str] endpoint: str - model: str errors: List[BatchErrorTypedDict] status: BatchJobStatus created_at: int @@ -26,6 +25,8 @@ class BatchJobOutTypedDict(TypedDict): failed_requests: int object: NotRequired[BatchJobOutObject] metadata: 
NotRequired[Nullable[Dict[str, Any]]] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] output_file: NotRequired[Nullable[str]] error_file: NotRequired[Nullable[str]] started_at: NotRequired[Nullable[int]] @@ -39,8 +40,6 @@ class BatchJobOut(BaseModel): endpoint: str - model: str - errors: List[BatchError] status: BatchJobStatus @@ -59,6 +58,10 @@ class BatchJobOut(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + output_file: OptionalNullable[str] = UNSET error_file: OptionalNullable[str] = UNSET @@ -72,6 +75,8 @@ def serialize_model(self, handler): optional_fields = [ "object", "metadata", + "model", + "agent_id", "output_file", "error_file", "started_at", @@ -79,6 +84,8 @@ def serialize_model(self, handler): ] nullable_fields = [ "metadata", + "model", + "agent_id", "output_file", "error_file", "started_at", diff --git a/src/mistralai/models/classifierftmodelout.py b/src/mistralai/models/classifierftmodelout.py index 4143d0e8..56ffe96d 100644 --- a/src/mistralai/models/classifierftmodelout.py +++ b/src/mistralai/models/classifierftmodelout.py @@ -21,7 +21,9 @@ class ClassifierFTModelOutTypedDict(TypedDict): id: str created: int owned_by: str + workspace_id: str root: str + root_version: str archived: bool capabilities: FTModelCapabilitiesOutTypedDict job: str @@ -41,8 +43,12 @@ class ClassifierFTModelOut(BaseModel): owned_by: str + workspace_id: str + root: str + root_version: str + archived: bool capabilities: FTModelCapabilitiesOut diff --git a/src/mistralai/models/completionargs.py b/src/mistralai/models/completionargs.py index 04e032ac..40aa0314 100644 --- a/src/mistralai/models/completionargs.py +++ b/src/mistralai/models/completionargs.py @@ -17,7 +17,7 @@ class CompletionArgsTypedDict(TypedDict): stop: NotRequired[Nullable[CompletionArgsStopTypedDict]] presence_penalty: NotRequired[Nullable[float]] frequency_penalty: 
NotRequired[Nullable[float]] - temperature: NotRequired[float] + temperature: NotRequired[Nullable[float]] top_p: NotRequired[Nullable[float]] max_tokens: NotRequired[Nullable[int]] random_seed: NotRequired[Nullable[int]] @@ -35,7 +35,7 @@ class CompletionArgs(BaseModel): frequency_penalty: OptionalNullable[float] = UNSET - temperature: Optional[float] = 0.3 + temperature: OptionalNullable[float] = UNSET top_p: OptionalNullable[float] = UNSET @@ -67,6 +67,7 @@ def serialize_model(self, handler): "stop", "presence_penalty", "frequency_penalty", + "temperature", "top_p", "max_tokens", "random_seed", diff --git a/src/mistralai/models/completionftmodelout.py b/src/mistralai/models/completionftmodelout.py index ad04d73e..ab71168b 100644 --- a/src/mistralai/models/completionftmodelout.py +++ b/src/mistralai/models/completionftmodelout.py @@ -20,7 +20,9 @@ class CompletionFTModelOutTypedDict(TypedDict): id: str created: int owned_by: str + workspace_id: str root: str + root_version: str archived: bool capabilities: FTModelCapabilitiesOutTypedDict job: str @@ -39,8 +41,12 @@ class CompletionFTModelOut(BaseModel): owned_by: str + workspace_id: str + root: str + root_version: str + archived: bool capabilities: FTModelCapabilitiesOut diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py index 4cb8ab6d..47170eef 100644 --- a/src/mistralai/models/contentchunk.py +++ b/src/mistralai/models/contentchunk.py @@ -1,11 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations +from .audiochunk import AudioChunk, AudioChunkTypedDict from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .filechunk import FileChunk, FileChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag from typing import Union @@ -19,7 +21,9 @@ ImageURLChunkTypedDict, ReferenceChunkTypedDict, FileChunkTypedDict, + AudioChunkTypedDict, DocumentURLChunkTypedDict, + ThinkChunkTypedDict, ], ) @@ -31,6 +35,8 @@ Annotated[TextChunk, Tag("text")], Annotated[ReferenceChunk, Tag("reference")], Annotated[FileChunk, Tag("file")], + Annotated[ThinkChunk, Tag("thinking")], + Annotated[AudioChunk, Tag("input_audio")], ], Discriminator(lambda m: get_discriminator(m, "type", "type")), ] diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py index d687f222..7c7b60c6 100644 --- a/src/mistralai/models/fileschema.py +++ b/src/mistralai/models/fileschema.py @@ -27,6 +27,8 @@ class FileSchemaTypedDict(TypedDict): sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] class FileSchema(BaseModel): @@ -53,10 +55,14 @@ class FileSchema(BaseModel): num_lines: OptionalNullable[int] = UNSET + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["num_lines"] - nullable_fields = ["num_lines"] + optional_fields = ["num_lines", "mimetype", "signature"] + nullable_fields = ["num_lines", "mimetype", "signature"] null_default_fields = [] serialized = handler(self) diff --git 
a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py index 39261226..c48246d5 100644 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +++ b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -14,6 +14,7 @@ class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] created_after: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] @@ -36,6 +37,11 @@ class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET + agent_id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + metadata: Annotated[ OptionalNullable[Dict[str, Any]], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -62,12 +68,13 @@ def serialize_model(self, handler): "page", "page_size", "model", + "agent_id", "metadata", "created_after", "created_by_me", "status", ] - nullable_fields = ["model", "metadata", "created_after", "status"] + nullable_fields = ["model", "agent_id", "metadata", "created_after", "status"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py index e5f91449..7d734b0f 100644 --- a/src/mistralai/models/retrievefileout.py +++ b/src/mistralai/models/retrievefileout.py @@ -28,6 +28,8 @@ class RetrieveFileOutTypedDict(TypedDict): source: Source deleted: bool num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] class RetrieveFileOut(BaseModel): @@ -56,10 +58,14 @@ class RetrieveFileOut(BaseModel): num_lines: OptionalNullable[int] = UNSET + mimetype: OptionalNullable[str] = 
UNSET + + signature: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["num_lines"] - nullable_fields = ["num_lines"] + optional_fields = ["num_lines", "mimetype", "signature"] + nullable_fields = ["num_lines", "mimetype", "signature"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/thinkchunk.py b/src/mistralai/models/thinkchunk.py new file mode 100644 index 00000000..24b466f9 --- /dev/null +++ b/src/mistralai/models/thinkchunk.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ThinkingTypedDict = TypeAliasType( + "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +) + + +Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) + + +ThinkChunkType = Literal["thinking"] + + +class ThinkChunkTypedDict(TypedDict): + thinking: List[ThinkingTypedDict] + closed: NotRequired[bool] + r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" + type: NotRequired[ThinkChunkType] + + +class ThinkChunk(BaseModel): + thinking: List[Thinking] + + closed: Optional[bool] = None + r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + type: Optional[ThinkChunkType] = "thinking" diff --git a/src/mistralai/models/timestampgranularity.py b/src/mistralai/models/timestampgranularity.py new file mode 100644 index 00000000..dd1b6446 --- /dev/null +++ b/src/mistralai/models/timestampgranularity.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +TimestampGranularity = Literal["segment"] diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py index 92dbb4a9..7d3a3c6b 100644 --- a/src/mistralai/models/toolcall.py +++ b/src/mistralai/models/toolcall.py @@ -3,18 +3,28 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum +from pydantic import model_serializer from pydantic.functional_validators import PlainValidator from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict +class MetadataTypedDict(TypedDict): + pass + + +class Metadata(BaseModel): + pass + + class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] index: NotRequired[int] + metadata: NotRequired[Nullable[MetadataTypedDict]] class ToolCall(BaseModel): @@ -27,3 +37,35 @@ class ToolCall(BaseModel): ) index: Optional[int] = 0 + + metadata: OptionalNullable[Metadata] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["id", "type", "index", "metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = 
serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/transcriptionresponse.py b/src/mistralai/models/transcriptionresponse.py new file mode 100644 index 00000000..54a98a5b --- /dev/null +++ b/src/mistralai/models/transcriptionresponse.py @@ -0,0 +1,79 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, List, Optional +from typing_extensions import NotRequired, TypedDict + + +class TranscriptionResponseTypedDict(TypedDict): + model: str + text: str + usage: UsageInfoTypedDict + language: Nullable[str] + segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] + + +class TranscriptionResponse(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + model: str + + text: str + + usage: UsageInfo + + language: Nullable[str] + + segments: Optional[List[TranscriptionSegmentChunk]] = None + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: 
ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["segments"] + nullable_fields = ["language"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/models/transcriptionsegmentchunk.py b/src/mistralai/models/transcriptionsegmentchunk.py new file mode 100644 index 00000000..53f1b397 --- /dev/null +++ b/src/mistralai/models/transcriptionsegmentchunk.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +Type = Literal["transcription_segment"] + + +class TranscriptionSegmentChunkTypedDict(TypedDict): + text: str + start: float + end: float + type: NotRequired[Type] + + +class TranscriptionSegmentChunk(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + start: float + + end: float + + type: Optional[Type] = "transcription_segment" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/transcriptionstreamdone.py b/src/mistralai/models/transcriptionstreamdone.py new file mode 100644 index 00000000..ffd0e080 --- /dev/null +++ b/src/mistralai/models/transcriptionstreamdone.py @@ -0,0 +1,85 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamDoneType = Literal["transcription.done"] + + +class TranscriptionStreamDoneTypedDict(TypedDict): + model: str + text: str + usage: UsageInfoTypedDict + language: Nullable[str] + segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] + type: NotRequired[TranscriptionStreamDoneType] + + +class TranscriptionStreamDone(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + model: str + + text: str + + usage: UsageInfo + + language: Nullable[str] + + segments: Optional[List[TranscriptionSegmentChunk]] = None + + type: Optional[TranscriptionStreamDoneType] = "transcription.done" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["segments", "type"] + nullable_fields = ["language"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not 
None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/models/transcriptionstreamevents.py b/src/mistralai/models/transcriptionstreamevents.py new file mode 100644 index 00000000..8207c03f --- /dev/null +++ b/src/mistralai/models/transcriptionstreamevents.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .transcriptionstreamdone import ( + TranscriptionStreamDone, + TranscriptionStreamDoneTypedDict, +) +from .transcriptionstreameventtypes import TranscriptionStreamEventTypes +from .transcriptionstreamlanguage import ( + TranscriptionStreamLanguage, + TranscriptionStreamLanguageTypedDict, +) +from .transcriptionstreamsegmentdelta import ( + TranscriptionStreamSegmentDelta, + TranscriptionStreamSegmentDeltaTypedDict, +) +from .transcriptionstreamtextdelta import ( + TranscriptionStreamTextDelta, + TranscriptionStreamTextDeltaTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +TranscriptionStreamEventsDataTypedDict = TypeAliasType( + "TranscriptionStreamEventsDataTypedDict", + Union[ + TranscriptionStreamTextDeltaTypedDict, + TranscriptionStreamLanguageTypedDict, + TranscriptionStreamSegmentDeltaTypedDict, + TranscriptionStreamDoneTypedDict, + ], +) + + +TranscriptionStreamEventsData = Annotated[ + Union[ + Annotated[TranscriptionStreamDone, Tag("transcription.done")], + Annotated[TranscriptionStreamLanguage, Tag("transcription.language")], + Annotated[TranscriptionStreamSegmentDelta, Tag("transcription.segment")], + Annotated[TranscriptionStreamTextDelta, 
Tag("transcription.text.delta")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class TranscriptionStreamEventsTypedDict(TypedDict): + event: TranscriptionStreamEventTypes + data: TranscriptionStreamEventsDataTypedDict + + +class TranscriptionStreamEvents(BaseModel): + event: TranscriptionStreamEventTypes + + data: TranscriptionStreamEventsData diff --git a/src/mistralai/models/transcriptionstreameventtypes.py b/src/mistralai/models/transcriptionstreameventtypes.py new file mode 100644 index 00000000..4a910f0a --- /dev/null +++ b/src/mistralai/models/transcriptionstreameventtypes.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +TranscriptionStreamEventTypes = Literal[ + "transcription.language", + "transcription.segment", + "transcription.text.delta", + "transcription.done", +] diff --git a/src/mistralai/models/transcriptionstreamlanguage.py b/src/mistralai/models/transcriptionstreamlanguage.py new file mode 100644 index 00000000..8fc2aa6e --- /dev/null +++ b/src/mistralai/models/transcriptionstreamlanguage.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamLanguageType = Literal["transcription.language"] + + +class TranscriptionStreamLanguageTypedDict(TypedDict): + audio_language: str + type: NotRequired[TranscriptionStreamLanguageType] + + +class TranscriptionStreamLanguage(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + audio_language: str + + type: Optional[TranscriptionStreamLanguageType] = "transcription.language" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/transcriptionstreamsegmentdelta.py b/src/mistralai/models/transcriptionstreamsegmentdelta.py new file mode 100644 index 00000000..61b396b4 --- /dev/null +++ b/src/mistralai/models/transcriptionstreamsegmentdelta.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamSegmentDeltaType = Literal["transcription.segment"] + + +class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): + text: str + start: float + end: float + type: NotRequired[TranscriptionStreamSegmentDeltaType] + + +class TranscriptionStreamSegmentDelta(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + start: float + + end: float + + type: Optional[TranscriptionStreamSegmentDeltaType] = "transcription.segment" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/transcriptionstreamtextdelta.py b/src/mistralai/models/transcriptionstreamtextdelta.py new file mode 100644 index 00000000..8f0b0e59 --- /dev/null +++ b/src/mistralai/models/transcriptionstreamtextdelta.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamTextDeltaType = Literal["transcription.text.delta"] + + +class TranscriptionStreamTextDeltaTypedDict(TypedDict): + text: str + type: NotRequired[TranscriptionStreamTextDeltaType] + + +class TranscriptionStreamTextDelta(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + type: Optional[TranscriptionStreamTextDeltaType] = "transcription.text.delta" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py index 3a8b7337..8f9f1067 100644 --- a/src/mistralai/models/uploadfileout.py +++ b/src/mistralai/models/uploadfileout.py @@ -27,6 +27,8 @@ class UploadFileOutTypedDict(TypedDict): sample_type: SampleType source: Source num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] class UploadFileOut(BaseModel): @@ -53,10 +55,14 @@ class UploadFileOut(BaseModel): num_lines: OptionalNullable[int] = UNSET + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["num_lines"] - nullable_fields = ["num_lines"] + optional_fields = ["num_lines", "mimetype", "signature"] + nullable_fields = ["num_lines", "mimetype", "signature"] null_default_fields = [] serialized = handler(self) diff --git 
a/src/mistralai/models/usageinfo.py b/src/mistralai/models/usageinfo.py index f7a6e99e..cedad5c1 100644 --- a/src/mistralai/models/usageinfo.py +++ b/src/mistralai/models/usageinfo.py @@ -1,19 +1,76 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class UsageInfoTypedDict(TypedDict): - prompt_tokens: int - completion_tokens: int - total_tokens: int + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] class UsageInfo(BaseModel): - prompt_tokens: int + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - completion_tokens: int + prompt_tokens: Optional[int] = 0 - total_tokens: int + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + nullable_fields = ["prompt_audio_seconds"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = 
serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 352c16a1..23d31cc7 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -15,6 +15,7 @@ if TYPE_CHECKING: from mistralai.agents import Agents + from mistralai.audio import Audio from mistralai.batch import Batch from mistralai.beta import Beta from mistralai.chat import Chat @@ -49,6 +50,7 @@ class Mistral(BaseSDK): r"""Classifiers API.""" ocr: "Ocr" r"""OCR API""" + audio: "Audio" _sub_sdk_map = { "models": ("mistralai.models_", "Models"), "beta": ("mistralai.beta", "Beta"), @@ -61,6 +63,7 @@ class Mistral(BaseSDK): "embeddings": ("mistralai.embeddings", "Embeddings"), "classifiers": ("mistralai.classifiers", "Classifiers"), "ocr": ("mistralai.ocr", "Ocr"), + "audio": ("mistralai.audio", "Audio"), } def __init__( diff --git a/src/mistralai/transcriptions.py b/src/mistralai/transcriptions.py new file mode 100644 index 00000000..24975cb2 --- /dev/null +++ b/src/mistralai/transcriptions.py @@ -0,0 +1,480 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai import models, utils +from mistralai._hooks import HookContext +from mistralai.types import OptionalNullable, UNSET +from mistralai.utils import eventstreaming, get_security_from_env +from typing import List, Mapping, Optional, Union + + +class Transcriptions(BaseSDK): + r"""API for audio transcription.""" + + def complete( + self, + *, + model: str, + file: Optional[Union[models.File, models.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.TranscriptionResponse: + r"""Create Transcription + + :param model: + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequest( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request( + method="POST", + path="/v1/audio/transcriptions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "multipart", models.AudioTranscriptionRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.TranscriptionResponse) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", 
http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def complete_async( + self, + *, + model: str, + file: Optional[Union[models.File, models.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.TranscriptionResponse: + r"""Create Transcription + + :param model: + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequest( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request_async( + method="POST", + path="/v1/audio/transcriptions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "multipart", models.AudioTranscriptionRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, models.TranscriptionResponse) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + 
"API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + def stream( + self, + *, + model: str, + file: Optional[Union[models.File, models.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.TranscriptionStreamEvents]: + r"""Create streaming transcription (SSE) + + :param model: + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequestStream( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request( + method="POST", + path="/v1/audio/transcriptions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.AudioTranscriptionRequestStream, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post_stream", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), + ) + if utils.match_response(http_res, "4XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def stream_async( + self, + *, + model: str, + file: Optional[Union[models.File, models.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]: + r"""Create streaming transcription (SSE) + + :param model: + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequestStream( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request_async( + method="POST", + path="/v1/audio/transcriptions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.AudioTranscriptionRequestStream, + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post_stream", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), + ) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) From 864ceca714f666913f4dec6b4473d672993739e5 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Tue, 12 Aug 2025 15:48:39 +0200 Subject: [PATCH 147/223] Speakeasy update workflow (#254) * Add the update_speakeasy.py script * Fix linter errors * Make it an invoke task * Rename scripts/update_speakeasy.py -> utils/speakeasy.py * Add GHA Workflow * Add missing permissions * Revert python >= 3.10 --- .genignore | 1 + .github/workflows/update_speakeasy.yaml | 128 +++++++++++++++ poetry.lock | 197 ++++++++++++++++++------ pyproject.toml | 9 +- src/mistralai/conversations.py | 4 +- src/mistralai/version.py | 7 - tasks.py | 48 ++++++ utils/speakeasy.py | 91 +++++++++++ 8 files changed, 422 insertions(+), 63 deletions(-) create mode 100644 .github/workflows/update_speakeasy.yaml delete mode 100644 src/mistralai/version.py create mode 100644 tasks.py create mode 100755 utils/speakeasy.py diff --git a/.genignore b/.genignore index be3ba87c..3ef32897 100644 --- a/.genignore +++ b/.genignore @@ -1,4 +1,5 @@ pyproject.toml examples/* +/utils/* src/mistral/extra/* pylintrc \ No newline at end of file diff --git a/.github/workflows/update_speakeasy.yaml b/.github/workflows/update_speakeasy.yaml new file mode 100644 index 00000000..7fe5d3e2 --- 
/dev/null +++ b/.github/workflows/update_speakeasy.yaml @@ -0,0 +1,128 @@ +name: Update Speakeasy SDKs +permissions: + checks: write + contents: write + pull-requests: write + statuses: write +on: + workflow_dispatch: + inputs: + version: + description: 'Speakeasy version to update to (e.g., 1.580.2)' + required: true + type: string + targets: + description: 'Targets to update. If not provided, all targets will be updated.' + type: choice + options: + - mistralai-sdk + - mistralai-azure-sdk + - mistralai-gcp-sdk + +jobs: + update-sdks: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0 + + - name: Set up Python + uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 + with: + python-version: '3.11' + + - name: Install Poetry + uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + with: + version: latest + virtualenvs-create: true + virtualenvs-in-project: true + + - name: Install dependencies + run: | + poetry install --with dev + + - name: Configure Git + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + + - name: Create branch + run: | + git checkout -b update-speakeasy-to-${{ github.event.inputs.version }}-${{ github.run_id }} + + - name: Update Speakeasy SDKs + run: | + # Split targets and build command with multiple --targets flags + TARGETS_ARGS="" + for target in ${{ github.event.inputs.targets }}; do + TARGETS_ARGS="$TARGETS_ARGS --targets $target" + done + + poetry run inv update-speakeasy \ + --version "${{ github.event.inputs.version }}" \ + $TARGETS_ARGS + + - name: Check for changes + id: check-changes + run: | + if [ -n "$(git status --porcelain)" ]; then + echo "has_changes=true" >> $GITHUB_OUTPUT + echo "Files changed:" + git status --porcelain + else + echo "has_changes=false" >> $GITHUB_OUTPUT + echo "No changes detected" + fi + + - name: Commit and push 
changes + if: steps.check-changes.outputs.has_changes == 'true' + run: | + git add . + git commit -m "Update Speakeasy SDKs to version ${{ github.event.inputs.version }} + + Targets updated: ${{ github.event.inputs.targets }} + + This PR was automatically generated by the Update Speakeasy workflow." + git push origin ${{ github.event.inputs.branch_name }} + + - name: Create Pull Request + if: steps.check-changes.outputs.has_changes == 'true' + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 + with: + token: ${{ secrets.GITHUB_TOKEN }} + base: main + branch: ${{ github.event.inputs.branch_name }} + title: "Update Speakeasy SDKs to version ${{ github.event.inputs.version }}" + body: | + ## Summary + + This PR updates the Speakeasy SDKs to version `${{ github.event.inputs.version }}`. + + ## Changes + + - **Version**: Updated to `${{ github.event.inputs.version }}` + - **Targets**: ${{ github.event.inputs.targets }} + + ## Files Updated + + The following SDK files have been regenerated: + - Generated SDK code files + - Updated dependencies and configurations + + ## How to Review + + 1. Check that the generated files look correct + 2. Verify that the version update is appropriate + 3. Ensure all target SDKs are properly updated + + --- + + *This PR was automatically generated by the [Update Speakeasy workflow](.github/workflows/update_speakeasy.yaml)* + labels: automated + assignees: ${{ github.actor }} + + - name: Comment on workflow run + if: steps.check-changes.outputs.has_changes == 'false' + run: | + echo "No changes were detected. The SDKs are already up to date with version ${{ github.event.inputs.version }}." diff --git a/poetry.lock b/poetry.lock index 9ffdc439..e3a652fa 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -6,11 +6,12 @@ version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] +markers = {dev = "python_version >= \"3.10\""} [[package]] name = "anyio" @@ -18,11 +19,12 @@ version = "4.5.2" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, ] +markers = {dev = "python_version >= \"3.10\""} [package.dependencies] exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} @@ -54,10 +56,9 @@ typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} name = "authlib" version = "1.6.0" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." -optional = true +optional = false python-versions = ">=3.9" -groups = ["main"] -markers = "extra == \"agents\"" +groups = ["main", "dev"] files = [ {file = "authlib-1.6.0-py2.py3-none-any.whl", hash = "sha256:91685589498f79e8655e8a8947431ad6288831d643f11c55c2143ffcc738048d"}, {file = "authlib-1.6.0.tar.gz", hash = "sha256:4367d32031b7af175ad3a323d571dc7257b7099d55978087ceae4a0d88cd3210"}, @@ -85,11 +86,12 @@ version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, ] +markers = {dev = "python_version >= \"3.10\""} [[package]] name = "cffi" @@ -98,6 +100,7 @@ description = "Foreign Function Interface for Python calling C code." optional = false python-versions = ">=3.8" groups = ["main", "dev"] +markers = "platform_python_implementation != \"PyPy\"" files = [ {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, @@ -167,7 +170,6 @@ files = [ {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, ] -markers = {main = "extra == \"agents\" and platform_python_implementation != \"PyPy\"", dev = "platform_python_implementation != \"PyPy\""} [package.dependencies] pycparser = "*" @@ -292,10 +294,10 @@ files = [ name = "click" version = "8.2.1" description = "Composable command line interface toolkit" -optional = true +optional = false python-versions = ">=3.10" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" +groups = ["main", "dev"] +markers = "python_version >= \"3.10\"" files = [ {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, @@ -315,7 +317,6 @@ files = [ {file = 
"colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -markers = {main = "extra == \"agents\"", dev = "sys_platform == \"win32\""} [[package]] name = "cryptography" @@ -324,6 +325,7 @@ description = "cryptography is a package which provides cryptographic recipes an optional = false python-versions = ">=3.7" groups = ["main", "dev"] +markers = "python_version == \"3.9\"" files = [ {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, @@ -353,7 +355,6 @@ files = [ {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, ] -markers = {main = "python_version < \"3.11\" and extra == \"agents\"", dev = "python_version < \"3.11\""} [package.dependencies] cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} @@ -375,6 +376,7 @@ description = "cryptography is a package which provides cryptographic recipes an optional = false python-versions = "!=3.9.0,!=3.9.1,>=3.7" groups = ["main", "dev"] +markers = "python_version >= \"3.10\"" files = [ {file = "cryptography-45.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:7573d9eebaeceeb55285205dbbb8753ac1e962af3d9640791d12b36864065e71"}, {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d377dde61c5d67eb4311eace661c3efda46c62113ff56bf05e2d679e02aebb5b"}, @@ -414,7 +416,6 @@ files = [ 
{file = "cryptography-45.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:9eda14f049d7f09c2e8fb411dda17dd6b16a3c76a1de5e249188a32aeb92de19"}, {file = "cryptography-45.0.3.tar.gz", hash = "sha256:ec21313dd335c51d7877baf2972569f40a4291b76a0ce51391523ae358d05899"}, ] -markers = {main = "python_version >= \"3.11\" and extra == \"agents\"", dev = "python_version >= \"3.11\""} [package.dependencies] cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""} @@ -505,10 +506,9 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"] name = "griffe" version = "1.7.3" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." -optional = true +optional = false python-versions = ">=3.9" -groups = ["main"] -markers = "extra == \"agents\"" +groups = ["main", "dev"] files = [ {file = "griffe-1.7.3-py3-none-any.whl", hash = "sha256:c6b3ee30c2f0f17f30bcdef5068d6ab7a2a4f1b8bf1a3e74b56fffd21e1c5f75"}, {file = "griffe-1.7.3.tar.gz", hash = "sha256:52ee893c6a3a968b639ace8015bec9d36594961e156e23315c8e8e51401fa50b"}, @@ -523,11 +523,12 @@ version = "0.14.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false python-versions = ">=3.7" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, ] +markers = {dev = "python_version >= \"3.10\""} [[package]] name = "httpcore" @@ -535,11 +536,12 @@ version = "1.0.7" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, ] +markers = {dev = "python_version >= \"3.10\""} [package.dependencies] certifi = "*" @@ -557,11 +559,12 @@ version = "0.28.1" description = "The next generation HTTP client." optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] +markers = {dev = "python_version >= \"3.10\""} [package.dependencies] anyio = "*" @@ -580,10 +583,10 @@ zstd = ["zstandard (>=0.18.0)"] name = "httpx-sse" version = "0.4.0" description = "Consume Server-Sent Event (SSE) messages with HTTPX." 
-optional = true +optional = false python-versions = ">=3.8" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" +groups = ["main", "dev"] +markers = "python_version >= \"3.10\"" files = [ {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, @@ -595,11 +598,12 @@ version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false python-versions = ">=3.6" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +markers = {dev = "python_version >= \"3.10\""} [package.extras] all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] @@ -616,6 +620,18 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "invoke" +version = "2.2.0" +description = "Pythonic task execution" +optional = false +python-versions = ">=3.6" +groups = ["main"] +files = [ + {file = "invoke-2.2.0-py3-none-any.whl", hash = "sha256:6ea924cc53d4f78e3d98bc436b08069a03077e6f85ad1ddaa8a116d7dad15820"}, + {file = "invoke-2.2.0.tar.gz", hash = "sha256:ee6cbb101af1a859c7fe84f2a264c059020b0cb7fe3535f9424300ab568f6bd5"}, +] + [[package]] name = "isort" version = "5.13.2" @@ -647,10 +663,10 @@ files = [ name = "mcp" version = "1.9.1" description = "Model Context Protocol SDK" -optional = true +optional = false python-versions = ">=3.10" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" +groups = ["main", "dev"] +markers = "python_version >= \"3.10\"" files = [ {file = 
"mcp-1.9.1-py3-none-any.whl", hash = "sha256:2900ded8ffafc3c8a7bfcfe8bc5204037e988e753ec398f371663e6a06ecd9a9"}, {file = "mcp-1.9.1.tar.gz", hash = "sha256:19879cd6dde3d763297617242888c2f695a95dfa854386a6a68676a646ce75e4"}, @@ -831,11 +847,11 @@ description = "C parser in Python" optional = false python-versions = ">=3.8" groups = ["main", "dev"] +markers = "platform_python_implementation != \"PyPy\"" files = [ {file = "pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, ] -markers = {main = "extra == \"agents\" and platform_python_implementation != \"PyPy\"", dev = "platform_python_implementation != \"PyPy\""} [[package]] name = "pydantic" @@ -843,11 +859,12 @@ version = "2.10.6" description = "Data validation using Python type hints" optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, ] +markers = {dev = "python_version >= \"3.10\""} [package.dependencies] annotated-types = ">=0.6.0" @@ -864,7 +881,7 @@ version = "2.27.2" description = "Core functionality for Pydantic validation and serialization" optional = false python-versions = ">=3.8" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, @@ -967,6 +984,7 @@ files = [ {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, ] +markers = {dev = "python_version >= \"3.10\""} [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" @@ -975,10 +993,10 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" name = "pydantic-settings" version = "2.9.1" description = "Settings management using Pydantic" -optional = true +optional = false python-versions = ">=3.9" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" +groups = ["main", "dev"] +markers = "python_version >= \"3.10\"" files = [ {file = "pydantic_settings-2.9.1-py3-none-any.whl", hash = "sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef"}, {file = "pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268"}, @@ -1109,10 +1127,10 @@ six = ">=1.5" name = "python-dotenv" version = "1.1.0" description = "Read key-value pairs from a .env file and set them as environment variables" -optional = true +optional = false python-versions = ">=3.9" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" +groups = ["main", "dev"] +markers = "python_version >= \"3.10\"" files = [ {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, @@ -1125,15 +1143,78 @@ cli = ["click (>=5.0)"] name = "python-multipart" version = "0.0.20" description = "A streaming multipart parser for Python" -optional = true +optional = false python-versions = ">=3.8" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" +groups = ["main", "dev"] +markers = "python_version >= \"3.10\"" files = [ {file = 
"python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104"}, {file = "python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13"}, ] +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash 
= "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = 
"PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = 
"PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + [[package]] name = "requests" version = "2.32.3" @@ -1218,20 +1299,21 @@ version = "1.3.1" description = "Sniff out which async library your code is running under" optional = false python-versions = ">=3.7" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] +markers = {dev = "python_version >= \"3.10\""} [[package]] name = "sse-starlette" version = "2.1.3" description = "SSE plugin for Starlette" -optional = true +optional = false python-versions = ">=3.8" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" +groups = ["main", "dev"] +markers = "python_version >= \"3.10\"" files = [ {file = "sse_starlette-2.1.3-py3-none-any.whl", hash = "sha256:8ec846438b4665b9e8c560fcdea6bc8081a3abf7942faa95e5a744999d219772"}, {file = "sse_starlette-2.1.3.tar.gz", hash = "sha256:9cd27eb35319e1414e3d2558ee7414487f9529ce3b3cf9b21434fd110e017169"}, @@ -1249,10 +1331,10 @@ examples = ["fastapi"] name = "starlette" version = "0.46.2" description = "The little ASGI library that shines." 
-optional = true +optional = false python-versions = ">=3.9" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" +groups = ["main", "dev"] +markers = "python_version >= \"3.10\"" files = [ {file = "starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35"}, {file = "starlette-0.46.2.tar.gz", hash = "sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5"}, @@ -1346,6 +1428,18 @@ files = [ {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, ] +[[package]] +name = "types-pyyaml" +version = "6.0.12.20250516" +description = "Typing stubs for PyYAML" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "types_pyyaml-6.0.12.20250516-py3-none-any.whl", hash = "sha256:8478208feaeb53a34cb5d970c56a7cd76b72659442e733e268a94dc72b2d0530"}, + {file = "types_pyyaml-6.0.12.20250516.tar.gz", hash = "sha256:9f21a70216fc0fa1b216a8176db5f9e0af6eb35d2f2932acb87689d03a5bf6ba"}, +] + [[package]] name = "typing-extensions" version = "4.12.2" @@ -1364,11 +1458,12 @@ version = "0.4.0" description = "Runtime typing introspection tools" optional = false python-versions = ">=3.9" -groups = ["main"] +groups = ["main", "dev"] files = [ {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, ] +markers = {dev = "python_version >= \"3.10\""} [package.dependencies] typing-extensions = ">=4.12.0" @@ -1396,10 +1491,10 @@ zstd = ["zstandard (>=0.18.0)"] name = "uvicorn" version = "0.34.2" description = "The lightning-fast ASGI server." 
-optional = true +optional = false python-versions = ">=3.9" -groups = ["main"] -markers = "python_version >= \"3.10\" and extra == \"agents\"" +groups = ["main", "dev"] +markers = "python_version >= \"3.10\"" files = [ {file = "uvicorn-0.34.2-py3-none-any.whl", hash = "sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403"}, {file = "uvicorn-0.34.2.tar.gz", hash = "sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328"}, @@ -1420,4 +1515,4 @@ gcp = ["google-auth", "requests"] [metadata] lock-version = "2.1" python-versions = ">=3.9" -content-hash = "f111068ee90dcada908f5064a1ed67f027a728ababa2bb6bd9e6957957fc5c6c" +content-hash = "84dda1a6ae0a8491ec9f64e6500480e7ef2e177812a624e388127f354c8e844c" diff --git a/pyproject.toml b/pyproject.toml index 123c0fe9..80e65e4f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,6 +11,8 @@ dependencies = [ "pydantic >=2.10.3", "python-dateutil >=2.8.2", "typing-inspection >=0.4.0", + "pyyaml (>=6.0.2,<7.0.0)", + "invoke (>=2.2.0,<3.0.0)", ] [tool.poetry] @@ -25,9 +27,6 @@ include = ["py.typed", "src/mistralai/py.typed"] [tool.setuptools.package-data] "*" = ["py.typed", "src/mistralai/py.typed"] -[virtualenvs] -in-project = true - [tool.poetry.group.dev.dependencies] mypy = "==1.15.0" pylint = "==3.2.3" @@ -35,6 +34,10 @@ pytest = "^8.2.2" pytest-asyncio = "^0.23.7" types-python-dateutil = "^2.9.0.20240316" types-authlib = "^1.5.0.20250516" +types-pyyaml = "^6.0.12.20250516" +mcp = { version = "^1.0", python = ">=3.10" } +griffe = "^1.7.3" +authlib = "^1.5.2" [tool.poetry.group.lint.dependencies] ruff = "^0.11.10" diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 009df94d..12d690f8 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -85,7 +85,7 @@ async def run_async( retries=retries, server_url=server_url, timeout_ms=timeout_ms, - **req, + **req, # type: ignore ) run_result.conversation_id = res.conversation_id 
run_ctx.conversation_id = res.conversation_id @@ -164,7 +164,7 @@ async def run_generator() -> ( retries=retries, server_url=server_url, timeout_ms=timeout_ms, - **req, + **req, # type: ignore ) else: res = await self.append_stream_async( diff --git a/src/mistralai/version.py b/src/mistralai/version.py deleted file mode 100644 index ee8f8def..00000000 --- a/src/mistralai/version.py +++ /dev/null @@ -1,7 +0,0 @@ -from importlib import metadata - -try: - __version__ = metadata.version(__package__) -except metadata.PackageNotFoundError: - # Case where package metadata is not available. - __version__ = "" diff --git a/tasks.py b/tasks.py new file mode 100644 index 00000000..0d5483e1 --- /dev/null +++ b/tasks.py @@ -0,0 +1,48 @@ +import re +from invoke.context import Context +from invoke.tasks import task +from utils.speakeasy import ( + pin_speakeasy_version, + OpenAPISpecsPinned, + SpeakeasyTargets, + WORKFLOW_PATH, + WORKFLOW_LOCK_PATH, +) + + +@task(iterable=["targets"]) +def update_speakeasy( + ctx: Context, + version: str, + targets: list[SpeakeasyTargets] = [SpeakeasyTargets.ALL], + workflow_path: str = WORKFLOW_PATH, + workflow_lock_path: str = WORKFLOW_LOCK_PATH, + verbose: bool = False, +): + if not re.match(r'^\d+\.\d+\.\d+$', version): + raise ValueError(f"Invalid version format: {version}. Expected format: X.Y.Z (e.g., 1.2.3)") + """ + Update the speakeasy version and pin the openapi specs to the current revision. 
+ + Usage: + inv update-speakeasy --version "1.580.2" --targets "all" + inv update-speakeasy --version "1.580.2" --targets "mistralai-azure-sdk" --targets "mistralai-gcp-sdk" --verbose + inv update-speakeasy --version "1.580.2" --targets "mistralai-sdk" --workflow-path ".speakeasy/workflow.yaml" --workflow-lock-path ".speakeasy/workflow.lock.yaml" + inv update-speakeasy --version "1.580.2" --targets "mistralai-sdk" --workflow-path ".speakeasy/workflow.yaml" --workflow-lock-path ".speakeasy/workflow.lock.yaml" --verbose + """ + for target in targets: + try: + SpeakeasyTargets(target) + except ValueError: + raise ValueError( + f"Invalid target: {target}. Your targets must be one of {SpeakeasyTargets.list()}" + ) + cmd = ( + "speakeasy run" + + " --skip-versioning" + + "".join(f" -t {target}" for target in targets) + + (" --verbose" if verbose else "") + ) + pin_speakeasy_version(workflow_path=workflow_path, version=version) + with OpenAPISpecsPinned(workflow_path, workflow_lock_path): + ctx.run(cmd) diff --git a/utils/speakeasy.py b/utils/speakeasy.py new file mode 100755 index 00000000..7c685feb --- /dev/null +++ b/utils/speakeasy.py @@ -0,0 +1,91 @@ +""" +This script: +- pins the OpenAPI specs, +- runs speakeasy to update the SDKs' files, +- and then unpins the OpenAPI specs. + +It is advised to often run this script to avoid getting unrelated changes (due to updates) when modifying the OpenAPI specs. 
+""" + +import yaml +from io import TextIOWrapper +import copy +import subprocess +from enum import Enum + +WORKFLOW_PATH = ".speakeasy/workflow.yaml" +WORKFLOW_LOCK_PATH = ".speakeasy/workflow.lock" + + +def set_location_rev(yaml_content: dict, source_name: str, new_rev: str) -> None: + registry = yaml_content["sources"][source_name]["inputs"][0]["location"].split(":")[0] + yaml_content["sources"][source_name]["inputs"][0]["location"] = f"{registry}:{new_rev}" + + +def write_yaml(yaml_content: dict, file: TextIOWrapper) -> None: + return yaml.dump( + yaml_content, file, default_flow_style=False, sort_keys=False, indent=4 + ) + +def pin_speakeasy_version(workflow_path: str, version: str): + with open(workflow_path, "r") as file: + workflow_yaml = yaml.safe_load(file) + workflow_yaml["speakeasyVersion"] = version + with open(workflow_path, "w") as file: + write_yaml(workflow_yaml, file) + +class OpenAPISpecsPinned: + def __init__(self, workflow_path: str, workflow_lock_path: str): + self.workflow_path = workflow_path + self.workflow_lock_path = workflow_lock_path + with open(workflow_path, "r") as file: + self.workflow_yaml = yaml.safe_load(file) + + def __enter__(self): + print("OpenAPI specs pinned to current revision") + self.pin_to_current_rev() + + def __exit__(self, exc_type, exc_value, traceback): + self.unpin() + print("OpenAPI specs unpinned") + + def pin_to_current_rev(self): + yaml_copy = copy.deepcopy(self.workflow_yaml) + # Getting the current revisions of the OpenAPI specs + with open(self.workflow_lock_path, "r") as lock_file: + yaml_lock = yaml.safe_load(lock_file) + rev_azure = yaml_lock["sources"]["mistral-azure-source"]["sourceRevisionDigest"] + rev_google_cloud = yaml_lock["sources"]["mistral-google-cloud-source"]["sourceRevisionDigest"] + rev_mistralai = yaml_lock["sources"]["mistral-openapi"]["sourceRevisionDigest"] + + # Pinning the OpenAPI specs to the current revisions + with open(self.workflow_path, "w") as file: + 
set_location_rev(yaml_copy, "mistral-azure-source", rev_azure) + set_location_rev(yaml_copy, "mistral-google-cloud-source", rev_google_cloud) + set_location_rev(yaml_copy, "mistral-openapi", rev_mistralai) + write_yaml(yaml_content=yaml_copy, file=file) + + + def unpin(self): + with open(self.workflow_path, "w") as file: + write_yaml(yaml_content=self.workflow_yaml, file=file) + +class SpeakeasyTargets(str, Enum): + """ + The list of targets defined in the .speakeasy/workflow.yaml[.targets] section. + This can also be listed running `speakeasy list targets` in the root of the project. + """ + ALL = "all" + MISTRALAI_SDK = "mistralai-sdk" + MISTRALAI_AZURE_SDK = "mistralai-azure-sdk" + MISTRALAI_GCP_SDK = "mistralai-gcp-sdk" + + @classmethod + def list(cls): + return list(map(lambda c: c.value, cls)) + + +if __name__ == "__main__": + pin_speakeasy_version(workflow_path=WORKFLOW_PATH, version="1.580.2") + with OpenAPISpecsPinned(WORKFLOW_PATH, WORKFLOW_LOCK_PATH): + subprocess.run(["speakeasy", "run", "-t", "mistralai-sdk", "--skip-versioning", "--verbose"]) From fba7b32a421245b815620eb1ff2d5d730837d26c Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 13 Aug 2025 07:22:38 +0000 Subject: [PATCH 148/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.568.2 --- .speakeasy/gen.lock | 7 ++- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 ++--- RELEASES.md | 12 ++++- docs/models/messageoutputcontentchunks.md | 6 +++ docs/models/metadata.md | 7 --- docs/models/outputcontentchunks.md | 6 +++ docs/models/toolcall.md | 13 +++--- docs/sdks/accesses/README.md | 2 + docs/sdks/documents/README.md | 2 + docs/sdks/libraries/README.md | 2 +- pyproject.toml | 2 +- src/mistralai/_version.py | 4 +- src/mistralai/accesses.py | 2 + src/mistralai/beta.py | 2 +- src/mistralai/documents.py | 2 + src/mistralai/libraries.py | 4 +- src/mistralai/models/__init__.py | 6 +-- .../models/messageoutputcontentchunks.py | 9 +++- src/mistralai/models/outputcontentchunks.py | 9 +++- 
src/mistralai/models/toolcall.py | 44 +------------------ 21 files changed, 73 insertions(+), 82 deletions(-) delete mode 100644 docs/models/metadata.md diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 73686adb..952dea4f 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: c33c788946fa446bfcf90b60f68abde9 + docChecksum: 52e6a68dbdc371fd28af0f2ae79505fa docVersion: 1.0.0 speakeasyVersion: 1.568.2 generationVersion: 2.634.2 - releaseVersion: 1.9.3 - configChecksum: 0f65a9bdd8df5ae03eaaaea3ab055bf1 + releaseVersion: 1.9.6 + configChecksum: 2b88c684b4750a8f781b81adb8480b58 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -326,7 +326,6 @@ generatedFiles: - docs/models/messageoutputeventrole.md - docs/models/messageoutputeventtype.md - docs/models/messages.md - - docs/models/metadata.md - docs/models/metricout.md - docs/models/mistralpromptmode.md - docs/models/modelcapabilities.md diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index ffc6c827..140d66de 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.3 + version: 1.9.6 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 45143669..46da130d 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:22d8044215dc1331ba83f3d25598409bc82fdc04d68033fb05e0133a13cc4dad - sourceBlobDigest: sha256:f3322d8a44d0bf1515b5c1c078525dbf00ff90e6110644de4c03b0b0e9050350 + sourceRevisionDigest: 
sha256:55f3f81b4bc4061fe86e7c25987282480065414f142fdedfe5cb103de7161a2d + sourceBlobDigest: sha256:f14bdfab5afcc84705d574e6fc22806c4a518292b255fec0643f1e1aa18ae58f tags: - latest - - speakeasy-sdk-regen-1753290410 + - speakeasy-sdk-regen-1755069688 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:22d8044215dc1331ba83f3d25598409bc82fdc04d68033fb05e0133a13cc4dad - sourceBlobDigest: sha256:f3322d8a44d0bf1515b5c1c078525dbf00ff90e6110644de4c03b0b0e9050350 + sourceRevisionDigest: sha256:55f3f81b4bc4061fe86e7c25987282480065414f142fdedfe5cb103de7161a2d + sourceBlobDigest: sha256:f14bdfab5afcc84705d574e6fc22806c4a518292b255fec0643f1e1aa18ae58f codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:1fd9897fdd851557c592b8fd46232518359401d15a6574933c43be63ec2edb53 + codeSamplesRevisionDigest: sha256:42d98cf03323c832ebd0a3fe0d3927374d1c212998cc37a5d3372c1c6e8a31c8 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.568.2 diff --git a/RELEASES.md b/RELEASES.md index 2089bb04..430220f2 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -268,4 +268,14 @@ Based on: ### Generated - [python v1.9.3] . ### Releases -- [PyPI v1.9.3] https://pypi.org/project/mistralai/1.9.3 - . \ No newline at end of file +- [PyPI v1.9.3] https://pypi.org/project/mistralai/1.9.3 - . + +## 2025-08-13 07:21:11 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.6] . +### Releases +- [PyPI v1.9.6] https://pypi.org/project/mistralai/1.9.6 - . 
\ No newline at end of file diff --git a/docs/models/messageoutputcontentchunks.md b/docs/models/messageoutputcontentchunks.md index 5dc74a89..b2144e08 100644 --- a/docs/models/messageoutputcontentchunks.md +++ b/docs/models/messageoutputcontentchunks.md @@ -33,3 +33,9 @@ value: models.DocumentURLChunk = /* values here */ value: models.ToolReferenceChunk = /* values here */ ``` +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/docs/models/metadata.md b/docs/models/metadata.md deleted file mode 100644 index e655f580..00000000 --- a/docs/models/metadata.md +++ /dev/null @@ -1,7 +0,0 @@ -# Metadata - - -## Fields - -| Field | Type | Required | Description | -| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/outputcontentchunks.md b/docs/models/outputcontentchunks.md index 2da475f7..b06d4a7f 100644 --- a/docs/models/outputcontentchunks.md +++ b/docs/models/outputcontentchunks.md @@ -33,3 +33,9 @@ value: models.DocumentURLChunk = /* values here */ value: models.ToolReferenceChunk = /* values here */ ``` +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/docs/models/toolcall.md b/docs/models/toolcall.md index 43e09050..3819236b 100644 --- a/docs/models/toolcall.md +++ b/docs/models/toolcall.md @@ -3,10 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | -| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| 
`metadata` | [OptionalNullable[models.Metadata]](../models/metadata.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | +| `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index 20484120..c41b8454 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -3,6 +3,8 @@ ## Overview +(beta) Libraries API - manage access to a library. + ### Available Operations * [list](#list) - List all of the access to this library. diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index 05ae6f74..0b49c05c 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -3,6 +3,8 @@ ## Overview +(beta) Libraries API - manage documents in a library. + ### Available Operations * [list](#list) - List document in a given library. diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index 9c709d0b..4b441c85 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -3,7 +3,7 @@ ## Overview -(beta) Libraries API for indexing documents to enhance agent capabilities. +(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities. 
### Available Operations diff --git a/pyproject.toml b/pyproject.toml index 80e65e4f..42cf1504 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.3" +version = "1.9.6" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 5937a745..e5fd40e6 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.3" +__version__: str = "1.9.6" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.9.3 2.634.2 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.9.6 2.634.2 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/accesses.py b/src/mistralai/accesses.py index 67061b7e..f5f5b446 100644 --- a/src/mistralai/accesses.py +++ b/src/mistralai/accesses.py @@ -9,6 +9,8 @@ class Accesses(BaseSDK): + r"""(beta) Libraries API - manage access to a library.""" + def list( self, *, diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py index 3408d943..5201022e 100644 --- a/src/mistralai/beta.py +++ b/src/mistralai/beta.py @@ -13,7 +13,7 @@ class Beta(BaseSDK): agents: MistralAgents r"""(beta) Agents API""" libraries: Libraries - r"""(beta) Libraries API for indexing documents to enhance agent capabilities.""" + r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" def __init__(self, sdk_config: SDKConfiguration) -> None: BaseSDK.__init__(self, sdk_config) diff --git a/src/mistralai/documents.py b/src/mistralai/documents.py index e43d3faf..c28758d2 100644 --- a/src/mistralai/documents.py +++ b/src/mistralai/documents.py @@ -9,6 +9,8 @@ class Documents(BaseSDK): + r"""(beta) Libraries API - manage documents in a library.""" + 
def list( self, *, diff --git a/src/mistralai/libraries.py b/src/mistralai/libraries.py index 45bf0397..852f6997 100644 --- a/src/mistralai/libraries.py +++ b/src/mistralai/libraries.py @@ -12,10 +12,12 @@ class Libraries(BaseSDK): - r"""(beta) Libraries API for indexing documents to enhance agent capabilities.""" + r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" documents: Documents + r"""(beta) Libraries API - manage documents in a library.""" accesses: Accesses + r"""(beta) Libraries API - manage access to a library.""" def __init__(self, sdk_config: SDKConfiguration) -> None: BaseSDK.__init__(self, sdk_config) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 2039c2b6..d24492d6 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -777,7 +777,7 @@ ) from .timestampgranularity import TimestampGranularity from .tool import Tool, ToolTypedDict - from .toolcall import Metadata, MetadataTypedDict, ToolCall, ToolCallTypedDict + from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum from .toolexecutiondeltaevent import ( @@ -1383,8 +1383,6 @@ "MessageOutputEventTypedDict", "Messages", "MessagesTypedDict", - "Metadata", - "MetadataTypedDict", "MetricOut", "MetricOutTypedDict", "MistralPromptMode", @@ -2172,8 +2170,6 @@ "TimestampGranularity": ".timestampgranularity", "Tool": ".tool", "ToolTypedDict": ".tool", - "Metadata": ".toolcall", - "MetadataTypedDict": ".toolcall", "ToolCall": ".toolcall", "ToolCallTypedDict": ".toolcall", "ToolChoice": ".toolchoice", diff --git a/src/mistralai/models/messageoutputcontentchunks.py b/src/mistralai/models/messageoutputcontentchunks.py index e83fb3a9..136a7608 100644 --- a/src/mistralai/models/messageoutputcontentchunks.py +++ b/src/mistralai/models/messageoutputcontentchunks.py @@ -4,6 +4,7 @@ from 
.documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict from typing import Union @@ -16,6 +17,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, + ThinkChunkTypedDict, ToolFileChunkTypedDict, ToolReferenceChunkTypedDict, ], @@ -25,6 +27,11 @@ MessageOutputContentChunks = TypeAliasType( "MessageOutputContentChunks", Union[ - TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk, ToolReferenceChunk + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ThinkChunk, + ToolFileChunk, + ToolReferenceChunk, ], ) diff --git a/src/mistralai/models/outputcontentchunks.py b/src/mistralai/models/outputcontentchunks.py index 6b7e39ea..ad0c087e 100644 --- a/src/mistralai/models/outputcontentchunks.py +++ b/src/mistralai/models/outputcontentchunks.py @@ -4,6 +4,7 @@ from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict from typing import Union @@ -16,6 +17,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, + ThinkChunkTypedDict, ToolFileChunkTypedDict, ToolReferenceChunkTypedDict, ], @@ -25,6 +27,11 @@ OutputContentChunks = TypeAliasType( "OutputContentChunks", Union[ - TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk, ToolReferenceChunk + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ThinkChunk, + ToolFileChunk, + ToolReferenceChunk, ], ) diff --git 
a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py index 7d3a3c6b..92dbb4a9 100644 --- a/src/mistralai/models/toolcall.py +++ b/src/mistralai/models/toolcall.py @@ -3,28 +3,18 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.types import BaseModel from mistralai.utils import validate_open_enum -from pydantic import model_serializer from pydantic.functional_validators import PlainValidator from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict -class MetadataTypedDict(TypedDict): - pass - - -class Metadata(BaseModel): - pass - - class ToolCallTypedDict(TypedDict): function: FunctionCallTypedDict id: NotRequired[str] type: NotRequired[ToolTypes] index: NotRequired[int] - metadata: NotRequired[Nullable[MetadataTypedDict]] class ToolCall(BaseModel): @@ -37,35 +27,3 @@ class ToolCall(BaseModel): ) index: Optional[int] = 0 - - metadata: OptionalNullable[Metadata] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["id", "type", "index", "metadata"] - nullable_fields = ["metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m From 84915fcd89a168115d0367f6becec9f535997f0e Mon Sep 17 00:00:00 2001 From: Alexandre Menasria 
<47357713+amenasria@users.noreply.github.com> Date: Wed, 13 Aug 2025 11:01:30 +0200 Subject: [PATCH 149/223] Speakeasy update workflow fix (#259) --- .github/workflows/update_speakeasy.yaml | 69 ++++++++++--------------- 1 file changed, 28 insertions(+), 41 deletions(-) diff --git a/.github/workflows/update_speakeasy.yaml b/.github/workflows/update_speakeasy.yaml index 7fe5d3e2..06449cf1 100644 --- a/.github/workflows/update_speakeasy.yaml +++ b/.github/workflows/update_speakeasy.yaml @@ -12,12 +12,14 @@ on: required: true type: string targets: - description: 'Targets to update. If not provided, all targets will be updated.' + description: 'Targets to update.' + required: true type: choice options: - mistralai-sdk - mistralai-azure-sdk - mistralai-gcp-sdk + - all jobs: update-sdks: @@ -40,16 +42,25 @@ jobs: - name: Install dependencies run: | + cp README.md README-PYPI.md poetry install --with dev + - name: Install Speakeasy CLI + run: | + curl -fsSL https://go.speakeasy.com/cli-install.sh | sh + speakeasy --version + - name: Configure Git run: | git config --local user.email "action@github.com" git config --local user.name "GitHub Action" + git config --local --type bool push.autoSetupRemote true - name: Create branch run: | - git checkout -b update-speakeasy-to-${{ github.event.inputs.version }}-${{ github.run_id }} + TIMESTAMP=$(date +%Y%m%d-%H%M%S) + echo "TIMESTAMP=$TIMESTAMP" >> $GITHUB_ENV + git checkout -b update-speakeasy-to-${{ github.event.inputs.version }}-$TIMESTAMP - name: Update Speakeasy SDKs run: | @@ -62,6 +73,8 @@ jobs: poetry run inv update-speakeasy \ --version "${{ github.event.inputs.version }}" \ $TARGETS_ARGS + env: + SPEAKEASY_API_KEY: ${{ secrets.SPEAKEASY_API_KEY }} - name: Check for changes id: check-changes @@ -79,48 +92,22 @@ jobs: if: steps.check-changes.outputs.has_changes == 'true' run: | git add . 
- git commit -m "Update Speakeasy SDKs to version ${{ github.event.inputs.version }} - - Targets updated: ${{ github.event.inputs.targets }} - - This PR was automatically generated by the Update Speakeasy workflow." - git push origin ${{ github.event.inputs.branch_name }} + git commit -m "Update Speakeasy SDKs to version ${{ github.event.inputs.version }}" + git push origin update-speakeasy-to-${{ github.event.inputs.version }}-${{ env.TIMESTAMP }} - name: Create Pull Request if: steps.check-changes.outputs.has_changes == 'true' - uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8 - with: - token: ${{ secrets.GITHUB_TOKEN }} - base: main - branch: ${{ github.event.inputs.branch_name }} - title: "Update Speakeasy SDKs to version ${{ github.event.inputs.version }}" - body: | - ## Summary - - This PR updates the Speakeasy SDKs to version `${{ github.event.inputs.version }}`. - - ## Changes - - - **Version**: Updated to `${{ github.event.inputs.version }}` - - **Targets**: ${{ github.event.inputs.targets }} - - ## Files Updated - - The following SDK files have been regenerated: - - Generated SDK code files - - Updated dependencies and configurations - - ## How to Review - - 1. Check that the generated files look correct - 2. Verify that the version update is appropriate - 3. Ensure all target SDKs are properly updated - - --- - - *This PR was automatically generated by the [Update Speakeasy workflow](.github/workflows/update_speakeasy.yaml)* - labels: automated - assignees: ${{ github.actor }} + run: | + gh pr create \ + --base main \ + --head update-speakeasy-to-${{ github.event.inputs.version }}-${{ env.TIMESTAMP }} \ + --title "Update Speakeasy SDKs to version ${{ github.event.inputs.version }}" \ + --body "This PR updates the Speakeasy SDKs to version ${{ github.event.inputs.version }}. It was automatically generated by the [Update Speakeasy workflow](.github/workflows/update_speakeasy.yaml)." 
\ + --label automated \ + --label speakeasy-update \ + --assignee ${{ github.actor }} + env: + GITHUB_TOKEN: ${{ secrets.SPEAKEASY_WORKFLOW_GITHUB_PAT }} - name: Comment on workflow run if: steps.check-changes.outputs.has_changes == 'false' From a0c4bacfde490f9a027c74a72d0ece51571d848d Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Wed, 20 Aug 2025 08:29:22 +0000 Subject: [PATCH 150/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.568.2 --- .speakeasy/gen.lock | 6 +++--- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 ++++++------ RELEASES.md | 12 +++++++++++- docs/models/messageinputcontentchunks.md | 6 ++++++ docs/models/messageoutputcontentchunks.md | 8 ++++---- docs/models/outputcontentchunks.md | 8 ++++---- pyproject.toml | 2 +- src/mistralai/_version.py | 4 ++-- src/mistralai/models/messageinputcontentchunks.py | 4 +++- 10 files changed, 41 insertions(+), 23 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 952dea4f..2714d6c3 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 52e6a68dbdc371fd28af0f2ae79505fa + docChecksum: a38b769b82bac0658cf02fed22bdbde0 docVersion: 1.0.0 speakeasyVersion: 1.568.2 generationVersion: 2.634.2 - releaseVersion: 1.9.6 - configChecksum: 2b88c684b4750a8f781b81adb8480b58 + releaseVersion: 1.9.7 + configChecksum: 12add5d16b9129632055a59af01aa628 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 140d66de..90a0d865 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.6 + version: 1.9.7 additionalDependencies: dev: pytest: ^8.2.2 diff 
--git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 46da130d..1b1d6fed 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:55f3f81b4bc4061fe86e7c25987282480065414f142fdedfe5cb103de7161a2d - sourceBlobDigest: sha256:f14bdfab5afcc84705d574e6fc22806c4a518292b255fec0643f1e1aa18ae58f + sourceRevisionDigest: sha256:fcdf40207fa983a6ad7c0346f499d2b87ef2f1381a48e791a9a485bd40525ff8 + sourceBlobDigest: sha256:64fa97f0836a2441e32bc255ced698234c147c790775988102f17b493f0da87d tags: - latest - - speakeasy-sdk-regen-1755069688 + - speakeasy-sdk-regen-1755678497 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:55f3f81b4bc4061fe86e7c25987282480065414f142fdedfe5cb103de7161a2d - sourceBlobDigest: sha256:f14bdfab5afcc84705d574e6fc22806c4a518292b255fec0643f1e1aa18ae58f + sourceRevisionDigest: sha256:fcdf40207fa983a6ad7c0346f499d2b87ef2f1381a48e791a9a485bd40525ff8 + sourceBlobDigest: sha256:64fa97f0836a2441e32bc255ced698234c147c790775988102f17b493f0da87d codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:42d98cf03323c832ebd0a3fe0d3927374d1c212998cc37a5d3372c1c6e8a31c8 + codeSamplesRevisionDigest: sha256:d53b8d8fa0068fc51569bdde2e319d0d2abdb019c80180d926a1a1a1f7fb56b3 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.568.2 diff --git a/RELEASES.md b/RELEASES.md index 430220f2..86c423ef 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -278,4 +278,14 @@ Based on: ### Generated - [python v1.9.6] . ### Releases -- [PyPI v1.9.6] https://pypi.org/project/mistralai/1.9.6 - . \ No newline at end of file +- [PyPI v1.9.6] https://pypi.org/project/mistralai/1.9.6 - . 
+ +## 2025-08-20 08:28:00 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.7] . +### Releases +- [PyPI v1.9.7] https://pypi.org/project/mistralai/1.9.7 - . \ No newline at end of file diff --git a/docs/models/messageinputcontentchunks.md b/docs/models/messageinputcontentchunks.md index 50795f0e..4fd18a0d 100644 --- a/docs/models/messageinputcontentchunks.md +++ b/docs/models/messageinputcontentchunks.md @@ -27,3 +27,9 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/docs/models/messageoutputcontentchunks.md b/docs/models/messageoutputcontentchunks.md index b2144e08..d9c3d50e 100644 --- a/docs/models/messageoutputcontentchunks.md +++ b/docs/models/messageoutputcontentchunks.md @@ -27,15 +27,15 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` -### `models.ToolReferenceChunk` +### `models.ThinkChunk` ```python -value: models.ToolReferenceChunk = /* values here */ +value: models.ThinkChunk = /* values here */ ``` -### `models.ThinkChunk` +### `models.ToolReferenceChunk` ```python -value: models.ThinkChunk = /* values here */ +value: models.ToolReferenceChunk = /* values here */ ``` diff --git a/docs/models/outputcontentchunks.md b/docs/models/outputcontentchunks.md index b06d4a7f..c76bc31d 100644 --- a/docs/models/outputcontentchunks.md +++ b/docs/models/outputcontentchunks.md @@ -27,15 +27,15 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` -### `models.ToolReferenceChunk` +### `models.ThinkChunk` ```python -value: models.ToolReferenceChunk = /* values here */ +value: models.ThinkChunk = /* values here */ ``` -### 
`models.ThinkChunk` +### `models.ToolReferenceChunk` ```python -value: models.ThinkChunk = /* values here */ +value: models.ToolReferenceChunk = /* values here */ ``` diff --git a/pyproject.toml b/pyproject.toml index 42cf1504..2631d4da 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.6" +version = "1.9.7" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index e5fd40e6..1843bff5 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.6" +__version__: str = "1.9.7" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.9.6 2.634.2 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.9.7 2.634.2 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/models/messageinputcontentchunks.py b/src/mistralai/models/messageinputcontentchunks.py index 47704211..e90d8aa0 100644 --- a/src/mistralai/models/messageinputcontentchunks.py +++ b/src/mistralai/models/messageinputcontentchunks.py @@ -4,6 +4,7 @@ from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from typing import Union from typing_extensions import TypeAliasType @@ -15,6 +16,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, + ThinkChunkTypedDict, ToolFileChunkTypedDict, ], ) @@ -22,5 +24,5 @@ MessageInputContentChunks = TypeAliasType( "MessageInputContentChunks", - Union[TextChunk, ImageURLChunk, DocumentURLChunk, ToolFileChunk], + 
Union[TextChunk, ImageURLChunk, DocumentURLChunk, ThinkChunk, ToolFileChunk], ) From 16d470cc7630051d43d08bf3040cb2735ce8bb51 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Mon, 25 Aug 2025 18:28:44 +0200 Subject: [PATCH 151/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=201.9.8=20(#266)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.568.2 * feat: add ocr in azure sdk --------- Co-authored-by: speakeasybot Co-authored-by: gaspardBT --- .speakeasy/gen.lock | 4 +- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 14 +- RELEASES.md | 12 +- packages/mistralai_azure/.gitignore | 3 + packages/mistralai_azure/.speakeasy/gen.lock | 57 ++- packages/mistralai_azure/.speakeasy/gen.yaml | 9 + .../docs/models/chatcompletionrequest.md | 3 +- .../docs/models/chatcompletionresponse.md | 4 +- .../models/chatcompletionstreamrequest.md | 3 +- .../docs/models/completionchunk.md | 6 +- .../mistralai_azure/docs/models/document.md | 25 ++ .../docs/models/documenturlchunk.md | 10 + .../docs/models/documenturlchunktype.md | 8 + .../mistralai_azure/docs/models/filechunk.md | 9 + .../mistralai_azure/docs/models/function.md | 4 +- .../mistralai_azure/docs/models/jsonschema.md | 2 +- .../docs/models/mistralpromptmode.md | 8 + .../docs/models/ocrimageobject.md | 14 + .../docs/models/ocrpagedimensions.md | 10 + .../docs/models/ocrpageobject.md | 11 + .../mistralai_azure/docs/models/ocrrequest.md | 16 + .../docs/models/ocrresponse.md | 11 + .../docs/models/ocrusageinfo.md | 9 + packages/mistralai_azure/docs/models/tool.md | 4 +- .../mistralai_azure/docs/models/toolcall.md | 2 +- .../mistralai_azure/docs/models/toolchoice.md | 4 +- .../mistralai_azure/docs/models/usageinfo.md | 12 +- packages/mistralai_azure/poetry.lock | 364 +++++++--------- packages/mistralai_azure/pylintrc | 9 +- 
packages/mistralai_azure/pyproject.toml | 15 +- .../mistralai_azure/scripts/prepare_readme.py | 9 - packages/mistralai_azure/scripts/publish.sh | 2 - .../src/mistralai_azure/_hooks/types.py | 7 + .../src/mistralai_azure/_version.py | 6 +- .../src/mistralai_azure/basesdk.py | 32 +- .../src/mistralai_azure/chat.py | 16 + .../src/mistralai_azure/httpclient.py | 22 +- .../src/mistralai_azure/models/__init__.py | 401 +++++++++++++----- .../models/assistantmessage.py | 2 +- .../models/chatcompletionrequest.py | 23 +- .../models/chatcompletionresponse.py | 12 +- .../models/chatcompletionstreamrequest.py | 23 +- .../models/completionresponsestreamchoice.py | 2 +- .../mistralai_azure/models/deltamessage.py | 2 +- .../models/documenturlchunk.py | 62 +++ .../src/mistralai_azure/models/filechunk.py | 23 + .../src/mistralai_azure/models/imageurl.py | 2 +- .../src/mistralai_azure/models/jsonschema.py | 2 +- .../models/mistralpromptmode.py | 8 + .../mistralai_azure/models/ocrimageobject.py | 89 ++++ .../models/ocrpagedimensions.py | 25 ++ .../mistralai_azure/models/ocrpageobject.py | 64 +++ .../src/mistralai_azure/models/ocrrequest.py | 120 ++++++ .../src/mistralai_azure/models/ocrresponse.py | 68 +++ .../mistralai_azure/models/ocrusageinfo.py | 57 +++ .../mistralai_azure/models/responseformat.py | 2 +- .../src/mistralai_azure/models/toolmessage.py | 2 +- .../src/mistralai_azure/models/usageinfo.py | 79 +++- .../src/mistralai_azure/models/usermessage.py | 2 +- .../src/mistralai_azure/ocr.py | 271 ++++++++++++ .../src/mistralai_azure/sdkconfiguration.py | 7 - .../src/mistralai_azure/types/basemodel.py | 6 +- .../src/mistralai_azure/utils/__init__.py | 175 ++++++-- .../src/mistralai_azure/utils/datetimes.py | 23 + .../src/mistralai_azure/utils/enums.py | 94 ++-- .../src/mistralai_azure/utils/forms.py | 77 ++-- .../src/mistralai_azure/utils/serializers.py | 35 +- pyproject.toml | 2 +- src/mistralai/_version.py | 4 +- 70 files changed, 1956 insertions(+), 566 deletions(-) create mode 
100644 packages/mistralai_azure/docs/models/document.md create mode 100644 packages/mistralai_azure/docs/models/documenturlchunk.md create mode 100644 packages/mistralai_azure/docs/models/documenturlchunktype.md create mode 100644 packages/mistralai_azure/docs/models/filechunk.md create mode 100644 packages/mistralai_azure/docs/models/mistralpromptmode.md create mode 100644 packages/mistralai_azure/docs/models/ocrimageobject.md create mode 100644 packages/mistralai_azure/docs/models/ocrpagedimensions.md create mode 100644 packages/mistralai_azure/docs/models/ocrpageobject.md create mode 100644 packages/mistralai_azure/docs/models/ocrrequest.md create mode 100644 packages/mistralai_azure/docs/models/ocrresponse.md create mode 100644 packages/mistralai_azure/docs/models/ocrusageinfo.md delete mode 100644 packages/mistralai_azure/scripts/prepare_readme.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/filechunk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/ocrimageobject.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/ocrpagedimensions.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/ocrresponse.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/ocrusageinfo.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/ocr.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/datetimes.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 2714d6c3..2af5a009 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,8 +5,8 @@ management: docVersion: 1.0.0 
speakeasyVersion: 1.568.2 generationVersion: 2.634.2 - releaseVersion: 1.9.7 - configChecksum: 12add5d16b9129632055a59af01aa628 + releaseVersion: 1.9.8 + configChecksum: d7b7d0bf04e94ae161a650b203bc54df repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 90a0d865..fd56d757 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.7 + version: 1.9.8 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 1b1d6fed..4caed8b7 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -2,8 +2,8 @@ speakeasyVersion: 1.568.2 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:c5931a7e0cc2db844149d71db57dfc2178665f0400bc26c90ee113795ea2872f - sourceBlobDigest: sha256:504fff788fdac8d781e33d85e3a04d35f6d9f7a3ef5ed40da8b4567074e94f03 + sourceRevisionDigest: sha256:670c460702ec74f7077491464a6dc5ee9d873969c80e812c48dbf4deb160e470 + sourceBlobDigest: sha256:5a3ebfa4cb00a015bb7bb03ec7442fc7e0b9c17ca66ab35d3045290b2ad87eac tags: - latest mistral-google-cloud-source: @@ -18,15 +18,15 @@ sources: sourceBlobDigest: sha256:64fa97f0836a2441e32bc255ced698234c147c790775988102f17b493f0da87d tags: - latest - - speakeasy-sdk-regen-1755678497 + - speakeasy-sdk-regen-1756133663 targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:c5931a7e0cc2db844149d71db57dfc2178665f0400bc26c90ee113795ea2872f - sourceBlobDigest: sha256:504fff788fdac8d781e33d85e3a04d35f6d9f7a3ef5ed40da8b4567074e94f03 + sourceRevisionDigest: 
sha256:670c460702ec74f7077491464a6dc5ee9d873969c80e812c48dbf4deb160e470 + sourceBlobDigest: sha256:5a3ebfa4cb00a015bb7bb03ec7442fc7e0b9c17ca66ab35d3045290b2ad87eac codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:e242a7fc42e44d2bbc8e5637d4a6455da7fb3d0307dc275ee4c64867f5c4be55 + codeSamplesRevisionDigest: sha256:a4ace4b17dee92b180a2fede7742bd93fa1a83a9f96e4f61531289cafc50f6ad mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud @@ -40,7 +40,7 @@ targets: sourceRevisionDigest: sha256:fcdf40207fa983a6ad7c0346f499d2b87ef2f1381a48e791a9a485bd40525ff8 sourceBlobDigest: sha256:64fa97f0836a2441e32bc255ced698234c147c790775988102f17b493f0da87d codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:d53b8d8fa0068fc51569bdde2e319d0d2abdb019c80180d926a1a1a1f7fb56b3 + codeSamplesRevisionDigest: sha256:f215393d076966760f8b24cb3db48d6dd9c2ba2e06daa7964860bab4b09f51f2 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.568.2 diff --git a/RELEASES.md b/RELEASES.md index 86c423ef..2e99de4a 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -288,4 +288,14 @@ Based on: ### Generated - [python v1.9.7] . ### Releases -- [PyPI v1.9.7] https://pypi.org/project/mistralai/1.9.7 - . \ No newline at end of file +- [PyPI v1.9.7] https://pypi.org/project/mistralai/1.9.7 - . + +## 2025-08-25 14:54:06 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.8] . +### Releases +- [PyPI v1.9.8] https://pypi.org/project/mistralai/1.9.8 - . 
\ No newline at end of file diff --git a/packages/mistralai_azure/.gitignore b/packages/mistralai_azure/.gitignore index 5a82b069..f2ea8c39 100644 --- a/packages/mistralai_azure/.gitignore +++ b/packages/mistralai_azure/.gitignore @@ -1,3 +1,6 @@ +**/__pycache__/ +**/.speakeasy/temp/ +**/.speakeasy/logs/ .speakeasy/reports README-PYPI.md .venv/ diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index b7d6f3ba..ef80e828 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,18 +1,19 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 574e96caec9a63dbe3f39d646830f2c2 - docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 + docChecksum: 87653f040f5b36c90e066870f34c478e + docVersion: 1.0.0 + speakeasyVersion: 1.568.2 + generationVersion: 2.634.2 releaseVersion: 1.6.0 - configChecksum: 3a4d9b204b5731c461ed7279710d5ed6 + configChecksum: 0b604304465a25f89acca310710262d1 published: true features: python: additionalDependencies: 1.0.0 + additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.12.3 + core: 5.19.3 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 @@ -21,14 +22,14 @@ features: globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.0 + globalServerURLs: 3.1.1 methodArguments: 1.0.2 nameOverrides: 3.0.1 nullables: 1.0.1 - openEnums: 1.0.0 + openEnums: 1.0.1 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 + sdkHooks: 1.1.0 serverEvents: 1.0.7 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 @@ -56,6 +57,10 @@ generatedFiles: - docs/models/content.md - docs/models/contentchunk.md - docs/models/deltamessage.md + - docs/models/document.md + - docs/models/documenturlchunk.md + - docs/models/documenturlchunktype.md + - docs/models/filechunk.md - docs/models/finishreason.md - docs/models/function.md - 
docs/models/functioncall.md @@ -68,6 +73,13 @@ generatedFiles: - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md + - docs/models/mistralpromptmode.md + - docs/models/ocrimageobject.md + - docs/models/ocrpagedimensions.md + - docs/models/ocrpageobject.md + - docs/models/ocrrequest.md + - docs/models/ocrresponse.md + - docs/models/ocrusageinfo.md - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md @@ -98,7 +110,6 @@ generatedFiles: - py.typed - pylintrc - pyproject.toml - - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai_azure/__init__.py - src/mistralai_azure/_hooks/__init__.py @@ -119,6 +130,8 @@ generatedFiles: - src/mistralai_azure/models/completionresponsestreamchoice.py - src/mistralai_azure/models/contentchunk.py - src/mistralai_azure/models/deltamessage.py + - src/mistralai_azure/models/documenturlchunk.py + - src/mistralai_azure/models/filechunk.py - src/mistralai_azure/models/function.py - src/mistralai_azure/models/functioncall.py - src/mistralai_azure/models/functionname.py @@ -126,6 +139,13 @@ generatedFiles: - src/mistralai_azure/models/imageurl.py - src/mistralai_azure/models/imageurlchunk.py - src/mistralai_azure/models/jsonschema.py + - src/mistralai_azure/models/mistralpromptmode.py + - src/mistralai_azure/models/ocrimageobject.py + - src/mistralai_azure/models/ocrpagedimensions.py + - src/mistralai_azure/models/ocrpageobject.py + - src/mistralai_azure/models/ocrrequest.py + - src/mistralai_azure/models/ocrresponse.py + - src/mistralai_azure/models/ocrusageinfo.py - src/mistralai_azure/models/prediction.py - src/mistralai_azure/models/referencechunk.py - src/mistralai_azure/models/responseformat.py @@ -143,13 +163,14 @@ generatedFiles: - src/mistralai_azure/models/usageinfo.py - src/mistralai_azure/models/usermessage.py - src/mistralai_azure/models/validationerror.py + - src/mistralai_azure/ocr.py - src/mistralai_azure/py.typed - - src/mistralai_azure/sdk.py - 
src/mistralai_azure/sdkconfiguration.py - src/mistralai_azure/types/__init__.py - src/mistralai_azure/types/basemodel.py - src/mistralai_azure/utils/__init__.py - src/mistralai_azure/utils/annotations.py + - src/mistralai_azure/utils/datetimes.py - src/mistralai_azure/utils/enums.py - src/mistralai_azure/utils/eventstreaming.py - src/mistralai_azure/utils/forms.py @@ -171,15 +192,23 @@ examples: responses: "422": application/json: {} - "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} -examplesVersion: 1.0.0 + ocr_v1_ocr_post: + speakeasy-default-ocr-v1-ocr-post: + requestBody: + application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}} + responses: + "200": + application/json: {"pages": [], "model": "Golf", "usage_info": {"pages_processed": 944919}} + "422": + application/json: {} +examplesVersion: 1.0.2 generatedTests: {} diff --git 
a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index be4a1781..63e2da75 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -4,6 +4,7 @@ generation: maintainOpenAPIOrder: true usageSnippets: optionalPropertyRendering: withExample + sdkInitStyle: constructor useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true @@ -11,9 +12,14 @@ generation: parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true securityFeb2025: false + sharedErrorComponentsApr2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + tests: + generateTests: true + generateNewTests: false + skipResponseBodyAssertions: false python: version: 1.6.0 additionalDependencies: @@ -22,6 +28,7 @@ python: pytest-asyncio: ^0.23.7 authors: - Mistral + baseErrorName: MistralAzureError clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in Azure. 
@@ -43,8 +50,10 @@ python: inputModelSuffix: input maxMethodParams: 15 methodArguments: infer-optional-args + moduleName: "" outputModelSuffix: output packageName: mistralai_azure + pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index a9a174fb..b0f05d37 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -5,7 +5,6 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -13,6 +12,7 @@ | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | @@ -21,4 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionresponse.md b/packages/mistralai_azure/docs/models/chatcompletionresponse.md index ad376158..a0465ffb 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionresponse.md +++ b/packages/mistralai_azure/docs/models/chatcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index b3e06e7a..90397dec 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -5,7 +5,6 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -13,6 +12,7 @@ | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | @@ -21,4 +21,5 @@ | `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | | `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | | `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | | `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/completionchunk.md b/packages/mistralai_azure/docs/models/completionchunk.md index b8ae6a09..7f8ab5e6 100644 --- a/packages/mistralai_azure/docs/models/completionchunk.md +++ b/packages/mistralai_azure/docs/models/completionchunk.md @@ -6,8 +6,8 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | | `id` | *str* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | -| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | | `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | `created` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/document.md b/packages/mistralai_azure/docs/models/document.md new file mode 100644 index 00000000..509d43b7 --- /dev/null +++ b/packages/mistralai_azure/docs/models/document.md @@ -0,0 +1,25 @@ +# Document + +Document to run OCR on + + +## Supported Types + +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* 
values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/documenturlchunk.md b/packages/mistralai_azure/docs/models/documenturlchunk.md new file mode 100644 index 00000000..6c9a5b4d --- /dev/null +++ b/packages/mistralai_azure/docs/models/documenturlchunk.md @@ -0,0 +1,10 @@ +# DocumentURLChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | +| `type` | [Optional[models.DocumentURLChunkType]](../models/documenturlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/documenturlchunktype.md b/packages/mistralai_azure/docs/models/documenturlchunktype.md new file mode 100644 index 00000000..32e1fa9e --- /dev/null +++ b/packages/mistralai_azure/docs/models/documenturlchunktype.md @@ -0,0 +1,8 @@ +# DocumentURLChunkType + + +## Values + +| Name | Value | +| -------------- | -------------- | +| `DOCUMENT_URL` | document_url | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/filechunk.md b/packages/mistralai_azure/docs/models/filechunk.md new file mode 100644 index 00000000..18217114 --- /dev/null +++ b/packages/mistralai_azure/docs/models/filechunk.md @@ -0,0 +1,9 @@ +# FileChunk + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `type` | *Optional[Literal["file"]]* 
| :heavy_minus_sign: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/mistralai_azure/docs/models/function.md index a166b7bb..b2bdb3fe 100644 --- a/packages/mistralai_azure/docs/models/function.md +++ b/packages/mistralai_azure/docs/models/function.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | -| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/jsonschema.md b/packages/mistralai_azure/docs/models/jsonschema.md index ae387867..7ff7c070 100644 --- a/packages/mistralai_azure/docs/models/jsonschema.md +++ b/packages/mistralai_azure/docs/models/jsonschema.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ----------------------- | ----------------------- | ----------------------- | ----------------------- | | `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/mistralpromptmode.md b/packages/mistralai_azure/docs/models/mistralpromptmode.md new file mode 100644 index 00000000..7416e203 --- /dev/null +++ b/packages/mistralai_azure/docs/models/mistralpromptmode.md @@ -0,0 +1,8 @@ +# MistralPromptMode + + +## Values 
+ +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/ocrimageobject.md b/packages/mistralai_azure/docs/models/ocrimageobject.md new file mode 100644 index 00000000..3c0d5544 --- /dev/null +++ b/packages/mistralai_azure/docs/models/ocrimageobject.md @@ -0,0 +1,14 @@ +# OCRImageObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Image ID for extracted image in a page | +| `top_left_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of top-left corner of the extracted image | +| `top_left_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of top-left corner of the extracted image | +| `bottom_right_x` | *Nullable[int]* | :heavy_check_mark: | X coordinate of bottom-right corner of the extracted image | +| `bottom_right_y` | *Nullable[int]* | :heavy_check_mark: | Y coordinate of bottom-right corner of the extracted image | +| `image_base64` | *OptionalNullable[str]* | :heavy_minus_sign: | Base64 string of the extracted image | +| `image_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Annotation of the extracted image in json str | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/ocrpagedimensions.md b/packages/mistralai_azure/docs/models/ocrpagedimensions.md new file mode 100644 index 00000000..c93ca64d --- /dev/null +++ b/packages/mistralai_azure/docs/models/ocrpagedimensions.md @@ -0,0 +1,10 @@ +# OCRPageDimensions + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | +| 
`dpi` | *int* | :heavy_check_mark: | Dots per inch of the page-image | +| `height` | *int* | :heavy_check_mark: | Height of the image in pixels | +| `width` | *int* | :heavy_check_mark: | Width of the image in pixels | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/ocrpageobject.md b/packages/mistralai_azure/docs/models/ocrpageobject.md new file mode 100644 index 00000000..9db3bb77 --- /dev/null +++ b/packages/mistralai_azure/docs/models/ocrpageobject.md @@ -0,0 +1,11 @@ +# OCRPageObject + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | The page index in a pdf document starting from 0 | +| `markdown` | *str* | :heavy_check_mark: | The markdown string response of the page | +| `images` | List[[models.OCRImageObject](../models/ocrimageobject.md)] | :heavy_check_mark: | List of all extracted images in the page | +| `dimensions` | [Nullable[models.OCRPageDimensions]](../models/ocrpagedimensions.md) | :heavy_check_mark: | The dimensions of the PDF Page's screenshot image | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/ocrrequest.md b/packages/mistralai_azure/docs/models/ocrrequest.md new file mode 100644 index 00000000..6a9c77ab --- /dev/null +++ b/packages/mistralai_azure/docs/models/ocrrequest.md @@ -0,0 +1,16 @@ +# OCRRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/ocrresponse.md b/packages/mistralai_azure/docs/models/ocrresponse.md new file mode 100644 index 00000000..0a309317 --- /dev/null +++ b/packages/mistralai_azure/docs/models/ocrresponse.md @@ -0,0 +1,11 @@ +# OCRResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `pages` | List[[models.OCRPageObject](../models/ocrpageobject.md)] | :heavy_check_mark: | List of OCR info for pages. | +| `model` | *str* | :heavy_check_mark: | The model used to generate the OCR. | +| `document_annotation` | *OptionalNullable[str]* | :heavy_minus_sign: | Formatted response in the request_format if provided in json str | +| `usage_info` | [models.OCRUsageInfo](../models/ocrusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/ocrusageinfo.md b/packages/mistralai_azure/docs/models/ocrusageinfo.md new file mode 100644 index 00000000..d9d79125 --- /dev/null +++ b/packages/mistralai_azure/docs/models/ocrusageinfo.md @@ -0,0 +1,9 @@ +# OCRUsageInfo + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `pages_processed` | *int* | :heavy_check_mark: | Number of pages processed | +| `doc_size_bytes` | *OptionalNullable[int]* | :heavy_minus_sign: | Document size in bytes | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/tool.md b/packages/mistralai_azure/docs/models/tool.md index 822f86f8..fb661f72 100644 --- a/packages/mistralai_azure/docs/models/tool.md +++ 
b/packages/mistralai_azure/docs/models/tool.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/mistralai_azure/docs/models/toolcall.md index 574be1ea..3819236b 100644 --- a/packages/mistralai_azure/docs/models/toolcall.md +++ b/packages/mistralai_azure/docs/models/toolcall.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolchoice.md b/packages/mistralai_azure/docs/models/toolchoice.md index 792ebcd6..373046bb 100644 --- a/packages/mistralai_azure/docs/models/toolchoice.md +++ b/packages/mistralai_azure/docs/models/toolchoice.md @@ -7,5 +7,5 @@ ToolChoice is either a 
ToolChoiceEnum or a ToolChoice | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usageinfo.md b/packages/mistralai_azure/docs/models/usageinfo.md index 9f56a3ae..f5204ac9 100644 --- a/packages/mistralai_azure/docs/models/usageinfo.md +++ b/packages/mistralai_azure/docs/models/usageinfo.md @@ -3,8 +3,10 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | -| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | -| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| 
`prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/poetry.lock b/packages/mistralai_azure/poetry.lock index 8b70ddcc..fb5b615e 100644 --- a/packages/mistralai_azure/poetry.lock +++ b/packages/mistralai_azure/poetry.lock @@ -91,21 +91,6 @@ files = [ graph = ["objgraph (>=1.7.2)"] profile = ["gprof2dot (>=2022.7.29)"] -[[package]] -name = "eval-type-backport" -version = "0.2.0" -description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, - {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, -] - -[package.extras] -tests = ["pytest"] - [[package]] name = "exceptiongroup" version = "1.2.2" @@ -124,37 +109,37 @@ test = ["pytest (>=6)"] [[package]] name = "h11" -version = "0.14.0" +version = "0.16.0" description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" groups = ["main"] files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, + {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, ] [[package]] name = "httpcore" -version = "1.0.5" +version = "1.0.9" description = "A minimal low-level HTTP client." 
optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, - {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, + {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, + {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, ] [package.dependencies] certifi = "*" -h11 = ">=0.13,<0.15" +h11 = ">=0.16" [package.extras] asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.26.0)"] +trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" @@ -234,50 +219,44 @@ files = [ [[package]] name = "mypy" -version = "1.14.1" +version = "1.15.0" description = "Optional static typing for Python" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, - {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, - {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, - {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, - {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, - {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, - {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, - {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, - {file = 
"mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, - {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, - {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, - {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, - {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, - {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, - {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, - {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, - {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, + {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"}, + {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"}, + {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"}, + {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"}, + {file = "mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"}, + {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"}, + {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"}, + {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"}, + {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"}, + {file = "mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"}, + {file = "mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"}, + {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"}, + {file = "mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"}, + {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"}, + {file = 
"mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"}, + {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"}, + {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"}, + {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"}, + {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"}, + {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"}, + {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"}, + {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"}, + {file = "mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"}, + {file = "mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"}, + {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"}, + 
{file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"}, ] [package.dependencies] @@ -351,20 +330,21 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "pydantic" -version = "2.10.6" +version = "2.11.7" description = "Data validation using Python type hints" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, - {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, + {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, + {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, ] [package.dependencies] annotated-types = ">=0.6.0" -pydantic-core = "2.27.2" +pydantic-core = "2.33.2" typing-extensions = ">=4.12.2" +typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] @@ -372,112 +352,111 @@ timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows [[package]] name = "pydantic-core" -version = "2.27.2" +version = "2.33.2" description = "Core functionality for Pydantic validation and serialization" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, - {file = 
"pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, - {file = 
"pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", 
hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = 
"sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = 
"sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, - {file 
= "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, - {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, + {file = 
"pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, + {file = 
"pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, + {file = 
"pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, + {file = 
"pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash 
= "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, + {file = 
"pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, + {file = 
"pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, + {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, ] [package.dependencies] @@ -556,33 +535,6 @@ pytest = ">=7.0.0,<9" docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -groups = ["main"] -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - [[package]] name = "sniffio" version = "1.3.1" @@ -620,18 +572,6 @@ files = [ {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, ] -[[package]] -name = "types-python-dateutil" -version = "2.9.0.20240316" -description = "Typing stubs for python-dateutil" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, - {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = 
"sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, -] - [[package]] name = "typing-extensions" version = "4.12.2" @@ -661,5 +601,5 @@ typing-extensions = ">=4.12.0" [metadata] lock-version = "2.1" -python-versions = ">=3.9" -content-hash = "16a8e7bb56287babdd384870773880315911c8f1851d21314cf11ca92104c600" +python-versions = ">=3.9.2" +content-hash = "7783570c2127219ca33415a933b1f4b8e43c4b432d4f04679552c25f89596fc1" diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index 266bc815..a8fcb932 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -188,8 +188,8 @@ good-names=i, Run, _, e, - n, - id + id, + n # Good variable names regexes, separated by a comma. If names match any regex, # they will always be accepted @@ -458,7 +458,8 @@ disable=raw-checker-failed, relative-beyond-top-level, consider-using-with, wildcard-import, - unused-wildcard-import + unused-wildcard-import, + too-many-return-statements # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option @@ -659,4 +660,4 @@ init-import=no # List of qualified module names which can have objects that can redefine # builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index cca906a7..81387b21 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -3,14 +3,12 @@ name = "mistralai_azure" version = "1.6.0" description = "Python Client SDK for the Mistral AI API in Azure." 
authors = [{ name = "Mistral" },] -readme = "README-PYPI.md" -requires-python = ">=3.9" +readme = "README.md" +requires-python = ">=3.9.2" dependencies = [ - "eval-type-backport >=0.2.0", + "httpcore >=1.0.9", "httpx >=0.28.1", - "pydantic >=2.10.3", - "python-dateutil >=2.8.2", - "typing-inspection >=0.4.0", + "pydantic >=2.11.2", ] [tool.poetry] @@ -26,11 +24,10 @@ include = ["py.typed", "src/mistralai_azure/py.typed"] in-project = true [tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" +mypy = "==1.15.0" pylint = "==3.2.3" pytest = "^8.2.2" pytest-asyncio = "^0.23.7" -types-python-dateutil = "^2.9.0.20240316" [build-system] requires = ["poetry-core"] @@ -42,6 +39,8 @@ pythonpath = ["src"] [tool.mypy] disable_error_code = "misc" +explicit_package_bases = true +mypy_path = "src" [[tool.mypy.overrides]] module = "typing_inspect" diff --git a/packages/mistralai_azure/scripts/prepare_readme.py b/packages/mistralai_azure/scripts/prepare_readme.py deleted file mode 100644 index 825d9ded..00000000 --- a/packages/mistralai_azure/scripts/prepare_readme.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import shutil - -try: - shutil.copyfile("README.md", "README-PYPI.md") -except Exception as e: - print("Failed to copy README.md to README-PYPI.md") - print(e) diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index f2f2cf2c..1ee7194c 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -2,6 +2,4 @@ export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} -poetry run python scripts/prepare_readme.py - poetry publish --build --skip-existing diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py index 297dfa2f..0c22d7eb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py +++ b/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py @@ -3,10 +3,12 @@ from abc import ABC, abstractmethod import httpx from mistralai_azure.httpclient import HttpClient +from mistralai_azure.sdkconfiguration import SDKConfiguration from typing import Any, Callable, List, Optional, Tuple, Union class HookContext: + config: SDKConfiguration base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None @@ -14,11 +16,13 @@ class HookContext: def __init__( self, + config: SDKConfiguration, base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.config = config self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes @@ -28,6 +32,7 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -38,6 +43,7 @@ def __init__(self, hook_ctx: HookContext): class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, 
hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -48,6 +54,7 @@ def __init__(self, hook_ctx: HookContext): class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index 65696610..5fd03467 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -4,9 +4,9 @@ __title__: str = "mistralai_azure" __version__: str = "1.6.0" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai_azure" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.634.2" +__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.634.2 1.0.0 mistralai_azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 24e4935e..84738ce8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -218,12 +218,12 @@ def do_request( client = self.sdk_configuration.client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -237,9 +237,7 @@ def do(): http_res = client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = 
hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e @@ -257,7 +255,7 @@ def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -277,9 +275,7 @@ def do(): http_res = do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res @@ -294,12 +290,12 @@ async def do_request_async( client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + async def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -313,9 +309,7 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e @@ -333,7 +327,7 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -355,8 +349,6 @@ async def do(): http_res = await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = 
self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index cf3511fd..20184014 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -41,6 +41,7 @@ def stream( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -67,6 +68,7 @@ def stream( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -106,6 +108,7 @@ def stream( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -138,6 +141,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], @@ -212,6 +216,7 @@ async def stream_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -238,6 +243,7 @@ async def stream_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -277,6 +283,7 @@ async def stream_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -309,6 +316,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], @@ -391,6 +399,7 @@ def complete( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -415,6 +424,7 @@ def complete( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -456,6 +466,7 @@ def complete( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -488,6 +499,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], @@ -566,6 +578,7 @@ async def complete_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -590,6 +603,7 @@ async def complete_async( :param n: Number of completions to return for each request, input tokens are only billed once. :param prediction: :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -631,6 +645,7 @@ async def complete_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, safe_prompt=safe_prompt, ) @@ -663,6 +678,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/mistralai_azure/src/mistralai_azure/httpclient.py index 1e426352..47b052cb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/httpclient.py +++ b/packages/mistralai_azure/src/mistralai_azure/httpclient.py @@ -2,7 +2,6 @@ # pyright: reportReturnType = false import asyncio -from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -116,21 +115,12 @@ def close_clients( pass if async_client is not None and not async_client_supplied: - is_async = False try: - asyncio.get_running_loop() - is_async = True + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. 
- if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: + try: asyncio.run(async_client.aclose()) - except Exception: - pass + except RuntimeError: + # best effort + pass diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index 2229c469..bc1a3f4f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -1,109 +1,134 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from .assistantmessage import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentTypedDict, - AssistantMessageRole, - AssistantMessageTypedDict, -) -from .chatcompletionchoice import ( - ChatCompletionChoice, - ChatCompletionChoiceFinishReason, - ChatCompletionChoiceTypedDict, -) -from .chatcompletionrequest import ( - ChatCompletionRequest, - ChatCompletionRequestMessages, - ChatCompletionRequestMessagesTypedDict, - ChatCompletionRequestStop, - ChatCompletionRequestStopTypedDict, - ChatCompletionRequestToolChoice, - ChatCompletionRequestToolChoiceTypedDict, - ChatCompletionRequestTypedDict, -) -from .chatcompletionresponse import ( - ChatCompletionResponse, - ChatCompletionResponseTypedDict, -) -from .chatcompletionstreamrequest import ( - ChatCompletionStreamRequest, - ChatCompletionStreamRequestToolChoice, - ChatCompletionStreamRequestToolChoiceTypedDict, - ChatCompletionStreamRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, -) -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, - 
FinishReason, -) -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict -from .function import Function, FunctionTypedDict -from .functioncall import ( - Arguments, - ArgumentsTypedDict, - FunctionCall, - FunctionCallTypedDict, -) -from .functionname import FunctionName, FunctionNameTypedDict -from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .imageurl import ImageURL, ImageURLTypedDict -from .imageurlchunk import ( - ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, - ImageURLChunkTypedDict, -) -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .responseformats import ResponseFormats -from .sdkerror import SDKError -from .security import Security, SecurityTypedDict -from .systemmessage import ( - Role, - SystemMessage, - SystemMessageContent, - SystemMessageContentTypedDict, - SystemMessageTypedDict, -) -from .textchunk import TextChunk, TextChunkTypedDict, Type -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ( - ToolMessage, - ToolMessageContent, - ToolMessageContentTypedDict, - ToolMessageRole, - ToolMessageTypedDict, -) -from .tooltypes import ToolTypes -from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import ( - UserMessage, - UserMessageContent, - UserMessageContentTypedDict, - UserMessageRole, - UserMessageTypedDict, -) -from .validationerror import ( - Loc, - LocTypedDict, - ValidationError, - ValidationErrorTypedDict, -) +from typing import TYPE_CHECKING +from importlib 
import import_module +if TYPE_CHECKING: + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageRole, + AssistantMessageTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessages, + ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, + ) + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, + FinishReason, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict + from .deltamessage import ( + Content, + ContentTypedDict, + DeltaMessage, + DeltaMessageTypedDict, + ) + from .documenturlchunk import ( + DocumentURLChunk, + DocumentURLChunkType, + DocumentURLChunkTypedDict, + ) + from .filechunk import FileChunk, FileChunkTypedDict + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imageurl 
import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .mistralpromptmode import MistralPromptMode + from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict + from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict + from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict + from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict + from .ocrresponse import OCRResponse, OCRResponseTypedDict + from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict + from .prediction import Prediction, PredictionTypedDict + from .referencechunk import ( + ReferenceChunk, + ReferenceChunkType, + ReferenceChunkTypedDict, + ) + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .sdkerror import SDKError + from .security import Security, SecurityTypedDict + from .systemmessage import ( + Role, + SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict, Type + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, + ) + from .tooltypes import ToolTypes + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) __all__ = [ 
"Arguments", @@ -142,6 +167,13 @@ "ContentTypedDict", "DeltaMessage", "DeltaMessageTypedDict", + "Document", + "DocumentTypedDict", + "DocumentURLChunk", + "DocumentURLChunkType", + "DocumentURLChunkTypedDict", + "FileChunk", + "FileChunkTypedDict", "FinishReason", "Function", "FunctionCall", @@ -164,6 +196,19 @@ "LocTypedDict", "Messages", "MessagesTypedDict", + "MistralPromptMode", + "OCRImageObject", + "OCRImageObjectTypedDict", + "OCRPageDimensions", + "OCRPageDimensionsTypedDict", + "OCRPageObject", + "OCRPageObjectTypedDict", + "OCRRequest", + "OCRRequestTypedDict", + "OCRResponse", + "OCRResponseTypedDict", + "OCRUsageInfo", + "OCRUsageInfoTypedDict", "Prediction", "PredictionTypedDict", "ReferenceChunk", @@ -208,3 +253,153 @@ "ValidationError", "ValidationErrorTypedDict", ] + +_dynamic_imports: dict[str, str] = { + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageRole": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessages": ".chatcompletionrequest", + "ChatCompletionRequestMessagesTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": 
".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "Messages": ".chatcompletionstreamrequest", + "MessagesTypedDict": ".chatcompletionstreamrequest", + "Stop": ".chatcompletionstreamrequest", + "StopTypedDict": ".chatcompletionstreamrequest", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "FinishReason": ".completionresponsestreamchoice", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "Content": ".deltamessage", + "ContentTypedDict": ".deltamessage", + "DeltaMessage": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "DocumentURLChunk": ".documenturlchunk", + "DocumentURLChunkType": ".documenturlchunk", + "DocumentURLChunkTypedDict": ".documenturlchunk", + "FileChunk": ".filechunk", + "FileChunkTypedDict": ".filechunk", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkImageURL": ".imageurlchunk", + "ImageURLChunkImageURLTypedDict": ".imageurlchunk", + "ImageURLChunkType": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "MistralPromptMode": 
".mistralpromptmode", + "OCRImageObject": ".ocrimageobject", + "OCRImageObjectTypedDict": ".ocrimageobject", + "OCRPageDimensions": ".ocrpagedimensions", + "OCRPageDimensionsTypedDict": ".ocrpagedimensions", + "OCRPageObject": ".ocrpageobject", + "OCRPageObjectTypedDict": ".ocrpageobject", + "Document": ".ocrrequest", + "DocumentTypedDict": ".ocrrequest", + "OCRRequest": ".ocrrequest", + "OCRRequestTypedDict": ".ocrrequest", + "OCRResponse": ".ocrresponse", + "OCRResponseTypedDict": ".ocrresponse", + "OCRUsageInfo": ".ocrusageinfo", + "OCRUsageInfoTypedDict": ".ocrusageinfo", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkType": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "SDKError": ".sdkerror", + "Security": ".security", + "SecurityTypedDict": ".security", + "Role": ".systemmessage", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "TextChunk": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "Type": ".textchunk", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageRole": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolTypes": ".tooltypes", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageRole": ".usermessage", + 
"UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " + ) + + try: + module = import_module(module_name, __package__) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 530b33df..86f5ec09 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -56,7 +56,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index f48c1f50..8dffe1bd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import 
Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -17,8 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_azure.utils import get_discriminator +from mistralai_azure.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -96,6 +98,8 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -144,6 +148,11 @@ class ChatCompletionRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -165,16 +174,24 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py index ecd85d5c..7a66f322 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_azure.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class ChatCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class ChatCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: List[ChatCompletionChoiceTypedDict] class ChatCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class ChatCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: 
List[ChatCompletionChoice] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 50cf1f01..5fced93e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -17,8 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_azure.utils import get_discriminator +from mistralai_azure.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -91,6 +93,8 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] r"""Whether to inject a safety prompt before all conversations.""" @@ -138,6 +142,11 @@ class ChatCompletionStreamRequest(BaseModel): parallel_tool_calls: Optional[bool] = None + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: Optional[bool] = None r"""Whether to inject a safety prompt before all conversations.""" @@ -159,16 +168,24 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", "safe_prompt", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py index 37294d9b..1a492204 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py @@ -38,7 +38,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py index 112eb127..7fa3c3f2 100644 
--- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py @@ -46,7 +46,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py new file mode 100644 index 00000000..23ff71a6 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py @@ -0,0 +1,62 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +DocumentURLChunkType = Literal["document_url"] + + +class DocumentURLChunkTypedDict(TypedDict): + document_url: str + document_name: NotRequired[Nullable[str]] + r"""The filename of the document""" + type: NotRequired[DocumentURLChunkType] + + +class DocumentURLChunk(BaseModel): + document_url: str + + document_name: OptionalNullable[str] = UNSET + r"""The filename of the document""" + + type: Optional[DocumentURLChunkType] = "document_url" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["document_name", "type"] + nullable_fields = ["document_name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + 
self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/filechunk.py b/packages/mistralai_azure/src/mistralai_azure/models/filechunk.py new file mode 100644 index 00000000..2c3edc07 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/filechunk.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from mistralai_azure.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class FileChunkTypedDict(TypedDict): + file_id: str + type: Literal["file"] + + +class FileChunk(BaseModel): + file_id: str + + TYPE: Annotated[ + Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], + pydantic.Field(alias="type"), + ] = "file" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py b/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py index 8faa272b..a5a66360 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py @@ -32,7 +32,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py index b2d07d3a..0f7563fc 100644 --- 
a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py @@ -40,7 +40,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py new file mode 100644 index 00000000..bd4584a5 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrimageobject.py b/packages/mistralai_azure/src/mistralai_azure/models/ocrimageobject.py new file mode 100644 index 00000000..9d0dd01d --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/ocrimageobject.py @@ -0,0 +1,89 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRImageObjectTypedDict(TypedDict): + id: str + r"""Image ID for extracted image in a page""" + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + image_base64: NotRequired[Nullable[str]] + r"""Base64 string of the extracted image""" + image_annotation: NotRequired[Nullable[str]] + r"""Annotation of the extracted image in json str""" + + +class OCRImageObject(BaseModel): + id: str + r"""Image ID for extracted image in a page""" + + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + + image_base64: OptionalNullable[str] = UNSET + r"""Base64 string of the extracted image""" + + image_annotation: OptionalNullable[str] = UNSET + r"""Annotation of the extracted image in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["image_base64", "image_annotation"] + nullable_fields = [ + "top_left_x", + "top_left_y", + "bottom_right_x", + "bottom_right_y", + "image_base64", + "image_annotation", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in 
type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrpagedimensions.py b/packages/mistralai_azure/src/mistralai_azure/models/ocrpagedimensions.py new file mode 100644 index 00000000..efb62a58 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/ocrpagedimensions.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +from typing_extensions import TypedDict + + +class OCRPageDimensionsTypedDict(TypedDict): + dpi: int + r"""Dots per inch of the page-image""" + height: int + r"""Height of the image in pixels""" + width: int + r"""Width of the image in pixels""" + + +class OCRPageDimensions(BaseModel): + dpi: int + r"""Dots per inch of the page-image""" + + height: int + r"""Height of the image in pixels""" + + width: int + r"""Width of the image in pixels""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py b/packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py new file mode 100644 index 00000000..4438e732 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict +from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict +from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List +from typing_extensions import TypedDict + + +class OCRPageObjectTypedDict(TypedDict): + index: int + r"""The page index in a pdf document starting from 0""" + markdown: str + r"""The markdown string response of the page""" + images: List[OCRImageObjectTypedDict] + r"""List of all extracted images in the page""" + dimensions: Nullable[OCRPageDimensionsTypedDict] + r"""The dimensions of the PDF Page's screenshot image""" + + +class OCRPageObject(BaseModel): + index: int + r"""The page index in a pdf document starting from 0""" + + markdown: str + r"""The markdown string response of the page""" + + images: List[OCRImageObject] + r"""List of all extracted images in the page""" + + dimensions: Nullable[OCRPageDimensions] + r"""The dimensions of the PDF Page's screenshot image""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["dimensions"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py new file mode 100644 index 
00000000..533d0742 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py @@ -0,0 +1,120 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DocumentTypedDict = TypeAliasType( + "DocumentTypedDict", + Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], +) +r"""Document to run OCR on""" + + +Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk]) +r"""Document to run OCR on""" + + +class OCRRequestTypedDict(TypedDict): + model: Nullable[str] + document: DocumentTypedDict + r"""Document to run OCR on""" + id: NotRequired[str] + pages: NotRequired[Nullable[List[int]]] + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + include_image_base64: NotRequired[Nullable[bool]] + r"""Include image URLs in response""" + image_limit: NotRequired[Nullable[int]] + r"""Max images to extract""" + image_min_size: NotRequired[Nullable[int]] + r"""Minimum height and width of image to extract""" + bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. 
Only json_schema is valid for this field""" + document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + + +class OCRRequest(BaseModel): + model: Nullable[str] + + document: Document + r"""Document to run OCR on""" + + id: Optional[str] = None + + pages: OptionalNullable[List[int]] = UNSET + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + + include_image_base64: OptionalNullable[bool] = UNSET + r"""Include image URLs in response""" + + image_limit: OptionalNullable[int] = UNSET + r"""Max images to extract""" + + image_min_size: OptionalNullable[int] = UNSET + r"""Minimum height and width of image to extract""" + + bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + + document_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + ] + nullable_fields = [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrresponse.py b/packages/mistralai_azure/src/mistralai_azure/models/ocrresponse.py new file mode 100644 index 00000000..3e43fa8e --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/ocrresponse.py @@ -0,0 +1,68 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict +from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class OCRResponseTypedDict(TypedDict): + pages: List[OCRPageObjectTypedDict] + r"""List of OCR info for pages.""" + model: str + r"""The model used to generate the OCR.""" + usage_info: OCRUsageInfoTypedDict + document_annotation: NotRequired[Nullable[str]] + r"""Formatted response in the request_format if provided in json str""" + + +class OCRResponse(BaseModel): + pages: List[OCRPageObject] + r"""List of OCR info for pages.""" + + model: str + r"""The model used to generate the OCR.""" + + usage_info: OCRUsageInfo + + document_annotation: OptionalNullable[str] = UNSET + r"""Formatted response in the request_format if provided in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["document_annotation"] + nullable_fields = ["document_annotation"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrusageinfo.py b/packages/mistralai_azure/src/mistralai_azure/models/ocrusageinfo.py new file mode 100644 index 00000000..1f5c9f1b 
--- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/ocrusageinfo.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRUsageInfoTypedDict(TypedDict): + pages_processed: int + r"""Number of pages processed""" + doc_size_bytes: NotRequired[Nullable[int]] + r"""Document size in bytes""" + + +class OCRUsageInfo(BaseModel): + pages_processed: int + r"""Number of pages processed""" + + doc_size_bytes: OptionalNullable[int] = UNSET + r"""Document size in bytes""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["doc_size_bytes"] + nullable_fields = ["doc_size_bytes"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index cfd58dcf..6d09de5b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -37,7 +37,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in 
type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py index 3e9aa3da..abca8abe 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py @@ -51,7 +51,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py index b1d094fc..bbe5cdfa 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py @@ -1,19 +1,82 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing_extensions import TypedDict +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class UsageInfoTypedDict(TypedDict): - prompt_tokens: int - completion_tokens: int - total_tokens: int + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] class UsageInfo(BaseModel): - prompt_tokens: int + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - completion_tokens: int + prompt_tokens: Optional[int] = 0 - total_tokens: int + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + nullable_fields = ["prompt_audio_seconds"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != 
UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py index 8cce1745..05976fc0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py @@ -39,7 +39,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_azure/src/mistralai_azure/ocr.py b/packages/mistralai_azure/src/mistralai_azure/ocr.py new file mode 100644 index 00000000..71fe0337 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/ocr.py @@ -0,0 +1,271 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai_azure import models, utils +from mistralai_azure._hooks import HookContext +from mistralai_azure.types import Nullable, OptionalNullable, UNSET +from typing import Any, List, Mapping, Optional, Union + + +class Ocr(BaseSDK): + def process( + self, + *, + model: Nullable[str], + document: Union[models.Document, models.DocumentTypedDict], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> Optional[models.OCRResponse]: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.Document), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + ) + + req = self._build_request( + method="POST", + path="/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="ocr_v1_ocr_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, Optional[models.OCRResponse]) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) + + async def process_async( + self, + *, + model: Nullable[str], + document: Union[models.Document, models.DocumentTypedDict], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[models.ResponseFormat, models.ResponseFormatTypedDict] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: 
Optional[Mapping[str, str]] = None, + ) -> Optional[models.OCRResponse]: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.Document), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + ) + + req = self._build_request_async( + method="POST", + path="/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=[], + security_source=self.sdk_configuration.security, + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return utils.unmarshal_json(http_res.text, 
Optional[models.OCRResponse]) + if utils.match_response(http_res, "422", "application/json"): + response_data = utils.unmarshal_json( + http_res.text, models.HTTPValidationErrorData + ) + raise models.HTTPValidationError(data=response_data) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + "API error occurred", http_res.status_code, http_res_text, http_res + ) + + content_type = http_res.headers.get("Content-Type") + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError( + f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", + http_res.status_code, + http_res_text, + http_res, + ) diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py index 605e5d74..51289cf0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from ._hooks import SDKHooks from ._version import ( __gen_version__, __openapi_doc_version__, @@ -42,9 +41,6 @@ class SDKConfiguration: retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None - def __post_init__(self): - self._hooks = SDKHooks() - def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} @@ -55,6 +51,3 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - - def get_hooks(self) -> SDKHooks: - return self._hooks diff --git a/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py b/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py index a6187efa..231c2e37 100644 --- a/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py +++ b/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py @@ -2,7 +2,7 @@ from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType +from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union from typing_extensions import TypeAliasType, TypeAlias @@ -35,5 +35,5 @@ def __bool__(self) -> Literal[False]: "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) ) -UnrecognizedInt = NewType("UnrecognizedInt", int) -UnrecognizedStr = NewType("UnrecognizedStr", str) +UnrecognizedInt: TypeAlias = int +UnrecognizedStr: TypeAlias = str diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 3cded8fe..dd4aa4b3 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -1,50 +1,55 @@ """Code generated by 
Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from .annotations import get_discriminator -from .enums import OpenEnumMeta -from .headers import get_headers, get_response_headers -from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, -) -from .queryparams import get_query_params -from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig -from .requestbodies import serialize_request_body, SerializedRequestBody -from .security import get_security -from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - validate_open_enum, -) -from .url import generate_url, template_url, remove_suffix -from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, -) -from .logger import Logger, get_body_content, get_default_logger +from typing import TYPE_CHECKING +from importlib import import_module + +if TYPE_CHECKING: + from .annotations import get_discriminator + from .datetimes import parse_datetime + from .enums import OpenEnumMeta + from .headers import get_headers, get_response_headers + from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, + ) + from .queryparams import get_query_params + from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig + from .requestbodies import serialize_request_body, SerializedRequestBody + from .security import get_security + from .serializers import ( + 
get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, + ) + from .url import generate_url, template_url, remove_suffix + from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, + cast_partial, + ) + from .logger import Logger, get_body_content, get_default_logger __all__ = [ "BackoffStrategy", @@ -55,6 +60,7 @@ "get_body_content", "get_default_logger", "get_discriminator", + "parse_datetime", "get_global_from_env", "get_headers", "get_pydantic_model", @@ -97,3 +103,82 @@ "validate_open_enum", "cast_partial", ] + +_dynamic_imports: dict[str, str] = { + "BackoffStrategy": ".retries", + "FieldMetadata": ".metadata", + "find_metadata": ".metadata", + "FormMetadata": ".metadata", + "generate_url": ".url", + "get_body_content": ".logger", + "get_default_logger": ".logger", + "get_discriminator": ".annotations", + "parse_datetime": ".datetimes", + "get_global_from_env": ".values", + "get_headers": ".headers", + "get_pydantic_model": ".serializers", + "get_query_params": ".queryparams", + "get_response_headers": ".headers", + "get_security": ".security", + "HeaderMetadata": ".metadata", + "Logger": ".logger", + "marshal_json": ".serializers", + "match_content_type": ".values", + "match_status_codes": ".values", + "match_response": ".values", + "MultipartFormMetadata": ".metadata", + "OpenEnumMeta": ".enums", + "PathParamMetadata": ".metadata", + "QueryParamMetadata": ".metadata", + "remove_suffix": ".url", + "Retries": ".retries", + "retry": ".retries", + "retry_async": ".retries", + "RetryConfig": ".retries", + "RequestMetadata": ".metadata", + "SecurityMetadata": ".metadata", + "serialize_decimal": ".serializers", + "serialize_float": ".serializers", + "serialize_int": 
".serializers", + "serialize_request_body": ".requestbodies", + "SerializedRequestBody": ".requestbodies", + "stream_to_text": ".serializers", + "stream_to_text_async": ".serializers", + "stream_to_bytes": ".serializers", + "stream_to_bytes_async": ".serializers", + "template_url": ".url", + "unmarshal": ".serializers", + "unmarshal_json": ".serializers", + "validate_decimal": ".serializers", + "validate_const": ".serializers", + "validate_float": ".serializers", + "validate_int": ".serializers", + "validate_open_enum": ".serializers", + "cast_partial": ".values", +} + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " + ) + + try: + module = import_module(module_name, __package__) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = list(_dynamic_imports.keys()) + return sorted(lazy_attrs) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/datetimes.py b/packages/mistralai_azure/src/mistralai_azure/utils/datetimes.py new file mode 100644 index 00000000..a6c52cd6 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/datetimes.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +import sys + + +def parse_datetime(datetime_string: str) -> datetime: + """ + Convert a RFC 3339 / ISO 8601 formatted string into a datetime object. 
+ Python versions 3.11 and later support parsing RFC 3339 directly with + datetime.fromisoformat(), but for earlier versions, this function + encapsulates the necessary extra logic. + """ + # Python 3.11 and later can parse RFC 3339 directly + if sys.version_info >= (3, 11): + return datetime.fromisoformat(datetime_string) + + # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix, + # so fix that upfront. + if datetime_string.endswith("Z"): + datetime_string = datetime_string[:-1] + "+00:00" + + return datetime.fromisoformat(datetime_string) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/enums.py b/packages/mistralai_azure/src/mistralai_azure/utils/enums.py index c650b10c..c3bc13cf 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/enums.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/enums.py @@ -1,34 +1,74 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" import enum - +import sys class OpenEnumMeta(enum.EnumMeta): - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. - # pylint: disable=redefined-builtin + # The __call__ method `boundary` kwarg was added in 3.11 and must be present + # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 + # pylint: disable=unexpected-keyword-arg + # The __call__ method `values` varg must be named for pyright. + # pylint: disable=keyword-arg-before-vararg + + if sys.version_info >= (3, 11): + def __call__( + cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. 
+ # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + except ValueError: + return value + else: + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py index 0472aba8..e873495f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py @@ -86,11 +86,39 @@ def _populate_form( return form +def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: + """Extract file name, content, and content type from a file object.""" + file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields + + 
file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + def serialize_multipart_form( media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: form: Dict[str, Any] = {} - files: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] if not isinstance(request, BaseModel): raise TypeError("invalid request body type") @@ -112,39 +140,32 @@ def serialize_multipart_form( f_name = field.alias if field.alias else name if field_metadata.file: - file_fields: Dict[str, FieldInfo] = val.__class__.model_fields - - file_name = "" - content = None - content_type = None - - for file_field_name in file_fields: - file_field = file_fields[file_field_name] + if isinstance(val, List): + # Handle array of files + for file_obj in val: + if not _is_set(file_obj): + continue + + file_name, content, content_type = _extract_file_properties(file_obj) - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue + if content_type is not None: + files.append((f_name + "[]", (file_name, content, content_type))) + else: + files.append((f_name + "[]", (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) - if file_metadata.content: - content = getattr(val, file_field_name, None) - elif file_field_name == "content_type": - 
content_type = getattr(val, file_field_name, None) + if content_type is not None: + files.append((f_name, (file_name, content, content_type))) else: - file_name = getattr(val, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - if content_type is not None: - files[f_name] = (file_name, content, content_type) - else: - files[f_name] = (file_name, content) + files.append((f_name, (file_name, content))) elif field_metadata.json: - files[f_name] = ( + files.append((f_name, ( None, marshal_json(val, request_field_types[name]), "application/json", - ) + ))) else: if isinstance(val, List): values = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index baa41fbd..76e44d71 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -1,13 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from decimal import Decimal +import functools import json -from typing import Any, Dict, List, Union, get_args -import httpx +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions from typing_extensions import get_origin + +import httpx from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset @@ -185,6 +188,13 @@ def is_nullable(field): return False +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. 
+ """ + return any(obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union")) + + def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) @@ -217,3 +227,22 @@ def _contains_pydantic_model(data: Any) -> bool: return any(_contains_pydantic_model(value) for value in data.values()) return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. + Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result + diff --git a/pyproject.toml b/pyproject.toml index 2631d4da..f82f5bc5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.7" +version = "1.9.8" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 1843bff5..d2236835 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.7" +__version__: str = "1.9.8" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.9.7 2.634.2 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.9.8 2.634.2 1.0.0 mistralai" try: if __package__ is not None: From 049014b55d0d0a2280674fc92cc72c8d6fef2a08 Mon Sep 17 00:00:00 2001 From: Gaspard Blanchet <26529448+GaspardBT@users.noreply.github.com> Date: Tue, 26 Aug 2025 19:16:15 +0200 Subject: [PATCH 152/223] Fix: update base azure sdk file to include OCR (#267) --- .speakeasy/workflow.lock | 2 +- packages/mistralai_azure/.speakeasy/gen.lock | 1 + .../src/mistralai_azure/sdk.py | 62 ++++++++++++++----- 3 files changed, 47 insertions(+), 18 deletions(-) diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 4caed8b7..286cf6a9 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -26,7 +26,7 @@ targets: sourceRevisionDigest: sha256:670c460702ec74f7077491464a6dc5ee9d873969c80e812c48dbf4deb160e470 sourceBlobDigest: sha256:5a3ebfa4cb00a015bb7bb03ec7442fc7e0b9c17ca66ab35d3045290b2ad87eac codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:a4ace4b17dee92b180a2fede7742bd93fa1a83a9f96e4f61531289cafc50f6ad + codeSamplesRevisionDigest: sha256:e6802c97fd9783aa91cc0853de1a889944f699b88e0dafcf9fecd83de6e2c6c9 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index ef80e828..bce8e3c8 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ 
b/packages/mistralai_azure/.speakeasy/gen.lock @@ -165,6 +165,7 @@ generatedFiles: - src/mistralai_azure/models/validationerror.py - src/mistralai_azure/ocr.py - src/mistralai_azure/py.typed + - src/mistralai_azure/sdk.py - src/mistralai_azure/sdkconfiguration.py - src/mistralai_azure/types/__init__.py - src/mistralai_azure/types/basemodel.py diff --git a/packages/mistralai_azure/src/mistralai_azure/sdk.py b/packages/mistralai_azure/src/mistralai_azure/sdk.py index 8379e55f..04bc7743 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/sdk.py @@ -1,27 +1,33 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -import weakref -from typing import Any, Callable, Dict, Optional, Union, cast - -import httpx - -from mistralai_azure import models, utils -from mistralai_azure._hooks import SDKHooks -from mistralai_azure.chat import Chat -from mistralai_azure.types import UNSET, OptionalNullable - from .basesdk import BaseSDK from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients from .sdkconfiguration import SDKConfiguration from .utils.logger import Logger, get_default_logger from .utils.retries import RetryConfig +import httpx +import importlib +from mistralai_azure import models, utils +from mistralai_azure._hooks import SDKHooks +from mistralai_azure.types import OptionalNullable, UNSET +from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast +import weakref + +if TYPE_CHECKING: + from mistralai_azure.chat import Chat + from mistralai_azure.ocr import Ocr class MistralAzure(BaseSDK): r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" - chat: Chat + chat: "Chat" r"""Chat Completion API.""" + ocr: "Ocr" + _sub_sdk_map = { + "chat": ("mistralai_azure.chat", "Chat"), + "ocr": ("mistralai_azure.ocr", "Ocr"), + } def __init__( self, @@ -101,6 +107,9 @@ def __init__( hooks = SDKHooks() + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + current_server_url, *_ = self.sdk_configuration.get_server_details() server_url, self.sdk_configuration.client = hooks.sdk_init( current_server_url, client @@ -108,9 +117,6 @@ def __init__( if current_server_url != server_url: self.sdk_configuration.server_url = server_url - # pylint: disable=protected-access - self.sdk_configuration.__dict__["_hooks"] = hooks - weakref.finalize( self, close_clients, @@ -121,10 +127,32 @@ def __init__( self.sdk_configuration.async_client_supplied, ) - self._init_sdks() + def __getattr__(self, name: str): + if name in self._sub_sdk_map: + module_path, class_name = self._sub_sdk_map[name] + try: + module = importlib.import_module(module_path) + klass = getattr(module, class_name) + instance = klass(self.sdk_configuration) + setattr(self, name, instance) + return instance + except ImportError as e: + raise AttributeError( + f"Failed to import module {module_path} for attribute {name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" + ) from e + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'" + ) - def _init_sdks(self): - self.chat = Chat(self.sdk_configuration) + def __dir__(self): + default_attrs = list(super().__dir__()) + lazy_attrs = list(self._sub_sdk_map.keys()) + return sorted(list(set(default_attrs + 
lazy_attrs))) def __enter__(self): return self From 806e91d522d66b7e6df98232717ab1bd3cafac4e Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Tue, 26 Aug 2025 19:39:21 +0200 Subject: [PATCH 153/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.568.2 (#268) Co-authored-by: speakeasybot --- .speakeasy/gen.lock | 4 ++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 4 ++-- RELEASES.md | 12 +++++++++++- pyproject.toml | 2 +- src/mistralai/_version.py | 4 ++-- 6 files changed, 19 insertions(+), 9 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 2af5a009..47f75811 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,8 +5,8 @@ management: docVersion: 1.0.0 speakeasyVersion: 1.568.2 generationVersion: 2.634.2 - releaseVersion: 1.9.8 - configChecksum: d7b7d0bf04e94ae161a650b203bc54df + releaseVersion: 1.9.9 + configChecksum: b08e017cb434c255d6ef2c20584815e8 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index fd56d757..9d66f396 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.8 + version: 1.9.9 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 286cf6a9..8bb5e897 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -18,7 +18,7 @@ sources: sourceBlobDigest: sha256:64fa97f0836a2441e32bc255ced698234c147c790775988102f17b493f0da87d tags: - latest - - speakeasy-sdk-regen-1756133663 + - speakeasy-sdk-regen-1756229662 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -40,7 +40,7 @@ targets: sourceRevisionDigest: 
sha256:fcdf40207fa983a6ad7c0346f499d2b87ef2f1381a48e791a9a485bd40525ff8 sourceBlobDigest: sha256:64fa97f0836a2441e32bc255ced698234c147c790775988102f17b493f0da87d codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:f215393d076966760f8b24cb3db48d6dd9c2ba2e06daa7964860bab4b09f51f2 + codeSamplesRevisionDigest: sha256:39367b425e8a1e7376b8900711e0852f653aaab239f74cc658567fe05ad11784 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.568.2 diff --git a/RELEASES.md b/RELEASES.md index 2e99de4a..ecf8aff6 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -298,4 +298,14 @@ Based on: ### Generated - [python v1.9.8] . ### Releases -- [PyPI v1.9.8] https://pypi.org/project/mistralai/1.9.8 - . \ No newline at end of file +- [PyPI v1.9.8] https://pypi.org/project/mistralai/1.9.8 - . + +## 2025-08-26 17:34:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.9] . +### Releases +- [PyPI v1.9.9] https://pypi.org/project/mistralai/1.9.9 - . \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index f82f5bc5..c1d167e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.8" +version = "1.9.9" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index d2236835..30310d6a 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.8" +__version__: str = "1.9.9" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.9.8 2.634.2 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.9.9 2.634.2 1.0.0 mistralai" try: if __package__ is not None: From 3784b4788ce4dbbb0163b7c0ba4cf19273fc742e Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 2 Sep 2025 07:03:54 +0000 Subject: [PATCH 154/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.568.2 --- .speakeasy/gen.lock | 10 ++++++--- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +++++------ RELEASES.md | 12 ++++++++++- docs/models/apiendpoint.md | 19 ++++++++++------- docs/models/embeddingrequest.md | 3 ++- docs/models/encodingformat.md | 9 ++++++++ docs/models/systemmessagecontent.md | 4 ++-- docs/models/systemmessagecontentchunks.md | 17 +++++++++++++++ docs/sdks/embeddings/README.md | 1 + pyproject.toml | 2 +- src/mistralai/_version.py | 4 ++-- src/mistralai/embeddings.py | 6 ++++++ src/mistralai/models/__init__.py | 11 ++++++++++ src/mistralai/models/apiendpoint.py | 5 +++++ src/mistralai/models/embeddingrequest.py | 6 +++++- src/mistralai/models/encodingformat.py | 7 +++++++ src/mistralai/models/systemmessage.py | 10 ++++++--- .../models/systemmessagecontentchunks.py | 21 +++++++++++++++++++ 19 files changed, 133 insertions(+), 28 deletions(-) create mode 100644 docs/models/encodingformat.md create mode 100644 docs/models/systemmessagecontentchunks.md create mode 100644 src/mistralai/models/encodingformat.py create mode 100644 src/mistralai/models/systemmessagecontentchunks.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 
47f75811..d980c62f 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: a38b769b82bac0658cf02fed22bdbde0 + docChecksum: c3693e4872a0785b2ed46c59a8464804 docVersion: 1.0.0 speakeasyVersion: 1.568.2 generationVersion: 2.634.2 - releaseVersion: 1.9.9 - configChecksum: b08e017cb434c255d6ef2c20584815e8 + releaseVersion: 1.9.10 + configChecksum: 438b1373b4090838b050a2779ab5383f repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -199,6 +199,7 @@ generatedFiles: - docs/models/embeddingrequestinputs.md - docs/models/embeddingresponse.md - docs/models/embeddingresponsedata.md + - docs/models/encodingformat.md - docs/models/entitytype.md - docs/models/entries.md - docs/models/eventout.md @@ -379,6 +380,7 @@ generatedFiles: - docs/models/stop.md - docs/models/systemmessage.md - docs/models/systemmessagecontent.md + - docs/models/systemmessagecontentchunks.md - docs/models/textchunk.md - docs/models/textchunktype.md - docs/models/thinkchunk.md @@ -579,6 +581,7 @@ generatedFiles: - src/mistralai/models/embeddingrequest.py - src/mistralai/models/embeddingresponse.py - src/mistralai/models/embeddingresponsedata.py + - src/mistralai/models/encodingformat.py - src/mistralai/models/entitytype.py - src/mistralai/models/eventout.py - src/mistralai/models/file.py @@ -696,6 +699,7 @@ generatedFiles: - src/mistralai/models/source.py - src/mistralai/models/ssetypes.py - src/mistralai/models/systemmessage.py + - src/mistralai/models/systemmessagecontentchunks.py - src/mistralai/models/textchunk.py - src/mistralai/models/thinkchunk.py - src/mistralai/models/timestampgranularity.py diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 9d66f396..1fb8877e 100644 --- a/.speakeasy/gen.yaml +++ 
b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.9 + version: 1.9.10 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 8bb5e897..03418bf8 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:fcdf40207fa983a6ad7c0346f499d2b87ef2f1381a48e791a9a485bd40525ff8 - sourceBlobDigest: sha256:64fa97f0836a2441e32bc255ced698234c147c790775988102f17b493f0da87d + sourceRevisionDigest: sha256:eefc1f0b6a5e9ec673d317d61cad766290710b5fc369412491b75f732cccfedd + sourceBlobDigest: sha256:97767522559603de92a9738938e522cea4d558b2a854500acf6fe8d81f8ccfb8 tags: - latest - - speakeasy-sdk-regen-1756229662 + - speakeasy-sdk-regen-1756796562 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:fcdf40207fa983a6ad7c0346f499d2b87ef2f1381a48e791a9a485bd40525ff8 - sourceBlobDigest: sha256:64fa97f0836a2441e32bc255ced698234c147c790775988102f17b493f0da87d + sourceRevisionDigest: sha256:eefc1f0b6a5e9ec673d317d61cad766290710b5fc369412491b75f732cccfedd + sourceBlobDigest: sha256:97767522559603de92a9738938e522cea4d558b2a854500acf6fe8d81f8ccfb8 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:39367b425e8a1e7376b8900711e0852f653aaab239f74cc658567fe05ad11784 + codeSamplesRevisionDigest: sha256:372d48b60fafc0c389b12af1dad61faa479598e8194dc3a2a1ed27c207ab9b18 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.568.2 diff --git a/RELEASES.md b/RELEASES.md index ecf8aff6..5a818523 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -308,4 +308,14 @@ Based on: ### Generated - [python v1.9.9] . 
### Releases -- [PyPI v1.9.9] https://pypi.org/project/mistralai/1.9.9 - . \ No newline at end of file +- [PyPI v1.9.9] https://pypi.org/project/mistralai/1.9.9 - . + +## 2025-09-02 07:02:26 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.568.2 (2.634.2) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.10] . +### Releases +- [PyPI v1.9.10] https://pypi.org/project/mistralai/1.9.10 - . \ No newline at end of file diff --git a/docs/models/apiendpoint.md b/docs/models/apiendpoint.md index 700b932f..8d83a26f 100644 --- a/docs/models/apiendpoint.md +++ b/docs/models/apiendpoint.md @@ -3,10 +3,15 @@ ## Values -| Name | Value | -| -------------------------- | -------------------------- | -| `ROOT_V1_CHAT_COMPLETIONS` | /v1/chat/completions | -| `ROOT_V1_EMBEDDINGS` | /v1/embeddings | -| `ROOT_V1_FIM_COMPLETIONS` | /v1/fim/completions | -| `ROOT_V1_MODERATIONS` | /v1/moderations | -| `ROOT_V1_CHAT_MODERATIONS` | /v1/chat/moderations | \ No newline at end of file +| Name | Value | +| ------------------------------ | ------------------------------ | +| `ROOT_V1_CHAT_COMPLETIONS` | /v1/chat/completions | +| `ROOT_V1_EMBEDDINGS` | /v1/embeddings | +| `ROOT_V1_FIM_COMPLETIONS` | /v1/fim/completions | +| `ROOT_V1_MODERATIONS` | /v1/moderations | +| `ROOT_V1_CHAT_MODERATIONS` | /v1/chat/moderations | +| `ROOT_V1_OCR` | /v1/ocr | +| `ROOT_V1_CLASSIFICATIONS` | /v1/classifications | +| `ROOT_V1_CHAT_CLASSIFICATIONS` | /v1/chat/classifications | +| `ROOT_V1_CONVERSATIONS` | /v1/conversations | +| `ROOT_V1_AUDIO_TRANSCRIPTIONS` | /v1/audio/transcriptions | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 3a778a6f..2f48099f 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -8,4 +8,5 
@@ | `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | | `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | | `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings. | | -| `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | +| `encoding_format` | [Optional[models.EncodingFormat]](../models/encodingformat.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/encodingformat.md b/docs/models/encodingformat.md new file mode 100644 index 00000000..7d5941cf --- /dev/null +++ b/docs/models/encodingformat.md @@ -0,0 +1,9 @@ +# EncodingFormat + + +## Values + +| Name | Value | +| -------- | -------- | +| `FLOAT` | float | +| `BASE64` | base64 | \ No newline at end of file diff --git a/docs/models/systemmessagecontent.md b/docs/models/systemmessagecontent.md index e0d27d9f..0c87baf3 100644 --- a/docs/models/systemmessagecontent.md +++ b/docs/models/systemmessagecontent.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.SystemMessageContentChunks]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.SystemMessageContentChunks] = /* values here */ ``` diff --git a/docs/models/systemmessagecontentchunks.md b/docs/models/systemmessagecontentchunks.md new file mode 100644 index 00000000..40030c17 --- /dev/null +++ b/docs/models/systemmessagecontentchunks.md @@ -0,0 +1,17 @@ +# SystemMessageContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 91e33138..a145855f 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -42,6 +42,7 @@ with Mistral( | 
`inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | | `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings. | | | `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | +| `encoding_format` | [Optional[models.EncodingFormat]](../../models/encodingformat.md) | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/pyproject.toml b/pyproject.toml index c1d167e3..47842d19 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.9" +version = "1.9.10" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 30310d6a..3fa6e7eb 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.9" +__version__: str = "1.9.10" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.9.9 2.634.2 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.9.10 2.634.2 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index ef0699d1..79309eea 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -20,6 +20,7 @@ def create( ], output_dimension: OptionalNullable[int] = UNSET, output_dtype: Optional[models.EmbeddingDtype] = None, + encoding_format: Optional[models.EncodingFormat] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -33,6 +34,7 @@ def create( :param inputs: Text to embed. 
:param output_dimension: The dimension of the output embeddings. :param output_dtype: + :param encoding_format: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -53,6 +55,7 @@ def create( inputs=inputs, output_dimension=output_dimension, output_dtype=output_dtype, + encoding_format=encoding_format, ) req = self._build_request( @@ -134,6 +137,7 @@ async def create_async( ], output_dimension: OptionalNullable[int] = UNSET, output_dtype: Optional[models.EmbeddingDtype] = None, + encoding_format: Optional[models.EncodingFormat] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -147,6 +151,7 @@ async def create_async( :param inputs: Text to embed. :param output_dimension: The dimension of the output embeddings. :param output_dtype: + :param encoding_format: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -167,6 +172,7 @@ async def create_async( inputs=inputs, output_dimension=output_dimension, output_dtype=output_dtype, + encoding_format=encoding_format, ) req = self._build_request_async( diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index d24492d6..9ca279f8 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -389,6 +389,7 @@ EmbeddingResponseData, EmbeddingResponseDataTypedDict, ) + from .encodingformat import EncodingFormat from .entitytype import EntityType from .eventout import EventOut, EventOutTypedDict from .file import File, FileTypedDict @@ -767,6 +768,10 @@ SystemMessageContentTypedDict, SystemMessageTypedDict, ) + from 
.systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict from .thinkchunk import ( ThinkChunk, @@ -1156,6 +1161,7 @@ "EmbeddingResponseData", "EmbeddingResponseDataTypedDict", "EmbeddingResponseTypedDict", + "EncodingFormat", "EntityType", "Entries", "EntriesTypedDict", @@ -1472,6 +1478,8 @@ "StopTypedDict", "SystemMessage", "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", "SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", @@ -1858,6 +1866,7 @@ "EmbeddingResponseTypedDict": ".embeddingresponse", "EmbeddingResponseData": ".embeddingresponsedata", "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", + "EncodingFormat": ".encodingformat", "EntityType": ".entitytype", "EventOut": ".eventout", "EventOutTypedDict": ".eventout", @@ -2159,6 +2168,8 @@ "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": ".systemmessage", "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", "TextChunk": ".textchunk", "TextChunkType": ".textchunk", "TextChunkTypedDict": ".textchunk", diff --git a/src/mistralai/models/apiendpoint.py b/src/mistralai/models/apiendpoint.py index a1b42e88..0ad9366f 100644 --- a/src/mistralai/models/apiendpoint.py +++ b/src/mistralai/models/apiendpoint.py @@ -12,6 +12,11 @@ "/v1/fim/completions", "/v1/moderations", "/v1/chat/moderations", + "/v1/ocr", + "/v1/classifications", + "/v1/chat/classifications", + "/v1/conversations", + "/v1/audio/transcriptions", ], UnrecognizedStr, ] diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 4b5db550..685f27fd 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -2,6 +2,7 @@ from 
__future__ import annotations from .embeddingdtype import EmbeddingDtype +from .encodingformat import EncodingFormat from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer @@ -27,6 +28,7 @@ class EmbeddingRequestTypedDict(TypedDict): output_dimension: NotRequired[Nullable[int]] r"""The dimension of the output embeddings.""" output_dtype: NotRequired[EmbeddingDtype] + encoding_format: NotRequired[EncodingFormat] class EmbeddingRequest(BaseModel): @@ -41,9 +43,11 @@ class EmbeddingRequest(BaseModel): output_dtype: Optional[EmbeddingDtype] = None + encoding_format: Optional[EncodingFormat] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["output_dimension", "output_dtype"] + optional_fields = ["output_dimension", "output_dtype", "encoding_format"] nullable_fields = ["output_dimension"] null_default_fields = [] diff --git a/src/mistralai/models/encodingformat.py b/src/mistralai/models/encodingformat.py new file mode 100644 index 00000000..6c28a15a --- /dev/null +++ b/src/mistralai/models/encodingformat.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +EncodingFormat = Literal["float", "base64"] diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py index 7827ac4b..25b51f95 100644 --- a/src/mistralai/models/systemmessage.py +++ b/src/mistralai/models/systemmessage.py @@ -1,19 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) from mistralai.types import BaseModel from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict SystemMessageContentTypedDict = TypeAliasType( - "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], ) SystemMessageContent = TypeAliasType( - "SystemMessageContent", Union[str, List[TextChunk]] + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] ) diff --git a/src/mistralai/models/systemmessagecontentchunks.py b/src/mistralai/models/systemmessagecontentchunks.py new file mode 100644 index 00000000..a1f04d1e --- /dev/null +++ b/src/mistralai/models/systemmessagecontentchunks.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from mistralai.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +SystemMessageContentChunksTypedDict = TypeAliasType( + "SystemMessageContentChunksTypedDict", + Union[TextChunkTypedDict, ThinkChunkTypedDict], +) + + +SystemMessageContentChunks = Annotated[ + Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] From d985faa8611806861db95db5fbea93f3884d9fa5 Mon Sep 17 00:00:00 2001 From: Nicolas Faurie Date: Tue, 2 Sep 2025 09:09:24 +0200 Subject: [PATCH 155/223] Skip flaky remote MCP server example --- scripts/run_examples.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 6b281092..3ef2f9e2 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -10,6 +10,7 @@ exclude_files=( "examples/mistral/mcp_servers/stdio_server.py" "examples/mistral/agents/async_conversation_run_stream.py" "examples/mistral/agents/async_conversation_run_mcp.py" + "examples/mistral/agents/async_conversation_run_mcp_remote.py" ) # Check if the first argument is "no-extra-dep" then remove all the files that require the extra dependencies From 824187dce44c39214a21f51c6ffdd0b78f4b9215 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Thu, 2 Oct 2025 17:37:27 +0200 Subject: [PATCH 156/223] Update Speakeasy SDKs to version 1.606.10 (#275) Co-authored-by: GitHub Action --- .speakeasy/gen.lock | 17 +- .speakeasy/gen.yaml | 4 + .speakeasy/workflow.lock | 14 +- .speakeasy/workflow.yaml | 2 +- README.md | 85 ++- USAGE.md | 8 +- docs/sdks/accesses/README.md | 3 + docs/sdks/agents/README.md | 2 + 
docs/sdks/chat/README.md | 2 + docs/sdks/classifiers/README.md | 4 + docs/sdks/conversations/README.md | 10 + docs/sdks/documents/README.md | 10 + docs/sdks/embeddings/README.md | 1 + docs/sdks/files/README.md | 6 + docs/sdks/fim/README.md | 2 + docs/sdks/jobs/README.md | 5 + docs/sdks/libraries/README.md | 5 + docs/sdks/mistralagents/README.md | 5 + docs/sdks/mistraljobs/README.md | 4 + docs/sdks/models/README.md | 6 + docs/sdks/ocr/README.md | 1 + docs/sdks/transcriptions/README.md | 2 + poetry.toml | 1 + scripts/prepare_readme.py | 2 + scripts/publish.sh | 1 - src/mistralai/_version.py | 4 +- src/mistralai/accesses.py | 151 ++---- src/mistralai/agents.py | 97 +--- src/mistralai/audio.py | 11 +- src/mistralai/basesdk.py | 20 +- src/mistralai/batch.py | 9 +- src/mistralai/beta.py | 15 +- src/mistralai/chat.py | 97 +--- src/mistralai/classifiers.py | 201 ++----- src/mistralai/conversations.py | 495 +++++------------- src/mistralai/documents.py | 493 +++++------------ src/mistralai/embeddings.py | 51 +- src/mistralai/files.py | 223 ++------ src/mistralai/fim.py | 97 +--- src/mistralai/fine_tuning.py | 9 +- src/mistralai/jobs.py | 207 ++------ src/mistralai/libraries.py | 249 +++------ src/mistralai/mistral_agents.py | 251 +++------ src/mistralai/mistral_jobs.py | 169 ++---- src/mistralai/models/__init__.py | 28 +- src/mistralai/models/httpvalidationerror.py | 17 +- src/mistralai/models/mistralerror.py | 26 + src/mistralai/models/no_response_error.py | 13 + .../models/responsevalidationerror.py | 25 + src/mistralai/models/sdkerror.py | 44 +- src/mistralai/models_.py | 275 +++------- src/mistralai/ocr.py | 51 +- src/mistralai/sdk.py | 17 +- src/mistralai/transcriptions.py | 73 +-- src/mistralai/utils/__init__.py | 23 +- src/mistralai/utils/eventstreaming.py | 10 + src/mistralai/utils/serializers.py | 5 +- .../utils/unmarshal_json_response.py | 24 + 58 files changed, 1265 insertions(+), 2417 deletions(-) create mode 100644 src/mistralai/models/mistralerror.py create 
mode 100644 src/mistralai/models/no_response_error.py create mode 100644 src/mistralai/models/responsevalidationerror.py create mode 100644 src/mistralai/utils/unmarshal_json_response.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index d980c62f..dc029dc3 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -3,10 +3,10 @@ id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: docChecksum: c3693e4872a0785b2ed46c59a8464804 docVersion: 1.0.0 - speakeasyVersion: 1.568.2 - generationVersion: 2.634.2 + speakeasyVersion: 1.606.10 + generationVersion: 2.687.13 releaseVersion: 1.9.10 - configChecksum: 438b1373b4090838b050a2779ab5383f + configChecksum: 928d8206dce080425b60348d566b2c4e repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -15,13 +15,13 @@ features: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.19.3 + core: 5.20.1 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.1 + examples: 3.0.2 flatRequests: 1.0.1 flattening: 3.1.1 globalSecurity: 3.0.3 @@ -36,14 +36,13 @@ features: responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.1.0 - serverEvents: 1.0.7 + serverEvents: 1.0.8 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 unions: 3.0.4 uploadStreams: 1.0.0 generatedFiles: - .gitattributes - - .python-version - .vscode/settings.json - USAGE.md - docs/models/agent.md @@ -665,12 +664,14 @@ generatedFiles: - src/mistralai/models/messageoutputentry.py - src/mistralai/models/messageoutputevent.py - src/mistralai/models/metricout.py + - src/mistralai/models/mistralerror.py - src/mistralai/models/mistralpromptmode.py - src/mistralai/models/modelcapabilities.py - src/mistralai/models/modelconversation.py - src/mistralai/models/modellist.py - 
src/mistralai/models/moderationobject.py - src/mistralai/models/moderationresponse.py + - src/mistralai/models/no_response_error.py - src/mistralai/models/ocrimageobject.py - src/mistralai/models/ocrpagedimensions.py - src/mistralai/models/ocrpageobject.py @@ -687,6 +688,7 @@ generatedFiles: - src/mistralai/models/responseformat.py - src/mistralai/models/responseformats.py - src/mistralai/models/responsestartedevent.py + - src/mistralai/models/responsevalidationerror.py - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py - src/mistralai/models/retrievefileout.py - src/mistralai/models/sampletype.py @@ -756,6 +758,7 @@ generatedFiles: - src/mistralai/utils/retries.py - src/mistralai/utils/security.py - src/mistralai/utils/serializers.py + - src/mistralai/utils/unmarshal_json_response.py - src/mistralai/utils/url.py - src/mistralai/utils/values.py examples: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 1fb8877e..24506f89 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -26,6 +26,9 @@ python: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 + allowedRedefinedBuiltins: + - id + - object authors: - Mistral baseErrorName: MistralError @@ -53,6 +56,7 @@ python: methodArguments: infer-optional-args moduleName: "" outputModelSuffix: output + packageManager: poetry packageName: mistralai pytestFilterWarnings: [] pytestTimeout: 0 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 03418bf8..3f4b0fd9 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.568.2 +speakeasyVersion: 1.606.10 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure @@ -18,7 +18,7 @@ sources: sourceBlobDigest: sha256:97767522559603de92a9738938e522cea4d558b2a854500acf6fe8d81f8ccfb8 tags: - latest - - speakeasy-sdk-regen-1756796562 + - main targets: mistralai-azure-sdk: source: mistral-azure-source @@ -40,20 +40,20 @@ targets: sourceRevisionDigest: 
sha256:eefc1f0b6a5e9ec673d317d61cad766290710b5fc369412491b75f732cccfedd sourceBlobDigest: sha256:97767522559603de92a9738938e522cea4d558b2a854500acf6fe8d81f8ccfb8 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:372d48b60fafc0c389b12af1dad61faa479598e8194dc3a2a1ed27c207ab9b18 + codeSamplesRevisionDigest: sha256:31dee4783f73e1efa932b16da7238ddfe29b94f4b1c66223d6e0f2393cdc6f72 workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.568.2 + speakeasyVersion: 1.606.10 sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:sha256:670c460702ec74f7077491464a6dc5ee9d873969c80e812c48dbf4deb160e470 mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:sha256:eefc1f0b6a5e9ec673d317d61cad766290710b5fc369412491b75f732cccfedd targets: mistralai-azure-sdk: target: python diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index fe32bb3f..3156d149 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,5 @@ workflowVersion: 1.0.0 -speakeasyVersion: 1.568.2 +speakeasyVersion: 1.606.10 sources: mistral-azure-source: inputs: diff --git a/README.md b/README.md index f71ccfcb..65b0f7b0 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,15 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create yo > > Once a Python version reaches its [official end of life date](https://devguide.python.org/versions/), a 3-month grace period is provided for users to upgrade. Following this grace period, the minimum python version supported in the SDK will be updated. -The SDK can be installed with either *pip* or *poetry* package managers. +The SDK can be installed with *uv*, *pip*, or *poetry* package managers. + +### uv + +*uv* is a fast Python package installer and resolver, designed as a drop-in replacement for pip and pip-tools. It's recommended for its speed and modern Python tooling capabilities. + +```bash +uv add mistralai +``` ### PIP @@ -150,7 +158,7 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio @@ -201,7 +209,7 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio @@ -252,7 +260,7 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio @@ -303,7 +311,7 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio @@ -680,27 +688,20 @@ with Mistral( ## Error Handling -Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an exception. +[`MistralError`](./src/mistralai/models/mistralerror.py) is the base class for all HTTP error responses. It has the following properties: -By default, an API error will raise a models.SDKError exception, which has the following properties: - -| Property | Type | Description | -|-----------------|------------------|-----------------------| -| `.status_code` | *int* | The HTTP status code | -| `.message` | *str* | The error message | -| `.raw_response` | *httpx.Response* | The raw HTTP response | -| `.body` | *str* | The response content | - -When custom error responses are specified for an operation, the SDK may also raise their associated exceptions. You can refer to respective *Errors* tables in SDK docs for more details on possible exception types for each operation. For example, the `list_async` method may raise the following exceptions: - -| Error Type | Status Code | Content Type | -| -------------------------- | ----------- | ---------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| Property | Type | Description | +| ------------------ | ---------------- | --------------------------------------------------------------------------------------- | +| `err.message` | `str` | Error message | +| `err.status_code` | `int` | HTTP response status code eg `404` | +| `err.headers` | `httpx.Headers` | HTTP response headers | +| `err.body` | `str` | HTTP body. Can be empty string if no body is returned. | +| `err.raw_response` | `httpx.Response` | Raw HTTP response | +| `err.data` | | Optional. 
Some errors may contain structured data. [See Error Classes](#error-classes). | ### Example - ```python +import mistralai from mistralai import Mistral, models import os @@ -716,13 +717,41 @@ with Mistral( # Handle response print(res) - except models.HTTPValidationError as e: - # handle e.data: models.HTTPValidationErrorData - raise(e) - except models.SDKError as e: - # handle exception - raise(e) + + except models.MistralError as e: + # The base class for HTTP error responses + print(e.message) + print(e.status_code) + print(e.body) + print(e.headers) + print(e.raw_response) + + # Depending on the method different errors may be thrown + if isinstance(e, models.HTTPValidationError): + print(e.data.detail) # Optional[List[mistralai.ValidationError]] ``` + +### Error Classes +**Primary error:** +* [`MistralError`](./src/mistralai/models/mistralerror.py): The base class for HTTP error responses. + +

Less common errors (6) + +
+ +**Network errors:** +* [`httpx.RequestError`](https://www.python-httpx.org/exceptions/#httpx.RequestError): Base class for request errors. + * [`httpx.ConnectError`](https://www.python-httpx.org/exceptions/#httpx.ConnectError): HTTP client was unable to make a request to a server. + * [`httpx.TimeoutException`](https://www.python-httpx.org/exceptions/#httpx.TimeoutException): HTTP request timed out. + + +**Inherit from [`MistralError`](./src/mistralai/models/mistralerror.py)**: +* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 47 of 68 methods.* +* [`ResponseValidationError`](./src/mistralai/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. + +
+ +\* Check [the method documentation](#available-resources-and-operations) to see if the error is applicable. diff --git a/USAGE.md b/USAGE.md index d2bba404..b15a88aa 100644 --- a/USAGE.md +++ b/USAGE.md @@ -26,7 +26,7 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio @@ -77,7 +77,7 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio @@ -128,7 +128,7 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio @@ -179,7 +179,7 @@ with Mistral(
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index c41b8454..f06cce76 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -17,6 +17,7 @@ Given a library, list all of the Entity that have access and to what level. ### Example Usage + ```python from mistralai import Mistral import os @@ -57,6 +58,7 @@ Given a library id, you can create or update the access level of an entity. You ### Example Usage + ```python from mistralai import Mistral import os @@ -101,6 +103,7 @@ Given a library id, you can delete the access level of an entity. An owner canno ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 6bab08dd..a4e8b22e 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -16,6 +16,7 @@ Agents Completion ### Example Usage + ```python from mistralai import Mistral import os @@ -75,6 +76,7 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 7b467b58..c5c45e0f 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -16,6 +16,7 @@ Chat Completion ### Example Usage + ```python from mistralai import Mistral import os @@ -78,6 +79,7 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 3f9d3a3c..87eb8d69 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -18,6 +18,7 @@ Moderations ### 
Example Usage + ```python from mistralai import Mistral import os @@ -62,6 +63,7 @@ Chat Moderations ### Example Usage + ```python from mistralai import Mistral import os @@ -108,6 +110,7 @@ Classifications ### Example Usage + ```python from mistralai import Mistral import os @@ -151,6 +154,7 @@ Chat Classifications ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 2eceb451..d3ce96c2 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -24,6 +24,7 @@ Create a new conversation, using a base model or an agent and append entries. Co ### Example Usage + ```python from mistralai import Mistral import os @@ -74,6 +75,7 @@ Retrieve a list of conversation entities sorted by creation time. ### Example Usage + ```python from mistralai import Mistral import os @@ -115,6 +117,7 @@ Given a conversation_id retrieve a conversation entity with its attributes. ### Example Usage + ```python from mistralai import Mistral import os @@ -155,6 +158,7 @@ Run completion on the history of the conversation and the user entries. Return t ### Example Usage + ```python from mistralai import Mistral import os @@ -200,6 +204,7 @@ Given a conversation_id retrieve all the entries belonging to that conversation. ### Example Usage + ```python from mistralai import Mistral import os @@ -240,6 +245,7 @@ Given a conversation_id retrieve all the messages belonging to that conversation ### Example Usage + ```python from mistralai import Mistral import os @@ -280,6 +286,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage + ```python from mistralai import Mistral import os @@ -326,6 +333,7 @@ Create a new conversation, using a base model or an agent and append entries. 
Co ### Example Usage + ```python from mistralai import Mistral import os @@ -385,6 +393,7 @@ Run completion on the history of the conversation and the user entries. Return t ### Example Usage + ```python from mistralai import Mistral import os @@ -432,6 +441,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index 0b49c05c..71848b07 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -24,6 +24,7 @@ Given a library, lists the document that have been uploaded to that library. ### Example Usage + ```python from mistralai import Mistral import os @@ -69,6 +70,7 @@ Given a library, upload a new document to that library. It is queued for process ### Example Usage + ```python from mistralai import Mistral import os @@ -113,6 +115,7 @@ Given a library and a document in this library, you can retrieve the metadata of ### Example Usage + ```python from mistralai import Mistral import os @@ -154,6 +157,7 @@ Given a library and a document in that library, update the name of that document ### Example Usage + ```python from mistralai import Mistral import os @@ -196,6 +200,7 @@ Given a library and a document in that library, delete that document. 
The docume ### Example Usage + ```python from mistralai import Mistral import os @@ -232,6 +237,7 @@ Given a library and a document in that library, you can retrieve the text conten ### Example Usage + ```python from mistralai import Mistral import os @@ -273,6 +279,7 @@ Given a library and a document in that library, retrieve the processing status o ### Example Usage + ```python from mistralai import Mistral import os @@ -314,6 +321,7 @@ Given a library and a document in that library, retrieve the signed URL of a spe ### Example Usage + ```python from mistralai import Mistral import os @@ -355,6 +363,7 @@ Given a library and a document in that library, retrieve the signed URL of text ### Example Usage + ```python from mistralai import Mistral import os @@ -396,6 +405,7 @@ Given a library and a document in that library, reprocess that document, it will ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index a145855f..9554e7b7 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -15,6 +15,7 @@ Embeddings ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index bc39f2e4..e8d28c86 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -24,6 +24,7 @@ Please contact us if you need to increase these storage limits. ### Example Usage + ```python from mistralai import Mistral import os @@ -67,6 +68,7 @@ Returns a list of files that belong to the user's organization. ### Example Usage + ```python from mistralai import Mistral import os @@ -111,6 +113,7 @@ Returns information about a specific file. ### Example Usage + ```python from mistralai import Mistral import os @@ -150,6 +153,7 @@ Delete a file. 
### Example Usage + ```python from mistralai import Mistral import os @@ -189,6 +193,7 @@ Download a file ### Example Usage + ```python from mistralai import Mistral import os @@ -228,6 +233,7 @@ Get Signed Url ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 10e6255d..cce1c070 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -16,6 +16,7 @@ FIM completion. ### Example Usage + ```python from mistralai import Mistral import os @@ -65,6 +66,7 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 1e240c33..b06170f8 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -17,6 +17,7 @@ Get a list of fine-tuning jobs for your organization and user. ### Example Usage + ```python from mistralai import Mistral import os @@ -65,6 +66,7 @@ Create a new fine-tuning job, it will be queued for processing. ### Example Usage + ```python from mistralai import Mistral import os @@ -116,6 +118,7 @@ Get a fine-tuned job details by its UUID. ### Example Usage + ```python from mistralai import Mistral import os @@ -155,6 +158,7 @@ Request the cancellation of a fine tuning job. ### Example Usage + ```python from mistralai import Mistral import os @@ -194,6 +198,7 @@ Request the start of a validated fine tuning job. ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index 4b441c85..14d39f97 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -19,6 +19,7 @@ List all libraries that you have created or have been shared with you. 
### Example Usage + ```python from mistralai import Mistral import os @@ -57,6 +58,7 @@ Create a new Library, you will be marked as the owner and only you will have the ### Example Usage + ```python from mistralai import Mistral import os @@ -99,6 +101,7 @@ Given a library id, details information about that Library. ### Example Usage + ```python from mistralai import Mistral import os @@ -139,6 +142,7 @@ Given a library id, deletes it together with all documents that have been upload ### Example Usage + ```python from mistralai import Mistral import os @@ -179,6 +183,7 @@ Given a library id, you can update the name and description. ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index 58082d21..44b7fcf2 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -19,6 +19,7 @@ Create a new agent giving it instructions, tools, description. The agent is then ### Example Usage + ```python from mistralai import Mistral import os @@ -65,6 +66,7 @@ Retrieve a list of agent entities sorted by creation time. ### Example Usage + ```python from mistralai import Mistral import os @@ -106,6 +108,7 @@ Given an agent retrieve an agent entity with its attributes. ### Example Usage + ```python from mistralai import Mistral import os @@ -146,6 +149,7 @@ Update an agent attributes and create a new version. ### Example Usage + ```python from mistralai import Mistral import os @@ -193,6 +197,7 @@ Switch the version of an agent. ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index ef1e1549..0ef3f138 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -16,6 +16,7 @@ Get a list of batch jobs for your organization and user. 
### Example Usage + ```python from mistralai import Mistral import os @@ -62,6 +63,7 @@ Create a new batch job, it will be queued for processing. ### Example Usage + ```python from mistralai import Mistral import os @@ -108,6 +110,7 @@ Get a batch job details by its UUID. ### Example Usage + ```python from mistralai import Mistral import os @@ -147,6 +150,7 @@ Request the cancellation of a batch job. ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 7dd5d1de..3877c545 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -20,6 +20,7 @@ List all models available to the user. ### Example Usage + ```python from mistralai import Mistral import os @@ -59,6 +60,7 @@ Retrieve information about a model. ### Example Usage + ```python from mistralai import Mistral import os @@ -99,6 +101,7 @@ Delete a fine-tuned model. ### Example Usage + ```python from mistralai import Mistral import os @@ -139,6 +142,7 @@ Update a model name or description. ### Example Usage + ```python from mistralai import Mistral import os @@ -180,6 +184,7 @@ Archive a fine-tuned model. ### Example Usage + ```python from mistralai import Mistral import os @@ -219,6 +224,7 @@ Un-archive a fine-tuned model. 
### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 2188f378..c0c1293e 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -15,6 +15,7 @@ OCR ### Example Usage + ```python from mistralai import Mistral import os diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index fcac2467..022066ac 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -16,6 +16,7 @@ Create Transcription ### Example Usage + ```python from mistralai import Mistral import os @@ -61,6 +62,7 @@ Create streaming transcription (SSE) ### Example Usage + ```python from mistralai import Mistral import os diff --git a/poetry.toml b/poetry.toml index ab1033bd..cd3492ac 100644 --- a/poetry.toml +++ b/poetry.toml @@ -1,2 +1,3 @@ + [virtualenvs] in-project = true diff --git a/scripts/prepare_readme.py b/scripts/prepare_readme.py index 16f6fc7e..1b0a56ec 100644 --- a/scripts/prepare_readme.py +++ b/scripts/prepare_readme.py @@ -10,12 +10,14 @@ GITHUB_URL = ( GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL ) + REPO_SUBDIR = "" # links on PyPI should have absolute URLs readme_contents = re.sub( r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))", lambda m: m.group(1) + GITHUB_URL + "/blob/master/" + + REPO_SUBDIR + m.group(2) + m.group(3), readme_contents, diff --git a/scripts/publish.sh b/scripts/publish.sh index f2f2cf2c..2a3ead70 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -1,5 +1,4 @@ #!/usr/bin/env bash - export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} poetry run python scripts/prepare_readme.py diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 3fa6e7eb..6e366325 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -5,8 +5,8 @@ __title__: str = "mistralai" __version__: str = "1.9.10" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.634.2" 
-__user_agent__: str = "speakeasy-sdk/python 1.9.10 2.634.2 1.0.0 mistralai" +__gen_version__: str = "2.687.13" +__user_agent__: str = "speakeasy-sdk/python 1.9.10 2.687.13 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/accesses.py b/src/mistralai/accesses.py index f5f5b446..ea33517b 100644 --- a/src/mistralai/accesses.py +++ b/src/mistralai/accesses.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional @@ -85,31 +86,20 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListSharingOut) + return unmarshal_json_response(models.ListSharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: 
{content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -185,31 +175,20 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListSharingOut) + return unmarshal_json_response(models.ListSharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def update_or_create( self, @@ -302,31 +281,20 @@ def update_or_create( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.SharingOut) + return unmarshal_json_response(models.SharingOut, http_res) if 
utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def update_or_create_async( self, @@ -419,31 +387,20 @@ async def update_or_create_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.SharingOut) + return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def delete( self, @@ -533,31 +490,20 @@ def delete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.SharingOut) + return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -647,28 +593,17 @@ async def delete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.SharingOut) + return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 48c06372..b220ca5b 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -5,6 +5,7 @@ 
from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union @@ -155,31 +156,20 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -325,31 +315,20 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, 
models.ChatCompletionResponse) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def stream( self, @@ -502,32 +481,23 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", 
"*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -680,29 +650,20 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", 
http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/audio.py b/src/mistralai/audio.py index 66934a86..5687abdb 100644 --- a/src/mistralai/audio.py +++ b/src/mistralai/audio.py @@ -3,16 +3,21 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration from mistralai.transcriptions import Transcriptions +from typing import Optional class Audio(BaseSDK): transcriptions: Transcriptions r"""API for audio transcription.""" - def __init__(self, sdk_config: SDKConfiguration) -> None: - BaseSDK.__init__(self, sdk_config) + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) self.sdk_configuration = sdk_config self._init_sdks() def _init_sdks(self): - self.transcriptions = Transcriptions(self.sdk_configuration) + self.transcriptions = Transcriptions( + self.sdk_configuration, parent_ref=self.parent_ref + ) diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index bb37a1ee..6b62ddae 100644 --- a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -15,9 +15,19 @@ class BaseSDK: sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. 
+ """ - def __init__(self, sdk_config: SDKConfiguration) -> None: + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: self.sdk_configuration = sdk_config + self.parent_ref = parent_ref def _get_url(self, base_url, url_variables): sdk_url, sdk_variables = self.sdk_configuration.get_server_details() @@ -244,7 +254,7 @@ def do(): if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -265,7 +275,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res @@ -316,7 +326,7 @@ async def do(): if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -337,7 +347,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res diff --git a/src/mistralai/batch.py b/src/mistralai/batch.py index bb59abda..7ed7ccef 100644 --- a/src/mistralai/batch.py +++ b/src/mistralai/batch.py @@ -3,15 +3,18 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration from mistralai.mistral_jobs import MistralJobs +from typing import Optional class Batch(BaseSDK): jobs: MistralJobs - def __init__(self, sdk_config: SDKConfiguration) -> None: - BaseSDK.__init__(self, sdk_config) + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, 
parent_ref=parent_ref) self.sdk_configuration = sdk_config self._init_sdks() def _init_sdks(self): - self.jobs = MistralJobs(self.sdk_configuration) + self.jobs = MistralJobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py index 5201022e..4bbf1fa3 100644 --- a/src/mistralai/beta.py +++ b/src/mistralai/beta.py @@ -5,6 +5,7 @@ from mistralai.conversations import Conversations from mistralai.libraries import Libraries from mistralai.mistral_agents import MistralAgents +from typing import Optional class Beta(BaseSDK): @@ -15,12 +16,16 @@ class Beta(BaseSDK): libraries: Libraries r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" - def __init__(self, sdk_config: SDKConfiguration) -> None: - BaseSDK.__init__(self, sdk_config) + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) self.sdk_configuration = sdk_config self._init_sdks() def _init_sdks(self): - self.conversations = Conversations(self.sdk_configuration) - self.agents = MistralAgents(self.sdk_configuration) - self.libraries = Libraries(self.sdk_configuration) + self.conversations = Conversations( + self.sdk_configuration, parent_ref=self.parent_ref + ) + self.agents = MistralAgents(self.sdk_configuration, parent_ref=self.parent_ref) + self.libraries = Libraries(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 1ed067e8..67777a1a 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union # region imports @@ 
-236,31 +237,20 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -405,31 +395,20 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ChatCompletionResponse) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = 
unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def stream( self, @@ -591,32 +570,23 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -778,29 +748,20 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - 
http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index 6ebf7834..cd6a9415 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional, Union @@ -92,31 +93,20 @@ def moderate( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModerationResponse) + return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - 
http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def moderate_async( self, @@ -199,31 +189,20 @@ async def moderate_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModerationResponse) + return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def moderate_chat( self, @@ -306,31 +285,20 @@ def moderate_chat( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModerationResponse) + return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", 
"application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def moderate_chat_async( self, @@ -413,31 +381,20 @@ async def moderate_chat_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModerationResponse) + return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error 
occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def classify( self, @@ -520,31 +477,20 @@ def classify( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def classify_async( self, @@ -627,31 +573,20 @@ async def classify_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def classify_chat( self, @@ -731,31 +666,20 @@ def classify_chat( response_data: Any = None if utils.match_response(http_res, "200", 
"application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def classify_chat_async( self, @@ -835,28 +759,17 @@ async def classify_chat_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ClassificationResponse) + return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise 
models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 12d690f8..27eddedf 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union # region imports @@ -328,31 +329,20 @@ def start( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationResponse) + return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + 
models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def start_async( self, @@ -467,31 +457,20 @@ async def start_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationResponse) + return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, 
"5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def list( self, @@ -570,31 +549,20 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, List[models.ResponseBody]) + return unmarshal_json_response(List[models.ResponseBody], http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - 
http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -673,31 +641,20 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, List[models.ResponseBody]) + return unmarshal_json_response(List[models.ResponseBody], http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def get( self, @@ -773,34 +730,22 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, + return unmarshal_json_response( + models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, 
http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -876,34 +821,22 @@ async def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, + return unmarshal_json_response( + models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, 
"4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def append( self, @@ -1009,31 +942,20 @@ def append( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationResponse) + return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API 
error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def append_async( self, @@ -1139,31 +1061,20 @@ async def append_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationResponse) + return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def get_history( self, @@ -1239,31 +1150,20 @@ def 
get_history( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationHistory) + return unmarshal_json_response(models.ConversationHistory, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def get_history_async( self, @@ -1339,31 +1239,20 @@ async def get_history_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationHistory) + return unmarshal_json_response(models.ConversationHistory, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + 
models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def get_messages( self, @@ -1439,31 +1328,20 @@ def get_messages( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationMessages) + return unmarshal_json_response(models.ConversationMessages, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if 
utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def get_messages_async( self, @@ -1539,31 +1417,20 @@ async def get_messages_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationMessages) + return unmarshal_json_response(models.ConversationMessages, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response 
received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def restart( self, @@ -1672,31 +1539,20 @@ def restart( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationResponse) + return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def restart_async( self, @@ -1805,31 +1661,20 @@ async def restart_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ConversationResponse) + return 
unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def start_stream( self, @@ -1955,32 +1800,23 @@ def start_stream( return eventstreaming.EventStream( http_res, lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def start_stream_async( self, @@ -2106,32 +1942,23 @@ async def start_stream_async( return eventstreaming.EventStreamAsync( http_res, lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise 
models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) def append_stream( self, @@ -2241,32 +2068,23 @@ def append_stream( return eventstreaming.EventStream( http_res, lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def append_stream_async( self, @@ -2376,32 +2194,23 @@ 
async def append_stream_async( return eventstreaming.EventStreamAsync( http_res, lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) def restart_stream( self, @@ -2514,32 +2323,23 @@ def restart_stream( return eventstreaming.EventStream( http_res, lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + 
models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def restart_stream_async( self, @@ -2652,29 +2452,20 @@ async def restart_stream_async( return eventstreaming.EventStreamAsync( http_res, lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", 
http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/documents.py b/src/mistralai/documents.py index c28758d2..5f8c6b9f 100644 --- a/src/mistralai/documents.py +++ b/src/mistralai/documents.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional, Union @@ -100,31 +101,20 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListDocumentOut) + return unmarshal_json_response(models.ListDocumentOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if 
utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -215,31 +205,20 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListDocumentOut) + return unmarshal_json_response(models.ListDocumentOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: 
{http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def upload( self, @@ -327,31 +306,20 @@ def upload( response_data: Any = None if utils.match_response(http_res, ["200", "201"], "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + return unmarshal_json_response(models.DocumentOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def upload_async( self, @@ -439,31 +407,20 @@ async def upload_async( response_data: Any = None if utils.match_response(http_res, ["200", "201"], "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + return unmarshal_json_response(models.DocumentOut, http_res) if 
utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def get( self, @@ -542,31 +499,20 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + return unmarshal_json_response(models.DocumentOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error 
occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -645,31 +591,20 @@ async def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + return unmarshal_json_response(models.DocumentOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = 
await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def update( self, @@ -760,31 +695,20 @@ def update( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + return unmarshal_json_response(models.DocumentOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -875,31 +799,20 @@ async def update_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentOut) + 
return unmarshal_json_response(models.DocumentOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def delete( self, @@ -980,29 +893,18 @@ def delete( if utils.match_response(http_res, "204", "*"): return if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API 
error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -1083,29 +985,18 @@ async def delete_async( if utils.match_response(http_res, "204", "*"): return if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise 
models.SDKError("Unexpected response received", http_res) def text_content( self, @@ -1184,31 +1075,20 @@ def text_content( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentTextContent) + return unmarshal_json_response(models.DocumentTextContent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def text_content_async( self, @@ -1287,31 +1167,20 @@ async def text_content_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DocumentTextContent) + return unmarshal_json_response(models.DocumentTextContent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( 
- http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def status( self, @@ -1390,31 +1259,20 @@ def status( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ProcessingStatusOut) + return unmarshal_json_response(models.ProcessingStatusOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise 
models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def status_async( self, @@ -1493,31 +1351,20 @@ async def status_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ProcessingStatusOut) + return unmarshal_json_response(models.ProcessingStatusOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - 
raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def get_signed_url( self, @@ -1596,31 +1443,20 @@ def get_signed_url( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, str) + return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def get_signed_url_async( self, @@ -1699,31 +1535,20 @@ async def get_signed_url_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, str) + return unmarshal_json_response(str, http_res) if 
utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def extracted_text_signed_url( self, @@ -1802,31 +1627,20 @@ def extracted_text_signed_url( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, str) + return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise 
models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def extracted_text_signed_url_async( self, @@ -1905,31 +1719,20 @@ async def extracted_text_signed_url_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, str) + return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = 
http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def reprocess( self, @@ -2010,29 +1813,18 @@ def reprocess( if utils.match_response(http_res, "204", "*"): return if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def reprocess_async( self, @@ -2113,26 +1905,15 @@ async def reprocess_async( if utils.match_response(http_res, "204", "*"): return if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( 
+ models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 79309eea..1822a1ec 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional, Union @@ -102,31 +103,20 @@ def create( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) + return unmarshal_json_response(models.EmbeddingResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + 
models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def create_async( self, @@ -219,28 +209,17 @@ async def create_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.EmbeddingResponse) + return unmarshal_json_response(models.EmbeddingResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", 
"*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/files.py b/src/mistralai/files.py index 05739eeb..c6e438af 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -6,6 +6,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import List, Mapping, Optional, Union @@ -99,26 +100,15 @@ def upload( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.UploadFileOut) + return unmarshal_json_response(models.UploadFileOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - 
http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def upload_async( self, @@ -207,26 +197,15 @@ async def upload_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.UploadFileOut) + return unmarshal_json_response(models.UploadFileOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def list( self, @@ -316,26 +295,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListFilesOut) + return unmarshal_json_response(models.ListFilesOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + 
raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -425,26 +393,15 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListFilesOut) + return unmarshal_json_response(models.ListFilesOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def retrieve( self, @@ -519,26 +476,15 @@ def retrieve( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.RetrieveFileOut) + return unmarshal_json_response(models.RetrieveFileOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, 
http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def retrieve_async( self, @@ -613,26 +559,15 @@ async def retrieve_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.RetrieveFileOut) + return unmarshal_json_response(models.RetrieveFileOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def delete( self, @@ -707,26 +642,15 @@ def delete( ) if utils.match_response(http_res, "200", 
"application/json"): - return utils.unmarshal_json(http_res.text, models.DeleteFileOut) + return unmarshal_json_response(models.DeleteFileOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -801,26 +725,15 @@ async def delete_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DeleteFileOut) + return unmarshal_json_response(models.DeleteFileOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected 
response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def download( self, @@ -899,23 +812,13 @@ def download( return http_res if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def download_async( self, @@ -994,23 +897,13 @@ async def download_async( return http_res if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, 
type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) def get_signed_url( self, @@ -1086,26 +979,15 @@ def get_signed_url( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FileSignedURL) + return unmarshal_json_response(models.FileSignedURL, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def get_signed_url_async( self, @@ -1181,23 +1063,12 @@ async def get_signed_url_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FileSignedURL) + return unmarshal_json_response(models.FileSignedURL, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API 
error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index c57bc68e..fa7b15c2 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional, Union @@ -120,31 +121,20 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) + return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, 
http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -255,31 +245,20 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.FIMCompletionResponse) + return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", 
http_res) def stream( self, @@ -395,32 +374,23 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -536,29 +506,20 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise 
models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/fine_tuning.py b/src/mistralai/fine_tuning.py index ce3d1389..8ed5788a 100644 --- a/src/mistralai/fine_tuning.py +++ b/src/mistralai/fine_tuning.py @@ -3,15 +3,18 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration from mistralai.jobs import Jobs +from typing import Optional class FineTuning(BaseSDK): jobs: Jobs - def __init__(self, sdk_config: SDKConfiguration) -> None: - BaseSDK.__init__(self, sdk_config) + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) self.sdk_configuration = sdk_config self._init_sdks() def _init_sdks(self): - self.jobs = Jobs(self.sdk_configuration) + self.jobs = Jobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index 020c40f0..af6364cb 100644 --- a/src/mistralai/jobs.py +++ 
b/src/mistralai/jobs.py @@ -6,6 +6,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import List, Mapping, Optional, Union @@ -110,26 +111,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.JobsOut) + return unmarshal_json_response(models.JobsOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -231,26 +221,15 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.JobsOut) + return unmarshal_json_response(models.JobsOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def create( self, @@ -383,28 +362,17 @@ def create( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def create_async( self, @@ -537,28 +505,17 @@ async def create_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse 
+ return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def get( self, @@ -633,28 +590,17 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningGetFineTuningJobResponse + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected 
response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -729,28 +675,17 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningGetFineTuningJobResponse + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def cancel( self, @@ -825,28 +760,17 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error 
occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def cancel_async( self, @@ -921,28 +845,17 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def start( self, @@ -1017,28 +930,17 @@ def start( ) if utils.match_response(http_res, "200", 
"application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningStartFineTuningJobResponse + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def start_async( self, @@ -1113,25 +1015,14 @@ async def start_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, models.JobsAPIRoutesFineTuningStartFineTuningJobResponse + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) 
- content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/libraries.py b/src/mistralai/libraries.py index 852f6997..e9f19047 100644 --- a/src/mistralai/libraries.py +++ b/src/mistralai/libraries.py @@ -8,6 +8,7 @@ from mistralai.documents import Documents from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional @@ -19,14 +20,16 @@ class Libraries(BaseSDK): accesses: Accesses r"""(beta) Libraries API - manage access to a library.""" - def __init__(self, sdk_config: SDKConfiguration) -> None: - BaseSDK.__init__(self, sdk_config) + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) self.sdk_configuration = sdk_config self._init_sdks() def _init_sdks(self): - self.documents = Documents(self.sdk_configuration) - self.accesses = Accesses(self.sdk_configuration) + self.documents = Documents(self.sdk_configuration, parent_ref=self.parent_ref) + self.accesses = Accesses(self.sdk_configuration, parent_ref=self.parent_ref) def list( self, @@ -94,26 +97,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListLibraryOut) + return unmarshal_json_response(models.ListLibraryOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error 
occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -181,26 +173,15 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ListLibraryOut) + return unmarshal_json_response(models.ListLibraryOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def create( self, @@ -285,31 +266,20 @@ def create( response_data: Any = None if utils.match_response(http_res, "201", "application/json"): - return utils.unmarshal_json(http_res.text, 
models.LibraryOut) + return unmarshal_json_response(models.LibraryOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def create_async( self, @@ -394,31 +364,20 @@ async def create_async( response_data: Any = None if utils.match_response(http_res, "201", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.LibraryOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): 
http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def get( self, @@ -494,31 +453,20 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.LibraryOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - 
content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -594,31 +542,20 @@ async def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.LibraryOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def delete( self, @@ -694,31 +631,20 @@ def delete( response_data: Any = None if utils.match_response(http_res, "200", 
"application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.LibraryOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -794,31 +720,20 @@ async def delete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.LibraryOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, 
http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def update( self, @@ -905,31 +820,20 @@ def update( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.LibraryOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise 
models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -1016,28 +920,17 @@ async def update_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.LibraryOut) + return unmarshal_json_response(models.LibraryOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/mistral_agents.py 
b/src/mistralai/mistral_agents.py index f0d4be01..65f256d6 100644 --- a/src/mistralai/mistral_agents.py +++ b/src/mistralai/mistral_agents.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union @@ -117,31 +118,20 @@ def create( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) + return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def create_async( self, @@ -249,31 +239,20 @@ async def create_async( response_data: Any = None if 
utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) + return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def list( self, @@ -352,31 +331,20 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, List[models.Agent]) + return unmarshal_json_response(List[models.Agent], http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise 
models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -455,31 +423,20 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, List[models.Agent]) + return unmarshal_json_response(List[models.Agent], http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", 
http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def get( self, @@ -555,31 +512,20 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) + return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -655,31 +601,20 @@ async 
def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) + return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def update( self, @@ -796,31 +731,20 @@ def update( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) + return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise 
models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -937,31 +861,20 @@ async def update_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) + return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def update_version( self, @@ -1040,31 +953,20 @@ def update_version( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) + return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", 
http_res) async def update_version_async( self, @@ -1143,28 +1045,17 @@ async def update_version_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.Agent) + return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index c51d64a7..fb0a0de7 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -6,6 +6,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import 
Any, Dict, List, Mapping, Optional @@ -104,26 +105,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobsOut) + return unmarshal_json_response(models.BatchJobsOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -219,26 +209,15 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobsOut) + return unmarshal_json_response(models.BatchJobsOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - 
raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) def create( self, @@ -331,26 +310,15 @@ def create( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) + return unmarshal_json_response(models.BatchJobOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) async def create_async( self, @@ -443,26 +411,15 @@ async def create_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) + return unmarshal_json_response(models.BatchJobOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, 
"5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) def get( self, @@ -537,26 +494,15 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) + return unmarshal_json_response(models.BatchJobOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -631,26 +577,15 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) + return unmarshal_json_response(models.BatchJobOut, http_res) if 
utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) def cancel( self, @@ -725,26 +660,15 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) + return unmarshal_json_response(models.BatchJobOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response 
received", http_res) async def cancel_async( self, @@ -819,23 +743,12 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.BatchJobOut) + return unmarshal_json_response(models.BatchJobOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) - - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 9ca279f8..0298e73b 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -1,7 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +from .mistralerror import MistralError from typing import TYPE_CHECKING from importlib import import_module +import builtins +import sys if TYPE_CHECKING: from .agent import ( @@ -713,6 +716,7 @@ from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict from .moderationobject import ModerationObject, ModerationObjectTypedDict from .moderationresponse import ModerationResponse, ModerationResponseTypedDict + from .no_response_error import NoResponseError from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict @@ -745,6 +749,7 @@ ResponseStartedEventType, ResponseStartedEventTypedDict, ) + from .responsevalidationerror import ResponseValidationError from .retrieve_model_v1_models_model_id_getop import ( RetrieveModelV1ModelsModelIDGetRequest, RetrieveModelV1ModelsModelIDGetRequestTypedDict, @@ -1391,6 +1396,7 @@ "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", + "MistralError", "MistralPromptMode", "ModelCapabilities", "ModelCapabilitiesTypedDict", @@ -1406,6 +1412,7 @@ "ModerationObjectTypedDict", "ModerationResponse", "ModerationResponseTypedDict", + "NoResponseError", "OCRImageObject", "OCRImageObjectTypedDict", "OCRPageDimensions", @@ -1453,6 +1460,7 @@ "ResponseStartedEvent", "ResponseStartedEventType", "ResponseStartedEventTypedDict", + "ResponseValidationError", "RetrieveFileOut", "RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", @@ -2107,6 +2115,7 @@ "ModerationObjectTypedDict": ".moderationobject", "ModerationResponse": ".moderationresponse", "ModerationResponseTypedDict": ".moderationresponse", + "NoResponseError": ".no_response_error", "OCRImageObject": ".ocrimageobject", "OCRImageObjectTypedDict": ".ocrimageobject", "OCRPageDimensions": ".ocrpagedimensions", @@ -2144,6 +2153,7 @@ "ResponseStartedEvent": ".responsestartedevent", 
"ResponseStartedEventType": ".responsestartedevent", "ResponseStartedEventTypedDict": ".responsestartedevent", + "ResponseValidationError": ".responsevalidationerror", "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", @@ -2268,6 +2278,18 @@ } +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + def __getattr__(attr_name: str) -> object: module_name = _dynamic_imports.get(attr_name) if module_name is None: @@ -2276,7 +2298,7 @@ def __getattr__(attr_name: str) -> object: ) try: - module = import_module(module_name, __package__) + module = dynamic_import(module_name) result = getattr(module, attr_name) return result except ImportError as e: @@ -2290,5 +2312,5 @@ def __getattr__(attr_name: str) -> object: def __dir__(): - lazy_attrs = list(_dynamic_imports.keys()) - return sorted(lazy_attrs) + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py index 37f2dd76..e9136063 100644 --- a/src/mistralai/models/httpvalidationerror.py +++ b/src/mistralai/models/httpvalidationerror.py @@ -2,7 +2,8 @@ from __future__ import annotations from .validationerror import ValidationError -from mistralai import utils +import httpx +from mistralai.models import MistralError from mistralai.types import BaseModel from typing import List, Optional @@ -11,11 +12,15 @@ class HTTPValidationErrorData(BaseModel): detail: 
Optional[List[ValidationError]] = None -class HTTPValidationError(Exception): +class HTTPValidationError(MistralError): data: HTTPValidationErrorData - def __init__(self, data: HTTPValidationErrorData): + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) self.data = data - - def __str__(self) -> str: - return utils.marshal_json(self.data, HTTPValidationErrorData) diff --git a/src/mistralai/models/mistralerror.py b/src/mistralai/models/mistralerror.py new file mode 100644 index 00000000..a0ee5078 --- /dev/null +++ b/src/mistralai/models/mistralerror.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Optional + + +class MistralError(Exception): + """The base class for all HTTP error responses.""" + + message: str + status_code: int + body: str + headers: httpx.Headers + raw_response: httpx.Response + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + self.message = message + self.status_code = raw_response.status_code + self.body = body if body is not None else raw_response.text + self.headers = raw_response.headers + self.raw_response = raw_response + + def __str__(self): + return self.message diff --git a/src/mistralai/models/no_response_error.py b/src/mistralai/models/no_response_error.py new file mode 100644 index 00000000..f98beea2 --- /dev/null +++ b/src/mistralai/models/no_response_error.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +class NoResponseError(Exception): + """Error raised when no HTTP response is received from the server.""" + + message: str + + def __init__(self, message: str = "No response received"): + self.message = message + super().__init__(message) + + def __str__(self): + return self.message diff --git a/src/mistralai/models/responsevalidationerror.py b/src/mistralai/models/responsevalidationerror.py new file mode 100644 index 00000000..fe31cfbd --- /dev/null +++ b/src/mistralai/models/responsevalidationerror.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Optional + +from mistralai.models import MistralError + + +class ResponseValidationError(MistralError): + """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" + + def __init__( + self, + message: str, + raw_response: httpx.Response, + cause: Exception, + body: Optional[str] = None, + ): + message = f"{message}: {cause}" + super().__init__(message, raw_response, body) + + @property + def cause(self): + """Normally the Pydantic ValidationError""" + return self.__cause__ diff --git a/src/mistralai/models/sdkerror.py b/src/mistralai/models/sdkerror.py index 03216cbf..2513f36b 100644 --- a/src/mistralai/models/sdkerror.py +++ b/src/mistralai/models/sdkerror.py @@ -1,22 +1,38 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from dataclasses import dataclass -from typing import Optional import httpx +from typing import Optional + +from mistralai.models import MistralError + +MAX_MESSAGE_LEN = 10_000 + + +class SDKError(MistralError): + """The fallback error class if no more specific error class is matched.""" + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + body_display = body or raw_response.text or '""' + if message: + message += ": " + message += f"Status {raw_response.status_code}" -@dataclass -class SDKError(Exception): - """Represents an error returned by the API.""" + headers = raw_response.headers + content_type = headers.get("content-type", '""') + if content_type != "application/json": + if " " in content_type: + content_type = f'"{content_type}"' + message += f" Content-Type {content_type}" - message: str - status_code: int = -1 - body: str = "" - raw_response: Optional[httpx.Response] = None + if len(body_display) > MAX_MESSAGE_LEN: + truncated = body_display[:MAX_MESSAGE_LEN] + remaining = len(body_display) - MAX_MESSAGE_LEN + body_display = f"{truncated}...and {remaining} more chars" - def __str__(self): - body = "" - if len(self.body) > 0: - body = f"\n{self.body}" + message += f". 
Body: {body_display}" + message = message.strip() - return f"{self.message}: Status {self.status_code}{body}" + super().__init__(message, raw_response, body) diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index b712c545..b6cc3186 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional @@ -78,31 +79,20 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModelList) + return unmarshal_json_response(models.ModelList, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise 
models.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -171,31 +161,20 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ModelList) + return unmarshal_json_response(models.ModelList, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def retrieve( self, @@ -271,34 +250,23 @@ def retrieve( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, + return unmarshal_json_response( models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + http_res, ) if utils.match_response(http_res, "422", "application/json"): - response_data = 
utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def retrieve_async( self, @@ -374,34 +342,23 @@ async def retrieve_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, + return unmarshal_json_response( models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + http_res, ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", 
http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def delete( self, @@ -477,31 +434,20 @@ def delete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DeleteModelOut) + return unmarshal_json_response(models.DeleteModelOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -577,31 +523,20 @@ async def delete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.DeleteModelOut) + return unmarshal_json_response(models.DeleteModelOut, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def update( self, @@ -687,29 +622,17 @@ def update( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, - 
models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -795,29 +718,17 @@ async def update_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, - models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - 
http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def archive( self, @@ -892,26 +803,15 @@ def archive( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ArchiveFTModelOut) + return unmarshal_json_response(models.ArchiveFTModelOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def archive_async( self, @@ -986,26 +886,15 @@ async def archive_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.ArchiveFTModelOut) + return unmarshal_json_response(models.ArchiveFTModelOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if 
utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def unarchive( self, @@ -1080,26 +969,15 @@ def unarchive( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.UnarchiveFTModelOut) + return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def unarchive_async( self, @@ -1174,23 +1052,12 @@ async def unarchive_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.UnarchiveFTModelOut) + return 
unmarshal_json_response(models.UnarchiveFTModelOut, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index a7b1d04a..bed8b7be 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import Nullable, OptionalNullable, UNSET from mistralai.utils import get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union @@ -118,31 +119,20 @@ def process( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.OCRResponse) + return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise 
models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def process_async( self, @@ -251,28 +241,17 @@ async def process_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.OCRResponse) + return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", 
http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 23d31cc7..311147fd 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -10,6 +10,7 @@ from mistralai import models, utils from mistralai._hooks import SDKHooks from mistralai.types import OptionalNullable, UNSET +import sys from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast import weakref @@ -135,6 +136,7 @@ def __init__( timeout_ms=timeout_ms, debug_logger=debug_logger, ), + parent_ref=self, ) hooks = SDKHooks() @@ -159,13 +161,24 @@ def __init__( self.sdk_configuration.async_client_supplied, ) + def dynamic_import(self, modname, retries=3): + for attempt in range(retries): + try: + return importlib.import_module(modname) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + def __getattr__(self, name: str): if name in self._sub_sdk_map: module_path, class_name = self._sub_sdk_map[name] try: - module = importlib.import_module(module_path) + module = self.dynamic_import(module_path) klass = getattr(module, class_name) - instance = klass(self.sdk_configuration) + instance = klass(self.sdk_configuration, parent_ref=self) setattr(self, name, instance) return instance except ImportError as e: diff --git a/src/mistralai/transcriptions.py b/src/mistralai/transcriptions.py index 24975cb2..3e2de6f5 100644 --- a/src/mistralai/transcriptions.py +++ 
b/src/mistralai/transcriptions.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env +from mistralai.utils.unmarshal_json_response import unmarshal_json_response from typing import List, Mapping, Optional, Union @@ -103,26 +104,15 @@ def complete( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.TranscriptionResponse) + return unmarshal_json_response(models.TranscriptionResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -216,26 +206,15 @@ async def complete_async( ) if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, models.TranscriptionResponse) + return unmarshal_json_response(models.TranscriptionResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", 
http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) def stream( self, @@ -337,26 +316,17 @@ def stream( return eventstreaming.EventStream( http_res, lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), + client_ref=self, ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -458,23 +428,14 @@ async def stream_async( return eventstreaming.EventStreamAsync( http_res, lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), + client_ref=self, ) if utils.match_response(http_res, "4XX", "*"): 
http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py index 3d078198..87192dde 100644 --- a/src/mistralai/utils/__init__.py +++ b/src/mistralai/utils/__init__.py @@ -2,6 +2,8 @@ from typing import TYPE_CHECKING from importlib import import_module +import builtins +import sys if TYPE_CHECKING: from .annotations import get_discriminator @@ -161,6 +163,18 @@ } +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + def __getattr__(attr_name: str) -> object: module_name = _dynamic_imports.get(attr_name) if module_name is None: @@ -169,9 +183,8 @@ def __getattr__(attr_name: str) -> object: ) try: - module = import_module(module_name, __package__) - result = getattr(module, attr_name) - return result + module = dynamic_import(module_name) + return getattr(module, attr_name) except ImportError as e: raise 
ImportError( f"Failed to import {attr_name} from {module_name}: {e}" @@ -183,5 +196,5 @@ def __getattr__(attr_name: str) -> object: def __dir__(): - lazy_attrs = list(_dynamic_imports.keys()) - return sorted(lazy_attrs) + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/src/mistralai/utils/eventstreaming.py b/src/mistralai/utils/eventstreaming.py index 74a63f75..0969899b 100644 --- a/src/mistralai/utils/eventstreaming.py +++ b/src/mistralai/utils/eventstreaming.py @@ -17,6 +17,9 @@ class EventStream(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] @@ -25,9 +28,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events(response, decoder, sentinel) + self.client_ref = client_ref def __iter__(self): return self @@ -43,6 +48,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): class EventStreamAsync(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. 
+ client_ref: Optional[object] response: httpx.Response generator: AsyncGenerator[T, None] @@ -51,9 +59,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events_async(response, decoder, sentinel) + self.client_ref = client_ref def __aiter__(self): return self diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py index 76e44d71..378a14c0 100644 --- a/src/mistralai/utils/serializers.py +++ b/src/mistralai/utils/serializers.py @@ -192,7 +192,9 @@ def is_union(obj: object) -> bool: """ Returns True if the given object is a typing.Union or typing_extensions.Union. """ - return any(obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union")) + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) def stream_to_text(stream: httpx.Response) -> str: @@ -245,4 +247,3 @@ def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: f"Neither typing nor typing_extensions has an object called {name!r}" ) return result - diff --git a/src/mistralai/utils/unmarshal_json_response.py b/src/mistralai/utils/unmarshal_json_response.py new file mode 100644 index 00000000..c0ce7e0f --- /dev/null +++ b/src/mistralai/utils/unmarshal_json_response.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Any, Optional + +import httpx + +from .serializers import unmarshal_json +from mistralai import models + + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise models.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e From 992b0cbd3f86cef54e74a03cfd488f123cec4203 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Thu, 2 Oct 2025 17:51:55 +0200 Subject: [PATCH 157/223] ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.606.10 (#277) Co-authored-by: speakeasybot --- .speakeasy/gen.lock | 4 ++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 10 +++++----- RELEASES.md | 12 +++++++++++- pyproject.toml | 2 +- src/mistralai/_version.py | 4 ++-- 6 files changed, 22 insertions(+), 12 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index dc029dc3..05e0840c 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,8 +5,8 @@ management: docVersion: 1.0.0 speakeasyVersion: 1.606.10 generationVersion: 2.687.13 - releaseVersion: 1.9.10 - configChecksum: 928d8206dce080425b60348d566b2c4e + releaseVersion: 1.9.11 + configChecksum: d84e605ef7a3265972f6695049243759 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 24506f89..116b0e26 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.10 + version: 1.9.11 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock 
index 3f4b0fd9..f4582991 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -18,7 +18,7 @@ sources: sourceBlobDigest: sha256:97767522559603de92a9738938e522cea4d558b2a854500acf6fe8d81f8ccfb8 tags: - latest - - main + - speakeasy-sdk-regen-1759420102 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -40,20 +40,20 @@ targets: sourceRevisionDigest: sha256:eefc1f0b6a5e9ec673d317d61cad766290710b5fc369412491b75f732cccfedd sourceBlobDigest: sha256:97767522559603de92a9738938e522cea4d558b2a854500acf6fe8d81f8ccfb8 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:31dee4783f73e1efa932b16da7238ddfe29b94f4b1c66223d6e0f2393cdc6f72 + codeSamplesRevisionDigest: sha256:8ed158c9c1ed8252f86b620219dd93e9650b45e7c6403cda7fdd9b4ee0d17dac workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.606.10 sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:sha256:670c460702ec74f7077491464a6dc5ee9d873969c80e812c48dbf4deb160e470 + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:sha256:eefc1f0b6a5e9ec673d317d61cad766290710b5fc369412491b75f732cccfedd + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main targets: mistralai-azure-sdk: target: python diff --git a/RELEASES.md b/RELEASES.md index 5a818523..b65d9d0c 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -318,4 +318,14 @@ Based on: ### Generated - [python v1.9.10] . 
### Releases -- [PyPI v1.9.10] https://pypi.org/project/mistralai/1.9.10 - . \ No newline at end of file +- [PyPI v1.9.10] https://pypi.org/project/mistralai/1.9.10 - . + +## 2025-10-02 15:48:02 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.9.11] . +### Releases +- [PyPI v1.9.11] https://pypi.org/project/mistralai/1.9.11 - . \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 47842d19..087b5703 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.10" +version = "1.9.11" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 6e366325..fa0b5e7d 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.10" +__version__: str = "1.9.11" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.687.13" -__user_agent__: str = "speakeasy-sdk/python 1.9.10 2.687.13 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.9.11 2.687.13 1.0.0 mistralai" try: if __package__ is not None: From 7e9f550d4c3be702fae8823cee91feeec169ebc9 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Thu, 30 Oct 2025 15:44:37 +0100 Subject: [PATCH 158/223] [CI] Add retries to avoid failing because of flakey network issues (#281) * [CI] Add retries to avoid failing because of flakey network issues * Print each retry error if all retries failed --- scripts/run_examples.sh | 80 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 71 
insertions(+), 9 deletions(-) diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 3ef2f9e2..d9ff43b2 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -1,5 +1,34 @@ #!/bin/bash +# Default retry count +RETRY_COUNT=3 + +# Parse command line arguments +while [[ $# -gt 0 ]]; do + case $1 in + --no-extra-dep) + NO_EXTRA_DEP=true + shift + ;; + --retry-count) + RETRY_COUNT="$2" + shift 2 + ;; + --help) + echo "Usage: $0 [--no-extra-dep] [--retry-count N]" + echo " --no-extra-dep: Exclude files that require extra dependencies" + echo " --retry-count N: Number of retries for each test (default: 3)" + echo " --help: Show this help message" + exit 0 + ;; + *) + echo "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + # List of files to exclude exclude_files=( "examples/mistral/chat/chatbot_with_streaming.py" @@ -13,8 +42,8 @@ exclude_files=( "examples/mistral/agents/async_conversation_run_mcp_remote.py" ) -# Check if the first argument is "no-extra-dep" then remove all the files that require the extra dependencies -if [ "$1" = "--no-extra-dep" ]; then +# Check if the no-extra-dep flag is set +if [ "$NO_EXTRA_DEP" = true ]; then # Add more files to the exclude list exclude_files+=( "examples/mistral/agents/async_conversation_run_mcp_remote.py" @@ -25,19 +54,52 @@ fi failed=0 +# Function to run a test with retries +run_test_with_retries() { + local file="$1" + local attempt=1 + local error_outputs=() + + while [ $attempt -le $RETRY_COUNT ]; do + echo "Running $file (attempt $attempt/$RETRY_COUNT)" + + # Run the script and capture both exit status and error output + local current_output=$(python3 "$file" 2>&1) + local exit_code=$? + + if [ $exit_code -eq 0 ]; then + echo "Success" + return 0 + else + # Store the error output from this attempt + error_outputs+=("Attempt $attempt: $current_output") + + if [ $attempt -lt $RETRY_COUNT ]; then + echo "Failed (attempt $attempt/$RETRY_COUNT), retrying..." 
+ sleep 1 # Brief pause before retry + else + echo "Failed after $RETRY_COUNT attempts" + echo "Error outputs from all attempts:" + for error_output in "${error_outputs[@]}"; do + echo "$error_output" + echo "---" + done + return 1 + fi + fi + + attempt=$((attempt + 1)) + done +} + for file in examples/mistral/**/*.py; do # Check if the file is not in the exclude list if [ -f "$file" ] && [[ ! " ${exclude_files[@]} " =~ " $file " ]]; then - echo "Running $file" - # Run the script and capture the exit status - if python3 "$file" > /dev/null; then - echo "Success" - else - echo "Failed" + if ! run_test_with_retries "$file"; then failed=1 fi else - echo "Skipped $file" + echo "Skipped $file" fi done From c6c1f787fa0a6237d01c8d955137bd32599e8dd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ABl=20Nison?= Date: Mon, 17 Nov 2025 14:18:31 +0100 Subject: [PATCH 159/223] Pins GH actions (#291) --- .github/workflows/lint_custom_code.yaml | 4 ++-- .github/workflows/run_example_scripts.yaml | 2 +- .github/workflows/sdk_generation_mistralai_azure_sdk.yaml | 2 +- .github/workflows/sdk_generation_mistralai_gcp_sdk.yaml | 2 +- .github/workflows/sdk_generation_mistralai_sdk.yaml | 2 +- .github/workflows/sdk_publish_mistralai_sdk.yaml | 2 +- .github/workflows/test_custom_code.yaml | 6 +++--- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/lint_custom_code.yaml b/.github/workflows/lint_custom_code.yaml index 0bbf7126..bd327c42 100644 --- a/.github/workflows/lint_custom_code.yaml +++ b/.github/workflows/lint_custom_code.yaml @@ -14,10 +14,10 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@7f4fc3e22c37d6ff65e88745f38bd3157c663f7c # v4 with: python-version: '3.12' diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index 
7d8eb792..1ac5b3a5 100644 --- a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -18,7 +18,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@42375524e23c412d93fb67b49958b491fce71c38 # v5.4.0 diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml index 7ec5bb8d..2a510615 100644 --- a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -16,7 +16,7 @@ permissions: type: string jobs: generate: - uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@5b268931ef6902bbb70fa01b467dc361c68f7617 # v15 with: force: ${{ github.event.inputs.force }} mode: pr diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml index c4da64f7..f30440e8 100644 --- a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -16,7 +16,7 @@ permissions: type: string jobs: generate: - uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@5b268931ef6902bbb70fa01b467dc361c68f7617 # v15 with: force: ${{ github.event.inputs.force }} mode: pr diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index 1d80cddc..719a7b72 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -16,7 +16,7 @@ permissions: type: string jobs: generate: - uses: 
speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@5b268931ef6902bbb70fa01b467dc361c68f7617 # v15 with: force: ${{ github.event.inputs.force }} mode: pr diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml index 46af0ad3..e7928481 100644 --- a/.github/workflows/sdk_publish_mistralai_sdk.yaml +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -13,7 +13,7 @@ permissions: - "*/RELEASES.md" jobs: publish: - uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@5b268931ef6902bbb70fa01b467dc361c68f7617 # v15 secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} diff --git a/.github/workflows/test_custom_code.yaml b/.github/workflows/test_custom_code.yaml index 230066cb..2b087076 100644 --- a/.github/workflows/test_custom_code.yaml +++ b/.github/workflows/test_custom_code.yaml @@ -14,11 +14,11 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 - name: Set up Python id: setup-python - uses: actions/setup-python@v4 + uses: actions/setup-python@7f4fc3e22c37d6ff65e88745f38bd3157c663f7c # v4 with: python-version: '3.12' @@ -32,7 +32,7 @@ jobs: - name: Load cached venv id: cached-poetry-dependencies - uses: actions/cache@v4 + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 with: path: .venv key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} From 747341e25fe2d5fddb39bd4afe9f25e7267268b4 Mon Sep 17 00:00:00 2001 From: Alexandre Menasria <47357713+amenasria@users.noreply.github.com> Date: Tue, 2 Dec 2025 14:40:08 +0100 Subject: [PATCH 160/223] Add OTel Tracing (#280) --- 
.../agents/async_multi_turn_conversation.py | 69 +++ poetry.lock | 193 ++++++++- pyproject.toml | 6 +- src/mistralai/_hooks/registration.py | 5 + src/mistralai/_hooks/tracing.py | 50 +++ src/mistralai/conversations.py | 90 ++-- src/mistralai/extra/observability/__init__.py | 15 + src/mistralai/extra/observability/otel.py | 393 ++++++++++++++++++ src/mistralai/extra/run/tools.py | 44 +- 9 files changed, 798 insertions(+), 67 deletions(-) create mode 100644 examples/mistral/agents/async_multi_turn_conversation.py create mode 100644 src/mistralai/_hooks/tracing.py create mode 100644 src/mistralai/extra/observability/__init__.py create mode 100644 src/mistralai/extra/observability/otel.py diff --git a/examples/mistral/agents/async_multi_turn_conversation.py b/examples/mistral/agents/async_multi_turn_conversation.py new file mode 100644 index 00000000..d24443c0 --- /dev/null +++ b/examples/mistral/agents/async_multi_turn_conversation.py @@ -0,0 +1,69 @@ +import os +from mistralai import Mistral + +from mistralai.extra.run.context import RunContext +import logging +import time +import asyncio + + +MODEL = "mistral-medium-latest" + +USER_MESSAGE = """ +Please make the Secret Santa for me +To properly do it you need to: +- Get the friend you were assigned to (using the get_secret_santa_assignment function) +- Read into his gift wishlist what they would like to receive (using the get_gift_wishlist function) +- Buy the gift (using the buy_gift function) +- Find the best website to buy the gift using a web search +- Send it to them (using the send_gift function) +""" + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + mistral_agent_id = os.environ["MISTRAL_AGENT_ID"] + client = Mistral( + api_key=api_key, debug_logger=logging.getLogger("mistralai") + ) + + async with RunContext( + agent_id=mistral_agent_id + ) as run_context: + run_context.register_func(get_secret_santa_assignment) + run_context.register_func(get_gift_wishlist) + 
run_context.register_func(buy_gift) + run_context.register_func(send_gift) + + await client.beta.conversations.run_async( + run_ctx=run_context, + inputs=USER_MESSAGE, + ) + + +def get_secret_santa_assignment(): + """Get the friend you were assigned to""" + time.sleep(2) + return "John Doe" + + +def get_gift_wishlist(friend_name: str): + """Get the gift wishlist of the friend you were assigned to""" + time.sleep(1.5) + return ["Book", "Chocolate", "T-Shirt"] + + +def buy_gift(gift_name: str): + """Buy the gift you want to send to your friend""" + time.sleep(1.1) + return f"Bought {gift_name}" + + +def send_gift(friend_name: str, gift_name: str, website: str): + """Send the gift to your friend""" + time.sleep(2.2) + return f"Sent {gift_name} to {friend_name} bought on {website}" + + +if __name__ == "__main__": + asyncio.run(main()) \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index e3a652fa..51676115 100644 --- a/poetry.lock +++ b/poetry.lock @@ -178,10 +178,9 @@ pycparser = "*" name = "charset-normalizer" version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-optional = true +optional = false python-versions = ">=3.7.0" groups = ["main"] -markers = "extra == \"gcp\"" files = [ {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, @@ -502,6 +501,24 @@ pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0.dev0)"] +[[package]] +name = "googleapis-common-protos" +version = "1.70.0" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8"}, + {file = "googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257"}, +] + +[package.dependencies] +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0)"] + [[package]] name = "griffe" version = "1.7.3" @@ -608,6 +625,30 @@ markers = {dev = "python_version >= \"3.10\""} [package.extras] all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] +[[package]] +name = "importlib-metadata" +version = "8.7.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, + {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, +] + +[package.dependencies] +zipp = ">=3.20" 
+ +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +perf = ["ipython"] +test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] + [[package]] name = "iniconfig" version = "2.0.0" @@ -766,6 +807,106 @@ files = [ {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, ] +[[package]] +name = "opentelemetry-api" +version = "1.38.0" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582"}, + {file = "opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12"}, +] + +[package.dependencies] +importlib-metadata = ">=6.0,<8.8.0" +typing-extensions = ">=4.5.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.38.0" +description = "OpenTelemetry Protobuf encoding" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c"}, +] + +[package.dependencies] +opentelemetry-proto = "1.38.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.38.0" +description = "OpenTelemetry Collector Protobuf over HTTP 
Exporter" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b"}, +] + +[package.dependencies] +googleapis-common-protos = ">=1.52,<2.0" +opentelemetry-api = ">=1.15,<2.0" +opentelemetry-exporter-otlp-proto-common = "1.38.0" +opentelemetry-proto = "1.38.0" +opentelemetry-sdk = ">=1.38.0,<1.39.0" +requests = ">=2.7,<3.0" +typing-extensions = ">=4.5.0" + +[[package]] +name = "opentelemetry-proto" +version = "1.38.0" +description = "OpenTelemetry Python Proto" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18"}, + {file = "opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468"}, +] + +[package.dependencies] +protobuf = ">=5.0,<7.0" + +[[package]] +name = "opentelemetry-sdk" +version = "1.38.0" +description = "OpenTelemetry Python SDK" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b"}, + {file = "opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe"}, +] + +[package.dependencies] +opentelemetry-api = "1.38.0" +opentelemetry-semantic-conventions = "0.59b0" +typing-extensions = ">=4.5.0" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.59b0" +description = "OpenTelemetry Semantic Conventions" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = 
"opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed"}, + {file = "opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0"}, +] + +[package.dependencies] +opentelemetry-api = "1.38.0" +typing-extensions = ">=4.5.0" + [[package]] name = "packaging" version = "24.2" @@ -811,6 +952,26 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "protobuf" +version = "6.33.0" +description = "" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "protobuf-6.33.0-cp310-abi3-win32.whl", hash = "sha256:d6101ded078042a8f17959eccd9236fb7a9ca20d3b0098bbcb91533a5680d035"}, + {file = "protobuf-6.33.0-cp310-abi3-win_amd64.whl", hash = "sha256:9a031d10f703f03768f2743a1c403af050b6ae1f3480e9c140f39c45f81b13ee"}, + {file = "protobuf-6.33.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:905b07a65f1a4b72412314082c7dbfae91a9e8b68a0cc1577515f8df58ecf455"}, + {file = "protobuf-6.33.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:e0697ece353e6239b90ee43a9231318302ad8353c70e6e45499fa52396debf90"}, + {file = "protobuf-6.33.0-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:e0a1715e4f27355afd9570f3ea369735afc853a6c3951a6afe1f80d8569ad298"}, + {file = "protobuf-6.33.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:35be49fd3f4fefa4e6e2aacc35e8b837d6703c37a2168a55ac21e9b1bc7559ef"}, + {file = "protobuf-6.33.0-cp39-cp39-win32.whl", hash = "sha256:cd33a8e38ea3e39df66e1bbc462b076d6e5ba3a4ebbde58219d777223a7873d3"}, + {file = "protobuf-6.33.0-cp39-cp39-win_amd64.whl", hash = "sha256:c963e86c3655af3a917962c9619e1a6b9670540351d7af9439d06064e3317cc9"}, + {file = "protobuf-6.33.0-py3-none-any.whl", hash = "sha256:25c9e1963c6734448ea2d308cfa610e692b801304ba0908d7bfa564ac5132995"}, + {file = "protobuf-6.33.0.tar.gz", hash = 
"sha256:140303d5c8d2037730c548f8c7b93b20bb1dc301be280c378b82b8894589c954"}, +] + [[package]] name = "pyasn1" version = "0.6.1" @@ -1219,10 +1380,9 @@ files = [ name = "requests" version = "2.32.3" description = "Python HTTP for Humans." -optional = true +optional = false python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"gcp\"" files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -1472,10 +1632,9 @@ typing-extensions = ">=4.12.0" name = "urllib3" version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." -optional = true +optional = false python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"gcp\"" files = [ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, @@ -1508,6 +1667,26 @@ typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} [package.extras] standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] +[[package]] +name = "zipp" +version = "3.23.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e"}, + {file = "zipp-3.23.0.tar.gz", hash = 
"sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] + [extras] agents = ["authlib", "griffe", "mcp"] gcp = ["google-auth", "requests"] @@ -1515,4 +1694,4 @@ gcp = ["google-auth", "requests"] [metadata] lock-version = "2.1" python-versions = ">=3.9" -content-hash = "84dda1a6ae0a8491ec9f64e6500480e7ef2e177812a624e388127f354c8e844c" +content-hash = "9d707321f2730f9d1e581d43778dd605a83fdc3d3c375f597b1a2dabb2584ba0" diff --git a/pyproject.toml b/pyproject.toml index 087b5703..4bea6627 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.11" +version = "1.9.12" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" },] readme = "README-PYPI.md" @@ -13,6 +13,10 @@ dependencies = [ "typing-inspection >=0.4.0", "pyyaml (>=6.0.2,<7.0.0)", "invoke (>=2.2.0,<3.0.0)", + "opentelemetry-sdk (>=1.33.1,<2.0.0)", + "opentelemetry-api (>=1.33.1,<2.0.0)", + "opentelemetry-exporter-otlp-proto-http (>=1.37.0,<2.0.0)", + "opentelemetry-semantic-conventions (>=0.59b0,<0.60)", ] [tool.poetry] diff --git a/src/mistralai/_hooks/registration.py b/src/mistralai/_hooks/registration.py index fc3ae79b..58bebab0 100644 --- a/src/mistralai/_hooks/registration.py +++ b/src/mistralai/_hooks/registration.py @@ -1,5 +1,6 @@ from .custom_user_agent import CustomUserAgentHook from .deprecation_warning import DeprecationWarningHook +from .tracing import TracingHook from .types import Hooks # This file is only ever generated once on the first generation and then is free to be modified. @@ -13,5 +14,9 @@ def init_hooks(hooks: Hooks): with an instance of a hook that implements that specific Hook interface Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance """ + tracing_hook = TracingHook() hooks.register_before_request_hook(CustomUserAgentHook()) hooks.register_after_success_hook(DeprecationWarningHook()) + hooks.register_after_success_hook(tracing_hook) + hooks.register_before_request_hook(tracing_hook) + hooks.register_after_error_hook(tracing_hook) diff --git a/src/mistralai/_hooks/tracing.py b/src/mistralai/_hooks/tracing.py new file mode 100644 index 00000000..f2ac9c86 --- /dev/null +++ b/src/mistralai/_hooks/tracing.py @@ -0,0 +1,50 @@ +import logging +from typing import Optional, Tuple, Union + +import httpx +from opentelemetry.trace import Span + +from ..extra.observability.otel import ( + get_or_create_otel_tracer, + get_response_and_error, + get_traced_request_and_span, + get_traced_response, +) +from .types import ( + AfterErrorContext, + AfterErrorHook, + AfterSuccessContext, + AfterSuccessHook, + BeforeRequestContext, + 
BeforeRequestHook, +) + +logger = logging.getLogger(__name__) + + +class TracingHook(BeforeRequestHook, AfterSuccessHook, AfterErrorHook): + def __init__(self) -> None: + self.tracing_enabled, self.tracer = get_or_create_otel_tracer() + self.request_span: Optional[Span] = None + + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + request, self.request_span = get_traced_request_and_span(tracing_enabled=self.tracing_enabled, tracer=self.tracer, span=self.request_span, operation_id=hook_ctx.operation_id, request=request) + return request + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: + response = get_traced_response(tracing_enabled=self.tracing_enabled, tracer=self.tracer, span=self.request_span, operation_id=hook_ctx.operation_id, response=response) + return response + + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + if response: + response, error = get_response_and_error(tracing_enabled=self.tracing_enabled, tracer=self.tracer, span=self.request_span, operation_id=hook_ctx.operation_id, response=response, error=error) + return response, error diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 27eddedf..64551a96 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -26,8 +26,10 @@ reconstitue_entries, ) from mistralai.extra.run.utils import run_requirements +from mistralai.extra.observability.otel import GenAISpanEnum, get_or_create_otel_tracer logger = logging.getLogger(__name__) +tracing_enabled, tracer = get_or_create_otel_tracer() if typing.TYPE_CHECKING: from mistralai.extra.run.context import RunContext @@ -67,50 +69,52 @@ async def run_async( from mistralai.extra.run.context import 
_validate_run from mistralai.extra.run.tools import get_function_calls - req, run_result, input_entries = await _validate_run( - beta_client=Beta(self.sdk_configuration), - run_ctx=run_ctx, - inputs=inputs, - instructions=instructions, - tools=tools, - completion_args=completion_args, - ) + with tracer.start_as_current_span(GenAISpanEnum.VALIDATE_RUN.value): + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) - while True: - if run_ctx.conversation_id is None: - res = await self.start_async( - inputs=input_entries, - http_headers=http_headers, - name=name, - description=description, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - **req, # type: ignore - ) - run_result.conversation_id = res.conversation_id - run_ctx.conversation_id = res.conversation_id - logger.info( - f"Started Run with conversation with id {res.conversation_id}" - ) - else: - res = await self.append_async( - conversation_id=run_ctx.conversation_id, - inputs=input_entries, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - ) - run_ctx.request_count += 1 - run_result.output_entries.extend(res.outputs) - fcalls = get_function_calls(res.outputs) - if not fcalls: - logger.debug("No more function calls to execute") - break - else: - fresults = await run_ctx.execute_function_calls(fcalls) - run_result.output_entries.extend(fresults) - input_entries = typing.cast(list[InputEntries], fresults) + with tracer.start_as_current_span(GenAISpanEnum.CONVERSATION.value): + while True: + if run_ctx.conversation_id is None: + res = await self.start_async( + inputs=input_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, # type: ignore + ) + run_result.conversation_id = res.conversation_id + 
run_ctx.conversation_id = res.conversation_id + logger.info( + f"Started Run with conversation with id {res.conversation_id}" + ) + else: + res = await self.append_async( + conversation_id=run_ctx.conversation_id, + inputs=input_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + run_ctx.request_count += 1 + run_result.output_entries.extend(res.outputs) + fcalls = get_function_calls(res.outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + else: + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + input_entries = typing.cast(list[InputEntries], fresults) return run_result @run_requirements diff --git a/src/mistralai/extra/observability/__init__.py b/src/mistralai/extra/observability/__init__.py new file mode 100644 index 00000000..4ff5873c --- /dev/null +++ b/src/mistralai/extra/observability/__init__.py @@ -0,0 +1,15 @@ +from contextlib import contextmanager + +from opentelemetry import trace as otel_trace + +from .otel import MISTRAL_SDK_OTEL_TRACER_NAME + + +@contextmanager +def trace(name: str, **kwargs): + tracer = otel_trace.get_tracer(MISTRAL_SDK_OTEL_TRACER_NAME) + with tracer.start_as_current_span(name, **kwargs) as span: + yield span + + +__all__ = ["trace"] diff --git a/src/mistralai/extra/observability/otel.py b/src/mistralai/extra/observability/otel.py new file mode 100644 index 00000000..46c667d0 --- /dev/null +++ b/src/mistralai/extra/observability/otel.py @@ -0,0 +1,393 @@ +import copy +import json +import logging +import os +import traceback +from datetime import datetime, timezone +from enum import Enum +from typing import Optional, Tuple + +import httpx +import opentelemetry.semconv._incubating.attributes.gen_ai_attributes as gen_ai_attributes +import opentelemetry.semconv._incubating.attributes.http_attributes as http_attributes +import opentelemetry.semconv.attributes.server_attributes as server_attributes +from opentelemetry 
import propagate, trace +from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter +from opentelemetry.sdk.resources import SERVICE_NAME, Resource +from opentelemetry.sdk.trace import SpanProcessor, TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExportResult +from opentelemetry.trace import Span, Status, StatusCode, Tracer, set_span_in_context + +logger = logging.getLogger(__name__) + + +OTEL_SERVICE_NAME: str = "mistralai_sdk" +OTEL_EXPORTER_OTLP_ENDPOINT: str = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT", "") +OTEL_EXPORTER_OTLP_TIMEOUT: int = int(os.getenv("OTEL_EXPORTER_OTLP_TIMEOUT", "2")) +OTEL_EXPORTER_OTLP_MAX_EXPORT_BATCH_SIZE: int = int(os.getenv("OTEL_EXPORTER_OTLP_MAX_EXPORT_BATCH_SIZE", "512")) +OTEL_EXPORTER_OTLP_SCHEDULE_DELAY_MILLIS: int = int(os.getenv("OTEL_EXPORTER_OTLP_SCHEDULE_DELAY_MILLIS", "1000")) +OTEL_EXPORTER_OTLP_MAX_QUEUE_SIZE: int = int(os.getenv("OTEL_EXPORTER_OTLP_MAX_QUEUE_SIZE", "2048")) +OTEL_EXPORTER_OTLP_EXPORT_TIMEOUT_MILLIS: int = int(os.getenv("OTEL_EXPORTER_OTLP_EXPORT_TIMEOUT_MILLIS", "5000")) + +MISTRAL_SDK_OTEL_TRACER_NAME: str = OTEL_SERVICE_NAME + "_tracer" + +MISTRAL_SDK_DEBUG_TRACING: bool = os.getenv("MISTRAL_SDK_DEBUG_TRACING", "false").lower() == "true" +DEBUG_HINT: str = "To see detailed exporter logs, set MISTRAL_SDK_DEBUG_TRACING=true." 
+ + +class MistralAIAttributes: + MISTRAL_AI_TOTAL_TOKENS = "mistral_ai.request.total_tokens" + MISTRAL_AI_TOOL_CALL_ARGUMENTS = "mistral_ai.tool.call.arguments" + MISTRAL_AI_MESSAGE_ID = "mistral_ai.message.id" + MISTRAL_AI_OPERATION_NAME= "mistral_ai.operation.name" + MISTRAL_AI_OCR_USAGE_PAGES_PROCESSED = "mistral_ai.ocr.usage.pages_processed" + MISTRAL_AI_OCR_USAGE_DOC_SIZE_BYTES = "mistral_ai.ocr.usage.doc_size_bytes" + MISTRAL_AI_OPERATION_ID = "mistral_ai.operation.id" + MISTRAL_AI_ERROR_TYPE = "mistral_ai.error.type" + MISTRAL_AI_ERROR_MESSAGE = "mistral_ai.error.message" + MISTRAL_AI_ERROR_CODE = "mistral_ai.error.code" + MISTRAL_AI_FUNCTION_CALL_ARGUMENTS = "mistral_ai.function.call.arguments" + +class MistralAINameValues(Enum): + OCR = "ocr" + +class TracingErrors(Exception, Enum): + FAILED_TO_EXPORT_OTEL_SPANS = "Failed to export OpenTelemetry (OTEL) spans." + FAILED_TO_INITIALIZE_OPENTELEMETRY_TRACING = "Failed to initialize OpenTelemetry tracing." + FAILED_TO_CREATE_SPAN_FOR_REQUEST = "Failed to create span for request." + FAILED_TO_ENRICH_SPAN_WITH_RESPONSE = "Failed to enrich span with response." + FAILED_TO_HANDLE_ERROR_IN_SPAN = "Failed to handle error in span." + FAILED_TO_END_SPAN = "Failed to end span." 
+ + def __str__(self): + return str(self.value) + +class GenAISpanEnum(str, Enum): + CONVERSATION = "conversation" + CONV_REQUEST = "POST /v1/conversations" + EXECUTE_TOOL = "execute_tool" + VALIDATE_RUN = "validate_run" + + @staticmethod + def function_call(func_name: str): + return f"function_call[{func_name}]" + + +def parse_time_to_nanos(ts: str) -> int: + dt = datetime.fromisoformat(ts.replace("Z", "+00:00")).astimezone(timezone.utc) + return int(dt.timestamp() * 1e9) + +def set_available_attributes(span: Span, attributes: dict) -> None: + for attribute, value in attributes.items(): + if value: + span.set_attribute(attribute, value) + + +def enrich_span_from_request(span: Span, request: httpx.Request) -> Span: + if not request.url.port: + # From httpx doc: + # Note that the URL class performs port normalization as per the WHATWG spec. + # Default ports for "http", "https", "ws", "wss", and "ftp" schemes are always treated as None. + # Handling default ports since most of the time we are using https + if request.url.scheme == "https": + port = 443 + elif request.url.scheme == "http": + port = 80 + else: + port = -1 + else: + port = request.url.port + + span.set_attributes({ + http_attributes.HTTP_REQUEST_METHOD: request.method, + http_attributes.HTTP_URL: str(request.url), + server_attributes.SERVER_ADDRESS: request.headers.get("host", ""), + server_attributes.SERVER_PORT: port + }) + if request._content: + request_body = json.loads(request._content) + + attributes = { + gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT: request_body.get("n", None), + gen_ai_attributes.GEN_AI_REQUEST_ENCODING_FORMATS: request_body.get("encoding_formats", None), + gen_ai_attributes.GEN_AI_REQUEST_FREQUENCY_PENALTY: request_body.get("frequency_penalty", None), + gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS: request_body.get("max_tokens", None), + gen_ai_attributes.GEN_AI_REQUEST_MODEL: request_body.get("model", None), + gen_ai_attributes.GEN_AI_REQUEST_PRESENCE_PENALTY: 
request_body.get("presence_penalty", None), + gen_ai_attributes.GEN_AI_REQUEST_SEED: request_body.get("random_seed", None), + gen_ai_attributes.GEN_AI_REQUEST_STOP_SEQUENCES: request_body.get("stop", None), + gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE: request_body.get("temperature", None), + gen_ai_attributes.GEN_AI_REQUEST_TOP_P: request_body.get("top_p", None), + gen_ai_attributes.GEN_AI_REQUEST_TOP_K: request_body.get("top_k", None), + # Input messages are likely to be large, containing user/PII data and other sensitive information. + # Also structured attributes are not yet supported on spans in Python. + # For those reasons, we will not record the input messages for now. + gen_ai_attributes.GEN_AI_INPUT_MESSAGES: None, + } + # Set attributes only if they are not None. + # From OpenTelemetry documentation: None is not a valid attribute value per spec / is not a permitted value type for an attribute. + set_available_attributes(span, attributes) + return span + + +def enrich_span_from_response(tracer: trace.Tracer, span: Span, operation_id: str, response: httpx.Response) -> None: + span.set_status(Status(StatusCode.OK)) + response_data = json.loads(response.content) + + # Base attributes + attributes: dict[str, str | int] = { + http_attributes.HTTP_RESPONSE_STATUS_CODE: response.status_code, + MistralAIAttributes.MISTRAL_AI_OPERATION_ID: operation_id, + gen_ai_attributes.GEN_AI_PROVIDER_NAME: gen_ai_attributes.GenAiProviderNameValues.MISTRAL_AI.value + } + + # Add usage attributes if available + usage = response_data.get("usage", {}) + if usage: + attributes.update({ + gen_ai_attributes.GEN_AI_USAGE_PROMPT_TOKENS: usage.get("prompt_tokens", 0), + gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS: usage.get("completion_tokens", 0), + MistralAIAttributes.MISTRAL_AI_TOTAL_TOKENS: usage.get("total_tokens", 0) + }) + + span.set_attributes(attributes) + if operation_id == "agents_api_v1_agents_create": + # Semantics from 
https://opentelemetry.io/docs/specs/semconv/gen-ai/gen-ai-agent-spans/#create-agent-span + agent_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.CREATE_AGENT.value, + gen_ai_attributes.GEN_AI_AGENT_DESCRIPTION: response_data.get("description", ""), + gen_ai_attributes.GEN_AI_AGENT_ID: response_data.get("id", ""), + gen_ai_attributes.GEN_AI_AGENT_NAME: response_data.get("name", ""), + gen_ai_attributes.GEN_AI_REQUEST_MODEL: response_data.get("model", ""), + gen_ai_attributes.GEN_AI_SYSTEM_INSTRUCTIONS: response_data.get("instructions", "") + } + span.set_attributes(agent_attributes) + if operation_id in ["agents_api_v1_conversations_start", "agents_api_v1_conversations_append"]: + outputs = response_data.get("outputs", []) + conversation_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.INVOKE_AGENT.value, + gen_ai_attributes.GEN_AI_CONVERSATION_ID: response_data.get("conversation_id", "") + } + span.set_attributes(conversation_attributes) + parent_context = set_span_in_context(span) + + for output in outputs: + # TODO: Only enrich the spans if it's a single turn conversation. 
+ # Multi turn conversations are handled in the extra.run.tools.create_function_result function + if output["type"] == "function.call": + pass + if output["type"] == "tool.execution": + start_ns = parse_time_to_nanos(output["created_at"]) + end_ns = parse_time_to_nanos(output["completed_at"]) + child_span = tracer.start_span("Tool Execution", start_time=start_ns, context=parent_context) + tool_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.EXECUTE_TOOL.value, + gen_ai_attributes.GEN_AI_TOOL_CALL_ID: output.get("id", ""), + MistralAIAttributes.MISTRAL_AI_TOOL_CALL_ARGUMENTS: output.get("arguments", ""), + gen_ai_attributes.GEN_AI_TOOL_NAME: output.get("name", "") + } + child_span.set_attributes(tool_attributes) + child_span.end(end_time=end_ns) + if output["type"] == "message.output": + start_ns = parse_time_to_nanos(output["created_at"]) + end_ns = parse_time_to_nanos(output["completed_at"]) + child_span = tracer.start_span("Message Output", start_time=start_ns, context=parent_context) + message_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.CHAT.value, + gen_ai_attributes.GEN_AI_PROVIDER_NAME: gen_ai_attributes.GenAiProviderNameValues.MISTRAL_AI.value, + MistralAIAttributes.MISTRAL_AI_MESSAGE_ID: output.get("id", ""), + gen_ai_attributes.GEN_AI_AGENT_ID: output.get("agent_id", ""), + gen_ai_attributes.GEN_AI_REQUEST_MODEL: output.get("model", "") + } + child_span.set_attributes(message_attributes) + child_span.end(end_time=end_ns) + if operation_id == "ocr_v1_ocr_post": + usage_info = response_data.get("usage_info", "") + ocr_attributes = { + MistralAIAttributes.MISTRAL_AI_OPERATION_NAME: MistralAINameValues.OCR.value, + MistralAIAttributes.MISTRAL_AI_OCR_USAGE_PAGES_PROCESSED: usage_info.get("pages_processed", "") if usage_info else "", + MistralAIAttributes.MISTRAL_AI_OCR_USAGE_DOC_SIZE_BYTES: usage_info.get("doc_size_bytes", "") if usage_info else "", + 
gen_ai_attributes.GEN_AI_REQUEST_MODEL: response_data.get("model", "") + } + span.set_attributes(ocr_attributes) + + +class GenAISpanProcessor(SpanProcessor): + def on_start(self, span, parent_context = None): + span.set_attributes({"agent.trace.public": ""}) + + +class QuietOTLPSpanExporter(OTLPSpanExporter): + def export(self, spans): + try: + return super().export(spans) + except Exception: + logger.warning(f"{TracingErrors.FAILED_TO_EXPORT_OTEL_SPANS} {(traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT)}") + return SpanExportResult.FAILURE + + +def get_or_create_otel_tracer() -> Tuple[bool, Tracer]: + """ + 3 possible cases: + + -> [SDK in a Workflow / App] If there is already a tracer provider set -> use that one + + -> [SDK standalone] If no tracer provider is set but the OTEL_EXPORTER_OTLP_ENDPOINT is set -> create a new tracer provider that exports to the OTEL_EXPORTER_OTLP_ENDPOINT + + -> Else tracing is disabled + """ + tracing_enabled = True + tracer_provider = trace.get_tracer_provider() + + if isinstance(tracer_provider, trace.ProxyTracerProvider): + if OTEL_EXPORTER_OTLP_ENDPOINT: + # SDK standalone: No tracer provider but OTEL_EXPORTER_OTLP_ENDPOINT is set -> create a new tracer provider that exports to the OTEL_EXPORTER_OTLP_ENDPOINT + try: + exporter = QuietOTLPSpanExporter( + endpoint=OTEL_EXPORTER_OTLP_ENDPOINT, + timeout=OTEL_EXPORTER_OTLP_TIMEOUT + ) + resource = Resource.create(attributes={SERVICE_NAME: OTEL_SERVICE_NAME}) + tracer_provider = TracerProvider(resource=resource) + + span_processor = BatchSpanProcessor( + exporter, + export_timeout_millis=OTEL_EXPORTER_OTLP_EXPORT_TIMEOUT_MILLIS, + max_export_batch_size=OTEL_EXPORTER_OTLP_MAX_EXPORT_BATCH_SIZE, + schedule_delay_millis=OTEL_EXPORTER_OTLP_SCHEDULE_DELAY_MILLIS, + max_queue_size=OTEL_EXPORTER_OTLP_MAX_QUEUE_SIZE + ) + + tracer_provider.add_span_processor(span_processor) + tracer_provider.add_span_processor(GenAISpanProcessor()) + 
trace.set_tracer_provider(tracer_provider) + + except Exception: + logger.warning(f"{TracingErrors.FAILED_TO_INITIALIZE_OPENTELEMETRY_TRACING} {(traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT)}") + tracing_enabled = False + else: + # No tracer provider nor OTEL_EXPORTER_OTLP_ENDPOINT set -> tracing is disabled + tracing_enabled = False + + tracer = tracer_provider.get_tracer(MISTRAL_SDK_OTEL_TRACER_NAME) + + return tracing_enabled, tracer + +def get_traced_request_and_span(tracing_enabled: bool, tracer: Tracer, span: Optional[Span], operation_id: str, request: httpx.Request) -> Tuple[httpx.Request, Optional[Span]]: + if not tracing_enabled: + return request, span + + try: + span = tracer.start_span(name=operation_id) + # Inject the span context into the request headers to be used by the backend service to continue the trace + propagate.inject(request.headers) + span = enrich_span_from_request(span, request) + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_CREATE_SPAN_FOR_REQUEST, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + if span: + end_span(span=span) + span = None + + return request, span + + +def get_traced_response(tracing_enabled: bool, tracer: Tracer, span: Optional[Span], operation_id: str, response: httpx.Response) -> httpx.Response: + if not tracing_enabled or not span: + return response + try: + is_stream_response = not response.is_closed and not response.is_stream_consumed + if is_stream_response: + return TracedResponse.from_response(resp=response, span=span) + enrich_span_from_response( + tracer, span, operation_id, response + ) + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_ENRICH_SPAN_WITH_RESPONSE, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + if span: + end_span(span=span) + return response + +def get_response_and_error(tracing_enabled: bool, tracer: Tracer, span: Optional[Span], operation_id: str, response: 
httpx.Response, error: Optional[Exception]) -> Tuple[httpx.Response, Optional[Exception]]: + if not tracing_enabled or not span: + return response, error + try: + if error: + span.record_exception(error) + span.set_status(Status(StatusCode.ERROR, str(error))) + if hasattr(response, "_content") and response._content: + response_body = json.loads(response._content) + if response_body.get("object", "") == "error": + if error_msg := response_body.get("message", ""): + attributes = { + http_attributes.HTTP_RESPONSE_STATUS_CODE: response.status_code, + MistralAIAttributes.MISTRAL_AI_ERROR_TYPE: response_body.get("type", ""), + MistralAIAttributes.MISTRAL_AI_ERROR_MESSAGE: error_msg, + MistralAIAttributes.MISTRAL_AI_ERROR_CODE: response_body.get("code", ""), + } + for attribute, value in attributes.items(): + if value: + span.set_attribute(attribute, value) + span.end() + span = None + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_HANDLE_ERROR_IN_SPAN, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + + if span: + span.end() + span = None + return response, error + + +def end_span(span: Span) -> None: + try: + span.end() + except Exception: + logger.warning( + "%s %s", + TracingErrors.FAILED_TO_END_SPAN, + traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT, + ) + +class TracedResponse(httpx.Response): + """ + TracedResponse is a subclass of httpx.Response that ends the span when the response is closed. + + This hack allows ending the span only once the stream is fully consumed. 
+ """ + def __init__(self, *args, span: Optional[Span], **kwargs) -> None: + super().__init__(*args, **kwargs) + self.span = span + + def close(self) -> None: + if self.span: + end_span(span=self.span) + super().close() + + async def aclose(self) -> None: + if self.span: + end_span(span=self.span) + await super().aclose() + + @classmethod + def from_response(cls, resp: httpx.Response, span: Optional[Span]) -> "TracedResponse": + traced_resp = cls.__new__(cls) + traced_resp.__dict__ = copy.copy(resp.__dict__) + traced_resp.span = span + + # Warning: this syntax bypasses the __init__ method. + # If you add init logic in the TracedResponse.__init__ method, you will need to add the following line for it to execute: + # traced_resp.__init__(your_arguments) + + return traced_resp diff --git a/src/mistralai/extra/run/tools.py b/src/mistralai/extra/run/tools.py index 81fec665..e3f80935 100644 --- a/src/mistralai/extra/run/tools.py +++ b/src/mistralai/extra/run/tools.py @@ -8,6 +8,7 @@ import json from typing import cast, Callable, Sequence, Any, ForwardRef, get_type_hints, Union +from opentelemetry import trace from griffe import ( Docstring, DocstringSectionKind, @@ -15,9 +16,11 @@ DocstringParameter, DocstringSection, ) +import opentelemetry.semconv._incubating.attributes.gen_ai_attributes as gen_ai_attributes from mistralai.extra.exceptions import RunException from mistralai.extra.mcp.base import MCPClientProtocol +from mistralai.extra.observability.otel import GenAISpanEnum, MistralAIAttributes, set_available_attributes from mistralai.extra.run.result import RunOutputEntries from mistralai.models import ( FunctionResultEntry, @@ -191,22 +194,31 @@ async def create_function_result( if isinstance(function_call.arguments, str) else function_call.arguments ) - try: - if isinstance(run_tool, RunFunction): - res = run_tool.callable(**arguments) - elif isinstance(run_tool, RunCoroutine): - res = await run_tool.awaitable(**arguments) - elif isinstance(run_tool, RunMCPTool): - 
res = await run_tool.mcp_client.execute_tool(function_call.name, arguments) - except Exception as e: - if continue_on_fn_error is True: - return FunctionResultEntry( - tool_call_id=function_call.tool_call_id, - result=f"Error while executing {function_call.name}: {str(e)}", - ) - raise RunException( - f"Failed to execute tool {function_call.name} with arguments '{function_call.arguments}'" - ) from e + tracer = trace.get_tracer(__name__) + with tracer.start_as_current_span(GenAISpanEnum.function_call(function_call.name)) as span: + try: + if isinstance(run_tool, RunFunction): + res = run_tool.callable(**arguments) + elif isinstance(run_tool, RunCoroutine): + res = await run_tool.awaitable(**arguments) + elif isinstance(run_tool, RunMCPTool): + res = await run_tool.mcp_client.execute_tool(function_call.name, arguments) + function_call_attributes = { + gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.EXECUTE_TOOL.value, + gen_ai_attributes.GEN_AI_TOOL_CALL_ID: function_call.id, + MistralAIAttributes.MISTRAL_AI_TOOL_CALL_ARGUMENTS: str(function_call.arguments), + gen_ai_attributes.GEN_AI_TOOL_NAME: function_call.name + } + set_available_attributes(span, function_call_attributes) + except Exception as e: + if continue_on_fn_error is True: + return FunctionResultEntry( + tool_call_id=function_call.tool_call_id, + result=f"Error while executing {function_call.name}: {str(e)}", + ) + raise RunException( + f"Failed to execute tool {function_call.name} with arguments '{function_call.arguments}'" + ) from e return FunctionResultEntry( tool_call_id=function_call.tool_call_id, From 8b8d97d59898d95a47989027e94dddacfe022e21 Mon Sep 17 00:00:00 2001 From: "alexandre.abouchahine" Date: Mon, 15 Dec 2025 23:13:19 +0100 Subject: [PATCH 161/223] fix: example script not failing correctly --- scripts/run_examples.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 
d9ff43b2..e2532117 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -40,6 +40,7 @@ exclude_files=( "examples/mistral/agents/async_conversation_run_stream.py" "examples/mistral/agents/async_conversation_run_mcp.py" "examples/mistral/agents/async_conversation_run_mcp_remote.py" + "examples/mistral/agents/async_multi_turn_conversation" ) # Check if the no-extra-dep flag is set @@ -64,8 +65,8 @@ run_test_with_retries() { echo "Running $file (attempt $attempt/$RETRY_COUNT)" # Run the script and capture both exit status and error output - local current_output=$(python3 "$file" 2>&1) - local exit_code=$? + current_output=$(python3 "$file" 2>&1) + exit_code=$? if [ $exit_code -eq 0 ]; then echo "Success" From e4d3ea3e90d52041c987fab3e94e449f05fa4619 Mon Sep 17 00:00:00 2001 From: "alexandre.abouchahine" Date: Tue, 16 Dec 2025 19:31:59 +0100 Subject: [PATCH 162/223] update workflow token --- .github/workflows/sdk_generation_mistralai_sdk.yaml | 2 +- scripts/run_examples.sh | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index 719a7b72..77a630f7 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -24,6 +24,6 @@ jobs: speakeasy_version: latest target: mistralai-sdk secrets: - github_access_token: ${{ secrets.SPEAKEASY_WORKFLOW_GITHUB_PAT }} + github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index e2532117..106c10b2 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -39,22 +39,26 @@ exclude_files=( "examples/mistral/mcp_servers/stdio_server.py" "examples/mistral/agents/async_conversation_run_stream.py" "examples/mistral/agents/async_conversation_run_mcp.py" - 
"examples/mistral/agents/async_conversation_run_mcp_remote.py" - "examples/mistral/agents/async_multi_turn_conversation" + "examples/mistral/agents/async_conversation_run_mcp_remote.py" ) # Check if the no-extra-dep flag is set if [ "$NO_EXTRA_DEP" = true ]; then # Add more files to the exclude list exclude_files+=( - "examples/mistral/agents/async_conversation_run_mcp_remote.py" "examples/mistral/agents/async_conversation_run_stream.py" "examples/mistral/agents/async_conversation_run.py" + "examples/mistral/agents/async_multi_turn_conversation.py" ) fi failed=0 +echo "Skipping scripts" +for file in "${exclude_files[@]}"; do + echo "$file" +done + # Function to run a test with retries run_test_with_retries() { local file="$1" From 13d22bad61f00d863fd348945290a14de7411305 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Tue, 16 Dec 2025 19:45:38 +0000 Subject: [PATCH 163/223] ## SDK Changes Detected: * `mistral.beta.libraries.create()`: `response.owner_id` **Changed** **Breaking** :warning: * `mistral.beta.libraries.documents.get()`: `response` **Changed** **Breaking** :warning: * `mistral.models.list()`: * `response.data.[].[base].capabilities` **Changed** * `error.status[422]` **Removed** **Breaking** :warning: * `mistral.files.list()`: * `request.include_total` **Added** * `response.total` **Changed** **Breaking** :warning: * `mistral.beta.conversations.start()`: * `request` **Changed** **Breaking** :warning: * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning: * `mistral.beta.libraries.accesses.delete()`: * `request.org_id` **Changed** * `response.share_with_uuid` **Changed** **Breaking** :warning: * `mistral.beta.libraries.accesses.update_or_create()`: * `request.org_id` **Changed** * `response.share_with_uuid` **Changed** **Breaking** :warning: * `mistral.beta.conversations.append()`: * `request.inputs.[array].[].[tool_execution_entry].name` **Changed** **Breaking** :warning: * 
`response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning: * `mistral.beta.libraries.accesses.list()`: `response.data.[].share_with_uuid` **Changed** **Breaking** :warning: * `mistral.beta.conversations.restart()`: * `request` **Changed** **Breaking** :warning: * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning: * `mistral.beta.libraries.documents.update()`: * `request.attributes` **Added** * `response` **Changed** **Breaking** :warning: * `mistral.beta.libraries.documents.upload()`: `response` **Changed** **Breaking** :warning: * `mistral.beta.libraries.documents.list()`: * `request.filters_attributes` **Added** * `response.data.[]` **Changed** **Breaking** :warning: * `mistral.beta.libraries.update()`: `response.owner_id` **Changed** **Breaking** :warning: * `mistral.beta.libraries.delete()`: `response.owner_id` **Changed** **Breaking** :warning: * `mistral.beta.libraries.get()`: `response.owner_id` **Changed** **Breaking** :warning: * `mistral.beta.conversations.get_history()`: `response.entries.[].[tool_execution_entry].name` **Changed** **Breaking** :warning: * `mistral.beta.libraries.list()`: `response.data.[].owner_id` **Changed** **Breaking** :warning: * `mistral.models.retrieve()`: `response.[base].capabilities` **Changed** * `mistral.agents.complete()`: `request.metadata` **Added** * `mistral.beta.agents.get()`: * `request.agent_version` **Added** * `response` **Changed** * `mistral.beta.agents.list()`: * `request` **Changed** * `response.[]` **Changed** * `mistral.beta.agents.update_version()`: `response` **Changed** * `mistral.beta.agents.delete()`: **Added** * `mistral.beta.conversations.list()`: * `request.metadata` **Added** * `response.[]` **Changed** * `mistral.beta.conversations.get()`: `response` **Changed** * `mistral.beta.agents.update()`: * `request` **Changed** * `response` **Changed** * `mistral.beta.conversations.delete()`: **Added** * `mistral.chat.complete()`: `request.metadata` 
**Added** * `mistral.fim.complete()`: `request.metadata` **Added** * `mistral.beta.agents.create()`: * `request.metadata` **Added** * `response` **Changed** * `mistral.ocr.process()`: * `request` **Changed** * `response.pages.[]` **Changed** --- .speakeasy/gen.lock | 180 ++++++++++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +- README.md | 36 ++- RELEASES.md | 12 +- USAGE.md | 20 +- docs/models/agent.md | 6 +- docs/models/agentconversation.md | 4 +- docs/models/agentcreationrequest.md | 3 +- docs/models/agentsapiv1agentsdeleterequest.md | 8 + docs/models/agentsapiv1agentsgetrequest.md | 7 +- docs/models/agentsapiv1agentslistrequest.md | 13 +- .../agentsapiv1conversationsdeleterequest.md | 8 + .../agentsapiv1conversationslistrequest.md | 3 +- docs/models/agentscompletionrequest.md | 35 +-- docs/models/agentscompletionstreamrequest.md | 35 +-- docs/models/agentupdaterequest.md | 4 +- docs/models/attributes.md | 59 ++++ docs/models/audiotranscriptionrequest.md | 20 +- docs/models/batchjobin.md | 16 +- docs/models/chatcompletionrequest.md | 41 +-- .../models/chatcompletionrequesttoolchoice.md | 2 + docs/models/chatcompletionstreamrequest.md | 41 +-- .../chatcompletionstreamrequesttoolchoice.md | 2 + docs/models/classificationrequest.md | 8 +- docs/models/completionargs.md | 24 +- docs/models/conversationrequest.md | 4 +- docs/models/conversationrestartrequest.md | 4 +- .../conversationrestartstreamrequest.md | 4 +- docs/models/conversationstreamrequest.md | 4 +- docs/models/documentout.md | 12 +- docs/models/documentupdatein.md | 7 +- docs/models/embeddingrequest.md | 14 +- docs/models/embeddingrequestinputs.md | 2 +- docs/models/filesapirouteslistfilesrequest.md | 1 + docs/models/fimcompletionrequest.md | 3 +- docs/models/fimcompletionstreamrequest.md | 3 +- docs/models/format_.md | 11 + .../models/librariesdocumentslistv1request.md | 1 + docs/models/libraryout.md | 6 +- docs/models/listfilesout.md | 2 +- docs/models/modelcapabilities.md | 7 +- 
docs/models/modelconversation.md | 1 + docs/models/name.md | 17 ++ docs/models/ocrpageobject.md | 4 + docs/models/ocrrequest.md | 25 +- docs/models/ocrtableobject.md | 10 + docs/models/prediction.md | 2 + docs/models/requestsource.md | 10 + docs/models/responseformat.md | 10 +- docs/models/responseformats.md | 2 - docs/models/sharingdelete.md | 2 +- docs/models/sharingin.md | 2 +- docs/models/sharingout.md | 2 +- docs/models/tableformat.md | 9 + docs/models/toolexecutiondeltaevent.md | 2 +- docs/models/toolexecutiondeltaeventname.md | 17 ++ docs/models/toolexecutiondoneevent.md | 2 +- docs/models/toolexecutiondoneeventname.md | 17 ++ docs/models/toolexecutionentry.md | 2 +- docs/models/toolexecutionstartedevent.md | 2 +- docs/models/toolexecutionstartedeventname.md | 17 ++ docs/sdks/accesses/README.md | 8 +- docs/sdks/agents/README.md | 82 +++--- docs/sdks/chat/README.md | 98 ++++--- docs/sdks/classifiers/README.md | 22 +- docs/sdks/conversations/README.md | 86 +++++- docs/sdks/documents/README.md | 4 +- docs/sdks/embeddings/README.md | 16 +- docs/sdks/files/README.md | 3 +- docs/sdks/fim/README.md | 8 +- docs/sdks/mistralagents/README.md | 58 +++- docs/sdks/mistraljobs/README.md | 20 +- docs/sdks/models/README.md | 7 +- docs/sdks/ocr/README.md | 31 +- docs/sdks/transcriptions/README.md | 24 +- pyproject.toml | 2 +- src/mistralai/_version.py | 4 +- src/mistralai/accesses.py | 16 +- src/mistralai/agents.py | 46 +-- src/mistralai/chat.py | 70 +++-- src/mistralai/conversations.py | 266 ++++++++++++++++-- src/mistralai/documents.py | 22 +- src/mistralai/embeddings.py | 12 +- src/mistralai/files.py | 6 + src/mistralai/fim.py | 22 +- src/mistralai/mistral_agents.py | 230 ++++++++++++++- src/mistralai/mistral_jobs.py | 20 +- src/mistralai/models/__init__.py | 71 ++++- src/mistralai/models/agent.py | 17 +- src/mistralai/models/agentconversation.py | 14 +- src/mistralai/models/agentcreationrequest.py | 8 +- .../models/agents_api_v1_agents_deleteop.py | 16 ++ 
.../models/agents_api_v1_agents_getop.py | 43 ++- .../models/agents_api_v1_agents_listop.py | 74 ++++- .../agents_api_v1_conversations_deleteop.py | 18 ++ .../agents_api_v1_conversations_listop.py | 41 ++- .../models/agentscompletionrequest.py | 27 +- .../models/agentscompletionstreamrequest.py | 27 +- src/mistralai/models/agentupdaterequest.py | 20 +- .../models/audiotranscriptionrequest.py | 2 + src/mistralai/models/batchjobin.py | 10 + src/mistralai/models/chatcompletionrequest.py | 27 +- .../models/chatcompletionstreamrequest.py | 27 +- src/mistralai/models/conversationrequest.py | 19 +- .../models/conversationrestartrequest.py | 52 +++- .../conversationrestartstreamrequest.py | 52 +++- .../models/conversationstreamrequest.py | 19 +- src/mistralai/models/documentout.py | 36 ++- src/mistralai/models/documentupdatein.py | 27 +- src/mistralai/models/embeddingrequest.py | 16 +- .../models/files_api_routes_list_filesop.py | 7 + src/mistralai/models/fimcompletionrequest.py | 17 +- .../models/fimcompletionstreamrequest.py | 17 +- .../models/libraries_documents_list_v1op.py | 17 +- src/mistralai/models/libraryout.py | 17 +- src/mistralai/models/listfilesout.py | 39 ++- src/mistralai/models/modelcapabilities.py | 17 +- src/mistralai/models/modelconversation.py | 10 +- src/mistralai/models/ocrpageobject.py | 31 +- src/mistralai/models/ocrrequest.py | 18 +- src/mistralai/models/ocrtableobject.py | 31 ++ src/mistralai/models/prediction.py | 4 + src/mistralai/models/requestsource.py | 7 + src/mistralai/models/responseformat.py | 6 +- src/mistralai/models/responseformats.py | 1 - src/mistralai/models/sharingdelete.py | 41 ++- src/mistralai/models/sharingin.py | 41 ++- src/mistralai/models/sharingout.py | 6 +- .../models/toolexecutiondeltaevent.py | 17 +- .../models/toolexecutiondoneevent.py | 17 +- src/mistralai/models/toolexecutionentry.py | 13 +- .../models/toolexecutionstartedevent.py | 17 +- src/mistralai/models_.py | 16 +- src/mistralai/ocr.py | 18 ++ 
src/mistralai/transcriptions.py | 8 +- 136 files changed, 2471 insertions(+), 649 deletions(-) create mode 100644 docs/models/agentsapiv1agentsdeleterequest.md create mode 100644 docs/models/agentsapiv1conversationsdeleterequest.md create mode 100644 docs/models/attributes.md create mode 100644 docs/models/format_.md create mode 100644 docs/models/name.md create mode 100644 docs/models/ocrtableobject.md create mode 100644 docs/models/requestsource.md create mode 100644 docs/models/tableformat.md create mode 100644 docs/models/toolexecutiondeltaeventname.md create mode 100644 docs/models/toolexecutiondoneeventname.md create mode 100644 docs/models/toolexecutionstartedeventname.md create mode 100644 src/mistralai/models/agents_api_v1_agents_deleteop.py create mode 100644 src/mistralai/models/agents_api_v1_conversations_deleteop.py create mode 100644 src/mistralai/models/ocrtableobject.py create mode 100644 src/mistralai/models/requestsource.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 05e0840c..3136ceae 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: c3693e4872a0785b2ed46c59a8464804 + docChecksum: 3135f1ce6dd57e0487ee2840362ced1a docVersion: 1.0.0 speakeasyVersion: 1.606.10 generationVersion: 2.687.13 - releaseVersion: 1.9.11 - configChecksum: d84e605ef7a3265972f6695049243759 + releaseVersion: 1.10.0 + configChecksum: 1446aab5f184e7184590fe5756b556a8 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -58,12 +58,14 @@ generatedFiles: - docs/models/agenthandoffstartedevent.md - docs/models/agenthandoffstartedeventtype.md - docs/models/agentobject.md + - docs/models/agentsapiv1agentsdeleterequest.md - docs/models/agentsapiv1agentsgetrequest.md - 
docs/models/agentsapiv1agentslistrequest.md - docs/models/agentsapiv1agentsupdaterequest.md - docs/models/agentsapiv1agentsupdateversionrequest.md - docs/models/agentsapiv1conversationsappendrequest.md - docs/models/agentsapiv1conversationsappendstreamrequest.md + - docs/models/agentsapiv1conversationsdeleterequest.md - docs/models/agentsapiv1conversationsgetrequest.md - docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md - docs/models/agentsapiv1conversationshistoryrequest.md @@ -89,6 +91,7 @@ generatedFiles: - docs/models/assistantmessage.md - docs/models/assistantmessagecontent.md - docs/models/assistantmessagerole.md + - docs/models/attributes.md - docs/models/audiochunk.md - docs/models/audiochunktype.md - docs/models/audiotranscriptionrequest.md @@ -220,6 +223,7 @@ generatedFiles: - docs/models/fimcompletionstreamrequeststop.md - docs/models/finetuneablemodeltype.md - docs/models/finishreason.md + - docs/models/format_.md - docs/models/ftclassifierlossfunction.md - docs/models/ftmodelcapabilitiesout.md - docs/models/ftmodelcard.md @@ -336,12 +340,14 @@ generatedFiles: - docs/models/modeltype.md - docs/models/moderationobject.md - docs/models/moderationresponse.md + - docs/models/name.md - docs/models/object.md - docs/models/ocrimageobject.md - docs/models/ocrpagedimensions.md - docs/models/ocrpageobject.md - docs/models/ocrrequest.md - docs/models/ocrresponse.md + - docs/models/ocrtableobject.md - docs/models/ocrusageinfo.md - docs/models/one.md - docs/models/outputcontentchunks.md @@ -353,6 +359,7 @@ generatedFiles: - docs/models/referencechunk.md - docs/models/referencechunktype.md - docs/models/repositories.md + - docs/models/requestsource.md - docs/models/response1.md - docs/models/responsebody.md - docs/models/responsedoneevent.md @@ -380,6 +387,7 @@ generatedFiles: - docs/models/systemmessage.md - docs/models/systemmessagecontent.md - docs/models/systemmessagecontentchunks.md + - docs/models/tableformat.md - docs/models/textchunk.md - 
docs/models/textchunktype.md - docs/models/thinkchunk.md @@ -391,13 +399,16 @@ generatedFiles: - docs/models/toolchoice.md - docs/models/toolchoiceenum.md - docs/models/toolexecutiondeltaevent.md + - docs/models/toolexecutiondeltaeventname.md - docs/models/toolexecutiondeltaeventtype.md - docs/models/toolexecutiondoneevent.md + - docs/models/toolexecutiondoneeventname.md - docs/models/toolexecutiondoneeventtype.md - docs/models/toolexecutionentry.md - docs/models/toolexecutionentryobject.md - docs/models/toolexecutionentrytype.md - docs/models/toolexecutionstartedevent.md + - docs/models/toolexecutionstartedeventname.md - docs/models/toolexecutionstartedeventtype.md - docs/models/toolfilechunk.md - docs/models/toolfilechunktype.md @@ -498,12 +509,14 @@ generatedFiles: - src/mistralai/models/agenthandoffdoneevent.py - src/mistralai/models/agenthandoffentry.py - src/mistralai/models/agenthandoffstartedevent.py + - src/mistralai/models/agents_api_v1_agents_deleteop.py - src/mistralai/models/agents_api_v1_agents_getop.py - src/mistralai/models/agents_api_v1_agents_listop.py - src/mistralai/models/agents_api_v1_agents_update_versionop.py - src/mistralai/models/agents_api_v1_agents_updateop.py - src/mistralai/models/agents_api_v1_conversations_append_streamop.py - src/mistralai/models/agents_api_v1_conversations_appendop.py + - src/mistralai/models/agents_api_v1_conversations_deleteop.py - src/mistralai/models/agents_api_v1_conversations_getop.py - src/mistralai/models/agents_api_v1_conversations_historyop.py - src/mistralai/models/agents_api_v1_conversations_listop.py @@ -677,12 +690,14 @@ generatedFiles: - src/mistralai/models/ocrpageobject.py - src/mistralai/models/ocrrequest.py - src/mistralai/models/ocrresponse.py + - src/mistralai/models/ocrtableobject.py - src/mistralai/models/ocrusageinfo.py - src/mistralai/models/outputcontentchunks.py - src/mistralai/models/paginationinfo.py - src/mistralai/models/prediction.py - src/mistralai/models/processingstatusout.py - 
src/mistralai/models/referencechunk.py + - src/mistralai/models/requestsource.py - src/mistralai/models/responsedoneevent.py - src/mistralai/models/responseerrorevent.py - src/mistralai/models/responseformat.py @@ -769,6 +784,10 @@ examples: application/json: {"object": "list"} "422": application/json: {} + userExample: + responses: + "200": + application/json: {"object": "list"} retrieve_model_v1_models__model_id__get: speakeasy-default-retrieve-model-v1-models-model-id-get: parameters: @@ -776,9 +795,16 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": true, "fine_tuning": false, "vision": false, "classification": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "function_calling": true, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} "422": application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} delete_model_v1_models__model_id__delete: 
speakeasy-default-delete-model-v1-models-model-id-delete: parameters: @@ -789,6 +815,13 @@ examples: application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} "422": application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} jobs_api_routes_fine_tuning_update_fine_tuned_model: speakeasy-default-jobs-api-routes-fine-tuning-update-fine-tuned-model: parameters: @@ -818,7 +851,7 @@ examples: agents_api_v1_conversations_start: speakeasy-default-agents-api-v1-conversations-start: requestBody: - application/json: {"inputs": "", "stream": false} + application/json: {"inputs": "", "stream": false, "completion_args": {"response_format": {"type": "text"}}} responses: "200": application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "agent.handoff", "previous_agent_id": "", "previous_agent_name": "", "next_agent_id": "", "next_agent_name": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} @@ -851,7 +884,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": [], "stream": false, "store": true, "handoff_execution": "server"} + application/json: {"inputs": [], "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} responses: "200": application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} @@ -883,7 +916,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "from_entry_id": ""} + application/json: {"inputs": "", "stream": false, "store": true, 
"handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} responses: "200": application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} @@ -892,7 +925,7 @@ examples: agents_api_v1_conversations_start_stream: speakeasy-default-agents-api-v1-conversations-start-stream: requestBody: - application/json: {"inputs": [{"object": "entry", "type": "function.result", "tool_call_id": "", "result": ""}], "stream": true} + application/json: {"inputs": [{"object": "entry", "type": "function.result", "tool_call_id": "", "result": ""}], "stream": true, "completion_args": {"response_format": {"type": "text"}}} responses: "422": application/json: {} @@ -902,7 +935,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server"} + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} responses: "422": application/json: {} @@ -912,17 +945,17 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": [{"object": "entry", "type": "message.input", "role": "assistant", "content": "", "prefix": false}], "stream": true, "store": true, "handoff_execution": "server", "from_entry_id": ""} + application/json: {"inputs": [{"object": "entry", "type": "message.input", "role": "assistant", "content": "", "prefix": false}], "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} responses: "422": application/json: {} agents_api_v1_agents_create: speakeasy-default-agents-api-v1-agents-create: requestBody: - application/json: {"model": "LeBaron", "name": ""} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "LeBaron", 
"name": ""} responses: "200": - application/json: {"model": "Ranchero", "name": "", "object": "agent", "id": "", "version": 316961, "created_at": "2025-03-26T19:00:51.430Z", "updated_at": "2023-04-28T15:08:02.110Z"} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Ranchero", "name": "", "object": "agent", "id": "", "version": 316961, "versions": [818563, 316961], "created_at": "2025-03-26T19:00:51.430Z", "updated_at": "2023-04-28T15:08:02.110Z", "deployment_chat": false, "source": ""} "422": application/json: {} agents_api_v1_agents_list: @@ -933,7 +966,7 @@ examples: page_size: 20 responses: "200": - application/json: [{"model": "Impala", "name": "", "object": "agent", "id": "", "version": 43153, "created_at": "2024-04-26T15:54:09.954Z", "updated_at": "2024-02-11T18:27:55.607Z"}] + application/json: [{"model": "Impala", "name": "", "object": "agent", "id": "", "version": 43153, "versions": [43153, 439473], "created_at": "2024-04-26T15:54:09.954Z", "updated_at": "2024-02-11T18:27:55.607Z", "deployment_chat": true, "source": ""}] "422": application/json: {} agents_api_v1_agents_get: @@ -943,7 +976,7 @@ examples: agent_id: "" responses: "200": - application/json: {"model": "Silverado", "name": "", "object": "agent", "id": "", "version": 845972, "created_at": "2025-08-21T03:10:48.135Z", "updated_at": "2024-11-11T17:15:57.309Z"} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Silverado", "name": "", "object": "agent", "id": "", "version": 845972, "versions": [845972, 878771, 621094], "created_at": "2025-08-21T03:10:48.135Z", "updated_at": "2024-11-11T17:15:57.309Z", "deployment_chat": false, "source": ""} "422": application/json: {} agents_api_v1_agents_update: @@ -952,10 +985,10 @@ examples: path: agent_id: "" requestBody: - application/json: {} + application/json: {"completion_args": {"response_format": {"type": "text"}}} responses: "200": - application/json: {"model": "Model X", "name": 
"", "object": "agent", "id": "", "version": 799821, "created_at": "2025-10-20T17:35:08.067Z", "updated_at": "2023-11-16T08:47:13.265Z"} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Model X", "name": "", "object": "agent", "id": "", "version": 799821, "versions": [799821, 934063], "created_at": "2025-10-20T17:35:08.067Z", "updated_at": "2023-11-16T08:47:13.265Z", "deployment_chat": true, "source": ""} "422": application/json: {} agents_api_v1_agents_update_version: @@ -967,7 +1000,7 @@ examples: version: 157995 responses: "200": - application/json: {"model": "XTS", "name": "", "object": "agent", "id": "", "version": 310764, "created_at": "2023-05-08T23:29:06.216Z", "updated_at": "2023-05-16T19:20:05.735Z"} + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "XTS", "name": "", "object": "agent", "id": "", "version": 310764, "versions": [], "created_at": "2023-05-08T23:29:06.216Z", "updated_at": "2023-05-16T19:20:05.735Z", "deployment_chat": false, "source": ""} "422": application/json: {} files_api_routes_upload_file: @@ -977,6 +1010,12 @@ examples: responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "batch_result", "source": "upload"} + userExample: + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341"} files_api_routes_list_files: speakeasy-default-files-api-routes-list-files: parameters: @@ -986,6 +1025,15 @@ examples: responses: "200": application/json: {"data": [{"id": 
"497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "fine-tune", "sample_type": "batch_error", "source": "upload"}], "object": "", "total": 999335} + userExample: + parameters: + query: + page: 0 + page_size: 100 + include_total: true + responses: + "200": + application/json: {"data": [{"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}, {"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}], "object": "list", "total": 2} files_api_routes_retrieve_file: speakeasy-default-files-api-routes-retrieve-file: parameters: @@ -994,6 +1042,13 @@ examples: responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "bytes": 13000, "created_at": 1716963433, "filename": "files_upload.jsonl", "purpose": "batch", "sample_type": "instruct", "source": "repository", "deleted": false} + userExample: + parameters: + path: + file_id: "f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341", "deleted": false} files_api_routes_delete_file: speakeasy-default-files-api-routes-delete-file: parameters: @@ -1002,6 +1057,13 @@ examples: responses: "200": application/json: {"id": "497f6eca-6276-4993-bfeb-53cbbbba6f09", "object": "file", "deleted": false} + userExample: + parameters: + path: + file_id: 
"3b6d45eb-e30b-416f-8019-f47e2e93d930" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "deleted": true} files_api_routes_download_file: speakeasy-default-files-api-routes-download-file: parameters: @@ -1020,6 +1082,15 @@ examples: responses: "200": application/json: {"url": "https://knotty-birdcage.net/"} + userExample: + parameters: + path: + file_id: "06a020ab-355c-49a6-b19d-304b7c01699f" + query: + expiry: 24 + responses: + "200": + application/json: {"url": "https://mistralaifilesapiprodswe.blob.core.windows.net/fine-tune/.../.../e85980c9409e4a46930436588f6292b0.jsonl?se=2025-10-04T14%3A16%3A17Z&sp=r&sv=2025-01-05&sr=b&sig=..."} jobs_api_routes_fine_tuning_get_fine_tuning_jobs: speakeasy-default-jobs-api-routes-fine-tuning-get-fine-tuning-jobs: parameters: @@ -1074,7 +1145,7 @@ examples: jobs_api_routes_batch_create_batch_job: speakeasy-default-jobs-api-routes-batch-create-batch-job: requestBody: - application/json: {"input_files": ["fe3343a2-3b8d-404b-ba32-a78dede2614a"], "endpoint": "/v1/moderations", "timeout_hours": 24} + application/json: {"input_files": ["fe3343a2-3b8d-404b-ba32-a78dede2614a"], "endpoint": "/v1/moderations", "model": "mistral-small-latest", "timeout_hours": 24} responses: "200": application/json: {"id": "", "object": "batch", "input_files": ["7b2553d8-e17f-4df5-a862-a1678f6b5271", "8c618d9f-7d82-42ba-a284-d57d84f50a58", "c042f996-e842-441d-ae47-4e0850334e41"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 395527, "total_requests": 166919, "completed_requests": 258552, "succeeded_requests": 480980, "failed_requests": 684176} @@ -1097,7 +1168,7 @@ examples: chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "stream": false, "messages": 
[{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} @@ -1106,7 +1177,7 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} responses: "422": application/json: {} @@ -1119,10 +1190,16 @@ examples: application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} + userExample: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. 
The function should be implemented in a\n way that it can handle very large integers (up to 10^18). As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} stream_fim: speakeasy-default-stream-fim: requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-latest", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} responses: "422": application/json: {} @@ -1135,10 +1212,16 @@ examples: application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} + userExample: + requestBody: + application/json: {"stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}, "agent_id": ""} + responses: + "200": + application/json: {"id": "cf79f7daaee244b1a0ae5c7b1444424a", "object": "chat.completion", "model": "mistral-medium-latest", "usage": {"prompt_tokens": 24, "completion_tokens": 27, "total_tokens": 51, "prompt_audio_seconds": {}}, "created": 1759500534, "choices": [{"index": 0, "message": {"content": "Arrr, the scallywag Claude Monet be the finest French painter to ever splash colors on a canvas, savvy?", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} stream_agents: speakeasy-default-stream-agents: requestBody: - application/json: {"stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "agent_id": ""} + application/json: {"stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}, "agent_id": ""} responses: "422": application/json: {} @@ -1151,6 +1234,12 @@ examples: application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "data": [{"object": "embedding", "embedding": [0.1, 0.2, 0.3], "index": 0}, {"object": "embedding", "embedding": [0.4, 0.5, 0.6], "index": 1}]} "422": application/json: {} + userExample: + requestBody: + application/json: {"model": "mistral-embed", "input": ["Embed this sentence.", "As well as this one."]} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "list", "model": "mistral-embed", "usage": {"prompt_tokens": 15, "completion_tokens": 0, "total_tokens": 15, "prompt_audio_seconds": null}, "data": [{"object": "embedding", "embedding": [-0.016632080078125, 0.0701904296875, 0.03143310546875, 0.01309967041015625, 0.0202789306640625], "index": 0}, {"object": "embedding", "embedding": [-0.0230560302734375, 0.039337158203125, 0.0521240234375, -0.0184783935546875, 0.034271240234375], "index": 1}]} moderations_v1_moderations_post: speakeasy-default-moderations-v1-moderations-post: requestBody: @@ -1160,6 +1249,12 @@ examples: application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Corvette", "results": [{}]} "422": application/json: {} + userExample: + requestBody: + application/json: {"model": "mistral-moderation-latest", "input": ""} + responses: + "200": + application/json: {"id": "4d71ae510af942108ef7344f903e2b88", "model": "mistral-moderation-latest", "results": [{"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 
0.0011335690505802631, "hate_and_discrimination": 0.0030753696337342262, "violence_and_threats": 0.0003569706459529698, "dangerous_and_criminal_content": 0.002251847181469202, "selfharm": 0.00017952796770259738, "health": 0.0002780309587251395, "financial": 0.00008481103577651083, "law": 0.00004539786823443137, "pii": 0.0023967307060956955}}, {"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 0.000626334105618298, "hate_and_discrimination": 0.0013670255430042744, "violence_and_threats": 0.0002611903182696551, "dangerous_and_criminal_content": 0.0030753696337342262, "selfharm": 0.00010889690747717395, "health": 0.00015843621804378927, "financial": 0.000191104321856983, "law": 0.00004006369272246957, "pii": 0.0035936026833951473}}]} chat_moderations_v1_chat_moderations_post: speakeasy-default-chat-moderations-v1-chat-moderations-post: requestBody: @@ -1169,10 +1264,16 @@ examples: application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Explorer", "results": [{}]} "422": application/json: {} + userExample: + requestBody: + application/json: {"input": [{"content": "", "role": "tool"}], "model": "LeBaron"} + responses: + "200": + application/json: {"id": "352bce1a55814127a3b0bc4fb8f02a35", "model": "mistral-moderation-latest", "results": [{"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 0.0010322310263291001, "hate_and_discrimination": 0.001597845577634871, "violence_and_threats": 0.00020342698553577065, "dangerous_and_criminal_content": 0.0029810327105224133, "selfharm": 0.00017952796770259738, "health": 0.0002959570847451687, "financial": 
0.000079673009167891, "law": 0.00004539786823443137, "pii": 0.004198795650154352}}]} classifications_v1_classifications_post: speakeasy-default-classifications-v1-classifications-post: requestBody: - application/json: {"model": "Silverado", "input": [""]} + application/json: {"model": "mistral-moderation-latest", "input": [""]} responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "ATS", "results": [{}, {"key": {"scores": {"key": 2080.19}}}]} @@ -1196,6 +1297,12 @@ examples: application/json: {"pages": [{"index": 944919, "markdown": "", "images": [], "dimensions": {"dpi": 984283, "height": 453411, "width": 398292}}], "model": "Wrangler", "usage_info": {"pages_processed": 47064}} "422": application/json: {} + userExample: + requestBody: + application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + responses: + "200": + application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. 
ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). 
Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | 
(0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) 
| (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) 
| (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 
15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} libraries_list_v1: speakeasy-default-libraries-list-v1: responses: @@ -1371,7 +1478,7 @@ examples: path: library_id: "36de3a24-5b1c-4c8f-9d84-d5642205a976" requestBody: - application/json: {"org_id": "aadd9ae1-f285-4437-884a-091c77efa6fd", "level": "Viewer", "share_with_uuid": "0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", "share_with_type": "User"} + application/json: {"level": "Viewer", "share_with_uuid": "0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", "share_with_type": "User"} responses: "200": application/json: {"library_id": "45b3a5b2-8b81-4453-9130-ded7f1e5a366", "org_id": "0fa6e542-f04b-431e-a1be-76a9a92b0e68", "role": "", "share_with_type": "", "share_with_uuid": "cdbcc0c5-e577-4880-8ed3-f919421d4fc5"} @@ -1383,7 +1490,7 @@ examples: path: library_id: "709e3cad-9fb2-4f4e-bf88-143cf1808107" requestBody: - application/json: {"org_id": "0814a235-c2d0-4814-875a-4b85f93d3dc7", "share_with_uuid": "b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", "share_with_type": "User"} + application/json: 
{"share_with_uuid": "b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", "share_with_type": "User"} responses: "200": application/json: {"library_id": "7f9c6af4-e362-4cf1-9363-0409d51c2dfa", "org_id": "6b2cac3a-b29c-4d8f-bebb-0db06ec1bf97", "role": "", "share_with_type": "", "share_with_uuid": "618c78f1-41ca-45c3-8ef2-7d78898c7061"} @@ -1396,9 +1503,32 @@ examples: responses: "200": application/json: {"model": "Beetle", "text": "", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "language": ""} + userExample: + requestBody: + multipart/form-data: {"model": "voxtral-mini-latest", "stream": false} + responses: + "200": + application/json: {"model": "voxtral-mini-2507", "text": "This week, I traveled to Chicago to deliver my final farewell address to the nation, following in the tradition of presidents before me. It was an opportunity to say thank you. Whether we've seen eye to eye or rarely agreed at all, my conversations with you, the American people, in living rooms, in schools, at farms and on factory floors, at diners and on distant military outposts, All these conversations are what have kept me honest, kept me inspired, and kept me going. Every day, I learned from you. You made me a better President, and you made me a better man.\nOver the course of these eight years, I've seen the goodness, the resilience, and the hope of the American people. I've seen neighbors looking out for each other as we rescued our economy from the worst crisis of our lifetimes. I've hugged cancer survivors who finally know the security of affordable health care. I've seen communities like Joplin rebuild from disaster, and cities like Boston show the world that no terrorist will ever break the American spirit. I've seen the hopeful faces of young graduates and our newest military officers. I've mourned with grieving families searching for answers. And I found grace in a Charleston church. 
I've seen our scientists help a paralyzed man regain his sense of touch, and our wounded warriors walk again. I've seen our doctors and volunteers rebuild after earthquakes and stop pandemics in their tracks. I've learned from students who are building robots and curing diseases, and who will change the world in ways we can't even imagine. I've seen the youngest of children remind us of our obligations to care for our refugees, to work in peace, and above all, to look out for each other.\nThat's what's possible when we come together in the slow, hard, sometimes frustrating, but always vital work of self-government. But we can't take our democracy for granted. All of us, regardless of party, should throw ourselves into the work of citizenship. Not just when there is an election. Not just when our own narrow interest is at stake. But over the full span of a lifetime. If you're tired of arguing with strangers on the Internet, try to talk with one in real life. If something needs fixing, lace up your shoes and do some organizing. If you're disappointed by your elected officials, then grab a clipboard, get some signatures, and run for office yourself.\nOur success depends on our participation, regardless of which way the pendulum of power swings. It falls on each of us to be guardians of our democracy, to embrace the joyous task we've been given to continually try to improve this great nation of ours. Because for all our outward differences, we all share the same proud title – citizen.\nIt has been the honor of my life to serve you as President. Eight years later, I am even more optimistic about our country's promise. And I look forward to working along your side as a citizen for all my days that remain.\nThanks, everybody. God bless you. 
And God bless the United States of America.\n", "segments": [], "usage": {"prompt_tokens": 4, "completion_tokens": 635, "total_tokens": 3264, "prompt_audio_seconds": 203}, "language": "en"} audio_api_v1_transcriptions_post_stream: speakeasy-default-audio-api-v1-transcriptions-post-stream: requestBody: multipart/form-data: {"model": "Camry", "stream": true} + agents_api_v1_conversations_delete: + speakeasy-default-agents-api-v1-conversations-delete: + parameters: + path: + conversation_id: "" + responses: + "422": + application/json: {} + agents_api_v1_agents_delete: + speakeasy-default-agents-api-v1-agents-delete: + parameters: + path: + agent_id: "" + responses: + "422": + application/json: {} examplesVersion: 1.0.2 generatedTests: {} +releaseNotes: "## SDK Changes Detected:\n* `mistral.beta.libraries.create()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.get()`: `response` **Changed** **Breaking** :warning:\n* `mistral.models.list()`: \n * `response.data.[].[base].capabilities` **Changed**\n * `error.status[422]` **Removed** **Breaking** :warning:\n* `mistral.files.list()`: \n * `request.include_total` **Added**\n * `response.total` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.start()`: \n * `request` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.delete()`: \n * `request.org_id` **Changed**\n * `response.share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.update_or_create()`: \n * `request.org_id` **Changed**\n * `response.share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.append()`: \n * `request.inputs.[array].[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.list()`: 
`response.data.[].share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.restart()`: \n * `request` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.update()`: \n * `request.attributes` **Added**\n * `response` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.upload()`: `response` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.list()`: \n * `request.filters_attributes` **Added**\n * `response.data.[]` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.update()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.delete()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.get()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.get_history()`: `response.entries.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.list()`: `response.data.[].owner_id` **Changed** **Breaking** :warning:\n* `mistral.models.retrieve()`: `response.[base].capabilities` **Changed**\n* `mistral.agents.complete()`: `request.metadata` **Added**\n* `mistral.beta.agents.get()`: \n * `request.agent_version` **Added**\n * `response` **Changed**\n* `mistral.beta.agents.list()`: \n * `request` **Changed**\n * `response.[]` **Changed**\n* `mistral.beta.agents.update_version()`: `response` **Changed**\n* `mistral.beta.agents.delete()`: **Added**\n* `mistral.beta.conversations.list()`: \n * `request.metadata` **Added**\n * `response.[]` **Changed**\n* `mistral.beta.conversations.get()`: `response` **Changed**\n* `mistral.beta.agents.update()`: \n * `request` **Changed**\n * `response` **Changed**\n* `mistral.beta.conversations.delete()`: **Added**\n* `mistral.chat.complete()`: `request.metadata` **Added**\n* `mistral.fim.complete()`: `request.metadata` **Added**\n* 
`mistral.beta.agents.create()`: \n * `request.metadata` **Added**\n * `response` **Changed**\n* `mistral.ocr.process()`: \n * `request` **Changed**\n * `response.pages.[]` **Changed**\n" diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 116b0e26..f206b927 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.9.11 + version: 1.10.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index f4582991..48c4bf7b 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:eefc1f0b6a5e9ec673d317d61cad766290710b5fc369412491b75f732cccfedd - sourceBlobDigest: sha256:97767522559603de92a9738938e522cea4d558b2a854500acf6fe8d81f8ccfb8 + sourceRevisionDigest: sha256:cb63bd997cefe7b3b36e91a475df57cb779bf79f183340e0713d8ffb16a2dabc + sourceBlobDigest: sha256:f0caa06fb9bcadc35b097aa5ff69bb5020937652df311722b5e44a282bd95d6d tags: - latest - - speakeasy-sdk-regen-1759420102 + - speakeasy-sdk-regen-1765914268 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:eefc1f0b6a5e9ec673d317d61cad766290710b5fc369412491b75f732cccfedd - sourceBlobDigest: sha256:97767522559603de92a9738938e522cea4d558b2a854500acf6fe8d81f8ccfb8 + sourceRevisionDigest: sha256:cb63bd997cefe7b3b36e91a475df57cb779bf79f183340e0713d8ffb16a2dabc + sourceBlobDigest: sha256:f0caa06fb9bcadc35b097aa5ff69bb5020937652df311722b5e44a282bd95d6d codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:8ed158c9c1ed8252f86b620219dd93e9650b45e7c6403cda7fdd9b4ee0d17dac + codeSamplesRevisionDigest: sha256:b1eacff97275a14ab0c2143e07bdfa4f4bd58f5370b2f106bcc6ada92b754d08 
workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.606.10 diff --git a/README.md b/README.md index 65b0f7b0..1bc889c6 100644 --- a/README.md +++ b/README.md @@ -145,12 +145,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.complete(model="mistral-small-latest", messages=[ + res = mistral.chat.complete(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -171,12 +173,14 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.chat.complete_async(model="mistral-small-latest", messages=[ + res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -252,7 +256,9 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -278,7 +284,9 @@ async def main(): "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -448,7 +456,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA #### [audio.transcriptions](docs/sdks/transcriptions/README.md) * [complete](docs/sdks/transcriptions/README.md#complete) - Create Transcription -* [stream](docs/sdks/transcriptions/README.md#stream) - Create streaming transcription (SSE) +* [stream](docs/sdks/transcriptions/README.md#stream) - Create Streaming Transcription (SSE) ### [batch](docs/sdks/batch/README.md) @@ -469,6 +477,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [list](docs/sdks/mistralagents/README.md#list) - List agent entities. * [get](docs/sdks/mistralagents/README.md#get) - Retrieve an agent entity. * [update](docs/sdks/mistralagents/README.md#update) - Update an agent entity. +* [delete](docs/sdks/mistralagents/README.md#delete) - Delete an agent entity. * [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. #### [beta.conversations](docs/sdks/conversations/README.md) @@ -476,6 +485,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [start](docs/sdks/conversations/README.md#start) - Create a conversation and append entries to it. * [list](docs/sdks/conversations/README.md#list) - List all created conversations. * [get](docs/sdks/conversations/README.md#get) - Retrieve a conversation information. +* [delete](docs/sdks/conversations/README.md#delete) - Delete a conversation. * [append](docs/sdks/conversations/README.md#append) - Append new entries to an existing conversation. * [get_history](docs/sdks/conversations/README.md#get_history) - Retrieve all entries in a conversation. * [get_messages](docs/sdks/conversations/README.md#get_messages) - Retrieve all messages in a conversation. 
@@ -500,7 +510,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA #### [beta.libraries.documents](docs/sdks/documents/README.md) -* [list](docs/sdks/documents/README.md#list) - List document in a given library. +* [list](docs/sdks/documents/README.md#list) - List documents in a given library. * [upload](docs/sdks/documents/README.md#upload) - Upload a new document. * [get](docs/sdks/documents/README.md#get) - Retrieve the metadata of a specific document. * [update](docs/sdks/documents/README.md#update) - Update the metadata of a specific document. @@ -597,7 +607,11 @@ with Mistral( "tool_call_id": "", "result": "", }, - ], stream=True) + ], stream=True, completion_args={ + "response_format": { + "type": "text", + }, + }) with res as event_stream: for event in event_stream: @@ -712,7 +726,7 @@ with Mistral( res = None try: - res = mistral.models.list() + res = mistral.models.retrieve(model_id="ft:open-mistral-7b:587a6b29:20240514:7e773925") # Handle response print(res) @@ -746,7 +760,7 @@ with Mistral( **Inherit from [`MistralError`](./src/mistralai/models/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 47 of 68 methods.* +* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 48 of 70 methods.* * [`ResponseValidationError`](./src/mistralai/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. diff --git a/RELEASES.md b/RELEASES.md index b65d9d0c..e43d3f33 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -328,4 +328,14 @@ Based on: ### Generated - [python v1.9.11] . ### Releases -- [PyPI v1.9.11] https://pypi.org/project/mistralai/1.9.11 - . 
\ No newline at end of file +- [PyPI v1.9.11] https://pypi.org/project/mistralai/1.9.11 - . + +## 2025-12-16 19:44:09 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.10.0] . +### Releases +- [PyPI v1.10.0] https://pypi.org/project/mistralai/1.10.0 - . \ No newline at end of file diff --git a/USAGE.md b/USAGE.md index b15a88aa..b230b016 100644 --- a/USAGE.md +++ b/USAGE.md @@ -13,12 +13,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.complete(model="mistral-small-latest", messages=[ + res = mistral.chat.complete(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -39,12 +41,14 @@ async def main(): api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = await mistral.chat.complete_async(model="mistral-small-latest", messages=[ + res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -120,7 +124,9 @@ with Mistral( "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -146,7 +152,9 @@ async def main(): "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) diff --git a/docs/models/agent.md b/docs/models/agent.md index 686fae75..ee054dd3 100644 --- a/docs/models/agent.md +++ b/docs/models/agent.md @@ -12,8 +12,12 @@ | `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `object` | [Optional[models.AgentObject]](../models/agentobject.md) | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `version` | *int* | :heavy_check_mark: | N/A | +| `versions` | List[*int*] | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | \ No newline at end of file +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `deployment_chat` | *bool* | :heavy_check_mark: | N/A | +| `source` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversation.md b/docs/models/agentconversation.md index 772cc80e..92fd673c 100644 --- a/docs/models/agentconversation.md +++ b/docs/models/agentconversation.md @@ -7,8 +7,10 @@ | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------- | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | | `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md index 34060d9a..afc27d3b 100644 --- a/docs/models/agentcreationrequest.md +++ b/docs/models/agentcreationrequest.md @@ -11,4 +11,5 @@ | `model` | *str* | :heavy_check_mark: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsdeleterequest.md b/docs/models/agentsapiv1agentsdeleterequest.md new file mode 100644 index 00000000..2799f418 --- /dev/null +++ b/docs/models/agentsapiv1agentsdeleterequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1AgentsDeleteRequest + + +## 
Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md index b46ac23d..825e03a0 100644 --- a/docs/models/agentsapiv1agentsgetrequest.md +++ b/docs/models/agentsapiv1agentsgetrequest.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistrequest.md b/docs/models/agentsapiv1agentslistrequest.md index b5bcee62..c4f05b5c 100644 --- a/docs/models/agentsapiv1agentslistrequest.md +++ b/docs/models/agentsapiv1agentslistrequest.md @@ -3,7 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | 
N/A | +| `sources` | List[[models.RequestSource](../models/requestsource.md)] | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsdeleterequest.md b/docs/models/agentsapiv1conversationsdeleterequest.md new file mode 100644 index 00000000..c6eed281 --- /dev/null +++ b/docs/models/agentsapiv1conversationsdeleterequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1ConversationsDeleteRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | ----------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. 
| \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationslistrequest.md b/docs/models/agentsapiv1conversationslistrequest.md index 528a055a..62c9011f 100644 --- a/docs/models/agentsapiv1conversationslistrequest.md +++ b/docs/models/agentsapiv1conversationslistrequest.md @@ -6,4 +6,5 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 73615ed9..2a0c4144 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -3,20 +3,21 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index b0aac6c1..b2ccd4e8 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -3,20 +3,21 @@ ## Fields -| Field | Type | Required | Description | Example | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
| | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. 
| | \ No newline at end of file diff --git a/docs/models/agentupdaterequest.md b/docs/models/agentupdaterequest.md index 9da03d03..641d1e40 100644 --- a/docs/models/agentupdaterequest.md +++ b/docs/models/agentupdaterequest.md @@ -11,4 +11,6 @@ | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/attributes.md b/docs/models/attributes.md new file mode 100644 index 00000000..147708d9 --- /dev/null +++ b/docs/models/attributes.md @@ -0,0 +1,59 @@ +# Attributes + + +## Supported Types + +### `bool` + +```python +value: bool = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + +### `float` + +```python +value: float = /* values here */ +``` + +### `datetime` + +```python +value: datetime = /* values here */ +``` + +### `List[str]` + +```python +value: List[str] = /* values here */ +``` + +### `List[int]` + +```python +value: List[int] = /* values here */ +``` + +### `List[float]` + +```python +value: List[float] = /* values here */ +``` + +### `List[bool]` + +```python +value: List[bool] = /* values here */ +``` + diff --git a/docs/models/audiotranscriptionrequest.md b/docs/models/audiotranscriptionrequest.md index e876de18..f2e17dd3 100644 --- a/docs/models/audiotranscriptionrequest.md +++ b/docs/models/audiotranscriptionrequest.md @@ -3,13 +3,13 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | 
---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | -| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | -| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | -| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `stream` | *Optional[Literal[False]]* | :heavy_minus_sign: | N/A | -| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | voxtral-mini-latest | +| `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy. | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `stream` | *Optional[Literal[False]]* | :heavy_minus_sign: | N/A | | +| `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | \ No newline at end of file diff --git a/docs/models/batchjobin.md b/docs/models/batchjobin.md index b5b13786..6fd06696 100644 --- a/docs/models/batchjobin.md +++ b/docs/models/batchjobin.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | -| `input_files` | List[*str*] | :heavy_check_mark: | N/A | -| `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | -| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input_files` | List[*str*] | :heavy_check_mark: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | +| `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | mistral-small-latest | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | +| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. 
| | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index a9806a4d..109fa7b1 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -3,23 +3,24 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionrequesttoolchoice.md b/docs/models/chatcompletionrequesttoolchoice.md index 1646528d..dc82a8ef 100644 --- a/docs/models/chatcompletionrequesttoolchoice.md +++ b/docs/models/chatcompletionrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
+ ## Supported Types diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 6faeb411..7d5fb411 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -3,23 +3,24 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/docs/models/chatcompletionstreamrequesttoolchoice.md b/docs/models/chatcompletionstreamrequesttoolchoice.md index cce0ca3e..43f3ca38 100644 --- a/docs/models/chatcompletionstreamrequesttoolchoice.md +++ b/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionStreamRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + ## Supported Types diff --git a/docs/models/classificationrequest.md b/docs/models/classificationrequest.md index b9befc89..4b38c68a 100644 --- a/docs/models/classificationrequest.md +++ b/docs/models/classificationrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | -| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. 
| \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | \ No newline at end of file diff --git a/docs/models/completionargs.md b/docs/models/completionargs.md index 0d108225..60d09137 100644 --- a/docs/models/completionargs.md +++ b/docs/models/completionargs.md @@ -5,15 +5,15 @@ White-listed arguments from the completion API ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | -| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `prediction` | 
[OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | -| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | -| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `prediction` | [OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | {
"type": "text"
} | +| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/conversationrequest.md b/docs/models/conversationrequest.md index 141533e7..04378ae3 100644 --- a/docs/models/conversationrequest.md +++ b/docs/models/conversationrequest.md @@ -10,9 +10,11 @@ | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../models/handoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.Tools](../models/tools.md)] | :heavy_minus_sign: | N/A | +| `tools` | List[[models.Tools](../models/tools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md index 61679df6..f389a1e5 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -12,4 +12,6 @@ Request to restart a new conversation from a given entry in the conversation. | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md index 9548b336..d7358dc2 100644 --- a/docs/models/conversationrestartstreamrequest.md +++ b/docs/models/conversationrestartstreamrequest.md @@ -12,4 +12,6 @@ Request to restart a new conversation from a given entry in the conversation. | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `from_entry_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. 
| \ No newline at end of file diff --git a/docs/models/conversationstreamrequest.md b/docs/models/conversationstreamrequest.md index a571e2af..e403db68 100644 --- a/docs/models/conversationstreamrequest.md +++ b/docs/models/conversationstreamrequest.md @@ -10,9 +10,11 @@ | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.ConversationStreamRequestTools](../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationStreamRequestTools](../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/documentout.md b/docs/models/documentout.md index b9e7b212..28df11eb 100644 --- a/docs/models/documentout.md +++ b/docs/models/documentout.md @@ -7,18 +7,20 @@ | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | | 
`library_id` | *str* | :heavy_check_mark: | N/A | -| `hash` | *str* | :heavy_check_mark: | N/A | -| `mime_type` | *str* | :heavy_check_mark: | N/A | -| `extension` | *str* | :heavy_check_mark: | N/A | -| `size` | *int* | :heavy_check_mark: | N/A | +| `hash` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `mime_type` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `extension` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `size` | *Nullable[int]* | :heavy_check_mark: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | | `summary` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `processing_status` | *str* | :heavy_check_mark: | N/A | -| `uploaded_by_id` | *str* | :heavy_check_mark: | N/A | +| `uploaded_by_id` | *Nullable[str]* | :heavy_check_mark: | N/A | | `uploaded_by_type` | *str* | :heavy_check_mark: | N/A | | `tokens_processing_main_content` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `tokens_processing_summary` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `tokens_processing_total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/documentupdatein.md b/docs/models/documentupdatein.md index 215ae95f..0993886d 100644 --- a/docs/models/documentupdatein.md +++ b/docs/models/documentupdatein.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| 
`name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, [models.Attributes](../models/attributes.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 2f48099f..0f2fc6a6 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -3,10 +3,10 @@ ## Fields -| Field | Type | Required | Description | Example | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings. | | -| `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | -| `encoding_format` | [Optional[models.EncodingFormat]](../models/encodingformat.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | +| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | +| `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | +| `encoding_format` | [Optional[models.EncodingFormat]](../models/encodingformat.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/embeddingrequestinputs.md b/docs/models/embeddingrequestinputs.md index a3f82c1c..527a089b 100644 --- a/docs/models/embeddingrequestinputs.md +++ b/docs/models/embeddingrequestinputs.md @@ -1,6 +1,6 @@ # EmbeddingRequestInputs -Text to embed. +The text content to be embedded, can be a string or an array of strings for fast processing in bulk. ## Supported Types diff --git a/docs/models/filesapirouteslistfilesrequest.md b/docs/models/filesapirouteslistfilesrequest.md index b28ab3fe..3801a96e 100644 --- a/docs/models/filesapirouteslistfilesrequest.md +++ b/docs/models/filesapirouteslistfilesrequest.md @@ -7,6 +7,7 @@ | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `include_total` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `sample_type` | List[[models.SampleType](../models/sampletype.md)] | :heavy_minus_sign: | N/A | | `source` | List[[models.Source](../models/source.md)] | :heavy_minus_sign: | N/A | | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/fimcompletionrequest.md b/docs/models/fimcompletionrequest.md index 7b785cf0..fde0b625 100644 --- a/docs/models/fimcompletionrequest.md +++ 
b/docs/models/fimcompletionrequest.md @@ -5,13 +5,14 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/fimcompletionstreamrequest.md b/docs/models/fimcompletionstreamrequest.md index d49a6301..ba62d854 100644 --- a/docs/models/fimcompletionstreamrequest.md +++ b/docs/models/fimcompletionstreamrequest.md @@ -5,13 +5,14 @@ | Field | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/docs/models/format_.md b/docs/models/format_.md new file mode 100644 index 00000000..97d286a4 --- /dev/null +++ b/docs/models/format_.md @@ -0,0 +1,11 @@ +# Format + +Format of the table + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/docs/models/librariesdocumentslistv1request.md b/docs/models/librariesdocumentslistv1request.md index 1b4eb24d..44f63001 100644 --- a/docs/models/librariesdocumentslistv1request.md +++ b/docs/models/librariesdocumentslistv1request.md @@ -9,5 +9,6 @@ | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `filters_attributes` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `sort_by` | *Optional[str]* | :heavy_minus_sign: | N/A | | `sort_order` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/libraryout.md b/docs/models/libraryout.md index cf4de41b..ebf46d57 100644 --- a/docs/models/libraryout.md +++ b/docs/models/libraryout.md @@ -9,15 +9,15 @@ | `name` | *str* | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `owner_id` | *str* | :heavy_check_mark: | N/A | +| `owner_id` | *Nullable[str]* | :heavy_check_mark: | N/A | | `owner_type` | *str* | :heavy_check_mark: | N/A | | 
`total_size` | *int* | :heavy_check_mark: | N/A | | `nb_documents` | *int* | :heavy_check_mark: | N/A | | `chunk_size` | *Nullable[int]* | :heavy_check_mark: | N/A | | `emoji` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `generated_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `generated_description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `explicit_user_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `explicit_workspace_members_count` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `org_sharing_role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `org_sharing_role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `generated_name` | *OptionalNullable[str]* | :heavy_minus_sign: | Generated Name | \ No newline at end of file diff --git a/docs/models/listfilesout.md b/docs/models/listfilesout.md index ee544c1b..bcb1f13a 100644 --- a/docs/models/listfilesout.md +++ b/docs/models/listfilesout.md @@ -7,4 +7,4 @@ | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | | `data` | List[[models.FileSchema](../models/fileschema.md)] | :heavy_check_mark: | N/A | | `object` | *str* | :heavy_check_mark: | N/A | -| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| `total` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md index 36b27938..646c8e94 100644 --- a/docs/models/modelcapabilities.md +++ b/docs/models/modelcapabilities.md @@ -6,8 +6,11 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `completion_chat` 
| *Optional[bool]* | :heavy_minus_sign: | N/A | -| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `ocr` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `moderation` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `audio` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversation.md b/docs/models/modelconversation.md index f7f61a79..1a03ef7d 100644 --- a/docs/models/modelconversation.md +++ b/docs/models/modelconversation.md @@ -10,6 +10,7 @@ | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of what the conversation is about. | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| | `object` | [Optional[models.ModelConversationObject]](../models/modelconversationobject.md) | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | diff --git a/docs/models/name.md b/docs/models/name.md new file mode 100644 index 00000000..18b978a8 --- /dev/null +++ b/docs/models/name.md @@ -0,0 +1,17 @@ +# Name + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/ocrpageobject.md b/docs/models/ocrpageobject.md index 9db3bb77..02473d44 100644 --- a/docs/models/ocrpageobject.md +++ b/docs/models/ocrpageobject.md @@ -8,4 +8,8 @@ | `index` | *int* | :heavy_check_mark: | The page index in a pdf document starting from 0 | | `markdown` | *str* | :heavy_check_mark: | The markdown string response of the page | | `images` | List[[models.OCRImageObject](../models/ocrimageobject.md)] | :heavy_check_mark: | List of all extracted images in the page | +| `tables` | List[[models.OCRTableObject](../models/ocrtableobject.md)] | :heavy_minus_sign: | List of all extracted tables in the page | +| `hyperlinks` | List[*str*] | :heavy_minus_sign: | List of all hyperlinks in the page | +| `header` | *OptionalNullable[str]* | :heavy_minus_sign: | Header of the page | +| `footer` | *OptionalNullable[str]* | :heavy_minus_sign: | Footer of the page | | `dimensions` | [Nullable[models.OCRPageDimensions]](../models/ocrpagedimensions.md) | :heavy_check_mark: | The dimensions of the PDF Page's screenshot image | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md index 6a9c77ab..76e4da92 100644 --- a/docs/models/ocrrequest.md +++ b/docs/models/ocrrequest.md @@ -3,14 +3,17 @@ ## Fields -| Field | Type | Required | 
Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | +| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/ocrtableobject.md b/docs/models/ocrtableobject.md new file mode 100644 index 00000000..4e27697c --- /dev/null +++ b/docs/models/ocrtableobject.md @@ -0,0 +1,10 @@ +# OCRTableObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Table ID for extracted table in a page | +| `content` | *str* | :heavy_check_mark: | Content of the table in the given format | +| `format_` | [models.Format](../models/format_.md) | :heavy_check_mark: | Format of the table | \ No newline at end of file diff --git a/docs/models/prediction.md b/docs/models/prediction.md index 86e9c396..fae3c1ca 100644 --- a/docs/models/prediction.md +++ b/docs/models/prediction.md @@ -1,5 +1,7 @@ # Prediction +Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. 
+ ## Fields diff --git a/docs/models/requestsource.md b/docs/models/requestsource.md new file mode 100644 index 00000000..c81c1159 --- /dev/null +++ b/docs/models/requestsource.md @@ -0,0 +1,10 @@ +# RequestSource + + +## Values + +| Name | Value | +| ------------------ | ------------------ | +| `API` | api | +| `PLAYGROUND` | playground | +| `AGENT_BUILDER_V1` | agent_builder_v1 | \ No newline at end of file diff --git a/docs/models/responseformat.md b/docs/models/responseformat.md index 23a1641b..5cab22f2 100644 --- a/docs/models/responseformat.md +++ b/docs/models/responseformat.md @@ -1,9 +1,11 @@ # ResponseFormat +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
+ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
| -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseformats.md b/docs/models/responseformats.md index 06886afe..2f5f1e55 100644 --- a/docs/models/responseformats.md +++ b/docs/models/responseformats.md @@ -1,7 +1,5 @@ # ResponseFormats -An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
- ## Values diff --git a/docs/models/sharingdelete.md b/docs/models/sharingdelete.md index 71cacab6..1dcec095 100644 --- a/docs/models/sharingdelete.md +++ b/docs/models/sharingdelete.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `org_id` | *str* | :heavy_check_mark: | N/A | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | \ No newline at end of file diff --git a/docs/models/sharingin.md b/docs/models/sharingin.md index 537ede03..bac18c8d 100644 --- a/docs/models/sharingin.md +++ b/docs/models/sharingin.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `org_id` | *str* | :heavy_check_mark: | N/A | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `level` | [models.ShareEnum](../models/shareenum.md) | :heavy_check_mark: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. 
| \ No newline at end of file diff --git a/docs/models/sharingout.md b/docs/models/sharingout.md index 5844fe64..35aeff43 100644 --- a/docs/models/sharingout.md +++ b/docs/models/sharingout.md @@ -10,4 +10,4 @@ | `org_id` | *str* | :heavy_check_mark: | N/A | | `role` | *str* | :heavy_check_mark: | N/A | | `share_with_type` | *str* | :heavy_check_mark: | N/A | -| `share_with_uuid` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `share_with_uuid` | *Nullable[str]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/tableformat.md b/docs/models/tableformat.md new file mode 100644 index 00000000..54f029b8 --- /dev/null +++ b/docs/models/tableformat.md @@ -0,0 +1,9 @@ +# TableFormat + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaevent.md b/docs/models/toolexecutiondeltaevent.md index bfc9dc0e..7bee6d83 100644 --- a/docs/models/toolexecutiondeltaevent.md +++ b/docs/models/toolexecutiondeltaevent.md @@ -9,5 +9,5 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionDeltaEventName](../models/toolexecutiondeltaeventname.md) | :heavy_check_mark: | N/A | | `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaeventname.md b/docs/models/toolexecutiondeltaeventname.md new file mode 100644 index 00000000..9c3edef8 --- /dev/null +++ b/docs/models/toolexecutiondeltaeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionDeltaEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python 
+value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolexecutiondoneevent.md b/docs/models/toolexecutiondoneevent.md index aa28df59..5898ea5e 100644 --- a/docs/models/toolexecutiondoneevent.md +++ b/docs/models/toolexecutiondoneevent.md @@ -9,5 +9,5 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionDoneEventName](../models/toolexecutiondoneeventname.md) | :heavy_check_mark: | N/A | | `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondoneeventname.md b/docs/models/toolexecutiondoneeventname.md new file mode 100644 index 00000000..6449079d --- /dev/null +++ b/docs/models/toolexecutiondoneeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionDoneEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md index 174abdd1..3678116d 100644 --- a/docs/models/toolexecutionentry.md +++ b/docs/models/toolexecutionentry.md @@ -10,6 +10,6 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `name` | 
[models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `name` | [models.Name](../models/name.md) | :heavy_check_mark: | N/A | | `arguments` | *str* | :heavy_check_mark: | N/A | | `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md index 82ea65e5..de81312b 100644 --- a/docs/models/toolexecutionstartedevent.md +++ b/docs/models/toolexecutionstartedevent.md @@ -9,5 +9,5 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionStartedEventName](../models/toolexecutionstartedeventname.md) | :heavy_check_mark: | N/A | | `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedeventname.md b/docs/models/toolexecutionstartedeventname.md new file mode 100644 index 00000000..3308c483 --- /dev/null +++ b/docs/models/toolexecutionstartedeventname.md @@ -0,0 +1,17 @@ +# ToolExecutionStartedEventName + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index f06cce76..af768506 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -68,7 +68,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", org_id="aadd9ae1-f285-4437-884a-091c77efa6fd", level="Viewer", 
share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") + res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", level="Viewer", share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") # Handle response print(res) @@ -80,10 +80,10 @@ with Mistral( | Parameter | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `library_id` | *str* | :heavy_check_mark: | N/A | -| `org_id` | *str* | :heavy_check_mark: | N/A | | `level` | [models.ShareEnum](../../models/shareenum.md) | :heavy_check_mark: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -113,7 +113,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", org_id="0814a235-c2d0-4814-875a-4b85f93d3dc7", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") + res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") # Handle response print(res) @@ -125,9 +125,9 @@ with Mistral( | Parameter | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `library_id` | *str* | :heavy_check_mark: | N/A | -| `org_id` | *str* | :heavy_check_mark: | N/A | | `share_with_uuid` | *str* | :heavy_check_mark: | The id of the entity (user, workspace or organization) to share with | | `share_with_type` | [models.EntityType](../../models/entitytype.md) | :heavy_check_mark: | The type of entity, used to share a library. | +| `org_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index a4e8b22e..87a411cd 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -31,7 +31,9 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=False) + ], agent_id="", stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -40,24 +42,25 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | 
List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -91,7 +94,9 @@ with Mistral( "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", }, - ], agent_id="", stream=True) + ], agent_id="", stream=True, response_format={ + "type": "text", + }) with res as event_stream: for event in event_stream: @@ -102,24 +107,25 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | +| `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index c5c45e0f..213ab710 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -26,12 +26,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.complete(model="mistral-small-latest", messages=[ + res = mistral.chat.complete(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=False) + ], stream=False, response_format={ + "type": "text", + }) # Handle response print(res) @@ -40,27 +42,28 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. 
By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -89,12 +92,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.chat.stream(model="mistral-small-latest", messages=[ + res = mistral.chat.stream(model="mistral-large-latest", messages=[ { "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", }, - ], stream=True) + ], stream=True, response_format={ + "type": "text", + }) with res as event_stream: for event in event_stream: @@ -105,27 +110,28 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 87eb8d69..75b8c333 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -40,11 +40,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | -| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -120,7 +120,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.classify(model="Silverado", inputs=[ + res = mistral.classifiers.classify(model="mistral-moderation-latest", inputs=[ "", ]) @@ -131,11 +131,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | -| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | Example | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index d3ce96c2..1e2d560e 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -10,6 +10,7 @@ * [start](#start) - Create a conversation and append entries to it. * [list](#list) - List all created conversations. * [get](#get) - Retrieve a conversation information. +* [delete](#delete) - Delete a conversation. * [append](#append) - Append new entries to an existing conversation. * [get_history](#get_history) - Retrieve all entries in a conversation. * [get_messages](#get_messages) - Retrieve all messages in a conversation. 
@@ -34,7 +35,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start(inputs="", stream=False) + res = mistral.beta.conversations.start(inputs="", stream=False, completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -50,11 +55,13 @@ with Mistral( | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../../models/handoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.Tools](../../models/tools.md)] | :heavy_minus_sign: | N/A | +| `tools` | List[[models.Tools](../../models/tools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| @@ -98,6 +105,7 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -152,6 +160,42 @@ with Mistral( | models.HTTPValidationError | 422 | application/json | | models.SDKError | 4XX, 5XX | \*/\* | +## delete + +Delete a conversation given a conversation_id. + +### Example Usage + + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.conversations.delete(conversation_id="") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation from which we are fetching metadata. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + ## append Run completion on the history of the conversation and the user entries. Return the new created entries. @@ -168,7 +212,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append(conversation_id="", inputs=[], stream=False, store=True, handoff_execution="server") + res = mistral.beta.conversations.append(conversation_id="", inputs=[], stream=False, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -296,7 +344,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart(conversation_id="", inputs="", from_entry_id="", stream=False, store=True, handoff_execution="server") + res = mistral.beta.conversations.restart(conversation_id="", inputs="", from_entry_id="", stream=False, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -314,6 +366,8 @@ with Mistral( | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. 
If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -350,7 +404,11 @@ with Mistral( "tool_call_id": "", "result": "", }, - ], stream=True) + ], stream=True, completion_args={ + "response_format": { + "type": "text", + }, + }) with res as event_stream: for event in event_stream: @@ -368,11 +426,13 @@ with Mistral( | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.ConversationStreamRequestTools](../../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationStreamRequestTools](../../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| @@ -403,7 +463,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append_stream(conversation_id="", inputs="", stream=True, store=True, handoff_execution="server") + res = mistral.beta.conversations.append_stream(conversation_id="", inputs="", stream=True, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) with res as event_stream: for event in event_stream: @@ -459,7 +523,11 @@ with Mistral( "content": "", "prefix": False, }, - ], from_entry_id="", stream=True, store=True, handoff_execution="server") + ], from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ + "response_format": { + "type": "text", + }, + }) with res as event_stream: for event in event_stream: @@ -479,6 +547,8 @@ with Mistral( | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index 71848b07..c1551925 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -7,7 +7,7 @@ ### Available Operations -* [list](#list) - List document in a given library. +* [list](#list) - List documents in a given library. * [upload](#upload) - Upload a new document. * [get](#get) - Retrieve the metadata of a specific document. * [update](#update) - Update the metadata of a specific document. @@ -49,6 +49,7 @@ with Mistral( | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `filters_attributes` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `sort_by` | *Optional[str]* | :heavy_minus_sign: | N/A | | `sort_order` | *Optional[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | @@ -181,6 +182,7 @@ with Mistral( | `library_id` | *str* | :heavy_check_mark: | N/A | | `document_id` | *str* | :heavy_check_mark: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, [models.Attributes](../../models/attributes.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 9554e7b7..b03ea9cd 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -37,14 +37,14 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | -| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | -| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings. | | -| `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | -| `encoding_format` | [Optional[models.EncodingFormat]](../../models/encodingformat.md) | :heavy_minus_sign: | N/A | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | +| `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | +| `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | +| `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | +| `encoding_format` | [Optional[models.EncodingFormat]](../../models/encodingformat.md) | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index e8d28c86..0a68c1f5 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -78,7 +78,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.list(page=0, page_size=100) + res = mistral.files.list(page=0, page_size=100, include_total=True) # Handle response print(res) @@ -91,6 +91,7 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `include_total` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `sample_type` | List[[models.SampleType](../../models/sampletype.md)] | :heavy_minus_sign: | N/A | | `source` | List[[models.Source](../../models/source.md)] | :heavy_minus_sign: | N/A | | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index cce1c070..d282a810 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -37,7 +37,7 @@ with Mistral( | Parameter | Type | Required | 
Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | 
*str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -45,6 +45,7 @@ with Mistral( | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | @@ -76,7 +77,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.stream(model="codestral-2405", prompt="def", top_p=1, stream=True, suffix="return a+b") + res = mistral.fim.stream(model="codestral-latest", prompt="def", top_p=1, stream=True, suffix="return a+b") with res as event_stream: for event in event_stream: @@ -89,7 +90,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | @@ -97,6 +98,7 @@ with Mistral( | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index 44b7fcf2..767ba56d 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -11,6 +11,7 @@ * [list](#list) - List agent entities. * [get](#get) - Retrieve an agent entity. * [update](#update) - Update an agent entity. +* [delete](#delete) - Delete an agent entity. * [update_version](#update_version) - Update an agent version. ## create @@ -29,7 +30,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.create(model="LeBaron", name="") + res = mistral.beta.agents.create(model="LeBaron", name="", completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -47,6 +52,7 @@ with Mistral( | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -89,6 +95,11 @@ with Mistral( | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `page` | *Optional[int]* | :heavy_minus_sign: | N/A | | `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `sources` | List[[models.RequestSource](../../models/requestsource.md)] | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -130,6 +141,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -159,7 +171,11 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.update(agent_id="") + res = mistral.beta.agents.update(agent_id="", completion_args={ + "response_format": { + "type": "text", + }, + }) # Handle response print(res) @@ -178,6 +194,8 @@ with Mistral( | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -191,6 +209,42 @@ with Mistral( | models.HTTPValidationError | 422 | application/json | | models.SDKError | 4XX, 5XX | \*/\* | +## delete + +Delete an agent entity. + +### Example Usage + + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.agents.delete(agent_id="") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + ## update_version Switch the version of an agent. diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 0ef3f138..469a2029 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -75,7 +75,7 @@ with Mistral( res = mistral.batch.jobs.create(input_files=[ "fe3343a2-3b8d-404b-ba32-a78dede2614a", - ], endpoint="/v1/moderations", timeout_hours=24) + ], endpoint="/v1/moderations", model="mistral-small-latest", timeout_hours=24) # Handle response print(res) @@ -84,15 +84,15 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `input_files` | List[*str*] | :heavy_check_mark: | N/A | -| `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | N/A | -| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `input_files` | List[*str*] | :heavy_check_mark: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | +| `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | mistral-small-latest | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. 
| | +| `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | +| `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 3877c545..94491520 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -49,10 +49,9 @@ with Mistral( ### Errors -| Error Type | Status Code | Content Type | -| -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| Error Type | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| models.SDKError | 4XX, 5XX | \*/\* | ## retrieve diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index c0c1293e..9264d104 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -30,6 +30,10 @@ with Mistral( "url": "https://measly-scrap.com", }, "type": "image_url", + }, bbox_annotation_format={ + "type": "text", + }, document_annotation_format={ + "type": "text", }) # Handle response @@ -39,18 +43,21 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | +| `table_format` | [OptionalNullable[models.TableFormat]](../../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index 022066ac..52b7884e 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -8,7 +8,7 @@ API for audio transcription. ### Available Operations * [complete](#complete) - Create Transcription -* [stream](#stream) - Create streaming transcription (SSE) +* [stream](#stream) - Create Streaming Transcription (SSE) ## complete @@ -35,16 +35,16 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | -| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | -| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | -| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. 
| -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | Example | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | voxtral-mini-latest | +| `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | | +| `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | +| `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | +| `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | ### Response @@ -58,7 +58,7 @@ with Mistral( ## stream -Create streaming transcription (SSE) +Create Streaming Transcription (SSE) ### Example Usage diff --git a/pyproject.toml b/pyproject.toml index 4bea6627..58efd52d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.9.12" +version = "1.10.0" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" },] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index fa0b5e7d..851d6fbe 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.9.11" +__version__: str = "1.10.0" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.687.13" -__user_agent__: str = "speakeasy-sdk/python 1.9.11 2.687.13 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.10.0 2.687.13 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/accesses.py b/src/mistralai/accesses.py index ea33517b..dd8ffade 100644 --- a/src/mistralai/accesses.py +++ b/src/mistralai/accesses.py @@ -194,10 +194,10 @@ def update_or_create( self, *, library_id: str, - org_id: str, level: models.ShareEnum, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -208,10 +208,10 @@ def update_or_create( Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. 
:param library_id: - :param org_id: :param level: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. + :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -300,10 +300,10 @@ async def update_or_create_async( self, *, library_id: str, - org_id: str, level: models.ShareEnum, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -314,10 +314,10 @@ async def update_or_create_async( Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. :param library_id: - :param org_id: :param level: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. + :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -406,9 +406,9 @@ def delete( self, *, library_id: str, - org_id: str, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -419,9 +419,9 @@ def delete( Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. 
You have to be the owner of the library to delete an acces other than yours. :param library_id: - :param org_id: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. + :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -509,9 +509,9 @@ async def delete_async( self, *, library_id: str, - org_id: str, share_with_uuid: str, share_with_type: models.EntityType, + org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -522,9 +522,9 @@ async def delete_async( Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. :param library_id: - :param org_id: :param share_with_uuid: The id of the entity (user, workspace or organization) to share with :param share_with_type: The type of entity, used to share a library. 
+ :param org_id: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index b220ca5b..173921fa 100644 --- a/src/mistralai/agents.py +++ b/src/mistralai/agents.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Union class Agents(BaseSDK): @@ -29,6 +29,7 @@ def complete( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -62,13 +63,14 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. :param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param retries: Override the default retry configuration for this method @@ -91,6 +93,7 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.AgentsCompletionRequestMessages] ), @@ -188,6 +191,7 @@ async def complete_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -221,13 +225,14 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. :param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
:param retries: Override the default retry configuration for this method @@ -250,6 +255,7 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.AgentsCompletionRequestMessages] ), @@ -347,6 +353,7 @@ def stream( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -382,13 +389,14 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. :param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method @@ -411,6 +419,7 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.AgentsCompletionStreamRequestMessages] ), @@ -516,6 +525,7 @@ async def stream_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -551,13 +561,14 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: + :param metadata: + :param response_format: Specify the format that the model must output. 
By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. :param parallel_tool_calls: :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method @@ -580,6 +591,7 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.AgentsCompletionStreamRequestMessages] ), diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 67777a1a..6a8058f7 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Union # region imports from typing import Type @@ -105,6 +105,7 @@ def complete( stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -141,14 +142,15 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method @@ -174,6 +176,7 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model(messages, List[models.Messages]), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -263,6 +266,7 @@ async def complete_async( stream: Optional[bool] = False, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -299,14 +303,15 @@ async def complete_async( :param stream: Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method @@ -332,6 +337,7 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model(messages, List[models.Messages]), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -429,6 +435,7 @@ def stream( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -467,14 +474,15 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method @@ -500,6 +508,7 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.ChatCompletionStreamRequestMessages] ), @@ -607,6 +616,7 @@ async def stream_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -645,14 +655,15 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
+ :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method @@ -678,6 +689,7 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.ChatCompletionStreamRequestMessages] ), diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 64551a96..a7d58abd 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Union # region imports import typing @@ -228,15 +228,15 @@ def start( store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, + tools: Optional[Union[List[models.Tools], List[models.ToolsTypedDict]]] = None, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[int] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -252,11 +252,13 @@ def start( :param store: :param handoff_execution: :param instructions: - :param tools: + :param tools: List of tools which are available to the model during the conversation. 
:param completion_args: :param name: :param description: + :param metadata: :param agent_id: + :param agent_version: :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -279,13 +281,15 @@ def start( store=store, handoff_execution=handoff_execution, instructions=instructions, - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]), + tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), name=name, description=description, + metadata=metadata, agent_id=agent_id, + agent_version=agent_version, model=model, ) @@ -356,15 +360,15 @@ async def start_async( store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, + tools: Optional[Union[List[models.Tools], List[models.ToolsTypedDict]]] = None, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[int] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -380,11 +384,13 @@ async def start_async( :param store: :param handoff_execution: :param instructions: - :param tools: + :param tools: List of tools which are available to the model during the conversation. 
:param completion_args: :param name: :param description: + :param metadata: :param agent_id: + :param agent_version: :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -407,13 +413,15 @@ async def start_async( store=store, handoff_execution=handoff_execution, instructions=instructions, - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tools]]), + tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), name=name, description=description, + metadata=metadata, agent_id=agent_id, + agent_version=agent_version, model=model, ) @@ -481,6 +489,7 @@ def list( *, page: Optional[int] = 0, page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -492,6 +501,7 @@ def list( :param page: :param page_size: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -510,6 +520,7 @@ def list( request = models.AgentsAPIV1ConversationsListRequest( page=page, page_size=page_size, + metadata=metadata, ) req = self._build_request( @@ -573,6 +584,7 @@ async def list_async( *, page: Optional[int] = 0, page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -584,6 +596,7 @@ async def list_async( :param page: :param page_size: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the 
default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -602,6 +615,7 @@ async def list_async( request = models.AgentsAPIV1ConversationsListRequest( page=page, page_size=page_size, + metadata=metadata, ) req = self._build_request_async( @@ -842,6 +856,184 @@ async def get_async( raise models.SDKError("Unexpected response received", http_res) + def delete( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a conversation. + + Delete a conversation given a conversation_id. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a conversation. + + Delete a conversation given a conversation_id. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + def append( self, *, @@ -1450,6 +1642,8 @@ def restart( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1466,6 +1660,8 @@ def restart( :param store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1491,7 +1687,9 @@ def restart( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + metadata=metadata, from_entry_id=from_entry_id, + agent_version=agent_version, ), ) @@ -1572,6 +1770,8 @@ async def restart_async( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1588,6 +1788,8 @@ async def restart_async( :param store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1613,7 +1815,9 @@ async def restart_async( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + metadata=metadata, from_entry_id=from_entry_id, + agent_version=agent_version, ), ) @@ -1690,18 +1894,20 @@ def start_stream( models.ConversationStreamRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ + tools: Optional[ Union[ List[models.ConversationStreamRequestTools], List[models.ConversationStreamRequestToolsTypedDict], ] - ] = UNSET, + ] = None, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[int] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -1717,11 +1923,13 @@ def start_stream( :param store: :param handoff_execution: :param instructions: - :param tools: + :param tools: List of tools which are available to the model during the conversation. 
:param completion_args: :param name: :param description: + :param metadata: :param agent_id: + :param agent_version: :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -1745,14 +1953,16 @@ def start_stream( handoff_execution=handoff_execution, instructions=instructions, tools=utils.get_pydantic_model( - tools, OptionalNullable[List[models.ConversationStreamRequestTools]] + tools, Optional[List[models.ConversationStreamRequestTools]] ), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), name=name, description=description, + metadata=metadata, agent_id=agent_id, + agent_version=agent_version, model=model, ) @@ -1832,18 +2042,20 @@ async def start_stream_async( models.ConversationStreamRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ + tools: Optional[ Union[ List[models.ConversationStreamRequestTools], List[models.ConversationStreamRequestToolsTypedDict], ] - ] = UNSET, + ] = None, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[int] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -1859,11 +2071,13 @@ async def start_stream_async( :param store: :param handoff_execution: :param instructions: - :param tools: + :param tools: List of tools which are available to the model during the conversation. 
:param completion_args: :param name: :param description: + :param metadata: :param agent_id: + :param agent_version: :param model: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -1887,14 +2101,16 @@ async def start_stream_async( handoff_execution=handoff_execution, instructions=instructions, tools=utils.get_pydantic_model( - tools, OptionalNullable[List[models.ConversationStreamRequestTools]] + tools, Optional[List[models.ConversationStreamRequestTools]] ), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), name=name, description=description, + metadata=metadata, agent_id=agent_id, + agent_version=agent_version, model=model, ) @@ -2230,6 +2446,8 @@ def restart_stream( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -2246,6 +2464,8 @@ def restart_stream( :param store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -2271,7 +2491,9 @@ def restart_stream( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + metadata=metadata, from_entry_id=from_entry_id, + agent_version=agent_version, ), ) @@ -2359,6 +2581,8 @@ async def restart_stream_async( completion_args: Optional[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -2375,6 +2599,8 @@ async def restart_stream_async( :param store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -2400,7 +2626,9 @@ async def restart_stream_async( completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + metadata=metadata, from_entry_id=from_entry_id, + agent_version=agent_version, ), ) diff --git a/src/mistralai/documents.py b/src/mistralai/documents.py index 5f8c6b9f..c1497bff 100644 --- a/src/mistralai/documents.py +++ b/src/mistralai/documents.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional, Union +from typing import Any, Dict, Mapping, Optional, Union class Documents(BaseSDK): @@ -19,6 +19,7 @@ def list( search: OptionalNullable[str] = UNSET, page_size: Optional[int] = 100, page: Optional[int] = 0, + filters_attributes: OptionalNullable[str] = UNSET, sort_by: Optional[str] = "created_at", sort_order: Optional[str] = "desc", retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -26,7 +27,7 @@ def list( timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> models.ListDocumentOut: - r"""List document in a given library. + r"""List documents in a given library. Given a library, lists the document that have been uploaded to that library. 
@@ -34,6 +35,7 @@ def list( :param search: :param page_size: :param page: + :param filters_attributes: :param sort_by: :param sort_order: :param retries: Override the default retry configuration for this method @@ -56,6 +58,7 @@ def list( search=search, page_size=page_size, page=page, + filters_attributes=filters_attributes, sort_by=sort_by, sort_order=sort_order, ) @@ -123,6 +126,7 @@ async def list_async( search: OptionalNullable[str] = UNSET, page_size: Optional[int] = 100, page: Optional[int] = 0, + filters_attributes: OptionalNullable[str] = UNSET, sort_by: Optional[str] = "created_at", sort_order: Optional[str] = "desc", retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -130,7 +134,7 @@ async def list_async( timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> models.ListDocumentOut: - r"""List document in a given library. + r"""List documents in a given library. Given a library, lists the document that have been uploaded to that library. 
@@ -138,6 +142,7 @@ async def list_async( :param search: :param page_size: :param page: + :param filters_attributes: :param sort_by: :param sort_order: :param retries: Override the default retry configuration for this method @@ -160,6 +165,7 @@ async def list_async( search=search, page_size=page_size, page=page, + filters_attributes=filters_attributes, sort_by=sort_by, sort_order=sort_order, ) @@ -612,6 +618,9 @@ def update( library_id: str, document_id: str, name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -624,6 +633,7 @@ def update( :param library_id: :param document_id: :param name: + :param attributes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -644,6 +654,7 @@ def update( document_id=document_id, document_update_in=models.DocumentUpdateIn( name=name, + attributes=attributes, ), ) @@ -716,6 +727,9 @@ async def update_async( library_id: str, document_id: str, name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -728,6 +742,7 @@ async def update_async( :param library_id: :param document_id: :param name: + :param attributes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -748,6 +763,7 @@ async def update_async( 
document_id=document_id, document_update_in=models.DocumentUpdateIn( name=name, + attributes=attributes, ), ) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 1822a1ec..76e8e719 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -31,9 +31,9 @@ def create( Embeddings - :param model: ID of the model to use. - :param inputs: Text to embed. - :param output_dimension: The dimension of the output embeddings. + :param model: The ID of the model to be used for embedding. + :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. :param output_dtype: :param encoding_format: :param retries: Override the default retry configuration for this method @@ -137,9 +137,9 @@ async def create_async( Embeddings - :param model: ID of the model to use. - :param inputs: Text to embed. - :param output_dimension: The dimension of the output embeddings. + :param model: The ID of the model to be used for embedding. + :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. 
:param output_dtype: :param encoding_format: :param retries: Override the default retry configuration for this method diff --git a/src/mistralai/files.py b/src/mistralai/files.py index c6e438af..ae4eb779 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -212,6 +212,7 @@ def list( *, page: Optional[int] = 0, page_size: Optional[int] = 100, + include_total: Optional[bool] = True, sample_type: OptionalNullable[List[models.SampleType]] = UNSET, source: OptionalNullable[List[models.Source]] = UNSET, search: OptionalNullable[str] = UNSET, @@ -227,6 +228,7 @@ def list( :param page: :param page_size: + :param include_total: :param sample_type: :param source: :param search: @@ -249,6 +251,7 @@ def list( request = models.FilesAPIRoutesListFilesRequest( page=page, page_size=page_size, + include_total=include_total, sample_type=sample_type, source=source, search=search, @@ -310,6 +313,7 @@ async def list_async( *, page: Optional[int] = 0, page_size: Optional[int] = 100, + include_total: Optional[bool] = True, sample_type: OptionalNullable[List[models.SampleType]] = UNSET, source: OptionalNullable[List[models.Source]] = UNSET, search: OptionalNullable[str] = UNSET, @@ -325,6 +329,7 @@ async def list_async( :param page: :param page_size: + :param include_total: :param sample_type: :param source: :param search: @@ -347,6 +352,7 @@ async def list_async( request = models.FilesAPIRoutesListFilesRequest( page=page, page_size=page_size, + include_total=include_total, sample_type=sample_type, source=source, search=search, diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index fa7b15c2..49bdb32e 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional, Union +from typing import Any, Dict, Mapping, Optional, 
Union class Fim(BaseSDK): @@ -28,6 +28,7 @@ def complete( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -39,7 +40,7 @@ def complete( FIM completion. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -47,6 +48,7 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param suffix: Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method @@ -72,6 +74,7 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, @@ -152,6 +155,7 @@ async def complete_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -163,7 +167,7 @@ async def complete_async( FIM completion. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -171,6 +175,7 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method @@ -196,6 +201,7 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, @@ -276,6 +282,7 @@ def stream( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -287,7 +294,7 @@ def stream( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -295,6 +302,7 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method @@ -320,6 +328,7 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, @@ -408,6 +417,7 @@ async def stream_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -419,7 +429,7 @@ async def stream_async( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. 
Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -427,6 +437,7 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. 
:param retries: Override the default retry configuration for this method @@ -452,6 +463,7 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py index 65f256d6..0d9ad0b7 100644 --- a/src/mistralai/mistral_agents.py +++ b/src/mistralai/mistral_agents.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Union class MistralAgents(BaseSDK): @@ -29,6 +29,7 @@ def create( ] = None, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -45,6 +46,7 @@ def create( :param completion_args: White-listed arguments from the completion API :param description: :param handoffs: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -72,6 +74,7 @@ def create( name=name, description=description, handoffs=handoffs, + metadata=metadata, ) req = self._build_request( @@ -150,6 +153,7 @@ async def create_async( ] = None, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -166,6 +170,7 @@ async def create_async( :param 
completion_args: White-listed arguments from the completion API :param description: :param handoffs: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -193,6 +198,7 @@ async def create_async( name=name, description=description, handoffs=handoffs, + metadata=metadata, ) req = self._build_request_async( @@ -259,6 +265,11 @@ def list( *, page: Optional[int] = 0, page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, + name: OptionalNullable[str] = UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -270,6 +281,11 @@ def list( :param page: :param page_size: + :param deployment_chat: + :param sources: + :param name: + :param id: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -288,6 +304,11 @@ def list( request = models.AgentsAPIV1AgentsListRequest( page=page, page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + id=id, + metadata=metadata, ) req = self._build_request( @@ -351,6 +372,11 @@ async def list_async( *, page: Optional[int] = 0, page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, + name: OptionalNullable[str] = UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] 
= UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -362,6 +388,11 @@ async def list_async( :param page: :param page_size: + :param deployment_chat: + :param sources: + :param name: + :param id: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -380,6 +411,11 @@ async def list_async( request = models.AgentsAPIV1AgentsListRequest( page=page, page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + id=id, + metadata=metadata, ) req = self._build_request_async( @@ -442,6 +478,7 @@ def get( self, *, agent_id: str, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -452,6 +489,7 @@ def get( Given an agent retrieve an agent entity with its attributes. :param agent_id: + :param agent_version: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -469,6 +507,7 @@ def get( request = models.AgentsAPIV1AgentsGetRequest( agent_id=agent_id, + agent_version=agent_version, ) req = self._build_request( @@ -531,6 +570,7 @@ async def get_async( self, *, agent_id: str, + agent_version: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -541,6 +581,7 @@ async def get_async( Given an agent retrieve an agent entity with its attributes. 
:param agent_id: + :param agent_version: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -558,6 +599,7 @@ async def get_async( request = models.AgentsAPIV1AgentsGetRequest( agent_id=agent_id, + agent_version=agent_version, ) req = self._build_request_async( @@ -634,6 +676,8 @@ def update( name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, + deployment_chat: OptionalNullable[bool] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -651,6 +695,8 @@ def update( :param name: :param description: :param handoffs: + :param deployment_chat: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -680,6 +726,8 @@ def update( name=name, description=description, handoffs=handoffs, + deployment_chat=deployment_chat, + metadata=metadata, ), ) @@ -764,6 +812,8 @@ async def update_async( name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, + deployment_chat: OptionalNullable[bool] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -781,6 +831,8 @@ async def update_async( :param name: :param description: :param handoffs: + :param deployment_chat: + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override 
the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -810,6 +862,8 @@ async def update_async( name=name, description=description, handoffs=handoffs, + deployment_chat=deployment_chat, + metadata=metadata, ), ) @@ -876,6 +930,180 @@ async def update_async( raise models.SDKError("Unexpected response received", http_res) + def delete( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", 
http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=[], + security_source=get_security_from_env( + self.sdk_configuration.security, 
models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + def update_version( self, *, diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index fb0a0de7..6c213756 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -237,12 +237,12 @@ def create( Create a new batch job, it will be queued for processing. - :param input_files: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` :param endpoint: - :param model: - :param agent_id: - :param metadata: - :param timeout_hours: + :param model: The model to be used for batch inference. 
+ :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. + :param metadata: The metadata of your choice to be associated with the batch inference job. + :param timeout_hours: The timeout in hours for the batch inference job. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -338,12 +338,12 @@ async def create_async( Create a new batch job, it will be queued for processing. - :param input_files: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` :param endpoint: - :param model: - :param agent_id: - :param metadata: - :param timeout_hours: + :param model: The model to be used for batch inference. + :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. + :param metadata: The metadata of your choice to be associated with the batch inference job. + :param timeout_hours: The timeout in hours for the batch inference job. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 0298e73b..7895aeaa 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -41,6 +41,10 @@ AgentHandoffStartedEventType, AgentHandoffStartedEventTypedDict, ) + from .agents_api_v1_agents_deleteop import ( + AgentsAPIV1AgentsDeleteRequest, + AgentsAPIV1AgentsDeleteRequestTypedDict, + ) from .agents_api_v1_agents_getop import ( AgentsAPIV1AgentsGetRequest, AgentsAPIV1AgentsGetRequestTypedDict, @@ -65,6 +69,10 @@ AgentsAPIV1ConversationsAppendRequest, AgentsAPIV1ConversationsAppendRequestTypedDict, ) + from .agents_api_v1_conversations_deleteop import ( + AgentsAPIV1ConversationsDeleteRequest, + AgentsAPIV1ConversationsDeleteRequestTypedDict, + ) from .agents_api_v1_conversations_getop import ( AgentsAPIV1ConversationsGetRequest, AgentsAPIV1ConversationsGetRequestTypedDict, @@ -374,7 +382,12 @@ ) from .documentout import DocumentOut, DocumentOutTypedDict from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict - from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict + from .documentupdatein import ( + Attributes, + AttributesTypedDict, + DocumentUpdateIn, + DocumentUpdateInTypedDict, + ) from .documenturlchunk import ( DocumentURLChunk, DocumentURLChunkType, @@ -720,8 +733,15 @@ from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict - from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict + from .ocrrequest import ( + Document, + DocumentTypedDict, + OCRRequest, + OCRRequestTypedDict, + 
TableFormat, + ) from .ocrresponse import OCRResponse, OCRResponseTypedDict + from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict from .paginationinfo import PaginationInfo, PaginationInfoTypedDict @@ -732,6 +752,7 @@ ReferenceChunkType, ReferenceChunkTypedDict, ) + from .requestsource import RequestSource from .responsedoneevent import ( ResponseDoneEvent, ResponseDoneEventType, @@ -792,15 +813,21 @@ from .toolchoiceenum import ToolChoiceEnum from .toolexecutiondeltaevent import ( ToolExecutionDeltaEvent, + ToolExecutionDeltaEventName, + ToolExecutionDeltaEventNameTypedDict, ToolExecutionDeltaEventType, ToolExecutionDeltaEventTypedDict, ) from .toolexecutiondoneevent import ( ToolExecutionDoneEvent, + ToolExecutionDoneEventName, + ToolExecutionDoneEventNameTypedDict, ToolExecutionDoneEventType, ToolExecutionDoneEventTypedDict, ) from .toolexecutionentry import ( + Name, + NameTypedDict, ToolExecutionEntry, ToolExecutionEntryObject, ToolExecutionEntryType, @@ -808,6 +835,8 @@ ) from .toolexecutionstartedevent import ( ToolExecutionStartedEvent, + ToolExecutionStartedEventName, + ToolExecutionStartedEventNameTypedDict, ToolExecutionStartedEventType, ToolExecutionStartedEventTypedDict, ) @@ -928,6 +957,8 @@ "AgentUpdateRequestTools", "AgentUpdateRequestToolsTypedDict", "AgentUpdateRequestTypedDict", + "AgentsAPIV1AgentsDeleteRequest", + "AgentsAPIV1AgentsDeleteRequestTypedDict", "AgentsAPIV1AgentsGetRequest", "AgentsAPIV1AgentsGetRequestTypedDict", "AgentsAPIV1AgentsListRequest", @@ -940,6 +971,8 @@ "AgentsAPIV1ConversationsAppendRequestTypedDict", "AgentsAPIV1ConversationsAppendStreamRequest", "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", + "AgentsAPIV1ConversationsDeleteRequest", + "AgentsAPIV1ConversationsDeleteRequestTypedDict", "AgentsAPIV1ConversationsGetRequest", 
"AgentsAPIV1ConversationsGetRequestTypedDict", "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", @@ -980,6 +1013,8 @@ "AssistantMessageContentTypedDict", "AssistantMessageRole", "AssistantMessageTypedDict", + "Attributes", + "AttributesTypedDict", "AudioChunk", "AudioChunkType", "AudioChunkTypedDict", @@ -1211,6 +1246,7 @@ "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModelType", "FinishReason", + "Format", "Function", "FunctionCall", "FunctionCallEntry", @@ -1412,6 +1448,8 @@ "ModerationObjectTypedDict", "ModerationResponse", "ModerationResponseTypedDict", + "Name", + "NameTypedDict", "NoResponseError", "OCRImageObject", "OCRImageObjectTypedDict", @@ -1423,6 +1461,8 @@ "OCRRequestTypedDict", "OCRResponse", "OCRResponseTypedDict", + "OCRTableObject", + "OCRTableObjectTypedDict", "OCRUsageInfo", "OCRUsageInfoTypedDict", "Object", @@ -1444,6 +1484,7 @@ "ReferenceChunkTypedDict", "Repositories", "RepositoriesTypedDict", + "RequestSource", "Response1", "Response1TypedDict", "ResponseBody", @@ -1490,6 +1531,7 @@ "SystemMessageContentChunksTypedDict", "SystemMessageContentTypedDict", "SystemMessageTypedDict", + "TableFormat", "TextChunk", "TextChunkType", "TextChunkTypedDict", @@ -1506,9 +1548,13 @@ "ToolChoiceEnum", "ToolChoiceTypedDict", "ToolExecutionDeltaEvent", + "ToolExecutionDeltaEventName", + "ToolExecutionDeltaEventNameTypedDict", "ToolExecutionDeltaEventType", "ToolExecutionDeltaEventTypedDict", "ToolExecutionDoneEvent", + "ToolExecutionDoneEventName", + "ToolExecutionDoneEventNameTypedDict", "ToolExecutionDoneEventType", "ToolExecutionDoneEventTypedDict", "ToolExecutionEntry", @@ -1516,6 +1562,8 @@ "ToolExecutionEntryType", "ToolExecutionEntryTypedDict", "ToolExecutionStartedEvent", + "ToolExecutionStartedEventName", + "ToolExecutionStartedEventNameTypedDict", "ToolExecutionStartedEventType", "ToolExecutionStartedEventTypedDict", "ToolFileChunk", @@ -1612,6 +1660,8 @@ "AgentHandoffStartedEvent": ".agenthandoffstartedevent", 
"AgentHandoffStartedEventType": ".agenthandoffstartedevent", "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", + "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", @@ -1624,6 +1674,8 @@ "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", "AgentsAPIV1ConversationsGetResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", @@ -1860,6 +1912,8 @@ "DocumentOutTypedDict": ".documentout", "DocumentTextContent": ".documenttextcontent", "DocumentTextContentTypedDict": ".documenttextcontent", + "Attributes": ".documentupdatein", + "AttributesTypedDict": ".documentupdatein", "DocumentUpdateIn": ".documentupdatein", "DocumentUpdateInTypedDict": ".documentupdatein", "DocumentURLChunk": ".documenturlchunk", @@ -2126,8 +2180,12 @@ "DocumentTypedDict": ".ocrrequest", "OCRRequest": ".ocrrequest", "OCRRequestTypedDict": ".ocrrequest", + "TableFormat": ".ocrrequest", "OCRResponse": ".ocrresponse", "OCRResponseTypedDict": ".ocrresponse", + "Format": ".ocrtableobject", + "OCRTableObject": ".ocrtableobject", + "OCRTableObjectTypedDict": ".ocrtableobject", "OCRUsageInfo": ".ocrusageinfo", "OCRUsageInfoTypedDict": 
".ocrusageinfo", "OutputContentChunks": ".outputcontentchunks", @@ -2141,6 +2199,7 @@ "ReferenceChunk": ".referencechunk", "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", + "RequestSource": ".requestsource", "ResponseDoneEvent": ".responsedoneevent", "ResponseDoneEventType": ".responsedoneevent", "ResponseDoneEventTypedDict": ".responsedoneevent", @@ -2197,16 +2256,24 @@ "ToolChoiceTypedDict": ".toolchoice", "ToolChoiceEnum": ".toolchoiceenum", "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventType": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", "ToolExecutionDoneEvent": ".toolexecutiondoneevent", + "ToolExecutionDoneEventName": ".toolexecutiondoneevent", + "ToolExecutionDoneEventNameTypedDict": ".toolexecutiondoneevent", "ToolExecutionDoneEventType": ".toolexecutiondoneevent", "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", + "Name": ".toolexecutionentry", + "NameTypedDict": ".toolexecutionentry", "ToolExecutionEntry": ".toolexecutionentry", "ToolExecutionEntryObject": ".toolexecutionentry", "ToolExecutionEntryType": ".toolexecutionentry", "ToolExecutionEntryTypedDict": ".toolexecutionentry", "ToolExecutionStartedEvent": ".toolexecutionstartedevent", + "ToolExecutionStartedEventName": ".toolexecutionstartedevent", + "ToolExecutionStartedEventNameTypedDict": ".toolexecutionstartedevent", "ToolExecutionStartedEventType": ".toolexecutionstartedevent", "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", "ToolFileChunk": ".toolfilechunk", diff --git a/src/mistralai/models/agent.py b/src/mistralai/models/agent.py index b6bf17ab..5d0b39fa 100644 --- a/src/mistralai/models/agent.py +++ b/src/mistralai/models/agent.py @@ -12,7 +12,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, 
UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -50,8 +50,11 @@ class AgentTypedDict(TypedDict): name: str id: str version: int + versions: List[int] created_at: datetime updated_at: datetime + deployment_chat: bool + source: str instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" tools: NotRequired[List[AgentToolsTypedDict]] @@ -60,6 +63,7 @@ class AgentTypedDict(TypedDict): r"""White-listed arguments from the completion API""" description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] + metadata: NotRequired[Nullable[Dict[str, Any]]] object: NotRequired[AgentObject] @@ -72,10 +76,16 @@ class Agent(BaseModel): version: int + versions: List[int] + created_at: datetime updated_at: datetime + deployment_chat: bool + + source: str + instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" @@ -89,6 +99,8 @@ class Agent(BaseModel): handoffs: OptionalNullable[List[str]] = UNSET + metadata: OptionalNullable[Dict[str, Any]] = UNSET + object: Optional[AgentObject] = "agent" @model_serializer(mode="wrap") @@ -99,9 +111,10 @@ def serialize_model(self, handler): "completion_args", "description", "handoffs", + "metadata", "object", ] - nullable_fields = ["instructions", "description", "handoffs"] + nullable_fields = ["instructions", "description", "handoffs", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py index 42ab84f5..7fa3dfe9 100644 --- a/src/mistralai/models/agentconversation.py +++ b/src/mistralai/models/agentconversation.py @@ -4,7 +4,7 @@ from 
datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Literal, Optional +from typing import Any, Dict, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -20,7 +20,10 @@ class AgentConversationTypedDict(TypedDict): r"""Name given to the conversation.""" description: NotRequired[Nullable[str]] r"""Description of the what the conversation is about.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" object: NotRequired[AgentConversationObject] + agent_version: NotRequired[Nullable[int]] class AgentConversation(BaseModel): @@ -38,12 +41,17 @@ class AgentConversation(BaseModel): description: OptionalNullable[str] = UNSET r"""Description of the what the conversation is about.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + object: Optional[AgentConversationObject] = "conversation" + agent_version: OptionalNullable[int] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name", "description", "object"] - nullable_fields = ["name", "description"] + optional_fields = ["name", "description", "metadata", "object", "agent_version"] + nullable_fields = ["name", "description", "metadata", "agent_version"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentcreationrequest.py b/src/mistralai/models/agentcreationrequest.py index 83a27028..6a14201e 100644 --- a/src/mistralai/models/agentcreationrequest.py +++ b/src/mistralai/models/agentcreationrequest.py @@ -11,7 +11,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union 
from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -52,6 +52,7 @@ class AgentCreationRequestTypedDict(TypedDict): r"""White-listed arguments from the completion API""" description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] + metadata: NotRequired[Nullable[Dict[str, Any]]] class AgentCreationRequest(BaseModel): @@ -72,6 +73,8 @@ class AgentCreationRequest(BaseModel): handoffs: OptionalNullable[List[str]] = UNSET + metadata: OptionalNullable[Dict[str, Any]] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -80,8 +83,9 @@ def serialize_model(self, handler): "completion_args", "description", "handoffs", + "metadata", ] - nullable_fields = ["instructions", "description", "handoffs"] + nullable_fields = ["instructions", "description", "handoffs", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agents_api_v1_agents_deleteop.py b/src/mistralai/models/agents_api_v1_agents_deleteop.py new file mode 100644 index 00000000..38e04953 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_deleteop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsDeleteRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/agents_api_v1_agents_getop.py b/src/mistralai/models/agents_api_v1_agents_getop.py index 5dbcecc1..dced6dbb 100644 --- a/src/mistralai/models/agents_api_v1_agents_getop.py +++ b/src/mistralai/models/agents_api_v1_agents_getop.py @@ -1,16 +1,53 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): agent_id: str + agent_version: NotRequired[Nullable[int]] class AgentsAPIV1AgentsGetRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + + agent_version: Annotated[ + OptionalNullable[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["agent_version"] + nullable_fields = ["agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = 
serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agents_api_v1_agents_listop.py b/src/mistralai/models/agents_api_v1_agents_listop.py index 25f48a62..69a157a6 100644 --- a/src/mistralai/models/agents_api_v1_agents_listop.py +++ b/src/mistralai/models/agents_api_v1_agents_listop.py @@ -1,15 +1,22 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel +from .requestsource import RequestSource +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import FieldMetadata, QueryParamMetadata -from typing import Optional +from pydantic import model_serializer +from typing import Any, Dict, List, Optional from typing_extensions import Annotated, NotRequired, TypedDict class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] + deployment_chat: NotRequired[Nullable[bool]] + sources: NotRequired[Nullable[List[RequestSource]]] + name: NotRequired[Nullable[str]] + id: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] class AgentsAPIV1AgentsListRequest(BaseModel): @@ -22,3 +29,66 @@ class AgentsAPIV1AgentsListRequest(BaseModel): Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 20 + + deployment_chat: Annotated[ + OptionalNullable[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + sources: Annotated[ + 
OptionalNullable[List[RequestSource]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "deployment_chat", + "sources", + "name", + "id", + "metadata", + ] + nullable_fields = ["deployment_chat", "sources", "name", "id", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/agents_api_v1_conversations_deleteop.py b/src/mistralai/models/agents_api_v1_conversations_deleteop.py new file mode 100644 index 00000000..94126cae --- /dev/null +++ b/src/mistralai/models/agents_api_v1_conversations_deleteop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" + + +class AgentsAPIV1ConversationsDeleteRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching metadata.""" diff --git a/src/mistralai/models/agents_api_v1_conversations_listop.py b/src/mistralai/models/agents_api_v1_conversations_listop.py index f1d3d579..e1c8489b 100644 --- a/src/mistralai/models/agents_api_v1_conversations_listop.py +++ b/src/mistralai/models/agents_api_v1_conversations_listop.py @@ -3,15 +3,17 @@ from __future__ import annotations from .agentconversation import AgentConversation, AgentConversationTypedDict from .modelconversation import ModelConversation, ModelConversationTypedDict -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import FieldMetadata, QueryParamMetadata -from typing import Optional, Union +from pydantic import model_serializer +from typing import Any, Dict, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] + metadata: NotRequired[Nullable[Dict[str, Any]]] class AgentsAPIV1ConversationsListRequest(BaseModel): @@ -25,6 +27,41 @@ class AgentsAPIV1ConversationsListRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 100 + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(style="form", 
explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["page", "page_size", "metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + ResponseBodyTypedDict = TypeAliasType( "ResponseBodyTypedDict", diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index c832edfd..cff4df64 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -15,7 +15,7 @@ from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -77,16 +77,19 @@ class AgentsCompletionRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @@ -111,22 +114,26 @@ class AgentsCompletionRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET tool_choice: Optional[AgentsCompletionRequestToolChoice] = None presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None @@ -142,6 +149,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -152,7 +160,14 @@ def serialize_model(self, handler): "parallel_tool_calls", "prompt_mode", ] - nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"] + nullable_fields = [ + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index 6e619b77..69edc23c 100644 --- 
a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -15,7 +15,7 @@ from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -76,16 +76,19 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict] presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @@ -109,22 +112,26 @@ class AgentsCompletionStreamRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None @@ -140,6 +147,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -150,7 +158,14 @@ def serialize_model(self, handler): "parallel_tool_calls", "prompt_mode", ] - nullable_fields = ["max_tokens", "random_seed", "tools", "n", "prompt_mode"] + nullable_fields = [ + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/agentupdaterequest.py b/src/mistralai/models/agentupdaterequest.py index f6fcb27a..e496907c 100644 --- a/src/mistralai/models/agentupdaterequest.py +++ b/src/mistralai/models/agentupdaterequest.py @@ -11,7 +11,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -52,6 +52,8 @@ class AgentUpdateRequestTypedDict(TypedDict): name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] + deployment_chat: NotRequired[Nullable[bool]] + metadata: NotRequired[Nullable[Dict[str, Any]]] class AgentUpdateRequest(BaseModel): @@ -72,6 +74,10 @@ class AgentUpdateRequest(BaseModel): handoffs: 
OptionalNullable[List[str]] = UNSET + deployment_chat: OptionalNullable[bool] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -82,8 +88,18 @@ def serialize_model(self, handler): "name", "description", "handoffs", + "deployment_chat", + "metadata", + ] + nullable_fields = [ + "instructions", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", ] - nullable_fields = ["instructions", "model", "name", "description", "handoffs"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/audiotranscriptionrequest.py b/src/mistralai/models/audiotranscriptionrequest.py index 371d3ecc..308e2599 100644 --- a/src/mistralai/models/audiotranscriptionrequest.py +++ b/src/mistralai/models/audiotranscriptionrequest.py @@ -14,6 +14,7 @@ class AudioTranscriptionRequestTypedDict(TypedDict): model: str + r"""ID of the model to be used.""" file: NotRequired[FileTypedDict] file_url: NotRequired[Nullable[str]] r"""Url of a file to be transcribed""" @@ -29,6 +30,7 @@ class AudioTranscriptionRequestTypedDict(TypedDict): class AudioTranscriptionRequest(BaseModel): model: Annotated[str, FieldMetadata(multipart=True)] + r"""ID of the model to be used.""" file: Annotated[ Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py index aa0bb5be..475ba863 100644 --- a/src/mistralai/models/batchjobin.py +++ b/src/mistralai/models/batchjobin.py @@ -12,25 +12,35 @@ class BatchJobInTypedDict(TypedDict): input_files: List[str] + r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field.
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" endpoint: APIEndpoint model: NotRequired[Nullable[str]] + r"""The model to be used for batch inference.""" agent_id: NotRequired[Nullable[str]] + r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" metadata: NotRequired[Nullable[Dict[str, str]]] + r"""The metadata of your choice to be associated with the batch inference job.""" timeout_hours: NotRequired[int] + r"""The timeout in hours for the batch inference job.""" class BatchJobIn(BaseModel): input_files: List[str] + r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a \"body\" field.
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" endpoint: Annotated[APIEndpoint, PlainValidator(validate_open_enum(False))] model: OptionalNullable[str] = UNSET + r"""The model to be used for batch inference.""" agent_id: OptionalNullable[str] = UNSET + r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" metadata: OptionalNullable[Dict[str, str]] = UNSET + r"""The metadata of your choice to be associated with the batch inference job.""" timeout_hours: Optional[int] = 24 + r"""The timeout in hours for the batch inference job.""" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index 6f195f13..a309421b 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -15,7 +15,7 @@ from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -53,11 +53,13 @@ "ChatCompletionRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionRequestToolChoice = TypeAliasType( "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionRequestTypedDict(TypedDict): @@ -77,17 +79,23 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. 
Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] @@ -119,24 +127,31 @@ class ChatCompletionRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) @@ -155,6 +170,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -170,6 +186,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "tools", "n", "prompt_mode", diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 0fa102e5..7a28cf01 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -15,7 +15,7 @@ from mistralai.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -57,11 +57,13 @@ "ChatCompletionStreamRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionStreamRequestToolChoice = TypeAliasType( "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionStreamRequestTypedDict(TypedDict): @@ -80,17 +82,23 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. 
Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] @@ -121,24 +129,31 @@ class ChatCompletionStreamRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) @@ -157,6 +172,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -172,6 +188,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "tools", "n", "prompt_mode", diff --git a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py index 0fcca512..bd4368d2 100644 --- a/src/mistralai/models/conversationrequest.py +++ b/src/mistralai/models/conversationrequest.py @@ -12,7 +12,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -50,11 +50,14 @@ class ConversationRequestTypedDict(TypedDict): store: NotRequired[Nullable[bool]] handoff_execution: NotRequired[Nullable[HandoffExecution]] instructions: NotRequired[Nullable[str]] - tools: NotRequired[Nullable[List[ToolsTypedDict]]] + tools: NotRequired[List[ToolsTypedDict]] + r"""List of tools which are 
available to the model during the conversation.""" completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[int]] model: NotRequired[Nullable[str]] @@ -69,7 +72,8 @@ class ConversationRequest(BaseModel): instructions: OptionalNullable[str] = UNSET - tools: OptionalNullable[List[Tools]] = UNSET + tools: Optional[List[Tools]] = None + r"""List of tools which are available to the model during the conversation.""" completion_args: OptionalNullable[CompletionArgs] = UNSET @@ -77,8 +81,12 @@ class ConversationRequest(BaseModel): description: OptionalNullable[str] = UNSET + metadata: OptionalNullable[Dict[str, Any]] = UNSET + agent_id: OptionalNullable[str] = UNSET + agent_version: OptionalNullable[int] = UNSET + model: OptionalNullable[str] = UNSET @model_serializer(mode="wrap") @@ -92,18 +100,21 @@ def serialize_model(self, handler): "completion_args", "name", "description", + "metadata", "agent_id", + "agent_version", "model", ] nullable_fields = [ "store", "handoff_execution", "instructions", - "tools", "completion_args", "name", "description", + "metadata", "agent_id", + "agent_version", "model", ] null_default_fields = [] diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py index 58376140..091917fe 100644 --- a/src/mistralai/models/conversationrestartrequest.py +++ b/src/mistralai/models/conversationrestartrequest.py @@ -3,8 +3,9 @@ from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import 
model_serializer +from typing import Any, Dict, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -22,6 +23,10 @@ class ConversationRestartRequestTypedDict(TypedDict): handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + agent_version: NotRequired[Nullable[int]] + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" class ConversationRestartRequest(BaseModel): @@ -40,3 +45,46 @@ class ConversationRestartRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[int] = UNSET + r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + nullable_fields = ["metadata", "agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py index f213aea3..4bcf255a 100644 --- a/src/mistralai/models/conversationrestartstreamrequest.py +++ b/src/mistralai/models/conversationrestartstreamrequest.py @@ -3,8 +3,9 @@ from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -22,6 +23,10 @@ class ConversationRestartStreamRequestTypedDict(TypedDict): handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom 
metadata for the conversation.""" + agent_version: NotRequired[Nullable[int]] + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" class ConversationRestartStreamRequest(BaseModel): @@ -42,3 +47,46 @@ class ConversationRestartStreamRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[int] = UNSET + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + nullable_fields = ["metadata", "agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py index 0880727e..8c6d56c2 100644 --- a/src/mistralai/models/conversationstreamrequest.py +++ b/src/mistralai/models/conversationstreamrequest.py @@ -12,7 +12,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, 
Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -50,11 +50,14 @@ class ConversationStreamRequestTypedDict(TypedDict): store: NotRequired[Nullable[bool]] handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] instructions: NotRequired[Nullable[str]] - tools: NotRequired[Nullable[List[ConversationStreamRequestToolsTypedDict]]] + tools: NotRequired[List[ConversationStreamRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[int]] model: NotRequired[Nullable[str]] @@ -71,7 +74,8 @@ class ConversationStreamRequest(BaseModel): instructions: OptionalNullable[str] = UNSET - tools: OptionalNullable[List[ConversationStreamRequestTools]] = UNSET + tools: Optional[List[ConversationStreamRequestTools]] = None + r"""List of tools which are available to the model during the conversation.""" completion_args: OptionalNullable[CompletionArgs] = UNSET @@ -79,8 +83,12 @@ class ConversationStreamRequest(BaseModel): description: OptionalNullable[str] = UNSET + metadata: OptionalNullable[Dict[str, Any]] = UNSET + agent_id: OptionalNullable[str] = UNSET + agent_version: OptionalNullable[int] = UNSET + model: OptionalNullable[str] = UNSET @model_serializer(mode="wrap") @@ -94,18 +102,21 @@ def serialize_model(self, handler): "completion_args", "name", "description", + "metadata", "agent_id", + "agent_version", "model", ] nullable_fields = [ "store", "handoff_execution", "instructions", - "tools", "completion_args", "name", "description", + "metadata", "agent_id", + "agent_version", "model", ] null_default_fields = [] diff --git 
a/src/mistralai/models/documentout.py b/src/mistralai/models/documentout.py index 65f1be80..81d9605f 100644 --- a/src/mistralai/models/documentout.py +++ b/src/mistralai/models/documentout.py @@ -4,20 +4,21 @@ from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer +from typing import Any, Dict from typing_extensions import NotRequired, TypedDict class DocumentOutTypedDict(TypedDict): id: str library_id: str - hash: str - mime_type: str - extension: str - size: int + hash: Nullable[str] + mime_type: Nullable[str] + extension: Nullable[str] + size: Nullable[int] name: str created_at: datetime processing_status: str - uploaded_by_id: str + uploaded_by_id: Nullable[str] uploaded_by_type: str tokens_processing_total: int summary: NotRequired[Nullable[str]] @@ -25,6 +26,8 @@ class DocumentOutTypedDict(TypedDict): number_of_pages: NotRequired[Nullable[int]] tokens_processing_main_content: NotRequired[Nullable[int]] tokens_processing_summary: NotRequired[Nullable[int]] + url: NotRequired[Nullable[str]] + attributes: NotRequired[Nullable[Dict[str, Any]]] class DocumentOut(BaseModel): @@ -32,13 +35,13 @@ class DocumentOut(BaseModel): library_id: str - hash: str + hash: Nullable[str] - mime_type: str + mime_type: Nullable[str] - extension: str + extension: Nullable[str] - size: int + size: Nullable[int] name: str @@ -46,7 +49,7 @@ class DocumentOut(BaseModel): processing_status: str - uploaded_by_id: str + uploaded_by_id: Nullable[str] uploaded_by_type: str @@ -62,6 +65,10 @@ class DocumentOut(BaseModel): tokens_processing_summary: OptionalNullable[int] = UNSET + url: OptionalNullable[str] = UNSET + + attributes: OptionalNullable[Dict[str, Any]] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -70,13 +77,22 @@ def serialize_model(self, handler): "number_of_pages", "tokens_processing_main_content", "tokens_processing_summary", 
+ "url", + "attributes", ] nullable_fields = [ + "hash", + "mime_type", + "extension", + "size", "summary", "last_processed_at", "number_of_pages", + "uploaded_by_id", "tokens_processing_main_content", "tokens_processing_summary", + "url", + "attributes", ] null_default_fields = [] diff --git a/src/mistralai/models/documentupdatein.py b/src/mistralai/models/documentupdatein.py index 0f6abd5b..bd89ff47 100644 --- a/src/mistralai/models/documentupdatein.py +++ b/src/mistralai/models/documentupdatein.py @@ -1,22 +1,43 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict +from typing import Dict, List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +AttributesTypedDict = TypeAliasType( + "AttributesTypedDict", + Union[ + bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] + ], +) + + +Attributes = TypeAliasType( + "Attributes", + Union[ + bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] + ], +) class DocumentUpdateInTypedDict(TypedDict): name: NotRequired[Nullable[str]] + attributes: NotRequired[Nullable[Dict[str, AttributesTypedDict]]] class DocumentUpdateIn(BaseModel): name: OptionalNullable[str] = UNSET + attributes: OptionalNullable[Dict[str, Attributes]] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name"] - nullable_fields = ["name"] + optional_fields = ["name", "attributes"] + nullable_fields = ["name", "attributes"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 685f27fd..4af890a3 100644 --- 
a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -13,33 +13,33 @@ EmbeddingRequestInputsTypedDict = TypeAliasType( "EmbeddingRequestInputsTypedDict", Union[str, List[str]] ) -r"""Text to embed.""" +r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) -r"""Text to embed.""" +r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" class EmbeddingRequestTypedDict(TypedDict): model: str - r"""ID of the model to use.""" + r"""The ID of the model to be used for embedding.""" inputs: EmbeddingRequestInputsTypedDict - r"""Text to embed.""" + r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" output_dimension: NotRequired[Nullable[int]] - r"""The dimension of the output embeddings.""" + r"""The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used.""" output_dtype: NotRequired[EmbeddingDtype] encoding_format: NotRequired[EncodingFormat] class EmbeddingRequest(BaseModel): model: str - r"""ID of the model to use.""" + r"""The ID of the model to be used for embedding.""" inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] - r"""Text to embed.""" + r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" output_dimension: OptionalNullable[int] = UNSET - r"""The dimension of the output embeddings.""" + r"""The dimension of the output embeddings when feature available. 
If not provided, a default output dimension will be used.""" output_dtype: Optional[EmbeddingDtype] = None diff --git a/src/mistralai/models/files_api_routes_list_filesop.py b/src/mistralai/models/files_api_routes_list_filesop.py index 5060c3b8..8e174a58 100644 --- a/src/mistralai/models/files_api_routes_list_filesop.py +++ b/src/mistralai/models/files_api_routes_list_filesop.py @@ -15,6 +15,7 @@ class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] + include_total: NotRequired[bool] sample_type: NotRequired[Nullable[List[SampleType]]] source: NotRequired[Nullable[List[Source]]] search: NotRequired[Nullable[str]] @@ -32,6 +33,11 @@ class FilesAPIRoutesListFilesRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 100 + include_total: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = True + sample_type: Annotated[ OptionalNullable[ List[Annotated[SampleType, PlainValidator(validate_open_enum(False))]] @@ -63,6 +69,7 @@ def serialize_model(self, handler): optional_fields = [ "page", "page_size", + "include_total", "sample_type", "source", "search", diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py index 06210139..801a358b 100644 --- a/src/mistralai/models/fimcompletionrequest.py +++ b/src/mistralai/models/fimcompletionrequest.py @@ -3,7 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict @@ -21,10 +21,7 @@ class FIMCompletionRequestTypedDict(TypedDict): model: str - r"""ID of the model to use. 
Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" temperature: NotRequired[Nullable[float]] @@ -39,6 +36,7 @@ class FIMCompletionRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" min_tokens: NotRequired[Nullable[int]] @@ -47,10 +45,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" @@ -73,6 +68,8 @@ class FIMCompletionRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" @@ -88,6 +85,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "suffix", "min_tokens", ] @@ -95,6 +93,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "suffix", "min_tokens", ] diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py index 05cc345b..2e8e6db2 100644 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ b/src/mistralai/models/fimcompletionstreamrequest.py @@ -3,7 +3,7 @@ from __future__ import annotations from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict @@ -21,10 +21,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" temperature: NotRequired[Nullable[float]] @@ -38,6 +35,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" min_tokens: NotRequired[Nullable[int]] @@ -46,10 +44,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" @@ -71,6 +66,8 @@ class FIMCompletionStreamRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" @@ -86,6 +83,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "suffix", "min_tokens", ] @@ -93,6 +91,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "suffix", "min_tokens", ] diff --git a/src/mistralai/models/libraries_documents_list_v1op.py b/src/mistralai/models/libraries_documents_list_v1op.py index 04a3ed25..e6ff29cf 100644 --- a/src/mistralai/models/libraries_documents_list_v1op.py +++ b/src/mistralai/models/libraries_documents_list_v1op.py @@ -13,6 +13,7 @@ class LibrariesDocumentsListV1RequestTypedDict(TypedDict): search: NotRequired[Nullable[str]] page_size: NotRequired[int] page: NotRequired[int] + filters_attributes: NotRequired[Nullable[str]] sort_by: NotRequired[str] sort_order: NotRequired[str] @@ -37,6 +38,11 @@ class LibrariesDocumentsListV1Request(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 0 + filters_attributes: Annotated[ + 
OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + sort_by: Annotated[ Optional[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -49,8 +55,15 @@ class LibrariesDocumentsListV1Request(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["search", "page_size", "page", "sort_by", "sort_order"] - nullable_fields = ["search"] + optional_fields = [ + "search", + "page_size", + "page", + "filters_attributes", + "sort_by", + "sort_order", + ] + nullable_fields = ["search", "filters_attributes"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/libraryout.py b/src/mistralai/models/libraryout.py index 6a13130d..d3bc36f9 100644 --- a/src/mistralai/models/libraryout.py +++ b/src/mistralai/models/libraryout.py @@ -12,18 +12,19 @@ class LibraryOutTypedDict(TypedDict): name: str created_at: datetime updated_at: datetime - owner_id: str + owner_id: Nullable[str] owner_type: str total_size: int nb_documents: int chunk_size: Nullable[int] emoji: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] - generated_name: NotRequired[Nullable[str]] generated_description: NotRequired[Nullable[str]] explicit_user_members_count: NotRequired[Nullable[int]] explicit_workspace_members_count: NotRequired[Nullable[int]] org_sharing_role: NotRequired[Nullable[str]] + generated_name: NotRequired[Nullable[str]] + r"""Generated Name""" class LibraryOut(BaseModel): @@ -35,7 +36,7 @@ class LibraryOut(BaseModel): updated_at: datetime - owner_id: str + owner_id: Nullable[str] owner_type: str @@ -49,8 +50,6 @@ class LibraryOut(BaseModel): description: OptionalNullable[str] = UNSET - generated_name: OptionalNullable[str] = UNSET - generated_description: OptionalNullable[str] = UNSET explicit_user_members_count: OptionalNullable[int] = UNSET @@ -59,26 +58,30 @@ class LibraryOut(BaseModel): org_sharing_role: OptionalNullable[str] 
= UNSET + generated_name: OptionalNullable[str] = UNSET + r"""Generated Name""" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ "emoji", "description", - "generated_name", "generated_description", "explicit_user_members_count", "explicit_workspace_members_count", "org_sharing_role", + "generated_name", ] nullable_fields = [ + "owner_id", "chunk_size", "emoji", "description", - "generated_name", "generated_description", "explicit_user_members_count", "explicit_workspace_members_count", "org_sharing_role", + "generated_name", ] null_default_fields = [] diff --git a/src/mistralai/models/listfilesout.py b/src/mistralai/models/listfilesout.py index b032f632..2f82b37d 100644 --- a/src/mistralai/models/listfilesout.py +++ b/src/mistralai/models/listfilesout.py @@ -2,15 +2,16 @@ from __future__ import annotations from .fileschema import FileSchema, FileSchemaTypedDict -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer from typing import List -from typing_extensions import TypedDict +from typing_extensions import NotRequired, TypedDict class ListFilesOutTypedDict(TypedDict): data: List[FileSchemaTypedDict] object: str - total: int + total: NotRequired[Nullable[int]] class ListFilesOut(BaseModel): @@ -18,4 +19,34 @@ class ListFilesOut(BaseModel): object: str - total: int + total: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["total"] + nullable_fields = ["total"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + 
if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py index 54c5f2a2..4b5d5da7 100644 --- a/src/mistralai/models/modelcapabilities.py +++ b/src/mistralai/models/modelcapabilities.py @@ -8,22 +8,31 @@ class ModelCapabilitiesTypedDict(TypedDict): completion_chat: NotRequired[bool] - completion_fim: NotRequired[bool] function_calling: NotRequired[bool] + completion_fim: NotRequired[bool] fine_tuning: NotRequired[bool] vision: NotRequired[bool] + ocr: NotRequired[bool] classification: NotRequired[bool] + moderation: NotRequired[bool] + audio: NotRequired[bool] class ModelCapabilities(BaseModel): - completion_chat: Optional[bool] = True + completion_chat: Optional[bool] = False - completion_fim: Optional[bool] = False + function_calling: Optional[bool] = False - function_calling: Optional[bool] = True + completion_fim: Optional[bool] = False fine_tuning: Optional[bool] = False vision: Optional[bool] = False + ocr: Optional[bool] = False + classification: Optional[bool] = False + + moderation: Optional[bool] = False + + audio: Optional[bool] = False diff --git a/src/mistralai/models/modelconversation.py b/src/mistralai/models/modelconversation.py index 4ced79ea..e413b6fb 100644 --- a/src/mistralai/models/modelconversation.py +++ b/src/mistralai/models/modelconversation.py @@ -12,7 +12,7 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from typing import List, Literal, Optional, Union +from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -60,6 +60,8 @@ class ModelConversationTypedDict(TypedDict): r"""Name given 
to the conversation.""" description: NotRequired[Nullable[str]] r"""Description of the what the conversation is about.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" object: NotRequired[ModelConversationObject] @@ -87,6 +89,9 @@ class ModelConversation(BaseModel): description: OptionalNullable[str] = UNSET r"""Description of the what the conversation is about.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + object: Optional[ModelConversationObject] = "conversation" @model_serializer(mode="wrap") @@ -97,9 +102,10 @@ def serialize_model(self, handler): "completion_args", "name", "description", + "metadata", "object", ] - nullable_fields = ["instructions", "name", "description"] + nullable_fields = ["instructions", "name", "description", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/ocrpageobject.py b/src/mistralai/models/ocrpageobject.py index 94624a16..737defba 100644 --- a/src/mistralai/models/ocrpageobject.py +++ b/src/mistralai/models/ocrpageobject.py @@ -3,10 +3,11 @@ from __future__ import annotations from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL +from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List -from typing_extensions import TypedDict +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class OCRPageObjectTypedDict(TypedDict): @@ -18,6 +19,14 @@ class OCRPageObjectTypedDict(TypedDict): r"""List of all extracted images in the page""" dimensions: Nullable[OCRPageDimensionsTypedDict] r"""The dimensions of the PDF Page's screenshot image""" 
+ tables: NotRequired[List[OCRTableObjectTypedDict]] + r"""List of all extracted tables in the page""" + hyperlinks: NotRequired[List[str]] + r"""List of all hyperlinks in the page""" + header: NotRequired[Nullable[str]] + r"""Header of the page""" + footer: NotRequired[Nullable[str]] + r"""Footer of the page""" class OCRPageObject(BaseModel): @@ -33,10 +42,22 @@ class OCRPageObject(BaseModel): dimensions: Nullable[OCRPageDimensions] r"""The dimensions of the PDF Page's screenshot image""" + tables: Optional[List[OCRTableObject]] = None + r"""List of all extracted tables in the page""" + + hyperlinks: Optional[List[str]] = None + r"""List of all hyperlinks in the page""" + + header: OptionalNullable[str] = UNSET + r"""Header of the page""" + + footer: OptionalNullable[str] = UNSET + r"""Footer of the page""" + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["dimensions"] + optional_fields = ["tables", "hyperlinks", "header", "footer"] + nullable_fields = ["header", "footer", "dimensions"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py index df932c2a..e600d5b6 100644 --- a/src/mistralai/models/ocrrequest.py +++ b/src/mistralai/models/ocrrequest.py @@ -7,7 +7,7 @@ from .responseformat import ResponseFormat, ResponseFormatTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import List, Optional, Union +from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict @@ -22,6 +22,9 @@ r"""Document to run OCR on""" +TableFormat = Literal["markdown", "html"] + + class OCRRequestTypedDict(TypedDict): model: Nullable[str] document: DocumentTypedDict @@ -39,6 +42,9 @@ class OCRRequestTypedDict(TypedDict): r"""Structured output class for extracting useful information from each 
extracted bounding box / image from document. Only json_schema is valid for this field""" document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + table_format: NotRequired[Nullable[TableFormat]] + extract_header: NotRequired[bool] + extract_footer: NotRequired[bool] class OCRRequest(BaseModel): @@ -67,6 +73,12 @@ class OCRRequest(BaseModel): document_annotation_format: OptionalNullable[ResponseFormat] = UNSET r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + table_format: OptionalNullable[TableFormat] = UNSET + + extract_header: Optional[bool] = None + + extract_footer: Optional[bool] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -77,6 +89,9 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "table_format", + "extract_header", + "extract_footer", ] nullable_fields = [ "model", @@ -86,6 +101,7 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "table_format", ] null_default_fields = [] diff --git a/src/mistralai/models/ocrtableobject.py b/src/mistralai/models/ocrtableobject.py new file mode 100644 index 00000000..76f21f3b --- /dev/null +++ b/src/mistralai/models/ocrtableobject.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +import pydantic +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +Format = Literal["markdown", "html"] +r"""Format of the table""" + + +class OCRTableObjectTypedDict(TypedDict): + id: str + r"""Table ID for extracted table in a page""" + content: str + r"""Content of the table in the given format""" + format_: Format + r"""Format of the table""" + + +class OCRTableObject(BaseModel): + id: str + r"""Table ID for extracted table in a page""" + + content: str + r"""Content of the table in the given format""" + + format_: Annotated[Format, pydantic.Field(alias="format")] + r"""Format of the table""" diff --git a/src/mistralai/models/prediction.py b/src/mistralai/models/prediction.py index 7937c9d1..582d8789 100644 --- a/src/mistralai/models/prediction.py +++ b/src/mistralai/models/prediction.py @@ -10,11 +10,15 @@ class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + type: Literal["content"] content: NotRequired[str] class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + TYPE: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) diff --git a/src/mistralai/models/requestsource.py b/src/mistralai/models/requestsource.py new file mode 100644 index 00000000..5ab93af0 --- /dev/null +++ b/src/mistralai/models/requestsource.py @@ -0,0 +1,7 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +RequestSource = Literal["api", "playground", "agent_builder_v1"] diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py index c9319989..92284017 100644 --- a/src/mistralai/models/responseformat.py +++ b/src/mistralai/models/responseformat.py @@ -10,14 +10,16 @@ class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: NotRequired[ResponseFormats] - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: Optional[ResponseFormats] = None - r"""An object specifying the format that the model must output. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: OptionalNullable[JSONSchema] = UNSET diff --git a/src/mistralai/models/responseformats.py b/src/mistralai/models/responseformats.py index 08c39951..258fe70e 100644 --- a/src/mistralai/models/responseformats.py +++ b/src/mistralai/models/responseformats.py @@ -5,4 +5,3 @@ ResponseFormats = Literal["text", "json_object", "json_schema"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/src/mistralai/models/sharingdelete.py b/src/mistralai/models/sharingdelete.py index b9df5f9d..d1cd7074 100644 --- a/src/mistralai/models/sharingdelete.py +++ b/src/mistralai/models/sharingdelete.py @@ -2,25 +2,56 @@ from __future__ import annotations from .entitytype import EntityType -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum +from pydantic import model_serializer from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class SharingDeleteTypedDict(TypedDict): - org_id: str share_with_uuid: str r"""The id of the entity (user, workspace or organization) to share with""" share_with_type: EntityType r"""The type of entity, used to share a library.""" + org_id: NotRequired[Nullable[str]] class SharingDelete(BaseModel): - org_id: str - share_with_uuid: str r"""The id of the entity (user, workspace or organization) to share with""" 
share_with_type: Annotated[EntityType, PlainValidator(validate_open_enum(False))] r"""The type of entity, used to share a library.""" + + org_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["org_id"] + nullable_fields = ["org_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/sharingin.py b/src/mistralai/models/sharingin.py index af20fd14..d3ada343 100644 --- a/src/mistralai/models/sharingin.py +++ b/src/mistralai/models/sharingin.py @@ -3,24 +3,23 @@ from __future__ import annotations from .entitytype import EntityType from .shareenum import ShareEnum -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum +from pydantic import model_serializer from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class SharingInTypedDict(TypedDict): - org_id: str level: ShareEnum share_with_uuid: str r"""The id of the entity (user, workspace or organization) to share with""" share_with_type: EntityType r"""The type of entity, used to share a library.""" + org_id: NotRequired[Nullable[str]] class SharingIn(BaseModel): - org_id: str - level: Annotated[ShareEnum, PlainValidator(validate_open_enum(False))] 
share_with_uuid: str @@ -28,3 +27,35 @@ class SharingIn(BaseModel): share_with_type: Annotated[EntityType, PlainValidator(validate_open_enum(False))] r"""The type of entity, used to share a library.""" + + org_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["org_id"] + nullable_fields = ["org_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/sharingout.py b/src/mistralai/models/sharingout.py index a78a7764..12455818 100644 --- a/src/mistralai/models/sharingout.py +++ b/src/mistralai/models/sharingout.py @@ -11,7 +11,7 @@ class SharingOutTypedDict(TypedDict): org_id: str role: str share_with_type: str - share_with_uuid: str + share_with_uuid: Nullable[str] user_id: NotRequired[Nullable[str]] @@ -24,14 +24,14 @@ class SharingOut(BaseModel): share_with_type: str - share_with_uuid: str + share_with_uuid: Nullable[str] user_id: OptionalNullable[str] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = ["user_id"] - nullable_fields = ["user_id"] + nullable_fields = ["user_id", "share_with_uuid"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/toolexecutiondeltaevent.py b/src/mistralai/models/toolexecutiondeltaevent.py index 99b97e68..25438206 100644 --- a/src/mistralai/models/toolexecutiondeltaevent.py +++ b/src/mistralai/models/toolexecutiondeltaevent.py @@ -4,16 
+4,25 @@ from .builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ToolExecutionDeltaEventType = Literal["tool.execution.delta"] +ToolExecutionDeltaEventNameTypedDict = TypeAliasType( + "ToolExecutionDeltaEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionDeltaEventName = TypeAliasType( + "ToolExecutionDeltaEventName", Union[BuiltInConnectors, str] +) + class ToolExecutionDeltaEventTypedDict(TypedDict): id: str - name: BuiltInConnectors + name: ToolExecutionDeltaEventNameTypedDict arguments: str type: NotRequired[ToolExecutionDeltaEventType] created_at: NotRequired[datetime] @@ -23,7 +32,7 @@ class ToolExecutionDeltaEventTypedDict(TypedDict): class ToolExecutionDeltaEvent(BaseModel): id: str - name: BuiltInConnectors + name: ToolExecutionDeltaEventName arguments: str diff --git a/src/mistralai/models/toolexecutiondoneevent.py b/src/mistralai/models/toolexecutiondoneevent.py index c73d943a..2dea3324 100644 --- a/src/mistralai/models/toolexecutiondoneevent.py +++ b/src/mistralai/models/toolexecutiondoneevent.py @@ -4,16 +4,25 @@ from .builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.types import BaseModel -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ToolExecutionDoneEventType = Literal["tool.execution.done"] +ToolExecutionDoneEventNameTypedDict = TypeAliasType( + "ToolExecutionDoneEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionDoneEventName = TypeAliasType( + "ToolExecutionDoneEventName", Union[BuiltInConnectors, str] +) + class 
ToolExecutionDoneEventTypedDict(TypedDict): id: str - name: BuiltInConnectors + name: ToolExecutionDoneEventNameTypedDict type: NotRequired[ToolExecutionDoneEventType] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -23,7 +32,7 @@ class ToolExecutionDoneEventTypedDict(TypedDict): class ToolExecutionDoneEvent(BaseModel): id: str - name: BuiltInConnectors + name: ToolExecutionDoneEventName type: Optional[ToolExecutionDoneEventType] = "tool.execution.done" diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py index db503ea8..abe53e06 100644 --- a/src/mistralai/models/toolexecutionentry.py +++ b/src/mistralai/models/toolexecutionentry.py @@ -5,17 +5,22 @@ from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ToolExecutionEntryObject = Literal["entry"] ToolExecutionEntryType = Literal["tool.execution"] +NameTypedDict = TypeAliasType("NameTypedDict", Union[BuiltInConnectors, str]) + + +Name = TypeAliasType("Name", Union[BuiltInConnectors, str]) + class ToolExecutionEntryTypedDict(TypedDict): - name: BuiltInConnectors + name: NameTypedDict arguments: str object: NotRequired[ToolExecutionEntryObject] type: NotRequired[ToolExecutionEntryType] @@ -26,7 +31,7 @@ class ToolExecutionEntryTypedDict(TypedDict): class ToolExecutionEntry(BaseModel): - name: BuiltInConnectors + name: Name arguments: str diff --git a/src/mistralai/models/toolexecutionstartedevent.py b/src/mistralai/models/toolexecutionstartedevent.py index 7a54058f..cf4ecbfc 100644 --- a/src/mistralai/models/toolexecutionstartedevent.py +++ b/src/mistralai/models/toolexecutionstartedevent.py @@ -4,16 +4,25 @@ from 
.builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ToolExecutionStartedEventType = Literal["tool.execution.started"] +ToolExecutionStartedEventNameTypedDict = TypeAliasType( + "ToolExecutionStartedEventNameTypedDict", Union[BuiltInConnectors, str] +) + + +ToolExecutionStartedEventName = TypeAliasType( + "ToolExecutionStartedEventName", Union[BuiltInConnectors, str] +) + class ToolExecutionStartedEventTypedDict(TypedDict): id: str - name: BuiltInConnectors + name: ToolExecutionStartedEventNameTypedDict arguments: str type: NotRequired[ToolExecutionStartedEventType] created_at: NotRequired[datetime] @@ -23,7 +32,7 @@ class ToolExecutionStartedEventTypedDict(TypedDict): class ToolExecutionStartedEvent(BaseModel): id: str - name: BuiltInConnectors + name: ToolExecutionStartedEventName arguments: str diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index b6cc3186..bf82cc16 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -73,18 +73,12 @@ def list( ), ), request=req, - error_status_codes=["422", "4XX", "5XX"], + error_status_codes=["4XX", "5XX"], retry_config=retry_config, ) - response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response(models.ModelList, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -155,18 +149,12 @@ async def list_async( ), ), request=req, - 
error_status_codes=["422", "4XX", "5XX"], + error_status_codes=["4XX", "5XX"], retry_config=retry_config, ) - response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response(models.ModelList, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index bed8b7be..6b283b35 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -28,6 +28,9 @@ def process( document_annotation_format: OptionalNullable[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -44,6 +47,9 @@ def process( :param image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field + :param table_format: + :param extract_header: + :param extract_footer: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -73,6 +79,9 @@ def process( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, ) req = self._build_request( @@ -150,6 +159,9 @@ async def process_async( document_annotation_format: OptionalNullable[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -166,6 +178,9 @@ async def process_async( :param image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field + :param table_format: + :param extract_header: + :param extract_footer: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -195,6 +210,9 @@ async def process_async( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, ) req = self._build_request_async( diff --git a/src/mistralai/transcriptions.py b/src/mistralai/transcriptions.py index 3e2de6f5..dc8ad2e8 100644 --- a/src/mistralai/transcriptions.py +++ b/src/mistralai/transcriptions.py @@ -29,7 +29,7 @@ def complete( ) -> models.TranscriptionResponse: r"""Create Transcription - :param model: + :param model: ID of the model to be used. :param file: :param file_url: Url of a file to be transcribed :param file_id: ID of a file uploaded to /v1/files @@ -131,7 +131,7 @@ async def complete_async( ) -> models.TranscriptionResponse: r"""Create Transcription - :param model: + :param model: ID of the model to be used. 
:param file: :param file_url: Url of a file to be transcribed :param file_id: ID of a file uploaded to /v1/files @@ -231,7 +231,7 @@ def stream( timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> eventstreaming.EventStream[models.TranscriptionStreamEvents]: - r"""Create streaming transcription (SSE) + r"""Create Streaming Transcription (SSE) :param model: :param file: @@ -343,7 +343,7 @@ async def stream_async( timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, ) -> eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]: - r"""Create streaming transcription (SSE) + r"""Create Streaming Transcription (SSE) :param model: :param file: From ee543e4dd242046bc60601ffa2f190ff8564fe41 Mon Sep 17 00:00:00 2001 From: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> Date: Mon, 29 Dec 2025 09:16:30 +0100 Subject: [PATCH 164/223] feat(ocr): add async batch annotation for table extraction from PDFs (#301) --- .../jobs/async_jobs_ocr_batch_annotation.py | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 examples/mistral/jobs/async_jobs_ocr_batch_annotation.py diff --git a/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py b/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py new file mode 100644 index 00000000..e62bca17 --- /dev/null +++ b/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +import asyncio +import json +import os +from typing import List + +import httpx +from pydantic import BaseModel, Field + +from mistralai import Mistral +from mistralai.extra import response_format_from_pydantic_model +from mistralai.models import File + +SAMPLE_PDF_URL = "https://arxiv.org/pdf/2401.04088" + + +class Table(BaseModel): + name: str = Field(description="The name or title of the table") + + +class TableExtraction(BaseModel): + tables: List[Table] = 
Field(description="List of tables found in the document") + + +def create_ocr_batch_request(custom_id: str, document_url: str) -> dict: + """Batch requests require custom_id and body wrapper.""" + response_format = response_format_from_pydantic_model(TableExtraction) + return { + "custom_id": custom_id, + "body": { + "document": {"type": "document_url", "document_url": document_url}, + "document_annotation_format": response_format.model_dump( + by_alias=True, exclude_none=True + ), + "pages": [0, 1, 2, 3, 4, 5, 6, 7], + "include_image_base64": False, + }, + } + + +async def main(): + client = Mistral(api_key=os.environ["MISTRAL_API_KEY"]) + + document_urls = [SAMPLE_PDF_URL] + + batch_requests = [ + json.dumps(create_ocr_batch_request(custom_id=str(i), document_url=url)) + for i, url in enumerate(document_urls) + ] + batch_content = "\n".join(batch_requests) + + print("Uploading batch file...") + batch_file = await client.files.upload_async( + file=File(file_name="ocr_batch.jsonl", content=batch_content.encode()), + purpose="batch", + ) + print(f"Batch file uploaded: {batch_file.id}") + + print("Creating batch job...") + created_job = await client.batch.jobs.create_async( + model="mistral-ocr-latest", + input_files=[batch_file.id], + endpoint="/v1/ocr", + ) + print(f"Batch job created: {created_job.id}") + + print("Waiting for job completion...") + job = await client.batch.jobs.get_async(job_id=created_job.id) + while job.status not in ["SUCCESS", "FAILED", "CANCELLED"]: + print(f"Status: {job.status}") + await asyncio.sleep(5) + job = await client.batch.jobs.get_async(job_id=created_job.id) + + print(f"Job status: {job.status}") + + async with httpx.AsyncClient() as http_client: + if job.output_file: + signed_url = await client.files.get_signed_url_async( + file_id=job.output_file + ) + response = await http_client.get(signed_url.url) + for line in response.content.decode().strip().split("\n"): + result = json.loads(line) + annotation = 
result["response"]["body"].get("document_annotation") + if annotation: + tables = TableExtraction.model_validate_json(annotation) + for table in tables.tables: + print(table.name) + + if job.error_file: + signed_url = await client.files.get_signed_url_async(file_id=job.error_file) + response = await http_client.get(signed_url.url) + print("Errors:", response.content.decode()) + + print("\nCleaning up...") + await client.files.delete_async(file_id=batch_file.id) + print("Done!") + + +if __name__ == "__main__": + asyncio.run(main()) From 80eb53817ba4426418d4b3c1fefb5ae5323bb38d Mon Sep 17 00:00:00 2001 From: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> Date: Wed, 31 Dec 2025 12:48:03 +0100 Subject: [PATCH 165/223] feat: use UV (#303) --- .github/workflows/lint_custom_code.yaml | 6 +- .github/workflows/run_example_scripts.yaml | 10 +- .github/workflows/test_custom_code.yaml | 26 +- .github/workflows/update_speakeasy.yaml | 12 +- .gitignore | 1 + .speakeasy/gen.yaml | 2 +- README.md | 20 +- packages/mistralai_azure/README.md | 4 +- packages/mistralai_azure/poetry.lock | 605 ------- packages/mistralai_azure/poetry.toml | 2 - packages/mistralai_azure/pyproject.toml | 37 +- packages/mistralai_azure/scripts/publish.sh | 5 +- packages/mistralai_azure/uv.lock | 487 ++++++ packages/mistralai_gcp/poetry.lock | 900 ---------- packages/mistralai_gcp/poetry.toml | 2 - packages/mistralai_gcp/pyproject.toml | 39 +- packages/mistralai_gcp/scripts/publish.sh | 7 +- packages/mistralai_gcp/uv.lock | 823 +++++++++ poetry.lock | 1697 ------------------- poetry.toml | 3 - pyproject.toml | 94 +- scripts/lint_custom_code.sh | 30 +- scripts/publish.sh | 7 +- src/mistralai/extra/README.md | 2 +- src/mistralai/extra/mcp/base.py | 15 +- src/mistralai/extra/mcp/sse.py | 4 +- src/mistralai/extra/mcp/stdio.py | 2 +- src/mistralai/extra/observability/otel.py | 4 +- uv.lock | 1693 ++++++++++++++++++ 29 files changed, 3176 insertions(+), 3363 deletions(-) delete mode 100644 
packages/mistralai_azure/poetry.lock delete mode 100644 packages/mistralai_azure/poetry.toml create mode 100644 packages/mistralai_azure/uv.lock delete mode 100644 packages/mistralai_gcp/poetry.lock delete mode 100644 packages/mistralai_gcp/poetry.toml create mode 100644 packages/mistralai_gcp/uv.lock delete mode 100644 poetry.lock delete mode 100644 poetry.toml create mode 100644 uv.lock diff --git a/.github/workflows/lint_custom_code.yaml b/.github/workflows/lint_custom_code.yaml index bd327c42..f6147b55 100644 --- a/.github/workflows/lint_custom_code.yaml +++ b/.github/workflows/lint_custom_code.yaml @@ -21,13 +21,13 @@ jobs: with: python-version: '3.12' - - name: Install Poetry - uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - name: Install dependencies run: | touch README-PYPI.md - poetry install --all-extras + uv sync --all-extras # The init, sdkhooks.py and types.py files in the _hooks folders are generated by Speakeasy hence the exclusion - name: Run all linters diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index 1ac5b3a5..8ea90354 100644 --- a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -25,8 +25,8 @@ jobs: with: python-version: ${{ matrix.python-version }} - - name: Install Poetry - uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - name: Set VERSION run: | @@ -40,13 +40,13 @@ jobs: - name: Build the package run: | touch README-PYPI.md # Create this file since the client is not built by Speakeasy - poetry build + uv build - name: For python 3.9, install the client and run examples without extra dependencies. 
if: matrix.python-version == '3.9' run: | PACKAGE="dist/$(ls dist | grep whl | head -n 1)" - python3 -m pip install "$PACKAGE" + uv pip install --system "$PACKAGE" ./scripts/run_examples.sh --no-extra-dep env: MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} @@ -56,7 +56,7 @@ jobs: if: matrix.python-version != '3.9' run: | PACKAGE="dist/$(ls dist | grep whl | head -n 1)[agents]" - python3 -m pip install "$PACKAGE" + uv pip install --system "$PACKAGE" ./scripts/run_examples.sh env: MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} diff --git a/.github/workflows/test_custom_code.yaml b/.github/workflows/test_custom_code.yaml index 2b087076..8a22fcb1 100644 --- a/.github/workflows/test_custom_code.yaml +++ b/.github/workflows/test_custom_code.yaml @@ -22,27 +22,13 @@ jobs: with: python-version: '3.12' - - name: Install Poetry - uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 - with: - virtualenvs-create: true - virtualenvs-in-project: true - virtualenvs-path: .venv - installer-parallel: true - - - name: Load cached venv - id: cached-poetry-dependencies - uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4 - with: - path: .venv - key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - name: Install dependencies - # Install dependencies if cache does not exist - if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' - run: poetry install --no-interaction --no-root + run: | + touch README-PYPI.md + uv sync --all-extras - name: Run the 'src/mistralai/extra' package unit tests - run: | - source .venv/bin/activate - python3.12 -m unittest discover -s src/mistralai/extra/tests -t src + run: uv run python3.12 -m unittest discover -s src/mistralai/extra/tests -t src diff --git a/.github/workflows/update_speakeasy.yaml b/.github/workflows/update_speakeasy.yaml index 06449cf1..f596cf66 
100644 --- a/.github/workflows/update_speakeasy.yaml +++ b/.github/workflows/update_speakeasy.yaml @@ -33,17 +33,13 @@ jobs: with: python-version: '3.11' - - name: Install Poetry - uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 - with: - version: latest - virtualenvs-create: true - virtualenvs-in-project: true + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - name: Install dependencies run: | cp README.md README-PYPI.md - poetry install --with dev + uv sync --group dev --no-default-groups - name: Install Speakeasy CLI run: | @@ -70,7 +66,7 @@ jobs: TARGETS_ARGS="$TARGETS_ARGS --targets $target" done - poetry run inv update-speakeasy \ + uv run inv update-speakeasy \ --version "${{ github.event.inputs.version }}" \ $TARGETS_ARGS env: diff --git a/.gitignore b/.gitignore index 954adb7c..336f773d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.idea **/__pycache__/ **/.speakeasy/temp/ **/.speakeasy/logs/ diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index f206b927..85ac8dac 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -56,7 +56,7 @@ python: methodArguments: infer-optional-args moduleName: "" outputModelSuffix: output - packageManager: poetry + packageManager: uv packageName: mistralai pytestFilterWarnings: [] pytestTimeout: 0 diff --git a/README.md b/README.md index 1bc889c6..d198cf10 100644 --- a/README.md +++ b/README.md @@ -58,15 +58,7 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create yo > > Once a Python version reaches its [official end of life date](https://devguide.python.org/versions/), a 3-month grace period is provided for users to upgrade. Following this grace period, the minimum python version supported in the SDK will be updated. -The SDK can be installed with *uv*, *pip*, or *poetry* package managers. 
- -### uv - -*uv* is a fast Python package installer and resolver, designed as a drop-in replacement for pip and pip-tools. It's recommended for its speed and modern Python tooling capabilities. - -```bash -uv add mistralai -``` +The SDK can be installed with either *pip* or *uv* package managers. ### PIP @@ -76,12 +68,12 @@ uv add mistralai pip install mistralai ``` -### Poetry +### UV -*Poetry* is a modern tool that simplifies dependency management and package publishing by using a single `pyproject.toml` file to handle project metadata and dependencies. +*UV* is an extremely fast Python package and project manager. You can use it to add the SDK to your project: ```bash -poetry add mistralai +uv add mistralai ``` ### Shell and script usage with `uv` @@ -347,7 +339,7 @@ asyncio.run(main()) ### More examples -You can run the examples in the `examples/` directory using `poetry run` or by entering the virtual environment using `poetry shell`. +You can run the examples in the `examples/` directory using `uv run`. ## Providers' SDKs Example Usage @@ -991,4 +983,4 @@ Generally, the SDK will work well with most IDEs out of the box. However, when u ## Contributions While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. -We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. \ No newline at end of file +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. 
diff --git a/packages/mistralai_azure/README.md b/packages/mistralai_azure/README.md index 65bc2e4f..f869b90a 100644 --- a/packages/mistralai_azure/README.md +++ b/packages/mistralai_azure/README.md @@ -7,9 +7,9 @@ PIP pip install mistralai ``` -Poetry +UV ```bash -poetry add mistralai +uv add mistralai ``` **Prerequisites** diff --git a/packages/mistralai_azure/poetry.lock b/packages/mistralai_azure/poetry.lock deleted file mode 100644 index fb5b615e..00000000 --- a/packages/mistralai_azure/poetry.lock +++ /dev/null @@ -1,605 +0,0 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. - -[[package]] -name = "annotated-types" -version = "0.7.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, - {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, -] - -[[package]] -name = "anyio" -version = "4.4.0" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, -] - -[package.dependencies] -exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} -idna = ">=2.8" -sniffio = ">=1.1" -typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} - -[package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis 
(>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] -trio = ["trio (>=0.23)"] - -[[package]] -name = "astroid" -version = "3.2.4" -description = "An abstract syntax tree for Python with inference support." -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, - {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} - -[[package]] -name = "certifi" -version = "2024.7.4" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, - {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, -] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["dev"] -markers = "sys_platform == \"win32\"" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "dill" -version = "0.3.8" -description = "serialize all of Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, -] - -[package.extras] -graph = ["objgraph (>=1.7.2)"] -profile = ["gprof2dot (>=2022.7.29)"] - -[[package]] -name = "exceptiongroup" -version = "1.2.2" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "h11" -version = "0.16.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, - {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, -] - -[[package]] -name = "httpcore" -version = "1.0.9" -description = "A minimal low-level HTTP 
client." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"}, - {file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"}, -] - -[package.dependencies] -certifi = "*" -h11 = ">=0.16" - -[package.extras] -asyncio = ["anyio (>=4.0,<5.0)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<1.0)"] - -[[package]] -name = "httpx" -version = "0.28.1" -description = "The next generation HTTP client." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, - {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, -] - -[package.dependencies] -anyio = "*" -certifi = "*" -httpcore = "==1.*" -idna = "*" - -[package.extras] -brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] -cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "idna" -version = "3.7" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -groups = ["main"] -files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, -] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", 
hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "isort" -version = "5.13.2" -description = "A Python utility / library to sort Python imports." -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, - {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, -] - -[package.extras] -colors = ["colorama (>=0.4.6)"] - -[[package]] -name = "mccabe" -version = "0.7.0" -description = "McCabe checker, plugin for flake8" -optional = false -python-versions = ">=3.6" -groups = ["dev"] -files = [ - {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, - {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, -] - -[[package]] -name = "mypy" -version = "1.15.0" -description = "Optional static typing for Python" -optional = false -python-versions = ">=3.9" -groups = ["dev"] -files = [ - {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"}, - {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"}, - {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"}, - {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"}, - {file = 
"mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"}, - {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"}, - {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"}, - {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"}, - {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"}, - {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"}, - {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"}, - {file = "mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"}, - {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"}, - {file = "mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"}, - {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"}, - {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"}, - {file = "mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"}, - {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"}, - {file = "mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"}, - {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"}, - {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"}, - {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"}, - {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"}, - {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"}, - {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"}, - {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"}, - {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"}, - {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"}, - {file = "mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"}, - {file = 
"mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"}, - {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"}, - {file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"}, -] - -[package.dependencies] -mypy_extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing_extensions = ">=4.6.0" - -[package.extras] -dmypy = ["psutil (>=4.0)"] -faster-cache = ["orjson"] -install-types = ["pip"] -mypyc = ["setuptools (>=50)"] -reports = ["lxml"] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." -optional = false -python-versions = ">=3.5" -groups = ["dev"] -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "packaging" -version = "24.1" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, -] - -[[package]] -name = "platformdirs" -version = "4.2.2" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
-optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, -] - -[package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] - -[[package]] -name = "pluggy" -version = "1.5.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "pydantic" -version = "2.11.7" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b"}, - {file = "pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db"}, -] - -[package.dependencies] -annotated-types = ">=0.6.0" -pydantic-core = "2.33.2" -typing-extensions = ">=4.12.2" -typing-inspection = ">=0.4.0" - -[package.extras] -email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] - -[[package]] -name = "pydantic-core" -version = "2.33.2" -description = "Core functionality for Pydantic validation and serialization" 
-optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, - {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, - {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, - {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, - {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, - {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, - {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = 
"sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, - {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, - {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, - {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, - {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, - {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, - {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, - {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, - {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, - {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, - {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, - {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, - {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, - {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, - {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, - {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, - {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, - {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, - {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, - {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, - {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, - {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, - {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, - 
{file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, - {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, - {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, - {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, - {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, - {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, - {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, - {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, - {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, - {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, - {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, - {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, - {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, - {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, - {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, - {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, - {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, - {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, - {file 
= "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, - {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, - {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = 
"sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, - {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, - {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - -[[package]] -name = "pylint" -version = "3.2.3" -description = "python code static checker" -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, - {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, -] - -[package.dependencies] -astroid = ">=3.2.2,<=3.3.0-dev0" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -dill = [ - {version = ">=0.2", markers = "python_version < \"3.11\""}, - {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version == \"3.11\""}, -] -isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" -mccabe = ">=0.6,<0.8" -platformdirs = ">=2.2.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -tomlkit = ">=0.10.1" -typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} - -[package.extras] -spelling = ["pyenchant (>=3.2,<4.0)"] -testutils = ["gitpython (>3)"] - -[[package]] -name = "pytest" -version = "8.3.2" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, - {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, -] - -[package.dependencies] 
-colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=1.5,<2" -tomli = {version = ">=1", markers = "python_version < \"3.11\""} - -[package.extras] -dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-asyncio" -version = "0.23.8" -description = "Pytest support for asyncio" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, - {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, -] - -[package.dependencies] -pytest = ">=7.0.0,<9" - -[package.extras] -docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] -testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] - -[[package]] -name = "sniffio" -version = "1.3.1" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, - {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, -] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "tomlkit" -version = "0.13.0" -description 
= "Style preserving TOML library" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, - {file = "tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, -] - -[[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, -] - -[[package]] -name = "typing-inspection" -version = "0.4.0" -description = "Runtime typing introspection tools" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, - {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, -] - -[package.dependencies] -typing-extensions = ">=4.12.0" - -[metadata] -lock-version = "2.1" -python-versions = ">=3.9.2" -content-hash = "7783570c2127219ca33415a933b1f4b8e43c4b432d4f04679552c25f89596fc1" diff --git a/packages/mistralai_azure/poetry.toml b/packages/mistralai_azure/poetry.toml deleted file mode 100644 index ab1033bd..00000000 --- a/packages/mistralai_azure/poetry.toml +++ /dev/null @@ -1,2 +0,0 @@ -[virtualenvs] -in-project = true diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 81387b21..59cd5be3 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -2,36 +2,45 @@ name = 
"mistralai_azure" version = "1.6.0" description = "Python Client SDK for the Mistral AI API in Azure." -authors = [{ name = "Mistral" },] -readme = "README.md" +authors = [{ name = "Mistral" }] requires-python = ">=3.9.2" +readme = "README.md" dependencies = [ "httpcore >=1.0.9", "httpx >=0.28.1", "pydantic >=2.11.2", ] -[tool.poetry] -packages = [ - { include = "mistralai_azure", from = "src" } +[dependency-groups] +dev = [ + "mypy==1.15.0", + "pylint==3.2.3", + "pytest>=8.2.2,<9", + "pytest-asyncio>=0.23.7,<0.24", ] -include = ["py.typed", "src/mistralai_azure/py.typed"] [tool.setuptools.package-data] "*" = ["py.typed", "src/mistralai_azure/py.typed"] +[tool.hatch.build.targets.sdist] +include = ["src/mistralai_azure"] + +[tool.hatch.build.targets.sdist.force-include] +"py.typed" = "py.typed" +"src/mistralai_azure/py.typed" = "src/mistralai_azure/py.typed" + +[tool.hatch.build.targets.wheel] +include = ["src/mistralai_azure"] + +[tool.hatch.build.targets.wheel.sources] +"src/mistralai_azure" = "mistralai_azure" + [virtualenvs] in-project = true -[tool.poetry.group.dev.dependencies] -mypy = "==1.15.0" -pylint = "==3.2.3" -pytest = "^8.2.2" -pytest-asyncio = "^0.23.7" - [build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" [tool.pytest.ini_options] asyncio_default_fixture_loop_scope = "function" diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index 1ee7194c..f2f31e59 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} +export UV_PUBLISH_TOKEN=${PYPI_TOKEN} -poetry publish --build --skip-existing +uv build +uv publish diff --git a/packages/mistralai_azure/uv.lock b/packages/mistralai_azure/uv.lock new file mode 100644 index 00000000..37c95c55 --- /dev/null +++ 
b/packages/mistralai_azure/uv.lock @@ -0,0 +1,487 @@ +version = 1 +revision = 3 +requires-python = ">=3.9.2" +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version < '3.11'", +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e6/e3/c4c8d473d6780ef1853d630d581f70d655b4f8d7553c6997958c283039a2/anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94", size = 163930, upload-time = "2024-05-26T22:02:15.75Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/a2/10639a79341f6c019dedc95bd48a4928eed9f1d1197f4c04f546fc7ae0ff/anyio-4.4.0-py3-none-any.whl", hash = 
"sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7", size = 86780, upload-time = "2024-05-26T22:02:13.671Z" }, +] + +[[package]] +name = "astroid" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/53/1067e1113ecaf58312357f2cd93063674924119d80d173adc3f6f2387aa2/astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a", size = 397576, upload-time = "2024-07-20T12:57:43.26Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/96/b32bbbb46170a1c8b8b1f28c794202e25cfe743565e9d3469b8eb1e0cc05/astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25", size = 276348, upload-time = "2024-07-20T12:57:40.886Z" }, +] + +[[package]] +name = "certifi" +version = "2024.7.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c2/02/a95f2b11e207f68bc64d7aae9666fed2e2b3f307748d5123dffb72a1bbea/certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", size = 164065, upload-time = "2024-07-04T01:36:11.653Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/d5/c84e1a17bf61d4df64ca866a1c9a913874b4e9bdc131ec689a0ad013fb36/certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90", size = 162960, upload-time = "2024-07-04T01:36:09.038Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "dill" +version = "0.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/17/4d/ac7ffa80c69ea1df30a8aa11b3578692a5118e7cd1aa157e3ef73b092d15/dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca", size = 184847, upload-time = "2024-01-27T23:42:16.145Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/7a/cef76fd8438a42f96db64ddaa85280485a9c395e7df3db8158cfec1eee34/dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7", size = 116252, upload-time = "2024-01-27T23:42:14.239Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/35/2495c4ac46b980e4ca1f6ad6db102322ef3ad2410b79fdde159a4b0f3b92/exceptiongroup-1.2.2.tar.gz", hash = 
"sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc", size = 28883, upload-time = "2024-07-12T22:26:00.161Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453, upload-time = "2024-07-12T22:25:58.476Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/ed/f86a79a07470cb07819390452f178b3bef1d375f2ec021ecfc709fc7cf07/idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc", size = 189575, upload-time = "2024-04-11T03:34:43.276Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/3e/741d8c82801c347547f8a2a06aa57dbb1992be9e948df2ea0eda2c8b79e8/idna-3.7-py3-none-any.whl", hash = 
"sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0", size = 66836, upload-time = "2024-04-11T03:34:41.447Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646, upload-time = "2023-01-07T11:08:11.254Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892, upload-time = "2023-01-07T11:08:09.864Z" }, +] + +[[package]] +name = "isort" +version = "5.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", size = 175303, upload-time = "2023-12-13T20:37:26.124Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310, upload-time = "2023-12-13T20:37:23.244Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = "mistralai-azure" +version = "1.6.0" +source = { editable = "." } +dependencies = [ + { name = "httpcore" }, + { name = "httpx" }, + { name = "pydantic" }, +] + +[package.dev-dependencies] +dev = [ + { name = "mypy" }, + { name = "pylint" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, +] + +[package.metadata] +requires-dist = [ + { name = "httpcore", specifier = ">=1.0.9" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "pydantic", specifier = ">=2.11.2" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "mypy", specifier = "==1.15.0" }, + { name = "pylint", specifier = "==3.2.3" }, + { name = "pytest", specifier = ">=8.2.2,<9" }, + { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, +] + +[[package]] +name = "mypy" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, 
upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" }, + { url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" }, + { url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" }, + { url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" }, + { url = "https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" }, + { url = "https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" }, + { url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" }, + { url = 
"https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" }, + { url = "https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" }, + { url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" }, + { url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" }, + { url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" }, + { url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = 
"https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = "https://files.pythonhosted.org/packages/5a/fa/79cf41a55b682794abe71372151dbbf856e3008f6767057229e6649d294a/mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078", size = 10737129, upload-time = "2025-02-05T03:50:24.509Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/33/dd8feb2597d648de29e3da0a8bf4e1afbda472964d2a4a0052203a6f3594/mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba", size = 9856335, upload-time = "2025-02-05T03:49:36.398Z" }, + { url = "https://files.pythonhosted.org/packages/e4/b5/74508959c1b06b96674b364ffeb7ae5802646b32929b7701fc6b18447592/mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5", size = 11611935, upload-time = "2025-02-05T03:49:14.154Z" }, + { url = "https://files.pythonhosted.org/packages/6c/53/da61b9d9973efcd6507183fdad96606996191657fe79701b2c818714d573/mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b", size = 12365827, upload-time = "2025-02-05T03:48:59.458Z" }, + { url = "https://files.pythonhosted.org/packages/c1/72/965bd9ee89540c79a25778cc080c7e6ef40aa1eeac4d52cec7eae6eb5228/mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2", size = 12541924, upload-time = "2025-02-05T03:50:03.12Z" }, + { url = "https://files.pythonhosted.org/packages/46/d0/f41645c2eb263e6c77ada7d76f894c580c9ddb20d77f0c24d34273a4dab2/mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980", size = 9271176, upload-time = "2025-02-05T03:50:10.86Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782", size = 4433, upload-time = "2023-02-04T12:11:27.157Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695, upload-time = "2023-02-04T12:11:25.002Z" }, +] + +[[package]] +name = "packaging" +version = "24.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/65/50db4dda066951078f0a96cf12f4b9ada6e4b811516bf0262c0f4f7064d4/packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", size = 148788, upload-time = "2024-06-09T23:19:24.956Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", size = 53985, upload-time = "2024-06-09T23:19:21.909Z" }, 
+] + +[[package]] +name = "platformdirs" +version = "4.2.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/52/0763d1d976d5c262df53ddda8d8d4719eedf9594d046f117c25a27261a19/platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3", size = 20916, upload-time = "2024-05-15T03:18:23.372Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/13/2aa1f0e1364feb2c9ef45302f387ac0bd81484e9c9a4c5688a322fbdfd08/platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee", size = 18146, upload-time = "2024-05-15T03:18:21.209Z" }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955, upload-time = "2024-04-20T21:34:42.531Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556, upload-time = "2024-04-20T21:34:40.434Z" }, +] + +[[package]] +name = "pydantic" +version = "2.11.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/92/b31726561b5dae176c2d2c2dc43a9c5bfba5d32f96f8b4c0a600dd492447/pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8", size = 2028817, upload-time = "2025-04-23T18:30:43.919Z" }, + { url = "https://files.pythonhosted.org/packages/a3/44/3f0b95fafdaca04a483c4e685fe437c6891001bf3ce8b2fded82b9ea3aa1/pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d", size = 1861357, upload-time = "2025-04-23T18:30:46.372Z" }, + { url = 
"https://files.pythonhosted.org/packages/30/97/e8f13b55766234caae05372826e8e4b3b96e7b248be3157f53237682e43c/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d", size = 1898011, upload-time = "2025-04-23T18:30:47.591Z" }, + { url = "https://files.pythonhosted.org/packages/9b/a3/99c48cf7bafc991cc3ee66fd544c0aae8dc907b752f1dad2d79b1b5a471f/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572", size = 1982730, upload-time = "2025-04-23T18:30:49.328Z" }, + { url = "https://files.pythonhosted.org/packages/de/8e/a5b882ec4307010a840fb8b58bd9bf65d1840c92eae7534c7441709bf54b/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02", size = 2136178, upload-time = "2025-04-23T18:30:50.907Z" }, + { url = "https://files.pythonhosted.org/packages/e4/bb/71e35fc3ed05af6834e890edb75968e2802fe98778971ab5cba20a162315/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b", size = 2736462, upload-time = "2025-04-23T18:30:52.083Z" }, + { url = "https://files.pythonhosted.org/packages/31/0d/c8f7593e6bc7066289bbc366f2235701dcbebcd1ff0ef8e64f6f239fb47d/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2", size = 2005652, upload-time = "2025-04-23T18:30:53.389Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/7a/996d8bd75f3eda405e3dd219ff5ff0a283cd8e34add39d8ef9157e722867/pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a", size = 2113306, upload-time = "2025-04-23T18:30:54.661Z" }, + { url = "https://files.pythonhosted.org/packages/ff/84/daf2a6fb2db40ffda6578a7e8c5a6e9c8affb251a05c233ae37098118788/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac", size = 2073720, upload-time = "2025-04-23T18:30:56.11Z" }, + { url = "https://files.pythonhosted.org/packages/77/fb/2258da019f4825128445ae79456a5499c032b55849dbd5bed78c95ccf163/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a", size = 2244915, upload-time = "2025-04-23T18:30:57.501Z" }, + { url = "https://files.pythonhosted.org/packages/d8/7a/925ff73756031289468326e355b6fa8316960d0d65f8b5d6b3a3e7866de7/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b", size = 2241884, upload-time = "2025-04-23T18:30:58.867Z" }, + { url = "https://files.pythonhosted.org/packages/0b/b0/249ee6d2646f1cdadcb813805fe76265745c4010cf20a8eba7b0e639d9b2/pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22", size = 1910496, upload-time = "2025-04-23T18:31:00.078Z" }, + { url = 
"https://files.pythonhosted.org/packages/66/ff/172ba8f12a42d4b552917aa65d1f2328990d3ccfc01d5b7c943ec084299f/pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640", size = 1955019, upload-time = "2025-04-23T18:31:01.335Z" }, + { url = "https://files.pythonhosted.org/packages/3f/8d/71db63483d518cbbf290261a1fc2839d17ff89fce7089e08cad07ccfce67/pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7", size = 2028584, upload-time = "2025-04-23T18:31:03.106Z" }, + { url = "https://files.pythonhosted.org/packages/24/2f/3cfa7244ae292dd850989f328722d2aef313f74ffc471184dc509e1e4e5a/pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246", size = 1855071, upload-time = "2025-04-23T18:31:04.621Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d3/4ae42d33f5e3f50dd467761304be2fa0a9417fbf09735bc2cce003480f2a/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f", size = 1897823, upload-time = "2025-04-23T18:31:06.377Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f3/aa5976e8352b7695ff808599794b1fba2a9ae2ee954a3426855935799488/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc", size = 1983792, upload-time = "2025-04-23T18:31:07.93Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/7a/cda9b5a23c552037717f2b2a5257e9b2bfe45e687386df9591eff7b46d28/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de", size = 2136338, upload-time = "2025-04-23T18:31:09.283Z" }, + { url = "https://files.pythonhosted.org/packages/2b/9f/b8f9ec8dd1417eb9da784e91e1667d58a2a4a7b7b34cf4af765ef663a7e5/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a", size = 2730998, upload-time = "2025-04-23T18:31:11.7Z" }, + { url = "https://files.pythonhosted.org/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef", size = 2003200, upload-time = "2025-04-23T18:31:13.536Z" }, + { url = "https://files.pythonhosted.org/packages/ca/22/3602b895ee2cd29d11a2b349372446ae9727c32e78a94b3d588a40fdf187/pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e", size = 2113890, upload-time = "2025-04-23T18:31:15.011Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e6/e3c5908c03cf00d629eb38393a98fccc38ee0ce8ecce32f69fc7d7b558a7/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d", size = 2073359, upload-time = "2025-04-23T18:31:16.393Z" }, + { url = 
"https://files.pythonhosted.org/packages/12/e7/6a36a07c59ebefc8777d1ffdaf5ae71b06b21952582e4b07eba88a421c79/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30", size = 2245883, upload-time = "2025-04-23T18:31:17.892Z" }, + { url = "https://files.pythonhosted.org/packages/16/3f/59b3187aaa6cc0c1e6616e8045b284de2b6a87b027cce2ffcea073adf1d2/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf", size = 2241074, upload-time = "2025-04-23T18:31:19.205Z" }, + { url = "https://files.pythonhosted.org/packages/e0/ed/55532bb88f674d5d8f67ab121a2a13c385df382de2a1677f30ad385f7438/pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51", size = 1910538, upload-time = "2025-04-23T18:31:20.541Z" }, + { url = "https://files.pythonhosted.org/packages/fe/1b/25b7cccd4519c0b23c2dd636ad39d381abf113085ce4f7bec2b0dc755eb1/pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab", size = 1952909, upload-time = "2025-04-23T18:31:22.371Z" }, + { url = "https://files.pythonhosted.org/packages/49/a9/d809358e49126438055884c4366a1f6227f0f84f635a9014e2deb9b9de54/pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65", size = 1897786, upload-time = "2025-04-23T18:31:24.161Z" }, + { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" }, + { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" }, + { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" }, + { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, 
upload-time = "2025-04-23T18:31:33.958Z" }, + { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" }, + { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" }, + { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" }, + { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" }, + { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" }, + { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" }, + { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" }, + { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" }, + { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" }, + { url = 
"https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" }, + { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" }, + { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" }, + { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" }, + { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" }, + { url = 
"https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" }, + { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" }, + { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" }, + { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" }, + { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" }, + { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" }, + { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" }, + { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" }, + { url = 
"https://files.pythonhosted.org/packages/53/ea/bbe9095cdd771987d13c82d104a9c8559ae9aec1e29f139e286fd2e9256e/pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d", size = 2028677, upload-time = "2025-04-23T18:32:27.227Z" }, + { url = "https://files.pythonhosted.org/packages/49/1d/4ac5ed228078737d457a609013e8f7edc64adc37b91d619ea965758369e5/pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954", size = 1864735, upload-time = "2025-04-23T18:32:29.019Z" }, + { url = "https://files.pythonhosted.org/packages/23/9a/2e70d6388d7cda488ae38f57bc2f7b03ee442fbcf0d75d848304ac7e405b/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb", size = 1898467, upload-time = "2025-04-23T18:32:31.119Z" }, + { url = "https://files.pythonhosted.org/packages/ff/2e/1568934feb43370c1ffb78a77f0baaa5a8b6897513e7a91051af707ffdc4/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7", size = 1983041, upload-time = "2025-04-23T18:32:33.655Z" }, + { url = "https://files.pythonhosted.org/packages/01/1a/1a1118f38ab64eac2f6269eb8c120ab915be30e387bb561e3af904b12499/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4", size = 2136503, upload-time = "2025-04-23T18:32:35.519Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/da/44754d1d7ae0f22d6d3ce6c6b1486fc07ac2c524ed8f6eca636e2e1ee49b/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b", size = 2736079, upload-time = "2025-04-23T18:32:37.659Z" }, + { url = "https://files.pythonhosted.org/packages/4d/98/f43cd89172220ec5aa86654967b22d862146bc4d736b1350b4c41e7c9c03/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3", size = 2006508, upload-time = "2025-04-23T18:32:39.637Z" }, + { url = "https://files.pythonhosted.org/packages/2b/cc/f77e8e242171d2158309f830f7d5d07e0531b756106f36bc18712dc439df/pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a", size = 2113693, upload-time = "2025-04-23T18:32:41.818Z" }, + { url = "https://files.pythonhosted.org/packages/54/7a/7be6a7bd43e0a47c147ba7fbf124fe8aaf1200bc587da925509641113b2d/pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782", size = 2074224, upload-time = "2025-04-23T18:32:44.033Z" }, + { url = "https://files.pythonhosted.org/packages/2a/07/31cf8fadffbb03be1cb520850e00a8490c0927ec456e8293cafda0726184/pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9", size = 2245403, upload-time = "2025-04-23T18:32:45.836Z" }, + { url = 
"https://files.pythonhosted.org/packages/b6/8d/bbaf4c6721b668d44f01861f297eb01c9b35f612f6b8e14173cb204e6240/pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e", size = 2242331, upload-time = "2025-04-23T18:32:47.618Z" }, + { url = "https://files.pythonhosted.org/packages/bb/93/3cc157026bca8f5006250e74515119fcaa6d6858aceee8f67ab6dc548c16/pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9", size = 1910571, upload-time = "2025-04-23T18:32:49.401Z" }, + { url = "https://files.pythonhosted.org/packages/5b/90/7edc3b2a0d9f0dda8806c04e511a67b0b7a41d2187e2003673a996fb4310/pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3", size = 1956504, upload-time = "2025-04-23T18:32:51.287Z" }, + { url = "https://files.pythonhosted.org/packages/30/68/373d55e58b7e83ce371691f6eaa7175e3a24b956c44628eb25d7da007917/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa", size = 2023982, upload-time = "2025-04-23T18:32:53.14Z" }, + { url = "https://files.pythonhosted.org/packages/a4/16/145f54ac08c96a63d8ed6442f9dec17b2773d19920b627b18d4f10a061ea/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29", size = 1858412, upload-time = "2025-04-23T18:32:55.52Z" }, + { url = 
"https://files.pythonhosted.org/packages/41/b1/c6dc6c3e2de4516c0bb2c46f6a373b91b5660312342a0cf5826e38ad82fa/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d", size = 1892749, upload-time = "2025-04-23T18:32:57.546Z" }, + { url = "https://files.pythonhosted.org/packages/12/73/8cd57e20afba760b21b742106f9dbdfa6697f1570b189c7457a1af4cd8a0/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e", size = 2067527, upload-time = "2025-04-23T18:32:59.771Z" }, + { url = "https://files.pythonhosted.org/packages/e3/d5/0bb5d988cc019b3cba4a78f2d4b3854427fc47ee8ec8e9eaabf787da239c/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c", size = 2108225, upload-time = "2025-04-23T18:33:04.51Z" }, + { url = "https://files.pythonhosted.org/packages/f1/c5/00c02d1571913d496aabf146106ad8239dc132485ee22efe08085084ff7c/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec", size = 2069490, upload-time = "2025-04-23T18:33:06.391Z" }, + { url = "https://files.pythonhosted.org/packages/22/a8/dccc38768274d3ed3a59b5d06f59ccb845778687652daa71df0cab4040d7/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052", size = 2237525, upload-time = "2025-04-23T18:33:08.44Z" }, + { url = 
"https://files.pythonhosted.org/packages/d4/e7/4f98c0b125dda7cf7ccd14ba936218397b44f50a56dd8c16a3091df116c3/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c", size = 2238446, upload-time = "2025-04-23T18:33:10.313Z" }, + { url = "https://files.pythonhosted.org/packages/ce/91/2ec36480fdb0b783cd9ef6795753c1dea13882f2e68e73bce76ae8c21e6a/pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808", size = 2066678, upload-time = "2025-04-23T18:33:12.224Z" }, + { url = "https://files.pythonhosted.org/packages/7b/27/d4ae6487d73948d6f20dddcd94be4ea43e74349b56eba82e9bdee2d7494c/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8", size = 2025200, upload-time = "2025-04-23T18:33:14.199Z" }, + { url = "https://files.pythonhosted.org/packages/f1/b8/b3cb95375f05d33801024079b9392a5ab45267a63400bf1866e7ce0f0de4/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593", size = 1859123, upload-time = "2025-04-23T18:33:16.555Z" }, + { url = "https://files.pythonhosted.org/packages/05/bc/0d0b5adeda59a261cd30a1235a445bf55c7e46ae44aea28f7bd6ed46e091/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612", size = 1892852, upload-time = "2025-04-23T18:33:18.513Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/11/d37bdebbda2e449cb3f519f6ce950927b56d62f0b84fd9cb9e372a26a3d5/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7", size = 2067484, upload-time = "2025-04-23T18:33:20.475Z" }, + { url = "https://files.pythonhosted.org/packages/8c/55/1f95f0a05ce72ecb02a8a8a1c3be0579bbc29b1d5ab68f1378b7bebc5057/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e", size = 2108896, upload-time = "2025-04-23T18:33:22.501Z" }, + { url = "https://files.pythonhosted.org/packages/53/89/2b2de6c81fa131f423246a9109d7b2a375e83968ad0800d6e57d0574629b/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8", size = 2069475, upload-time = "2025-04-23T18:33:24.528Z" }, + { url = "https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013, upload-time = "2025-04-23T18:33:26.621Z" }, + { url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715, upload-time = "2025-04-23T18:33:28.656Z" }, + { url = 
"https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = "2025-04-23T18:33:30.645Z" }, + { url = "https://files.pythonhosted.org/packages/08/98/dbf3fdfabaf81cda5622154fda78ea9965ac467e3239078e0dcd6df159e7/pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101", size = 2024034, upload-time = "2025-04-23T18:33:32.843Z" }, + { url = "https://files.pythonhosted.org/packages/8d/99/7810aa9256e7f2ccd492590f86b79d370df1e9292f1f80b000b6a75bd2fb/pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64", size = 1858578, upload-time = "2025-04-23T18:33:34.912Z" }, + { url = "https://files.pythonhosted.org/packages/d8/60/bc06fa9027c7006cc6dd21e48dbf39076dc39d9abbaf718a1604973a9670/pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d", size = 1892858, upload-time = "2025-04-23T18:33:36.933Z" }, + { url = "https://files.pythonhosted.org/packages/f2/40/9d03997d9518816c68b4dfccb88969756b9146031b61cd37f781c74c9b6a/pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535", size = 2068498, upload-time = "2025-04-23T18:33:38.997Z" }, + { url = 
"https://files.pythonhosted.org/packages/d8/62/d490198d05d2d86672dc269f52579cad7261ced64c2df213d5c16e0aecb1/pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d", size = 2108428, upload-time = "2025-04-23T18:33:41.18Z" }, + { url = "https://files.pythonhosted.org/packages/9a/ec/4cd215534fd10b8549015f12ea650a1a973da20ce46430b68fc3185573e8/pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6", size = 2069854, upload-time = "2025-04-23T18:33:43.446Z" }, + { url = "https://files.pythonhosted.org/packages/1a/1a/abbd63d47e1d9b0d632fee6bb15785d0889c8a6e0a6c3b5a8e28ac1ec5d2/pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca", size = 2237859, upload-time = "2025-04-23T18:33:45.56Z" }, + { url = "https://files.pythonhosted.org/packages/80/1c/fa883643429908b1c90598fd2642af8839efd1d835b65af1f75fba4d94fe/pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039", size = 2239059, upload-time = "2025-04-23T18:33:47.735Z" }, + { url = "https://files.pythonhosted.org/packages/d4/29/3cade8a924a61f60ccfa10842f75eb12787e1440e2b8660ceffeb26685e7/pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27", size = 2066661, upload-time = "2025-04-23T18:33:49.995Z" }, +] + +[[package]] +name = "pylint" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + 
{ name = "astroid" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "dill" }, + { name = "isort" }, + { name = "mccabe" }, + { name = "platformdirs" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomlkit" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, +] + +[[package]] +name = "pytest" +version = "8.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b4/8c/9862305bdcd6020bc7b45b1b5e7397a6caf1a33d3025b9a003b39075ffb2/pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce", size = 1439314, upload-time = "2024-07-25T10:40:00.159Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/f9/cf155cf32ca7d6fa3601bc4c5dd19086af4b320b706919d48a4c79081cf9/pytest-8.3.2-py3-none-any.whl", 
hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5", size = 341802, upload-time = "2024-07-25T10:39:57.834Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/b4/0b378b7bf26a8ae161c3890c0b48a91a04106c5713ce81b4b080ea2f4f18/pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3", size = 46920, upload-time = "2024-07-17T17:39:34.617Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/82/62e2d63639ecb0fbe8a7ee59ef0bc69a4669ec50f6d3459f74ad4e4189a2/pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2", size = 17663, upload-time = "2024-07-17T17:39:32.478Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "tomli" +version = "2.0.1" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c0/3f/d7af728f075fb08564c5949a9c95e44352e23dee646869fa104a3b2060a3/tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f", size = 15164, upload-time = "2022-02-08T10:54:04.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/97/75/10a9ebee3fd790d20926a90a2547f0bf78f371b2f13aa822c759680ca7b9/tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc", size = 12757, upload-time = "2022-02-08T10:54:02.017Z" }, +] + +[[package]] +name = "tomlkit" +version = "0.13.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4b/34/f5f4fbc6b329c948a90468dd423aaa3c3bfc1e07d5a76deec269110f2f6e/tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72", size = 191792, upload-time = "2024-07-10T09:25:56.381Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/7c/b753bf603852cab0a660da6e81f4ea5d2ca0f0b2b4870766d7aa9bceb7a2/tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264", size = 37770, upload-time = "2024-07-10T09:25:54.676Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = 
"sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321, upload-time = "2024-06-07T18:52:15.995Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438, upload-time = "2024-06-07T18:52:13.582Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/82/5c/e6082df02e215b846b4b8c0b887a64d7d08ffaba30605502639d44c06b82/typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122", size = 76222, upload-time = "2025-02-25T17:27:59.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/08/aa4fdfb71f7de5176385bd9e90852eaf6b5d622735020ad600f2bab54385/typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f", size = 14125, upload-time = "2025-02-25T17:27:57.754Z" }, +] diff --git a/packages/mistralai_gcp/poetry.lock b/packages/mistralai_gcp/poetry.lock deleted file mode 100644 index 5f710a98..00000000 --- a/packages/mistralai_gcp/poetry.lock +++ /dev/null @@ -1,900 +0,0 @@ -# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand. 
- -[[package]] -name = "annotated-types" -version = "0.7.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, - {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, -] - -[[package]] -name = "anyio" -version = "4.4.0" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, -] - -[package.dependencies] -exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} -idna = ">=2.8" -sniffio = ">=1.1" -typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} - -[package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] -trio = ["trio (>=0.23)"] - -[[package]] -name = "astroid" -version = "3.2.4" -description = "An abstract syntax tree for Python with inference support." 
-optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, - {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} - -[[package]] -name = "cachetools" -version = "5.4.0" -description = "Extensible memoizing collections and decorators" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "cachetools-5.4.0-py3-none-any.whl", hash = "sha256:3ae3b49a3d5e28a77a0be2b37dbcb89005058959cb2323858c2657c4a8cab474"}, - {file = "cachetools-5.4.0.tar.gz", hash = "sha256:b8adc2e7c07f105ced7bc56dbb6dfbe7c4a00acce20e2227b3f355be89bc6827"}, -] - -[[package]] -name = "certifi" -version = "2024.7.4" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, - {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.3.2" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-optional = false -python-versions = ">=3.7.0" -groups = ["main"] -files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", 
hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = 
"charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = 
"charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = 
"sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, -] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["dev"] -markers = "sys_platform == \"win32\"" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "dill" -version = "0.3.8" -description = "serialize all of Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "dill-0.3.8-py3-none-any.whl", hash = "sha256:c36ca9ffb54365bdd2f8eb3eff7d2a21237f8452b57ace88b1ac615b7e815bd7"}, - {file = "dill-0.3.8.tar.gz", hash = "sha256:3ebe3c479ad625c4553aca177444d89b486b1d84982eeacded644afc0cf797ca"}, -] - -[package.extras] -graph = ["objgraph (>=1.7.2)"] -profile = ["gprof2dot (>=2022.7.29)"] - -[[package]] -name = "eval-type-backport" -version = "0.2.0" -description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." 
-optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, - {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, -] - -[package.extras] -tests = ["pytest"] - -[[package]] -name = "exceptiongroup" -version = "1.2.2" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "google-auth" -version = "2.38.0" -description = "Google Authentication Library" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "google_auth-2.38.0-py2.py3-none-any.whl", hash = "sha256:e7dae6694313f434a2727bf2906f27ad259bae090d7aa896590d86feec3d9d4a"}, - {file = "google_auth-2.38.0.tar.gz", hash = "sha256:8285113607d3b80a3f1543b75962447ba8a09fe85783432a784fdeef6ac094c4"}, -] - -[package.dependencies] -cachetools = ">=2.0.0,<6.0" -pyasn1-modules = ">=0.2.1" -rsa = ">=3.1.4,<5" - -[package.extras] -aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] -enterprise-cert = ["cryptography", "pyopenssl"] -pyjwt = ["cryptography (>=38.0.3)", "pyjwt (>=2.0)"] -pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] -reauth = ["pyu2f (>=0.1.5)"] -requests = ["requests (>=2.20.0,<3.0.0.dev0)"] - -[[package]] -name = "h11" -version = "0.14.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -optional = false -python-versions = ">=3.7" -groups = 
["main"] -files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, -] - -[[package]] -name = "httpcore" -version = "1.0.5" -description = "A minimal low-level HTTP client." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, - {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, -] - -[package.dependencies] -certifi = "*" -h11 = ">=0.13,<0.15" - -[package.extras] -asyncio = ["anyio (>=4.0,<5.0)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.26.0)"] - -[[package]] -name = "httpx" -version = "0.28.1" -description = "The next generation HTTP client." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, - {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, -] - -[package.dependencies] -anyio = "*" -certifi = "*" -httpcore = "==1.*" -idna = "*" - -[package.extras] -brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] -cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "idna" -version = "3.7" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -groups = ["main"] -files = [ - {file = "idna-3.7-py3-none-any.whl", hash = 
"sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, -] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "isort" -version = "5.13.2" -description = "A Python utility / library to sort Python imports." -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, - {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, -] - -[package.extras] -colors = ["colorama (>=0.4.6)"] - -[[package]] -name = "mccabe" -version = "0.7.0" -description = "McCabe checker, plugin for flake8" -optional = false -python-versions = ">=3.6" -groups = ["dev"] -files = [ - {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, - {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, -] - -[[package]] -name = "mypy" -version = "1.14.1" -description = "Optional static typing for Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, - {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, - {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, - {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, - {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, - {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, - {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, - {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, - {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, - {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, - {file = 
"mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, - {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, - {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, - {file = "mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, - {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, - {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, - {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, - {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, - {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, - {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, - {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, - {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, - {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, - {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, - {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, - {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, - {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, - {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, -] - -[package.dependencies] -mypy_extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing_extensions = ">=4.6.0" - -[package.extras] -dmypy = ["psutil (>=4.0)"] -faster-cache = ["orjson"] -install-types = ["pip"] -mypyc = ["setuptools 
(>=50)"] -reports = ["lxml"] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." -optional = false -python-versions = ">=3.5" -groups = ["dev"] -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "packaging" -version = "24.1" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, -] - -[[package]] -name = "platformdirs" -version = "4.2.2" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
-optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, -] - -[package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] - -[[package]] -name = "pluggy" -version = "1.5.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "pyasn1" -version = "0.6.0" -description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, - {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, -] - -[[package]] -name = "pyasn1-modules" -version = "0.4.0" -description = "A collection of ASN.1-based protocols modules" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pyasn1_modules-0.4.0-py3-none-any.whl", hash = "sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b"}, - {file = "pyasn1_modules-0.4.0.tar.gz", hash = 
"sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6"}, -] - -[package.dependencies] -pyasn1 = ">=0.4.6,<0.7.0" - -[[package]] -name = "pydantic" -version = "2.10.6" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, - {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, -] - -[package.dependencies] -annotated-types = ">=0.6.0" -pydantic-core = "2.27.2" -typing-extensions = ">=4.12.2" - -[package.extras] -email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] - -[[package]] -name = "pydantic-core" -version = "2.27.2" -description = "Core functionality for Pydantic validation and serialization" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, - {file = 
"pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = 
"sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = 
"sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, - {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - -[[package]] -name = "pylint" -version = "3.2.3" -description = "python code static checker" -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, - {file = "pylint-3.2.3.tar.gz", hash = 
"sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, -] - -[package.dependencies] -astroid = ">=3.2.2,<=3.3.0-dev0" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -dill = [ - {version = ">=0.2", markers = "python_version < \"3.11\""}, - {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version == \"3.11\""}, -] -isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" -mccabe = ">=0.6,<0.8" -platformdirs = ">=2.2.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -tomlkit = ">=0.10.1" -typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} - -[package.extras] -spelling = ["pyenchant (>=3.2,<4.0)"] -testutils = ["gitpython (>3)"] - -[[package]] -name = "pytest" -version = "8.3.2" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest-8.3.2-py3-none-any.whl", hash = "sha256:4ba08f9ae7dcf84ded419494d229b48d0903ea6407b030eaec46df5e6a73bba5"}, - {file = "pytest-8.3.2.tar.gz", hash = "sha256:c132345d12ce551242c87269de812483f5bcc87cdbb4722e48487ba194f9fdce"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=1.5,<2" -tomli = {version = ">=1", markers = "python_version < \"3.11\""} - -[package.extras] -dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-asyncio" -version = "0.23.8" -description = "Pytest support for asyncio" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, - {file = 
"pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, -] - -[package.dependencies] -pytest = ">=7.0.0,<9" - -[package.extras] -docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] -testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] - -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "requests" -version = "2.32.3" -description = "Python HTTP for Humans." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "rsa" -version = "4.2" -description = "Pure-Python RSA implementation" -optional = false -python-versions = "*" -groups = ["main"] -markers = "python_version >= \"3.12\"" -files = [ - {file = "rsa-4.2.tar.gz", hash = "sha256:aaefa4b84752e3e99bd8333a2e1e3e7a7da64614042bd66f775573424370108a"}, -] - -[package.dependencies] -pyasn1 = ">=0.1.3" - -[[package]] -name = "rsa" -version = "4.9" -description = "Pure-Python RSA implementation" -optional = false -python-versions = ">=3.6,<4" -groups 
= ["main"] -markers = "python_version <= \"3.11\"" -files = [ - {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, - {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, -] - -[package.dependencies] -pyasn1 = ">=0.1.3" - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -groups = ["main"] -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "sniffio" -version = "1.3.1" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, - {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, -] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "tomlkit" -version = "0.13.0" -description = "Style preserving TOML library" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "tomlkit-0.13.0-py3-none-any.whl", hash = "sha256:7075d3042d03b80f603482d69bf0c8f345c2b30e41699fd8883227f89972b264"}, - {file = 
"tomlkit-0.13.0.tar.gz", hash = "sha256:08ad192699734149f5b97b45f1f18dad7eb1b6d16bc72ad0c2335772650d7b72"}, -] - -[[package]] -name = "types-python-dateutil" -version = "2.9.0.20240316" -description = "Typing stubs for python-dateutil" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "types-python-dateutil-2.9.0.20240316.tar.gz", hash = "sha256:5d2f2e240b86905e40944dd787db6da9263f0deabef1076ddaed797351ec0202"}, - {file = "types_python_dateutil-2.9.0.20240316-py3-none-any.whl", hash = "sha256:6b8cb66d960771ce5ff974e9dd45e38facb81718cc1e208b10b1baccbfdbee3b"}, -] - -[[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, -] - -[[package]] -name = "typing-inspection" -version = "0.4.0" -description = "Runtime typing introspection tools" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, - {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, -] - -[package.dependencies] -typing-extensions = ">=4.12.0" - -[[package]] -name = "urllib3" -version = "2.2.2" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, - {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] -h2 = ["h2 (>=4,<5)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[metadata] -lock-version = "2.1" -python-versions = ">=3.9" -content-hash = "e393da36a5d5edb020e739f40ff611854b9940e11a34a4e221f3f1513efeb9db" diff --git a/packages/mistralai_gcp/poetry.toml b/packages/mistralai_gcp/poetry.toml deleted file mode 100644 index ab1033bd..00000000 --- a/packages/mistralai_gcp/poetry.toml +++ /dev/null @@ -1,2 +0,0 @@ -[virtualenvs] -in-project = true diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 9763e417..f7f182b6 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -2,9 +2,9 @@ name = "mistralai-gcp" version = "1.6.0" description = "Python Client SDK for the Mistral AI API in GCP." 
-authors = [{ name = "Mistral" },] -readme = "README-PYPI.md" +authors = [{ name = "Mistral" }] requires-python = ">=3.9" +readme = "README-PYPI.md" dependencies = [ "eval-type-backport >=0.2.0", "google-auth (>=2.31.0,<3.0.0)", @@ -15,28 +15,37 @@ dependencies = [ "typing-inspection >=0.4.0", ] -[tool.poetry] -packages = [ - { include = "mistralai_gcp", from = "src" } +[dependency-groups] +dev = [ + "mypy==1.14.1", + "pylint==3.2.3", + "pytest>=8.2.2,<9", + "pytest-asyncio>=0.23.7,<0.24", + "types-python-dateutil>=2.9.0.20240316,<3", ] -include = ["py.typed", "src/mistralai_gcp/py.typed"] [tool.setuptools.package-data] "*" = ["py.typed", "src/mistralai_gcp/py.typed"] +[tool.hatch.build.targets.sdist] +include = ["src/mistralai_gcp"] + +[tool.hatch.build.targets.sdist.force-include] +"py.typed" = "py.typed" +"src/mistralai_gcp/py.typed" = "src/mistralai_gcp/py.typed" + +[tool.hatch.build.targets.wheel] +include = ["src/mistralai_gcp"] + +[tool.hatch.build.targets.wheel.sources] +"src/mistralai_gcp" = "mistralai_gcp" + [virtualenvs] in-project = true -[tool.poetry.group.dev.dependencies] -mypy = "==1.14.1" -pylint = "==3.2.3" -pytest = "^8.2.2" -pytest-asyncio = "^0.23.7" -types-python-dateutil = "^2.9.0.20240316" - [build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" [tool.pytest.ini_options] asyncio_default_fixture_loop_scope = "function" diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index f2f2cf2c..d2bef9f7 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -1,7 +1,8 @@ #!/usr/bin/env bash -export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} +export UV_PUBLISH_TOKEN=${PYPI_TOKEN} -poetry run python scripts/prepare_readme.py +uv run python scripts/prepare_readme.py -poetry publish --build --skip-existing +uv build +uv publish diff --git a/packages/mistralai_gcp/uv.lock 
b/packages/mistralai_gcp/uv.lock new file mode 100644 index 00000000..ef292b22 --- /dev/null +++ b/packages/mistralai_gcp/uv.lock @@ -0,0 +1,823 @@ +version = 1 +revision = 3 +requires-python = ">=3.9" +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version == '3.10.*'", + "python_full_version < '3.10'", +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = "sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, +] + +[[package]] +name = "astroid" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/53/1067e1113ecaf58312357f2cd93063674924119d80d173adc3f6f2387aa2/astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a", size = 397576, upload-time = "2024-07-20T12:57:43.26Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/96/b32bbbb46170a1c8b8b1f28c794202e25cfe743565e9d3469b8eb1e0cc05/astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25", size = 276348, upload-time = "2024-07-20T12:57:40.886Z" }, +] + +[[package]] +name = "cachetools" +version = "6.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/1d/ede8680603f6016887c062a2cf4fc8fdba905866a3ab8831aa8aa651320c/cachetools-6.2.4.tar.gz", hash = "sha256:82c5c05585e70b6ba2d3ae09ea60b79548872185d2f24ae1f2709d37299fd607", size = 31731, upload-time = "2025-12-15T18:24:53.744Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = 
"sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, upload-time = "2025-12-15T18:24:52.332Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/b8/6d51fc1d52cbd52cd4ccedd5b5b2f0f6a11bbf6765c782298b0f3e808541/charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d", size = 209709, upload-time = "2025-10-14T04:40:11.385Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/af/1f9d7f7faafe2ddfb6f72a2e07a548a629c61ad510fe60f9630309908fef/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8", size = 148814, upload-time = "2025-10-14T04:40:13.135Z" }, + { url = "https://files.pythonhosted.org/packages/79/3d/f2e3ac2bbc056ca0c204298ea4e3d9db9b4afe437812638759db2c976b5f/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad", size = 144467, upload-time = "2025-10-14T04:40:14.728Z" }, + { url = "https://files.pythonhosted.org/packages/ec/85/1bf997003815e60d57de7bd972c57dc6950446a3e4ccac43bc3070721856/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8", size = 162280, upload-time = "2025-10-14T04:40:16.14Z" }, + { url = "https://files.pythonhosted.org/packages/3e/8e/6aa1952f56b192f54921c436b87f2aaf7c7a7c3d0d1a765547d64fd83c13/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d", size = 159454, upload-time = "2025-10-14T04:40:17.567Z" }, + { url = "https://files.pythonhosted.org/packages/36/3b/60cbd1f8e93aa25d1c669c649b7a655b0b5fb4c571858910ea9332678558/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313", size = 153609, 
upload-time = "2025-10-14T04:40:19.08Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/6a13396948b8fd3c4b4fd5bc74d045f5637d78c9675585e8e9fbe5636554/charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e", size = 151849, upload-time = "2025-10-14T04:40:20.607Z" }, + { url = "https://files.pythonhosted.org/packages/b7/7a/59482e28b9981d105691e968c544cc0df3b7d6133152fb3dcdc8f135da7a/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93", size = 151586, upload-time = "2025-10-14T04:40:21.719Z" }, + { url = "https://files.pythonhosted.org/packages/92/59/f64ef6a1c4bdd2baf892b04cd78792ed8684fbc48d4c2afe467d96b4df57/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0", size = 145290, upload-time = "2025-10-14T04:40:23.069Z" }, + { url = "https://files.pythonhosted.org/packages/6b/63/3bf9f279ddfa641ffa1962b0db6a57a9c294361cc2f5fcac997049a00e9c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84", size = 163663, upload-time = "2025-10-14T04:40:24.17Z" }, + { url = "https://files.pythonhosted.org/packages/ed/09/c9e38fc8fa9e0849b172b581fd9803bdf6e694041127933934184e19f8c3/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e", size = 151964, upload-time = "2025-10-14T04:40:25.368Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/d1/d28b747e512d0da79d8b6a1ac18b7ab2ecfd81b2944c4c710e166d8dd09c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db", size = 161064, upload-time = "2025-10-14T04:40:26.806Z" }, + { url = "https://files.pythonhosted.org/packages/bb/9a/31d62b611d901c3b9e5500c36aab0ff5eb442043fb3a1c254200d3d397d9/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6", size = 155015, upload-time = "2025-10-14T04:40:28.284Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f3/107e008fa2bff0c8b9319584174418e5e5285fef32f79d8ee6a430d0039c/charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f", size = 99792, upload-time = "2025-10-14T04:40:29.613Z" }, + { url = "https://files.pythonhosted.org/packages/eb/66/e396e8a408843337d7315bab30dbf106c38966f1819f123257f5520f8a96/charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d", size = 107198, upload-time = "2025-10-14T04:40:30.644Z" }, + { url = "https://files.pythonhosted.org/packages/b5/58/01b4f815bf0312704c267f2ccb6e5d42bcc7752340cd487bc9f8c3710597/charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69", size = 100262, upload-time = "2025-10-14T04:40:32.108Z" }, + { url = 
"https://files.pythonhosted.org/packages/ed/27/c6491ff4954e58a10f69ad90aca8a1b6fe9c5d3c6f380907af3c37435b59/charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8", size = 206988, upload-time = "2025-10-14T04:40:33.79Z" }, + { url = "https://files.pythonhosted.org/packages/94/59/2e87300fe67ab820b5428580a53cad894272dbb97f38a7a814a2a1ac1011/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0", size = 147324, upload-time = "2025-10-14T04:40:34.961Z" }, + { url = "https://files.pythonhosted.org/packages/07/fb/0cf61dc84b2b088391830f6274cb57c82e4da8bbc2efeac8c025edb88772/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3", size = 142742, upload-time = "2025-10-14T04:40:36.105Z" }, + { url = "https://files.pythonhosted.org/packages/62/8b/171935adf2312cd745d290ed93cf16cf0dfe320863ab7cbeeae1dcd6535f/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc", size = 160863, upload-time = "2025-10-14T04:40:37.188Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/ad875b192bda14f2173bfc1bc9a55e009808484a4b256748d931b6948442/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897", size = 157837, upload-time = "2025-10-14T04:40:38.435Z" }, 
+ { url = "https://files.pythonhosted.org/packages/6d/fc/de9cce525b2c5b94b47c70a4b4fb19f871b24995c728e957ee68ab1671ea/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381", size = 151550, upload-time = "2025-10-14T04:40:40.053Z" }, + { url = "https://files.pythonhosted.org/packages/55/c2/43edd615fdfba8c6f2dfbd459b25a6b3b551f24ea21981e23fb768503ce1/charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815", size = 149162, upload-time = "2025-10-14T04:40:41.163Z" }, + { url = "https://files.pythonhosted.org/packages/03/86/bde4ad8b4d0e9429a4e82c1e8f5c659993a9a863ad62c7df05cf7b678d75/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0", size = 150019, upload-time = "2025-10-14T04:40:42.276Z" }, + { url = "https://files.pythonhosted.org/packages/1f/86/a151eb2af293a7e7bac3a739b81072585ce36ccfb4493039f49f1d3cae8c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161", size = 143310, upload-time = "2025-10-14T04:40:43.439Z" }, + { url = "https://files.pythonhosted.org/packages/b5/fe/43dae6144a7e07b87478fdfc4dbe9efd5defb0e7ec29f5f58a55aeef7bf7/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4", size = 162022, upload-time = "2025-10-14T04:40:44.547Z" }, + { url = 
"https://files.pythonhosted.org/packages/80/e6/7aab83774f5d2bca81f42ac58d04caf44f0cc2b65fc6db2b3b2e8a05f3b3/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89", size = 149383, upload-time = "2025-10-14T04:40:46.018Z" }, + { url = "https://files.pythonhosted.org/packages/4f/e8/b289173b4edae05c0dde07f69f8db476a0b511eac556dfe0d6bda3c43384/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569", size = 159098, upload-time = "2025-10-14T04:40:47.081Z" }, + { url = "https://files.pythonhosted.org/packages/d8/df/fe699727754cae3f8478493c7f45f777b17c3ef0600e28abfec8619eb49c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224", size = 152991, upload-time = "2025-10-14T04:40:48.246Z" }, + { url = "https://files.pythonhosted.org/packages/1a/86/584869fe4ddb6ffa3bd9f491b87a01568797fb9bd8933f557dba9771beaf/charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a", size = 99456, upload-time = "2025-10-14T04:40:49.376Z" }, + { url = "https://files.pythonhosted.org/packages/65/f6/62fdd5feb60530f50f7e38b4f6a1d5203f4d16ff4f9f0952962c044e919a/charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016", size = 106978, upload-time = "2025-10-14T04:40:50.844Z" }, + { url = 
"https://files.pythonhosted.org/packages/7a/9d/0710916e6c82948b3be62d9d398cb4fcf4e97b56d6a6aeccd66c4b2f2bd5/charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1", size = 99969, upload-time = "2025-10-14T04:40:52.272Z" }, + { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = "https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = 
"https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, + { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, + { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = 
"https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = 
"https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, + { url = "https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = 
"https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = 
"https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = 
"https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = 
"2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/46/7c/0c4760bccf082737ca7ab84a4c2034fcc06b1f21cf3032ea98bd6feb1725/charset_normalizer-3.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9", size = 209609, upload-time = "2025-10-14T04:42:10.922Z" }, + { url = 
"https://files.pythonhosted.org/packages/bb/a4/69719daef2f3d7f1819de60c9a6be981b8eeead7542d5ec4440f3c80e111/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d", size = 149029, upload-time = "2025-10-14T04:42:12.38Z" }, + { url = "https://files.pythonhosted.org/packages/e6/21/8d4e1d6c1e6070d3672908b8e4533a71b5b53e71d16828cc24d0efec564c/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608", size = 144580, upload-time = "2025-10-14T04:42:13.549Z" }, + { url = "https://files.pythonhosted.org/packages/a7/0a/a616d001b3f25647a9068e0b9199f697ce507ec898cacb06a0d5a1617c99/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc", size = 162340, upload-time = "2025-10-14T04:42:14.892Z" }, + { url = "https://files.pythonhosted.org/packages/85/93/060b52deb249a5450460e0585c88a904a83aec474ab8e7aba787f45e79f2/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e", size = 159619, upload-time = "2025-10-14T04:42:16.676Z" }, + { url = "https://files.pythonhosted.org/packages/dd/21/0274deb1cc0632cd587a9a0ec6b4674d9108e461cb4cd40d457adaeb0564/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1", size = 153980, upload-time 
= "2025-10-14T04:42:17.917Z" }, + { url = "https://files.pythonhosted.org/packages/28/2b/e3d7d982858dccc11b31906976323d790dded2017a0572f093ff982d692f/charset_normalizer-3.4.4-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3", size = 152174, upload-time = "2025-10-14T04:42:19.018Z" }, + { url = "https://files.pythonhosted.org/packages/6e/ff/4a269f8e35f1e58b2df52c131a1fa019acb7ef3f8697b7d464b07e9b492d/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6", size = 151666, upload-time = "2025-10-14T04:42:20.171Z" }, + { url = "https://files.pythonhosted.org/packages/da/c9/ec39870f0b330d58486001dd8e532c6b9a905f5765f58a6f8204926b4a93/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88", size = 145550, upload-time = "2025-10-14T04:42:21.324Z" }, + { url = "https://files.pythonhosted.org/packages/75/8f/d186ab99e40e0ed9f82f033d6e49001701c81244d01905dd4a6924191a30/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1", size = 163721, upload-time = "2025-10-14T04:42:22.46Z" }, + { url = "https://files.pythonhosted.org/packages/96/b1/6047663b9744df26a7e479ac1e77af7134b1fcf9026243bb48ee2d18810f/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf", size = 152127, upload-time = "2025-10-14T04:42:23.712Z" }, + { url = 
"https://files.pythonhosted.org/packages/59/78/e5a6eac9179f24f704d1be67d08704c3c6ab9f00963963524be27c18ed87/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318", size = 161175, upload-time = "2025-10-14T04:42:24.87Z" }, + { url = "https://files.pythonhosted.org/packages/e5/43/0e626e42d54dd2f8dd6fc5e1c5ff00f05fbca17cb699bedead2cae69c62f/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c", size = 155375, upload-time = "2025-10-14T04:42:27.246Z" }, + { url = "https://files.pythonhosted.org/packages/e9/91/d9615bf2e06f35e4997616ff31248c3657ed649c5ab9d35ea12fce54e380/charset_normalizer-3.4.4-cp39-cp39-win32.whl", hash = "sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505", size = 99692, upload-time = "2025-10-14T04:42:28.425Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a9/6c040053909d9d1ef4fcab45fddec083aedc9052c10078339b47c8573ea8/charset_normalizer-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966", size = 107192, upload-time = "2025-10-14T04:42:29.482Z" }, + { url = "https://files.pythonhosted.org/packages/f0/c6/4fa536b2c0cd3edfb7ccf8469fa0f363ea67b7213a842b90909ca33dd851/charset_normalizer-3.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50", size = 100220, upload-time = "2025-10-14T04:42:30.632Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = 
"sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "dill" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/80/630b4b88364e9a8c8c5797f4602d0f76ef820909ee32f0bacb9f90654042/dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0", size = 186976, upload-time = "2025-04-16T00:41:48.867Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size = 119668, upload-time = "2025-04-16T00:41:47.671Z" }, +] + +[[package]] +name = "eval-type-backport" +version = "0.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/fb/a3/cafafb4558fd638aadfe4121dc6cefb8d743368c085acb2f521df0f3d9d7/eval_type_backport-0.3.1.tar.gz", hash = "sha256:57e993f7b5b69d271e37482e62f74e76a0276c82490cf8e4f0dffeb6b332d5ed", size = 9445, upload-time = "2025-12-02T11:51:42.987Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/22/fdc2e30d43ff853720042fa15baa3e6122722be1a7950a98233ebb55cd71/eval_type_backport-0.3.1-py3-none-any.whl", hash = "sha256:279ab641905e9f11129f56a8a78f493518515b83402b860f6f06dd7c011fdfa8", size = 6063, upload-time = "2025-12-02T11:51:41.665Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, +] + +[[package]] +name = "google-auth" +version = "2.45.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/e5/00/3c794502a8b892c404b2dea5b3650eb21bfc7069612fbfd15c7f17c1cb0d/google_auth-2.45.0.tar.gz", hash = "sha256:90d3f41b6b72ea72dd9811e765699ee491ab24139f34ebf1ca2b9cc0c38708f3", size = 320708, upload-time = "2025-12-15T22:58:42.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/97/451d55e05487a5cd6279a01a7e34921858b16f7dc8aa38a2c684743cd2b3/google_auth-2.45.0-py2.py3-none-any.whl", hash = "sha256:82344e86dc00410ef5382d99be677c6043d72e502b625aa4f4afa0bdacca0f36", size = 233312, upload-time = "2025-12-15T22:58:40.777Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, 
upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", 
hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version == '3.10.*'", +] +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] 
+ +[[package]] +name = "isort" +version = "5.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", size = 175303, upload-time = "2023-12-13T20:37:26.124Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310, upload-time = "2023-12-13T20:37:23.244Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = "mistralai-gcp" +version = "1.6.0" +source = { editable = "." 
} +dependencies = [ + { name = "eval-type-backport" }, + { name = "google-auth" }, + { name = "httpx" }, + { name = "pydantic" }, + { name = "python-dateutil" }, + { name = "requests" }, + { name = "typing-inspection" }, +] + +[package.dev-dependencies] +dev = [ + { name = "mypy" }, + { name = "pylint" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "types-python-dateutil" }, +] + +[package.metadata] +requires-dist = [ + { name = "eval-type-backport", specifier = ">=0.2.0" }, + { name = "google-auth", specifier = ">=2.31.0,<3.0.0" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "pydantic", specifier = ">=2.10.3" }, + { name = "python-dateutil", specifier = ">=2.8.2" }, + { name = "requests", specifier = ">=2.32.3,<3.0.0" }, + { name = "typing-inspection", specifier = ">=0.4.0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "mypy", specifier = "==1.14.1" }, + { name = "pylint", specifier = "==3.2.3" }, + { name = "pytest", specifier = ">=8.2.2,<9" }, + { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, + { name = "types-python-dateutil", specifier = ">=2.9.0.20240316,<3" }, +] + +[[package]] +name = "mypy" +version = "1.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/eb/2c92d8ea1e684440f54fa49ac5d9a5f19967b7b472a281f419e69a8d228e/mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6", size = 3216051, upload-time = "2024-12-30T16:39:07.335Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/7a/87ae2adb31d68402da6da1e5f30c07ea6063e9f09b5e7cfc9dfa44075e74/mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb", size = 11211002, upload-time = "2024-12-30T16:37:22.435Z" }, + { url = "https://files.pythonhosted.org/packages/e1/23/eada4c38608b444618a132be0d199b280049ded278b24cbb9d3fc59658e4/mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0", size = 10358400, upload-time = "2024-12-30T16:37:53.526Z" }, + { url = "https://files.pythonhosted.org/packages/43/c9/d6785c6f66241c62fd2992b05057f404237deaad1566545e9f144ced07f5/mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d", size = 12095172, upload-time = "2024-12-30T16:37:50.332Z" }, + { url = "https://files.pythonhosted.org/packages/c3/62/daa7e787770c83c52ce2aaf1a111eae5893de9e004743f51bfcad9e487ec/mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b", size = 12828732, upload-time = "2024-12-30T16:37:29.96Z" }, + { url = "https://files.pythonhosted.org/packages/1b/a2/5fb18318a3637f29f16f4e41340b795da14f4751ef4f51c99ff39ab62e52/mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427", size = 13012197, upload-time = "2024-12-30T16:38:05.037Z" }, + { url = "https://files.pythonhosted.org/packages/28/99/e153ce39105d164b5f02c06c35c7ba958aaff50a2babba7d080988b03fe7/mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f", size = 9780836, upload-time = "2024-12-30T16:37:19.726Z" }, + { url = 
"https://files.pythonhosted.org/packages/da/11/a9422850fd506edbcdc7f6090682ecceaf1f87b9dd847f9df79942da8506/mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c", size = 11120432, upload-time = "2024-12-30T16:37:11.533Z" }, + { url = "https://files.pythonhosted.org/packages/b6/9e/47e450fd39078d9c02d620545b2cb37993a8a8bdf7db3652ace2f80521ca/mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1", size = 10279515, upload-time = "2024-12-30T16:37:40.724Z" }, + { url = "https://files.pythonhosted.org/packages/01/b5/6c8d33bd0f851a7692a8bfe4ee75eb82b6983a3cf39e5e32a5d2a723f0c1/mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8", size = 12025791, upload-time = "2024-12-30T16:36:58.73Z" }, + { url = "https://files.pythonhosted.org/packages/f0/4c/e10e2c46ea37cab5c471d0ddaaa9a434dc1d28650078ac1b56c2d7b9b2e4/mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f", size = 12749203, upload-time = "2024-12-30T16:37:03.741Z" }, + { url = "https://files.pythonhosted.org/packages/88/55/beacb0c69beab2153a0f57671ec07861d27d735a0faff135a494cd4f5020/mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1", size = 12885900, upload-time = "2024-12-30T16:37:57.948Z" }, + { url = 
"https://files.pythonhosted.org/packages/a2/75/8c93ff7f315c4d086a2dfcde02f713004357d70a163eddb6c56a6a5eff40/mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae", size = 9777869, upload-time = "2024-12-30T16:37:33.428Z" }, + { url = "https://files.pythonhosted.org/packages/43/1b/b38c079609bb4627905b74fc6a49849835acf68547ac33d8ceb707de5f52/mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14", size = 11266668, upload-time = "2024-12-30T16:38:02.211Z" }, + { url = "https://files.pythonhosted.org/packages/6b/75/2ed0d2964c1ffc9971c729f7a544e9cd34b2cdabbe2d11afd148d7838aa2/mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9", size = 10254060, upload-time = "2024-12-30T16:37:46.131Z" }, + { url = "https://files.pythonhosted.org/packages/a1/5f/7b8051552d4da3c51bbe8fcafffd76a6823779101a2b198d80886cd8f08e/mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11", size = 11933167, upload-time = "2024-12-30T16:37:43.534Z" }, + { url = "https://files.pythonhosted.org/packages/04/90/f53971d3ac39d8b68bbaab9a4c6c58c8caa4d5fd3d587d16f5927eeeabe1/mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e", size = 12864341, upload-time = "2024-12-30T16:37:36.249Z" }, + { url = 
"https://files.pythonhosted.org/packages/03/d2/8bc0aeaaf2e88c977db41583559319f1821c069e943ada2701e86d0430b7/mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89", size = 12972991, upload-time = "2024-12-30T16:37:06.743Z" }, + { url = "https://files.pythonhosted.org/packages/6f/17/07815114b903b49b0f2cf7499f1c130e5aa459411596668267535fe9243c/mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b", size = 9879016, upload-time = "2024-12-30T16:37:15.02Z" }, + { url = "https://files.pythonhosted.org/packages/9e/15/bb6a686901f59222275ab228453de741185f9d54fecbaacec041679496c6/mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255", size = 11252097, upload-time = "2024-12-30T16:37:25.144Z" }, + { url = "https://files.pythonhosted.org/packages/f8/b3/8b0f74dfd072c802b7fa368829defdf3ee1566ba74c32a2cb2403f68024c/mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34", size = 10239728, upload-time = "2024-12-30T16:38:08.634Z" }, + { url = "https://files.pythonhosted.org/packages/c5/9b/4fd95ab20c52bb5b8c03cc49169be5905d931de17edfe4d9d2986800b52e/mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a", size = 11924965, upload-time = "2024-12-30T16:38:12.132Z" }, + { url = 
"https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660, upload-time = "2024-12-30T16:38:17.342Z" }, + { url = "https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198, upload-time = "2024-12-30T16:38:32.839Z" }, + { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276, upload-time = "2024-12-30T16:38:20.828Z" }, + { url = "https://files.pythonhosted.org/packages/ca/1f/186d133ae2514633f8558e78cd658070ba686c0e9275c5a5c24a1e1f0d67/mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35", size = 11200493, upload-time = "2024-12-30T16:38:26.935Z" }, + { url = "https://files.pythonhosted.org/packages/af/fc/4842485d034e38a4646cccd1369f6b1ccd7bc86989c52770d75d719a9941/mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc", size = 10357702, upload-time = "2024-12-30T16:38:50.623Z" }, + { url = 
"https://files.pythonhosted.org/packages/b4/e6/457b83f2d701e23869cfec013a48a12638f75b9d37612a9ddf99072c1051/mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9", size = 12091104, upload-time = "2024-12-30T16:38:53.735Z" }, + { url = "https://files.pythonhosted.org/packages/f1/bf/76a569158db678fee59f4fd30b8e7a0d75bcbaeef49edd882a0d63af6d66/mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb", size = 12830167, upload-time = "2024-12-30T16:38:56.437Z" }, + { url = "https://files.pythonhosted.org/packages/43/bc/0bc6b694b3103de9fed61867f1c8bd33336b913d16831431e7cb48ef1c92/mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60", size = 13013834, upload-time = "2024-12-30T16:38:59.204Z" }, + { url = "https://files.pythonhosted.org/packages/b0/79/5f5ec47849b6df1e6943d5fd8e6632fbfc04b4fd4acfa5a5a9535d11b4e2/mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c", size = 9781231, upload-time = "2024-12-30T16:39:05.124Z" }, + { url = "https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905, upload-time = "2024-12-30T16:38:42.021Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.4.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size 
= 21634, upload-time = "2025-08-26T14:32:04.268Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/4b/2028861e724d3bd36227adfa20d3fd24c3fc6d52032f4a93c133be5d17ce/platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85", size = 18654, upload-time = "2025-08-26T14:32:02.735Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.1" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version == '3.10.*'", +] +sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, 
upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" }, + { url = 
"https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" }, + { url = "https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" }, + { url = 
"https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" }, + { url = "https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, upload-time = "2025-11-04T13:39:19.351Z" }, + { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" }, + { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" }, + { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" }, + { url = 
"https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" }, + { url = "https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" }, + { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" }, + { url = "https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" }, + { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" }, + { url = "https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = "2025-11-04T13:39:42.523Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" }, + { url = 
"https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" }, + { url = "https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" }, + { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" }, + { url = "https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = 
"sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = 
"2025-11-04T13:40:06.072Z" }, + { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = 
"https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = "https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = "https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = "https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = 
"sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = 
"2025-11-04T13:41:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = 
"https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/54/db/160dffb57ed9a3705c4cbcbff0ac03bdae45f1ca7d58ab74645550df3fbd/pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf", size = 2107999, upload-time = "2025-11-04T13:42:03.885Z" }, + { url = "https://files.pythonhosted.org/packages/a3/7d/88e7de946f60d9263cc84819f32513520b85c0f8322f9b8f6e4afc938383/pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5", size = 1929745, upload-time = "2025-11-04T13:42:06.075Z" }, + { url = "https://files.pythonhosted.org/packages/d5/c2/aef51e5b283780e85e99ff19db0f05842d2d4a8a8cd15e63b0280029b08f/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d", size = 1920220, upload-time = "2025-11-04T13:42:08.457Z" }, + { url = "https://files.pythonhosted.org/packages/c7/97/492ab10f9ac8695cd76b2fdb24e9e61f394051df71594e9bcc891c9f586e/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60", size = 2067296, upload-time = "2025-11-04T13:42:10.817Z" }, + { url = "https://files.pythonhosted.org/packages/ec/23/984149650e5269c59a2a4c41d234a9570adc68ab29981825cfaf4cfad8f4/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82", size = 2231548, upload-time = "2025-11-04T13:42:13.843Z" }, + { url = "https://files.pythonhosted.org/packages/71/0c/85bcbb885b9732c28bec67a222dbed5ed2d77baee1f8bba2002e8cd00c5c/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5", size = 2362571, upload-time = "2025-11-04T13:42:16.208Z" }, + { url = "https://files.pythonhosted.org/packages/c0/4a/412d2048be12c334003e9b823a3fa3d038e46cc2d64dd8aab50b31b65499/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3", size = 
2068175, upload-time = "2025-11-04T13:42:18.911Z" }, + { url = "https://files.pythonhosted.org/packages/73/f4/c58b6a776b502d0a5540ad02e232514285513572060f0d78f7832ca3c98b/pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425", size = 2177203, upload-time = "2025-11-04T13:42:22.578Z" }, + { url = "https://files.pythonhosted.org/packages/ed/ae/f06ea4c7e7a9eead3d165e7623cd2ea0cb788e277e4f935af63fc98fa4e6/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504", size = 2148191, upload-time = "2025-11-04T13:42:24.89Z" }, + { url = "https://files.pythonhosted.org/packages/c1/57/25a11dcdc656bf5f8b05902c3c2934ac3ea296257cc4a3f79a6319e61856/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5", size = 2343907, upload-time = "2025-11-04T13:42:27.683Z" }, + { url = "https://files.pythonhosted.org/packages/96/82/e33d5f4933d7a03327c0c43c65d575e5919d4974ffc026bc917a5f7b9f61/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3", size = 2322174, upload-time = "2025-11-04T13:42:30.776Z" }, + { url = "https://files.pythonhosted.org/packages/81/45/4091be67ce9f469e81656f880f3506f6a5624121ec5eb3eab37d7581897d/pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460", size = 1990353, upload-time = "2025-11-04T13:42:33.111Z" }, + { url = 
"https://files.pythonhosted.org/packages/44/8a/a98aede18db6e9cd5d66bcacd8a409fcf8134204cdede2e7de35c5a2c5ef/pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b", size = 2015698, upload-time = "2025-11-04T13:42:35.484Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, + { url = "https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, + { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, + { url = "https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" }, + { url = "https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" }, + { url = 
"https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" }, + { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" }, + { url = "https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" }, + { url = 
"https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" }, + { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" }, + { url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } 
+sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pylint" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astroid" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "dill" }, + { name = "isort" }, + { name = "mccabe" }, + { name = "platformdirs", version = "4.4.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "platformdirs", version = "4.5.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomlkit" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig", version = "2.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "iniconfig", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, 
+] +sdist = { url = "https://files.pythonhosted.org/packages/de/b4/0b378b7bf26a8ae161c3890c0b48a91a04106c5713ce81b4b080ea2f4f18/pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3", size = 46920, upload-time = "2024-07-17T17:39:34.617Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/82/62e2d63639ecb0fbe8a7ee59ef0bc69a4669ec50f6d3459f74ad4e4189a2/pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2", size = 17663, upload-time = "2024-07-17T17:39:32.478Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "tomli" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, + { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" }, + { url = "https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, + { url = 
"https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, + { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, + { url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, + { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, + { url = "https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, + { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, + { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, + { url = "https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, + { url = 
"https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, + { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, + { url = "https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, + { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, + { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, + { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, + { url = "https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, + { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, + { url = "https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, + { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = 
"sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, + { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, + { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, + { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, + { url = "https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, + { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, + { url = 
"https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, + { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, + { url = "https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, + { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, + { url = 
"https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, + { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, + { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, + { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, + { url = "https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, + { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = 
"sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +] + +[[package]] +name = "tomlkit" +version = "0.13.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/18/0bbf3884e9eaa38819ebe46a7bd25dcd56b67434402b66a58c4b8e552575/tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1", size = 185207, upload-time = "2025-06-05T07:13:44.947Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20251115" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/36/06d01fb52c0d57e9ad0c237654990920fa41195e4b3d640830dabf9eeb2f/types_python_dateutil-2.9.0.20251115.tar.gz", hash = "sha256:8a47f2c3920f52a994056b8786309b43143faa5a64d4cbb2722d6addabdf1a58", size = 16363, upload-time = "2025-11-15T03:00:13.717Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/0b/56961d3ba517ed0df9b3a27bfda6514f3d01b28d499d1bce9068cfe4edd1/types_python_dateutil-2.9.0.20251115-py3-none-any.whl", hash = "sha256:9cf9c1c582019753b8639a081deefd7e044b9fa36bd8217f565c6c4e36ee0624", size = 18251, upload-time = "2025-11-15T03:00:12.317Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = 
"https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "urllib3" +version = "2.6.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/1e/24/a2a2ed9addd907787d7aa0355ba36a6cadf1768b934c652ea78acbd59dcd/urllib3-2.6.2.tar.gz", hash = "sha256:016f9c98bb7e98085cb2b4b17b87d2c702975664e4f060c6532e64d1c1a5e797", size = 432930, upload-time = "2025-12-11T15:56:40.252Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd", size = 131182, upload-time = "2025-12-11T15:56:38.584Z" }, +] diff --git a/poetry.lock b/poetry.lock deleted file mode 100644 index 51676115..00000000 --- a/poetry.lock +++ /dev/null @@ -1,1697 +0,0 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. - -[[package]] -name = "annotated-types" -version = "0.7.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, - {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, -] -markers = {dev = "python_version >= \"3.10\""} - -[[package]] -name = "anyio" -version = "4.5.2" -description = "High level compatibility layer for multiple asynchronous event loop implementations" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"}, - {file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"}, -] -markers = {dev = "python_version >= \"3.10\""} - -[package.dependencies] -exceptiongroup = {version = 
">=1.0.2", markers = "python_version < \"3.11\""} -idna = ">=2.8" -sniffio = ">=1.1" -typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} - -[package.extras] -doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21.0b1) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""] -trio = ["trio (>=0.26.1)"] - -[[package]] -name = "astroid" -version = "3.2.4" -description = "An abstract syntax tree for Python with inference support." -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25"}, - {file = "astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.11\""} - -[[package]] -name = "authlib" -version = "1.6.0" -description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." 
-optional = false -python-versions = ">=3.9" -groups = ["main", "dev"] -files = [ - {file = "authlib-1.6.0-py2.py3-none-any.whl", hash = "sha256:91685589498f79e8655e8a8947431ad6288831d643f11c55c2143ffcc738048d"}, - {file = "authlib-1.6.0.tar.gz", hash = "sha256:4367d32031b7af175ad3a323d571dc7257b7099d55978087ceae4a0d88cd3210"}, -] - -[package.dependencies] -cryptography = "*" - -[[package]] -name = "cachetools" -version = "5.5.0" -description = "Extensible memoizing collections and decorators" -optional = true -python-versions = ">=3.7" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, - {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, -] - -[[package]] -name = "certifi" -version = "2024.8.30" -description = "Python package for providing Mozilla's CA Bundle." -optional = false -python-versions = ">=3.6" -groups = ["main", "dev"] -files = [ - {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, - {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, -] -markers = {dev = "python_version >= \"3.10\""} - -[[package]] -name = "cffi" -version = "1.17.1" -description = "Foreign Function Interface for Python calling C code." 
-optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -markers = "platform_python_implementation != \"PyPy\"" -files = [ - {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, - {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, - {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, - {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, - {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, - {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, - 
{file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, - {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, - {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, - {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, - {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, - {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, - {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, - 
{file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, - {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, - {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, - {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, - {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, - {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, - {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, - {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, - {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, - {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, - {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, - {file = 
"cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, - {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, - {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, - {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, - {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, - {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, - {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, - {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, - {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, - {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, -] - -[package.dependencies] -pycparser = "*" - -[[package]] -name = "charset-normalizer" -version = "3.4.0" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -optional = false -python-versions = ">=3.7.0" -groups = ["main"] -files = [ - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, - {file = 
"charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, - {file = 
"charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, - {file = 
"charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, - {file = 
"charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, - {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, - {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, -] - -[[package]] -name = "click" -version = "8.2.1" -description = "Composable command line interface toolkit" -optional = false -python-versions = ">=3.10" -groups = ["main", "dev"] -markers = "python_version >= \"3.10\"" -files = [ - {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, - {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["main", "dev"] -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "cryptography" -version = "43.0.3" -description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -markers = "python_version == \"3.9\"" -files = [ - {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, - {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, - {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, - {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, - {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, - {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, - {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, - {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, - {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, - {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, - {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, - {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, - {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, - {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, - {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, - {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, - {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, - {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, - {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, - {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, - {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, - {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, - {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, - {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, - {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, - {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, - {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, -] - -[package.dependencies] -cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} - -[package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] -nox = ["nox"] -pep8test = ["check-sdist", "click", "mypy", "ruff"] -sdist = ["build"] -ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] -test-randomorder = ["pytest-randomly"] - -[[package]] -name = "cryptography" -version = "45.0.3" -description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-optional = false -python-versions = "!=3.9.0,!=3.9.1,>=3.7" -groups = ["main", "dev"] -markers = "python_version >= \"3.10\"" -files = [ - {file = "cryptography-45.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:7573d9eebaeceeb55285205dbbb8753ac1e962af3d9640791d12b36864065e71"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d377dde61c5d67eb4311eace661c3efda46c62113ff56bf05e2d679e02aebb5b"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fae1e637f527750811588e4582988932c222f8251f7b7ea93739acb624e1487f"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ca932e11218bcc9ef812aa497cdf669484870ecbcf2d99b765d6c27a86000942"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af3f92b1dc25621f5fad065288a44ac790c5798e986a34d393ab27d2b27fcff9"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:2f8f8f0b73b885ddd7f3d8c2b2234a7d3ba49002b0223f58cfde1bedd9563c56"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9cc80ce69032ffa528b5e16d217fa4d8d4bb7d6ba8659c1b4d74a1b0f4235fca"}, - {file = "cryptography-45.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:c824c9281cb628015bfc3c59335163d4ca0540d49de4582d6c2637312907e4b1"}, - {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:5833bb4355cb377ebd880457663a972cd044e7f49585aee39245c0d592904578"}, - {file = "cryptography-45.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:9bb5bf55dcb69f7067d80354d0a348368da907345a2c448b0babc4215ccd3497"}, - {file = "cryptography-45.0.3-cp311-abi3-win32.whl", hash = "sha256:3ad69eeb92a9de9421e1f6685e85a10fbcfb75c833b42cc9bc2ba9fb00da4710"}, - {file = "cryptography-45.0.3-cp311-abi3-win_amd64.whl", hash = 
"sha256:97787952246a77d77934d41b62fb1b6f3581d83f71b44796a4158d93b8f5c490"}, - {file = "cryptography-45.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:c92519d242703b675ccefd0f0562eb45e74d438e001f8ab52d628e885751fb06"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5edcb90da1843df85292ef3a313513766a78fbbb83f584a5a58fb001a5a9d57"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38deed72285c7ed699864f964a3f4cf11ab3fb38e8d39cfcd96710cd2b5bb716"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5555365a50efe1f486eed6ac7062c33b97ccef409f5970a0b6f205a7cfab59c8"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9e4253ed8f5948a3589b3caee7ad9a5bf218ffd16869c516535325fece163dcc"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cfd84777b4b6684955ce86156cfb5e08d75e80dc2585e10d69e47f014f0a5342"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:a2b56de3417fd5f48773ad8e91abaa700b678dc7fe1e0c757e1ae340779acf7b"}, - {file = "cryptography-45.0.3-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:57a6500d459e8035e813bd8b51b671977fb149a8c95ed814989da682314d0782"}, - {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f22af3c78abfbc7cbcdf2c55d23c3e022e1a462ee2481011d518c7fb9c9f3d65"}, - {file = "cryptography-45.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:232954730c362638544758a8160c4ee1b832dc011d2c41a306ad8f7cccc5bb0b"}, - {file = "cryptography-45.0.3-cp37-abi3-win32.whl", hash = "sha256:cb6ab89421bc90e0422aca911c69044c2912fc3debb19bb3c1bfe28ee3dff6ab"}, - {file = "cryptography-45.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:d54ae41e6bd70ea23707843021c778f151ca258081586f0cfa31d936ae43d1b2"}, - {file = 
"cryptography-45.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ed43d396f42028c1f47b5fec012e9e12631266e3825e95c00e3cf94d472dac49"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:fed5aaca1750e46db870874c9c273cd5182a9e9deb16f06f7bdffdb5c2bde4b9"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:00094838ecc7c6594171e8c8a9166124c1197b074cfca23645cee573910d76bc"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:92d5f428c1a0439b2040435a1d6bc1b26ebf0af88b093c3628913dd464d13fa1"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:ec64ee375b5aaa354b2b273c921144a660a511f9df8785e6d1c942967106438e"}, - {file = "cryptography-45.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:71320fbefd05454ef2d457c481ba9a5b0e540f3753354fff6f780927c25d19b0"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:edd6d51869beb7f0d472e902ef231a9b7689508e83880ea16ca3311a00bf5ce7"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:555e5e2d3a53b4fabeca32835878b2818b3f23966a4efb0d566689777c5a12c8"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:25286aacb947286620a31f78f2ed1a32cded7be5d8b729ba3fb2c988457639e4"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:050ce5209d5072472971e6efbfc8ec5a8f9a841de5a4db0ebd9c2e392cb81972"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:dc10ec1e9f21f33420cc05214989544727e776286c1c16697178978327b95c9c"}, - {file = "cryptography-45.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:9eda14f049d7f09c2e8fb411dda17dd6b16a3c76a1de5e249188a32aeb92de19"}, - {file = "cryptography-45.0.3.tar.gz", hash = 
"sha256:ec21313dd335c51d7877baf2972569f40a4291b76a0ce51391523ae358d05899"}, -] - -[package.dependencies] -cffi = {version = ">=1.14", markers = "platform_python_implementation != \"PyPy\""} - -[package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs ; python_full_version >= \"3.8.0\"", "sphinx-rtd-theme (>=3.0.0) ; python_full_version >= \"3.8.0\""] -docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] -nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2) ; python_full_version >= \"3.8.0\""] -pep8test = ["check-sdist ; python_full_version >= \"3.8.0\"", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] -sdist = ["build (>=1.0.0)"] -ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi (>=2024)", "cryptography-vectors (==45.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] -test-randomorder = ["pytest-randomly"] - -[[package]] -name = "dill" -version = "0.3.9" -description = "serialize all of Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "dill-0.3.9-py3-none-any.whl", hash = "sha256:468dff3b89520b474c0397703366b7b95eebe6303f108adf9b19da1f702be87a"}, - {file = "dill-0.3.9.tar.gz", hash = "sha256:81aa267dddf68cbfe8029c42ca9ec6a4ab3b22371d1c450abc54422577b4512c"}, -] - -[package.extras] -graph = ["objgraph (>=1.7.2)"] -profile = ["gprof2dot (>=2022.7.29)"] - -[[package]] -name = "eval-type-backport" -version = "0.2.0" -description = "Like `typing._eval_type`, but lets older Python versions use newer typing features." 
-optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "eval_type_backport-0.2.0-py3-none-any.whl", hash = "sha256:ac2f73d30d40c5a30a80b8739a789d6bb5e49fdffa66d7912667e2015d9c9933"}, - {file = "eval_type_backport-0.2.0.tar.gz", hash = "sha256:68796cfbc7371ebf923f03bdf7bef415f3ec098aeced24e054b253a0e78f7b37"}, -] - -[package.extras] -tests = ["pytest"] - -[[package]] -name = "exceptiongroup" -version = "1.2.2" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -markers = "python_version < \"3.11\"" -files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "google-auth" -version = "2.27.0" -description = "Google Authentication Library" -optional = true -python-versions = ">=3.7" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "google-auth-2.27.0.tar.gz", hash = "sha256:e863a56ccc2d8efa83df7a80272601e43487fa9a728a376205c86c26aaefa821"}, - {file = "google_auth-2.27.0-py2.py3-none-any.whl", hash = "sha256:8e4bad367015430ff253fe49d500fdc3396c1a434db5740828c728e45bcce245"}, -] - -[package.dependencies] -cachetools = ">=2.0.0,<6.0" -pyasn1-modules = ">=0.2.1" -rsa = ">=3.1.4,<5" - -[package.extras] -aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] -enterprise-cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"] -pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] -reauth = ["pyu2f (>=0.1.5)"] -requests = ["requests (>=2.20.0,<3.0.0.dev0)"] - -[[package]] -name = "googleapis-common-protos" -version = "1.70.0" -description = "Common protobufs used in Google APIs" -optional = false -python-versions = ">=3.7" -groups = ["main"] 
-files = [ - {file = "googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8"}, - {file = "googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257"}, -] - -[package.dependencies] -protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0" - -[package.extras] -grpc = ["grpcio (>=1.44.0,<2.0.0)"] - -[[package]] -name = "griffe" -version = "1.7.3" -description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." -optional = false -python-versions = ">=3.9" -groups = ["main", "dev"] -files = [ - {file = "griffe-1.7.3-py3-none-any.whl", hash = "sha256:c6b3ee30c2f0f17f30bcdef5068d6ab7a2a4f1b8bf1a3e74b56fffd21e1c5f75"}, - {file = "griffe-1.7.3.tar.gz", hash = "sha256:52ee893c6a3a968b639ace8015bec9d36594961e156e23315c8e8e51401fa50b"}, -] - -[package.dependencies] -colorama = ">=0.4" - -[[package]] -name = "h11" -version = "0.14.0" -description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -files = [ - {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, - {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, -] -markers = {dev = "python_version >= \"3.10\""} - -[[package]] -name = "httpcore" -version = "1.0.7" -description = "A minimal low-level HTTP client." 
-optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, - {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, -] -markers = {dev = "python_version >= \"3.10\""} - -[package.dependencies] -certifi = "*" -h11 = ">=0.13,<0.15" - -[package.extras] -asyncio = ["anyio (>=4.0,<5.0)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<1.0)"] - -[[package]] -name = "httpx" -version = "0.28.1" -description = "The next generation HTTP client." -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, - {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, -] -markers = {dev = "python_version >= \"3.10\""} - -[package.dependencies] -anyio = "*" -certifi = "*" -httpcore = "==1.*" -idna = "*" - -[package.extras] -brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""] -cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] -http2 = ["h2 (>=3,<5)"] -socks = ["socksio (==1.*)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "httpx-sse" -version = "0.4.0" -description = "Consume Server-Sent Event (SSE) messages with HTTPX." 
-optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -markers = "python_version >= \"3.10\"" -files = [ - {file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"}, - {file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"}, -] - -[[package]] -name = "idna" -version = "3.10" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.6" -groups = ["main", "dev"] -files = [ - {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, - {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, -] -markers = {dev = "python_version >= \"3.10\""} - -[package.extras] -all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] - -[[package]] -name = "importlib-metadata" -version = "8.7.0" -description = "Read metadata from Python packages" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"}, - {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"}, -] - -[package.dependencies] -zipp = ">=3.20" - -[package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] -cover = ["pytest-cov"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] -perf = ["ipython"] -test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] -type = ["pytest-mypy"] - 
-[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -optional = false -python-versions = ">=3.7" -groups = ["dev"] -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "invoke" -version = "2.2.0" -description = "Pythonic task execution" -optional = false -python-versions = ">=3.6" -groups = ["main"] -files = [ - {file = "invoke-2.2.0-py3-none-any.whl", hash = "sha256:6ea924cc53d4f78e3d98bc436b08069a03077e6f85ad1ddaa8a116d7dad15820"}, - {file = "invoke-2.2.0.tar.gz", hash = "sha256:ee6cbb101af1a859c7fe84f2a264c059020b0cb7fe3535f9424300ab568f6bd5"}, -] - -[[package]] -name = "isort" -version = "5.13.2" -description = "A Python utility / library to sort Python imports." -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, - {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, -] - -[package.extras] -colors = ["colorama (>=0.4.6)"] - -[[package]] -name = "mccabe" -version = "0.7.0" -description = "McCabe checker, plugin for flake8" -optional = false -python-versions = ">=3.6" -groups = ["dev"] -files = [ - {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, - {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, -] - -[[package]] -name = "mcp" -version = "1.9.1" -description = "Model Context Protocol SDK" -optional = false -python-versions = ">=3.10" -groups = ["main", "dev"] -markers = "python_version >= \"3.10\"" -files = [ - {file = 
"mcp-1.9.1-py3-none-any.whl", hash = "sha256:2900ded8ffafc3c8a7bfcfe8bc5204037e988e753ec398f371663e6a06ecd9a9"}, - {file = "mcp-1.9.1.tar.gz", hash = "sha256:19879cd6dde3d763297617242888c2f695a95dfa854386a6a68676a646ce75e4"}, -] - -[package.dependencies] -anyio = ">=4.5" -httpx = ">=0.27" -httpx-sse = ">=0.4" -pydantic = ">=2.7.2,<3.0.0" -pydantic-settings = ">=2.5.2" -python-multipart = ">=0.0.9" -sse-starlette = ">=1.6.1" -starlette = ">=0.27" -uvicorn = {version = ">=0.23.1", markers = "sys_platform != \"emscripten\""} - -[package.extras] -cli = ["python-dotenv (>=1.0.0)", "typer (>=0.12.4)"] -rich = ["rich (>=13.9.4)"] -ws = ["websockets (>=15.0.1)"] - -[[package]] -name = "mypy" -version = "1.15.0" -description = "Optional static typing for Python" -optional = false -python-versions = ">=3.9" -groups = ["dev", "lint"] -files = [ - {file = "mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13"}, - {file = "mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559"}, - {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b"}, - {file = "mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3"}, - {file = "mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b"}, - {file = "mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828"}, - {file = "mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f"}, - {file = "mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5"}, - {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e"}, - {file = "mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c"}, - {file = "mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f"}, - {file = "mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f"}, - {file = "mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd"}, - {file = "mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f"}, - {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464"}, - {file = "mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee"}, - {file = "mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e"}, - {file = "mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22"}, - {file = "mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445"}, - {file = "mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d"}, - {file = 
"mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5"}, - {file = "mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036"}, - {file = "mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357"}, - {file = "mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf"}, - {file = "mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078"}, - {file = "mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba"}, - {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5"}, - {file = "mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b"}, - {file = "mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2"}, - {file = "mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980"}, - {file = "mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e"}, - {file = "mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43"}, -] - -[package.dependencies] -mypy_extensions = ">=1.0.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing_extensions = ">=4.6.0" - 
-[package.extras] -dmypy = ["psutil (>=4.0)"] -faster-cache = ["orjson"] -install-types = ["pip"] -mypyc = ["setuptools (>=50)"] -reports = ["lxml"] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." -optional = false -python-versions = ">=3.5" -groups = ["dev", "lint"] -files = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] - -[[package]] -name = "nodeenv" -version = "1.9.1" -description = "Node.js virtual environment builder" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -groups = ["lint"] -files = [ - {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, - {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, -] - -[[package]] -name = "opentelemetry-api" -version = "1.38.0" -description = "OpenTelemetry Python API" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582"}, - {file = "opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12"}, -] - -[package.dependencies] -importlib-metadata = ">=6.0,<8.8.0" -typing-extensions = ">=4.5.0" - -[[package]] -name = "opentelemetry-exporter-otlp-proto-common" -version = "1.38.0" -description = "OpenTelemetry Protobuf encoding" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = 
"sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a"}, - {file = "opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c"}, -] - -[package.dependencies] -opentelemetry-proto = "1.38.0" - -[[package]] -name = "opentelemetry-exporter-otlp-proto-http" -version = "1.38.0" -description = "OpenTelemetry Collector Protobuf over HTTP Exporter" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b"}, - {file = "opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b"}, -] - -[package.dependencies] -googleapis-common-protos = ">=1.52,<2.0" -opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.38.0" -opentelemetry-proto = "1.38.0" -opentelemetry-sdk = ">=1.38.0,<1.39.0" -requests = ">=2.7,<3.0" -typing-extensions = ">=4.5.0" - -[[package]] -name = "opentelemetry-proto" -version = "1.38.0" -description = "OpenTelemetry Python Proto" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18"}, - {file = "opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468"}, -] - -[package.dependencies] -protobuf = ">=5.0,<7.0" - -[[package]] -name = "opentelemetry-sdk" -version = "1.38.0" -description = "OpenTelemetry Python SDK" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b"}, - {file = "opentelemetry_sdk-1.38.0.tar.gz", hash = 
"sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe"}, -] - -[package.dependencies] -opentelemetry-api = "1.38.0" -opentelemetry-semantic-conventions = "0.59b0" -typing-extensions = ">=4.5.0" - -[[package]] -name = "opentelemetry-semantic-conventions" -version = "0.59b0" -description = "OpenTelemetry Semantic Conventions" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed"}, - {file = "opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0"}, -] - -[package.dependencies] -opentelemetry-api = "1.38.0" -typing-extensions = ">=4.5.0" - -[[package]] -name = "packaging" -version = "24.2" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, - {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, -] - -[[package]] -name = "platformdirs" -version = "4.3.6" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
-optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, - {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, -] - -[package.extras] -docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] -type = ["mypy (>=1.11.2)"] - -[[package]] -name = "pluggy" -version = "1.5.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, - {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "protobuf" -version = "6.33.0" -description = "" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "protobuf-6.33.0-cp310-abi3-win32.whl", hash = "sha256:d6101ded078042a8f17959eccd9236fb7a9ca20d3b0098bbcb91533a5680d035"}, - {file = "protobuf-6.33.0-cp310-abi3-win_amd64.whl", hash = "sha256:9a031d10f703f03768f2743a1c403af050b6ae1f3480e9c140f39c45f81b13ee"}, - {file = "protobuf-6.33.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:905b07a65f1a4b72412314082c7dbfae91a9e8b68a0cc1577515f8df58ecf455"}, - {file = "protobuf-6.33.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:e0697ece353e6239b90ee43a9231318302ad8353c70e6e45499fa52396debf90"}, - {file = "protobuf-6.33.0-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:e0a1715e4f27355afd9570f3ea369735afc853a6c3951a6afe1f80d8569ad298"}, - {file = 
"protobuf-6.33.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:35be49fd3f4fefa4e6e2aacc35e8b837d6703c37a2168a55ac21e9b1bc7559ef"}, - {file = "protobuf-6.33.0-cp39-cp39-win32.whl", hash = "sha256:cd33a8e38ea3e39df66e1bbc462b076d6e5ba3a4ebbde58219d777223a7873d3"}, - {file = "protobuf-6.33.0-cp39-cp39-win_amd64.whl", hash = "sha256:c963e86c3655af3a917962c9619e1a6b9670540351d7af9439d06064e3317cc9"}, - {file = "protobuf-6.33.0-py3-none-any.whl", hash = "sha256:25c9e1963c6734448ea2d308cfa610e692b801304ba0908d7bfa564ac5132995"}, - {file = "protobuf-6.33.0.tar.gz", hash = "sha256:140303d5c8d2037730c548f8c7b93b20bb1dc301be280c378b82b8894589c954"}, -] - -[[package]] -name = "pyasn1" -version = "0.6.1" -description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" -optional = true -python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, - {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, -] - -[[package]] -name = "pyasn1-modules" -version = "0.4.1" -description = "A collection of ASN.1-based protocols modules" -optional = true -python-versions = ">=3.8" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "pyasn1_modules-0.4.1-py3-none-any.whl", hash = "sha256:49bfa96b45a292b711e986f222502c1c9a5e1f4e568fc30e2574a6c7d07838fd"}, - {file = "pyasn1_modules-0.4.1.tar.gz", hash = "sha256:c28e2dbf9c06ad61c71a075c7e0f9fd0f1b0bb2d2ad4377f240d33ac2ab60a7c"}, -] - -[package.dependencies] -pyasn1 = ">=0.4.6,<0.7.0" - -[[package]] -name = "pycparser" -version = "2.22" -description = "C parser in Python" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -markers = "platform_python_implementation != \"PyPy\"" -files = [ - {file = "pycparser-2.22-py3-none-any.whl", hash = 
"sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc"}, - {file = "pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6"}, -] - -[[package]] -name = "pydantic" -version = "2.10.6" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584"}, - {file = "pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236"}, -] -markers = {dev = "python_version >= \"3.10\""} - -[package.dependencies] -annotated-types = ">=0.6.0" -pydantic-core = "2.27.2" -typing-extensions = ">=4.12.2" - -[package.extras] -email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] - -[[package]] -name = "pydantic-core" -version = "2.27.2" -description = "Core functionality for Pydantic validation and serialization" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -files = [ - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, - {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, - {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, - {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, - {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, - 
{file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, - {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, - {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, - {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, - {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, - {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, - {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, - {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, - {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, - {file = 
"pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, - {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, - {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", 
hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, - {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, - {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, - {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = 
"sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, - {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, - {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, - {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, - {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, - {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, - {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = 
"sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, - {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, - {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, -] -markers = {dev = "python_version >= \"3.10\""} - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - -[[package]] -name = "pydantic-settings" -version = "2.9.1" -description = "Settings management using Pydantic" -optional = false -python-versions = ">=3.9" -groups = ["main", "dev"] -markers = "python_version >= \"3.10\"" -files = [ - {file = "pydantic_settings-2.9.1-py3-none-any.whl", hash = 
"sha256:59b4f431b1defb26fe620c71a7d3968a710d719f5f4cdbbdb7926edeb770f6ef"}, - {file = "pydantic_settings-2.9.1.tar.gz", hash = "sha256:c509bf79d27563add44e8446233359004ed85066cd096d8b510f715e6ef5d268"}, -] - -[package.dependencies] -pydantic = ">=2.7.0" -python-dotenv = ">=0.21.0" -typing-inspection = ">=0.4.0" - -[package.extras] -aws-secrets-manager = ["boto3 (>=1.35.0)", "boto3-stubs[secretsmanager]"] -azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"] -gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"] -toml = ["tomli (>=2.0.1)"] -yaml = ["pyyaml (>=6.0.1)"] - -[[package]] -name = "pylint" -version = "3.2.3" -description = "python code static checker" -optional = false -python-versions = ">=3.8.0" -groups = ["dev"] -files = [ - {file = "pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8"}, - {file = "pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60"}, -] - -[package.dependencies] -astroid = ">=3.2.2,<=3.3.0-dev0" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -dill = [ - {version = ">=0.2", markers = "python_version < \"3.11\""}, - {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version == \"3.11\""}, -] -isort = ">=4.2.5,<5.13.0 || >5.13.0,<6" -mccabe = ">=0.6,<0.8" -platformdirs = ">=2.2.0" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -tomlkit = ">=0.10.1" -typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} - -[package.extras] -spelling = ["pyenchant (>=3.2,<4.0)"] -testutils = ["gitpython (>3)"] - -[[package]] -name = "pyright" -version = "1.1.401" -description = "Command line wrapper for pyright" -optional = false -python-versions = ">=3.7" -groups = ["lint"] -files = [ - {file = "pyright-1.1.401-py3-none-any.whl", hash = 
"sha256:6fde30492ba5b0d7667c16ecaf6c699fab8d7a1263f6a18549e0b00bf7724c06"}, - {file = "pyright-1.1.401.tar.gz", hash = "sha256:788a82b6611fa5e34a326a921d86d898768cddf59edde8e93e56087d277cc6f1"}, -] - -[package.dependencies] -nodeenv = ">=1.6.0" -typing-extensions = ">=4.1" - -[package.extras] -all = ["nodejs-wheel-binaries", "twine (>=3.4.1)"] -dev = ["twine (>=3.4.1)"] -nodejs = ["nodejs-wheel-binaries"] - -[[package]] -name = "pytest" -version = "8.3.4" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, - {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=1.5,<2" -tomli = {version = ">=1", markers = "python_version < \"3.11\""} - -[package.extras] -dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "pytest-asyncio" -version = "0.23.8" -description = "Pytest support for asyncio" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"}, - {file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"}, -] - -[package.dependencies] -pytest = ">=7.0.0,<9" - -[package.extras] -docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] -testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] - -[[package]] -name = "python-dateutil" -version = "2.9.0.post0" -description = 
"Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -groups = ["main"] -files = [ - {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, - {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "python-dotenv" -version = "1.1.0" -description = "Read key-value pairs from a .env file and set them as environment variables" -optional = false -python-versions = ">=3.9" -groups = ["main", "dev"] -markers = "python_version >= \"3.10\"" -files = [ - {file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"}, - {file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"}, -] - -[package.extras] -cli = ["click (>=5.0)"] - -[[package]] -name = "python-multipart" -version = "0.0.20" -description = "A streaming multipart parser for Python" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -markers = "python_version >= \"3.10\"" -files = [ - {file = "python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104"}, - {file = "python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13"}, -] - -[[package]] -name = "pyyaml" -version = "6.0.2" -description = "YAML parser and emitter for Python" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, - {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, - {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, - {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash 
= "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, - {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, - {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, - {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, - {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, - {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, - {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, - {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, - {file = 
"PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, - {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, - {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, - {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, - {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, - {file = 
"PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, - {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, - {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, - {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, - {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, - {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, - {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, -] - -[[package]] -name = "requests" -version = "2.32.3" -description = "Python HTTP for Humans." 
-optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, - {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "rsa" -version = "4.2" -description = "Pure-Python RSA implementation" -optional = true -python-versions = "*" -groups = ["main"] -markers = "extra == \"gcp\"" -files = [ - {file = "rsa-4.2.tar.gz", hash = "sha256:aaefa4b84752e3e99bd8333a2e1e3e7a7da64614042bd66f775573424370108a"}, -] - -[package.dependencies] -pyasn1 = ">=0.1.3" - -[[package]] -name = "ruff" -version = "0.11.11" -description = "An extremely fast Python linter and code formatter, written in Rust." 
-optional = false -python-versions = ">=3.7" -groups = ["lint"] -files = [ - {file = "ruff-0.11.11-py3-none-linux_armv6l.whl", hash = "sha256:9924e5ae54125ed8958a4f7de320dab7380f6e9fa3195e3dc3b137c6842a0092"}, - {file = "ruff-0.11.11-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:c8a93276393d91e952f790148eb226658dd275cddfde96c6ca304873f11d2ae4"}, - {file = "ruff-0.11.11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d6e333dbe2e6ae84cdedefa943dfd6434753ad321764fd937eef9d6b62022bcd"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7885d9a5e4c77b24e8c88aba8c80be9255fa22ab326019dac2356cff42089fc6"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b5ab797fcc09121ed82e9b12b6f27e34859e4227080a42d090881be888755d4"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e231ff3132c1119ece836487a02785f099a43992b95c2f62847d29bace3c75ac"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:a97c9babe1d4081037a90289986925726b802d180cca784ac8da2bbbc335f709"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8c4ddcbe8a19f59f57fd814b8b117d4fcea9bee7c0492e6cf5fdc22cfa563c8"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6224076c344a7694c6fbbb70d4f2a7b730f6d47d2a9dc1e7f9d9bb583faf390b"}, - {file = "ruff-0.11.11-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:882821fcdf7ae8db7a951df1903d9cb032bbe838852e5fc3c2b6c3ab54e39875"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:dcec2d50756463d9df075a26a85a6affbc1b0148873da3997286caf1ce03cae1"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:99c28505ecbaeb6594701a74e395b187ee083ee26478c1a795d35084d53ebd81"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_i686.whl", hash = 
"sha256:9263f9e5aa4ff1dec765e99810f1cc53f0c868c5329b69f13845f699fe74f639"}, - {file = "ruff-0.11.11-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:64ac6f885e3ecb2fdbb71de2701d4e34526651f1e8503af8fb30d4915a3fe345"}, - {file = "ruff-0.11.11-py3-none-win32.whl", hash = "sha256:1adcb9a18802268aaa891ffb67b1c94cd70578f126637118e8099b8e4adcf112"}, - {file = "ruff-0.11.11-py3-none-win_amd64.whl", hash = "sha256:748b4bb245f11e91a04a4ff0f96e386711df0a30412b9fe0c74d5bdc0e4a531f"}, - {file = "ruff-0.11.11-py3-none-win_arm64.whl", hash = "sha256:6c51f136c0364ab1b774767aa8b86331bd8e9d414e2d107db7a2189f35ea1f7b"}, - {file = "ruff-0.11.11.tar.gz", hash = "sha256:7774173cc7c1980e6bf67569ebb7085989a78a103922fb83ef3dfe230cd0687d"}, -] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -groups = ["main"] -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "sniffio" -version = "1.3.1" -description = "Sniff out which async library your code is running under" -optional = false -python-versions = ">=3.7" -groups = ["main", "dev"] -files = [ - {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, - {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, -] -markers = {dev = "python_version >= \"3.10\""} - -[[package]] -name = "sse-starlette" -version = "2.1.3" -description = "SSE plugin for Starlette" -optional = false -python-versions = ">=3.8" -groups = ["main", "dev"] -markers = "python_version >= \"3.10\"" -files = [ - {file = "sse_starlette-2.1.3-py3-none-any.whl", hash = 
"sha256:8ec846438b4665b9e8c560fcdea6bc8081a3abf7942faa95e5a744999d219772"}, - {file = "sse_starlette-2.1.3.tar.gz", hash = "sha256:9cd27eb35319e1414e3d2558ee7414487f9529ce3b3cf9b21434fd110e017169"}, -] - -[package.dependencies] -anyio = "*" -starlette = "*" -uvicorn = "*" - -[package.extras] -examples = ["fastapi"] - -[[package]] -name = "starlette" -version = "0.46.2" -description = "The little ASGI library that shines." -optional = false -python-versions = ">=3.9" -groups = ["main", "dev"] -markers = "python_version >= \"3.10\"" -files = [ - {file = "starlette-0.46.2-py3-none-any.whl", hash = "sha256:595633ce89f8ffa71a015caed34a5b2dc1c0cdb3f0f1fbd1e69339cf2abeec35"}, - {file = "starlette-0.46.2.tar.gz", hash = "sha256:7f7361f34eed179294600af672f565727419830b54b7b084efe44bb82d2fccd5"}, -] - -[package.dependencies] -anyio = ">=3.6.2,<5" - -[package.extras] -full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] - -[[package]] -name = "tomli" -version = "2.2.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.8" -groups = ["dev", "lint"] -markers = "python_version < \"3.11\"" -files = [ - {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, - {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, 
- {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, - {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, - {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, - {file = "tomli-2.2.1-cp312-cp312-win32.whl", 
hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, - {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, - {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, - {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, - {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, - {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, -] - -[[package]] -name 
= "tomlkit" -version = "0.13.2" -description = "Style preserving TOML library" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "tomlkit-0.13.2-py3-none-any.whl", hash = "sha256:7a974427f6e119197f670fbbbeae7bef749a6c14e793db934baefc1b5f03efde"}, - {file = "tomlkit-0.13.2.tar.gz", hash = "sha256:fff5fe59a87295b278abd31bec92c15d9bc4a06885ab12bcea52c71119392e79"}, -] - -[[package]] -name = "types-authlib" -version = "1.5.0.20250516" -description = "Typing stubs for Authlib" -optional = false -python-versions = ">=3.9" -groups = ["dev"] -files = [ - {file = "types_authlib-1.5.0.20250516-py3-none-any.whl", hash = "sha256:c553659ba00b7e5f98d1bc183a47224a882de5d32c07917b1587a6a22ddd2583"}, - {file = "types_authlib-1.5.0.20250516.tar.gz", hash = "sha256:6d11b46622c4c338087d059e9036887408c788cf254f0fb11ff69f2a85ca7231"}, -] - -[package.dependencies] -cryptography = "*" - -[[package]] -name = "types-python-dateutil" -version = "2.9.0.20241003" -description = "Typing stubs for python-dateutil" -optional = false -python-versions = ">=3.8" -groups = ["dev"] -files = [ - {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, - {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, -] - -[[package]] -name = "types-pyyaml" -version = "6.0.12.20250516" -description = "Typing stubs for PyYAML" -optional = false -python-versions = ">=3.9" -groups = ["dev"] -files = [ - {file = "types_pyyaml-6.0.12.20250516-py3-none-any.whl", hash = "sha256:8478208feaeb53a34cb5d970c56a7cd76b72659442e733e268a94dc72b2d0530"}, - {file = "types_pyyaml-6.0.12.20250516.tar.gz", hash = "sha256:9f21a70216fc0fa1b216a8176db5f9e0af6eb35d2f2932acb87689d03a5bf6ba"}, -] - -[[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" 
-optional = false -python-versions = ">=3.8" -groups = ["main", "dev", "lint"] -files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, -] - -[[package]] -name = "typing-inspection" -version = "0.4.0" -description = "Runtime typing introspection tools" -optional = false -python-versions = ">=3.9" -groups = ["main", "dev"] -files = [ - {file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"}, - {file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"}, -] -markers = {dev = "python_version >= \"3.10\""} - -[package.dependencies] -typing-extensions = ">=4.12.0" - -[[package]] -name = "urllib3" -version = "2.2.3" -description = "HTTP library with thread-safe connection pooling, file post, and more." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, - {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] -h2 = ["h2 (>=4,<5)"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "uvicorn" -version = "0.34.2" -description = "The lightning-fast ASGI server." 
-optional = false -python-versions = ">=3.9" -groups = ["main", "dev"] -markers = "python_version >= \"3.10\"" -files = [ - {file = "uvicorn-0.34.2-py3-none-any.whl", hash = "sha256:deb49af569084536d269fe0a6d67e3754f104cf03aba7c11c40f01aadf33c403"}, - {file = "uvicorn-0.34.2.tar.gz", hash = "sha256:0e929828f6186353a80b58ea719861d2629d766293b6d19baf086ba31d4f3328"}, -] - -[package.dependencies] -click = ">=7.0" -h11 = ">=0.8" -typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} - -[package.extras] -standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] - -[[package]] -name = "zipp" -version = "3.23.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e"}, - {file = "zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166"}, -] - -[package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] -cover = ["pytest-cov"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] -test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] -type = ["pytest-mypy"] - -[extras] -agents = ["authlib", "griffe", "mcp"] -gcp = ["google-auth", "requests"] - -[metadata] -lock-version = "2.1" -python-versions = ">=3.9" -content-hash = "9d707321f2730f9d1e581d43778dd605a83fdc3d3c375f597b1a2dabb2584ba0" diff --git 
a/poetry.toml b/poetry.toml deleted file mode 100644 index cd3492ac..00000000 --- a/poetry.toml +++ /dev/null @@ -1,3 +0,0 @@ - -[virtualenvs] -in-project = true diff --git a/pyproject.toml b/pyproject.toml index 58efd52d..3208eb77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,9 +2,9 @@ name = "mistralai" version = "1.10.0" description = "Python Client SDK for the Mistral AI API." -authors = [{ name = "Mistral" },] -readme = "README-PYPI.md" +authors = [{ name = "Mistral" }] requires-python = ">=3.9" +readme = "README-PYPI.md" dependencies = [ "eval-type-backport >=0.2.0", "httpx >=0.28.1", @@ -19,40 +19,10 @@ dependencies = [ "opentelemetry-semantic-conventions (>=0.59b0,<0.60)", ] -[tool.poetry] -repository = "https://github.com/mistralai/client-python.git" -packages = [ - { include = "mistralai", from = "src" }, - { include = "mistralai_azure", from = "packages/mistralai_azure/src" }, - { include = "mistralai_gcp", from = "packages/mistralai_gcp/src" }, -] -include = ["py.typed", "src/mistralai/py.typed"] - -[tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai/py.typed"] - -[tool.poetry.group.dev.dependencies] -mypy = "==1.15.0" -pylint = "==3.2.3" -pytest = "^8.2.2" -pytest-asyncio = "^0.23.7" -types-python-dateutil = "^2.9.0.20240316" -types-authlib = "^1.5.0.20250516" -types-pyyaml = "^6.0.12.20250516" -mcp = { version = "^1.0", python = ">=3.10" } -griffe = "^1.7.3" -authlib = "^1.5.2" - -[tool.poetry.group.lint.dependencies] -ruff = "^0.11.10" -pyright = "^1.1.401" -mypy = "==1.15.0" - - [project.optional-dependencies] gcp = [ "google-auth >=2.27.0", - "requests >=2.32.3" + "requests >=2.32.3", ] agents = [ "mcp >=1.0,<2.0; python_version >= '3.10'", @@ -60,9 +30,63 @@ agents = [ "authlib >=1.5.2,<2.0", ] +[project.urls] +Repository = "https://github.com/mistralai/client-python.git" + +[dependency-groups] +dev = [ + "mypy==1.15.0", + "pylint==3.2.3", 
+ "pytest>=8.2.2,<9", + "pytest-asyncio>=0.23.7,<0.24", + "types-python-dateutil>=2.9.0.20240316,<3", + "types-authlib>=1.5.0.20250516,<2", + "types-pyyaml>=6.0.12.20250516,<7", + "mcp>=1.0,<2 ; python_version >= '3.10'", + "griffe>=1.7.3,<2", + "authlib>=1.5.2,<2", +] +lint = [ + "ruff>=0.11.10,<0.12", + "pyright>=1.1.401,<2", + "mypy==1.15.0", +] + +[tool.uv] +default-groups = [ + "dev", + "lint", +] + +[tool.setuptools.package-data] +"*" = ["py.typed", "src/mistralai/py.typed"] + +[tool.hatch.build.targets.sdist] +include = [ + "src/mistralai", + "packages/mistralai_azure/src/mistralai_azure", + "packages/mistralai_gcp/src/mistralai_gcp", +] + +[tool.hatch.build.targets.sdist.force-include] +"py.typed" = "py.typed" +"src/mistralai/py.typed" = "src/mistralai/py.typed" + +[tool.hatch.build.targets.wheel] +include = [ + "src/mistralai", + "packages/mistralai_azure/src/mistralai_azure", + "packages/mistralai_gcp/src/mistralai_gcp", +] + +[tool.hatch.build.targets.wheel.sources] +"src/mistralai" = "mistralai" +"packages/mistralai_azure/src/mistralai_azure" = "mistralai_azure" +"packages/mistralai_gcp/src/mistralai_gcp" = "mistralai_gcp" + [build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" +requires = ["hatchling"] +build-backend = "hatchling.build" [tool.pytest.ini_options] asyncio_default_fixture_loop_scope = "function" diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh index 163bb3a6..3b03883d 100755 --- a/scripts/lint_custom_code.sh +++ b/scripts/lint_custom_code.sh @@ -1,38 +1,36 @@ #!/usr/bin/env bash -set -e - ERRORS=0 echo "Running mypy..." 
# TODO: Uncomment once the examples are fixed -# poetry run mypy examples/ || ERRORS=1 +# uv run mypy examples/ || ERRORS=1 echo "-> running on extra" -poetry run mypy src/mistralai/extra/ || ERRORS=1 +uv run mypy src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -poetry run mypy src/mistralai/_hooks/ \ ---exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +uv run mypy src/mistralai/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 echo "Running pyright..." # TODO: Uncomment once the examples are fixed -# poetry run pyright examples/ || ERRORS=1 +# uv run pyright examples/ || ERRORS=1 echo "-> running on extra" -poetry run pyright src/mistralai/extra/ || ERRORS=1 +uv run pyright src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -poetry run pyright src/mistralai/_hooks/ || ERRORS=1 +uv run pyright src/mistralai/_hooks/ || ERRORS=1 echo "Running ruff..." echo "-> running on examples" -poetry run ruff check examples/ || ERRORS=1 +uv run ruff check examples/ || ERRORS=1 echo "-> running on extra" -poetry run ruff check src/mistralai/extra/ || ERRORS=1 +uv run ruff check src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -poetry run ruff check src/mistralai/_hooks/ \ ---exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +uv run ruff check src/mistralai/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 if [ "$ERRORS" -ne 0 ]; then -echo "❌ One or more linters failed" -exit 1 + echo "❌ One or more linters failed" + exit 1 else -echo "✅ All linters passed" + echo "✅ All linters passed" fi diff --git a/scripts/publish.sh b/scripts/publish.sh index 2a3ead70..6ff725f3 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash -export POETRY_PYPI_TOKEN_PYPI=${PYPI_TOKEN} +export UV_PUBLISH_TOKEN=${PYPI_TOKEN} -poetry run python scripts/prepare_readme.py +uv run python scripts/prepare_readme.py 
-poetry publish --build --skip-existing +uv build +uv publish diff --git a/src/mistralai/extra/README.md b/src/mistralai/extra/README.md index dfce43b3..0593d84a 100644 --- a/src/mistralai/extra/README.md +++ b/src/mistralai/extra/README.md @@ -34,7 +34,7 @@ class Chat(BaseSDK): 3. Now build the SDK with the custom code: ```bash -rm -rf dist; poetry build; python3 -m pip install ~/client-python/dist/mistralai-1.4.1-py3-none-any.whl --force-reinstall +rm -rf dist; uv build; uv pip install --reinstall ~/client-python/dist/mistralai-1.4.1-py3-none-any.whl ``` 4. And now you should be able to call the custom method: diff --git a/src/mistralai/extra/mcp/base.py b/src/mistralai/extra/mcp/base.py index 8be5585c..929b6b11 100644 --- a/src/mistralai/extra/mcp/base.py +++ b/src/mistralai/extra/mcp/base.py @@ -1,11 +1,14 @@ -from typing import Optional, Union +from typing import Optional, Sequence, Union import logging import typing from contextlib import AsyncExitStack from typing import Protocol, Any -from mcp import ClientSession -from mcp.types import ListPromptsResult, EmbeddedResource, ImageContent, TextContent +from mcp import ClientSession # pyright: ignore[reportMissingImports] +from mcp.types import ( # pyright: ignore[reportMissingImports] + ContentBlock, + ListPromptsResult, +) from mistralai.extra.exceptions import MCPException from mistralai.models import ( @@ -62,15 +65,13 @@ def __init__(self, name: Optional[str] = None): self._exit_stack: Optional[AsyncExitStack] = None self._is_initialized = False - def _convert_content( - self, mcp_content: Union[TextContent, ImageContent, EmbeddedResource] - ) -> TextChunkTypedDict: + def _convert_content(self, mcp_content: ContentBlock) -> TextChunkTypedDict: if not mcp_content.type == "text": raise MCPException("Only supporting text tool responses for now.") return {"type": "text", "text": mcp_content.text} def _convert_content_list( - self, mcp_contents: list[Union[TextContent, ImageContent, EmbeddedResource]] + self, 
mcp_contents: Sequence[ContentBlock] ) -> list[TextChunkTypedDict]: content_chunks = [] for mcp_content in mcp_contents: diff --git a/src/mistralai/extra/mcp/sse.py b/src/mistralai/extra/mcp/sse.py index 2dfe7a2d..3898ba75 100644 --- a/src/mistralai/extra/mcp/sse.py +++ b/src/mistralai/extra/mcp/sse.py @@ -14,8 +14,8 @@ from mistralai.extra.mcp.auth import OAuthParams, AsyncOAuth2Client from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream -from mcp.client.sse import sse_client -from mcp.shared.message import SessionMessage +from mcp.client.sse import sse_client # pyright: ignore[reportMissingImports] +from mcp.shared.message import SessionMessage # pyright: ignore[reportMissingImports] from authlib.oauth2.rfc6749 import OAuth2Token from mistralai.types import BaseModel diff --git a/src/mistralai/extra/mcp/stdio.py b/src/mistralai/extra/mcp/stdio.py index 28c3b8c5..b7af4029 100644 --- a/src/mistralai/extra/mcp/stdio.py +++ b/src/mistralai/extra/mcp/stdio.py @@ -6,7 +6,7 @@ MCPClientBase, ) -from mcp import stdio_client, StdioServerParameters +from mcp import stdio_client, StdioServerParameters # pyright: ignore[reportMissingImports] logger = logging.getLogger(__name__) diff --git a/src/mistralai/extra/observability/otel.py b/src/mistralai/extra/observability/otel.py index 46c667d0..6037e681 100644 --- a/src/mistralai/extra/observability/otel.py +++ b/src/mistralai/extra/observability/otel.py @@ -5,7 +5,7 @@ import traceback from datetime import datetime, timezone from enum import Enum -from typing import Optional, Tuple +from typing import Optional, Tuple, Union import httpx import opentelemetry.semconv._incubating.attributes.gen_ai_attributes as gen_ai_attributes @@ -135,7 +135,7 @@ def enrich_span_from_response(tracer: trace.Tracer, span: Span, operation_id: st response_data = json.loads(response.content) # Base attributes - attributes: dict[str, str | int] = { + attributes: dict[str, Union[str, int]] = { 
http_attributes.HTTP_RESPONSE_STATUS_CODE: response.status_code, MistralAIAttributes.MISTRAL_AI_OPERATION_ID: operation_id, gen_ai_attributes.GEN_AI_PROVIDER_NAME: gen_ai_attributes.GenAiProviderNameValues.MISTRAL_AI.value diff --git a/uv.lock b/uv.lock new file mode 100644 index 00000000..97fbeeec --- /dev/null +++ b/uv.lock @@ -0,0 +1,1693 @@ +version = 1 +revision = 3 +requires-python = ">=3.9" +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version == '3.10.*'", + "python_full_version < '3.10'", +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anyio" +version = "4.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "idna" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/ce/8a777047513153587e5434fd752e89334ac33e379aa3497db860eeb60377/anyio-4.12.0.tar.gz", hash = 
"sha256:73c693b567b0c55130c104d0b43a9baf3aa6a31fc6110116509f27bf75e21ec0", size = 228266, upload-time = "2025-11-28T23:37:38.911Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/9c/36c5c37947ebfb8c7f22e0eb6e4d188ee2d53aa3880f3f2744fb894f0cb1/anyio-4.12.0-py3-none-any.whl", hash = "sha256:dad2376a628f98eeca4881fc56cd06affd18f659b17a747d3ff0307ced94b1bb", size = 113362, upload-time = "2025-11-28T23:36:57.897Z" }, +] + +[[package]] +name = "astroid" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/53/1067e1113ecaf58312357f2cd93063674924119d80d173adc3f6f2387aa2/astroid-3.2.4.tar.gz", hash = "sha256:0e14202810b30da1b735827f78f5157be2bbd4a7a59b7707ca0bfc2fb4c0063a", size = 397576, upload-time = "2024-07-20T12:57:43.26Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/96/b32bbbb46170a1c8b8b1f28c794202e25cfe743565e9d3469b8eb1e0cc05/astroid-3.2.4-py3-none-any.whl", hash = "sha256:413658a61eeca6202a59231abb473f932038fbcbf1666587f66d482083413a25", size = 276348, upload-time = "2024-07-20T12:57:40.886Z" }, +] + +[[package]] +name = "attrs" +version = "25.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6b/5c/685e6633917e101e5dcb62b9dd76946cbb57c26e133bae9e0cd36033c0a9/attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11", size = 934251, upload-time = "2025-10-06T13:54:44.725Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" }, +] + +[[package]] +name = "authlib" +version = "1.6.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/9b/b1661026ff24bc641b76b78c5222d614776b0c085bcfdac9bd15a1cb4b35/authlib-1.6.6.tar.gz", hash = "sha256:45770e8e056d0f283451d9996fbb59b70d45722b45d854d58f32878d0a40c38e", size = 164894, upload-time = "2025-12-12T08:01:41.464Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/51/321e821856452f7386c4e9df866f196720b1ad0c5ea1623ea7399969ae3b/authlib-1.6.6-py2.py3-none-any.whl", hash = "sha256:7d9e9bc535c13974313a87f53e8430eb6ea3d1cf6ae4f6efcd793f2e949143fd", size = 244005, upload-time = "2025-12-12T08:01:40.209Z" }, +] + +[[package]] +name = "cachetools" +version = "6.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/1d/ede8680603f6016887c062a2cf4fc8fdba905866a3ab8831aa8aa651320c/cachetools-6.2.4.tar.gz", hash = "sha256:82c5c05585e70b6ba2d3ae09ea60b79548872185d2f24ae1f2709d37299fd607", size = 31731, upload-time = "2025-12-15T18:24:53.744Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/fc/1d7b80d0eb7b714984ce40efc78859c022cd930e402f599d8ca9e39c78a4/cachetools-6.2.4-py3-none-any.whl", hash = "sha256:69a7a52634fed8b8bf6e24a050fb60bff1c9bd8f6d24572b99c32d4e71e62a51", size = 11551, 
upload-time = "2025-12-15T18:24:52.332Z" }, +] + +[[package]] +name = "certifi" +version = "2025.11.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" }, +] + +[[package]] +name = "cffi" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "implementation_name != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/56/b1ba7935a17738ae8453301356628e8147c79dbb825bcbc73dc7401f9846/cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529", size = 523588, upload-time = "2025-09-08T23:24:04.541Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/d7/516d984057745a6cd96575eea814fe1edd6646ee6efd552fb7b0921dec83/cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44", size = 184283, upload-time = "2025-09-08T23:22:08.01Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/84/ad6a0b408daa859246f57c03efd28e5dd1b33c21737c2db84cae8c237aa5/cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49", size = 180504, upload-time = "2025-09-08T23:22:10.637Z" }, + { url = "https://files.pythonhosted.org/packages/50/bd/b1a6362b80628111e6653c961f987faa55262b4002fcec42308cad1db680/cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c", size = 208811, upload-time = "2025-09-08T23:22:12.267Z" }, + { url = "https://files.pythonhosted.org/packages/4f/27/6933a8b2562d7bd1fb595074cf99cc81fc3789f6a6c05cdabb46284a3188/cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb", size = 216402, upload-time = "2025-09-08T23:22:13.455Z" }, + { url = "https://files.pythonhosted.org/packages/05/eb/b86f2a2645b62adcfff53b0dd97e8dfafb5c8aa864bd0d9a2c2049a0d551/cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0", size = 203217, upload-time = "2025-09-08T23:22:14.596Z" }, + { url = "https://files.pythonhosted.org/packages/9f/e0/6cbe77a53acf5acc7c08cc186c9928864bd7c005f9efd0d126884858a5fe/cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4", size = 203079, upload-time = "2025-09-08T23:22:15.769Z" }, + { url = 
"https://files.pythonhosted.org/packages/98/29/9b366e70e243eb3d14a5cb488dfd3a0b6b2f1fb001a203f653b93ccfac88/cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453", size = 216475, upload-time = "2025-09-08T23:22:17.427Z" }, + { url = "https://files.pythonhosted.org/packages/21/7a/13b24e70d2f90a322f2900c5d8e1f14fa7e2a6b3332b7309ba7b2ba51a5a/cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495", size = 218829, upload-time = "2025-09-08T23:22:19.069Z" }, + { url = "https://files.pythonhosted.org/packages/60/99/c9dc110974c59cc981b1f5b66e1d8af8af764e00f0293266824d9c4254bc/cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5", size = 211211, upload-time = "2025-09-08T23:22:20.588Z" }, + { url = "https://files.pythonhosted.org/packages/49/72/ff2d12dbf21aca1b32a40ed792ee6b40f6dc3a9cf1644bd7ef6e95e0ac5e/cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb", size = 218036, upload-time = "2025-09-08T23:22:22.143Z" }, + { url = "https://files.pythonhosted.org/packages/e2/cc/027d7fb82e58c48ea717149b03bcadcbdc293553edb283af792bd4bcbb3f/cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a", size = 172184, upload-time = "2025-09-08T23:22:23.328Z" }, + { url = "https://files.pythonhosted.org/packages/33/fa/072dd15ae27fbb4e06b437eb6e944e75b068deb09e2a2826039e49ee2045/cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739", size = 182790, upload-time = "2025-09-08T23:22:24.752Z" }, + { url = "https://files.pythonhosted.org/packages/12/4a/3dfd5f7850cbf0d06dc84ba9aa00db766b52ca38d8b86e3a38314d52498c/cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe", size = 184344, upload-time = "2025-09-08T23:22:26.456Z" }, + { url = "https://files.pythonhosted.org/packages/4f/8b/f0e4c441227ba756aafbe78f117485b25bb26b1c059d01f137fa6d14896b/cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c", size = 180560, upload-time = "2025-09-08T23:22:28.197Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b7/1200d354378ef52ec227395d95c2576330fd22a869f7a70e88e1447eb234/cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92", size = 209613, upload-time = "2025-09-08T23:22:29.475Z" }, + { url = "https://files.pythonhosted.org/packages/b8/56/6033f5e86e8cc9bb629f0077ba71679508bdf54a9a5e112a3c0b91870332/cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93", size = 216476, upload-time = "2025-09-08T23:22:31.063Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/55fecd70f7ece178db2f26128ec41430d8720f2d12ca97bf8f0a628207d5/cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5", size = 203374, upload-time = "2025-09-08T23:22:32.507Z" }, + { url 
= "https://files.pythonhosted.org/packages/84/ef/a7b77c8bdc0f77adc3b46888f1ad54be8f3b7821697a7b89126e829e676a/cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664", size = 202597, upload-time = "2025-09-08T23:22:34.132Z" }, + { url = "https://files.pythonhosted.org/packages/d7/91/500d892b2bf36529a75b77958edfcd5ad8e2ce4064ce2ecfeab2125d72d1/cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26", size = 215574, upload-time = "2025-09-08T23:22:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/44/64/58f6255b62b101093d5df22dcb752596066c7e89dd725e0afaed242a61be/cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9", size = 218971, upload-time = "2025-09-08T23:22:36.805Z" }, + { url = "https://files.pythonhosted.org/packages/ab/49/fa72cebe2fd8a55fbe14956f9970fe8eb1ac59e5df042f603ef7c8ba0adc/cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414", size = 211972, upload-time = "2025-09-08T23:22:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/0b/28/dd0967a76aab36731b6ebfe64dec4e981aff7e0608f60c2d46b46982607d/cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743", size = 217078, upload-time = "2025-09-08T23:22:39.776Z" }, + { url = "https://files.pythonhosted.org/packages/2b/c0/015b25184413d7ab0a410775fdb4a50fca20f5589b5dab1dbbfa3baad8ce/cffi-2.0.0-cp311-cp311-win32.whl", hash = 
"sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5", size = 172076, upload-time = "2025-09-08T23:22:40.95Z" }, + { url = "https://files.pythonhosted.org/packages/ae/8f/dc5531155e7070361eb1b7e4c1a9d896d0cb21c49f807a6c03fd63fc877e/cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5", size = 182820, upload-time = "2025-09-08T23:22:42.463Z" }, + { url = "https://files.pythonhosted.org/packages/95/5c/1b493356429f9aecfd56bc171285a4c4ac8697f76e9bbbbb105e537853a1/cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d", size = 177635, upload-time = "2025-09-08T23:22:43.623Z" }, + { url = "https://files.pythonhosted.org/packages/ea/47/4f61023ea636104d4f16ab488e268b93008c3d0bb76893b1b31db1f96802/cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d", size = 185271, upload-time = "2025-09-08T23:22:44.795Z" }, + { url = "https://files.pythonhosted.org/packages/df/a2/781b623f57358e360d62cdd7a8c681f074a71d445418a776eef0aadb4ab4/cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c", size = 181048, upload-time = "2025-09-08T23:22:45.938Z" }, + { url = "https://files.pythonhosted.org/packages/ff/df/a4f0fbd47331ceeba3d37c2e51e9dfc9722498becbeec2bd8bc856c9538a/cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe", size = 212529, upload-time = "2025-09-08T23:22:47.349Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/72/12b5f8d3865bf0f87cf1404d8c374e7487dcf097a1c91c436e72e6badd83/cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062", size = 220097, upload-time = "2025-09-08T23:22:48.677Z" }, + { url = "https://files.pythonhosted.org/packages/c2/95/7a135d52a50dfa7c882ab0ac17e8dc11cec9d55d2c18dda414c051c5e69e/cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e", size = 207983, upload-time = "2025-09-08T23:22:50.06Z" }, + { url = "https://files.pythonhosted.org/packages/3a/c8/15cb9ada8895957ea171c62dc78ff3e99159ee7adb13c0123c001a2546c1/cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037", size = 206519, upload-time = "2025-09-08T23:22:51.364Z" }, + { url = "https://files.pythonhosted.org/packages/78/2d/7fa73dfa841b5ac06c7b8855cfc18622132e365f5b81d02230333ff26e9e/cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba", size = 219572, upload-time = "2025-09-08T23:22:52.902Z" }, + { url = "https://files.pythonhosted.org/packages/07/e0/267e57e387b4ca276b90f0434ff88b2c2241ad72b16d31836adddfd6031b/cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94", size = 222963, upload-time = "2025-09-08T23:22:54.518Z" }, + { url = 
"https://files.pythonhosted.org/packages/b6/75/1f2747525e06f53efbd878f4d03bac5b859cbc11c633d0fb81432d98a795/cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187", size = 221361, upload-time = "2025-09-08T23:22:55.867Z" }, + { url = "https://files.pythonhosted.org/packages/7b/2b/2b6435f76bfeb6bbf055596976da087377ede68df465419d192acf00c437/cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18", size = 172932, upload-time = "2025-09-08T23:22:57.188Z" }, + { url = "https://files.pythonhosted.org/packages/f8/ed/13bd4418627013bec4ed6e54283b1959cf6db888048c7cf4b4c3b5b36002/cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5", size = 183557, upload-time = "2025-09-08T23:22:58.351Z" }, + { url = "https://files.pythonhosted.org/packages/95/31/9f7f93ad2f8eff1dbc1c3656d7ca5bfd8fb52c9d786b4dcf19b2d02217fa/cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6", size = 177762, upload-time = "2025-09-08T23:22:59.668Z" }, + { url = "https://files.pythonhosted.org/packages/4b/8d/a0a47a0c9e413a658623d014e91e74a50cdd2c423f7ccfd44086ef767f90/cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb", size = 185230, upload-time = "2025-09-08T23:23:00.879Z" }, + { url = "https://files.pythonhosted.org/packages/4a/d2/a6c0296814556c68ee32009d9c2ad4f85f2707cdecfd7727951ec228005d/cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca", size = 181043, upload-time = "2025-09-08T23:23:02.231Z" }, + { url = "https://files.pythonhosted.org/packages/b0/1e/d22cc63332bd59b06481ceaac49d6c507598642e2230f201649058a7e704/cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b", size = 212446, upload-time = "2025-09-08T23:23:03.472Z" }, + { url = "https://files.pythonhosted.org/packages/a9/f5/a2c23eb03b61a0b8747f211eb716446c826ad66818ddc7810cc2cc19b3f2/cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b", size = 220101, upload-time = "2025-09-08T23:23:04.792Z" }, + { url = "https://files.pythonhosted.org/packages/f2/7f/e6647792fc5850d634695bc0e6ab4111ae88e89981d35ac269956605feba/cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2", size = 207948, upload-time = "2025-09-08T23:23:06.127Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1e/a5a1bd6f1fb30f22573f76533de12a00bf274abcdc55c8edab639078abb6/cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3", size = 206422, upload-time = "2025-09-08T23:23:07.753Z" }, + { url = "https://files.pythonhosted.org/packages/98/df/0a1755e750013a2081e863e7cd37e0cdd02664372c754e5560099eb7aa44/cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26", size = 219499, 
upload-time = "2025-09-08T23:23:09.648Z" }, + { url = "https://files.pythonhosted.org/packages/50/e1/a969e687fcf9ea58e6e2a928ad5e2dd88cc12f6f0ab477e9971f2309b57c/cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c", size = 222928, upload-time = "2025-09-08T23:23:10.928Z" }, + { url = "https://files.pythonhosted.org/packages/36/54/0362578dd2c9e557a28ac77698ed67323ed5b9775ca9d3fe73fe191bb5d8/cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b", size = 221302, upload-time = "2025-09-08T23:23:12.42Z" }, + { url = "https://files.pythonhosted.org/packages/eb/6d/bf9bda840d5f1dfdbf0feca87fbdb64a918a69bca42cfa0ba7b137c48cb8/cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27", size = 172909, upload-time = "2025-09-08T23:23:14.32Z" }, + { url = "https://files.pythonhosted.org/packages/37/18/6519e1ee6f5a1e579e04b9ddb6f1676c17368a7aba48299c3759bbc3c8b3/cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75", size = 183402, upload-time = "2025-09-08T23:23:15.535Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0e/02ceeec9a7d6ee63bb596121c2c8e9b3a9e150936f4fbef6ca1943e6137c/cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91", size = 177780, upload-time = "2025-09-08T23:23:16.761Z" }, + { url = "https://files.pythonhosted.org/packages/92/c4/3ce07396253a83250ee98564f8d7e9789fab8e58858f35d07a9a2c78de9f/cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = 
"sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5", size = 185320, upload-time = "2025-09-08T23:23:18.087Z" }, + { url = "https://files.pythonhosted.org/packages/59/dd/27e9fa567a23931c838c6b02d0764611c62290062a6d4e8ff7863daf9730/cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13", size = 181487, upload-time = "2025-09-08T23:23:19.622Z" }, + { url = "https://files.pythonhosted.org/packages/d6/43/0e822876f87ea8a4ef95442c3d766a06a51fc5298823f884ef87aaad168c/cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b", size = 220049, upload-time = "2025-09-08T23:23:20.853Z" }, + { url = "https://files.pythonhosted.org/packages/b4/89/76799151d9c2d2d1ead63c2429da9ea9d7aac304603de0c6e8764e6e8e70/cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c", size = 207793, upload-time = "2025-09-08T23:23:22.08Z" }, + { url = "https://files.pythonhosted.org/packages/bb/dd/3465b14bb9e24ee24cb88c9e3730f6de63111fffe513492bf8c808a3547e/cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef", size = 206300, upload-time = "2025-09-08T23:23:23.314Z" }, + { url = "https://files.pythonhosted.org/packages/47/d9/d83e293854571c877a92da46fdec39158f8d7e68da75bf73581225d28e90/cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775", size = 219244, upload-time = "2025-09-08T23:23:24.541Z" }, + { url = 
"https://files.pythonhosted.org/packages/2b/0f/1f177e3683aead2bb00f7679a16451d302c436b5cbf2505f0ea8146ef59e/cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205", size = 222828, upload-time = "2025-09-08T23:23:26.143Z" }, + { url = "https://files.pythonhosted.org/packages/c6/0f/cafacebd4b040e3119dcb32fed8bdef8dfe94da653155f9d0b9dc660166e/cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1", size = 220926, upload-time = "2025-09-08T23:23:27.873Z" }, + { url = "https://files.pythonhosted.org/packages/3e/aa/df335faa45b395396fcbc03de2dfcab242cd61a9900e914fe682a59170b1/cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f", size = 175328, upload-time = "2025-09-08T23:23:44.61Z" }, + { url = "https://files.pythonhosted.org/packages/bb/92/882c2d30831744296ce713f0feb4c1cd30f346ef747b530b5318715cc367/cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25", size = 185650, upload-time = "2025-09-08T23:23:45.848Z" }, + { url = "https://files.pythonhosted.org/packages/9f/2c/98ece204b9d35a7366b5b2c6539c350313ca13932143e79dc133ba757104/cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad", size = 180687, upload-time = "2025-09-08T23:23:47.105Z" }, + { url = "https://files.pythonhosted.org/packages/3e/61/c768e4d548bfa607abcda77423448df8c471f25dbe64fb2ef6d555eae006/cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = 
"sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9", size = 188773, upload-time = "2025-09-08T23:23:29.347Z" }, + { url = "https://files.pythonhosted.org/packages/2c/ea/5f76bce7cf6fcd0ab1a1058b5af899bfbef198bea4d5686da88471ea0336/cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d", size = 185013, upload-time = "2025-09-08T23:23:30.63Z" }, + { url = "https://files.pythonhosted.org/packages/be/b4/c56878d0d1755cf9caa54ba71e5d049479c52f9e4afc230f06822162ab2f/cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c", size = 221593, upload-time = "2025-09-08T23:23:31.91Z" }, + { url = "https://files.pythonhosted.org/packages/e0/0d/eb704606dfe8033e7128df5e90fee946bbcb64a04fcdaa97321309004000/cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8", size = 209354, upload-time = "2025-09-08T23:23:33.214Z" }, + { url = "https://files.pythonhosted.org/packages/d8/19/3c435d727b368ca475fb8742ab97c9cb13a0de600ce86f62eab7fa3eea60/cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc", size = 208480, upload-time = "2025-09-08T23:23:34.495Z" }, + { url = "https://files.pythonhosted.org/packages/d0/44/681604464ed9541673e486521497406fadcc15b5217c3e326b061696899a/cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592", size = 221584, upload-time = "2025-09-08T23:23:36.096Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/8e/342a504ff018a2825d395d44d63a767dd8ebc927ebda557fecdaca3ac33a/cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512", size = 224443, upload-time = "2025-09-08T23:23:37.328Z" }, + { url = "https://files.pythonhosted.org/packages/e1/5e/b666bacbbc60fbf415ba9988324a132c9a7a0448a9a8f125074671c0f2c3/cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4", size = 223437, upload-time = "2025-09-08T23:23:38.945Z" }, + { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, + { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, + { url = "https://files.pythonhosted.org/packages/c0/cc/08ed5a43f2996a16b462f64a7055c6e962803534924b9b2f1371d8c00b7b/cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = 
"sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf", size = 184288, upload-time = "2025-09-08T23:23:48.404Z" }, + { url = "https://files.pythonhosted.org/packages/3d/de/38d9726324e127f727b4ecc376bc85e505bfe61ef130eaf3f290c6847dd4/cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7", size = 180509, upload-time = "2025-09-08T23:23:49.73Z" }, + { url = "https://files.pythonhosted.org/packages/9b/13/c92e36358fbcc39cf0962e83223c9522154ee8630e1df7c0b3a39a8124e2/cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c", size = 208813, upload-time = "2025-09-08T23:23:51.263Z" }, + { url = "https://files.pythonhosted.org/packages/15/12/a7a79bd0df4c3bff744b2d7e52cc1b68d5e7e427b384252c42366dc1ecbc/cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165", size = 216498, upload-time = "2025-09-08T23:23:52.494Z" }, + { url = "https://files.pythonhosted.org/packages/a3/ad/5c51c1c7600bdd7ed9a24a203ec255dccdd0ebf4527f7b922a0bde2fb6ed/cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534", size = 203243, upload-time = "2025-09-08T23:23:53.836Z" }, + { url = "https://files.pythonhosted.org/packages/32/f2/81b63e288295928739d715d00952c8c6034cb6c6a516b17d37e0c8be5600/cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f", size = 203158, upload-time = "2025-09-08T23:23:55.169Z" 
}, + { url = "https://files.pythonhosted.org/packages/1f/74/cc4096ce66f5939042ae094e2e96f53426a979864aa1f96a621ad128be27/cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63", size = 216548, upload-time = "2025-09-08T23:23:56.506Z" }, + { url = "https://files.pythonhosted.org/packages/e8/be/f6424d1dc46b1091ffcc8964fa7c0ab0cd36839dd2761b49c90481a6ba1b/cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2", size = 218897, upload-time = "2025-09-08T23:23:57.825Z" }, + { url = "https://files.pythonhosted.org/packages/f7/e0/dda537c2309817edf60109e39265f24f24aa7f050767e22c98c53fe7f48b/cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65", size = 211249, upload-time = "2025-09-08T23:23:59.139Z" }, + { url = "https://files.pythonhosted.org/packages/2b/e7/7c769804eb75e4c4b35e658dba01de1640a351a9653c3d49ca89d16ccc91/cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322", size = 218041, upload-time = "2025-09-08T23:24:00.496Z" }, + { url = "https://files.pythonhosted.org/packages/aa/d9/6218d78f920dcd7507fc16a766b5ef8f3b913cc7aa938e7fc80b9978d089/cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a", size = 172138, upload-time = "2025-09-08T23:24:01.7Z" }, + { url = "https://files.pythonhosted.org/packages/54/8f/a1e836f82d8e32a97e6b29cc8f641779181ac7363734f12df27db803ebda/cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9", size = 182794, upload-time = "2025-09-08T23:24:02.943Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/b8/6d51fc1d52cbd52cd4ccedd5b5b2f0f6a11bbf6765c782298b0f3e808541/charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d", size = 209709, upload-time = "2025-10-14T04:40:11.385Z" }, + { url = "https://files.pythonhosted.org/packages/5c/af/1f9d7f7faafe2ddfb6f72a2e07a548a629c61ad510fe60f9630309908fef/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8", size = 148814, upload-time = "2025-10-14T04:40:13.135Z" }, + { url = "https://files.pythonhosted.org/packages/79/3d/f2e3ac2bbc056ca0c204298ea4e3d9db9b4afe437812638759db2c976b5f/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad", size = 144467, upload-time = "2025-10-14T04:40:14.728Z" }, + { url = 
"https://files.pythonhosted.org/packages/ec/85/1bf997003815e60d57de7bd972c57dc6950446a3e4ccac43bc3070721856/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8", size = 162280, upload-time = "2025-10-14T04:40:16.14Z" }, + { url = "https://files.pythonhosted.org/packages/3e/8e/6aa1952f56b192f54921c436b87f2aaf7c7a7c3d0d1a765547d64fd83c13/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d", size = 159454, upload-time = "2025-10-14T04:40:17.567Z" }, + { url = "https://files.pythonhosted.org/packages/36/3b/60cbd1f8e93aa25d1c669c649b7a655b0b5fb4c571858910ea9332678558/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313", size = 153609, upload-time = "2025-10-14T04:40:19.08Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/6a13396948b8fd3c4b4fd5bc74d045f5637d78c9675585e8e9fbe5636554/charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e", size = 151849, upload-time = "2025-10-14T04:40:20.607Z" }, + { url = "https://files.pythonhosted.org/packages/b7/7a/59482e28b9981d105691e968c544cc0df3b7d6133152fb3dcdc8f135da7a/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93", size = 151586, upload-time = "2025-10-14T04:40:21.719Z" }, + { url = 
"https://files.pythonhosted.org/packages/92/59/f64ef6a1c4bdd2baf892b04cd78792ed8684fbc48d4c2afe467d96b4df57/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0", size = 145290, upload-time = "2025-10-14T04:40:23.069Z" }, + { url = "https://files.pythonhosted.org/packages/6b/63/3bf9f279ddfa641ffa1962b0db6a57a9c294361cc2f5fcac997049a00e9c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84", size = 163663, upload-time = "2025-10-14T04:40:24.17Z" }, + { url = "https://files.pythonhosted.org/packages/ed/09/c9e38fc8fa9e0849b172b581fd9803bdf6e694041127933934184e19f8c3/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e", size = 151964, upload-time = "2025-10-14T04:40:25.368Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d1/d28b747e512d0da79d8b6a1ac18b7ab2ecfd81b2944c4c710e166d8dd09c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db", size = 161064, upload-time = "2025-10-14T04:40:26.806Z" }, + { url = "https://files.pythonhosted.org/packages/bb/9a/31d62b611d901c3b9e5500c36aab0ff5eb442043fb3a1c254200d3d397d9/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6", size = 155015, upload-time = "2025-10-14T04:40:28.284Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/f3/107e008fa2bff0c8b9319584174418e5e5285fef32f79d8ee6a430d0039c/charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f", size = 99792, upload-time = "2025-10-14T04:40:29.613Z" }, + { url = "https://files.pythonhosted.org/packages/eb/66/e396e8a408843337d7315bab30dbf106c38966f1819f123257f5520f8a96/charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d", size = 107198, upload-time = "2025-10-14T04:40:30.644Z" }, + { url = "https://files.pythonhosted.org/packages/b5/58/01b4f815bf0312704c267f2ccb6e5d42bcc7752340cd487bc9f8c3710597/charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69", size = 100262, upload-time = "2025-10-14T04:40:32.108Z" }, + { url = "https://files.pythonhosted.org/packages/ed/27/c6491ff4954e58a10f69ad90aca8a1b6fe9c5d3c6f380907af3c37435b59/charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8", size = 206988, upload-time = "2025-10-14T04:40:33.79Z" }, + { url = "https://files.pythonhosted.org/packages/94/59/2e87300fe67ab820b5428580a53cad894272dbb97f38a7a814a2a1ac1011/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0", size = 147324, upload-time = "2025-10-14T04:40:34.961Z" }, + { url = 
"https://files.pythonhosted.org/packages/07/fb/0cf61dc84b2b088391830f6274cb57c82e4da8bbc2efeac8c025edb88772/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3", size = 142742, upload-time = "2025-10-14T04:40:36.105Z" }, + { url = "https://files.pythonhosted.org/packages/62/8b/171935adf2312cd745d290ed93cf16cf0dfe320863ab7cbeeae1dcd6535f/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc", size = 160863, upload-time = "2025-10-14T04:40:37.188Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/ad875b192bda14f2173bfc1bc9a55e009808484a4b256748d931b6948442/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897", size = 157837, upload-time = "2025-10-14T04:40:38.435Z" }, + { url = "https://files.pythonhosted.org/packages/6d/fc/de9cce525b2c5b94b47c70a4b4fb19f871b24995c728e957ee68ab1671ea/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381", size = 151550, upload-time = "2025-10-14T04:40:40.053Z" }, + { url = "https://files.pythonhosted.org/packages/55/c2/43edd615fdfba8c6f2dfbd459b25a6b3b551f24ea21981e23fb768503ce1/charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815", size = 149162, upload-time = 
"2025-10-14T04:40:41.163Z" }, + { url = "https://files.pythonhosted.org/packages/03/86/bde4ad8b4d0e9429a4e82c1e8f5c659993a9a863ad62c7df05cf7b678d75/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0", size = 150019, upload-time = "2025-10-14T04:40:42.276Z" }, + { url = "https://files.pythonhosted.org/packages/1f/86/a151eb2af293a7e7bac3a739b81072585ce36ccfb4493039f49f1d3cae8c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161", size = 143310, upload-time = "2025-10-14T04:40:43.439Z" }, + { url = "https://files.pythonhosted.org/packages/b5/fe/43dae6144a7e07b87478fdfc4dbe9efd5defb0e7ec29f5f58a55aeef7bf7/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4", size = 162022, upload-time = "2025-10-14T04:40:44.547Z" }, + { url = "https://files.pythonhosted.org/packages/80/e6/7aab83774f5d2bca81f42ac58d04caf44f0cc2b65fc6db2b3b2e8a05f3b3/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89", size = 149383, upload-time = "2025-10-14T04:40:46.018Z" }, + { url = "https://files.pythonhosted.org/packages/4f/e8/b289173b4edae05c0dde07f69f8db476a0b511eac556dfe0d6bda3c43384/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569", size = 159098, upload-time = "2025-10-14T04:40:47.081Z" }, + { url = 
"https://files.pythonhosted.org/packages/d8/df/fe699727754cae3f8478493c7f45f777b17c3ef0600e28abfec8619eb49c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224", size = 152991, upload-time = "2025-10-14T04:40:48.246Z" }, + { url = "https://files.pythonhosted.org/packages/1a/86/584869fe4ddb6ffa3bd9f491b87a01568797fb9bd8933f557dba9771beaf/charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a", size = 99456, upload-time = "2025-10-14T04:40:49.376Z" }, + { url = "https://files.pythonhosted.org/packages/65/f6/62fdd5feb60530f50f7e38b4f6a1d5203f4d16ff4f9f0952962c044e919a/charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016", size = 106978, upload-time = "2025-10-14T04:40:50.844Z" }, + { url = "https://files.pythonhosted.org/packages/7a/9d/0710916e6c82948b3be62d9d398cb4fcf4e97b56d6a6aeccd66c4b2f2bd5/charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1", size = 99969, upload-time = "2025-10-14T04:40:52.272Z" }, + { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = 
"https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, + { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, 
upload-time = "2025-10-14T04:40:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, + { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = 
"https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, + { url = "https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = 
"https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" 
}, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = 
"https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = 
"https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = 
"https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = 
"https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = 
"https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/46/7c/0c4760bccf082737ca7ab84a4c2034fcc06b1f21cf3032ea98bd6feb1725/charset_normalizer-3.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9", size = 209609, upload-time = "2025-10-14T04:42:10.922Z" }, + { url = "https://files.pythonhosted.org/packages/bb/a4/69719daef2f3d7f1819de60c9a6be981b8eeead7542d5ec4440f3c80e111/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d", size = 149029, upload-time = "2025-10-14T04:42:12.38Z" }, + { url = "https://files.pythonhosted.org/packages/e6/21/8d4e1d6c1e6070d3672908b8e4533a71b5b53e71d16828cc24d0efec564c/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608", size = 144580, upload-time = "2025-10-14T04:42:13.549Z" }, + { url = 
"https://files.pythonhosted.org/packages/a7/0a/a616d001b3f25647a9068e0b9199f697ce507ec898cacb06a0d5a1617c99/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc", size = 162340, upload-time = "2025-10-14T04:42:14.892Z" }, + { url = "https://files.pythonhosted.org/packages/85/93/060b52deb249a5450460e0585c88a904a83aec474ab8e7aba787f45e79f2/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e", size = 159619, upload-time = "2025-10-14T04:42:16.676Z" }, + { url = "https://files.pythonhosted.org/packages/dd/21/0274deb1cc0632cd587a9a0ec6b4674d9108e461cb4cd40d457adaeb0564/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1", size = 153980, upload-time = "2025-10-14T04:42:17.917Z" }, + { url = "https://files.pythonhosted.org/packages/28/2b/e3d7d982858dccc11b31906976323d790dded2017a0572f093ff982d692f/charset_normalizer-3.4.4-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3", size = 152174, upload-time = "2025-10-14T04:42:19.018Z" }, + { url = "https://files.pythonhosted.org/packages/6e/ff/4a269f8e35f1e58b2df52c131a1fa019acb7ef3f8697b7d464b07e9b492d/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6", size = 151666, upload-time = "2025-10-14T04:42:20.171Z" }, + { url = 
"https://files.pythonhosted.org/packages/da/c9/ec39870f0b330d58486001dd8e532c6b9a905f5765f58a6f8204926b4a93/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88", size = 145550, upload-time = "2025-10-14T04:42:21.324Z" }, + { url = "https://files.pythonhosted.org/packages/75/8f/d186ab99e40e0ed9f82f033d6e49001701c81244d01905dd4a6924191a30/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1", size = 163721, upload-time = "2025-10-14T04:42:22.46Z" }, + { url = "https://files.pythonhosted.org/packages/96/b1/6047663b9744df26a7e479ac1e77af7134b1fcf9026243bb48ee2d18810f/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf", size = 152127, upload-time = "2025-10-14T04:42:23.712Z" }, + { url = "https://files.pythonhosted.org/packages/59/78/e5a6eac9179f24f704d1be67d08704c3c6ab9f00963963524be27c18ed87/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318", size = 161175, upload-time = "2025-10-14T04:42:24.87Z" }, + { url = "https://files.pythonhosted.org/packages/e5/43/0e626e42d54dd2f8dd6fc5e1c5ff00f05fbca17cb699bedead2cae69c62f/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c", size = 155375, upload-time = "2025-10-14T04:42:27.246Z" }, + { url = 
"https://files.pythonhosted.org/packages/e9/91/d9615bf2e06f35e4997616ff31248c3657ed649c5ab9d35ea12fce54e380/charset_normalizer-3.4.4-cp39-cp39-win32.whl", hash = "sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505", size = 99692, upload-time = "2025-10-14T04:42:28.425Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a9/6c040053909d9d1ef4fcab45fddec083aedc9052c10078339b47c8573ea8/charset_normalizer-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966", size = 107192, upload-time = "2025-10-14T04:42:29.482Z" }, + { url = "https://files.pythonhosted.org/packages/f0/c6/4fa536b2c0cd3edfb7ccf8469fa0f363ea67b7213a842b90909ca33dd851/charset_normalizer-3.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50", size = 100220, upload-time = "2025-10-14T04:42:30.632Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + +[[package]] +name = "click" +version = "8.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } +wheels = [ + { url 
= "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "cryptography" +version = "46.0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/33/c00162f49c0e2fe8064a62cb92b93e50c74a72bc370ab92f86112b33ff62/cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1", size = 749258, upload-time = "2025-10-15T23:18:31.74Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/1d/42/9c391dd801d6cf0d561b5890549d4b27bafcc53b39c31a817e69d87c625b/cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a", size = 7225004, upload-time = "2025-10-15T23:16:52.239Z" }, + { url = "https://files.pythonhosted.org/packages/1c/67/38769ca6b65f07461eb200e85fc1639b438bdc667be02cf7f2cd6a64601c/cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc", size = 4296667, upload-time = "2025-10-15T23:16:54.369Z" }, + { url = "https://files.pythonhosted.org/packages/5c/49/498c86566a1d80e978b42f0d702795f69887005548c041636df6ae1ca64c/cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d", size = 4450807, upload-time = "2025-10-15T23:16:56.414Z" }, + { url = "https://files.pythonhosted.org/packages/4b/0a/863a3604112174c8624a2ac3c038662d9e59970c7f926acdcfaed8d61142/cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb", size = 4299615, upload-time = "2025-10-15T23:16:58.442Z" }, + { url = "https://files.pythonhosted.org/packages/64/02/b73a533f6b64a69f3cd3872acb6ebc12aef924d8d103133bb3ea750dc703/cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849", size = 4016800, upload-time = "2025-10-15T23:17:00.378Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/d5/16e41afbfa450cde85a3b7ec599bebefaef16b5c6ba4ec49a3532336ed72/cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8", size = 4984707, upload-time = "2025-10-15T23:17:01.98Z" }, + { url = "https://files.pythonhosted.org/packages/c9/56/e7e69b427c3878352c2fb9b450bd0e19ed552753491d39d7d0a2f5226d41/cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec", size = 4482541, upload-time = "2025-10-15T23:17:04.078Z" }, + { url = "https://files.pythonhosted.org/packages/78/f6/50736d40d97e8483172f1bb6e698895b92a223dba513b0ca6f06b2365339/cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91", size = 4299464, upload-time = "2025-10-15T23:17:05.483Z" }, + { url = "https://files.pythonhosted.org/packages/00/de/d8e26b1a855f19d9994a19c702fa2e93b0456beccbcfe437eda00e0701f2/cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e", size = 4950838, upload-time = "2025-10-15T23:17:07.425Z" }, + { url = "https://files.pythonhosted.org/packages/8f/29/798fc4ec461a1c9e9f735f2fc58741b0daae30688f41b2497dcbc9ed1355/cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926", size = 4481596, upload-time = "2025-10-15T23:17:09.343Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/8d/03cd48b20a573adfff7652b76271078e3045b9f49387920e7f1f631d125e/cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71", size = 4426782, upload-time = "2025-10-15T23:17:11.22Z" }, + { url = "https://files.pythonhosted.org/packages/fa/b1/ebacbfe53317d55cf33165bda24c86523497a6881f339f9aae5c2e13e57b/cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac", size = 4698381, upload-time = "2025-10-15T23:17:12.829Z" }, + { url = "https://files.pythonhosted.org/packages/96/92/8a6a9525893325fc057a01f654d7efc2c64b9de90413adcf605a85744ff4/cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018", size = 3055988, upload-time = "2025-10-15T23:17:14.65Z" }, + { url = "https://files.pythonhosted.org/packages/7e/bf/80fbf45253ea585a1e492a6a17efcb93467701fa79e71550a430c5e60df0/cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb", size = 3514451, upload-time = "2025-10-15T23:17:16.142Z" }, + { url = "https://files.pythonhosted.org/packages/2e/af/9b302da4c87b0beb9db4e756386a7c6c5b8003cd0e742277888d352ae91d/cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c", size = 2928007, upload-time = "2025-10-15T23:17:18.04Z" }, + { url = "https://files.pythonhosted.org/packages/f5/e2/a510aa736755bffa9d2f75029c229111a1d02f8ecd5de03078f4c18d91a3/cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = 
"sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217", size = 7158012, upload-time = "2025-10-15T23:17:19.982Z" }, + { url = "https://files.pythonhosted.org/packages/73/dc/9aa866fbdbb95b02e7f9d086f1fccfeebf8953509b87e3f28fff927ff8a0/cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5", size = 4288728, upload-time = "2025-10-15T23:17:21.527Z" }, + { url = "https://files.pythonhosted.org/packages/c5/fd/bc1daf8230eaa075184cbbf5f8cd00ba9db4fd32d63fb83da4671b72ed8a/cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715", size = 4435078, upload-time = "2025-10-15T23:17:23.042Z" }, + { url = "https://files.pythonhosted.org/packages/82/98/d3bd5407ce4c60017f8ff9e63ffee4200ab3e23fe05b765cab805a7db008/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54", size = 4293460, upload-time = "2025-10-15T23:17:24.885Z" }, + { url = "https://files.pythonhosted.org/packages/26/e9/e23e7900983c2b8af7a08098db406cf989d7f09caea7897e347598d4cd5b/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459", size = 3995237, upload-time = "2025-10-15T23:17:26.449Z" }, + { url = "https://files.pythonhosted.org/packages/91/15/af68c509d4a138cfe299d0d7ddb14afba15233223ebd933b4bbdbc7155d3/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422", size = 4967344, upload-time = 
"2025-10-15T23:17:28.06Z" }, + { url = "https://files.pythonhosted.org/packages/ca/e3/8643d077c53868b681af077edf6b3cb58288b5423610f21c62aadcbe99f4/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7", size = 4466564, upload-time = "2025-10-15T23:17:29.665Z" }, + { url = "https://files.pythonhosted.org/packages/0e/43/c1e8726fa59c236ff477ff2b5dc071e54b21e5a1e51aa2cee1676f1c986f/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044", size = 4292415, upload-time = "2025-10-15T23:17:31.686Z" }, + { url = "https://files.pythonhosted.org/packages/42/f9/2f8fefdb1aee8a8e3256a0568cffc4e6d517b256a2fe97a029b3f1b9fe7e/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665", size = 4931457, upload-time = "2025-10-15T23:17:33.478Z" }, + { url = "https://files.pythonhosted.org/packages/79/30/9b54127a9a778ccd6d27c3da7563e9f2d341826075ceab89ae3b41bf5be2/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3", size = 4466074, upload-time = "2025-10-15T23:17:35.158Z" }, + { url = "https://files.pythonhosted.org/packages/ac/68/b4f4a10928e26c941b1b6a179143af9f4d27d88fe84a6a3c53592d2e76bf/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20", size = 4420569, upload-time = "2025-10-15T23:17:37.188Z" }, + { url = 
"https://files.pythonhosted.org/packages/a3/49/3746dab4c0d1979888f125226357d3262a6dd40e114ac29e3d2abdf1ec55/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de", size = 4681941, upload-time = "2025-10-15T23:17:39.236Z" }, + { url = "https://files.pythonhosted.org/packages/fd/30/27654c1dbaf7e4a3531fa1fc77986d04aefa4d6d78259a62c9dc13d7ad36/cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914", size = 3022339, upload-time = "2025-10-15T23:17:40.888Z" }, + { url = "https://files.pythonhosted.org/packages/f6/30/640f34ccd4d2a1bc88367b54b926b781b5a018d65f404d409aba76a84b1c/cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db", size = 3494315, upload-time = "2025-10-15T23:17:42.769Z" }, + { url = "https://files.pythonhosted.org/packages/ba/8b/88cc7e3bd0a8e7b861f26981f7b820e1f46aa9d26cc482d0feba0ecb4919/cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21", size = 2919331, upload-time = "2025-10-15T23:17:44.468Z" }, + { url = "https://files.pythonhosted.org/packages/fd/23/45fe7f376a7df8daf6da3556603b36f53475a99ce4faacb6ba2cf3d82021/cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936", size = 7218248, upload-time = "2025-10-15T23:17:46.294Z" }, + { url = 
"https://files.pythonhosted.org/packages/27/32/b68d27471372737054cbd34c84981f9edbc24fe67ca225d389799614e27f/cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683", size = 4294089, upload-time = "2025-10-15T23:17:48.269Z" }, + { url = "https://files.pythonhosted.org/packages/26/42/fa8389d4478368743e24e61eea78846a0006caffaf72ea24a15159215a14/cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d", size = 4440029, upload-time = "2025-10-15T23:17:49.837Z" }, + { url = "https://files.pythonhosted.org/packages/5f/eb/f483db0ec5ac040824f269e93dd2bd8a21ecd1027e77ad7bdf6914f2fd80/cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0", size = 4297222, upload-time = "2025-10-15T23:17:51.357Z" }, + { url = "https://files.pythonhosted.org/packages/fd/cf/da9502c4e1912cb1da3807ea3618a6829bee8207456fbbeebc361ec38ba3/cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc", size = 4012280, upload-time = "2025-10-15T23:17:52.964Z" }, + { url = "https://files.pythonhosted.org/packages/6b/8f/9adb86b93330e0df8b3dcf03eae67c33ba89958fc2e03862ef1ac2b42465/cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3", size = 4978958, upload-time = "2025-10-15T23:17:54.965Z" }, + { url = 
"https://files.pythonhosted.org/packages/d1/a0/5fa77988289c34bdb9f913f5606ecc9ada1adb5ae870bd0d1054a7021cc4/cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971", size = 4473714, upload-time = "2025-10-15T23:17:56.754Z" }, + { url = "https://files.pythonhosted.org/packages/14/e5/fc82d72a58d41c393697aa18c9abe5ae1214ff6f2a5c18ac470f92777895/cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac", size = 4296970, upload-time = "2025-10-15T23:17:58.588Z" }, + { url = "https://files.pythonhosted.org/packages/78/06/5663ed35438d0b09056973994f1aec467492b33bd31da36e468b01ec1097/cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04", size = 4940236, upload-time = "2025-10-15T23:18:00.897Z" }, + { url = "https://files.pythonhosted.org/packages/fc/59/873633f3f2dcd8a053b8dd1d38f783043b5fce589c0f6988bf55ef57e43e/cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506", size = 4472642, upload-time = "2025-10-15T23:18:02.749Z" }, + { url = "https://files.pythonhosted.org/packages/3d/39/8e71f3930e40f6877737d6f69248cf74d4e34b886a3967d32f919cc50d3b/cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963", size = 4423126, upload-time = "2025-10-15T23:18:04.85Z" }, + { url = 
"https://files.pythonhosted.org/packages/cd/c7/f65027c2810e14c3e7268353b1681932b87e5a48e65505d8cc17c99e36ae/cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4", size = 4686573, upload-time = "2025-10-15T23:18:06.908Z" }, + { url = "https://files.pythonhosted.org/packages/0a/6e/1c8331ddf91ca4730ab3086a0f1be19c65510a33b5a441cb334e7a2d2560/cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df", size = 3036695, upload-time = "2025-10-15T23:18:08.672Z" }, + { url = "https://files.pythonhosted.org/packages/90/45/b0d691df20633eff80955a0fc7695ff9051ffce8b69741444bd9ed7bd0db/cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f", size = 3501720, upload-time = "2025-10-15T23:18:10.632Z" }, + { url = "https://files.pythonhosted.org/packages/e8/cb/2da4cc83f5edb9c3257d09e1e7ab7b23f049c7962cae8d842bbef0a9cec9/cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372", size = 2918740, upload-time = "2025-10-15T23:18:12.277Z" }, + { url = "https://files.pythonhosted.org/packages/d9/cd/1a8633802d766a0fa46f382a77e096d7e209e0817892929655fe0586ae32/cryptography-46.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a23582810fedb8c0bc47524558fb6c56aac3fc252cb306072fd2815da2a47c32", size = 3689163, upload-time = "2025-10-15T23:18:13.821Z" }, + { url = "https://files.pythonhosted.org/packages/4c/59/6b26512964ace6480c3e54681a9859c974172fb141c38df11eadd8416947/cryptography-46.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:e7aec276d68421f9574040c26e2a7c3771060bc0cff408bae1dcb19d3ab1e63c", size = 3429474, upload-time = "2025-10-15T23:18:15.477Z" }, + { url = "https://files.pythonhosted.org/packages/06/8a/e60e46adab4362a682cf142c7dcb5bf79b782ab2199b0dcb81f55970807f/cryptography-46.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7ce938a99998ed3c8aa7e7272dca1a610401ede816d36d0693907d863b10d9ea", size = 3698132, upload-time = "2025-10-15T23:18:17.056Z" }, + { url = "https://files.pythonhosted.org/packages/da/38/f59940ec4ee91e93d3311f7532671a5cef5570eb04a144bf203b58552d11/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:191bb60a7be5e6f54e30ba16fdfae78ad3a342a0599eb4193ba88e3f3d6e185b", size = 4243992, upload-time = "2025-10-15T23:18:18.695Z" }, + { url = "https://files.pythonhosted.org/packages/b0/0c/35b3d92ddebfdfda76bb485738306545817253d0a3ded0bfe80ef8e67aa5/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c70cc23f12726be8f8bc72e41d5065d77e4515efae3690326764ea1b07845cfb", size = 4409944, upload-time = "2025-10-15T23:18:20.597Z" }, + { url = "https://files.pythonhosted.org/packages/99/55/181022996c4063fc0e7666a47049a1ca705abb9c8a13830f074edb347495/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:9394673a9f4de09e28b5356e7fff97d778f8abad85c9d5ac4a4b7e25a0de7717", size = 4242957, upload-time = "2025-10-15T23:18:22.18Z" }, + { url = "https://files.pythonhosted.org/packages/ba/af/72cd6ef29f9c5f731251acadaeb821559fe25f10852f44a63374c9ca08c1/cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:94cd0549accc38d1494e1f8de71eca837d0509d0d44bf11d158524b0e12cebf9", size = 4409447, upload-time = "2025-10-15T23:18:24.209Z" }, + { url = 
"https://files.pythonhosted.org/packages/0d/c3/e90f4a4feae6410f914f8ebac129b9ae7a8c92eb60a638012dde42030a9d/cryptography-46.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c", size = 3438528, upload-time = "2025-10-15T23:18:26.227Z" }, +] + +[[package]] +name = "dill" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/12/80/630b4b88364e9a8c8c5797f4602d0f76ef820909ee32f0bacb9f90654042/dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0", size = 186976, upload-time = "2025-04-16T00:41:48.867Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/3d/9373ad9c56321fdab5b41197068e1d8c25883b3fea29dd361f9b55116869/dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049", size = 119668, upload-time = "2025-04-16T00:41:47.671Z" }, +] + +[[package]] +name = "eval-type-backport" +version = "0.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fb/a3/cafafb4558fd638aadfe4121dc6cefb8d743368c085acb2f521df0f3d9d7/eval_type_backport-0.3.1.tar.gz", hash = "sha256:57e993f7b5b69d271e37482e62f74e76a0276c82490cf8e4f0dffeb6b332d5ed", size = 9445, upload-time = "2025-12-02T11:51:42.987Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/22/fdc2e30d43ff853720042fa15baa3e6122722be1a7950a98233ebb55cd71/eval_type_backport-0.3.1-py3-none-any.whl", hash = "sha256:279ab641905e9f11129f56a8a78f493518515b83402b860f6f06dd7c011fdfa8", size = 6063, upload-time = 
"2025-12-02T11:51:41.665Z" }, +] + +[[package]] +name = "exceptiongroup" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/79/66800aadf48771f6b62f7eb014e352e5d06856655206165d775e675a02c9/exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219", size = 30371, upload-time = "2025-11-21T23:01:54.787Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0e/97c33bf5009bdbac74fd2beace167cab3f978feb69cc36f1ef79360d6c4e/exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598", size = 16740, upload-time = "2025-11-21T23:01:53.443Z" }, +] + +[[package]] +name = "google-auth" +version = "2.45.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/00/3c794502a8b892c404b2dea5b3650eb21bfc7069612fbfd15c7f17c1cb0d/google_auth-2.45.0.tar.gz", hash = "sha256:90d3f41b6b72ea72dd9811e765699ee491ab24139f34ebf1ca2b9cc0c38708f3", size = 320708, upload-time = "2025-12-15T22:58:42.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/97/451d55e05487a5cd6279a01a7e34921858b16f7dc8aa38a2c684743cd2b3/google_auth-2.45.0-py2.py3-none-any.whl", hash = "sha256:82344e86dc00410ef5382d99be677c6043d72e502b625aa4f4afa0bdacca0f36", size = 233312, upload-time = "2025-12-15T22:58:40.777Z" }, +] + +[[package]] +name = "googleapis-common-protos" +version = "1.72.0" +source = 
{ registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, +] + +[[package]] +name = "griffe" +version = "1.14.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ec/d7/6c09dd7ce4c7837e4cdb11dce980cb45ae3cd87677298dc3b781b6bce7d3/griffe-1.14.0.tar.gz", hash = "sha256:9d2a15c1eca966d68e00517de5d69dd1bc5c9f2335ef6c1775362ba5b8651a13", size = 424684, upload-time = "2025-09-05T15:02:29.167Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/b1/9ff6578d789a89812ff21e4e0f80ffae20a65d5dd84e7a17873fe3b365be/griffe-1.14.0-py3-none-any.whl", hash = "sha256:0e9d52832cccf0f7188cfe585ba962d2674b241c01916d780925df34873bceb0", size = 144439, upload-time = "2025-09-05T15:02:27.511Z" }, +] + +[[package]] +name = "griffe" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + 
"python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version == '3.10.*'", +] +dependencies = [ + { name = "colorama", marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0d/0c/3a471b6e31951dce2360477420d0a8d1e00dea6cf33b70f3e8c3ab6e28e1/griffe-1.15.0.tar.gz", hash = "sha256:7726e3afd6f298fbc3696e67958803e7ac843c1cfe59734b6251a40cdbfb5eea", size = 424112, upload-time = "2025-11-10T15:03:15.52Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/83/3b1d03d36f224edded98e9affd0467630fc09d766c0e56fb1498cbb04a9b/griffe-1.15.0-py3-none-any.whl", hash = "sha256:6f6762661949411031f5fcda9593f586e6ce8340f0ba88921a0f2ef7a81eb9a3", size = 150705, upload-time = "2025-11-10T15:03:13.549Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/4c/751061ffa58615a32c31b2d82e8482be8dd4a89154f003147acee90f2be9/httpx_sse-0.4.3.tar.gz", hash = 
"sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d", size = 15943, upload-time = "2025-10-10T21:48:22.271Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/fd/6668e5aec43ab844de6fc74927e155a3b37bf40d7c3790e49fc0406b6578/httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc", size = 8960, upload-time = "2025-10-10T21:48:21.158Z" }, +] + +[[package]] +name = "idna" +version = "3.11" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version == '3.10.*'", +] +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = "invoke" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/de/bd/b461d3424a24c80490313fd77feeb666ca4f6a28c7e72713e3d9095719b4/invoke-2.2.1.tar.gz", hash = "sha256:515bf49b4a48932b79b024590348da22f39c4942dff991ad1fb8b8baea1be707", size = 304762, upload-time = "2025-10-11T00:36:35.172Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/32/4b/b99e37f88336009971405cbb7630610322ed6fbfa31e1d7ab3fbf3049a2d/invoke-2.2.1-py3-none-any.whl", hash = "sha256:2413bc441b376e5cd3f55bb5d364f973ad8bdd7bf87e53c79de3c11bf3feecc8", size = 160287, upload-time = "2025-10-11T00:36:33.703Z" }, +] + +[[package]] +name = "isort" +version = "5.13.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/87/f9/c1eb8635a24e87ade2efce21e3ce8cd6b8630bb685ddc9cdaca1349b2eb5/isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109", size = 175303, upload-time = "2023-12-13T20:37:26.124Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/b3/8def84f539e7d2289a02f0524b944b15d7c75dab7628bedf1c4f0992029c/isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6", size = 92310, upload-time = "2023-12-13T20:37:23.244Z" }, +] + +[[package]] +name = 
"jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs", marker = "python_full_version >= '3.10'" }, + { name = "jsonschema-specifications", marker = "python_full_version >= '3.10'" }, + { name = "referencing", marker = "python_full_version >= '3.10'" }, + { name = "rpds-py", marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing", marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/45/1a4ed80516f02155c51f51e8cedb3c1902296743db0bbc66608a0db2814f/jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = 
"sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe", size = 18437, upload-time = "2025-09-08T01:34:57.871Z" }, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/ff/0ffefdcac38932a54d2b5eed4e0ba8a408f215002cd178ad1df0f2806ff8/mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325", size = 9658, upload-time = "2022-01-24T01:14:51.113Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/1a/1f68f9ba0c207934b35b86a8ca3aad8395a3d6dd7921c0686e23853ff5a9/mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e", size = 7350, upload-time = "2022-01-24T01:14:49.62Z" }, +] + +[[package]] +name = "mcp" +version = "1.25.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio", marker = "python_full_version >= '3.10'" }, + { name = "httpx", marker = "python_full_version >= '3.10'" }, + { name = "httpx-sse", marker = "python_full_version >= '3.10'" }, + { name = "jsonschema", marker = "python_full_version >= '3.10'" }, + { name = "pydantic", marker = "python_full_version >= '3.10'" }, + { name = "pydantic-settings", marker = "python_full_version >= '3.10'" }, + { name = "pyjwt", extra = ["crypto"], marker = "python_full_version >= '3.10'" }, + { name = "python-multipart", marker = "python_full_version >= '3.10'" }, + { name = "pywin32", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" }, + { name = "sse-starlette", marker = "python_full_version >= '3.10'" }, + { name = "starlette", marker = "python_full_version >= '3.10'" }, + { name = "typing-extensions", marker = "python_full_version >= '3.10'" }, + 
{ name = "typing-inspection", marker = "python_full_version >= '3.10'" }, + { name = "uvicorn", marker = "python_full_version >= '3.10' and sys_platform != 'emscripten'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d5/2d/649d80a0ecf6a1f82632ca44bec21c0461a9d9fc8934d38cb5b319f2db5e/mcp-1.25.0.tar.gz", hash = "sha256:56310361ebf0364e2d438e5b45f7668cbb124e158bb358333cd06e49e83a6802", size = 605387, upload-time = "2025-12-19T10:19:56.985Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e2/fc/6dc7659c2ae5ddf280477011f4213a74f806862856b796ef08f028e664bf/mcp-1.25.0-py3-none-any.whl", hash = "sha256:b37c38144a666add0862614cc79ec276e97d72aa8ca26d622818d4e278b9721a", size = 233076, upload-time = "2025-12-19T10:19:55.416Z" }, +] + +[[package]] +name = "mistralai" +version = "1.10.0" +source = { editable = "." } +dependencies = [ + { name = "eval-type-backport" }, + { name = "httpx" }, + { name = "invoke" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-http" }, + { name = "opentelemetry-sdk" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "pydantic" }, + { name = "python-dateutil" }, + { name = "pyyaml" }, + { name = "typing-inspection" }, +] + +[package.optional-dependencies] +agents = [ + { name = "authlib" }, + { name = "griffe", version = "1.14.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "griffe", version = "1.15.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "mcp", marker = "python_full_version >= '3.10'" }, +] +gcp = [ + { name = "google-auth" }, + { name = "requests" }, +] + +[package.dev-dependencies] +dev = [ + { name = "authlib" }, + { name = "griffe", version = "1.14.0", source = { 
registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "griffe", version = "1.15.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "mcp", marker = "python_full_version >= '3.10'" }, + { name = "mypy" }, + { name = "pylint" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "types-authlib" }, + { name = "types-python-dateutil" }, + { name = "types-pyyaml" }, +] +lint = [ + { name = "mypy" }, + { name = "pyright" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "authlib", marker = "extra == 'agents'", specifier = ">=1.5.2,<2.0" }, + { name = "eval-type-backport", specifier = ">=0.2.0" }, + { name = "google-auth", marker = "extra == 'gcp'", specifier = ">=2.27.0" }, + { name = "griffe", marker = "extra == 'agents'", specifier = ">=1.7.3,<2.0" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "invoke", specifier = ">=2.2.0,<3.0.0" }, + { name = "mcp", marker = "python_full_version >= '3.10' and extra == 'agents'", specifier = ">=1.0,<2.0" }, + { name = "opentelemetry-api", specifier = ">=1.33.1,<2.0.0" }, + { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.37.0,<2.0.0" }, + { name = "opentelemetry-sdk", specifier = ">=1.33.1,<2.0.0" }, + { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0,<0.60" }, + { name = "pydantic", specifier = ">=2.10.3" }, + { name = "python-dateutil", specifier = ">=2.8.2" }, + { name = "pyyaml", specifier = ">=6.0.2,<7.0.0" }, + { name = "requests", marker = "extra == 'gcp'", specifier = ">=2.32.3" }, + { name = "typing-inspection", specifier = ">=0.4.0" }, +] +provides-extras = ["gcp", "agents"] + +[package.metadata.requires-dev] +dev = [ + { name = "authlib", specifier = ">=1.5.2,<2" }, + { name = "griffe", specifier = ">=1.7.3,<2" }, + { name = "mcp", marker = 
"python_full_version >= '3.10'", specifier = ">=1.0,<2" }, + { name = "mypy", specifier = "==1.15.0" }, + { name = "pylint", specifier = "==3.2.3" }, + { name = "pytest", specifier = ">=8.2.2,<9" }, + { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, + { name = "types-authlib", specifier = ">=1.5.0.20250516,<2" }, + { name = "types-python-dateutil", specifier = ">=2.9.0.20240316,<3" }, + { name = "types-pyyaml", specifier = ">=6.0.12.20250516,<7" }, +] +lint = [ + { name = "mypy", specifier = "==1.15.0" }, + { name = "pyright", specifier = ">=1.1.401,<2" }, + { name = "ruff", specifier = ">=0.11.10,<0.12" }, +] + +[[package]] +name = "mypy" +version = "1.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" }, + { url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" }, + { url = "https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" }, + { url = "https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" }, + { url = "https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" }, + { url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" }, + { url = "https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" }, + { url = "https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" }, + { url = 
"https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" }, + { url = "https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" }, + { url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" }, + { url = 
"https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = "https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = "https://files.pythonhosted.org/packages/5a/fa/79cf41a55b682794abe71372151dbbf856e3008f6767057229e6649d294a/mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078", size = 10737129, upload-time = "2025-02-05T03:50:24.509Z" }, + { url = "https://files.pythonhosted.org/packages/d3/33/dd8feb2597d648de29e3da0a8bf4e1afbda472964d2a4a0052203a6f3594/mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba", size = 9856335, upload-time = "2025-02-05T03:49:36.398Z" }, + { url = "https://files.pythonhosted.org/packages/e4/b5/74508959c1b06b96674b364ffeb7ae5802646b32929b7701fc6b18447592/mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5", size = 11611935, upload-time = "2025-02-05T03:49:14.154Z" }, + { url = 
"https://files.pythonhosted.org/packages/6c/53/da61b9d9973efcd6507183fdad96606996191657fe79701b2c818714d573/mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b", size = 12365827, upload-time = "2025-02-05T03:48:59.458Z" }, + { url = "https://files.pythonhosted.org/packages/c1/72/965bd9ee89540c79a25778cc080c7e6ef40aa1eeac4d52cec7eae6eb5228/mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2", size = 12541924, upload-time = "2025-02-05T03:50:03.12Z" }, + { url = "https://files.pythonhosted.org/packages/46/d0/f41645c2eb263e6c77ada7d76f894c580c9ddb20d77f0c24d34273a4dab2/mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980", size = 9271176, upload-time = "2025-02-05T03:50:10.86Z" }, + { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nodeenv" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/d8/0f354c375628e048bd0570645b310797299754730079853095bf000fba69/opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12", size = 65242, upload-time = "2025-10-16T08:35:50.25Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/a2/d86e01c28300bd41bab8f18afd613676e2bd63515417b77636fc1add426f/opentelemetry_api-1.38.0-py3-none-any.whl", hash = 
"sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582", size = 65947, upload-time = "2025-10-16T08:35:30.23Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-proto" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/83/dd4660f2956ff88ed071e9e0e36e830df14b8c5dc06722dbde1841accbe8/opentelemetry_exporter_otlp_proto_common-1.38.0.tar.gz", hash = "sha256:e333278afab4695aa8114eeb7bf4e44e65c6607d54968271a249c180b2cb605c", size = 20431, upload-time = "2025-10-16T08:35:53.285Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/9e/55a41c9601191e8cd8eb626b54ee6827b9c9d4a46d736f32abc80d8039fc/opentelemetry_exporter_otlp_proto_common-1.38.0-py3-none-any.whl", hash = "sha256:03cb76ab213300fe4f4c62b7d8f17d97fcfd21b89f0b5ce38ea156327ddda74a", size = 18359, upload-time = "2025-10-16T08:35:34.099Z" }, +] + +[[package]] +name = "opentelemetry-exporter-otlp-proto-http" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "googleapis-common-protos" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-exporter-otlp-proto-common" }, + { name = "opentelemetry-proto" }, + { name = "opentelemetry-sdk" }, + { name = "requests" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/81/0a/debcdfb029fbd1ccd1563f7c287b89a6f7bef3b2902ade56797bfd020854/opentelemetry_exporter_otlp_proto_http-1.38.0.tar.gz", hash = "sha256:f16bd44baf15cbe07633c5112ffc68229d0edbeac7b37610be0b2def4e21e90b", size = 17282, upload-time = "2025-10-16T08:35:54.422Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e5/77/154004c99fb9f291f74aa0822a2f5bbf565a72d8126b3a1b63ed8e5f83c7/opentelemetry_exporter_otlp_proto_http-1.38.0-py3-none-any.whl", hash = "sha256:84b937305edfc563f08ec69b9cb2298be8188371217e867c1854d77198d0825b", size = 19579, upload-time = "2025-10-16T08:35:36.269Z" }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/51/14/f0c4f0f6371b9cb7f9fa9ee8918bfd59ac7040c7791f1e6da32a1839780d/opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468", size = 46152, upload-time = "2025-10-16T08:36:01.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/6a/82b68b14efca5150b2632f3692d627afa76b77378c4999f2648979409528/opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18", size = 72535, upload-time = "2025-10-16T08:35:45.749Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/cb/f0eee1445161faf4c9af3ba7b848cc22a50a3d3e2515051ad8628c35ff80/opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe", size = 171942, upload-time = "2025-10-16T08:36:02.257Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2f/2e/e93777a95d7d9c40d270a371392b6d6f1ff170c2a3cb32d6176741b5b723/opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b", size = 132349, upload-time = "2025-10-16T08:35:46.995Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.59b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/40/bc/8b9ad3802cd8ac6583a4eb7de7e5d7db004e89cb7efe7008f9c8a537ee75/opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0", size = 129861, upload-time = "2025-10-16T08:36:03.346Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/24/7d/c88d7b15ba8fe5c6b8f93be50fc11795e9fc05386c44afaf6b76fe191f9b/opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed", size = 207954, upload-time = "2025-10-16T08:35:48.054Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.4.0" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version < '3.10'", +] +sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size = 21634, upload-time = "2025-08-26T14:32:04.268Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/4b/2028861e724d3bd36227adfa20d3fd24c3fc6d52032f4a93c133be5d17ce/platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85", size = 18654, upload-time = "2025-08-26T14:32:02.735Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.5.1" +source = { registry = "https://pypi.org/simple" } +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version == '3.11.*'", + "python_full_version == '3.10.*'", +] +sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "protobuf" +version = "6.33.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/34/44/e49ecff446afeec9d1a66d6bbf9adc21e3c7cea7803a920ca3773379d4f6/protobuf-6.33.2.tar.gz", hash = "sha256:56dc370c91fbb8ac85bc13582c9e373569668a290aa2e66a590c2a0d35ddb9e4", size = 444296, upload-time = "2025-12-06T00:17:53.311Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/91/1e3a34881a88697a7354ffd177e8746e97a722e5e8db101544b47e84afb1/protobuf-6.33.2-cp310-abi3-win32.whl", hash = "sha256:87eb388bd2d0f78febd8f4c8779c79247b26a5befad525008e49a6955787ff3d", size = 425603, upload-time = "2025-12-06T00:17:41.114Z" }, + { url = 
"https://files.pythonhosted.org/packages/64/20/4d50191997e917ae13ad0a235c8b42d8c1ab9c3e6fd455ca16d416944355/protobuf-6.33.2-cp310-abi3-win_amd64.whl", hash = "sha256:fc2a0e8b05b180e5fc0dd1559fe8ebdae21a27e81ac77728fb6c42b12c7419b4", size = 436930, upload-time = "2025-12-06T00:17:43.278Z" }, + { url = "https://files.pythonhosted.org/packages/b2/ca/7e485da88ba45c920fb3f50ae78de29ab925d9e54ef0de678306abfbb497/protobuf-6.33.2-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d9b19771ca75935b3a4422957bc518b0cecb978b31d1dd12037b088f6bcc0e43", size = 427621, upload-time = "2025-12-06T00:17:44.445Z" }, + { url = "https://files.pythonhosted.org/packages/7d/4f/f743761e41d3b2b2566748eb76bbff2b43e14d5fcab694f494a16458b05f/protobuf-6.33.2-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:b5d3b5625192214066d99b2b605f5783483575656784de223f00a8d00754fc0e", size = 324460, upload-time = "2025-12-06T00:17:45.678Z" }, + { url = "https://files.pythonhosted.org/packages/b1/fa/26468d00a92824020f6f2090d827078c09c9c587e34cbfd2d0c7911221f8/protobuf-6.33.2-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8cd7640aee0b7828b6d03ae518b5b4806fdfc1afe8de82f79c3454f8aef29872", size = 339168, upload-time = "2025-12-06T00:17:46.813Z" }, + { url = "https://files.pythonhosted.org/packages/56/13/333b8f421738f149d4fe5e49553bc2a2ab75235486259f689b4b91f96cec/protobuf-6.33.2-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:1f8017c48c07ec5859106533b682260ba3d7c5567b1ca1f24297ce03384d1b4f", size = 323270, upload-time = "2025-12-06T00:17:48.253Z" }, + { url = "https://files.pythonhosted.org/packages/87/85/5c1115e68fd34e8ada6fa75974b4c778a298a3c7170575b49efc1eb99dd2/protobuf-6.33.2-cp39-cp39-win32.whl", hash = 
"sha256:7109dcc38a680d033ffb8bf896727423528db9163be1b6a02d6a49606dcadbfe", size = 425692, upload-time = "2025-12-06T00:17:49.62Z" }, + { url = "https://files.pythonhosted.org/packages/c5/74/18d9de7fd3c41a8b4808d6268515b320abae003423da1b1319f71bdf0779/protobuf-6.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:2981c58f582f44b6b13173e12bb8656711189c2a70250845f264b877f00b1913", size = 436932, upload-time = "2025-12-06T00:17:51.098Z" }, + { url = "https://files.pythonhosted.org/packages/0e/15/4f02896cc3df04fc465010a4c6a0cd89810f54617a32a70ef531ed75d61c/protobuf-6.33.2-py3-none-any.whl", hash = "sha256:7636aad9bb01768870266de5dc009de2d1b936771b38a793f73cbbf279c91c5c", size = 170501, upload-time = "2025-12-06T00:17:52.211Z" }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pycparser" +version = "2.23" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, +] + +[[package]] +name = "pydantic" +version = "2.12.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", 
hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.41.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/90/32c9941e728d564b411d574d8ee0cf09b12ec978cb22b294995bae5549a5/pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146", size = 2107298, upload-time = "2025-11-04T13:39:04.116Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a8/61c96a77fe28993d9a6fb0f4127e05430a267b235a124545d79fea46dd65/pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2", size = 1901475, upload-time = "2025-11-04T13:39:06.055Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/b6/338abf60225acc18cdc08b4faef592d0310923d19a87fba1faf05af5346e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97", size = 1918815, upload-time = "2025-11-04T13:39:10.41Z" }, + { url = "https://files.pythonhosted.org/packages/d1/1c/2ed0433e682983d8e8cba9c8d8ef274d4791ec6a6f24c58935b90e780e0a/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9", size = 2065567, upload-time = "2025-11-04T13:39:12.244Z" }, + { url = "https://files.pythonhosted.org/packages/b3/24/cf84974ee7d6eae06b9e63289b7b8f6549d416b5c199ca2d7ce13bbcf619/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52", size = 2230442, upload-time = "2025-11-04T13:39:13.962Z" }, + { url = "https://files.pythonhosted.org/packages/fd/21/4e287865504b3edc0136c89c9c09431be326168b1eb7841911cbc877a995/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941", size = 2350956, upload-time = "2025-11-04T13:39:15.889Z" }, + { url = "https://files.pythonhosted.org/packages/a8/76/7727ef2ffa4b62fcab916686a68a0426b9b790139720e1934e8ba797e238/pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a", size = 2068253, upload-time = "2025-11-04T13:39:17.403Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/8c/a4abfc79604bcb4c748e18975c44f94f756f08fb04218d5cb87eb0d3a63e/pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c", size = 2177050, upload-time = "2025-11-04T13:39:19.351Z" }, + { url = "https://files.pythonhosted.org/packages/67/b1/de2e9a9a79b480f9cb0b6e8b6ba4c50b18d4e89852426364c66aa82bb7b3/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2", size = 2147178, upload-time = "2025-11-04T13:39:21Z" }, + { url = "https://files.pythonhosted.org/packages/16/c1/dfb33f837a47b20417500efaa0378adc6635b3c79e8369ff7a03c494b4ac/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556", size = 2341833, upload-time = "2025-11-04T13:39:22.606Z" }, + { url = "https://files.pythonhosted.org/packages/47/36/00f398642a0f4b815a9a558c4f1dca1b4020a7d49562807d7bc9ff279a6c/pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49", size = 2321156, upload-time = "2025-11-04T13:39:25.843Z" }, + { url = "https://files.pythonhosted.org/packages/7e/70/cad3acd89fde2010807354d978725ae111ddf6d0ea46d1ea1775b5c1bd0c/pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba", size = 1989378, upload-time = "2025-11-04T13:39:27.92Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/92/d338652464c6c367e5608e4488201702cd1cbb0f33f7b6a85a60fe5f3720/pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9", size = 2013622, upload-time = "2025-11-04T13:39:29.848Z" }, + { url = "https://files.pythonhosted.org/packages/e8/72/74a989dd9f2084b3d9530b0915fdda64ac48831c30dbf7c72a41a5232db8/pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6", size = 2105873, upload-time = "2025-11-04T13:39:31.373Z" }, + { url = "https://files.pythonhosted.org/packages/12/44/37e403fd9455708b3b942949e1d7febc02167662bf1a7da5b78ee1ea2842/pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b", size = 1899826, upload-time = "2025-11-04T13:39:32.897Z" }, + { url = "https://files.pythonhosted.org/packages/33/7f/1d5cab3ccf44c1935a359d51a8a2a9e1a654b744b5e7f80d41b88d501eec/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a", size = 1917869, upload-time = "2025-11-04T13:39:34.469Z" }, + { url = "https://files.pythonhosted.org/packages/6e/6a/30d94a9674a7fe4f4744052ed6c5e083424510be1e93da5bc47569d11810/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8", size = 2063890, upload-time = "2025-11-04T13:39:36.053Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/be/76e5d46203fcb2750e542f32e6c371ffa9b8ad17364cf94bb0818dbfb50c/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e", size = 2229740, upload-time = "2025-11-04T13:39:37.753Z" }, + { url = "https://files.pythonhosted.org/packages/d3/ee/fed784df0144793489f87db310a6bbf8118d7b630ed07aa180d6067e653a/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1", size = 2350021, upload-time = "2025-11-04T13:39:40.94Z" }, + { url = "https://files.pythonhosted.org/packages/c8/be/8fed28dd0a180dca19e72c233cbf58efa36df055e5b9d90d64fd1740b828/pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b", size = 2066378, upload-time = "2025-11-04T13:39:42.523Z" }, + { url = "https://files.pythonhosted.org/packages/b0/3b/698cf8ae1d536a010e05121b4958b1257f0b5522085e335360e53a6b1c8b/pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b", size = 2175761, upload-time = "2025-11-04T13:39:44.553Z" }, + { url = "https://files.pythonhosted.org/packages/b8/ba/15d537423939553116dea94ce02f9c31be0fa9d0b806d427e0308ec17145/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284", size = 2146303, upload-time = "2025-11-04T13:39:46.238Z" }, + { url = 
"https://files.pythonhosted.org/packages/58/7f/0de669bf37d206723795f9c90c82966726a2ab06c336deba4735b55af431/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594", size = 2340355, upload-time = "2025-11-04T13:39:48.002Z" }, + { url = "https://files.pythonhosted.org/packages/e5/de/e7482c435b83d7e3c3ee5ee4451f6e8973cff0eb6007d2872ce6383f6398/pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e", size = 2319875, upload-time = "2025-11-04T13:39:49.705Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e6/8c9e81bb6dd7560e33b9053351c29f30c8194b72f2d6932888581f503482/pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b", size = 1987549, upload-time = "2025-11-04T13:39:51.842Z" }, + { url = "https://files.pythonhosted.org/packages/11/66/f14d1d978ea94d1bc21fc98fcf570f9542fe55bfcc40269d4e1a21c19bf7/pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe", size = 2011305, upload-time = "2025-11-04T13:39:53.485Z" }, + { url = "https://files.pythonhosted.org/packages/56/d8/0e271434e8efd03186c5386671328154ee349ff0354d83c74f5caaf096ed/pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f", size = 1972902, upload-time = "2025-11-04T13:39:56.488Z" }, + { url = "https://files.pythonhosted.org/packages/5f/5d/5f6c63eebb5afee93bcaae4ce9a898f3373ca23df3ccaef086d0233a35a7/pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7", size = 2110990, upload-time = "2025-11-04T13:39:58.079Z" }, + { url = "https://files.pythonhosted.org/packages/aa/32/9c2e8ccb57c01111e0fd091f236c7b371c1bccea0fa85247ac55b1e2b6b6/pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0", size = 1896003, upload-time = "2025-11-04T13:39:59.956Z" }, + { url = "https://files.pythonhosted.org/packages/68/b8/a01b53cb0e59139fbc9e4fda3e9724ede8de279097179be4ff31f1abb65a/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69", size = 1919200, upload-time = "2025-11-04T13:40:02.241Z" }, + { url = "https://files.pythonhosted.org/packages/38/de/8c36b5198a29bdaade07b5985e80a233a5ac27137846f3bc2d3b40a47360/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75", size = 2052578, upload-time = "2025-11-04T13:40:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/00/b5/0e8e4b5b081eac6cb3dbb7e60a65907549a1ce035a724368c330112adfdd/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05", size = 2208504, upload-time = "2025-11-04T13:40:06.072Z" }, + { url = "https://files.pythonhosted.org/packages/77/56/87a61aad59c7c5b9dc8caad5a41a5545cba3810c3e828708b3d7404f6cef/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc", size = 2335816, 
upload-time = "2025-11-04T13:40:07.835Z" }, + { url = "https://files.pythonhosted.org/packages/0d/76/941cc9f73529988688a665a5c0ecff1112b3d95ab48f81db5f7606f522d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c", size = 2075366, upload-time = "2025-11-04T13:40:09.804Z" }, + { url = "https://files.pythonhosted.org/packages/d3/43/ebef01f69baa07a482844faaa0a591bad1ef129253ffd0cdaa9d8a7f72d3/pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5", size = 2171698, upload-time = "2025-11-04T13:40:12.004Z" }, + { url = "https://files.pythonhosted.org/packages/b1/87/41f3202e4193e3bacfc2c065fab7706ebe81af46a83d3e27605029c1f5a6/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c", size = 2132603, upload-time = "2025-11-04T13:40:13.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7d/4c00df99cb12070b6bccdef4a195255e6020a550d572768d92cc54dba91a/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294", size = 2329591, upload-time = "2025-11-04T13:40:15.672Z" }, + { url = "https://files.pythonhosted.org/packages/cc/6a/ebf4b1d65d458f3cda6a7335d141305dfa19bdc61140a884d165a8a1bbc7/pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1", size = 2319068, upload-time = "2025-11-04T13:40:17.532Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/3b/774f2b5cd4192d5ab75870ce4381fd89cf218af999515baf07e7206753f0/pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d", size = 1985908, upload-time = "2025-11-04T13:40:19.309Z" }, + { url = "https://files.pythonhosted.org/packages/86/45/00173a033c801cacf67c190fef088789394feaf88a98a7035b0e40d53dc9/pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815", size = 2020145, upload-time = "2025-11-04T13:40:21.548Z" }, + { url = "https://files.pythonhosted.org/packages/f9/22/91fbc821fa6d261b376a3f73809f907cec5ca6025642c463d3488aad22fb/pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3", size = 1976179, upload-time = "2025-11-04T13:40:23.393Z" }, + { url = "https://files.pythonhosted.org/packages/87/06/8806241ff1f70d9939f9af039c6c35f2360cf16e93c2ca76f184e76b1564/pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9", size = 2120403, upload-time = "2025-11-04T13:40:25.248Z" }, + { url = "https://files.pythonhosted.org/packages/94/02/abfa0e0bda67faa65fef1c84971c7e45928e108fe24333c81f3bfe35d5f5/pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34", size = 1896206, upload-time = "2025-11-04T13:40:27.099Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/df/a4c740c0943e93e6500f9eb23f4ca7ec9bf71b19e608ae5b579678c8d02f/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0", size = 1919307, upload-time = "2025-11-04T13:40:29.806Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e3/6324802931ae1d123528988e0e86587c2072ac2e5394b4bc2bc34b61ff6e/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33", size = 2063258, upload-time = "2025-11-04T13:40:33.544Z" }, + { url = "https://files.pythonhosted.org/packages/c9/d4/2230d7151d4957dd79c3044ea26346c148c98fbf0ee6ebd41056f2d62ab5/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e", size = 2214917, upload-time = "2025-11-04T13:40:35.479Z" }, + { url = "https://files.pythonhosted.org/packages/e6/9f/eaac5df17a3672fef0081b6c1bb0b82b33ee89aa5cec0d7b05f52fd4a1fa/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2", size = 2332186, upload-time = "2025-11-04T13:40:37.436Z" }, + { url = "https://files.pythonhosted.org/packages/cf/4e/35a80cae583a37cf15604b44240e45c05e04e86f9cfd766623149297e971/pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586", size = 2073164, upload-time = "2025-11-04T13:40:40.289Z" }, + { url = 
"https://files.pythonhosted.org/packages/bf/e3/f6e262673c6140dd3305d144d032f7bd5f7497d3871c1428521f19f9efa2/pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d", size = 2179146, upload-time = "2025-11-04T13:40:42.809Z" }, + { url = "https://files.pythonhosted.org/packages/75/c7/20bd7fc05f0c6ea2056a4565c6f36f8968c0924f19b7d97bbfea55780e73/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740", size = 2137788, upload-time = "2025-11-04T13:40:44.752Z" }, + { url = "https://files.pythonhosted.org/packages/3a/8d/34318ef985c45196e004bc46c6eab2eda437e744c124ef0dbe1ff2c9d06b/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e", size = 2340133, upload-time = "2025-11-04T13:40:46.66Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/013626bf8c78a5a5d9350d12e7697d3d4de951a75565496abd40ccd46bee/pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858", size = 2324852, upload-time = "2025-11-04T13:40:48.575Z" }, + { url = "https://files.pythonhosted.org/packages/1a/d9/c248c103856f807ef70c18a4f986693a46a8ffe1602e5d361485da502d20/pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36", size = 1994679, upload-time = "2025-11-04T13:40:50.619Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/8b/341991b158ddab181cff136acd2552c9f35bd30380422a639c0671e99a91/pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11", size = 2019766, upload-time = "2025-11-04T13:40:52.631Z" }, + { url = "https://files.pythonhosted.org/packages/73/7d/f2f9db34af103bea3e09735bb40b021788a5e834c81eedb541991badf8f5/pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd", size = 1981005, upload-time = "2025-11-04T13:40:54.734Z" }, + { url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" }, + { url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" }, + { url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" }, + { url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" }, + { url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" }, + { url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" }, + { url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" }, + { url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" }, + { url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = 
"sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" }, + { url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" }, + { url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" }, + { url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" }, + { url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = 
"2025-11-04T13:41:37.732Z" }, + { url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" }, + { url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" }, + { url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" }, + { url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" }, + { url = 
"https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" }, + { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, + { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, + { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, + { url = "https://files.pythonhosted.org/packages/54/db/160dffb57ed9a3705c4cbcbff0ac03bdae45f1ca7d58ab74645550df3fbd/pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf", size = 2107999, upload-time = "2025-11-04T13:42:03.885Z" }, + { url = "https://files.pythonhosted.org/packages/a3/7d/88e7de946f60d9263cc84819f32513520b85c0f8322f9b8f6e4afc938383/pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5", size = 1929745, upload-time = "2025-11-04T13:42:06.075Z" }, + { url = "https://files.pythonhosted.org/packages/d5/c2/aef51e5b283780e85e99ff19db0f05842d2d4a8a8cd15e63b0280029b08f/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d", size = 1920220, upload-time = "2025-11-04T13:42:08.457Z" }, + { url = "https://files.pythonhosted.org/packages/c7/97/492ab10f9ac8695cd76b2fdb24e9e61f394051df71594e9bcc891c9f586e/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60", size = 2067296, upload-time = "2025-11-04T13:42:10.817Z" }, + { url = "https://files.pythonhosted.org/packages/ec/23/984149650e5269c59a2a4c41d234a9570adc68ab29981825cfaf4cfad8f4/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82", size = 2231548, upload-time = "2025-11-04T13:42:13.843Z" }, + { url = "https://files.pythonhosted.org/packages/71/0c/85bcbb885b9732c28bec67a222dbed5ed2d77baee1f8bba2002e8cd00c5c/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5", size = 2362571, upload-time = "2025-11-04T13:42:16.208Z" }, + { url = "https://files.pythonhosted.org/packages/c0/4a/412d2048be12c334003e9b823a3fa3d038e46cc2d64dd8aab50b31b65499/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3", size = 
2068175, upload-time = "2025-11-04T13:42:18.911Z" }, + { url = "https://files.pythonhosted.org/packages/73/f4/c58b6a776b502d0a5540ad02e232514285513572060f0d78f7832ca3c98b/pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425", size = 2177203, upload-time = "2025-11-04T13:42:22.578Z" }, + { url = "https://files.pythonhosted.org/packages/ed/ae/f06ea4c7e7a9eead3d165e7623cd2ea0cb788e277e4f935af63fc98fa4e6/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504", size = 2148191, upload-time = "2025-11-04T13:42:24.89Z" }, + { url = "https://files.pythonhosted.org/packages/c1/57/25a11dcdc656bf5f8b05902c3c2934ac3ea296257cc4a3f79a6319e61856/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5", size = 2343907, upload-time = "2025-11-04T13:42:27.683Z" }, + { url = "https://files.pythonhosted.org/packages/96/82/e33d5f4933d7a03327c0c43c65d575e5919d4974ffc026bc917a5f7b9f61/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3", size = 2322174, upload-time = "2025-11-04T13:42:30.776Z" }, + { url = "https://files.pythonhosted.org/packages/81/45/4091be67ce9f469e81656f880f3506f6a5624121ec5eb3eab37d7581897d/pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460", size = 1990353, upload-time = "2025-11-04T13:42:33.111Z" }, + { url = 
"https://files.pythonhosted.org/packages/44/8a/a98aede18db6e9cd5d66bcacd8a409fcf8134204cdede2e7de35c5a2c5ef/pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b", size = 2015698, upload-time = "2025-11-04T13:42:35.484Z" }, + { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, + { url = "https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, + { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, + { url = "https://files.pythonhosted.org/packages/2e/1b/687711069de7efa6af934e74f601e2a4307365e8fdc404703afc453eab26/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad", size = 2138905, upload-time = "2025-11-04T13:42:47.156Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/32/59b0c7e63e277fa7911c2fc70ccfb45ce4b98991e7ef37110663437005af/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd", size = 2110495, upload-time = "2025-11-04T13:42:49.689Z" }, + { url = "https://files.pythonhosted.org/packages/aa/81/05e400037eaf55ad400bcd318c05bb345b57e708887f07ddb2d20e3f0e98/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc", size = 1915388, upload-time = "2025-11-04T13:42:52.215Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0d/e3549b2399f71d56476b77dbf3cf8937cec5cd70536bdc0e374a421d0599/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56", size = 1942879, upload-time = "2025-11-04T13:42:56.483Z" }, + { url = "https://files.pythonhosted.org/packages/f7/07/34573da085946b6a313d7c42f82f16e8920bfd730665de2d11c0c37a74b5/pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b", size = 2139017, upload-time = "2025-11-04T13:42:59.471Z" }, + { url = "https://files.pythonhosted.org/packages/e6/b0/1a2aa41e3b5a4ba11420aba2d091b2d17959c8d1519ece3627c371951e73/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8", size = 2103351, upload-time = "2025-11-04T13:43:02.058Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/ee/31b1f0020baaf6d091c87900ae05c6aeae101fa4e188e1613c80e4f1ea31/pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a", size = 1925363, upload-time = "2025-11-04T13:43:05.159Z" }, + { url = "https://files.pythonhosted.org/packages/e1/89/ab8e86208467e467a80deaca4e434adac37b10a9d134cd2f99b28a01e483/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b", size = 2135615, upload-time = "2025-11-04T13:43:08.116Z" }, + { url = "https://files.pythonhosted.org/packages/99/0a/99a53d06dd0348b2008f2f30884b34719c323f16c3be4e6cc1203b74a91d/pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2", size = 2175369, upload-time = "2025-11-04T13:43:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/6d/94/30ca3b73c6d485b9bb0bc66e611cff4a7138ff9736b7e66bcf0852151636/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093", size = 2144218, upload-time = "2025-11-04T13:43:15.431Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/31b4f8e12680b739a91f472b5671294236b82586889ef764b5fbc6669238/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a", size = 2329951, upload-time = "2025-11-04T13:43:18.062Z" }, + { url = 
"https://files.pythonhosted.org/packages/7d/73/3c2c8edef77b8f7310e6fb012dbc4b8551386ed575b9eb6fb2506e28a7eb/pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963", size = 2318428, upload-time = "2025-11-04T13:43:20.679Z" }, + { url = "https://files.pythonhosted.org/packages/2f/02/8559b1f26ee0d502c74f9cca5c0d2fd97e967e083e006bbbb4e97f3a043a/pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a", size = 2147009, upload-time = "2025-11-04T13:43:23.286Z" }, + { url = "https://files.pythonhosted.org/packages/5f/9b/1b3f0e9f9305839d7e84912f9e8bfbd191ed1b1ef48083609f0dabde978c/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26", size = 2101980, upload-time = "2025-11-04T13:43:25.97Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ed/d71fefcb4263df0da6a85b5d8a7508360f2f2e9b3bf5814be9c8bccdccc1/pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808", size = 1923865, upload-time = "2025-11-04T13:43:28.763Z" }, + { url = "https://files.pythonhosted.org/packages/ce/3a/626b38db460d675f873e4444b4bb030453bbe7b4ba55df821d026a0493c4/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc", size = 2134256, upload-time = "2025-11-04T13:43:31.71Z" }, + { url = 
"https://files.pythonhosted.org/packages/83/d9/8412d7f06f616bbc053d30cb4e5f76786af3221462ad5eee1f202021eb4e/pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1", size = 2174762, upload-time = "2025-11-04T13:43:34.744Z" }, + { url = "https://files.pythonhosted.org/packages/55/4c/162d906b8e3ba3a99354e20faa1b49a85206c47de97a639510a0e673f5da/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84", size = 2143141, upload-time = "2025-11-04T13:43:37.701Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f2/f11dd73284122713f5f89fc940f370d035fa8e1e078d446b3313955157fe/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770", size = 2330317, upload-time = "2025-11-04T13:43:40.406Z" }, + { url = "https://files.pythonhosted.org/packages/88/9d/b06ca6acfe4abb296110fb1273a4d848a0bfb2ff65f3ee92127b3244e16b/pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f", size = 2316992, upload-time = "2025-11-04T13:43:43.602Z" }, + { url = "https://files.pythonhosted.org/packages/36/c7/cfc8e811f061c841d7990b0201912c3556bfeb99cdcb7ed24adc8d6f8704/pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51", size = 2145302, upload-time = "2025-11-04T13:43:46.64Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.12.0" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "pydantic", marker = "python_full_version >= '3.10'" }, + { name = "python-dotenv", marker = "python_full_version >= '3.10'" }, + { name = "typing-inspection", marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/43/4b/ac7e0aae12027748076d72a8764ff1c9d82ca75a7a52622e67ed3f765c54/pydantic_settings-2.12.0.tar.gz", hash = "sha256:005538ef951e3c2a68e1c08b292b5f2e71490def8589d4221b95dab00dafcfd0", size = 194184, upload-time = "2025-11-10T14:25:47.013Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/60/5d4751ba3f4a40a6891f24eec885f51afd78d208498268c734e256fb13c4/pydantic_settings-2.12.0-py3-none-any.whl", hash = "sha256:fddb9fd99a5b18da837b29710391e945b1e30c135477f484084ee513adb93809", size = 51880, upload-time = "2025-11-10T14:25:45.546Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { 
url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, +] + +[package.optional-dependencies] +crypto = [ + { name = "cryptography", marker = "python_full_version >= '3.10'" }, +] + +[[package]] +name = "pylint" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astroid" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "dill" }, + { name = "isort" }, + { name = "mccabe" }, + { name = "platformdirs", version = "4.4.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "platformdirs", version = "4.5.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "tomli", marker = "python_full_version < '3.11'" }, + { name = "tomlkit" }, + { name = "typing-extensions", marker = "python_full_version < '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, +] + +[[package]] +name = "pyright" +version = "1.1.407" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/1b/0aa08ee42948b61745ac5b5b5ccaec4669e8884b53d31c8ec20b2fcd6b6f/pyright-1.1.407.tar.gz", hash = "sha256:099674dba5c10489832d4a4b2d302636152a9a42d317986c38474c76fe562262", size = 4122872, upload-time = "2025-10-24T23:17:15.145Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/93/b69052907d032b00c40cb656d21438ec00b3a471733de137a3f65a49a0a0/pyright-1.1.407-py3-none-any.whl", hash = "sha256:6dd419f54fcc13f03b52285796d65e639786373f433e243f8b94cf93a7444d21", size = 5997008, upload-time = "2025-10-24T23:17:13.159Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, + { name = "iniconfig", version = "2.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, + { name = "iniconfig", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, + { name = "tomli", marker 
= "python_full_version < '3.11'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.23.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/de/b4/0b378b7bf26a8ae161c3890c0b48a91a04106c5713ce81b4b080ea2f4f18/pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3", size = 46920, upload-time = "2024-07-17T17:39:34.617Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/82/62e2d63639ecb0fbe8a7ee59ef0bc69a4669ec50f6d3459f74ad4e4189a2/pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2", size = 17663, upload-time = "2024-07-17T17:39:32.478Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" }, +] + +[[package]] +name = "python-multipart" +version = "0.0.21" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/78/96/804520d0850c7db98e5ccb70282e29208723f0964e88ffd9d0da2f52ea09/python_multipart-0.0.21.tar.gz", hash = "sha256:7137ebd4d3bbf70ea1622998f902b97a29434a9e8dc40eb203bbcf7c2a2cba92", size = 37196, 
upload-time = "2025-12-17T09:24:22.446Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/aa/76/03af049af4dcee5d27442f71b6924f01f3efb5d2bd34f23fcd563f2cc5f5/python_multipart-0.0.21-py3-none-any.whl", hash = "sha256:cf7a6713e01c87aa35387f4774e812c4361150938d20d232800f75ffcf266090", size = 24541, upload-time = "2025-12-17T09:24:21.153Z" }, +] + +[[package]] +name = "pywin32" +version = "311" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/40/44efbb0dfbd33aca6a6483191dae0716070ed99e2ecb0c53683f400a0b4f/pywin32-311-cp310-cp310-win32.whl", hash = "sha256:d03ff496d2a0cd4a5893504789d4a15399133fe82517455e78bad62efbb7f0a3", size = 8760432, upload-time = "2025-07-14T20:13:05.9Z" }, + { url = "https://files.pythonhosted.org/packages/5e/bf/360243b1e953bd254a82f12653974be395ba880e7ec23e3731d9f73921cc/pywin32-311-cp310-cp310-win_amd64.whl", hash = "sha256:797c2772017851984b97180b0bebe4b620bb86328e8a884bb626156295a63b3b", size = 9590103, upload-time = "2025-07-14T20:13:07.698Z" }, + { url = "https://files.pythonhosted.org/packages/57/38/d290720e6f138086fb3d5ffe0b6caa019a791dd57866940c82e4eeaf2012/pywin32-311-cp310-cp310-win_arm64.whl", hash = "sha256:0502d1facf1fed4839a9a51ccbcc63d952cf318f78ffc00a7e78528ac27d7a2b", size = 8778557, upload-time = "2025-07-14T20:13:11.11Z" }, + { url = "https://files.pythonhosted.org/packages/7c/af/449a6a91e5d6db51420875c54f6aff7c97a86a3b13a0b4f1a5c13b988de3/pywin32-311-cp311-cp311-win32.whl", hash = "sha256:184eb5e436dea364dcd3d2316d577d625c0351bf237c4e9a5fabbcfa5a58b151", size = 8697031, upload-time = "2025-07-14T20:13:13.266Z" }, + { url = 
"https://files.pythonhosted.org/packages/51/8f/9bb81dd5bb77d22243d33c8397f09377056d5c687aa6d4042bea7fbf8364/pywin32-311-cp311-cp311-win_amd64.whl", hash = "sha256:3ce80b34b22b17ccbd937a6e78e7225d80c52f5ab9940fe0506a1a16f3dab503", size = 9508308, upload-time = "2025-07-14T20:13:15.147Z" }, + { url = "https://files.pythonhosted.org/packages/44/7b/9c2ab54f74a138c491aba1b1cd0795ba61f144c711daea84a88b63dc0f6c/pywin32-311-cp311-cp311-win_arm64.whl", hash = "sha256:a733f1388e1a842abb67ffa8e7aad0e70ac519e09b0f6a784e65a136ec7cefd2", size = 8703930, upload-time = "2025-07-14T20:13:16.945Z" }, + { url = "https://files.pythonhosted.org/packages/e7/ab/01ea1943d4eba0f850c3c61e78e8dd59757ff815ff3ccd0a84de5f541f42/pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31", size = 8706543, upload-time = "2025-07-14T20:13:20.765Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a8/a0e8d07d4d051ec7502cd58b291ec98dcc0c3fff027caad0470b72cfcc2f/pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067", size = 9495040, upload-time = "2025-07-14T20:13:22.543Z" }, + { url = "https://files.pythonhosted.org/packages/ba/3a/2ae996277b4b50f17d61f0603efd8253cb2d79cc7ae159468007b586396d/pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852", size = 8710102, upload-time = "2025-07-14T20:13:24.682Z" }, + { url = "https://files.pythonhosted.org/packages/a5/be/3fd5de0979fcb3994bfee0d65ed8ca9506a8a1260651b86174f6a86f52b3/pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d", size = 8705700, 
upload-time = "2025-07-14T20:13:26.471Z" }, + { url = "https://files.pythonhosted.org/packages/e3/28/e0a1909523c6890208295a29e05c2adb2126364e289826c0a8bc7297bd5c/pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d", size = 9494700, upload-time = "2025-07-14T20:13:28.243Z" }, + { url = "https://files.pythonhosted.org/packages/04/bf/90339ac0f55726dce7d794e6d79a18a91265bdf3aa70b6b9ca52f35e022a/pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a", size = 8709318, upload-time = "2025-07-14T20:13:30.348Z" }, + { url = "https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" }, + { url = "https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" }, + { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, + { url = "https://files.pythonhosted.org/packages/59/42/b86689aac0cdaee7ae1c58d464b0ff04ca909c19bb6502d4973cdd9f9544/pywin32-311-cp39-cp39-win32.whl", hash = 
"sha256:aba8f82d551a942cb20d4a83413ccbac30790b50efb89a75e4f586ac0bb8056b", size = 8760837, upload-time = "2025-07-14T20:12:59.59Z" }, + { url = "https://files.pythonhosted.org/packages/9f/8a/1403d0353f8c5a2f0829d2b1c4becbf9da2f0a4d040886404fc4a5431e4d/pywin32-311-cp39-cp39-win_amd64.whl", hash = "sha256:e0c4cfb0621281fe40387df582097fd796e80430597cb9944f0ae70447bacd91", size = 9590187, upload-time = "2025-07-14T20:13:01.419Z" }, + { url = "https://files.pythonhosted.org/packages/60/22/e0e8d802f124772cec9c75430b01a212f86f9de7546bda715e54140d5aeb/pywin32-311-cp39-cp39-win_arm64.whl", hash = "sha256:62ea666235135fee79bb154e695f3ff67370afefd71bd7fea7512fc70ef31e3d", size = 8778162, upload-time = "2025-07-14T20:13:03.544Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/a0/39350dd17dd6d6c6507025c0e53aef67a9293a6d37d3511f23ea510d5800/pyyaml-6.0.3-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b", size = 184227, upload-time = "2025-09-25T21:31:46.04Z" }, + { url = "https://files.pythonhosted.org/packages/05/14/52d505b5c59ce73244f59c7a50ecf47093ce4765f116cdb98286a71eeca2/pyyaml-6.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956", size = 174019, upload-time = 
"2025-09-25T21:31:47.706Z" }, + { url = "https://files.pythonhosted.org/packages/43/f7/0e6a5ae5599c838c696adb4e6330a59f463265bfa1e116cfd1fbb0abaaae/pyyaml-6.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8", size = 740646, upload-time = "2025-09-25T21:31:49.21Z" }, + { url = "https://files.pythonhosted.org/packages/2f/3a/61b9db1d28f00f8fd0ae760459a5c4bf1b941baf714e207b6eb0657d2578/pyyaml-6.0.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198", size = 840793, upload-time = "2025-09-25T21:31:50.735Z" }, + { url = "https://files.pythonhosted.org/packages/7a/1e/7acc4f0e74c4b3d9531e24739e0ab832a5edf40e64fbae1a9c01941cabd7/pyyaml-6.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b", size = 770293, upload-time = "2025-09-25T21:31:51.828Z" }, + { url = "https://files.pythonhosted.org/packages/8b/ef/abd085f06853af0cd59fa5f913d61a8eab65d7639ff2a658d18a25d6a89d/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0", size = 732872, upload-time = "2025-09-25T21:31:53.282Z" }, + { url = "https://files.pythonhosted.org/packages/1f/15/2bc9c8faf6450a8b3c9fc5448ed869c599c0a74ba2669772b1f3a0040180/pyyaml-6.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69", size = 758828, upload-time = "2025-09-25T21:31:54.807Z" }, + { url = 
"https://files.pythonhosted.org/packages/a3/00/531e92e88c00f4333ce359e50c19b8d1de9fe8d581b1534e35ccfbc5f393/pyyaml-6.0.3-cp310-cp310-win32.whl", hash = "sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e", size = 142415, upload-time = "2025-09-25T21:31:55.885Z" }, + { url = "https://files.pythonhosted.org/packages/2a/fa/926c003379b19fca39dd4634818b00dec6c62d87faf628d1394e137354d4/pyyaml-6.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c", size = 158561, upload-time = "2025-09-25T21:31:57.406Z" }, + { url = "https://files.pythonhosted.org/packages/6d/16/a95b6757765b7b031c9374925bb718d55e0a9ba8a1b6a12d25962ea44347/pyyaml-6.0.3-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e", size = 185826, upload-time = "2025-09-25T21:31:58.655Z" }, + { url = "https://files.pythonhosted.org/packages/16/19/13de8e4377ed53079ee996e1ab0a9c33ec2faf808a4647b7b4c0d46dd239/pyyaml-6.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824", size = 175577, upload-time = "2025-09-25T21:32:00.088Z" }, + { url = "https://files.pythonhosted.org/packages/0c/62/d2eb46264d4b157dae1275b573017abec435397aa59cbcdab6fc978a8af4/pyyaml-6.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c", size = 775556, upload-time = "2025-09-25T21:32:01.31Z" }, + { url = 
"https://files.pythonhosted.org/packages/10/cb/16c3f2cf3266edd25aaa00d6c4350381c8b012ed6f5276675b9eba8d9ff4/pyyaml-6.0.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00", size = 882114, upload-time = "2025-09-25T21:32:03.376Z" }, + { url = "https://files.pythonhosted.org/packages/71/60/917329f640924b18ff085ab889a11c763e0b573da888e8404ff486657602/pyyaml-6.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d", size = 806638, upload-time = "2025-09-25T21:32:04.553Z" }, + { url = "https://files.pythonhosted.org/packages/dd/6f/529b0f316a9fd167281a6c3826b5583e6192dba792dd55e3203d3f8e655a/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a", size = 767463, upload-time = "2025-09-25T21:32:06.152Z" }, + { url = "https://files.pythonhosted.org/packages/f2/6a/b627b4e0c1dd03718543519ffb2f1deea4a1e6d42fbab8021936a4d22589/pyyaml-6.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4", size = 794986, upload-time = "2025-09-25T21:32:07.367Z" }, + { url = "https://files.pythonhosted.org/packages/45/91/47a6e1c42d9ee337c4839208f30d9f09caa9f720ec7582917b264defc875/pyyaml-6.0.3-cp311-cp311-win32.whl", hash = "sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b", size = 142543, upload-time = "2025-09-25T21:32:08.95Z" }, + { url = 
"https://files.pythonhosted.org/packages/da/e3/ea007450a105ae919a72393cb06f122f288ef60bba2dc64b26e2646fa315/pyyaml-6.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf", size = 158763, upload-time = "2025-09-25T21:32:09.96Z" }, + { url = "https://files.pythonhosted.org/packages/d1/33/422b98d2195232ca1826284a76852ad5a86fe23e31b009c9886b2d0fb8b2/pyyaml-6.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196", size = 182063, upload-time = "2025-09-25T21:32:11.445Z" }, + { url = "https://files.pythonhosted.org/packages/89/a0/6cf41a19a1f2f3feab0e9c0b74134aa2ce6849093d5517a0c550fe37a648/pyyaml-6.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0", size = 173973, upload-time = "2025-09-25T21:32:12.492Z" }, + { url = "https://files.pythonhosted.org/packages/ed/23/7a778b6bd0b9a8039df8b1b1d80e2e2ad78aa04171592c8a5c43a56a6af4/pyyaml-6.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28", size = 775116, upload-time = "2025-09-25T21:32:13.652Z" }, + { url = "https://files.pythonhosted.org/packages/65/30/d7353c338e12baef4ecc1b09e877c1970bd3382789c159b4f89d6a70dc09/pyyaml-6.0.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c", size = 844011, upload-time = "2025-09-25T21:32:15.21Z" }, + { url = 
"https://files.pythonhosted.org/packages/8b/9d/b3589d3877982d4f2329302ef98a8026e7f4443c765c46cfecc8858c6b4b/pyyaml-6.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc", size = 807870, upload-time = "2025-09-25T21:32:16.431Z" }, + { url = "https://files.pythonhosted.org/packages/05/c0/b3be26a015601b822b97d9149ff8cb5ead58c66f981e04fedf4e762f4bd4/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e", size = 761089, upload-time = "2025-09-25T21:32:17.56Z" }, + { url = "https://files.pythonhosted.org/packages/be/8e/98435a21d1d4b46590d5459a22d88128103f8da4c2d4cb8f14f2a96504e1/pyyaml-6.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea", size = 790181, upload-time = "2025-09-25T21:32:18.834Z" }, + { url = "https://files.pythonhosted.org/packages/74/93/7baea19427dcfbe1e5a372d81473250b379f04b1bd3c4c5ff825e2327202/pyyaml-6.0.3-cp312-cp312-win32.whl", hash = "sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5", size = 137658, upload-time = "2025-09-25T21:32:20.209Z" }, + { url = "https://files.pythonhosted.org/packages/86/bf/899e81e4cce32febab4fb42bb97dcdf66bc135272882d1987881a4b519e9/pyyaml-6.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b", size = 154003, upload-time = "2025-09-25T21:32:21.167Z" }, + { url = "https://files.pythonhosted.org/packages/1a/08/67bd04656199bbb51dbed1439b7f27601dfb576fb864099c7ef0c3e55531/pyyaml-6.0.3-cp312-cp312-win_arm64.whl", hash = 
"sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd", size = 140344, upload-time = "2025-09-25T21:32:22.617Z" }, + { url = "https://files.pythonhosted.org/packages/d1/11/0fd08f8192109f7169db964b5707a2f1e8b745d4e239b784a5a1dd80d1db/pyyaml-6.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8", size = 181669, upload-time = "2025-09-25T21:32:23.673Z" }, + { url = "https://files.pythonhosted.org/packages/b1/16/95309993f1d3748cd644e02e38b75d50cbc0d9561d21f390a76242ce073f/pyyaml-6.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1", size = 173252, upload-time = "2025-09-25T21:32:25.149Z" }, + { url = "https://files.pythonhosted.org/packages/50/31/b20f376d3f810b9b2371e72ef5adb33879b25edb7a6d072cb7ca0c486398/pyyaml-6.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c", size = 767081, upload-time = "2025-09-25T21:32:26.575Z" }, + { url = "https://files.pythonhosted.org/packages/49/1e/a55ca81e949270d5d4432fbbd19dfea5321eda7c41a849d443dc92fd1ff7/pyyaml-6.0.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5", size = 841159, upload-time = "2025-09-25T21:32:27.727Z" }, + { url = "https://files.pythonhosted.org/packages/74/27/e5b8f34d02d9995b80abcef563ea1f8b56d20134d8f4e5e81733b1feceb2/pyyaml-6.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6", size = 801626, upload-time 
= "2025-09-25T21:32:28.878Z" }, + { url = "https://files.pythonhosted.org/packages/f9/11/ba845c23988798f40e52ba45f34849aa8a1f2d4af4b798588010792ebad6/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6", size = 753613, upload-time = "2025-09-25T21:32:30.178Z" }, + { url = "https://files.pythonhosted.org/packages/3d/e0/7966e1a7bfc0a45bf0a7fb6b98ea03fc9b8d84fa7f2229e9659680b69ee3/pyyaml-6.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be", size = 794115, upload-time = "2025-09-25T21:32:31.353Z" }, + { url = "https://files.pythonhosted.org/packages/de/94/980b50a6531b3019e45ddeada0626d45fa85cbe22300844a7983285bed3b/pyyaml-6.0.3-cp313-cp313-win32.whl", hash = "sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26", size = 137427, upload-time = "2025-09-25T21:32:32.58Z" }, + { url = "https://files.pythonhosted.org/packages/97/c9/39d5b874e8b28845e4ec2202b5da735d0199dbe5b8fb85f91398814a9a46/pyyaml-6.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c", size = 154090, upload-time = "2025-09-25T21:32:33.659Z" }, + { url = "https://files.pythonhosted.org/packages/73/e8/2bdf3ca2090f68bb3d75b44da7bbc71843b19c9f2b9cb9b0f4ab7a5a4329/pyyaml-6.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb", size = 140246, upload-time = "2025-09-25T21:32:34.663Z" }, + { url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = 
"sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" }, + { url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" }, + { url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" }, + { url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" }, + { url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, 
upload-time = "2025-09-25T21:32:42.084Z" }, + { url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" }, + { url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" }, + { url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" }, + { url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" }, + { url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" }, + { url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" }, + { url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" }, + { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, + { url = 
"https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, + { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, + { url = "https://files.pythonhosted.org/packages/9f/62/67fc8e68a75f738c9200422bf65693fb79a4cd0dc5b23310e5202e978090/pyyaml-6.0.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da", size = 184450, upload-time = "2025-09-25T21:33:00.618Z" }, + { url = "https://files.pythonhosted.org/packages/ae/92/861f152ce87c452b11b9d0977952259aa7df792d71c1053365cc7b09cc08/pyyaml-6.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917", size = 174319, upload-time = "2025-09-25T21:33:02.086Z" }, + { url = "https://files.pythonhosted.org/packages/d0/cd/f0cfc8c74f8a030017a2b9c771b7f47e5dd702c3e28e5b2071374bda2948/pyyaml-6.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9", size = 737631, upload-time = "2025-09-25T21:33:03.25Z" }, + { url = 
"https://files.pythonhosted.org/packages/ef/b2/18f2bd28cd2055a79a46c9b0895c0b3d987ce40ee471cecf58a1a0199805/pyyaml-6.0.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5", size = 836795, upload-time = "2025-09-25T21:33:05.014Z" }, + { url = "https://files.pythonhosted.org/packages/73/b9/793686b2d54b531203c160ef12bec60228a0109c79bae6c1277961026770/pyyaml-6.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a", size = 750767, upload-time = "2025-09-25T21:33:06.398Z" }, + { url = "https://files.pythonhosted.org/packages/a9/86/a137b39a611def2ed78b0e66ce2fe13ee701a07c07aebe55c340ed2a050e/pyyaml-6.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926", size = 727982, upload-time = "2025-09-25T21:33:08.708Z" }, + { url = "https://files.pythonhosted.org/packages/dd/62/71c27c94f457cf4418ef8ccc71735324c549f7e3ea9d34aba50874563561/pyyaml-6.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7", size = 755677, upload-time = "2025-09-25T21:33:09.876Z" }, + { url = "https://files.pythonhosted.org/packages/29/3d/6f5e0d58bd924fb0d06c3a6bad00effbdae2de5adb5cda5648006ffbd8d3/pyyaml-6.0.3-cp39-cp39-win32.whl", hash = "sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0", size = 142592, upload-time = "2025-09-25T21:33:10.983Z" }, + { url = 
"https://files.pythonhosted.org/packages/f0/0c/25113e0b5e103d7f1490c0e947e303fe4a696c10b501dea7a9f49d4e876c/pyyaml-6.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007", size = 158777, upload-time = "2025-09-25T21:33:15.55Z" }, +] + +[[package]] +name = "referencing" +version = "0.37.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs", marker = "python_full_version >= '3.10'" }, + { name = "rpds-py", marker = "python_full_version >= '3.10'" }, + { name = "typing-extensions", marker = "python_full_version >= '3.10' and python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/58/ca301544e1fa93ed4f80d724bf5b194f6e4b945841c5bfd555878eea9fcb/referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231", size = 26766, upload-time = "2025-10-13T15:30:47.625Z" }, +] + +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, 
upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.30.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/af/3f2f423103f1113b36230496629986e0ef7e199d2aa8392452b484b38ced/rpds_py-0.30.0.tar.gz", hash = "sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84", size = 69469, upload-time = "2025-11-30T20:24:38.837Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/06/0c/0c411a0ec64ccb6d104dcabe0e713e05e153a9a2c3c2bd2b32ce412166fe/rpds_py-0.30.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:679ae98e00c0e8d68a7fda324e16b90fd5260945b45d3b824c892cec9eea3288", size = 370490, upload-time = "2025-11-30T20:21:33.256Z" }, + { url = "https://files.pythonhosted.org/packages/19/6a/4ba3d0fb7297ebae71171822554abe48d7cab29c28b8f9f2c04b79988c05/rpds_py-0.30.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4cc2206b76b4f576934f0ed374b10d7ca5f457858b157ca52064bdfc26b9fc00", size = 359751, upload-time = "2025-11-30T20:21:34.591Z" }, + { url = "https://files.pythonhosted.org/packages/cd/7c/e4933565ef7f7a0818985d87c15d9d273f1a649afa6a52ea35ad011195ea/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:389a2d49eded1896c3d48b0136ead37c48e221b391c052fba3f4055c367f60a6", size = 389696, upload-time = "2025-11-30T20:21:36.122Z" }, + { url = 
"https://files.pythonhosted.org/packages/5e/01/6271a2511ad0815f00f7ed4390cf2567bec1d4b1da39e2c27a41e6e3b4de/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:32c8528634e1bf7121f3de08fa85b138f4e0dc47657866630611b03967f041d7", size = 403136, upload-time = "2025-11-30T20:21:37.728Z" }, + { url = "https://files.pythonhosted.org/packages/55/64/c857eb7cd7541e9b4eee9d49c196e833128a55b89a9850a9c9ac33ccf897/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f207f69853edd6f6700b86efb84999651baf3789e78a466431df1331608e5324", size = 524699, upload-time = "2025-11-30T20:21:38.92Z" }, + { url = "https://files.pythonhosted.org/packages/9c/ed/94816543404078af9ab26159c44f9e98e20fe47e2126d5d32c9d9948d10a/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67b02ec25ba7a9e8fa74c63b6ca44cf5707f2fbfadae3ee8e7494297d56aa9df", size = 412022, upload-time = "2025-11-30T20:21:40.407Z" }, + { url = "https://files.pythonhosted.org/packages/61/b5/707f6cf0066a6412aacc11d17920ea2e19e5b2f04081c64526eb35b5c6e7/rpds_py-0.30.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0e95f6819a19965ff420f65578bacb0b00f251fefe2c8b23347c37174271f3", size = 390522, upload-time = "2025-11-30T20:21:42.17Z" }, + { url = "https://files.pythonhosted.org/packages/13/4e/57a85fda37a229ff4226f8cbcf09f2a455d1ed20e802ce5b2b4a7f5ed053/rpds_py-0.30.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:a452763cc5198f2f98898eb98f7569649fe5da666c2dc6b5ddb10fde5a574221", size = 404579, upload-time = "2025-11-30T20:21:43.769Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/da/c9339293513ec680a721e0e16bf2bac3db6e5d7e922488de471308349bba/rpds_py-0.30.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e0b65193a413ccc930671c55153a03ee57cecb49e6227204b04fae512eb657a7", size = 421305, upload-time = "2025-11-30T20:21:44.994Z" }, + { url = "https://files.pythonhosted.org/packages/f9/be/522cb84751114f4ad9d822ff5a1aa3c98006341895d5f084779b99596e5c/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:858738e9c32147f78b3ac24dc0edb6610000e56dc0f700fd5f651d0a0f0eb9ff", size = 572503, upload-time = "2025-11-30T20:21:46.91Z" }, + { url = "https://files.pythonhosted.org/packages/a2/9b/de879f7e7ceddc973ea6e4629e9b380213a6938a249e94b0cdbcc325bb66/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:da279aa314f00acbb803da1e76fa18666778e8a8f83484fba94526da5de2cba7", size = 598322, upload-time = "2025-11-30T20:21:48.709Z" }, + { url = "https://files.pythonhosted.org/packages/48/ac/f01fc22efec3f37d8a914fc1b2fb9bcafd56a299edbe96406f3053edea5a/rpds_py-0.30.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7c64d38fb49b6cdeda16ab49e35fe0da2e1e9b34bc38bd78386530f218b37139", size = 560792, upload-time = "2025-11-30T20:21:50.024Z" }, + { url = "https://files.pythonhosted.org/packages/e2/da/4e2b19d0f131f35b6146425f846563d0ce036763e38913d917187307a671/rpds_py-0.30.0-cp310-cp310-win32.whl", hash = "sha256:6de2a32a1665b93233cde140ff8b3467bdb9e2af2b91079f0333a0974d12d464", size = 221901, upload-time = "2025-11-30T20:21:51.32Z" }, + { url = "https://files.pythonhosted.org/packages/96/cb/156d7a5cf4f78a7cc571465d8aec7a3c447c94f6749c5123f08438bcf7bc/rpds_py-0.30.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:1726859cd0de969f88dc8673bdd954185b9104e05806be64bcd87badbe313169", size = 235823, upload-time = "2025-11-30T20:21:52.505Z" }, + { url = "https://files.pythonhosted.org/packages/4d/6e/f964e88b3d2abee2a82c1ac8366da848fce1c6d834dc2132c3fda3970290/rpds_py-0.30.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a2bffea6a4ca9f01b3f8e548302470306689684e61602aa3d141e34da06cf425", size = 370157, upload-time = "2025-11-30T20:21:53.789Z" }, + { url = "https://files.pythonhosted.org/packages/94/ba/24e5ebb7c1c82e74c4e4f33b2112a5573ddc703915b13a073737b59b86e0/rpds_py-0.30.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dc4f992dfe1e2bc3ebc7444f6c7051b4bc13cd8e33e43511e8ffd13bf407010d", size = 359676, upload-time = "2025-11-30T20:21:55.475Z" }, + { url = "https://files.pythonhosted.org/packages/84/86/04dbba1b087227747d64d80c3b74df946b986c57af0a9f0c98726d4d7a3b/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:422c3cb9856d80b09d30d2eb255d0754b23e090034e1deb4083f8004bd0761e4", size = 389938, upload-time = "2025-11-30T20:21:57.079Z" }, + { url = "https://files.pythonhosted.org/packages/42/bb/1463f0b1722b7f45431bdd468301991d1328b16cffe0b1c2918eba2c4eee/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07ae8a593e1c3c6b82ca3292efbe73c30b61332fd612e05abee07c79359f292f", size = 402932, upload-time = "2025-11-30T20:21:58.47Z" }, + { url = "https://files.pythonhosted.org/packages/99/ee/2520700a5c1f2d76631f948b0736cdf9b0acb25abd0ca8e889b5c62ac2e3/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12f90dd7557b6bd57f40abe7747e81e0c0b119bef015ea7726e69fe550e394a4", size = 525830, upload-time = "2025-11-30T20:21:59.699Z" }, + { url = 
"https://files.pythonhosted.org/packages/e0/ad/bd0331f740f5705cc555a5e17fdf334671262160270962e69a2bdef3bf76/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99b47d6ad9a6da00bec6aabe5a6279ecd3c06a329d4aa4771034a21e335c3a97", size = 412033, upload-time = "2025-11-30T20:22:00.991Z" }, + { url = "https://files.pythonhosted.org/packages/f8/1e/372195d326549bb51f0ba0f2ecb9874579906b97e08880e7a65c3bef1a99/rpds_py-0.30.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33f559f3104504506a44bb666b93a33f5d33133765b0c216a5bf2f1e1503af89", size = 390828, upload-time = "2025-11-30T20:22:02.723Z" }, + { url = "https://files.pythonhosted.org/packages/ab/2b/d88bb33294e3e0c76bc8f351a3721212713629ffca1700fa94979cb3eae8/rpds_py-0.30.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:946fe926af6e44f3697abbc305ea168c2c31d3e3ef1058cf68f379bf0335a78d", size = 404683, upload-time = "2025-11-30T20:22:04.367Z" }, + { url = "https://files.pythonhosted.org/packages/50/32/c759a8d42bcb5289c1fac697cd92f6fe01a018dd937e62ae77e0e7f15702/rpds_py-0.30.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:495aeca4b93d465efde585977365187149e75383ad2684f81519f504f5c13038", size = 421583, upload-time = "2025-11-30T20:22:05.814Z" }, + { url = "https://files.pythonhosted.org/packages/2b/81/e729761dbd55ddf5d84ec4ff1f47857f4374b0f19bdabfcf929164da3e24/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9a0ca5da0386dee0655b4ccdf46119df60e0f10da268d04fe7cc87886872ba7", size = 572496, upload-time = "2025-11-30T20:22:07.713Z" }, + { url = 
"https://files.pythonhosted.org/packages/14/f6/69066a924c3557c9c30baa6ec3a0aa07526305684c6f86c696b08860726c/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d6d1cc13664ec13c1b84241204ff3b12f9bb82464b8ad6e7a5d3486975c2eed", size = 598669, upload-time = "2025-11-30T20:22:09.312Z" }, + { url = "https://files.pythonhosted.org/packages/5f/48/905896b1eb8a05630d20333d1d8ffd162394127b74ce0b0784ae04498d32/rpds_py-0.30.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3896fa1be39912cf0757753826bc8bdc8ca331a28a7c4ae46b7a21280b06bb85", size = 561011, upload-time = "2025-11-30T20:22:11.309Z" }, + { url = "https://files.pythonhosted.org/packages/22/16/cd3027c7e279d22e5eb431dd3c0fbc677bed58797fe7581e148f3f68818b/rpds_py-0.30.0-cp311-cp311-win32.whl", hash = "sha256:55f66022632205940f1827effeff17c4fa7ae1953d2b74a8581baaefb7d16f8c", size = 221406, upload-time = "2025-11-30T20:22:13.101Z" }, + { url = "https://files.pythonhosted.org/packages/fa/5b/e7b7aa136f28462b344e652ee010d4de26ee9fd16f1bfd5811f5153ccf89/rpds_py-0.30.0-cp311-cp311-win_amd64.whl", hash = "sha256:a51033ff701fca756439d641c0ad09a41d9242fa69121c7d8769604a0a629825", size = 236024, upload-time = "2025-11-30T20:22:14.853Z" }, + { url = "https://files.pythonhosted.org/packages/14/a6/364bba985e4c13658edb156640608f2c9e1d3ea3c81b27aa9d889fff0e31/rpds_py-0.30.0-cp311-cp311-win_arm64.whl", hash = "sha256:47b0ef6231c58f506ef0b74d44e330405caa8428e770fec25329ed2cb971a229", size = 229069, upload-time = "2025-11-30T20:22:16.577Z" }, + { url = "https://files.pythonhosted.org/packages/03/e7/98a2f4ac921d82f33e03f3835f5bf3a4a40aa1bfdc57975e74a97b2b4bdd/rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad", size = 375086, upload-time = "2025-11-30T20:22:17.93Z" }, + { url = "https://files.pythonhosted.org/packages/4d/a1/bca7fd3d452b272e13335db8d6b0b3ecde0f90ad6f16f3328c6fb150c889/rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05", size = 359053, upload-time = "2025-11-30T20:22:19.297Z" }, + { url = "https://files.pythonhosted.org/packages/65/1c/ae157e83a6357eceff62ba7e52113e3ec4834a84cfe07fa4b0757a7d105f/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28", size = 390763, upload-time = "2025-11-30T20:22:21.661Z" }, + { url = "https://files.pythonhosted.org/packages/d4/36/eb2eb8515e2ad24c0bd43c3ee9cd74c33f7ca6430755ccdb240fd3144c44/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd", size = 408951, upload-time = "2025-11-30T20:22:23.408Z" }, + { url = "https://files.pythonhosted.org/packages/d6/65/ad8dc1784a331fabbd740ef6f71ce2198c7ed0890dab595adb9ea2d775a1/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f", size = 514622, upload-time = "2025-11-30T20:22:25.16Z" }, + { url = "https://files.pythonhosted.org/packages/63/8e/0cfa7ae158e15e143fe03993b5bcd743a59f541f5952e1546b1ac1b5fd45/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d948b135c4693daff7bc2dcfc4ec57237a29bd37e60c2fabf5aff2bbacf3e2f1", size = 414492, upload-time = "2025-11-30T20:22:26.505Z" }, 
+ { url = "https://files.pythonhosted.org/packages/60/1b/6f8f29f3f995c7ffdde46a626ddccd7c63aefc0efae881dc13b6e5d5bb16/rpds_py-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47f236970bccb2233267d89173d3ad2703cd36a0e2a6e92d0560d333871a3d23", size = 394080, upload-time = "2025-11-30T20:22:27.934Z" }, + { url = "https://files.pythonhosted.org/packages/6d/d5/a266341051a7a3ca2f4b750a3aa4abc986378431fc2da508c5034d081b70/rpds_py-0.30.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:2e6ecb5a5bcacf59c3f912155044479af1d0b6681280048b338b28e364aca1f6", size = 408680, upload-time = "2025-11-30T20:22:29.341Z" }, + { url = "https://files.pythonhosted.org/packages/10/3b/71b725851df9ab7a7a4e33cf36d241933da66040d195a84781f49c50490c/rpds_py-0.30.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8fa71a2e078c527c3e9dc9fc5a98c9db40bcc8a92b4e8858e36d329f8684b51", size = 423589, upload-time = "2025-11-30T20:22:31.469Z" }, + { url = "https://files.pythonhosted.org/packages/00/2b/e59e58c544dc9bd8bd8384ecdb8ea91f6727f0e37a7131baeff8d6f51661/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5", size = 573289, upload-time = "2025-11-30T20:22:32.997Z" }, + { url = "https://files.pythonhosted.org/packages/da/3e/a18e6f5b460893172a7d6a680e86d3b6bc87a54c1f0b03446a3c8c7b588f/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e", size = 599737, upload-time = "2025-11-30T20:22:34.419Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/e2/714694e4b87b85a18e2c243614974413c60aa107fd815b8cbc42b873d1d7/rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394", size = 563120, upload-time = "2025-11-30T20:22:35.903Z" }, + { url = "https://files.pythonhosted.org/packages/6f/ab/d5d5e3bcedb0a77f4f613706b750e50a5a3ba1c15ccd3665ecc636c968fd/rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf", size = 223782, upload-time = "2025-11-30T20:22:37.271Z" }, + { url = "https://files.pythonhosted.org/packages/39/3b/f786af9957306fdc38a74cef405b7b93180f481fb48453a114bb6465744a/rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b", size = 240463, upload-time = "2025-11-30T20:22:39.021Z" }, + { url = "https://files.pythonhosted.org/packages/f3/d2/b91dc748126c1559042cfe41990deb92c4ee3e2b415f6b5234969ffaf0cc/rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e", size = 230868, upload-time = "2025-11-30T20:22:40.493Z" }, + { url = "https://files.pythonhosted.org/packages/ed/dc/d61221eb88ff410de3c49143407f6f3147acf2538c86f2ab7ce65ae7d5f9/rpds_py-0.30.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f83424d738204d9770830d35290ff3273fbb02b41f919870479fab14b9d303b2", size = 374887, upload-time = "2025-11-30T20:22:41.812Z" }, + { url = "https://files.pythonhosted.org/packages/fd/32/55fb50ae104061dbc564ef15cc43c013dc4a9f4527a1f4d99baddf56fe5f/rpds_py-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:e7536cd91353c5273434b4e003cbda89034d67e7710eab8761fd918ec6c69cf8", size = 358904, upload-time = "2025-11-30T20:22:43.479Z" }, + { url = "https://files.pythonhosted.org/packages/58/70/faed8186300e3b9bdd138d0273109784eea2396c68458ed580f885dfe7ad/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2771c6c15973347f50fece41fc447c054b7ac2ae0502388ce3b6738cd366e3d4", size = 389945, upload-time = "2025-11-30T20:22:44.819Z" }, + { url = "https://files.pythonhosted.org/packages/bd/a8/073cac3ed2c6387df38f71296d002ab43496a96b92c823e76f46b8af0543/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0a59119fc6e3f460315fe9d08149f8102aa322299deaa5cab5b40092345c2136", size = 407783, upload-time = "2025-11-30T20:22:46.103Z" }, + { url = "https://files.pythonhosted.org/packages/77/57/5999eb8c58671f1c11eba084115e77a8899d6e694d2a18f69f0ba471ec8b/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76fec018282b4ead0364022e3c54b60bf368b9d926877957a8624b58419169b7", size = 515021, upload-time = "2025-11-30T20:22:47.458Z" }, + { url = "https://files.pythonhosted.org/packages/e0/af/5ab4833eadc36c0a8ed2bc5c0de0493c04f6c06de223170bd0798ff98ced/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bef75a5525db97318e8cd061542b5a79812d711ea03dbc1f6f8dbb0c5f0d2", size = 414589, upload-time = "2025-11-30T20:22:48.872Z" }, + { url = "https://files.pythonhosted.org/packages/b7/de/f7192e12b21b9e9a68a6d0f249b4af3fdcdff8418be0767a627564afa1f1/rpds_py-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9027da1ce107104c50c81383cae773ef5c24d296dd11c99e2629dbd7967a20c6", size = 394025, upload-time = 
"2025-11-30T20:22:50.196Z" }, + { url = "https://files.pythonhosted.org/packages/91/c4/fc70cd0249496493500e7cc2de87504f5aa6509de1e88623431fec76d4b6/rpds_py-0.30.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:9cf69cdda1f5968a30a359aba2f7f9aa648a9ce4b580d6826437f2b291cfc86e", size = 408895, upload-time = "2025-11-30T20:22:51.87Z" }, + { url = "https://files.pythonhosted.org/packages/58/95/d9275b05ab96556fefff73a385813eb66032e4c99f411d0795372d9abcea/rpds_py-0.30.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4796a717bf12b9da9d3ad002519a86063dcac8988b030e405704ef7d74d2d9d", size = 422799, upload-time = "2025-11-30T20:22:53.341Z" }, + { url = "https://files.pythonhosted.org/packages/06/c1/3088fc04b6624eb12a57eb814f0d4997a44b0d208d6cace713033ff1a6ba/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d4c2aa7c50ad4728a094ebd5eb46c452e9cb7edbfdb18f9e1221f597a73e1e7", size = 572731, upload-time = "2025-11-30T20:22:54.778Z" }, + { url = "https://files.pythonhosted.org/packages/d8/42/c612a833183b39774e8ac8fecae81263a68b9583ee343db33ab571a7ce55/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ba81a9203d07805435eb06f536d95a266c21e5b2dfbf6517748ca40c98d19e31", size = 599027, upload-time = "2025-11-30T20:22:56.212Z" }, + { url = "https://files.pythonhosted.org/packages/5f/60/525a50f45b01d70005403ae0e25f43c0384369ad24ffe46e8d9068b50086/rpds_py-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:945dccface01af02675628334f7cf49c2af4c1c904748efc5cf7bbdf0b579f95", size = 563020, upload-time = "2025-11-30T20:22:58.2Z" }, + { url = 
"https://files.pythonhosted.org/packages/0b/5d/47c4655e9bcd5ca907148535c10e7d489044243cc9941c16ed7cd53be91d/rpds_py-0.30.0-cp313-cp313-win32.whl", hash = "sha256:b40fb160a2db369a194cb27943582b38f79fc4887291417685f3ad693c5a1d5d", size = 223139, upload-time = "2025-11-30T20:23:00.209Z" }, + { url = "https://files.pythonhosted.org/packages/f2/e1/485132437d20aa4d3e1d8b3fb5a5e65aa8139f1e097080c2a8443201742c/rpds_py-0.30.0-cp313-cp313-win_amd64.whl", hash = "sha256:806f36b1b605e2d6a72716f321f20036b9489d29c51c91f4dd29a3e3afb73b15", size = 240224, upload-time = "2025-11-30T20:23:02.008Z" }, + { url = "https://files.pythonhosted.org/packages/24/95/ffd128ed1146a153d928617b0ef673960130be0009c77d8fbf0abe306713/rpds_py-0.30.0-cp313-cp313-win_arm64.whl", hash = "sha256:d96c2086587c7c30d44f31f42eae4eac89b60dabbac18c7669be3700f13c3ce1", size = 230645, upload-time = "2025-11-30T20:23:03.43Z" }, + { url = "https://files.pythonhosted.org/packages/ff/1b/b10de890a0def2a319a2626334a7f0ae388215eb60914dbac8a3bae54435/rpds_py-0.30.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:eb0b93f2e5c2189ee831ee43f156ed34e2a89a78a66b98cadad955972548be5a", size = 364443, upload-time = "2025-11-30T20:23:04.878Z" }, + { url = "https://files.pythonhosted.org/packages/0d/bf/27e39f5971dc4f305a4fb9c672ca06f290f7c4e261c568f3dea16a410d47/rpds_py-0.30.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:922e10f31f303c7c920da8981051ff6d8c1a56207dbdf330d9047f6d30b70e5e", size = 353375, upload-time = "2025-11-30T20:23:06.342Z" }, + { url = "https://files.pythonhosted.org/packages/40/58/442ada3bba6e8e6615fc00483135c14a7538d2ffac30e2d933ccf6852232/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:cdc62c8286ba9bf7f47befdcea13ea0e26bf294bda99758fd90535cbaf408000", size = 383850, upload-time = "2025-11-30T20:23:07.825Z" }, + { url = "https://files.pythonhosted.org/packages/14/14/f59b0127409a33c6ef6f5c1ebd5ad8e32d7861c9c7adfa9a624fc3889f6c/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47f9a91efc418b54fb8190a6b4aa7813a23fb79c51f4bb84e418f5476c38b8db", size = 392812, upload-time = "2025-11-30T20:23:09.228Z" }, + { url = "https://files.pythonhosted.org/packages/b3/66/e0be3e162ac299b3a22527e8913767d869e6cc75c46bd844aa43fb81ab62/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3587eb9b17f3789ad50824084fa6f81921bbf9a795826570bda82cb3ed91f2", size = 517841, upload-time = "2025-11-30T20:23:11.186Z" }, + { url = "https://files.pythonhosted.org/packages/3d/55/fa3b9cf31d0c963ecf1ba777f7cf4b2a2c976795ac430d24a1f43d25a6ba/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39c02563fc592411c2c61d26b6c5fe1e51eaa44a75aa2c8735ca88b0d9599daa", size = 408149, upload-time = "2025-11-30T20:23:12.864Z" }, + { url = "https://files.pythonhosted.org/packages/60/ca/780cf3b1a32b18c0f05c441958d3758f02544f1d613abf9488cd78876378/rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a1234d8febafdfd33a42d97da7a43f5dcb120c1060e352a3fbc0c6d36e2083", size = 383843, upload-time = "2025-11-30T20:23:14.638Z" }, + { url = "https://files.pythonhosted.org/packages/82/86/d5f2e04f2aa6247c613da0c1dd87fcd08fa17107e858193566048a1e2f0a/rpds_py-0.30.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:eb2c4071ab598733724c08221091e8d80e89064cd472819285a9ab0f24bcedb9", size = 396507, upload-time = 
"2025-11-30T20:23:16.105Z" }, + { url = "https://files.pythonhosted.org/packages/4b/9a/453255d2f769fe44e07ea9785c8347edaf867f7026872e76c1ad9f7bed92/rpds_py-0.30.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bdfdb946967d816e6adf9a3d8201bfad269c67efe6cefd7093ef959683c8de0", size = 414949, upload-time = "2025-11-30T20:23:17.539Z" }, + { url = "https://files.pythonhosted.org/packages/a3/31/622a86cdc0c45d6df0e9ccb6becdba5074735e7033c20e401a6d9d0e2ca0/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c77afbd5f5250bf27bf516c7c4a016813eb2d3e116139aed0096940c5982da94", size = 565790, upload-time = "2025-11-30T20:23:19.029Z" }, + { url = "https://files.pythonhosted.org/packages/1c/5d/15bbf0fb4a3f58a3b1c67855ec1efcc4ceaef4e86644665fff03e1b66d8d/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:61046904275472a76c8c90c9ccee9013d70a6d0f73eecefd38c1ae7c39045a08", size = 590217, upload-time = "2025-11-30T20:23:20.885Z" }, + { url = "https://files.pythonhosted.org/packages/6d/61/21b8c41f68e60c8cc3b2e25644f0e3681926020f11d06ab0b78e3c6bbff1/rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c5f36a861bc4b7da6516dbdf302c55313afa09b81931e8280361a4f6c9a2d27", size = 555806, upload-time = "2025-11-30T20:23:22.488Z" }, + { url = "https://files.pythonhosted.org/packages/f9/39/7e067bb06c31de48de3eb200f9fc7c58982a4d3db44b07e73963e10d3be9/rpds_py-0.30.0-cp313-cp313t-win32.whl", hash = "sha256:3d4a69de7a3e50ffc214ae16d79d8fbb0922972da0356dcf4d0fdca2878559c6", size = 211341, upload-time = "2025-11-30T20:23:24.449Z" }, + { url = 
"https://files.pythonhosted.org/packages/0a/4d/222ef0b46443cf4cf46764d9c630f3fe4abaa7245be9417e56e9f52b8f65/rpds_py-0.30.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f14fc5df50a716f7ece6a80b6c78bb35ea2ca47c499e422aa4463455dd96d56d", size = 225768, upload-time = "2025-11-30T20:23:25.908Z" }, + { url = "https://files.pythonhosted.org/packages/86/81/dad16382ebbd3d0e0328776d8fd7ca94220e4fa0798d1dc5e7da48cb3201/rpds_py-0.30.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:68f19c879420aa08f61203801423f6cd5ac5f0ac4ac82a2368a9fcd6a9a075e0", size = 362099, upload-time = "2025-11-30T20:23:27.316Z" }, + { url = "https://files.pythonhosted.org/packages/2b/60/19f7884db5d5603edf3c6bce35408f45ad3e97e10007df0e17dd57af18f8/rpds_py-0.30.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ec7c4490c672c1a0389d319b3a9cfcd098dcdc4783991553c332a15acf7249be", size = 353192, upload-time = "2025-11-30T20:23:29.151Z" }, + { url = "https://files.pythonhosted.org/packages/bf/c4/76eb0e1e72d1a9c4703c69607cec123c29028bff28ce41588792417098ac/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f251c812357a3fed308d684a5079ddfb9d933860fc6de89f2b7ab00da481e65f", size = 384080, upload-time = "2025-11-30T20:23:30.785Z" }, + { url = "https://files.pythonhosted.org/packages/72/87/87ea665e92f3298d1b26d78814721dc39ed8d2c74b86e83348d6b48a6f31/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac98b175585ecf4c0348fd7b29c3864bda53b805c773cbf7bfdaffc8070c976f", size = 394841, upload-time = "2025-11-30T20:23:32.209Z" }, + { url = 
"https://files.pythonhosted.org/packages/77/ad/7783a89ca0587c15dcbf139b4a8364a872a25f861bdb88ed99f9b0dec985/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3e62880792319dbeb7eb866547f2e35973289e7d5696c6e295476448f5b63c87", size = 516670, upload-time = "2025-11-30T20:23:33.742Z" }, + { url = "https://files.pythonhosted.org/packages/5b/3c/2882bdac942bd2172f3da574eab16f309ae10a3925644e969536553cb4ee/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e7fc54e0900ab35d041b0601431b0a0eb495f0851a0639b6ef90f7741b39a18", size = 408005, upload-time = "2025-11-30T20:23:35.253Z" }, + { url = "https://files.pythonhosted.org/packages/ce/81/9a91c0111ce1758c92516a3e44776920b579d9a7c09b2b06b642d4de3f0f/rpds_py-0.30.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47e77dc9822d3ad616c3d5759ea5631a75e5809d5a28707744ef79d7a1bcfcad", size = 382112, upload-time = "2025-11-30T20:23:36.842Z" }, + { url = "https://files.pythonhosted.org/packages/cf/8e/1da49d4a107027e5fbc64daeab96a0706361a2918da10cb41769244b805d/rpds_py-0.30.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:b4dc1a6ff022ff85ecafef7979a2c6eb423430e05f1165d6688234e62ba99a07", size = 399049, upload-time = "2025-11-30T20:23:38.343Z" }, + { url = "https://files.pythonhosted.org/packages/df/5a/7ee239b1aa48a127570ec03becbb29c9d5a9eb092febbd1699d567cae859/rpds_py-0.30.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4559c972db3a360808309e06a74628b95eaccbf961c335c8fe0d590cf587456f", size = 415661, upload-time = "2025-11-30T20:23:40.263Z" }, + { url = 
"https://files.pythonhosted.org/packages/70/ea/caa143cf6b772f823bc7929a45da1fa83569ee49b11d18d0ada7f5ee6fd6/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ed177ed9bded28f8deb6ab40c183cd1192aa0de40c12f38be4d59cd33cb5c65", size = 565606, upload-time = "2025-11-30T20:23:42.186Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/ac20ba2d69303f961ad8cf55bf7dbdb4763f627291ba3d0d7d67333cced9/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ad1fa8db769b76ea911cb4e10f049d80bf518c104f15b3edb2371cc65375c46f", size = 591126, upload-time = "2025-11-30T20:23:44.086Z" }, + { url = "https://files.pythonhosted.org/packages/21/20/7ff5f3c8b00c8a95f75985128c26ba44503fb35b8e0259d812766ea966c7/rpds_py-0.30.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:46e83c697b1f1c72b50e5ee5adb4353eef7406fb3f2043d64c33f20ad1c2fc53", size = 553371, upload-time = "2025-11-30T20:23:46.004Z" }, + { url = "https://files.pythonhosted.org/packages/72/c7/81dadd7b27c8ee391c132a6b192111ca58d866577ce2d9b0ca157552cce0/rpds_py-0.30.0-cp314-cp314-win32.whl", hash = "sha256:ee454b2a007d57363c2dfd5b6ca4a5d7e2c518938f8ed3b706e37e5d470801ed", size = 215298, upload-time = "2025-11-30T20:23:47.696Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d2/1aaac33287e8cfb07aab2e6b8ac1deca62f6f65411344f1433c55e6f3eb8/rpds_py-0.30.0-cp314-cp314-win_amd64.whl", hash = "sha256:95f0802447ac2d10bcc69f6dc28fe95fdf17940367b21d34e34c737870758950", size = 228604, upload-time = "2025-11-30T20:23:49.501Z" }, + { url = "https://files.pythonhosted.org/packages/e8/95/ab005315818cc519ad074cb7784dae60d939163108bd2b394e60dc7b5461/rpds_py-0.30.0-cp314-cp314-win_arm64.whl", hash = 
"sha256:613aa4771c99f03346e54c3f038e4cc574ac09a3ddfb0e8878487335e96dead6", size = 222391, upload-time = "2025-11-30T20:23:50.96Z" }, + { url = "https://files.pythonhosted.org/packages/9e/68/154fe0194d83b973cdedcdcc88947a2752411165930182ae41d983dcefa6/rpds_py-0.30.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7e6ecfcb62edfd632e56983964e6884851786443739dbfe3582947e87274f7cb", size = 364868, upload-time = "2025-11-30T20:23:52.494Z" }, + { url = "https://files.pythonhosted.org/packages/83/69/8bbc8b07ec854d92a8b75668c24d2abcb1719ebf890f5604c61c9369a16f/rpds_py-0.30.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a1d0bc22a7cdc173fedebb73ef81e07faef93692b8c1ad3733b67e31e1b6e1b8", size = 353747, upload-time = "2025-11-30T20:23:54.036Z" }, + { url = "https://files.pythonhosted.org/packages/ab/00/ba2e50183dbd9abcce9497fa5149c62b4ff3e22d338a30d690f9af970561/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d08f00679177226c4cb8c5265012eea897c8ca3b93f429e546600c971bcbae7", size = 383795, upload-time = "2025-11-30T20:23:55.556Z" }, + { url = "https://files.pythonhosted.org/packages/05/6f/86f0272b84926bcb0e4c972262f54223e8ecc556b3224d281e6598fc9268/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5965af57d5848192c13534f90f9dd16464f3c37aaf166cc1da1cae1fd5a34898", size = 393330, upload-time = "2025-11-30T20:23:57.033Z" }, + { url = "https://files.pythonhosted.org/packages/cb/e9/0e02bb2e6dc63d212641da45df2b0bf29699d01715913e0d0f017ee29438/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a4e86e34e9ab6b667c27f3211ca48f73dba7cd3d90f8d5b11be56e5dbc3fb4e", size = 518194, upload-time = "2025-11-30T20:23:58.637Z" }, + { url = 
"https://files.pythonhosted.org/packages/ee/ca/be7bca14cf21513bdf9c0606aba17d1f389ea2b6987035eb4f62bd923f25/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d3e6b26f2c785d65cc25ef1e5267ccbe1b069c5c21b8cc724efee290554419", size = 408340, upload-time = "2025-11-30T20:24:00.2Z" }, + { url = "https://files.pythonhosted.org/packages/c2/c7/736e00ebf39ed81d75544c0da6ef7b0998f8201b369acf842f9a90dc8fce/rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:626a7433c34566535b6e56a1b39a7b17ba961e97ce3b80ec62e6f1312c025551", size = 383765, upload-time = "2025-11-30T20:24:01.759Z" }, + { url = "https://files.pythonhosted.org/packages/4a/3f/da50dfde9956aaf365c4adc9533b100008ed31aea635f2b8d7b627e25b49/rpds_py-0.30.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:acd7eb3f4471577b9b5a41baf02a978e8bdeb08b4b355273994f8b87032000a8", size = 396834, upload-time = "2025-11-30T20:24:03.687Z" }, + { url = "https://files.pythonhosted.org/packages/4e/00/34bcc2565b6020eab2623349efbdec810676ad571995911f1abdae62a3a0/rpds_py-0.30.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fe5fa731a1fa8a0a56b0977413f8cacac1768dad38d16b3a296712709476fbd5", size = 415470, upload-time = "2025-11-30T20:24:05.232Z" }, + { url = "https://files.pythonhosted.org/packages/8c/28/882e72b5b3e6f718d5453bd4d0d9cf8df36fddeb4ddbbab17869d5868616/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:74a3243a411126362712ee1524dfc90c650a503502f135d54d1b352bd01f2404", size = 565630, upload-time = "2025-11-30T20:24:06.878Z" }, + { url = 
"https://files.pythonhosted.org/packages/3b/97/04a65539c17692de5b85c6e293520fd01317fd878ea1995f0367d4532fb1/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3e8eeb0544f2eb0d2581774be4c3410356eba189529a6b3e36bbbf9696175856", size = 591148, upload-time = "2025-11-30T20:24:08.445Z" }, + { url = "https://files.pythonhosted.org/packages/85/70/92482ccffb96f5441aab93e26c4d66489eb599efdcf96fad90c14bbfb976/rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:dbd936cde57abfee19ab3213cf9c26be06d60750e60a8e4dd85d1ab12c8b1f40", size = 556030, upload-time = "2025-11-30T20:24:10.956Z" }, + { url = "https://files.pythonhosted.org/packages/20/53/7c7e784abfa500a2b6b583b147ee4bb5a2b3747a9166bab52fec4b5b5e7d/rpds_py-0.30.0-cp314-cp314t-win32.whl", hash = "sha256:dc824125c72246d924f7f796b4f63c1e9dc810c7d9e2355864b3c3a73d59ade0", size = 211570, upload-time = "2025-11-30T20:24:12.735Z" }, + { url = "https://files.pythonhosted.org/packages/d0/02/fa464cdfbe6b26e0600b62c528b72d8608f5cc49f96b8d6e38c95d60c676/rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3", size = 226532, upload-time = "2025-11-30T20:24:14.634Z" }, + { url = "https://files.pythonhosted.org/packages/69/71/3f34339ee70521864411f8b6992e7ab13ac30d8e4e3309e07c7361767d91/rpds_py-0.30.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c2262bdba0ad4fc6fb5545660673925c2d2a5d9e2e0fb603aad545427be0fc58", size = 372292, upload-time = "2025-11-30T20:24:16.537Z" }, + { url = "https://files.pythonhosted.org/packages/57/09/f183df9b8f2d66720d2ef71075c59f7e1b336bec7ee4c48f0a2b06857653/rpds_py-0.30.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:ee6af14263f25eedc3bb918a3c04245106a42dfd4f5c2285ea6f997b1fc3f89a", size = 362128, upload-time = "2025-11-30T20:24:18.086Z" }, + { url = "https://files.pythonhosted.org/packages/7a/68/5c2594e937253457342e078f0cc1ded3dd7b2ad59afdbf2d354869110a02/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3adbb8179ce342d235c31ab8ec511e66c73faa27a47e076ccc92421add53e2bb", size = 391542, upload-time = "2025-11-30T20:24:20.092Z" }, + { url = "https://files.pythonhosted.org/packages/49/5c/31ef1afd70b4b4fbdb2800249f34c57c64beb687495b10aec0365f53dfc4/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:250fa00e9543ac9b97ac258bd37367ff5256666122c2d0f2bc97577c60a1818c", size = 404004, upload-time = "2025-11-30T20:24:22.231Z" }, + { url = "https://files.pythonhosted.org/packages/e3/63/0cfbea38d05756f3440ce6534d51a491d26176ac045e2707adc99bb6e60a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9854cf4f488b3d57b9aaeb105f06d78e5529d3145b1e4a41750167e8c213c6d3", size = 527063, upload-time = "2025-11-30T20:24:24.302Z" }, + { url = "https://files.pythonhosted.org/packages/42/e6/01e1f72a2456678b0f618fc9a1a13f882061690893c192fcad9f2926553a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:993914b8e560023bc0a8bf742c5f303551992dcb85e247b1e5c7f4a7d145bda5", size = 413099, upload-time = "2025-11-30T20:24:25.916Z" }, + { url = "https://files.pythonhosted.org/packages/b8/25/8df56677f209003dcbb180765520c544525e3ef21ea72279c98b9aa7c7fb/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:58edca431fb9b29950807e301826586e5bbf24163677732429770a697ffe6738", size = 392177, upload-time = "2025-11-30T20:24:27.834Z" }, + { url = "https://files.pythonhosted.org/packages/4a/b4/0a771378c5f16f8115f796d1f437950158679bcd2a7c68cf251cfb00ed5b/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:dea5b552272a944763b34394d04577cf0f9bd013207bc32323b5a89a53cf9c2f", size = 406015, upload-time = "2025-11-30T20:24:29.457Z" }, + { url = "https://files.pythonhosted.org/packages/36/d8/456dbba0af75049dc6f63ff295a2f92766b9d521fa00de67a2bd6427d57a/rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ba3af48635eb83d03f6c9735dfb21785303e73d22ad03d489e88adae6eab8877", size = 423736, upload-time = "2025-11-30T20:24:31.22Z" }, + { url = "https://files.pythonhosted.org/packages/13/64/b4d76f227d5c45a7e0b796c674fd81b0a6c4fbd48dc29271857d8219571c/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:dff13836529b921e22f15cb099751209a60009731a68519630a24d61f0b1b30a", size = 573981, upload-time = "2025-11-30T20:24:32.934Z" }, + { url = "https://files.pythonhosted.org/packages/20/91/092bacadeda3edf92bf743cc96a7be133e13a39cdbfd7b5082e7ab638406/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:1b151685b23929ab7beec71080a8889d4d6d9fa9a983d213f07121205d48e2c4", size = 599782, upload-time = "2025-11-30T20:24:35.169Z" }, + { url = "https://files.pythonhosted.org/packages/d1/b7/b95708304cd49b7b6f82fdd039f1748b66ec2b21d6a45180910802f1abf1/rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ac37f9f516c51e5753f27dfdef11a88330f04de2d564be3991384b2f3535d02e", size = 562191, upload-time = "2025-11-30T20:24:36.853Z" }, +] + +[[package]] +name = "rsa" 
+version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + +[[package]] +name = "ruff" +version = "0.11.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ed/da/9c6f995903b4d9474b39da91d2d626659af3ff1eeb43e9ae7c119349dba6/ruff-0.11.13.tar.gz", hash = "sha256:26fa247dc68d1d4e72c179e08889a25ac0c7ba4d78aecfc835d49cbfd60bf514", size = 4282054, upload-time = "2025-06-05T21:00:15.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/ce/a11d381192966e0b4290842cc8d4fac7dc9214ddf627c11c1afff87da29b/ruff-0.11.13-py3-none-linux_armv6l.whl", hash = "sha256:4bdfbf1240533f40042ec00c9e09a3aade6f8c10b6414cf11b519488d2635d46", size = 10292516, upload-time = "2025-06-05T20:59:32.944Z" }, + { url = "https://files.pythonhosted.org/packages/78/db/87c3b59b0d4e753e40b6a3b4a2642dfd1dcaefbff121ddc64d6c8b47ba00/ruff-0.11.13-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aef9c9ed1b5ca28bb15c7eac83b8670cf3b20b478195bd49c8d756ba0a36cf48", size = 11106083, upload-time = 
"2025-06-05T20:59:37.03Z" }, + { url = "https://files.pythonhosted.org/packages/77/79/d8cec175856ff810a19825d09ce700265f905c643c69f45d2b737e4a470a/ruff-0.11.13-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53b15a9dfdce029c842e9a5aebc3855e9ab7771395979ff85b7c1dedb53ddc2b", size = 10436024, upload-time = "2025-06-05T20:59:39.741Z" }, + { url = "https://files.pythonhosted.org/packages/8b/5b/f6d94f2980fa1ee854b41568368a2e1252681b9238ab2895e133d303538f/ruff-0.11.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab153241400789138d13f362c43f7edecc0edfffce2afa6a68434000ecd8f69a", size = 10646324, upload-time = "2025-06-05T20:59:42.185Z" }, + { url = "https://files.pythonhosted.org/packages/6c/9c/b4c2acf24ea4426016d511dfdc787f4ce1ceb835f3c5fbdbcb32b1c63bda/ruff-0.11.13-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c51f93029d54a910d3d24f7dd0bb909e31b6cd989a5e4ac513f4eb41629f0dc", size = 10174416, upload-time = "2025-06-05T20:59:44.319Z" }, + { url = "https://files.pythonhosted.org/packages/f3/10/e2e62f77c65ede8cd032c2ca39c41f48feabedb6e282bfd6073d81bb671d/ruff-0.11.13-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1808b3ed53e1a777c2ef733aca9051dc9bf7c99b26ece15cb59a0320fbdbd629", size = 11724197, upload-time = "2025-06-05T20:59:46.935Z" }, + { url = "https://files.pythonhosted.org/packages/bb/f0/466fe8469b85c561e081d798c45f8a1d21e0b4a5ef795a1d7f1a9a9ec182/ruff-0.11.13-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d28ce58b5ecf0f43c1b71edffabe6ed7f245d5336b17805803312ec9bc665933", size = 12511615, upload-time = "2025-06-05T20:59:49.534Z" }, + { url = 
"https://files.pythonhosted.org/packages/17/0e/cefe778b46dbd0cbcb03a839946c8f80a06f7968eb298aa4d1a4293f3448/ruff-0.11.13-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55e4bc3a77842da33c16d55b32c6cac1ec5fb0fbec9c8c513bdce76c4f922165", size = 12117080, upload-time = "2025-06-05T20:59:51.654Z" }, + { url = "https://files.pythonhosted.org/packages/5d/2c/caaeda564cbe103bed145ea557cb86795b18651b0f6b3ff6a10e84e5a33f/ruff-0.11.13-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:633bf2c6f35678c56ec73189ba6fa19ff1c5e4807a78bf60ef487b9dd272cc71", size = 11326315, upload-time = "2025-06-05T20:59:54.469Z" }, + { url = "https://files.pythonhosted.org/packages/75/f0/782e7d681d660eda8c536962920c41309e6dd4ebcea9a2714ed5127d44bd/ruff-0.11.13-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ffbc82d70424b275b089166310448051afdc6e914fdab90e08df66c43bb5ca9", size = 11555640, upload-time = "2025-06-05T20:59:56.986Z" }, + { url = "https://files.pythonhosted.org/packages/5d/d4/3d580c616316c7f07fb3c99dbecfe01fbaea7b6fd9a82b801e72e5de742a/ruff-0.11.13-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a9ddd3ec62a9a89578c85842b836e4ac832d4a2e0bfaad3b02243f930ceafcc", size = 10507364, upload-time = "2025-06-05T20:59:59.154Z" }, + { url = "https://files.pythonhosted.org/packages/5a/dc/195e6f17d7b3ea6b12dc4f3e9de575db7983db187c378d44606e5d503319/ruff-0.11.13-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d237a496e0778d719efb05058c64d28b757c77824e04ffe8796c7436e26712b7", size = 10141462, upload-time = "2025-06-05T21:00:01.481Z" }, + { url = 
"https://files.pythonhosted.org/packages/f4/8e/39a094af6967faa57ecdeacb91bedfb232474ff8c3d20f16a5514e6b3534/ruff-0.11.13-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26816a218ca6ef02142343fd24c70f7cd8c5aa6c203bca284407adf675984432", size = 11121028, upload-time = "2025-06-05T21:00:04.06Z" }, + { url = "https://files.pythonhosted.org/packages/5a/c0/b0b508193b0e8a1654ec683ebab18d309861f8bd64e3a2f9648b80d392cb/ruff-0.11.13-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:51c3f95abd9331dc5b87c47ac7f376db5616041173826dfd556cfe3d4977f492", size = 11602992, upload-time = "2025-06-05T21:00:06.249Z" }, + { url = "https://files.pythonhosted.org/packages/7c/91/263e33ab93ab09ca06ce4f8f8547a858cc198072f873ebc9be7466790bae/ruff-0.11.13-py3-none-win32.whl", hash = "sha256:96c27935418e4e8e77a26bb05962817f28b8ef3843a6c6cc49d8783b5507f250", size = 10474944, upload-time = "2025-06-05T21:00:08.459Z" }, + { url = "https://files.pythonhosted.org/packages/46/f4/7c27734ac2073aae8efb0119cae6931b6fb48017adf048fdf85c19337afc/ruff-0.11.13-py3-none-win_amd64.whl", hash = "sha256:29c3189895a8a6a657b7af4e97d330c8a3afd2c9c8f46c81e2fc5a31866517e3", size = 11548669, upload-time = "2025-06-05T21:00:11.147Z" }, + { url = "https://files.pythonhosted.org/packages/ec/bf/b273dd11673fed8a6bd46032c0ea2a04b2ac9bfa9c628756a5856ba113b0/ruff-0.11.13-py3-none-win_arm64.whl", hash = "sha256:b4385285e9179d608ff1d2fb9922062663c658605819a6876d8beef0c30b7f3b", size = 10683928, upload-time = "2025-06-05T21:00:13.758Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "sse-starlette" +version = "3.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio", marker = "python_full_version >= '3.10'" }, + { name = "starlette", marker = "python_full_version >= '3.10'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/34/f5df66cb383efdbf4f2db23cabb27f51b1dcb737efaf8a558f6f1d195134/sse_starlette-3.1.2.tar.gz", hash = "sha256:55eff034207a83a0eb86de9a68099bd0157838f0b8b999a1b742005c71e33618", size = 26303, upload-time = "2025-12-31T08:02:20.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/95/8c4b76eec9ae574474e5d2997557cebf764bcd3586458956c30631ae08f4/sse_starlette-3.1.2-py3-none-any.whl", hash = "sha256:cd800dd349f4521b317b9391d3796fa97b71748a4da9b9e00aafab32dda375c8", size = 12484, upload-time = "2025-12-31T08:02:18.894Z" }, +] + +[[package]] +name = "starlette" +version = "0.50.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio", marker = "python_full_version >= '3.10'" }, + { name = "typing-extensions", marker = "python_full_version >= '3.10' and python_full_version < '3.13'" }, +] 
+sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d9/52/1064f510b141bd54025f9b55105e26d1fa970b9be67ad766380a3c9b74b0/starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca", size = 74033, upload-time = "2025-11-01T15:25:25.461Z" }, +] + +[[package]] +name = "tomli" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/ed/3f73f72945444548f33eba9a87fc7a6e969915e7b1acc8260b30e1f76a2f/tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549", size = 17392, upload-time = "2025-10-08T22:01:47.119Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/2e/299f62b401438d5fe1624119c723f5d877acc86a4c2492da405626665f12/tomli-2.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:88bd15eb972f3664f5ed4b57c1634a97153b4bac4479dcb6a495f41921eb7f45", size = 153236, upload-time = "2025-10-08T22:01:00.137Z" }, + { url = "https://files.pythonhosted.org/packages/86/7f/d8fffe6a7aefdb61bced88fcb5e280cfd71e08939da5894161bd71bea022/tomli-2.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:883b1c0d6398a6a9d29b508c331fa56adbcdff647f6ace4dfca0f50e90dfd0ba", size = 148084, upload-time = "2025-10-08T22:01:01.63Z" }, + { url = 
"https://files.pythonhosted.org/packages/47/5c/24935fb6a2ee63e86d80e4d3b58b222dafaf438c416752c8b58537c8b89a/tomli-2.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d1381caf13ab9f300e30dd8feadb3de072aeb86f1d34a8569453ff32a7dea4bf", size = 234832, upload-time = "2025-10-08T22:01:02.543Z" }, + { url = "https://files.pythonhosted.org/packages/89/da/75dfd804fc11e6612846758a23f13271b76d577e299592b4371a4ca4cd09/tomli-2.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a0e285d2649b78c0d9027570d4da3425bdb49830a6156121360b3f8511ea3441", size = 242052, upload-time = "2025-10-08T22:01:03.836Z" }, + { url = "https://files.pythonhosted.org/packages/70/8c/f48ac899f7b3ca7eb13af73bacbc93aec37f9c954df3c08ad96991c8c373/tomli-2.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a154a9ae14bfcf5d8917a59b51ffd5a3ac1fd149b71b47a3a104ca4edcfa845", size = 239555, upload-time = "2025-10-08T22:01:04.834Z" }, + { url = "https://files.pythonhosted.org/packages/ba/28/72f8afd73f1d0e7829bfc093f4cb98ce0a40ffc0cc997009ee1ed94ba705/tomli-2.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:74bf8464ff93e413514fefd2be591c3b0b23231a77f901db1eb30d6f712fc42c", size = 245128, upload-time = "2025-10-08T22:01:05.84Z" }, + { url = "https://files.pythonhosted.org/packages/b6/eb/a7679c8ac85208706d27436e8d421dfa39d4c914dcf5fa8083a9305f58d9/tomli-2.3.0-cp311-cp311-win32.whl", hash = "sha256:00b5f5d95bbfc7d12f91ad8c593a1659b6387b43f054104cda404be6bda62456", size = 96445, upload-time = "2025-10-08T22:01:06.896Z" }, + { url = 
"https://files.pythonhosted.org/packages/0a/fe/3d3420c4cb1ad9cb462fb52967080575f15898da97e21cb6f1361d505383/tomli-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:4dc4ce8483a5d429ab602f111a93a6ab1ed425eae3122032db7e9acf449451be", size = 107165, upload-time = "2025-10-08T22:01:08.107Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b7/40f36368fcabc518bb11c8f06379a0fd631985046c038aca08c6d6a43c6e/tomli-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7d86942e56ded512a594786a5ba0a5e521d02529b3826e7761a05138341a2ac", size = 154891, upload-time = "2025-10-08T22:01:09.082Z" }, + { url = "https://files.pythonhosted.org/packages/f9/3f/d9dd692199e3b3aab2e4e4dd948abd0f790d9ded8cd10cbaae276a898434/tomli-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ee0b47d4dad1c5e996e3cd33b8a76a50167ae5f96a2607cbe8cc773506ab22", size = 148796, upload-time = "2025-10-08T22:01:10.266Z" }, + { url = "https://files.pythonhosted.org/packages/60/83/59bff4996c2cf9f9387a0f5a3394629c7efa5ef16142076a23a90f1955fa/tomli-2.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:792262b94d5d0a466afb5bc63c7daa9d75520110971ee269152083270998316f", size = 242121, upload-time = "2025-10-08T22:01:11.332Z" }, + { url = "https://files.pythonhosted.org/packages/45/e5/7c5119ff39de8693d6baab6c0b6dcb556d192c165596e9fc231ea1052041/tomli-2.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f195fe57ecceac95a66a75ac24d9d5fbc98ef0962e09b2eddec5d39375aae52", size = 250070, upload-time = "2025-10-08T22:01:12.498Z" }, + { url = 
"https://files.pythonhosted.org/packages/45/12/ad5126d3a278f27e6701abde51d342aa78d06e27ce2bb596a01f7709a5a2/tomli-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e31d432427dcbf4d86958c184b9bfd1e96b5b71f8eb17e6d02531f434fd335b8", size = 245859, upload-time = "2025-10-08T22:01:13.551Z" }, + { url = "https://files.pythonhosted.org/packages/fb/a1/4d6865da6a71c603cfe6ad0e6556c73c76548557a8d658f9e3b142df245f/tomli-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7b0882799624980785240ab732537fcfc372601015c00f7fc367c55308c186f6", size = 250296, upload-time = "2025-10-08T22:01:14.614Z" }, + { url = "https://files.pythonhosted.org/packages/a0/b7/a7a7042715d55c9ba6e8b196d65d2cb662578b4d8cd17d882d45322b0d78/tomli-2.3.0-cp312-cp312-win32.whl", hash = "sha256:ff72b71b5d10d22ecb084d345fc26f42b5143c5533db5e2eaba7d2d335358876", size = 97124, upload-time = "2025-10-08T22:01:15.629Z" }, + { url = "https://files.pythonhosted.org/packages/06/1e/f22f100db15a68b520664eb3328fb0ae4e90530887928558112c8d1f4515/tomli-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:1cb4ed918939151a03f33d4242ccd0aa5f11b3547d0cf30f7c74a408a5b99878", size = 107698, upload-time = "2025-10-08T22:01:16.51Z" }, + { url = "https://files.pythonhosted.org/packages/89/48/06ee6eabe4fdd9ecd48bf488f4ac783844fd777f547b8d1b61c11939974e/tomli-2.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5192f562738228945d7b13d4930baffda67b69425a7f0da96d360b0a3888136b", size = 154819, upload-time = "2025-10-08T22:01:17.964Z" }, + { url = "https://files.pythonhosted.org/packages/f1/01/88793757d54d8937015c75dcdfb673c65471945f6be98e6a0410fba167ed/tomli-2.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:be71c93a63d738597996be9528f4abe628d1adf5e6eb11607bc8fe1a510b5dae", size = 148766, upload-time = "2025-10-08T22:01:18.959Z" }, + { url = "https://files.pythonhosted.org/packages/42/17/5e2c956f0144b812e7e107f94f1cc54af734eb17b5191c0bbfb72de5e93e/tomli-2.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4665508bcbac83a31ff8ab08f424b665200c0e1e645d2bd9ab3d3e557b6185b", size = 240771, upload-time = "2025-10-08T22:01:20.106Z" }, + { url = "https://files.pythonhosted.org/packages/d5/f4/0fbd014909748706c01d16824eadb0307115f9562a15cbb012cd9b3512c5/tomli-2.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4021923f97266babc6ccab9f5068642a0095faa0a51a246a6a02fccbb3514eaf", size = 248586, upload-time = "2025-10-08T22:01:21.164Z" }, + { url = "https://files.pythonhosted.org/packages/30/77/fed85e114bde5e81ecf9bc5da0cc69f2914b38f4708c80ae67d0c10180c5/tomli-2.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4ea38c40145a357d513bffad0ed869f13c1773716cf71ccaa83b0fa0cc4e42f", size = 244792, upload-time = "2025-10-08T22:01:22.417Z" }, + { url = "https://files.pythonhosted.org/packages/55/92/afed3d497f7c186dc71e6ee6d4fcb0acfa5f7d0a1a2878f8beae379ae0cc/tomli-2.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad805ea85eda330dbad64c7ea7a4556259665bdf9d2672f5dccc740eb9d3ca05", size = 248909, upload-time = "2025-10-08T22:01:23.859Z" }, + { url = "https://files.pythonhosted.org/packages/f8/84/ef50c51b5a9472e7265ce1ffc7f24cd4023d289e109f669bdb1553f6a7c2/tomli-2.3.0-cp313-cp313-win32.whl", hash = "sha256:97d5eec30149fd3294270e889b4234023f2c69747e555a27bd708828353ab606", size = 96946, upload-time = "2025-10-08T22:01:24.893Z" }, + { url = 
"https://files.pythonhosted.org/packages/b2/b7/718cd1da0884f281f95ccfa3a6cc572d30053cba64603f79d431d3c9b61b/tomli-2.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0c95ca56fbe89e065c6ead5b593ee64b84a26fca063b5d71a1122bf26e533999", size = 107705, upload-time = "2025-10-08T22:01:26.153Z" }, + { url = "https://files.pythonhosted.org/packages/19/94/aeafa14a52e16163008060506fcb6aa1949d13548d13752171a755c65611/tomli-2.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:cebc6fe843e0733ee827a282aca4999b596241195f43b4cc371d64fc6639da9e", size = 154244, upload-time = "2025-10-08T22:01:27.06Z" }, + { url = "https://files.pythonhosted.org/packages/db/e4/1e58409aa78eefa47ccd19779fc6f36787edbe7d4cd330eeeedb33a4515b/tomli-2.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:4c2ef0244c75aba9355561272009d934953817c49f47d768070c3c94355c2aa3", size = 148637, upload-time = "2025-10-08T22:01:28.059Z" }, + { url = "https://files.pythonhosted.org/packages/26/b6/d1eccb62f665e44359226811064596dd6a366ea1f985839c566cd61525ae/tomli-2.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c22a8bf253bacc0cf11f35ad9808b6cb75ada2631c2d97c971122583b129afbc", size = 241925, upload-time = "2025-10-08T22:01:29.066Z" }, + { url = "https://files.pythonhosted.org/packages/70/91/7cdab9a03e6d3d2bb11beae108da5bdc1c34bdeb06e21163482544ddcc90/tomli-2.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0eea8cc5c5e9f89c9b90c4896a8deefc74f518db5927d0e0e8d4a80953d774d0", size = 249045, upload-time = "2025-10-08T22:01:31.98Z" }, + { url = 
"https://files.pythonhosted.org/packages/15/1b/8c26874ed1f6e4f1fcfeb868db8a794cbe9f227299402db58cfcc858766c/tomli-2.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b74a0e59ec5d15127acdabd75ea17726ac4c5178ae51b85bfe39c4f8a278e879", size = 245835, upload-time = "2025-10-08T22:01:32.989Z" }, + { url = "https://files.pythonhosted.org/packages/fd/42/8e3c6a9a4b1a1360c1a2a39f0b972cef2cc9ebd56025168c4137192a9321/tomli-2.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5870b50c9db823c595983571d1296a6ff3e1b88f734a4c8f6fc6188397de005", size = 253109, upload-time = "2025-10-08T22:01:34.052Z" }, + { url = "https://files.pythonhosted.org/packages/22/0c/b4da635000a71b5f80130937eeac12e686eefb376b8dee113b4a582bba42/tomli-2.3.0-cp314-cp314-win32.whl", hash = "sha256:feb0dacc61170ed7ab602d3d972a58f14ee3ee60494292d384649a3dc38ef463", size = 97930, upload-time = "2025-10-08T22:01:35.082Z" }, + { url = "https://files.pythonhosted.org/packages/b9/74/cb1abc870a418ae99cd5c9547d6bce30701a954e0e721821df483ef7223c/tomli-2.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:b273fcbd7fc64dc3600c098e39136522650c49bca95df2d11cf3b626422392c8", size = 107964, upload-time = "2025-10-08T22:01:36.057Z" }, + { url = "https://files.pythonhosted.org/packages/54/78/5c46fff6432a712af9f792944f4fcd7067d8823157949f4e40c56b8b3c83/tomli-2.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:940d56ee0410fa17ee1f12b817b37a4d4e4dc4d27340863cc67236c74f582e77", size = 163065, upload-time = "2025-10-08T22:01:37.27Z" }, + { url = "https://files.pythonhosted.org/packages/39/67/f85d9bd23182f45eca8939cd2bc7050e1f90c41f4a2ecbbd5963a1d1c486/tomli-2.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = 
"sha256:f85209946d1fe94416debbb88d00eb92ce9cd5266775424ff81bc959e001acaf", size = 159088, upload-time = "2025-10-08T22:01:38.235Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/4b546a0405b9cc0659b399f12b6adb750757baf04250b148d3c5059fc4eb/tomli-2.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a56212bdcce682e56b0aaf79e869ba5d15a6163f88d5451cbde388d48b13f530", size = 268193, upload-time = "2025-10-08T22:01:39.712Z" }, + { url = "https://files.pythonhosted.org/packages/42/4f/2c12a72ae22cf7b59a7fe75b3465b7aba40ea9145d026ba41cb382075b0e/tomli-2.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c5f3ffd1e098dfc032d4d3af5c0ac64f6d286d98bc148698356847b80fa4de1b", size = 275488, upload-time = "2025-10-08T22:01:40.773Z" }, + { url = "https://files.pythonhosted.org/packages/92/04/a038d65dbe160c3aa5a624e93ad98111090f6804027d474ba9c37c8ae186/tomli-2.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5e01decd096b1530d97d5d85cb4dff4af2d8347bd35686654a004f8dea20fc67", size = 272669, upload-time = "2025-10-08T22:01:41.824Z" }, + { url = "https://files.pythonhosted.org/packages/be/2f/8b7c60a9d1612a7cbc39ffcca4f21a73bf368a80fc25bccf8253e2563267/tomli-2.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:8a35dd0e643bb2610f156cca8db95d213a90015c11fee76c946aa62b7ae7e02f", size = 279709, upload-time = "2025-10-08T22:01:43.177Z" }, + { url = "https://files.pythonhosted.org/packages/7e/46/cc36c679f09f27ded940281c38607716c86cf8ba4a518d524e349c8b4874/tomli-2.3.0-cp314-cp314t-win32.whl", hash = "sha256:a1f7f282fe248311650081faafa5f4732bdbfef5d45fe3f2e702fbc6f2d496e0", size = 107563, upload-time = "2025-10-08T22:01:44.233Z" }, + { url = 
"https://files.pythonhosted.org/packages/84/ff/426ca8683cf7b753614480484f6437f568fd2fda2edbdf57a2d3d8b27a0b/tomli-2.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:70a251f8d4ba2d9ac2542eecf008b3c8a9fc5c3f9f02c56a9d7952612be2fdba", size = 119756, upload-time = "2025-10-08T22:01:45.234Z" }, + { url = "https://files.pythonhosted.org/packages/77/b8/0135fadc89e73be292b473cb820b4f5a08197779206b33191e801feeae40/tomli-2.3.0-py3-none-any.whl", hash = "sha256:e95b1af3c5b07d9e643909b5abbec77cd9f1217e6d0bca72b0234736b9fb1f1b", size = 14408, upload-time = "2025-10-08T22:01:46.04Z" }, +] + +[[package]] +name = "tomlkit" +version = "0.13.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/18/0bbf3884e9eaa38819ebe46a7bd25dcd56b67434402b66a58c4b8e552575/tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1", size = 185207, upload-time = "2025-06-05T07:13:44.947Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" }, +] + +[[package]] +name = "types-authlib" +version = "1.6.6.20251220" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/46/c5/ed668b28a66b847903cc94362bf05171a01473e3782e60b362f246c160fe/types_authlib-1.6.6.20251220.tar.gz", hash = "sha256:a2369f23732fe88d5087ed720864f40d0319c19e8411d85a4930e31018996f90", size 
= 45595, upload-time = "2025-12-20T03:07:43.241Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/ef/e783f3d0d489f7bf66557c3f26cd620e4bae1cba43a59e69c4ced25853bc/types_authlib-1.6.6.20251220-py3-none-any.whl", hash = "sha256:dd1d545fe4c498686c0285d59dc950d87d977d294fc430617c91e0a11f6f4f2b", size = 102884, upload-time = "2025-12-20T03:07:42.028Z" }, +] + +[[package]] +name = "types-python-dateutil" +version = "2.9.0.20251115" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/36/06d01fb52c0d57e9ad0c237654990920fa41195e4b3d640830dabf9eeb2f/types_python_dateutil-2.9.0.20251115.tar.gz", hash = "sha256:8a47f2c3920f52a994056b8786309b43143faa5a64d4cbb2722d6addabdf1a58", size = 16363, upload-time = "2025-11-15T03:00:13.717Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/0b/56961d3ba517ed0df9b3a27bfda6514f3d01b28d499d1bce9068cfe4edd1/types_python_dateutil-2.9.0.20251115-py3-none-any.whl", hash = "sha256:9cf9c1c582019753b8639a081deefd7e044b9fa36bd8217f565c6c4e36ee0624", size = 18251, upload-time = "2025-11-15T03:00:12.317Z" }, +] + +[[package]] +name = "types-pyyaml" +version = "6.0.12.20250915" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7e/69/3c51b36d04da19b92f9e815be12753125bd8bc247ba0470a982e6979e71c/types_pyyaml-6.0.12.20250915.tar.gz", hash = "sha256:0f8b54a528c303f0e6f7165687dd33fafa81c807fcac23f632b63aa624ced1d3", size = 17522, upload-time = "2025-09-15T03:01:00.728Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl", hash = "sha256:e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6", size = 20338, upload-time = "2025-09-15T03:00:59.218Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = 
"sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" }, +] + +[[package]] +name = "urllib3" +version = "2.6.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/24/a2a2ed9addd907787d7aa0355ba36a6cadf1768b934c652ea78acbd59dcd/urllib3-2.6.2.tar.gz", hash = "sha256:016f9c98bb7e98085cb2b4b17b87d2c702975664e4f060c6532e64d1c1a5e797", size = 432930, upload-time = "2025-12-11T15:56:40.252Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd", size = 131182, upload-time = "2025-12-11T15:56:38.584Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.40.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click", marker = "python_full_version >= '3.10'" }, + { name = "h11", marker = "python_full_version >= '3.10'" }, + { name = "typing-extensions", marker = "python_full_version == '3.10.*'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/d1/8f3c683c9561a4e6689dd3b1d345c815f10f86acd044ee1fb9a4dcd0b8c5/uvicorn-0.40.0.tar.gz", hash = "sha256:839676675e87e73694518b5574fd0f24c9d97b46bea16df7b8c05ea1a51071ea", size = 81761, upload-time = "2025-12-21T14:16:22.45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/d8/2083a1daa7439a66f3a48589a57d576aa117726762618f6bb09fe3798796/uvicorn-0.40.0-py3-none-any.whl", hash = "sha256:c6c8f55bc8bf13eb6fa9ff87ad62308bbbc33d0b67f84293151efe87e0d5f2ee", size = 68502, upload-time = 
"2025-12-21T14:16:21.041Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] From 802af2ca9930262aad17ad4448da9852900fe191 Mon Sep 17 00:00:00 2001 From: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> Date: Wed, 31 Dec 2025 14:14:33 +0100 Subject: [PATCH 166/223] chore: drop support for EOL python 3.9 (#304) --- .github/workflows/run_example_scripts.yaml | 15 +- README.md | 5 +- packages/mistralai_azure/pylintrc | 4 +- packages/mistralai_azure/pyproject.toml | 3 +- packages/mistralai_azure/uv.lock | 3 +- packages/mistralai_gcp/pylintrc | 2 +- packages/mistralai_gcp/pyproject.toml | 3 +- packages/mistralai_gcp/uv.lock | 8 +- pylintrc | 2 +- pyproject.toml | 6 +- src/mistralai/extra/mcp/auth.py | 21 +- src/mistralai/extra/mcp/base.py | 20 +- src/mistralai/extra/mcp/sse.py | 28 ++- src/mistralai/extra/mcp/stdio.py | 11 +- src/mistralai/extra/observability/otel.py | 34 ++- src/mistralai/extra/run/context.py | 76 +++---- src/mistralai/extra/run/result.py | 59 +++--- src/mistralai/extra/run/tools.py | 17 +- src/mistralai/extra/struct_chat.py | 23 +- src/mistralai/extra/utils/response_format.py | 8 +- uv.lock | 211 ++++--------------- 21 files changed, 214 insertions(+), 345 deletions(-) diff 
--git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index 8ea90354..84896d26 100644 --- a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -14,7 +14,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] + python-version: ['3.10', '3.11', '3.12', '3.13'] steps: - name: Checkout code @@ -42,18 +42,7 @@ jobs: touch README-PYPI.md # Create this file since the client is not built by Speakeasy uv build - - name: For python 3.9, install the client and run examples without extra dependencies. - if: matrix.python-version == '3.9' - run: | - PACKAGE="dist/$(ls dist | grep whl | head -n 1)" - uv pip install --system "$PACKAGE" - ./scripts/run_examples.sh --no-extra-dep - env: - MISTRAL_AGENT_ID: ${{ secrets.CI_AGENT_ID }} - MISTRAL_API_KEY: ${{ env.MISTRAL_API_KEY }} - - - name: For python 3.10+, install client with extras and run all examples. - if: matrix.python-version != '3.9' + - name: Install client with extras and run all examples. run: | PACKAGE="dist/$(ls dist | grep whl | head -n 1)[agents]" uv pip install --system "$PACKAGE" diff --git a/README.md b/README.md index d198cf10..ba054118 100644 --- a/README.md +++ b/README.md @@ -89,7 +89,7 @@ It's also possible to write a standalone Python script without needing to set up ```python #!/usr/bin/env -S uv run --script # /// script -# requires-python = ">=3.9" +# requires-python = ">=3.10" # dependencies = [ # "mistralai", # ] @@ -117,8 +117,7 @@ installing the package: pip install "mistralai[agents]" ``` -> Note: Because of some of our dependencies, these features are only available for python version higher or equal to -> 3.10. +> Note: These features require Python 3.10+ (the SDK minimum). 
## SDK Example Usage diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index a8fcb932..95f656e2 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.10 # Discover python modules and packages in the file system subtree. recursive=no @@ -660,4 +660,4 @@ init-import=no # List of qualified module names which can have objects that can redefine # builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 59cd5be3..2842c215 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -3,7 +3,7 @@ name = "mistralai_azure" version = "1.6.0" description = "Python Client SDK for the Mistral AI API in Azure." authors = [{ name = "Mistral" }] -requires-python = ">=3.9.2" +requires-python = ">=3.10" readme = "README.md" dependencies = [ "httpcore >=1.0.9", @@ -63,4 +63,3 @@ ignore_missing_imports = true venvPath = "." 
venv = ".venv" - diff --git a/packages/mistralai_azure/uv.lock b/packages/mistralai_azure/uv.lock index 37c95c55..d77ea936 100644 --- a/packages/mistralai_azure/uv.lock +++ b/packages/mistralai_azure/uv.lock @@ -1,6 +1,6 @@ version = 1 revision = 3 -requires-python = ">=3.9.2" +requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.12'", "python_full_version == '3.11.*'", @@ -402,7 +402,6 @@ dependencies = [ { name = "platformdirs" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "tomlkit" }, - { name = "typing-extensions", marker = "python_full_version < '3.10'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } wheels = [ diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index 266bc815..c80721af 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.10 # Discover python modules and packages in the file system subtree. recursive=no diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index f7f182b6..650ef73b 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -3,7 +3,7 @@ name = "mistralai-gcp" version = "1.6.0" description = "Python Client SDK for the Mistral AI API in GCP." authors = [{ name = "Mistral" }] -requires-python = ">=3.9" +requires-python = ">=3.10" readme = "README-PYPI.md" dependencies = [ "eval-type-backport >=0.2.0", @@ -66,4 +66,3 @@ ignore_missing_imports = true venvPath = "." 
venv = ".venv" - diff --git a/packages/mistralai_gcp/uv.lock b/packages/mistralai_gcp/uv.lock index ef292b22..afd17643 100644 --- a/packages/mistralai_gcp/uv.lock +++ b/packages/mistralai_gcp/uv.lock @@ -1,11 +1,10 @@ version = 1 revision = 3 -requires-python = ">=3.9" +requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.12'", "python_full_version == '3.11.*'", "python_full_version == '3.10.*'", - "python_full_version < '3.10'", ] [[package]] @@ -270,7 +269,6 @@ name = "iniconfig" version = "2.1.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ - "python_full_version < '3.10'", ] sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } wheels = [ @@ -419,7 +417,6 @@ name = "platformdirs" version = "4.4.0" source = { registry = "https://pypi.org/simple" } resolution-markers = [ - "python_full_version < '3.10'", ] sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size = 21634, upload-time = "2025-08-26T14:32:04.268Z" } wheels = [ @@ -635,11 +632,9 @@ dependencies = [ { name = "dill" }, { name = "isort" }, { name = "mccabe" }, - { name = "platformdirs", version = "4.4.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "platformdirs", version = "4.5.1", source = { registry = "https://pypi.org/simple" }, 
marker = "python_full_version >= '3.10'" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "tomlkit" }, - { name = "typing-extensions", marker = "python_full_version < '3.10'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } wheels = [ @@ -653,7 +648,6 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, - { name = "iniconfig", version = "2.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, { name = "iniconfig", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, { name = "packaging" }, { name = "pluggy" }, diff --git a/pylintrc b/pylintrc index 29202a96..d4e4ba5e 100644 --- a/pylintrc +++ b/pylintrc @@ -90,7 +90,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.10 # Discover python modules and packages in the file system subtree. recursive=no diff --git a/pyproject.toml b/pyproject.toml index 3208eb77..933a3162 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = "mistralai" version = "1.10.0" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" }] -requires-python = ">=3.9" +requires-python = ">=3.10" readme = "README-PYPI.md" dependencies = [ "eval-type-backport >=0.2.0", @@ -25,7 +25,7 @@ gcp = [ "requests >=2.32.3", ] agents = [ - "mcp >=1.0,<2.0; python_version >= '3.10'", + "mcp >=1.0,<2.0", "griffe >=1.7.3,<2.0", "authlib >=1.5.2,<2.0", ] @@ -42,7 +42,7 @@ dev = [ "types-python-dateutil>=2.9.0.20240316,<3", "types-authlib>=1.5.0.20250516,<2", "types-pyyaml>=6.0.12.20250516,<7", - "mcp>=1.0,<2 ; python_version >= '3.10'", + "mcp>=1.0,<2", "griffe>=1.7.3,<2", "authlib>=1.5.2,<2", ] diff --git a/src/mistralai/extra/mcp/auth.py b/src/mistralai/extra/mcp/auth.py index 909f5d4a..f2b2db8a 100644 --- a/src/mistralai/extra/mcp/auth.py +++ b/src/mistralai/extra/mcp/auth.py @@ -1,9 +1,8 @@ -from typing import Optional +import logging -from authlib.oauth2.rfc8414 import AuthorizationServerMetadata -from authlib.integrations.httpx_client import AsyncOAuth2Client as AsyncOAuth2ClientBase import httpx -import logging +from authlib.integrations.httpx_client import AsyncOAuth2Client as AsyncOAuth2ClientBase +from authlib.oauth2.rfc8414 import AuthorizationServerMetadata from mistralai.types import BaseModel @@ -16,8 +15,8 @@ class Oauth2AuthorizationScheme(BaseModel): authorization_url: str token_url: str scope: list[str] - description: Optional[str] = None - refresh_url: Optional[str] = None + description: str | None = None + refresh_url: str | None = None class OAuthParams(BaseModel): @@ -42,7 +41,7 @@ def from_oauth_params(cls, oauth_params: OAuthParams) -> "AsyncOAuth2Client": async def get_well_known_authorization_server_metadata( server_url: str, -) -> Optional[AuthorizationServerMetadata]: +) -> AuthorizationServerMetadata | None: """Fetch the metadata from the well-known location. 
This should be available on MCP servers as described by the specification: @@ -123,10 +122,10 @@ async def dynamic_client_registration( async def build_oauth_params( server_url: str, redirect_url: str, - client_id: Optional[str] = None, - client_secret: Optional[str] = None, - scope: Optional[list[str]] = None, - async_client: Optional[httpx.AsyncClient] = None, + client_id: str | None = None, + client_secret: str | None = None, + scope: list[str] | None = None, + async_client: httpx.AsyncClient | None = None, ) -> OAuthParams: """Get issuer metadata and build the oauth required params.""" metadata = await get_oauth_server_metadata(server_url=server_url) diff --git a/src/mistralai/extra/mcp/base.py b/src/mistralai/extra/mcp/base.py index 929b6b11..bbda67d5 100644 --- a/src/mistralai/extra/mcp/base.py +++ b/src/mistralai/extra/mcp/base.py @@ -1,8 +1,8 @@ -from typing import Optional, Sequence, Union import logging import typing +from collections.abc import Sequence from contextlib import AsyncExitStack -from typing import Protocol, Any +from typing import Any, Protocol from mcp import ClientSession # pyright: ignore[reportMissingImports] from mcp.types import ( # pyright: ignore[reportMissingImports] @@ -23,8 +23,8 @@ class MCPSystemPrompt(typing.TypedDict): - description: Optional[str] - messages: list[Union[SystemMessageTypedDict, AssistantMessageTypedDict]] + description: str | None + messages: list[SystemMessageTypedDict | AssistantMessageTypedDict] class MCPClientProtocol(Protocol): @@ -32,7 +32,7 @@ class MCPClientProtocol(Protocol): _name: str - async def initialize(self, exit_stack: Optional[AsyncExitStack]) -> None: + async def initialize(self, exit_stack: AsyncExitStack | None) -> None: ... async def aclose(self) -> None: @@ -42,7 +42,7 @@ async def get_tools(self) -> list[FunctionTool]: ... async def execute_tool( - self, name: str, arguments: dict + self, name: str, arguments: dict[str, Any] ) -> list[TextChunkTypedDict]: ... 
@@ -60,9 +60,9 @@ class MCPClientBase(MCPClientProtocol): _session: ClientSession - def __init__(self, name: Optional[str] = None): + def __init__(self, name: str | None = None): self._name = name or self.__class__.__name__ - self._exit_stack: Optional[AsyncExitStack] = None + self._exit_stack: AsyncExitStack | None = None self._is_initialized = False def _convert_content(self, mcp_content: ContentBlock) -> TextChunkTypedDict: @@ -109,7 +109,7 @@ async def get_system_prompt( "description": prompt_result.description, "messages": [ typing.cast( - Union[SystemMessageTypedDict, AssistantMessageTypedDict], + SystemMessageTypedDict | AssistantMessageTypedDict, { "role": message.role, "content": self._convert_content(mcp_content=message.content), @@ -122,7 +122,7 @@ async def get_system_prompt( async def list_system_prompts(self) -> ListPromptsResult: return await self._session.list_prompts() - async def initialize(self, exit_stack: Optional[AsyncExitStack] = None) -> None: + async def initialize(self, exit_stack: AsyncExitStack | None = None) -> None: """Initialize the MCP session.""" # client is already initialized so return if self._is_initialized: diff --git a/src/mistralai/extra/mcp/sse.py b/src/mistralai/extra/mcp/sse.py index 3898ba75..ba49fd1a 100644 --- a/src/mistralai/extra/mcp/sse.py +++ b/src/mistralai/extra/mcp/sse.py @@ -1,22 +1,20 @@ import http import logging -import typing -from typing import Any, Optional from contextlib import AsyncExitStack from functools import cached_property +from typing import Any import httpx +from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream +from authlib.oauth2.rfc6749 import OAuth2Token +from mcp.client.sse import sse_client # pyright: ignore[reportMissingImports] +from mcp.shared.message import SessionMessage # pyright: ignore[reportMissingImports] from mistralai.extra.exceptions import MCPAuthException from mistralai.extra.mcp.base import ( MCPClientBase, ) from mistralai.extra.mcp.auth import 
OAuthParams, AsyncOAuth2Client -from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream - -from mcp.client.sse import sse_client # pyright: ignore[reportMissingImports] -from mcp.shared.message import SessionMessage # pyright: ignore[reportMissingImports] -from authlib.oauth2.rfc6749 import OAuth2Token from mistralai.types import BaseModel @@ -27,7 +25,7 @@ class SSEServerParams(BaseModel): """Parameters required for a MCPClient with SSE transport""" url: str - headers: Optional[dict[str, Any]] = None + headers: dict[str, Any] | None = None timeout: float = 5 sse_read_timeout: float = 60 * 5 @@ -41,20 +39,20 @@ class MCPClientSSE(MCPClientBase): This is possibly going to change in the future since the protocol has ongoing discussions. """ - _oauth_params: Optional[OAuthParams] + _oauth_params: OAuthParams | None _sse_params: SSEServerParams def __init__( self, sse_params: SSEServerParams, - name: Optional[str] = None, - oauth_params: Optional[OAuthParams] = None, - auth_token: Optional[OAuth2Token] = None, + name: str | None = None, + oauth_params: OAuthParams | None = None, + auth_token: OAuth2Token | None = None, ): super().__init__(name=name) self._sse_params = sse_params - self._oauth_params: Optional[OAuthParams] = oauth_params - self._auth_token: Optional[OAuth2Token] = auth_token + self._oauth_params: OAuthParams | None = oauth_params + self._auth_token: OAuth2Token | None = auth_token @cached_property def base_url(self) -> str: @@ -142,7 +140,7 @@ async def requires_auth(self) -> bool: async def _get_transport( self, exit_stack: AsyncExitStack ) -> tuple[ - MemoryObjectReceiveStream[typing.Union[SessionMessage, Exception]], + MemoryObjectReceiveStream[SessionMessage | Exception], MemoryObjectSendStream[SessionMessage], ]: try: diff --git a/src/mistralai/extra/mcp/stdio.py b/src/mistralai/extra/mcp/stdio.py index b7af4029..a548066c 100644 --- a/src/mistralai/extra/mcp/stdio.py +++ b/src/mistralai/extra/mcp/stdio.py @@ -1,12 +1,9 @@ 
-from typing import Optional import logging from contextlib import AsyncExitStack -from mistralai.extra.mcp.base import ( - MCPClientBase, -) +from mcp import StdioServerParameters, stdio_client # pyright: ignore[reportMissingImports] -from mcp import stdio_client, StdioServerParameters # pyright: ignore[reportMissingImports] +from mistralai.extra.mcp.base import MCPClientBase logger = logging.getLogger(__name__) @@ -14,7 +11,9 @@ class MCPClientSTDIO(MCPClientBase): """MCP client that uses stdio for communication.""" - def __init__(self, stdio_params: StdioServerParameters, name: Optional[str] = None): + def __init__( + self, stdio_params: StdioServerParameters, name: str | None = None + ): super().__init__(name=name) self._stdio_params = stdio_params diff --git a/src/mistralai/extra/observability/otel.py b/src/mistralai/extra/observability/otel.py index 6037e681..8be0841d 100644 --- a/src/mistralai/extra/observability/otel.py +++ b/src/mistralai/extra/observability/otel.py @@ -5,7 +5,6 @@ import traceback from datetime import datetime, timezone from enum import Enum -from typing import Optional, Tuple, Union import httpx import opentelemetry.semconv._incubating.attributes.gen_ai_attributes as gen_ai_attributes @@ -135,7 +134,7 @@ def enrich_span_from_response(tracer: trace.Tracer, span: Span, operation_id: st response_data = json.loads(response.content) # Base attributes - attributes: dict[str, Union[str, int]] = { + attributes: dict[str, str | int] = { http_attributes.HTTP_RESPONSE_STATUS_CODE: response.status_code, MistralAIAttributes.MISTRAL_AI_OPERATION_ID: operation_id, gen_ai_attributes.GEN_AI_PROVIDER_NAME: gen_ai_attributes.GenAiProviderNameValues.MISTRAL_AI.value @@ -226,7 +225,7 @@ def export(self, spans): return SpanExportResult.FAILURE -def get_or_create_otel_tracer() -> Tuple[bool, Tracer]: +def get_or_create_otel_tracer() -> tuple[bool, Tracer]: """ 3 possible cases: @@ -273,7 +272,13 @@ def get_or_create_otel_tracer() -> Tuple[bool, Tracer]: return 
tracing_enabled, tracer -def get_traced_request_and_span(tracing_enabled: bool, tracer: Tracer, span: Optional[Span], operation_id: str, request: httpx.Request) -> Tuple[httpx.Request, Optional[Span]]: +def get_traced_request_and_span( + tracing_enabled: bool, + tracer: Tracer, + span: Span | None, + operation_id: str, + request: httpx.Request, +) -> tuple[httpx.Request, Span | None]: if not tracing_enabled: return request, span @@ -295,7 +300,13 @@ def get_traced_request_and_span(tracing_enabled: bool, tracer: Tracer, span: Opt return request, span -def get_traced_response(tracing_enabled: bool, tracer: Tracer, span: Optional[Span], operation_id: str, response: httpx.Response) -> httpx.Response: +def get_traced_response( + tracing_enabled: bool, + tracer: Tracer, + span: Span | None, + operation_id: str, + response: httpx.Response, +) -> httpx.Response: if not tracing_enabled or not span: return response try: @@ -315,7 +326,14 @@ def get_traced_response(tracing_enabled: bool, tracer: Tracer, span: Optional[Sp end_span(span=span) return response -def get_response_and_error(tracing_enabled: bool, tracer: Tracer, span: Optional[Span], operation_id: str, response: httpx.Response, error: Optional[Exception]) -> Tuple[httpx.Response, Optional[Exception]]: +def get_response_and_error( + tracing_enabled: bool, + tracer: Tracer, + span: Span | None, + operation_id: str, + response: httpx.Response, + error: Exception | None, +) -> tuple[httpx.Response, Exception | None]: if not tracing_enabled or not span: return response, error try: @@ -366,7 +384,7 @@ class TracedResponse(httpx.Response): This hack allows ending the span only once the stream is fully consumed. 
""" - def __init__(self, *args, span: Optional[Span], **kwargs) -> None: + def __init__(self, *args, span: Span | None, **kwargs) -> None: super().__init__(*args, **kwargs) self.span = span @@ -381,7 +399,7 @@ async def aclose(self) -> None: await super().aclose() @classmethod - def from_response(cls, resp: httpx.Response, span: Optional[Span]) -> "TracedResponse": + def from_response(cls, resp: httpx.Response, span: Span | None) -> "TracedResponse": traced_resp = cls.__new__(cls) traced_resp.__dict__ = copy.copy(resp.__dict__) traced_resp.span = span diff --git a/src/mistralai/extra/run/context.py b/src/mistralai/extra/run/context.py index 08350a84..0d78352a 100644 --- a/src/mistralai/extra/run/context.py +++ b/src/mistralai/extra/run/context.py @@ -1,47 +1,41 @@ import asyncio import inspect import typing -from contextlib import AsyncExitStack -from functools import wraps from collections.abc import Callable - +from contextlib import AsyncExitStack from dataclasses import dataclass, field -from typing import Union, Optional +from functools import wraps +from logging import getLogger import pydantic -from mistralai.extra import ( - response_format_from_pydantic_model, -) +from mistralai.extra import response_format_from_pydantic_model from mistralai.extra.exceptions import RunException from mistralai.extra.mcp.base import MCPClientProtocol from mistralai.extra.run.result import RunResult -from mistralai.types.basemodel import OptionalNullable, BaseModel, UNSET +from mistralai.extra.run.tools import ( + RunCoroutine, + RunFunction, + RunMCPTool, + RunTool, + create_function_result, + create_tool_call, +) from mistralai.models import ( - ResponseFormat, - FunctionCallEntry, - Tools, - ToolsTypedDict, CompletionArgs, CompletionArgsTypedDict, - FunctionResultEntry, ConversationInputs, ConversationInputsTypedDict, + FunctionCallEntry, + FunctionResultEntry, FunctionTool, - MessageInputEntry, InputEntries, + MessageInputEntry, + ResponseFormat, + Tools, + 
ToolsTypedDict, ) - -from logging import getLogger - -from mistralai.extra.run.tools import ( - create_function_result, - RunFunction, - create_tool_call, - RunTool, - RunMCPTool, - RunCoroutine, -) +from mistralai.types.basemodel import BaseModel, OptionalNullable, UNSET if typing.TYPE_CHECKING: from mistralai import Beta, OptionalNullable @@ -56,8 +50,8 @@ class AgentRequestKwargs(typing.TypedDict): class ModelRequestKwargs(typing.TypedDict): model: str instructions: OptionalNullable[str] - tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] - completion_args: OptionalNullable[Union[CompletionArgs, CompletionArgsTypedDict]] + tools: OptionalNullable[list[Tools] | list[ToolsTypedDict]] + completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] @dataclass @@ -72,7 +66,7 @@ class RunContext: passed if the user wants to continue an existing conversation. model (Options[str]): The model name to be used for the conversation. Can't be used along with 'agent_id'. agent_id (Options[str]): The agent id to be used for the conversation. Can't be used along with 'model'. - output_format (Optional[type[BaseModel]]): The output format expected from the conversation. It represents + output_format (type[BaseModel] | None): The output format expected from the conversation. It represents the `response_format` which is part of the `CompletionArgs`. request_count (int): The number of requests made in the current `RunContext`. 
continue_on_fn_error (bool): Flag to determine if the conversation should continue when function execution @@ -83,10 +77,10 @@ class RunContext: _callable_tools: dict[str, RunTool] = field(init=False, default_factory=dict) _mcp_clients: list[MCPClientProtocol] = field(init=False, default_factory=list) - conversation_id: Optional[str] = field(default=None) - model: Optional[str] = field(default=None) - agent_id: Optional[str] = field(default=None) - output_format: Optional[type[BaseModel]] = field(default=None) + conversation_id: str | None = field(default=None) + model: str | None = field(default=None) + agent_id: str | None = field(default=None) + output_format: type[BaseModel] | None = field(default=None) request_count: int = field(default=0) continue_on_fn_error: bool = field(default=False) @@ -215,10 +209,8 @@ async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs async def prepare_model_request( self, - tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] = UNSET, - completion_args: OptionalNullable[ - Union[CompletionArgs, CompletionArgsTypedDict] - ] = UNSET, + tools: OptionalNullable[list[Tools] | list[ToolsTypedDict]] = UNSET, + completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] = UNSET, instructions: OptionalNullable[str] = None, ) -> ModelRequestKwargs: if self.model is None: @@ -254,14 +246,12 @@ async def _validate_run( *, beta_client: "Beta", run_ctx: RunContext, - inputs: Union[ConversationInputs, ConversationInputsTypedDict], + inputs: ConversationInputs | ConversationInputsTypedDict, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[Union[list[Tools], list[ToolsTypedDict]]] = UNSET, - completion_args: OptionalNullable[ - Union[CompletionArgs, CompletionArgsTypedDict] - ] = UNSET, + tools: OptionalNullable[list[Tools] | list[ToolsTypedDict]] = UNSET, + completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] = UNSET, ) -> tuple[ - 
Union[AgentRequestKwargs, ModelRequestKwargs], RunResult, list[InputEntries] + AgentRequestKwargs | ModelRequestKwargs, RunResult, list[InputEntries] ]: input_entries: list[InputEntries] = [] if isinstance(inputs, str): @@ -277,7 +267,7 @@ async def _validate_run( output_model=run_ctx.output_format, conversation_id=run_ctx.conversation_id, ) - req: Union[AgentRequestKwargs, ModelRequestKwargs] + req: AgentRequestKwargs | ModelRequestKwargs if run_ctx.agent_id: if tools or completion_args: raise RunException("Can't set tools or completion_args when using an agent") diff --git a/src/mistralai/extra/run/result.py b/src/mistralai/extra/run/result.py index 9592dccf..0af48ee7 100644 --- a/src/mistralai/extra/run/result.py +++ b/src/mistralai/extra/run/result.py @@ -1,9 +1,10 @@ import datetime import json import typing -from typing import Union, Annotated, Optional, Literal from dataclasses import dataclass, field -from pydantic import Discriminator, Tag, BaseModel +from typing import Annotated, Literal + +from pydantic import BaseModel, Discriminator, Tag from mistralai.extra.utils.response_format import pydantic_model_from_json from mistralai.models import ( @@ -35,15 +36,15 @@ ) from mistralai.utils import get_discriminator -RunOutputEntries = typing.Union[ - MessageOutputEntry, - FunctionCallEntry, - FunctionResultEntry, - AgentHandoffEntry, - ToolExecutionEntry, -] +RunOutputEntries = ( + MessageOutputEntry + | FunctionCallEntry + | FunctionResultEntry + | AgentHandoffEntry + | ToolExecutionEntry +) -RunEntries = typing.Union[RunOutputEntries, MessageInputEntry] +RunEntries = RunOutputEntries | MessageInputEntry def as_text(entry: RunOutputEntries) -> str: @@ -140,12 +141,12 @@ class RunFiles: @dataclass class RunResult: input_entries: list[InputEntries] - conversation_id: Optional[str] = field(default=None) + conversation_id: str | None = field(default=None) output_entries: list[RunOutputEntries] = field(default_factory=list) files: dict[str, RunFiles] = 
field(default_factory=dict) - output_model: Optional[type[BaseModel]] = field(default=None) + output_model: type[BaseModel] | None = field(default=None) - def get_file(self, file_id: str) -> Optional[RunFiles]: + def get_file(self, file_id: str) -> RunFiles | None: return self.files.get(file_id) @property @@ -172,36 +173,34 @@ def output_as_model(self) -> BaseModel: class FunctionResultEvent(BaseModel): - id: Optional[str] = None + id: str | None = None - type: Optional[Literal["function.result"]] = "function.result" + type: Literal["function.result"] | None = "function.result" result: str tool_call_id: str - created_at: Optional[datetime.datetime] = datetime.datetime.now( + created_at: datetime.datetime | None = datetime.datetime.now( tz=datetime.timezone.utc ) - output_index: Optional[int] = 0 + output_index: int | None = 0 -RunResultEventsType = typing.Union[SSETypes, Literal["function.result"]] +RunResultEventsType = SSETypes | Literal["function.result"] RunResultEventsData = typing.Annotated[ - Union[ - Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], - Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], - Annotated[ResponseDoneEvent, Tag("conversation.response.done")], - Annotated[ResponseErrorEvent, Tag("conversation.response.error")], - Annotated[ResponseStartedEvent, Tag("conversation.response.started")], - Annotated[FunctionCallEvent, Tag("function.call.delta")], - Annotated[MessageOutputEvent, Tag("message.output.delta")], - Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], - Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], - Annotated[FunctionResultEvent, Tag("function.result")], - ], + Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")] + | Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")] + | Annotated[ResponseDoneEvent, Tag("conversation.response.done")] + | Annotated[ResponseErrorEvent, Tag("conversation.response.error")] + | Annotated[ResponseStartedEvent, 
Tag("conversation.response.started")] + | Annotated[FunctionCallEvent, Tag("function.call.delta")] + | Annotated[MessageOutputEvent, Tag("message.output.delta")] + | Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")] + | Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")] + | Annotated[FunctionResultEvent, Tag("function.result")], Discriminator(lambda m: get_discriminator(m, "type", "type")), ] diff --git a/src/mistralai/extra/run/tools.py b/src/mistralai/extra/run/tools.py index e3f80935..b117fdea 100644 --- a/src/mistralai/extra/run/tools.py +++ b/src/mistralai/extra/run/tools.py @@ -1,14 +1,11 @@ +import inspect import itertools +import json import logging from dataclasses import dataclass -import inspect - -from pydantic import Field, create_model -from pydantic.fields import FieldInfo -import json -from typing import cast, Callable, Sequence, Any, ForwardRef, get_type_hints, Union +from typing import Any, Callable, ForwardRef, Sequence, cast, get_type_hints -from opentelemetry import trace +import opentelemetry.semconv._incubating.attributes.gen_ai_attributes as gen_ai_attributes from griffe import ( Docstring, DocstringSectionKind, @@ -16,7 +13,9 @@ DocstringParameter, DocstringSection, ) -import opentelemetry.semconv._incubating.attributes.gen_ai_attributes as gen_ai_attributes +from opentelemetry import trace +from pydantic import Field, create_model +from pydantic.fields import FieldInfo from mistralai.extra.exceptions import RunException from mistralai.extra.mcp.base import MCPClientProtocol @@ -54,7 +53,7 @@ class RunMCPTool: mcp_client: MCPClientProtocol -RunTool = Union[RunFunction, RunCoroutine, RunMCPTool] +RunTool = RunFunction | RunCoroutine | RunMCPTool def _get_function_description(docstring_sections: list[DocstringSection]) -> str: diff --git a/src/mistralai/extra/struct_chat.py b/src/mistralai/extra/struct_chat.py index 364b450f..773cbb6c 100644 --- a/src/mistralai/extra/struct_chat.py +++ 
b/src/mistralai/extra/struct_chat.py @@ -1,19 +1,26 @@ -from ..models import ChatCompletionResponse, ChatCompletionChoice, AssistantMessage -from .utils.response_format import CustomPydanticModel, pydantic_model_from_json -from typing import List, Optional, Type, Generic -from pydantic import BaseModel import json +from typing import Generic + +from ..models import AssistantMessage, ChatCompletionChoice, ChatCompletionResponse +from .utils.response_format import CustomPydanticModel, pydantic_model_from_json + class ParsedAssistantMessage(AssistantMessage, Generic[CustomPydanticModel]): - parsed: Optional[CustomPydanticModel] + parsed: CustomPydanticModel | None + class ParsedChatCompletionChoice(ChatCompletionChoice, Generic[CustomPydanticModel]): - message: Optional[ParsedAssistantMessage[CustomPydanticModel]] # type: ignore + message: ParsedAssistantMessage[CustomPydanticModel] | None # type: ignore + class ParsedChatCompletionResponse(ChatCompletionResponse, Generic[CustomPydanticModel]): - choices: Optional[List[ParsedChatCompletionChoice[CustomPydanticModel]]] # type: ignore + choices: list[ParsedChatCompletionChoice[CustomPydanticModel]] | None # type: ignore + -def convert_to_parsed_chat_completion_response(response: ChatCompletionResponse, response_format: Type[BaseModel]) -> ParsedChatCompletionResponse: +def convert_to_parsed_chat_completion_response( + response: ChatCompletionResponse, + response_format: type[CustomPydanticModel], +) -> ParsedChatCompletionResponse[CustomPydanticModel]: parsed_choices = [] if response.choices: diff --git a/src/mistralai/extra/utils/response_format.py b/src/mistralai/extra/utils/response_format.py index 67e15912..10bff89f 100644 --- a/src/mistralai/extra/utils/response_format.py +++ b/src/mistralai/extra/utils/response_format.py @@ -1,5 +1,6 @@ +from typing import Any, TypeVar + from pydantic import BaseModel -from typing import TypeVar, Any, Type, Dict from ...models import JSONSchema, ResponseFormat from 
._pydantic_helper import rec_strict_json_schema @@ -7,7 +8,7 @@ def response_format_from_pydantic_model( - model: Type[CustomPydanticModel], + model: type[CustomPydanticModel], ) -> ResponseFormat: """Generate a strict JSON schema from a pydantic model.""" model_schema = rec_strict_json_schema(model.model_json_schema()) @@ -18,7 +19,8 @@ def response_format_from_pydantic_model( def pydantic_model_from_json( - json_data: Dict[str, Any], pydantic_model: Type[CustomPydanticModel] + json_data: dict[str, Any], + pydantic_model: type[CustomPydanticModel], ) -> CustomPydanticModel: """Parse a JSON schema into a pydantic model.""" return pydantic_model.model_validate(json_data) diff --git a/uv.lock b/uv.lock index 97fbeeec..59639629 100644 --- a/uv.lock +++ b/uv.lock @@ -1,11 +1,10 @@ version = 1 revision = 3 -requires-python = ">=3.9" +requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.12'", "python_full_version == '3.11.*'", - "python_full_version == '3.10.*'", - "python_full_version < '3.10'", + "python_full_version < '3.11'", ] [[package]] @@ -162,18 +161,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a0/1d/ec1a60bd1a10daa292d3cd6bb0b359a81607154fb8165f3ec95fe003b85c/cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e", size = 180487, upload-time = "2025-09-08T23:23:40.423Z" }, { url = "https://files.pythonhosted.org/packages/bf/41/4c1168c74fac325c0c8156f04b6749c8b6a8f405bbf91413ba088359f60d/cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6", size = 191726, upload-time = "2025-09-08T23:23:41.742Z" }, { url = "https://files.pythonhosted.org/packages/ae/3a/dbeec9d1ee0844c679f6bb5d6ad4e9f198b1224f4e7a32825f47f6192b0c/cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = 
"sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9", size = 184195, upload-time = "2025-09-08T23:23:43.004Z" }, - { url = "https://files.pythonhosted.org/packages/c0/cc/08ed5a43f2996a16b462f64a7055c6e962803534924b9b2f1371d8c00b7b/cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf", size = 184288, upload-time = "2025-09-08T23:23:48.404Z" }, - { url = "https://files.pythonhosted.org/packages/3d/de/38d9726324e127f727b4ecc376bc85e505bfe61ef130eaf3f290c6847dd4/cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7", size = 180509, upload-time = "2025-09-08T23:23:49.73Z" }, - { url = "https://files.pythonhosted.org/packages/9b/13/c92e36358fbcc39cf0962e83223c9522154ee8630e1df7c0b3a39a8124e2/cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c", size = 208813, upload-time = "2025-09-08T23:23:51.263Z" }, - { url = "https://files.pythonhosted.org/packages/15/12/a7a79bd0df4c3bff744b2d7e52cc1b68d5e7e427b384252c42366dc1ecbc/cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165", size = 216498, upload-time = "2025-09-08T23:23:52.494Z" }, - { url = "https://files.pythonhosted.org/packages/a3/ad/5c51c1c7600bdd7ed9a24a203ec255dccdd0ebf4527f7b922a0bde2fb6ed/cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534", size = 203243, upload-time = "2025-09-08T23:23:53.836Z" }, - { url = 
"https://files.pythonhosted.org/packages/32/f2/81b63e288295928739d715d00952c8c6034cb6c6a516b17d37e0c8be5600/cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f", size = 203158, upload-time = "2025-09-08T23:23:55.169Z" }, - { url = "https://files.pythonhosted.org/packages/1f/74/cc4096ce66f5939042ae094e2e96f53426a979864aa1f96a621ad128be27/cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63", size = 216548, upload-time = "2025-09-08T23:23:56.506Z" }, - { url = "https://files.pythonhosted.org/packages/e8/be/f6424d1dc46b1091ffcc8964fa7c0ab0cd36839dd2761b49c90481a6ba1b/cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2", size = 218897, upload-time = "2025-09-08T23:23:57.825Z" }, - { url = "https://files.pythonhosted.org/packages/f7/e0/dda537c2309817edf60109e39265f24f24aa7f050767e22c98c53fe7f48b/cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65", size = 211249, upload-time = "2025-09-08T23:23:59.139Z" }, - { url = "https://files.pythonhosted.org/packages/2b/e7/7c769804eb75e4c4b35e658dba01de1640a351a9653c3d49ca89d16ccc91/cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322", size = 218041, upload-time = "2025-09-08T23:24:00.496Z" }, - { url = "https://files.pythonhosted.org/packages/aa/d9/6218d78f920dcd7507fc16a766b5ef8f3b913cc7aa938e7fc80b9978d089/cffi-2.0.0-cp39-cp39-win32.whl", hash = 
"sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a", size = 172138, upload-time = "2025-09-08T23:24:01.7Z" }, - { url = "https://files.pythonhosted.org/packages/54/8f/a1e836f82d8e32a97e6b29cc8f641779181ac7363734f12df27db803ebda/cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9", size = 182794, upload-time = "2025-09-08T23:24:02.943Z" }, ] [[package]] @@ -262,22 +249,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, - { url = "https://files.pythonhosted.org/packages/46/7c/0c4760bccf082737ca7ab84a4c2034fcc06b1f21cf3032ea98bd6feb1725/charset_normalizer-3.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9", size = 209609, upload-time = "2025-10-14T04:42:10.922Z" }, - { url = 
"https://files.pythonhosted.org/packages/bb/a4/69719daef2f3d7f1819de60c9a6be981b8eeead7542d5ec4440f3c80e111/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d", size = 149029, upload-time = "2025-10-14T04:42:12.38Z" }, - { url = "https://files.pythonhosted.org/packages/e6/21/8d4e1d6c1e6070d3672908b8e4533a71b5b53e71d16828cc24d0efec564c/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608", size = 144580, upload-time = "2025-10-14T04:42:13.549Z" }, - { url = "https://files.pythonhosted.org/packages/a7/0a/a616d001b3f25647a9068e0b9199f697ce507ec898cacb06a0d5a1617c99/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc", size = 162340, upload-time = "2025-10-14T04:42:14.892Z" }, - { url = "https://files.pythonhosted.org/packages/85/93/060b52deb249a5450460e0585c88a904a83aec474ab8e7aba787f45e79f2/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e", size = 159619, upload-time = "2025-10-14T04:42:16.676Z" }, - { url = "https://files.pythonhosted.org/packages/dd/21/0274deb1cc0632cd587a9a0ec6b4674d9108e461cb4cd40d457adaeb0564/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1", size = 153980, upload-time 
= "2025-10-14T04:42:17.917Z" }, - { url = "https://files.pythonhosted.org/packages/28/2b/e3d7d982858dccc11b31906976323d790dded2017a0572f093ff982d692f/charset_normalizer-3.4.4-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3", size = 152174, upload-time = "2025-10-14T04:42:19.018Z" }, - { url = "https://files.pythonhosted.org/packages/6e/ff/4a269f8e35f1e58b2df52c131a1fa019acb7ef3f8697b7d464b07e9b492d/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6", size = 151666, upload-time = "2025-10-14T04:42:20.171Z" }, - { url = "https://files.pythonhosted.org/packages/da/c9/ec39870f0b330d58486001dd8e532c6b9a905f5765f58a6f8204926b4a93/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88", size = 145550, upload-time = "2025-10-14T04:42:21.324Z" }, - { url = "https://files.pythonhosted.org/packages/75/8f/d186ab99e40e0ed9f82f033d6e49001701c81244d01905dd4a6924191a30/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1", size = 163721, upload-time = "2025-10-14T04:42:22.46Z" }, - { url = "https://files.pythonhosted.org/packages/96/b1/6047663b9744df26a7e479ac1e77af7134b1fcf9026243bb48ee2d18810f/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf", size = 152127, upload-time = "2025-10-14T04:42:23.712Z" }, - { url = 
"https://files.pythonhosted.org/packages/59/78/e5a6eac9179f24f704d1be67d08704c3c6ab9f00963963524be27c18ed87/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318", size = 161175, upload-time = "2025-10-14T04:42:24.87Z" }, - { url = "https://files.pythonhosted.org/packages/e5/43/0e626e42d54dd2f8dd6fc5e1c5ff00f05fbca17cb699bedead2cae69c62f/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c", size = 155375, upload-time = "2025-10-14T04:42:27.246Z" }, - { url = "https://files.pythonhosted.org/packages/e9/91/d9615bf2e06f35e4997616ff31248c3657ed649c5ab9d35ea12fce54e380/charset_normalizer-3.4.4-cp39-cp39-win32.whl", hash = "sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505", size = 99692, upload-time = "2025-10-14T04:42:28.425Z" }, - { url = "https://files.pythonhosted.org/packages/d1/a9/6c040053909d9d1ef4fcab45fddec083aedc9052c10078339b47c8573ea8/charset_normalizer-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966", size = 107192, upload-time = "2025-10-14T04:42:29.482Z" }, - { url = "https://files.pythonhosted.org/packages/f0/c6/4fa536b2c0cd3edfb7ccf8469fa0f363ea67b7213a842b90909ca33dd851/charset_normalizer-3.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50", size = 100220, upload-time = "2025-10-14T04:42:30.632Z" }, { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = 
"sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, ] @@ -286,7 +257,7 @@ name = "click" version = "8.3.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "colorama", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" } wheels = [ @@ -423,32 +394,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" }, ] -[[package]] -name = "griffe" -version = "1.14.0" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version < '3.10'", -] -dependencies = [ - { name = "colorama", marker = "python_full_version < '3.10'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/ec/d7/6c09dd7ce4c7837e4cdb11dce980cb45ae3cd87677298dc3b781b6bce7d3/griffe-1.14.0.tar.gz", hash = "sha256:9d2a15c1eca966d68e00517de5d69dd1bc5c9f2335ef6c1775362ba5b8651a13", size = 424684, upload-time = "2025-09-05T15:02:29.167Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/2a/b1/9ff6578d789a89812ff21e4e0f80ffae20a65d5dd84e7a17873fe3b365be/griffe-1.14.0-py3-none-any.whl", hash = "sha256:0e9d52832cccf0f7188cfe585ba962d2674b241c01916d780925df34873bceb0", size = 144439, upload-time = "2025-09-05T15:02:27.511Z" }, -] - [[package]] name = "griffe" version = "1.15.0" source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.12'", - "python_full_version == '3.11.*'", - "python_full_version == '3.10.*'", -] dependencies = [ - { name = "colorama", marker = "python_full_version >= '3.10'" }, + { name = "colorama" }, ] sdist = { url = "https://files.pythonhosted.org/packages/0d/0c/3a471b6e31951dce2360477420d0a8d1e00dea6cf33b70f3e8c3ab6e28e1/griffe-1.15.0.tar.gz", hash = "sha256:7726e3afd6f298fbc3696e67958803e7ac843c1cfe59734b6251a40cdbfb5eea", size = 424112, upload-time = "2025-11-10T15:03:15.52Z" } wheels = [ @@ -522,27 +473,10 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" }, ] -[[package]] -name = "iniconfig" -version = "2.1.0" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version < '3.10'", -] -sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, -] - [[package]] name = "iniconfig" version = "2.3.0" source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.12'", - "python_full_version == '3.11.*'", - "python_full_version == '3.10.*'", -] sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, @@ -571,10 +505,10 @@ name = "jsonschema" version = "4.25.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "attrs", marker = "python_full_version >= '3.10'" }, - { name = "jsonschema-specifications", marker = "python_full_version >= '3.10'" }, - { name = "referencing", marker = "python_full_version >= '3.10'" }, - { name = "rpds-py", marker = "python_full_version >= '3.10'" }, + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } wheels = [ @@ -586,7 +520,7 @@ name = "jsonschema-specifications" version = "2025.9.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "referencing", marker = "python_full_version >= '3.10'" }, + { name = "referencing" }, ] sdist = { url = "https://files.pythonhosted.org/packages/19/74/a633ee74eb36c44aa6d1095e7cc5569bebf04342ee146178e2d36600708b/jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d", size = 32855, upload-time = "2025-09-08T01:34:59.186Z" } wheels = [ @@ -607,20 +541,20 @@ name = "mcp" version = "1.25.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "anyio", marker = "python_full_version >= '3.10'" }, - { name = "httpx", marker = "python_full_version >= '3.10'" }, - { name = "httpx-sse", marker = "python_full_version >= '3.10'" }, - { name = "jsonschema", marker = "python_full_version >= '3.10'" }, - { name = "pydantic", marker = "python_full_version >= '3.10'" }, - { name = "pydantic-settings", marker = "python_full_version >= '3.10'" }, - { name = "pyjwt", extra = ["crypto"], marker = "python_full_version >= '3.10'" }, - { name = "python-multipart", marker = "python_full_version >= '3.10'" }, - { name = "pywin32", marker = "python_full_version >= '3.10' and sys_platform == 'win32'" }, - { name = "sse-starlette", marker = "python_full_version >= '3.10'" }, - { name = "starlette", marker = "python_full_version >= '3.10'" }, - { name = "typing-extensions", marker = 
"python_full_version >= '3.10'" }, - { name = "typing-inspection", marker = "python_full_version >= '3.10'" }, - { name = "uvicorn", marker = "python_full_version >= '3.10' and sys_platform != 'emscripten'" }, + { name = "anyio" }, + { name = "httpx" }, + { name = "httpx-sse" }, + { name = "jsonschema" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "pyjwt", extra = ["crypto"] }, + { name = "python-multipart" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "sse-starlette" }, + { name = "starlette" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, + { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/d5/2d/649d80a0ecf6a1f82632ca44bec21c0461a9d9fc8934d38cb5b319f2db5e/mcp-1.25.0.tar.gz", hash = "sha256:56310361ebf0364e2d438e5b45f7668cbb124e158bb358333cd06e49e83a6802", size = 605387, upload-time = "2025-12-19T10:19:56.985Z" } wheels = [ @@ -648,9 +582,8 @@ dependencies = [ [package.optional-dependencies] agents = [ { name = "authlib" }, - { name = "griffe", version = "1.14.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "griffe", version = "1.15.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, - { name = "mcp", marker = "python_full_version >= '3.10'" }, + { name = "griffe" }, + { name = "mcp" }, ] gcp = [ { name = "google-auth" }, @@ -660,9 +593,8 @@ gcp = [ [package.dev-dependencies] dev = [ { name = "authlib" }, - { name = "griffe", version = "1.14.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "griffe", version = "1.15.0", source = { registry = 
"https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, - { name = "mcp", marker = "python_full_version >= '3.10'" }, + { name = "griffe" }, + { name = "mcp" }, { name = "mypy" }, { name = "pylint" }, { name = "pytest" }, @@ -685,7 +617,7 @@ requires-dist = [ { name = "griffe", marker = "extra == 'agents'", specifier = ">=1.7.3,<2.0" }, { name = "httpx", specifier = ">=0.28.1" }, { name = "invoke", specifier = ">=2.2.0,<3.0.0" }, - { name = "mcp", marker = "python_full_version >= '3.10' and extra == 'agents'", specifier = ">=1.0,<2.0" }, + { name = "mcp", marker = "extra == 'agents'", specifier = ">=1.0,<2.0" }, { name = "opentelemetry-api", specifier = ">=1.33.1,<2.0.0" }, { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.37.0,<2.0.0" }, { name = "opentelemetry-sdk", specifier = ">=1.33.1,<2.0.0" }, @@ -702,7 +634,7 @@ provides-extras = ["gcp", "agents"] dev = [ { name = "authlib", specifier = ">=1.5.2,<2" }, { name = "griffe", specifier = ">=1.7.3,<2" }, - { name = "mcp", marker = "python_full_version >= '3.10'", specifier = ">=1.0,<2" }, + { name = "mcp", specifier = ">=1.0,<2" }, { name = "mypy", specifier = "==1.15.0" }, { name = "pylint", specifier = "==3.2.3" }, { name = "pytest", specifier = ">=8.2.2,<9" }, @@ -752,12 +684,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, - { url = "https://files.pythonhosted.org/packages/5a/fa/79cf41a55b682794abe71372151dbbf856e3008f6767057229e6649d294a/mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078", size = 10737129, upload-time = "2025-02-05T03:50:24.509Z" }, - { url = "https://files.pythonhosted.org/packages/d3/33/dd8feb2597d648de29e3da0a8bf4e1afbda472964d2a4a0052203a6f3594/mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba", size = 9856335, upload-time = "2025-02-05T03:49:36.398Z" }, - { url = "https://files.pythonhosted.org/packages/e4/b5/74508959c1b06b96674b364ffeb7ae5802646b32929b7701fc6b18447592/mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5", size = 11611935, upload-time = "2025-02-05T03:49:14.154Z" }, - { url = "https://files.pythonhosted.org/packages/6c/53/da61b9d9973efcd6507183fdad96606996191657fe79701b2c818714d573/mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b", size = 12365827, upload-time = "2025-02-05T03:48:59.458Z" }, - { url = 
"https://files.pythonhosted.org/packages/c1/72/965bd9ee89540c79a25778cc080c7e6ef40aa1eeac4d52cec7eae6eb5228/mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2", size = 12541924, upload-time = "2025-02-05T03:50:03.12Z" }, - { url = "https://files.pythonhosted.org/packages/46/d0/f41645c2eb263e6c77ada7d76f894c580c9ddb20d77f0c24d34273a4dab2/mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980", size = 9271176, upload-time = "2025-02-05T03:50:10.86Z" }, { url = "https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, ] @@ -870,27 +796,10 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] -[[package]] -name = "platformdirs" -version = "4.4.0" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version < '3.10'", -] -sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size = 21634, upload-time = "2025-08-26T14:32:04.268Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/40/4b/2028861e724d3bd36227adfa20d3fd24c3fc6d52032f4a93c133be5d17ce/platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85", size = 18654, upload-time = "2025-08-26T14:32:02.735Z" }, -] - [[package]] name = "platformdirs" version = "4.5.1" source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.12'", - "python_full_version == '3.11.*'", - "python_full_version == '3.10.*'", -] sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = "sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, @@ -917,8 +826,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7d/4f/f743761e41d3b2b2566748eb76bbff2b43e14d5fcab694f494a16458b05f/protobuf-6.33.2-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:b5d3b5625192214066d99b2b605f5783483575656784de223f00a8d00754fc0e", size = 324460, upload-time = "2025-12-06T00:17:45.678Z" }, { url = "https://files.pythonhosted.org/packages/b1/fa/26468d00a92824020f6f2090d827078c09c9c587e34cbfd2d0c7911221f8/protobuf-6.33.2-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:8cd7640aee0b7828b6d03ae518b5b4806fdfc1afe8de82f79c3454f8aef29872", size = 339168, 
upload-time = "2025-12-06T00:17:46.813Z" }, { url = "https://files.pythonhosted.org/packages/56/13/333b8f421738f149d4fe5e49553bc2a2ab75235486259f689b4b91f96cec/protobuf-6.33.2-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:1f8017c48c07ec5859106533b682260ba3d7c5567b1ca1f24297ce03384d1b4f", size = 323270, upload-time = "2025-12-06T00:17:48.253Z" }, - { url = "https://files.pythonhosted.org/packages/87/85/5c1115e68fd34e8ada6fa75974b4c778a298a3c7170575b49efc1eb99dd2/protobuf-6.33.2-cp39-cp39-win32.whl", hash = "sha256:7109dcc38a680d033ffb8bf896727423528db9163be1b6a02d6a49606dcadbfe", size = 425692, upload-time = "2025-12-06T00:17:49.62Z" }, - { url = "https://files.pythonhosted.org/packages/c5/74/18d9de7fd3c41a8b4808d6268515b320abae003423da1b1319f71bdf0779/protobuf-6.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:2981c58f582f44b6b13173e12bb8656711189c2a70250845f264b877f00b1913", size = 436932, upload-time = "2025-12-06T00:17:51.098Z" }, { url = "https://files.pythonhosted.org/packages/0e/15/4f02896cc3df04fc465010a4c6a0cd89810f54617a32a70ef531ed75d61c/protobuf-6.33.2-py3-none-any.whl", hash = "sha256:7636aad9bb01768870266de5dc009de2d1b936771b38a793f73cbbf279c91c5c", size = 170501, upload-time = "2025-12-06T00:17:52.211Z" }, ] @@ -1059,19 +966,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, { url = 
"https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, - { url = "https://files.pythonhosted.org/packages/54/db/160dffb57ed9a3705c4cbcbff0ac03bdae45f1ca7d58ab74645550df3fbd/pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf", size = 2107999, upload-time = "2025-11-04T13:42:03.885Z" }, - { url = "https://files.pythonhosted.org/packages/a3/7d/88e7de946f60d9263cc84819f32513520b85c0f8322f9b8f6e4afc938383/pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5", size = 1929745, upload-time = "2025-11-04T13:42:06.075Z" }, - { url = "https://files.pythonhosted.org/packages/d5/c2/aef51e5b283780e85e99ff19db0f05842d2d4a8a8cd15e63b0280029b08f/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d", size = 1920220, upload-time = "2025-11-04T13:42:08.457Z" }, - { url = 
"https://files.pythonhosted.org/packages/c7/97/492ab10f9ac8695cd76b2fdb24e9e61f394051df71594e9bcc891c9f586e/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60", size = 2067296, upload-time = "2025-11-04T13:42:10.817Z" }, - { url = "https://files.pythonhosted.org/packages/ec/23/984149650e5269c59a2a4c41d234a9570adc68ab29981825cfaf4cfad8f4/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82", size = 2231548, upload-time = "2025-11-04T13:42:13.843Z" }, - { url = "https://files.pythonhosted.org/packages/71/0c/85bcbb885b9732c28bec67a222dbed5ed2d77baee1f8bba2002e8cd00c5c/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5", size = 2362571, upload-time = "2025-11-04T13:42:16.208Z" }, - { url = "https://files.pythonhosted.org/packages/c0/4a/412d2048be12c334003e9b823a3fa3d038e46cc2d64dd8aab50b31b65499/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3", size = 2068175, upload-time = "2025-11-04T13:42:18.911Z" }, - { url = "https://files.pythonhosted.org/packages/73/f4/c58b6a776b502d0a5540ad02e232514285513572060f0d78f7832ca3c98b/pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425", size = 2177203, upload-time = "2025-11-04T13:42:22.578Z" }, - { url = 
"https://files.pythonhosted.org/packages/ed/ae/f06ea4c7e7a9eead3d165e7623cd2ea0cb788e277e4f935af63fc98fa4e6/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504", size = 2148191, upload-time = "2025-11-04T13:42:24.89Z" }, - { url = "https://files.pythonhosted.org/packages/c1/57/25a11dcdc656bf5f8b05902c3c2934ac3ea296257cc4a3f79a6319e61856/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5", size = 2343907, upload-time = "2025-11-04T13:42:27.683Z" }, - { url = "https://files.pythonhosted.org/packages/96/82/e33d5f4933d7a03327c0c43c65d575e5919d4974ffc026bc917a5f7b9f61/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3", size = 2322174, upload-time = "2025-11-04T13:42:30.776Z" }, - { url = "https://files.pythonhosted.org/packages/81/45/4091be67ce9f469e81656f880f3506f6a5624121ec5eb3eab37d7581897d/pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460", size = 1990353, upload-time = "2025-11-04T13:42:33.111Z" }, - { url = "https://files.pythonhosted.org/packages/44/8a/a98aede18db6e9cd5d66bcacd8a409fcf8134204cdede2e7de35c5a2c5ef/pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b", size = 2015698, upload-time = "2025-11-04T13:42:35.484Z" }, { url = 
"https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, { url = "https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, @@ -1103,9 +997,9 @@ name = "pydantic-settings" version = "2.12.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "pydantic", marker = "python_full_version >= '3.10'" }, - { name = "python-dotenv", marker = "python_full_version >= '3.10'" }, - { name = "typing-inspection", marker = "python_full_version >= '3.10'" }, + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, ] sdist = { url = "https://files.pythonhosted.org/packages/43/4b/ac7e0aae12027748076d72a8764ff1c9d82ca75a7a52622e67ed3f765c54/pydantic_settings-2.12.0.tar.gz", hash = "sha256:005538ef951e3c2a68e1c08b292b5f2e71490def8589d4221b95dab00dafcfd0", size = 194184, upload-time = "2025-11-10T14:25:47.013Z" } wheels = [ @@ 
-1132,7 +1026,7 @@ wheels = [ [package.optional-dependencies] crypto = [ - { name = "cryptography", marker = "python_full_version >= '3.10'" }, + { name = "cryptography" }, ] [[package]] @@ -1145,11 +1039,9 @@ dependencies = [ { name = "dill" }, { name = "isort" }, { name = "mccabe" }, - { name = "platformdirs", version = "4.4.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "platformdirs", version = "4.5.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "platformdirs" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "tomlkit" }, - { name = "typing-extensions", marker = "python_full_version < '3.10'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/9a/e9/60280b14cc1012794120345ce378504cf17409e38cd88f455dc24e0ad6b5/pylint-3.2.3.tar.gz", hash = "sha256:02f6c562b215582386068d52a30f520d84fdbcf2a95fc7e855b816060d048b60", size = 1506739, upload-time = "2024-06-06T14:19:17.955Z" } wheels = [ @@ -1176,8 +1068,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, - { name = "iniconfig", version = "2.1.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.10'" }, - { name = "iniconfig", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "iniconfig" }, { name = "packaging" }, { name = "pluggy" }, { name = "pygments" }, @@ -1250,9 +1141,6 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" }, { url = "https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" }, { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, - { url = "https://files.pythonhosted.org/packages/59/42/b86689aac0cdaee7ae1c58d464b0ff04ca909c19bb6502d4973cdd9f9544/pywin32-311-cp39-cp39-win32.whl", hash = "sha256:aba8f82d551a942cb20d4a83413ccbac30790b50efb89a75e4f586ac0bb8056b", size = 8760837, upload-time = "2025-07-14T20:12:59.59Z" }, - { url = "https://files.pythonhosted.org/packages/9f/8a/1403d0353f8c5a2f0829d2b1c4becbf9da2f0a4d040886404fc4a5431e4d/pywin32-311-cp39-cp39-win_amd64.whl", hash = "sha256:e0c4cfb0621281fe40387df582097fd796e80430597cb9944f0ae70447bacd91", size = 9590187, upload-time = "2025-07-14T20:13:01.419Z" }, - { url = "https://files.pythonhosted.org/packages/60/22/e0e8d802f124772cec9c75430b01a212f86f9de7546bda715e54140d5aeb/pywin32-311-cp39-cp39-win_arm64.whl", hash = "sha256:62ea666235135fee79bb154e695f3ff67370afefd71bd7fea7512fc70ef31e3d", size = 8778162, upload-time = 
"2025-07-14T20:13:03.544Z" }, ] [[package]] @@ -1317,15 +1205,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" }, { url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" }, { url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" }, - { url = "https://files.pythonhosted.org/packages/9f/62/67fc8e68a75f738c9200422bf65693fb79a4cd0dc5b23310e5202e978090/pyyaml-6.0.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da", size = 184450, upload-time = "2025-09-25T21:33:00.618Z" }, - { url = "https://files.pythonhosted.org/packages/ae/92/861f152ce87c452b11b9d0977952259aa7df792d71c1053365cc7b09cc08/pyyaml-6.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917", size = 174319, upload-time = "2025-09-25T21:33:02.086Z" }, - { url = 
"https://files.pythonhosted.org/packages/d0/cd/f0cfc8c74f8a030017a2b9c771b7f47e5dd702c3e28e5b2071374bda2948/pyyaml-6.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9", size = 737631, upload-time = "2025-09-25T21:33:03.25Z" }, - { url = "https://files.pythonhosted.org/packages/ef/b2/18f2bd28cd2055a79a46c9b0895c0b3d987ce40ee471cecf58a1a0199805/pyyaml-6.0.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5", size = 836795, upload-time = "2025-09-25T21:33:05.014Z" }, - { url = "https://files.pythonhosted.org/packages/73/b9/793686b2d54b531203c160ef12bec60228a0109c79bae6c1277961026770/pyyaml-6.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a", size = 750767, upload-time = "2025-09-25T21:33:06.398Z" }, - { url = "https://files.pythonhosted.org/packages/a9/86/a137b39a611def2ed78b0e66ce2fe13ee701a07c07aebe55c340ed2a050e/pyyaml-6.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926", size = 727982, upload-time = "2025-09-25T21:33:08.708Z" }, - { url = "https://files.pythonhosted.org/packages/dd/62/71c27c94f457cf4418ef8ccc71735324c549f7e3ea9d34aba50874563561/pyyaml-6.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7", size = 755677, upload-time = "2025-09-25T21:33:09.876Z" }, - { url = 
"https://files.pythonhosted.org/packages/29/3d/6f5e0d58bd924fb0d06c3a6bad00effbdae2de5adb5cda5648006ffbd8d3/pyyaml-6.0.3-cp39-cp39-win32.whl", hash = "sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0", size = 142592, upload-time = "2025-09-25T21:33:10.983Z" }, - { url = "https://files.pythonhosted.org/packages/f0/0c/25113e0b5e103d7f1490c0e947e303fe4a696c10b501dea7a9f49d4e876c/pyyaml-6.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007", size = 158777, upload-time = "2025-09-25T21:33:15.55Z" }, ] [[package]] @@ -1333,9 +1212,9 @@ name = "referencing" version = "0.37.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "attrs", marker = "python_full_version >= '3.10'" }, - { name = "rpds-py", marker = "python_full_version >= '3.10'" }, - { name = "typing-extensions", marker = "python_full_version >= '3.10' and python_full_version < '3.13'" }, + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/22/f5/df4e9027acead3ecc63e50fe1e36aca1523e1719559c499951bb4b53188f/referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8", size = 78036, upload-time = "2025-10-13T15:30:48.871Z" } wheels = [ @@ -1530,8 +1409,8 @@ name = "sse-starlette" version = "3.1.2" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "anyio", marker = "python_full_version >= '3.10'" }, - { name = "starlette", marker = "python_full_version >= '3.10'" }, + { name = "anyio" }, + { name = "starlette" }, ] sdist = { url = 
"https://files.pythonhosted.org/packages/da/34/f5df66cb383efdbf4f2db23cabb27f51b1dcb737efaf8a558f6f1d195134/sse_starlette-3.1.2.tar.gz", hash = "sha256:55eff034207a83a0eb86de9a68099bd0157838f0b8b999a1b742005c71e33618", size = 26303, upload-time = "2025-12-31T08:02:20.023Z" } wheels = [ @@ -1543,8 +1422,8 @@ name = "starlette" version = "0.50.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "anyio", marker = "python_full_version >= '3.10'" }, - { name = "typing-extensions", marker = "python_full_version >= '3.10' and python_full_version < '3.13'" }, + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/ba/b8/73a0e6a6e079a9d9cfa64113d771e421640b6f679a52eeb9b32f72d871a1/starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca", size = 2646985, upload-time = "2025-11-01T15:25:27.516Z" } wheels = [ @@ -1674,9 +1553,9 @@ name = "uvicorn" version = "0.40.0" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "click", marker = "python_full_version >= '3.10'" }, - { name = "h11", marker = "python_full_version >= '3.10'" }, - { name = "typing-extensions", marker = "python_full_version == '3.10.*'" }, + { name = "click" }, + { name = "h11" }, + { name = "typing-extensions", marker = "python_full_version < '3.11'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/c3/d1/8f3c683c9561a4e6689dd3b1d345c815f10f86acd044ee1fb9a4dcd0b8c5/uvicorn-0.40.0.tar.gz", hash = "sha256:839676675e87e73694518b5574fd0f24c9d97b46bea16df7b8c05ea1a51071ea", size = 81761, upload-time = "2025-12-21T14:16:22.45Z" } wheels = [ From f77efc0fdd21392723e21fa11ed464525da0658b Mon 
Sep 17 00:00:00 2001 From: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> Date: Wed, 31 Dec 2025 14:28:31 +0100 Subject: [PATCH 167/223] chore: update speakeasy-api/sdk-generation-action reference (#305) Update the reference to the speakeasy-api/sdk-generation-action workflows to the latest version (a658ca0a4a9b11bbcd7d3fb4e3063fa843afabff) for all SDK generation workflows. This ensures the latest features and bug fixes are used during SDK generation. --- .github/workflows/sdk_generation_mistralai_azure_sdk.yaml | 2 +- .github/workflows/sdk_generation_mistralai_gcp_sdk.yaml | 2 +- .github/workflows/sdk_generation_mistralai_sdk.yaml | 2 +- .github/workflows/sdk_publish_mistralai_sdk.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml index 2a510615..ed4e94cf 100644 --- a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -16,7 +16,7 @@ permissions: type: string jobs: generate: - uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@5b268931ef6902bbb70fa01b467dc361c68f7617 # v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@a658ca0a4a9b11bbcd7d3fb4e3063fa843afabff # v15 with: force: ${{ github.event.inputs.force }} mode: pr diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml index f30440e8..4fefe244 100644 --- a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -16,7 +16,7 @@ permissions: type: string jobs: generate: - uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@5b268931ef6902bbb70fa01b467dc361c68f7617 # v15 + uses: 
speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@a658ca0a4a9b11bbcd7d3fb4e3063fa843afabff # v15 with: force: ${{ github.event.inputs.force }} mode: pr diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index 77a630f7..b199798b 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -16,7 +16,7 @@ permissions: type: string jobs: generate: - uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@5b268931ef6902bbb70fa01b467dc361c68f7617 # v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@a658ca0a4a9b11bbcd7d3fb4e3063fa843afabff # v15 with: force: ${{ github.event.inputs.force }} mode: pr diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml index e7928481..55b29ec1 100644 --- a/.github/workflows/sdk_publish_mistralai_sdk.yaml +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -13,7 +13,7 @@ permissions: - "*/RELEASES.md" jobs: publish: - uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@5b268931ef6902bbb70fa01b467dc361c68f7617 # v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@a658ca0a4a9b11bbcd7d3fb4e3063fa843afabff # v15 secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} From 2046ceff8e283a5b13d3dd1f8a84c2e7a5cbb289 Mon Sep 17 00:00:00 2001 From: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> Date: Wed, 31 Dec 2025 15:34:25 +0100 Subject: [PATCH 168/223] feat!: unify README handling across packages (#306) * feat!: unify README handling across packages This commit standardizes the way README files are handled during builds and publishes across all packages. The main changes include: 1. 
Removing the need for separate README-PYPI.md files by using a more flexible approach 2. Updating the prepare_readme.py script to handle relative links more robustly 3. Simplifying the build and publish workflows by removing redundant steps 4. Adding tests for the README preparation functionality The changes make the build process more consistent across different packages and reduce duplication in the workflow files. The new approach also provides better handling of relative links in README files during package publication. BREAKING CHANGE: The way README files are prepared for publication has changed, which may affect custom build processes that relied on the previous approach. * ignore * chore: add linting for scripts directory This commit adds mypy, pyright, and ruff linting checks for the scripts directory to ensure code quality and consistency across the project. The changes include: - Adding mypy checks for the scripts directory - Adding pyright checks for the scripts directory - Adding ruff checks for the scripts directory This helps maintain consistent code quality standards across all parts of the codebase. 
--- .genignore | 3 +- .github/workflows/lint_custom_code.yaml | 1 - .github/workflows/run_example_scripts.yaml | 1 - .github/workflows/test_custom_code.yaml | 4 +- .github/workflows/update_speakeasy.yaml | 1 - packages/mistralai_azure/pyproject.toml | 2 - packages/mistralai_azure/scripts/publish.sh | 2 +- packages/mistralai_gcp/pyproject.toml | 4 +- .../mistralai_gcp/scripts/prepare_readme.py | 9 -- packages/mistralai_gcp/scripts/publish.sh | 4 +- pyproject.toml | 3 +- scripts/lint_custom_code.sh | 6 + scripts/prepare_readme.py | 134 ++++++++++++++---- scripts/publish.sh | 4 +- tests/test_prepare_readme.py | 37 +++++ 15 files changed, 156 insertions(+), 59 deletions(-) delete mode 100644 packages/mistralai_gcp/scripts/prepare_readme.py create mode 100644 tests/test_prepare_readme.py diff --git a/.genignore b/.genignore index 3ef32897..b80cf0f6 100644 --- a/.genignore +++ b/.genignore @@ -2,4 +2,5 @@ pyproject.toml examples/* /utils/* src/mistral/extra/* -pylintrc \ No newline at end of file +pylintrc +scripts/prepare_readme.py diff --git a/.github/workflows/lint_custom_code.yaml b/.github/workflows/lint_custom_code.yaml index f6147b55..9dcb04e4 100644 --- a/.github/workflows/lint_custom_code.yaml +++ b/.github/workflows/lint_custom_code.yaml @@ -26,7 +26,6 @@ jobs: - name: Install dependencies run: | - touch README-PYPI.md uv sync --all-extras # The init, sdkhooks.py and types.py files in the _hooks folders are generated by Speakeasy hence the exclusion diff --git a/.github/workflows/run_example_scripts.yaml b/.github/workflows/run_example_scripts.yaml index 84896d26..cecefb0e 100644 --- a/.github/workflows/run_example_scripts.yaml +++ b/.github/workflows/run_example_scripts.yaml @@ -39,7 +39,6 @@ jobs: - name: Build the package run: | - touch README-PYPI.md # Create this file since the client is not built by Speakeasy uv build - name: Install client with extras and run all examples. 
diff --git a/.github/workflows/test_custom_code.yaml b/.github/workflows/test_custom_code.yaml index 8a22fcb1..9a53c1e5 100644 --- a/.github/workflows/test_custom_code.yaml +++ b/.github/workflows/test_custom_code.yaml @@ -27,8 +27,10 @@ jobs: - name: Install dependencies run: | - touch README-PYPI.md uv sync --all-extras - name: Run the 'src/mistralai/extra' package unit tests run: uv run python3.12 -m unittest discover -s src/mistralai/extra/tests -t src + + - name: Run pytest for repository tests + run: uv run pytest tests/ diff --git a/.github/workflows/update_speakeasy.yaml b/.github/workflows/update_speakeasy.yaml index f596cf66..9628bffa 100644 --- a/.github/workflows/update_speakeasy.yaml +++ b/.github/workflows/update_speakeasy.yaml @@ -38,7 +38,6 @@ jobs: - name: Install dependencies run: | - cp README.md README-PYPI.md uv sync --group dev --no-default-groups - name: Install Speakeasy CLI diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 2842c215..016378d5 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -43,7 +43,6 @@ requires = ["hatchling"] build-backend = "hatchling.build" [tool.pytest.ini_options] -asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] @@ -62,4 +61,3 @@ ignore_missing_imports = true [tool.pyright] venvPath = "." 
venv = ".venv" - diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index f2f31e59..0c07c589 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -2,5 +2,5 @@ export UV_PUBLISH_TOKEN=${PYPI_TOKEN} -uv build +uv run python ../../scripts/prepare_readme.py --repo-subdir packages/mistralai_azure -- uv build uv publish diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 650ef73b..79b8193b 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -4,7 +4,7 @@ version = "1.6.0" description = "Python Client SDK for the Mistral AI API in GCP." authors = [{ name = "Mistral" }] requires-python = ">=3.10" -readme = "README-PYPI.md" +readme = "README.md" dependencies = [ "eval-type-backport >=0.2.0", "google-auth (>=2.31.0,<3.0.0)", @@ -48,7 +48,6 @@ requires = ["hatchling"] build-backend = "hatchling.build" [tool.pytest.ini_options] -asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] @@ -65,4 +64,3 @@ ignore_missing_imports = true [tool.pyright] venvPath = "." venv = ".venv" - diff --git a/packages/mistralai_gcp/scripts/prepare_readme.py b/packages/mistralai_gcp/scripts/prepare_readme.py deleted file mode 100644 index 825d9ded..00000000 --- a/packages/mistralai_gcp/scripts/prepare_readme.py +++ /dev/null @@ -1,9 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import shutil - -try: - shutil.copyfile("README.md", "README-PYPI.md") -except Exception as e: - print("Failed to copy README.md to README-PYPI.md") - print(e) diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index d2bef9f7..e9eb1f0b 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -2,7 +2,5 @@ export UV_PUBLISH_TOKEN=${PYPI_TOKEN} -uv run python scripts/prepare_readme.py - -uv build +uv run python ../../scripts/prepare_readme.py --repo-subdir packages/mistralai_gcp -- uv build uv publish diff --git a/pyproject.toml b/pyproject.toml index 933a3162..3c5b4574 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ version = "1.10.0" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" -readme = "README-PYPI.md" +readme = "README.md" dependencies = [ "eval-type-backport >=0.2.0", "httpx >=0.28.1", @@ -89,7 +89,6 @@ requires = ["hatchling"] build-backend = "hatchling.build" [tool.pytest.ini_options] -asyncio_default_fixture_loop_scope = "function" pythonpath = ["src"] [tool.mypy] diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh index 3b03883d..7c084463 100755 --- a/scripts/lint_custom_code.sh +++ b/scripts/lint_custom_code.sh @@ -10,6 +10,8 @@ uv run mypy src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" uv run mypy src/mistralai/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on scripts" +uv run mypy scripts/ || ERRORS=1 echo "Running pyright..." # TODO: Uncomment once the examples are fixed @@ -18,6 +20,8 @@ echo "-> running on extra" uv run pyright src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" uv run pyright src/mistralai/_hooks/ || ERRORS=1 +echo "-> running on scripts" +uv run pyright scripts/ || ERRORS=1 echo "Running ruff..." 
echo "-> running on examples" @@ -27,6 +31,8 @@ uv run ruff check src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" uv run ruff check src/mistralai/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on scripts" +uv run ruff check scripts/ || ERRORS=1 if [ "$ERRORS" -ne 0 ]; then echo "❌ One or more linters failed" diff --git a/scripts/prepare_readme.py b/scripts/prepare_readme.py index 1b0a56ec..c220a055 100644 --- a/scripts/prepare_readme.py +++ b/scripts/prepare_readme.py @@ -1,35 +1,107 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - +import argparse import re -import shutil - -try: - with open("README.md", "r", encoding="utf-8") as rh: - readme_contents = rh.read() - GITHUB_URL = "https://github.com/mistralai/client-python.git" - GITHUB_URL = ( - GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL +import subprocess +import sys +from pathlib import Path + +DEFAULT_REPO_URL = "https://github.com/mistralai/client-python.git" +DEFAULT_BRANCH = "main" +LINK_PATTERN = re.compile(r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))") + + +def build_base_url(repo_url: str, branch: str, repo_subdir: str) -> str: + """Build the GitHub base URL used to rewrite relative README links.""" + normalized_repo_url = repo_url[:-4] if repo_url.endswith(".git") else repo_url + normalized_subdir = repo_subdir.strip("/") + if normalized_subdir: + normalized_subdir = f"{normalized_subdir}/" + return f"{normalized_repo_url}/blob/{branch}/{normalized_subdir}" + + +def rewrite_relative_links(contents: str, base_url: str) -> str: + """Rewrite Markdown relative links to absolute GitHub URLs.""" + return LINK_PATTERN.sub( + lambda match: f"{match.group(1)}{base_url}{match.group(2)}{match.group(3)}", + contents, + ) + + +def run_with_rewritten_readme( + 
readme_path: Path, base_url: str, command: list[str] +) -> int: + """Rewrite README links, run a command, and restore the original README.""" + original_contents = readme_path.read_text(encoding="utf-8") + rewritten_contents = rewrite_relative_links(original_contents, base_url) + readme_path.write_text(rewritten_contents, encoding="utf-8") + try: + if not command: + return 0 + result = subprocess.run(command, check=False) + return result.returncode + finally: + readme_path.write_text(original_contents, encoding="utf-8") + + +def parse_args(argv: list[str]) -> argparse.Namespace: + """Parse command-line arguments for README rewriting.""" + parser = argparse.ArgumentParser( + description=( + "Rewrite README links to absolute GitHub URLs while running a command." ) - REPO_SUBDIR = "" - # links on PyPI should have absolute URLs - readme_contents = re.sub( - r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))", - lambda m: m.group(1) - + GITHUB_URL - + "/blob/master/" - + REPO_SUBDIR - + m.group(2) - + m.group(3), - readme_contents, + ) + parser.add_argument( + "--readme", + type=Path, + default=Path("README.md"), + help="Path to the README file to rewrite.", + ) + parser.add_argument( + "--repo-url", + default=DEFAULT_REPO_URL, + help="Repository URL used to build absolute links.", + ) + parser.add_argument( + "--branch", + default=DEFAULT_BRANCH, + help="Repository branch used for absolute links.", + ) + parser.add_argument( + "--repo-subdir", + default="", + help="Repository subdirectory that contains the README.", + ) + parser.add_argument( + "command", + nargs=argparse.REMAINDER, + help=( + "Command to run (prefix with -- to stop option parsing). " + "If omitted, the rewritten README is printed to stdout." 
+ ), + ) + return parser.parse_args(argv) + + +def main(argv: list[str]) -> int: + """Entry point for rewriting README links during build commands.""" + args = parse_args(argv) + readme_path = args.readme + if not readme_path.is_file(): + raise FileNotFoundError(f"README file not found: {readme_path}") + base_url = build_base_url(args.repo_url, args.branch, args.repo_subdir) + command = ( + args.command[1:] + if args.command and args.command[0] == "--" + else args.command + ) + if not command: + rewritten_contents = rewrite_relative_links( + readme_path.read_text(encoding="utf-8"), + base_url, ) + sys.stdout.write(rewritten_contents) + return 0 + return run_with_rewritten_readme(readme_path, base_url, command) - with open("README-PYPI.md", "w", encoding="utf-8") as wh: - wh.write(readme_contents) -except Exception as e: - try: - print("Failed to rewrite README.md to README-PYPI.md, copying original instead") - print(e) - shutil.copyfile("README.md", "README-PYPI.md") - except Exception as ie: - print("Failed to copy README.md to README-PYPI.md") - print(ie) + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/scripts/publish.sh b/scripts/publish.sh index 6ff725f3..c41f3efb 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -1,7 +1,5 @@ #!/usr/bin/env bash export UV_PUBLISH_TOKEN=${PYPI_TOKEN} -uv run python scripts/prepare_readme.py - -uv build +uv run python scripts/prepare_readme.py -- uv build uv publish diff --git a/tests/test_prepare_readme.py b/tests/test_prepare_readme.py new file mode 100644 index 00000000..ce3e11c9 --- /dev/null +++ b/tests/test_prepare_readme.py @@ -0,0 +1,37 @@ +import importlib.util +from pathlib import Path + +SCRIPT_PATH = Path(__file__).resolve().parents[1] / "scripts" / "prepare_readme.py" +SPEC = importlib.util.spec_from_file_location("prepare_readme", SCRIPT_PATH) +if SPEC is None or SPEC.loader is None: + raise ImportError(f"Unable to load prepare_readme from {SCRIPT_PATH}") +prepare_readme = 
importlib.util.module_from_spec(SPEC) +SPEC.loader.exec_module(prepare_readme) + + +def test_rewrite_relative_links_keeps_absolute() -> None: + base_url = "https://example.com/blob/main/" + contents = "[Migration](MIGRATION.md)\n[Docs](https://docs.mistral.ai)" + expected = ( + "[Migration](https://example.com/blob/main/MIGRATION.md)\n" + "[Docs](https://docs.mistral.ai)" + ) + assert prepare_readme.rewrite_relative_links(contents, base_url) == expected + + +def test_main_prints_rewritten_readme_with_defaults(tmp_path, capsys) -> None: + original = "[Migration](MIGRATION.md)\n" + base_url = prepare_readme.build_base_url( + prepare_readme.DEFAULT_REPO_URL, + prepare_readme.DEFAULT_BRANCH, + "", + ) + expected = f"[Migration]({base_url}MIGRATION.md)\n" + readme_path = tmp_path / "README.md" + readme_path.write_text(original, encoding="utf-8") + + exit_code = prepare_readme.main(["--readme", str(readme_path)]) + + captured = capsys.readouterr() + assert exit_code == 0 + assert captured.out == expected From 99cae3ab336536d98b981ca615d5b9fbf6502113 Mon Sep 17 00:00:00 2001 From: glinf Date: Tue, 6 Jan 2026 15:16:34 +0100 Subject: [PATCH 169/223] fix(otel): don't setup TracerProvider, rely on application config The SDK was creating its own TracerProvider when OTEL_EXPORTER_OTLP_ENDPOINT was set but no TracerProvider was configured. This caused issues when used in applications that configure their own TracerProvider: 1. The SDK's TracerProvider would be set first, preventing the app's setup 2. 
The endpoint was passed without /v1/traces path, causing 404 errors Now the SDK follows OTEL best practices: - Libraries/SDKs get tracers from the global provider - Applications configure the TracerProvider - If no provider is set, tracing is effectively disabled (NoOp) This allows applications to control when and how OTEL is configured, and the SDK will automatically use whatever TracerProvider is available. Removed: - QuietOTLPSpanExporter class (no longer needed) - OTEL_EXPORTER_OTLP_* constants (no longer used) - TracerProvider setup logic --- src/mistralai/_hooks/tracing.py | 31 ++++++++- src/mistralai/extra/observability/otel.py | 83 ++++++----------------- 2 files changed, 50 insertions(+), 64 deletions(-) diff --git a/src/mistralai/_hooks/tracing.py b/src/mistralai/_hooks/tracing.py index f2ac9c86..fc4656fd 100644 --- a/src/mistralai/_hooks/tracing.py +++ b/src/mistralai/_hooks/tracing.py @@ -30,13 +30,30 @@ def __init__(self) -> None: def before_request( self, hook_ctx: BeforeRequestContext, request: httpx.Request ) -> Union[httpx.Request, Exception]: - request, self.request_span = get_traced_request_and_span(tracing_enabled=self.tracing_enabled, tracer=self.tracer, span=self.request_span, operation_id=hook_ctx.operation_id, request=request) + # Refresh tracer/provider per request so tracing can be enabled if the + # application configures OpenTelemetry after the client is instantiated. 
+ self.tracing_enabled, self.tracer = get_or_create_otel_tracer() + self.request_span = None + request, self.request_span = get_traced_request_and_span( + tracing_enabled=self.tracing_enabled, + tracer=self.tracer, + span=self.request_span, + operation_id=hook_ctx.operation_id, + request=request, + ) return request def after_success( self, hook_ctx: AfterSuccessContext, response: httpx.Response ) -> Union[httpx.Response, Exception]: - response = get_traced_response(tracing_enabled=self.tracing_enabled, tracer=self.tracer, span=self.request_span, operation_id=hook_ctx.operation_id, response=response) + response = get_traced_response( + tracing_enabled=self.tracing_enabled, + tracer=self.tracer, + span=self.request_span, + operation_id=hook_ctx.operation_id, + response=response, + ) + self.request_span = None return response def after_error( @@ -46,5 +63,13 @@ def after_error( error: Optional[Exception], ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: if response: - response, error = get_response_and_error(tracing_enabled=self.tracing_enabled, tracer=self.tracer, span=self.request_span, operation_id=hook_ctx.operation_id, response=response, error=error) + response, error = get_response_and_error( + tracing_enabled=self.tracing_enabled, + tracer=self.tracer, + span=self.request_span, + operation_id=hook_ctx.operation_id, + response=response, + error=error, + ) + self.request_span = None return response, error diff --git a/src/mistralai/extra/observability/otel.py b/src/mistralai/extra/observability/otel.py index 8be0841d..4a8808ce 100644 --- a/src/mistralai/extra/observability/otel.py +++ b/src/mistralai/extra/observability/otel.py @@ -11,27 +11,17 @@ import opentelemetry.semconv._incubating.attributes.http_attributes as http_attributes import opentelemetry.semconv.attributes.server_attributes as server_attributes from opentelemetry import propagate, trace -from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter 
-from opentelemetry.sdk.resources import SERVICE_NAME, Resource -from opentelemetry.sdk.trace import SpanProcessor, TracerProvider -from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExportResult +from opentelemetry.sdk.trace import SpanProcessor from opentelemetry.trace import Span, Status, StatusCode, Tracer, set_span_in_context logger = logging.getLogger(__name__) OTEL_SERVICE_NAME: str = "mistralai_sdk" -OTEL_EXPORTER_OTLP_ENDPOINT: str = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT", "") -OTEL_EXPORTER_OTLP_TIMEOUT: int = int(os.getenv("OTEL_EXPORTER_OTLP_TIMEOUT", "2")) -OTEL_EXPORTER_OTLP_MAX_EXPORT_BATCH_SIZE: int = int(os.getenv("OTEL_EXPORTER_OTLP_MAX_EXPORT_BATCH_SIZE", "512")) -OTEL_EXPORTER_OTLP_SCHEDULE_DELAY_MILLIS: int = int(os.getenv("OTEL_EXPORTER_OTLP_SCHEDULE_DELAY_MILLIS", "1000")) -OTEL_EXPORTER_OTLP_MAX_QUEUE_SIZE: int = int(os.getenv("OTEL_EXPORTER_OTLP_MAX_QUEUE_SIZE", "2048")) -OTEL_EXPORTER_OTLP_EXPORT_TIMEOUT_MILLIS: int = int(os.getenv("OTEL_EXPORTER_OTLP_EXPORT_TIMEOUT_MILLIS", "5000")) - MISTRAL_SDK_OTEL_TRACER_NAME: str = OTEL_SERVICE_NAME + "_tracer" MISTRAL_SDK_DEBUG_TRACING: bool = os.getenv("MISTRAL_SDK_DEBUG_TRACING", "false").lower() == "true" -DEBUG_HINT: str = "To see detailed exporter logs, set MISTRAL_SDK_DEBUG_TRACING=true." +DEBUG_HINT: str = "To see detailed tracing logs, set MISTRAL_SDK_DEBUG_TRACING=true." class MistralAIAttributes: @@ -51,13 +41,11 @@ class MistralAINameValues(Enum): OCR = "ocr" class TracingErrors(Exception, Enum): - FAILED_TO_EXPORT_OTEL_SPANS = "Failed to export OpenTelemetry (OTEL) spans." - FAILED_TO_INITIALIZE_OPENTELEMETRY_TRACING = "Failed to initialize OpenTelemetry tracing." FAILED_TO_CREATE_SPAN_FOR_REQUEST = "Failed to create span for request." FAILED_TO_ENRICH_SPAN_WITH_RESPONSE = "Failed to enrich span with response." FAILED_TO_HANDLE_ERROR_IN_SPAN = "Failed to handle error in span." FAILED_TO_END_SPAN = "Failed to end span." 
- + def __str__(self): return str(self.value) @@ -179,6 +167,7 @@ def enrich_span_from_response(tracer: trace.Tracer, span: Span, operation_id: st start_ns = parse_time_to_nanos(output["created_at"]) end_ns = parse_time_to_nanos(output["completed_at"]) child_span = tracer.start_span("Tool Execution", start_time=start_ns, context=parent_context) + child_span.set_attributes({"agent.trace.public": ""}) tool_attributes = { gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.EXECUTE_TOOL.value, gen_ai_attributes.GEN_AI_TOOL_CALL_ID: output.get("id", ""), @@ -191,6 +180,7 @@ def enrich_span_from_response(tracer: trace.Tracer, span: Span, operation_id: st start_ns = parse_time_to_nanos(output["created_at"]) end_ns = parse_time_to_nanos(output["completed_at"]) child_span = tracer.start_span("Message Output", start_time=start_ns, context=parent_context) + child_span.set_attributes({"agent.trace.public": ""}) message_attributes = { gen_ai_attributes.GEN_AI_OPERATION_NAME: gen_ai_attributes.GenAiOperationNameValues.CHAT.value, gen_ai_attributes.GEN_AI_PROVIDER_NAME: gen_ai_attributes.GenAiProviderNameValues.MISTRAL_AI.value, @@ -216,60 +206,30 @@ def on_start(self, span, parent_context = None): span.set_attributes({"agent.trace.public": ""}) -class QuietOTLPSpanExporter(OTLPSpanExporter): - def export(self, spans): - try: - return super().export(spans) - except Exception: - logger.warning(f"{TracingErrors.FAILED_TO_EXPORT_OTEL_SPANS} {(traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT)}") - return SpanExportResult.FAILURE - - def get_or_create_otel_tracer() -> tuple[bool, Tracer]: """ - 3 possible cases: + Get a tracer from the current TracerProvider. - -> [SDK in a Workflow / App] If there is already a tracer provider set -> use that one + The SDK does not set up its own TracerProvider - it relies on the application + to configure OpenTelemetry. 
This follows OTEL best practices where: + - Libraries/SDKs get tracers from the global provider + - Applications configure the TracerProvider - -> [SDK standalone] If no tracer provider is set but the OTEL_EXPORTER_OTLP_ENDPOINT is set -> create a new tracer provider that exports to the OTEL_EXPORTER_OTLP_ENDPOINT + If no TracerProvider is configured, the ProxyTracerProvider (default) will + return a NoOp tracer, effectively disabling tracing. Once the application + sets up a real TracerProvider, subsequent spans will be recorded. - -> Else tracing is disabled + Returns: + Tuple[bool, Tracer]: (tracing_enabled, tracer) + - tracing_enabled is True if a real TracerProvider is configured + - tracer is always valid (may be NoOp if no provider configured) """ - tracing_enabled = True tracer_provider = trace.get_tracer_provider() - - if isinstance(tracer_provider, trace.ProxyTracerProvider): - if OTEL_EXPORTER_OTLP_ENDPOINT: - # SDK standalone: No tracer provider but OTEL_EXPORTER_OTLP_ENDPOINT is set -> create a new tracer provider that exports to the OTEL_EXPORTER_OTLP_ENDPOINT - try: - exporter = QuietOTLPSpanExporter( - endpoint=OTEL_EXPORTER_OTLP_ENDPOINT, - timeout=OTEL_EXPORTER_OTLP_TIMEOUT - ) - resource = Resource.create(attributes={SERVICE_NAME: OTEL_SERVICE_NAME}) - tracer_provider = TracerProvider(resource=resource) - - span_processor = BatchSpanProcessor( - exporter, - export_timeout_millis=OTEL_EXPORTER_OTLP_EXPORT_TIMEOUT_MILLIS, - max_export_batch_size=OTEL_EXPORTER_OTLP_MAX_EXPORT_BATCH_SIZE, - schedule_delay_millis=OTEL_EXPORTER_OTLP_SCHEDULE_DELAY_MILLIS, - max_queue_size=OTEL_EXPORTER_OTLP_MAX_QUEUE_SIZE - ) - - tracer_provider.add_span_processor(span_processor) - tracer_provider.add_span_processor(GenAISpanProcessor()) - trace.set_tracer_provider(tracer_provider) - - except Exception: - logger.warning(f"{TracingErrors.FAILED_TO_INITIALIZE_OPENTELEMETRY_TRACING} {(traceback.format_exc() if MISTRAL_SDK_DEBUG_TRACING else DEBUG_HINT)}") - 
tracing_enabled = False - else: - # No tracer provider nor OTEL_EXPORTER_OTLP_ENDPOINT set -> tracing is disabled - tracing_enabled = False - tracer = tracer_provider.get_tracer(MISTRAL_SDK_OTEL_TRACER_NAME) + # Tracing is considered enabled if we have a real TracerProvider (not the default proxy) + tracing_enabled = not isinstance(tracer_provider, trace.ProxyTracerProvider) + return tracing_enabled, tracer def get_traced_request_and_span( @@ -284,8 +244,9 @@ def get_traced_request_and_span( try: span = tracer.start_span(name=operation_id) + span.set_attributes({"agent.trace.public": ""}) # Inject the span context into the request headers to be used by the backend service to continue the trace - propagate.inject(request.headers) + propagate.inject(request.headers, context=set_span_in_context(span)) span = enrich_span_from_request(span, request) except Exception: logger.warning( From 1923fe68cf991ce31766ad7f93ce4af571521f33 Mon Sep 17 00:00:00 2001 From: Guillaume Dumont Date: Mon, 12 Jan 2026 11:46:05 +0100 Subject: [PATCH 170/223] Fix Speakeasy targets in GitHub actions (#312) This commit fixes the CI for GCP and Azure SDKs: * The GitHub actions now reference the correct Speakeasy targets * Fixes issues in packages/mistralai_gcp/src/mistralai_private_gcp/sdk.py (not tracked by Speakeasy since we added gcloud authentication) * Dropped the use of SDKError for non-HTTP errors (it now expects a raw_response in its constructor and internally makes accesses to its attributes). They have been replaced with ValueError when relevant (aligned with elsewhere in the SDK for invalid inputs) or basic Exceptions. * Changed the return type GoogleCloudBeforeRequestHook#before_request to a union like in the base class. 
--- .../sdk_generation_mistralai_azure_sdk.yaml | 2 +- .../sdk_generation_mistralai_gcp_sdk.yaml | 2 +- .../mistralai_gcp/src/mistralai_gcp/sdk.py | 22 ++++++++++--------- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml index ed4e94cf..167d8865 100644 --- a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -22,7 +22,7 @@ jobs: mode: pr set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest - target: mistral-python-sdk-azure + target: mistralai-azure-sdk secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml index 4fefe244..aa753830 100644 --- a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -22,7 +22,7 @@ jobs: mode: pr set_version: ${{ github.event.inputs.set_version }} speakeasy_version: latest - target: mistral-python-sdk-google-cloud + target: mistralai-gcp-sdk secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py index dd93cc7f..de48fbbb 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py @@ -2,7 +2,7 @@ import json import weakref -from typing import Any, Optional, cast +from typing import Any, Optional, Union, cast import google.auth import google.auth.credentials @@ -67,30 +67,32 @@ def __init__( :param timeout_ms: Optional request timeout applied to each operation in milliseconds """ + credentials = None if not access_token: credentials, loaded_project_id = google.auth.default( 
scopes=["https://www.googleapis.com/auth/cloud-platform"], ) - credentials.refresh(google.auth.transport.requests.Request()) - if not isinstance(credentials, google.auth.credentials.Credentials): - raise models.SDKError( - "credentials must be an instance of google.auth.credentials.Credentials" - ) + # default will already raise a google.auth.exceptions.DefaultCredentialsError if no credentials are found + assert isinstance( + credentials, google.auth.credentials.Credentials + ), "credentials must be an instance of google.auth.credentials.Credentials" + credentials.refresh(google.auth.transport.requests.Request()) project_id = project_id or loaded_project_id if project_id is None: - raise models.SDKError("project_id must be provided") + raise ValueError("project_id must be provided") def auth_token() -> str: if access_token: return access_token + assert credentials is not None, "credentials must be initialized" credentials.refresh(google.auth.transport.requests.Request()) token = credentials.token if not token: - raise models.SDKError("Failed to get token from credentials") + raise Exception("Failed to get token from credentials") return token client_supplied = True @@ -197,7 +199,7 @@ def __init__(self, region: str, project_id: str): def before_request( self, hook_ctx, request: httpx.Request - ) -> httpx.Request | Exception: + ) -> Union[httpx.Request, Exception]: # The goal of this function is to template in the region, project and model into the URL path # We do this here so that the API remains more user-friendly model_id = None @@ -210,7 +212,7 @@ def before_request( new_content = json.dumps(parsed).encode("utf-8") if model_id == "": - raise models.SDKError("model must be provided") + raise ValueError("model must be provided") stream = "streamRawPredict" in request.url.path specifier = "streamRawPredict" if stream else "rawPredict" From 9d1e7394d3ac97d7195b11b7418564be1f4dfd84 Mon Sep 17 00:00:00 2001 From: Guillaume 
Dumont Date: Mon, 12 Jan 2026 16:18:12 +0100 Subject: [PATCH 171/223] Switch the Azure and GCP SDKs to uv (#317) The migration to uv was done in #303. This PR reflects this change in the GCP/Azure .speakeasy/gen.yaml files. The pyproject.toml for those package has also been added in .genignore to prevent speakeasy from rewritting them (in line with what is done for the main SDK). --- packages/mistralai_azure/.genignore | 1 + packages/mistralai_azure/.speakeasy/gen.yaml | 1 + packages/mistralai_gcp/.genignore | 1 + packages/mistralai_gcp/.speakeasy/gen.yaml | 1 + 4 files changed, 4 insertions(+) diff --git a/packages/mistralai_azure/.genignore b/packages/mistralai_azure/.genignore index 513646da..ba7f2350 100644 --- a/packages/mistralai_azure/.genignore +++ b/packages/mistralai_azure/.genignore @@ -1,3 +1,4 @@ +pyproject.toml src/mistralai_azure/sdk.py README.md USAGE.md diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index 63e2da75..045e6139 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -52,6 +52,7 @@ python: methodArguments: infer-optional-args moduleName: "" outputModelSuffix: output + packageManager: uv packageName: mistralai_azure pytestFilterWarnings: [] pytestTimeout: 0 diff --git a/packages/mistralai_gcp/.genignore b/packages/mistralai_gcp/.genignore index ea10bc8e..76043176 100644 --- a/packages/mistralai_gcp/.genignore +++ b/packages/mistralai_gcp/.genignore @@ -1,3 +1,4 @@ +pyproject.toml src/mistralai_gcp/sdk.py README.md USAGE.md diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index d7be7fed..a82160ed 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -47,6 +47,7 @@ python: maxMethodParams: 15 methodArguments: infer-optional-args outputModelSuffix: output + packageManager: uv packageName: mistralai-gcp pytestTimeout: 0 
responseFormat: flat From 79cf7f5ffe476864fd4ef0f6a55fc6a8036e91a5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 13 Jan 2026 16:44:20 +0100 Subject: [PATCH 172/223] ## SDK Changes Detected: (#320) * `mistral_azure.chat.complete()`: * `request.messages.[].[assistant].content.[array]` **Changed** **Breaking** :warning: Co-authored-by: speakeasybot --- .speakeasy/workflow.lock | 11 +- packages/mistralai_azure/.speakeasy/gen.lock | 45 +++-- packages/mistralai_azure/.speakeasy/gen.yaml | 5 +- packages/mistralai_azure/RELEASES.md | 11 ++ .../docs/models/chatcompletionrequest.md | 40 ++--- .../models/chatcompletionrequesttoolchoice.md | 2 + .../models/chatcompletionstreamrequest.md | 40 ++--- .../chatcompletionstreamrequesttoolchoice.md | 2 + .../mistralai_azure/docs/models/ocrrequest.md | 22 +-- .../mistralai_azure/docs/models/prediction.md | 2 + .../docs/models/responseformat.md | 10 +- .../docs/models/responseformats.md | 2 - .../docs/models/systemmessagecontent.md | 4 +- .../docs/models/systemmessagecontentchunks.md | 17 ++ .../mistralai_azure/docs/models/thinkchunk.md | 10 ++ .../docs/models/thinkchunktype.md | 8 + .../mistralai_azure/docs/models/thinking.md | 17 ++ packages/mistralai_azure/pylintrc | 4 +- .../mistralai_azure/scripts/prepare_readme.py | 38 +++++ packages/mistralai_azure/scripts/publish.sh | 6 +- .../src/mistralai_azure/_version.py | 6 +- .../src/mistralai_azure/basesdk.py | 20 ++- .../src/mistralai_azure/chat.py | 157 +++++++----------- .../src/mistralai_azure/models/__init__.py | 53 +++++- .../models/chatcompletionrequest.py | 20 ++- .../models/chatcompletionstreamrequest.py | 20 ++- .../models/httpvalidationerror.py | 17 +- .../models/mistralazureerror.py | 26 +++ .../models/no_response_error.py | 13 ++ .../src/mistralai_azure/models/prediction.py | 4 + .../mistralai_azure/models/responseformat.py | 6 +- .../mistralai_azure/models/responseformats.py | 1 - 
.../models/responsevalidationerror.py | 25 +++ .../src/mistralai_azure/models/sdkerror.py | 44 +++-- .../mistralai_azure/models/systemmessage.py | 10 +- .../models/systemmessagecontentchunks.py | 21 +++ .../src/mistralai_azure/models/thinkchunk.py | 35 ++++ .../src/mistralai_azure/ocr.py | 51 ++---- .../src/mistralai_azure/utils/__init__.py | 23 ++- .../mistralai_azure/utils/eventstreaming.py | 10 ++ .../src/mistralai_azure/utils/serializers.py | 5 +- .../utils/unmarshal_json_response.py | 24 +++ 42 files changed, 617 insertions(+), 270 deletions(-) create mode 100644 packages/mistralai_azure/RELEASES.md create mode 100644 packages/mistralai_azure/docs/models/systemmessagecontentchunks.md create mode 100644 packages/mistralai_azure/docs/models/thinkchunk.md create mode 100644 packages/mistralai_azure/docs/models/thinkchunktype.md create mode 100644 packages/mistralai_azure/docs/models/thinking.md create mode 100644 packages/mistralai_azure/scripts/prepare_readme.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/systemmessagecontentchunks.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py create mode 100644 packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 48c4bf7b..c4acc23d 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -2,10 +2,11 @@ speakeasyVersion: 1.606.10 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:670c460702ec74f7077491464a6dc5ee9d873969c80e812c48dbf4deb160e470 - sourceBlobDigest: 
sha256:5a3ebfa4cb00a015bb7bb03ec7442fc7e0b9c17ca66ab35d3045290b2ad87eac + sourceRevisionDigest: sha256:fcc7262f29ef89a07cb718d7e6094c272627cf9f531588aef15a6e92dd50130a + sourceBlobDigest: sha256:9e6fc34474062726ceb96e424e858a0ae1b0506659cd11a58c72e1dd50dae885 tags: - latest + - speakeasy-sdk-regen-1768231850 mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 @@ -23,10 +24,10 @@ targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:670c460702ec74f7077491464a6dc5ee9d873969c80e812c48dbf4deb160e470 - sourceBlobDigest: sha256:5a3ebfa4cb00a015bb7bb03ec7442fc7e0b9c17ca66ab35d3045290b2ad87eac + sourceRevisionDigest: sha256:fcc7262f29ef89a07cb718d7e6094c272627cf9f531588aef15a6e92dd50130a + sourceBlobDigest: sha256:9e6fc34474062726ceb96e424e858a0ae1b0506659cd11a58c72e1dd50dae885 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:e6802c97fd9783aa91cc0853de1a889944f699b88e0dafcf9fecd83de6e2c6c9 + codeSamplesRevisionDigest: sha256:9ab092d625da8034f7c17321ce5295ecad19ca4e2be2851a1a5a977c6bbeff05 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index bce8e3c8..ed28d2f6 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,23 +1,26 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 87653f040f5b36c90e066870f34c478e + docChecksum: 3cd8710baef46375e8114574e63628e2 docVersion: 1.0.0 - speakeasyVersion: 1.568.2 - generationVersion: 2.634.2 - releaseVersion: 1.6.0 - configChecksum: 0b604304465a25f89acca310710262d1 + speakeasyVersion: 1.606.10 + generationVersion: 2.687.13 + releaseVersion: 1.7.0 + 
configChecksum: 011849ab2544f97bfda12235028c7a00 + repoURL: https://github.com/mistralai/client-python.git + repoSubDirectory: packages/mistralai_azure + installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_azure published: true features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.19.3 + core: 5.20.1 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.1 + examples: 3.0.2 flatRequests: 1.0.1 globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 @@ -30,13 +33,12 @@ features: responseFormat: 1.0.1 retries: 3.0.2 sdkHooks: 1.1.0 - serverEvents: 1.0.7 + serverEvents: 1.0.8 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 unions: 3.0.4 generatedFiles: - .gitattributes - - .python-version - .vscode/settings.json - docs/models/arguments.md - docs/models/assistantmessage.md @@ -90,7 +92,11 @@ generatedFiles: - docs/models/stop.md - docs/models/systemmessage.md - docs/models/systemmessagecontent.md + - docs/models/systemmessagecontentchunks.md - docs/models/textchunk.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md - docs/models/tool.md - docs/models/toolcall.md - docs/models/toolchoice.md @@ -106,10 +112,9 @@ generatedFiles: - docs/models/usermessagerole.md - docs/models/utils/retryconfig.md - docs/models/validationerror.md - - poetry.toml - py.typed - pylintrc - - pyproject.toml + - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai_azure/__init__.py - src/mistralai_azure/_hooks/__init__.py @@ -139,7 +144,9 @@ generatedFiles: - src/mistralai_azure/models/imageurl.py - src/mistralai_azure/models/imageurlchunk.py - src/mistralai_azure/models/jsonschema.py + - src/mistralai_azure/models/mistralazureerror.py - src/mistralai_azure/models/mistralpromptmode.py + - 
src/mistralai_azure/models/no_response_error.py - src/mistralai_azure/models/ocrimageobject.py - src/mistralai_azure/models/ocrpagedimensions.py - src/mistralai_azure/models/ocrpageobject.py @@ -150,10 +157,13 @@ generatedFiles: - src/mistralai_azure/models/referencechunk.py - src/mistralai_azure/models/responseformat.py - src/mistralai_azure/models/responseformats.py + - src/mistralai_azure/models/responsevalidationerror.py - src/mistralai_azure/models/sdkerror.py - src/mistralai_azure/models/security.py - src/mistralai_azure/models/systemmessage.py + - src/mistralai_azure/models/systemmessagecontentchunks.py - src/mistralai_azure/models/textchunk.py + - src/mistralai_azure/models/thinkchunk.py - src/mistralai_azure/models/tool.py - src/mistralai_azure/models/toolcall.py - src/mistralai_azure/models/toolchoice.py @@ -165,7 +175,6 @@ generatedFiles: - src/mistralai_azure/models/validationerror.py - src/mistralai_azure/ocr.py - src/mistralai_azure/py.typed - - src/mistralai_azure/sdk.py - src/mistralai_azure/sdkconfiguration.py - src/mistralai_azure/types/__init__.py - src/mistralai_azure/types/basemodel.py @@ -183,20 +192,21 @@ generatedFiles: - src/mistralai_azure/utils/retries.py - src/mistralai_azure/utils/security.py - src/mistralai_azure/utils/serializers.py + - src/mistralai_azure/utils/unmarshal_json_response.py - src/mistralai_azure/utils/url.py - src/mistralai_azure/utils/values.py examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} responses: "422": application/json: {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} @@ -211,5 +221,12 @@ examples: application/json: {"pages": [], "model": "Golf", "usage_info": {"pages_processed": 944919}} "422": application/json: {} + userExample: + requestBody: + application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + responses: + "200": + application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. 
ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). 
Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | 
(0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) 
| (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) 
| (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 
15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} examplesVersion: 1.0.2 generatedTests: {} +releaseNotes: "## SDK Changes Detected:\n* `mistral_azure.chat.complete()`: \n * `request.messages.[].[assistant].content.[array]` **Changed** **Breaking** :warning:\n" diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index 045e6139..ff7c32f5 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -21,11 +21,14 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.6.0 + version: 1.7.0 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 + allowedRedefinedBuiltins: + - id + - object authors: - Mistral baseErrorName: MistralAzureError diff --git a/packages/mistralai_azure/RELEASES.md b/packages/mistralai_azure/RELEASES.md new file mode 100644 index 00000000..e471af0f --- /dev/null +++ b/packages/mistralai_azure/RELEASES.md @@ -0,0 +1,11 @@ + + +## 2026-01-12 15:30:32 +### Changes 
+Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.0] packages/mistralai_azure +### Releases +- [PyPI v1.7.0] https://pypi.org/project/mistralai_azure/1.7.0 - packages/mistralai_azure \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index b0f05d37..be296b4b 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -3,23 +3,23 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md index 1646528d..dc82a8ef 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
+ ## Supported Types diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index 90397dec..03ad3291 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -3,23 +3,23 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
| | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | -| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. 
| | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Optional[str]* | :heavy_minus_sign: | The ID of the model to use for this request. | azureai | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | +| `safe_prompt` | *Optional[bool]* | :heavy_minus_sign: | Whether to inject a safety prompt before all conversations. | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md index cce0ca3e..43f3ca38 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionStreamRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
+ ## Supported Types diff --git a/packages/mistralai_azure/docs/models/ocrrequest.md b/packages/mistralai_azure/docs/models/ocrrequest.md index 6a9c77ab..0ec824d2 100644 --- a/packages/mistralai_azure/docs/models/ocrrequest.md +++ b/packages/mistralai_azure/docs/models/ocrrequest.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. 
Only json_schema is valid for this field | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/prediction.md b/packages/mistralai_azure/docs/models/prediction.md index 86e9c396..fae3c1ca 100644 --- a/packages/mistralai_azure/docs/models/prediction.md +++ b/packages/mistralai_azure/docs/models/prediction.md @@ -1,5 +1,7 @@ # Prediction +Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + ## Fields diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/mistralai_azure/docs/models/responseformat.md index 23a1641b..5cab22f2 100644 --- a/packages/mistralai_azure/docs/models/responseformat.md +++ b/packages/mistralai_azure/docs/models/responseformat.md @@ -1,9 +1,11 @@ # ResponseFormat +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
+ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
| -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/mistralai_azure/docs/models/responseformats.md index 06886afe..2f5f1e55 100644 --- a/packages/mistralai_azure/docs/models/responseformats.md +++ b/packages/mistralai_azure/docs/models/responseformats.md @@ -1,7 +1,5 @@ # ResponseFormats -An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
- ## Values diff --git a/packages/mistralai_azure/docs/models/systemmessagecontent.md b/packages/mistralai_azure/docs/models/systemmessagecontent.md index e0d27d9f..0c87baf3 100644 --- a/packages/mistralai_azure/docs/models/systemmessagecontent.md +++ b/packages/mistralai_azure/docs/models/systemmessagecontent.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.SystemMessageContentChunks]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.SystemMessageContentChunks] = /* values here */ ``` diff --git a/packages/mistralai_azure/docs/models/systemmessagecontentchunks.md b/packages/mistralai_azure/docs/models/systemmessagecontentchunks.md new file mode 100644 index 00000000..40030c17 --- /dev/null +++ b/packages/mistralai_azure/docs/models/systemmessagecontentchunks.md @@ -0,0 +1,17 @@ +# SystemMessageContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/packages/mistralai_azure/docs/models/thinkchunk.md b/packages/mistralai_azure/docs/models/thinkchunk.md new file mode 100644 index 00000000..66b2e0cd --- /dev/null +++ b/packages/mistralai_azure/docs/models/thinkchunk.md @@ -0,0 +1,10 @@ +# ThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. 
| +| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/thinkchunktype.md b/packages/mistralai_azure/docs/models/thinkchunktype.md new file mode 100644 index 00000000..baf6f755 --- /dev/null +++ b/packages/mistralai_azure/docs/models/thinkchunktype.md @@ -0,0 +1,8 @@ +# ThinkChunkType + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `THINKING` | thinking | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/thinking.md b/packages/mistralai_azure/docs/models/thinking.md new file mode 100644 index 00000000..c7a0d5c9 --- /dev/null +++ b/packages/mistralai_azure/docs/models/thinking.md @@ -0,0 +1,17 @@ +# Thinking + + +## Supported Types + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + diff --git a/packages/mistralai_azure/pylintrc b/packages/mistralai_azure/pylintrc index 95f656e2..a8fcb932 100644 --- a/packages/mistralai_azure/pylintrc +++ b/packages/mistralai_azure/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.10 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no @@ -660,4 +660,4 @@ init-import=no # List of qualified module names which can have objects that can redefine # builtins. 
-redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/mistralai_azure/scripts/prepare_readme.py b/packages/mistralai_azure/scripts/prepare_readme.py new file mode 100644 index 00000000..ff1121fd --- /dev/null +++ b/packages/mistralai_azure/scripts/prepare_readme.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import re +import shutil + +try: + with open("README.md", "r", encoding="utf-8") as rh: + readme_contents = rh.read() + GITHUB_URL = "https://github.com/mistralai/client-python.git" + GITHUB_URL = ( + GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL + ) + REPO_SUBDIR = "packages/mistralai_azure" + # Ensure the subdirectory has a trailing slash + if not REPO_SUBDIR.endswith("/"): + REPO_SUBDIR += "/" + # links on PyPI should have absolute URLs + readme_contents = re.sub( + r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))", + lambda m: m.group(1) + + GITHUB_URL + + "/blob/master/" + + REPO_SUBDIR + + m.group(2) + + m.group(3), + readme_contents, + ) + + with open("README-PYPI.md", "w", encoding="utf-8") as wh: + wh.write(readme_contents) +except Exception as e: + try: + print("Failed to rewrite README.md to README-PYPI.md, copying original instead") + print(e) + shutil.copyfile("README.md", "README-PYPI.md") + except Exception as ie: + print("Failed to copy README.md to README-PYPI.md") + print(ie) diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/mistralai_azure/scripts/publish.sh index 0c07c589..c35748f3 100755 --- a/packages/mistralai_azure/scripts/publish.sh +++ b/packages/mistralai_azure/scripts/publish.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -export UV_PUBLISH_TOKEN=${PYPI_TOKEN} +uv run python scripts/prepare_readme.py -uv run python 
../../scripts/prepare_readme.py --repo-subdir packages/mistralai_azure -- uv build -uv publish +uv build +uv publish --token $PYPI_TOKEN diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index 5fd03467..30bc3ab9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai_azure" -__version__: str = "1.6.0" +__version__: str = "1.7.0" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.634.2" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.634.2 1.0.0 mistralai_azure" +__gen_version__: str = "2.687.13" +__user_agent__: str = "speakeasy-sdk/python 1.7.0 2.687.13 1.0.0 mistralai_azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 84738ce8..0ac7e5a6 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -15,9 +15,19 @@ class BaseSDK: sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. 
+ """ - def __init__(self, sdk_config: SDKConfiguration) -> None: + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: self.sdk_configuration = sdk_config + self.parent_ref = parent_ref def _get_url(self, base_url, url_variables): sdk_url, sdk_variables = self.sdk_configuration.get_server_details() @@ -244,7 +254,7 @@ def do(): if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -265,7 +275,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res @@ -316,7 +326,7 @@ async def do(): if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -337,7 +347,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index 20184014..a3ef1a63 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -5,6 +5,7 @@ from mistralai_azure._hooks import HookContext from mistralai_azure.types import OptionalNullable, UNSET from mistralai_azure.utils import eventstreaming +from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union @@ -60,14 +61,14 @@ def stream( :param 
stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. 
+ :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method @@ -159,32 +160,23 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -235,14 +227,14 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method @@ -334,32 +326,23 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -416,14 +399,14 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
:param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. :param retries: Override the default retry configuration for this method @@ -512,33 +495,22 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] + return unmarshal_json_response( + Optional[models.ChatCompletionResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -595,14 +567,14 @@ async def complete_async( :param stream: Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param safe_prompt: Whether to inject a safety prompt before all conversations. 
:param retries: Override the default retry configuration for this method @@ -691,30 +663,19 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] + return unmarshal_json_response( + Optional[models.ChatCompletionResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index bc1a3f4f..140eec88 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -1,7 +1,10 @@ """Code generated by 
Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from .mistralazureerror import MistralAzureError from typing import TYPE_CHECKING from importlib import import_module +import builtins +import sys if TYPE_CHECKING: from .assistantmessage import ( @@ -79,6 +82,7 @@ ) from .jsonschema import JSONSchema, JSONSchemaTypedDict from .mistralpromptmode import MistralPromptMode + from .no_response_error import NoResponseError from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict @@ -93,6 +97,7 @@ ) from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats + from .responsevalidationerror import ResponseValidationError from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( @@ -102,7 +107,18 @@ SystemMessageContentTypedDict, SystemMessageTypedDict, ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) from .textchunk import TextChunk, TextChunkTypedDict, Type + from .thinkchunk import ( + ThinkChunk, + ThinkChunkType, + ThinkChunkTypedDict, + Thinking, + ThinkingTypedDict, + ) from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict @@ -196,7 +212,9 @@ "LocTypedDict", "Messages", "MessagesTypedDict", + "MistralAzureError", "MistralPromptMode", + "NoResponseError", "OCRImageObject", "OCRImageObjectTypedDict", "OCRPageDimensions", @@ -217,6 +235,7 @@ "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", + "ResponseValidationError", "Role", "SDKError", "Security", @@ -225,10 +244,17 @@ "StopTypedDict", "SystemMessage", "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", 
"SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkType", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", @@ -319,6 +345,7 @@ "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", "MistralPromptMode": ".mistralpromptmode", + "NoResponseError": ".no_response_error", "OCRImageObject": ".ocrimageobject", "OCRImageObjectTypedDict": ".ocrimageobject", "OCRPageDimensions": ".ocrpagedimensions", @@ -341,6 +368,7 @@ "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", "ResponseFormats": ".responseformats", + "ResponseValidationError": ".responsevalidationerror", "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", @@ -349,9 +377,16 @@ "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": ".systemmessage", "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", "TextChunk": ".textchunk", "TextChunkTypedDict": ".textchunk", "Type": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkType": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", "Tool": ".tool", "ToolTypedDict": ".tool", "ToolCall": ".toolcall", @@ -379,6 +414,18 @@ } +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + def __getattr__(attr_name: str) -> object: module_name = _dynamic_imports.get(attr_name) if module_name is None: @@ -387,7 +434,7 @@ def __getattr__(attr_name: str) -> object: ) try: - module = 
import_module(module_name, __package__) + module = dynamic_import(module_name) result = getattr(module, attr_name) return result except ImportError as e: @@ -401,5 +448,5 @@ def __getattr__(attr_name: str) -> object: def __dir__(): - lazy_attrs = list(_dynamic_imports.keys()) - return sorted(lazy_attrs) + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index 8dffe1bd..ecb33b81 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -63,11 +63,13 @@ "ChatCompletionRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionRequestToolChoice = TypeAliasType( "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionRequestTypedDict(TypedDict): @@ -88,16 +90,21 @@ class ChatCompletionRequestTypedDict(TypedDict): random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] @@ -130,23 +137,28 @@ class ChatCompletionRequest(BaseModel): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index 5fced93e..d13faa08 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -59,11 +59,13 @@ "ChatCompletionStreamRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionStreamRequestToolChoice = TypeAliasType( "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionStreamRequestTypedDict(TypedDict): @@ -83,16 +85,21 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: NotRequired[Nullable[MistralPromptMode]] r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: NotRequired[bool] @@ -124,23 +131,28 @@ class ChatCompletionStreamRequest(BaseModel): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" prompt_mode: Annotated[ OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py index 1d22d97a..92eac6a1 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py @@ -2,7 +2,8 @@ from __future__ import annotations from .validationerror import ValidationError -from mistralai_azure import utils +import httpx +from mistralai_azure.models import MistralAzureError from mistralai_azure.types import BaseModel from typing import List, Optional @@ -11,11 +12,15 @@ class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None -class HTTPValidationError(Exception): +class HTTPValidationError(MistralAzureError): data: HTTPValidationErrorData - def __init__(self, data: HTTPValidationErrorData): + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) self.data = data - - def __str__(self) -> str: - return utils.marshal_json(self.data, HTTPValidationErrorData) diff --git 
a/packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py b/packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py new file mode 100644 index 00000000..9e45af0e --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Optional + + +class MistralAzureError(Exception): + """The base class for all HTTP error responses.""" + + message: str + status_code: int + body: str + headers: httpx.Headers + raw_response: httpx.Response + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + self.message = message + self.status_code = raw_response.status_code + self.body = body if body is not None else raw_response.text + self.headers = raw_response.headers + self.raw_response = raw_response + + def __str__(self): + return self.message diff --git a/packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py b/packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py new file mode 100644 index 00000000..f98beea2 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +class NoResponseError(Exception): + """Error raised when no HTTP response is received from the server.""" + + message: str + + def __init__(self, message: str = "No response received"): + self.message = message + super().__init__(message) + + def __str__(self): + return self.message diff --git a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py index 888337d3..b23a935c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/prediction.py @@ -10,11 +10,15 @@ class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + type: Literal["content"] content: NotRequired[str] class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + TYPE: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py index 6d09de5b..c989f3a4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py @@ -16,14 +16,16 @@ class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: NotRequired[ResponseFormats] - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: Optional[ResponseFormats] = None - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: OptionalNullable[JSONSchema] = UNSET diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py index 08c39951..258fe70e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py @@ -5,4 +5,3 @@ ResponseFormats = Literal["text", "json_object", "json_schema"] -r"""An object specifying the format that the model must output. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py new file mode 100644 index 00000000..56573dfa --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Optional + +from mistralai_azure.models import MistralAzureError + + +class ResponseValidationError(MistralAzureError): + """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" + + def __init__( + self, + message: str, + raw_response: httpx.Response, + cause: Exception, + body: Optional[str] = None, + ): + message = f"{message}: {cause}" + super().__init__(message, raw_response, body) + + @property + def cause(self): + """Normally the Pydantic ValidationError""" + return self.__cause__ diff --git a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py b/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py index 03216cbf..e841ab3a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py @@ -1,22 +1,38 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from dataclasses import dataclass -from typing import Optional import httpx +from typing import Optional + +from mistralai_azure.models import MistralAzureError + +MAX_MESSAGE_LEN = 10_000 + + +class SDKError(MistralAzureError): + """The fallback error class if no more specific error class is matched.""" + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + body_display = body or raw_response.text or '""' + if message: + message += ": " + message += f"Status {raw_response.status_code}" -@dataclass -class SDKError(Exception): - """Represents an error returned by the API.""" + headers = raw_response.headers + content_type = headers.get("content-type", '""') + if content_type != "application/json": + if " " in content_type: + content_type = f'"{content_type}"' + message += f" Content-Type {content_type}" - message: str - status_code: int = -1 - body: str = "" - raw_response: Optional[httpx.Response] = None + if len(body_display) > MAX_MESSAGE_LEN: + truncated = body_display[:MAX_MESSAGE_LEN] + remaining = len(body_display) - MAX_MESSAGE_LEN + body_display = f"{truncated}...and {remaining} more chars" - def __str__(self): - body = "" - if len(self.body) > 0: - body = f"\n{self.body}" + message += f". Body: {body_display}" + message = message.strip() - return f"{self.message}: Status {self.status_code}{body}" + super().__init__(message, raw_response, body) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py index b7d975b6..d91a9058 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py @@ -1,19 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) from mistralai_azure.types import BaseModel from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict SystemMessageContentTypedDict = TypeAliasType( - "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], ) SystemMessageContent = TypeAliasType( - "SystemMessageContent", Union[str, List[TextChunk]] + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] ) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessagecontentchunks.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessagecontentchunks.py new file mode 100644 index 00000000..4615a16c --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/systemmessagecontentchunks.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from mistralai_azure.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +SystemMessageContentChunksTypedDict = TypeAliasType( + "SystemMessageContentChunksTypedDict", + Union[TextChunkTypedDict, ThinkChunkTypedDict], +) + + +SystemMessageContentChunks = Annotated[ + Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py new file mode 100644 index 00000000..8ff257f4 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai_azure.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ThinkingTypedDict = TypeAliasType( + "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +) + + +Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) + + +ThinkChunkType = Literal["thinking"] + + +class ThinkChunkTypedDict(TypedDict): + thinking: List[ThinkingTypedDict] + closed: NotRequired[bool] + r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + type: NotRequired[ThinkChunkType] + + +class ThinkChunk(BaseModel): + thinking: List[Thinking] + + closed: Optional[bool] = None + r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" + + type: Optional[ThinkChunkType] = "thinking" diff --git a/packages/mistralai_azure/src/mistralai_azure/ocr.py b/packages/mistralai_azure/src/mistralai_azure/ocr.py index 71fe0337..c15a3da7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/ocr.py +++ b/packages/mistralai_azure/src/mistralai_azure/ocr.py @@ -4,6 +4,7 @@ from mistralai_azure import models, utils from mistralai_azure._hooks import HookContext from mistralai_azure.types import Nullable, OptionalNullable, UNSET +from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union @@ -113,31 +114,20 @@ def process( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.OCRResponse]) + return unmarshal_json_response(Optional[models.OCRResponse], http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error 
occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def process_async( self, @@ -244,28 +234,17 @@ async def process_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json(http_res.text, Optional[models.OCRResponse]) + return unmarshal_json_response(Optional[models.OCRResponse], http_res) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git 
a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index dd4aa4b3..56164cf3 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -2,6 +2,8 @@ from typing import TYPE_CHECKING from importlib import import_module +import builtins +import sys if TYPE_CHECKING: from .annotations import get_discriminator @@ -158,6 +160,18 @@ } +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + def __getattr__(attr_name: str) -> object: module_name = _dynamic_imports.get(attr_name) if module_name is None: @@ -166,9 +180,8 @@ def __getattr__(attr_name: str) -> object: ) try: - module = import_module(module_name, __package__) - result = getattr(module, attr_name) - return result + module = dynamic_import(module_name) + return getattr(module, attr_name) except ImportError as e: raise ImportError( f"Failed to import {attr_name} from {module_name}: {e}" @@ -180,5 +193,5 @@ def __getattr__(attr_name: str) -> object: def __dir__(): - lazy_attrs = list(_dynamic_imports.keys()) - return sorted(lazy_attrs) + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py b/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py index 74a63f75..0969899b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py @@ -17,6 +17,9 @@ class EventStream(Generic[T]): + # Holds a reference to the SDK client to avoid it being 
garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] @@ -25,9 +28,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events(response, decoder, sentinel) + self.client_ref = client_ref def __iter__(self): return self @@ -43,6 +48,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): class EventStreamAsync(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] response: httpx.Response generator: AsyncGenerator[T, None] @@ -51,9 +59,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events_async(response, decoder, sentinel) + self.client_ref = client_ref def __aiter__(self): return self diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index 76e44d71..378a14c0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -192,7 +192,9 @@ def is_union(obj: object) -> bool: """ Returns True if the given object is a typing.Union or typing_extensions.Union. 
""" - return any(obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union")) + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) def stream_to_text(stream: httpx.Response) -> str: @@ -245,4 +247,3 @@ def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: f"Neither typing nor typing_extensions has an object called {name!r}" ) return result - diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py b/packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py new file mode 100644 index 00000000..6eee29b8 --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any, Optional + +import httpx + +from .serializers import unmarshal_json +from mistralai_azure import models + + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise models.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e From c5b60515ef3bd58a4846bb4062fbc90c3c5dbd9f Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 13 Jan 2026 16:44:36 +0100 Subject: [PATCH 173/223] ## SDK Changes Detected: (#321) * `mistral_gcp.chat.complete()`: * `request` **Changed** **Breaking** :warning: * `response` **Changed** * `mistral_gcp.fim.complete()`: `response` **Changed** Co-authored-by: speakeasybot --- .speakeasy/workflow.lock | 11 +- packages/mistralai_gcp/.gitignore | 3 + packages/mistralai_gcp/.speakeasy/gen.lock | 66 +-- packages/mistralai_gcp/.speakeasy/gen.yaml | 14 +- packages/mistralai_gcp/RELEASES.md | 11 + 
.../docs/models/chatcompletionrequest.md | 37 +- .../models/chatcompletionrequesttoolchoice.md | 2 + .../docs/models/chatcompletionresponse.md | 4 +- .../models/chatcompletionstreamrequest.md | 37 +- .../chatcompletionstreamrequesttoolchoice.md | 2 + .../docs/models/completionchunk.md | 6 +- .../docs/models/fimcompletionrequest.md | 4 +- .../docs/models/fimcompletionresponse.md | 4 +- .../docs/models/fimcompletionstreamrequest.md | 4 +- .../mistralai_gcp/docs/models/function.md | 4 +- .../mistralai_gcp/docs/models/jsonschema.md | 2 +- .../docs/models/mistralpromptmode.md | 8 + .../mistralai_gcp/docs/models/prediction.md | 2 + .../docs/models/responseformat.md | 10 +- .../docs/models/responseformats.md | 2 - .../docs/models/systemmessagecontent.md | 4 +- .../docs/models/systemmessagecontentchunks.md | 17 + .../mistralai_gcp/docs/models/thinkchunk.md | 10 + .../docs/models/thinkchunktype.md | 8 + .../mistralai_gcp/docs/models/thinking.md | 17 + packages/mistralai_gcp/docs/models/tool.md | 4 +- .../mistralai_gcp/docs/models/toolcall.md | 2 +- .../mistralai_gcp/docs/models/toolchoice.md | 4 +- .../mistralai_gcp/docs/models/usageinfo.md | 12 +- packages/mistralai_gcp/pylintrc | 11 +- .../mistralai_gcp/scripts/prepare_readme.py | 38 ++ packages/mistralai_gcp/scripts/publish.sh | 6 +- .../src/mistralai_gcp/_hooks/types.py | 7 + .../src/mistralai_gcp/_version.py | 8 +- .../src/mistralai_gcp/basesdk.py | 52 ++- .../mistralai_gcp/src/mistralai_gcp/chat.py | 173 +++---- .../mistralai_gcp/src/mistralai_gcp/fim.py | 113 ++--- .../src/mistralai_gcp/httpclient.py | 22 +- .../src/mistralai_gcp/models/__init__.py | 437 +++++++++++++----- .../mistralai_gcp/models/assistantmessage.py | 2 +- .../models/chatcompletionrequest.py | 43 +- .../models/chatcompletionresponse.py | 12 +- .../models/chatcompletionstreamrequest.py | 43 +- .../models/completionresponsestreamchoice.py | 2 +- .../src/mistralai_gcp/models/deltamessage.py | 2 +- .../models/fimcompletionrequest.py | 12 +- 
.../models/fimcompletionresponse.py | 12 +- .../models/fimcompletionstreamrequest.py | 12 +- .../models/httpvalidationerror.py | 17 +- .../src/mistralai_gcp/models/imageurl.py | 2 +- .../src/mistralai_gcp/models/jsonschema.py | 2 +- .../mistralai_gcp/models/mistralgcperror.py | 26 ++ .../mistralai_gcp/models/mistralpromptmode.py | 8 + .../mistralai_gcp/models/no_response_error.py | 13 + .../src/mistralai_gcp/models/prediction.py | 4 + .../mistralai_gcp/models/responseformat.py | 8 +- .../mistralai_gcp/models/responseformats.py | 1 - .../models/responsevalidationerror.py | 25 + .../src/mistralai_gcp/models/sdkerror.py | 44 +- .../src/mistralai_gcp/models/systemmessage.py | 10 +- .../models/systemmessagecontentchunks.py | 21 + .../src/mistralai_gcp/models/thinkchunk.py | 35 ++ .../src/mistralai_gcp/models/toolmessage.py | 2 +- .../src/mistralai_gcp/models/usageinfo.py | 79 +++- .../src/mistralai_gcp/models/usermessage.py | 2 +- .../src/mistralai_gcp/sdkconfiguration.py | 7 - .../src/mistralai_gcp/types/basemodel.py | 6 +- .../src/mistralai_gcp/utils/__init__.py | 188 ++++++-- .../src/mistralai_gcp/utils/datetimes.py | 23 + .../src/mistralai_gcp/utils/enums.py | 94 ++-- .../src/mistralai_gcp/utils/eventstreaming.py | 10 + .../src/mistralai_gcp/utils/forms.py | 77 +-- .../src/mistralai_gcp/utils/serializers.py | 36 +- .../utils/unmarshal_json_response.py | 24 + 74 files changed, 1440 insertions(+), 642 deletions(-) create mode 100644 packages/mistralai_gcp/RELEASES.md create mode 100644 packages/mistralai_gcp/docs/models/mistralpromptmode.md create mode 100644 packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md create mode 100644 packages/mistralai_gcp/docs/models/thinkchunk.md create mode 100644 packages/mistralai_gcp/docs/models/thinkchunktype.md create mode 100644 packages/mistralai_gcp/docs/models/thinking.md create mode 100644 packages/mistralai_gcp/scripts/prepare_readme.py create mode 100644 
packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py create mode 100644 packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index c4acc23d..ac13e7ce 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -9,10 +9,11 @@ sources: - speakeasy-sdk-regen-1768231850 mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 - sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f + sourceRevisionDigest: sha256:bc59aaf55dc46e94ddf6cc687292807629d7a17ee5f573a504e7e44fd365e147 + sourceBlobDigest: sha256:545fe85c5dae11def2741fc7a99f297b7f0728c9677c3c7b94d56ddbed70581d tags: - latest + - speakeasy-sdk-regen-1768231856 mistral-openapi: sourceNamespace: mistral-openapi sourceRevisionDigest: sha256:cb63bd997cefe7b3b36e91a475df57cb779bf79f183340e0713d8ffb16a2dabc @@ -31,10 +32,10 @@ targets: mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4a5343e63c6a78152e472b00ccc46d7bcb15594496bc94c8040039d3a9d4c5f8 - sourceBlobDigest: sha256:3327f078a11596abdcbc21cd8a1adcf0b2aa474975cd9ab1feb745a2e50d555f + sourceRevisionDigest: sha256:bc59aaf55dc46e94ddf6cc687292807629d7a17ee5f573a504e7e44fd365e147 + sourceBlobDigest: 
sha256:545fe85c5dae11def2741fc7a99f297b7f0728c9677c3c7b94d56ddbed70581d codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:03b3e82c20d10faa8622f14696632b96b1a2e8d747b266fff345061298d5f3e4 + codeSamplesRevisionDigest: sha256:31fd0ba45daa00020ef6f07df435ad343b62328bf56489dfcb66b647beeb52b5 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi diff --git a/packages/mistralai_gcp/.gitignore b/packages/mistralai_gcp/.gitignore index 5a82b069..f2ea8c39 100644 --- a/packages/mistralai_gcp/.gitignore +++ b/packages/mistralai_gcp/.gitignore @@ -1,3 +1,6 @@ +**/__pycache__/ +**/.speakeasy/temp/ +**/.speakeasy/logs/ .speakeasy/reports README-PYPI.md .venv/ diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 5e157235..07deb7d7 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,41 +1,44 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 28fe1ab59b4dee005217f2dbbd836060 - docVersion: 0.0.2 - speakeasyVersion: 1.517.3 - generationVersion: 2.548.6 - releaseVersion: 1.6.0 - configChecksum: 66bf5911f59189922e03a75a72923b32 + docChecksum: 05fc6f45406deac180ffc1df760c67f4 + docVersion: 1.0.0 + speakeasyVersion: 1.606.10 + generationVersion: 2.687.13 + releaseVersion: 1.7.0 + configChecksum: 265058aeeb734c5018a1be4c40ea6e39 + repoURL: https://github.com/mistralai/client-python.git + repoSubDirectory: packages/mistralai_gcp + installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_gcp published: true features: python: additionalDependencies: 1.0.0 + additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.12.3 + core: 5.20.1 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 - examples: 3.0.1 + examples: 3.0.2 
flatRequests: 1.0.1 globalSecurity: 3.0.3 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.0 + globalServerURLs: 3.1.1 methodArguments: 1.0.2 nameOverrides: 3.0.1 nullables: 1.0.1 - openEnums: 1.0.0 + openEnums: 1.0.1 responseFormat: 1.0.1 retries: 3.0.2 - sdkHooks: 1.0.1 - serverEvents: 1.0.7 + sdkHooks: 1.1.0 + serverEvents: 1.0.8 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 unions: 3.0.4 generatedFiles: - .gitattributes - - .python-version - .vscode/settings.json - docs/models/arguments.md - docs/models/assistantmessage.md @@ -73,6 +76,7 @@ generatedFiles: - docs/models/jsonschema.md - docs/models/loc.md - docs/models/messages.md + - docs/models/mistralpromptmode.md - docs/models/prediction.md - docs/models/referencechunk.md - docs/models/referencechunktype.md @@ -83,7 +87,11 @@ generatedFiles: - docs/models/stop.md - docs/models/systemmessage.md - docs/models/systemmessagecontent.md + - docs/models/systemmessagecontentchunks.md - docs/models/textchunk.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md - docs/models/tool.md - docs/models/toolcall.md - docs/models/toolchoice.md @@ -99,10 +107,8 @@ generatedFiles: - docs/models/usermessagerole.md - docs/models/utils/retryconfig.md - docs/models/validationerror.md - - poetry.toml - py.typed - pylintrc - - pyproject.toml - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai_gcp/__init__.py @@ -135,14 +141,20 @@ generatedFiles: - src/mistralai_gcp/models/imageurl.py - src/mistralai_gcp/models/imageurlchunk.py - src/mistralai_gcp/models/jsonschema.py + - src/mistralai_gcp/models/mistralgcperror.py + - src/mistralai_gcp/models/mistralpromptmode.py + - src/mistralai_gcp/models/no_response_error.py - src/mistralai_gcp/models/prediction.py - src/mistralai_gcp/models/referencechunk.py - src/mistralai_gcp/models/responseformat.py - src/mistralai_gcp/models/responseformats.py + - src/mistralai_gcp/models/responsevalidationerror.py - 
src/mistralai_gcp/models/sdkerror.py - src/mistralai_gcp/models/security.py - src/mistralai_gcp/models/systemmessage.py + - src/mistralai_gcp/models/systemmessagecontentchunks.py - src/mistralai_gcp/models/textchunk.py + - src/mistralai_gcp/models/thinkchunk.py - src/mistralai_gcp/models/tool.py - src/mistralai_gcp/models/toolcall.py - src/mistralai_gcp/models/toolchoice.py @@ -153,12 +165,12 @@ generatedFiles: - src/mistralai_gcp/models/usermessage.py - src/mistralai_gcp/models/validationerror.py - src/mistralai_gcp/py.typed - - src/mistralai_gcp/sdk.py - src/mistralai_gcp/sdkconfiguration.py - src/mistralai_gcp/types/__init__.py - src/mistralai_gcp/types/basemodel.py - src/mistralai_gcp/utils/__init__.py - src/mistralai_gcp/utils/annotations.py + - src/mistralai_gcp/utils/datetimes.py - src/mistralai_gcp/utils/enums.py - src/mistralai_gcp/utils/eventstreaming.py - src/mistralai_gcp/utils/forms.py @@ -170,42 +182,40 @@ generatedFiles: - src/mistralai_gcp/utils/retries.py - src/mistralai_gcp/utils/security.py - src/mistralai_gcp/utils/serializers.py + - src/mistralai_gcp/utils/unmarshal_json_response.py - src/mistralai_gcp/utils/url.py - src/mistralai_gcp/utils/values.py examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-small-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} responses: "422": application/json: {} - "200": {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-small-latest", "stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}]} + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, "created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} "422": application/json: {} stream_fim: speakeasy-default-stream-fim: requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-latest", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} responses: "422": application/json: {} - "200": {} fim_completion_v1_fim_completions_post: - speakeasy-default-fim-completion-v1-fim-completions-post: + userExample: requestBody: - application/json: {"model": "codestral-2405", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "codestral-latest", "usage": {"prompt_tokens": 16, "completion_tokens": 34, "total_tokens": 50}, 
"created": 1702256327, "choices": [{"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}, {"index": 0, "message": {"prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} - "422": - application/json: {} -examplesVersion: 1.0.0 + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} +examplesVersion: 1.0.2 generatedTests: {} +releaseNotes: "## SDK Changes Detected:\n* `mistral_gcp.chat.complete()`: \n * `request` **Changed** **Breaking** :warning:\n * `response` **Changed**\n* `mistral_gcp.fim.complete()`: `response` **Changed**\n" diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index a82160ed..3df5bb18 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -4,6 +4,7 @@ generation: maintainOpenAPIOrder: true usageSnippets: optionalPropertyRendering: withExample + sdkInitStyle: constructor useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true @@ -11,11 +12,16 @@ generation: parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true securityFeb2025: false + sharedErrorComponentsApr2025: false auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + tests: + generateTests: 
true + generateNewTests: false + skipResponseBodyAssertions: false python: - version: 1.6.0 + version: 1.7.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -23,8 +29,12 @@ python: main: google-auth: ^2.31.0 requests: ^2.32.3 + allowedRedefinedBuiltins: + - id + - object authors: - Mistral + baseErrorName: MistralGcpError clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in GCP. @@ -46,9 +56,11 @@ python: inputModelSuffix: input maxMethodParams: 15 methodArguments: infer-optional-args + moduleName: "" outputModelSuffix: output packageManager: uv packageName: mistralai-gcp + pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat templateVersion: v2 diff --git a/packages/mistralai_gcp/RELEASES.md b/packages/mistralai_gcp/RELEASES.md new file mode 100644 index 00000000..b503c75f --- /dev/null +++ b/packages/mistralai_gcp/RELEASES.md @@ -0,0 +1,11 @@ + + +## 2026-01-12 16:00:24 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.7.0] packages/mistralai_gcp +### Releases +- [PyPI v1.7.0] https://pypi.org/project/mistralai-gcp/1.7.0 - packages/mistralai_gcp \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index 9d735d08..48103e30 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -3,21 +3,22 @@ ## Fields -| Field | Type | Required | Description | Example | -| 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: 
| ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-small-latest | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. 
| | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md index 1646528d..dc82a8ef 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
+ ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md index ad376158..a0465ffb 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | mistral-small-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index 827943cd..aaacc09c 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -3,21 +3,22 @@ ## Fields -| Field | Type | Required | Description | Example | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. 
| mistral-small-latest | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | -| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | | -| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | -| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | -| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | -| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | -| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | +| `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. | | +| `n` | *OptionalNullable[int]* | :heavy_minus_sign: | Number of completions to return for each request, input tokens are only billed once. | | +| `prediction` | [Optional[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. | | +| `parallel_tool_calls` | *Optional[bool]* | :heavy_minus_sign: | Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. 
| | +| `prompt_mode` | [OptionalNullable[models.MistralPromptMode]](../models/mistralpromptmode.md) | :heavy_minus_sign: | Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md index cce0ca3e..43f3ca38 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md @@ -1,5 +1,7 @@ # ChatCompletionStreamRequestToolChoice +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. 
+ ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/completionchunk.md b/packages/mistralai_gcp/docs/models/completionchunk.md index b8ae6a09..7f8ab5e6 100644 --- a/packages/mistralai_gcp/docs/models/completionchunk.md +++ b/packages/mistralai_gcp/docs/models/completionchunk.md @@ -6,8 +6,8 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | | `id` | *str* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | -| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | | `object` | *Optional[str]* | :heavy_minus_sign: | N/A | | `created` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model` | *str* | :heavy_check_mark: | N/A | +| `usage` | [Optional[models.UsageInfo]](../models/usageinfo.md) | :heavy_minus_sign: | N/A | +| `choices` | List[[models.CompletionResponseStreamChoice](../models/completionresponsestreamchoice.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index 7507b90c..380f109c 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -5,13 +5,13 @@ | Field | Type | Required | Description | Example | | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: 
| ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md index da786a1f..cd62d034 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionresponse.md @@ -9,5 +9,5 @@ | `object` | *str* | :heavy_check_mark: | N/A | chat.completion | | `model` | *str* | :heavy_check_mark: | N/A | codestral-latest | | `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | | -| `created` | *Optional[int]* | :heavy_minus_sign: | N/A | 1702256327 | -| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `created` | *int* | :heavy_check_mark: | N/A | 1702256327 | +| `choices` | List[[models.ChatCompletionChoice](../models/chatcompletionchoice.md)] | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index 6cc439c7..a890ff2b 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -5,13 +5,13 @@ | Field | Type | Required | Description | Example | | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: 
| ID of the model to use. Only compatible for now with:
- `codestral-2405`
- `codestral-latest` | codestral-2405 | -| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | +| `model` | *str* | :heavy_check_mark: | ID of the model with FIM to use. | codestral-latest | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/mistralai_gcp/docs/models/function.md index a166b7bb..b2bdb3fe 100644 --- a/packages/mistralai_gcp/docs/models/function.md +++ b/packages/mistralai_gcp/docs/models/function.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `name` | *str* | :heavy_check_mark: | N/A | -| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `parameters` | Dict[str, *Any*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/mistralai_gcp/docs/models/jsonschema.md index ae387867..7ff7c070 100644 --- a/packages/mistralai_gcp/docs/models/jsonschema.md +++ b/packages/mistralai_gcp/docs/models/jsonschema.md @@ -6,6 +6,6 @@ | Field | Type | Required | Description | | ----------------------- | ----------------------- | ----------------------- | ----------------------- | | `name` | *str* | :heavy_check_mark: | N/A | -| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | | `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/mistralpromptmode.md b/packages/mistralai_gcp/docs/models/mistralpromptmode.md new file mode 100644 index 
00000000..7416e203 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/mistralpromptmode.md @@ -0,0 +1,8 @@ +# MistralPromptMode + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `REASONING` | reasoning | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/prediction.md b/packages/mistralai_gcp/docs/models/prediction.md index 86e9c396..fae3c1ca 100644 --- a/packages/mistralai_gcp/docs/models/prediction.md +++ b/packages/mistralai_gcp/docs/models/prediction.md @@ -1,5 +1,7 @@ # Prediction +Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + ## Fields diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/mistralai_gcp/docs/models/responseformat.md index 23a1641b..5cab22f2 100644 --- a/packages/mistralai_gcp/docs/models/responseformat.md +++ b/packages/mistralai_gcp/docs/models/responseformat.md @@ -1,9 +1,11 @@ # ResponseFormat +Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. 
+ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
| -| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | N/A | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/mistralai_gcp/docs/models/responseformats.md index 06886afe..2f5f1e55 100644 --- a/packages/mistralai_gcp/docs/models/responseformats.md +++ b/packages/mistralai_gcp/docs/models/responseformats.md @@ -1,7 +1,5 @@ # ResponseFormats -An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
- ## Values diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontent.md b/packages/mistralai_gcp/docs/models/systemmessagecontent.md index e0d27d9f..0c87baf3 100644 --- a/packages/mistralai_gcp/docs/models/systemmessagecontent.md +++ b/packages/mistralai_gcp/docs/models/systemmessagecontent.md @@ -9,9 +9,9 @@ value: str = /* values here */ ``` -### `List[models.TextChunk]` +### `List[models.SystemMessageContentChunks]` ```python -value: List[models.TextChunk] = /* values here */ +value: List[models.SystemMessageContentChunks] = /* values here */ ``` diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md b/packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md new file mode 100644 index 00000000..40030c17 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md @@ -0,0 +1,17 @@ +# SystemMessageContentChunks + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ThinkChunk` + +```python +value: models.ThinkChunk = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/thinkchunk.md b/packages/mistralai_gcp/docs/models/thinkchunk.md new file mode 100644 index 00000000..66b2e0cd --- /dev/null +++ b/packages/mistralai_gcp/docs/models/thinkchunk.md @@ -0,0 +1,10 @@ +# ThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. 
| +| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/thinkchunktype.md b/packages/mistralai_gcp/docs/models/thinkchunktype.md new file mode 100644 index 00000000..baf6f755 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/thinkchunktype.md @@ -0,0 +1,8 @@ +# ThinkChunkType + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `THINKING` | thinking | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/thinking.md b/packages/mistralai_gcp/docs/models/thinking.md new file mode 100644 index 00000000..c7a0d5c9 --- /dev/null +++ b/packages/mistralai_gcp/docs/models/thinking.md @@ -0,0 +1,17 @@ +# Thinking + + +## Supported Types + +### `models.ReferenceChunk` + +```python +value: models.ReferenceChunk = /* values here */ +``` + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/mistralai_gcp/docs/models/tool.md index 822f86f8..fb661f72 100644 --- a/packages/mistralai_gcp/docs/models/tool.md +++ b/packages/mistralai_gcp/docs/models/tool.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolcall.md 
b/packages/mistralai_gcp/docs/models/toolcall.md index 574be1ea..3819236b 100644 --- a/packages/mistralai_gcp/docs/models/toolcall.md +++ b/packages/mistralai_gcp/docs/models/toolcall.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | [models.FunctionCall](../models/functioncall.md) | :heavy_check_mark: | N/A | | `index` | *Optional[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolchoice.md b/packages/mistralai_gcp/docs/models/toolchoice.md index 792ebcd6..373046bb 100644 --- a/packages/mistralai_gcp/docs/models/toolchoice.md +++ b/packages/mistralai_gcp/docs/models/toolchoice.md @@ -7,5 +7,5 @@ ToolChoice is either a ToolChoiceEnum or a ToolChoice | Field | Type | Required | Description | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `function` | [models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | -| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | [Optional[models.ToolTypes]](../models/tooltypes.md) | :heavy_minus_sign: | N/A | +| `function` | 
[models.FunctionName](../models/functionname.md) | :heavy_check_mark: | this restriction of `Function` is used to select a specific function to call | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usageinfo.md b/packages/mistralai_gcp/docs/models/usageinfo.md index 9f56a3ae..f5204ac9 100644 --- a/packages/mistralai_gcp/docs/models/usageinfo.md +++ b/packages/mistralai_gcp/docs/models/usageinfo.md @@ -3,8 +3,10 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | -| `prompt_tokens` | *int* | :heavy_check_mark: | N/A | 16 | -| `completion_tokens` | *int* | :heavy_check_mark: | N/A | 34 | -| `total_tokens` | *int* | :heavy_check_mark: | N/A | 50 | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `prompt_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `completion_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `total_tokens` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `prompt_audio_seconds` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/pylintrc b/packages/mistralai_gcp/pylintrc index c80721af..a8fcb932 100644 --- a/packages/mistralai_gcp/pylintrc +++ b/packages/mistralai_gcp/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.10 +py-version=3.9 # Discover python modules and packages in the file system subtree. recursive=no @@ -188,8 +188,8 @@ good-names=i, Run, _, e, - n, - id + id, + n # Good variable names regexes, separated by a comma. 
If names match any regex, # they will always be accepted @@ -458,7 +458,8 @@ disable=raw-checker-failed, relative-beyond-top-level, consider-using-with, wildcard-import, - unused-wildcard-import + unused-wildcard-import, + too-many-return-statements # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option @@ -659,4 +660,4 @@ init-import=no # List of qualified module names which can have objects that can redefine # builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io \ No newline at end of file diff --git a/packages/mistralai_gcp/scripts/prepare_readme.py b/packages/mistralai_gcp/scripts/prepare_readme.py new file mode 100644 index 00000000..6c4b9932 --- /dev/null +++ b/packages/mistralai_gcp/scripts/prepare_readme.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import re +import shutil + +try: + with open("README.md", "r", encoding="utf-8") as rh: + readme_contents = rh.read() + GITHUB_URL = "https://github.com/mistralai/client-python.git" + GITHUB_URL = ( + GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL + ) + REPO_SUBDIR = "packages/mistralai_gcp" + # Ensure the subdirectory has a trailing slash + if not REPO_SUBDIR.endswith("/"): + REPO_SUBDIR += "/" + # links on PyPI should have absolute URLs + readme_contents = re.sub( + r"(\[[^\]]+\]\()((?!https?:)[^\)]+)(\))", + lambda m: m.group(1) + + GITHUB_URL + + "/blob/master/" + + REPO_SUBDIR + + m.group(2) + + m.group(3), + readme_contents, + ) + + with open("README-PYPI.md", "w", encoding="utf-8") as wh: + wh.write(readme_contents) +except Exception as e: + try: + print("Failed to rewrite README.md to README-PYPI.md, copying original instead") + print(e) + shutil.copyfile("README.md", "README-PYPI.md") + except Exception as ie: + print("Failed to copy README.md to README-PYPI.md") + print(ie) diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/mistralai_gcp/scripts/publish.sh index e9eb1f0b..c35748f3 100755 --- a/packages/mistralai_gcp/scripts/publish.sh +++ b/packages/mistralai_gcp/scripts/publish.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -export UV_PUBLISH_TOKEN=${PYPI_TOKEN} +uv run python scripts/prepare_readme.py -uv run python ../../scripts/prepare_readme.py --repo-subdir packages/mistralai_gcp -- uv build -uv publish +uv build +uv publish --token $PYPI_TOKEN diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py index bb867b5b..f8088f4c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py @@ -3,10 +3,12 @@ from abc import ABC, abstractmethod import httpx from mistralai_gcp.httpclient import HttpClient +from 
mistralai_gcp.sdkconfiguration import SDKConfiguration from typing import Any, Callable, List, Optional, Tuple, Union class HookContext: + config: SDKConfiguration base_url: str operation_id: str oauth2_scopes: Optional[List[str]] = None @@ -14,11 +16,13 @@ class HookContext: def __init__( self, + config: SDKConfiguration, base_url: str, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]], ): + self.config = config self.base_url = base_url self.operation_id = operation_id self.oauth2_scopes = oauth2_scopes @@ -28,6 +32,7 @@ def __init__( class BeforeRequestContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -38,6 +43,7 @@ def __init__(self, hook_ctx: HookContext): class AfterSuccessContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, @@ -48,6 +54,7 @@ def __init__(self, hook_ctx: HookContext): class AfterErrorContext(HookContext): def __init__(self, hook_ctx: HookContext): super().__init__( + hook_ctx.config, hook_ctx.base_url, hook_ctx.operation_id, hook_ctx.oauth2_scopes, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index 11f38b63..acd8086c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "1.6.0" -__openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.548.6" -__user_agent__: str = "speakeasy-sdk/python 1.6.0 2.548.6 0.0.2 mistralai-gcp" +__version__: str = "1.7.0" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.687.13" +__user_agent__: str = "speakeasy-sdk/python 1.7.0 2.687.13 1.0.0 mistralai-gcp" 
try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index bb0aab96..f22e2346 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -15,9 +15,19 @@ class BaseSDK: sdk_configuration: SDKConfiguration + parent_ref: Optional[object] = None + """ + Reference to the root SDK instance, if any. This will prevent it from + being garbage collected while there are active streams. + """ - def __init__(self, sdk_config: SDKConfiguration) -> None: + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: self.sdk_configuration = sdk_config + self.parent_ref = parent_ref def _get_url(self, base_url, url_variables): sdk_url, sdk_variables = self.sdk_configuration.get_server_details() @@ -218,12 +228,12 @@ def do_request( client = self.sdk_configuration.client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -237,16 +247,14 @@ def do(): http_res = client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -257,7 +265,7 @@ def do(): ) if 
utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -267,7 +275,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res @@ -277,9 +285,7 @@ def do(): http_res = do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res @@ -294,12 +300,12 @@ async def do_request_async( client = self.sdk_configuration.async_client logger = self.sdk_configuration.debug_logger + hooks = self.sdk_configuration.__dict__["_hooks"] + async def do(): http_res = None try: - req = self.sdk_configuration.get_hooks().before_request( - BeforeRequestContext(hook_ctx), request - ) + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -313,16 +319,14 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = self.sdk_configuration.get_hooks().after_error( - AfterErrorContext(hook_ctx), None, e - ) + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.SDKError("No response received") + raise models.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -333,7 +337,7 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = 
self.sdk_configuration.get_hooks().after_error( + result, err = hooks.after_error( AfterErrorContext(hook_ctx), http_res, None ) if err is not None: @@ -343,7 +347,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred") + raise models.SDKError("Unexpected error occurred", http_res) return http_res @@ -355,8 +359,6 @@ async def do(): http_res = await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = self.sdk_configuration.get_hooks().after_success( - AfterSuccessContext(hook_ctx), http_res - ) + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) return http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index dba369bf..57b94eaf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -5,6 +5,7 @@ from mistralai_gcp._hooks import HookContext from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming +from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union @@ -41,6 +42,7 @@ def stream( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -58,14 +60,15 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -104,6 +107,7 @@ def stream( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request( @@ -135,6 +139,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], @@ -152,32 +157,23 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise 
models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -209,6 +205,7 @@ async def stream_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -226,14 +223,15 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
- :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. 
:param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -272,6 +270,7 @@ async def stream_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request_async( @@ -303,6 +302,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", oauth2_scopes=[], @@ -320,32 +320,23 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", 
http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -385,6 +376,7 @@ def complete( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -400,14 +392,15 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -448,6 +441,7 @@ def complete( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request( @@ -479,6 +473,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], @@ -491,33 +486,22 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] + return unmarshal_json_response( + Optional[models.ChatCompletionResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + 
raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -557,6 +541,7 @@ async def complete_async( Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -572,14 +557,15 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
- :param response_format: - :param tools: - :param tool_choice: - :param presence_penalty: presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
+ :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: - :param parallel_tool_calls: + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -620,6 +606,7 @@ async def complete_async( prediction, Optional[models.Prediction] ), parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, ) req = self._build_request_async( @@ -651,6 +638,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", oauth2_scopes=[], @@ -663,30 +651,19 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.ChatCompletionResponse] + return unmarshal_json_response( + Optional[models.ChatCompletionResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - 
http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 84821c6a..5909bf69 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -5,6 +5,7 @@ from mistralai_gcp._hooks import HookContext from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming +from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Mapping, Optional, Union @@ -38,7 +39,7 @@ def stream( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
@@ -105,6 +106,7 @@ def stream( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], @@ -122,32 +124,23 @@ def stream( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -176,7 +169,7 @@ async def stream_async( Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
@@ -243,6 +236,7 @@ async def stream_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", oauth2_scopes=[], @@ -260,32 +254,23 @@ async def stream_async( http_res, lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), sentinel="[DONE]", + client_ref=self, ) if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) - response_data = utils.unmarshal_json( - http_res_text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -314,7 +299,7 @@ def complete( FIM completion. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. 
:param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. @@ -381,6 +366,7 @@ def complete( http_res = self.do_request( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], @@ -393,33 +379,22 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.FIMCompletionResponse] + return unmarshal_json_response( + Optional[models.FIMCompletionResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - 
) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -448,7 +423,7 @@ async def complete_async( FIM completion. - :param model: ID of the model to use. Only compatible for now with: - `codestral-2405` - `codestral-latest` + :param model: ID of the model with FIM to use. :param prompt: The text/code to complete. :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
@@ -515,6 +490,7 @@ async def complete_async( http_res = await self.do_request_async( hook_ctx=HookContext( + config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", oauth2_scopes=[], @@ -527,30 +503,19 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return utils.unmarshal_json( - http_res.text, Optional[models.FIMCompletionResponse] + return unmarshal_json_response( + Optional[models.FIMCompletionResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): - response_data = utils.unmarshal_json( - http_res.text, models.HTTPValidationErrorData + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(data=response_data) + raise models.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - "API error occurred", http_res.status_code, http_res_text, http_res - ) + raise models.SDKError("API error occurred", http_res, http_res_text) - content_type = http_res.headers.get("Content-Type") - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError( - f"Unexpected response received (code: {http_res.status_code}, type: {content_type})", - http_res.status_code, - http_res_text, - http_res, - ) + raise models.SDKError("Unexpected response received", http_res) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py index 1e426352..47b052cb 100644 --- 
a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py @@ -2,7 +2,6 @@ # pyright: reportReturnType = false import asyncio -from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -116,21 +115,12 @@ def close_clients( pass if async_client is not None and not async_client_supplied: - is_async = False try: - asyncio.get_running_loop() - is_async = True + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) except RuntimeError: - pass - - try: - # If this function is called in an async loop then start another - # loop in a separate thread to close the async http client. - if is_async: - with ThreadPoolExecutor(max_workers=1) as executor: - future = executor.submit(asyncio.run, async_client.aclose()) - future.result() - else: + try: asyncio.run(async_client.aclose()) - except Exception: - pass + except RuntimeError: + # best effort + pass diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py index 752e70e6..fe85b133 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py @@ -1,122 +1,154 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .assistantmessage import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentTypedDict, - AssistantMessageRole, - AssistantMessageTypedDict, -) -from .chatcompletionchoice import ( - ChatCompletionChoice, - ChatCompletionChoiceFinishReason, - ChatCompletionChoiceTypedDict, -) -from .chatcompletionrequest import ( - ChatCompletionRequest, - ChatCompletionRequestMessages, - ChatCompletionRequestMessagesTypedDict, - ChatCompletionRequestStop, - ChatCompletionRequestStopTypedDict, - ChatCompletionRequestToolChoice, - ChatCompletionRequestToolChoiceTypedDict, - ChatCompletionRequestTypedDict, -) -from .chatcompletionresponse import ( - ChatCompletionResponse, - ChatCompletionResponseTypedDict, -) -from .chatcompletionstreamrequest import ( - ChatCompletionStreamRequest, - ChatCompletionStreamRequestToolChoice, - ChatCompletionStreamRequestToolChoiceTypedDict, - ChatCompletionStreamRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, -) -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from .completionevent import CompletionEvent, CompletionEventTypedDict -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, - FinishReason, -) -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .deltamessage import Content, ContentTypedDict, DeltaMessage, DeltaMessageTypedDict -from .fimcompletionrequest import ( - FIMCompletionRequest, - FIMCompletionRequestStop, - FIMCompletionRequestStopTypedDict, - FIMCompletionRequestTypedDict, -) -from .fimcompletionresponse import FIMCompletionResponse, FIMCompletionResponseTypedDict -from .fimcompletionstreamrequest import ( - FIMCompletionStreamRequest, - FIMCompletionStreamRequestStop, - FIMCompletionStreamRequestStopTypedDict, - FIMCompletionStreamRequestTypedDict, -) -from .function import Function, FunctionTypedDict -from .functioncall import ( - Arguments, - 
ArgumentsTypedDict, - FunctionCall, - FunctionCallTypedDict, -) -from .functionname import FunctionName, FunctionNameTypedDict -from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData -from .imageurl import ImageURL, ImageURLTypedDict -from .imageurlchunk import ( - ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, - ImageURLChunkTypedDict, -) -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .prediction import Prediction, PredictionTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkType, ReferenceChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .responseformats import ResponseFormats -from .sdkerror import SDKError -from .security import Security, SecurityTypedDict -from .systemmessage import ( - Role, - SystemMessage, - SystemMessageContent, - SystemMessageContentTypedDict, - SystemMessageTypedDict, -) -from .textchunk import TextChunk, TextChunkTypedDict, Type -from .tool import Tool, ToolTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ( - ToolMessage, - ToolMessageContent, - ToolMessageContentTypedDict, - ToolMessageRole, - ToolMessageTypedDict, -) -from .tooltypes import ToolTypes -from .usageinfo import UsageInfo, UsageInfoTypedDict -from .usermessage import ( - UserMessage, - UserMessageContent, - UserMessageContentTypedDict, - UserMessageRole, - UserMessageTypedDict, -) -from .validationerror import ( - Loc, - LocTypedDict, - ValidationError, - ValidationErrorTypedDict, -) +from .mistralgcperror import MistralGcpError +from typing import TYPE_CHECKING +from importlib import import_module +import builtins +import sys +if TYPE_CHECKING: + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageRole, + 
AssistantMessageTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceFinishReason, + ChatCompletionChoiceTypedDict, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestMessages, + ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, + ) + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, + FinishReason, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict + from .deltamessage import ( + Content, + ContentTypedDict, + DeltaMessage, + DeltaMessageTypedDict, + ) + from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, + ) + from .fimcompletionresponse import ( + FIMCompletionResponse, + FIMCompletionResponseTypedDict, + ) + from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, + ) + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functionname import FunctionName, 
FunctionNameTypedDict + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .mistralpromptmode import MistralPromptMode + from .no_response_error import NoResponseError + from .prediction import Prediction, PredictionTypedDict + from .referencechunk import ( + ReferenceChunk, + ReferenceChunkType, + ReferenceChunkTypedDict, + ) + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + from .security import Security, SecurityTypedDict + from .systemmessage import ( + Role, + SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) + from .textchunk import TextChunk, TextChunkTypedDict, Type + from .thinkchunk import ( + ThinkChunk, + ThinkChunkType, + ThinkChunkTypedDict, + Thinking, + ThinkingTypedDict, + ) + from .tool import Tool, ToolTypedDict + from .toolcall import ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, + ) + from .tooltypes import ToolTypes + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + 
ValidationErrorTypedDict, + ) __all__ = [ "Arguments", @@ -187,6 +219,9 @@ "LocTypedDict", "Messages", "MessagesTypedDict", + "MistralGcpError", + "MistralPromptMode", + "NoResponseError", "Prediction", "PredictionTypedDict", "ReferenceChunk", @@ -195,6 +230,7 @@ "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", + "ResponseValidationError", "Role", "SDKError", "Security", @@ -203,10 +239,17 @@ "StopTypedDict", "SystemMessage", "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", "SystemMessageContentTypedDict", "SystemMessageTypedDict", "TextChunk", "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkType", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", "Tool", "ToolCall", "ToolCallTypedDict", @@ -231,3 +274,165 @@ "ValidationError", "ValidationErrorTypedDict", ] + +_dynamic_imports: dict[str, str] = { + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageRole": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessages": ".chatcompletionrequest", + "ChatCompletionRequestMessagesTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": ".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", 
+ "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "Messages": ".chatcompletionstreamrequest", + "MessagesTypedDict": ".chatcompletionstreamrequest", + "Stop": ".chatcompletionstreamrequest", + "StopTypedDict": ".chatcompletionstreamrequest", + "CompletionChunk": ".completionchunk", + "CompletionChunkTypedDict": ".completionchunk", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "FinishReason": ".completionresponsestreamchoice", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "Content": ".deltamessage", + "ContentTypedDict": ".deltamessage", + "DeltaMessage": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "FIMCompletionRequest": ".fimcompletionrequest", + "FIMCompletionRequestStop": ".fimcompletionrequest", + "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", + "FIMCompletionRequestTypedDict": ".fimcompletionrequest", + "FIMCompletionResponse": ".fimcompletionresponse", + "FIMCompletionResponseTypedDict": ".fimcompletionresponse", + "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + "HTTPValidationError": 
".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkImageURL": ".imageurlchunk", + "ImageURLChunkImageURLTypedDict": ".imageurlchunk", + "ImageURLChunkType": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "MistralPromptMode": ".mistralpromptmode", + "NoResponseError": ".no_response_error", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkType": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", + "Security": ".security", + "SecurityTypedDict": ".security", + "Role": ".systemmessage", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", + "TextChunk": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "Type": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkType": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + "ThinkingTypedDict": ".thinkchunk", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageRole": ".toolmessage", + 
"ToolMessageTypedDict": ".toolmessage", + "ToolTypes": ".tooltypes", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageRole": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", +} + + +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " + ) + + try: + module = dynamic_import(module_name) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 9147f566..794b8c80 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -56,7 +56,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in 
type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index a0125c35..d693e3c3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -17,8 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator +from mistralai_gcp.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -61,11 +63,13 @@ "ChatCompletionRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionRequestToolChoice = TypeAliasType( "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionRequestTypedDict(TypedDict): @@ -86,16 +90,23 @@ class ChatCompletionRequestTypedDict(TypedDict): random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" class ChatCompletionRequest(BaseModel): @@ -124,23 +135,33 @@ class ChatCompletionRequest(BaseModel): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -159,15 +180,23 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py index 0404a9d2..a7953eb1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class ChatCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class ChatCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: List[ChatCompletionChoiceTypedDict] class ChatCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class ChatCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: List[ChatCompletionChoice] diff --git 
a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index 656f1d58..c2d25128 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -2,6 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode from .prediction import Prediction, PredictionTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .systemmessage import SystemMessage, SystemMessageTypedDict @@ -17,8 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator +from mistralai_gcp.utils import get_discriminator, validate_open_enum from pydantic import Discriminator, Tag, model_serializer +from pydantic.functional_validators import PlainValidator from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -57,11 +59,13 @@ "ChatCompletionStreamRequestToolChoiceTypedDict", Union[ToolChoiceTypedDict, ToolChoiceEnum], ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" ChatCompletionStreamRequestToolChoice = TypeAliasType( "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] ) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" class ChatCompletionStreamRequestTypedDict(TypedDict): @@ -81,16 +85,23 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: NotRequired[float] - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: NotRequired[float] - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: NotRequired[Nullable[int]] r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" class ChatCompletionStreamRequest(BaseModel): @@ -118,23 +129,33 @@ class ChatCompletionStreamRequest(BaseModel): r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. 
By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" presence_penalty: Optional[float] = None - r"""presence_penalty determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" frequency_penalty: Optional[float] = None - r"""frequency_penalty penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" n: OptionalNullable[int] = UNSET r"""Number of completions to return for each request, input tokens are only billed once.""" prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: Annotated[ + OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) + ] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -153,15 +174,23 @@ def serialize_model(self, handler): "n", "prediction", "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "tools", + "n", + "prompt_mode", ] - nullable_fields = ["temperature", "max_tokens", "random_seed", "tools", "n"] null_default_fields = [] serialized = handler(self) m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py index 8d779971..1be7dbdc 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py @@ -38,7 +38,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py index f9f0868b..1801ac76 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py @@ -46,7 +46,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 6dfb7373..12af226c 100644 --- 
a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -27,10 +27,7 @@ class FIMCompletionRequestTypedDict(TypedDict): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" temperature: NotRequired[Nullable[float]] @@ -53,10 +50,7 @@ class FIMCompletionRequestTypedDict(TypedDict): class FIMCompletionRequest(BaseModel): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" @@ -110,7 +104,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py index a4d273a2..e1940b0a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py @@ -4,8 +4,8 @@ from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai_gcp.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict +from typing import List +from typing_extensions import TypedDict class FIMCompletionResponseTypedDict(TypedDict): @@ -13,8 +13,8 @@ class FIMCompletionResponseTypedDict(TypedDict): object: str model: str usage: UsageInfoTypedDict - created: NotRequired[int] - choices: NotRequired[List[ChatCompletionChoiceTypedDict]] + created: int + choices: 
List[ChatCompletionChoiceTypedDict] class FIMCompletionResponse(BaseModel): @@ -26,6 +26,6 @@ class FIMCompletionResponse(BaseModel): usage: UsageInfo - created: Optional[int] = None + created: int - choices: Optional[List[ChatCompletionChoice]] = None + choices: List[ChatCompletionChoice] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index 406749bb..ba7a66d2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -27,10 +27,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): model: str - r"""ID of the model to use. Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" temperature: NotRequired[Nullable[float]] @@ -52,10 +49,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): class FIMCompletionStreamRequest(BaseModel): model: str - r"""ID of the model to use. 
Only compatible for now with: - - `codestral-2405` - - `codestral-latest` - """ + r"""ID of the model with FIM to use.""" prompt: str r"""The text/code to complete.""" @@ -108,7 +102,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py index 11024f85..e1f11adc 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py @@ -2,7 +2,8 @@ from __future__ import annotations from .validationerror import ValidationError -from mistralai_gcp import utils +import httpx +from mistralai_gcp.models import MistralGcpError from mistralai_gcp.types import BaseModel from typing import List, Optional @@ -11,11 +12,15 @@ class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None -class HTTPValidationError(Exception): +class HTTPValidationError(MistralGcpError): data: HTTPValidationErrorData - def __init__(self, data: HTTPValidationErrorData): + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) self.data = data - - def __str__(self) -> str: - return utils.marshal_json(self.data, HTTPValidationErrorData) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py index e7aa11f0..20d4ba77 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py @@ -32,7 +32,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for 
n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py index 2529ce31..26914b2f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py @@ -40,7 +40,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py new file mode 100644 index 00000000..a3c60cec --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Optional + + +class MistralGcpError(Exception): + """The base class for all HTTP error responses.""" + + message: str + status_code: int + body: str + headers: httpx.Headers + raw_response: httpx.Response + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + self.message = message + self.status_code = raw_response.status_code + self.body = body if body is not None else raw_response.text + self.headers = raw_response.headers + self.raw_response = raw_response + + def __str__(self): + return self.message diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py new file mode 100644 index 00000000..3f4de0fa --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_gcp.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py b/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py new file mode 100644 index 00000000..f98beea2 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +class NoResponseError(Exception): + """Error raised when no HTTP response is received from the server.""" + + message: str + + def __init__(self, message: str = "No response received"): + self.message = message + super().__init__(message) + + def __str__(self): + return self.message diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py index 742aac0b..36c87ab0 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py @@ -10,11 +10,15 @@ class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + type: Literal["content"] content: NotRequired[str] class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + TYPE: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py index 5a24f644..9fe5116c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py @@ -16,14 +16,16 @@ class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: NotRequired[ResponseFormats] - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + type: Optional[ResponseFormats] = None - r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" json_schema: OptionalNullable[JSONSchema] = UNSET @@ -37,7 +39,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py index 08c39951..258fe70e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py @@ -5,4 +5,3 @@ ResponseFormats = Literal["text", "json_object", "json_schema"] -r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py new file mode 100644 index 00000000..8d9b9f60 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Optional + +from mistralai_gcp.models import MistralGcpError + + +class ResponseValidationError(MistralGcpError): + """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" + + def __init__( + self, + message: str, + raw_response: httpx.Response, + cause: Exception, + body: Optional[str] = None, + ): + message = f"{message}: {cause}" + super().__init__(message, raw_response, body) + + @property + def cause(self): + """Normally the Pydantic ValidationError""" + return self.__cause__ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py index 03216cbf..e85b4f49 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py @@ -1,22 +1,38 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from dataclasses import dataclass -from typing import Optional import httpx +from typing import Optional + +from mistralai_gcp.models import MistralGcpError + +MAX_MESSAGE_LEN = 10_000 + + +class SDKError(MistralGcpError): + """The fallback error class if no more specific error class is matched.""" + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + body_display = body or raw_response.text or '""' + if message: + message += ": " + message += f"Status {raw_response.status_code}" -@dataclass -class SDKError(Exception): - """Represents an error returned by the API.""" + headers = raw_response.headers + content_type = headers.get("content-type", '""') + if content_type != "application/json": + if " " in content_type: + content_type = f'"{content_type}"' + message += f" Content-Type {content_type}" - message: str - status_code: int = -1 - body: str = "" - raw_response: Optional[httpx.Response] = None + if len(body_display) > MAX_MESSAGE_LEN: + truncated = body_display[:MAX_MESSAGE_LEN] + remaining = len(body_display) - MAX_MESSAGE_LEN + body_display = f"{truncated}...and {remaining} more chars" - def __str__(self): - body = "" - if len(self.body) > 0: - body = f"\n{self.body}" + message += f". Body: {body_display}" + message = message.strip() - return f"{self.message}: Status {self.status_code}{body}" + super().__init__(message, raw_response, body) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py index f14acf12..e0fa6993 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py @@ -1,19 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict +from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, +) from mistralai_gcp.types import BaseModel from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict SystemMessageContentTypedDict = TypeAliasType( - "SystemMessageContentTypedDict", Union[str, List[TextChunkTypedDict]] + "SystemMessageContentTypedDict", + Union[str, List[SystemMessageContentChunksTypedDict]], ) SystemMessageContent = TypeAliasType( - "SystemMessageContent", Union[str, List[TextChunk]] + "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py new file mode 100644 index 00000000..e0b5bbc3 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from mistralai_gcp.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +SystemMessageContentChunksTypedDict = TypeAliasType( + "SystemMessageContentChunksTypedDict", + Union[TextChunkTypedDict, ThinkChunkTypedDict], +) + + +SystemMessageContentChunks = Annotated[ + Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py new file mode 100644 index 00000000..9c3010e2 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from mistralai_gcp.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ThinkingTypedDict = TypeAliasType( + "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +) + + +Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) + + +ThinkChunkType = Literal["thinking"] + + +class ThinkChunkTypedDict(TypedDict): + thinking: List[ThinkingTypedDict] + closed: NotRequired[bool] + r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + type: NotRequired[ThinkChunkType] + + +class ThinkChunk(BaseModel): + thinking: List[Thinking] + + closed: Optional[bool] = None + r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" + + type: Optional[ThinkChunkType] = "thinking" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py index 886b6ff1..bd187b32 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -51,7 +51,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py index 9de6af7e..59f36158 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py @@ -1,19 +1,82 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing_extensions import TypedDict +from mistralai_gcp.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict class UsageInfoTypedDict(TypedDict): - prompt_tokens: int - completion_tokens: int - total_tokens: int + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] class UsageInfo(BaseModel): - prompt_tokens: int + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - completion_tokens: int + prompt_tokens: Optional[int] = 0 - total_tokens: int + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + nullable_fields = ["prompt_audio_seconds"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != 
UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py index 287bb1b4..1f9a1630 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -39,7 +39,7 @@ def serialize_model(self, handler): m = {} - for n, f in self.model_fields.items(): + for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py index c373d27d..cf85c47e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py @@ -1,6 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from ._hooks import SDKHooks from ._version import ( __gen_version__, __openapi_doc_version__, @@ -42,9 +41,6 @@ class SDKConfiguration: retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) timeout_ms: Optional[int] = None - def __post_init__(self): - self._hooks = SDKHooks() - def get_server_details(self) -> Tuple[str, Dict[str, str]]: if self.server_url is not None and self.server_url: return remove_suffix(self.server_url, "/"), {} @@ -55,6 +51,3 @@ def get_server_details(self) -> Tuple[str, Dict[str, str]]: raise ValueError(f'Invalid server "{self.server}"') return SERVERS[self.server], {} - - def get_hooks(self) -> SDKHooks: - return self._hooks diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py index a6187efa..231c2e37 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py @@ -2,7 +2,7 @@ from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union, NewType +from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union from typing_extensions import TypeAliasType, TypeAlias @@ -35,5 +35,5 @@ def __bool__(self) -> Literal[False]: "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) ) -UnrecognizedInt = NewType("UnrecognizedInt", int) -UnrecognizedStr = NewType("UnrecognizedStr", str) +UnrecognizedInt: TypeAlias = int +UnrecognizedStr: TypeAlias = str diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 3cded8fe..56164cf3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -1,50 +1,57 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" -from .annotations import get_discriminator -from .enums import OpenEnumMeta -from .headers import get_headers, get_response_headers -from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, -) -from .queryparams import get_query_params -from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig -from .requestbodies import serialize_request_body, SerializedRequestBody -from .security import get_security -from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - validate_open_enum, -) -from .url import generate_url, template_url, remove_suffix -from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, -) -from .logger import Logger, get_body_content, get_default_logger +from typing import TYPE_CHECKING +from importlib import import_module +import builtins +import sys + +if TYPE_CHECKING: + from .annotations import get_discriminator + from .datetimes import parse_datetime + from .enums import OpenEnumMeta + from .headers import get_headers, get_response_headers + from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, + ) + from .queryparams import get_query_params + from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig + from .requestbodies import serialize_request_body, SerializedRequestBody + from .security import get_security + from 
.serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, + validate_decimal, + validate_float, + validate_int, + validate_open_enum, + ) + from .url import generate_url, template_url, remove_suffix + from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, + cast_partial, + ) + from .logger import Logger, get_body_content, get_default_logger __all__ = [ "BackoffStrategy", @@ -55,6 +62,7 @@ "get_body_content", "get_default_logger", "get_discriminator", + "parse_datetime", "get_global_from_env", "get_headers", "get_pydantic_model", @@ -97,3 +105,93 @@ "validate_open_enum", "cast_partial", ] + +_dynamic_imports: dict[str, str] = { + "BackoffStrategy": ".retries", + "FieldMetadata": ".metadata", + "find_metadata": ".metadata", + "FormMetadata": ".metadata", + "generate_url": ".url", + "get_body_content": ".logger", + "get_default_logger": ".logger", + "get_discriminator": ".annotations", + "parse_datetime": ".datetimes", + "get_global_from_env": ".values", + "get_headers": ".headers", + "get_pydantic_model": ".serializers", + "get_query_params": ".queryparams", + "get_response_headers": ".headers", + "get_security": ".security", + "HeaderMetadata": ".metadata", + "Logger": ".logger", + "marshal_json": ".serializers", + "match_content_type": ".values", + "match_status_codes": ".values", + "match_response": ".values", + "MultipartFormMetadata": ".metadata", + "OpenEnumMeta": ".enums", + "PathParamMetadata": ".metadata", + "QueryParamMetadata": ".metadata", + "remove_suffix": ".url", + "Retries": ".retries", + "retry": ".retries", + "retry_async": ".retries", + "RetryConfig": ".retries", + "RequestMetadata": ".metadata", + "SecurityMetadata": ".metadata", + "serialize_decimal": ".serializers", + "serialize_float": ".serializers", + 
"serialize_int": ".serializers", + "serialize_request_body": ".requestbodies", + "SerializedRequestBody": ".requestbodies", + "stream_to_text": ".serializers", + "stream_to_text_async": ".serializers", + "stream_to_bytes": ".serializers", + "stream_to_bytes_async": ".serializers", + "template_url": ".url", + "unmarshal": ".serializers", + "unmarshal_json": ".serializers", + "validate_decimal": ".serializers", + "validate_const": ".serializers", + "validate_float": ".serializers", + "validate_int": ".serializers", + "validate_open_enum": ".serializers", + "cast_partial": ".values", +} + + +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " + ) + + try: + module = dynamic_import(module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py new file mode 100644 index 00000000..a6c52cd6 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from datetime import datetime +import sys + + +def parse_datetime(datetime_string: str) -> datetime: + """ + Convert a RFC 3339 / ISO 8601 formatted string into a datetime object. + Python versions 3.11 and later support parsing RFC 3339 directly with + datetime.fromisoformat(), but for earlier versions, this function + encapsulates the necessary extra logic. + """ + # Python 3.11 and later can parse RFC 3339 directly + if sys.version_info >= (3, 11): + return datetime.fromisoformat(datetime_string) + + # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix, + # so fix that upfront. + if datetime_string.endswith("Z"): + datetime_string = datetime_string[:-1] + "+00:00" + + return datetime.fromisoformat(datetime_string) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py index c650b10c..c3bc13cf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py @@ -1,34 +1,74 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" import enum - +import sys class OpenEnumMeta(enum.EnumMeta): - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. - # pylint: disable=redefined-builtin + # The __call__ method `boundary` kwarg was added in 3.11 and must be present + # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 + # pylint: disable=unexpected-keyword-arg + # The __call__ method `values` varg must be named for pyright. 
+ # pylint: disable=keyword-arg-before-vararg + + if sys.version_info >= (3, 11): + def __call__( + cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + except ValueError: + return value + else: + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. 
+ # pylint: disable=redefined-builtin - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py index 74a63f75..0969899b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py @@ -17,6 +17,9 @@ class EventStream(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] @@ -25,9 +28,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events(response, decoder, sentinel) + self.client_ref = client_ref def __iter__(self): return self @@ -43,6 +48,9 @@ def __exit__(self, exc_type, exc_val, exc_tb): class EventStreamAsync(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. 
+ client_ref: Optional[object] response: httpx.Response generator: AsyncGenerator[T, None] @@ -51,9 +59,11 @@ def __init__( response: httpx.Response, decoder: Callable[[str], T], sentinel: Optional[str] = None, + client_ref: Optional[object] = None, ): self.response = response self.generator = stream_events_async(response, decoder, sentinel) + self.client_ref = client_ref def __aiter__(self): return self diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py index 0472aba8..e873495f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py @@ -86,11 +86,39 @@ def _populate_form( return form +def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: + """Extract file name, content, and content type from a file object.""" + file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields + + file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + def serialize_multipart_form( media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], Dict[str, Any]]: +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: form: Dict[str, Any] = {} - files: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] if not isinstance(request, BaseModel): raise TypeError("invalid request body type") @@ -112,39 +140,32 @@ def serialize_multipart_form( f_name 
= field.alias if field.alias else name if field_metadata.file: - file_fields: Dict[str, FieldInfo] = val.__class__.model_fields - - file_name = "" - content = None - content_type = None - - for file_field_name in file_fields: - file_field = file_fields[file_field_name] + if isinstance(val, List): + # Handle array of files + for file_obj in val: + if not _is_set(file_obj): + continue + + file_name, content, content_type = _extract_file_properties(file_obj) - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue + if content_type is not None: + files.append((f_name + "[]", (file_name, content, content_type))) + else: + files.append((f_name + "[]", (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) - if file_metadata.content: - content = getattr(val, file_field_name, None) - elif file_field_name == "content_type": - content_type = getattr(val, file_field_name, None) + if content_type is not None: + files.append((f_name, (file_name, content, content_type))) else: - file_name = getattr(val, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - if content_type is not None: - files[f_name] = (file_name, content, content_type) - else: - files[f_name] = (file_name, content) + files.append((f_name, (file_name, content))) elif field_metadata.json: - files[f_name] = ( + files.append((f_name, ( None, marshal_json(val, request_field_types[name]), "application/json", - ) + ))) else: if isinstance(val, List): values = [] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index baa41fbd..378a14c0 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -1,13 +1,16 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" from decimal import Decimal +import functools import json -from typing import Any, Dict, List, Union, get_args -import httpx +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions from typing_extensions import get_origin + +import httpx from pydantic import ConfigDict, create_model from pydantic_core import from_json -from typing_inspection.typing_objects import is_union from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset @@ -185,6 +188,15 @@ def is_nullable(field): return False +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. + """ + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) + + def stream_to_text(stream: httpx.Response) -> str: return "".join(stream.iter_text()) @@ -217,3 +229,21 @@ def _contains_pydantic_model(data: Any) -> bool: return any(_contains_pydantic_model(value) for value in data.values()) return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. 
+ Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py new file mode 100644 index 00000000..8fe5c996 --- /dev/null +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any, Optional + +import httpx + +from .serializers import unmarshal_json +from mistralai_gcp import models + + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise models.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e From 798b290a0b1679c816ba0b11c8c6418550ba9164 Mon Sep 17 00:00:00 2001 From: Guillaume Dumont Date: Tue, 13 Jan 2026 17:45:46 +0100 Subject: [PATCH 174/223] Fix missing dependency of the workflow on pyright (#322) Changes: * Added missing dependency on pyright in Azure/GCP's dev dependency groups * Fixed the github token used to publish the pull request --- .github/workflows/update_speakeasy.yaml | 4 ++-- packages/mistralai_azure/pyproject.toml | 1 + packages/mistralai_gcp/pyproject.toml | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update_speakeasy.yaml b/.github/workflows/update_speakeasy.yaml index 9628bffa..78b5317b 100644 --- a/.github/workflows/update_speakeasy.yaml +++ 
b/.github/workflows/update_speakeasy.yaml @@ -38,7 +38,7 @@ jobs: - name: Install dependencies run: | - uv sync --group dev --no-default-groups + uv sync --group dev --group lint --no-default-groups - name: Install Speakeasy CLI run: | @@ -102,7 +102,7 @@ jobs: --label speakeasy-update \ --assignee ${{ github.actor }} env: - GITHUB_TOKEN: ${{ secrets.SPEAKEASY_WORKFLOW_GITHUB_PAT }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Comment on workflow run if: steps.check-changes.outputs.has_changes == 'false' diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 016378d5..2f5c92f6 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -15,6 +15,7 @@ dependencies = [ dev = [ "mypy==1.15.0", "pylint==3.2.3", + "pyright>=1.1.401,<2", "pytest>=8.2.2,<9", "pytest-asyncio>=0.23.7,<0.24", ] diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 79b8193b..9293079a 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -19,6 +19,7 @@ dependencies = [ dev = [ "mypy==1.14.1", "pylint==3.2.3", + "pyright>=1.1.401,<2", "pytest>=8.2.2,<9", "pytest-asyncio>=0.23.7,<0.24", "types-python-dateutil>=2.9.0.20240316,<3", From eb85b6caa039318d9693e224d0f137bfe42dcf03 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 15 Jan 2026 19:49:51 +0100 Subject: [PATCH 175/223] ## SDK Changes Detected: (#328) * `mistral.beta.conversations.restart()`: * `request.inputs.[array].[]` **Changed** **Breaking** :warning: * `response.outputs.[].[message_output_entry].content.[array].[]` **Changed** **Breaking** :warning: * `mistral.beta.conversations.start()`: * `request.inputs.[array].[]` **Changed** **Breaking** :warning: * `response.outputs.[].[message_output_entry].content.[array].[]` **Changed** **Breaking** :warning: * 
`mistral.beta.conversations.append()`: * `request.inputs.[array].[]` **Changed** **Breaking** :warning: * `response.outputs.[].[message_output_entry].content.[array].[]` **Changed** **Breaking** :warning: * `mistral.beta.conversations.get_history()`: `response.entries.[]` **Changed** **Breaking** :warning: * `mistral.beta.conversations.get_messages()`: `response.messages.[]` **Changed** **Breaking** :warning: * `mistral.beta.agents.list()`: `request.metadata` **Changed** * `mistral.beta.conversations.list()`: `request.metadata` **Changed** * `mistral.batch.jobs.list()`: `response.data.[].outputs` **Added** * `mistral.batch.jobs.create()`: * `request` **Changed** * `response.outputs` **Added** * `mistral.batch.jobs.get()`: * `request.inline` **Added** * `response.outputs` **Added** * `mistral.batch.jobs.cancel()`: `response.outputs` **Added** * `mistral.embeddings.create()`: `request.metadata` **Added** * `mistral.classifiers.moderate()`: `request.metadata` **Added** * `mistral.classifiers.classify()`: `request.metadata` **Added** Co-authored-by: speakeasybot --- .speakeasy/gen.lock | 21 +++++--- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 ++--- README.md | 18 +++++-- RELEASES.md | 12 ++++- docs/models/audioencoding.md | 13 +++++ docs/models/audioformat.md | 9 ++++ docs/models/batchjobin.md | 3 +- docs/models/batchjobout.md | 1 + docs/models/batchrequest.md | 9 ++++ docs/models/classificationrequest.md | 1 + docs/models/embeddingrequest.md | 1 + .../jobsapiroutesbatchgetbatchjobrequest.md | 7 +-- docs/models/toolfilechunk.md | 2 +- docs/models/toolfilechunktool.md | 17 +++++++ docs/models/toolreferencechunk.md | 2 +- docs/models/toolreferencechunktool.md | 17 +++++++ docs/sdks/classifiers/README.md | 2 + docs/sdks/embeddings/README.md | 1 + docs/sdks/mistraljobs/README.md | 11 +++-- scripts/publish.sh | 7 +-- src/mistralai/_version.py | 4 +- src/mistralai/classifiers.py | 14 +++++- src/mistralai/embeddings.py | 8 +++- src/mistralai/mistral_jobs.py | 36 
++++++++++++-- src/mistralai/models/__init__.py | 31 +++++++++++- .../models/agents_api_v1_agents_listop.py | 2 +- .../agents_api_v1_conversations_listop.py | 2 +- src/mistralai/models/audioencoding.py | 13 +++++ src/mistralai/models/audioformat.py | 19 ++++++++ src/mistralai/models/batchjobin.py | 23 ++++++--- src/mistralai/models/batchjobout.py | 5 ++ src/mistralai/models/batchrequest.py | 48 +++++++++++++++++++ src/mistralai/models/classificationrequest.py | 40 ++++++++++++++-- src/mistralai/models/embeddingrequest.py | 14 ++++-- .../jobs_api_routes_batch_get_batch_jobop.py | 43 +++++++++++++++-- src/mistralai/models/toolfilechunk.py | 15 ++++-- src/mistralai/models/toolreferencechunk.py | 17 +++++-- 38 files changed, 434 insertions(+), 68 deletions(-) create mode 100644 docs/models/audioencoding.md create mode 100644 docs/models/audioformat.md create mode 100644 docs/models/batchrequest.md create mode 100644 docs/models/toolfilechunktool.md create mode 100644 docs/models/toolreferencechunktool.md create mode 100644 src/mistralai/models/audioencoding.py create mode 100644 src/mistralai/models/audioformat.py create mode 100644 src/mistralai/models/batchrequest.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 3136ceae..4dfa69b1 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 3135f1ce6dd57e0487ee2840362ced1a + docChecksum: 274d02258259534e27fc706556e295ef docVersion: 1.0.0 speakeasyVersion: 1.606.10 generationVersion: 2.687.13 - releaseVersion: 1.10.0 - configChecksum: 1446aab5f184e7184590fe5756b556a8 + releaseVersion: 1.10.1 + configChecksum: cfe28e4ccef517bdf74a267098925450 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -28,6 +28,7 @@ features: 
globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.1.1 + includes: 3.0.0 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 nameOverrides: 3.0.1 @@ -94,6 +95,8 @@ generatedFiles: - docs/models/attributes.md - docs/models/audiochunk.md - docs/models/audiochunktype.md + - docs/models/audioencoding.md + - docs/models/audioformat.md - docs/models/audiotranscriptionrequest.md - docs/models/audiotranscriptionrequeststream.md - docs/models/basemodelcard.md @@ -105,6 +108,7 @@ generatedFiles: - docs/models/batchjobsout.md - docs/models/batchjobsoutobject.md - docs/models/batchjobstatus.md + - docs/models/batchrequest.md - docs/models/builtinconnectors.md - docs/models/chatclassificationrequest.md - docs/models/chatcompletionchoice.md @@ -411,11 +415,13 @@ generatedFiles: - docs/models/toolexecutionstartedeventname.md - docs/models/toolexecutionstartedeventtype.md - docs/models/toolfilechunk.md + - docs/models/toolfilechunktool.md - docs/models/toolfilechunktype.md - docs/models/toolmessage.md - docs/models/toolmessagecontent.md - docs/models/toolmessagerole.md - docs/models/toolreferencechunk.md + - docs/models/toolreferencechunktool.md - docs/models/toolreferencechunktype.md - docs/models/tools.md - docs/models/tooltypes.md @@ -474,9 +480,7 @@ generatedFiles: - docs/sdks/models/README.md - docs/sdks/ocr/README.md - docs/sdks/transcriptions/README.md - - poetry.toml - py.typed - - scripts/prepare_readme.py - scripts/publish.sh - src/mistralai/__init__.py - src/mistralai/_hooks/__init__.py @@ -530,6 +534,8 @@ generatedFiles: - src/mistralai/models/archiveftmodelout.py - src/mistralai/models/assistantmessage.py - src/mistralai/models/audiochunk.py + - src/mistralai/models/audioencoding.py + - src/mistralai/models/audioformat.py - src/mistralai/models/audiotranscriptionrequest.py - src/mistralai/models/audiotranscriptionrequeststream.py - src/mistralai/models/basemodelcard.py @@ -538,6 +544,7 @@ generatedFiles: - 
src/mistralai/models/batchjobout.py - src/mistralai/models/batchjobsout.py - src/mistralai/models/batchjobstatus.py + - src/mistralai/models/batchrequest.py - src/mistralai/models/builtinconnectors.py - src/mistralai/models/chatclassificationrequest.py - src/mistralai/models/chatcompletionchoice.py @@ -1145,7 +1152,7 @@ examples: jobs_api_routes_batch_create_batch_job: speakeasy-default-jobs-api-routes-batch-create-batch-job: requestBody: - application/json: {"input_files": ["fe3343a2-3b8d-404b-ba32-a78dede2614a"], "endpoint": "/v1/moderations", "model": "mistral-small-latest", "timeout_hours": 24} + application/json: {"endpoint": "/v1/moderations", "model": "mistral-small-latest", "timeout_hours": 24} responses: "200": application/json: {"id": "", "object": "batch", "input_files": ["7b2553d8-e17f-4df5-a862-a1678f6b5271", "8c618d9f-7d82-42ba-a284-d57d84f50a58", "c042f996-e842-441d-ae47-4e0850334e41"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 395527, "total_requests": 166919, "completed_requests": 258552, "succeeded_requests": 480980, "failed_requests": 684176} @@ -1531,4 +1538,4 @@ examples: application/json: {} examplesVersion: 1.0.2 generatedTests: {} -releaseNotes: "## SDK Changes Detected:\n* `mistral.beta.libraries.create()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.get()`: `response` **Changed** **Breaking** :warning:\n* `mistral.models.list()`: \n * `response.data.[].[base].capabilities` **Changed**\n * `error.status[422]` **Removed** **Breaking** :warning:\n* `mistral.files.list()`: \n * `request.include_total` **Added**\n * `response.total` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.start()`: \n * `request` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.delete()`: \n * `request.org_id` **Changed**\n * 
`response.share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.update_or_create()`: \n * `request.org_id` **Changed**\n * `response.share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.append()`: \n * `request.inputs.[array].[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.accesses.list()`: `response.data.[].share_with_uuid` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.restart()`: \n * `request` **Changed** **Breaking** :warning:\n * `response.outputs.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.update()`: \n * `request.attributes` **Added**\n * `response` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.upload()`: `response` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.documents.list()`: \n * `request.filters_attributes` **Added**\n * `response.data.[]` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.update()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.delete()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.get()`: `response.owner_id` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.get_history()`: `response.entries.[].[tool_execution_entry].name` **Changed** **Breaking** :warning:\n* `mistral.beta.libraries.list()`: `response.data.[].owner_id` **Changed** **Breaking** :warning:\n* `mistral.models.retrieve()`: `response.[base].capabilities` **Changed**\n* `mistral.agents.complete()`: `request.metadata` **Added**\n* `mistral.beta.agents.get()`: \n * `request.agent_version` **Added**\n * `response` **Changed**\n* `mistral.beta.agents.list()`: \n * `request` **Changed**\n * `response.[]` **Changed**\n* `mistral.beta.agents.update_version()`: 
`response` **Changed**\n* `mistral.beta.agents.delete()`: **Added**\n* `mistral.beta.conversations.list()`: \n * `request.metadata` **Added**\n * `response.[]` **Changed**\n* `mistral.beta.conversations.get()`: `response` **Changed**\n* `mistral.beta.agents.update()`: \n * `request` **Changed**\n * `response` **Changed**\n* `mistral.beta.conversations.delete()`: **Added**\n* `mistral.chat.complete()`: `request.metadata` **Added**\n* `mistral.fim.complete()`: `request.metadata` **Added**\n* `mistral.beta.agents.create()`: \n * `request.metadata` **Added**\n * `response` **Changed**\n* `mistral.ocr.process()`: \n * `request` **Changed**\n * `response.pages.[]` **Changed**\n" +releaseNotes: "## SDK Changes Detected:\n* `mistral.beta.conversations.restart()`: \n * `request.inputs.[array].[]` **Changed** **Breaking** :warning:\n * `response.outputs.[].[message_output_entry].content.[array].[]` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.start()`: \n * `request.inputs.[array].[]` **Changed** **Breaking** :warning:\n * `response.outputs.[].[message_output_entry].content.[array].[]` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.append()`: \n * `request.inputs.[array].[]` **Changed** **Breaking** :warning:\n * `response.outputs.[].[message_output_entry].content.[array].[]` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.get_history()`: `response.entries.[]` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.get_messages()`: `response.messages.[]` **Changed** **Breaking** :warning:\n* `mistral.beta.agents.list()`: `request.metadata` **Changed**\n* `mistral.beta.conversations.list()`: `request.metadata` **Changed**\n* `mistral.batch.jobs.list()`: `response.data.[].outputs` **Added**\n* `mistral.batch.jobs.create()`: \n * `request` **Changed**\n * `response.outputs` **Added**\n* `mistral.batch.jobs.get()`: \n * `request.inline` **Added**\n * `response.outputs` **Added**\n* `mistral.batch.jobs.cancel()`: 
`response.outputs` **Added**\n* `mistral.embeddings.create()`: `request.metadata` **Added**\n* `mistral.classifiers.moderate()`: `request.metadata` **Added**\n* `mistral.classifiers.classify()`: `request.metadata` **Added**\n" diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 85ac8dac..bf732c1e 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -21,7 +21,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.10.0 + version: 1.10.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index ac13e7ce..240565eb 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -16,11 +16,11 @@ sources: - speakeasy-sdk-regen-1768231856 mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:cb63bd997cefe7b3b36e91a475df57cb779bf79f183340e0713d8ffb16a2dabc - sourceBlobDigest: sha256:f0caa06fb9bcadc35b097aa5ff69bb5020937652df311722b5e44a282bd95d6d + sourceRevisionDigest: sha256:ebe60088ce4a3780c57a08de7bc73f973f529822a05db12c5d9c6084e9a934e0 + sourceBlobDigest: sha256:c93947af3495a5129cb6aecfe0546463917fbe1f66f2cf8f5a0accb36c035501 tags: - latest - - speakeasy-sdk-regen-1765914268 + - speakeasy-sdk-regen-1768502381 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -39,10 +39,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:cb63bd997cefe7b3b36e91a475df57cb779bf79f183340e0713d8ffb16a2dabc - sourceBlobDigest: sha256:f0caa06fb9bcadc35b097aa5ff69bb5020937652df311722b5e44a282bd95d6d + sourceRevisionDigest: sha256:ebe60088ce4a3780c57a08de7bc73f973f529822a05db12c5d9c6084e9a934e0 + sourceBlobDigest: sha256:c93947af3495a5129cb6aecfe0546463917fbe1f66f2cf8f5a0accb36c035501 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:b1eacff97275a14ab0c2143e07bdfa4f4bd58f5370b2f106bcc6ada92b754d08 + codeSamplesRevisionDigest: 
sha256:3bf740149ae15c0019fa482ffe9f198149759b0140cdfcbf87e5082c0d22e9ac workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.606.10 diff --git a/README.md b/README.md index ba054118..02554d99 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,15 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create yo > > Once a Python version reaches its [official end of life date](https://devguide.python.org/versions/), a 3-month grace period is provided for users to upgrade. Following this grace period, the minimum python version supported in the SDK will be updated. -The SDK can be installed with either *pip* or *uv* package managers. +The SDK can be installed with *uv*, *pip*, or *poetry* package managers. + +### uv + +*uv* is a fast Python package installer and resolver, designed as a drop-in replacement for pip and pip-tools. It's recommended for its speed and modern Python tooling capabilities. + +```bash +uv add mistralai +``` ### PIP @@ -68,12 +76,12 @@ The SDK can be installed with either *pip* or *uv* package managers. pip install mistralai ``` -### UV +### Poetry -*UV* is an extremely fast Python package and project manager. You can use it to add the SDK to your project: +*Poetry* is a modern tool that simplifies dependency management and package publishing by using a single `pyproject.toml` file to handle project metadata and dependencies. ```bash -uv add mistralai +poetry add mistralai ``` ### Shell and script usage with `uv` @@ -89,7 +97,7 @@ It's also possible to write a standalone Python script without needing to set up ```python #!/usr/bin/env -S uv run --script # /// script -# requires-python = ">=3.10" +# requires-python = ">=3.9" # dependencies = [ # "mistralai", # ] diff --git a/RELEASES.md b/RELEASES.md index e43d3f33..092c0122 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -338,4 +338,14 @@ Based on: ### Generated - [python v1.10.0] . 
### Releases -- [PyPI v1.10.0] https://pypi.org/project/mistralai/1.10.0 - . \ No newline at end of file +- [PyPI v1.10.0] https://pypi.org/project/mistralai/1.10.0 - . + +## 2026-01-15 18:39:22 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.606.10 (2.687.13) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.10.1] . +### Releases +- [PyPI v1.10.1] https://pypi.org/project/mistralai/1.10.1 - . \ No newline at end of file diff --git a/docs/models/audioencoding.md b/docs/models/audioencoding.md new file mode 100644 index 00000000..feec8c71 --- /dev/null +++ b/docs/models/audioencoding.md @@ -0,0 +1,13 @@ +# AudioEncoding + + +## Values + +| Name | Value | +| ----------- | ----------- | +| `PCM_S16LE` | pcm_s16le | +| `PCM_S32LE` | pcm_s32le | +| `PCM_F16LE` | pcm_f16le | +| `PCM_F32LE` | pcm_f32le | +| `PCM_MULAW` | pcm_mulaw | +| `PCM_ALAW` | pcm_alaw | \ No newline at end of file diff --git a/docs/models/audioformat.md b/docs/models/audioformat.md new file mode 100644 index 00000000..d174ab99 --- /dev/null +++ b/docs/models/audioformat.md @@ -0,0 +1,9 @@ +# AudioFormat + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `encoding` | [models.AudioEncoding](../models/audioencoding.md) | :heavy_check_mark: | N/A | +| `sample_rate` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobin.md b/docs/models/batchjobin.md index 6fd06696..7dcf265d 100644 --- a/docs/models/batchjobin.md +++ b/docs/models/batchjobin.md @@ -5,7 +5,8 @@ | Field | Type | Required | Description | Example | | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `input_files` | List[*str*] | :heavy_check_mark: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | +| `input_files` | List[*str*] | :heavy_minus_sign: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a "body" field. 
An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | +| `requests` | List[[models.BatchRequest](../models/batchrequest.md)] | :heavy_minus_sign: | N/A | | | `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | mistral-small-latest | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | diff --git a/docs/models/batchjobout.md b/docs/models/batchjobout.md index b66fff08..cb49649b 100644 --- a/docs/models/batchjobout.md +++ b/docs/models/batchjobout.md @@ -15,6 +15,7 @@ | `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | +| `outputs` | List[Dict[str, *Any*]] | :heavy_minus_sign: | N/A | | `status` | [models.BatchJobStatus](../models/batchjobstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | | `total_requests` | *int* | :heavy_check_mark: | N/A | diff --git a/docs/models/batchrequest.md b/docs/models/batchrequest.md new file mode 100644 index 00000000..6ee3b394 --- /dev/null +++ b/docs/models/batchrequest.md @@ -0,0 +1,9 @@ +# BatchRequest + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `custom_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `body` | Dict[str, *Any*] | :heavy_check_mark: 
| N/A | \ No newline at end of file diff --git a/docs/models/classificationrequest.md b/docs/models/classificationrequest.md index 4b38c68a..99cdc4a0 100644 --- a/docs/models/classificationrequest.md +++ b/docs/models/classificationrequest.md @@ -6,4 +6,5 @@ | Field | Type | Required | Description | Example | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | | `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `inputs` | [models.ClassificationRequestInputs](../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. 
| | \ No newline at end of file diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 0f2fc6a6..7269c055 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -6,6 +6,7 @@ | Field | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | | `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. | mistral-embed | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | | `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | | `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | diff --git a/docs/models/jobsapiroutesbatchgetbatchjobrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md index 3930aacd..8c259bea 100644 --- a/docs/models/jobsapiroutesbatchgetbatchjobrequest.md +++ b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `job_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------ | ------------------------ | ------------------------ | ------------------------ | +| `job_id` | *str* | :heavy_check_mark: | N/A | +| `inline` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolfilechunk.md b/docs/models/toolfilechunk.md index f1b54c7c..a3ffaa2b 100644 --- a/docs/models/toolfilechunk.md +++ b/docs/models/toolfilechunk.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | | `type` | [Optional[models.ToolFileChunkType]](../models/toolfilechunktype.md) | :heavy_minus_sign: | N/A | -| `tool` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `tool` | [models.ToolFileChunkTool](../models/toolfilechunktool.md) | :heavy_check_mark: | N/A | | `file_id` | *str* | :heavy_check_mark: | N/A | | 
`file_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `file_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolfilechunktool.md b/docs/models/toolfilechunktool.md new file mode 100644 index 00000000..aa5ac8a9 --- /dev/null +++ b/docs/models/toolfilechunktool.md @@ -0,0 +1,17 @@ +# ToolFileChunkTool + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/toolreferencechunk.md b/docs/models/toolreferencechunk.md index af447aee..3020dbc9 100644 --- a/docs/models/toolreferencechunk.md +++ b/docs/models/toolreferencechunk.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | | `type` | [Optional[models.ToolReferenceChunkType]](../models/toolreferencechunktype.md) | :heavy_minus_sign: | N/A | -| `tool` | [models.BuiltInConnectors](../models/builtinconnectors.md) | :heavy_check_mark: | N/A | +| `tool` | [models.ToolReferenceChunkTool](../models/toolreferencechunktool.md) | :heavy_check_mark: | N/A | | `title` | *str* | :heavy_check_mark: | N/A | | `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `favicon` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/toolreferencechunktool.md b/docs/models/toolreferencechunktool.md new file mode 100644 index 00000000..999f7c34 --- /dev/null +++ b/docs/models/toolreferencechunktool.md @@ -0,0 +1,17 @@ +# ToolReferenceChunkTool + + +## Supported Types + +### `models.BuiltInConnectors` + +```python +value: 
models.BuiltInConnectors = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 75b8c333..55c46d2d 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -44,6 +44,7 @@ with Mistral( | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | | `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -135,6 +136,7 @@ with Mistral( | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-moderation-latest | | `inputs` | [models.ClassificationRequestInputs](../../models/classificationrequestinputs.md) | :heavy_check_mark: | Text to classify. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index b03ea9cd..a071f3b2 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -41,6 +41,7 @@ with Mistral( | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | | `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. | mistral-embed | | `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | | `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | | `encoding_format` | [Optional[models.EncodingFormat]](../../models/encodingformat.md) | :heavy_minus_sign: | N/A | | diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 469a2029..05cddb88 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -73,9 +73,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.create(input_files=[ - "fe3343a2-3b8d-404b-ba32-a78dede2614a", - ], endpoint="/v1/moderations", model="mistral-small-latest", timeout_hours=24) + res = mistral.batch.jobs.create(endpoint="/v1/moderations", model="mistral-small-latest", timeout_hours=24) # Handle response print(res) @@ -86,8 +84,9 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `input_files` | List[*str*] | :heavy_check_mark: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | | `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | | +| `input_files` | List[*str*] | :heavy_minus_sign: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | +| `requests` | List[[models.BatchRequest](../../models/batchrequest.md)] | :heavy_minus_sign: | N/A | | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. 
| mistral-small-latest | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | | `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | @@ -108,6 +107,9 @@ with Mistral( Get a batch job details by its UUID. +Args: + inline: If True, return results inline in the response. + ### Example Usage @@ -132,6 +134,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `job_id` | *str* | :heavy_check_mark: | N/A | +| `inline` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response diff --git a/scripts/publish.sh b/scripts/publish.sh index c41f3efb..c35748f3 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -1,5 +1,6 @@ #!/usr/bin/env bash -export UV_PUBLISH_TOKEN=${PYPI_TOKEN} -uv run python scripts/prepare_readme.py -- uv build -uv publish +uv run python scripts/prepare_readme.py + +uv build +uv publish --token $PYPI_TOKEN diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 851d6fbe..c9b53294 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.10.0" +__version__: str = "1.10.1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.687.13" -__user_agent__: str = "speakeasy-sdk/python 1.10.0 2.687.13 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.10.1 2.687.13 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index cd6a9415..a6b87940 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional, Union +from typing import Any, Dict, Mapping, Optional, Union class Classifiers(BaseSDK): @@ -20,6 +20,7 @@ def moderate( models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -29,6 +30,7 @@ def moderate( :param model: ID of the model to use. :param inputs: Text to classify. 
+ :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -46,6 +48,7 @@ def moderate( request = models.ClassificationRequest( model=model, + metadata=metadata, inputs=inputs, ) @@ -116,6 +119,7 @@ async def moderate_async( models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -125,6 +129,7 @@ async def moderate_async( :param model: ID of the model to use. :param inputs: Text to classify. + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -142,6 +147,7 @@ async def moderate_async( request = models.ClassificationRequest( model=model, + metadata=metadata, inputs=inputs, ) @@ -404,6 +410,7 @@ def classify( models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -413,6 +420,7 @@ def classify( :param model: ID of the model to use. :param inputs: Text to classify. 
+ :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -430,6 +438,7 @@ def classify( request = models.ClassificationRequest( model=model, + metadata=metadata, inputs=inputs, ) @@ -500,6 +509,7 @@ async def classify_async( models.ClassificationRequestInputs, models.ClassificationRequestInputsTypedDict, ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -509,6 +519,7 @@ async def classify_async( :param model: ID of the model to use. :param inputs: Text to classify. + :param metadata: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -526,6 +537,7 @@ async def classify_async( request = models.ClassificationRequest( model=model, + metadata=metadata, inputs=inputs, ) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 76e8e719..349d31ca 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -6,7 +6,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional, Union +from typing import Any, Dict, Mapping, Optional, Union class Embeddings(BaseSDK): @@ -19,6 +19,7 @@ def create( inputs: Union[ models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, output_dimension: OptionalNullable[int] = UNSET, output_dtype: Optional[models.EmbeddingDtype] = None, encoding_format: 
Optional[models.EncodingFormat] = None, @@ -33,6 +34,7 @@ def create( :param model: The ID of the model to be used for embedding. :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param metadata: :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. :param output_dtype: :param encoding_format: @@ -53,6 +55,7 @@ def create( request = models.EmbeddingRequest( model=model, + metadata=metadata, inputs=inputs, output_dimension=output_dimension, output_dtype=output_dtype, @@ -125,6 +128,7 @@ async def create_async( inputs: Union[ models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, output_dimension: OptionalNullable[int] = UNSET, output_dtype: Optional[models.EmbeddingDtype] = None, encoding_format: Optional[models.EncodingFormat] = None, @@ -139,6 +143,7 @@ async def create_async( :param model: The ID of the model to be used for embedding. :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param metadata: :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. 
:param output_dtype: :param encoding_format: @@ -159,6 +164,7 @@ async def create_async( request = models.EmbeddingRequest( model=model, + metadata=metadata, inputs=inputs, output_dimension=output_dimension, output_dtype=output_dtype, diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index 6c213756..09e43fba 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -7,7 +7,7 @@ from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional +from typing import Any, Dict, List, Mapping, Optional, Union class MistralJobs(BaseSDK): @@ -222,8 +222,11 @@ async def list_async( def create( self, *, - input_files: List[str], endpoint: models.APIEndpoint, + input_files: OptionalNullable[List[str]] = UNSET, + requests: OptionalNullable[ + Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] + ] = UNSET, model: OptionalNullable[str] = UNSET, agent_id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, str]] = UNSET, @@ -237,8 +240,9 @@ def create( Create a new batch job, it will be queued for processing. - :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` :param endpoint: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` + :param requests: :param model: The model to be used for batch inference. :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. :param metadata: The metadata of your choice to be associated with the batch inference job. @@ -260,6 +264,9 @@ def create( request = models.BatchJobIn( input_files=input_files, + requests=utils.get_pydantic_model( + requests, OptionalNullable[List[models.BatchRequest]] + ), endpoint=endpoint, model=model, agent_id=agent_id, @@ -323,8 +330,11 @@ def create( async def create_async( self, *, - input_files: List[str], endpoint: models.APIEndpoint, + input_files: OptionalNullable[List[str]] = UNSET, + requests: OptionalNullable[ + Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] + ] = UNSET, model: OptionalNullable[str] = UNSET, agent_id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, str]] = UNSET, @@ -338,8 +348,9 @@ async def create_async( Create a new batch job, it will be queued for processing. 
- :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` :param endpoint: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` + :param requests: :param model: The model to be used for batch inference. :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. :param metadata: The metadata of your choice to be associated with the batch inference job. @@ -361,6 +372,9 @@ async def create_async( request = models.BatchJobIn( input_files=input_files, + requests=utils.get_pydantic_model( + requests, OptionalNullable[List[models.BatchRequest]] + ), endpoint=endpoint, model=model, agent_id=agent_id, @@ -425,6 +439,7 @@ def get( self, *, job_id: str, + inline: OptionalNullable[bool] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -434,7 +449,11 @@ def get( Get a batch job details by its UUID. 
+ Args: + inline: If True, return results inline in the response. + :param job_id: + :param inline: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -452,6 +471,7 @@ def get( request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, + inline=inline, ) req = self._build_request( @@ -508,6 +528,7 @@ async def get_async( self, *, job_id: str, + inline: OptionalNullable[bool] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -517,7 +538,11 @@ async def get_async( Get a batch job details by its UUID. + Args: + inline: If True, return results inline in the response. + :param job_id: + :param inline: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -535,6 +560,7 @@ async def get_async( request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, + inline=inline, ) req = self._build_request_async( diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 7895aeaa..e69acaf8 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -141,6 +141,8 @@ AssistantMessageTypedDict, ) from .audiochunk import AudioChunk, AudioChunkType, AudioChunkTypedDict + from .audioencoding import AudioEncoding + from .audioformat import AudioFormat, AudioFormatTypedDict from .audiotranscriptionrequest import ( AudioTranscriptionRequest, AudioTranscriptionRequestTypedDict, @@ -155,6 +157,7 @@ from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict 
from .batchjobstatus import BatchJobStatus + from .batchrequest import BatchRequest, BatchRequestTypedDict from .builtinconnectors import BuiltInConnectors from .chatclassificationrequest import ( ChatClassificationRequest, @@ -840,7 +843,13 @@ ToolExecutionStartedEventType, ToolExecutionStartedEventTypedDict, ) - from .toolfilechunk import ToolFileChunk, ToolFileChunkType, ToolFileChunkTypedDict + from .toolfilechunk import ( + ToolFileChunk, + ToolFileChunkTool, + ToolFileChunkToolTypedDict, + ToolFileChunkType, + ToolFileChunkTypedDict, + ) from .toolmessage import ( ToolMessage, ToolMessageContent, @@ -850,6 +859,8 @@ ) from .toolreferencechunk import ( ToolReferenceChunk, + ToolReferenceChunkTool, + ToolReferenceChunkToolTypedDict, ToolReferenceChunkType, ToolReferenceChunkTypedDict, ) @@ -1018,6 +1029,9 @@ "AudioChunk", "AudioChunkType", "AudioChunkTypedDict", + "AudioEncoding", + "AudioFormat", + "AudioFormatTypedDict", "AudioTranscriptionRequest", "AudioTranscriptionRequestStream", "AudioTranscriptionRequestStreamTypedDict", @@ -1036,6 +1050,8 @@ "BatchJobsOut", "BatchJobsOutObject", "BatchJobsOutTypedDict", + "BatchRequest", + "BatchRequestTypedDict", "BuiltInConnectors", "ChatClassificationRequest", "ChatClassificationRequestTypedDict", @@ -1567,6 +1583,8 @@ "ToolExecutionStartedEventType", "ToolExecutionStartedEventTypedDict", "ToolFileChunk", + "ToolFileChunkTool", + "ToolFileChunkToolTypedDict", "ToolFileChunkType", "ToolFileChunkTypedDict", "ToolMessage", @@ -1575,6 +1593,8 @@ "ToolMessageRole", "ToolMessageTypedDict", "ToolReferenceChunk", + "ToolReferenceChunkTool", + "ToolReferenceChunkToolTypedDict", "ToolReferenceChunkType", "ToolReferenceChunkTypedDict", "ToolTypedDict", @@ -1724,6 +1744,9 @@ "AudioChunk": ".audiochunk", "AudioChunkType": ".audiochunk", "AudioChunkTypedDict": ".audiochunk", + "AudioEncoding": ".audioencoding", + "AudioFormat": ".audioformat", + "AudioFormatTypedDict": ".audioformat", "AudioTranscriptionRequest": 
".audiotranscriptionrequest", "AudioTranscriptionRequestTypedDict": ".audiotranscriptionrequest", "AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", @@ -1742,6 +1765,8 @@ "BatchJobsOutObject": ".batchjobsout", "BatchJobsOutTypedDict": ".batchjobsout", "BatchJobStatus": ".batchjobstatus", + "BatchRequest": ".batchrequest", + "BatchRequestTypedDict": ".batchrequest", "BuiltInConnectors": ".builtinconnectors", "ChatClassificationRequest": ".chatclassificationrequest", "ChatClassificationRequestTypedDict": ".chatclassificationrequest", @@ -2277,6 +2302,8 @@ "ToolExecutionStartedEventType": ".toolexecutionstartedevent", "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", "ToolFileChunk": ".toolfilechunk", + "ToolFileChunkTool": ".toolfilechunk", + "ToolFileChunkToolTypedDict": ".toolfilechunk", "ToolFileChunkType": ".toolfilechunk", "ToolFileChunkTypedDict": ".toolfilechunk", "ToolMessage": ".toolmessage", @@ -2285,6 +2312,8 @@ "ToolMessageRole": ".toolmessage", "ToolMessageTypedDict": ".toolmessage", "ToolReferenceChunk": ".toolreferencechunk", + "ToolReferenceChunkTool": ".toolreferencechunk", + "ToolReferenceChunkToolTypedDict": ".toolreferencechunk", "ToolReferenceChunkType": ".toolreferencechunk", "ToolReferenceChunkTypedDict": ".toolreferencechunk", "ToolTypes": ".tooltypes", diff --git a/src/mistralai/models/agents_api_v1_agents_listop.py b/src/mistralai/models/agents_api_v1_agents_listop.py index 69a157a6..b3b8765c 100644 --- a/src/mistralai/models/agents_api_v1_agents_listop.py +++ b/src/mistralai/models/agents_api_v1_agents_listop.py @@ -52,7 +52,7 @@ class AgentsAPIV1AgentsListRequest(BaseModel): metadata: Annotated[ OptionalNullable[Dict[str, Any]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + FieldMetadata(query=QueryParamMetadata(serialization="json")), ] = UNSET @model_serializer(mode="wrap") diff --git a/src/mistralai/models/agents_api_v1_conversations_listop.py 
b/src/mistralai/models/agents_api_v1_conversations_listop.py index e1c8489b..d314f838 100644 --- a/src/mistralai/models/agents_api_v1_conversations_listop.py +++ b/src/mistralai/models/agents_api_v1_conversations_listop.py @@ -29,7 +29,7 @@ class AgentsAPIV1ConversationsListRequest(BaseModel): metadata: Annotated[ OptionalNullable[Dict[str, Any]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + FieldMetadata(query=QueryParamMetadata(serialization="json")), ] = UNSET @model_serializer(mode="wrap") diff --git a/src/mistralai/models/audioencoding.py b/src/mistralai/models/audioencoding.py new file mode 100644 index 00000000..7bb03f33 --- /dev/null +++ b/src/mistralai/models/audioencoding.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import UnrecognizedStr +from typing import Literal, Union + + +AudioEncoding = Union[ + Literal[ + "pcm_s16le", "pcm_s32le", "pcm_f16le", "pcm_f32le", "pcm_mulaw", "pcm_alaw" + ], + UnrecognizedStr, +] diff --git a/src/mistralai/models/audioformat.py b/src/mistralai/models/audioformat.py new file mode 100644 index 00000000..075b3c75 --- /dev/null +++ b/src/mistralai/models/audioformat.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .audioencoding import AudioEncoding +from mistralai.types import BaseModel +from mistralai.utils import validate_open_enum +from pydantic.functional_validators import PlainValidator +from typing_extensions import Annotated, TypedDict + + +class AudioFormatTypedDict(TypedDict): + encoding: AudioEncoding + sample_rate: int + + +class AudioFormat(BaseModel): + encoding: Annotated[AudioEncoding, PlainValidator(validate_open_enum(False))] + + sample_rate: int diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py index 475ba863..0c37cce8 100644 --- a/src/mistralai/models/batchjobin.py +++ b/src/mistralai/models/batchjobin.py @@ -2,6 +2,7 @@ from __future__ import annotations from .apiendpoint import APIEndpoint +from .batchrequest import BatchRequest, BatchRequestTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import validate_open_enum from pydantic import model_serializer @@ -11,9 +12,10 @@ class BatchJobInTypedDict(TypedDict): - input_files: List[str] - r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" endpoint: APIEndpoint + input_files: NotRequired[Nullable[List[str]]] + r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" + requests: NotRequired[Nullable[List[BatchRequestTypedDict]]] model: NotRequired[Nullable[str]] r"""The model to be used for batch inference.""" agent_id: NotRequired[Nullable[str]] @@ -25,10 +27,12 @@ class BatchJobInTypedDict(TypedDict): class BatchJobIn(BaseModel): - input_files: List[str] + endpoint: Annotated[APIEndpoint, PlainValidator(validate_open_enum(False))] + + input_files: OptionalNullable[List[str]] = UNSET r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" - endpoint: Annotated[APIEndpoint, PlainValidator(validate_open_enum(False))] + requests: OptionalNullable[List[BatchRequest]] = UNSET model: OptionalNullable[str] = UNSET r"""The model to be used for batch inference.""" @@ -44,8 +48,15 @@ class BatchJobIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["model", "agent_id", "metadata", "timeout_hours"] - nullable_fields = ["model", "agent_id", "metadata"] + optional_fields = [ + "input_files", + "requests", + "model", + "agent_id", + "metadata", + "timeout_hours", + ] + nullable_fields = ["input_files", "requests", "model", "agent_id", "metadata"] null_default_fields = [] 
serialized = handler(self) diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py index 88304313..0173d9a6 100644 --- a/src/mistralai/models/batchjobout.py +++ b/src/mistralai/models/batchjobout.py @@ -29,6 +29,7 @@ class BatchJobOutTypedDict(TypedDict): agent_id: NotRequired[Nullable[str]] output_file: NotRequired[Nullable[str]] error_file: NotRequired[Nullable[str]] + outputs: NotRequired[Nullable[List[Dict[str, Any]]]] started_at: NotRequired[Nullable[int]] completed_at: NotRequired[Nullable[int]] @@ -66,6 +67,8 @@ class BatchJobOut(BaseModel): error_file: OptionalNullable[str] = UNSET + outputs: OptionalNullable[List[Dict[str, Any]]] = UNSET + started_at: OptionalNullable[int] = UNSET completed_at: OptionalNullable[int] = UNSET @@ -79,6 +82,7 @@ def serialize_model(self, handler): "agent_id", "output_file", "error_file", + "outputs", "started_at", "completed_at", ] @@ -88,6 +92,7 @@ def serialize_model(self, handler): "agent_id", "output_file", "error_file", + "outputs", "started_at", "completed_at", ] diff --git a/src/mistralai/models/batchrequest.py b/src/mistralai/models/batchrequest.py new file mode 100644 index 00000000..3d1e98f7 --- /dev/null +++ b/src/mistralai/models/batchrequest.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class BatchRequestTypedDict(TypedDict): + body: Dict[str, Any] + custom_id: NotRequired[Nullable[str]] + + +class BatchRequest(BaseModel): + body: Dict[str, Any] + + custom_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["custom_id"] + nullable_fields = ["custom_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/classificationrequest.py b/src/mistralai/models/classificationrequest.py index 39e25390..8a354378 100644 --- a/src/mistralai/models/classificationrequest.py +++ b/src/mistralai/models/classificationrequest.py @@ -1,10 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict +from pydantic import model_serializer +from typing import Any, Dict, List, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ClassificationRequestInputsTypedDict = TypeAliasType( @@ -24,6 +25,7 @@ class ClassificationRequestTypedDict(TypedDict): r"""ID of the model to use.""" inputs: ClassificationRequestInputsTypedDict r"""Text to classify.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] class ClassificationRequest(BaseModel): @@ -32,3 +34,35 @@ class ClassificationRequest(BaseModel): inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] r"""Text to classify.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py index 4af890a3..44797bfa 100644 --- a/src/mistralai/models/embeddingrequest.py +++ b/src/mistralai/models/embeddingrequest.py @@ -6,7 +6,7 @@ from mistralai.types import BaseModel, Nullable, 
OptionalNullable, UNSET, UNSET_SENTINEL import pydantic from pydantic import model_serializer -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -25,6 +25,7 @@ class EmbeddingRequestTypedDict(TypedDict): r"""The ID of the model to be used for embedding.""" inputs: EmbeddingRequestInputsTypedDict r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] output_dimension: NotRequired[Nullable[int]] r"""The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used.""" output_dtype: NotRequired[EmbeddingDtype] @@ -38,6 +39,8 @@ class EmbeddingRequest(BaseModel): inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + output_dimension: OptionalNullable[int] = UNSET r"""The dimension of the output embeddings when feature available. 
If not provided, a default output dimension will be used.""" @@ -47,8 +50,13 @@ class EmbeddingRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["output_dimension", "output_dtype", "encoding_format"] - nullable_fields = ["output_dimension"] + optional_fields = [ + "metadata", + "output_dimension", + "output_dtype", + "encoding_format", + ] + nullable_fields = ["metadata", "output_dimension"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py index d9c7b398..9bfaf9c5 100644 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +++ b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py @@ -1,16 +1,53 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): job_id: str + inline: NotRequired[Nullable[bool]] class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] + + inline: Annotated[ + OptionalNullable[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["inline"] + nullable_fields = ["inline"] + null_default_fields = [] + + serialized = handler(self) + 
+ m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/toolfilechunk.py b/src/mistralai/models/toolfilechunk.py index 77c07d6d..a6f58147 100644 --- a/src/mistralai/models/toolfilechunk.py +++ b/src/mistralai/models/toolfilechunk.py @@ -4,15 +4,22 @@ from .builtinconnectors import BuiltInConnectors from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ToolFileChunkType = Literal["tool_file"] +ToolFileChunkToolTypedDict = TypeAliasType( + "ToolFileChunkToolTypedDict", Union[BuiltInConnectors, str] +) + + +ToolFileChunkTool = TypeAliasType("ToolFileChunkTool", Union[BuiltInConnectors, str]) + class ToolFileChunkTypedDict(TypedDict): - tool: BuiltInConnectors + tool: ToolFileChunkToolTypedDict file_id: str type: NotRequired[ToolFileChunkType] file_name: NotRequired[Nullable[str]] @@ -20,7 +27,7 @@ class ToolFileChunkTypedDict(TypedDict): class ToolFileChunk(BaseModel): - tool: BuiltInConnectors + tool: ToolFileChunkTool file_id: str diff --git a/src/mistralai/models/toolreferencechunk.py b/src/mistralai/models/toolreferencechunk.py index e50b8451..fb6d8de2 100644 --- a/src/mistralai/models/toolreferencechunk.py +++ b/src/mistralai/models/toolreferencechunk.py @@ -4,15 +4,24 @@ from .builtinconnectors import 
BuiltInConnectors from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ToolReferenceChunkType = Literal["tool_reference"] +ToolReferenceChunkToolTypedDict = TypeAliasType( + "ToolReferenceChunkToolTypedDict", Union[BuiltInConnectors, str] +) + + +ToolReferenceChunkTool = TypeAliasType( + "ToolReferenceChunkTool", Union[BuiltInConnectors, str] +) + class ToolReferenceChunkTypedDict(TypedDict): - tool: BuiltInConnectors + tool: ToolReferenceChunkToolTypedDict title: str type: NotRequired[ToolReferenceChunkType] url: NotRequired[Nullable[str]] @@ -21,7 +30,7 @@ class ToolReferenceChunkTypedDict(TypedDict): class ToolReferenceChunk(BaseModel): - tool: BuiltInConnectors + tool: ToolReferenceChunkTool title: str From 9643a16f7d132778c740e1e0a02120f6c04d818d Mon Sep 17 00:00:00 2001 From: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> Date: Thu, 15 Jan 2026 20:38:40 +0100 Subject: [PATCH 176/223] feat(mistral): add async batch job chat completion example (#329) * feat(mistral): add async batch job chat completion example Adds a new example demonstrating how to use Mistral's async batch job API for chat completion. The example creates a batch job with multiple requests, monitors its status, and prints the results once completed. This provides a practical implementation of asynchronous batch processing with Mistral's API. 
* fix --- .../async_batch_job_chat_completion_inline.py | 40 +++++++++++++++++++ pyproject.toml | 2 +- 2 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 examples/mistral/jobs/async_batch_job_chat_completion_inline.py diff --git a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py new file mode 100644 index 00000000..94a01c6f --- /dev/null +++ b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py @@ -0,0 +1,40 @@ +from mistralai import Mistral, BatchRequest, UserMessage +import os +import asyncio + + +async def main(): + client = Mistral(api_key=os.environ["MISTRAL_API_KEY"]) + + requests = [BatchRequest( + custom_id=str(i), + body=dict( + model="mistral-medium-latest", + messages=[UserMessage( + content=f"What's i + {i}" + )] + ) + ) for i in range(5) + ] + + job = await client.batch.jobs.create_async( + requests=requests, + model="mistral-small-latest", + endpoint="/v1/chat/completions", + metadata={"job_type": "testing"} + ) + + print(f"Created job with ID: {job.id}") + + while job.status not in ["SUCCESS", "FAILED"]: + await asyncio.sleep(1) + job = await client.batch.jobs.get_async(job_id=job.id) + print(f"Job status: {job.status}") + + print(f"Job is done, status {job.status}") + for res in job.outputs: + print(res["response"]["body"]) + +if __name__ == "__main__": + asyncio.run(main()) + diff --git a/pyproject.toml b/pyproject.toml index 3c5b4574..52aef0bb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.10.0" +version = "1.10.1" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" }] requires-python = ">=3.10" From 529bd4ec2a5575814b9e5b9db58915d7881aef75 Mon Sep 17 00:00:00 2001 From: Guillaume Dumont Date: Fri, 16 Jan 2026 10:18:17 +0100 Subject: [PATCH 177/223] Allow the publish workflow to be triggered manually (#331) pyproject.toml is managed manually (until we enable custom code) and we can forget to update the version there. Since the workflow only triggers when the README changes, it will not start if we fix the version in a separate commit. Allowing it to be triggered manually mitigates this issue. --- .github/workflows/sdk_publish_mistralai_sdk.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml index 55b29ec1..a457b6c1 100644 --- a/.github/workflows/sdk_publish_mistralai_sdk.yaml +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -5,6 +5,7 @@ permissions: pull-requests: write statuses: write "on": + workflow_dispatch: push: branches: - main From 143c9e9f16d355b045a79b2e3d9277d03dccc81c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 19 Jan 2026 14:38:40 +0100 Subject: [PATCH 178/223] Update Speakeasy SDKs to version 1.685.0 (#324) --- .gitignore | 1 + .speakeasy/gen.lock | 4446 ++++++++++++++--- .speakeasy/gen.yaml | 14 +- .speakeasy/workflow.lock | 29 +- .speakeasy/workflow.yaml | 2 +- README.md | 49 +- USAGE.md | 4 + docs/sdks/accesses/README.md | 3 +- docs/sdks/agents/README.md | 1 - docs/sdks/audio/README.md | 6 - docs/sdks/batch/README.md | 6 - docs/sdks/beta/README.md | 6 - docs/sdks/chat/README.md | 1 - docs/sdks/classifiers/README.md | 1 - docs/sdks/conversations/README.md | 3 +- docs/sdks/documents/README.md | 3 +- docs/sdks/embeddings/README.md | 1 - docs/sdks/files/README.md | 1 - docs/sdks/fim/README.md | 1 - docs/sdks/finetuning/README.md | 6 - docs/sdks/jobs/README.md | 3 +- docs/sdks/libraries/README.md | 3 
+- docs/sdks/mistral/README.md | 7 - docs/sdks/mistralagents/README.md | 3 +- docs/sdks/mistraljobs/README.md | 3 +- docs/sdks/models/README.md | 1 - docs/sdks/ocr/README.md | 1 - docs/sdks/transcriptions/README.md | 3 +- packages/mistralai_azure/.gitignore | 2 + packages/mistralai_azure/.speakeasy/gen.lock | 745 ++- packages/mistralai_azure/.speakeasy/gen.yaml | 14 +- .../docs/models/chatcompletionrequest.md | 1 + .../models/chatcompletionstreamrequest.md | 1 + .../mistralai_azure/docs/models/format_.md | 11 + .../docs/models/ocrpageobject.md | 4 + .../mistralai_azure/docs/models/ocrrequest.md | 5 +- .../docs/models/ocrtableobject.md | 10 + .../docs/models/tableformat.md | 9 + packages/mistralai_azure/pyproject.toml | 2 +- .../src/mistralai_azure/_version.py | 6 +- .../src/mistralai_azure/basesdk.py | 6 + .../src/mistralai_azure/chat.py | 42 +- .../src/mistralai_azure/httpclient.py | 1 - .../src/mistralai_azure/models/__init__.py | 17 +- .../models/assistantmessage.py | 2 +- .../models/chatcompletionchoice.py | 17 +- .../models/chatcompletionrequest.py | 14 +- .../models/chatcompletionstreamrequest.py | 14 +- .../models/completionresponsestreamchoice.py | 18 +- .../models/documenturlchunk.py | 2 +- .../models/httpvalidationerror.py | 6 +- .../mistralai_azure/models/imageurlchunk.py | 2 +- .../models/mistralazureerror.py | 18 +- .../models/mistralpromptmode.py | 2 +- .../models/no_response_error.py | 6 +- .../mistralai_azure/models/ocrpageobject.py | 37 +- .../src/mistralai_azure/models/ocrrequest.py | 21 +- .../mistralai_azure/models/ocrtableobject.py | 34 + .../mistralai_azure/models/referencechunk.py | 2 +- .../mistralai_azure/models/responseformats.py | 6 +- .../models/responsevalidationerror.py | 2 + .../src/mistralai_azure/models/sdkerror.py | 2 + .../mistralai_azure/models/systemmessage.py | 2 +- .../src/mistralai_azure/models/textchunk.py | 2 +- .../src/mistralai_azure/models/thinkchunk.py | 2 +- .../src/mistralai_azure/models/tool.py | 8 +- 
.../src/mistralai_azure/models/toolcall.py | 8 +- .../src/mistralai_azure/models/toolchoice.py | 8 +- .../mistralai_azure/models/toolchoiceenum.py | 7 +- .../src/mistralai_azure/models/toolmessage.py | 2 +- .../src/mistralai_azure/models/tooltypes.py | 2 +- .../src/mistralai_azure/models/usermessage.py | 2 +- .../src/mistralai_azure/ocr.py | 32 +- .../src/mistralai_azure/types/basemodel.py | 44 +- .../src/mistralai_azure/utils/__init__.py | 3 - .../src/mistralai_azure/utils/annotations.py | 40 +- .../src/mistralai_azure/utils/enums.py | 60 + .../src/mistralai_azure/utils/forms.py | 31 +- .../src/mistralai_azure/utils/queryparams.py | 16 +- .../mistralai_azure/utils/requestbodies.py | 6 +- .../src/mistralai_azure/utils/retries.py | 74 +- .../src/mistralai_azure/utils/serializers.py | 20 - .../utils/unmarshal_json_response.py | 16 +- packages/mistralai_azure/uv.lock | 54 +- packages/mistralai_gcp/.gitignore | 2 + packages/mistralai_gcp/.speakeasy/gen.lock | 687 ++- packages/mistralai_gcp/.speakeasy/gen.yaml | 14 +- .../docs/models/chatcompletionrequest.md | 1 + .../models/chatcompletionstreamrequest.md | 1 + .../docs/models/fimcompletionrequest.md | 1 + .../docs/models/fimcompletionstreamrequest.md | 1 + packages/mistralai_gcp/pyproject.toml | 6 +- .../src/mistralai_gcp/_version.py | 6 +- .../src/mistralai_gcp/basesdk.py | 6 + .../mistralai_gcp/src/mistralai_gcp/chat.py | 42 +- .../mistralai_gcp/src/mistralai_gcp/fim.py | 42 +- .../src/mistralai_gcp/httpclient.py | 1 - .../mistralai_gcp/models/assistantmessage.py | 2 +- .../models/chatcompletionchoice.py | 17 +- .../models/chatcompletionrequest.py | 14 +- .../models/chatcompletionstreamrequest.py | 14 +- .../models/completionresponsestreamchoice.py | 18 +- .../models/fimcompletionrequest.py | 7 +- .../models/fimcompletionstreamrequest.py | 7 +- .../models/httpvalidationerror.py | 6 +- .../src/mistralai_gcp/models/imageurlchunk.py | 2 +- .../mistralai_gcp/models/mistralgcperror.py | 18 +- 
.../mistralai_gcp/models/mistralpromptmode.py | 2 +- .../mistralai_gcp/models/no_response_error.py | 6 +- .../mistralai_gcp/models/referencechunk.py | 2 +- .../mistralai_gcp/models/responseformats.py | 6 +- .../models/responsevalidationerror.py | 2 + .../src/mistralai_gcp/models/sdkerror.py | 2 + .../src/mistralai_gcp/models/systemmessage.py | 2 +- .../src/mistralai_gcp/models/textchunk.py | 2 +- .../src/mistralai_gcp/models/thinkchunk.py | 2 +- .../src/mistralai_gcp/models/tool.py | 8 +- .../src/mistralai_gcp/models/toolcall.py | 8 +- .../src/mistralai_gcp/models/toolchoice.py | 8 +- .../mistralai_gcp/models/toolchoiceenum.py | 7 +- .../src/mistralai_gcp/models/toolmessage.py | 2 +- .../src/mistralai_gcp/models/tooltypes.py | 2 +- .../src/mistralai_gcp/models/usermessage.py | 2 +- .../src/mistralai_gcp/types/basemodel.py | 44 +- .../src/mistralai_gcp/utils/__init__.py | 3 - .../src/mistralai_gcp/utils/annotations.py | 40 +- .../src/mistralai_gcp/utils/enums.py | 60 + .../src/mistralai_gcp/utils/forms.py | 31 +- .../src/mistralai_gcp/utils/queryparams.py | 16 +- .../src/mistralai_gcp/utils/requestbodies.py | 6 +- .../src/mistralai_gcp/utils/retries.py | 74 +- .../src/mistralai_gcp/utils/serializers.py | 20 - .../utils/unmarshal_json_response.py | 16 +- packages/mistralai_gcp/uv.lock | 97 +- pyproject.toml | 2 +- src/mistralai/_version.py | 6 +- src/mistralai/accesses.py | 34 +- src/mistralai/agents.py | 132 +- src/mistralai/basesdk.py | 6 + src/mistralai/chat.py | 136 +- src/mistralai/classifiers.py | 57 +- src/mistralai/conversations.py | 250 +- src/mistralai/documents.py | 98 +- src/mistralai/embeddings.py | 25 +- src/mistralai/files.py | 82 +- src/mistralai/fim.py | 32 +- src/mistralai/httpclient.py | 1 - src/mistralai/jobs.py | 91 +- src/mistralai/libraries.py | 30 +- src/mistralai/mistral_agents.py | 82 +- src/mistralai/mistral_jobs.py | 47 +- src/mistralai/models/agent.py | 2 +- src/mistralai/models/agentconversation.py | 2 +- 
src/mistralai/models/agenthandoffdoneevent.py | 2 +- src/mistralai/models/agenthandoffentry.py | 5 +- .../models/agenthandoffstartedevent.py | 2 +- .../models/agentscompletionrequest.py | 7 +- .../models/agentscompletionstreamrequest.py | 7 +- src/mistralai/models/archiveftmodelout.py | 2 +- src/mistralai/models/assistantmessage.py | 2 +- src/mistralai/models/audiochunk.py | 2 +- src/mistralai/models/audioencoding.py | 7 +- src/mistralai/models/audioformat.py | 6 +- src/mistralai/models/basemodelcard.py | 2 +- src/mistralai/models/batchjobin.py | 6 +- src/mistralai/models/batchjobout.py | 2 +- src/mistralai/models/batchjobsout.py | 2 +- src/mistralai/models/chatcompletionchoice.py | 15 +- src/mistralai/models/chatcompletionrequest.py | 7 +- .../models/chatcompletionstreamrequest.py | 7 +- .../models/classifierdetailedjobout.py | 6 +- src/mistralai/models/classifierftmodelout.py | 5 +- src/mistralai/models/classifierjobout.py | 6 +- src/mistralai/models/codeinterpretertool.py | 2 +- .../models/completiondetailedjobout.py | 7 +- src/mistralai/models/completionftmodelout.py | 5 +- src/mistralai/models/completionjobout.py | 7 +- .../models/completionresponsestreamchoice.py | 17 +- .../models/conversationappendrequest.py | 5 +- .../models/conversationappendstreamrequest.py | 5 +- src/mistralai/models/conversationhistory.py | 3 +- src/mistralai/models/conversationmessages.py | 2 +- src/mistralai/models/conversationrequest.py | 6 +- src/mistralai/models/conversationresponse.py | 3 +- .../models/conversationrestartrequest.py | 5 +- .../conversationrestartstreamrequest.py | 5 +- .../models/conversationstreamrequest.py | 6 +- src/mistralai/models/documentlibrarytool.py | 2 +- src/mistralai/models/documenturlchunk.py | 2 +- src/mistralai/models/embeddingdtype.py | 8 +- src/mistralai/models/encodingformat.py | 5 +- src/mistralai/models/entitytype.py | 9 +- src/mistralai/models/filepurpose.py | 9 +- .../models/files_api_routes_list_filesop.py | 15 +- 
.../models/files_api_routes_upload_fileop.py | 8 +- src/mistralai/models/fileschema.py | 8 +- src/mistralai/models/finetuneablemodeltype.py | 5 +- .../models/ftclassifierlossfunction.py | 5 +- src/mistralai/models/ftmodelcard.py | 2 +- src/mistralai/models/functioncallentry.py | 5 +- src/mistralai/models/functioncallevent.py | 2 +- src/mistralai/models/functionresultentry.py | 5 +- src/mistralai/models/functiontool.py | 2 +- src/mistralai/models/githubrepositoryin.py | 2 +- src/mistralai/models/githubrepositoryout.py | 2 +- src/mistralai/models/httpvalidationerror.py | 6 +- src/mistralai/models/imagegenerationtool.py | 2 +- src/mistralai/models/imageurlchunk.py | 2 +- src/mistralai/models/jobsout.py | 2 +- src/mistralai/models/legacyjobmetadataout.py | 2 +- src/mistralai/models/messageinputentry.py | 12 +- src/mistralai/models/messageoutputentry.py | 9 +- src/mistralai/models/messageoutputevent.py | 6 +- src/mistralai/models/mistralerror.py | 18 +- src/mistralai/models/mistralpromptmode.py | 2 +- src/mistralai/models/modelconversation.py | 2 +- src/mistralai/models/no_response_error.py | 6 +- src/mistralai/models/ocrrequest.py | 5 +- src/mistralai/models/ocrtableobject.py | 5 +- src/mistralai/models/referencechunk.py | 2 +- src/mistralai/models/requestsource.py | 6 +- src/mistralai/models/responsedoneevent.py | 2 +- src/mistralai/models/responseerrorevent.py | 2 +- src/mistralai/models/responseformats.py | 6 +- src/mistralai/models/responsestartedevent.py | 2 +- .../models/responsevalidationerror.py | 2 + src/mistralai/models/retrievefileout.py | 8 +- src/mistralai/models/sampletype.py | 8 +- src/mistralai/models/sdkerror.py | 2 + src/mistralai/models/shareenum.py | 8 +- src/mistralai/models/sharingdelete.py | 6 +- src/mistralai/models/sharingin.py | 8 +- src/mistralai/models/source.py | 9 +- src/mistralai/models/systemmessage.py | 2 +- src/mistralai/models/textchunk.py | 2 +- src/mistralai/models/thinkchunk.py | 2 +- src/mistralai/models/timestampgranularity.py | 
2 +- src/mistralai/models/tool.py | 8 +- src/mistralai/models/toolcall.py | 8 +- src/mistralai/models/toolchoice.py | 8 +- src/mistralai/models/toolchoiceenum.py | 7 +- .../models/toolexecutiondeltaevent.py | 3 +- .../models/toolexecutiondoneevent.py | 3 +- src/mistralai/models/toolexecutionentry.py | 6 +- .../models/toolexecutionstartedevent.py | 3 +- src/mistralai/models/toolfilechunk.py | 3 +- src/mistralai/models/toolmessage.py | 2 +- src/mistralai/models/toolreferencechunk.py | 3 +- src/mistralai/models/tooltypes.py | 2 +- .../models/transcriptionsegmentchunk.py | 2 +- .../models/transcriptionstreamdone.py | 2 +- .../models/transcriptionstreamlanguage.py | 2 +- .../models/transcriptionstreamsegmentdelta.py | 2 +- .../models/transcriptionstreamtextdelta.py | 2 +- src/mistralai/models/unarchiveftmodelout.py | 2 +- src/mistralai/models/uploadfileout.py | 8 +- src/mistralai/models/usermessage.py | 2 +- src/mistralai/models/wandbintegration.py | 2 +- src/mistralai/models/wandbintegrationout.py | 2 +- src/mistralai/models/websearchpremiumtool.py | 2 +- src/mistralai/models/websearchtool.py | 2 +- src/mistralai/models_.py | 36 +- src/mistralai/ocr.py | 42 +- src/mistralai/sdk.py | 4 +- src/mistralai/transcriptions.py | 40 +- src/mistralai/types/basemodel.py | 44 +- src/mistralai/utils/__init__.py | 3 - src/mistralai/utils/annotations.py | 40 +- src/mistralai/utils/enums.py | 60 + src/mistralai/utils/forms.py | 31 +- src/mistralai/utils/queryparams.py | 16 +- src/mistralai/utils/requestbodies.py | 6 +- src/mistralai/utils/retries.py | 74 +- src/mistralai/utils/serializers.py | 20 - .../utils/unmarshal_json_response.py | 16 +- uv.lock | 2 +- 276 files changed, 7577 insertions(+), 2003 deletions(-) delete mode 100644 docs/sdks/audio/README.md delete mode 100644 docs/sdks/batch/README.md delete mode 100644 docs/sdks/beta/README.md delete mode 100644 docs/sdks/finetuning/README.md delete mode 100644 docs/sdks/mistral/README.md create mode 100644 
packages/mistralai_azure/docs/models/format_.md create mode 100644 packages/mistralai_azure/docs/models/ocrtableobject.md create mode 100644 packages/mistralai_azure/docs/models/tableformat.md create mode 100644 packages/mistralai_azure/src/mistralai_azure/models/ocrtableobject.py diff --git a/.gitignore b/.gitignore index 336f773d..cf2de5ee 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.env.local .idea **/__pycache__/ **/.speakeasy/temp/ diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 4dfa69b1..18a38312 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -3,19 +3,23 @@ id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: docChecksum: 274d02258259534e27fc706556e295ef docVersion: 1.0.0 - speakeasyVersion: 1.606.10 - generationVersion: 2.687.13 - releaseVersion: 1.10.1 - configChecksum: cfe28e4ccef517bdf74a267098925450 + speakeasyVersion: 1.685.0 + generationVersion: 2.794.1 + releaseVersion: 1.11.0 + configChecksum: 99d8b30f701935f8b8bf94786669ddb1 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true +persistentEdits: + generation_id: a478cd50-39ad-455d-b98e-792b4cdd77ed + pristine_commit_hash: 59eb5189fd6e8f40f2f2af96df44ce24b3571bf4 + pristine_tree_hash: fa13610a853ef05b5bbabb0bcf0895fbb5a5f02a features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.20.1 + core: 5.23.18 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 @@ -24,765 +28,2965 @@ features: examples: 3.0.2 flatRequests: 1.0.1 flattening: 3.1.1 - globalSecurity: 3.0.3 + globalSecurity: 3.0.4 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.1 + globalServerURLs: 3.2.0 includes: 3.0.0 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 nameOverrides: 3.0.1 nullables: 1.0.1 - openEnums: 
1.0.1 + openEnums: 1.0.2 responseFormat: 1.0.1 - retries: 3.0.2 - sdkHooks: 1.1.0 - serverEvents: 1.0.8 + retries: 3.0.3 + sdkHooks: 1.2.0 + serverEvents: 1.0.11 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.4 + unions: 3.1.1 uploadStreams: 1.0.0 -generatedFiles: - - .gitattributes - - .vscode/settings.json - - USAGE.md - - docs/models/agent.md - - docs/models/agentconversation.md - - docs/models/agentconversationobject.md - - docs/models/agentcreationrequest.md - - docs/models/agentcreationrequesttools.md - - docs/models/agenthandoffdoneevent.md - - docs/models/agenthandoffdoneeventtype.md - - docs/models/agenthandoffentry.md - - docs/models/agenthandoffentryobject.md - - docs/models/agenthandoffentrytype.md - - docs/models/agenthandoffstartedevent.md - - docs/models/agenthandoffstartedeventtype.md - - docs/models/agentobject.md - - docs/models/agentsapiv1agentsdeleterequest.md - - docs/models/agentsapiv1agentsgetrequest.md - - docs/models/agentsapiv1agentslistrequest.md - - docs/models/agentsapiv1agentsupdaterequest.md - - docs/models/agentsapiv1agentsupdateversionrequest.md - - docs/models/agentsapiv1conversationsappendrequest.md - - docs/models/agentsapiv1conversationsappendstreamrequest.md - - docs/models/agentsapiv1conversationsdeleterequest.md - - docs/models/agentsapiv1conversationsgetrequest.md - - docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md - - docs/models/agentsapiv1conversationshistoryrequest.md - - docs/models/agentsapiv1conversationslistrequest.md - - docs/models/agentsapiv1conversationsmessagesrequest.md - - docs/models/agentsapiv1conversationsrestartrequest.md - - docs/models/agentsapiv1conversationsrestartstreamrequest.md - - docs/models/agentscompletionrequest.md - - docs/models/agentscompletionrequestmessages.md - - docs/models/agentscompletionrequeststop.md - - docs/models/agentscompletionrequesttoolchoice.md - - docs/models/agentscompletionstreamrequest.md - - 
docs/models/agentscompletionstreamrequestmessages.md - - docs/models/agentscompletionstreamrequeststop.md - - docs/models/agentscompletionstreamrequesttoolchoice.md - - docs/models/agenttools.md - - docs/models/agentupdaterequest.md - - docs/models/agentupdaterequesttools.md - - docs/models/apiendpoint.md - - docs/models/archiveftmodelout.md - - docs/models/archiveftmodeloutobject.md - - docs/models/arguments.md - - docs/models/assistantmessage.md - - docs/models/assistantmessagecontent.md - - docs/models/assistantmessagerole.md - - docs/models/attributes.md - - docs/models/audiochunk.md - - docs/models/audiochunktype.md - - docs/models/audioencoding.md - - docs/models/audioformat.md - - docs/models/audiotranscriptionrequest.md - - docs/models/audiotranscriptionrequeststream.md - - docs/models/basemodelcard.md - - docs/models/basemodelcardtype.md - - docs/models/batcherror.md - - docs/models/batchjobin.md - - docs/models/batchjobout.md - - docs/models/batchjoboutobject.md - - docs/models/batchjobsout.md - - docs/models/batchjobsoutobject.md - - docs/models/batchjobstatus.md - - docs/models/batchrequest.md - - docs/models/builtinconnectors.md - - docs/models/chatclassificationrequest.md - - docs/models/chatcompletionchoice.md - - docs/models/chatcompletionrequest.md - - docs/models/chatcompletionrequesttoolchoice.md - - docs/models/chatcompletionresponse.md - - docs/models/chatcompletionstreamrequest.md - - docs/models/chatcompletionstreamrequestmessages.md - - docs/models/chatcompletionstreamrequeststop.md - - docs/models/chatcompletionstreamrequesttoolchoice.md - - docs/models/chatmoderationrequest.md - - docs/models/chatmoderationrequestinputs.md - - docs/models/checkpointout.md - - docs/models/classificationrequest.md - - docs/models/classificationrequestinputs.md - - docs/models/classificationresponse.md - - docs/models/classificationtargetresult.md - - docs/models/classifierdetailedjobout.md - - docs/models/classifierdetailedjoboutintegrations.md - - 
docs/models/classifierdetailedjoboutjobtype.md - - docs/models/classifierdetailedjoboutobject.md - - docs/models/classifierdetailedjoboutstatus.md - - docs/models/classifierftmodelout.md - - docs/models/classifierftmodeloutmodeltype.md - - docs/models/classifierftmodeloutobject.md - - docs/models/classifierjobout.md - - docs/models/classifierjoboutintegrations.md - - docs/models/classifierjoboutjobtype.md - - docs/models/classifierjoboutobject.md - - docs/models/classifierjoboutstatus.md - - docs/models/classifiertargetin.md - - docs/models/classifiertargetout.md - - docs/models/classifiertrainingparameters.md - - docs/models/classifiertrainingparametersin.md - - docs/models/codeinterpretertool.md - - docs/models/codeinterpretertooltype.md - - docs/models/completionargs.md - - docs/models/completionargsstop.md - - docs/models/completionchunk.md - - docs/models/completiondetailedjobout.md - - docs/models/completiondetailedjoboutintegrations.md - - docs/models/completiondetailedjoboutjobtype.md - - docs/models/completiondetailedjoboutobject.md - - docs/models/completiondetailedjoboutrepositories.md - - docs/models/completiondetailedjoboutstatus.md - - docs/models/completionevent.md - - docs/models/completionftmodelout.md - - docs/models/completionftmodeloutobject.md - - docs/models/completionjobout.md - - docs/models/completionjoboutobject.md - - docs/models/completionresponsestreamchoice.md - - docs/models/completionresponsestreamchoicefinishreason.md - - docs/models/completiontrainingparameters.md - - docs/models/completiontrainingparametersin.md - - docs/models/content.md - - docs/models/contentchunk.md - - docs/models/conversationappendrequest.md - - docs/models/conversationappendrequesthandoffexecution.md - - docs/models/conversationappendstreamrequest.md - - docs/models/conversationappendstreamrequesthandoffexecution.md - - docs/models/conversationevents.md - - docs/models/conversationeventsdata.md - - docs/models/conversationhistory.md - - 
docs/models/conversationhistoryobject.md - - docs/models/conversationinputs.md - - docs/models/conversationmessages.md - - docs/models/conversationmessagesobject.md - - docs/models/conversationrequest.md - - docs/models/conversationresponse.md - - docs/models/conversationresponseobject.md - - docs/models/conversationrestartrequest.md - - docs/models/conversationrestartrequesthandoffexecution.md - - docs/models/conversationrestartstreamrequest.md - - docs/models/conversationrestartstreamrequesthandoffexecution.md - - docs/models/conversationstreamrequest.md - - docs/models/conversationstreamrequesthandoffexecution.md - - docs/models/conversationstreamrequesttools.md - - docs/models/conversationusageinfo.md - - docs/models/data.md - - docs/models/deletefileout.md - - docs/models/deletemodelout.md - - docs/models/deletemodelv1modelsmodeliddeleterequest.md - - docs/models/deltamessage.md - - docs/models/document.md - - docs/models/documentlibrarytool.md - - docs/models/documentlibrarytooltype.md - - docs/models/documentout.md - - docs/models/documenttextcontent.md - - docs/models/documentupdatein.md - - docs/models/documenturlchunk.md - - docs/models/documenturlchunktype.md - - docs/models/embeddingdtype.md - - docs/models/embeddingrequest.md - - docs/models/embeddingrequestinputs.md - - docs/models/embeddingresponse.md - - docs/models/embeddingresponsedata.md - - docs/models/encodingformat.md - - docs/models/entitytype.md - - docs/models/entries.md - - docs/models/eventout.md - - docs/models/file.md - - docs/models/filechunk.md - - docs/models/filepurpose.md - - docs/models/filesapiroutesdeletefilerequest.md - - docs/models/filesapiroutesdownloadfilerequest.md - - docs/models/filesapiroutesgetsignedurlrequest.md - - docs/models/filesapirouteslistfilesrequest.md - - docs/models/filesapiroutesretrievefilerequest.md - - docs/models/filesapiroutesuploadfilemultipartbodyparams.md - - docs/models/fileschema.md - - docs/models/filesignedurl.md - - 
docs/models/fimcompletionrequest.md - - docs/models/fimcompletionrequeststop.md - - docs/models/fimcompletionresponse.md - - docs/models/fimcompletionstreamrequest.md - - docs/models/fimcompletionstreamrequeststop.md - - docs/models/finetuneablemodeltype.md - - docs/models/finishreason.md - - docs/models/format_.md - - docs/models/ftclassifierlossfunction.md - - docs/models/ftmodelcapabilitiesout.md - - docs/models/ftmodelcard.md - - docs/models/ftmodelcardtype.md - - docs/models/function.md - - docs/models/functioncall.md - - docs/models/functioncallentry.md - - docs/models/functioncallentryarguments.md - - docs/models/functioncallentryobject.md - - docs/models/functioncallentrytype.md - - docs/models/functioncallevent.md - - docs/models/functioncalleventtype.md - - docs/models/functionname.md - - docs/models/functionresultentry.md - - docs/models/functionresultentryobject.md - - docs/models/functionresultentrytype.md - - docs/models/functiontool.md - - docs/models/functiontooltype.md - - docs/models/githubrepositoryin.md - - docs/models/githubrepositoryintype.md - - docs/models/githubrepositoryout.md - - docs/models/githubrepositoryouttype.md - - docs/models/handoffexecution.md - - docs/models/httpvalidationerror.md - - docs/models/hyperparameters.md - - docs/models/imagegenerationtool.md - - docs/models/imagegenerationtooltype.md - - docs/models/imageurl.md - - docs/models/imageurlchunk.md - - docs/models/imageurlchunkimageurl.md - - docs/models/imageurlchunktype.md - - docs/models/inputentries.md - - docs/models/inputs.md - - docs/models/instructrequest.md - - docs/models/instructrequestinputs.md - - docs/models/instructrequestinputsmessages.md - - docs/models/instructrequestmessages.md - - docs/models/integrations.md - - docs/models/jobin.md - - docs/models/jobinintegrations.md - - docs/models/jobinrepositories.md - - docs/models/jobmetadataout.md - - docs/models/jobsapiroutesbatchcancelbatchjobrequest.md - - docs/models/jobsapiroutesbatchgetbatchjobrequest.md 
- - docs/models/jobsapiroutesbatchgetbatchjobsrequest.md - - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md - - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md - - docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md - - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md - - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md - - docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md - - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md - - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md - - docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md - - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md - - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md - - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md - - docs/models/jobsout.md - - docs/models/jobsoutdata.md - - docs/models/jobsoutobject.md - - docs/models/jobtype.md - - docs/models/jsonschema.md - - docs/models/legacyjobmetadataout.md - - docs/models/legacyjobmetadataoutobject.md - - docs/models/librariesdeletev1request.md - - docs/models/librariesdocumentsdeletev1request.md - - docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md - - docs/models/librariesdocumentsgetsignedurlv1request.md - - docs/models/librariesdocumentsgetstatusv1request.md - - docs/models/librariesdocumentsgettextcontentv1request.md - - docs/models/librariesdocumentsgetv1request.md - - docs/models/librariesdocumentslistv1request.md - - docs/models/librariesdocumentsreprocessv1request.md - - docs/models/librariesdocumentsupdatev1request.md - - docs/models/librariesdocumentsuploadv1documentupload.md - - docs/models/librariesdocumentsuploadv1request.md - - docs/models/librariesgetv1request.md - - docs/models/librariessharecreatev1request.md - - docs/models/librariessharedeletev1request.md - - docs/models/librariessharelistv1request.md - - 
docs/models/librariesupdatev1request.md - - docs/models/libraryin.md - - docs/models/libraryinupdate.md - - docs/models/libraryout.md - - docs/models/listdocumentout.md - - docs/models/listfilesout.md - - docs/models/listlibraryout.md - - docs/models/listsharingout.md - - docs/models/loc.md - - docs/models/messageentries.md - - docs/models/messageinputcontentchunks.md - - docs/models/messageinputentry.md - - docs/models/messageinputentrycontent.md - - docs/models/messageinputentryrole.md - - docs/models/messageinputentrytype.md - - docs/models/messageoutputcontentchunks.md - - docs/models/messageoutputentry.md - - docs/models/messageoutputentrycontent.md - - docs/models/messageoutputentryobject.md - - docs/models/messageoutputentryrole.md - - docs/models/messageoutputentrytype.md - - docs/models/messageoutputevent.md - - docs/models/messageoutputeventcontent.md - - docs/models/messageoutputeventrole.md - - docs/models/messageoutputeventtype.md - - docs/models/messages.md - - docs/models/metricout.md - - docs/models/mistralpromptmode.md - - docs/models/modelcapabilities.md - - docs/models/modelconversation.md - - docs/models/modelconversationobject.md - - docs/models/modelconversationtools.md - - docs/models/modellist.md - - docs/models/modeltype.md - - docs/models/moderationobject.md - - docs/models/moderationresponse.md - - docs/models/name.md - - docs/models/object.md - - docs/models/ocrimageobject.md - - docs/models/ocrpagedimensions.md - - docs/models/ocrpageobject.md - - docs/models/ocrrequest.md - - docs/models/ocrresponse.md - - docs/models/ocrtableobject.md - - docs/models/ocrusageinfo.md - - docs/models/one.md - - docs/models/outputcontentchunks.md - - docs/models/outputs.md - - docs/models/paginationinfo.md - - docs/models/prediction.md - - docs/models/processingstatusout.md - - docs/models/queryparamstatus.md - - docs/models/referencechunk.md - - docs/models/referencechunktype.md - - docs/models/repositories.md - - docs/models/requestsource.md - - 
docs/models/response1.md - - docs/models/responsebody.md - - docs/models/responsedoneevent.md - - docs/models/responsedoneeventtype.md - - docs/models/responseerrorevent.md - - docs/models/responseerroreventtype.md - - docs/models/responseformat.md - - docs/models/responseformats.md - - docs/models/responsestartedevent.md - - docs/models/responsestartedeventtype.md - - docs/models/retrievefileout.md - - docs/models/retrievemodelv1modelsmodelidgetrequest.md - - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md - - docs/models/role.md - - docs/models/sampletype.md - - docs/models/security.md - - docs/models/shareenum.md - - docs/models/sharingdelete.md - - docs/models/sharingin.md - - docs/models/sharingout.md - - docs/models/source.md - - docs/models/ssetypes.md - - docs/models/status.md - - docs/models/stop.md - - docs/models/systemmessage.md - - docs/models/systemmessagecontent.md - - docs/models/systemmessagecontentchunks.md - - docs/models/tableformat.md - - docs/models/textchunk.md - - docs/models/textchunktype.md - - docs/models/thinkchunk.md - - docs/models/thinkchunktype.md - - docs/models/thinking.md - - docs/models/timestampgranularity.md - - docs/models/tool.md - - docs/models/toolcall.md - - docs/models/toolchoice.md - - docs/models/toolchoiceenum.md - - docs/models/toolexecutiondeltaevent.md - - docs/models/toolexecutiondeltaeventname.md - - docs/models/toolexecutiondeltaeventtype.md - - docs/models/toolexecutiondoneevent.md - - docs/models/toolexecutiondoneeventname.md - - docs/models/toolexecutiondoneeventtype.md - - docs/models/toolexecutionentry.md - - docs/models/toolexecutionentryobject.md - - docs/models/toolexecutionentrytype.md - - docs/models/toolexecutionstartedevent.md - - docs/models/toolexecutionstartedeventname.md - - docs/models/toolexecutionstartedeventtype.md - - docs/models/toolfilechunk.md - - docs/models/toolfilechunktool.md - - docs/models/toolfilechunktype.md - - docs/models/toolmessage.md - - 
docs/models/toolmessagecontent.md - - docs/models/toolmessagerole.md - - docs/models/toolreferencechunk.md - - docs/models/toolreferencechunktool.md - - docs/models/toolreferencechunktype.md - - docs/models/tools.md - - docs/models/tooltypes.md - - docs/models/trainingfile.md - - docs/models/transcriptionresponse.md - - docs/models/transcriptionsegmentchunk.md - - docs/models/transcriptionstreamdone.md - - docs/models/transcriptionstreamdonetype.md - - docs/models/transcriptionstreamevents.md - - docs/models/transcriptionstreameventsdata.md - - docs/models/transcriptionstreameventtypes.md - - docs/models/transcriptionstreamlanguage.md - - docs/models/transcriptionstreamlanguagetype.md - - docs/models/transcriptionstreamsegmentdelta.md - - docs/models/transcriptionstreamsegmentdeltatype.md - - docs/models/transcriptionstreamtextdelta.md - - docs/models/transcriptionstreamtextdeltatype.md - - docs/models/two.md - - docs/models/type.md - - docs/models/unarchiveftmodelout.md - - docs/models/unarchiveftmodeloutobject.md - - docs/models/updateftmodelin.md - - docs/models/uploadfileout.md - - docs/models/usageinfo.md - - docs/models/usermessage.md - - docs/models/usermessagecontent.md - - docs/models/usermessagerole.md - - docs/models/utils/retryconfig.md - - docs/models/validationerror.md - - docs/models/wandbintegration.md - - docs/models/wandbintegrationout.md - - docs/models/wandbintegrationouttype.md - - docs/models/wandbintegrationtype.md - - docs/models/websearchpremiumtool.md - - docs/models/websearchpremiumtooltype.md - - docs/models/websearchtool.md - - docs/models/websearchtooltype.md - - docs/sdks/accesses/README.md - - docs/sdks/agents/README.md - - docs/sdks/audio/README.md - - docs/sdks/batch/README.md - - docs/sdks/beta/README.md - - docs/sdks/chat/README.md - - docs/sdks/classifiers/README.md - - docs/sdks/conversations/README.md - - docs/sdks/documents/README.md - - docs/sdks/embeddings/README.md - - docs/sdks/files/README.md - - docs/sdks/fim/README.md 
- - docs/sdks/finetuning/README.md - - docs/sdks/jobs/README.md - - docs/sdks/libraries/README.md - - docs/sdks/mistral/README.md - - docs/sdks/mistralagents/README.md - - docs/sdks/mistraljobs/README.md - - docs/sdks/models/README.md - - docs/sdks/ocr/README.md - - docs/sdks/transcriptions/README.md - - py.typed - - scripts/publish.sh - - src/mistralai/__init__.py - - src/mistralai/_hooks/__init__.py - - src/mistralai/_hooks/sdkhooks.py - - src/mistralai/_hooks/types.py - - src/mistralai/_version.py - - src/mistralai/accesses.py - - src/mistralai/agents.py - - src/mistralai/audio.py - - src/mistralai/basesdk.py - - src/mistralai/batch.py - - src/mistralai/beta.py - - src/mistralai/chat.py - - src/mistralai/classifiers.py - - src/mistralai/conversations.py - - src/mistralai/documents.py - - src/mistralai/embeddings.py - - src/mistralai/files.py - - src/mistralai/fim.py - - src/mistralai/fine_tuning.py - - src/mistralai/httpclient.py - - src/mistralai/jobs.py - - src/mistralai/libraries.py - - src/mistralai/mistral_agents.py - - src/mistralai/mistral_jobs.py - - src/mistralai/models/__init__.py - - src/mistralai/models/agent.py - - src/mistralai/models/agentconversation.py - - src/mistralai/models/agentcreationrequest.py - - src/mistralai/models/agenthandoffdoneevent.py - - src/mistralai/models/agenthandoffentry.py - - src/mistralai/models/agenthandoffstartedevent.py - - src/mistralai/models/agents_api_v1_agents_deleteop.py - - src/mistralai/models/agents_api_v1_agents_getop.py - - src/mistralai/models/agents_api_v1_agents_listop.py - - src/mistralai/models/agents_api_v1_agents_update_versionop.py - - src/mistralai/models/agents_api_v1_agents_updateop.py - - src/mistralai/models/agents_api_v1_conversations_append_streamop.py - - src/mistralai/models/agents_api_v1_conversations_appendop.py - - src/mistralai/models/agents_api_v1_conversations_deleteop.py - - src/mistralai/models/agents_api_v1_conversations_getop.py - - 
src/mistralai/models/agents_api_v1_conversations_historyop.py - - src/mistralai/models/agents_api_v1_conversations_listop.py - - src/mistralai/models/agents_api_v1_conversations_messagesop.py - - src/mistralai/models/agents_api_v1_conversations_restart_streamop.py - - src/mistralai/models/agents_api_v1_conversations_restartop.py - - src/mistralai/models/agentscompletionrequest.py - - src/mistralai/models/agentscompletionstreamrequest.py - - src/mistralai/models/agentupdaterequest.py - - src/mistralai/models/apiendpoint.py - - src/mistralai/models/archiveftmodelout.py - - src/mistralai/models/assistantmessage.py - - src/mistralai/models/audiochunk.py - - src/mistralai/models/audioencoding.py - - src/mistralai/models/audioformat.py - - src/mistralai/models/audiotranscriptionrequest.py - - src/mistralai/models/audiotranscriptionrequeststream.py - - src/mistralai/models/basemodelcard.py - - src/mistralai/models/batcherror.py - - src/mistralai/models/batchjobin.py - - src/mistralai/models/batchjobout.py - - src/mistralai/models/batchjobsout.py - - src/mistralai/models/batchjobstatus.py - - src/mistralai/models/batchrequest.py - - src/mistralai/models/builtinconnectors.py - - src/mistralai/models/chatclassificationrequest.py - - src/mistralai/models/chatcompletionchoice.py - - src/mistralai/models/chatcompletionrequest.py - - src/mistralai/models/chatcompletionresponse.py - - src/mistralai/models/chatcompletionstreamrequest.py - - src/mistralai/models/chatmoderationrequest.py - - src/mistralai/models/checkpointout.py - - src/mistralai/models/classificationrequest.py - - src/mistralai/models/classificationresponse.py - - src/mistralai/models/classificationtargetresult.py - - src/mistralai/models/classifierdetailedjobout.py - - src/mistralai/models/classifierftmodelout.py - - src/mistralai/models/classifierjobout.py - - src/mistralai/models/classifiertargetin.py - - src/mistralai/models/classifiertargetout.py - - src/mistralai/models/classifiertrainingparameters.py - - 
src/mistralai/models/classifiertrainingparametersin.py - - src/mistralai/models/codeinterpretertool.py - - src/mistralai/models/completionargs.py - - src/mistralai/models/completionargsstop.py - - src/mistralai/models/completionchunk.py - - src/mistralai/models/completiondetailedjobout.py - - src/mistralai/models/completionevent.py - - src/mistralai/models/completionftmodelout.py - - src/mistralai/models/completionjobout.py - - src/mistralai/models/completionresponsestreamchoice.py - - src/mistralai/models/completiontrainingparameters.py - - src/mistralai/models/completiontrainingparametersin.py - - src/mistralai/models/contentchunk.py - - src/mistralai/models/conversationappendrequest.py - - src/mistralai/models/conversationappendstreamrequest.py - - src/mistralai/models/conversationevents.py - - src/mistralai/models/conversationhistory.py - - src/mistralai/models/conversationinputs.py - - src/mistralai/models/conversationmessages.py - - src/mistralai/models/conversationrequest.py - - src/mistralai/models/conversationresponse.py - - src/mistralai/models/conversationrestartrequest.py - - src/mistralai/models/conversationrestartstreamrequest.py - - src/mistralai/models/conversationstreamrequest.py - - src/mistralai/models/conversationusageinfo.py - - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py - - src/mistralai/models/deletefileout.py - - src/mistralai/models/deletemodelout.py - - src/mistralai/models/deltamessage.py - - src/mistralai/models/documentlibrarytool.py - - src/mistralai/models/documentout.py - - src/mistralai/models/documenttextcontent.py - - src/mistralai/models/documentupdatein.py - - src/mistralai/models/documenturlchunk.py - - src/mistralai/models/embeddingdtype.py - - src/mistralai/models/embeddingrequest.py - - src/mistralai/models/embeddingresponse.py - - src/mistralai/models/embeddingresponsedata.py - - src/mistralai/models/encodingformat.py - - src/mistralai/models/entitytype.py - - src/mistralai/models/eventout.py - - 
src/mistralai/models/file.py - - src/mistralai/models/filechunk.py - - src/mistralai/models/filepurpose.py - - src/mistralai/models/files_api_routes_delete_fileop.py - - src/mistralai/models/files_api_routes_download_fileop.py - - src/mistralai/models/files_api_routes_get_signed_urlop.py - - src/mistralai/models/files_api_routes_list_filesop.py - - src/mistralai/models/files_api_routes_retrieve_fileop.py - - src/mistralai/models/files_api_routes_upload_fileop.py - - src/mistralai/models/fileschema.py - - src/mistralai/models/filesignedurl.py - - src/mistralai/models/fimcompletionrequest.py - - src/mistralai/models/fimcompletionresponse.py - - src/mistralai/models/fimcompletionstreamrequest.py - - src/mistralai/models/finetuneablemodeltype.py - - src/mistralai/models/ftclassifierlossfunction.py - - src/mistralai/models/ftmodelcapabilitiesout.py - - src/mistralai/models/ftmodelcard.py - - src/mistralai/models/function.py - - src/mistralai/models/functioncall.py - - src/mistralai/models/functioncallentry.py - - src/mistralai/models/functioncallentryarguments.py - - src/mistralai/models/functioncallevent.py - - src/mistralai/models/functionname.py - - src/mistralai/models/functionresultentry.py - - src/mistralai/models/functiontool.py - - src/mistralai/models/githubrepositoryin.py - - src/mistralai/models/githubrepositoryout.py - - src/mistralai/models/httpvalidationerror.py - - src/mistralai/models/imagegenerationtool.py - - src/mistralai/models/imageurl.py - - src/mistralai/models/imageurlchunk.py - - src/mistralai/models/inputentries.py - - src/mistralai/models/inputs.py - - src/mistralai/models/instructrequest.py - - src/mistralai/models/jobin.py - - src/mistralai/models/jobmetadataout.py - - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py - - src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py - - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py - - 
src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py - - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py - - src/mistralai/models/jobsout.py - - src/mistralai/models/jsonschema.py - - src/mistralai/models/legacyjobmetadataout.py - - src/mistralai/models/libraries_delete_v1op.py - - src/mistralai/models/libraries_documents_delete_v1op.py - - src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py - - src/mistralai/models/libraries_documents_get_signed_url_v1op.py - - src/mistralai/models/libraries_documents_get_status_v1op.py - - src/mistralai/models/libraries_documents_get_text_content_v1op.py - - src/mistralai/models/libraries_documents_get_v1op.py - - src/mistralai/models/libraries_documents_list_v1op.py - - src/mistralai/models/libraries_documents_reprocess_v1op.py - - src/mistralai/models/libraries_documents_update_v1op.py - - src/mistralai/models/libraries_documents_upload_v1op.py - - src/mistralai/models/libraries_get_v1op.py - - src/mistralai/models/libraries_share_create_v1op.py - - src/mistralai/models/libraries_share_delete_v1op.py - - src/mistralai/models/libraries_share_list_v1op.py - - src/mistralai/models/libraries_update_v1op.py - - src/mistralai/models/libraryin.py - - src/mistralai/models/libraryinupdate.py - - src/mistralai/models/libraryout.py - - src/mistralai/models/listdocumentout.py - - src/mistralai/models/listfilesout.py - - src/mistralai/models/listlibraryout.py - - 
src/mistralai/models/listsharingout.py - - src/mistralai/models/messageentries.py - - src/mistralai/models/messageinputcontentchunks.py - - src/mistralai/models/messageinputentry.py - - src/mistralai/models/messageoutputcontentchunks.py - - src/mistralai/models/messageoutputentry.py - - src/mistralai/models/messageoutputevent.py - - src/mistralai/models/metricout.py - - src/mistralai/models/mistralerror.py - - src/mistralai/models/mistralpromptmode.py - - src/mistralai/models/modelcapabilities.py - - src/mistralai/models/modelconversation.py - - src/mistralai/models/modellist.py - - src/mistralai/models/moderationobject.py - - src/mistralai/models/moderationresponse.py - - src/mistralai/models/no_response_error.py - - src/mistralai/models/ocrimageobject.py - - src/mistralai/models/ocrpagedimensions.py - - src/mistralai/models/ocrpageobject.py - - src/mistralai/models/ocrrequest.py - - src/mistralai/models/ocrresponse.py - - src/mistralai/models/ocrtableobject.py - - src/mistralai/models/ocrusageinfo.py - - src/mistralai/models/outputcontentchunks.py - - src/mistralai/models/paginationinfo.py - - src/mistralai/models/prediction.py - - src/mistralai/models/processingstatusout.py - - src/mistralai/models/referencechunk.py - - src/mistralai/models/requestsource.py - - src/mistralai/models/responsedoneevent.py - - src/mistralai/models/responseerrorevent.py - - src/mistralai/models/responseformat.py - - src/mistralai/models/responseformats.py - - src/mistralai/models/responsestartedevent.py - - src/mistralai/models/responsevalidationerror.py - - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py - - src/mistralai/models/retrievefileout.py - - src/mistralai/models/sampletype.py - - src/mistralai/models/sdkerror.py - - src/mistralai/models/security.py - - src/mistralai/models/shareenum.py - - src/mistralai/models/sharingdelete.py - - src/mistralai/models/sharingin.py - - src/mistralai/models/sharingout.py - - src/mistralai/models/source.py - - 
src/mistralai/models/ssetypes.py - - src/mistralai/models/systemmessage.py - - src/mistralai/models/systemmessagecontentchunks.py - - src/mistralai/models/textchunk.py - - src/mistralai/models/thinkchunk.py - - src/mistralai/models/timestampgranularity.py - - src/mistralai/models/tool.py - - src/mistralai/models/toolcall.py - - src/mistralai/models/toolchoice.py - - src/mistralai/models/toolchoiceenum.py - - src/mistralai/models/toolexecutiondeltaevent.py - - src/mistralai/models/toolexecutiondoneevent.py - - src/mistralai/models/toolexecutionentry.py - - src/mistralai/models/toolexecutionstartedevent.py - - src/mistralai/models/toolfilechunk.py - - src/mistralai/models/toolmessage.py - - src/mistralai/models/toolreferencechunk.py - - src/mistralai/models/tooltypes.py - - src/mistralai/models/trainingfile.py - - src/mistralai/models/transcriptionresponse.py - - src/mistralai/models/transcriptionsegmentchunk.py - - src/mistralai/models/transcriptionstreamdone.py - - src/mistralai/models/transcriptionstreamevents.py - - src/mistralai/models/transcriptionstreameventtypes.py - - src/mistralai/models/transcriptionstreamlanguage.py - - src/mistralai/models/transcriptionstreamsegmentdelta.py - - src/mistralai/models/transcriptionstreamtextdelta.py - - src/mistralai/models/unarchiveftmodelout.py - - src/mistralai/models/updateftmodelin.py - - src/mistralai/models/uploadfileout.py - - src/mistralai/models/usageinfo.py - - src/mistralai/models/usermessage.py - - src/mistralai/models/validationerror.py - - src/mistralai/models/wandbintegration.py - - src/mistralai/models/wandbintegrationout.py - - src/mistralai/models/websearchpremiumtool.py - - src/mistralai/models/websearchtool.py - - src/mistralai/models_.py - - src/mistralai/ocr.py - - src/mistralai/py.typed - - src/mistralai/sdk.py - - src/mistralai/sdkconfiguration.py - - src/mistralai/transcriptions.py - - src/mistralai/types/__init__.py - - src/mistralai/types/basemodel.py - - src/mistralai/utils/__init__.py - - 
src/mistralai/utils/annotations.py - - src/mistralai/utils/datetimes.py - - src/mistralai/utils/enums.py - - src/mistralai/utils/eventstreaming.py - - src/mistralai/utils/forms.py - - src/mistralai/utils/headers.py - - src/mistralai/utils/logger.py - - src/mistralai/utils/metadata.py - - src/mistralai/utils/queryparams.py - - src/mistralai/utils/requestbodies.py - - src/mistralai/utils/retries.py - - src/mistralai/utils/security.py - - src/mistralai/utils/serializers.py - - src/mistralai/utils/unmarshal_json_response.py - - src/mistralai/utils/url.py - - src/mistralai/utils/values.py +trackedFiles: + .gitattributes: + id: 24139dae6567 + last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da + pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a + .vscode/settings.json: + id: 89aa447020cd + last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 + pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + USAGE.md: + id: 3aed33ce6e6f + last_write_checksum: sha1:4b34a680cd5a2b2acbadc41d0b309b3f30c1dfe5 + pristine_git_object: a31d502f33508216f686f4328cbbc8c14f8170ee + docs/models/agent.md: + id: ffdbb4c53c87 + last_write_checksum: sha1:ec6c799658040b3c75d6ae0572bb391c6aea3fd4 + pristine_git_object: ee054dd349848eff144d7064c319c3c8434bdc6c + docs/models/agentconversation.md: + id: 3590c1a566fa + last_write_checksum: sha1:a88c8e10a9de2bc99cabd38ab9fc775a2d33e9ef + pristine_git_object: 92fd673c0710889ae3f1d77f82c32113f39457b7 + docs/models/agentconversationobject.md: + id: cfd35d9dd4f2 + last_write_checksum: sha1:112552d4a241967cf0a7dcb981428e7e0715dc34 + pristine_git_object: ea7cc75c5197ed42f9fb508a969baa16effe1f98 + docs/models/agentcreationrequest.md: + id: 697a770fe5c0 + last_write_checksum: sha1:c8221a20a68675b444d668a58a649b25b54786e9 + pristine_git_object: afc27d3b688f9ca187606243c810fd19d12bb840 + docs/models/agentcreationrequesttools.md: + id: 932bf99a19a8 + last_write_checksum: sha1:49294bdd30b7413956bd8dc039ad7c9d15243282 + 
pristine_git_object: c2525850649b4dad76b44fd21cac822e12986818 + docs/models/agenthandoffdoneevent.md: + id: dcf166a3c3b0 + last_write_checksum: sha1:281473cbc3929e2deb3e069e74551e7e26b4fdba + pristine_git_object: c0039f41825e3667cd8e91adae5bb78a2e3ac8ae + docs/models/agenthandoffdoneeventtype.md: + id: 4d412ea3af67 + last_write_checksum: sha1:720ebe2c6029611b8ecd4caa1b5a58d6417251c6 + pristine_git_object: c864ce4381eb30532feb010b39b991a2070f134b + docs/models/agenthandoffentry.md: + id: 39d54f489b84 + last_write_checksum: sha1:7d949e750fd24dea20cabae340f9204d8f756008 + pristine_git_object: 8831b0ebad1c4e857f4f4353d1815753bb13125f + docs/models/agenthandoffentryobject.md: + id: ac62dd5f1002 + last_write_checksum: sha1:9d25ec388406e6faa765cf163e1e6dcb590ca0e9 + pristine_git_object: 4bb876fb3c60a42cf530c932b7c60278e6036f03 + docs/models/agenthandoffentrytype.md: + id: 07506fd159e0 + last_write_checksum: sha1:27ce9bdf225fbad46230e339a5c6d96213f1df62 + pristine_git_object: 527ebceb2ff1bbba1067f30438befd5e2c2e91d6 + docs/models/agenthandoffstartedevent.md: + id: b620102af460 + last_write_checksum: sha1:a635a7f57e197519d6c51349f6db44199f8e0d43 + pristine_git_object: 035cd02aaf338785d9f6410fde248591c5ffa5f7 + docs/models/agenthandoffstartedeventtype.md: + id: 09b09b971d58 + last_write_checksum: sha1:a3cf06d2c414b1609bdbbbd9e35c8d3f14af262a + pristine_git_object: 4ffaff15cd7b5d4b08080c4fb78e92c455c73f35 + docs/models/agentobject.md: + id: ed24a6d647a0 + last_write_checksum: sha1:ff5dfde6cc19f09c83afb5b4f0f103096df6691d + pristine_git_object: 70e143b030d3041c7538ecdacb8f5f9f8d1b5c92 + docs/models/agentsapiv1agentsdeleterequest.md: + id: 0faaaa59add9 + last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2 + pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c + docs/models/agentsapiv1agentsgetrequest.md: + id: 01740ae62cff + last_write_checksum: sha1:0ed4bb58c94493e21826b38d33c2498de9150b98 + pristine_git_object: 825e03a02e14d03ce47022df840c118de8cd921f 
+ docs/models/agentsapiv1agentslistrequest.md: + id: c2720c209527 + last_write_checksum: sha1:7e5cf3361dd00fce8468757cc73c7edb2877d582 + pristine_git_object: c4f05b5c9169300d4429e601cb70d0aa1fd88c70 + docs/models/agentsapiv1agentsupdaterequest.md: + id: 7692812cd677 + last_write_checksum: sha1:8b17ce9d488b5eab892b66ca44d0e0a01b56aa11 + pristine_git_object: f60f8e5ba0cc6923935187ba221875d757c4693e + docs/models/agentsapiv1agentsupdateversionrequest.md: + id: a001251b1624 + last_write_checksum: sha1:0ee9e0fc55fd969f2b8f2c55dec93bf10e0e5b2f + pristine_git_object: e937acc9b1d3f50eee69495b1305f7aee1c960ac + docs/models/agentsapiv1conversationsappendrequest.md: + id: 70f76380e810 + last_write_checksum: sha1:d428dc114b60362d269b5ae50a57ea60b9edee1a + pristine_git_object: ac8a00ecab30305de8eb8c7c08cda1b1c04148c3 + docs/models/agentsapiv1conversationsappendstreamrequest.md: + id: f6ada9a592c5 + last_write_checksum: sha1:8a806ca2e5bad75d9d0cf50726dc0d5b8e7e3eab + pristine_git_object: dbc330f11aa3039c9cea2dd7d477d56d5c4969d0 + docs/models/agentsapiv1conversationsdeleterequest.md: + id: c2c9f084ed93 + last_write_checksum: sha1:9ecca93f8123cebdd1f9e74cf0f4a104b46402a8 + pristine_git_object: c6eed281331cb4d2cac4470de5e04935d22eca5a + docs/models/agentsapiv1conversationsgetrequest.md: + id: d6acce23f92c + last_write_checksum: sha1:b5d5529b72c16293d3d9b5c45dcb2e3798405bcf + pristine_git_object: 67d450c88778cb27d7d0ba06d49d9f419840b32e + docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md: + id: 97b0d4a71cbc + last_write_checksum: sha1:8d3df6d122eeb58043c81e30cfa701526cc572f0 + pristine_git_object: 4bc836f353f66b0f8b24f278cc78d41dbec72e36 + docs/models/agentsapiv1conversationshistoryrequest.md: + id: e3efc36ea8b5 + last_write_checksum: sha1:4155100eaed6d3b7410b3f4476f000d1879576be + pristine_git_object: 7e5d39e9a11ac437a24b8c059db56527fa93f8b0 + docs/models/agentsapiv1conversationslistrequest.md: + id: 406c3e92777a + last_write_checksum: 
sha1:d5c5effcf2ca32900678d20b667bdaf8ca908194 + pristine_git_object: 62c9011faf26b3a4268186f01caf98c186e7d5b4 + docs/models/agentsapiv1conversationsmessagesrequest.md: + id: 2c749c6620d4 + last_write_checksum: sha1:781e526b030653dc189d94ca04cdc4742f9506d2 + pristine_git_object: a91ab0466d57379eacea9d475c72db9cb228a649 + docs/models/agentsapiv1conversationsrestartrequest.md: + id: 6955883f9a44 + last_write_checksum: sha1:99c1455c7fde9b82b6940e6e1ed4f363d7c38de9 + pristine_git_object: a18a41f5395adae3942573792c86ddf7c3812ff4 + docs/models/agentsapiv1conversationsrestartstreamrequest.md: + id: 0c39856fd70e + last_write_checksum: sha1:d03475c088c059077049270c69be01c67a17f178 + pristine_git_object: 7548286af5d1db51fbfd29c893eb8afdc3c97c4d + docs/models/agentscompletionrequest.md: + id: 906b82c214dc + last_write_checksum: sha1:60a969d5e54cbbb8e9296380908f1d31544e80e2 + pristine_git_object: 2a0c4144fb5919e5ce892db1210bde90820c127c + docs/models/agentscompletionrequestmessages.md: + id: 152837715a56 + last_write_checksum: sha1:338b094596f610c6eacaf0995c585f371f628f0d + pristine_git_object: d6a1e69106fc4b4804bfcc0f95e30782be40b363 + docs/models/agentscompletionrequeststop.md: + id: ad1e0e74b6b8 + last_write_checksum: sha1:b2422d4dada80d54b2dd499a6659a3894318d2c9 + pristine_git_object: 21ce6fb539238168e6d1dfc5a8206d55d33018d3 + docs/models/agentscompletionrequesttoolchoice.md: + id: bd8a6f9fbb47 + last_write_checksum: sha1:f3d9ec3c82b6bbd2c3cbc320a71b927edcc292b1 + pristine_git_object: 63b9dca9fbb8d829f93d8327a77fbc385a846c76 + docs/models/agentscompletionstreamrequest.md: + id: 21d09756447b + last_write_checksum: sha1:97372c5a10b06f826b9da6bde2b9c5f6984cc15b + pristine_git_object: b2ccd4e8fe2fc3f63d4b517f7ecfc21f3aef9d67 + docs/models/agentscompletionstreamrequestmessages.md: + id: d527345f99b1 + last_write_checksum: sha1:a5e00a940960bd6751586b92329aea797af50550 + pristine_git_object: 1bc736af55a3582a18959e445f10fc75f050476b + 
docs/models/agentscompletionstreamrequeststop.md: + id: 4925b6b8fbca + last_write_checksum: sha1:c9d0d73ca46643ffdf02e6c6cd35de5c39460c20 + pristine_git_object: 981005f3ff2277eae57c56787edb5f1f62d1fe46 + docs/models/agentscompletionstreamrequesttoolchoice.md: + id: b1f76f7a4e1c + last_write_checksum: sha1:843c4946d5cab61df2cba458af40835c4e8bcafe + pristine_git_object: 4354523a7d0d21721a96e91938b89236169ccced + docs/models/agenttools.md: + id: 493997aabfdb + last_write_checksum: sha1:90e3537a61b4120892a3aafe545d6bed937bf46a + pristine_git_object: 15891f566b3430e1f199da332f4531dd29002bed + docs/models/agentupdaterequest.md: + id: 75a7f820b906 + last_write_checksum: sha1:d282d1cd39ecb3c447e651a9ea25010ecfa519f7 + pristine_git_object: 641d1e406f0fba0fce9f10c16a15f883c7095c07 + docs/models/agentupdaterequesttools.md: + id: a39223b88fc9 + last_write_checksum: sha1:925ef5852c2031c9bf2608577e55edbc36708730 + pristine_git_object: 1752ee6861d23c6abaa6b748f4ff43e9545505ec + docs/models/apiendpoint.md: + id: be613fd9b947 + last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4 + pristine_git_object: 8d83a26f19241da5ce626ff9526575c50e5d27be + docs/models/archiveftmodelout.md: + id: 9e855deac0d1 + last_write_checksum: sha1:ab79a7762ca33eb1f16b3ed2e5aa5318ec398829 + pristine_git_object: 46a9e755555480d333f91adfe840cdf09313e6c2 + docs/models/archiveftmodeloutobject.md: + id: 9afeccafe5b6 + last_write_checksum: sha1:4bf1b38dc9b6f275affaf353b4bf28bc63ef817c + pristine_git_object: f6f46889da24995f8e5130def3140a9fd1aff57c + docs/models/arguments.md: + id: 7ea5e33709a7 + last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec + pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 + docs/models/assistantmessage.md: + id: 7e0218023943 + last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38 + pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50 + docs/models/assistantmessagecontent.md: + id: 9f1795bbe642 + last_write_checksum: 
sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 + pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d + docs/models/assistantmessagerole.md: + id: bb5d2a4bc72f + last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91 + pristine_git_object: 658229e77eb6419391cf7941568164541c528387 + docs/models/attributes.md: + id: ececf40457de + last_write_checksum: sha1:9f23adf16a682cc43346d157f7e971c596b416ef + pristine_git_object: 147708d9238e40e1cdb222beee15fbe8c1603050 + docs/models/audiochunk.md: + id: 88315a758fd4 + last_write_checksum: sha1:deae67e30f57eb9ae100d8c3bc26f77e8fb28396 + pristine_git_object: c443e7ade726ba88dd7ce9a8341687ef38abe598 + docs/models/audiochunktype.md: + id: cfdd0b7a74b3 + last_write_checksum: sha1:aaafb6be2f880e23fc29958389c44fd60e85f5e4 + pristine_git_object: 46ebf3729db50fd915e56124adcf63a09d93dbf4 + docs/models/audioencoding.md: + id: 1e0dfee9c2a0 + last_write_checksum: sha1:5d47cfaca916d7a47adbea71748595b3ab69a478 + pristine_git_object: feec8c71bf5a89a5c0099a9d075bc2bd36dd5f73 + docs/models/audioformat.md: + id: 41973dd397de + last_write_checksum: sha1:b81fbaf4f8aa03937c91f76d371ad5860836a772 + pristine_git_object: d174ab9959cadde659f76db94ed87c743e0f6783 + docs/models/audiotranscriptionrequest.md: + id: ebf59641bc84 + last_write_checksum: sha1:b76d6e7ee3f1a0ca96e1064db61896e287027711 + pristine_git_object: f2e17dd35eda24a48b0c105ecce63a73d754e051 + docs/models/audiotranscriptionrequeststream.md: + id: 79b5f721b753 + last_write_checksum: sha1:e8fc60f874bb7e8ee03c4e05bdf88b2db1afbfaf + pristine_git_object: 975e437a299efb27c069812f424a0107999de640 + docs/models/basemodelcard.md: + id: 2f62bfbd650e + last_write_checksum: sha1:7ee94bd9ceb6af84024863aa8183540bee7ffcce + pristine_git_object: 58ad5e25131804287b5f7c834afc3ad480d065a9 + docs/models/basemodelcardtype.md: + id: ac404098e2ff + last_write_checksum: sha1:b20b34e9a5f2f52d0563d8fbfa3d00042817ce87 + pristine_git_object: 4a40ce76799b5c224c5687287e8fc14857999d85 + 
docs/models/batcherror.md: + id: 8053e29a3f26 + last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f + pristine_git_object: 95016cdc4c6225d23edc4436e11e4a7feacf1fe6 + docs/models/batchjobin.md: + id: 10f37fc761f1 + last_write_checksum: sha1:0acea471920959b7c85a015e557216c783de4e88 + pristine_git_object: 7dcf265dfe63cbbd13b7fa0e56fc62717f3ee050 + docs/models/batchjobout.md: + id: 49a98e5b2aba + last_write_checksum: sha1:82e0c730eeac4fc9ee787b213e4653cee1cca5aa + pristine_git_object: cb49649b87aeb3ec10068d96222e3d803c508324 + docs/models/batchjoboutobject.md: + id: 8964218f4f7e + last_write_checksum: sha1:8fffd069c91ea950d321cd41994df78df3eb2051 + pristine_git_object: 64ae89654c3d1a2743e67068f66fbd56f70c14b5 + docs/models/batchjobsout.md: + id: d8041dee5b90 + last_write_checksum: sha1:619fcebe753b14a34b7d3ba56f7b45c6c2690fad + pristine_git_object: a76cfdccf96ac2adf783417444be70c5b208582b + docs/models/batchjobsoutobject.md: + id: 885adfc869d5 + last_write_checksum: sha1:3fdc878e360b22d1074bd61f95d7461d478d78a2 + pristine_git_object: d4bf9f65ae546b160dd8ec5f3ecdc4228dc91bfa + docs/models/batchjobstatus.md: + id: 7e6f034d3c91 + last_write_checksum: sha1:9e876b4b94255e1399bbb31feb51e08691bcb8fc + pristine_git_object: 64617b31488130f94bf47952ccaa4958670473c8 + docs/models/batchrequest.md: + id: b113ca846594 + last_write_checksum: sha1:f9dc702c27b8257e008390519df744290e09c4b4 + pristine_git_object: 6ee3b394a8b1125769a355359b5a44bc7c3224ea + docs/models/builtinconnectors.md: + id: 9d14e972f08a + last_write_checksum: sha1:1f32eb515e32c58685d0bdc15de09656194c508c + pristine_git_object: f96f50444aaa23ca291db2fd0dc69db0d9d149d9 + docs/models/chatclassificationrequest.md: + id: 57b86771c870 + last_write_checksum: sha1:2ee5fff26c780ade7ed89617358befa93a6dfd23 + pristine_git_object: 910d62ae20fc67e9a3200397aeab95513bfed90f + docs/models/chatcompletionchoice.md: + id: 0d15c59ab501 + last_write_checksum: sha1:449b3e772891ec8d2ef77b6959a437514bb48d9c + 
pristine_git_object: d77d286eb0b2d2b018b6ff5f9617225be4fa9fa5 + docs/models/chatcompletionrequest.md: + id: adffe90369d0 + last_write_checksum: sha1:7dce1fcd0918e2c94ad90337fb7a89179a5b8402 + pristine_git_object: 109fa7b13d19ccc85e4633e64b44613640c171fb + docs/models/chatcompletionrequesttoolchoice.md: + id: b97041b2f15b + last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 + pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34 + docs/models/chatcompletionresponse.md: + id: 7c53b24681b9 + last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931 + pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b + docs/models/chatcompletionstreamrequest.md: + id: cf8f29558a68 + last_write_checksum: sha1:6f3ca8df1ce48dceb72547012a3e973e09a16d61 + pristine_git_object: 7d5fb411bde92e39910018cc2ad8d4d67ea980a1 + docs/models/chatcompletionstreamrequestmessages.md: + id: b343649e1a58 + last_write_checksum: sha1:04ea9c0e1abcc1956a5990847027bbbbcc778620 + pristine_git_object: 479906112d167c909301c1835df549f4a6456f95 + docs/models/chatcompletionstreamrequeststop.md: + id: d0e89a4dca78 + last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 + pristine_git_object: a48460a92ac47fec1de2188ba46b238229736d32 + docs/models/chatcompletionstreamrequesttoolchoice.md: + id: 210d5e5b1413 + last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 + pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 + docs/models/chatmoderationrequest.md: + id: 22862d4d20ec + last_write_checksum: sha1:2fb708270756e1296a063b0d12252e7a5b2fb92a + pristine_git_object: 69b6c1dc2c10abbbc2574f3782b2d85687661f11 + docs/models/chatmoderationrequestinputs.md: + id: 6d7386a07f09 + last_write_checksum: sha1:f95cffb7d88cfa238a483c949af2d386f875def2 + pristine_git_object: cf775d609e5d308ffb041deed7a70ae3f7fd70a7 + docs/models/checkpointout.md: + id: 909ce66e1f65 + last_write_checksum: sha1:89e678d55b97353ad1c3b28d9f1ab101f6be0928 + pristine_git_object: 
053592d2c57c43220bec3df27cc1486554178955 + docs/models/classificationrequest.md: + id: 6f79e905a3fa + last_write_checksum: sha1:3e083210e1cfdd3539e714928688648673767ae8 + pristine_git_object: 99cdc4a0863577d523e8921af31a179f109bc9fb + docs/models/classificationrequestinputs.md: + id: aff99510c85a + last_write_checksum: sha1:c4b52dd83924f56bef1f54c4fbbdf3cd62e96dbe + pristine_git_object: 69d75d11276f6101452a9debfa2cbcdd39333849 + docs/models/classificationresponse.md: + id: 21227dec49f2 + last_write_checksum: sha1:56756a6c0c36ce94653b676eba1f648907a87a79 + pristine_git_object: d1633ae779850cba0eac4a9c26b5b776a7b789e0 + docs/models/classificationtargetresult.md: + id: 97a5eab5eb54 + last_write_checksum: sha1:41269d1372be3523f46cb57bd19292af4971f7c0 + pristine_git_object: f3b10727b023dd83a207d955b3d0f3cd4b7479a1 + docs/models/classifierdetailedjobout.md: + id: a2084ba5cc8c + last_write_checksum: sha1:75fec933eb83e28b81aa69561d7aaf0fb79b869b + pristine_git_object: ccc88f89ed81e6e879a88b9729c4945704370fd9 + docs/models/classifierdetailedjoboutintegrations.md: + id: 3c607522e70d + last_write_checksum: sha1:e483390fb183bd1960373e4613a15ab31a52b7c7 + pristine_git_object: 5a09465ece564b1bf4dd323918a20f6747019cac + docs/models/classifierdetailedjoboutjobtype.md: + id: 176bd257be82 + last_write_checksum: sha1:ad0f41bac94d711d2b51b2ec4e09d0155db2b6eb + pristine_git_object: 0d1c6573b925e0ef836f5a607ac24f801e0d72eb + docs/models/classifierdetailedjoboutobject.md: + id: 1ca54621f5bf + last_write_checksum: sha1:5ae3d2847a66487d70bc2ff97a8c31bbbba191c7 + pristine_git_object: 08cbcffc1c60c11c07d6e8c4724f46394f7d0854 + docs/models/classifierdetailedjoboutstatus.md: + id: a98493f9d02d + last_write_checksum: sha1:3441d9961e9093d314dd1bc88df1743cd12866d2 + pristine_git_object: c3118aafa8614f20c9adf331033e7822b6391752 + docs/models/classifierftmodelout.md: + id: 268ac482c38b + last_write_checksum: sha1:77ff5ad1a9c142de2a43939be9cd3f57038a9bfc + pristine_git_object: 
dd9e8bf9c0ee291b44cd4f06146dea3d3280c143 + docs/models/classifierftmodeloutmodeltype.md: + id: 40536012f45c + last_write_checksum: sha1:c6fde7ce8542ba6a56a91584aa0d6b1eb99fde6d + pristine_git_object: e1e7e465378c4c0112f08dc140052fad7955995e + docs/models/classifierftmodeloutobject.md: + id: 6aa25d9fe076 + last_write_checksum: sha1:5a5fe345b3a2b3e65ce3171e8d6e9b9493ec7b06 + pristine_git_object: 9fe05bcf42325a390e5c984c7bdf346668944928 + docs/models/classifierjobout.md: + id: 2e3498af3f8c + last_write_checksum: sha1:a9706e8df1a0a569e5e42e7a1494737e391cb55a + pristine_git_object: aa1d3ca910535e283059903a2c39331673c1982b + docs/models/classifierjoboutintegrations.md: + id: 3c4aff0af3fd + last_write_checksum: sha1:b843cb1635940ff74737f92ec1ac5da893a239f2 + pristine_git_object: d938d0b991f71e46096a9b12320c6237265bd811 + docs/models/classifierjoboutjobtype.md: + id: 772280dfaefc + last_write_checksum: sha1:b809726c9edd5a47be7582eb028acbd58014b565 + pristine_git_object: 7f5236fa87ea9bb5fd93873a2d2f9a6a8c4f9456 + docs/models/classifierjoboutobject.md: + id: 04543f046d40 + last_write_checksum: sha1:96863c621ddf0425b818edcd5da32ddbd5fd1194 + pristine_git_object: 1b42d547de7bdfb109c3ff750c6754e15ec4a8c1 + docs/models/classifierjoboutstatus.md: + id: 2411c6bf3297 + last_write_checksum: sha1:6ceef218b783505231a0ec653292460e6cb1a65b + pristine_git_object: 4520f1648323227863f78f7f86b2b4567bb7ace7 + docs/models/classifiertargetin.md: + id: 90d2da204677 + last_write_checksum: sha1:18fca3deee476b3dd23d55a9a40ced96cdc21f83 + pristine_git_object: 78cab67b4ced9fd0139a1dc4e6b687de870f9c62 + docs/models/classifiertargetout.md: + id: 1ce5c0513022 + last_write_checksum: sha1:2b8ed8a25b6ea6f2717cb4edcfa3f6a1ff3e69e4 + pristine_git_object: 57535ae5cb7d30177d1800d3597fe2f6ec3ad024 + docs/models/classifiertrainingparameters.md: + id: 9370e1ccd3d5 + last_write_checksum: sha1:03f7c32717792966afdec50cb9dc1c85bb99dd84 + pristine_git_object: 3b6f3be6942bbcf56261f773864a518d16923880 + 
docs/models/classifiertrainingparametersin.md: + id: 8bcca130af93 + last_write_checksum: sha1:7e9d61d3377031c740ea98d6c3dc65be99dc059b + pristine_git_object: 1287c973fae9762310597fbeceaef26865ace04f + docs/models/codeinterpretertool.md: + id: f009740c6e54 + last_write_checksum: sha1:bba7c0b8f0979b0c77a31c70621dccb03d6722a5 + pristine_git_object: d5ad789ed012accaa105ced4f8dfd8e9eb83d4a3 + docs/models/codeinterpretertooltype.md: + id: d6d0f83de515 + last_write_checksum: sha1:f41ae23451c22692410340d44bcec36a1f45910b + pristine_git_object: f704b65e2842e36be4d2b96c9334cda4a6b02cde + docs/models/completionargs.md: + id: 3b54534f9830 + last_write_checksum: sha1:c0368b7c21524228939b2093ff1a4524eb57aeb7 + pristine_git_object: 60d091374a80418892df9700dc0c21e7dad28775 + docs/models/completionargsstop.md: + id: 40b0f0c81dc8 + last_write_checksum: sha1:2a576618c62d4818af0048ed3a79080149a88642 + pristine_git_object: b93f993e44a18fb0f3711163277f538cfedbf828 + docs/models/completionchunk.md: + id: 60cb30423c60 + last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 + pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 + docs/models/completiondetailedjobout.md: + id: 634ca7241abd + last_write_checksum: sha1:b0af22a4e5eb409d6aa2a91c4ee3924d38923f5f + pristine_git_object: 84613080715078a73204d3984e7f97477ef548ae + docs/models/completiondetailedjoboutintegrations.md: + id: ecf47529e409 + last_write_checksum: sha1:5ff41070f932c911a724867a91a0a26c1d62032e + pristine_git_object: af6bbcc5f43176df2dea01a4a1a31f3c616ee3b9 + docs/models/completiondetailedjoboutjobtype.md: + id: cb794f29a3f2 + last_write_checksum: sha1:24533bc2a5bb42b560f02af4d93f008f9e5b7873 + pristine_git_object: fb24db0cc3d9495f01732bdb0e1c3df8a5865540 + docs/models/completiondetailedjoboutobject.md: + id: 8e418065aa1c + last_write_checksum: sha1:d429d772a6a4249809bbf0c26a6547e5f2de3f11 + pristine_git_object: 1bec88e5f4c5f082c53157b8ee95b4b05cb787e3 + 
docs/models/completiondetailedjoboutrepositories.md: + id: bb83e77df490 + last_write_checksum: sha1:dc2d60c6be1d3385d584ce9629abaaaaa46cf0ef + pristine_git_object: 4f9727c36fac5515d0afbc801904abc3652a5b20 + docs/models/completiondetailedjoboutstatus.md: + id: c606d38452e2 + last_write_checksum: sha1:1e9a5736de32a44cf539f7eaf8214aad72ec4994 + pristine_git_object: b80525bad8f6292892d8aee864a549c8ec52171c + docs/models/completionevent.md: + id: e57cd17cb9dc + last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 + pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d + docs/models/completionftmodelout.md: + id: 93fed66a5794 + last_write_checksum: sha1:c66aecd2e10f79c84c057eeae1986e975cb40220 + pristine_git_object: cd0858258521ced3990ff393fd00c11ef0abe094 + docs/models/completionftmodeloutobject.md: + id: c6e5667c5f03 + last_write_checksum: sha1:b4cbdc01a2b439d923ad542cf852797c24d234e8 + pristine_git_object: 6f9d858caa563f4a25ae752dd40ba632ecd0af75 + docs/models/completionjobout.md: + id: 77315b024171 + last_write_checksum: sha1:bae2f49bb9064e24f886487e44ce1688993fa949 + pristine_git_object: cb471746c4f23d2ec8451f4c45bf57e2f001072f + docs/models/completionjoboutobject.md: + id: 922a1e3a4e33 + last_write_checksum: sha1:020211def2c4cd969398cf009b187ca19bd7a943 + pristine_git_object: 712b107d79a8c60c4330da4f3af307545bf1a7ec + docs/models/completionresponsestreamchoice.md: + id: d56824d615a6 + last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 + pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875 + docs/models/completionresponsestreamchoicefinishreason.md: + id: 5f1fbfc90b8e + last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9 + pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0 + docs/models/completiontrainingparameters.md: + id: b716b0195d39 + last_write_checksum: sha1:1d8d7c469f933ea741ec15c8b9ef8b986e0ca95e + pristine_git_object: 4746a95df18c78331f572425a16b2b3dcbc2df4c + 
docs/models/completiontrainingparametersin.md: + id: 7223a57004ab + last_write_checksum: sha1:8f77e5fe2ce149115b0bda372c57fafa931abd90 + pristine_git_object: 9fcc714e5f000e6134f7f03f1dd4f56956323385 + docs/models/content.md: + id: bfd859c99f86 + last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8 + pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6 + docs/models/contentchunk.md: + id: d2d3a32080cd + last_write_checksum: sha1:b253e4b802adb5b66d896bfc6245ac4d21a0c67c + pristine_git_object: cb7e51d3a6e05f197fceff4a4999594f3e340dac + docs/models/conversationappendrequest.md: + id: 722746e5065c + last_write_checksum: sha1:1677ab5b06748a7650464c0d7596e66e6759ede2 + pristine_git_object: 1cdb584b62423072f9a7cdc61f045b0d161525df + docs/models/conversationappendrequesthandoffexecution.md: + id: e3f56d558037 + last_write_checksum: sha1:dc71c8db746bb08f6630e995cf6af9fda747e954 + pristine_git_object: 7418b36a55fab959639aec456a946600eb908efb + docs/models/conversationappendstreamrequest.md: + id: e9f8131435e8 + last_write_checksum: sha1:559d90bbf6d64f46221edaa6482837f0ee3b0626 + pristine_git_object: a8516ea7fc7db1d6bc0abb8f99b967a1715ceb4b + docs/models/conversationappendstreamrequesthandoffexecution.md: + id: 5739ea777905 + last_write_checksum: sha1:c85584b63c0c5d859ee5d46d6ae167a8ee44e279 + pristine_git_object: 1bbced3e61a521401ae93a7b1f73d0e9c061e5fd + docs/models/conversationevents.md: + id: be63cc7c526e + last_write_checksum: sha1:1667c767ef53fd7aef90452fde2a8245ed2b2ae6 + pristine_git_object: f1e2c4e90181ff729d3fdb37b0135e9bbd095c04 + docs/models/conversationeventsdata.md: + id: d4907b066f4b + last_write_checksum: sha1:f58b7f3e738c2d0146b228076a5dc0c6cf84ffb1 + pristine_git_object: 5452d7d5ce2aa59a6d89c7b7363290e91ed8a0a3 + docs/models/conversationhistory.md: + id: 7e97e8e6d6e9 + last_write_checksum: sha1:cc6b40d6e6ff923555e959be5ef50a00c73154a7 + pristine_git_object: ebb1d5136cebf2bc9b77047fe83feecc68532d03 + 
docs/models/conversationhistoryobject.md: + id: 088f7df6b658 + last_write_checksum: sha1:bcce4ef55e6e556f3c10f65e860faaedc8eb0671 + pristine_git_object: a14e7f9c7a392f0d98e79cff9cc3ea54f30146fa + docs/models/conversationinputs.md: + id: 23e3160b457d + last_write_checksum: sha1:0c6abaa34575ee0eb22f12606de3eab7f4b7fbaf + pristine_git_object: 86db40ea1390e84c10a31155b3cde9066eac23b0 + docs/models/conversationmessages.md: + id: 46684ffdf874 + last_write_checksum: sha1:01ccdc4b509d5f46ff185f686d332587e25fc5b7 + pristine_git_object: c3f00979b748ad83246a3824bb9be462895eafd6 + docs/models/conversationmessagesobject.md: + id: b1833c3c20e4 + last_write_checksum: sha1:bb91a6e2c89066299660375e5e18381d0df5a7ff + pristine_git_object: db3a441bde0d086bccda4814ddfbf737539681a6 + docs/models/conversationrequest.md: + id: dd7f4d6807f2 + last_write_checksum: sha1:4ecca434753494ff0af66952655af92293690702 + pristine_git_object: 04378ae34c754f2ed67a34d14923c7b0d1605d4e + docs/models/conversationresponse.md: + id: 2eccf42d48af + last_write_checksum: sha1:69059d02d5354897d23c9d9654d38a85c7e0afc6 + pristine_git_object: 38cdadd0055d457fa371984eabcba7782e130839 + docs/models/conversationresponseobject.md: + id: 6c028b455297 + last_write_checksum: sha1:76270a07b86b1a973b28106f2a11673d082a385b + pristine_git_object: bea66e5277feca4358dd6447959ca945eff2171a + docs/models/conversationrestartrequest.md: + id: 558e9daa00bd + last_write_checksum: sha1:97c25a370411e1bce144c61272ca8f32066112be + pristine_git_object: f389a1e5c42cf0f73784d5563eaa6d0b29e0d69e + docs/models/conversationrestartrequesthandoffexecution.md: + id: faee86c7832c + last_write_checksum: sha1:44728be55e96193e6f433e2f46f8f749f1671097 + pristine_git_object: 5790624b82ce47ea99e5c25c825fbc25145bfb8e + docs/models/conversationrestartstreamrequest.md: + id: 01b92ab1b56d + last_write_checksum: sha1:90f0ab9aba1919cbc2b9cfc8e5ec9d80f8f3910c + pristine_git_object: d7358dc20b2b60cb287b3c4a1c174a7883871a54 + 
docs/models/conversationrestartstreamrequesthandoffexecution.md: + id: 3e9c4a9ab94d + last_write_checksum: sha1:300e197f11ad5efc654b51198b75049890258eef + pristine_git_object: 97266b43444f5ed50eeedf574abd99cb201199fd + docs/models/conversationstreamrequest.md: + id: 833f266c4f96 + last_write_checksum: sha1:b7196c9194bc5167d35d09774a3f26bc7d543790 + pristine_git_object: e403db68e7932f60b1343d9282e2c110414486ce + docs/models/conversationstreamrequesthandoffexecution.md: + id: e6701e5f9f0c + last_write_checksum: sha1:ef2ebe8f23f27144e7403f0a522326a7e4f25f50 + pristine_git_object: c98e194c1d204c3a5d4234f0553712a7025d7f85 + docs/models/conversationstreamrequesttools.md: + id: 83ea0526da4e + last_write_checksum: sha1:c445fc14cbb882871a83990943569bdf09a662f9 + pristine_git_object: 700c844876754e85428898f6cabda8fb0dedf114 + docs/models/conversationusageinfo.md: + id: 57ef89d3ab83 + last_write_checksum: sha1:d92408ad37d7261b0f83588e6216871074a50225 + pristine_git_object: 57e260335959c605a0b9b4eaa8bf1f8272f73ae0 + docs/models/data.md: + id: 9a31987caf78 + last_write_checksum: sha1:da040f995f799c04214eff92982dd8d6c057ae93 + pristine_git_object: 95dc8d28aa4669513ae0f255c81aadaf3d793370 + docs/models/deletefileout.md: + id: c7b84242a45c + last_write_checksum: sha1:f2b039ab88fc83ec5dd765cab8e2ed8cce7e417d + pristine_git_object: 4709cc4958d008dc24430deb597f801b91c6957f + docs/models/deletemodelout.md: + id: 5643e76768d5 + last_write_checksum: sha1:1593c64f7673e59b7ef1f4ae9f5f6b556dd6a269 + pristine_git_object: 5fd4df7a7013dcd4f6489ad29cdc664714d32efd + docs/models/deletemodelv1modelsmodeliddeleterequest.md: + id: c838cee0f093 + last_write_checksum: sha1:e5b6d18b4f8ab91630ae34a4f50f01e536e08d99 + pristine_git_object: d9bc15fe393388f7d0c41abce97ead17e35e2ba4 + docs/models/deltamessage.md: + id: 6c5ed6b60968 + last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58 + pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9 + docs/models/document.md: + id: 
cd1d2a444370 + last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52 + pristine_git_object: 509d43b733d68d462853d9eb52fc913c855dff40 + docs/models/documentlibrarytool.md: + id: 68083b0ef8f3 + last_write_checksum: sha1:5f21be0a248ff4dedc26908b9ee0039d7ac1421c + pristine_git_object: 82315f32b920d32741b2e53bc10e411f74a85602 + docs/models/documentlibrarytooltype.md: + id: 23c5ba5c4b3f + last_write_checksum: sha1:bcb58941aafaca2b8ad6e71425d5f16e881b4f97 + pristine_git_object: ebd420f69a4ace05daa7edd82b9315b2a4354b5f + docs/models/documentout.md: + id: a69fd1f47711 + last_write_checksum: sha1:ed446078e7194a0e44e21ab1af958d6a83597edb + pristine_git_object: 28df11eb1aef1fdaf3c1103b5d61549fb32ea85d + docs/models/documenttextcontent.md: + id: 29587399f346 + last_write_checksum: sha1:93382da0228027a02501abbcf681f247814d3d68 + pristine_git_object: 989f49e9bcb29f4127cb11df683c76993f14eba8 + docs/models/documentupdatein.md: + id: 185ab27259a7 + last_write_checksum: sha1:e0faccd04229204968dbc4e8131ee72f81288182 + pristine_git_object: 0993886d56868aba6844824f0e0fdf1bdb9d74f6 + docs/models/documenturlchunk.md: + id: 48437d297408 + last_write_checksum: sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54 + pristine_git_object: 6c9a5b4d9e6769be242b27ef0208f6af704689c0 + docs/models/documenturlchunktype.md: + id: a3574c91f539 + last_write_checksum: sha1:a0134fc0ea822d55b1204ee71140f2aa9d8dbe9c + pristine_git_object: 32e1fa9e975a3633fb49057b38b0ea0206b2d8ef + docs/models/embeddingdtype.md: + id: 22786e732e28 + last_write_checksum: sha1:dbd16968cdecf706c890769d8d1557298f41ef71 + pristine_git_object: 01656b0a85aa87f19909b18100bb6981f89683fc + docs/models/embeddingrequest.md: + id: bebee24421b4 + last_write_checksum: sha1:8e2bfa35f55b55f83fa2ebf7bee28cd00cb681d1 + pristine_git_object: 7269c0551a0c1040693eafdd99e1b8ebe98478a5 + docs/models/embeddingrequestinputs.md: + id: 6a35f3b1910a + last_write_checksum: sha1:e12ca056fac504e5af06a304d09154d3ecd17919 + pristine_git_object: 
527a089b38b5cd316173ced4dc74a1429c8e4406 + docs/models/embeddingresponse.md: + id: 31cd0f6b7bb5 + last_write_checksum: sha1:1d7351c68b075aba8e91e53d29bdab3c6dd5c3a2 + pristine_git_object: 2bd85b4d245978ec396da067060cfe892f19c64f + docs/models/embeddingresponsedata.md: + id: 89b078acdc42 + last_write_checksum: sha1:e3e9200948f864382e0ecd3e04240b13d013141a + pristine_git_object: 20b50618ac99c63f7cf57fe4377840bfc1f85823 + docs/models/encodingformat.md: + id: 066e154e4d43 + last_write_checksum: sha1:8d6c4b29dea5ff7b0ae2b586951308fad99c60eb + pristine_git_object: 7d5941cfe6cea2e85b20d6fb0031e9b807bac471 + docs/models/entitytype.md: + id: 130a2f7038b0 + last_write_checksum: sha1:01c3c10e737bcd58be70b437f7ee74632972a983 + pristine_git_object: 7c040b382d4c1b6bc63f582566d938be75a5f954 + docs/models/entries.md: + id: 93dc7a28346c + last_write_checksum: sha1:c6c61c922df17562e9ca5d8d2d325579db5c88bc + pristine_git_object: 8e5a20d052c47008b8a399b7fb740bece3b35386 + docs/models/eventout.md: + id: 9960732c3718 + last_write_checksum: sha1:dbc23814b2e54ded4aa014d63510b3a2a3259329 + pristine_git_object: d9202353be984d51b9c05fb0f490053ce6ccfe4a + docs/models/file.md: + id: 4ad31355bd1c + last_write_checksum: sha1:ade4d3c908c664a07a3c333cc24bc1bfb43ab88b + pristine_git_object: 37cc418f9e5189c18f312c42060fd702e2963765 + docs/models/filechunk.md: + id: edc076728e9d + last_write_checksum: sha1:07ab5db503211adba2fa099e66d12ac3c4bbf680 + pristine_git_object: 18217114060ac4e4b45fefabace4628684f27e5c + docs/models/filepurpose.md: + id: ed6216584490 + last_write_checksum: sha1:02767595f85228f7bfcf359f8384b8263580d53a + pristine_git_object: 14cab13ee191ae60e2c5e1e336d0a5abc13f778b + docs/models/filesapiroutesdeletefilerequest.md: + id: 7fdf9a97320b + last_write_checksum: sha1:411e38d0e08a499049796d1557f79d669fc65107 + pristine_git_object: 1b02c2dbb7b3ced86ddb49c2323d1d88732b480c + docs/models/filesapiroutesdownloadfilerequest.md: + id: b9c13bb26345 + last_write_checksum: 
sha1:1f41dad5ba9bd63881de04d24ef49a0650d30421 + pristine_git_object: 8b28cb0e5c60ac9676656624eb3c2c6fdc8a3e88 + docs/models/filesapiroutesgetsignedurlrequest.md: + id: 08f3772db370 + last_write_checksum: sha1:26aa0140444ccef7307ef6f236932032e4784e8f + pristine_git_object: dbe3c801003c7bb8616f0c5be2dac2ab1e7e9fb1 + docs/models/filesapirouteslistfilesrequest.md: + id: 04bdf7c654bd + last_write_checksum: sha1:258317fd5c0738cff883f31e13393ac64f817a6f + pristine_git_object: 3801a96e19f149a665bde4890e26df54d7f07d77 + docs/models/filesapiroutesretrievefilerequest.md: + id: 2783bfd9c4b9 + last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab + pristine_git_object: 961bae1f51a4ae9df21b28fd7a5ca91dc7b3888b + docs/models/filesapiroutesuploadfilemultipartbodyparams.md: + id: 558bf53c7b65 + last_write_checksum: sha1:de3f26e8bd89aae0e2c2078b9e1f7f47adccafbd + pristine_git_object: a5dd1174ab987e511d70a0f8fdaefbeaeda18c43 + docs/models/fileschema.md: + id: 9a05a660399d + last_write_checksum: sha1:97987d64285ff3092635754c78ad7b68d863e197 + pristine_git_object: 4f3e72dba17a964155007755ad9d69f0304b2adb + docs/models/filesignedurl.md: + id: c0a57176d62e + last_write_checksum: sha1:2c64ef5abc75e617496f0a28d3e1cebfe269a6b9 + pristine_git_object: 52ce3f4f0c44df0ef3ed1918f92ad63f76ffc144 + docs/models/fimcompletionrequest.md: + id: b44677ecc293 + last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 + pristine_git_object: fde0b625c29340e8dce1eb3026ce644b1885e53a + docs/models/fimcompletionrequeststop.md: + id: ea5475297a83 + last_write_checksum: sha1:a6cdb4bda01ac58016a71f35da48a5d10df11623 + pristine_git_object: a0dbb00a82a03acc8b62b81d7597722a6ca46118 + docs/models/fimcompletionresponse.md: + id: 050d62ba2fac + last_write_checksum: sha1:a6101a69e83b7a5bcf96ec77ba1cab8748f734f4 + pristine_git_object: cd62d0349503fd8b13582d0ba47ab9cff40f6b28 + docs/models/fimcompletionstreamrequest.md: + id: c881d7e27637 + last_write_checksum: 
sha1:f8755bc554dd44568c42eb5b6dde04db464647ab + pristine_git_object: ba62d854f030390418597cbd8febae0e1ce27ea8 + docs/models/fimcompletionstreamrequeststop.md: + id: c97a11b764e9 + last_write_checksum: sha1:958d5087050fdeb128745884ebcf565b4fdc3886 + pristine_git_object: 5a9e2ff020d4939f7fd42c0673ea7bdd16cca99d + docs/models/finetuneablemodeltype.md: + id: e16926b57814 + last_write_checksum: sha1:52006811b756ff5af865ed6f74838d0903f0ee52 + pristine_git_object: 34b24bd4db1ad3f9e77e2c6a45a41d2fbc5cf7fd + docs/models/finishreason.md: + id: 73315c2a39b3 + last_write_checksum: sha1:dc258e82af5babd6efabadb20cd6e2f9663dbb64 + pristine_git_object: 2af53f6e55b74455a696c17ab00ba626a1c3711f + docs/models/format_.md: + id: a17c22228eda + last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 + pristine_git_object: 97d286a4ed7cff0a4058bbfa06c4573428182876 + docs/models/ftclassifierlossfunction.md: + id: b546cfde5aa6 + last_write_checksum: sha1:752d9d238a90a3ef55205576fa38cee56ea1539e + pristine_git_object: 919cdd384315c99d4b590bc562298403733344ce + docs/models/ftmodelcapabilitiesout.md: + id: f7be0dd1d889 + last_write_checksum: sha1:670412a0c0268f646dd444537bd79ce9440170c8 + pristine_git_object: 19690476c64ac7be53f974347c1618730f0013ce + docs/models/ftmodelcard.md: + id: 15ed6f94deea + last_write_checksum: sha1:2dccc70020274152bb8a76f0f7699694f8683652 + pristine_git_object: 35032775db8ae6f4c6fbac309edacd27ee7868af + docs/models/ftmodelcardtype.md: + id: e2ba85c02d1c + last_write_checksum: sha1:f6a718013be6a8cb340f58f1ff7b919217594622 + pristine_git_object: 0b38470b9222df6c51baef2e7e9e10c0156a2e05 + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + pristine_git_object: b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + 
docs/models/functioncallentry.md: + id: 016986b7d6b0 + last_write_checksum: sha1:bd3e67aea9eb4f70064e67e00385966d44f73f24 + pristine_git_object: fd3aa5c575019d08db258842262e8814e57dc6d5 + docs/models/functioncallentryarguments.md: + id: c4c609e52680 + last_write_checksum: sha1:ae88aa697e33d60f351a30052aa3d6e2a8a3e188 + pristine_git_object: f1f6e39e724673556a57059a4dbda24f31a4d4b9 + docs/models/functioncallentryobject.md: + id: ea634770754e + last_write_checksum: sha1:d6bc885e9689397d4801b76c1a3c8751a75cf212 + pristine_git_object: 3cf2e427bfb6f2bc7acea1e0c6aafe965187f63f + docs/models/functioncallentrytype.md: + id: b99da15c307b + last_write_checksum: sha1:04665a6718ad5990b3beda7316d55120fbe471b0 + pristine_git_object: 7ea34c5206bdf205d74d8d49c87ddee5607582e9 + docs/models/functioncallevent.md: + id: cc9f2e603464 + last_write_checksum: sha1:c3a6a7ce8af38d7ba7a2ece48c352eed95edc578 + pristine_git_object: c25679a5d89745c1e186cdeb72fda490b2f45af2 + docs/models/functioncalleventtype.md: + id: 1aab7a86c5d6 + last_write_checksum: sha1:61d480f424df9a74a615be673cae4dcaf7875d81 + pristine_git_object: 8cf3f03866d72ac710015eec57d6b9caa079022e + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/functionresultentry.md: + id: 24d4cb18998c + last_write_checksum: sha1:528cae03e09e43bdf13e1a3fef64fd9ed334319b + pristine_git_object: 6df54d3d15e6d4a03e9af47335829f01a2226108 + docs/models/functionresultentryobject.md: + id: 025dc546525c + last_write_checksum: sha1:01a0085fb99253582383dd3b12a14d19c803c33c + pristine_git_object: fe52e0a5a848ea09dfb4913dd8d2e9f988f29de7 + docs/models/functionresultentrytype.md: + id: 69651967bdee + last_write_checksum: sha1:41489b0f727a00d86b313b8aefec85b4c30c7602 + pristine_git_object: 35c94d8e553e1cb641bef28fec2d8b3576d142f6 + docs/models/functiontool.md: + id: 5fb499088cdf + last_write_checksum: 
sha1:f616c6de97a6e0d622b16b99f95c2c5a94661789 + pristine_git_object: 8c42459304100777cf85416a5c3a984bc0e7a7ca + docs/models/functiontooltype.md: + id: bc0bcbe69ad9 + last_write_checksum: sha1:c0fae17a8e5a9b7240ff16af7eef9fb4782fe983 + pristine_git_object: 9c095625b60f1e2e0fd09b08e3ba315545d6a036 + docs/models/githubrepositoryin.md: + id: b42209ef8423 + last_write_checksum: sha1:fece86cdee3ba3a5719244a953193ed2f7b982f7 + pristine_git_object: 1584152ba934756793d5228d5691c07d3256c7b8 + docs/models/githubrepositoryintype.md: + id: e2f2ca622221 + last_write_checksum: sha1:349dc9c6e4db5ec5394c8649c3b872db3545c182 + pristine_git_object: 63da967cb7a75ec328f9b9fbd1062e43f2cabc07 + docs/models/githubrepositoryout.md: + id: 0ca86e122722 + last_write_checksum: sha1:f6ffda992af75d3f95751106db1b0f0c82a2eca7 + pristine_git_object: 03f0b2661e46b48489ede1208d9c38c4324b2b35 + docs/models/githubrepositoryouttype.md: + id: f3ab58fa1b0e + last_write_checksum: sha1:8f26cd692f499279b9c4182010d56c75374ed9ec + pristine_git_object: 46c3eefd1d67ea6968a3c7025e6dc27e8f0f1ac5 + docs/models/handoffexecution.md: + id: d0b2e094fa39 + last_write_checksum: sha1:1d8fafc8105b6c15e50620353c0457b629951804 + pristine_git_object: 61e7dade49090096a49d99b5c8291f629fd43c4e + docs/models/httpvalidationerror.md: + id: a211c095f2ac + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/hyperparameters.md: + id: c167bad5b302 + last_write_checksum: sha1:5b7f76360dea58be5350bbe074482da45e57599c + pristine_git_object: 46a6dd6baa1b1574bad5eadc1e83d4b72d56c0c8 + docs/models/imagegenerationtool.md: + id: d5deb6b06d28 + last_write_checksum: sha1:8596d0119712e68b1deafd18860ed6ed452a31fa + pristine_git_object: b8fc9cf40c8cb010231837ffe3d66cb3762dd666 + docs/models/imagegenerationtooltype.md: + id: fc670aabaff7 + last_write_checksum: sha1:234109f99f467905e6e7b74036e2c395090840e4 + pristine_git_object: 
29681b58e1afe945faa76f9dd424deb01cdfb1bd + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 + pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727 + pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 + docs/models/imageurlchunkimageurl.md: + id: c7fae88454ce + last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325 + pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064 + docs/models/imageurlchunktype.md: + id: b9af2db9ff60 + last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 + pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + docs/models/inputentries.md: + id: a5c647d5ad90 + last_write_checksum: sha1:4231bb97837bdcff4515ae1b00ff5e7712256e53 + pristine_git_object: b44a467d258cfa8cc3d2a3236330471dbc3af109 + docs/models/inputs.md: + id: 4b0a7fb87af8 + last_write_checksum: sha1:19d8da9624030a47a3285276c5893a0fc7609435 + pristine_git_object: 0f62a7ce8e965d0879507e98f808b9eb254282a6 + docs/models/instructrequest.md: + id: a0034d7349a2 + last_write_checksum: sha1:91c446be8428efd44163ed8366a37c376554211a + pristine_git_object: 9500cb588b5d27d934b04cc5fa0be26a270f6d82 + docs/models/instructrequestinputs.md: + id: 2a677880e32a + last_write_checksum: sha1:1b989ef7ef4c84f59c83af11b3243d934c85e348 + pristine_git_object: 4caa028f85be2324966e61321c917cbd0c65de01 + docs/models/instructrequestinputsmessages.md: + id: c0cb1f866e69 + last_write_checksum: sha1:558f78fafbd44c5ea7030491a39d0c7ccd994d01 + pristine_git_object: 237e131f1b1161c8b90df11d49739f5bfe9ee829 + docs/models/instructrequestmessages.md: + id: 639538e7d70d + last_write_checksum: sha1:8c26b3b97f095e5c525b0e3c18d45aded9bd03a2 + pristine_git_object: 9c866a7db86b40e997cb3f06d68e67eb033f3360 + docs/models/integrations.md: + id: f9eb2b4df2f8 + last_write_checksum: 
sha1:e0b12cf5661d4e6332da28913c5394e5a85071bf + pristine_git_object: 35214d63ef2b902aa39bfdd2fd6dc5f319cc203b + docs/models/jobin.md: + id: 1b7b37214fa8 + last_write_checksum: sha1:6dadb7d78e2dc04966bd041ddb54428108098f76 + pristine_git_object: b96517705cea7b9efd266f146080ad1aed3cc8cb + docs/models/jobinintegrations.md: + id: 5f293420eced + last_write_checksum: sha1:288931c5427e1a435b1396e131e95a43cbcbc2b9 + pristine_git_object: 91c102426d05b4f88ca5a661f53f1acf316b5b88 + docs/models/jobinrepositories.md: + id: 5c94c2d28ce8 + last_write_checksum: sha1:e7fbe667fa5703dedd78672d936f1b02caf301b5 + pristine_git_object: b94477af4c51c7939fd6dcdb75cbc56459d4a30a + docs/models/jobmetadataout.md: + id: 30eb634fe247 + last_write_checksum: sha1:46d54b6f6004a6e571afd5207db5170dfbce7081 + pristine_git_object: 6218a161b71abbb35eb4ca6e3ce664226983efc2 + docs/models/jobsapiroutesbatchcancelbatchjobrequest.md: + id: 798cb1ca1385 + last_write_checksum: sha1:67e8bda117608aee0e09a702a1ef8a4b03c40b68 + pristine_git_object: c19d0241784ff69bc68a11f405437400057d6f62 + docs/models/jobsapiroutesbatchgetbatchjobrequest.md: + id: e83a7ec84f8a + last_write_checksum: sha1:d661875832b4b9d5f545262216c9fcb9a77c8cd0 + pristine_git_object: 8c259bea9bef11f779fd609f1212565d574457e2 + docs/models/jobsapiroutesbatchgetbatchjobsrequest.md: + id: 5b9c44ad4d31 + last_write_checksum: sha1:8e28b08c86355b097836e55559fda85487000092 + pristine_git_object: b062b8731ca7c99af968be2e65cca6aa5f122b37 + docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md: + id: 8eb8c127091e + last_write_checksum: sha1:2b93a6bed5743461bb03c8337fb25dfc5a15522e + pristine_git_object: f9700df50b8f512c4139c1830aba18989d022b8e + docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md: + id: deff83b39b78 + last_write_checksum: sha1:dac8d8f2e95aed2db9b46711e6e80816881d5d14 + pristine_git_object: 883cbac685563d2e0959b63638f6b967ebdf1ee9 + docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md: + id: 
c45757ba1ed9 + last_write_checksum: sha1:52d4f945aff24c03627111d0e7c73cbbba60129f + pristine_git_object: 1b331662b17cd24c22e88b01bf00d042cb658516 + docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md: + id: 8aa8030f26d7 + last_write_checksum: sha1:ebc6ac03e99d69fed1bae6cb4e858e0aecf2dd88 + pristine_git_object: eeddc3cdfdd975cdb69fbfcd306e9445010eb82f + docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md: + id: a9b75762e534 + last_write_checksum: sha1:8f1395447928e089c88dce8c0ced1030ec5f0eba + pristine_git_object: fde19800303a901149bf39c5330ef8c4da87df62 + docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md: + id: c0b31f4fc621 + last_write_checksum: sha1:6f70f5cabb62e2df7c1e4086f7a8b100143cc2aa + pristine_git_object: e0d2e3610ce460d834c2d07d9a34b09f8257217b + docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md: + id: 52078f097503 + last_write_checksum: sha1:5d8fe21d292264209508ae484a7e88d33bff373f + pristine_git_object: 3dca3cd85245e0956b557fc5d6ae6c5e265df38d + docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md: + id: b4e2b814d8c3 + last_write_checksum: sha1:f13b5c8f2e74cc73b58a30d366032c764603f95e + pristine_git_object: 4429fe480ab9486de98940a119ac63f40045313b + docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md: + id: cfd848845787 + last_write_checksum: sha1:b3a64f467ab1c16427ef77d3acb0749ab155e213 + pristine_git_object: 64f4cca608f8e505f9eeaac623955200dd5b9553 + docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md: + id: 75b5dd1bcbaa + last_write_checksum: sha1:dd30e7ff8748d26497458f3398c0547113dc058f + pristine_git_object: 95c1734daa7164bedeeb1fa58dd792939f25bc17 + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md: + id: 60bd2e28993a + last_write_checksum: sha1:7ff770c3d0148a4818957b279875bbe5b1ecfc62 + pristine_git_object: 6d93832e68739e465de7c61993b8bcfa1468bafc + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md: + id: c265a30fd4cf + 
last_write_checksum: sha1:e1a739e755b4e573f592743cd34116da97a67450 + pristine_git_object: 54f4c3981978e1ac4bdf42d5b746b73a62d13162 + docs/models/jobsout.md: + id: cbe31f43047d + last_write_checksum: sha1:73e1ce0ff11741c22dc00d768055ad603034147c + pristine_git_object: 977013f7a679dd89fb48c4a95b266a9ea5f3f7cf + docs/models/jobsoutdata.md: + id: 809574cac86a + last_write_checksum: sha1:06455044d314c4edbd1ce4833d551c10918f0a3e + pristine_git_object: 28cec31117416b79eb8688d84b47b157974574cc + docs/models/jobsoutobject.md: + id: 1c99619e2435 + last_write_checksum: sha1:cffbcfb8673e12feb8e22fd397bf68c8745c76bb + pristine_git_object: f6c8a2c3079003a885ee9bdfc73cf7c7c7d8eded + docs/models/jobtype.md: + id: 86685dbc7863 + last_write_checksum: sha1:da927d34a69b0b2569314cc7a62733ee1ab85186 + pristine_git_object: 847c662259537ed54cc108e8de8d8eb93defbe58 + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/legacyjobmetadataout.md: + id: b3b8c262f61a + last_write_checksum: sha1:bc611bf233bd5b224b1367c6b800de6c3b589b38 + pristine_git_object: 53a45485b70017e729709359407d6c9f3e0fbe35 + docs/models/legacyjobmetadataoutobject.md: + id: 5bafaafb6137 + last_write_checksum: sha1:30e5942a6d0c9fde35d29cd9d87a4304b0e4fa26 + pristine_git_object: 9873ada894f79647c05e386521c6b4208d740524 + docs/models/librariesdeletev1request.md: + id: c0c3b2e1aabc + last_write_checksum: sha1:bef84f8851b06d2d914b605f11109de1850d0294 + pristine_git_object: 68d7e54369ce75422bf8b0ff16cada1c0ae2b05c + docs/models/librariesdocumentsdeletev1request.md: + id: 9d557bd7d1cc + last_write_checksum: sha1:1b580b657559356886915ee5579b90a03db19337 + pristine_git_object: efccdb1bbc36cf644ed2d1716cbd202e6d6bf6c5 + docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md: + id: 27ad38ce4cb1 + last_write_checksum: sha1:b35ad610330232b395b5f87cc15f6ae270de6816 + 
pristine_git_object: 14ca66f72693f1df05eb93e0cca45f440b62d282 + docs/models/librariesdocumentsgetsignedurlv1request.md: + id: 4498715b6cfb + last_write_checksum: sha1:31f78079e31e070d080c99555cd2d85318fc4610 + pristine_git_object: 7c08c180d59a8e8475fea89424b8b2021d51385f + docs/models/librariesdocumentsgetstatusv1request.md: + id: c2219d3a3738 + last_write_checksum: sha1:44e79df94cf2686e83d7a2e793140a6a7b3a1c05 + pristine_git_object: e6d41875966348fd9e770d06c8099e48f0e64b5d + docs/models/librariesdocumentsgettextcontentv1request.md: + id: 850dfa465952 + last_write_checksum: sha1:4a1212e111525f4265d2924ce52f9c13d2787d4d + pristine_git_object: 2f58a4460ccdad531391318c62191e76c1ec22ac + docs/models/librariesdocumentsgetv1request.md: + id: cdd0df2f7e9d + last_write_checksum: sha1:36e5ef39552159044ecd28d20ee0792ea5bcadef + pristine_git_object: 6febc058425bb38857c391ee4c40d600858e6058 + docs/models/librariesdocumentslistv1request.md: + id: 7b5756e50d64 + last_write_checksum: sha1:2605b7972a3d7b4f73ab8052be4bf740f44f6f6f + pristine_git_object: 44f6300115853053214639982516a60b3268e778 + docs/models/librariesdocumentsreprocessv1request.md: + id: 1b8bf57b3f0a + last_write_checksum: sha1:8528785c1b4ae18d6ec6f261d29d5daac0d420a3 + pristine_git_object: 196ba17b749ce9efc1c30189864e474896814f85 + docs/models/librariesdocumentsupdatev1request.md: + id: b9147b1c0e38 + last_write_checksum: sha1:45b2cc114886b300e3b996a8b71241ac5c7260a3 + pristine_git_object: 2f18b014af4577a0ae862dfeea599d5f700005cb + docs/models/librariesdocumentsuploadv1documentupload.md: + id: c76458963b1c + last_write_checksum: sha1:6973cb619a8e50bb12e96cffdc6b57fcf7add000 + pristine_git_object: a0ba95da33a248fd639ca1af5f443fd043dae0ea + docs/models/librariesdocumentsuploadv1request.md: + id: 89a89d889c72 + last_write_checksum: sha1:4f67f0bc5b2accb6dcf31ce7be0e9447ab4da811 + pristine_git_object: 7c91ca9b92839be8ab1efb4428cc8d7a78d57e1e + docs/models/librariesgetv1request.md: + id: f47ad71ec7ca + 
last_write_checksum: sha1:3b2bf1e4f6069d0c954e1ebf95b575a32c4adeac + pristine_git_object: 6e1e04c39c15a85d96710f8d3a8ed11a22412816 + docs/models/librariessharecreatev1request.md: + id: 99e7bb8f7fed + last_write_checksum: sha1:e40d710ad1023768a0574b3283ef35544f6b0088 + pristine_git_object: 4c05241de4ee5a76df335ae9ea71004bd02b8669 + docs/models/librariessharedeletev1request.md: + id: bc8adba83f39 + last_write_checksum: sha1:79fc5a9a3cee5b060f29edd95f00e0fea32579cf + pristine_git_object: 850e22ab79863ba544f453138322c0eb5bf544cd + docs/models/librariessharelistv1request.md: + id: 86e6f08565e2 + last_write_checksum: sha1:6f2ffff66fa5fb141d930bca7bb56e978d62b4a5 + pristine_git_object: 98bf6d17ab013c1dd3f0ab18c37bbfc1a63f1b76 + docs/models/librariesupdatev1request.md: + id: f7e51b528406 + last_write_checksum: sha1:cec4aa232c78ca2bd862aee3d5fb3bcc2ad9dc05 + pristine_git_object: a68ef7a8f52ee4a606cb88d0a3f96de8c2fbccb8 + docs/models/libraryin.md: + id: a08170e6397c + last_write_checksum: sha1:2c996ecf1ae5d9e8df702a79741b72b3571eb6ef + pristine_git_object: d6b119148725627bcf76594c4a24e915399cd8f8 + docs/models/libraryinupdate.md: + id: 6d06b6b21498 + last_write_checksum: sha1:4ec01d7f7e24f58a74613d4847725bfd516b7d7f + pristine_git_object: 4aa169c7669c00fcedc423fbff6f386697360787 + docs/models/libraryout.md: + id: 2e8b6d91ded2 + last_write_checksum: sha1:d71053b44725147265871be445217e3e1a0e5ede + pristine_git_object: ebf46d57de6bad7022a3e8cb8eaf88728bbbe888 + docs/models/listdocumentout.md: + id: 4bec19e96c34 + last_write_checksum: sha1:c0b3a6e3841f120c52b1d7718d7226a52fe1b6d6 + pristine_git_object: f14157b8db55c1201d9f7151742e9ddf0d191c16 + docs/models/listfilesout.md: + id: 98d4c59cc07e + last_write_checksum: sha1:e76df31628984095f1123005009ddc4b59b1c2bc + pristine_git_object: bcb1f13aa17f41dadb6af37541e929364e2d6cec + docs/models/listlibraryout.md: + id: ea34f8548bd6 + last_write_checksum: sha1:cec920357bc48bea286c05d16c480a9a9369b459 + pristine_git_object: 
db76ffa10eb97f143ad4a6930e520e389fe18153 + docs/models/listsharingout.md: + id: a3249129f37e + last_write_checksum: sha1:4831e4f02e1d5e86f138c7bb6b04d095aa4df30f + pristine_git_object: bcac4834f3bd008868435189f40bbf9e368da0d2 + docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/messageentries.md: + id: 9af3a27b862b + last_write_checksum: sha1:a3eb6e37b780644313738f84e6c5ac653b4686bc + pristine_git_object: 76256fb913376a15d5bcd2531b18f1a78b980c9d + docs/models/messageinputcontentchunks.md: + id: 34aac9c271db + last_write_checksum: sha1:641cd1dba3721f85b049c5ee514879f067483949 + pristine_git_object: 4fd18a0dcb4f6af4a9c3956116f8958dc2fa78d1 + docs/models/messageinputentry.md: + id: eb74af2b9341 + last_write_checksum: sha1:a65737ba7d9592ff91b42689c5c98fca8060d868 + pristine_git_object: d55eb8769c3963518fcbc910d2e1398b6f46fd87 + docs/models/messageinputentrycontent.md: + id: 7e12c6be6913 + last_write_checksum: sha1:6be8be0ebea2b93712ff6273c776ed3c6bc40f9a + pristine_git_object: 65e55d97606cf6f3119b7b297074587e88d3d01e + docs/models/messageinputentryrole.md: + id: 2497d07a793d + last_write_checksum: sha1:a41eb58f853f25489d8c00f7a9595f443dcca2e6 + pristine_git_object: f2fdc71d8bc818b18209cd1834d4fead4dfd3ba6 + docs/models/messageinputentrytype.md: + id: 5d2a466dad0f + last_write_checksum: sha1:19f689ffdd647f3ddc747daf6cb0b4e811dfdcee + pristine_git_object: d3378124db83c92174e28fe36907263e2cbe6938 + docs/models/messageoutputcontentchunks.md: + id: 802048198dc0 + last_write_checksum: sha1:d70a638af21ee46126aa0434bf2d66c8dd8e43ff + pristine_git_object: d9c3d50e295b50618f106ef5f6b40929a28164df + docs/models/messageoutputentry.md: + id: f969119c8134 + last_write_checksum: sha1:cf5032929394584a31b3f12f55dfce6f665f71c7 + pristine_git_object: 5b42e20d1b03263f3d4d9f5cefe6c8d49c984e01 + docs/models/messageoutputentrycontent.md: + id: 44019e6e5698 + 
last_write_checksum: sha1:d0cc7a8ebe649614c8763aaadbf03624bb9e47e3 + pristine_git_object: 5206e4eb0d95e10b46c91f9f26ae00407d2dd337 + docs/models/messageoutputentryobject.md: + id: b3a7567581df + last_write_checksum: sha1:46528a6f87408c6113d689f2243eddf84bcbc55f + pristine_git_object: bb254c82737007516398287ff7878406866dceeb + docs/models/messageoutputentryrole.md: + id: bf7aafcdddab + last_write_checksum: sha1:e28643b6183866b2759401f7ebf849d4848abb10 + pristine_git_object: 783ee0aae4625f7b6e2ca701ac8fcdddcfe0e412 + docs/models/messageoutputentrytype.md: + id: 960cecf5fde3 + last_write_checksum: sha1:b6e52e971b6eb69582162a7d96979cacff6f5a9c + pristine_git_object: cb4a7a1b15d44a465dbfbd7fe319b8dbc0b62406 + docs/models/messageoutputevent.md: + id: b690693fa806 + last_write_checksum: sha1:8a87ff6b624d133bcea36729fb1b1a1a88b3eaf0 + pristine_git_object: 92c1c61587e34f6e143263e35c33acc9332870d6 + docs/models/messageoutputeventcontent.md: + id: cecea075d823 + last_write_checksum: sha1:16dac25382642cf2614e24cb8dcef6538be34914 + pristine_git_object: 16d8d52f6ff9f43798a94e96c5219314731ab5fb + docs/models/messageoutputeventrole.md: + id: 87d07815e9be + last_write_checksum: sha1:a6db79edc1bf2d7d0f4762653c8d7860cb86e300 + pristine_git_object: e38c6472e577e0f1686e22dc61d589fdb2928434 + docs/models/messageoutputeventtype.md: + id: 13c082072934 + last_write_checksum: sha1:03c07b7a6046e138b9b7c02084727785f05a5a67 + pristine_git_object: 1f43fdcce5a8cfe4d781b4a6faa4a265975ae817 + docs/models/messages.md: + id: 2103cd675c2f + last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244 + pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a + docs/models/metricout.md: + id: 7c6ff0ad95f9 + last_write_checksum: sha1:eef34dc522a351e23d7371c00a07662a0711ea73 + pristine_git_object: 3c552bac2fa3a5a3783db994d47d255a94643110 + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 + pristine_git_object: 
7416e2037c507d19ac02aed914da1208a2fed0a1 + docs/models/modelcapabilities.md: + id: 283fbc5fa32f + last_write_checksum: sha1:69312b751771ae8ffa0d1452e3c6c545fdbf52b7 + pristine_git_object: 646c8e94fd208cbf01df19ad6c9707ad235bc59b + docs/models/modelconversation.md: + id: 497521ee9bd6 + last_write_checksum: sha1:bd11f51f1b6fedbf8a1e1973889d1961086c164f + pristine_git_object: 1a03ef7d1dd9e1d6b51f0f9391c46feb5cd822a8 + docs/models/modelconversationobject.md: + id: 4c5699d157a9 + last_write_checksum: sha1:8e2e82e1fa4cb97f8c7a8a129b3cc9cd651e4055 + pristine_git_object: ead1fa26f5d9641a198a14b43a0f5689456e5821 + docs/models/modelconversationtools.md: + id: b3463ae729a7 + last_write_checksum: sha1:eb78650e337ab5354a0cdfbfcf975ed02495230b + pristine_git_object: 5cc97437c34263ad650c84c8702e158ee74ecfb1 + docs/models/modellist.md: + id: ce07fd9ce413 + last_write_checksum: sha1:4f2956eeba39cc14f2289f24990e85b3588c132a + pristine_git_object: 760882c6c5b442b09bbc91f910f960138d6a00c8 + docs/models/modeltype.md: + id: 9f69805691d1 + last_write_checksum: sha1:f3a8bce458460e55124ce5dd6814e7cada8e0e89 + pristine_git_object: a31c3ca0aa78cae9619b313f1cda95b9c391ee12 + docs/models/moderationobject.md: + id: 4e84364835f5 + last_write_checksum: sha1:2831033dcc3d93d32b8813498f6eb3082e2d3c4e + pristine_git_object: 320b2ab4935f8751eb58794e8eb9e422de35ae7c + docs/models/moderationresponse.md: + id: e15cf12e553b + last_write_checksum: sha1:18e8f4b4b97cb444824fcdce8f518c4e5a27c372 + pristine_git_object: 75a5eec74071fdd0d330c9f3e10dac0873077f20 + docs/models/name.md: + id: 6ee802922293 + last_write_checksum: sha1:91a266ed489c046a4ec511d4c03eb6e413c2ff02 + pristine_git_object: 18b978a8cc2c38d65c37e7dd110315cedb221620 + docs/models/object.md: + id: 7ffe67d0b83f + last_write_checksum: sha1:dfb590560db658dc5062e7cedc1f3f29c0d012a0 + pristine_git_object: 0122c0db4541d95d57d2edb3f18b9e1921dc3099 + docs/models/ocrimageobject.md: + id: b72f3c5853b2 + last_write_checksum: 
sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 + pristine_git_object: 3c0d5544a80499b011467f29ef83d49f53801af6 + docs/models/ocrpagedimensions.md: + id: b3429f9883f5 + last_write_checksum: sha1:6435aa56e6153b0c90a546818ed780105ae1042a + pristine_git_object: c93ca64d5e20319ec6ec1bcb82b28c6ce0940f29 + docs/models/ocrpageobject.md: + id: 88a9e101b11e + last_write_checksum: sha1:091077fedf1b699d5160a21fe352056c247ef988 + pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e + docs/models/ocrrequest.md: + id: 6862a3fc2d0f + last_write_checksum: sha1:f32fcc5916f9eedf7adfaa60beda30a9ec42f32e + pristine_git_object: 76e4da925937fd4bdd42307f116a74d4dbf2bea3 + docs/models/ocrresponse.md: + id: 30042328fb78 + last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 + pristine_git_object: 0a309317644eedc643009b6cec3a7dbb142b1a15 + docs/models/ocrtableobject.md: + id: c967796380e6 + last_write_checksum: sha1:3b78858cc130fc8792ec3d149c8f657fd3f7a4c3 + pristine_git_object: 4e27697c15983f86274648b2d7bacac557081630 + docs/models/ocrusageinfo.md: + id: 419abbb8353a + last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e + pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c + docs/models/one.md: + id: 69a5df93c480 + last_write_checksum: sha1:cb6d46c2939a0e2314e29ff0307a2b0632caca65 + pristine_git_object: 3de496a6201d47ea52fc15bfe16a44bd6d3be900 + docs/models/outputcontentchunks.md: + id: f7e175c8e002 + last_write_checksum: sha1:5094466110028801726cc825e8809f524fe1ee24 + pristine_git_object: c76bc31d4d8791b7bef4dc6cbff6671b38a7927d + docs/models/outputs.md: + id: 58b672ddb5b3 + last_write_checksum: sha1:7553d62771ac5a85f8f330978b400cdd420cf865 + pristine_git_object: 7756c6276cc141b69d8099e0bbcbd2bccc1b5112 + docs/models/paginationinfo.md: + id: 3d2b61cbbf88 + last_write_checksum: sha1:1da38e172024fe703f3180ea3c6ec91fe3c51ed0 + pristine_git_object: ad1fbb86c714c152a5e6e99d8a741e7346884e55 + docs/models/prediction.md: + id: 3c70b2262201 + 
last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/processingstatusout.md: + id: 83c8c59c1802 + last_write_checksum: sha1:046375bb3035cc033d4484099cd7f5a4f53ce88c + pristine_git_object: 7b67583f4209778ac6f945631c0ee03ba1f4c663 + docs/models/queryparamstatus.md: + id: 15628120923d + last_write_checksum: sha1:36f1c9b6a6af6f27fbf0190417abf95b4a0bc1b9 + pristine_git_object: dcd2090861b16f72b0fb321714b4143bc14b7566 + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 + pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc + docs/models/referencechunktype.md: + id: 0944b80ea9c8 + last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 + pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 + docs/models/repositories.md: + id: 0531efe9bced + last_write_checksum: sha1:249bdb315eb1f0bd54601e5b8a45e58cb1ec7638 + pristine_git_object: 02274e3d58d55f4a18dfdf578fa53d2459e1345e + docs/models/requestsource.md: + id: 8857ab6025c4 + last_write_checksum: sha1:4b7ecc7c5327c74e46e2b98bd6e3814935cdecdf + pristine_git_object: c81c115992439350d56c91d2e3351a13df40676b + docs/models/response1.md: + id: 245c499462a9 + last_write_checksum: sha1:6d64b50b59875744eb3c1038d7cdcba9397fdbae + pristine_git_object: 2e73fdbb204c14cadc028d0891ede0ca4d4178d7 + docs/models/responsebody.md: + id: a2c4400c632e + last_write_checksum: sha1:a1705a40914ac8f96000953bd53ca01f66643fcd + pristine_git_object: 8a218517178eed859683f87f143c5397f96d10d9 + docs/models/responsedoneevent.md: + id: 38c38c3c065b + last_write_checksum: sha1:9910c6c35ad7cb8e5ae0edabcdba8a8a498b3138 + pristine_git_object: ec25bd6d364b0b4959b11a6d1595bdb57cba6564 + docs/models/responsedoneeventtype.md: + id: 03a896b6f98a + last_write_checksum: sha1:09ccbc7ed0143a884481a5943221be2e4a16c123 + pristine_git_object: 58f7f44d74553f649bf1b54385926a5b5d6033f5 + 
docs/models/responseerrorevent.md: + id: 3e868aa9958d + last_write_checksum: sha1:9ed1d04b3ed1f468f4dc9218890aa24e0c84fc03 + pristine_git_object: 2ea6a2e0ec412ae484f60fa1d09d02e776499bb9 + docs/models/responseerroreventtype.md: + id: 5595b8eec59e + last_write_checksum: sha1:442185b0615ec81923f4c97478e758b451c52439 + pristine_git_object: 3b3fc303fc7f75c609b18a785f59517b222b6881 + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/responsestartedevent.md: + id: 88e3b9f0aa8d + last_write_checksum: sha1:fa9db583e8223d2d8284866f7e6cf6d775751478 + pristine_git_object: 481bd5bba67a524dbadf9f1570a28ae20ec9f642 + docs/models/responsestartedeventtype.md: + id: 1d27fafe0f03 + last_write_checksum: sha1:c30ca125ec76af9a2191ebc125f5f8b9558b0ecb + pristine_git_object: 2d9273bd02bf371378575619443ec948beec8d66 + docs/models/retrievefileout.md: + id: 8e82ae08d9b5 + last_write_checksum: sha1:600d5ea4f75dab07fb1139112962affcf633a6c9 + pristine_git_object: 28f97dd25718833aaa42c361337e5e60488bcdc8 + docs/models/retrievemodelv1modelsmodelidgetrequest.md: + id: ac567924689c + last_write_checksum: sha1:7534c5ec5f1ae1e750c8f610f81f2106587e81a9 + pristine_git_object: f1280f8862e9d3212a5cfccd9453884b4055710a + docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md: + id: c2a914584353 + last_write_checksum: sha1:bdd52e2c434fc6fd10e341d41de9dda1a28ddb4f + pristine_git_object: 3ac96521a8f58f1ed4caedbb4ab7fe3fe2b238c5 + docs/models/role.md: + id: b694540a5b1e + last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d + pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053 + docs/models/sampletype.md: + id: 0e09775cd9d3 + 
last_write_checksum: sha1:33cef5c5b097ab7a9cd6232fe3f7bca65cd1187a + pristine_git_object: 34a6a012b1daeeb22626417650269e9376cc9170 + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:45b7b8881a6560a468153662d61b99605a492edf + pristine_git_object: 2e0839d06f821dd97780dc22f202dedf23e4efe1 + docs/models/shareenum.md: + id: 53a713500576 + last_write_checksum: sha1:9d45d4bd272e6c146c3a8a21fd759acf2ae22148 + pristine_git_object: dc5d2b68a810c2983b5a47fbff747dfc2cc17598 + docs/models/sharingdelete.md: + id: 165cac179416 + last_write_checksum: sha1:1a0b3c95f4b56173510e234d7a76df85c593f360 + pristine_git_object: 1dcec0950c7fcd264ea9369c24244b54ba2bcfbf + docs/models/sharingin.md: + id: 08d396ee70ad + last_write_checksum: sha1:662edfc07a007e94fe1e54a07cf89d7c83c08df5 + pristine_git_object: bac18c8d43f801e8b5cf5b3cd089f9da0ee2281a + docs/models/sharingout.md: + id: 5db4547c7c56 + last_write_checksum: sha1:bd15c318d1a3f5bee7d7104d34cbd8ba6233bbb8 + pristine_git_object: 35aeff43593f3c9067c22a2f8b1468d7faa5af34 + docs/models/source.md: + id: 6541ef7b41e7 + last_write_checksum: sha1:d0015be42fe759d818ebd75b0cec9f83535a3b89 + pristine_git_object: bb1ed6124647b02c4350123bf257b0bf17fc38fd + docs/models/ssetypes.md: + id: 6a902241137c + last_write_checksum: sha1:567027284c7572c0fa24132cd119e956386ff9d0 + pristine_git_object: ae06b5e870d31b10f17224c99af1628a7252bbc3 + docs/models/status.md: + id: 959cd204aadf + last_write_checksum: sha1:618f30fd5ba191bb918c953864bfac4a63192a40 + pristine_git_object: 5e22eb736c734121b4b057812cacb43b3e299b52 + docs/models/stop.md: + id: f231cc9f5041 + last_write_checksum: sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8 + pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846 + docs/models/systemmessage.md: + id: fdb7963e1cdf + last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f + pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + 
last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/tableformat.md: + id: d8cd08c55c3c + last_write_checksum: sha1:e0736ea9576466d71821aa1e67fc632cc5a85414 + pristine_git_object: 54f029b814fdcfa2e93e2b8b0594ef9e4eab792a + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:f04818ca76e68b3d3684927e4032d5d7de882f6a + pristine_git_object: d488cb51abeb4913c8441d9fbe9e5b964099bb7e + docs/models/textchunktype.md: + id: 886e88ebde41 + last_write_checksum: sha1:ba8db2a3910d1c8af424930c01ecc44889335bd3 + pristine_git_object: e2a2ae8bcdf8a35ad580a7de6271a5d26cd19504 + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:feb95a931bb9cdbfe28ab351618687e513cf830b + pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab + docs/models/thinkchunktype.md: + id: 0fbeed985341 + last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 + pristine_git_object: baf6f755252d027295be082b53ecf80555039414 + docs/models/thinking.md: + id: 07234f8dd364 + last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 + pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + docs/models/timestampgranularity.md: + id: eb4d5a8e6f08 + last_write_checksum: sha1:c2134d9f1f96d4eef48cedfe2b93eb061d5ea47f + pristine_git_object: 0d2a8054590463a167f69c36c00b8f2fc3c7906d + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolchoice.md: + id: 
"097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + docs/models/toolchoiceenum.md: + id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolexecutiondeltaevent.md: + id: f2fc876ef7c6 + last_write_checksum: sha1:901756826684886179c21f47c063c55700c79ec4 + pristine_git_object: 7bee6d831a92085a88c0772300bcad4ce8194edb + docs/models/toolexecutiondeltaeventname.md: + id: 93fd3a3b669d + last_write_checksum: sha1:d5dcdb165c220209ee76d81938f2d9808c77d4fc + pristine_git_object: 9c3edef8c0698d7293a71ee56410a0ed67fd1924 + docs/models/toolexecutiondeltaeventtype.md: + id: ae6e8a5bf0ce + last_write_checksum: sha1:dd405269077b6a4756fd086067c9bbe88f430924 + pristine_git_object: a4a2f8cc9927499c990bad0590e84b2a609add8d + docs/models/toolexecutiondoneevent.md: + id: b604a4ca5876 + last_write_checksum: sha1:267ff0e19884e08abf3818b890579c1a13a3fa98 + pristine_git_object: 5898ea5eff103b99886789805d9113dfd8b01588 + docs/models/toolexecutiondoneeventname.md: + id: d19dc0060655 + last_write_checksum: sha1:aa5677087e6933699135a53f664f5b86bbae5ac6 + pristine_git_object: 6449079d7b467796355e3353f4245046cced17e8 + docs/models/toolexecutiondoneeventtype.md: + id: 7c5a318d924b + last_write_checksum: sha1:55a5041cdf8c7e05fcfd7260a72f7cd3f1b2baf8 + pristine_git_object: 872624c1f274259cdd22100995b5d99bf27eaeac + docs/models/toolexecutionentry.md: + id: 75a7560ab96e + last_write_checksum: sha1:66086952d92940830a53f5583f1751b09d902fcf + pristine_git_object: 3678116df64ad398fef00bab39dd35c3fd5ee1f5 + docs/models/toolexecutionentryobject.md: + id: af106f91001f + last_write_checksum: sha1:6df075bee4e84edf9b57fcf62f27b22a4e7700f4 + pristine_git_object: 0ca79af56d60094099c8830f638a748a92a40f21 + docs/models/toolexecutionentrytype.md: + id: b61e79a59610 + last_write_checksum: 
sha1:b0485bae901e14117f76b8e16fe80023a0913787 + pristine_git_object: a67629b8bdefe59d188969a2b78fa409ffeedb2a + docs/models/toolexecutionstartedevent.md: + id: 37657383654d + last_write_checksum: sha1:3051a74c1746c8341d50a22f34bd54f6347ee0c8 + pristine_git_object: de81312bda08970cded88d1b3df23ebc1481ebf2 + docs/models/toolexecutionstartedeventname.md: + id: be6b33417678 + last_write_checksum: sha1:f8857baa02607b0a0da8d96d130f1cb765e3d364 + pristine_git_object: 3308c483bab521f7fa987a62ebd0ad9cec562c3a + docs/models/toolexecutionstartedeventtype.md: + id: 9eff7a0d9ad5 + last_write_checksum: sha1:86fe6aec11baff4090efd11d10e8b31772598349 + pristine_git_object: 56695d1f804c28808cf92715140959b60eb9a9fd + docs/models/toolfilechunk.md: + id: 67347e2bef90 + last_write_checksum: sha1:0a499d354a4758cd8cf06b0035bca105ed29a01b + pristine_git_object: a3ffaa2b8339ae3a090a6a033b022db61a75125b + docs/models/toolfilechunktool.md: + id: eafe1cfd7437 + last_write_checksum: sha1:73a31dbff0851612f1e03d8fac3dbbee77af2df0 + pristine_git_object: aa5ac8a99a33d8c511f3d08de93e693bf75fb2a1 + docs/models/toolfilechunktype.md: + id: f895006e53e4 + last_write_checksum: sha1:258a55eef5646f4bf20a150ee0c48780bdddcd19 + pristine_git_object: 7e99acefff265f616b576a90a5f0484add92bffb + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167 + pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/toolmessagerole.md: + id: f333d4d1ab56 + last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f + pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6 + docs/models/toolreferencechunk.md: + id: 10414b39b7b3 + last_write_checksum: sha1:2e24f2331bb19de7d68d0e580b099c03f5207199 + pristine_git_object: 
3020dbc96563e2d36941b17b0945ab1e926948f4 + docs/models/toolreferencechunktool.md: + id: c2210d74792a + last_write_checksum: sha1:368add3ac6df876bc85bb4968de840ac578ae623 + pristine_git_object: 999f7c34885015a687c4213d067b144f1585c946 + docs/models/toolreferencechunktype.md: + id: 42a4cae4fd96 + last_write_checksum: sha1:43620d9529a1ccb2fac975fbe2e6fcaa62b5baa5 + pristine_git_object: bc57d277a39eef3c112c08ffc31a91f5c075c5a4 + docs/models/tools.md: + id: b78ed2931856 + last_write_checksum: sha1:ea4dcd2eafe87fc271c2f6f22f9b1cedc9f8316e + pristine_git_object: f308d732e3adfcc711590c3e1bee627c94032a6b + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 + docs/models/trainingfile.md: + id: 4039958e8930 + last_write_checksum: sha1:d02543c2d1446e56501f2ac358a09669b0077648 + pristine_git_object: cde218bb2281a1274d013844ad76b4b2a34b986c + docs/models/transcriptionresponse.md: + id: 39e2354aca38 + last_write_checksum: sha1:7b32e2179c3efc675c05bba322cc33554a9ff9db + pristine_git_object: 1bc0189c5d1833c946a71c9773346e21b08d2404 + docs/models/transcriptionsegmentchunk.md: + id: f09db8b2273e + last_write_checksum: sha1:c94ef1aa3dc2568ec77d186fa9061598f0ebccf1 + pristine_git_object: bebc9f72f521cf9cbd1818d53239cd632a025a31 + docs/models/transcriptionstreamdone.md: + id: 2253923d93cf + last_write_checksum: sha1:043ebcd284007f8c8536f2726ec5f525abffeb6b + pristine_git_object: 9ecf7d9ca32410d92c93c62ead9674e097533ec3 + docs/models/transcriptionstreamdonetype.md: + id: 3f5aec641135 + last_write_checksum: sha1:b86f7b20dff031e7dbe02b4805058a025c39dcac + pristine_git_object: db092c4fa47d7401919a02c199198e4ae99a5de1 + docs/models/transcriptionstreamevents.md: + id: d0f4eedfa2b6 + last_write_checksum: sha1:ec6b992049bd0337d57baab56603b1fa36a0a35b + pristine_git_object: f760385dfbd9779e63d61ec6357901bc9b4ca8e9 + docs/models/transcriptionstreameventsdata.md: + id: 
506af75a0708 + last_write_checksum: sha1:99fcb3bf3aab0fb87dc02a4e6ccef9271ff0ae89 + pristine_git_object: eea8e9281634c56517e28f613afee38e0b0846ad + docs/models/transcriptionstreameventtypes.md: + id: 701782e8a63d + last_write_checksum: sha1:ff79dfb5d942c807b03c9e329a254bfa95b99a16 + pristine_git_object: e4eb25a6400dcc5a48b5eb5f65e96f7be91fa761 + docs/models/transcriptionstreamlanguage.md: + id: 5e9df200153c + last_write_checksum: sha1:82967c1b056bc1358adb21644bf78f0e37068e0f + pristine_git_object: e16c8fdce3f04ae688ddc18650b359d2dd5d6f6f + docs/models/transcriptionstreamlanguagetype.md: + id: 81c8bd31eeb1 + last_write_checksum: sha1:6cf3efec178180266bccda24f27328edfbebbd93 + pristine_git_object: e93521e10d43299676f44c8297608cc94c6106e6 + docs/models/transcriptionstreamsegmentdelta.md: + id: f59c3fb696f2 + last_write_checksum: sha1:d44b6c1359c0ed504f97edb46b3acf0145967fe7 + pristine_git_object: 3deeedf067c833cae8df1ab366a2e54b3f9e9186 + docs/models/transcriptionstreamsegmentdeltatype.md: + id: 03ee222a3afd + last_write_checksum: sha1:d02b5f92cf2d8182aeaa8dd3428b988ab4fc0fad + pristine_git_object: 03ff3e8bb4f25770200ed9fb43dd246375934c58 + docs/models/transcriptionstreamtextdelta.md: + id: 69a13554b554 + last_write_checksum: sha1:9f6c7bdc50484ff46b6715141cee9912f1f2f3ff + pristine_git_object: adddfe187546c0161260cf06953efb197bf25693 + docs/models/transcriptionstreamtextdeltatype.md: + id: ae14d97dc3fa + last_write_checksum: sha1:2abfea3b109518f7371ab78ade6fa514d6e3e968 + pristine_git_object: b7c9d675402cd122ee61deaa4ea7051c2503cf0e + docs/models/two.md: + id: 3720b8efc931 + last_write_checksum: sha1:8676158171bef1373b5e0b7c91a31c4dd6f9128a + pristine_git_object: 59dc2be2a2036cbdac26683e2afd83085387188f + docs/models/type.md: + id: 98c32f09b2c8 + last_write_checksum: sha1:9b07c46f7e1aacaab319e8dfdcfdfc94a2b7bf31 + pristine_git_object: d05ead75c8f6d38b4dbcc2cdad16f1ba4dd4f7e8 + docs/models/unarchiveftmodelout.md: + id: 4f2a771b328a + last_write_checksum: 
sha1:b3be8add91bbe10704ff674891f2e6377b34b539 + pristine_git_object: 287c9a007e0b2113738a1884450133558d23540e + docs/models/unarchiveftmodeloutobject.md: + id: 5fa9545c3df0 + last_write_checksum: sha1:29c0a228082142925a0fd72fef5a578f06ac764d + pristine_git_object: 623dcec24e2c676c9d50d3a3547b1dd9ffd78038 + docs/models/updateftmodelin.md: + id: 1b98d220f114 + last_write_checksum: sha1:d1c7a8f5b32228d8e93ad4455fccda51b802f08f + pristine_git_object: 4e55b1a7d96e1ad5c1e65c6f54484b24cd05fcfc + docs/models/uploadfileout.md: + id: c991d0bfc54c + last_write_checksum: sha1:ce5af8ffadb8443a6d1ca5fbbc014de42da35b9d + pristine_git_object: 6f09c9a6920f373c730fa3538b0c2953d757c257 + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed + pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/usermessagerole.md: + id: 99ffa937c462 + last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111 + pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4 + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/validationerror.md: + id: 304bdf06ef8b + last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 + pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + docs/models/wandbintegration.md: + id: ba1f7fe1b1a3 + last_write_checksum: sha1:1702d58db559818304404a5dc8c70d71fb2be716 + pristine_git_object: 199d2eddc61069c80b628a12bff359ac2abc7338 + 
docs/models/wandbintegrationout.md: + id: c1a0f85273d8 + last_write_checksum: sha1:c2addbba8c15b7c115129d5249c4a6d7dc527d2d + pristine_git_object: cec02ed87555128e6027e00f3385a61028694ac0 + docs/models/wandbintegrationouttype.md: + id: 647c7c2eab8a + last_write_checksum: sha1:78ad7847183b18319995b5e3de0262ba6fffecac + pristine_git_object: 5a7533c99671e0556c3c11f179312ec8268ce477 + docs/models/wandbintegrationtype.md: + id: 08c414c73826 + last_write_checksum: sha1:0990c604ec45f2f1fd1019e87705533b0c9be023 + pristine_git_object: 4fdffe22e370fd64429d83753c30a0079be0e7fd + docs/models/websearchpremiumtool.md: + id: 267988aa8c3f + last_write_checksum: sha1:cc040d754d40c644a2a8fd70302eb7ee864bfff3 + pristine_git_object: 941fc2b8448d4caeae9318fdf08053a2b59a9bee + docs/models/websearchpremiumtooltype.md: + id: c70fa6b0ee9f + last_write_checksum: sha1:069ad330c3f5b3c6b8a375de4484f151698c439c + pristine_git_object: 348bfe854914114c84cd74997a63fe2badc0756d + docs/models/websearchtool.md: + id: fc4df52fb9b5 + last_write_checksum: sha1:53e128c3f0f6781227d99d46838579dc15ab26d2 + pristine_git_object: c8d708bdcdbfc387a09683bdd47ebabedd566cb0 + docs/models/websearchtooltype.md: + id: 6591e569c4f3 + last_write_checksum: sha1:f9b6672bc3fbb5bb70c4919cb7b98160a0ebe9ff + pristine_git_object: 57b6acbbd3b85aae5a9b7e2f754689637c01a912 + docs/sdks/accesses/README.md: + id: 2ea167c2eff2 + last_write_checksum: sha1:ac4ec473f9991ea2ca3e66838f8f791a54d881e3 + pristine_git_object: 040bc24c6acb9153296e105009ac4ef251cc2dd4 + docs/sdks/agents/README.md: + id: 5965d8232fd8 + last_write_checksum: sha1:f368d2c40ad72aa9e8de04809bd300e935dbb63b + pristine_git_object: 173925eead663741af81d5f624c2964278bde979 + docs/sdks/chat/README.md: + id: 393193527c2c + last_write_checksum: sha1:931ab91704f496b220c7da1aa985cea14d969784 + pristine_git_object: 5bb24baa3444d72faace5473d0a775a0e5ad403e + docs/sdks/classifiers/README.md: + id: 74eb09b8d620 + last_write_checksum: sha1:d047af486fd4acd7f813232b20164eab11541c2d 
+ pristine_git_object: e76efb79d8b1353208b42619f4cc5b688ef5d561 + docs/sdks/conversations/README.md: + id: e22a9d2c5424 + last_write_checksum: sha1:b4e49eadaf5a3bb50f5c3a88a759bc529db2584f + pristine_git_object: c488848cc4c18a098deae8f02c0d4a86d1d898db + docs/sdks/documents/README.md: + id: 9758e88a0a9d + last_write_checksum: sha1:84791e86c3b9c15f8fd16d2a3df6bd3685023a69 + pristine_git_object: d3f5a9757c2327dab8e5b1962542b37c5e2551af + docs/sdks/embeddings/README.md: + id: 15b5b04486c1 + last_write_checksum: sha1:4da183aaf0df15d3a027077784903d93d8ea58e0 + pristine_git_object: 4390b7bd999a75a608f324f685b2284a8fa277ec + docs/sdks/files/README.md: + id: e576d7a117f0 + last_write_checksum: sha1:88cd213e513854b8beee72b8ea751f74bf32a845 + pristine_git_object: f0dfd59364c06e84d9cce517594a2912e2b724c8 + docs/sdks/fim/README.md: + id: 499b227bf6ca + last_write_checksum: sha1:824f7d1b58ff0b650367737c0e9b91a9d2d14a45 + pristine_git_object: db6f2e1b65866e1309d94e852fa0a1e82d2606fd + docs/sdks/jobs/README.md: + id: 7371cdc8b89a + last_write_checksum: sha1:5117aebda0558e7b82150f0b91480e3362687a89 + pristine_git_object: 666224a728cc433bca9520437d36a2b526ac2df6 + docs/sdks/libraries/README.md: + id: df9a982905a3 + last_write_checksum: sha1:8769d4b43f93c744fca43c34a7d7e9d99122c886 + pristine_git_object: e672c190ad6ac4623f99357d7e59d52f6722518f + docs/sdks/mistralagents/README.md: + id: 20b3478ad16d + last_write_checksum: sha1:b13e50de2ff10eabb4534f561c8cac185485280b + pristine_git_object: 97819467c39bc4f813093e55756e38ba06263a87 + docs/sdks/mistraljobs/README.md: + id: 71aafa44d228 + last_write_checksum: sha1:255a4221b3b61ef247b39c9723a78408cda486d3 + pristine_git_object: f1aa3f61973b1ee48777afb7fecc4bdf459882a0 + docs/sdks/models/README.md: + id: b35bdf4bc7ed + last_write_checksum: sha1:8e256360d014fc3384256a9f155c6382f8e16a6d + pristine_git_object: d51866b6cff74932bf86c266f75773c2d3e74fd0 + docs/sdks/ocr/README.md: + id: 545e35d2613e + last_write_checksum: 
sha1:fc478d79405c775e9ae65334122d4539be952492 + pristine_git_object: 6fd904cc045b8accf5cc11436fd66f4024c9897f + docs/sdks/transcriptions/README.md: + id: 089cf94ecf47 + last_write_checksum: sha1:fdf785e4cbab20aec41122735435a38f582f7f29 + pristine_git_object: 3243258c4debd94e10c98c2b18dcc47838143a5b + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 + pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 + src/mistralai/__init__.py: + id: 7aaa1403a9fc + last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b + pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c + src/mistralai/_hooks/__init__.py: + id: 89bd3648c8ca + last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d + pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + src/mistralai/_hooks/sdkhooks.py: + id: a085b78b3f45 + last_write_checksum: sha1:1d9666df503110a00569c2a79886ac3be49a3ffb + pristine_git_object: 1f9a9316c430821226ada4db2b37f87083f1c326 + src/mistralai/_hooks/types.py: + id: 066b285c9341 + last_write_checksum: sha1:16bf3c53068c38ba0f838172787178c883551283 + pristine_git_object: 6d0f3e1166cb0271f89f5ba83441c88199d7a432 + src/mistralai/_version.py: + id: 37b53ba66d7f + last_write_checksum: sha1:eb93ac459ae2c6e3551452f251db32d7c3ee3908 + pristine_git_object: 8c26c0cea422eea543df9c639b4b7d4b751b7692 + src/mistralai/accesses.py: + id: 98cb4addd052 + last_write_checksum: sha1:5d9d495274d67b1343ba99d755c1c01c64c2ead1 + pristine_git_object: be02ee5bafa1b10a52e79d1ad5481fa80908d99a + src/mistralai/agents.py: + id: aa07ea92bffb + last_write_checksum: sha1:2a760562daf1a01a66e5250658dffc5043e3c8ea + pristine_git_object: 73e4ee3c885f7c3472a9dc5c0546c02d4e19a1c4 + src/mistralai/audio.py: + id: c398f6a11e24 + last_write_checksum: 
sha1:11f9713b4f970509cffe0e6122c61f9aeafc9e73 + pristine_git_object: 5687abdb5676903661a33a3bee115f289f5fe9df + src/mistralai/basesdk.py: + id: 3127264590ce + last_write_checksum: sha1:5340f1c5976fd87d3b17b285535b63bbbe7db120 + pristine_git_object: c9a32aa13eae485d0159632dadbfbb2452978709 + src/mistralai/batch.py: + id: 60df0c5efce3 + last_write_checksum: sha1:9d463fd6ac747635ab2b0e61c918a098aae5a370 + pristine_git_object: 7ed7ccefdaab2368dc7bb9fa8c718a05dcec3ca6 + src/mistralai/beta.py: + id: 7d1c8d453249 + last_write_checksum: sha1:780b45086f215d1f04983d1ea6c89acc16475cfc + pristine_git_object: 4bbf1fa36053c6754026285f3a149911b653d735 + src/mistralai/chat.py: + id: cb76f81a1426 + last_write_checksum: sha1:cf0a3b1b2d1163cb96c0c57d4cf0bede556c02b1 + pristine_git_object: 1528c4c93fc8b5f5d02976db836a1cefda4d1e57 + src/mistralai/classifiers.py: + id: a8f7d4c1c787 + last_write_checksum: sha1:6eabb0ba04fdf77d4bb9b45399c6f2ce55fe8317 + pristine_git_object: 7c32506ec03cc0fd88b786ff49d7690fd4283d2a + src/mistralai/conversations.py: + id: be58e57a6198 + last_write_checksum: sha1:76169b9954e645c9d7260b4d9e08be87de7ec643 + pristine_git_object: 93ed8c281a2f44e19f833309ec67b5f35cab1b53 + src/mistralai/documents.py: + id: 1945602083a8 + last_write_checksum: sha1:14d1e6b5a95869d70a6fc89b07d5365c98aff5d7 + pristine_git_object: fac58fdb2e76668911fc6c59918b1b444aed0bd5 + src/mistralai/embeddings.py: + id: 2bbb9b5427d7 + last_write_checksum: sha1:842f784ab976936902be23331b672bdba8c88bc9 + pristine_git_object: 7430f8042df4fec517288d0ddb0eb174e7e43a8e + src/mistralai/files.py: + id: 0e29db0e2269 + last_write_checksum: sha1:e4f833d390f1b3b682f073a76ffb6e29f89c55d1 + pristine_git_object: ab2c75a2f6774a99fe67ac5d3b0fa6544d093181 + src/mistralai/fim.py: + id: 71a865142baf + last_write_checksum: sha1:7accf79c11a17fefbacde7f2b0f966f3716233df + pristine_git_object: 53109c70f0ad9844a4c445a5ed674f675b24d274 + src/mistralai/fine_tuning.py: + id: 12578f7d13a6 + last_write_checksum: 
sha1:e48227f7ea5b51d837e7619f59582e663eb94ed1 + pristine_git_object: 8ed5788a58ab2e9d1125b30624c734a602084294 + src/mistralai/httpclient.py: + id: dcfb0dd6b386 + last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 + pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + src/mistralai/jobs.py: + id: 6869267a98bf + last_write_checksum: sha1:e771ca001a64cc3be33964e95393495a16ab3d8c + pristine_git_object: df8ae4d3489f2791586ac6399bfe6039522f09b4 + src/mistralai/libraries.py: + id: e5b244f28b27 + last_write_checksum: sha1:7084d7b61238494f834fe20dcf387810e77f3eb0 + pristine_git_object: 32648937feb79adf6155423cbe9bac4d7fe52224 + src/mistralai/mistral_agents.py: + id: 671c4985aaa1 + last_write_checksum: sha1:5e80f9f13f811dc0c47ba200eab0e4203b8d1472 + pristine_git_object: 1d2040682c3e1c9fdae8562bad7919bbce5c68c8 + src/mistralai/mistral_jobs.py: + id: 18065a449da0 + last_write_checksum: sha1:fb205d962444f6aba163ecd3169c12489b3f0cc9 + pristine_git_object: d1aeec8a014b22e44f4fe5e751206c3648e875af + src/mistralai/models/__init__.py: + id: 3228134f03e5 + last_write_checksum: sha1:2984e08157e90f500bfd135c037723b5d1902e9c + pristine_git_object: e69acaf83ab2433f99f431dd992004543839d33a + src/mistralai/models/agent.py: + id: ca4162a131b1 + last_write_checksum: sha1:fe8a7c8c9c4ba59613d7d89f0c2e7a6958e25f85 + pristine_git_object: eb30905b3de2b69ece35bdd40f390b2fa6ffc5a8 + src/mistralai/models/agentconversation.py: + id: bd3035451c40 + last_write_checksum: sha1:2e4a6a5ae0da2e9ccbb588c8487b48077d561d93 + pristine_git_object: 625fb4fc6697860060dfdeb449986d89efc232d6 + src/mistralai/models/agentcreationrequest.py: + id: 87f33bd9ea58 + last_write_checksum: sha1:a6885376d36a5a17273d8d8d8d45e3d6c3ee1b9f + pristine_git_object: 6a14201eca82f26871ab4f87e547a5e9bcf3b933 + src/mistralai/models/agenthandoffdoneevent.py: + id: 496685a9343b + last_write_checksum: sha1:f03d37569960b56155e977aa68fbbaad8e25f687 + pristine_git_object: 1cdbf45652ff70d045c650734ab6bdc0eca97734 + 
src/mistralai/models/agenthandoffentry.py: + id: 836045caeb8f + last_write_checksum: sha1:e5c6b73014cd6859a47cb5958cdfa7b105e3aa3e + pristine_git_object: 66136256215caf7c1f174deec70ab9fbfff634fc + src/mistralai/models/agenthandoffstartedevent.py: + id: ce8e306fa522 + last_write_checksum: sha1:2b5bac2f628c0e7cdd6df73404f69f5d405e576c + pristine_git_object: 11bfa918903f8de96f98f722eaaf9a70b4fca8c1 + src/mistralai/models/agents_api_v1_agents_deleteop.py: + id: 588791d168a1 + last_write_checksum: sha1:2dae37c3b9778d688663550b9803d52111577f3e + pristine_git_object: 38e04953cc320f503a2f6e77096985da60896f2a + src/mistralai/models/agents_api_v1_agents_getop.py: + id: 2358eceee519 + last_write_checksum: sha1:362d0c781b2c79d829f6e4901e558aaca937b105 + pristine_git_object: dced6dbb49c31fe2981cbd3865c0d580082a1ade + src/mistralai/models/agents_api_v1_agents_listop.py: + id: 15579851e4fe + last_write_checksum: sha1:eae021d178b661254dde8bea0b2cbdb11b9b429c + pristine_git_object: b3b8765c194bc29757468b605c13f2b7372f161e + src/mistralai/models/agents_api_v1_agents_update_versionop.py: + id: 262e7a2f05e3 + last_write_checksum: sha1:faa5550d08ddbb8223e8e6f2fcea6f09408bd228 + pristine_git_object: 5e4b97b3b175a8485fd04adc5b92a4870a46bda9 + src/mistralai/models/agents_api_v1_agents_updateop.py: + id: 72f9d6466691 + last_write_checksum: sha1:9c99959045d9d182a9814954dcd769b294267165 + pristine_git_object: 32696fbe60f17067520bf574bac8144abeb7af3f + src/mistralai/models/agents_api_v1_conversations_append_streamop.py: + id: 89a020d8fdfd + last_write_checksum: sha1:ec2fbbc5017a2374ab3f75a33592399b83fcc5f6 + pristine_git_object: d2489ffb2e01dc6a4e93aee931723be55261ca6c + src/mistralai/models/agents_api_v1_conversations_appendop.py: + id: fd73b0582d26 + last_write_checksum: sha1:22f62e8277ae5845e2b3c41d81d962edc3592090 + pristine_git_object: ba37697ea506fe08ecee5ed7585a1deee56a0827 + src/mistralai/models/agents_api_v1_conversations_deleteop.py: + id: ecd0a5c14be5 + last_write_checksum: 
sha1:bd894dcef52e02541fa09ae0d51755dad946e3c2 + pristine_git_object: 94126cae1a7a4cd09037d8224cd79f63935a2636 + src/mistralai/models/agents_api_v1_conversations_getop.py: + id: 600a28e887fe + last_write_checksum: sha1:b2dbccf934677ed646bb9ad6e947787bb6c4235b + pristine_git_object: a37a61babd146035d51095143f8781c0d94be0c3 + src/mistralai/models/agents_api_v1_conversations_historyop.py: + id: 5e3db049c234 + last_write_checksum: sha1:fde97f139a93c4723abc4f08ebcf20afcdf67d54 + pristine_git_object: b8c33d1b1b18b0a0c6b263962efc1d84d066021a + src/mistralai/models/agents_api_v1_conversations_listop.py: + id: 3cf4a3751a1c + last_write_checksum: sha1:ac8ae982fc23123b8b3ce3c1ba58980a1c6e2119 + pristine_git_object: d314f83853dbef74fa2e5ce2b5a800843110cc14 + src/mistralai/models/agents_api_v1_conversations_messagesop.py: + id: c7eb683e873e + last_write_checksum: sha1:d96c4e78c4ce75b668bc23aec91be399a0d26541 + pristine_git_object: f0dac8bf6a58882b55c88b12e039357c5ff7dfe4 + src/mistralai/models/agents_api_v1_conversations_restart_streamop.py: + id: c9d4d80d68d5 + last_write_checksum: sha1:8a96d0ccbe2918a13e022f629ea62120e9ed5c0d + pristine_git_object: f39b74eb6358938de7fddf7d1fd92eb4fb011f6b + src/mistralai/models/agents_api_v1_conversations_restartop.py: + id: 9dadcde20152 + last_write_checksum: sha1:44a127399dfcbc7c07af3c686469bcbb6e798b40 + pristine_git_object: f706c066d1de93cf03c9a7829fc3ea79eddfc8ad + src/mistralai/models/agentscompletionrequest.py: + id: 843813a24928 + last_write_checksum: sha1:f84d77c55787a07c5a8f7cb25d13dc02762e5c80 + pristine_git_object: cc07a6bdd38e221e66ca4162ef74354ef1c9f5e2 + src/mistralai/models/agentscompletionstreamrequest.py: + id: 6be8367d3443 + last_write_checksum: sha1:7bc5fd554e4adf8d8eb0a8f81aae32266b174932 + pristine_git_object: d6a887be8f33db56ae0eec47b5300a3a29736067 + src/mistralai/models/agentupdaterequest.py: + id: 24e7a9fdb507 + last_write_checksum: sha1:a5bb4a17ff80a3471321d38faa1e6605ebe541a4 + pristine_git_object: 
e496907c084f0a6cf90de6ebbf508d3137699bf0 + src/mistralai/models/apiendpoint.py: + id: b26effd643dc + last_write_checksum: sha1:07ba583784d9099e6a24e94805a405112e2fcb41 + pristine_git_object: 0ad9366f0efbcf989f63fa66750dce2ecc5bb56a + src/mistralai/models/archiveftmodelout.py: + id: 48fc1069be95 + last_write_checksum: sha1:c3c6b5ae470f23805201cd5565fca095bc9b7a74 + pristine_git_object: 0f753cfc948282f4ee5004fe463c091ed99e83a7 + src/mistralai/models/assistantmessage.py: + id: e73f1d43e4ad + last_write_checksum: sha1:b5d1d0a77b9a4e2f7272ff9fe7e319c2bc1bdb25 + pristine_git_object: a38a10c4968634d64f4bdb58d74f4955b29a92a8 + src/mistralai/models/audiochunk.py: + id: ad7cf79b2cca + last_write_checksum: sha1:c13008582708d368c3dee398cc4226f747b5a9d0 + pristine_git_object: 64fc43ff4c4ebb99b7a6c7aa3090b13ba4a2bdbc + src/mistralai/models/audioencoding.py: + id: f4713d60f468 + last_write_checksum: sha1:ffd1fd54680ea0bab343bdb22145b9eabc25c68d + pristine_git_object: 13eb6d1567f768da3753a73ddba9fa5e3ebfa7b3 + src/mistralai/models/audioformat.py: + id: 3572f5e8c65b + last_write_checksum: sha1:7259b46ebe4044633c0251eea5b3c88dedcc76a6 + pristine_git_object: 48ab648c3525fcc9fe1c722b7beee0f649e30e7a + src/mistralai/models/audiotranscriptionrequest.py: + id: 4c6a6fee484a + last_write_checksum: sha1:d8fb192581056b4ae053f9e6919874850462cb03 + pristine_git_object: 308e2599f4ba8878b0fc20ee2660289b55ae7c9a + src/mistralai/models/audiotranscriptionrequeststream.py: + id: 863eca721e72 + last_write_checksum: sha1:a7ec74e5e05a705f2d61d1fe8a635178bcea3cd6 + pristine_git_object: 04374503f931f3964851d09def70535276bdf194 + src/mistralai/models/basemodelcard.py: + id: 5554644ee6f2 + last_write_checksum: sha1:aa5af32cda04d45bcdf2c8fb380529c4fbc828aa + pristine_git_object: 706841b7fc71051890201445050b5383c4b0e998 + src/mistralai/models/batcherror.py: + id: 657a766ed6c7 + last_write_checksum: sha1:5d727f59bbc23e36747af5e95ce20fcbf4ab3f7c + pristine_git_object: 4f8234465c57779d026fe65e131ba4cbe2746d40 + 
src/mistralai/models/batchjobin.py: + id: 7229d3fdd93b + last_write_checksum: sha1:074e8efd2474a1bf0949a7abcb90d3504a742f94 + pristine_git_object: 839a9b3cadb96986537422bc2a49532fcf9c2029 + src/mistralai/models/batchjobout.py: + id: 420d2a600dfe + last_write_checksum: sha1:486ecb38d44e9e3f8509504e30fe902f6869da1b + pristine_git_object: 904cd3496134ca38b8e53772f7b30e812bb92e65 + src/mistralai/models/batchjobsout.py: + id: 7bd4a7b41c82 + last_write_checksum: sha1:838e36e981a3dedb54663a32d8657d2a6ffaa364 + pristine_git_object: a1eba5db0ab8d8308b9e933352b55e32b80f33c7 + src/mistralai/models/batchjobstatus.py: + id: ee3393d6b301 + last_write_checksum: sha1:9e042ccd0901fe4fc08fcc8abe5a3f3e1ffe9cbb + pristine_git_object: 4b28059ba71b394d91f32dba3ba538a73c9af7a5 + src/mistralai/models/batchrequest.py: + id: 6b77bb906183 + last_write_checksum: sha1:5f4b2f5804c689e3468fe93e2b7855f2f164bbe8 + pristine_git_object: 3d1e98f7a1162abadd37d6661841727d33dbafd7 + src/mistralai/models/builtinconnectors.py: + id: 611d5b9f6fa4 + last_write_checksum: sha1:4e94744e3854d4cdc9d1272e4f1d9371f9829a5f + pristine_git_object: 6a3b2476d54096722eb3e7a271629d108028bd35 + src/mistralai/models/chatclassificationrequest.py: + id: 7fee7b849791 + last_write_checksum: sha1:22d8e106c165c9a16f220dc242b9165e5dcd6963 + pristine_git_object: f06f4f34d264d5bd049ced125d8675434c4fab96 + src/mistralai/models/chatcompletionchoice.py: + id: 362cbbc2f932 + last_write_checksum: sha1:6d66a95497493bff71ed75954e7eb9965370a3a4 + pristine_git_object: f2057ab4addf806d0458c40cb8bdf1f823da51f2 + src/mistralai/models/chatcompletionrequest.py: + id: ed77c35d0007 + last_write_checksum: sha1:e40cfe95a97a04addf2b37e6ba8df61ab3c1e199 + pristine_git_object: ad8b542863fd4158c1966e839d4ca9992982c2f8 + src/mistralai/models/chatcompletionresponse.py: + id: 227c368abb96 + last_write_checksum: sha1:1f8d263cc3388507fcec7a0e2419d755433a1e3e + pristine_git_object: 3d03b1265f4c41b6e11d10edcff0e4f9fea1e434 + 
src/mistralai/models/chatcompletionstreamrequest.py: + id: d01414c359f7 + last_write_checksum: sha1:76c0d6dcd9d1e50208c8906f3ae29e0bea39a71b + pristine_git_object: 10f97e5f006c904d37aa9bb1584030196c53ed98 + src/mistralai/models/chatmoderationrequest.py: + id: 9146b8de3702 + last_write_checksum: sha1:c0465d837b1517e061036f69faa0f40464873ff6 + pristine_git_object: 2f58d52fd00e2a1003445a1e524e3856dd8ad4c7 + src/mistralai/models/checkpointout.py: + id: ee97be8b74d3 + last_write_checksum: sha1:55cd36289696fa4da06a06812a62859bac83479f + pristine_git_object: aefb7731d0dfc71db4647509ef4e0ad1d70a3a95 + src/mistralai/models/classificationrequest.py: + id: fbb8aaa182b6 + last_write_checksum: sha1:300492b338cc354bee820a3b27fae7ad9900af5c + pristine_git_object: 8a3543785599e49df7f54069c98dedecbc545e12 + src/mistralai/models/classificationresponse.py: + id: b73b192344cb + last_write_checksum: sha1:0fa30f6b7eba3cbf1951bd45724d99b1ff023bb1 + pristine_git_object: b7741f373f062d552a67550dcd30e0592805ce93 + src/mistralai/models/classificationtargetresult.py: + id: 718124fab7ab + last_write_checksum: sha1:de004f490ec6da5bee26590697a97c68d7db9168 + pristine_git_object: 60c5a51b0a5e3f2b248f1df04ba12ec5075556eb + src/mistralai/models/classifierdetailedjobout.py: + id: aebdcce0d168 + last_write_checksum: sha1:5d16ca3b3c375a899ee25fc9ce74d877d71b7be1 + pristine_git_object: 701aee6e638ee8ca3e43500abce790a6f76df0c7 + src/mistralai/models/classifierftmodelout.py: + id: 12437ddfc64e + last_write_checksum: sha1:2436c401d49eb7fa0440fca6f09045f20bb52da1 + pristine_git_object: d2a31fae8c534b1008b96c8d4f1e22d69b85c6f3 + src/mistralai/models/classifierjobout.py: + id: aa6ee49244f8 + last_write_checksum: sha1:0c2fe0e01ccfa25686565bc836d3745313f61498 + pristine_git_object: a2f7cc08b35152a1b56bbfbaa49f9231df651719 + src/mistralai/models/classifiertargetin.py: + id: 0439c322ce64 + last_write_checksum: sha1:92b7928166f1a0ed8a52c6ccd7523119690d9a35 + pristine_git_object: 
d8a060e4896cbe9ccf27be91a44a84a3a84589f7 + src/mistralai/models/classifiertargetout.py: + id: 1c9447805aaa + last_write_checksum: sha1:bf961d9be0bd5239032a612eb822ad8adcee6d99 + pristine_git_object: ddc587f46a3bc78df5d88793c768431429ccf409 + src/mistralai/models/classifiertrainingparameters.py: + id: 8d7d510cb1a1 + last_write_checksum: sha1:72c19293d514c684e1bd4a432b34382f4d674e26 + pristine_git_object: 718beeac3aa1fc2b8af52d61510f34414bcab990 + src/mistralai/models/classifiertrainingparametersin.py: + id: 3da8da32eac4 + last_write_checksum: sha1:ae5088ac22014504b3d3494db46869b87716342b + pristine_git_object: 9868843fbb81cc45657980b36c3c9409d386114d + src/mistralai/models/codeinterpretertool.py: + id: 8c90fc7cca85 + last_write_checksum: sha1:d0e3832422493176bcb29b4edec0aa40c34faa12 + pristine_git_object: 48b74ee85c897179f6f2855d6737e34031b6c0f8 + src/mistralai/models/completionargs.py: + id: 6673897ce695 + last_write_checksum: sha1:a6b22e1abc324b8adceb65cbf990c0a0ab34b603 + pristine_git_object: 40aa0314895b5b2e9b598d05f9987d39518a6c60 + src/mistralai/models/completionargsstop.py: + id: d3cf548dde2f + last_write_checksum: sha1:99912f7a10e92419308cf3c112c36f023de3fc11 + pristine_git_object: de7a09564540daaa6819f06195c347b3e01914f7 + src/mistralai/models/completionchunk.py: + id: d3dba36f2e47 + last_write_checksum: sha1:e93199f69c09b0f7c5c169c90c990a7e7439b64a + pristine_git_object: 4d1fcfbf2e46382cc1b8bbe760efa66ceb4207b3 + src/mistralai/models/completiondetailedjobout.py: + id: 7e46c1d1597b + last_write_checksum: sha1:4ef7f96a2ac505891fec22e4fe491ea21da67e0b + pristine_git_object: df41bc2ab5bf484d755d31fa132158bd1dc5b489 + src/mistralai/models/completionevent.py: + id: 7d9b2ff555f0 + last_write_checksum: sha1:268f8b79bf33e0113d1146577827fe10e47d3078 + pristine_git_object: cc8599103944b8eebead6b315098a823e4d086e3 + src/mistralai/models/completionftmodelout.py: + id: 20e6aae7163d + last_write_checksum: sha1:8272d246489fe8d3743d28b37b49b660ca832ea1 + 
pristine_git_object: 7b6520de657363e984eef8efd870b4b841dc52e0 + src/mistralai/models/completionjobout.py: + id: 36ce54765988 + last_write_checksum: sha1:c167fae08705eccd65ec30e99046276bdcdd1b97 + pristine_git_object: 70995d2a8e45ac5bf9a4b870d7b745e07f09856f + src/mistralai/models/completionresponsestreamchoice.py: + id: a5323819cf5b + last_write_checksum: sha1:dfb9c108006fc3ac0f1d0bbe8e379792f90fac19 + pristine_git_object: 80f63987d3d41512b8a12f452aab41c97d2691b0 + src/mistralai/models/completiontrainingparameters.py: + id: 701db02d1d12 + last_write_checksum: sha1:bb6d3ca605c585e6281d85363e374923ed6ddd33 + pristine_git_object: 0200e81c35f05863eee7753e530d9c2290c56404 + src/mistralai/models/completiontrainingparametersin.py: + id: 0858706b6fc7 + last_write_checksum: sha1:0c8735e28dc6c27bf759a6bd93e8f1cf0919b382 + pristine_git_object: 1f74bb9da85bd721c8f11521b916ae986cd473eb + src/mistralai/models/contentchunk.py: + id: f753f1e60f3b + last_write_checksum: sha1:af68b3ca874420a034d7e116a67974da125d5a30 + pristine_git_object: 47170eefb0ed04399548d254896fa616b24ec258 + src/mistralai/models/conversationappendrequest.py: + id: ddbd85dab2db + last_write_checksum: sha1:c8ca45ad5b8340531a469e9847ee64f80c8db4c3 + pristine_git_object: 15cbc687396ee59eee742d65e490c354fdbf0688 + src/mistralai/models/conversationappendstreamrequest.py: + id: 7d9c85747963 + last_write_checksum: sha1:ada1cbcad5ce2dd6a6bc268b30f78dc69901ff6c + pristine_git_object: 8cecf89d3342be9a94066716863f4fa121b29012 + src/mistralai/models/conversationevents.py: + id: f543ca03cde2 + last_write_checksum: sha1:7e6ac7ea6f4e216071af7460133b6c7791f9ce65 + pristine_git_object: ba4c628c9de7fb85b1dcd5a47282f97df62a3730 + src/mistralai/models/conversationhistory.py: + id: ab4d51ae0094 + last_write_checksum: sha1:1d85aa48d019ce003e2d151477e0c5925bd619e7 + pristine_git_object: d5206a571e865e80981ebfcc99e65859b0dc1ad1 + src/mistralai/models/conversationinputs.py: + id: 50986036d205 + last_write_checksum: 
sha1:3e8c4650808b8059c3a0e9b1db60136ba35942df + pristine_git_object: 4d30cd76d14358e12c3d30c22e3c95078ecde4bd + src/mistralai/models/conversationmessages.py: + id: be3ced2d07e7 + last_write_checksum: sha1:410317f1b45f395faa66a9becd7bb2398511ba60 + pristine_git_object: 32ca9c20cb37ff65f7e9b126650a78a4b97e4b56 + src/mistralai/models/conversationrequest.py: + id: ceffcc288c2d + last_write_checksum: sha1:32e7b41c01d2d7accccb1f79248b9e1c56c816f3 + pristine_git_object: 09d934ed3db66ecbd5ab8e3406c3ffb8a1c3c606 + src/mistralai/models/conversationresponse.py: + id: 016ec02abd32 + last_write_checksum: sha1:37c3f143b83939b369fe8637932974d163da3c37 + pristine_git_object: ff318e35ee63e43c64e504301236327374442a16 + src/mistralai/models/conversationrestartrequest.py: + id: 2a8207f159f5 + last_write_checksum: sha1:8f53b5faba0b19d8fdf22388c72eb2580ee121f6 + pristine_git_object: a9c8410c7b1010780bf1d98b1580453aeef07509 + src/mistralai/models/conversationrestartstreamrequest.py: + id: d98d3e0c8eed + last_write_checksum: sha1:cba039d9276869be283d83218659f4bf7537b958 + pristine_git_object: 0703bb5fe6566ff15677e5f604537ab9ae2b79bd + src/mistralai/models/conversationstreamrequest.py: + id: f7051f125d44 + last_write_checksum: sha1:7ce5ab24500754f4c4f36fd07934fe992d7bbb2e + pristine_git_object: 6ff56e1786e7342284bac0fb4b669806cee55c0f + src/mistralai/models/conversationusageinfo.py: + id: 922894aa994b + last_write_checksum: sha1:0e0039421d7291ecbbf820ea843031c50371dd9e + pristine_git_object: 9ae6f4fb6a7b4fd056c677c2152625de422b490a + src/mistralai/models/delete_model_v1_models_model_id_deleteop.py: + id: 409899d6ca23 + last_write_checksum: sha1:2d1e5b8947b56abba06363358973032e196c8139 + pristine_git_object: 4acb8d5373f25d7200378d0b8a767451978aa5a9 + src/mistralai/models/deletefileout.py: + id: d51d0de32738 + last_write_checksum: sha1:da9e95bb804820dea4977f65f62c08e491d9bb4b + pristine_git_object: 2b346ec4879c8811f824c7e6bde9fef922f37382 + src/mistralai/models/deletemodelout.py: + id: 
8dcf3427f17b + last_write_checksum: sha1:8243b0bcf735a67d4cffb254fe9de95f130a0d8a + pristine_git_object: c1b1effcbe3b093f7dede49684cf88aa0a9b27a7 + src/mistralai/models/deltamessage.py: + id: 43ee8a48546e + last_write_checksum: sha1:8bc50b7943d5ae4725eb57b7ca21a4c1217e4c0d + pristine_git_object: 88aefe7f652296c02377714586d38b8e318a419d + src/mistralai/models/documentlibrarytool.py: + id: 24c1c0293181 + last_write_checksum: sha1:7ec74875595149f433ee1b8a95d8183aa1cf8738 + pristine_git_object: 8d4c122b0412682a792c754a06e10809bfd8c25c + src/mistralai/models/documentout.py: + id: 205cb7721dfa + last_write_checksum: sha1:9316ed725bd9d7a2ef1f4e856f61def684442bd7 + pristine_git_object: 81d9605f38e40a703911fefc15731ec102c74ccb + src/mistralai/models/documenttextcontent.py: + id: 685680d8640b + last_write_checksum: sha1:dafce4998fa5964ac6833e71f7cb4f23455c14e6 + pristine_git_object: c02528c2052d535f7c815fb1165df451d49fef79 + src/mistralai/models/documentupdatein.py: + id: 6d69a91f40bd + last_write_checksum: sha1:dcbc51f1a1192bb99732405420e57fedb32dd1de + pristine_git_object: bd89ff4793e4fd78a4bae1c9f5aad716011ecbfd + src/mistralai/models/documenturlchunk.py: + id: 34a86f25f54f + last_write_checksum: sha1:1496b3d587fd2c5dc1c3f18de1ac59a29c324849 + pristine_git_object: 6d0b1dc6c9f6ebca8638e0c8991a9aa6df2b7e48 + src/mistralai/models/embeddingdtype.py: + id: bca8ae3779ed + last_write_checksum: sha1:962f629fa4ee8a36e731d33f8f730d5741a9e772 + pristine_git_object: 26eee779e12ae8114a90d3f18f99f3dd50e46b9e + src/mistralai/models/embeddingrequest.py: + id: ccb2b16068c8 + last_write_checksum: sha1:bf7877e386362d6187ffb284a1ceee1dea4cc5b7 + pristine_git_object: 44797bfad1b76ba809fab3791bffa2c78791e27b + src/mistralai/models/embeddingresponse.py: + id: c38279b9f663 + last_write_checksum: sha1:369740f705b08fede21edc04adf86505e55c9b76 + pristine_git_object: aae6fa60e131d4378bc631576b18f4d8a47f2770 + src/mistralai/models/embeddingresponsedata.py: + id: b73c5696eb71 + last_write_checksum: 
sha1:9709503bdde0a61603237fe6e84c410467e7e9f4 + pristine_git_object: 01e2765fb206b0ee36dfeb51cf3066613c74ac13 + src/mistralai/models/encodingformat.py: + id: 9f4fad7d5a9e + last_write_checksum: sha1:f9a3568cd008edb02f475a860e5849d9a40d0246 + pristine_git_object: be6c1a14e4680e24e70b8bbda018759056b784ca + src/mistralai/models/entitytype.py: + id: 4d056950d537 + last_write_checksum: sha1:7087fb7ad2886188380cd692997b2850c950a6b8 + pristine_git_object: 8d2d4bbe837da3e21988548e09710ab629d1aacd + src/mistralai/models/eventout.py: + id: 2601c7113273 + last_write_checksum: sha1:93ba178c3f6459dbc638e49c3eddcc188c7ff5d0 + pristine_git_object: 3281903429b154eb095a7c41b1751cfef97e497d + src/mistralai/models/file.py: + id: 7c1aa0c610c0 + last_write_checksum: sha1:3735ec925554b397e36fd2322062f555fbcde270 + pristine_git_object: 682d7f6e24b736dabd0566ab1b45b20dae5ea019 + src/mistralai/models/filechunk.py: + id: ea6a1ad435e8 + last_write_checksum: sha1:56d91860c1c91c40662313ea6f156db886bb55b6 + pristine_git_object: 83e60cef29045ced5ae48b68481bce3317690b8e + src/mistralai/models/filepurpose.py: + id: 3928b3171a09 + last_write_checksum: sha1:2ffb9fd99624b7b9997f826526045a9a956fde14 + pristine_git_object: b109b35017d5aa086ac964d78163f41e64277874 + src/mistralai/models/files_api_routes_delete_fileop.py: + id: fa02d4d126c7 + last_write_checksum: sha1:c96b106d6496087673f6d1b914e748c49ec13755 + pristine_git_object: a84a7a8eee4b6895bb2e835f82376126b3e423ec + src/mistralai/models/files_api_routes_download_fileop.py: + id: 1dc2e2823a00 + last_write_checksum: sha1:6001bcf871ab76635abcb3f081b029c8154a191e + pristine_git_object: 168a7fa6701578b77876fe0bddeb1003d06f33b7 + src/mistralai/models/files_api_routes_get_signed_urlop.py: + id: 628ed2f82ce4 + last_write_checksum: sha1:c970025b1e453ad67298d12611542abb46ded54d + pristine_git_object: 708d40ab993f93227b9795c745383ab954c1c89c + src/mistralai/models/files_api_routes_list_filesop.py: + id: 865dd74c577c + last_write_checksum: 
sha1:df0af95515546660ec9ff343c17f0b2dfe8b0375 + pristine_git_object: 9b9422b405ba967d7f6ed84196fe8e1dc9c5d95f + src/mistralai/models/files_api_routes_retrieve_fileop.py: + id: d821f72ee198 + last_write_checksum: sha1:d0d07123fd941bb99a00a36e87bc7ab4c21506a6 + pristine_git_object: 0c2a95ef590f179fe60a19340e34adb230dd8901 + src/mistralai/models/files_api_routes_upload_fileop.py: + id: ccca25a2fe91 + last_write_checksum: sha1:64b1d3c3fe9323d40096798760c546dc1c30a57d + pristine_git_object: aeefe842b327c89c0a78ba3d6e4a1ccb8d4a25fe + src/mistralai/models/fileschema.py: + id: 8a02ff440be5 + last_write_checksum: sha1:55120d1d9322e9381d92f33b23597f5ed0e20e4c + pristine_git_object: 9a88f1bbdf34ffb619794be9c041635ff333e489 + src/mistralai/models/filesignedurl.py: + id: 6fe55959eedd + last_write_checksum: sha1:afbe1cdfbdf2f760fc996a5065c70fa271a35885 + pristine_git_object: 092be7f8090272bdebfea6cbda7b87d9877d59e8 + src/mistralai/models/fimcompletionrequest.py: + id: a54284b7041a + last_write_checksum: sha1:7e477e032b3a48fe08610dd5dc50dee0948950e9 + pristine_git_object: 801a358b02441b7537f4bae64e93b4308c720040 + src/mistralai/models/fimcompletionresponse.py: + id: 15f25c04c5dd + last_write_checksum: sha1:b7787a7dc82b31ed851a52ae2f0828cc8746d61e + pristine_git_object: f27972b9e6e2f9dc7837be7278fda4910755f1f4 + src/mistralai/models/fimcompletionstreamrequest.py: + id: ba6b92828dc7 + last_write_checksum: sha1:a8f2c6cbd5a41ad85b7d0faced90d8f05b29f646 + pristine_git_object: 2e8e6db2a21a86ffd7cc61f92fed5c55f19e2e50 + src/mistralai/models/finetuneablemodeltype.py: + id: cbd439e85b18 + last_write_checksum: sha1:8694f016e8a4758308225b92b57bee162accf9d7 + pristine_git_object: f5b8b2ed45b56d25b387da44c398ae79f3a52c73 + src/mistralai/models/ftclassifierlossfunction.py: + id: 95255316968d + last_write_checksum: sha1:69e08ab728e095b8e3846ed2dc142aa1e82a864a + pristine_git_object: c4ef66e0fe69edace4912f2708f69a6e606c0654 + src/mistralai/models/ftmodelcapabilitiesout.py: + id: 1bc9230e1852 + 
last_write_checksum: sha1:c841f76ba219c82e3324b69ad8eba4abd522d0b9 + pristine_git_object: 7f3aa18b982c11fb6463e96333250b632dd195c8 + src/mistralai/models/ftmodelcard.py: + id: 4f25bcf18e86 + last_write_checksum: sha1:f1d80e6aa664e63b4a23a6365465d42415fc4bbb + pristine_git_object: 1c3bd04da0cc2bc86bec97d7890ad6594879b334 + src/mistralai/models/function.py: + id: 66b7b7ab8fc4 + last_write_checksum: sha1:5da05a98ca5a68c175bd212dd41127ef98013da6 + pristine_git_object: 7d40cf758ffbb3b6b4e62b50274829bd1c809a9c + src/mistralai/models/functioncall.py: + id: 5e03760bb753 + last_write_checksum: sha1:20d2a8196b6ccaffe490b188b1482a309b2dce79 + pristine_git_object: 0cce622a4835fcbd9425928b115a707848c65f54 + src/mistralai/models/functioncallentry.py: + id: 1d5c6cef6e92 + last_write_checksum: sha1:f357b1fde226c52c0dc2b105df66aeb6d17ab1bf + pristine_git_object: 4ea62c4ffc671b20d35cd967f3da0f1a34c92e2e + src/mistralai/models/functioncallentryarguments.py: + id: bd63a10181da + last_write_checksum: sha1:6beb9aca5bfc2719f357f47a5627c9edccef051f + pristine_git_object: ac9e6227647b28bfd135c35bd32ca792d8dd414b + src/mistralai/models/functioncallevent.py: + id: 868025c914c8 + last_write_checksum: sha1:4eb5b07218c9ab923cbe689e3de116d14281a422 + pristine_git_object: e3992cf173907a485ced9ec12323a680613e9e6a + src/mistralai/models/functionname.py: + id: 46a9b195fef5 + last_write_checksum: sha1:2219be87b06033dad9933b2f4efd99a4758179f1 + pristine_git_object: 0a6c0b1411b6f9194453c9fe22d52d035eb80c4f + src/mistralai/models/functionresultentry.py: + id: d617bbe28e36 + last_write_checksum: sha1:a781805577eb871b4595bae235c1d25e2e483fdc + pristine_git_object: 1c61395a82830dc689f2e011b9e6c86eba58cda3 + src/mistralai/models/functiontool.py: + id: e1b3d619ef0b + last_write_checksum: sha1:31e375a2222079e9e70459c55ff27a8b3add869d + pristine_git_object: 009fe28008a166d551566378e3c2730963aca591 + src/mistralai/models/githubrepositoryin.py: + id: e7f21180a768 + last_write_checksum: 
sha1:b4f630e15057e4ff8bfc5fb7ba2f0085a76c5f06 + pristine_git_object: b16ce0d2898b000f08e3d960a3411941a2324473 + src/mistralai/models/githubrepositoryout.py: + id: a3e494bbd813 + last_write_checksum: sha1:00a9bc4d6308cd960077fb639b1778723a71f583 + pristine_git_object: 372477c106a37b1b9d5cec02751c63fb08abcf53 + src/mistralai/models/httpvalidationerror.py: + id: 224ee4b3f0f0 + last_write_checksum: sha1:3f8d51b670993863fcd17421d1ace72e8621fd51 + pristine_git_object: d467577af04921f5d9bfa906ae6f4e06055a8785 + src/mistralai/models/imagegenerationtool.py: + id: 63bbe395acb2 + last_write_checksum: sha1:404e9cbabada212b87cc2e0b8799a18ff1cecf95 + pristine_git_object: a92335dbd2d0d03be5c2df4132df1cc26eaf38dd + src/mistralai/models/imageurl.py: + id: 20116779b5a0 + last_write_checksum: sha1:2d6090577370f5eb2e364029a11bb61bd86ef226 + pristine_git_object: 6f077b69019fbc598ddc402ba991c83f8a047632 + src/mistralai/models/imageurlchunk.py: + id: 0a6e87c96993 + last_write_checksum: sha1:0b7e4c0d5129698b1b01608eb59b27513f6a9818 + pristine_git_object: 8e8aac4238381527d9156fcb72288b28a82f9689 + src/mistralai/models/inputentries.py: + id: cbf378d5b92a + last_write_checksum: sha1:afc03830974af11516c0b997f1cd181218ee4fb0 + pristine_git_object: 8ae29837a6c090fbe1998562684d3a372a9bdc31 + src/mistralai/models/inputs.py: + id: a53031bc9cb6 + last_write_checksum: sha1:94290a72cb6cfa40813bc79a66a463978ae9ae1c + pristine_git_object: 34d20f3428a5d994c4a199c411dc8097b3c259d7 + src/mistralai/models/instructrequest.py: + id: d23d1da148c8 + last_write_checksum: sha1:2c4f4babc9944f90bc725bb0c460c8de85b3d75e + pristine_git_object: dddbda00a418629462e3df12a61a6b1c56c1d2bd + src/mistralai/models/jobin.py: + id: 42f6df34c72e + last_write_checksum: sha1:e5a78c9a2cd48fb1d7d062ec2f8d54f8d3ac493e + pristine_git_object: aa0cd06c704902919f672e263e969630df783ef6 + src/mistralai/models/jobmetadataout.py: + id: eaa2e54e2e2b + last_write_checksum: sha1:90afd144e2f9ec77c3be2694db1d96e4bc23fecb + pristine_git_object: 
10ef781ebbba4c5eaab6f40f5d5f9f828944c983 + src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py: + id: 5d3a14d60da7 + last_write_checksum: sha1:4925f408587e91581c0181baf9acd1dcb5a50768 + pristine_git_object: 5b83d534d7efd25c0bc47406c79dfd59e22ec1d6 + src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py: + id: 74c718778882 + last_write_checksum: sha1:92a89c2d0384b2251636a61113310c84da0001bf + pristine_git_object: 9bfaf9c5230e4a1cc0187faeedc78ebcaaf38b98 + src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py: + id: 072c77cfbaa5 + last_write_checksum: sha1:f890bc21fa71e33a930d48cdbf18fd503419406c + pristine_git_object: c48246d54c696bd85fbe67348d5eef1a2a1944db + src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py: + id: db002a822be0 + last_write_checksum: sha1:3a1019f200193556df61cbe3786b03c2dbab431f + pristine_git_object: d728efd175f1df6b59b74d0b2fa602c0e0199897 + src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: + id: ad69f51c764d + last_write_checksum: sha1:c84477987738a389ddf88546060263ecfb46506a + pristine_git_object: ceb19a69131958a2de6c3e678c40a1ca5d35fd73 + src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: + id: a5c2c6e89b85 + last_write_checksum: sha1:dfb755d386e7c93540f42392f18efae7f61c4625 + pristine_git_object: 39af3ea6fab66941faf7718d616ff2a386e8219b + src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: + id: 221ec5d0482f + last_write_checksum: sha1:f2ce2c6a8924deda372d749ea2a09a2526b8da44 + pristine_git_object: be99dd2d329f5921513ba3ad6e5c5a9807d1a363 + src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: + id: bd0fd94f34fc + last_write_checksum: sha1:48390cf76ffc1d712e33bd0bcece8dea956e75cb + pristine_git_object: 9aec8eb25c54e8fecedd9dd9e823ccf32c1a36b8 + src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: + id: cba224459ae6 + last_write_checksum: 
sha1:238eeb9b7f48ff4e3262cc0cc5e55d96fe565073 + pristine_git_object: 8103b67b55eab0f9197cd9fb421e6ea4ca10e76e + src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: + id: ecc5a3420980 + last_write_checksum: sha1:8e026bc610fead1e55886c741f6b38817bb6b2ff + pristine_git_object: a84274ff5b2c45f2adc2c0234db090c498decc51 + src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: + id: 3e8d8e70d526 + last_write_checksum: sha1:a5538fcb4248fd83749dc303f9585d7354ff8b92 + pristine_git_object: a10528ca0f7056ef82e0aeae8f4262c65e47791d + src/mistralai/models/jobsout.py: + id: bb1000b03e73 + last_write_checksum: sha1:d06d7b33e5630d45795efc2a8443ae3070866b07 + pristine_git_object: 680b1d582bc8fbce17a381be8364333dd87ce333 + src/mistralai/models/jsonschema.py: + id: 4bcf195c31bb + last_write_checksum: sha1:a0d2b72f809e321fc8abf740e57ec39a384c09d4 + pristine_git_object: e2b6a45e5e5e68b6f562dc39519ab12ffca50322 + src/mistralai/models/legacyjobmetadataout.py: + id: 172ade2efb26 + last_write_checksum: sha1:bf608218a88f7e59cd6c9d0958940b68a200ba0d + pristine_git_object: 499512197a9f9600ac9f7cee43f024dde67fd775 + src/mistralai/models/libraries_delete_v1op.py: + id: ef50051027ec + last_write_checksum: sha1:2a9632da75355679918714a68b96e3ddf88fa5d3 + pristine_git_object: 56f8f8a8706b7aac67cf9b156a2e8710a4fdef36 + src/mistralai/models/libraries_documents_delete_v1op.py: + id: e18557420efe + last_write_checksum: sha1:6904ea388795a0b5f523959c979cf9b3a2c3ef4e + pristine_git_object: c33710b0e29664594891055c36199ea4846516dc + src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py: + id: c8df3283cb98 + last_write_checksum: sha1:fefde9e22a010f900bd9012a2d438f909d54815f + pristine_git_object: e2459c1c68c81eb67983ac76de23dd8609420291 + src/mistralai/models/libraries_documents_get_signed_url_v1op.py: + id: 279ac5d9f945 + last_write_checksum: sha1:8ee5b6386f98d2af619f070e83e1f3772c07e199 + pristine_git_object: 
bc913ba56bd98d9937ddd5516837b5a8ead10454 + src/mistralai/models/libraries_documents_get_status_v1op.py: + id: ded8f142264f + last_write_checksum: sha1:ac1f85ecb74ef43e6e831794badbbd57e99f7028 + pristine_git_object: 08992d7c9ee5ba85ef97971fa6e06af465e39fa9 + src/mistralai/models/libraries_documents_get_text_content_v1op.py: + id: 497b693d0ba6 + last_write_checksum: sha1:11eeb61bab8b745ba22f2087393ba0cf91b76180 + pristine_git_object: 21a131ad6448597a996f7d96723f6bc8cf12ddf0 + src/mistralai/models/libraries_documents_get_v1op.py: + id: 7b1e6957ca40 + last_write_checksum: sha1:a3e3d1dee18ee2900417db836b1f8b49a14e0501 + pristine_git_object: ff2bdedbcaa8cf4c8e31091ed529274bf5d3ec04 + src/mistralai/models/libraries_documents_list_v1op.py: + id: d5cc573ae1a0 + last_write_checksum: sha1:43b6af0f23ff88d6e13f48acf12baa01a03eb243 + pristine_git_object: e6ff29cf4edb7b269cd66c5299b7531b13973dd2 + src/mistralai/models/libraries_documents_reprocess_v1op.py: + id: 3e832394e71b + last_write_checksum: sha1:36ced698b57573338eb95f5d70983ba4b9dcb0e0 + pristine_git_object: 861993e7e0fd06576e878758a44029613d381a4c + src/mistralai/models/libraries_documents_update_v1op.py: + id: 902a2c649e04 + last_write_checksum: sha1:c8ba64250a66dbdd9ac409ffeccb6bb75ba619c2 + pristine_git_object: 5551d5eec7961a5cc0fa9018ba680304e1f99d57 + src/mistralai/models/libraries_documents_upload_v1op.py: + id: a4586d35c41c + last_write_checksum: sha1:83c40a6b1a790d292c72c90847926d458ea73d83 + pristine_git_object: 51f536cca6141b0243d3c3fff8da3224a0c51ea5 + src/mistralai/models/libraries_get_v1op.py: + id: ed8ae2dc35b4 + last_write_checksum: sha1:c9dc682319790ec77c3827b44e3e8937de0de17f + pristine_git_object: b87090f6bb56c7f7d019483c0e979f9f2fdc3378 + src/mistralai/models/libraries_share_create_v1op.py: + id: 6a5d94d8a3dc + last_write_checksum: sha1:312ec2ea1635e86da293a0f402498031591c9854 + pristine_git_object: a8b0e35db9a452a62dbc0893009a9708684d2a23 + src/mistralai/models/libraries_share_delete_v1op.py: + id: 
474f847642a7 + last_write_checksum: sha1:557000669df73a160d83bcaaf456579890fa7f92 + pristine_git_object: e29d556a73a87a6f799948f05517a50545dfd79e + src/mistralai/models/libraries_share_list_v1op.py: + id: 5ccdc4491119 + last_write_checksum: sha1:c3ca37074f14aad02a9d01099fe7134204d5520e + pristine_git_object: b276d756e95e9e7dc53cd7ff5da857052c055046 + src/mistralai/models/libraries_update_v1op.py: + id: 6de043d02383 + last_write_checksum: sha1:0936d1273af7659d7283c1defc2094178bc58003 + pristine_git_object: c93895d97f165d4fa4cc33097f6b772b55337623 + src/mistralai/models/libraryin.py: + id: 0277ef6b7a58 + last_write_checksum: sha1:56e033aef199fd831da7efff829c266206134f99 + pristine_git_object: 872d494d66abde55130a6d2a6c30de950f51232c + src/mistralai/models/libraryinupdate.py: + id: 96904d836434 + last_write_checksum: sha1:50c13a51aee5fc6c562090dad803ca6b3a1a5bed + pristine_git_object: 6e8ab81acae479e5fb999c91bfc55f6e1cbee5cc + src/mistralai/models/libraryout.py: + id: e483109c6e21 + last_write_checksum: sha1:6394431205bd4c308de4ee600e839ac0c6624fc0 + pristine_git_object: d3bc36f94735fbabb23d6c19ff481e404227f548 + src/mistralai/models/listdocumentout.py: + id: 872891f10a41 + last_write_checksum: sha1:61f444f7318e20921ddda1efd1e63e9bbec1d93d + pristine_git_object: 9d39e0873f463cce5fca723a3c85f47cf0f6ddeb + src/mistralai/models/listfilesout.py: + id: 43a961a42ca8 + last_write_checksum: sha1:d3e0d056a8337adaffced63e2ed5b4b37a60927d + pristine_git_object: 2f82b37db7f3cb69d68ab097f9f75488939f66c8 + src/mistralai/models/listlibraryout.py: + id: dcd1a940efe5 + last_write_checksum: sha1:7dc2876bf50861c8e94079859725cadf2d7b14c4 + pristine_git_object: 1e647fe1db65421d73ba6e0f35cc580e99ea7212 + src/mistralai/models/listsharingout.py: + id: c04e23806a57 + last_write_checksum: sha1:efd9e780445bdcf4a4e7794cd1aedaa85067f904 + pristine_git_object: 38c0dbe0ab9aeb3c977e38f2bf95d84297456980 + src/mistralai/models/messageentries.py: + id: 2e456a2494da + last_write_checksum: 
sha1:7c2503f4a1be5e5dde5640dd7413fed06aee09b4 + pristine_git_object: 9b1706dee5fbb5cae18674708a1571b187bf0576 + src/mistralai/models/messageinputcontentchunks.py: + id: 344669e96a85 + last_write_checksum: sha1:740e82b72d5472f0cc967e745c07393e2df8ae38 + pristine_git_object: e90d8aa0317e553bfc0cceb4a356cf9994ecfb60 + src/mistralai/models/messageinputentry.py: + id: 2e0500be6230 + last_write_checksum: sha1:118ffb7715993d7c103be5d26894ce33d8437f8a + pristine_git_object: edf05631be8d89002fd3a3bfb3034a143b12ed21 + src/mistralai/models/messageoutputcontentchunks.py: + id: e8bb72ef0c0f + last_write_checksum: sha1:f239151ae206f6e82ee3096d357ff33cf9a08138 + pristine_git_object: 136a7608e7e2a612d48271a7c257e2bb383584f3 + src/mistralai/models/messageoutputentry.py: + id: 0113bf848952 + last_write_checksum: sha1:3a1569ef7b3efadb87418d3ed38a6df0710cca1b + pristine_git_object: 0e2df81e3e75841d31bafd200697e9fd236b6fbe + src/mistralai/models/messageoutputevent.py: + id: d194af351767 + last_write_checksum: sha1:b9c4bf8db3d22d6b01d79044258729b5daafc050 + pristine_git_object: 751767a31666e839ec35d722707d97db605be25f + src/mistralai/models/metricout.py: + id: "369168426763" + last_write_checksum: sha1:d245a65254d0a142a154ee0f453cd7b64677e666 + pristine_git_object: 930b5c2181d4c5c5d89474b66fc1a4eef7ca7865 + src/mistralai/models/mistralerror.py: + id: 89288c78040b + last_write_checksum: sha1:07fce1e971a25d95ffa8c8f3624d62cdf96e353a + pristine_git_object: 28cfd22dc3d567aa4ae55cc19ad89341fa9c96a1 + src/mistralai/models/mistralpromptmode.py: + id: b2580604c1fe + last_write_checksum: sha1:1ac4d9fb8fbf0b21958be5483a569da7f1f49ff0 + pristine_git_object: ee82fb6d056e2d9699628698750e68b4ab6ef851 + src/mistralai/models/modelcapabilities.py: + id: a9589b97b15c + last_write_checksum: sha1:d7a7d530750418a54a5fc1698d855df7a519a45c + pristine_git_object: 4b5d5da7da9573f998e977e8a14a9b8f8cbf4f55 + src/mistralai/models/modelconversation.py: + id: 7d8b7b8d62a8 + last_write_checksum: 
sha1:b76cc407f807c19c1ff5602f7dd1d0421db2486d + pristine_git_object: 8eca4f973cd20e8bcb70a519f8dc3749878f04a2 + src/mistralai/models/modellist.py: + id: 22085995d513 + last_write_checksum: sha1:f753c11b430f8dd4daffb60bef467c6fa20f5e52 + pristine_git_object: 394cb3fa66a8881b013f78f1c8ee5440c9933427 + src/mistralai/models/moderationobject.py: + id: de835c5cd36e + last_write_checksum: sha1:24befa2934888192a12d9954749b8e591eb22582 + pristine_git_object: 5eff2d2a100c96eb7491ca99716fc9523fb74643 + src/mistralai/models/moderationresponse.py: + id: 831711e73705 + last_write_checksum: sha1:a96af206b8cd7c161c77cde0d3720880f20cf7f8 + pristine_git_object: ed13cd6bc226e8e505ef248760374c795705440f + src/mistralai/models/no_response_error.py: + id: 3102fe819ad6 + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai/models/ocrimageobject.py: + id: 44523566cf03 + last_write_checksum: sha1:75bb3b2eec938bd59052ea85244130770d787cbf + pristine_git_object: cec0acf4104ba7153270a1130ac2ac58a171b147 + src/mistralai/models/ocrpagedimensions.py: + id: 0d8589f80c1a + last_write_checksum: sha1:d62f216c61756592e6cde4a5d72b68eedeaddcc5 + pristine_git_object: d1aeb54d869545aec3ecaad1240f1be2059280f1 + src/mistralai/models/ocrpageobject.py: + id: 2dfef21e786f + last_write_checksum: sha1:667013bdfafb5ed0867fa9cd350455f66fee3e90 + pristine_git_object: 737defbaea323e0f3ccd95c2a721f57acc9f43a0 + src/mistralai/models/ocrrequest.py: + id: 7dbc4bb7cafb + last_write_checksum: sha1:b8a5efbd582bdf9e188d4777b319d2b16e0caf3d + pristine_git_object: 8bd133706746435af17898ee1afe78d94f2d1948 + src/mistralai/models/ocrresponse.py: + id: a187e70d8c2e + last_write_checksum: sha1:0c09aee803a5e1a3ba7c7f5d0ce46e96ee3339ca + pristine_git_object: 7b65bee7e6c0fffc7019f7843dcf88c0b5fade4e + src/mistralai/models/ocrtableobject.py: + id: 1be0c3cc027f + last_write_checksum: sha1:804d15ad21276f47f5ea9beccab9e471840ac32e + 
pristine_git_object: 5f30ab5e15dabf6a96498f46cf6178dca7fdb906 + src/mistralai/models/ocrusageinfo.py: + id: 91ab3d4cd57a + last_write_checksum: sha1:018eaf85ebffbb3392ed3c6688a41882a0893015 + pristine_git_object: 36c9f826cc64f67b254bdd07b00ad77857a91e1c + src/mistralai/models/outputcontentchunks.py: + id: 25ae74f4c9b8 + last_write_checksum: sha1:47d74d212ebcb68ff75a547b3221f2aee3e07bfe + pristine_git_object: ad0c087e0dcbe302dd9c73d1ea03e109e9a66ef7 + src/mistralai/models/paginationinfo.py: + id: 7e6919dfd6b1 + last_write_checksum: sha1:5ae05b383e9381862b8a980d83e73765b726294d + pristine_git_object: 00d4f1ec906e8485fdcb3e4b16a0b01acfa2be4b + src/mistralai/models/prediction.py: + id: ad77ec075e6d + last_write_checksum: sha1:d359ab3a37229212459228329219a1ec26a0381d + pristine_git_object: 582d87896b477de867cadf5e85d58ee71c445df3 + src/mistralai/models/processingstatusout.py: + id: 54d1c125ef83 + last_write_checksum: sha1:475749250ada2566c5a5d769eda1d350ddd8be8f + pristine_git_object: e67bfa865dcf94656a67f8612a5420f8b43cc0ec + src/mistralai/models/referencechunk.py: + id: 6cdbb4e60749 + last_write_checksum: sha1:48a4dddda06aadd16f6ea34c58848430bd561432 + pristine_git_object: 1864ac794d4e637556003cbb2bf91c10832d90f9 + src/mistralai/models/requestsource.py: + id: 1836766b9e81 + last_write_checksum: sha1:31aae791bf737ad123fe189227d113838204ed42 + pristine_git_object: 7b0a35c44050b6fca868479e261805a77f33e230 + src/mistralai/models/responsedoneevent.py: + id: 6300eaecde3c + last_write_checksum: sha1:693d832a480e943ff9c3e4f6822bea8358750ee1 + pristine_git_object: 5a3a3dfb8630713a618cc23f97660840e4fbbeca + src/mistralai/models/responseerrorevent.py: + id: 88185105876c + last_write_checksum: sha1:5adfc1acdba4035f1a646a7678dd09e16d05e747 + pristine_git_object: 6cb1b26885ad9ded4f75f226b0ce713206cb0a49 + src/mistralai/models/responseformat.py: + id: 6d5e093fdba8 + last_write_checksum: sha1:4c4a801671419f403263caafbd90dbae6e2203da + pristine_git_object: 
92284017b5b895673e510a739bc5c5ed104de4af + src/mistralai/models/responseformats.py: + id: e5fccecf2b70 + last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b + pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 + src/mistralai/models/responsestartedevent.py: + id: 37fbb3e37d75 + last_write_checksum: sha1:1d1eb4b486b2b92d167367d6525a8ea709d00c15 + pristine_git_object: d14d45ef8aa0d4e6dfa5893c52ae292f1f9a5780 + src/mistralai/models/responsevalidationerror.py: + id: 4b46e43f015b + last_write_checksum: sha1:c90231f7d7d3e93d6a36972ec4bead76fcb9ac47 + pristine_git_object: ed30165511c209289a030c5e9d9af1d2ad93d77c + src/mistralai/models/retrieve_model_v1_models_model_id_getop.py: + id: 81db6b688ded + last_write_checksum: sha1:8a7f0585855118e73fcd8f7213757172ac94c6fc + pristine_git_object: bfe62474610239f6e1ac0b5a4dc4b6ee9d321bd6 + src/mistralai/models/retrievefileout.py: + id: 5cf73a0007f0 + last_write_checksum: sha1:04abbd25f8757b7d9763a2c0aaca561a78960fbd + pristine_git_object: 94540083c22b330dc48428e0d80f1cf2292b93ab + src/mistralai/models/sampletype.py: + id: d1558bd8d355 + last_write_checksum: sha1:fbfdf1616eb6b64d785c11f11a33fca794de19eb + pristine_git_object: efb43e9be278aa00cda9828c5c8cb3edabc68d0f + src/mistralai/models/sdkerror.py: + id: d3c914c3c63a + last_write_checksum: sha1:6d6dafaf73210b86ef2fea441e2e864752242737 + pristine_git_object: 65c45cf1c2cb4047e3cce21538890e5f62136f0f + src/mistralai/models/security.py: + id: 88dd24d389d4 + last_write_checksum: sha1:3d460b276d68380a64d8d91947981ce27d92e552 + pristine_git_object: cf05ba8fbce8d7b9199396c41ccd4c218d71998b + src/mistralai/models/shareenum.py: + id: 371f676fce97 + last_write_checksum: sha1:9061b04c7b26435911ea18b095d76400e1ab1698 + pristine_git_object: 634ba4b7e800e134f209fa851391b1a49cd6fc97 + src/mistralai/models/sharingdelete.py: + id: 334b4a8820ae + last_write_checksum: sha1:e21d1a3cd972b02beecd3a2d3ed3ebf70ea9c414 + pristine_git_object: 
ebcdbab517d524cf4f2056fb253acb713e042d58 + src/mistralai/models/sharingin.py: + id: b762157651b7 + last_write_checksum: sha1:479261e2c4ad827b878b66afa5dfaec49df4573a + pristine_git_object: f7bb89ca1b670cfa9d66b3135e762e04ba6454a4 + src/mistralai/models/sharingout.py: + id: "198686162036" + last_write_checksum: sha1:ae269a353d6733ac81ab6a4f3ea3368eef2a99ec + pristine_git_object: 12455818a5c1f44538696015bee079bce9567cdc + src/mistralai/models/source.py: + id: 6f2e7cd2285e + last_write_checksum: sha1:b0fe76d6566e4573317ad4c862ddc11423a8bde7 + pristine_git_object: cc3abce298c4b817081610238e489d4023ca6f3f + src/mistralai/models/ssetypes.py: + id: 7817469fd731 + last_write_checksum: sha1:1901bf6feee92ac100113e0a98dc0abe6e769375 + pristine_git_object: 796f0327cbb1372c1b2a817a7db39f8f185a59be + src/mistralai/models/systemmessage.py: + id: 0f0c7d12c400 + last_write_checksum: sha1:6886cc2f9603aabf75289ccc895e23ad45e65dc7 + pristine_git_object: 2b34607b39a1a99d6569985818a89d9e973f3cdd + src/mistralai/models/systemmessagecontentchunks.py: + id: 5a051e10f9df + last_write_checksum: sha1:bef0630a287d9000595a26049290b978c0816ddc + pristine_git_object: a1f04d1e5802521d4913b9ec1978c3b9d77ac38f + src/mistralai/models/textchunk.py: + id: 7dee31ce6ec3 + last_write_checksum: sha1:5ae5f498eaf03aa99354509c7558de42f7933c0c + pristine_git_object: 6052686ee52d3713ddce08f22c042bab2569f4da + src/mistralai/models/thinkchunk.py: + id: 8d0ee5d8ba9c + last_write_checksum: sha1:34f0cc91e66cb0ad46331b4e0385534d13b9ee1c + pristine_git_object: 627ae4883698696774b7a285a73326a4509c6828 + src/mistralai/models/timestampgranularity.py: + id: e0cb6c4efa2a + last_write_checksum: sha1:2b554048013632407c391444d972e29362751468 + pristine_git_object: 02816df67dd326a17d27dc815c49c6e1172693b8 + src/mistralai/models/tool.py: + id: c0a9b60b6cf1 + last_write_checksum: sha1:805030012b6cf4d6159c1515b44e1c999ea2349a + pristine_git_object: b14a6adf2a804153e071c28b7e225594278b7443 + src/mistralai/models/toolcall.py: + id: 
08f53b1090d7 + last_write_checksum: sha1:3b876a5d90066ebc4a337e7ba90b0607d9028c9e + pristine_git_object: 1f36792484f22af884a2b651442dbf1086e36f53 + src/mistralai/models/toolchoice.py: + id: de7498a868da + last_write_checksum: sha1:ec3178ff2a398b569ea6161e37006a349b75e94f + pristine_git_object: f8e1b48621527ca86f07efd4500089d339ddeb6a + src/mistralai/models/toolchoiceenum.py: + id: 580f382c7857 + last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 + pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 + src/mistralai/models/toolexecutiondeltaevent.py: + id: 674ab6adad2e + last_write_checksum: sha1:002e73c21df7e785268d77bad00b7967a514ede7 + pristine_git_object: 4fca46a80810a9976a0de70fef9e895be82fa921 + src/mistralai/models/toolexecutiondoneevent.py: + id: 86a2329a500d + last_write_checksum: sha1:00174f618358d49546ff8725a6dc3a9aebe5926c + pristine_git_object: 621d55718957c766c796f6f98814ed917ccbaadc + src/mistralai/models/toolexecutionentry.py: + id: 41e2484af138 + last_write_checksum: sha1:c05c9f72cf939d4da334489be57e952b2fbd68f9 + pristine_git_object: 9f70a63b720b120283adc1292188f1f0dd8086a1 + src/mistralai/models/toolexecutionstartedevent.py: + id: 0987fdd1cd45 + last_write_checksum: sha1:beab5d913fb60fc98ec81dffb4636143e23286ec + pristine_git_object: 80dd5e97084cdedcdb2752491a61d8b2aadb091a + src/mistralai/models/toolfilechunk.py: + id: 275d194f5a7b + last_write_checksum: sha1:0ecb2b0ef96d57084c19f43553fdfafdf209ec16 + pristine_git_object: 87bc822c091f1b0c1896f0da16764e225e3f324c + src/mistralai/models/toolmessage.py: + id: dff99c41aecf + last_write_checksum: sha1:19fbda605416fcc20f842b6d3067f64de2691246 + pristine_git_object: ef917c4369a7459e70f04da2c20ed62b9316d9bc + src/mistralai/models/toolreferencechunk.py: + id: 5e3482e21a7e + last_write_checksum: sha1:21038657452d30fd80b5204451b7b7bfbbce6cf6 + pristine_git_object: 2a751cb08f1442ca5f91ab0b688db822c6f72dd7 + src/mistralai/models/tooltypes.py: + id: c4ef111ec45b + last_write_checksum: 
sha1:f9cd152556d95e9e197ac0c10f65303789e28bcb + pristine_git_object: f54893c259518313218d9ee307669c291a8c0cf8 + src/mistralai/models/trainingfile.py: + id: 150e9031690e + last_write_checksum: sha1:f20266317087b92eb74ed8cd48e7477666faf9a8 + pristine_git_object: 99bd49dd760960558be40adf138f9b4b95ee62d9 + src/mistralai/models/transcriptionresponse.py: + id: b50f2e392e31 + last_write_checksum: sha1:79d57bf44dbad0f364ac57ad967642271b7a7526 + pristine_git_object: 54a98a5ba7b83a6b7f6a39046b400a61e9889898 + src/mistralai/models/transcriptionsegmentchunk.py: + id: ccd6d5675b49 + last_write_checksum: sha1:367abd8a8182d9db9f2b19540aed2b974ad7bbe2 + pristine_git_object: aa30f053a624b25c7fd1739c05f406a81873ff60 + src/mistralai/models/transcriptionstreamdone.py: + id: 42177659bf0f + last_write_checksum: sha1:5fda2b766b2af41749006835e45c95f708eddb28 + pristine_git_object: e1b1ab3d6f257786a5180f6876f47d47414e7e72 + src/mistralai/models/transcriptionstreamevents.py: + id: 9593874b7574 + last_write_checksum: sha1:ace344cfbec0af2ad43b0b61ae444e34f9e9da99 + pristine_git_object: 8207c03fef9d76ca7405b85d93c2f462eae22329 + src/mistralai/models/transcriptionstreameventtypes.py: + id: e2e35365ad39 + last_write_checksum: sha1:38b7334aebf400e1abb2b20b0f2890880f0fc2f7 + pristine_git_object: 4a910f0abca2912746cac60fd5a16bd5464f2457 + src/mistralai/models/transcriptionstreamlanguage.py: + id: 635759ec85f3 + last_write_checksum: sha1:93e389c2c8b41e378cfe7f88f05d8312236024e6 + pristine_git_object: 15b7514415e536bb04fd1a69ccea20615b5b1fcf + src/mistralai/models/transcriptionstreamsegmentdelta.py: + id: 83d02b065099 + last_write_checksum: sha1:1f48714d450fff004f9cf24b81749848240fe722 + pristine_git_object: d779ed837913c8c13a4599a06a2ed75afa827a48 + src/mistralai/models/transcriptionstreamtextdelta.py: + id: ce0861d8affd + last_write_checksum: sha1:84a3b6c6d84a896e59e2874de59d812d3db657a5 + pristine_git_object: daee151f4ceaaee6c224b6dd078b4dfb680495b3 + src/mistralai/models/unarchiveftmodelout.py: + 
id: d758d3dee216 + last_write_checksum: sha1:b60e3292d2c4e6bf1456649184eaef4c75732cfc + pristine_git_object: 55c0ea8aa841ecef08f64020f099353efbdbcf7d + src/mistralai/models/updateftmodelin.py: + id: dbf79e18efd0 + last_write_checksum: sha1:aab40882f622a32054d73e33ca2be279bb880080 + pristine_git_object: 1bd0eaf2eb9b3427da6f4581b36d4316c0d129bf + src/mistralai/models/uploadfileout.py: + id: 1fa81af96888 + last_write_checksum: sha1:ebd3800e23e32b7f95665393db9a8e955c2912ea + pristine_git_object: f235fdcdf23d39d408d20a43597652f8daf677b0 + src/mistralai/models/usageinfo.py: + id: 62e303fb96aa + last_write_checksum: sha1:7f81b8c11fb5076e03a9fa40865382c9b45b700e + pristine_git_object: cedad5c12a96418567294e91812bfd96dce875bf + src/mistralai/models/usermessage.py: + id: dd10edab3b81 + last_write_checksum: sha1:a22b667ed90d8e34923d36422ef7ea6ae83d2dd7 + pristine_git_object: 61590bed06e1a397a1166a04a0b2405b833d19ff + src/mistralai/models/validationerror.py: + id: 0c6798c22859 + last_write_checksum: sha1:be4e31bc68c0eed17cd16679064760ac1f035d7b + pristine_git_object: e971e016d64237f24d86c171222f66575152fd1f + src/mistralai/models/wandbintegration.py: + id: a2f0944d8dbd + last_write_checksum: sha1:43a3c6f8d77cde042cfa129954f48c419d3fe1b9 + pristine_git_object: 690538963550d6adaf291fab8344f317c3c9080e + src/mistralai/models/wandbintegrationout.py: + id: bfae63e4ff4c + last_write_checksum: sha1:843e286ce58f072f27e8cb67b4c4f35001ffe0f0 + pristine_git_object: f5a9ba802b489f595bfc2578b9f3456b5230bdb3 + src/mistralai/models/websearchpremiumtool.py: + id: "710695472090" + last_write_checksum: sha1:85a562f976a03e9a3a659018caa78d2e26caeef9 + pristine_git_object: 3bbe753acb99f74f8eb7aa63a387f35714b0a259 + src/mistralai/models/websearchtool.py: + id: d8f773002c11 + last_write_checksum: sha1:1e48212c4cc43bf937a3d21837878a1722666a30 + pristine_git_object: eeafecb4847e66075b64dc34512aaca7a045900b + src/mistralai/models_.py: + id: dfcd71fd4c33 + last_write_checksum: 
sha1:076e72b91c364f1a4905092b02e2ad7ebf7765c6 + pristine_git_object: d44930a0db06117ba538424273935016a133e0ae + src/mistralai/ocr.py: + id: e23da68c9ae8 + last_write_checksum: sha1:9de69bb9928130acbe099d2cd833dc09fbfccee5 + pristine_git_object: 8c2e478b26fbaabe56f645c77dcb294fe3a953c1 + src/mistralai/py.typed: + id: 3923b7c50c56 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai/sdk.py: + id: b2a76476b492 + last_write_checksum: sha1:f0ce70fdd61fc69a6afb59a46b42719c14e429d8 + pristine_git_object: c83b53e0445788e27d0e451030807f1c6b86560b + src/mistralai/sdkconfiguration.py: + id: e6e7f1fb8b52 + last_write_checksum: sha1:63a0ae64777a9d39debeb6ef36ac6d71dadc6d80 + pristine_git_object: 7e77925ddc9aa01c4e5f8ba2ca52aa7f32e89859 + src/mistralai/transcriptions.py: + id: ba6b040274f2 + last_write_checksum: sha1:079bcd1c4a6b1d74e97cc6d77bccf4eea1232cd7 + pristine_git_object: bdbeb1ccbb938c825e5c3371a0f761a90a6e17b8 + src/mistralai/types/__init__.py: + id: b89b8375c971 + last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed + pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + src/mistralai/types/basemodel.py: + id: 18149749a011 + last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 + pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee + src/mistralai/utils/__init__.py: + id: 6f6ad3db2456 + last_write_checksum: sha1:c7c1ee47be7ac3774b042c8aee439143493ed3ce + pristine_git_object: f9c2edce8ecf2d2a4ab0ad36129ac70afd3d1f2f + src/mistralai/utils/annotations.py: + id: 76966ef1943a + last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc + pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + src/mistralai/utils/datetimes.py: + id: a0aa72e39d40 + last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 + pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai/utils/enums.py: + id: 
400af6d98484 + last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d + pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 + src/mistralai/utils/eventstreaming.py: + id: 7b58f8ceb28e + last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b + pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + src/mistralai/utils/forms.py: + id: a584268d234f + last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 + pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + src/mistralai/utils/headers.py: + id: 3b4141506f5a + last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 + pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + src/mistralai/utils/logger.py: + id: e35e15a1b67e + last_write_checksum: sha1:23efbe8d8d3b9412877f3cd35b37477d0e460a2f + pristine_git_object: cc08930715f6f03a559a2f30c3a9482071a3e1e2 + src/mistralai/utils/metadata.py: + id: 617f23c58d0d + last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 + pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + src/mistralai/utils/queryparams.py: + id: 6d86b06d25db + last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 + pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + src/mistralai/utils/requestbodies.py: + id: 09529564c402 + last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 + pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c + src/mistralai/utils/retries.py: + id: 3c8dad479e7d + last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 + pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + src/mistralai/utils/security.py: + id: e8a6622acc38 + last_write_checksum: sha1:d86d2fd73cbb4a77f72395c10fff1c700efcf42e + pristine_git_object: 3b8526bfdf5c9c871ed184a2ec785f7bc1ebe57e + src/mistralai/utils/serializers.py: + id: e3688f9815db + last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 + pristine_git_object: 
14321eb479de81d0d9580ec8291e0ff91bf29e57 + src/mistralai/utils/unmarshal_json_response.py: + id: 3bc4add4e1b6 + last_write_checksum: sha1:0b7b57b8a97ff6bfbb4dea22d59b8aade9a487f2 + pristine_git_object: 64d0b3a6c59921ac0a5fb05d52ba47d0b696ae0e + src/mistralai/utils/url.py: + id: 8aa618817e83 + last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 + pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + src/mistralai/utils/values.py: + id: 3b1394457cf4 + last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 + pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 examples: list_models_v1_models_get: speakeasy-default-list-models-v1-models-get: @@ -1538,4 +3742,738 @@ examples: application/json: {} examplesVersion: 1.0.2 generatedTests: {} -releaseNotes: "## SDK Changes Detected:\n* `mistral.beta.conversations.restart()`: \n * `request.inputs.[array].[]` **Changed** **Breaking** :warning:\n * `response.outputs.[].[message_output_entry].content.[array].[]` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.start()`: \n * `request.inputs.[array].[]` **Changed** **Breaking** :warning:\n * `response.outputs.[].[message_output_entry].content.[array].[]` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.append()`: \n * `request.inputs.[array].[]` **Changed** **Breaking** :warning:\n * `response.outputs.[].[message_output_entry].content.[array].[]` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.get_history()`: `response.entries.[]` **Changed** **Breaking** :warning:\n* `mistral.beta.conversations.get_messages()`: `response.messages.[]` **Changed** **Breaking** :warning:\n* `mistral.beta.agents.list()`: `request.metadata` **Changed**\n* `mistral.beta.conversations.list()`: `request.metadata` **Changed**\n* `mistral.batch.jobs.list()`: `response.data.[].outputs` **Added**\n* `mistral.batch.jobs.create()`: \n * `request` **Changed**\n * `response.outputs` **Added**\n* 
`mistral.batch.jobs.get()`: \n * `request.inline` **Added**\n * `response.outputs` **Added**\n* `mistral.batch.jobs.cancel()`: `response.outputs` **Added**\n* `mistral.embeddings.create()`: `request.metadata` **Added**\n* `mistral.classifiers.moderate()`: `request.metadata` **Added**\n* `mistral.classifiers.classify()`: `request.metadata` **Added**\n" +generatedFiles: + - .gitattributes + - .vscode/settings.json + - USAGE.md + - docs/models/agent.md + - docs/models/agentconversation.md + - docs/models/agentconversationobject.md + - docs/models/agentcreationrequest.md + - docs/models/agentcreationrequesttools.md + - docs/models/agenthandoffdoneevent.md + - docs/models/agenthandoffdoneeventtype.md + - docs/models/agenthandoffentry.md + - docs/models/agenthandoffentryobject.md + - docs/models/agenthandoffentrytype.md + - docs/models/agenthandoffstartedevent.md + - docs/models/agenthandoffstartedeventtype.md + - docs/models/agentobject.md + - docs/models/agentsapiv1agentsdeleterequest.md + - docs/models/agentsapiv1agentsgetrequest.md + - docs/models/agentsapiv1agentslistrequest.md + - docs/models/agentsapiv1agentsupdaterequest.md + - docs/models/agentsapiv1agentsupdateversionrequest.md + - docs/models/agentsapiv1conversationsappendrequest.md + - docs/models/agentsapiv1conversationsappendstreamrequest.md + - docs/models/agentsapiv1conversationsdeleterequest.md + - docs/models/agentsapiv1conversationsgetrequest.md + - docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md + - docs/models/agentsapiv1conversationshistoryrequest.md + - docs/models/agentsapiv1conversationslistrequest.md + - docs/models/agentsapiv1conversationsmessagesrequest.md + - docs/models/agentsapiv1conversationsrestartrequest.md + - docs/models/agentsapiv1conversationsrestartstreamrequest.md + - docs/models/agentscompletionrequest.md + - docs/models/agentscompletionrequestmessages.md + - docs/models/agentscompletionrequeststop.md + - docs/models/agentscompletionrequesttoolchoice.md + - 
docs/models/agentscompletionstreamrequest.md + - docs/models/agentscompletionstreamrequestmessages.md + - docs/models/agentscompletionstreamrequeststop.md + - docs/models/agentscompletionstreamrequesttoolchoice.md + - docs/models/agenttools.md + - docs/models/agentupdaterequest.md + - docs/models/agentupdaterequesttools.md + - docs/models/apiendpoint.md + - docs/models/archiveftmodelout.md + - docs/models/archiveftmodeloutobject.md + - docs/models/arguments.md + - docs/models/assistantmessage.md + - docs/models/assistantmessagecontent.md + - docs/models/assistantmessagerole.md + - docs/models/attributes.md + - docs/models/audiochunk.md + - docs/models/audiochunktype.md + - docs/models/audiotranscriptionrequest.md + - docs/models/audiotranscriptionrequeststream.md + - docs/models/basemodelcard.md + - docs/models/basemodelcardtype.md + - docs/models/batcherror.md + - docs/models/batchjobin.md + - docs/models/batchjobout.md + - docs/models/batchjoboutobject.md + - docs/models/batchjobsout.md + - docs/models/batchjobsoutobject.md + - docs/models/batchjobstatus.md + - docs/models/builtinconnectors.md + - docs/models/chatclassificationrequest.md + - docs/models/chatcompletionchoice.md + - docs/models/chatcompletionrequest.md + - docs/models/chatcompletionrequesttoolchoice.md + - docs/models/chatcompletionresponse.md + - docs/models/chatcompletionstreamrequest.md + - docs/models/chatcompletionstreamrequestmessages.md + - docs/models/chatcompletionstreamrequeststop.md + - docs/models/chatcompletionstreamrequesttoolchoice.md + - docs/models/chatmoderationrequest.md + - docs/models/chatmoderationrequestinputs.md + - docs/models/checkpointout.md + - docs/models/classificationrequest.md + - docs/models/classificationrequestinputs.md + - docs/models/classificationresponse.md + - docs/models/classificationtargetresult.md + - docs/models/classifierdetailedjobout.md + - docs/models/classifierdetailedjoboutintegrations.md + - docs/models/classifierdetailedjoboutjobtype.md + - 
docs/models/classifierdetailedjoboutobject.md + - docs/models/classifierdetailedjoboutstatus.md + - docs/models/classifierftmodelout.md + - docs/models/classifierftmodeloutmodeltype.md + - docs/models/classifierftmodeloutobject.md + - docs/models/classifierjobout.md + - docs/models/classifierjoboutintegrations.md + - docs/models/classifierjoboutjobtype.md + - docs/models/classifierjoboutobject.md + - docs/models/classifierjoboutstatus.md + - docs/models/classifiertargetin.md + - docs/models/classifiertargetout.md + - docs/models/classifiertrainingparameters.md + - docs/models/classifiertrainingparametersin.md + - docs/models/codeinterpretertool.md + - docs/models/codeinterpretertooltype.md + - docs/models/completionargs.md + - docs/models/completionargsstop.md + - docs/models/completionchunk.md + - docs/models/completiondetailedjobout.md + - docs/models/completiondetailedjoboutintegrations.md + - docs/models/completiondetailedjoboutjobtype.md + - docs/models/completiondetailedjoboutobject.md + - docs/models/completiondetailedjoboutrepositories.md + - docs/models/completiondetailedjoboutstatus.md + - docs/models/completionevent.md + - docs/models/completionftmodelout.md + - docs/models/completionftmodeloutobject.md + - docs/models/completionjobout.md + - docs/models/completionjoboutobject.md + - docs/models/completionresponsestreamchoice.md + - docs/models/completionresponsestreamchoicefinishreason.md + - docs/models/completiontrainingparameters.md + - docs/models/completiontrainingparametersin.md + - docs/models/content.md + - docs/models/contentchunk.md + - docs/models/conversationappendrequest.md + - docs/models/conversationappendrequesthandoffexecution.md + - docs/models/conversationappendstreamrequest.md + - docs/models/conversationappendstreamrequesthandoffexecution.md + - docs/models/conversationevents.md + - docs/models/conversationeventsdata.md + - docs/models/conversationhistory.md + - docs/models/conversationhistoryobject.md + - 
docs/models/conversationinputs.md + - docs/models/conversationmessages.md + - docs/models/conversationmessagesobject.md + - docs/models/conversationrequest.md + - docs/models/conversationresponse.md + - docs/models/conversationresponseobject.md + - docs/models/conversationrestartrequest.md + - docs/models/conversationrestartrequesthandoffexecution.md + - docs/models/conversationrestartstreamrequest.md + - docs/models/conversationrestartstreamrequesthandoffexecution.md + - docs/models/conversationstreamrequest.md + - docs/models/conversationstreamrequesthandoffexecution.md + - docs/models/conversationstreamrequesttools.md + - docs/models/conversationusageinfo.md + - docs/models/data.md + - docs/models/deletefileout.md + - docs/models/deletemodelout.md + - docs/models/deletemodelv1modelsmodeliddeleterequest.md + - docs/models/deltamessage.md + - docs/models/document.md + - docs/models/documentlibrarytool.md + - docs/models/documentlibrarytooltype.md + - docs/models/documentout.md + - docs/models/documenttextcontent.md + - docs/models/documentupdatein.md + - docs/models/documenturlchunk.md + - docs/models/documenturlchunktype.md + - docs/models/embeddingdtype.md + - docs/models/embeddingrequest.md + - docs/models/embeddingrequestinputs.md + - docs/models/embeddingresponse.md + - docs/models/embeddingresponsedata.md + - docs/models/encodingformat.md + - docs/models/entitytype.md + - docs/models/entries.md + - docs/models/eventout.md + - docs/models/file.md + - docs/models/filechunk.md + - docs/models/filepurpose.md + - docs/models/filesapiroutesdeletefilerequest.md + - docs/models/filesapiroutesdownloadfilerequest.md + - docs/models/filesapiroutesgetsignedurlrequest.md + - docs/models/filesapirouteslistfilesrequest.md + - docs/models/filesapiroutesretrievefilerequest.md + - docs/models/filesapiroutesuploadfilemultipartbodyparams.md + - docs/models/fileschema.md + - docs/models/filesignedurl.md + - docs/models/fimcompletionrequest.md + - 
docs/models/fimcompletionrequeststop.md + - docs/models/fimcompletionresponse.md + - docs/models/fimcompletionstreamrequest.md + - docs/models/fimcompletionstreamrequeststop.md + - docs/models/finetuneablemodeltype.md + - docs/models/finishreason.md + - docs/models/format_.md + - docs/models/ftclassifierlossfunction.md + - docs/models/ftmodelcapabilitiesout.md + - docs/models/ftmodelcard.md + - docs/models/ftmodelcardtype.md + - docs/models/function.md + - docs/models/functioncall.md + - docs/models/functioncallentry.md + - docs/models/functioncallentryarguments.md + - docs/models/functioncallentryobject.md + - docs/models/functioncallentrytype.md + - docs/models/functioncallevent.md + - docs/models/functioncalleventtype.md + - docs/models/functionname.md + - docs/models/functionresultentry.md + - docs/models/functionresultentryobject.md + - docs/models/functionresultentrytype.md + - docs/models/functiontool.md + - docs/models/functiontooltype.md + - docs/models/githubrepositoryin.md + - docs/models/githubrepositoryintype.md + - docs/models/githubrepositoryout.md + - docs/models/githubrepositoryouttype.md + - docs/models/handoffexecution.md + - docs/models/httpvalidationerror.md + - docs/models/hyperparameters.md + - docs/models/imagegenerationtool.md + - docs/models/imagegenerationtooltype.md + - docs/models/imageurl.md + - docs/models/imageurlchunk.md + - docs/models/imageurlchunkimageurl.md + - docs/models/imageurlchunktype.md + - docs/models/inputentries.md + - docs/models/inputs.md + - docs/models/instructrequest.md + - docs/models/instructrequestinputs.md + - docs/models/instructrequestinputsmessages.md + - docs/models/instructrequestmessages.md + - docs/models/integrations.md + - docs/models/jobin.md + - docs/models/jobinintegrations.md + - docs/models/jobinrepositories.md + - docs/models/jobmetadataout.md + - docs/models/jobsapiroutesbatchcancelbatchjobrequest.md + - docs/models/jobsapiroutesbatchgetbatchjobrequest.md + - 
docs/models/jobsapiroutesbatchgetbatchjobsrequest.md + - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md + - docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md + - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md + - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md + - docs/models/jobsout.md + - docs/models/jobsoutdata.md + - docs/models/jobsoutobject.md + - docs/models/jobtype.md + - docs/models/jsonschema.md + - docs/models/legacyjobmetadataout.md + - docs/models/legacyjobmetadataoutobject.md + - docs/models/librariesdeletev1request.md + - docs/models/librariesdocumentsdeletev1request.md + - docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md + - docs/models/librariesdocumentsgetsignedurlv1request.md + - docs/models/librariesdocumentsgetstatusv1request.md + - docs/models/librariesdocumentsgettextcontentv1request.md + - docs/models/librariesdocumentsgetv1request.md + - docs/models/librariesdocumentslistv1request.md + - docs/models/librariesdocumentsreprocessv1request.md + - docs/models/librariesdocumentsupdatev1request.md + - docs/models/librariesdocumentsuploadv1documentupload.md + - docs/models/librariesdocumentsuploadv1request.md + - docs/models/librariesgetv1request.md + - docs/models/librariessharecreatev1request.md + - docs/models/librariessharedeletev1request.md + - docs/models/librariessharelistv1request.md + - 
docs/models/librariesupdatev1request.md + - docs/models/libraryin.md + - docs/models/libraryinupdate.md + - docs/models/libraryout.md + - docs/models/listdocumentout.md + - docs/models/listfilesout.md + - docs/models/listlibraryout.md + - docs/models/listsharingout.md + - docs/models/loc.md + - docs/models/messageentries.md + - docs/models/messageinputcontentchunks.md + - docs/models/messageinputentry.md + - docs/models/messageinputentrycontent.md + - docs/models/messageinputentryrole.md + - docs/models/messageinputentrytype.md + - docs/models/messageoutputcontentchunks.md + - docs/models/messageoutputentry.md + - docs/models/messageoutputentrycontent.md + - docs/models/messageoutputentryobject.md + - docs/models/messageoutputentryrole.md + - docs/models/messageoutputentrytype.md + - docs/models/messageoutputevent.md + - docs/models/messageoutputeventcontent.md + - docs/models/messageoutputeventrole.md + - docs/models/messageoutputeventtype.md + - docs/models/messages.md + - docs/models/metricout.md + - docs/models/mistralpromptmode.md + - docs/models/modelcapabilities.md + - docs/models/modelconversation.md + - docs/models/modelconversationobject.md + - docs/models/modelconversationtools.md + - docs/models/modellist.md + - docs/models/modeltype.md + - docs/models/moderationobject.md + - docs/models/moderationresponse.md + - docs/models/name.md + - docs/models/object.md + - docs/models/ocrimageobject.md + - docs/models/ocrpagedimensions.md + - docs/models/ocrpageobject.md + - docs/models/ocrrequest.md + - docs/models/ocrresponse.md + - docs/models/ocrtableobject.md + - docs/models/ocrusageinfo.md + - docs/models/one.md + - docs/models/outputcontentchunks.md + - docs/models/outputs.md + - docs/models/paginationinfo.md + - docs/models/prediction.md + - docs/models/processingstatusout.md + - docs/models/queryparamstatus.md + - docs/models/referencechunk.md + - docs/models/referencechunktype.md + - docs/models/repositories.md + - docs/models/requestsource.md + - 
docs/models/response1.md + - docs/models/responsebody.md + - docs/models/responsedoneevent.md + - docs/models/responsedoneeventtype.md + - docs/models/responseerrorevent.md + - docs/models/responseerroreventtype.md + - docs/models/responseformat.md + - docs/models/responseformats.md + - docs/models/responsestartedevent.md + - docs/models/responsestartedeventtype.md + - docs/models/retrievefileout.md + - docs/models/retrievemodelv1modelsmodelidgetrequest.md + - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md + - docs/models/role.md + - docs/models/sampletype.md + - docs/models/security.md + - docs/models/shareenum.md + - docs/models/sharingdelete.md + - docs/models/sharingin.md + - docs/models/sharingout.md + - docs/models/source.md + - docs/models/ssetypes.md + - docs/models/status.md + - docs/models/stop.md + - docs/models/systemmessage.md + - docs/models/systemmessagecontent.md + - docs/models/systemmessagecontentchunks.md + - docs/models/tableformat.md + - docs/models/textchunk.md + - docs/models/textchunktype.md + - docs/models/thinkchunk.md + - docs/models/thinkchunktype.md + - docs/models/thinking.md + - docs/models/timestampgranularity.md + - docs/models/tool.md + - docs/models/toolcall.md + - docs/models/toolchoice.md + - docs/models/toolchoiceenum.md + - docs/models/toolexecutiondeltaevent.md + - docs/models/toolexecutiondeltaeventname.md + - docs/models/toolexecutiondeltaeventtype.md + - docs/models/toolexecutiondoneevent.md + - docs/models/toolexecutiondoneeventname.md + - docs/models/toolexecutiondoneeventtype.md + - docs/models/toolexecutionentry.md + - docs/models/toolexecutionentryobject.md + - docs/models/toolexecutionentrytype.md + - docs/models/toolexecutionstartedevent.md + - docs/models/toolexecutionstartedeventname.md + - docs/models/toolexecutionstartedeventtype.md + - docs/models/toolfilechunk.md + - docs/models/toolfilechunktype.md + - docs/models/toolmessage.md + - docs/models/toolmessagecontent.md + - 
docs/models/toolmessagerole.md + - docs/models/toolreferencechunk.md + - docs/models/toolreferencechunktype.md + - docs/models/tools.md + - docs/models/tooltypes.md + - docs/models/trainingfile.md + - docs/models/transcriptionresponse.md + - docs/models/transcriptionsegmentchunk.md + - docs/models/transcriptionstreamdone.md + - docs/models/transcriptionstreamdonetype.md + - docs/models/transcriptionstreamevents.md + - docs/models/transcriptionstreameventsdata.md + - docs/models/transcriptionstreameventtypes.md + - docs/models/transcriptionstreamlanguage.md + - docs/models/transcriptionstreamlanguagetype.md + - docs/models/transcriptionstreamsegmentdelta.md + - docs/models/transcriptionstreamsegmentdeltatype.md + - docs/models/transcriptionstreamtextdelta.md + - docs/models/transcriptionstreamtextdeltatype.md + - docs/models/two.md + - docs/models/type.md + - docs/models/unarchiveftmodelout.md + - docs/models/unarchiveftmodeloutobject.md + - docs/models/updateftmodelin.md + - docs/models/uploadfileout.md + - docs/models/usageinfo.md + - docs/models/usermessage.md + - docs/models/usermessagecontent.md + - docs/models/usermessagerole.md + - docs/models/utils/retryconfig.md + - docs/models/validationerror.md + - docs/models/wandbintegration.md + - docs/models/wandbintegrationout.md + - docs/models/wandbintegrationouttype.md + - docs/models/wandbintegrationtype.md + - docs/models/websearchpremiumtool.md + - docs/models/websearchpremiumtooltype.md + - docs/models/websearchtool.md + - docs/models/websearchtooltype.md + - docs/sdks/accesses/README.md + - docs/sdks/agents/README.md + - docs/sdks/audio/README.md + - docs/sdks/batch/README.md + - docs/sdks/beta/README.md + - docs/sdks/chat/README.md + - docs/sdks/classifiers/README.md + - docs/sdks/conversations/README.md + - docs/sdks/documents/README.md + - docs/sdks/embeddings/README.md + - docs/sdks/files/README.md + - docs/sdks/fim/README.md + - docs/sdks/finetuning/README.md + - docs/sdks/jobs/README.md + - 
docs/sdks/libraries/README.md + - docs/sdks/mistral/README.md + - docs/sdks/mistralagents/README.md + - docs/sdks/mistraljobs/README.md + - docs/sdks/models/README.md + - docs/sdks/ocr/README.md + - docs/sdks/transcriptions/README.md + - poetry.toml + - py.typed + - scripts/prepare_readme.py + - scripts/publish.sh + - src/mistralai/__init__.py + - src/mistralai/_hooks/__init__.py + - src/mistralai/_hooks/sdkhooks.py + - src/mistralai/_hooks/types.py + - src/mistralai/_version.py + - src/mistralai/accesses.py + - src/mistralai/agents.py + - src/mistralai/audio.py + - src/mistralai/basesdk.py + - src/mistralai/batch.py + - src/mistralai/beta.py + - src/mistralai/chat.py + - src/mistralai/classifiers.py + - src/mistralai/conversations.py + - src/mistralai/documents.py + - src/mistralai/embeddings.py + - src/mistralai/files.py + - src/mistralai/fim.py + - src/mistralai/fine_tuning.py + - src/mistralai/httpclient.py + - src/mistralai/jobs.py + - src/mistralai/libraries.py + - src/mistralai/mistral_agents.py + - src/mistralai/mistral_jobs.py + - src/mistralai/models/__init__.py + - src/mistralai/models/agent.py + - src/mistralai/models/agentconversation.py + - src/mistralai/models/agentcreationrequest.py + - src/mistralai/models/agenthandoffdoneevent.py + - src/mistralai/models/agenthandoffentry.py + - src/mistralai/models/agenthandoffstartedevent.py + - src/mistralai/models/agents_api_v1_agents_deleteop.py + - src/mistralai/models/agents_api_v1_agents_getop.py + - src/mistralai/models/agents_api_v1_agents_listop.py + - src/mistralai/models/agents_api_v1_agents_update_versionop.py + - src/mistralai/models/agents_api_v1_agents_updateop.py + - src/mistralai/models/agents_api_v1_conversations_append_streamop.py + - src/mistralai/models/agents_api_v1_conversations_appendop.py + - src/mistralai/models/agents_api_v1_conversations_deleteop.py + - src/mistralai/models/agents_api_v1_conversations_getop.py + - src/mistralai/models/agents_api_v1_conversations_historyop.py + - 
src/mistralai/models/agents_api_v1_conversations_listop.py + - src/mistralai/models/agents_api_v1_conversations_messagesop.py + - src/mistralai/models/agents_api_v1_conversations_restart_streamop.py + - src/mistralai/models/agents_api_v1_conversations_restartop.py + - src/mistralai/models/agentscompletionrequest.py + - src/mistralai/models/agentscompletionstreamrequest.py + - src/mistralai/models/agentupdaterequest.py + - src/mistralai/models/apiendpoint.py + - src/mistralai/models/archiveftmodelout.py + - src/mistralai/models/assistantmessage.py + - src/mistralai/models/audiochunk.py + - src/mistralai/models/audiotranscriptionrequest.py + - src/mistralai/models/audiotranscriptionrequeststream.py + - src/mistralai/models/basemodelcard.py + - src/mistralai/models/batcherror.py + - src/mistralai/models/batchjobin.py + - src/mistralai/models/batchjobout.py + - src/mistralai/models/batchjobsout.py + - src/mistralai/models/batchjobstatus.py + - src/mistralai/models/builtinconnectors.py + - src/mistralai/models/chatclassificationrequest.py + - src/mistralai/models/chatcompletionchoice.py + - src/mistralai/models/chatcompletionrequest.py + - src/mistralai/models/chatcompletionresponse.py + - src/mistralai/models/chatcompletionstreamrequest.py + - src/mistralai/models/chatmoderationrequest.py + - src/mistralai/models/checkpointout.py + - src/mistralai/models/classificationrequest.py + - src/mistralai/models/classificationresponse.py + - src/mistralai/models/classificationtargetresult.py + - src/mistralai/models/classifierdetailedjobout.py + - src/mistralai/models/classifierftmodelout.py + - src/mistralai/models/classifierjobout.py + - src/mistralai/models/classifiertargetin.py + - src/mistralai/models/classifiertargetout.py + - src/mistralai/models/classifiertrainingparameters.py + - src/mistralai/models/classifiertrainingparametersin.py + - src/mistralai/models/codeinterpretertool.py + - src/mistralai/models/completionargs.py + - src/mistralai/models/completionargsstop.py 
+ - src/mistralai/models/completionchunk.py + - src/mistralai/models/completiondetailedjobout.py + - src/mistralai/models/completionevent.py + - src/mistralai/models/completionftmodelout.py + - src/mistralai/models/completionjobout.py + - src/mistralai/models/completionresponsestreamchoice.py + - src/mistralai/models/completiontrainingparameters.py + - src/mistralai/models/completiontrainingparametersin.py + - src/mistralai/models/contentchunk.py + - src/mistralai/models/conversationappendrequest.py + - src/mistralai/models/conversationappendstreamrequest.py + - src/mistralai/models/conversationevents.py + - src/mistralai/models/conversationhistory.py + - src/mistralai/models/conversationinputs.py + - src/mistralai/models/conversationmessages.py + - src/mistralai/models/conversationrequest.py + - src/mistralai/models/conversationresponse.py + - src/mistralai/models/conversationrestartrequest.py + - src/mistralai/models/conversationrestartstreamrequest.py + - src/mistralai/models/conversationstreamrequest.py + - src/mistralai/models/conversationusageinfo.py + - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py + - src/mistralai/models/deletefileout.py + - src/mistralai/models/deletemodelout.py + - src/mistralai/models/deltamessage.py + - src/mistralai/models/documentlibrarytool.py + - src/mistralai/models/documentout.py + - src/mistralai/models/documenttextcontent.py + - src/mistralai/models/documentupdatein.py + - src/mistralai/models/documenturlchunk.py + - src/mistralai/models/embeddingdtype.py + - src/mistralai/models/embeddingrequest.py + - src/mistralai/models/embeddingresponse.py + - src/mistralai/models/embeddingresponsedata.py + - src/mistralai/models/encodingformat.py + - src/mistralai/models/entitytype.py + - src/mistralai/models/eventout.py + - src/mistralai/models/file.py + - src/mistralai/models/filechunk.py + - src/mistralai/models/filepurpose.py + - src/mistralai/models/files_api_routes_delete_fileop.py + - 
src/mistralai/models/files_api_routes_download_fileop.py + - src/mistralai/models/files_api_routes_get_signed_urlop.py + - src/mistralai/models/files_api_routes_list_filesop.py + - src/mistralai/models/files_api_routes_retrieve_fileop.py + - src/mistralai/models/files_api_routes_upload_fileop.py + - src/mistralai/models/fileschema.py + - src/mistralai/models/filesignedurl.py + - src/mistralai/models/fimcompletionrequest.py + - src/mistralai/models/fimcompletionresponse.py + - src/mistralai/models/fimcompletionstreamrequest.py + - src/mistralai/models/finetuneablemodeltype.py + - src/mistralai/models/ftclassifierlossfunction.py + - src/mistralai/models/ftmodelcapabilitiesout.py + - src/mistralai/models/ftmodelcard.py + - src/mistralai/models/function.py + - src/mistralai/models/functioncall.py + - src/mistralai/models/functioncallentry.py + - src/mistralai/models/functioncallentryarguments.py + - src/mistralai/models/functioncallevent.py + - src/mistralai/models/functionname.py + - src/mistralai/models/functionresultentry.py + - src/mistralai/models/functiontool.py + - src/mistralai/models/githubrepositoryin.py + - src/mistralai/models/githubrepositoryout.py + - src/mistralai/models/httpvalidationerror.py + - src/mistralai/models/imagegenerationtool.py + - src/mistralai/models/imageurl.py + - src/mistralai/models/imageurlchunk.py + - src/mistralai/models/inputentries.py + - src/mistralai/models/inputs.py + - src/mistralai/models/instructrequest.py + - src/mistralai/models/jobin.py + - src/mistralai/models/jobmetadataout.py + - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py + - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py + - 
src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py + - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py + - src/mistralai/models/jobsout.py + - src/mistralai/models/jsonschema.py + - src/mistralai/models/legacyjobmetadataout.py + - src/mistralai/models/libraries_delete_v1op.py + - src/mistralai/models/libraries_documents_delete_v1op.py + - src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py + - src/mistralai/models/libraries_documents_get_signed_url_v1op.py + - src/mistralai/models/libraries_documents_get_status_v1op.py + - src/mistralai/models/libraries_documents_get_text_content_v1op.py + - src/mistralai/models/libraries_documents_get_v1op.py + - src/mistralai/models/libraries_documents_list_v1op.py + - src/mistralai/models/libraries_documents_reprocess_v1op.py + - src/mistralai/models/libraries_documents_update_v1op.py + - src/mistralai/models/libraries_documents_upload_v1op.py + - src/mistralai/models/libraries_get_v1op.py + - src/mistralai/models/libraries_share_create_v1op.py + - src/mistralai/models/libraries_share_delete_v1op.py + - src/mistralai/models/libraries_share_list_v1op.py + - src/mistralai/models/libraries_update_v1op.py + - src/mistralai/models/libraryin.py + - src/mistralai/models/libraryinupdate.py + - src/mistralai/models/libraryout.py + - src/mistralai/models/listdocumentout.py + - src/mistralai/models/listfilesout.py + - src/mistralai/models/listlibraryout.py + - src/mistralai/models/listsharingout.py + - src/mistralai/models/messageentries.py + - src/mistralai/models/messageinputcontentchunks.py + - src/mistralai/models/messageinputentry.py + - src/mistralai/models/messageoutputcontentchunks.py + - 
src/mistralai/models/messageoutputentry.py + - src/mistralai/models/messageoutputevent.py + - src/mistralai/models/metricout.py + - src/mistralai/models/mistralerror.py + - src/mistralai/models/mistralpromptmode.py + - src/mistralai/models/modelcapabilities.py + - src/mistralai/models/modelconversation.py + - src/mistralai/models/modellist.py + - src/mistralai/models/moderationobject.py + - src/mistralai/models/moderationresponse.py + - src/mistralai/models/no_response_error.py + - src/mistralai/models/ocrimageobject.py + - src/mistralai/models/ocrpagedimensions.py + - src/mistralai/models/ocrpageobject.py + - src/mistralai/models/ocrrequest.py + - src/mistralai/models/ocrresponse.py + - src/mistralai/models/ocrtableobject.py + - src/mistralai/models/ocrusageinfo.py + - src/mistralai/models/outputcontentchunks.py + - src/mistralai/models/paginationinfo.py + - src/mistralai/models/prediction.py + - src/mistralai/models/processingstatusout.py + - src/mistralai/models/referencechunk.py + - src/mistralai/models/requestsource.py + - src/mistralai/models/responsedoneevent.py + - src/mistralai/models/responseerrorevent.py + - src/mistralai/models/responseformat.py + - src/mistralai/models/responseformats.py + - src/mistralai/models/responsestartedevent.py + - src/mistralai/models/responsevalidationerror.py + - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py + - src/mistralai/models/retrievefileout.py + - src/mistralai/models/sampletype.py + - src/mistralai/models/sdkerror.py + - src/mistralai/models/security.py + - src/mistralai/models/shareenum.py + - src/mistralai/models/sharingdelete.py + - src/mistralai/models/sharingin.py + - src/mistralai/models/sharingout.py + - src/mistralai/models/source.py + - src/mistralai/models/ssetypes.py + - src/mistralai/models/systemmessage.py + - src/mistralai/models/systemmessagecontentchunks.py + - src/mistralai/models/textchunk.py + - src/mistralai/models/thinkchunk.py + - src/mistralai/models/timestampgranularity.py + 
- src/mistralai/models/tool.py + - src/mistralai/models/toolcall.py + - src/mistralai/models/toolchoice.py + - src/mistralai/models/toolchoiceenum.py + - src/mistralai/models/toolexecutiondeltaevent.py + - src/mistralai/models/toolexecutiondoneevent.py + - src/mistralai/models/toolexecutionentry.py + - src/mistralai/models/toolexecutionstartedevent.py + - src/mistralai/models/toolfilechunk.py + - src/mistralai/models/toolmessage.py + - src/mistralai/models/toolreferencechunk.py + - src/mistralai/models/tooltypes.py + - src/mistralai/models/trainingfile.py + - src/mistralai/models/transcriptionresponse.py + - src/mistralai/models/transcriptionsegmentchunk.py + - src/mistralai/models/transcriptionstreamdone.py + - src/mistralai/models/transcriptionstreamevents.py + - src/mistralai/models/transcriptionstreameventtypes.py + - src/mistralai/models/transcriptionstreamlanguage.py + - src/mistralai/models/transcriptionstreamsegmentdelta.py + - src/mistralai/models/transcriptionstreamtextdelta.py + - src/mistralai/models/unarchiveftmodelout.py + - src/mistralai/models/updateftmodelin.py + - src/mistralai/models/uploadfileout.py + - src/mistralai/models/usageinfo.py + - src/mistralai/models/usermessage.py + - src/mistralai/models/validationerror.py + - src/mistralai/models/wandbintegration.py + - src/mistralai/models/wandbintegrationout.py + - src/mistralai/models/websearchpremiumtool.py + - src/mistralai/models/websearchtool.py + - src/mistralai/models_.py + - src/mistralai/ocr.py + - src/mistralai/py.typed + - src/mistralai/sdk.py + - src/mistralai/sdkconfiguration.py + - src/mistralai/transcriptions.py + - src/mistralai/types/__init__.py + - src/mistralai/types/basemodel.py + - src/mistralai/utils/__init__.py + - src/mistralai/utils/annotations.py + - src/mistralai/utils/datetimes.py + - src/mistralai/utils/enums.py + - src/mistralai/utils/eventstreaming.py + - src/mistralai/utils/forms.py + - src/mistralai/utils/headers.py + - src/mistralai/utils/logger.py + - 
src/mistralai/utils/metadata.py + - src/mistralai/utils/queryparams.py + - src/mistralai/utils/requestbodies.py + - src/mistralai/utils/retries.py + - src/mistralai/utils/security.py + - src/mistralai/utils/serializers.py + - src/mistralai/utils/unmarshal_json_response.py + - src/mistralai/utils/url.py + - src/mistralai/utils/values.py diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index bf732c1e..53216a9e 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -16,12 +16,17 @@ generation: auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + hoistGlobalSecurity: true + schemas: + allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" + persistentEdits: {} tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.10.1 + version: 1.11.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -29,10 +34,12 @@ python: allowedRedefinedBuiltins: - id - object + asyncMode: both authors: - Mistral baseErrorName: MistralError clientServerStatusCodesAsErrors: true + constFieldCasing: upper defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API. 
enableCustomCodeRegions: true @@ -51,14 +58,19 @@ python: operations: "" shared: "" webhooks: "" + inferUnionDiscriminators: true inputModelSuffix: input + license: "" maxMethodParams: 15 methodArguments: infer-optional-args moduleName: "" + multipartArrayFormat: legacy outputModelSuffix: output packageManager: uv packageName: mistralai + preApplyUnionDiscriminators: false pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat + sseFlatResponse: false templateVersion: v2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 240565eb..d5f1c965 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,51 +1,48 @@ -speakeasyVersion: 1.606.10 +speakeasyVersion: 1.685.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:fcc7262f29ef89a07cb718d7e6094c272627cf9f531588aef15a6e92dd50130a - sourceBlobDigest: sha256:9e6fc34474062726ceb96e424e858a0ae1b0506659cd11a58c72e1dd50dae885 + sourceRevisionDigest: sha256:544a7fd4d099e72a9a12681b326d44201f1b163e4df2f5fd643d831167255d84 + sourceBlobDigest: sha256:41c72401329a30983907c32a60063da8ccd82137cf79d7f452089b5b83bb9d92 tags: - latest - - speakeasy-sdk-regen-1768231850 mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:bc59aaf55dc46e94ddf6cc687292807629d7a17ee5f573a504e7e44fd365e147 - sourceBlobDigest: sha256:545fe85c5dae11def2741fc7a99f297b7f0728c9677c3c7b94d56ddbed70581d + sourceRevisionDigest: sha256:e3dd3079347edf744151936aaee4ec0ce3eeeb8f46b5c7f31f8e224221e879d4 + sourceBlobDigest: sha256:7a525230930debff23fec4e92e3ad2a57889ea46de86cc96d519615709ae8a16 tags: - latest - - speakeasy-sdk-regen-1768231856 mistral-openapi: sourceNamespace: mistral-openapi sourceRevisionDigest: sha256:ebe60088ce4a3780c57a08de7bc73f973f529822a05db12c5d9c6084e9a934e0 sourceBlobDigest: sha256:c93947af3495a5129cb6aecfe0546463917fbe1f66f2cf8f5a0accb36c035501 tags: - latest - - speakeasy-sdk-regen-1768502381 
targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:fcc7262f29ef89a07cb718d7e6094c272627cf9f531588aef15a6e92dd50130a - sourceBlobDigest: sha256:9e6fc34474062726ceb96e424e858a0ae1b0506659cd11a58c72e1dd50dae885 + sourceRevisionDigest: sha256:544a7fd4d099e72a9a12681b326d44201f1b163e4df2f5fd643d831167255d84 + sourceBlobDigest: sha256:41c72401329a30983907c32a60063da8ccd82137cf79d7f452089b5b83bb9d92 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:9ab092d625da8034f7c17321ce5295ecad19ca4e2be2851a1a5a977c6bbeff05 + codeSamplesRevisionDigest: sha256:57821a9bf6cfe7001dfcbcaa2f17b233b98c2f79e2d7588540c41750f10b9c05 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:bc59aaf55dc46e94ddf6cc687292807629d7a17ee5f573a504e7e44fd365e147 - sourceBlobDigest: sha256:545fe85c5dae11def2741fc7a99f297b7f0728c9677c3c7b94d56ddbed70581d + sourceRevisionDigest: sha256:e3dd3079347edf744151936aaee4ec0ce3eeeb8f46b5c7f31f8e224221e879d4 + sourceBlobDigest: sha256:7a525230930debff23fec4e92e3ad2a57889ea46de86cc96d519615709ae8a16 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:31fd0ba45daa00020ef6f07df435ad343b62328bf56489dfcb66b647beeb52b5 + codeSamplesRevisionDigest: sha256:404d5964361b3ced085b11e4b8408c36a4a92efe12a97f7497919efdf7594f6f mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi sourceRevisionDigest: sha256:ebe60088ce4a3780c57a08de7bc73f973f529822a05db12c5d9c6084e9a934e0 sourceBlobDigest: sha256:c93947af3495a5129cb6aecfe0546463917fbe1f66f2cf8f5a0accb36c035501 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:3bf740149ae15c0019fa482ffe9f198149759b0140cdfcbf87e5082c0d22e9ac + codeSamplesRevisionDigest: sha256:14b511ab3d9f6f3d9ee0c81c32c6fa2dd6be9b6a1047298cf9f1162328045b4f workflow: 
workflowVersion: 1.0.0 - speakeasyVersion: 1.606.10 + speakeasyVersion: 1.685.0 sources: mistral-azure-source: inputs: diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 3156d149..8557ab42 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,5 @@ workflowVersion: 1.0.0 -speakeasyVersion: 1.606.10 +speakeasyVersion: 1.685.0 sources: mistral-azure-source: inputs: diff --git a/README.md b/README.md index 02554d99..d755d249 100644 --- a/README.md +++ b/README.md @@ -160,6 +160,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -213,6 +214,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -266,6 +268,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -319,6 +322,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -444,33 +448,24 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA
Available methods -### [agents](docs/sdks/agents/README.md) +### [Agents](docs/sdks/agents/README.md) * [complete](docs/sdks/agents/README.md#complete) - Agents Completion * [stream](docs/sdks/agents/README.md#stream) - Stream Agents completion -### [audio](docs/sdks/audio/README.md) - - -#### [audio.transcriptions](docs/sdks/transcriptions/README.md) +### [Audio.Transcriptions](docs/sdks/transcriptions/README.md) * [complete](docs/sdks/transcriptions/README.md#complete) - Create Transcription * [stream](docs/sdks/transcriptions/README.md#stream) - Create Streaming Transcription (SSE) -### [batch](docs/sdks/batch/README.md) - - -#### [batch.jobs](docs/sdks/mistraljobs/README.md) +### [Batch.Jobs](docs/sdks/mistraljobs/README.md) * [list](docs/sdks/mistraljobs/README.md#list) - Get Batch Jobs * [create](docs/sdks/mistraljobs/README.md#create) - Create Batch Job * [get](docs/sdks/mistraljobs/README.md#get) - Get Batch Job * [cancel](docs/sdks/mistraljobs/README.md#cancel) - Cancel Batch Job -### [beta](docs/sdks/beta/README.md) - - -#### [beta.agents](docs/sdks/mistralagents/README.md) +### [Beta.Agents](docs/sdks/mistralagents/README.md) * [create](docs/sdks/mistralagents/README.md#create) - Create a agent that can be used within a conversation. * [list](docs/sdks/mistralagents/README.md#list) - List agent entities. @@ -479,7 +474,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [delete](docs/sdks/mistralagents/README.md#delete) - Delete an agent entity. * [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. -#### [beta.conversations](docs/sdks/conversations/README.md) +### [Beta.Conversations](docs/sdks/conversations/README.md) * [start](docs/sdks/conversations/README.md#start) - Create a conversation and append entries to it. * [list](docs/sdks/conversations/README.md#list) - List all created conversations. 
@@ -493,7 +488,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [append_stream](docs/sdks/conversations/README.md#append_stream) - Append new entries to an existing conversation. * [restart_stream](docs/sdks/conversations/README.md#restart_stream) - Restart a conversation starting from a given entry. -#### [beta.libraries](docs/sdks/libraries/README.md) +### [Beta.Libraries](docs/sdks/libraries/README.md) * [list](docs/sdks/libraries/README.md#list) - List all libraries you have access to. * [create](docs/sdks/libraries/README.md#create) - Create a new Library. @@ -501,13 +496,13 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [delete](docs/sdks/libraries/README.md#delete) - Delete a library and all of it's document. * [update](docs/sdks/libraries/README.md#update) - Update a library. -#### [beta.libraries.accesses](docs/sdks/accesses/README.md) +#### [Beta.Libraries.Accesses](docs/sdks/accesses/README.md) * [list](docs/sdks/accesses/README.md#list) - List all of the access to this library. * [update_or_create](docs/sdks/accesses/README.md#update_or_create) - Create or update an access level. * [delete](docs/sdks/accesses/README.md#delete) - Delete an access level. -#### [beta.libraries.documents](docs/sdks/documents/README.md) +#### [Beta.Libraries.Documents](docs/sdks/documents/README.md) * [list](docs/sdks/documents/README.md#list) - List documents in a given library. * [upload](docs/sdks/documents/README.md#upload) - Upload a new document. @@ -520,23 +515,23 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [extracted_text_signed_url](docs/sdks/documents/README.md#extracted_text_signed_url) - Retrieve the signed URL of text extracted from a given document. * [reprocess](docs/sdks/documents/README.md#reprocess) - Reprocess a document. 
-### [chat](docs/sdks/chat/README.md) +### [Chat](docs/sdks/chat/README.md) * [complete](docs/sdks/chat/README.md#complete) - Chat Completion * [stream](docs/sdks/chat/README.md#stream) - Stream chat completion -### [classifiers](docs/sdks/classifiers/README.md) +### [Classifiers](docs/sdks/classifiers/README.md) * [moderate](docs/sdks/classifiers/README.md#moderate) - Moderations * [moderate_chat](docs/sdks/classifiers/README.md#moderate_chat) - Chat Moderations * [classify](docs/sdks/classifiers/README.md#classify) - Classifications * [classify_chat](docs/sdks/classifiers/README.md#classify_chat) - Chat Classifications -### [embeddings](docs/sdks/embeddings/README.md) +### [Embeddings](docs/sdks/embeddings/README.md) * [create](docs/sdks/embeddings/README.md#create) - Embeddings -### [files](docs/sdks/files/README.md) +### [Files](docs/sdks/files/README.md) * [upload](docs/sdks/files/README.md#upload) - Upload File * [list](docs/sdks/files/README.md#list) - List Files @@ -545,15 +540,12 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [download](docs/sdks/files/README.md#download) - Download File * [get_signed_url](docs/sdks/files/README.md#get_signed_url) - Get Signed Url -### [fim](docs/sdks/fim/README.md) +### [Fim](docs/sdks/fim/README.md) * [complete](docs/sdks/fim/README.md#complete) - Fim Completion * [stream](docs/sdks/fim/README.md#stream) - Stream fim completion -### [fine_tuning](docs/sdks/finetuning/README.md) - - -#### [fine_tuning.jobs](docs/sdks/jobs/README.md) +### [FineTuning.Jobs](docs/sdks/jobs/README.md) * [list](docs/sdks/jobs/README.md#list) - Get Fine Tuning Jobs * [create](docs/sdks/jobs/README.md#create) - Create Fine Tuning Job @@ -561,8 +553,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [cancel](docs/sdks/jobs/README.md#cancel) - Cancel Fine Tuning Job * [start](docs/sdks/jobs/README.md#start) - Start Fine Tuning Job - -### 
[models](docs/sdks/models/README.md) +### [Models](docs/sdks/models/README.md) * [list](docs/sdks/models/README.md#list) - List Models * [retrieve](docs/sdks/models/README.md#retrieve) - Retrieve Model @@ -571,7 +562,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [archive](docs/sdks/models/README.md#archive) - Archive Fine Tuned Model * [unarchive](docs/sdks/models/README.md#unarchive) - Unarchive Fine Tuned Model -### [ocr](docs/sdks/ocr/README.md) +### [Ocr](docs/sdks/ocr/README.md) * [process](docs/sdks/ocr/README.md#process) - OCR diff --git a/USAGE.md b/USAGE.md index b230b016..a31d502f 100644 --- a/USAGE.md +++ b/USAGE.md @@ -29,6 +29,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -82,6 +83,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -135,6 +137,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio @@ -188,6 +191,7 @@ with Mistral(
The same SDK client can also be used to make asynchronous requests by importing asyncio. + ```python # Asynchronous Example import asyncio diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index af768506..040bc24c 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -1,5 +1,4 @@ -# Accesses -(*beta.libraries.accesses*) +# Beta.Libraries.Accesses ## Overview diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 87a411cd..173925ee 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -1,5 +1,4 @@ # Agents -(*agents*) ## Overview diff --git a/docs/sdks/audio/README.md b/docs/sdks/audio/README.md deleted file mode 100644 index 2101c266..00000000 --- a/docs/sdks/audio/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Audio -(*audio*) - -## Overview - -### Available Operations diff --git a/docs/sdks/batch/README.md b/docs/sdks/batch/README.md deleted file mode 100644 index ec7d8340..00000000 --- a/docs/sdks/batch/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Batch -(*batch*) - -## Overview - -### Available Operations diff --git a/docs/sdks/beta/README.md b/docs/sdks/beta/README.md deleted file mode 100644 index f5b5f822..00000000 --- a/docs/sdks/beta/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Beta -(*beta*) - -## Overview - -### Available Operations diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 213ab710..5bb24baa 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -1,5 +1,4 @@ # Chat -(*chat*) ## Overview diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 55c46d2d..e76efb79 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -1,5 +1,4 @@ # Classifiers -(*classifiers*) ## Overview diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 1e2d560e..c488848c 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -1,5 
+1,4 @@ -# Conversations -(*beta.conversations*) +# Beta.Conversations ## Overview diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index c1551925..d3f5a975 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -1,5 +1,4 @@ -# Documents -(*beta.libraries.documents*) +# Beta.Libraries.Documents ## Overview diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index a071f3b2..4390b7bd 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -1,5 +1,4 @@ # Embeddings -(*embeddings*) ## Overview diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 0a68c1f5..f0dfd593 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -1,5 +1,4 @@ # Files -(*files*) ## Overview diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index d282a810..db6f2e1b 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -1,5 +1,4 @@ # Fim -(*fim*) ## Overview diff --git a/docs/sdks/finetuning/README.md b/docs/sdks/finetuning/README.md deleted file mode 100644 index 3e0f12ce..00000000 --- a/docs/sdks/finetuning/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# FineTuning -(*fine_tuning*) - -## Overview - -### Available Operations diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index b06170f8..666224a7 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -1,5 +1,4 @@ -# Jobs -(*fine_tuning.jobs*) +# FineTuning.Jobs ## Overview diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index 14d39f97..e672c190 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -1,5 +1,4 @@ -# Libraries -(*beta.libraries*) +# Beta.Libraries ## Overview diff --git a/docs/sdks/mistral/README.md b/docs/sdks/mistral/README.md deleted file mode 100644 index 4b9573d0..00000000 --- a/docs/sdks/mistral/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Mistral SDK - -## Overview - -Mistral AI 
API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. - -### Available Operations diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index 767ba56d..97819467 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -1,5 +1,4 @@ -# MistralAgents -(*beta.agents*) +# Beta.Agents ## Overview diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index 05cddb88..f1aa3f61 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -1,5 +1,4 @@ -# MistralJobs -(*batch.jobs*) +# Batch.Jobs ## Overview diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 94491520..d51866b6 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -1,5 +1,4 @@ # Models -(*models*) ## Overview diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 9264d104..6fd904cc 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -1,5 +1,4 @@ # Ocr -(*ocr*) ## Overview diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index 52b7884e..3243258c 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -1,5 +1,4 @@ -# Transcriptions -(*audio.transcriptions*) +# Audio.Transcriptions ## Overview diff --git a/packages/mistralai_azure/.gitignore b/packages/mistralai_azure/.gitignore index f2ea8c39..b386de74 100644 --- a/packages/mistralai_azure/.gitignore +++ b/packages/mistralai_azure/.gitignore @@ -1,3 +1,5 @@ +.env +.env.local **/__pycache__/ **/.speakeasy/temp/ **/.speakeasy/logs/ diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index ed28d2f6..45ed9b17 100644 --- 
a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,42 +1,726 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 3cd8710baef46375e8114574e63628e2 + docChecksum: eb0d097e3bdb7c0784f34ca2af2ce554 docVersion: 1.0.0 - speakeasyVersion: 1.606.10 - generationVersion: 2.687.13 - releaseVersion: 1.7.0 - configChecksum: 011849ab2544f97bfda12235028c7a00 + speakeasyVersion: 1.685.0 + generationVersion: 2.794.1 + releaseVersion: 1.8.1 + configChecksum: 0448ba634aa36625c6ac276e17e3b3b5 repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/mistralai_azure installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_azure published: true +persistentEdits: + generation_id: 0d580549-db09-4078-890b-62de0e5fe937 + pristine_commit_hash: b561cb140a25a721f54d0aad3c9a03d419c8fc19 + pristine_tree_hash: d122bdae045ddf46c910e2f5da53d78da18ef009 features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.20.1 + core: 5.23.18 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 examples: 3.0.2 flatRequests: 1.0.1 - globalSecurity: 3.0.3 + globalSecurity: 3.0.4 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.1 + globalServerURLs: 3.2.0 + includes: 3.0.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 nullables: 1.0.1 - openEnums: 1.0.1 + openEnums: 1.0.2 responseFormat: 1.0.1 - retries: 3.0.2 - sdkHooks: 1.1.0 - serverEvents: 1.0.8 + retries: 3.0.3 + sdkHooks: 1.2.0 + serverEvents: 1.0.11 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.4 + unions: 3.1.1 +trackedFiles: + .gitattributes: + id: 24139dae6567 + last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da + pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a + 
.vscode/settings.json: + id: 89aa447020cd + last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 + pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + docs/models/arguments.md: + id: 7ea5e33709a7 + last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec + pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 + docs/models/assistantmessage.md: + id: 7e0218023943 + last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38 + pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50 + docs/models/assistantmessagecontent.md: + id: 9f1795bbe642 + last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 + pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d + docs/models/assistantmessagerole.md: + id: bb5d2a4bc72f + last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91 + pristine_git_object: 658229e77eb6419391cf7941568164541c528387 + docs/models/chatcompletionchoice.md: + id: 0d15c59ab501 + last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 + pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1 + docs/models/chatcompletionchoicefinishreason.md: + id: 225764da91d3 + last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 + pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b + docs/models/chatcompletionrequest.md: + id: adffe90369d0 + last_write_checksum: sha1:de5476eb16a5ff75942b1ece68dbe547110dbbb8 + pristine_git_object: 104a1f96e60e1d4b86305dab2829be084b00b153 + docs/models/chatcompletionrequestmessages.md: + id: ec996b350e12 + last_write_checksum: sha1:2ecec8d12cdb48426f4eb62732066fc79fcd4ec3 + pristine_git_object: bc7708a67f06d74e8a5bf1facb2b23fb1e08053c + docs/models/chatcompletionrequeststop.md: + id: fcaf5bbea451 + last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 + pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485 + docs/models/chatcompletionrequesttoolchoice.md: + id: b97041b2f15b + 
last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 + pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34 + docs/models/chatcompletionresponse.md: + id: 7c53b24681b9 + last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931 + pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b + docs/models/chatcompletionstreamrequest.md: + id: cf8f29558a68 + last_write_checksum: sha1:f6bc4a0f064fc3420ae9b29c7e6fc9100ae85e4d + pristine_git_object: 85f237b4fc59ffc487377f150952284cc2102d85 + docs/models/chatcompletionstreamrequesttoolchoice.md: + id: 210d5e5b1413 + last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 + pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 + docs/models/completionchunk.md: + id: 60cb30423c60 + last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 + pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 + docs/models/completionevent.md: + id: e57cd17cb9dc + last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 + pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d + docs/models/completionresponsestreamchoice.md: + id: d56824d615a6 + last_write_checksum: sha1:dcf4b125b533192cb5aea1a68551866954712dc5 + pristine_git_object: c807dacd98eb3561ee45f40db71a92cb72b0f6de + docs/models/content.md: + id: bfd859c99f86 + last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8 + pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6 + docs/models/contentchunk.md: + id: d2d3a32080cd + last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d + pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f + docs/models/deltamessage.md: + id: 6c5ed6b60968 + last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58 + pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9 + docs/models/document.md: + id: cd1d2a444370 + last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52 + pristine_git_object: 
509d43b733d68d462853d9eb52fc913c855dff40 + docs/models/documenturlchunk.md: + id: 48437d297408 + last_write_checksum: sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54 + pristine_git_object: 6c9a5b4d9e6769be242b27ef0208f6af704689c0 + docs/models/documenturlchunktype.md: + id: a3574c91f539 + last_write_checksum: sha1:a0134fc0ea822d55b1204ee71140f2aa9d8dbe9c + pristine_git_object: 32e1fa9e975a3633fb49057b38b0ea0206b2d8ef + docs/models/filechunk.md: + id: edc076728e9d + last_write_checksum: sha1:07ab5db503211adba2fa099e66d12ac3c4bbf680 + pristine_git_object: 18217114060ac4e4b45fefabace4628684f27e5c + docs/models/finishreason.md: + id: 73315c2a39b3 + last_write_checksum: sha1:5b58c7fa9219f728b9731287e21abe1be9f11e4a + pristine_git_object: 45a5aedb7241cf080df3eb976a4413064d314009 + docs/models/format_.md: + id: a17c22228eda + last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 + pristine_git_object: 97d286a4ed7cff0a4058bbfa06c4573428182876 + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + pristine_git_object: b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/httpvalidationerror.md: + id: a211c095f2ac + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 + pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727 + 
pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 + docs/models/imageurlchunkimageurl.md: + id: c7fae88454ce + last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325 + pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064 + docs/models/imageurlchunktype.md: + id: b9af2db9ff60 + last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 + pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/messages.md: + id: 2103cd675c2f + last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244 + pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 + pristine_git_object: 7416e2037c507d19ac02aed914da1208a2fed0a1 + docs/models/ocrimageobject.md: + id: b72f3c5853b2 + last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 + pristine_git_object: 3c0d5544a80499b011467f29ef83d49f53801af6 + docs/models/ocrpagedimensions.md: + id: b3429f9883f5 + last_write_checksum: sha1:6435aa56e6153b0c90a546818ed780105ae1042a + pristine_git_object: c93ca64d5e20319ec6ec1bcb82b28c6ce0940f29 + docs/models/ocrpageobject.md: + id: 88a9e101b11e + last_write_checksum: sha1:091077fedf1b699d5160a21fe352056c247ef988 + pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e + docs/models/ocrrequest.md: + id: 6862a3fc2d0f + last_write_checksum: sha1:f32fcc5916f9eedf7adfaa60beda30a9ec42f32e + pristine_git_object: 76e4da925937fd4bdd42307f116a74d4dbf2bea3 + docs/models/ocrresponse.md: + id: 30042328fb78 + last_write_checksum: 
sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 + pristine_git_object: 0a309317644eedc643009b6cec3a7dbb142b1a15 + docs/models/ocrtableobject.md: + id: c967796380e6 + last_write_checksum: sha1:3b78858cc130fc8792ec3d149c8f657fd3f7a4c3 + pristine_git_object: 4e27697c15983f86274648b2d7bacac557081630 + docs/models/ocrusageinfo.md: + id: 419abbb8353a + last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e + pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c + docs/models/prediction.md: + id: 3c70b2262201 + last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 + pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc + docs/models/referencechunktype.md: + id: 0944b80ea9c8 + last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 + pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/role.md: + id: b694540a5b1e + last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d + pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053 + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 + pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 + docs/models/stop.md: + id: f231cc9f5041 + last_write_checksum: sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8 + pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846 + docs/models/systemmessage.md: + id: fdb7963e1cdf + 
last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f + pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/tableformat.md: + id: d8cd08c55c3c + last_write_checksum: sha1:e0736ea9576466d71821aa1e67fc632cc5a85414 + pristine_git_object: 54f029b814fdcfa2e93e2b8b0594ef9e4eab792a + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:6d41d1991d122805734ed0d90ee01592aa5ae6ff + pristine_git_object: 6daab3c381bd8c13d2935bf62578648a8470fc76 + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:feb95a931bb9cdbfe28ab351618687e513cf830b + pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab + docs/models/thinkchunktype.md: + id: 0fbeed985341 + last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 + pristine_git_object: baf6f755252d027295be082b53ecf80555039414 + docs/models/thinking.md: + id: 07234f8dd364 + last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 + pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolchoice.md: + id: "097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + docs/models/toolchoiceenum.md: + 
id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167 + pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/toolmessagerole.md: + id: f333d4d1ab56 + last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f + pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6 + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 + docs/models/type.md: + id: 98c32f09b2c8 + last_write_checksum: sha1:8aa9ca999e9648ddc2240bf80780684e3e858ddf + pristine_git_object: eb0581e7174b6951d69c485a64af5244cb8687fa + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed + pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/usermessagerole.md: + id: 99ffa937c462 + last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111 + pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4 + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + 
docs/models/validationerror.md: + id: 304bdf06ef8b + last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 + pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + pylintrc: + id: 7ce8b9f946e6 + last_write_checksum: sha1:6b615d49741eb9ae16375d3a499767783d1128a1 + pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 + scripts/prepare_readme.py: + id: e0c5957a6035 + last_write_checksum: sha1:2291075229aebf4e036800b5b9299b37fcb8707c + pristine_git_object: ff1121fda23730f356d2df2ad17c8e991b9fc605 + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 + pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 + src/mistralai_azure/__init__.py: + id: 3cd9e92c2f72 + last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b + pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c + src/mistralai_azure/_hooks/__init__.py: + id: 66932eacf398 + last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d + pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + src/mistralai_azure/_hooks/sdkhooks.py: + id: 1184c9201c62 + last_write_checksum: sha1:c98774db1664db2bc6d80e8a5f4f5133260f201a + pristine_git_object: 37ff4e9f0ebd42a58ada6300098a5b1b85a54b69 + src/mistralai_azure/_hooks/types.py: + id: a32fe1943bce + last_write_checksum: sha1:78fc31840a38e668a73871885c779929196a8bec + pristine_git_object: 0c22d7ebccdd64097033454b7c698d10ee59987d + src/mistralai_azure/_version.py: + id: 7711a0bb1da3 + last_write_checksum: sha1:9a446d67d6a86cdf9d9e3447c1c09a4f719b2c9b + pristine_git_object: 79277f9a358b4c851363e11e1e8f534779e9f271 + src/mistralai_azure/basesdk.py: + id: 7d825dbc7d6e + last_write_checksum: sha1:4070786599952b3c603d1384d87d7b92bb13b974 + pristine_git_object: 
89f7dc493d7f50d5f2d3f468c0a8392a6ec5e28b + src/mistralai_azure/chat.py: + id: ebf1c99bea88 + last_write_checksum: sha1:2d78fa9e8b3e300e18b6fb3bc116e824261efb55 + pristine_git_object: 10bb247fb89f0f9ef110300224c95f2a7653ad2f + src/mistralai_azure/httpclient.py: + id: 808a3f534ffa + last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 + pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + src/mistralai_azure/models/__init__.py: + id: e5fcf3933d2c + last_write_checksum: sha1:43f0ebb24f64a382fd18202da65a321d9925dbee + pristine_git_object: 9baa3ff1865cd8aec9e9b93d6e3c315e8c7870c5 + src/mistralai_azure/models/assistantmessage.py: + id: 15f117b45380 + last_write_checksum: sha1:3c2872d06ad465dbbbedcf8d397d1f12961e1e2e + pristine_git_object: 7790eb10a034d892c3c1e793c412c75ff8820e40 + src/mistralai_azure/models/chatcompletionchoice.py: + id: 93cfc6cec0d2 + last_write_checksum: sha1:f5dfcf407d8abd5ce8eb23f058c589861c71a0df + pristine_git_object: 7c6eb933faf09cc5c6102575d371ac280f2e242d + src/mistralai_azure/models/chatcompletionrequest.py: + id: d046a16b5e58 + last_write_checksum: sha1:bc1e0b5c8b11bfef5a9b135436c2f3f555a11fa3 + pristine_git_object: a7b095f34c572e1805650e44f847946280ccb3fe + src/mistralai_azure/models/chatcompletionresponse.py: + id: fc342e80f579 + last_write_checksum: sha1:a93593ec734420bc122f0b0b8c49d630795f1d42 + pristine_git_object: 7a66f3221a154b1a0f47c4f808ece8e580280548 + src/mistralai_azure/models/chatcompletionstreamrequest.py: + id: 1052b055a791 + last_write_checksum: sha1:18f71c5eeda25d23f2c82ddcdb710a20b44b806c + pristine_git_object: 96cd631b0ed74e5c82c6e2492011001021b019f8 + src/mistralai_azure/models/completionchunk.py: + id: e04bc380589d + last_write_checksum: sha1:490c3236276ae8fdecb883744e263aecbe4c608c + pristine_git_object: d6cc2a86a1fda1ebce1f3c5a169ab1118705e3f0 + src/mistralai_azure/models/completionevent.py: + id: e75909f919b1 + last_write_checksum: sha1:9f5423ad56747fb7cc95a6f01e0826510571d4c1 + 
pristine_git_object: 5a2039c2492bab82184b4f2469806f8b977a7246 + src/mistralai_azure/models/completionresponsestreamchoice.py: + id: 24fe265a60d8 + last_write_checksum: sha1:df52342c3458cca6396d538c5d9a42f07131796d + pristine_git_object: 0e890aacf79f8f220f585d914c6fbe8863232036 + src/mistralai_azure/models/contentchunk.py: + id: 9e6b90acdf54 + last_write_checksum: sha1:e93c57ef87654a06d8849030f65db3d279f8f7ad + pristine_git_object: e6a3e24a8857ea1661874197eec967f0ac99e31d + src/mistralai_azure/models/deltamessage.py: + id: 593eaaeda97b + last_write_checksum: sha1:9c2f6e52c81d2f5bf71f520861158dc5eae6eab7 + pristine_git_object: 7fa3c3f216153ebc0a2d31e590793698e95a8be8 + src/mistralai_azure/models/documenturlchunk.py: + id: bff69bfa8014 + last_write_checksum: sha1:5c515c4c85b78d8f4cf147faab9cf01c3501e0b9 + pristine_git_object: ea8d5625a6d1579dd60f2e4a067f455c82334986 + src/mistralai_azure/models/filechunk.py: + id: 0de687fe41c1 + last_write_checksum: sha1:56a1765b46702d24ee9c00ab3a06ccdbffdd63f9 + pristine_git_object: 2c3edc078b5e781b4d7163ab01e02a3347c81e2f + src/mistralai_azure/models/function.py: + id: 16111a6101f2 + last_write_checksum: sha1:456d34df457592f1975b0d1e158207d4446a6c41 + pristine_git_object: a4642f92a0cf614b458591c220a83ae1c422ce25 + src/mistralai_azure/models/functioncall.py: + id: e383b31a7f16 + last_write_checksum: sha1:cec288f925fa58842bb7d9e688f6122a01973d4b + pristine_git_object: dd93c4629c3bd81dd6fb305474ce0cd5443e1bdb + src/mistralai_azure/models/functionname.py: + id: ebc3e07e4b6f + last_write_checksum: sha1:743cec4c3f586d67d1ab2816d8d76170f46a3ca1 + pristine_git_object: b55c82af3f29efe38698bc776a8532c647dccc36 + src/mistralai_azure/models/httpvalidationerror.py: + id: da4825943f94 + last_write_checksum: sha1:dce58ead8f7f901514250e1ae5965ba039b1da14 + pristine_git_object: 56607d9437ce39097deac134d4f622ea523cbda7 + src/mistralai_azure/models/imageurl.py: + id: 80cc0df94e9d + last_write_checksum: sha1:a1a416ae5bf9c559219cff5f008a90f251a52477 + 
pristine_git_object: a5a66360b017cbdc342775241aa4aa2322534c6a + src/mistralai_azure/models/imageurlchunk.py: + id: c5c6dd2f1782 + last_write_checksum: sha1:11634325be12aa567b42227f2117e9b8c854a51c + pristine_git_object: a40e451c60caca688a9379dcb20d545e9e6b76e2 + src/mistralai_azure/models/jsonschema.py: + id: 8c635811dd6b + last_write_checksum: sha1:a99a6de224e51eb6cf85fa6de8cf37266ab5fe6d + pristine_git_object: 0f7563fc17bf172d527d09507294b4ef5646c22c + src/mistralai_azure/models/mistralazureerror.py: + id: a919897c4ea9 + last_write_checksum: sha1:25f4411c7411faad753d46118edf74828b1c9f7c + pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 + src/mistralai_azure/models/mistralpromptmode.py: + id: f62a521bcdae + last_write_checksum: sha1:fcb16c10986bd6946f79b9e330a4be9f26f7e724 + pristine_git_object: 22fb643896688b68af238f6ac75cf41a00b0511b + src/mistralai_azure/models/no_response_error.py: + id: 54523e14f29b + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai_azure/models/ocrimageobject.py: + id: 6c349909fb0c + last_write_checksum: sha1:0fed6abf8172f6ee40e703ef86ee9d902c6e5d7e + pristine_git_object: 9d0dd01dbb5be095e234aa3ec9469fface68c3d2 + src/mistralai_azure/models/ocrpagedimensions.py: + id: f33f598001b2 + last_write_checksum: sha1:5281879ef3d737a17a539cefda9f222302ead7da + pristine_git_object: efb62a58f22ad62c730b3af93bff151586105957 + src/mistralai_azure/models/ocrpageobject.py: + id: 99f20768c4d6 + last_write_checksum: sha1:c7479b83b0eb619e6b0f82344e81bc691f0b3a46 + pristine_git_object: e95718001e07bb89ba2fc9094f88b894572148bb + src/mistralai_azure/models/ocrrequest.py: + id: 4e574d5fb9be + last_write_checksum: sha1:6ca937598dd92c6c6ab7b8d59363595a3e8760e9 + pristine_git_object: 565a0a30a7f9fae374c14fb5fcb0f19385cf05e4 + src/mistralai_azure/models/ocrresponse.py: + id: 326a4d9fab25 + last_write_checksum: sha1:cf597498a5841a56bbd1aeb8478bd57a01d93cb1 + 
pristine_git_object: 3e43fa8eb7b80fafbd9344ad5a98c0ead98c54cb + src/mistralai_azure/models/ocrtableobject.py: + id: 3ba1292c343a + last_write_checksum: sha1:2d1d05902a9ed6bccdb41ccac6782f015450cf2e + pristine_git_object: 189f059eaa8a32cc32a5320ea9fe33d779e8ef1c + src/mistralai_azure/models/ocrusageinfo.py: + id: 0de4eae62e4b + last_write_checksum: sha1:85e5a850bd2f847e4a02b0731b0327ca0a02f643 + pristine_git_object: 1f5c9f1bc2cf2d728dec06b0930602852474a29e + src/mistralai_azure/models/prediction.py: + id: 9e8a0a7a3ca7 + last_write_checksum: sha1:e78af600f109a7489a5bcce80b48adf29cc0c4c3 + pristine_git_object: b23a935c00cd7ce4e7b7bd6fe8f2da87f8aaca92 + src/mistralai_azure/models/referencechunk.py: + id: 420a12dfec3b + last_write_checksum: sha1:f49da7a4541f55b283e9391e6397a9e4286570bd + pristine_git_object: 32d2ca68e67be3f03e14f74fd7e7692fa05b70f5 + src/mistralai_azure/models/responseformat.py: + id: aa7acbc1bda7 + last_write_checksum: sha1:70e7960bb4ec5db5f133c4cc8f6e813e39f8c671 + pristine_git_object: c989f3a4467c21416ea59b33fbc734a1477a6eb3 + src/mistralai_azure/models/responseformats.py: + id: 780a7aa0e87e + last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b + pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 + src/mistralai_azure/models/responsevalidationerror.py: + id: 1952c765e2ec + last_write_checksum: sha1:d516c0c88210dd28b65747daa2fa1b63f432fe89 + pristine_git_object: a33954ccead3a8df87bdcc30a090efbb0ebecb94 + src/mistralai_azure/models/sdkerror.py: + id: bd8616367442 + last_write_checksum: sha1:41c259fac1bd50d33f1a2fd64d1ed17fd8d0d075 + pristine_git_object: 216d7f8fca986ac29162a1a7cba8c18b7f73d012 + src/mistralai_azure/models/security.py: + id: 7b3bcb55164e + last_write_checksum: sha1:9cacce270a27809ded4ee91aecac4a681154f5f0 + pristine_git_object: c1ae83138b09eab742f755a0f11428cf8c0fd60d + src/mistralai_azure/models/systemmessage.py: + id: 2e15bb043753 + last_write_checksum: sha1:8ec96bfc0533414a698d92387021cac116eadade + 
pristine_git_object: f99bf4ffb112b068159a3b95bc99ec7ce91b3f7d + src/mistralai_azure/models/systemmessagecontentchunks.py: + id: b6d9a4838359 + last_write_checksum: sha1:1e3f4688317d10f207dd42ef39cf2ac8f6042e54 + pristine_git_object: 4615a16cf39496dffc4982c6f0552d8bf353e280 + src/mistralai_azure/models/textchunk.py: + id: c169e3f0ffc9 + last_write_checksum: sha1:6cb623bafd4005e527dca9b908bb9f4b371342da + pristine_git_object: 5845456e5ca3089bcb551112408a0de84c597a91 + src/mistralai_azure/models/thinkchunk.py: + id: b1b9aeee4dcf + last_write_checksum: sha1:d15b39ef3e12195183664c32854233b9410d565b + pristine_git_object: f53a9f1ad2e6f124a36c9fb9be65bc09dbfbff4b + src/mistralai_azure/models/tool.py: + id: 99c8106f5428 + last_write_checksum: sha1:6142383805723bbc2b22f1bfcc660288378d1e42 + pristine_git_object: c91deec28488062a0220af41492fdfb34330e7a4 + src/mistralai_azure/models/toolcall.py: + id: 3643db1054cd + last_write_checksum: sha1:9b095f1efe1ea554cfacbc4a8e0c59b1c57d7f32 + pristine_git_object: 44fe8ec86b8f31ad8ee9591ae49036e8caa9ac41 + src/mistralai_azure/models/toolchoice.py: + id: 669768b7cbda + last_write_checksum: sha1:1217d8186e64d16f4c369079c62e3ac466726c60 + pristine_git_object: 93b4b7fe72f05a2ece9fed08a83139f4510b2574 + src/mistralai_azure/models/toolchoiceenum.py: + id: 5f7df8457771 + last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 + pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 + src/mistralai_azure/models/toolmessage.py: + id: 1d9845bf98b3 + last_write_checksum: sha1:52bd15280bcae27ec7ba6a1c64b15648de5b0868 + pristine_git_object: 4bc5c9a9b509fdb89a4cf5ce81231189bf46bab4 + src/mistralai_azure/models/tooltypes.py: + id: 34c499f03e21 + last_write_checksum: sha1:f060bd3aebf7d42c1066c543c47cfa020e61eb27 + pristine_git_object: 638890c589ee642fd0a43e00337505e53ea3ec3a + src/mistralai_azure/models/usageinfo.py: + id: 59a5033672bf + last_write_checksum: sha1:7d0e7a483331077309b78e035cab9d65e87d3f65 + pristine_git_object: 
bbe5cdfaae260df81e93da11d05a1ba55ecbe329 + src/mistralai_azure/models/usermessage.py: + id: c54119314021 + last_write_checksum: sha1:b45f38755a96b07100baf5149631f366009e701f + pristine_git_object: 85fedb4bd1bcf64f69e4ead5310cf3fb354a6e3c + src/mistralai_azure/models/validationerror.py: + id: 83cd7bfd6d92 + last_write_checksum: sha1:250ed57498dabd11c0e2b6d255969e0285bb4214 + pristine_git_object: 4caff4a6b74aeb322bf42cd2070b7bd576ca834a + src/mistralai_azure/ocr.py: + id: 77e2e0f594ad + last_write_checksum: sha1:a455095c62c2dfad071d70682c2f57e7d64934db + pristine_git_object: da823f816dda9d462a795e9b946d5634ff6d48e2 + src/mistralai_azure/py.typed: + id: 98df238e554c + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai_azure/sdkconfiguration.py: + id: 476a4f9e2f3e + last_write_checksum: sha1:6b117889b46a546be6e949c1bf843834ceff7417 + pristine_git_object: 51289cf05559ba32dd17e45fab78df4a8697063f + src/mistralai_azure/types/__init__.py: + id: d761bb7a67a5 + last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed + pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + src/mistralai_azure/types/basemodel.py: + id: 68c97875efb7 + last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 + pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee + src/mistralai_azure/utils/__init__.py: + id: 3c68abef839b + last_write_checksum: sha1:887f56a717845fab7445cc368d2a17d850c3565a + pristine_git_object: 05f26ade57efb8c54a774fbcb939fb1a7dc655ce + src/mistralai_azure/utils/annotations.py: + id: 476ee839718f + last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc + pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + src/mistralai_azure/utils/datetimes.py: + id: e9faf3b28c48 + last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 + pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + 
src/mistralai_azure/utils/enums.py: + id: 4d10693bf655 + last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d + pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 + src/mistralai_azure/utils/eventstreaming.py: + id: 5f5e90529fd7 + last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b + pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + src/mistralai_azure/utils/forms.py: + id: 91c3fe9ba311 + last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 + pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + src/mistralai_azure/utils/headers.py: + id: d37ef2f03e41 + last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 + pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + src/mistralai_azure/utils/logger.py: + id: 9122a46617cc + last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 + pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 + src/mistralai_azure/utils/metadata.py: + id: 2d93fa8523eb + last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 + pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + src/mistralai_azure/utils/queryparams.py: + id: dfd31ba97c2b + last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 + pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + src/mistralai_azure/utils/requestbodies.py: + id: c91db641d5b9 + last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 + pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c + src/mistralai_azure/utils/retries.py: + id: 6f0cd9f6169d + last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 + pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + src/mistralai_azure/utils/security.py: + id: "270040388028" + last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 + pristine_git_object: 295a3f40031dbb40073ad227fd4a355660f97ab2 + src/mistralai_azure/utils/serializers.py: + id: 595ddab03803 + 
last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 + pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + src/mistralai_azure/utils/unmarshal_json_response.py: + id: bde89a892417 + last_write_checksum: sha1:d2ce9e3478b38e54e4bb3a43610ee0bab00c2e27 + pristine_git_object: f5813119b559442ee85c0b310765db3866bfa09d + src/mistralai_azure/utils/url.py: + id: 080c62716b06 + last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 + pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + src/mistralai_azure/utils/values.py: + id: 640889083cda + last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 + pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 +examples: + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + responses: + "422": + application/json: {} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + ocr_v1_ocr_post: + speakeasy-default-ocr-v1-ocr-post: + requestBody: + application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}} + responses: + "200": + application/json: {"pages": [], "model": "Golf", "usage_info": {"pages_processed": 944919}} + "422": + application/json: {} + userExample: + requestBody: + application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + responses: + "200": + application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. 
ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). 
Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | 
(0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) 
| (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) 
| (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 
15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} +examplesVersion: 1.0.2 +generatedTests: {} generatedFiles: - .gitattributes - .vscode/settings.json @@ -195,38 +879,3 @@ generatedFiles: - src/mistralai_azure/utils/unmarshal_json_response.py - src/mistralai_azure/utils/url.py - src/mistralai_azure/utils/values.py -examples: - stream_chat: - speakeasy-default-stream-chat: - requestBody: - application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} - responses: - "422": - application/json: {} - chat_completion_v1_chat_completions_post: - speakeasy-default-chat-completion-v1-chat-completions-post: - requestBody: - application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} - responses: - "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} - "422": - application/json: {} - ocr_v1_ocr_post: - speakeasy-default-ocr-v1-ocr-post: - requestBody: - application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}} - responses: - "200": - application/json: {"pages": [], "model": "Golf", "usage_info": {"pages_processed": 944919}} - "422": - application/json: {} - userExample: - requestBody: - application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} - responses: - "200": - application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. 
ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). 
Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | 
(0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) 
| (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) 
| (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 
15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} -examplesVersion: 1.0.2 -generatedTests: {} -releaseNotes: "## SDK Changes Detected:\n* `mistral_azure.chat.complete()`: \n * `request.messages.[].[assistant].content.[array]` **Changed** **Breaking** :warning:\n" diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/mistralai_azure/.speakeasy/gen.yaml index ff7c32f5..e2be4d84 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/mistralai_azure/.speakeasy/gen.yaml @@ -16,12 +16,17 @@ generation: auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + hoistGlobalSecurity: true + schemas: + allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" + persistentEdits: {} tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.7.0 + version: 1.8.1 additionalDependencies: dev: pytest: ^8.2.2 @@ -29,10 +34,12 @@ python: allowedRedefinedBuiltins: - id - object + asyncMode: both authors: - Mistral baseErrorName: MistralAzureError 
clientServerStatusCodesAsErrors: true + constFieldCasing: upper defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in Azure. enableCustomCodeRegions: false @@ -50,14 +57,19 @@ python: operations: "" shared: "" webhooks: "" + inferUnionDiscriminators: true inputModelSuffix: input + license: "" maxMethodParams: 15 methodArguments: infer-optional-args moduleName: "" + multipartArrayFormat: legacy outputModelSuffix: output packageManager: uv packageName: mistralai_azure + preApplyUnionDiscriminators: false pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat + sseFlatResponse: false templateVersion: v2 diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/mistralai_azure/docs/models/chatcompletionrequest.md index be296b4b..104a1f96 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionrequest.md @@ -12,6 +12,7 @@ | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md index 03ad3291..85f237b4 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md @@ -12,6 +12,7 @@ | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | diff --git a/packages/mistralai_azure/docs/models/format_.md b/packages/mistralai_azure/docs/models/format_.md new file mode 100644 index 00000000..97d286a4 --- /dev/null +++ b/packages/mistralai_azure/docs/models/format_.md @@ -0,0 +1,11 @@ +# Format + +Format of the table + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/ocrpageobject.md b/packages/mistralai_azure/docs/models/ocrpageobject.md index 9db3bb77..02473d44 100644 --- a/packages/mistralai_azure/docs/models/ocrpageobject.md +++ b/packages/mistralai_azure/docs/models/ocrpageobject.md @@ -8,4 +8,8 @@ | `index` | *int* | :heavy_check_mark: | The page index in a pdf document starting from 0 | | `markdown` | *str* | :heavy_check_mark: | The markdown string response of the page | | `images` | List[[models.OCRImageObject](../models/ocrimageobject.md)] | :heavy_check_mark: | List of all extracted images in the page | +| `tables` | List[[models.OCRTableObject](../models/ocrtableobject.md)] | :heavy_minus_sign: | List of all extracted tables in the page | +| `hyperlinks` | List[*str*] | :heavy_minus_sign: | List of all hyperlinks in the page | +| `header` | *OptionalNullable[str]* | :heavy_minus_sign: | Header of the page | +| `footer` | *OptionalNullable[str]* | :heavy_minus_sign: | Footer of the page | | `dimensions` | [Nullable[models.OCRPageDimensions]](../models/ocrpagedimensions.md) | :heavy_check_mark: | The dimensions of the PDF Page's screenshot image | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/ocrrequest.md b/packages/mistralai_azure/docs/models/ocrrequest.md index 0ec824d2..76e4da92 100644 --- 
a/packages/mistralai_azure/docs/models/ocrrequest.md +++ b/packages/mistralai_azure/docs/models/ocrrequest.md @@ -13,4 +13,7 @@ | `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | | `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | | `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | \ No newline at end of file +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | +| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/ocrtableobject.md b/packages/mistralai_azure/docs/models/ocrtableobject.md new file mode 100644 index 00000000..4e27697c --- /dev/null +++ b/packages/mistralai_azure/docs/models/ocrtableobject.md @@ -0,0 +1,10 @@ +# OCRTableObject + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `id` | *str* | :heavy_check_mark: | Table ID for extracted table in a page | +| `content` | *str* | :heavy_check_mark: | Content of the table in the given format | +| `format_` | [models.Format](../models/format_.md) | :heavy_check_mark: | Format of the table | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/tableformat.md b/packages/mistralai_azure/docs/models/tableformat.md new file mode 100644 index 00000000..54f029b8 --- /dev/null +++ b/packages/mistralai_azure/docs/models/tableformat.md @@ -0,0 +1,9 @@ +# TableFormat + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `MARKDOWN` | markdown | +| `HTML` | html | \ No newline at end of file diff --git a/packages/mistralai_azure/pyproject.toml b/packages/mistralai_azure/pyproject.toml index 2f5c92f6..d129a290 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/mistralai_azure/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai_azure" -version = "1.6.0" +version = "1.8.0" description = "Python Client SDK for the Mistral AI API in Azure." 
authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/mistralai_azure/src/mistralai_azure/_version.py index 30bc3ab9..79277f9a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/mistralai_azure/src/mistralai_azure/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai_azure" -__version__: str = "1.7.0" +__version__: str = "1.8.1" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.687.13" -__user_agent__: str = "speakeasy-sdk/python 1.7.0 2.687.13 1.0.0 mistralai_azure" +__gen_version__: str = "2.794.1" +__user_agent__: str = "speakeasy-sdk/python 1.8.1 2.794.1 1.0.0 mistralai_azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/mistralai_azure/src/mistralai_azure/basesdk.py index 0ac7e5a6..89f7dc49 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/mistralai_azure/src/mistralai_azure/basesdk.py @@ -60,6 +60,7 @@ def _build_request_async( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.async_client return self._build_request_with_client( @@ -80,6 +81,7 @@ def _build_request_async( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request( @@ -102,6 +104,7 @@ def _build_request( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.client return self._build_request_with_client( @@ -122,6 +125,7 @@ def _build_request( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request_with_client( @@ -145,6 +149,7 @@ def _build_request_with_client( ] = None, 
url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: query_params = {} @@ -160,6 +165,7 @@ def _build_request_with_client( query_params = utils.get_query_params( request if request_has_query_params else None, _globals if request_has_query_params else None, + allow_empty_value, ) else: # Pick up the query parameter from the override so they can be diff --git a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/mistralai_azure/src/mistralai_azure/chat.py index a3ef1a63..10bb247f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/mistralai_azure/src/mistralai_azure/chat.py @@ -6,7 +6,7 @@ from mistralai_azure.types import OptionalNullable, UNSET from mistralai_azure.utils import eventstreaming from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Union class Chat(BaseSDK): @@ -23,6 +23,7 @@ def stream( stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -48,7 +49,7 @@ def stream( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: + ) -> eventstreaming.EventStream[models.CompletionEvent]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -61,6 +62,7 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. 
@@ -94,6 +96,7 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model(messages, List[models.Messages]), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -129,6 +132,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -145,7 +149,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -189,6 +193,7 @@ async def stream_async( stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -214,7 +219,7 @@ async def stream_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -227,6 +232,7 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. 
If set, different calls will generate deterministic results. + :param metadata: :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. 
@@ -260,6 +266,7 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model(messages, List[models.Messages]), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -295,6 +302,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -311,7 +319,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -363,6 +371,7 @@ def complete( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -388,7 +397,7 @@ def complete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Chat Completion :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. @@ -399,6 +408,7 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ :param metadata: :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. 
@@ -432,6 +442,7 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.ChatCompletionRequestMessages] ), @@ -469,6 +480,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -485,7 +497,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -495,9 +507,7 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - Optional[models.ChatCompletionResponse], http_res - ) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -531,6 +541,7 @@ async def complete_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -556,7 +567,7 @@ async def complete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Chat Completion :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. @@ -567,6 +578,7 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. 
@@ -600,6 +612,7 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.ChatCompletionRequestMessages] ), @@ -637,6 +650,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -653,7 +667,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -663,9 +677,7 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - Optional[models.ChatCompletionResponse], http_res - ) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/mistralai_azure/src/mistralai_azure/httpclient.py index 47b052cb..89560b56 100644 --- a/packages/mistralai_azure/src/mistralai_azure/httpclient.py +++ b/packages/mistralai_azure/src/mistralai_azure/httpclient.py @@ -107,7 +107,6 @@ def close_clients( # to them from the owning SDK instance and they can be reaped. 
owner.client = None owner.async_client = None - if sync_client is not None and not sync_client_supplied: try: sync_client.close() diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py index 140eec88..9baa3ff1 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/__init__.py @@ -86,8 +86,15 @@ from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict - from .ocrrequest import Document, DocumentTypedDict, OCRRequest, OCRRequestTypedDict + from .ocrrequest import ( + Document, + DocumentTypedDict, + OCRRequest, + OCRRequestTypedDict, + TableFormat, + ) from .ocrresponse import OCRResponse, OCRResponseTypedDict + from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict from .prediction import Prediction, PredictionTypedDict from .referencechunk import ( @@ -191,6 +198,7 @@ "FileChunk", "FileChunkTypedDict", "FinishReason", + "Format", "Function", "FunctionCall", "FunctionCallTypedDict", @@ -225,6 +233,8 @@ "OCRRequestTypedDict", "OCRResponse", "OCRResponseTypedDict", + "OCRTableObject", + "OCRTableObjectTypedDict", "OCRUsageInfo", "OCRUsageInfoTypedDict", "Prediction", @@ -248,6 +258,7 @@ "SystemMessageContentChunksTypedDict", "SystemMessageContentTypedDict", "SystemMessageTypedDict", + "TableFormat", "TextChunk", "TextChunkTypedDict", "ThinkChunk", @@ -356,8 +367,12 @@ "DocumentTypedDict": ".ocrrequest", "OCRRequest": ".ocrrequest", "OCRRequestTypedDict": ".ocrrequest", + "TableFormat": ".ocrrequest", "OCRResponse": ".ocrresponse", "OCRResponseTypedDict": ".ocrresponse", + "Format": ".ocrtableobject", + "OCRTableObject": ".ocrtableobject", + "OCRTableObjectTypedDict": 
".ocrtableobject", "OCRUsageInfo": ".ocrusageinfo", "OCRUsageInfoTypedDict": ".ocrusageinfo", "Prediction": ".prediction", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py index 86f5ec09..7790eb10 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py @@ -25,7 +25,7 @@ ) -AssistantMessageRole = Literal["assistant"] +AssistantMessageRole = Literal["assistant",] class AssistantMessageTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py index a78b72d5..7c6eb933 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py @@ -3,14 +3,19 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from mistralai_azure.types import BaseModel, UnrecognizedStr -from mistralai_azure.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Literal, Union -from typing_extensions import Annotated, TypedDict +from typing_extensions import TypedDict ChatCompletionChoiceFinishReason = Union[ - Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, ] @@ -25,6 +30,4 @@ class ChatCompletionChoice(BaseModel): message: AssistantMessage - finish_reason: Annotated[ - ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False)) - ] + finish_reason: ChatCompletionChoiceFinishReason diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py 
b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py index ecb33b81..a7b095f3 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py @@ -18,10 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_azure.utils import get_discriminator, validate_open_enum +from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -89,6 +88,7 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] @@ -136,6 +136,8 @@ class ChatCompletionRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" @@ -160,9 +162,7 @@ class ChatCompletionRequest(BaseModel): parallel_tool_calls: Optional[bool] = None r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = UNSET + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: Optional[bool] = None @@ -178,6 +178,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -193,6 +194,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "tools", "n", "prompt_mode", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py index d13faa08..96cd631b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py @@ -18,10 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_azure.utils import get_discriminator, validate_open_enum +from mistralai_azure.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -84,6 +83,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] @@ -130,6 +130,8 @@ class ChatCompletionStreamRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" @@ -154,9 +156,7 @@ class ChatCompletionStreamRequest(BaseModel): parallel_tool_calls: Optional[bool] = None r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = UNSET + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: Optional[bool] = None @@ -172,6 +172,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -187,6 +188,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "tools", "n", "prompt_mode", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py index 1a492204..0e890aac 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py @@ -3,14 +3,20 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr -from mistralai_azure.utils import validate_open_enum from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator from typing import Literal, Union -from typing_extensions import Annotated, TypedDict +from typing_extensions import TypedDict -FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr] +FinishReason = Union[ + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] class CompletionResponseStreamChoiceTypedDict(TypedDict): @@ -24,9 +30,7 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Annotated[ - Nullable[FinishReason], PlainValidator(validate_open_enum(False)) - ] + finish_reason: Nullable[FinishReason] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py 
index 23ff71a6..ea8d5625 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py @@ -13,7 +13,7 @@ from typing_extensions import NotRequired, TypedDict -DocumentURLChunkType = Literal["document_url"] +DocumentURLChunkType = Literal["document_url",] class DocumentURLChunkTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py index 92eac6a1..56607d94 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py @@ -2,6 +2,7 @@ from __future__ import annotations from .validationerror import ValidationError +from dataclasses import dataclass, field import httpx from mistralai_azure.models import MistralAzureError from mistralai_azure.types import BaseModel @@ -12,8 +13,9 @@ class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None +@dataclass(unsafe_hash=True) class HTTPValidationError(MistralAzureError): - data: HTTPValidationErrorData + data: HTTPValidationErrorData = field(hash=False) def __init__( self, @@ -23,4 +25,4 @@ def __init__( ): message = body or raw_response.text super().__init__(message, raw_response, body) - self.data = data + object.__setattr__(self, "data", data) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py index 734d7f79..a40e451c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py @@ -15,7 +15,7 @@ ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) -ImageURLChunkType = Literal["image_url"] +ImageURLChunkType = Literal["image_url",] class 
ImageURLChunkTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py b/packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py index 9e45af0e..c5bf1752 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py @@ -2,25 +2,29 @@ import httpx from typing import Optional +from dataclasses import dataclass, field +@dataclass(unsafe_hash=True) class MistralAzureError(Exception): """The base class for all HTTP error responses.""" message: str status_code: int body: str - headers: httpx.Headers - raw_response: httpx.Response + headers: httpx.Headers = field(hash=False) + raw_response: httpx.Response = field(hash=False) def __init__( self, message: str, raw_response: httpx.Response, body: Optional[str] = None ): - self.message = message - self.status_code = raw_response.status_code - self.body = body if body is not None else raw_response.text - self.headers = raw_response.headers - self.raw_response = raw_response + object.__setattr__(self, "message", message) + object.__setattr__(self, "status_code", raw_response.status_code) + object.__setattr__( + self, "body", body if body is not None else raw_response.text + ) + object.__setattr__(self, "headers", raw_response.headers) + object.__setattr__(self, "raw_response", raw_response) def __str__(self): return self.message diff --git a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py index bd4584a5..22fb6438 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py @@ -5,4 +5,4 @@ from typing import Literal, Union -MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] diff 
--git a/packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py b/packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py index f98beea2..1deab64b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py @@ -1,12 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from dataclasses import dataclass + + +@dataclass(unsafe_hash=True) class NoResponseError(Exception): """Error raised when no HTTP response is received from the server.""" message: str def __init__(self, message: str = "No response received"): - self.message = message + object.__setattr__(self, "message", message) super().__init__(message) def __str__(self): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py b/packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py index 4438e732..e9571800 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py @@ -3,10 +3,17 @@ from __future__ import annotations from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict -from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL +from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict +from mistralai_azure.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from pydantic import model_serializer -from typing import List -from typing_extensions import TypedDict +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict class OCRPageObjectTypedDict(TypedDict): @@ -18,6 +25,14 @@ class OCRPageObjectTypedDict(TypedDict): r"""List of all extracted images in the page""" dimensions: 
Nullable[OCRPageDimensionsTypedDict] r"""The dimensions of the PDF Page's screenshot image""" + tables: NotRequired[List[OCRTableObjectTypedDict]] + r"""List of all extracted tables in the page""" + hyperlinks: NotRequired[List[str]] + r"""List of all hyperlinks in the page""" + header: NotRequired[Nullable[str]] + r"""Header of the page""" + footer: NotRequired[Nullable[str]] + r"""Footer of the page""" class OCRPageObject(BaseModel): @@ -33,10 +48,22 @@ class OCRPageObject(BaseModel): dimensions: Nullable[OCRPageDimensions] r"""The dimensions of the PDF Page's screenshot image""" + tables: Optional[List[OCRTableObject]] = None + r"""List of all extracted tables in the page""" + + hyperlinks: Optional[List[str]] = None + r"""List of all hyperlinks in the page""" + + header: OptionalNullable[str] = UNSET + r"""Header of the page""" + + footer: OptionalNullable[str] = UNSET + r"""Footer of the page""" + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["dimensions"] + optional_fields = ["tables", "hyperlinks", "header", "footer"] + nullable_fields = ["header", "footer", "dimensions"] null_default_fields = [] serialized = handler(self) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py index 533d0742..565a0a30 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py @@ -13,7 +13,7 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Optional, Union +from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict @@ -28,6 +28,12 @@ r"""Document to run OCR on""" +TableFormat = Literal[ + "markdown", + "html", +] + + class OCRRequestTypedDict(TypedDict): model: Nullable[str] document: DocumentTypedDict @@ -45,6 +51,9 @@ class 
OCRRequestTypedDict(TypedDict): r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + table_format: NotRequired[Nullable[TableFormat]] + extract_header: NotRequired[bool] + extract_footer: NotRequired[bool] class OCRRequest(BaseModel): @@ -73,6 +82,12 @@ class OCRRequest(BaseModel): document_annotation_format: OptionalNullable[ResponseFormat] = UNSET r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + table_format: OptionalNullable[TableFormat] = UNSET + + extract_header: Optional[bool] = None + + extract_footer: Optional[bool] = None + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -83,6 +98,9 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "table_format", + "extract_header", + "extract_footer", ] nullable_fields = [ "model", @@ -92,6 +110,7 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "table_format", ] null_default_fields = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrtableobject.py b/packages/mistralai_azure/src/mistralai_azure/models/ocrtableobject.py new file mode 100644 index 00000000..189f059e --- /dev/null +++ b/packages/mistralai_azure/src/mistralai_azure/models/ocrtableobject.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai_azure.types import BaseModel +import pydantic +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +Format = Literal[ + "markdown", + "html", +] +r"""Format of the table""" + + +class OCRTableObjectTypedDict(TypedDict): + id: str + r"""Table ID for extracted table in a page""" + content: str + r"""Content of the table in the given format""" + format_: Format + r"""Format of the table""" + + +class OCRTableObject(BaseModel): + id: str + r"""Table ID for extracted table in a page""" + + content: str + r"""Content of the table in the given format""" + + format_: Annotated[Format, pydantic.Field(alias="format")] + r"""Format of the table""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py b/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py index 4df3bfbc..32d2ca68 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -ReferenceChunkType = Literal["reference"] +ReferenceChunkType = Literal["reference",] class ReferenceChunkTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py index 258fe70e..cbf83ce7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py @@ -4,4 +4,8 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object", "json_schema"] +ResponseFormats = Literal[ + "text", + "json_object", + "json_schema", +] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py b/packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py index 
56573dfa..a33954cc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py @@ -2,10 +2,12 @@ import httpx from typing import Optional +from dataclasses import dataclass from mistralai_azure.models import MistralAzureError +@dataclass(unsafe_hash=True) class ResponseValidationError(MistralAzureError): """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py b/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py index e841ab3a..216d7f8f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py @@ -2,12 +2,14 @@ import httpx from typing import Optional +from dataclasses import dataclass from mistralai_azure.models import MistralAzureError MAX_MESSAGE_LEN = 10_000 +@dataclass(unsafe_hash=True) class SDKError(MistralAzureError): """The fallback error class if no more specific error class is matched.""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py index d91a9058..f99bf4ff 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py @@ -21,7 +21,7 @@ ) -Role = Literal["system"] +Role = Literal["system",] class SystemMessageTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py index be60c8f9..5845456e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict 
-Type = Literal["text"] +Type = Literal["text",] class TextChunkTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py index 8ff257f4..f53a9f1a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py @@ -16,7 +16,7 @@ Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) -ThinkChunkType = Literal["thinking"] +ThinkChunkType = Literal["thinking",] class ThinkChunkTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tool.py b/packages/mistralai_azure/src/mistralai_azure/models/tool.py index ffd9b062..c91deec2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/tool.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/tool.py @@ -4,10 +4,8 @@ from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class ToolTypedDict(TypedDict): @@ -18,6 +16,4 @@ class ToolTypedDict(TypedDict): class Tool(BaseModel): function: Function - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) + type: Optional[ToolTypes] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py index 6ccdcaa2..44fe8ec8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py @@ -4,10 +4,8 @@ from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes 
from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class ToolCallTypedDict(TypedDict): @@ -22,8 +20,6 @@ class ToolCall(BaseModel): id: Optional[str] = "null" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) + type: Optional[ToolTypes] = None index: Optional[int] = 0 diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py b/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py index cc3c2c1f..93b4b7fe 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py @@ -4,10 +4,8 @@ from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class ToolChoiceTypedDict(TypedDict): @@ -24,6 +22,4 @@ class ToolChoice(BaseModel): function: FunctionName r"""this restriction of `Function` is used to select a specific function to call""" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) + type: Optional[ToolTypes] = None diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py b/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py index 8e6a6ad8..01f6f677 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py @@ -4,4 +4,9 @@ from typing 
import Literal -ToolChoiceEnum = Literal["auto", "none", "any", "required"] +ToolChoiceEnum = Literal[ + "auto", + "none", + "any", + "required", +] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py index abca8abe..4bc5c9a9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py @@ -22,7 +22,7 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool"] +ToolMessageRole = Literal["tool",] class ToolMessageTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py b/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py index dfcd31f0..638890c5 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py @@ -5,4 +5,4 @@ from typing import Literal, Union -ToolTypes = Union[Literal["function"], UnrecognizedStr] +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py index 05976fc0..85fedb4b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py @@ -16,7 +16,7 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user"] +UserMessageRole = Literal["user",] class UserMessageTypedDict(TypedDict): diff --git a/packages/mistralai_azure/src/mistralai_azure/ocr.py b/packages/mistralai_azure/src/mistralai_azure/ocr.py index c15a3da7..da823f81 100644 --- a/packages/mistralai_azure/src/mistralai_azure/ocr.py +++ b/packages/mistralai_azure/src/mistralai_azure/ocr.py @@ -25,11 
+25,14 @@ def process( document_annotation_format: OptionalNullable[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.OCRResponse]: + ) -> models.OCRResponse: r"""OCR :param model: @@ -41,6 +44,9 @@ def process( :param image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param table_format: + :param extract_header: + :param extract_footer: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -70,6 +76,9 @@ def process( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, ) req = self._build_request( @@ -88,6 +97,7 @@ def process( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.OCRRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -104,7 +114,7 @@ def process( config=self.sdk_configuration, base_url=base_url or "", operation_id="ocr_v1_ocr_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), 
request=req, @@ -114,7 +124,7 @@ def process( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(Optional[models.OCRResponse], http_res) + return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -145,11 +155,14 @@ async def process_async( document_annotation_format: OptionalNullable[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.OCRResponse]: + ) -> models.OCRResponse: r"""OCR :param model: @@ -161,6 +174,9 @@ async def process_async( :param image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field + :param table_format: + :param extract_header: + :param extract_footer: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -190,6 +206,9 @@ async def process_async( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, ) req = self._build_request_async( @@ -208,6 +227,7 @@ async def process_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.OCRRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -224,7 +244,7 @@ async def process_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="ocr_v1_ocr_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -234,7 +254,7 @@ async def process_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(Optional[models.OCRResponse], http_res) + return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res diff --git a/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py b/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py index 231c2e37..a9a640a1 100644 --- a/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py +++ b/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py @@ -2,7 +2,8 @@ from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, 
Optional, TypeVar, Union +from pydantic_core import core_schema +from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union from typing_extensions import TypeAliasType, TypeAlias @@ -35,5 +36,42 @@ def __bool__(self) -> Literal[False]: "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) ) -UnrecognizedInt: TypeAlias = int -UnrecognizedStr: TypeAlias = str + +class UnrecognizedStr(str): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedStr only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedStr': + if isinstance(v, cls): + return v + return cls(str(v)) + + # Use lax_or_strict_schema where strict always fails + # This forces Pydantic to prefer other union members in strict mode + # and only fall back to UnrecognizedStr in lax mode + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.str_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) + + +class UnrecognizedInt(int): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedInt only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedInt': + if isinstance(v, cls): + return v + return cls(int(v)) + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.int_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py 
b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py index 56164cf3..05f26ade 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py @@ -41,7 +41,6 @@ validate_decimal, validate_float, validate_int, - validate_open_enum, ) from .url import generate_url, template_url, remove_suffix from .values import ( @@ -102,7 +101,6 @@ "validate_const", "validate_float", "validate_int", - "validate_open_enum", "cast_partial", ] @@ -155,7 +153,6 @@ "validate_const": ".serializers", "validate_float": ".serializers", "validate_int": ".serializers", - "validate_open_enum": ".serializers", "cast_partial": ".values", } diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py index 387874ed..12e0aa4f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py @@ -3,6 +3,7 @@ from enum import Enum from typing import Any, Optional + def get_discriminator(model: Any, fieldname: str, key: str) -> str: """ Recursively search for the discriminator attribute in a model. 
@@ -25,31 +26,54 @@ def get_field_discriminator(field: Any) -> Optional[str]: if isinstance(field, dict): if key in field: - return f'{field[key]}' + return f"{field[key]}" if hasattr(field, fieldname): attr = getattr(field, fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" if hasattr(field, upper_fieldname): attr = getattr(field, upper_fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" return None + def search_nested_discriminator(obj: Any) -> Optional[str]: + """Recursively search for discriminator in nested structures.""" + # First try direct field lookup + discriminator = get_field_discriminator(obj) + if discriminator is not None: + return discriminator + + # If it's a dict, search in nested values + if isinstance(obj, dict): + for value in obj.values(): + if isinstance(value, list): + # Search in list items + for item in value: + nested_discriminator = search_nested_discriminator(item) + if nested_discriminator is not None: + return nested_discriminator + elif isinstance(value, dict): + # Search in nested dict + nested_discriminator = search_nested_discriminator(value) + if nested_discriminator is not None: + return nested_discriminator + + return None if isinstance(model, list): for field in model: - discriminator = get_field_discriminator(field) + discriminator = search_nested_discriminator(field) if discriminator is not None: return discriminator - discriminator = get_field_discriminator(model) + discriminator = search_nested_discriminator(model) if discriminator is not None: return discriminator - raise ValueError(f'Could not find discriminator field {fieldname} in {model}') + raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/enums.py b/packages/mistralai_azure/src/mistralai_azure/utils/enums.py index 
c3bc13cf..3324e1bc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/enums.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/enums.py @@ -2,6 +2,10 @@ import enum import sys +from typing import Any + +from pydantic_core import core_schema + class OpenEnumMeta(enum.EnumMeta): # The __call__ method `boundary` kwarg was added in 3.11 and must be present @@ -72,3 +76,59 @@ def __call__( ) except ValueError: return value + + def __new__(mcs, name, bases, namespace, **kwargs): + cls = super().__new__(mcs, name, bases, namespace, **kwargs) + + # Add __get_pydantic_core_schema__ to make open enums work correctly + # in union discrimination. In strict mode (used by Pydantic for unions), + # only known enum values match. In lax mode, unknown values are accepted. + def __get_pydantic_core_schema__( + cls_inner: Any, _source_type: Any, _handler: Any + ) -> core_schema.CoreSchema: + # Create a validator that only accepts known enum values (for strict mode) + def validate_strict(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + # Use the parent EnumMeta's __call__ which raises ValueError for unknown values + return enum.EnumMeta.__call__(cls_inner, v) + + # Create a lax validator that accepts unknown values + def validate_lax(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + try: + return enum.EnumMeta.__call__(cls_inner, v) + except ValueError: + # Return the raw value for unknown enum values + return v + + # Determine the base type schema (str or int) + is_int_enum = False + for base in cls_inner.__mro__: + if base is int: + is_int_enum = True + break + if base is str: + break + + base_schema = ( + core_schema.int_schema() + if is_int_enum + else core_schema.str_schema() + ) + + # Use lax_or_strict_schema: + # - strict mode: only known enum values match (raises ValueError for unknown) + # - lax mode: accept any value, return enum member or raw value + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema( + 
[base_schema, core_schema.no_info_plain_validator_function(validate_lax)] + ), + strict_schema=core_schema.chain_schema( + [base_schema, core_schema.no_info_plain_validator_function(validate_strict)] + ), + ) + + setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__)) + return cls diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py index e873495f..f961e76b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/forms.py @@ -142,16 +142,21 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files + array_field_name = f_name + "[]" for file_obj in val: if not _is_set(file_obj): continue - - file_name, content, content_type = _extract_file_properties(file_obj) + + file_name, content, content_type = _extract_file_properties( + file_obj + ) if content_type is not None: - files.append((f_name + "[]", (file_name, content, content_type))) + files.append( + (array_field_name, (file_name, content, content_type)) + ) else: - files.append((f_name + "[]", (file_name, content))) + files.append((array_field_name, (file_name, content))) else: # Handle single file file_name, content, content_type = _extract_file_properties(val) @@ -161,11 +166,16 @@ def serialize_multipart_form( else: files.append((f_name, (file_name, content))) elif field_metadata.json: - files.append((f_name, ( - None, - marshal_json(val, request_field_types[name]), - "application/json", - ))) + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) else: if isinstance(val, List): values = [] @@ -175,7 +185,8 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - form[f_name + "[]"] = values + array_field_name = f_name + "[]" + form[array_field_name] = values else: form[f_name] = 
_val_to_string(val) return media_type, form, files diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py b/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py index 37a6e7f9..c04e0db8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py @@ -27,12 +27,13 @@ def get_query_params( query_params: Any, gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, ) -> Dict[str, List[str]]: params: Dict[str, List[str]] = {} - globals_already_populated = _populate_query_params(query_params, gbls, params, []) + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated) + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) return params @@ -42,6 +43,7 @@ def _populate_query_params( gbls: Any, query_param_values: Dict[str, List[str]], skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, ) -> List[str]: globals_already_populated: List[str] = [] @@ -69,6 +71,16 @@ def _populate_query_params( globals_already_populated.append(name) f_name = field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == [] or value == "" + ) + + if should_include_empty: + query_param_values[f_name] = [""] + continue + serialization = metadata.serialization if serialization is not None: serialized_parms = _get_serialized_params( diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py b/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py index d5240dd5..1de32b6d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py +++ 
b/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py @@ -44,15 +44,15 @@ def serialize_request_body( serialized_request_body = SerializedRequestBody(media_type) - if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None: + if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None: serialized_request_body.content = marshal_json(request_body, request_body_type) - elif re.match(r"multipart\/.*", media_type) is not None: + elif re.match(r"^multipart\/.*", media_type) is not None: ( serialized_request_body.media_type, serialized_request_body.data, serialized_request_body.files, ) = serialize_multipart_form(media_type, request_body) - elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None: + elif re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None: serialized_request_body.data = serialize_form_data(request_body) elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): serialized_request_body.content = request_body diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py index 4d608671..88a91b10 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/retries.py @@ -3,7 +3,9 @@ import asyncio import random import time -from typing import List +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional import httpx @@ -51,9 +53,11 @@ def __init__(self, config: RetryConfig, status_codes: List[str]): class TemporaryError(Exception): response: httpx.Response + retry_after: Optional[int] def __init__(self, response: httpx.Response): self.response = response + self.retry_after = _parse_retry_after_header(response) class PermanentError(Exception): @@ -63,6 +67,62 @@ def __init__(self, inner: Exception): self.inner = inner +def 
_parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. + """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. 
+ """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + def retry(func, retries: Retries): if retries.config.strategy == "backoff": @@ -183,8 +243,10 @@ def retry_with_backoff( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) time.sleep(sleep) retries += 1 @@ -211,7 +273,9 @@ async def retry_with_backoff_async( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) await asyncio.sleep(sleep) retries += 1 diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py index 378a14c0..14321eb4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py @@ -102,26 +102,6 @@ def validate_int(b): return int(b) -def validate_open_enum(is_int: bool): - def validate(e): - if e is None: - return None - - if isinstance(e, Unset): - return e - - if is_int: - if not isinstance(e, int): - raise ValueError("Expected int") - else: - if not isinstance(e, str): - raise ValueError("Expected string") - - return e - - return validate - - def validate_const(v): def validate(c): # Optional[T] is a Union[T, None] diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py b/packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py index 
6eee29b8..f5813119 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py +++ b/packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py @@ -1,12 +1,26 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from typing import Any, Optional +from typing import Any, Optional, Type, TypeVar, overload import httpx from .serializers import unmarshal_json from mistralai_azure import models +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... + def unmarshal_json_response( typ: Any, http_res: httpx.Response, body: Optional[str] = None diff --git a/packages/mistralai_azure/uv.lock b/packages/mistralai_azure/uv.lock index d77ea936..a227d093 100644 --- a/packages/mistralai_azure/uv.lock +++ b/packages/mistralai_azure/uv.lock @@ -154,7 +154,7 @@ wheels = [ [[package]] name = "mistralai-azure" -version = "1.6.0" +version = "1.8.0" source = { editable = "." 
} dependencies = [ { name = "httpcore" }, @@ -166,6 +166,7 @@ dependencies = [ dev = [ { name = "mypy" }, { name = "pylint" }, + { name = "pyright" }, { name = "pytest" }, { name = "pytest-asyncio" }, ] @@ -181,6 +182,7 @@ requires-dist = [ dev = [ { name = "mypy", specifier = "==1.15.0" }, { name = "pylint", specifier = "==3.2.3" }, + { name = "pyright", specifier = ">=1.1.401,<2" }, { name = "pytest", specifier = ">=8.2.2,<9" }, { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, ] @@ -220,12 +222,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, - { url = "https://files.pythonhosted.org/packages/5a/fa/79cf41a55b682794abe71372151dbbf856e3008f6767057229e6649d294a/mypy-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e601a7fa172c2131bff456bb3ee08a88360760d0d2f8cbd7a75a65497e2df078", size = 10737129, upload-time = "2025-02-05T03:50:24.509Z" }, - { url = 
"https://files.pythonhosted.org/packages/d3/33/dd8feb2597d648de29e3da0a8bf4e1afbda472964d2a4a0052203a6f3594/mypy-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:712e962a6357634fef20412699a3655c610110e01cdaa6180acec7fc9f8513ba", size = 9856335, upload-time = "2025-02-05T03:49:36.398Z" }, - { url = "https://files.pythonhosted.org/packages/e4/b5/74508959c1b06b96674b364ffeb7ae5802646b32929b7701fc6b18447592/mypy-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f95579473af29ab73a10bada2f9722856792a36ec5af5399b653aa28360290a5", size = 11611935, upload-time = "2025-02-05T03:49:14.154Z" }, - { url = "https://files.pythonhosted.org/packages/6c/53/da61b9d9973efcd6507183fdad96606996191657fe79701b2c818714d573/mypy-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8f8722560a14cde92fdb1e31597760dc35f9f5524cce17836c0d22841830fd5b", size = 12365827, upload-time = "2025-02-05T03:48:59.458Z" }, - { url = "https://files.pythonhosted.org/packages/c1/72/965bd9ee89540c79a25778cc080c7e6ef40aa1eeac4d52cec7eae6eb5228/mypy-1.15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1fbb8da62dc352133d7d7ca90ed2fb0e9d42bb1a32724c287d3c76c58cbaa9c2", size = 12541924, upload-time = "2025-02-05T03:50:03.12Z" }, - { url = "https://files.pythonhosted.org/packages/46/d0/f41645c2eb263e6c77ada7d76f894c580c9ddb20d77f0c24d34273a4dab2/mypy-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:d10d994b41fb3497719bbf866f227b3489048ea4bbbb5015357db306249f7980", size = 9271176, upload-time = "2025-02-05T03:50:10.86Z" }, { url = 
"https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, ] @@ -238,6 +234,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695, upload-time = "2023-02-04T12:11:25.002Z" }, ] +[[package]] +name = "nodeenv" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, +] + [[package]] name = "packaging" version = "24.1" @@ -347,19 +352,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" }, { url = 
"https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" }, { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" }, - { url = "https://files.pythonhosted.org/packages/53/ea/bbe9095cdd771987d13c82d104a9c8559ae9aec1e29f139e286fd2e9256e/pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d", size = 2028677, upload-time = "2025-04-23T18:32:27.227Z" }, - { url = "https://files.pythonhosted.org/packages/49/1d/4ac5ed228078737d457a609013e8f7edc64adc37b91d619ea965758369e5/pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954", size = 1864735, upload-time = "2025-04-23T18:32:29.019Z" }, - { url = "https://files.pythonhosted.org/packages/23/9a/2e70d6388d7cda488ae38f57bc2f7b03ee442fbcf0d75d848304ac7e405b/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb", size = 1898467, upload-time = "2025-04-23T18:32:31.119Z" }, - { url = 
"https://files.pythonhosted.org/packages/ff/2e/1568934feb43370c1ffb78a77f0baaa5a8b6897513e7a91051af707ffdc4/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7", size = 1983041, upload-time = "2025-04-23T18:32:33.655Z" }, - { url = "https://files.pythonhosted.org/packages/01/1a/1a1118f38ab64eac2f6269eb8c120ab915be30e387bb561e3af904b12499/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4", size = 2136503, upload-time = "2025-04-23T18:32:35.519Z" }, - { url = "https://files.pythonhosted.org/packages/5c/da/44754d1d7ae0f22d6d3ce6c6b1486fc07ac2c524ed8f6eca636e2e1ee49b/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b", size = 2736079, upload-time = "2025-04-23T18:32:37.659Z" }, - { url = "https://files.pythonhosted.org/packages/4d/98/f43cd89172220ec5aa86654967b22d862146bc4d736b1350b4c41e7c9c03/pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3", size = 2006508, upload-time = "2025-04-23T18:32:39.637Z" }, - { url = "https://files.pythonhosted.org/packages/2b/cc/f77e8e242171d2158309f830f7d5d07e0531b756106f36bc18712dc439df/pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a", size = 2113693, upload-time = "2025-04-23T18:32:41.818Z" }, - { url = 
"https://files.pythonhosted.org/packages/54/7a/7be6a7bd43e0a47c147ba7fbf124fe8aaf1200bc587da925509641113b2d/pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782", size = 2074224, upload-time = "2025-04-23T18:32:44.033Z" }, - { url = "https://files.pythonhosted.org/packages/2a/07/31cf8fadffbb03be1cb520850e00a8490c0927ec456e8293cafda0726184/pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9", size = 2245403, upload-time = "2025-04-23T18:32:45.836Z" }, - { url = "https://files.pythonhosted.org/packages/b6/8d/bbaf4c6721b668d44f01861f297eb01c9b35f612f6b8e14173cb204e6240/pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e", size = 2242331, upload-time = "2025-04-23T18:32:47.618Z" }, - { url = "https://files.pythonhosted.org/packages/bb/93/3cc157026bca8f5006250e74515119fcaa6d6858aceee8f67ab6dc548c16/pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9", size = 1910571, upload-time = "2025-04-23T18:32:49.401Z" }, - { url = "https://files.pythonhosted.org/packages/5b/90/7edc3b2a0d9f0dda8806c04e511a67b0b7a41d2187e2003673a996fb4310/pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3", size = 1956504, upload-time = "2025-04-23T18:32:51.287Z" }, { url = "https://files.pythonhosted.org/packages/30/68/373d55e58b7e83ce371691f6eaa7175e3a24b956c44628eb25d7da007917/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash 
= "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa", size = 2023982, upload-time = "2025-04-23T18:32:53.14Z" }, { url = "https://files.pythonhosted.org/packages/a4/16/145f54ac08c96a63d8ed6442f9dec17b2773d19920b627b18d4f10a061ea/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29", size = 1858412, upload-time = "2025-04-23T18:32:55.52Z" }, { url = "https://files.pythonhosted.org/packages/41/b1/c6dc6c3e2de4516c0bb2c46f6a373b91b5660312342a0cf5826e38ad82fa/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d", size = 1892749, upload-time = "2025-04-23T18:32:57.546Z" }, @@ -378,15 +370,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013, upload-time = "2025-04-23T18:33:26.621Z" }, { url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715, upload-time = "2025-04-23T18:33:28.656Z" }, { url = "https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = 
"2025-04-23T18:33:30.645Z" }, - { url = "https://files.pythonhosted.org/packages/08/98/dbf3fdfabaf81cda5622154fda78ea9965ac467e3239078e0dcd6df159e7/pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101", size = 2024034, upload-time = "2025-04-23T18:33:32.843Z" }, - { url = "https://files.pythonhosted.org/packages/8d/99/7810aa9256e7f2ccd492590f86b79d370df1e9292f1f80b000b6a75bd2fb/pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64", size = 1858578, upload-time = "2025-04-23T18:33:34.912Z" }, - { url = "https://files.pythonhosted.org/packages/d8/60/bc06fa9027c7006cc6dd21e48dbf39076dc39d9abbaf718a1604973a9670/pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d", size = 1892858, upload-time = "2025-04-23T18:33:36.933Z" }, - { url = "https://files.pythonhosted.org/packages/f2/40/9d03997d9518816c68b4dfccb88969756b9146031b61cd37f781c74c9b6a/pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535", size = 2068498, upload-time = "2025-04-23T18:33:38.997Z" }, - { url = "https://files.pythonhosted.org/packages/d8/62/d490198d05d2d86672dc269f52579cad7261ced64c2df213d5c16e0aecb1/pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d", size = 2108428, upload-time = "2025-04-23T18:33:41.18Z" }, - { url = 
"https://files.pythonhosted.org/packages/9a/ec/4cd215534fd10b8549015f12ea650a1a973da20ce46430b68fc3185573e8/pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6", size = 2069854, upload-time = "2025-04-23T18:33:43.446Z" }, - { url = "https://files.pythonhosted.org/packages/1a/1a/abbd63d47e1d9b0d632fee6bb15785d0889c8a6e0a6c3b5a8e28ac1ec5d2/pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca", size = 2237859, upload-time = "2025-04-23T18:33:45.56Z" }, - { url = "https://files.pythonhosted.org/packages/80/1c/fa883643429908b1c90598fd2642af8839efd1d835b65af1f75fba4d94fe/pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039", size = 2239059, upload-time = "2025-04-23T18:33:47.735Z" }, - { url = "https://files.pythonhosted.org/packages/d4/29/3cade8a924a61f60ccfa10842f75eb12787e1440e2b8660ceffeb26685e7/pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27", size = 2066661, upload-time = "2025-04-23T18:33:49.995Z" }, ] [[package]] @@ -408,6 +391,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, ] +[[package]] +name = "pyright" +version = "1.1.408" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { 
name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/b2/5db700e52554b8f025faa9c3c624c59f1f6c8841ba81ab97641b54322f16/pyright-1.1.408.tar.gz", hash = "sha256:f28f2321f96852fa50b5829ea492f6adb0e6954568d1caa3f3af3a5f555eb684", size = 4400578, upload-time = "2026-01-08T08:07:38.795Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/82/a2c93e32800940d9573fb28c346772a14778b84ba7524e691b324620ab89/pyright-1.1.408-py3-none-any.whl", hash = "sha256:090b32865f4fdb1e0e6cd82bf5618480d48eecd2eb2e70f960982a3d9a4c17c1", size = 6399144, upload-time = "2026-01-08T08:07:37.082Z" }, +] + [[package]] name = "pytest" version = "8.3.2" diff --git a/packages/mistralai_gcp/.gitignore b/packages/mistralai_gcp/.gitignore index f2ea8c39..b386de74 100644 --- a/packages/mistralai_gcp/.gitignore +++ b/packages/mistralai_gcp/.gitignore @@ -1,3 +1,5 @@ +.env +.env.local **/__pycache__/ **/.speakeasy/temp/ **/.speakeasy/logs/ diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 07deb7d7..0bf3209f 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,42 +1,669 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: 05fc6f45406deac180ffc1df760c67f4 + docChecksum: a7d9a161ca71328c62514af87c72bd88 docVersion: 1.0.0 - speakeasyVersion: 1.606.10 - generationVersion: 2.687.13 - releaseVersion: 1.7.0 - configChecksum: 265058aeeb734c5018a1be4c40ea6e39 + speakeasyVersion: 1.685.0 + generationVersion: 2.794.1 + releaseVersion: 1.8.0 + configChecksum: 42a1e5752a774fcdb0a5949bd6535933 repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/mistralai_gcp installationURL: 
https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_gcp published: true +persistentEdits: + generation_id: 749d4ba0-3c79-459a-a407-b84537a057da + pristine_commit_hash: ae909165077818f36014ef4d28edaa3572c8cc64 + pristine_tree_hash: f04041c3f961a8702dfa1eaa1185b1b605875f82 features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 constsAndDefaults: 1.0.5 - core: 5.20.1 + core: 5.23.18 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 examples: 3.0.2 flatRequests: 1.0.1 - globalSecurity: 3.0.3 + globalSecurity: 3.0.4 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 - globalServerURLs: 3.1.1 + globalServerURLs: 3.2.0 + includes: 3.0.0 methodArguments: 1.0.2 nameOverrides: 3.0.1 nullables: 1.0.1 - openEnums: 1.0.1 + openEnums: 1.0.2 responseFormat: 1.0.1 - retries: 3.0.2 - sdkHooks: 1.1.0 - serverEvents: 1.0.8 + retries: 3.0.3 + sdkHooks: 1.2.0 + serverEvents: 1.0.11 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.0.4 + unions: 3.1.1 +trackedFiles: + .gitattributes: + id: 24139dae6567 + last_write_checksum: sha1:53134de3ada576f37c22276901e1b5b6d85cd2da + pristine_git_object: 4d75d59008e4d8609876d263419a9dc56c8d6f3a + .vscode/settings.json: + id: 89aa447020cd + last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 + pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + docs/models/arguments.md: + id: 7ea5e33709a7 + last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec + pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 + docs/models/assistantmessage.md: + id: 7e0218023943 + last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38 + pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50 + docs/models/assistantmessagecontent.md: + id: 9f1795bbe642 + last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 + pristine_git_object: 
047b7cf95f4db203bf2c501680b73ca0562a122d + docs/models/assistantmessagerole.md: + id: bb5d2a4bc72f + last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91 + pristine_git_object: 658229e77eb6419391cf7941568164541c528387 + docs/models/chatcompletionchoice.md: + id: 0d15c59ab501 + last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 + pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1 + docs/models/chatcompletionchoicefinishreason.md: + id: 225764da91d3 + last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 + pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b + docs/models/chatcompletionrequest.md: + id: adffe90369d0 + last_write_checksum: sha1:1ac7d6b5a8aba9c922cf5fe45f94aee55228f9db + pristine_git_object: 6886f9dcd43e8d61f4ec6692235f281cb03a5c86 + docs/models/chatcompletionrequestmessages.md: + id: ec996b350e12 + last_write_checksum: sha1:2ecec8d12cdb48426f4eb62732066fc79fcd4ec3 + pristine_git_object: bc7708a67f06d74e8a5bf1facb2b23fb1e08053c + docs/models/chatcompletionrequeststop.md: + id: fcaf5bbea451 + last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 + pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485 + docs/models/chatcompletionrequesttoolchoice.md: + id: b97041b2f15b + last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 + pristine_git_object: dc82a8ef91e7bfd44f1d2d9d9a4ef61b6e76cc34 + docs/models/chatcompletionresponse.md: + id: 7c53b24681b9 + last_write_checksum: sha1:a56581c0846638cfe6df26d3045fb4f874ccd931 + pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b + docs/models/chatcompletionstreamrequest.md: + id: cf8f29558a68 + last_write_checksum: sha1:33778fdf71aa9b934ae48d51664daaa0dd817e04 + pristine_git_object: ff1940dd8a92d7892d895c3fc0e0a4b321e55534 + docs/models/chatcompletionstreamrequesttoolchoice.md: + id: 210d5e5b1413 + last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 + pristine_git_object: 
43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 + docs/models/completionchunk.md: + id: 60cb30423c60 + last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 + pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 + docs/models/completionevent.md: + id: e57cd17cb9dc + last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 + pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d + docs/models/completionresponsestreamchoice.md: + id: d56824d615a6 + last_write_checksum: sha1:dcf4b125b533192cb5aea1a68551866954712dc5 + pristine_git_object: c807dacd98eb3561ee45f40db71a92cb72b0f6de + docs/models/content.md: + id: bfd859c99f86 + last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8 + pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6 + docs/models/contentchunk.md: + id: d2d3a32080cd + last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d + pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f + docs/models/deltamessage.md: + id: 6c5ed6b60968 + last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58 + pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9 + docs/models/fimcompletionrequest.md: + id: b44677ecc293 + last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 + pristine_git_object: fde0b625c29340e8dce1eb3026ce644b1885e53a + docs/models/fimcompletionrequeststop.md: + id: ea5475297a83 + last_write_checksum: sha1:a6cdb4bda01ac58016a71f35da48a5d10df11623 + pristine_git_object: a0dbb00a82a03acc8b62b81d7597722a6ca46118 + docs/models/fimcompletionresponse.md: + id: 050d62ba2fac + last_write_checksum: sha1:a6101a69e83b7a5bcf96ec77ba1cab8748f734f4 + pristine_git_object: cd62d0349503fd8b13582d0ba47ab9cff40f6b28 + docs/models/fimcompletionstreamrequest.md: + id: c881d7e27637 + last_write_checksum: sha1:f8755bc554dd44568c42eb5b6dde04db464647ab + pristine_git_object: ba62d854f030390418597cbd8febae0e1ce27ea8 + docs/models/fimcompletionstreamrequeststop.md: + id: 
c97a11b764e9 + last_write_checksum: sha1:958d5087050fdeb128745884ebcf565b4fdc3886 + pristine_git_object: 5a9e2ff020d4939f7fd42c0673ea7bdd16cca99d + docs/models/finishreason.md: + id: 73315c2a39b3 + last_write_checksum: sha1:5b58c7fa9219f728b9731287e21abe1be9f11e4a + pristine_git_object: 45a5aedb7241cf080df3eb976a4413064d314009 + docs/models/function.md: + id: 416a80fba031 + last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 + pristine_git_object: b2bdb3fe82520ea79d0cf1a10ee41c844f90b859 + docs/models/functioncall.md: + id: a78cd1d7f605 + last_write_checksum: sha1:65bf78744b8531cdefb6a288f1af5cbf9d9e2395 + pristine_git_object: 7ccd90dca4868db9b6e178712f95d375210013c8 + docs/models/functionname.md: + id: 4b3bd62c0f26 + last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb + pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 + docs/models/httpvalidationerror.md: + id: a211c095f2ac + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/imageurl.md: + id: e75dd23cec1d + last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 + pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + docs/models/imageurlchunk.md: + id: 4407097bfff3 + last_write_checksum: sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727 + pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 + docs/models/imageurlchunkimageurl.md: + id: c7fae88454ce + last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325 + pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064 + docs/models/imageurlchunktype.md: + id: b9af2db9ff60 + last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 + pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + docs/models/jsonschema.md: + id: a6b15ed6fac8 + last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f + pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 + 
docs/models/loc.md: + id: b071d5a509cc + last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 + pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/messages.md: + id: 2103cd675c2f + last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244 + pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a + docs/models/mistralpromptmode.md: + id: d17d5db4d3b6 + last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 + pristine_git_object: 7416e2037c507d19ac02aed914da1208a2fed0a1 + docs/models/prediction.md: + id: 3c70b2262201 + last_write_checksum: sha1:ca8a77219e6113f2358a5363e935288d90df0725 + pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 + docs/models/referencechunk.md: + id: 07895f9debfd + last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 + pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc + docs/models/referencechunktype.md: + id: 0944b80ea9c8 + last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 + pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 + docs/models/responseformat.md: + id: 50a1e4140614 + last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add + pristine_git_object: 5cab22f2bf1c412699f6a7ed18ef801ecbc3ee4b + docs/models/responseformats.md: + id: cf1f250b82db + last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 + pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/role.md: + id: b694540a5b1e + last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d + pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053 + docs/models/security.md: + id: 452e4d4eb67a + last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 + pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 + docs/models/stop.md: + id: f231cc9f5041 + last_write_checksum: sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8 + pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846 + 
docs/models/systemmessage.md: + id: fdb7963e1cdf + last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f + pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 + docs/models/systemmessagecontent.md: + id: 94a56febaeda + last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb + pristine_git_object: 0c87baf3c2fade64a2738a9a4b3ce19647e5dc9a + docs/models/systemmessagecontentchunks.md: + id: cea1c19e9d7a + last_write_checksum: sha1:986aec0f8098158515bbccd0c22e0b3d4151bb32 + pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 + docs/models/textchunk.md: + id: 6cd12e0ef110 + last_write_checksum: sha1:6d41d1991d122805734ed0d90ee01592aa5ae6ff + pristine_git_object: 6daab3c381bd8c13d2935bf62578648a8470fc76 + docs/models/thinkchunk.md: + id: bca24d7153f6 + last_write_checksum: sha1:feb95a931bb9cdbfe28ab351618687e513cf830b + pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab + docs/models/thinkchunktype.md: + id: 0fbeed985341 + last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 + pristine_git_object: baf6f755252d027295be082b53ecf80555039414 + docs/models/thinking.md: + id: 07234f8dd364 + last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 + pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + docs/models/tool.md: + id: 8966139dbeed + last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a + pristine_git_object: fb661f72887271d5bb470e4edf025a32b00ade17 + docs/models/toolcall.md: + id: 80892ea1a051 + last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 + pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolchoice.md: + id: "097076343426" + last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 + pristine_git_object: 373046bbbc834169293b4f4ae8b2e238f952ddde + docs/models/toolchoiceenum.md: + id: 15410de51ffc + last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 + pristine_git_object: 
0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolmessage.md: + id: 0553747c37a1 + last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167 + pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5 + docs/models/toolmessagecontent.md: + id: f0522d2d3c93 + last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee + pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 + docs/models/toolmessagerole.md: + id: f333d4d1ab56 + last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f + pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6 + docs/models/tooltypes.md: + id: adb50fe63ea2 + last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c + pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 + docs/models/type.md: + id: 98c32f09b2c8 + last_write_checksum: sha1:8aa9ca999e9648ddc2240bf80780684e3e858ddf + pristine_git_object: eb0581e7174b6951d69c485a64af5244cb8687fa + docs/models/usageinfo.md: + id: ec6fe65028a9 + last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc + pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 + docs/models/usermessage.md: + id: ed66d7a0f80b + last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed + pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546 + docs/models/usermessagecontent.md: + id: 52c072c851e8 + last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a + pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf + docs/models/usermessagerole.md: + id: 99ffa937c462 + last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111 + pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4 + docs/models/utils/retryconfig.md: + id: 4343ac43161c + last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d + pristine_git_object: 69dd549ec7f5f885101d08dd502e25748183aebf + docs/models/validationerror.md: + id: 304bdf06ef8b + last_write_checksum: sha1:1889f608099577e6a116c14b211a6811d6b22786 
+ pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 + py.typed: + id: 258c3ed47ae4 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + pylintrc: + id: 7ce8b9f946e6 + last_write_checksum: sha1:6b615d49741eb9ae16375d3a499767783d1128a1 + pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 + scripts/prepare_readme.py: + id: e0c5957a6035 + last_write_checksum: sha1:81c7dbabc0e726a4a150e6ef1bcba578d3f1153d + pristine_git_object: 6c4b993238c1a60d4df4bb7de0a0b0a82e385dbf + scripts/publish.sh: + id: fe273b08f514 + last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 + pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 + src/mistralai_gcp/__init__.py: + id: b6565f49e73b + last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b + pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c + src/mistralai_gcp/_hooks/__init__.py: + id: 663f3129700b + last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d + pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + src/mistralai_gcp/_hooks/sdkhooks.py: + id: 46ab7e644296 + last_write_checksum: sha1:a2c018871bea47706a76b03d9a17dab717c339c0 + pristine_git_object: b81c2a2739d316cfed54daec48df6375155eb802 + src/mistralai_gcp/_hooks/types.py: + id: 1f77198243ec + last_write_checksum: sha1:fbf5f1fb258b75133c6b12ae688c03c18b2debd5 + pristine_git_object: f8088f4c26d3ae27202c716c80c391d4daed4156 + src/mistralai_gcp/_version.py: + id: 4e2b8c406f49 + last_write_checksum: sha1:18c7db41065d76e733dc84c997f2a7808892a7c7 + pristine_git_object: a170f0ab6d229979b9077477809b10f2774a0144 + src/mistralai_gcp/basesdk.py: + id: b84fa6111b69 + last_write_checksum: sha1:41381dd799bd5e1f8a15bb65a0672dc6cc4796c4 + pristine_git_object: 7a93de23ad83096b2392e05b11f29030b5961456 + src/mistralai_gcp/chat.py: + id: 1cc7d54332ba + last_write_checksum: sha1:b4b4db3cfeac555718e2d74e897c6ba22b07a672 + 
pristine_git_object: 3dd6040fc7a565ffc4854bcc25e1e24a6683896d + src/mistralai_gcp/fim.py: + id: 1e5bec08157c + last_write_checksum: sha1:2c27170c5932893d4e8bec8ce45b2dc6e0957cd6 + pristine_git_object: 36d9fd60baaad606d9d57a30afdd9566b06b4caa + src/mistralai_gcp/httpclient.py: + id: 7de4ac861042 + last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 + pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + src/mistralai_gcp/models/__init__.py: + id: 9a7b2a1f0dba + last_write_checksum: sha1:54654df1aecc8d4f634ebd4dbcb0fed16da80309 + pristine_git_object: fe85b133a3a7652cfcfd3b44074be3729c8a9b7b + src/mistralai_gcp/models/assistantmessage.py: + id: 0779dd85c548 + last_write_checksum: sha1:ccf5d6a93bf007d47f0415320afb047278e10172 + pristine_git_object: 17d740b6eeb433b2865a652d1dd760227ad38191 + src/mistralai_gcp/models/chatcompletionchoice.py: + id: b5843c853153 + last_write_checksum: sha1:d389ddcfb64980b6c56a42d53bce7c63e26cc569 + pristine_git_object: fe3ee952a207f772ec49972cbd30f83654c84ad9 + src/mistralai_gcp/models/chatcompletionrequest.py: + id: 42d6cdf4646f + last_write_checksum: sha1:9685d594f13e8500e9c7fbab1e0d4042fccfc23d + pristine_git_object: 80345f9d956f64396f48850641842b2a3a6f8bee + src/mistralai_gcp/models/chatcompletionresponse.py: + id: 14720f23411e + last_write_checksum: sha1:46f14c3e00d21e9f01756f111d353768ad939494 + pristine_git_object: a7953eb156cc8185d70f92df8a75a2ebb77840b9 + src/mistralai_gcp/models/chatcompletionstreamrequest.py: + id: 2e17680adc7e + last_write_checksum: sha1:37c2daaad5c557234b5f067152280440f4c96933 + pristine_git_object: e857d51522dc9964cde865d7f681bd856a3cbdaf + src/mistralai_gcp/models/completionchunk.py: + id: 7fa670acf4b8 + last_write_checksum: sha1:0d0fdb8efda7f0b6a8ff376b7da94cac8060e4e2 + pristine_git_object: ca002f52239f69b96dd967b5e91cb4ed544e51d0 + src/mistralai_gcp/models/completionevent.py: + id: c25e6676e263 + last_write_checksum: sha1:528f13beedc9befc6fb71d4f9f2a2d4ff5e91817 + 
pristine_git_object: 33278c119c62205b8d9b09297066dc61c2a86cd1 + src/mistralai_gcp/models/completionresponsestreamchoice.py: + id: 46946832a23e + last_write_checksum: sha1:bc42569eb80dc034a1bde9170c35e6bc4ff52bb8 + pristine_git_object: ec9df52818fabf6bef33094bc7d25398066df3af + src/mistralai_gcp/models/contentchunk.py: + id: 96dd7160dff7 + last_write_checksum: sha1:484722b90615ca7af20993c570de79fe990a50f2 + pristine_git_object: da5671e348d363927af77188da6af07240398826 + src/mistralai_gcp/models/deltamessage.py: + id: db6c3c4d3384 + last_write_checksum: sha1:e596610fa0dd100203cd7e515750782bfbdb0445 + pristine_git_object: 1801ac76522df2efc362712d46262aeba95abc87 + src/mistralai_gcp/models/fimcompletionrequest.py: + id: ed8593c435af + last_write_checksum: sha1:6561263425e385568189ffc61e6b00034136adc3 + pristine_git_object: bcc97c90d4d327c83666423317dae2dc90db3b82 + src/mistralai_gcp/models/fimcompletionresponse.py: + id: 5f85a7cdb5fd + last_write_checksum: sha1:3ac2057157c7d1cb1bfc81fca2915ba72546f968 + pristine_git_object: e1940b0a2290fc3f9afcbd9e945397b1b90660ec + src/mistralai_gcp/models/fimcompletionstreamrequest.py: + id: f17c4f8fa580 + last_write_checksum: sha1:ebbe89e576d498070fde6b195d5afa2dc8bd5eac + pristine_git_object: 34d2ba65682b971f675f427cdf3aa6539071ce3a + src/mistralai_gcp/models/function.py: + id: 4612d6f83b9a + last_write_checksum: sha1:7692ea8102475e4d82d83722a8aea1efde668774 + pristine_git_object: 7ad1ae645f99ab13c022c92e7733ff4b15d39cac + src/mistralai_gcp/models/functioncall.py: + id: a3ca765a9368 + last_write_checksum: sha1:e044de5b26b15d46dce8ad8bd0d13bdf3d24ef7d + pristine_git_object: 99554c8862922184a05074bf01f71fbe20ac8fea + src/mistralai_gcp/models/functionname.py: + id: f97eb2c1bae3 + last_write_checksum: sha1:6343e5b4f724db6088c2055b058a9ebdd9bda995 + pristine_git_object: 00ec22f5ca6ff2d68d5cce2a020846a672ab0a1b + src/mistralai_gcp/models/httpvalidationerror.py: + id: f1ac6b7c81f3 + last_write_checksum: 
sha1:8e98e27a5440e2e1dbe330d1c889d43919d90b51 + pristine_git_object: 79609351e675148ef074988bb6ea8a11b81087dc + src/mistralai_gcp/models/imageurl.py: + id: 1668e9d55730 + last_write_checksum: sha1:2b8eaac00c956beb87434f8d5a21dff12611c788 + pristine_git_object: 20d4ba7719a6c04d2c7864459a68cca808e1a3f2 + src/mistralai_gcp/models/imageurlchunk.py: + id: ebc4dfed0347 + last_write_checksum: sha1:5c625584449139a410138c9986323d1f86b52735 + pristine_git_object: ddb53f21a13aeed7884e213e92752de1870d9fb5 + src/mistralai_gcp/models/jsonschema.py: + id: 4c32e4fa593e + last_write_checksum: sha1:3c972f731f2bd92262ea04a65771c093254d3a5f + pristine_git_object: 26914b2f8562da07e2d54d68a5806bedd32ec16a + src/mistralai_gcp/models/mistralgcperror.py: + id: 690cf29f596b + last_write_checksum: sha1:0ec55c68e3daccf2aba3c52f0a7c77ad5102f4c9 + pristine_git_object: fec729a590b2ea981e01f4af99d8b36ba52b4664 + src/mistralai_gcp/models/mistralpromptmode.py: + id: d2ba58ed5184 + last_write_checksum: sha1:8518548e80dcd8798ee72c2557c473327ba9289b + pristine_git_object: 1440f6ea9d18139ce5f10eb38d951b0995f74a20 + src/mistralai_gcp/models/no_response_error.py: + id: 7a773ba0687f + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai_gcp/models/prediction.py: + id: cd3b43190e22 + last_write_checksum: sha1:a0411a8e3b1ecb42b91405dd9ee2a2ee5f3fad59 + pristine_git_object: 36c87ab046ed9f1a28a371fbdc5c7d584d71b6d3 + src/mistralai_gcp/models/referencechunk.py: + id: ee00a52fb6dd + last_write_checksum: sha1:d0c05b6b1e7d085833d4a9ef85f1e0088c86d3a5 + pristine_git_object: 904e8b8250570371e2b59895196986a45e6d3562 + src/mistralai_gcp/models/responseformat.py: + id: ad17dac36a51 + last_write_checksum: sha1:296d4b52f934c48490b71d85e1e9d0e207cee21a + pristine_git_object: 9fe5116ca46d713f5f23c92ec1de8a73c5124408 + src/mistralai_gcp/models/responseformats.py: + id: deb9c36c5ec5 + last_write_checksum: 
sha1:a212e85d286b5b49219f57d071a2232ff8b5263b + pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 + src/mistralai_gcp/models/responsevalidationerror.py: + id: 78e210042d35 + last_write_checksum: sha1:b8ba70238453017393e721c7d61b5f1e268d7c17 + pristine_git_object: ebd4f214747d451dc2733d6ea838c67bb0c84797 + src/mistralai_gcp/models/sdkerror.py: + id: beed68eccaa1 + last_write_checksum: sha1:a058f2519ec22f72457e800600be469f13ff9915 + pristine_git_object: 7f53bbcd548d15f4fdd529bd3caea5249eb5e8e6 + src/mistralai_gcp/models/security.py: + id: 32f877bd1399 + last_write_checksum: sha1:7bad1150440143f9f6faefe33911edf6c2afdec1 + pristine_git_object: 3857494264c0444d330c54570483710a5ed321f0 + src/mistralai_gcp/models/systemmessage.py: + id: 13826cd6cb74 + last_write_checksum: sha1:876e84816c4e27ad77d6313777ba841ea3086cf9 + pristine_git_object: d74bdf3255bac53335eea08a6010cf1cc19380dd + src/mistralai_gcp/models/systemmessagecontentchunks.py: + id: 8233735d37db + last_write_checksum: sha1:38fedfdb83824054a1734bcc7d39e7e040bf4792 + pristine_git_object: e0b5bbc30828cbf572e603efc86ee2695102ea31 + src/mistralai_gcp/models/textchunk.py: + id: a330626b2006 + last_write_checksum: sha1:b801cf9b1913a70841c8fbdc9d433f0380ea82d8 + pristine_git_object: c4a8cf28cd2281cfda40cefa70ce1bd64d3e750d + src/mistralai_gcp/models/thinkchunk.py: + id: c38f6a213cc1 + last_write_checksum: sha1:a072f3bf01c2dc90ef6cc1b188b2e00e15923e07 + pristine_git_object: b88c0cb54c6926b3c896b3c192c5f3c51c676a51 + src/mistralai_gcp/models/tool.py: + id: 86b94d6a3bcb + last_write_checksum: sha1:14a081eb8639d834a7c4f209a79e7d1270202205 + pristine_git_object: 800de633962a0ccddab52596eae542318bb491b0 + src/mistralai_gcp/models/toolcall.py: + id: 3047e78c2ac3 + last_write_checksum: sha1:d219f8d7de19f501b799caf0e232bdce95e2c891 + pristine_git_object: 23ef157aa1d24498805a489a8cebf3c0e257d919 + src/mistralai_gcp/models/toolchoice.py: + id: 1f3d5233426e + last_write_checksum: 
sha1:936b1ac7b44bc1bf357e6a66cc42ed0127ad015e + pristine_git_object: 4a1483305f606afcc704e8d51ae363468354849e + src/mistralai_gcp/models/toolchoiceenum.py: + id: b4431b9cf3fd + last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 + pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 + src/mistralai_gcp/models/toolmessage.py: + id: e21a2326eb31 + last_write_checksum: sha1:c332f72e272fff7970f52e2b15223a2898ce9b15 + pristine_git_object: d6aa2621b83bde261fab7bd15f58273861f88738 + src/mistralai_gcp/models/tooltypes.py: + id: b4c1716d51b3 + last_write_checksum: sha1:0f8fe0c437736eb584cce298a5e72c4e25f7c42b + pristine_git_object: 8b812ae0cfee81a1cd8ab0180e65f57d19a0dcbd + src/mistralai_gcp/models/usageinfo.py: + id: 574d1999c265 + last_write_checksum: sha1:a0a88fe5b3cae9317781b99cb3cc1916a9ba17cc + pristine_git_object: 59f36158761c3a86900256a6ed73845c455417c7 + src/mistralai_gcp/models/usermessage.py: + id: cf3691ffafa6 + last_write_checksum: sha1:d0ed86a67403d65ed6ac7a31aa5f73e19ecfa670 + pristine_git_object: 0168b45235bc891888c095565af832535dd26139 + src/mistralai_gcp/models/validationerror.py: + id: f1a6468621bd + last_write_checksum: sha1:a4cc5969f12e00be3506edc90ec21a01d5415eff + pristine_git_object: 033d4b63d1c321ae2c49e8684b34817adddca4c2 + src/mistralai_gcp/py.typed: + id: 7f25f97fed44 + last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 + pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 + src/mistralai_gcp/sdkconfiguration.py: + id: 84fd7d3e219a + last_write_checksum: sha1:df51450c87f807c849e2aefb0a154aa4426fd8e3 + pristine_git_object: cf85c47e5e33956a64ddea53d85cdb7cc4bb687e + src/mistralai_gcp/types/__init__.py: + id: 15a92fdbd0a1 + last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed + pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + src/mistralai_gcp/types/basemodel.py: + id: 0dd6dc277359 + last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 + pristine_git_object: 
a9a640a1a7048736383f96c67c6290c86bf536ee + src/mistralai_gcp/utils/__init__.py: + id: bb44726e5fa4 + last_write_checksum: sha1:887f56a717845fab7445cc368d2a17d850c3565a + pristine_git_object: 05f26ade57efb8c54a774fbcb939fb1a7dc655ce + src/mistralai_gcp/utils/annotations.py: + id: aeecca0c40a3 + last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc + pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + src/mistralai_gcp/utils/datetimes.py: + id: e3e3bb6cb264 + last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 + pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai_gcp/utils/enums.py: + id: 9f020fc8d361 + last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d + pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 + src/mistralai_gcp/utils/eventstreaming.py: + id: d570df9074cf + last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b + pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + src/mistralai_gcp/utils/forms.py: + id: fe642748c385 + last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 + pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + src/mistralai_gcp/utils/headers.py: + id: 0cb933d098ed + last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 + pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + src/mistralai_gcp/utils/logger.py: + id: 2992f9bda9c7 + last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 + pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 + src/mistralai_gcp/utils/metadata.py: + id: af274ae68c93 + last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 + pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + src/mistralai_gcp/utils/queryparams.py: + id: b20aa8da5982 + last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 + pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + src/mistralai_gcp/utils/requestbodies.py: 
+ id: 1a2ddaa8f5a2 + last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 + pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c + src/mistralai_gcp/utils/retries.py: + id: 8caeba1fe4ab + last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 + pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + src/mistralai_gcp/utils/security.py: + id: fa4f52aaad5d + last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 + pristine_git_object: 295a3f40031dbb40073ad227fd4a355660f97ab2 + src/mistralai_gcp/utils/serializers.py: + id: 920ccb5c87f2 + last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 + pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + src/mistralai_gcp/utils/unmarshal_json_response.py: + id: 65d5fa644cf8 + last_write_checksum: sha1:877dd4bb58700039a481fdf7d7216d2d9a0b3e92 + pristine_git_object: c168a293f7018fc3b83cac0d8f723475e5f05631 + src/mistralai_gcp/utils/url.py: + id: 116eb5a78ca7 + last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 + pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + src/mistralai_gcp/utils/values.py: + id: 9cc9ee47c951 + last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 + pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 +examples: + stream_chat: + speakeasy-default-stream-chat: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + responses: + "422": + application/json: {} + chat_completion_v1_chat_completions_post: + speakeasy-default-chat-completion-v1-chat-completions-post: + requestBody: + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + responses: + "200": + application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} + "422": + application/json: {} + stream_fim: + speakeasy-default-stream-fim: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} + responses: + "422": + application/json: {} + fim_completion_v1_fim_completions_post: + userExample: + requestBody: + application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} + responses: + "200": + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} +examplesVersion: 1.0.2 +generatedTests: {} generatedFiles: - .gitattributes - .vscode/settings.json @@ -185,37 +812,3 @@ generatedFiles: - src/mistralai_gcp/utils/unmarshal_json_response.py - src/mistralai_gcp/utils/url.py - src/mistralai_gcp/utils/values.py -examples: - stream_chat: - speakeasy-default-stream-chat: - requestBody: - application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} - responses: - "422": - application/json: {} - chat_completion_v1_chat_completions_post: - speakeasy-default-chat-completion-v1-chat-completions-post: - requestBody: - application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} - responses: - "200": - application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} - "422": - application/json: {} - stream_fim: - speakeasy-default-stream-fim: - requestBody: - application/json: {"model": "codestral-latest", "top_p": 1, "stream": true, "prompt": "def", "suffix": "return a+b"} - responses: - "422": - application/json: {} - fim_completion_v1_fim_completions_post: - userExample: - requestBody: - application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} - responses: - "200": - application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). 
As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} -examplesVersion: 1.0.2 -generatedTests: {} -releaseNotes: "## SDK Changes Detected:\n* `mistral_gcp.chat.complete()`: \n * `request` **Changed** **Breaking** :warning:\n * `response` **Changed**\n* `mistral_gcp.fim.complete()`: `response` **Changed**\n" diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/mistralai_gcp/.speakeasy/gen.yaml index 3df5bb18..2aacaa62 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/mistralai_gcp/.speakeasy/gen.yaml @@ -16,12 +16,17 @@ generation: auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false + hoistGlobalSecurity: true + schemas: + allOfMergeStrategy: shallowMerge + requestBodyFieldName: "" + persistentEdits: {} tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.7.0 + version: 1.8.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -32,10 +37,12 @@ python: allowedRedefinedBuiltins: - id - object + asyncMode: both authors: - Mistral baseErrorName: MistralGcpError clientServerStatusCodesAsErrors: true + constFieldCasing: upper defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in GCP. 
enableCustomCodeRegions: false @@ -53,14 +60,19 @@ python: operations: "" shared: "" webhooks: "" + inferUnionDiscriminators: true inputModelSuffix: input + license: "" maxMethodParams: 15 methodArguments: infer-optional-args moduleName: "" + multipartArrayFormat: legacy outputModelSuffix: output packageManager: uv packageName: mistralai-gcp + preApplyUnionDiscriminators: false pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat + sseFlatResponse: false templateVersion: v2 diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md index 48103e30..6886f9dc 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionrequest.md @@ -12,6 +12,7 @@ | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md index aaacc09c..ff1940dd 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md @@ -12,6 +12,7 @@ | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md index 380f109c..fde0b625 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionrequest.md @@ -12,6 +12,7 @@ | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | | `stop` | [Optional[models.FIMCompletionRequestStop]](../models/fimcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. 
| | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md index a890ff2b..ba62d854 100644 --- a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md +++ b/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md @@ -12,6 +12,7 @@ | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `stop` | [Optional[models.FIMCompletionStreamRequestStop]](../models/fimcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `prompt` | *str* | :heavy_check_mark: | The text/code to complete. | def | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | return a+b | | `min_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The minimum number of tokens to generate in the completion. | | \ No newline at end of file diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/mistralai_gcp/pyproject.toml index 9293079a..df3e43ae 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/mistralai_gcp/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai-gcp" -version = "1.6.0" +version = "1.8.0" description = "Python Client SDK for the Mistral AI API in GCP." 
authors = [{ name = "Mistral" }] requires-python = ">=3.10" @@ -62,6 +62,10 @@ ignore_missing_imports = true module = "jsonpath" ignore_missing_imports = true +[[tool.mypy.overrides]] +module = "google" +ignore_missing_imports = true + [tool.pyright] venvPath = "." venv = ".venv" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/mistralai_gcp/src/mistralai_gcp/_version.py index acd8086c..a170f0ab 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "1.7.0" +__version__: str = "1.8.0" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.687.13" -__user_agent__: str = "speakeasy-sdk/python 1.7.0 2.687.13 1.0.0 mistralai-gcp" +__gen_version__: str = "2.794.1" +__user_agent__: str = "speakeasy-sdk/python 1.8.0 2.794.1 1.0.0 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py index f22e2346..7a93de23 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py @@ -60,6 +60,7 @@ def _build_request_async( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.async_client return self._build_request_with_client( @@ -80,6 +81,7 @@ def _build_request_async( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request( @@ -102,6 +104,7 @@ def _build_request( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.client return self._build_request_with_client( @@ -122,6 +125,7 @@ def 
_build_request( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request_with_client( @@ -145,6 +149,7 @@ def _build_request_with_client( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: query_params = {} @@ -160,6 +165,7 @@ def _build_request_with_client( query_params = utils.get_query_params( request if request_has_query_params else None, _globals if request_has_query_params else None, + allow_empty_value, ) else: # Pick up the query parameter from the override so they can be diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/mistralai_gcp/src/mistralai_gcp/chat.py index 57b94eaf..3dd6040f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/chat.py @@ -6,7 +6,7 @@ from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Union class Chat(BaseSDK): @@ -23,6 +23,7 @@ def stream( stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -47,7 +48,7 @@ def stream( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: + ) -> eventstreaming.EventStream[models.CompletionEvent]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. 
Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -60,6 +61,7 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. 
@@ -92,6 +94,7 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model(messages, List[models.Messages]), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -126,6 +129,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -142,7 +146,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -186,6 +190,7 @@ async def stream_async( stream: Optional[bool] = True, stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -210,7 +215,7 @@ async def stream_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: r"""Stream chat completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -223,6 +228,7 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. 
If set, different calls will generate deterministic results. + :param metadata: :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. 
@@ -255,6 +261,7 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model(messages, List[models.Messages]), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -289,6 +296,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -305,7 +313,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -357,6 +365,7 @@ def complete( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -381,7 +390,7 @@ def complete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Chat Completion :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. @@ -392,6 +401,7 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. 
@@ -424,6 +434,7 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.ChatCompletionRequestMessages] ), @@ -460,6 +471,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -476,7 +488,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -486,9 +498,7 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - Optional[models.ChatCompletionResponse], http_res - ) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -522,6 +532,7 @@ async def complete_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, @@ -546,7 +557,7 @@ async def complete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.ChatCompletionResponse]: + ) -> models.ChatCompletionResponse: r"""Chat Completion :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. @@ -557,6 +568,7 @@ async def complete_async( :param stream: Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. 
@@ -589,6 +601,7 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, messages=utils.get_pydantic_model( messages, List[models.ChatCompletionRequestMessages] ), @@ -625,6 +638,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -641,7 +655,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -651,9 +665,7 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - Optional[models.ChatCompletionResponse], http_res - ) + return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/mistralai_gcp/src/mistralai_gcp/fim.py index 5909bf69..36d9fd60 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/fim.py @@ -6,7 +6,7 @@ from mistralai_gcp.types import OptionalNullable, UNSET from mistralai_gcp.utils import eventstreaming from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional, Union +from typing import Any, Dict, Mapping, Optional, Union class Fim(BaseSDK): @@ -28,13 +28,14 @@ def stream( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, 
server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStream[models.CompletionEvent]]: + ) -> eventstreaming.EventStream[models.CompletionEvent]: r"""Stream fim completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -47,6 +48,7 @@ def stream( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. 
:param retries: Override the default retry configuration for this method @@ -72,6 +74,7 @@ def stream( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, @@ -93,6 +96,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -109,7 +113,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -158,13 +162,14 @@ async def stream_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[eventstreaming.EventStreamAsync[models.CompletionEvent]]: + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: r"""Stream fim completion Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. @@ -177,6 +182,7 @@ async def stream_async( :param stream: :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. 
+ :param metadata: :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method @@ -202,6 +208,7 @@ async def stream_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, @@ -223,6 +230,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -239,7 +247,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -288,13 +296,14 @@ def complete( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.FIMCompletionResponse]: + ) -> models.FIMCompletionResponse: r"""Fim Completion FIM completion. @@ -307,6 +316,7 @@ def complete( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. :param retries: Override the default retry configuration for this method @@ -332,6 +342,7 @@ def complete( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, @@ -353,6 +364,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -369,7 +381,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -379,9 +391,7 @@ def complete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - Optional[models.FIMCompletionResponse], http_res - ) + return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -412,13 +422,14 @@ async def complete_async( ] ] = None, random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, suffix: OptionalNullable[str] = UNSET, min_tokens: OptionalNullable[int] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = 
None, http_headers: Optional[Mapping[str, str]] = None, - ) -> Optional[models.FIMCompletionResponse]: + ) -> models.FIMCompletionResponse: r"""Fim Completion FIM completion. @@ -431,6 +442,7 @@ async def complete_async( :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. :param min_tokens: The minimum number of tokens to generate in the completion. 
:param retries: Override the default retry configuration for this method @@ -456,6 +468,7 @@ async def complete_async( stream=stream, stop=stop, random_seed=random_seed, + metadata=metadata, prompt=prompt, suffix=suffix, min_tokens=min_tokens, @@ -477,6 +490,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -493,7 +507,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=self.sdk_configuration.security, ), request=req, @@ -503,9 +517,7 @@ async def complete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - Optional[models.FIMCompletionResponse], http_res - ) + return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py index 47b052cb..89560b56 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py @@ -107,7 +107,6 @@ def close_clients( # to them from the owning SDK instance and they can be reaped. 
owner.client = None owner.async_client = None - if sync_client is not None and not sync_client_supplied: try: sync_client.close() diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py index 794b8c80..17d740b6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py @@ -25,7 +25,7 @@ ) -AssistantMessageRole = Literal["assistant"] +AssistantMessageRole = Literal["assistant",] class AssistantMessageTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py index 9bcf1240..fe3ee952 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py @@ -3,14 +3,19 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from mistralai_gcp.types import BaseModel, UnrecognizedStr -from mistralai_gcp.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Literal, Union -from typing_extensions import Annotated, TypedDict +from typing_extensions import TypedDict ChatCompletionChoiceFinishReason = Union[ - Literal["stop", "length", "model_length", "error", "tool_calls"], UnrecognizedStr + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, ] @@ -25,6 +30,4 @@ class ChatCompletionChoice(BaseModel): message: AssistantMessage - finish_reason: Annotated[ - ChatCompletionChoiceFinishReason, PlainValidator(validate_open_enum(False)) - ] + finish_reason: ChatCompletionChoiceFinishReason diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py 
b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py index d693e3c3..80345f9d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py @@ -18,10 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator, validate_open_enum +from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -89,6 +88,7 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] @@ -134,6 +134,8 @@ class ChatCompletionRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" @@ -158,9 +160,7 @@ class ChatCompletionRequest(BaseModel): parallel_tool_calls: Optional[bool] = None r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = UNSET + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") @@ -172,6 +172,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -186,6 +187,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "tools", "n", "prompt_mode", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py index c2d25128..e857d515 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py @@ -18,10 +18,9 @@ UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator, validate_open_enum +from mistralai_gcp.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -84,6 +83,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] response_format: NotRequired[ResponseFormatTypedDict] r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" tools: NotRequired[Nullable[List[ToolTypedDict]]] @@ -128,6 +128,8 @@ class ChatCompletionStreamRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + response_format: Optional[ResponseFormat] = None r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" @@ -152,9 +154,7 @@ class ChatCompletionStreamRequest(BaseModel): parallel_tool_calls: Optional[bool] = None r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = UNSET + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") @@ -166,6 +166,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "response_format", "tools", "tool_choice", @@ -180,6 +181,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "tools", "n", "prompt_mode", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py index 1be7dbdc..ec9df528 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py @@ -3,14 +3,20 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr -from mistralai_gcp.utils import validate_open_enum from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator from typing import Literal, Union -from typing_extensions import Annotated, TypedDict +from typing_extensions import TypedDict -FinishReason = Union[Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr] +FinishReason = Union[ + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] class CompletionResponseStreamChoiceTypedDict(TypedDict): @@ -24,9 +30,7 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Annotated[ - Nullable[FinishReason], PlainValidator(validate_open_enum(False)) - ] + finish_reason: Nullable[FinishReason] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py index 12af226c..bcc97c90 
100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py @@ -9,7 +9,7 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict @@ -42,6 +42,7 @@ class FIMCompletionRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" min_tokens: NotRequired[Nullable[int]] @@ -73,6 +74,8 @@ class FIMCompletionRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" @@ -88,6 +91,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "suffix", "min_tokens", ] @@ -95,6 +99,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "suffix", "min_tokens", ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py index ba7a66d2..34d2ba65 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py @@ -9,7 +9,7 @@ UNSET_SENTINEL, ) from pydantic import model_serializer -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict @@ -41,6 +41,7 @@ class FIMCompletionStreamRequestTypedDict(TypedDict): r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" min_tokens: NotRequired[Nullable[int]] @@ -71,6 +72,8 @@ class FIMCompletionStreamRequest(BaseModel): random_seed: OptionalNullable[int] = UNSET r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: OptionalNullable[Dict[str, Any]] = UNSET + suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" @@ -86,6 +89,7 @@ def serialize_model(self, handler): "stream", "stop", "random_seed", + "metadata", "suffix", "min_tokens", ] @@ -93,6 +97,7 @@ def serialize_model(self, handler): "temperature", "max_tokens", "random_seed", + "metadata", "suffix", "min_tokens", ] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py index e1f11adc..79609351 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py @@ -2,6 +2,7 @@ from __future__ import annotations from .validationerror import ValidationError +from dataclasses import dataclass, field import httpx from mistralai_gcp.models import MistralGcpError from mistralai_gcp.types import BaseModel @@ -12,8 +13,9 @@ class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None +@dataclass(unsafe_hash=True) class HTTPValidationError(MistralGcpError): - data: HTTPValidationErrorData + data: HTTPValidationErrorData = field(hash=False) def __init__( self, @@ -23,4 +25,4 @@ def __init__( ): message = body or raw_response.text super().__init__(message, raw_response, body) - self.data = data + object.__setattr__(self, "data", data) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py index 1fc0b808..ddb53f21 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py @@ -15,7 +15,7 @@ ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) -ImageURLChunkType = Literal["image_url"] +ImageURLChunkType = Literal["image_url",] class ImageURLChunkTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py index a3c60cec..fec729a5 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py @@ -2,25 +2,29 @@ import httpx from typing import Optional +from dataclasses import dataclass, field +@dataclass(unsafe_hash=True) class MistralGcpError(Exception): """The base class for all HTTP error responses.""" message: str status_code: int body: str - headers: httpx.Headers - raw_response: httpx.Response + headers: httpx.Headers = field(hash=False) + raw_response: httpx.Response = field(hash=False) def __init__( self, message: str, raw_response: httpx.Response, body: Optional[str] = None ): - self.message = message - self.status_code = raw_response.status_code - self.body = body if body is not None else raw_response.text - self.headers = raw_response.headers - self.raw_response = raw_response + object.__setattr__(self, "message", message) + object.__setattr__(self, "status_code", raw_response.status_code) + object.__setattr__( + self, "body", body if body is not None else raw_response.text + ) + object.__setattr__(self, "headers", raw_response.headers) + object.__setattr__(self, "raw_response", raw_response) def __str__(self): return self.message diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py index 3f4de0fa..1440f6ea 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py +++ 
b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py @@ -5,4 +5,4 @@ from typing import Literal, Union -MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py b/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py index f98beea2..1deab64b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py @@ -1,12 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from dataclasses import dataclass + + +@dataclass(unsafe_hash=True) class NoResponseError(Exception): """Error raised when no HTTP response is received from the server.""" message: str def __init__(self, message: str = "No response received"): - self.message = message + object.__setattr__(self, "message", message) super().__init__(message) def __str__(self): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py index c4fa3b8b..904e8b82 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -ReferenceChunkType = Literal["reference"] +ReferenceChunkType = Literal["reference",] class ReferenceChunkTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py index 258fe70e..cbf83ce7 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py @@ -4,4 +4,8 @@ from typing import Literal -ResponseFormats = 
Literal["text", "json_object", "json_schema"] +ResponseFormats = Literal[ + "text", + "json_object", + "json_schema", +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py index 8d9b9f60..ebd4f214 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py @@ -2,10 +2,12 @@ import httpx from typing import Optional +from dataclasses import dataclass from mistralai_gcp.models import MistralGcpError +@dataclass(unsafe_hash=True) class ResponseValidationError(MistralGcpError): """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py index e85b4f49..7f53bbcd 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py @@ -2,12 +2,14 @@ import httpx from typing import Optional +from dataclasses import dataclass from mistralai_gcp.models import MistralGcpError MAX_MESSAGE_LEN = 10_000 +@dataclass(unsafe_hash=True) class SDKError(MistralGcpError): """The fallback error class if no more specific error class is matched.""" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py index e0fa6993..d74bdf32 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py @@ -21,7 +21,7 @@ ) -Role = Literal["system"] +Role = Literal["system",] class SystemMessageTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py index 
12f666cd..c4a8cf28 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -Type = Literal["text"] +Type = Literal["text",] class TextChunkTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py index 9c3010e2..b88c0cb5 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py @@ -16,7 +16,7 @@ Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) -ThinkChunkType = Literal["thinking"] +ThinkChunkType = Literal["thinking",] class ThinkChunkTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py index a1d477da..800de633 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py @@ -4,10 +4,8 @@ from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class ToolTypedDict(TypedDict): @@ -18,6 +16,4 @@ class ToolTypedDict(TypedDict): class Tool(BaseModel): function: Function - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) + type: Optional[ToolTypes] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py index ecbac8d6..23ef157a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py 
+++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py @@ -4,10 +4,8 @@ from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class ToolCallTypedDict(TypedDict): @@ -22,8 +20,6 @@ class ToolCall(BaseModel): id: Optional[str] = "null" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) + type: Optional[ToolTypes] = None index: Optional[int] = 0 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py index dc213e62..4a148330 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py @@ -4,10 +4,8 @@ from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class ToolChoiceTypedDict(TypedDict): @@ -24,6 +22,4 @@ class ToolChoice(BaseModel): function: FunctionName r"""this restriction of `Function` is used to select a specific function to call""" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) + type: Optional[ToolTypes] = None diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py index 8e6a6ad8..01f6f677 100644 --- 
a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py @@ -4,4 +4,9 @@ from typing import Literal -ToolChoiceEnum = Literal["auto", "none", "any", "required"] +ToolChoiceEnum = Literal[ + "auto", + "none", + "any", + "required", +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py index bd187b32..d6aa2621 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py @@ -22,7 +22,7 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool"] +ToolMessageRole = Literal["tool",] class ToolMessageTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py b/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py index 878444c6..8b812ae0 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py @@ -5,4 +5,4 @@ from typing import Literal, Union -ToolTypes = Union[Literal["function"], UnrecognizedStr] +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py index 1f9a1630..0168b452 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py @@ -16,7 +16,7 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user"] +UserMessageRole = Literal["user",] class UserMessageTypedDict(TypedDict): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py index 
231c2e37..a9a640a1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py @@ -2,7 +2,8 @@ from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union +from pydantic_core import core_schema +from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union from typing_extensions import TypeAliasType, TypeAlias @@ -35,5 +36,42 @@ def __bool__(self) -> Literal[False]: "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) ) -UnrecognizedInt: TypeAlias = int -UnrecognizedStr: TypeAlias = str + +class UnrecognizedStr(str): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedStr only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedStr': + if isinstance(v, cls): + return v + return cls(str(v)) + + # Use lax_or_strict_schema where strict always fails + # This forces Pydantic to prefer other union members in strict mode + # and only fall back to UnrecognizedStr in lax mode + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.str_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) + + +class UnrecognizedInt(int): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedInt only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedInt': + if isinstance(v, cls): + return v + return cls(int(v)) + return 
core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.int_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py index 56164cf3..05f26ade 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py @@ -41,7 +41,6 @@ validate_decimal, validate_float, validate_int, - validate_open_enum, ) from .url import generate_url, template_url, remove_suffix from .values import ( @@ -102,7 +101,6 @@ "validate_const", "validate_float", "validate_int", - "validate_open_enum", "cast_partial", ] @@ -155,7 +153,6 @@ "validate_const": ".serializers", "validate_float": ".serializers", "validate_int": ".serializers", - "validate_open_enum": ".serializers", "cast_partial": ".values", } diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py index 387874ed..12e0aa4f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py @@ -3,6 +3,7 @@ from enum import Enum from typing import Any, Optional + def get_discriminator(model: Any, fieldname: str, key: str) -> str: """ Recursively search for the discriminator attribute in a model. 
@@ -25,31 +26,54 @@ def get_field_discriminator(field: Any) -> Optional[str]: if isinstance(field, dict): if key in field: - return f'{field[key]}' + return f"{field[key]}" if hasattr(field, fieldname): attr = getattr(field, fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" if hasattr(field, upper_fieldname): attr = getattr(field, upper_fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" return None + def search_nested_discriminator(obj: Any) -> Optional[str]: + """Recursively search for discriminator in nested structures.""" + # First try direct field lookup + discriminator = get_field_discriminator(obj) + if discriminator is not None: + return discriminator + + # If it's a dict, search in nested values + if isinstance(obj, dict): + for value in obj.values(): + if isinstance(value, list): + # Search in list items + for item in value: + nested_discriminator = search_nested_discriminator(item) + if nested_discriminator is not None: + return nested_discriminator + elif isinstance(value, dict): + # Search in nested dict + nested_discriminator = search_nested_discriminator(value) + if nested_discriminator is not None: + return nested_discriminator + + return None if isinstance(model, list): for field in model: - discriminator = get_field_discriminator(field) + discriminator = search_nested_discriminator(field) if discriminator is not None: return discriminator - discriminator = get_field_discriminator(model) + discriminator = search_nested_discriminator(model) if discriminator is not None: return discriminator - raise ValueError(f'Could not find discriminator field {fieldname} in {model}') + raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py index c3bc13cf..3324e1bc 
100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py @@ -2,6 +2,10 @@ import enum import sys +from typing import Any + +from pydantic_core import core_schema + class OpenEnumMeta(enum.EnumMeta): # The __call__ method `boundary` kwarg was added in 3.11 and must be present @@ -72,3 +76,59 @@ def __call__( ) except ValueError: return value + + def __new__(mcs, name, bases, namespace, **kwargs): + cls = super().__new__(mcs, name, bases, namespace, **kwargs) + + # Add __get_pydantic_core_schema__ to make open enums work correctly + # in union discrimination. In strict mode (used by Pydantic for unions), + # only known enum values match. In lax mode, unknown values are accepted. + def __get_pydantic_core_schema__( + cls_inner: Any, _source_type: Any, _handler: Any + ) -> core_schema.CoreSchema: + # Create a validator that only accepts known enum values (for strict mode) + def validate_strict(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + # Use the parent EnumMeta's __call__ which raises ValueError for unknown values + return enum.EnumMeta.__call__(cls_inner, v) + + # Create a lax validator that accepts unknown values + def validate_lax(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + try: + return enum.EnumMeta.__call__(cls_inner, v) + except ValueError: + # Return the raw value for unknown enum values + return v + + # Determine the base type schema (str or int) + is_int_enum = False + for base in cls_inner.__mro__: + if base is int: + is_int_enum = True + break + if base is str: + break + + base_schema = ( + core_schema.int_schema() + if is_int_enum + else core_schema.str_schema() + ) + + # Use lax_or_strict_schema: + # - strict mode: only known enum values match (raises ValueError for unknown) + # - lax mode: accept any value, return enum member or raw value + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema( + [base_schema, 
core_schema.no_info_plain_validator_function(validate_lax)] + ), + strict_schema=core_schema.chain_schema( + [base_schema, core_schema.no_info_plain_validator_function(validate_strict)] + ), + ) + + setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__)) + return cls diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py index e873495f..f961e76b 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py @@ -142,16 +142,21 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files + array_field_name = f_name + "[]" for file_obj in val: if not _is_set(file_obj): continue - - file_name, content, content_type = _extract_file_properties(file_obj) + + file_name, content, content_type = _extract_file_properties( + file_obj + ) if content_type is not None: - files.append((f_name + "[]", (file_name, content, content_type))) + files.append( + (array_field_name, (file_name, content, content_type)) + ) else: - files.append((f_name + "[]", (file_name, content))) + files.append((array_field_name, (file_name, content))) else: # Handle single file file_name, content, content_type = _extract_file_properties(val) @@ -161,11 +166,16 @@ def serialize_multipart_form( else: files.append((f_name, (file_name, content))) elif field_metadata.json: - files.append((f_name, ( - None, - marshal_json(val, request_field_types[name]), - "application/json", - ))) + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) else: if isinstance(val, List): values = [] @@ -175,7 +185,8 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - form[f_name + "[]"] = values + array_field_name = f_name + "[]" + form[array_field_name] = values else: form[f_name] = _val_to_string(val) return 
media_type, form, files diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py index 37a6e7f9..c04e0db8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py @@ -27,12 +27,13 @@ def get_query_params( query_params: Any, gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, ) -> Dict[str, List[str]]: params: Dict[str, List[str]] = {} - globals_already_populated = _populate_query_params(query_params, gbls, params, []) + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated) + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) return params @@ -42,6 +43,7 @@ def _populate_query_params( gbls: Any, query_param_values: Dict[str, List[str]], skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, ) -> List[str]: globals_already_populated: List[str] = [] @@ -69,6 +71,16 @@ def _populate_query_params( globals_already_populated.append(name) f_name = field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == [] or value == "" + ) + + if should_include_empty: + query_param_values[f_name] = [""] + continue + serialization = metadata.serialization if serialization is not None: serialized_parms = _get_serialized_params( diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py index d5240dd5..1de32b6d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py @@ -44,15 +44,15 @@ def serialize_request_body( 
serialized_request_body = SerializedRequestBody(media_type) - if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None: + if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None: serialized_request_body.content = marshal_json(request_body, request_body_type) - elif re.match(r"multipart\/.*", media_type) is not None: + elif re.match(r"^multipart\/.*", media_type) is not None: ( serialized_request_body.media_type, serialized_request_body.data, serialized_request_body.files, ) = serialize_multipart_form(media_type, request_body) - elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None: + elif re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None: serialized_request_body.data = serialize_form_data(request_body) elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): serialized_request_body.content = request_body diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py index 4d608671..88a91b10 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py @@ -3,7 +3,9 @@ import asyncio import random import time -from typing import List +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional import httpx @@ -51,9 +53,11 @@ def __init__(self, config: RetryConfig, status_codes: List[str]): class TemporaryError(Exception): response: httpx.Response + retry_after: Optional[int] def __init__(self, response: httpx.Response): self.response = response + self.retry_after = _parse_retry_after_header(response) class PermanentError(Exception): @@ -63,6 +67,62 @@ def __init__(self, inner: Exception): self.inner = inner +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. 
+ + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. + """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. 
+ """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + def retry(func, retries: Retries): if retries.config.strategy == "backoff": @@ -183,8 +243,10 @@ def retry_with_backoff( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) time.sleep(sleep) retries += 1 @@ -211,7 +273,9 @@ async def retry_with_backoff_async( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) await asyncio.sleep(sleep) retries += 1 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py index 378a14c0..14321eb4 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py @@ -102,26 +102,6 @@ def validate_int(b): return int(b) -def validate_open_enum(is_int: bool): - def validate(e): - if e is None: - return None - - if isinstance(e, Unset): - return e - - if is_int: - if not isinstance(e, int): - raise ValueError("Expected int") - else: - if not isinstance(e, str): - raise ValueError("Expected string") - - return e - - return validate - - def validate_const(v): def validate(c): # Optional[T] is a Union[T, None] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py b/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py index 8fe5c996..c168a293 100644 
--- a/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py @@ -1,12 +1,26 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from typing import Any, Optional +from typing import Any, Optional, Type, TypeVar, overload import httpx from .serializers import unmarshal_json from mistralai_gcp import models +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... + def unmarshal_json_response( typ: Any, http_res: httpx.Response, body: Optional[str] = None diff --git a/packages/mistralai_gcp/uv.lock b/packages/mistralai_gcp/uv.lock index afd17643..4fbca724 100644 --- a/packages/mistralai_gcp/uv.lock +++ b/packages/mistralai_gcp/uv.lock @@ -4,7 +4,7 @@ requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.12'", "python_full_version == '3.11.*'", - "python_full_version == '3.10.*'", + "python_full_version < '3.11'", ] [[package]] @@ -146,22 +146,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, { url = 
"https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, - { url = "https://files.pythonhosted.org/packages/46/7c/0c4760bccf082737ca7ab84a4c2034fcc06b1f21cf3032ea98bd6feb1725/charset_normalizer-3.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a9768c477b9d7bd54bc0c86dbaebdec6f03306675526c9927c0e8a04e8f94af9", size = 209609, upload-time = "2025-10-14T04:42:10.922Z" }, - { url = "https://files.pythonhosted.org/packages/bb/a4/69719daef2f3d7f1819de60c9a6be981b8eeead7542d5ec4440f3c80e111/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bee1e43c28aa63cb16e5c14e582580546b08e535299b8b6158a7c9c768a1f3d", size = 149029, upload-time = "2025-10-14T04:42:12.38Z" }, - { url = "https://files.pythonhosted.org/packages/e6/21/8d4e1d6c1e6070d3672908b8e4533a71b5b53e71d16828cc24d0efec564c/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:fd44c878ea55ba351104cb93cc85e74916eb8fa440ca7903e57575e97394f608", size = 144580, upload-time = "2025-10-14T04:42:13.549Z" }, - { url = "https://files.pythonhosted.org/packages/a7/0a/a616d001b3f25647a9068e0b9199f697ce507ec898cacb06a0d5a1617c99/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0f04b14ffe5fdc8c4933862d8306109a2c51e0704acfa35d51598eb45a1e89fc", size = 162340, upload-time = "2025-10-14T04:42:14.892Z" }, - { url = 
"https://files.pythonhosted.org/packages/85/93/060b52deb249a5450460e0585c88a904a83aec474ab8e7aba787f45e79f2/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:cd09d08005f958f370f539f186d10aec3377d55b9eeb0d796025d4886119d76e", size = 159619, upload-time = "2025-10-14T04:42:16.676Z" }, - { url = "https://files.pythonhosted.org/packages/dd/21/0274deb1cc0632cd587a9a0ec6b4674d9108e461cb4cd40d457adaeb0564/charset_normalizer-3.4.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4fe7859a4e3e8457458e2ff592f15ccb02f3da787fcd31e0183879c3ad4692a1", size = 153980, upload-time = "2025-10-14T04:42:17.917Z" }, - { url = "https://files.pythonhosted.org/packages/28/2b/e3d7d982858dccc11b31906976323d790dded2017a0572f093ff982d692f/charset_normalizer-3.4.4-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fa09f53c465e532f4d3db095e0c55b615f010ad81803d383195b6b5ca6cbf5f3", size = 152174, upload-time = "2025-10-14T04:42:19.018Z" }, - { url = "https://files.pythonhosted.org/packages/6e/ff/4a269f8e35f1e58b2df52c131a1fa019acb7ef3f8697b7d464b07e9b492d/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fa17817dc5625de8a027cb8b26d9fefa3ea28c8253929b8d6649e705d2835b6", size = 151666, upload-time = "2025-10-14T04:42:20.171Z" }, - { url = "https://files.pythonhosted.org/packages/da/c9/ec39870f0b330d58486001dd8e532c6b9a905f5765f58a6f8204926b4a93/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:5947809c8a2417be3267efc979c47d76a079758166f7d43ef5ae8e9f92751f88", size = 145550, upload-time = "2025-10-14T04:42:21.324Z" }, - { url = 
"https://files.pythonhosted.org/packages/75/8f/d186ab99e40e0ed9f82f033d6e49001701c81244d01905dd4a6924191a30/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:4902828217069c3c5c71094537a8e623f5d097858ac6ca8252f7b4d10b7560f1", size = 163721, upload-time = "2025-10-14T04:42:22.46Z" }, - { url = "https://files.pythonhosted.org/packages/96/b1/6047663b9744df26a7e479ac1e77af7134b1fcf9026243bb48ee2d18810f/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:7c308f7e26e4363d79df40ca5b2be1c6ba9f02bdbccfed5abddb7859a6ce72cf", size = 152127, upload-time = "2025-10-14T04:42:23.712Z" }, - { url = "https://files.pythonhosted.org/packages/59/78/e5a6eac9179f24f704d1be67d08704c3c6ab9f00963963524be27c18ed87/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:2c9d3c380143a1fedbff95a312aa798578371eb29da42106a29019368a475318", size = 161175, upload-time = "2025-10-14T04:42:24.87Z" }, - { url = "https://files.pythonhosted.org/packages/e5/43/0e626e42d54dd2f8dd6fc5e1c5ff00f05fbca17cb699bedead2cae69c62f/charset_normalizer-3.4.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:cb01158d8b88ee68f15949894ccc6712278243d95f344770fa7593fa2d94410c", size = 155375, upload-time = "2025-10-14T04:42:27.246Z" }, - { url = "https://files.pythonhosted.org/packages/e9/91/d9615bf2e06f35e4997616ff31248c3657ed649c5ab9d35ea12fce54e380/charset_normalizer-3.4.4-cp39-cp39-win32.whl", hash = "sha256:2677acec1a2f8ef614c6888b5b4ae4060cc184174a938ed4e8ef690e15d3e505", size = 99692, upload-time = "2025-10-14T04:42:28.425Z" }, - { url = 
"https://files.pythonhosted.org/packages/d1/a9/6c040053909d9d1ef4fcab45fddec083aedc9052c10078339b47c8573ea8/charset_normalizer-3.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:f8e160feb2aed042cd657a72acc0b481212ed28b1b9a95c0cee1621b524e1966", size = 107192, upload-time = "2025-10-14T04:42:29.482Z" }, - { url = "https://files.pythonhosted.org/packages/f0/c6/4fa536b2c0cd3edfb7ccf8469fa0f363ea67b7213a842b90909ca33dd851/charset_normalizer-3.4.4-cp39-cp39-win_arm64.whl", hash = "sha256:b5d84d37db046c5ca74ee7bb47dd6cbc13f80665fdde3e8040bdd3fb015ecb50", size = 100220, upload-time = "2025-10-14T04:42:30.632Z" }, { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, ] @@ -264,26 +248,10 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" }, ] -[[package]] -name = "iniconfig" -version = "2.1.0" -source = { registry = "https://pypi.org/simple" } -resolution-markers = [ -] -sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, -] - [[package]] name = "iniconfig" version = "2.3.0" source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.12'", - "python_full_version == '3.11.*'", - "python_full_version == '3.10.*'", -] sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, @@ -309,7 +277,7 @@ wheels = [ [[package]] name = "mistralai-gcp" -version = "1.6.0" +version = "1.8.0" source = { editable = "." 
} dependencies = [ { name = "eval-type-backport" }, @@ -325,6 +293,7 @@ dependencies = [ dev = [ { name = "mypy" }, { name = "pylint" }, + { name = "pyright" }, { name = "pytest" }, { name = "pytest-asyncio" }, { name = "types-python-dateutil" }, @@ -345,6 +314,7 @@ requires-dist = [ dev = [ { name = "mypy", specifier = "==1.14.1" }, { name = "pylint", specifier = "==3.2.3" }, + { name = "pyright", specifier = ">=1.1.401,<2" }, { name = "pytest", specifier = ">=8.2.2,<9" }, { name = "pytest-asyncio", specifier = ">=0.23.7,<0.24" }, { name = "types-python-dateutil", specifier = ">=2.9.0.20240316,<3" }, @@ -385,12 +355,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660, upload-time = "2024-12-30T16:38:17.342Z" }, { url = "https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198, upload-time = "2024-12-30T16:38:32.839Z" }, { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276, upload-time = "2024-12-30T16:38:20.828Z" }, - { url = "https://files.pythonhosted.org/packages/ca/1f/186d133ae2514633f8558e78cd658070ba686c0e9275c5a5c24a1e1f0d67/mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35", size = 11200493, upload-time = "2024-12-30T16:38:26.935Z" }, - { url = "https://files.pythonhosted.org/packages/af/fc/4842485d034e38a4646cccd1369f6b1ccd7bc86989c52770d75d719a9941/mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc", size = 10357702, upload-time = "2024-12-30T16:38:50.623Z" }, - { url = "https://files.pythonhosted.org/packages/b4/e6/457b83f2d701e23869cfec013a48a12638f75b9d37612a9ddf99072c1051/mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9", size = 12091104, upload-time = "2024-12-30T16:38:53.735Z" }, - { url = "https://files.pythonhosted.org/packages/f1/bf/76a569158db678fee59f4fd30b8e7a0d75bcbaeef49edd882a0d63af6d66/mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb", size = 12830167, upload-time = "2024-12-30T16:38:56.437Z" }, - { url = "https://files.pythonhosted.org/packages/43/bc/0bc6b694b3103de9fed61867f1c8bd33336b913d16831431e7cb48ef1c92/mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60", size = 13013834, upload-time = "2024-12-30T16:38:59.204Z" }, - { url = "https://files.pythonhosted.org/packages/b0/79/5f5ec47849b6df1e6943d5fd8e6632fbfc04b4fd4acfa5a5a9535d11b4e2/mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c", size = 9781231, upload-time = "2024-12-30T16:39:05.124Z" }, { url = 
"https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905, upload-time = "2024-12-30T16:38:42.021Z" }, ] @@ -404,34 +368,27 @@ wheels = [ ] [[package]] -name = "packaging" -version = "25.0" +name = "nodeenv" +version = "1.10.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, + { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, ] [[package]] -name = "platformdirs" -version = "4.4.0" +name = "packaging" +version = "25.0" source = { 
registry = "https://pypi.org/simple" } -resolution-markers = [ -] -sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size = 21634, upload-time = "2025-08-26T14:32:04.268Z" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/40/4b/2028861e724d3bd36227adfa20d3fd24c3fc6d52032f4a93c133be5d17ce/platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85", size = 18654, upload-time = "2025-08-26T14:32:02.735Z" }, + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] [[package]] name = "platformdirs" version = "4.5.1" source = { registry = "https://pypi.org/simple" } -resolution-markers = [ - "python_full_version >= '3.12'", - "python_full_version == '3.11.*'", - "python_full_version == '3.10.*'", -] sdist = { url = "https://files.pythonhosted.org/packages/cf/86/0248f086a84f01b37aaec0fa567b397df1a119f73c16f6c7a9aac73ea309/platformdirs-4.5.1.tar.gz", hash = 
"sha256:61d5cdcc6065745cdd94f0f878977f8de9437be93de97c1c12f853c9c0cdcbda", size = 21715, upload-time = "2025-12-05T13:52:58.638Z" } wheels = [ { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, @@ -574,19 +531,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" }, { url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" }, { url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" }, - { url = "https://files.pythonhosted.org/packages/54/db/160dffb57ed9a3705c4cbcbff0ac03bdae45f1ca7d58ab74645550df3fbd/pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf", size = 2107999, upload-time = "2025-11-04T13:42:03.885Z" }, - { url = 
"https://files.pythonhosted.org/packages/a3/7d/88e7de946f60d9263cc84819f32513520b85c0f8322f9b8f6e4afc938383/pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5", size = 1929745, upload-time = "2025-11-04T13:42:06.075Z" }, - { url = "https://files.pythonhosted.org/packages/d5/c2/aef51e5b283780e85e99ff19db0f05842d2d4a8a8cd15e63b0280029b08f/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d", size = 1920220, upload-time = "2025-11-04T13:42:08.457Z" }, - { url = "https://files.pythonhosted.org/packages/c7/97/492ab10f9ac8695cd76b2fdb24e9e61f394051df71594e9bcc891c9f586e/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60", size = 2067296, upload-time = "2025-11-04T13:42:10.817Z" }, - { url = "https://files.pythonhosted.org/packages/ec/23/984149650e5269c59a2a4c41d234a9570adc68ab29981825cfaf4cfad8f4/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82", size = 2231548, upload-time = "2025-11-04T13:42:13.843Z" }, - { url = "https://files.pythonhosted.org/packages/71/0c/85bcbb885b9732c28bec67a222dbed5ed2d77baee1f8bba2002e8cd00c5c/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5", size = 2362571, upload-time = "2025-11-04T13:42:16.208Z" }, - { url = 
"https://files.pythonhosted.org/packages/c0/4a/412d2048be12c334003e9b823a3fa3d038e46cc2d64dd8aab50b31b65499/pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3", size = 2068175, upload-time = "2025-11-04T13:42:18.911Z" }, - { url = "https://files.pythonhosted.org/packages/73/f4/c58b6a776b502d0a5540ad02e232514285513572060f0d78f7832ca3c98b/pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425", size = 2177203, upload-time = "2025-11-04T13:42:22.578Z" }, - { url = "https://files.pythonhosted.org/packages/ed/ae/f06ea4c7e7a9eead3d165e7623cd2ea0cb788e277e4f935af63fc98fa4e6/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504", size = 2148191, upload-time = "2025-11-04T13:42:24.89Z" }, - { url = "https://files.pythonhosted.org/packages/c1/57/25a11dcdc656bf5f8b05902c3c2934ac3ea296257cc4a3f79a6319e61856/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5", size = 2343907, upload-time = "2025-11-04T13:42:27.683Z" }, - { url = "https://files.pythonhosted.org/packages/96/82/e33d5f4933d7a03327c0c43c65d575e5919d4974ffc026bc917a5f7b9f61/pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3", size = 2322174, upload-time = "2025-11-04T13:42:30.776Z" }, - { url = 
"https://files.pythonhosted.org/packages/81/45/4091be67ce9f469e81656f880f3506f6a5624121ec5eb3eab37d7581897d/pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460", size = 1990353, upload-time = "2025-11-04T13:42:33.111Z" }, - { url = "https://files.pythonhosted.org/packages/44/8a/a98aede18db6e9cd5d66bcacd8a409fcf8134204cdede2e7de35c5a2c5ef/pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b", size = 2015698, upload-time = "2025-11-04T13:42:35.484Z" }, { url = "https://files.pythonhosted.org/packages/11/72/90fda5ee3b97e51c494938a4a44c3a35a9c96c19bba12372fb9c634d6f57/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034", size = 2115441, upload-time = "2025-11-04T13:42:39.557Z" }, { url = "https://files.pythonhosted.org/packages/1f/53/8942f884fa33f50794f119012dc6a1a02ac43a56407adaac20463df8e98f/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c", size = 1930291, upload-time = "2025-11-04T13:42:42.169Z" }, { url = "https://files.pythonhosted.org/packages/79/c8/ecb9ed9cd942bce09fc888ee960b52654fbdbede4ba6c2d6e0d3b1d8b49c/pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2", size = 1948632, upload-time = "2025-11-04T13:42:44.564Z" }, @@ -632,7 +576,7 @@ dependencies = [ { name = "dill" }, { name = "isort" }, { name = "mccabe" }, - { name = "platformdirs", version = "4.5.1", 
source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.10'" }, + { name = "platformdirs" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "tomlkit" }, ] @@ -641,6 +585,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/50/d3/d346f779cbc9384d8b805a7557b5f2b8ee9f842bffebec9fc6364d6ae183/pylint-3.2.3-py3-none-any.whl", hash = "sha256:b3d7d2708a3e04b4679e02d99e72329a8b7ee8afb8d04110682278781f889fa8", size = 519244, upload-time = "2024-06-06T14:19:13.228Z" }, ] +[[package]] +name = "pyright" +version = "1.1.408" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nodeenv" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/b2/5db700e52554b8f025faa9c3c624c59f1f6c8841ba81ab97641b54322f16/pyright-1.1.408.tar.gz", hash = "sha256:f28f2321f96852fa50b5829ea492f6adb0e6954568d1caa3f3af3a5f555eb684", size = 4400578, upload-time = "2026-01-08T08:07:38.795Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/82/a2c93e32800940d9573fb28c346772a14778b84ba7524e691b324620ab89/pyright-1.1.408-py3-none-any.whl", hash = "sha256:090b32865f4fdb1e0e6cd82bf5618480d48eecd2eb2e70f960982a3d9a4c17c1", size = 6399144, upload-time = "2026-01-08T08:07:37.082Z" }, +] + [[package]] name = "pytest" version = "8.4.2" @@ -648,7 +605,7 @@ source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, { name = "exceptiongroup", marker = "python_full_version < '3.11'" }, - { name = "iniconfig", version = "2.3.0", source = { registry = "https://pypi.org/simple" }, marker = 
"python_full_version >= '3.10'" }, + { name = "iniconfig" }, { name = "packaging" }, { name = "pluggy" }, { name = "pygments" }, diff --git a/pyproject.toml b/pyproject.toml index 52aef0bb..2f58565d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.10.1" +version = "1.11.0" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index c9b53294..8c26c0ce 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.10.1" +__version__: str = "1.11.0" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.687.13" -__user_agent__: str = "speakeasy-sdk/python 1.10.1 2.687.13 1.0.0 mistralai" +__gen_version__: str = "2.794.1" +__user_agent__: str = "speakeasy-sdk/python 1.11.0 2.794.1 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/accesses.py b/src/mistralai/accesses.py index dd8ffade..be02ee5b 100644 --- a/src/mistralai/accesses.py +++ b/src/mistralai/accesses.py @@ -3,6 +3,10 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + entitytype as models_entitytype, + shareenum as models_shareenum, +) from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -58,6 +62,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -74,7 +79,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, 
security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -147,6 +152,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -163,7 +169,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -194,9 +200,9 @@ def update_or_create( self, *, library_id: str, - level: models.ShareEnum, + level: models_shareenum.ShareEnum, share_with_uuid: str, - share_with_type: models.EntityType, + share_with_type: models_entitytype.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -253,6 +259,7 @@ def update_or_create( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_in, False, False, "json", models.SharingIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -269,7 +276,7 @@ def update_or_create( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -300,9 +307,9 @@ async def update_or_create_async( self, *, library_id: str, - level: models.ShareEnum, + level: models_shareenum.ShareEnum, share_with_uuid: str, - share_with_type: models.EntityType, + share_with_type: models_entitytype.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -359,6 +366,7 @@ async def update_or_create_async( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_in, False, False, "json", models.SharingIn ), + allow_empty_value=None, 
timeout_ms=timeout_ms, ) @@ -375,7 +383,7 @@ async def update_or_create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -407,7 +415,7 @@ def delete( *, library_id: str, share_with_uuid: str, - share_with_type: models.EntityType, + share_with_type: models_entitytype.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -462,6 +470,7 @@ def delete( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_delete, False, False, "json", models.SharingDelete ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -478,7 +487,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -510,7 +519,7 @@ async def delete_async( *, library_id: str, share_with_uuid: str, - share_with_type: models.EntityType, + share_with_type: models_entitytype.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -565,6 +574,7 @@ async def delete_async( get_serialized_body=lambda: utils.serialize_request_body( request.sharing_delete, False, False, "json", models.SharingDelete ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -581,7 +591,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_share_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py index 173921fa..73e4ee3c 100644 --- a/src/mistralai/agents.py +++ 
b/src/mistralai/agents.py @@ -3,6 +3,14 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + agentscompletionrequest as models_agentscompletionrequest, + agentscompletionstreamrequest as models_agentscompletionstreamrequest, + mistralpromptmode as models_mistralpromptmode, + prediction as models_prediction, + responseformat as models_responseformat, + tool as models_tool, +) from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -16,40 +24,47 @@ def complete( self, *, messages: Union[ - List[models.AgentsCompletionRequestMessages], - List[models.AgentsCompletionRequestMessagesTypedDict], + List[models_agentscompletionrequest.AgentsCompletionRequestMessages], + List[ + models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict + ], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ - models.AgentsCompletionRequestStop, - models.AgentsCompletionRequestStopTypedDict, + models_agentscompletionrequest.AgentsCompletionRequestStop, + models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] = None, tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models.AgentsCompletionRequestToolChoice, - models.AgentsCompletionRequestToolChoiceTypedDict, + models_agentscompletionrequest.AgentsCompletionRequestToolChoice, + 
models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -131,6 +146,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -147,7 +163,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -178,40 +194,47 @@ async def complete_async( self, *, messages: Union[ - List[models.AgentsCompletionRequestMessages], - List[models.AgentsCompletionRequestMessagesTypedDict], + List[models_agentscompletionrequest.AgentsCompletionRequestMessages], + List[ + models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict + ], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ - models.AgentsCompletionRequestStop, - models.AgentsCompletionRequestStopTypedDict, + models_agentscompletionrequest.AgentsCompletionRequestStop, + models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - 
Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] = None, tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models.AgentsCompletionRequestToolChoice, - models.AgentsCompletionRequestToolChoiceTypedDict, + models_agentscompletionrequest.AgentsCompletionRequestToolChoice, + models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -293,6 +316,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -309,7 +333,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_completion_v1_agents_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -340,40 +364,49 @@ def stream( self, *, messages: Union[ - List[models.AgentsCompletionStreamRequestMessages], - List[models.AgentsCompletionStreamRequestMessagesTypedDict], + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages + 
], + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict + ], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ - models.AgentsCompletionStreamRequestStop, - models.AgentsCompletionStreamRequestStopTypedDict, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] = None, tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models.AgentsCompletionStreamRequestToolChoice, - models.AgentsCompletionStreamRequestToolChoiceTypedDict, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -457,6 +490,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", 
models.AgentsCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -473,7 +507,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_agents", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -512,40 +546,49 @@ async def stream_async( self, *, messages: Union[ - List[models.AgentsCompletionStreamRequestMessages], - List[models.AgentsCompletionStreamRequestMessagesTypedDict], + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages + ], + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict + ], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ - models.AgentsCompletionStreamRequestStop, - models.AgentsCompletionStreamRequestStopTypedDict, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] = None, tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models.AgentsCompletionStreamRequestToolChoice, - models.AgentsCompletionStreamRequestToolChoiceTypedDict, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, 
prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -629,6 +672,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentsCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -645,7 +689,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_agents", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py index 6b62ddae..c9a32aa1 100644 --- a/src/mistralai/basesdk.py +++ b/src/mistralai/basesdk.py @@ -60,6 +60,7 @@ def _build_request_async( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.async_client return self._build_request_with_client( @@ -80,6 +81,7 @@ def _build_request_async( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request( @@ -102,6 +104,7 @@ def _build_request( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: client = self.sdk_configuration.client return self._build_request_with_client( @@ -122,6 +125,7 @@ def _build_request( get_serialized_body, url_override, http_headers, + allow_empty_value, ) def _build_request_with_client( @@ 
-145,6 +149,7 @@ def _build_request_with_client( ] = None, url_override: Optional[str] = None, http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, ) -> httpx.Request: query_params = {} @@ -160,6 +165,7 @@ def _build_request_with_client( query_params = utils.get_query_params( request if request_has_query_params else None, _globals if request_has_query_params else None, + allow_empty_value, ) else: # Pick up the query parameter from the override so they can be diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 6a8058f7..1528c4c9 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -3,6 +3,14 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + chatcompletionrequest as models_chatcompletionrequest, + chatcompletionstreamrequest as models_chatcompletionstreamrequest, + mistralpromptmode as models_mistralpromptmode, + prediction as models_prediction, + responseformat as models_responseformat, + tool as models_tool, +) from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -98,34 +106,47 @@ def complete( self, *, model: str, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + messages: Union[ + List[models_chatcompletionrequest.Messages], + List[models_chatcompletionrequest.MessagesTypedDict], + ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + stop: Optional[ + Union[ + models_chatcompletionrequest.Stop, + models_chatcompletionrequest.StopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: 
Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] = None, tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models.ChatCompletionRequestToolChoice, - models.ChatCompletionRequestToolChoiceTypedDict, + models_chatcompletionrequest.ChatCompletionRequestToolChoice, + models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -212,6 +233,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -228,7 +250,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -259,34 +281,47 @@ async def complete_async( self, *, model: str, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + messages: Union[ + List[models_chatcompletionrequest.Messages], + List[models_chatcompletionrequest.MessagesTypedDict], + ], temperature: 
OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + stop: Optional[ + Union[ + models_chatcompletionrequest.Stop, + models_chatcompletionrequest.StopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] = None, tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models.ChatCompletionRequestToolChoice, - models.ChatCompletionRequestToolChoiceTypedDict, + models_chatcompletionrequest.ChatCompletionRequestToolChoice, + models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -373,6 +408,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -389,7 +425,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", 
operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -421,8 +457,12 @@ def stream( *, model: str, messages: Union[ - List[models.ChatCompletionStreamRequestMessages], - List[models.ChatCompletionStreamRequestMessagesTypedDict], + List[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages + ], + List[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict + ], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -430,32 +470,37 @@ def stream( stream: Optional[bool] = True, stop: Optional[ Union[ - models.ChatCompletionStreamRequestStop, - models.ChatCompletionStreamRequestStopTypedDict, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] = None, tools: OptionalNullable[ - Union[List[models.Tool], List[models.ToolTypedDict]] + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models.ChatCompletionStreamRequestToolChoice, - models.ChatCompletionStreamRequestToolChoiceTypedDict, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] + Union[models_prediction.Prediction, 
models_prediction.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -546,6 +591,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -562,7 +608,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -602,8 +648,12 @@ async def stream_async( *, model: str, messages: Union[ - List[models.ChatCompletionStreamRequestMessages], - List[models.ChatCompletionStreamRequestMessagesTypedDict], + List[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages + ], + List[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict + ], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -611,32 +661,37 @@ async def stream_async( stream: Optional[bool] = True, stop: Optional[ Union[ - models.ChatCompletionStreamRequestStop, - models.ChatCompletionStreamRequestStopTypedDict, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] = None, tools: OptionalNullable[ - Union[List[models.Tool], 
List[models.ToolTypedDict]] + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models.ChatCompletionStreamRequestToolChoice, - models.ChatCompletionStreamRequestToolChoiceTypedDict, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models.Prediction, models.PredictionTypedDict] + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -727,6 +782,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -743,7 +799,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_chat", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py index a6b87940..7c32506e 100644 --- a/src/mistralai/classifiers.py +++ b/src/mistralai/classifiers.py @@ -3,6 +3,11 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + chatmoderationrequest as models_chatmoderationrequest, + classificationrequest as models_classificationrequest, + inputs as models_inputs, +) from mistralai.types 
import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -17,8 +22,8 @@ def moderate( *, model: str, inputs: Union[ - models.ClassificationRequestInputs, - models.ClassificationRequestInputsTypedDict, + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -68,6 +73,7 @@ def moderate( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -84,7 +90,7 @@ def moderate( config=self.sdk_configuration, base_url=base_url or "", operation_id="moderations_v1_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -116,8 +122,8 @@ async def moderate_async( *, model: str, inputs: Union[ - models.ClassificationRequestInputs, - models.ClassificationRequestInputsTypedDict, + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -167,6 +173,7 @@ async def moderate_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -183,7 +190,7 @@ async def moderate_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="moderations_v1_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -214,8 +221,8 @@ def moderate_chat( self, *, inputs: Union[ - 
models.ChatModerationRequestInputs, - models.ChatModerationRequestInputsTypedDict, + models_chatmoderationrequest.ChatModerationRequestInputs, + models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -263,6 +270,7 @@ def moderate_chat( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatModerationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -279,7 +287,7 @@ def moderate_chat( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_moderations_v1_chat_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -310,8 +318,8 @@ async def moderate_chat_async( self, *, inputs: Union[ - models.ChatModerationRequestInputs, - models.ChatModerationRequestInputsTypedDict, + models_chatmoderationrequest.ChatModerationRequestInputs, + models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -359,6 +367,7 @@ async def moderate_chat_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatModerationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -375,7 +384,7 @@ async def moderate_chat_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_moderations_v1_chat_moderations_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -407,8 +416,8 @@ def classify( *, model: str, inputs: Union[ - models.ClassificationRequestInputs, - models.ClassificationRequestInputsTypedDict, + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: 
OptionalNullable[utils.RetryConfig] = UNSET, @@ -458,6 +467,7 @@ def classify( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -474,7 +484,7 @@ def classify( config=self.sdk_configuration, base_url=base_url or "", operation_id="classifications_v1_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -506,8 +516,8 @@ async def classify_async( *, model: str, inputs: Union[ - models.ClassificationRequestInputs, - models.ClassificationRequestInputsTypedDict, + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -557,6 +567,7 @@ async def classify_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -573,7 +584,7 @@ async def classify_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="classifications_v1_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -604,7 +615,7 @@ def classify_chat( self, *, model: str, - inputs: Union[models.Inputs, models.InputsTypedDict], + inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -650,6 +661,7 @@ def classify_chat( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -666,7 +678,7 @@ def classify_chat( 
config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_classifications_v1_chat_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -697,7 +709,7 @@ async def classify_chat_async( self, *, model: str, - inputs: Union[models.Inputs, models.InputsTypedDict], + inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -743,6 +755,7 @@ async def classify_chat_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ChatClassificationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -759,7 +772,7 @@ async def classify_chat_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="chat_classifications_v1_chat_classifications_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index a7d58abd..93ed8c28 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -3,6 +3,16 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + completionargs as models_completionargs, + conversationappendrequest as models_conversationappendrequest, + conversationappendstreamrequest as models_conversationappendstreamrequest, + conversationinputs as models_conversationinputs, + conversationrequest as models_conversationrequest, + conversationrestartrequest as models_conversationrestartrequest, + conversationrestartstreamrequest as models_conversationrestartstreamrequest, + conversationstreamrequest as models_conversationstreamrequest, +) from mistralai.types import OptionalNullable, 
UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -223,14 +233,27 @@ async def run_generator() -> ( def start( self, *, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], stream: Optional[bool] = False, store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, + handoff_execution: OptionalNullable[ + models_conversationrequest.HandoffExecution + ] = UNSET, instructions: OptionalNullable[str] = UNSET, - tools: Optional[Union[List[models.Tools], List[models.ToolsTypedDict]]] = None, + tools: Optional[ + Union[ + List[models_conversationrequest.Tools], + List[models_conversationrequest.ToolsTypedDict], + ] + ] = None, completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -309,6 +332,7 @@ def start( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ConversationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -325,7 +349,7 @@ def start( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -355,14 +379,27 @@ def start( async def start_async( self, *, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], stream: Optional[bool] = False, 
store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[models.HandoffExecution] = UNSET, + handoff_execution: OptionalNullable[ + models_conversationrequest.HandoffExecution + ] = UNSET, instructions: OptionalNullable[str] = UNSET, - tools: Optional[Union[List[models.Tools], List[models.ToolsTypedDict]]] = None, + tools: Optional[ + Union[ + List[models_conversationrequest.Tools], + List[models_conversationrequest.ToolsTypedDict], + ] + ] = None, completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -441,6 +478,7 @@ async def start_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ConversationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -457,7 +495,7 @@ async def start_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -536,6 +574,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -552,7 +591,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_list", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -631,6 +670,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -647,7 +687,7 @@ async def list_async( config=self.sdk_configuration, 
base_url=base_url or "", operation_id="agents_api_v1_conversations_list", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -720,6 +760,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -736,7 +777,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -811,6 +852,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -827,7 +869,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -902,6 +944,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -918,7 +961,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -991,6 +1034,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1007,7 +1051,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_delete", - oauth2_scopes=[], + 
oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1038,14 +1082,20 @@ def append( self, *, conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models.ConversationAppendRequestHandoffExecution + models_conversationappendrequest.ConversationAppendRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -1110,6 +1160,7 @@ def append( "json", models.ConversationAppendRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1126,7 +1177,7 @@ def append( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1157,14 +1208,20 @@ async def append_async( self, *, conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models.ConversationAppendRequestHandoffExecution + models_conversationappendrequest.ConversationAppendRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + 
models_completionargs.CompletionArgsTypedDict, + ] ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -1229,6 +1286,7 @@ async def append_async( "json", models.ConversationAppendRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1245,7 +1303,7 @@ async def append_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1318,6 +1376,7 @@ def get_history( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1334,7 +1393,7 @@ def get_history( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_history", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1407,6 +1466,7 @@ async def get_history_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1423,7 +1483,7 @@ async def get_history_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_history", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1496,6 +1556,7 @@ def get_messages( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1512,7 +1573,7 @@ def get_messages( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_messages", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( 
self.sdk_configuration.security, models.Security ), @@ -1585,6 +1646,7 @@ async def get_messages_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1601,7 +1663,7 @@ async def get_messages_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_messages", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1632,15 +1694,21 @@ def restart( self, *, conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], from_entry_id: str, stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models.ConversationRestartRequestHandoffExecution + models_conversationrestartrequest.ConversationRestartRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[int] = UNSET, @@ -1713,6 +1781,7 @@ def restart( "json", models.ConversationRestartRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1729,7 +1798,7 @@ def restart( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_restart", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1760,15 +1829,21 @@ async def restart_async( self, *, conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + 
models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], from_entry_id: str, stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models.ConversationRestartRequestHandoffExecution + models_conversationrestartrequest.ConversationRestartRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[int] = UNSET, @@ -1841,6 +1916,7 @@ async def restart_async( "json", models.ConversationRestartRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1857,7 +1933,7 @@ async def restart_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_restart", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1887,21 +1963,29 @@ async def restart_async( def start_stream( self, *, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], stream: Optional[bool] = True, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models.ConversationStreamRequestHandoffExecution + models_conversationstreamrequest.ConversationStreamRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models.ConversationStreamRequestTools], - List[models.ConversationStreamRequestToolsTypedDict], + List[models_conversationstreamrequest.ConversationStreamRequestTools], + List[ + models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict + ], ] ] = None, completion_args: 
OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -1982,6 +2066,7 @@ def start_stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ConversationStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1998,7 +2083,7 @@ def start_stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2035,21 +2120,29 @@ def start_stream( async def start_stream_async( self, *, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], stream: Optional[bool] = True, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models.ConversationStreamRequestHandoffExecution + models_conversationstreamrequest.ConversationStreamRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models.ConversationStreamRequestTools], - List[models.ConversationStreamRequestToolsTypedDict], + List[models_conversationstreamrequest.ConversationStreamRequestTools], + List[ + models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict + ], ] ] = None, completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -2130,6 +2223,7 @@ async def start_stream_async( 
get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.ConversationStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2146,7 +2240,7 @@ async def start_stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_start_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2184,14 +2278,20 @@ def append_stream( self, *, conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models.ConversationAppendStreamRequestHandoffExecution + models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -2256,6 +2356,7 @@ def append_stream( "json", models.ConversationAppendStreamRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2272,7 +2373,7 @@ def append_stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2310,14 +2411,20 @@ async def append_stream_async( self, *, conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + models_conversationinputs.ConversationInputs, + 
models_conversationinputs.ConversationInputsTypedDict, + ], stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models.ConversationAppendStreamRequestHandoffExecution + models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -2382,6 +2489,7 @@ async def append_stream_async( "json", models.ConversationAppendStreamRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2398,7 +2506,7 @@ async def append_stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_append_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2436,15 +2544,21 @@ def restart_stream( self, *, conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], from_entry_id: str, stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models.ConversationRestartStreamRequestHandoffExecution + models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[int] = UNSET, @@ -2517,6 +2631,7 @@ def restart_stream( "json", 
models.ConversationRestartStreamRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2533,7 +2648,7 @@ def restart_stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_restart_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -2571,15 +2686,21 @@ async def restart_stream_async( self, *, conversation_id: str, - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], from_entry_id: str, stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models.ConversationRestartStreamRequestHandoffExecution + models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[int] = UNSET, @@ -2652,6 +2773,7 @@ async def restart_stream_async( "json", models.ConversationRestartStreamRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -2668,7 +2790,7 @@ async def restart_stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_conversations_restart_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/documents.py b/src/mistralai/documents.py index c1497bff..fac58fdb 100644 --- a/src/mistralai/documents.py +++ b/src/mistralai/documents.py @@ -3,6 +3,10 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import 
HookContext +from mistralai.models import ( + documentupdatein as models_documentupdatein, + file as models_file, +) from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -76,6 +80,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -92,7 +97,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -183,6 +188,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -199,7 +205,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -230,7 +236,7 @@ def upload( self, *, library_id: str, - file: Union[models.File, models.FileTypedDict], + file: Union[models_file.File, models_file.FileTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -241,7 +247,15 @@ def upload( Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search :param library_id: - :param file: The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -284,6 +298,7 @@ def upload( "multipart", models.LibrariesDocumentsUploadV1DocumentUpload, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -300,7 +315,7 @@ def upload( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_upload_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -331,7 +346,7 @@ async def upload_async( self, *, library_id: str, - file: Union[models.File, models.FileTypedDict], + file: Union[models_file.File, models_file.FileTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -342,7 +357,15 @@ async def upload_async( Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search :param library_id: - :param file: The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -385,6 +408,7 @@ async def upload_async( "multipart", models.LibrariesDocumentsUploadV1DocumentUpload, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -401,7 +425,7 @@ async def upload_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_upload_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -477,6 +501,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -493,7 +518,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -569,6 +594,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -585,7 +611,7 @@ async def get_async( config=self.sdk_configuration, 
base_url=base_url or "", operation_id="libraries_documents_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -619,7 +645,10 @@ def update( document_id: str, name: OptionalNullable[str] = UNSET, attributes: OptionalNullable[ - Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] + Union[ + Dict[str, models_documentupdatein.Attributes], + Dict[str, models_documentupdatein.AttributesTypedDict], + ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -678,6 +707,7 @@ def update( "json", models.DocumentUpdateIn, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -694,7 +724,7 @@ def update( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -728,7 +758,10 @@ async def update_async( document_id: str, name: OptionalNullable[str] = UNSET, attributes: OptionalNullable[ - Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] + Union[ + Dict[str, models_documentupdatein.Attributes], + Dict[str, models_documentupdatein.AttributesTypedDict], + ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -787,6 +820,7 @@ async def update_async( "json", models.DocumentUpdateIn, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -803,7 +837,7 @@ async def update_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -879,6 +913,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + 
allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -895,7 +930,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -971,6 +1006,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -987,7 +1023,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1063,6 +1099,7 @@ def text_content( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1079,7 +1116,7 @@ def text_content( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_text_content_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1155,6 +1192,7 @@ async def text_content_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1171,7 +1209,7 @@ async def text_content_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_text_content_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1247,6 +1285,7 @@ def status( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1263,7 
+1302,7 @@ def status( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_status_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1339,6 +1378,7 @@ async def status_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1355,7 +1395,7 @@ async def status_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_status_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1431,6 +1471,7 @@ def get_signed_url( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1447,7 +1488,7 @@ def get_signed_url( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1523,6 +1564,7 @@ async def get_signed_url_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1539,7 +1581,7 @@ async def get_signed_url_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1615,6 +1657,7 @@ def extracted_text_signed_url( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1631,7 +1674,7 @@ def 
extracted_text_signed_url( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_extracted_text_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1707,6 +1750,7 @@ async def extracted_text_signed_url_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1723,7 +1767,7 @@ async def extracted_text_signed_url_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_get_extracted_text_signed_url_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1799,6 +1843,7 @@ def reprocess( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1815,7 +1860,7 @@ def reprocess( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_reprocess_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1891,6 +1936,7 @@ async def reprocess_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1907,7 +1953,7 @@ async def reprocess_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_documents_reprocess_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py index 349d31ca..7430f804 100644 --- a/src/mistralai/embeddings.py +++ b/src/mistralai/embeddings.py @@ -3,6 +3,11 @@ from 
.basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + embeddingdtype as models_embeddingdtype, + embeddingrequest as models_embeddingrequest, + encodingformat as models_encodingformat, +) from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -17,12 +22,13 @@ def create( *, model: str, inputs: Union[ - models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict + models_embeddingrequest.EmbeddingRequestInputs, + models_embeddingrequest.EmbeddingRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, output_dimension: OptionalNullable[int] = UNSET, - output_dtype: Optional[models.EmbeddingDtype] = None, - encoding_format: Optional[models.EncodingFormat] = None, + output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, + encoding_format: Optional[models_encodingformat.EncodingFormat] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -78,6 +84,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.EmbeddingRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -94,7 +101,7 @@ def create( config=self.sdk_configuration, base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -126,12 +133,13 @@ async def create_async( *, model: str, inputs: Union[ - models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict + models_embeddingrequest.EmbeddingRequestInputs, + models_embeddingrequest.EmbeddingRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, output_dimension: OptionalNullable[int] = UNSET, 
- output_dtype: Optional[models.EmbeddingDtype] = None, - encoding_format: Optional[models.EncodingFormat] = None, + output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, + encoding_format: Optional[models_encodingformat.EncodingFormat] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -187,6 +195,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.EmbeddingRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -203,7 +212,7 @@ async def create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="embeddings_v1_embeddings_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/files.py b/src/mistralai/files.py index ae4eb779..ab2c75a2 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -4,6 +4,12 @@ import httpx from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + file as models_file, + filepurpose as models_filepurpose, + sampletype as models_sampletype, + source as models_source, +) from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -16,8 +22,8 @@ class Files(BaseSDK): def upload( self, *, - file: Union[models.File, models.FileTypedDict], - purpose: Optional[models.FilePurpose] = None, + file: Union[models_file.File, models_file.FileTypedDict], + purpose: Optional[models_filepurpose.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -31,7 +37,15 @@ def upload( Please contact us if you need to increase these storage limits. 
- :param file: The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -73,6 +87,7 @@ def upload( "multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -89,7 +104,7 @@ def upload( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_upload_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -113,8 +128,8 @@ def upload( async def upload_async( self, *, - file: Union[models.File, models.FileTypedDict], - purpose: Optional[models.FilePurpose] = None, + file: Union[models_file.File, models_file.FileTypedDict], + purpose: Optional[models_filepurpose.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -128,7 +143,15 @@ async def upload_async( Please contact us if you need to increase these storage limits. - :param file: The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: ```bash file=@path/to/your/file.jsonl;filename=custom_name.jsonl ``` Otherwise, you can just keep the original file name: ```bash file=@path/to/your/file.jsonl ``` + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` :param purpose: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -170,6 +193,7 @@ async def upload_async( "multipart", models.FilesAPIRoutesUploadFileMultiPartBodyParams, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -186,7 +210,7 @@ async def upload_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_upload_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -213,10 +237,10 @@ def list( page: Optional[int] = 0, page_size: Optional[int] = 100, include_total: Optional[bool] = True, - sample_type: OptionalNullable[List[models.SampleType]] = UNSET, - source: OptionalNullable[List[models.Source]] = UNSET, + sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, + source: OptionalNullable[List[models_source.Source]] = UNSET, search: OptionalNullable[str] = UNSET, - purpose: OptionalNullable[models.FilePurpose] = UNSET, + purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -271,6 +295,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + 
allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -287,7 +312,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_list_files", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -314,10 +339,10 @@ async def list_async( page: Optional[int] = 0, page_size: Optional[int] = 100, include_total: Optional[bool] = True, - sample_type: OptionalNullable[List[models.SampleType]] = UNSET, - source: OptionalNullable[List[models.Source]] = UNSET, + sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, + source: OptionalNullable[List[models_source.Source]] = UNSET, search: OptionalNullable[str] = UNSET, - purpose: OptionalNullable[models.FilePurpose] = UNSET, + purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -372,6 +397,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -388,7 +414,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_list_files", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -455,6 +481,7 @@ def retrieve( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -471,7 +498,7 @@ def retrieve( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_retrieve_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -538,6 +565,7 @@ async def retrieve_async( 
accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -554,7 +582,7 @@ async def retrieve_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_retrieve_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -621,6 +649,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -637,7 +666,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_delete_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -704,6 +733,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -720,7 +750,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_delete_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -787,6 +817,7 @@ def download( accept_header_value="application/octet-stream", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -803,7 +834,7 @@ def download( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_download_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -872,6 +903,7 @@ async def download_async( accept_header_value="application/octet-stream", http_headers=http_headers, 
security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -888,7 +920,7 @@ async def download_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_download_file", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -958,6 +990,7 @@ def get_signed_url( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -974,7 +1007,7 @@ def get_signed_url( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_get_signed_url", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1042,6 +1075,7 @@ async def get_signed_url_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1058,7 +1092,7 @@ async def get_signed_url_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="files_api_routes_get_signed_url", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py index 49bdb32e..53109c70 100644 --- a/src/mistralai/fim.py +++ b/src/mistralai/fim.py @@ -3,6 +3,10 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + fimcompletionrequest as models_fimcompletionrequest, + fimcompletionstreamrequest as models_fimcompletionstreamrequest, +) from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -23,8 +27,8 @@ def 
complete( stream: Optional[bool] = False, stop: Optional[ Union[ - models.FIMCompletionRequestStop, - models.FIMCompletionRequestStopTypedDict, + models_fimcompletionrequest.FIMCompletionRequestStop, + models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -96,6 +100,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -112,7 +117,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -150,8 +155,8 @@ async def complete_async( stream: Optional[bool] = False, stop: Optional[ Union[ - models.FIMCompletionRequestStop, - models.FIMCompletionRequestStopTypedDict, + models_fimcompletionrequest.FIMCompletionRequestStop, + models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -223,6 +228,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -239,7 +245,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -277,8 +283,8 @@ def stream( stream: Optional[bool] = True, stop: Optional[ Union[ - models.FIMCompletionStreamRequestStop, - models.FIMCompletionStreamRequestStopTypedDict, + models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, + models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, ] ] = 
None, random_seed: OptionalNullable[int] = UNSET, @@ -350,6 +356,7 @@ def stream( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -366,7 +373,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -412,8 +419,8 @@ async def stream_async( stream: Optional[bool] = True, stop: Optional[ Union[ - models.FIMCompletionStreamRequestStop, - models.FIMCompletionStreamRequestStopTypedDict, + models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, + models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -485,6 +492,7 @@ async def stream_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.FIMCompletionStreamRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -501,7 +509,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="stream_fim", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py index 47b052cb..89560b56 100644 --- a/src/mistralai/httpclient.py +++ b/src/mistralai/httpclient.py @@ -107,7 +107,6 @@ def close_clients( # to them from the owning SDK instance and they can be reaped. 
owner.client = None owner.async_client = None - if sync_client is not None and not sync_client_supplied: try: sync_client.close() diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py index af6364cb..df8ae4d3 100644 --- a/src/mistralai/jobs.py +++ b/src/mistralai/jobs.py @@ -4,6 +4,13 @@ from datetime import datetime from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + classifiertargetin as models_classifiertargetin, + finetuneablemodeltype as models_finetuneablemodeltype, + jobin as models_jobin, + jobs_api_routes_fine_tuning_get_fine_tuning_jobsop as models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop, + trainingfile as models_trainingfile, +) from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -20,7 +27,9 @@ def list( created_after: OptionalNullable[datetime] = UNSET, created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, - status: OptionalNullable[models.QueryParamStatus] = UNSET, + status: OptionalNullable[ + models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus + ] = UNSET, wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, suffix: OptionalNullable[str] = UNSET, @@ -84,6 +93,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -100,7 +110,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -130,7 +140,9 @@ async def list_async( created_after: OptionalNullable[datetime] = UNSET, created_before: OptionalNullable[datetime] = UNSET, 
created_by_me: Optional[bool] = False, - status: OptionalNullable[models.QueryParamStatus] = UNSET, + status: OptionalNullable[ + models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus + ] = UNSET, wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, suffix: OptionalNullable[str] = UNSET, @@ -194,6 +206,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -210,7 +223,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -235,29 +248,38 @@ def create( self, *, model: str, - hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], + hyperparameters: Union[ + models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict + ], training_files: Optional[ - Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] + Union[ + List[models_trainingfile.TrainingFile], + List[models_trainingfile.TrainingFileTypedDict], + ] ] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, integrations: OptionalNullable[ Union[ - List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] + List[models_jobin.JobInIntegrations], + List[models_jobin.JobInIntegrationsTypedDict], ] ] = UNSET, auto_start: Optional[bool] = None, invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, + job_type: OptionalNullable[ + models_finetuneablemodeltype.FineTuneableModelType + ] = UNSET, repositories: OptionalNullable[ Union[ - List[models.JobInRepositories], List[models.JobInRepositoriesTypedDict] + 
List[models_jobin.JobInRepositories], + List[models_jobin.JobInRepositoriesTypedDict], ] ] = UNSET, classifier_targets: OptionalNullable[ Union[ - List[models.ClassifierTargetIn], - List[models.ClassifierTargetInTypedDict], + List[models_classifiertargetin.ClassifierTargetIn], + List[models_classifiertargetin.ClassifierTargetInTypedDict], ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -335,6 +357,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.JobIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -351,7 +374,7 @@ def create( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -378,29 +401,38 @@ async def create_async( self, *, model: str, - hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], + hyperparameters: Union[ + models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict + ], training_files: Optional[ - Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] + Union[ + List[models_trainingfile.TrainingFile], + List[models_trainingfile.TrainingFileTypedDict], + ] ] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, integrations: OptionalNullable[ Union[ - List[models.JobInIntegrations], List[models.JobInIntegrationsTypedDict] + List[models_jobin.JobInIntegrations], + List[models_jobin.JobInIntegrationsTypedDict], ] ] = UNSET, auto_start: Optional[bool] = None, invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, + job_type: OptionalNullable[ + models_finetuneablemodeltype.FineTuneableModelType + ] = UNSET, repositories: OptionalNullable[ Union[ - List[models.JobInRepositories], 
List[models.JobInRepositoriesTypedDict] + List[models_jobin.JobInRepositories], + List[models_jobin.JobInRepositoriesTypedDict], ] ] = UNSET, classifier_targets: OptionalNullable[ Union[ - List[models.ClassifierTargetIn], - List[models.ClassifierTargetInTypedDict], + List[models_classifiertargetin.ClassifierTargetIn], + List[models_classifiertargetin.ClassifierTargetInTypedDict], ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -478,6 +510,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.JobIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -494,7 +527,7 @@ async def create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -563,6 +596,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -579,7 +613,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -648,6 +682,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -664,7 +699,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -733,6 +768,7 @@ def cancel( accept_header_value="application/json", 
http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -749,7 +785,7 @@ def cancel( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -818,6 +854,7 @@ async def cancel_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -834,7 +871,7 @@ async def cancel_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -903,6 +940,7 @@ def start( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -919,7 +957,7 @@ def start( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -988,6 +1026,7 @@ async def start_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1004,7 +1043,7 @@ async def start_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/libraries.py b/src/mistralai/libraries.py index e9f19047..32648937 100644 --- 
a/src/mistralai/libraries.py +++ b/src/mistralai/libraries.py @@ -70,6 +70,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -86,7 +87,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -146,6 +147,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -162,7 +164,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_list_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -238,6 +240,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.LibraryIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -254,7 +257,7 @@ def create( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -336,6 +339,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.LibraryIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -352,7 +356,7 @@ async def create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_create_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -425,6 +429,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, 
security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -441,7 +446,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -514,6 +519,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -530,7 +536,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_get_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -603,6 +609,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -619,7 +626,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -692,6 +699,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -708,7 +716,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_delete_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -792,6 +800,7 @@ def update( get_serialized_body=lambda: utils.serialize_request_body( request.library_in_update, False, False, "json", models.LibraryInUpdate ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -808,7 +817,7 @@ def update( 
config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -892,6 +901,7 @@ async def update_async( get_serialized_body=lambda: utils.serialize_request_body( request.library_in_update, False, False, "json", models.LibraryInUpdate ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -908,7 +918,7 @@ async def update_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="libraries_update_v1", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py index 0d9ad0b7..1d204068 100644 --- a/src/mistralai/mistral_agents.py +++ b/src/mistralai/mistral_agents.py @@ -3,6 +3,12 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + agentcreationrequest as models_agentcreationrequest, + agentupdaterequest as models_agentupdaterequest, + completionargs as models_completionargs, + requestsource as models_requestsource, +) from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -20,12 +26,15 @@ def create( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models.AgentCreationRequestTools], - List[models.AgentCreationRequestToolsTypedDict], + List[models_agentcreationrequest.AgentCreationRequestTools], + List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], ] ] = None, completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = None, description: 
OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, @@ -93,6 +102,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentCreationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -109,7 +119,7 @@ def create( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_create", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -144,12 +154,15 @@ async def create_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models.AgentCreationRequestTools], - List[models.AgentCreationRequestToolsTypedDict], + List[models_agentcreationrequest.AgentCreationRequestTools], + List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], ] ] = None, completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = None, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, @@ -217,6 +230,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.AgentCreationRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -233,7 +247,7 @@ async def create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_create", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -266,7 +280,7 @@ def list( page: Optional[int] = 0, page_size: Optional[int] = 20, deployment_chat: OptionalNullable[bool] = UNSET, - sources: OptionalNullable[List[models.RequestSource]] = UNSET, + sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, name: OptionalNullable[str] = 
UNSET, id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, @@ -324,6 +338,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -340,7 +355,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_list", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -373,7 +388,7 @@ async def list_async( page: Optional[int] = 0, page_size: Optional[int] = 20, deployment_chat: OptionalNullable[bool] = UNSET, - sources: OptionalNullable[List[models.RequestSource]] = UNSET, + sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, name: OptionalNullable[str] = UNSET, id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, @@ -431,6 +446,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -447,7 +463,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_list", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -523,6 +539,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -539,7 +556,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -615,6 +632,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, 
security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -631,7 +649,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -665,12 +683,15 @@ def update( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models.AgentUpdateRequestTools], - List[models.AgentUpdateRequestToolsTypedDict], + List[models_agentupdaterequest.AgentUpdateRequestTools], + List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], ] ] = None, completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = None, model: OptionalNullable[str] = UNSET, name: OptionalNullable[str] = UNSET, @@ -751,6 +772,7 @@ def update( "json", models.AgentUpdateRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -767,7 +789,7 @@ def update( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -801,12 +823,15 @@ async def update_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models.AgentUpdateRequestTools], - List[models.AgentUpdateRequestToolsTypedDict], + List[models_agentupdaterequest.AgentUpdateRequestTools], + List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], ] ] = None, completion_args: Optional[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] ] = None, model: OptionalNullable[str] = UNSET, name: OptionalNullable[str] = UNSET, @@ -887,6 +912,7 @@ 
async def update_async( "json", models.AgentUpdateRequest, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -903,7 +929,7 @@ async def update_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -974,6 +1000,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -990,7 +1017,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1061,6 +1088,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1077,7 +1105,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1153,6 +1181,7 @@ def update_version( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1169,7 +1198,7 @@ def update_version( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update_version", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -1245,6 +1274,7 @@ async def update_version_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + 
allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1261,7 +1291,7 @@ async def update_version_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="agents_api_v1_agents_update_version", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py index 09e43fba..d1aeec8a 100644 --- a/src/mistralai/mistral_jobs.py +++ b/src/mistralai/mistral_jobs.py @@ -4,6 +4,11 @@ from datetime import datetime from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + apiendpoint as models_apiendpoint, + batchjobstatus as models_batchjobstatus, + batchrequest as models_batchrequest, +) from mistralai.types import OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -21,7 +26,7 @@ def list( metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, + status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -78,6 +83,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -94,7 +100,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -125,7 +131,7 @@ async def list_async( metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: 
OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, + status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -182,6 +188,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -198,7 +205,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_jobs", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -222,10 +229,13 @@ async def list_async( def create( self, *, - endpoint: models.APIEndpoint, + endpoint: models_apiendpoint.APIEndpoint, input_files: OptionalNullable[List[str]] = UNSET, requests: OptionalNullable[ - Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] + Union[ + List[models_batchrequest.BatchRequest], + List[models_batchrequest.BatchRequestTypedDict], + ] ] = UNSET, model: OptionalNullable[str] = UNSET, agent_id: OptionalNullable[str] = UNSET, @@ -290,6 +300,7 @@ def create( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.BatchJobIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -306,7 +317,7 @@ def create( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -330,10 +341,13 @@ def create( async def create_async( self, *, - endpoint: models.APIEndpoint, + endpoint: models_apiendpoint.APIEndpoint, input_files: OptionalNullable[List[str]] = UNSET, requests: 
OptionalNullable[ - Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] + Union[ + List[models_batchrequest.BatchRequest], + List[models_batchrequest.BatchRequestTypedDict], + ] ] = UNSET, model: OptionalNullable[str] = UNSET, agent_id: OptionalNullable[str] = UNSET, @@ -398,6 +412,7 @@ async def create_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.BatchJobIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -414,7 +429,7 @@ async def create_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_create_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -487,6 +502,7 @@ def get( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -503,7 +519,7 @@ def get( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -576,6 +592,7 @@ async def get_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -592,7 +609,7 @@ async def get_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_get_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -659,6 +676,7 @@ def cancel( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -675,7 +693,7 @@ def cancel( config=self.sdk_configuration, base_url=base_url or "", 
operation_id="jobs_api_routes_batch_cancel_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -742,6 +760,7 @@ async def cancel_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -758,7 +777,7 @@ async def cancel_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_batch_cancel_batch_job", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/models/agent.py b/src/mistralai/models/agent.py index 5d0b39fa..eb30905b 100644 --- a/src/mistralai/models/agent.py +++ b/src/mistralai/models/agent.py @@ -42,7 +42,7 @@ ] -AgentObject = Literal["agent"] +AgentObject = Literal["agent",] class AgentTypedDict(TypedDict): diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py index 7fa3dfe9..625fb4fc 100644 --- a/src/mistralai/models/agentconversation.py +++ b/src/mistralai/models/agentconversation.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -AgentConversationObject = Literal["conversation"] +AgentConversationObject = Literal["conversation",] class AgentConversationTypedDict(TypedDict): diff --git a/src/mistralai/models/agenthandoffdoneevent.py b/src/mistralai/models/agenthandoffdoneevent.py index fa545a02..1cdbf456 100644 --- a/src/mistralai/models/agenthandoffdoneevent.py +++ b/src/mistralai/models/agenthandoffdoneevent.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -AgentHandoffDoneEventType = Literal["agent.handoff.done"] +AgentHandoffDoneEventType = Literal["agent.handoff.done",] class AgentHandoffDoneEventTypedDict(TypedDict): diff --git a/src/mistralai/models/agenthandoffentry.py b/src/mistralai/models/agenthandoffentry.py 
index 44bfe0f2..66136256 100644 --- a/src/mistralai/models/agenthandoffentry.py +++ b/src/mistralai/models/agenthandoffentry.py @@ -8,9 +8,10 @@ from typing_extensions import NotRequired, TypedDict -AgentHandoffEntryObject = Literal["entry"] +AgentHandoffEntryObject = Literal["entry",] -AgentHandoffEntryType = Literal["agent.handoff"] + +AgentHandoffEntryType = Literal["agent.handoff",] class AgentHandoffEntryTypedDict(TypedDict): diff --git a/src/mistralai/models/agenthandoffstartedevent.py b/src/mistralai/models/agenthandoffstartedevent.py index 9033a0a9..11bfa918 100644 --- a/src/mistralai/models/agenthandoffstartedevent.py +++ b/src/mistralai/models/agenthandoffstartedevent.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -AgentHandoffStartedEventType = Literal["agent.handoff.started"] +AgentHandoffStartedEventType = Literal["agent.handoff.started",] class AgentHandoffStartedEventTypedDict(TypedDict): diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py index cff4df64..cc07a6bd 100644 --- a/src/mistralai/models/agentscompletionrequest.py +++ b/src/mistralai/models/agentscompletionrequest.py @@ -12,9 +12,8 @@ from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator, validate_open_enum +from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -137,9 +136,7 @@ class AgentsCompletionRequest(BaseModel): parallel_tool_calls: Optional[bool] = None - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = 
UNSET + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py index 69edc23c..d6a887be 100644 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ b/src/mistralai/models/agentscompletionstreamrequest.py @@ -12,9 +12,8 @@ from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator, validate_open_enum +from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -135,9 +134,7 @@ class AgentsCompletionStreamRequest(BaseModel): parallel_tool_calls: Optional[bool] = None - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = UNSET + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" @model_serializer(mode="wrap") diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py index cff27c4e..0f753cfc 100644 --- a/src/mistralai/models/archiveftmodelout.py +++ b/src/mistralai/models/archiveftmodelout.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -ArchiveFTModelOutObject = Literal["model"] +ArchiveFTModelOutObject = Literal["model",] class ArchiveFTModelOutTypedDict(TypedDict): diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py index 18841a72..a38a10c4 100644 --- a/src/mistralai/models/assistantmessage.py +++ b/src/mistralai/models/assistantmessage.py @@ -19,7 +19,7 @@ ) -AssistantMessageRole = Literal["assistant"] +AssistantMessageRole = Literal["assistant",] class AssistantMessageTypedDict(TypedDict): diff --git a/src/mistralai/models/audiochunk.py b/src/mistralai/models/audiochunk.py index 2780570a..64fc43ff 100644 --- a/src/mistralai/models/audiochunk.py +++ b/src/mistralai/models/audiochunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -AudioChunkType = Literal["input_audio"] +AudioChunkType = Literal["input_audio",] class AudioChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/audioencoding.py b/src/mistralai/models/audioencoding.py index 7bb03f33..13eb6d15 100644 --- a/src/mistralai/models/audioencoding.py +++ b/src/mistralai/models/audioencoding.py @@ -7,7 +7,12 @@ AudioEncoding = Union[ Literal[ - "pcm_s16le", "pcm_s32le", "pcm_f16le", "pcm_f32le", "pcm_mulaw", "pcm_alaw" + "pcm_s16le", + "pcm_s32le", + "pcm_f16le", + "pcm_f32le", + "pcm_mulaw", + "pcm_alaw", ], UnrecognizedStr, ] diff --git a/src/mistralai/models/audioformat.py b/src/mistralai/models/audioformat.py index 075b3c75..48ab648c 100644 --- a/src/mistralai/models/audioformat.py +++ b/src/mistralai/models/audioformat.py @@ -3,9 +3,7 @@ from __future__ import 
annotations from .audioencoding import AudioEncoding from mistralai.types import BaseModel -from mistralai.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, TypedDict +from typing_extensions import TypedDict class AudioFormatTypedDict(TypedDict): @@ -14,6 +12,6 @@ class AudioFormatTypedDict(TypedDict): class AudioFormat(BaseModel): - encoding: Annotated[AudioEncoding, PlainValidator(validate_open_enum(False))] + encoding: AudioEncoding sample_rate: int diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py index a4a061ff..706841b7 100644 --- a/src/mistralai/models/basemodelcard.py +++ b/src/mistralai/models/basemodelcard.py @@ -12,7 +12,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -BaseModelCardType = Literal["base"] +BaseModelCardType = Literal["base",] class BaseModelCardTypedDict(TypedDict): diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py index 0c37cce8..839a9b3c 100644 --- a/src/mistralai/models/batchjobin.py +++ b/src/mistralai/models/batchjobin.py @@ -4,11 +4,9 @@ from .apiendpoint import APIEndpoint from .batchrequest import BatchRequest, BatchRequestTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_open_enum from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator from typing import Dict, List, Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class BatchJobInTypedDict(TypedDict): @@ -27,7 +25,7 @@ class BatchJobInTypedDict(TypedDict): class BatchJobIn(BaseModel): - endpoint: Annotated[APIEndpoint, PlainValidator(validate_open_enum(False))] + endpoint: APIEndpoint input_files: OptionalNullable[List[str]] = UNSET r"""The list of input files to be used for batch inference, these 
files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py index 0173d9a6..904cd349 100644 --- a/src/mistralai/models/batchjobout.py +++ b/src/mistralai/models/batchjobout.py @@ -9,7 +9,7 @@ from typing_extensions import NotRequired, TypedDict -BatchJobOutObject = Literal["batch"] +BatchJobOutObject = Literal["batch",] class BatchJobOutTypedDict(TypedDict): diff --git a/src/mistralai/models/batchjobsout.py b/src/mistralai/models/batchjobsout.py index 8ce26f31..a1eba5db 100644 --- a/src/mistralai/models/batchjobsout.py +++ b/src/mistralai/models/batchjobsout.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -BatchJobsOutObject = Literal["list"] +BatchJobsOutObject = Literal["list",] class BatchJobsOutTypedDict(TypedDict): diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py index f4f37fb4..f2057ab4 100644 --- a/src/mistralai/models/chatcompletionchoice.py +++ b/src/mistralai/models/chatcompletionchoice.py @@ -3,14 +3,19 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from mistralai.types import BaseModel, UnrecognizedStr -from mistralai.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Literal, Union -from typing_extensions import Annotated, TypedDict +from typing_extensions import TypedDict FinishReason = Union[ - Literal["stop", "length", "model_length", "error", 
"tool_calls"], UnrecognizedStr + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, ] @@ -25,4 +30,4 @@ class ChatCompletionChoice(BaseModel): message: AssistantMessage - finish_reason: Annotated[FinishReason, PlainValidator(validate_open_enum(False))] + finish_reason: FinishReason diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py index a309421b..ad8b5428 100644 --- a/src/mistralai/models/chatcompletionrequest.py +++ b/src/mistralai/models/chatcompletionrequest.py @@ -12,9 +12,8 @@ from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator, validate_open_enum +from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -153,9 +152,7 @@ class ChatCompletionRequest(BaseModel): parallel_tool_calls: Optional[bool] = None r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = UNSET + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: Optional[bool] = None diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py index 7a28cf01..10f97e5f 100644 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ b/src/mistralai/models/chatcompletionstreamrequest.py @@ -12,9 +12,8 @@ from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator, validate_open_enum +from mistralai.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer -from pydantic.functional_validators import PlainValidator from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -155,9 +154,7 @@ class ChatCompletionStreamRequest(BaseModel): parallel_tool_calls: Optional[bool] = None r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - prompt_mode: Annotated[ - OptionalNullable[MistralPromptMode], PlainValidator(validate_open_enum(False)) - ] = UNSET + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" safe_prompt: Optional[bool] = None diff --git a/src/mistralai/models/classifierdetailedjobout.py b/src/mistralai/models/classifierdetailedjobout.py index da5bd281..701aee6e 100644 --- a/src/mistralai/models/classifierdetailedjobout.py +++ b/src/mistralai/models/classifierdetailedjobout.py @@ -29,7 +29,9 @@ "CANCELLATION_REQUESTED", ] -ClassifierDetailedJobOutObject = Literal["job"] + +ClassifierDetailedJobOutObject = Literal["job",] + ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict @@ -37,7 +39,7 @@ ClassifierDetailedJobOutIntegrations = WandbIntegrationOut -ClassifierDetailedJobOutJobType = Literal["classifier"] +ClassifierDetailedJobOutJobType = Literal["classifier",] class ClassifierDetailedJobOutTypedDict(TypedDict): diff --git a/src/mistralai/models/classifierftmodelout.py b/src/mistralai/models/classifierftmodelout.py index 56ffe96d..d2a31fae 100644 --- a/src/mistralai/models/classifierftmodelout.py +++ b/src/mistralai/models/classifierftmodelout.py @@ -12,9 +12,10 @@ from typing_extensions import NotRequired, TypedDict -ClassifierFTModelOutObject = Literal["model"] +ClassifierFTModelOutObject = Literal["model",] -ClassifierFTModelOutModelType = Literal["classifier"] + +ClassifierFTModelOutModelType = Literal["classifier",] class ClassifierFTModelOutTypedDict(TypedDict): diff --git a/src/mistralai/models/classifierjobout.py b/src/mistralai/models/classifierjobout.py index c8df6da3..a2f7cc08 100644 --- a/src/mistralai/models/classifierjobout.py +++ b/src/mistralai/models/classifierjobout.py @@ -27,16 +27,18 @@ ] r"""The current status of the fine-tuning job.""" -ClassifierJobOutObject = Literal["job"] + +ClassifierJobOutObject = Literal["job",] r"""The object type of the fine-tuning job.""" + ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict ClassifierJobOutIntegrations = WandbIntegrationOut -ClassifierJobOutJobType = 
Literal["classifier"] +ClassifierJobOutJobType = Literal["classifier",] r"""The type of job (`FT` for fine-tuning).""" diff --git a/src/mistralai/models/codeinterpretertool.py b/src/mistralai/models/codeinterpretertool.py index b0fc4d20..48b74ee8 100644 --- a/src/mistralai/models/codeinterpretertool.py +++ b/src/mistralai/models/codeinterpretertool.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -CodeInterpreterToolType = Literal["code_interpreter"] +CodeInterpreterToolType = Literal["code_interpreter",] class CodeInterpreterToolTypedDict(TypedDict): diff --git a/src/mistralai/models/completiondetailedjobout.py b/src/mistralai/models/completiondetailedjobout.py index 8fb1b62a..df41bc2a 100644 --- a/src/mistralai/models/completiondetailedjobout.py +++ b/src/mistralai/models/completiondetailedjobout.py @@ -29,7 +29,9 @@ "CANCELLATION_REQUESTED", ] -CompletionDetailedJobOutObject = Literal["job"] + +CompletionDetailedJobOutObject = Literal["job",] + CompletionDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict @@ -37,7 +39,8 @@ CompletionDetailedJobOutIntegrations = WandbIntegrationOut -CompletionDetailedJobOutJobType = Literal["completion"] +CompletionDetailedJobOutJobType = Literal["completion",] + CompletionDetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict diff --git a/src/mistralai/models/completionftmodelout.py b/src/mistralai/models/completionftmodelout.py index ab71168b..7b6520de 100644 --- a/src/mistralai/models/completionftmodelout.py +++ b/src/mistralai/models/completionftmodelout.py @@ -11,9 +11,10 @@ from typing_extensions import NotRequired, TypedDict -CompletionFTModelOutObject = Literal["model"] +CompletionFTModelOutObject = Literal["model",] -ModelType = Literal["completion"] + +ModelType = Literal["completion",] class CompletionFTModelOutTypedDict(TypedDict): diff --git a/src/mistralai/models/completionjobout.py b/src/mistralai/models/completionjobout.py index bed67b50..70995d2a 100644 --- 
a/src/mistralai/models/completionjobout.py +++ b/src/mistralai/models/completionjobout.py @@ -28,18 +28,21 @@ ] r"""The current status of the fine-tuning job.""" -CompletionJobOutObject = Literal["job"] + +CompletionJobOutObject = Literal["job",] r"""The object type of the fine-tuning job.""" + IntegrationsTypedDict = WandbIntegrationOutTypedDict Integrations = WandbIntegrationOut -JobType = Literal["completion"] +JobType = Literal["completion",] r"""The type of job (`FT` for fine-tuning).""" + RepositoriesTypedDict = GithubRepositoryOutTypedDict diff --git a/src/mistralai/models/completionresponsestreamchoice.py b/src/mistralai/models/completionresponsestreamchoice.py index 2426148c..80f63987 100644 --- a/src/mistralai/models/completionresponsestreamchoice.py +++ b/src/mistralai/models/completionresponsestreamchoice.py @@ -3,15 +3,19 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr -from mistralai.utils import validate_open_enum from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator from typing import Literal, Union -from typing_extensions import Annotated, TypedDict +from typing_extensions import TypedDict CompletionResponseStreamChoiceFinishReason = Union[ - Literal["stop", "length", "error", "tool_calls"], UnrecognizedStr + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, ] @@ -26,10 +30,7 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Annotated[ - Nullable[CompletionResponseStreamChoiceFinishReason], - PlainValidator(validate_open_enum(False)), - ] + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/conversationappendrequest.py b/src/mistralai/models/conversationappendrequest.py index 
ecc47e45..15cbc687 100644 --- a/src/mistralai/models/conversationappendrequest.py +++ b/src/mistralai/models/conversationappendrequest.py @@ -8,7 +8,10 @@ from typing_extensions import NotRequired, TypedDict -ConversationAppendRequestHandoffExecution = Literal["client", "server"] +ConversationAppendRequestHandoffExecution = Literal[ + "client", + "server", +] class ConversationAppendRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/conversationappendstreamrequest.py b/src/mistralai/models/conversationappendstreamrequest.py index 25ffe5fb..8cecf89d 100644 --- a/src/mistralai/models/conversationappendstreamrequest.py +++ b/src/mistralai/models/conversationappendstreamrequest.py @@ -8,7 +8,10 @@ from typing_extensions import NotRequired, TypedDict -ConversationAppendStreamRequestHandoffExecution = Literal["client", "server"] +ConversationAppendStreamRequestHandoffExecution = Literal[ + "client", + "server", +] class ConversationAppendStreamRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/conversationhistory.py b/src/mistralai/models/conversationhistory.py index 472915fe..d5206a57 100644 --- a/src/mistralai/models/conversationhistory.py +++ b/src/mistralai/models/conversationhistory.py @@ -12,7 +12,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ConversationHistoryObject = Literal["conversation.history"] +ConversationHistoryObject = Literal["conversation.history",] + EntriesTypedDict = TypeAliasType( "EntriesTypedDict", diff --git a/src/mistralai/models/conversationmessages.py b/src/mistralai/models/conversationmessages.py index 9027045b..32ca9c20 100644 --- a/src/mistralai/models/conversationmessages.py +++ b/src/mistralai/models/conversationmessages.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -ConversationMessagesObject = Literal["conversation.messages"] +ConversationMessagesObject = Literal["conversation.messages",] class ConversationMessagesTypedDict(TypedDict): diff --git 
a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py index bd4368d2..09d934ed 100644 --- a/src/mistralai/models/conversationrequest.py +++ b/src/mistralai/models/conversationrequest.py @@ -16,7 +16,11 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -HandoffExecution = Literal["client", "server"] +HandoffExecution = Literal[ + "client", + "server", +] + ToolsTypedDict = TypeAliasType( "ToolsTypedDict", diff --git a/src/mistralai/models/conversationresponse.py b/src/mistralai/models/conversationresponse.py index 61de8565..ff318e35 100644 --- a/src/mistralai/models/conversationresponse.py +++ b/src/mistralai/models/conversationresponse.py @@ -11,7 +11,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ConversationResponseObject = Literal["conversation.response"] +ConversationResponseObject = Literal["conversation.response",] + OutputsTypedDict = TypeAliasType( "OutputsTypedDict", diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py index 091917fe..a9c8410c 100644 --- a/src/mistralai/models/conversationrestartrequest.py +++ b/src/mistralai/models/conversationrestartrequest.py @@ -9,7 +9,10 @@ from typing_extensions import NotRequired, TypedDict -ConversationRestartRequestHandoffExecution = Literal["client", "server"] +ConversationRestartRequestHandoffExecution = Literal[ + "client", + "server", +] class ConversationRestartRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py index 4bcf255a..0703bb5f 100644 --- a/src/mistralai/models/conversationrestartstreamrequest.py +++ b/src/mistralai/models/conversationrestartstreamrequest.py @@ -9,7 +9,10 @@ from typing_extensions import NotRequired, TypedDict -ConversationRestartStreamRequestHandoffExecution = Literal["client", "server"] 
+ConversationRestartStreamRequestHandoffExecution = Literal[ + "client", + "server", +] class ConversationRestartStreamRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py index 8c6d56c2..6ff56e17 100644 --- a/src/mistralai/models/conversationstreamrequest.py +++ b/src/mistralai/models/conversationstreamrequest.py @@ -16,7 +16,11 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -ConversationStreamRequestHandoffExecution = Literal["client", "server"] +ConversationStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + ConversationStreamRequestToolsTypedDict = TypeAliasType( "ConversationStreamRequestToolsTypedDict", diff --git a/src/mistralai/models/documentlibrarytool.py b/src/mistralai/models/documentlibrarytool.py index f36de710..8d4c122b 100644 --- a/src/mistralai/models/documentlibrarytool.py +++ b/src/mistralai/models/documentlibrarytool.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -DocumentLibraryToolType = Literal["document_library"] +DocumentLibraryToolType = Literal["document_library",] class DocumentLibraryToolTypedDict(TypedDict): diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py index 33f29ba8..6d0b1dc6 100644 --- a/src/mistralai/models/documenturlchunk.py +++ b/src/mistralai/models/documenturlchunk.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -DocumentURLChunkType = Literal["document_url"] +DocumentURLChunkType = Literal["document_url",] class DocumentURLChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/embeddingdtype.py b/src/mistralai/models/embeddingdtype.py index 4f3c41bd..26eee779 100644 --- a/src/mistralai/models/embeddingdtype.py +++ b/src/mistralai/models/embeddingdtype.py @@ -4,4 +4,10 @@ from typing import Literal -EmbeddingDtype = Literal["float", "int8", "uint8", "binary", "ubinary"] 
+EmbeddingDtype = Literal[ + "float", + "int8", + "uint8", + "binary", + "ubinary", +] diff --git a/src/mistralai/models/encodingformat.py b/src/mistralai/models/encodingformat.py index 6c28a15a..be6c1a14 100644 --- a/src/mistralai/models/encodingformat.py +++ b/src/mistralai/models/encodingformat.py @@ -4,4 +4,7 @@ from typing import Literal -EncodingFormat = Literal["float", "base64"] +EncodingFormat = Literal[ + "float", + "base64", +] diff --git a/src/mistralai/models/entitytype.py b/src/mistralai/models/entitytype.py index b5149c5f..8d2d4bbe 100644 --- a/src/mistralai/models/entitytype.py +++ b/src/mistralai/models/entitytype.py @@ -5,5 +5,12 @@ from typing import Literal, Union -EntityType = Union[Literal["User", "Workspace", "Org"], UnrecognizedStr] +EntityType = Union[ + Literal[ + "User", + "Workspace", + "Org", + ], + UnrecognizedStr, +] r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py index 8599192b..b109b350 100644 --- a/src/mistralai/models/filepurpose.py +++ b/src/mistralai/models/filepurpose.py @@ -5,4 +5,11 @@ from typing import Literal, Union -FilePurpose = Union[Literal["fine-tune", "batch", "ocr"], UnrecognizedStr] +FilePurpose = Union[ + Literal[ + "fine-tune", + "batch", + "ocr", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/models/files_api_routes_list_filesop.py b/src/mistralai/models/files_api_routes_list_filesop.py index 8e174a58..9b9422b4 100644 --- a/src/mistralai/models/files_api_routes_list_filesop.py +++ b/src/mistralai/models/files_api_routes_list_filesop.py @@ -5,9 +5,8 @@ from .sampletype import SampleType from .source import Source from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata, validate_open_enum +from mistralai.utils import FieldMetadata, QueryParamMetadata from pydantic import model_serializer -from pydantic.functional_validators 
import PlainValidator from typing import List, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -39,16 +38,12 @@ class FilesAPIRoutesListFilesRequest(BaseModel): ] = True sample_type: Annotated[ - OptionalNullable[ - List[Annotated[SampleType, PlainValidator(validate_open_enum(False))]] - ], + OptionalNullable[List[SampleType]], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET source: Annotated[ - OptionalNullable[ - List[Annotated[Source, PlainValidator(validate_open_enum(False))]] - ], + OptionalNullable[List[Source]], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET @@ -58,9 +53,7 @@ class FilesAPIRoutesListFilesRequest(BaseModel): ] = UNSET purpose: Annotated[ - Annotated[ - OptionalNullable[FilePurpose], PlainValidator(validate_open_enum(False)) - ], + OptionalNullable[FilePurpose], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py index 34321cf5..aeefe842 100644 --- a/src/mistralai/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/models/files_api_routes_upload_fileop.py @@ -4,8 +4,7 @@ from .file import File, FileTypedDict from .filepurpose import FilePurpose from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_open_enum -from pydantic.functional_validators import PlainValidator +from mistralai.utils import FieldMetadata, MultipartFormMetadata from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -38,7 +37,4 @@ class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): ``` """ - purpose: Annotated[ - Annotated[Optional[FilePurpose], PlainValidator(validate_open_enum(False))], - FieldMetadata(multipart=True), - ] = None + purpose: Annotated[Optional[FilePurpose], FieldMetadata(multipart=True)] = None diff 
--git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py index 7c7b60c6..9a88f1bb 100644 --- a/src/mistralai/models/fileschema.py +++ b/src/mistralai/models/fileschema.py @@ -5,10 +5,8 @@ from .sampletype import SampleType from .source import Source from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_open_enum import pydantic from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator from typing_extensions import Annotated, NotRequired, TypedDict @@ -47,11 +45,11 @@ class FileSchema(BaseModel): filename: str r"""The name of the uploaded file.""" - purpose: Annotated[FilePurpose, PlainValidator(validate_open_enum(False))] + purpose: FilePurpose - sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] + sample_type: SampleType - source: Annotated[Source, PlainValidator(validate_open_enum(False))] + source: Source num_lines: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/finetuneablemodeltype.py b/src/mistralai/models/finetuneablemodeltype.py index 3507dc91..f5b8b2ed 100644 --- a/src/mistralai/models/finetuneablemodeltype.py +++ b/src/mistralai/models/finetuneablemodeltype.py @@ -4,4 +4,7 @@ from typing import Literal -FineTuneableModelType = Literal["completion", "classifier"] +FineTuneableModelType = Literal[ + "completion", + "classifier", +] diff --git a/src/mistralai/models/ftclassifierlossfunction.py b/src/mistralai/models/ftclassifierlossfunction.py index df2d19ff..c4ef66e0 100644 --- a/src/mistralai/models/ftclassifierlossfunction.py +++ b/src/mistralai/models/ftclassifierlossfunction.py @@ -4,4 +4,7 @@ from typing import Literal -FTClassifierLossFunction = Literal["single_class", "multi_class"] +FTClassifierLossFunction = Literal[ + "single_class", + "multi_class", +] diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py index 7159ce00..1c3bd04d 100644 
--- a/src/mistralai/models/ftmodelcard.py +++ b/src/mistralai/models/ftmodelcard.py @@ -12,7 +12,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -FTModelCardType = Literal["fine-tuned"] +FTModelCardType = Literal["fine-tuned",] class FTModelCardTypedDict(TypedDict): diff --git a/src/mistralai/models/functioncallentry.py b/src/mistralai/models/functioncallentry.py index 1e47fda9..4ea62c4f 100644 --- a/src/mistralai/models/functioncallentry.py +++ b/src/mistralai/models/functioncallentry.py @@ -12,9 +12,10 @@ from typing_extensions import NotRequired, TypedDict -FunctionCallEntryObject = Literal["entry"] +FunctionCallEntryObject = Literal["entry",] -FunctionCallEntryType = Literal["function.call"] + +FunctionCallEntryType = Literal["function.call",] class FunctionCallEntryTypedDict(TypedDict): diff --git a/src/mistralai/models/functioncallevent.py b/src/mistralai/models/functioncallevent.py index 90b4b226..e3992cf1 100644 --- a/src/mistralai/models/functioncallevent.py +++ b/src/mistralai/models/functioncallevent.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -FunctionCallEventType = Literal["function.call.delta"] +FunctionCallEventType = Literal["function.call.delta",] class FunctionCallEventTypedDict(TypedDict): diff --git a/src/mistralai/models/functionresultentry.py b/src/mistralai/models/functionresultentry.py index f09e11ae..1c61395a 100644 --- a/src/mistralai/models/functionresultentry.py +++ b/src/mistralai/models/functionresultentry.py @@ -8,9 +8,10 @@ from typing_extensions import NotRequired, TypedDict -FunctionResultEntryObject = Literal["entry"] +FunctionResultEntryObject = Literal["entry",] -FunctionResultEntryType = Literal["function.result"] + +FunctionResultEntryType = Literal["function.result",] class FunctionResultEntryTypedDict(TypedDict): diff --git a/src/mistralai/models/functiontool.py b/src/mistralai/models/functiontool.py index 7ce5c464..009fe280 100644 --- a/src/mistralai/models/functiontool.py 
+++ b/src/mistralai/models/functiontool.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -FunctionToolType = Literal["function"] +FunctionToolType = Literal["function",] class FunctionToolTypedDict(TypedDict): diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py index 801c0540..b16ce0d2 100644 --- a/src/mistralai/models/githubrepositoryin.py +++ b/src/mistralai/models/githubrepositoryin.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -GithubRepositoryInType = Literal["github"] +GithubRepositoryInType = Literal["github",] class GithubRepositoryInTypedDict(TypedDict): diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py index 0d74c17a..372477c1 100644 --- a/src/mistralai/models/githubrepositoryout.py +++ b/src/mistralai/models/githubrepositoryout.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -GithubRepositoryOutType = Literal["github"] +GithubRepositoryOutType = Literal["github",] class GithubRepositoryOutTypedDict(TypedDict): diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py index e9136063..d467577a 100644 --- a/src/mistralai/models/httpvalidationerror.py +++ b/src/mistralai/models/httpvalidationerror.py @@ -2,6 +2,7 @@ from __future__ import annotations from .validationerror import ValidationError +from dataclasses import dataclass, field import httpx from mistralai.models import MistralError from mistralai.types import BaseModel @@ -12,8 +13,9 @@ class HTTPValidationErrorData(BaseModel): detail: Optional[List[ValidationError]] = None +@dataclass(unsafe_hash=True) class HTTPValidationError(MistralError): - data: HTTPValidationErrorData + data: HTTPValidationErrorData = field(hash=False) def __init__( self, @@ -23,4 +25,4 @@ def __init__( ): message = body or raw_response.text super().__init__(message, raw_response, body) - self.data = data + 
object.__setattr__(self, "data", data) diff --git a/src/mistralai/models/imagegenerationtool.py b/src/mistralai/models/imagegenerationtool.py index 27bb2d12..a92335db 100644 --- a/src/mistralai/models/imagegenerationtool.py +++ b/src/mistralai/models/imagegenerationtool.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -ImageGenerationToolType = Literal["image_generation"] +ImageGenerationToolType = Literal["image_generation",] class ImageGenerationToolTypedDict(TypedDict): diff --git a/src/mistralai/models/imageurlchunk.py b/src/mistralai/models/imageurlchunk.py index 498690f5..8e8aac42 100644 --- a/src/mistralai/models/imageurlchunk.py +++ b/src/mistralai/models/imageurlchunk.py @@ -15,7 +15,7 @@ ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) -ImageURLChunkType = Literal["image_url"] +ImageURLChunkType = Literal["image_url",] class ImageURLChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py index abdf18fd..680b1d58 100644 --- a/src/mistralai/models/jobsout.py +++ b/src/mistralai/models/jobsout.py @@ -24,7 +24,7 @@ ] -JobsOutObject = Literal["list"] +JobsOutObject = Literal["list",] class JobsOutTypedDict(TypedDict): diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py index d878173b..49951219 100644 --- a/src/mistralai/models/legacyjobmetadataout.py +++ b/src/mistralai/models/legacyjobmetadataout.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -LegacyJobMetadataOutObject = Literal["job.metadata"] +LegacyJobMetadataOutObject = Literal["job.metadata",] class LegacyJobMetadataOutTypedDict(TypedDict): diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py index c14ad5ae..edf05631 100644 --- a/src/mistralai/models/messageinputentry.py +++ b/src/mistralai/models/messageinputentry.py @@ -12,11 +12,17 @@ from typing_extensions import 
NotRequired, TypeAliasType, TypedDict -Object = Literal["entry"] +Object = Literal["entry",] -MessageInputEntryType = Literal["message.input"] -MessageInputEntryRole = Literal["assistant", "user"] +MessageInputEntryType = Literal["message.input",] + + +MessageInputEntryRole = Literal[ + "assistant", + "user", +] + MessageInputEntryContentTypedDict = TypeAliasType( "MessageInputEntryContentTypedDict", diff --git a/src/mistralai/models/messageoutputentry.py b/src/mistralai/models/messageoutputentry.py index 1c2e4107..0e2df81e 100644 --- a/src/mistralai/models/messageoutputentry.py +++ b/src/mistralai/models/messageoutputentry.py @@ -12,11 +12,14 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -MessageOutputEntryObject = Literal["entry"] +MessageOutputEntryObject = Literal["entry",] -MessageOutputEntryType = Literal["message.output"] -MessageOutputEntryRole = Literal["assistant"] +MessageOutputEntryType = Literal["message.output",] + + +MessageOutputEntryRole = Literal["assistant",] + MessageOutputEntryContentTypedDict = TypeAliasType( "MessageOutputEntryContentTypedDict", diff --git a/src/mistralai/models/messageoutputevent.py b/src/mistralai/models/messageoutputevent.py index 474cb081..751767a3 100644 --- a/src/mistralai/models/messageoutputevent.py +++ b/src/mistralai/models/messageoutputevent.py @@ -9,9 +9,11 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -MessageOutputEventType = Literal["message.output.delta"] +MessageOutputEventType = Literal["message.output.delta",] + + +MessageOutputEventRole = Literal["assistant",] -MessageOutputEventRole = Literal["assistant"] MessageOutputEventContentTypedDict = TypeAliasType( "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] diff --git a/src/mistralai/models/mistralerror.py b/src/mistralai/models/mistralerror.py index a0ee5078..28cfd22d 100644 --- a/src/mistralai/models/mistralerror.py +++ b/src/mistralai/models/mistralerror.py @@ -2,25 +2,29 @@ 
import httpx from typing import Optional +from dataclasses import dataclass, field +@dataclass(unsafe_hash=True) class MistralError(Exception): """The base class for all HTTP error responses.""" message: str status_code: int body: str - headers: httpx.Headers - raw_response: httpx.Response + headers: httpx.Headers = field(hash=False) + raw_response: httpx.Response = field(hash=False) def __init__( self, message: str, raw_response: httpx.Response, body: Optional[str] = None ): - self.message = message - self.status_code = raw_response.status_code - self.body = body if body is not None else raw_response.text - self.headers = raw_response.headers - self.raw_response = raw_response + object.__setattr__(self, "message", message) + object.__setattr__(self, "status_code", raw_response.status_code) + object.__setattr__( + self, "body", body if body is not None else raw_response.text + ) + object.__setattr__(self, "headers", raw_response.headers) + object.__setattr__(self, "raw_response", raw_response) def __str__(self): return self.message diff --git a/src/mistralai/models/mistralpromptmode.py b/src/mistralai/models/mistralpromptmode.py index 0ffd6787..ee82fb6d 100644 --- a/src/mistralai/models/mistralpromptmode.py +++ b/src/mistralai/models/mistralpromptmode.py @@ -5,4 +5,4 @@ from typing import Literal, Union -MistralPromptMode = Union[Literal["reasoning"], UnrecognizedStr] +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] diff --git a/src/mistralai/models/modelconversation.py b/src/mistralai/models/modelconversation.py index e413b6fb..8eca4f97 100644 --- a/src/mistralai/models/modelconversation.py +++ b/src/mistralai/models/modelconversation.py @@ -42,7 +42,7 @@ ] -ModelConversationObject = Literal["conversation"] +ModelConversationObject = Literal["conversation",] class ModelConversationTypedDict(TypedDict): diff --git a/src/mistralai/models/no_response_error.py b/src/mistralai/models/no_response_error.py index f98beea2..1deab64b 100644 --- 
a/src/mistralai/models/no_response_error.py +++ b/src/mistralai/models/no_response_error.py @@ -1,12 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from dataclasses import dataclass + + +@dataclass(unsafe_hash=True) class NoResponseError(Exception): """Error raised when no HTTP response is received from the server.""" message: str def __init__(self, message: str = "No response received"): - self.message = message + object.__setattr__(self, "message", message) super().__init__(message) def __str__(self): diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py index e600d5b6..8bd13370 100644 --- a/src/mistralai/models/ocrrequest.py +++ b/src/mistralai/models/ocrrequest.py @@ -22,7 +22,10 @@ r"""Document to run OCR on""" -TableFormat = Literal["markdown", "html"] +TableFormat = Literal[ + "markdown", + "html", +] class OCRRequestTypedDict(TypedDict): diff --git a/src/mistralai/models/ocrtableobject.py b/src/mistralai/models/ocrtableobject.py index 76f21f3b..5f30ab5e 100644 --- a/src/mistralai/models/ocrtableobject.py +++ b/src/mistralai/models/ocrtableobject.py @@ -7,7 +7,10 @@ from typing_extensions import Annotated, TypedDict -Format = Literal["markdown", "html"] +Format = Literal[ + "markdown", + "html", +] r"""Format of the table""" diff --git a/src/mistralai/models/referencechunk.py b/src/mistralai/models/referencechunk.py index 4a5503f2..1864ac79 100644 --- a/src/mistralai/models/referencechunk.py +++ b/src/mistralai/models/referencechunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -ReferenceChunkType = Literal["reference"] +ReferenceChunkType = Literal["reference",] class ReferenceChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/requestsource.py b/src/mistralai/models/requestsource.py index 5ab93af0..7b0a35c4 100644 --- a/src/mistralai/models/requestsource.py +++ b/src/mistralai/models/requestsource.py @@ -4,4 +4,8 
@@ from typing import Literal -RequestSource = Literal["api", "playground", "agent_builder_v1"] +RequestSource = Literal[ + "api", + "playground", + "agent_builder_v1", +] diff --git a/src/mistralai/models/responsedoneevent.py b/src/mistralai/models/responsedoneevent.py index 296cb430..5a3a3dfb 100644 --- a/src/mistralai/models/responsedoneevent.py +++ b/src/mistralai/models/responsedoneevent.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -ResponseDoneEventType = Literal["conversation.response.done"] +ResponseDoneEventType = Literal["conversation.response.done",] class ResponseDoneEventTypedDict(TypedDict): diff --git a/src/mistralai/models/responseerrorevent.py b/src/mistralai/models/responseerrorevent.py index e4190d17..6cb1b268 100644 --- a/src/mistralai/models/responseerrorevent.py +++ b/src/mistralai/models/responseerrorevent.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -ResponseErrorEventType = Literal["conversation.response.error"] +ResponseErrorEventType = Literal["conversation.response.error",] class ResponseErrorEventTypedDict(TypedDict): diff --git a/src/mistralai/models/responseformats.py b/src/mistralai/models/responseformats.py index 258fe70e..cbf83ce7 100644 --- a/src/mistralai/models/responseformats.py +++ b/src/mistralai/models/responseformats.py @@ -4,4 +4,8 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object", "json_schema"] +ResponseFormats = Literal[ + "text", + "json_object", + "json_schema", +] diff --git a/src/mistralai/models/responsestartedevent.py b/src/mistralai/models/responsestartedevent.py index 6acb483e..d14d45ef 100644 --- a/src/mistralai/models/responsestartedevent.py +++ b/src/mistralai/models/responsestartedevent.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -ResponseStartedEventType = Literal["conversation.response.started"] +ResponseStartedEventType = Literal["conversation.response.started",] class 
ResponseStartedEventTypedDict(TypedDict): diff --git a/src/mistralai/models/responsevalidationerror.py b/src/mistralai/models/responsevalidationerror.py index fe31cfbd..ed301655 100644 --- a/src/mistralai/models/responsevalidationerror.py +++ b/src/mistralai/models/responsevalidationerror.py @@ -2,10 +2,12 @@ import httpx from typing import Optional +from dataclasses import dataclass from mistralai.models import MistralError +@dataclass(unsafe_hash=True) class ResponseValidationError(MistralError): """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py index 7d734b0f..94540083 100644 --- a/src/mistralai/models/retrievefileout.py +++ b/src/mistralai/models/retrievefileout.py @@ -5,10 +5,8 @@ from .sampletype import SampleType from .source import Source from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_open_enum import pydantic from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator from typing_extensions import Annotated, NotRequired, TypedDict @@ -48,11 +46,11 @@ class RetrieveFileOut(BaseModel): filename: str r"""The name of the uploaded file.""" - purpose: Annotated[FilePurpose, PlainValidator(validate_open_enum(False))] + purpose: FilePurpose - sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] + sample_type: SampleType - source: Annotated[Source, PlainValidator(validate_open_enum(False))] + source: Source deleted: bool diff --git a/src/mistralai/models/sampletype.py b/src/mistralai/models/sampletype.py index adc90ec7..efb43e9b 100644 --- a/src/mistralai/models/sampletype.py +++ b/src/mistralai/models/sampletype.py @@ -6,6 +6,12 @@ SampleType = Union[ - Literal["pretrain", "instruct", "batch_request", "batch_result", "batch_error"], + Literal[ + "pretrain", + "instruct", + 
"batch_request", + "batch_result", + "batch_error", + ], UnrecognizedStr, ] diff --git a/src/mistralai/models/sdkerror.py b/src/mistralai/models/sdkerror.py index 2513f36b..65c45cf1 100644 --- a/src/mistralai/models/sdkerror.py +++ b/src/mistralai/models/sdkerror.py @@ -2,12 +2,14 @@ import httpx from typing import Optional +from dataclasses import dataclass from mistralai.models import MistralError MAX_MESSAGE_LEN = 10_000 +@dataclass(unsafe_hash=True) class SDKError(MistralError): """The fallback error class if no more specific error class is matched.""" diff --git a/src/mistralai/models/shareenum.py b/src/mistralai/models/shareenum.py index c2945514..634ba4b7 100644 --- a/src/mistralai/models/shareenum.py +++ b/src/mistralai/models/shareenum.py @@ -5,4 +5,10 @@ from typing import Literal, Union -ShareEnum = Union[Literal["Viewer", "Editor"], UnrecognizedStr] +ShareEnum = Union[ + Literal[ + "Viewer", + "Editor", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/models/sharingdelete.py b/src/mistralai/models/sharingdelete.py index d1cd7074..ebcdbab5 100644 --- a/src/mistralai/models/sharingdelete.py +++ b/src/mistralai/models/sharingdelete.py @@ -3,10 +3,8 @@ from __future__ import annotations from .entitytype import EntityType from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_open_enum from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class SharingDeleteTypedDict(TypedDict): @@ -21,7 +19,7 @@ class SharingDelete(BaseModel): share_with_uuid: str r"""The id of the entity (user, workspace or organization) to share with""" - share_with_type: Annotated[EntityType, PlainValidator(validate_open_enum(False))] + share_with_type: EntityType r"""The type of entity, used to share a library.""" org_id: OptionalNullable[str] = UNSET 
diff --git a/src/mistralai/models/sharingin.py b/src/mistralai/models/sharingin.py index d3ada343..f7bb89ca 100644 --- a/src/mistralai/models/sharingin.py +++ b/src/mistralai/models/sharingin.py @@ -4,10 +4,8 @@ from .entitytype import EntityType from .shareenum import ShareEnum from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_open_enum from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class SharingInTypedDict(TypedDict): @@ -20,12 +18,12 @@ class SharingInTypedDict(TypedDict): class SharingIn(BaseModel): - level: Annotated[ShareEnum, PlainValidator(validate_open_enum(False))] + level: ShareEnum share_with_uuid: str r"""The id of the entity (user, workspace or organization) to share with""" - share_with_type: Annotated[EntityType, PlainValidator(validate_open_enum(False))] + share_with_type: EntityType r"""The type of entity, used to share a library.""" org_id: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/source.py b/src/mistralai/models/source.py index c21550f2..cc3abce2 100644 --- a/src/mistralai/models/source.py +++ b/src/mistralai/models/source.py @@ -5,4 +5,11 @@ from typing import Literal, Union -Source = Union[Literal["upload", "repository", "mistral"], UnrecognizedStr] +Source = Union[ + Literal[ + "upload", + "repository", + "mistral", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py index 25b51f95..2b34607b 100644 --- a/src/mistralai/models/systemmessage.py +++ b/src/mistralai/models/systemmessage.py @@ -21,7 +21,7 @@ ) -Role = Literal["system"] +Role = Literal["system",] class SystemMessageTypedDict(TypedDict): diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py index 02b115f6..6052686e 100644 --- 
a/src/mistralai/models/textchunk.py +++ b/src/mistralai/models/textchunk.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -TextChunkType = Literal["text"] +TextChunkType = Literal["text",] class TextChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/thinkchunk.py b/src/mistralai/models/thinkchunk.py index 24b466f9..627ae488 100644 --- a/src/mistralai/models/thinkchunk.py +++ b/src/mistralai/models/thinkchunk.py @@ -16,7 +16,7 @@ Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) -ThinkChunkType = Literal["thinking"] +ThinkChunkType = Literal["thinking",] class ThinkChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/timestampgranularity.py b/src/mistralai/models/timestampgranularity.py index dd1b6446..02816df6 100644 --- a/src/mistralai/models/timestampgranularity.py +++ b/src/mistralai/models/timestampgranularity.py @@ -4,4 +4,4 @@ from typing import Literal -TimestampGranularity = Literal["segment"] +TimestampGranularity = Literal["segment",] diff --git a/src/mistralai/models/tool.py b/src/mistralai/models/tool.py index 6e746df3..b14a6adf 100644 --- a/src/mistralai/models/tool.py +++ b/src/mistralai/models/tool.py @@ -4,10 +4,8 @@ from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes from mistralai.types import BaseModel -from mistralai.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class ToolTypedDict(TypedDict): @@ -18,6 +16,4 @@ class ToolTypedDict(TypedDict): class Tool(BaseModel): function: Function - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) + type: Optional[ToolTypes] = None diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py index 92dbb4a9..1f367924 100644 --- a/src/mistralai/models/toolcall.py +++ 
b/src/mistralai/models/toolcall.py @@ -4,10 +4,8 @@ from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes from mistralai.types import BaseModel -from mistralai.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class ToolCallTypedDict(TypedDict): @@ -22,8 +20,6 @@ class ToolCall(BaseModel): id: Optional[str] = "null" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) + type: Optional[ToolTypes] = None index: Optional[int] = 0 diff --git a/src/mistralai/models/toolchoice.py b/src/mistralai/models/toolchoice.py index 3b7d60e0..f8e1b486 100644 --- a/src/mistralai/models/toolchoice.py +++ b/src/mistralai/models/toolchoice.py @@ -4,10 +4,8 @@ from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes from mistralai.types import BaseModel -from mistralai.utils import validate_open_enum -from pydantic.functional_validators import PlainValidator from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict +from typing_extensions import NotRequired, TypedDict class ToolChoiceTypedDict(TypedDict): @@ -24,6 +22,4 @@ class ToolChoice(BaseModel): function: FunctionName r"""this restriction of `Function` is used to select a specific function to call""" - type: Annotated[Optional[ToolTypes], PlainValidator(validate_open_enum(False))] = ( - None - ) + type: Optional[ToolTypes] = None diff --git a/src/mistralai/models/toolchoiceenum.py b/src/mistralai/models/toolchoiceenum.py index 8e6a6ad8..01f6f677 100644 --- a/src/mistralai/models/toolchoiceenum.py +++ b/src/mistralai/models/toolchoiceenum.py @@ -4,4 +4,9 @@ from typing import Literal -ToolChoiceEnum = Literal["auto", "none", "any", "required"] +ToolChoiceEnum = Literal[ + "auto", + "none", + 
"any", + "required", +] diff --git a/src/mistralai/models/toolexecutiondeltaevent.py b/src/mistralai/models/toolexecutiondeltaevent.py index 25438206..4fca46a8 100644 --- a/src/mistralai/models/toolexecutiondeltaevent.py +++ b/src/mistralai/models/toolexecutiondeltaevent.py @@ -8,7 +8,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolExecutionDeltaEventType = Literal["tool.execution.delta"] +ToolExecutionDeltaEventType = Literal["tool.execution.delta",] + ToolExecutionDeltaEventNameTypedDict = TypeAliasType( "ToolExecutionDeltaEventNameTypedDict", Union[BuiltInConnectors, str] diff --git a/src/mistralai/models/toolexecutiondoneevent.py b/src/mistralai/models/toolexecutiondoneevent.py index 2dea3324..621d5571 100644 --- a/src/mistralai/models/toolexecutiondoneevent.py +++ b/src/mistralai/models/toolexecutiondoneevent.py @@ -8,7 +8,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolExecutionDoneEventType = Literal["tool.execution.done"] +ToolExecutionDoneEventType = Literal["tool.execution.done",] + ToolExecutionDoneEventNameTypedDict = TypeAliasType( "ToolExecutionDoneEventNameTypedDict", Union[BuiltInConnectors, str] diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py index abe53e06..9f70a63b 100644 --- a/src/mistralai/models/toolexecutionentry.py +++ b/src/mistralai/models/toolexecutionentry.py @@ -9,9 +9,11 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolExecutionEntryObject = Literal["entry"] +ToolExecutionEntryObject = Literal["entry",] + + +ToolExecutionEntryType = Literal["tool.execution",] -ToolExecutionEntryType = Literal["tool.execution"] NameTypedDict = TypeAliasType("NameTypedDict", Union[BuiltInConnectors, str]) diff --git a/src/mistralai/models/toolexecutionstartedevent.py b/src/mistralai/models/toolexecutionstartedevent.py index cf4ecbfc..80dd5e97 100644 --- a/src/mistralai/models/toolexecutionstartedevent.py +++ 
b/src/mistralai/models/toolexecutionstartedevent.py @@ -8,7 +8,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolExecutionStartedEventType = Literal["tool.execution.started"] +ToolExecutionStartedEventType = Literal["tool.execution.started",] + ToolExecutionStartedEventNameTypedDict = TypeAliasType( "ToolExecutionStartedEventNameTypedDict", Union[BuiltInConnectors, str] diff --git a/src/mistralai/models/toolfilechunk.py b/src/mistralai/models/toolfilechunk.py index a6f58147..87bc822c 100644 --- a/src/mistralai/models/toolfilechunk.py +++ b/src/mistralai/models/toolfilechunk.py @@ -8,7 +8,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolFileChunkType = Literal["tool_file"] +ToolFileChunkType = Literal["tool_file",] + ToolFileChunkToolTypedDict = TypeAliasType( "ToolFileChunkToolTypedDict", Union[BuiltInConnectors, str] diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py index 82f62e0f..ef917c43 100644 --- a/src/mistralai/models/toolmessage.py +++ b/src/mistralai/models/toolmessage.py @@ -16,7 +16,7 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool"] +ToolMessageRole = Literal["tool",] class ToolMessageTypedDict(TypedDict): diff --git a/src/mistralai/models/toolreferencechunk.py b/src/mistralai/models/toolreferencechunk.py index fb6d8de2..2a751cb0 100644 --- a/src/mistralai/models/toolreferencechunk.py +++ b/src/mistralai/models/toolreferencechunk.py @@ -8,7 +8,8 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ToolReferenceChunkType = Literal["tool_reference"] +ToolReferenceChunkType = Literal["tool_reference",] + ToolReferenceChunkToolTypedDict = TypeAliasType( "ToolReferenceChunkToolTypedDict", Union[BuiltInConnectors, str] diff --git a/src/mistralai/models/tooltypes.py b/src/mistralai/models/tooltypes.py index fb581820..f54893c2 100644 --- a/src/mistralai/models/tooltypes.py 
+++ b/src/mistralai/models/tooltypes.py @@ -5,4 +5,4 @@ from typing import Literal, Union -ToolTypes = Union[Literal["function"], UnrecognizedStr] +ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/src/mistralai/models/transcriptionsegmentchunk.py b/src/mistralai/models/transcriptionsegmentchunk.py index 53f1b397..aa30f053 100644 --- a/src/mistralai/models/transcriptionsegmentchunk.py +++ b/src/mistralai/models/transcriptionsegmentchunk.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -Type = Literal["transcription_segment"] +Type = Literal["transcription_segment",] class TranscriptionSegmentChunkTypedDict(TypedDict): diff --git a/src/mistralai/models/transcriptionstreamdone.py b/src/mistralai/models/transcriptionstreamdone.py index ffd0e080..e1b1ab3d 100644 --- a/src/mistralai/models/transcriptionstreamdone.py +++ b/src/mistralai/models/transcriptionstreamdone.py @@ -13,7 +13,7 @@ from typing_extensions import NotRequired, TypedDict -TranscriptionStreamDoneType = Literal["transcription.done"] +TranscriptionStreamDoneType = Literal["transcription.done",] class TranscriptionStreamDoneTypedDict(TypedDict): diff --git a/src/mistralai/models/transcriptionstreamlanguage.py b/src/mistralai/models/transcriptionstreamlanguage.py index 8fc2aa6e..15b75144 100644 --- a/src/mistralai/models/transcriptionstreamlanguage.py +++ b/src/mistralai/models/transcriptionstreamlanguage.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -TranscriptionStreamLanguageType = Literal["transcription.language"] +TranscriptionStreamLanguageType = Literal["transcription.language",] class TranscriptionStreamLanguageTypedDict(TypedDict): diff --git a/src/mistralai/models/transcriptionstreamsegmentdelta.py b/src/mistralai/models/transcriptionstreamsegmentdelta.py index 61b396b4..d779ed83 100644 --- a/src/mistralai/models/transcriptionstreamsegmentdelta.py +++ b/src/mistralai/models/transcriptionstreamsegmentdelta.py @@ -8,7 +8,7 @@ from 
typing_extensions import NotRequired, TypedDict -TranscriptionStreamSegmentDeltaType = Literal["transcription.segment"] +TranscriptionStreamSegmentDeltaType = Literal["transcription.segment",] class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): diff --git a/src/mistralai/models/transcriptionstreamtextdelta.py b/src/mistralai/models/transcriptionstreamtextdelta.py index 8f0b0e59..daee151f 100644 --- a/src/mistralai/models/transcriptionstreamtextdelta.py +++ b/src/mistralai/models/transcriptionstreamtextdelta.py @@ -8,7 +8,7 @@ from typing_extensions import NotRequired, TypedDict -TranscriptionStreamTextDeltaType = Literal["transcription.text.delta"] +TranscriptionStreamTextDeltaType = Literal["transcription.text.delta",] class TranscriptionStreamTextDeltaTypedDict(TypedDict): diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py index 6b2f730d..55c0ea8a 100644 --- a/src/mistralai/models/unarchiveftmodelout.py +++ b/src/mistralai/models/unarchiveftmodelout.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -UnarchiveFTModelOutObject = Literal["model"] +UnarchiveFTModelOutObject = Literal["model",] class UnarchiveFTModelOutTypedDict(TypedDict): diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py index 8f9f1067..f235fdcd 100644 --- a/src/mistralai/models/uploadfileout.py +++ b/src/mistralai/models/uploadfileout.py @@ -5,10 +5,8 @@ from .sampletype import SampleType from .source import Source from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_open_enum import pydantic from pydantic import model_serializer -from pydantic.functional_validators import PlainValidator from typing_extensions import Annotated, NotRequired, TypedDict @@ -47,11 +45,11 @@ class UploadFileOut(BaseModel): filename: str r"""The name of the uploaded file.""" - purpose: Annotated[FilePurpose, 
PlainValidator(validate_open_enum(False))] + purpose: FilePurpose - sample_type: Annotated[SampleType, PlainValidator(validate_open_enum(False))] + sample_type: SampleType - source: Annotated[Source, PlainValidator(validate_open_enum(False))] + source: Source num_lines: OptionalNullable[int] = UNSET diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py index 049bc755..61590bed 100644 --- a/src/mistralai/models/usermessage.py +++ b/src/mistralai/models/usermessage.py @@ -16,7 +16,7 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user"] +UserMessageRole = Literal["user",] class UserMessageTypedDict(TypedDict): diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py index 0789b648..69053896 100644 --- a/src/mistralai/models/wandbintegration.py +++ b/src/mistralai/models/wandbintegration.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -WandbIntegrationType = Literal["wandb"] +WandbIntegrationType = Literal["wandb",] class WandbIntegrationTypedDict(TypedDict): diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py index a1c2f570..f5a9ba80 100644 --- a/src/mistralai/models/wandbintegrationout.py +++ b/src/mistralai/models/wandbintegrationout.py @@ -7,7 +7,7 @@ from typing_extensions import NotRequired, TypedDict -WandbIntegrationOutType = Literal["wandb"] +WandbIntegrationOutType = Literal["wandb",] class WandbIntegrationOutTypedDict(TypedDict): diff --git a/src/mistralai/models/websearchpremiumtool.py b/src/mistralai/models/websearchpremiumtool.py index 70fc5626..3bbe753a 100644 --- a/src/mistralai/models/websearchpremiumtool.py +++ b/src/mistralai/models/websearchpremiumtool.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -WebSearchPremiumToolType = Literal["web_search_premium"] +WebSearchPremiumToolType = 
Literal["web_search_premium",] class WebSearchPremiumToolTypedDict(TypedDict): diff --git a/src/mistralai/models/websearchtool.py b/src/mistralai/models/websearchtool.py index 3dfd1c53..eeafecb4 100644 --- a/src/mistralai/models/websearchtool.py +++ b/src/mistralai/models/websearchtool.py @@ -6,7 +6,7 @@ from typing_extensions import NotRequired, TypedDict -WebSearchToolType = Literal["web_search"] +WebSearchToolType = Literal["web_search",] class WebSearchToolTypedDict(TypedDict): diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py index bf82cc16..d44930a0 100644 --- a/src/mistralai/models_.py +++ b/src/mistralai/models_.py @@ -51,6 +51,7 @@ def list( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -67,7 +68,7 @@ def list( config=self.sdk_configuration, base_url=base_url or "", operation_id="list_models_v1_models_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -127,6 +128,7 @@ async def list_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -143,7 +145,7 @@ async def list_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="list_models_v1_models_get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -210,6 +212,7 @@ def retrieve( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -226,7 +229,7 @@ def retrieve( config=self.sdk_configuration, base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( 
self.sdk_configuration.security, models.Security ), @@ -302,6 +305,7 @@ async def retrieve_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -318,7 +322,7 @@ async def retrieve_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="retrieve_model_v1_models__model_id__get", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -394,6 +398,7 @@ def delete( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -410,7 +415,7 @@ def delete( config=self.sdk_configuration, base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -483,6 +488,7 @@ async def delete_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -499,7 +505,7 @@ async def delete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="delete_model_v1_models__model_id__delete", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -583,6 +589,7 @@ def update( get_serialized_body=lambda: utils.serialize_request_body( request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -599,7 +606,7 @@ def update( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, 
models.Security ), @@ -679,6 +686,7 @@ async def update_async( get_serialized_body=lambda: utils.serialize_request_body( request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -695,7 +703,7 @@ async def update_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -764,6 +772,7 @@ def archive( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -780,7 +789,7 @@ def archive( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -847,6 +856,7 @@ async def archive_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -863,7 +873,7 @@ async def archive_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -930,6 +940,7 @@ def unarchive( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -946,7 +957,7 @@ def unarchive( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( 
self.sdk_configuration.security, models.Security ), @@ -1013,6 +1024,7 @@ async def unarchive_async( accept_header_value="application/json", http_headers=http_headers, security=self.sdk_configuration.security, + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -1029,7 +1041,7 @@ async def unarchive_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index 6b283b35..8c2e478b 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -3,6 +3,10 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + ocrrequest as models_ocrrequest, + responseformat as models_responseformat, +) from mistralai.types import Nullable, OptionalNullable, UNSET from mistralai.utils import get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -16,19 +20,27 @@ def process( self, *, model: Nullable[str], - document: Union[models.Document, models.DocumentTypedDict], + document: Union[ + models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict + ], id: Optional[str] = None, pages: OptionalNullable[List[int]] = UNSET, include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, bbox_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] = UNSET, document_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] 
= UNSET, - table_format: OptionalNullable[models.TableFormat] = UNSET, + table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -100,6 +112,7 @@ def process( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "json", models.OCRRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -116,7 +129,7 @@ def process( config=self.sdk_configuration, base_url=base_url or "", operation_id="ocr_v1_ocr_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -147,19 +160,27 @@ async def process_async( self, *, model: Nullable[str], - document: Union[models.Document, models.DocumentTypedDict], + document: Union[ + models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict + ], id: Optional[str] = None, pages: OptionalNullable[List[int]] = UNSET, include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, bbox_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] = UNSET, document_annotation_format: OptionalNullable[ - Union[models.ResponseFormat, models.ResponseFormatTypedDict] + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] ] = UNSET, - table_format: OptionalNullable[models.TableFormat] = UNSET, + table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -231,6 +252,7 @@ async def process_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, 
False, "json", models.OCRRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -247,7 +269,7 @@ async def process_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="ocr_v1_ocr_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index 311147fd..c83b53e0 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -92,7 +92,7 @@ def __init__( """ client_supplied = True if client is None: - client = httpx.Client() + client = httpx.Client(follow_redirects=True) client_supplied = False assert issubclass( @@ -101,7 +101,7 @@ def __init__( async_client_supplied = True if async_client is None: - async_client = httpx.AsyncClient() + async_client = httpx.AsyncClient(follow_redirects=True) async_client_supplied = False if debug_logger is None: diff --git a/src/mistralai/transcriptions.py b/src/mistralai/transcriptions.py index dc8ad2e8..bdbeb1cc 100644 --- a/src/mistralai/transcriptions.py +++ b/src/mistralai/transcriptions.py @@ -3,6 +3,10 @@ from .basesdk import BaseSDK from mistralai import models, utils from mistralai._hooks import HookContext +from mistralai.models import ( + file as models_file, + timestampgranularity as models_timestampgranularity, +) from mistralai.types import OptionalNullable, UNSET from mistralai.utils import eventstreaming, get_security_from_env from mistralai.utils.unmarshal_json_response import unmarshal_json_response @@ -16,12 +20,14 @@ def complete( self, *, model: str, - file: Optional[Union[models.File, models.FileTypedDict]] = None, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, file_url: OptionalNullable[str] = UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, - timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + 
timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -77,6 +83,7 @@ def complete( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", models.AudioTranscriptionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -93,7 +100,7 @@ def complete( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -118,12 +125,14 @@ async def complete_async( self, *, model: str, - file: Optional[Union[models.File, models.FileTypedDict]] = None, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, file_url: OptionalNullable[str] = UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, - timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -179,6 +188,7 @@ async def complete_async( get_serialized_body=lambda: utils.serialize_request_body( request, False, False, "multipart", models.AudioTranscriptionRequest ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -195,7 +205,7 @@ async def complete_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -220,12 +230,14 @@ def stream( self, *, model: str, - file: Optional[Union[models.File, 
models.FileTypedDict]] = None, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, file_url: OptionalNullable[str] = UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, - timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -285,6 +297,7 @@ def stream( "multipart", models.AudioTranscriptionRequestStream, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -301,7 +314,7 @@ def stream( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post_stream", - oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), @@ -332,12 +345,14 @@ async def stream_async( self, *, model: str, - file: Optional[Union[models.File, models.FileTypedDict]] = None, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, file_url: OptionalNullable[str] = UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, - timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, + timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + ] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -397,6 +412,7 @@ async def stream_async( "multipart", models.AudioTranscriptionRequestStream, ), + allow_empty_value=None, timeout_ms=timeout_ms, ) @@ -413,7 +429,7 @@ async def stream_async( config=self.sdk_configuration, base_url=base_url or "", operation_id="audio_api_v1_transcriptions_post_stream", - 
oauth2_scopes=[], + oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security ), diff --git a/src/mistralai/types/basemodel.py b/src/mistralai/types/basemodel.py index 231c2e37..a9a640a1 100644 --- a/src/mistralai/types/basemodel.py +++ b/src/mistralai/types/basemodel.py @@ -2,7 +2,8 @@ from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel -from typing import TYPE_CHECKING, Literal, Optional, TypeVar, Union +from pydantic_core import core_schema +from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union from typing_extensions import TypeAliasType, TypeAlias @@ -35,5 +36,42 @@ def __bool__(self) -> Literal[False]: "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) ) -UnrecognizedInt: TypeAlias = int -UnrecognizedStr: TypeAlias = str + +class UnrecognizedStr(str): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedStr only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedStr': + if isinstance(v, cls): + return v + return cls(str(v)) + + # Use lax_or_strict_schema where strict always fails + # This forces Pydantic to prefer other union members in strict mode + # and only fall back to UnrecognizedStr in lax mode + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.str_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) + + +class UnrecognizedInt(int): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedInt only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types 
(like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedInt': + if isinstance(v, cls): + return v + return cls(int(v)) + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.int_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py index 87192dde..f9c2edce 100644 --- a/src/mistralai/utils/__init__.py +++ b/src/mistralai/utils/__init__.py @@ -42,7 +42,6 @@ validate_decimal, validate_float, validate_int, - validate_open_enum, ) from .url import generate_url, template_url, remove_suffix from .values import ( @@ -104,7 +103,6 @@ "validate_const", "validate_float", "validate_int", - "validate_open_enum", "cast_partial", ] @@ -158,7 +156,6 @@ "validate_const": ".serializers", "validate_float": ".serializers", "validate_int": ".serializers", - "validate_open_enum": ".serializers", "cast_partial": ".values", } diff --git a/src/mistralai/utils/annotations.py b/src/mistralai/utils/annotations.py index 387874ed..12e0aa4f 100644 --- a/src/mistralai/utils/annotations.py +++ b/src/mistralai/utils/annotations.py @@ -3,6 +3,7 @@ from enum import Enum from typing import Any, Optional + def get_discriminator(model: Any, fieldname: str, key: str) -> str: """ Recursively search for the discriminator attribute in a model. 
@@ -25,31 +26,54 @@ def get_field_discriminator(field: Any) -> Optional[str]: if isinstance(field, dict): if key in field: - return f'{field[key]}' + return f"{field[key]}" if hasattr(field, fieldname): attr = getattr(field, fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" if hasattr(field, upper_fieldname): attr = getattr(field, upper_fieldname) if isinstance(attr, Enum): - return f'{attr.value}' - return f'{attr}' + return f"{attr.value}" + return f"{attr}" return None + def search_nested_discriminator(obj: Any) -> Optional[str]: + """Recursively search for discriminator in nested structures.""" + # First try direct field lookup + discriminator = get_field_discriminator(obj) + if discriminator is not None: + return discriminator + + # If it's a dict, search in nested values + if isinstance(obj, dict): + for value in obj.values(): + if isinstance(value, list): + # Search in list items + for item in value: + nested_discriminator = search_nested_discriminator(item) + if nested_discriminator is not None: + return nested_discriminator + elif isinstance(value, dict): + # Search in nested dict + nested_discriminator = search_nested_discriminator(value) + if nested_discriminator is not None: + return nested_discriminator + + return None if isinstance(model, list): for field in model: - discriminator = get_field_discriminator(field) + discriminator = search_nested_discriminator(field) if discriminator is not None: return discriminator - discriminator = get_field_discriminator(model) + discriminator = search_nested_discriminator(model) if discriminator is not None: return discriminator - raise ValueError(f'Could not find discriminator field {fieldname} in {model}') + raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/src/mistralai/utils/enums.py b/src/mistralai/utils/enums.py index c3bc13cf..3324e1bc 100644 --- a/src/mistralai/utils/enums.py +++ 
b/src/mistralai/utils/enums.py @@ -2,6 +2,10 @@ import enum import sys +from typing import Any + +from pydantic_core import core_schema + class OpenEnumMeta(enum.EnumMeta): # The __call__ method `boundary` kwarg was added in 3.11 and must be present @@ -72,3 +76,59 @@ def __call__( ) except ValueError: return value + + def __new__(mcs, name, bases, namespace, **kwargs): + cls = super().__new__(mcs, name, bases, namespace, **kwargs) + + # Add __get_pydantic_core_schema__ to make open enums work correctly + # in union discrimination. In strict mode (used by Pydantic for unions), + # only known enum values match. In lax mode, unknown values are accepted. + def __get_pydantic_core_schema__( + cls_inner: Any, _source_type: Any, _handler: Any + ) -> core_schema.CoreSchema: + # Create a validator that only accepts known enum values (for strict mode) + def validate_strict(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + # Use the parent EnumMeta's __call__ which raises ValueError for unknown values + return enum.EnumMeta.__call__(cls_inner, v) + + # Create a lax validator that accepts unknown values + def validate_lax(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + try: + return enum.EnumMeta.__call__(cls_inner, v) + except ValueError: + # Return the raw value for unknown enum values + return v + + # Determine the base type schema (str or int) + is_int_enum = False + for base in cls_inner.__mro__: + if base is int: + is_int_enum = True + break + if base is str: + break + + base_schema = ( + core_schema.int_schema() + if is_int_enum + else core_schema.str_schema() + ) + + # Use lax_or_strict_schema: + # - strict mode: only known enum values match (raises ValueError for unknown) + # - lax mode: accept any value, return enum member or raw value + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema( + [base_schema, core_schema.no_info_plain_validator_function(validate_lax)] + ), + strict_schema=core_schema.chain_schema( + 
[base_schema, core_schema.no_info_plain_validator_function(validate_strict)] + ), + ) + + setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__)) + return cls diff --git a/src/mistralai/utils/forms.py b/src/mistralai/utils/forms.py index e873495f..f961e76b 100644 --- a/src/mistralai/utils/forms.py +++ b/src/mistralai/utils/forms.py @@ -142,16 +142,21 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files + array_field_name = f_name + "[]" for file_obj in val: if not _is_set(file_obj): continue - - file_name, content, content_type = _extract_file_properties(file_obj) + + file_name, content, content_type = _extract_file_properties( + file_obj + ) if content_type is not None: - files.append((f_name + "[]", (file_name, content, content_type))) + files.append( + (array_field_name, (file_name, content, content_type)) + ) else: - files.append((f_name + "[]", (file_name, content))) + files.append((array_field_name, (file_name, content))) else: # Handle single file file_name, content, content_type = _extract_file_properties(val) @@ -161,11 +166,16 @@ def serialize_multipart_form( else: files.append((f_name, (file_name, content))) elif field_metadata.json: - files.append((f_name, ( - None, - marshal_json(val, request_field_types[name]), - "application/json", - ))) + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) else: if isinstance(val, List): values = [] @@ -175,7 +185,8 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - form[f_name + "[]"] = values + array_field_name = f_name + "[]" + form[array_field_name] = values else: form[f_name] = _val_to_string(val) return media_type, form, files diff --git a/src/mistralai/utils/queryparams.py b/src/mistralai/utils/queryparams.py index 37a6e7f9..c04e0db8 100644 --- a/src/mistralai/utils/queryparams.py +++ b/src/mistralai/utils/queryparams.py @@ 
-27,12 +27,13 @@ def get_query_params( query_params: Any, gbls: Optional[Any] = None, + allow_empty_value: Optional[List[str]] = None, ) -> Dict[str, List[str]]: params: Dict[str, List[str]] = {} - globals_already_populated = _populate_query_params(query_params, gbls, params, []) + globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated) + _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) return params @@ -42,6 +43,7 @@ def _populate_query_params( gbls: Any, query_param_values: Dict[str, List[str]], skip_fields: List[str], + allow_empty_value: Optional[List[str]] = None, ) -> List[str]: globals_already_populated: List[str] = [] @@ -69,6 +71,16 @@ def _populate_query_params( globals_already_populated.append(name) f_name = field.alias if field.alias is not None else name + + allow_empty_set = set(allow_empty_value or []) + should_include_empty = f_name in allow_empty_set and ( + value is None or value == [] or value == "" + ) + + if should_include_empty: + query_param_values[f_name] = [""] + continue + serialization = metadata.serialization if serialization is not None: serialized_parms = _get_serialized_params( diff --git a/src/mistralai/utils/requestbodies.py b/src/mistralai/utils/requestbodies.py index d5240dd5..1de32b6d 100644 --- a/src/mistralai/utils/requestbodies.py +++ b/src/mistralai/utils/requestbodies.py @@ -44,15 +44,15 @@ def serialize_request_body( serialized_request_body = SerializedRequestBody(media_type) - if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None: + if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None: serialized_request_body.content = marshal_json(request_body, request_body_type) - elif re.match(r"multipart\/.*", media_type) is not None: + elif re.match(r"^multipart\/.*", media_type) is not None: ( 
serialized_request_body.media_type, serialized_request_body.data, serialized_request_body.files, ) = serialize_multipart_form(media_type, request_body) - elif re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None: + elif re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None: serialized_request_body.data = serialize_form_data(request_body) elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): serialized_request_body.content = request_body diff --git a/src/mistralai/utils/retries.py b/src/mistralai/utils/retries.py index 4d608671..88a91b10 100644 --- a/src/mistralai/utils/retries.py +++ b/src/mistralai/utils/retries.py @@ -3,7 +3,9 @@ import asyncio import random import time -from typing import List +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional import httpx @@ -51,9 +53,11 @@ def __init__(self, config: RetryConfig, status_codes: List[str]): class TemporaryError(Exception): response: httpx.Response + retry_after: Optional[int] def __init__(self, response: httpx.Response): self.response = response + self.retry_after = _parse_retry_after_header(response) class PermanentError(Exception): @@ -63,6 +67,62 @@ def __init__(self, inner: Exception): self.inner = inner +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. 
+ """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. + """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + def retry(func, retries: Retries): if retries.config.strategy == "backoff": @@ -183,8 +243,10 @@ def retry_with_backoff( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) time.sleep(sleep) retries += 1 @@ -211,7 +273,9 @@ async def retry_with_backoff_async( return exception.response raise - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - sleep = min(sleep, max_interval / 1000) + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) await 
asyncio.sleep(sleep) retries += 1 diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py index 378a14c0..14321eb4 100644 --- a/src/mistralai/utils/serializers.py +++ b/src/mistralai/utils/serializers.py @@ -102,26 +102,6 @@ def validate_int(b): return int(b) -def validate_open_enum(is_int: bool): - def validate(e): - if e is None: - return None - - if isinstance(e, Unset): - return e - - if is_int: - if not isinstance(e, int): - raise ValueError("Expected int") - else: - if not isinstance(e, str): - raise ValueError("Expected string") - - return e - - return validate - - def validate_const(v): def validate(c): # Optional[T] is a Union[T, None] diff --git a/src/mistralai/utils/unmarshal_json_response.py b/src/mistralai/utils/unmarshal_json_response.py index c0ce7e0f..64d0b3a6 100644 --- a/src/mistralai/utils/unmarshal_json_response.py +++ b/src/mistralai/utils/unmarshal_json_response.py @@ -1,12 +1,26 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from typing import Any, Optional +from typing import Any, Optional, Type, TypeVar, overload import httpx from .serializers import unmarshal_json from mistralai import models +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... + def unmarshal_json_response( typ: Any, http_res: httpx.Response, body: Optional[str] = None diff --git a/uv.lock b/uv.lock index 59639629..dc8f42ea 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "1.10.0" +version = "1.11.0" source = { editable = "." 
} dependencies = [ { name = "eval-type-backport" }, From b8f72871a57911c7f44dd25e89b33d5361f9a70f Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 28 Jan 2026 16:49:44 +0100 Subject: [PATCH 179/223] ## Python SDK Changes: (#330) * `mistral.beta.agents.list_versions()`: **Added** * `mistral.beta.agents.get_version()`: **Added** * `mistral.ocr.process()`: `request.document_annotation_prompt` **Added** Co-authored-by: speakeasybot --- .speakeasy/gen.lock | 97 +++-- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 11 +- README.md | 4 +- RELEASES.md | 12 +- .../agentsapiv1agentsgetversionrequest.md | 9 + docs/models/agentsapiv1agentslistrequest.md | 4 +- .../agentsapiv1agentslistversionsrequest.md | 10 + docs/models/ocrrequest.md | 1 + docs/sdks/mistralagents/README.md | 91 ++++- docs/sdks/ocr/README.md | 1 + src/mistralai/_version.py | 4 +- src/mistralai/mistral_agents.py | 386 +++++++++++++++++- src/mistralai/models/__init__.py | 16 + .../agents_api_v1_agents_get_versionop.py | 21 + .../agents_api_v1_agents_list_versionsop.py | 33 ++ .../models/agents_api_v1_agents_listop.py | 4 + src/mistralai/models/ocrrequest.py | 7 + src/mistralai/ocr.py | 6 + 19 files changed, 675 insertions(+), 44 deletions(-) create mode 100644 docs/models/agentsapiv1agentsgetversionrequest.md create mode 100644 docs/models/agentsapiv1agentslistversionsrequest.md create mode 100644 src/mistralai/models/agents_api_v1_agents_get_versionop.py create mode 100644 src/mistralai/models/agents_api_v1_agents_list_versionsop.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 18a38312..b89ea6b3 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,19 +1,19 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 274d02258259534e27fc706556e295ef + docChecksum: a61cb56fd9942dd20124e0422444bac3 docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 - 
releaseVersion: 1.11.0 - configChecksum: 99d8b30f701935f8b8bf94786669ddb1 + releaseVersion: 1.11.1 + configChecksum: 1a6d0af8e5d88c97b7e947763e633c3d repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: a478cd50-39ad-455d-b98e-792b4cdd77ed - pristine_commit_hash: 59eb5189fd6e8f40f2f2af96df44ce24b3571bf4 - pristine_tree_hash: fa13610a853ef05b5bbabb0bcf0895fbb5a5f02a + generation_id: 031e6fcc-162d-451f-a98c-f65bf3605643 + pristine_commit_hash: 08ac7141d4e4dffd4a3327da51bd2a70d50ff68f + pristine_tree_hash: aeb852eedd1ebeb4411a5c0f286d53884362af3b features: python: additionalDependencies: 1.0.0 @@ -119,10 +119,18 @@ trackedFiles: id: 01740ae62cff last_write_checksum: sha1:0ed4bb58c94493e21826b38d33c2498de9150b98 pristine_git_object: 825e03a02e14d03ce47022df840c118de8cd921f + docs/models/agentsapiv1agentsgetversionrequest.md: + id: 88ed22b85cde + last_write_checksum: sha1:c6706d79c9253829cf4285c99d49873fa56596bf + pristine_git_object: 7617d2748c86f537bf125d90e67f41df71c1e5cd docs/models/agentsapiv1agentslistrequest.md: id: c2720c209527 - last_write_checksum: sha1:7e5cf3361dd00fce8468757cc73c7edb2877d582 - pristine_git_object: c4f05b5c9169300d4429e601cb70d0aa1fd88c70 + last_write_checksum: sha1:cb599d1583ee9374d44695f5ee7efe79dbb8a503 + pristine_git_object: 8cba13253d42a180b06eab8c10297ef362fb434d + docs/models/agentsapiv1agentslistversionsrequest.md: + id: 0bc44ed8d6bb + last_write_checksum: sha1:315790552fc5b2b3a6c4f7be2eb33100133abe18 + pristine_git_object: 91831700bed92cb4f609f8c412dcb0ee98b544ca docs/models/agentsapiv1agentsupdaterequest.md: id: 7692812cd677 last_write_checksum: sha1:8b17ce9d488b5eab892b66ca44d0e0a01b56aa11 @@ -1273,8 +1281,8 @@ trackedFiles: pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e docs/models/ocrrequest.md: id: 6862a3fc2d0f - 
last_write_checksum: sha1:f32fcc5916f9eedf7adfaa60beda30a9ec42f32e - pristine_git_object: 76e4da925937fd4bdd42307f116a74d4dbf2bea3 + last_write_checksum: sha1:9311e2c87f8f4512c35a717d3b063f2861f878d4 + pristine_git_object: 87929e53f8a74823b82ecce56d15f22228134fa6 docs/models/ocrresponse.md: id: 30042328fb78 last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 @@ -1757,8 +1765,8 @@ trackedFiles: pristine_git_object: e672c190ad6ac4623f99357d7e59d52f6722518f docs/sdks/mistralagents/README.md: id: 20b3478ad16d - last_write_checksum: sha1:b13e50de2ff10eabb4534f561c8cac185485280b - pristine_git_object: 97819467c39bc4f813093e55756e38ba06263a87 + last_write_checksum: sha1:73c444aaf6e547439dafb8d099142fd0059fdf4f + pristine_git_object: 8021fa07d58f71765097d1b3cea7ac4a2d6224a1 docs/sdks/mistraljobs/README.md: id: 71aafa44d228 last_write_checksum: sha1:255a4221b3b61ef247b39c9723a78408cda486d3 @@ -1769,8 +1777,8 @@ trackedFiles: pristine_git_object: d51866b6cff74932bf86c266f75773c2d3e74fd0 docs/sdks/ocr/README.md: id: 545e35d2613e - last_write_checksum: sha1:fc478d79405c775e9ae65334122d4539be952492 - pristine_git_object: 6fd904cc045b8accf5cc11436fd66f4024c9897f + last_write_checksum: sha1:25846e2fe16ecb69d94c0d53edb74c22419c49aa + pristine_git_object: efcb99314c7d07a3dc556c297333046fc5d9e097 docs/sdks/transcriptions/README.md: id: 089cf94ecf47 last_write_checksum: sha1:fdf785e4cbab20aec41122735435a38f582f7f29 @@ -1801,8 +1809,8 @@ trackedFiles: pristine_git_object: 6d0f3e1166cb0271f89f5ba83441c88199d7a432 src/mistralai/_version.py: id: 37b53ba66d7f - last_write_checksum: sha1:eb93ac459ae2c6e3551452f251db32d7c3ee3908 - pristine_git_object: 8c26c0cea422eea543df9c639b4b7d4b751b7692 + last_write_checksum: sha1:c4d3183c7342cd3d37f1a2fb2a707b2cb76cafec + pristine_git_object: aae7598df33f9fc79d17c1cb19baf2b61539e9db src/mistralai/accesses.py: id: 98cb4addd052 last_write_checksum: sha1:5d9d495274d67b1343ba99d755c1c01c64c2ead1 @@ -1873,16 +1881,16 @@ trackedFiles: 
pristine_git_object: 32648937feb79adf6155423cbe9bac4d7fe52224 src/mistralai/mistral_agents.py: id: 671c4985aaa1 - last_write_checksum: sha1:5e80f9f13f811dc0c47ba200eab0e4203b8d1472 - pristine_git_object: 1d2040682c3e1c9fdae8562bad7919bbce5c68c8 + last_write_checksum: sha1:01d02e6ea96903bf0b9893d24115a154e078096d + pristine_git_object: e4abf6e4cba4cfedbe1d6bd93292318f641d49d0 src/mistralai/mistral_jobs.py: id: 18065a449da0 last_write_checksum: sha1:fb205d962444f6aba163ecd3169c12489b3f0cc9 pristine_git_object: d1aeec8a014b22e44f4fe5e751206c3648e875af src/mistralai/models/__init__.py: id: 3228134f03e5 - last_write_checksum: sha1:2984e08157e90f500bfd135c037723b5d1902e9c - pristine_git_object: e69acaf83ab2433f99f431dd992004543839d33a + last_write_checksum: sha1:0e6ec6d05cfd56d49d761a68e4f42f550695aa81 + pristine_git_object: c35b3d24abc3863d88e40b8d9e8bd2c1a35a4541 src/mistralai/models/agent.py: id: ca4162a131b1 last_write_checksum: sha1:fe8a7c8c9c4ba59613d7d89f0c2e7a6958e25f85 @@ -1911,14 +1919,22 @@ trackedFiles: id: 588791d168a1 last_write_checksum: sha1:2dae37c3b9778d688663550b9803d52111577f3e pristine_git_object: 38e04953cc320f503a2f6e77096985da60896f2a + src/mistralai/models/agents_api_v1_agents_get_versionop.py: + id: bdb81ef0e35a + last_write_checksum: sha1:dab21f6fae05e2794208baf3b4e43feeeaf9b3bd + pristine_git_object: 4463d3b25aedad4f3b96a9fb7174a598c843939f src/mistralai/models/agents_api_v1_agents_getop.py: id: 2358eceee519 last_write_checksum: sha1:362d0c781b2c79d829f6e4901e558aaca937b105 pristine_git_object: dced6dbb49c31fe2981cbd3865c0d580082a1ade + src/mistralai/models/agents_api_v1_agents_list_versionsop.py: + id: 5f680df288a9 + last_write_checksum: sha1:a236170f366d9701346b57f9ee4c788a9a2293e5 + pristine_git_object: cf988b3d3b5130ff49f7ec0accb30a8e9dbfe4e1 src/mistralai/models/agents_api_v1_agents_listop.py: id: 15579851e4fe - last_write_checksum: sha1:eae021d178b661254dde8bea0b2cbdb11b9b429c - pristine_git_object: 
b3b8765c194bc29757468b605c13f2b7372f161e + last_write_checksum: sha1:1268af12d397f86e0486c42ec8115103e29ee137 + pristine_git_object: 88b5bad107d28943de8f25cb26c6597da2eba31d src/mistralai/models/agents_api_v1_agents_update_versionop.py: id: 262e7a2f05e3 last_write_checksum: sha1:faa5550d08ddbb8223e8e6f2fcea6f09408bd228 @@ -2637,8 +2653,8 @@ trackedFiles: pristine_git_object: 737defbaea323e0f3ccd95c2a721f57acc9f43a0 src/mistralai/models/ocrrequest.py: id: 7dbc4bb7cafb - last_write_checksum: sha1:b8a5efbd582bdf9e188d4777b319d2b16e0caf3d - pristine_git_object: 8bd133706746435af17898ee1afe78d94f2d1948 + last_write_checksum: sha1:2f49cf3d70f2aa11cf2e7ac9f7cc262901387eb5 + pristine_git_object: 0e061ac95f2d92d0d8bb14a2d27b64d01bb4e962 src/mistralai/models/ocrresponse.py: id: a187e70d8c2e last_write_checksum: sha1:0c09aee803a5e1a3ba7c7f5d0ce46e96ee3339ca @@ -2893,8 +2909,8 @@ trackedFiles: pristine_git_object: d44930a0db06117ba538424273935016a133e0ae src/mistralai/ocr.py: id: e23da68c9ae8 - last_write_checksum: sha1:9de69bb9928130acbe099d2cd833dc09fbfccee5 - pristine_git_object: 8c2e478b26fbaabe56f645c77dcb294fe3a953c1 + last_write_checksum: sha1:ce13d4ac0fc3cc52b2a76480c570d89cfe71c002 + pristine_git_object: ceb7dd85f958452aeb55868c65746ccf6ec200a5 src/mistralai/py.typed: id: 3923b7c50c56 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 @@ -3740,8 +3756,37 @@ examples: responses: "422": application/json: {} + agents_api_v1_agents_list_versions: + speakeasy-default-agents-api-v1-agents-list-versions: + parameters: + path: + agent_id: "" + query: + page: 0 + page_size: 20 + responses: + "200": + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "model": "Prius", "name": "", "object": "agent", "id": "", "version": 866135, "versions": [849276], "created_at": "2024-07-03T17:01:49.200Z", "updated_at": "2026-06-15T18:44:26.883Z", "deployment_chat": true, "source": ""}] + "422": + application/json: {} + 
agents_api_v1_agents_get_version: + speakeasy-default-agents-api-v1-agents-get-version: + parameters: + path: + agent_id: "" + version: 788393 + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Model 3", "name": "", "object": "agent", "id": "", "version": 377706, "versions": [658369, 642981], "created_at": "2024-10-02T23:01:15.980Z", "updated_at": "2026-12-22T00:55:26.568Z", "deployment_chat": false, "source": ""} + "422": + application/json: {} examplesVersion: 1.0.2 generatedTests: {} +releaseNotes: | + ## Python SDK Changes: + * `mistral.beta.agents.list_versions()`: **Added** + * `mistral.beta.agents.get_version()`: **Added** + * `mistral.ocr.process()`: `request.document_annotation_prompt` **Added** generatedFiles: - .gitattributes - .vscode/settings.json diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 53216a9e..4f9a9747 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -26,7 +26,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.11.0 + version: 1.11.1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index d5f1c965..89c966c7 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,10 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:ebe60088ce4a3780c57a08de7bc73f973f529822a05db12c5d9c6084e9a934e0 - sourceBlobDigest: sha256:c93947af3495a5129cb6aecfe0546463917fbe1f66f2cf8f5a0accb36c035501 + sourceRevisionDigest: sha256:fd94dc1f574f3bb88a839543675b2c3b3aa895519ec2797efb143ead830ae982 + sourceBlobDigest: sha256:2dd0ee9d000907ffe699cdd48a18163b88297d0ce63f2cdc05efa35cee136bc0 tags: - latest + - speakeasy-sdk-regen-1768506286 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -36,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - 
sourceRevisionDigest: sha256:ebe60088ce4a3780c57a08de7bc73f973f529822a05db12c5d9c6084e9a934e0 - sourceBlobDigest: sha256:c93947af3495a5129cb6aecfe0546463917fbe1f66f2cf8f5a0accb36c035501 + sourceRevisionDigest: sha256:fd94dc1f574f3bb88a839543675b2c3b3aa895519ec2797efb143ead830ae982 + sourceBlobDigest: sha256:2dd0ee9d000907ffe699cdd48a18163b88297d0ce63f2cdc05efa35cee136bc0 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:14b511ab3d9f6f3d9ee0c81c32c6fa2dd6be9b6a1047298cf9f1162328045b4f + codeSamplesRevisionDigest: sha256:881a74af90c5678411207a0a9b0e370496d44b18174e96ba7c6812d400788637 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 diff --git a/README.md b/README.md index d755d249..2569d112 100644 --- a/README.md +++ b/README.md @@ -473,6 +473,8 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [update](docs/sdks/mistralagents/README.md#update) - Update an agent entity. * [delete](docs/sdks/mistralagents/README.md#delete) - Delete an agent entity. * [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. +* [list_versions](docs/sdks/mistralagents/README.md#list_versions) - List all versions of an agent. +* [get_version](docs/sdks/mistralagents/README.md#get_version) - Retrieve a specific version of an agent. ### [Beta.Conversations](docs/sdks/conversations/README.md) @@ -750,7 +752,7 @@ with Mistral( **Inherit from [`MistralError`](./src/mistralai/models/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 48 of 70 methods.* +* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 50 of 72 methods.* * [`ResponseValidationError`](./src/mistralai/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. 
Provides access to the Pydantic validation error via the `cause` attribute.
diff --git a/RELEASES.md b/RELEASES.md index 092c0122..984e9145 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -348,4 +348,14 @@ Based on: ### Generated - [python v1.10.1] . ### Releases -- [PyPI v1.10.1] https://pypi.org/project/mistralai/1.10.1 - . \ No newline at end of file +- [PyPI v1.10.1] https://pypi.org/project/mistralai/1.10.1 - . + +## 2026-01-22 11:16:25 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.685.0 (2.794.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.11.1] . +### Releases +- [PyPI v1.11.1] https://pypi.org/project/mistralai/1.11.1 - . \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetversionrequest.md b/docs/models/agentsapiv1agentsgetversionrequest.md new file mode 100644 index 00000000..7617d274 --- /dev/null +++ b/docs/models/agentsapiv1agentsgetversionrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsGetVersionRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistrequest.md b/docs/models/agentsapiv1agentslistrequest.md index c4f05b5c..8cba1325 100644 --- a/docs/models/agentsapiv1agentslistrequest.md +++ b/docs/models/agentsapiv1agentslistrequest.md @@ -5,8 +5,8 @@ | Field | Type | Required | Description | | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | 
:heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of agents per page | | `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `sources` | List[[models.RequestSource](../models/requestsource.md)] | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/agentsapiv1agentslistversionsrequest.md b/docs/models/agentsapiv1agentslistversionsrequest.md new file mode 100644 index 00000000..91831700 --- /dev/null +++ b/docs/models/agentsapiv1agentslistversionsrequest.md @@ -0,0 +1,10 @@ +# AgentsAPIV1AgentsListVersionsRequest + + +## Fields + +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of versions per page | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md index 76e4da92..87929e53 100644 --- a/docs/models/ocrrequest.md +++ b/docs/models/ocrrequest.md @@ -14,6 +14,7 @@ | `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | | `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | | `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | | `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | | `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index 97819467..8021fa07 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -12,6 +12,8 @@ * [update](#update) - Update an agent entity. * [delete](#delete) - Delete an agent entity. * [update_version](#update_version) - Update an agent version. +* [list_versions](#list_versions) - List all versions of an agent. +* [get_version](#get_version) - Retrieve a specific version of an agent. 
## create @@ -92,8 +94,8 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of agents per page | | `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `sources` | List[[models.RequestSource](../../models/requestsource.md)] | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | @@ -281,6 +283,91 @@ with Mistral( ### Errors +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## list_versions + +Retrieve all versions for a specific agent with full agent context. Supports pagination. 
+ +### Example Usage + + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.list_versions(agent_id="", page=0, page_size=20) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `page` | *Optional[int]* | :heavy_minus_sign: | Page number (0-indexed) | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of versions per page | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.Agent]](../../models/.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## get_version + +Get a specific agent version by version number. 
+ +### Example Usage + + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.get_version(agent_id="", version=788393) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.Agent](../../models/agent.md)** + +### Errors + | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 6fd904cc..efcb9931 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -53,6 +53,7 @@ with Mistral( | `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | | `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | | `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | | `table_format` | [OptionalNullable[models.TableFormat]](../../models/tableformat.md) | :heavy_minus_sign: | N/A | | | `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index 8c26c0ce..aae7598d 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.11.0" +__version__: str = "1.11.1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 1.11.0 2.794.1 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.11.1 2.794.1 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py index 1d204068..e4abf6e4 100644 --- a/src/mistralai/mistral_agents.py +++ b/src/mistralai/mistral_agents.py @@ -293,8 +293,8 @@ def list( Retrieve a list of agent entities sorted by creation time. - :param page: - :param page_size: + :param page: Page number (0-indexed) + :param page_size: Number of agents per page :param deployment_chat: :param sources: :param name: @@ -401,8 +401,8 @@ async def list_async( Retrieve a list of agent entities sorted by creation time. 
- :param page: - :param page_size: + :param page: Page number (0-indexed) + :param page_size: Number of agents per page :param deployment_chat: :param sources: :param name: @@ -1317,3 +1317,381 @@ async def update_version_async( raise models.SDKError("API error occurred", http_res, http_res_text) raise models.SDKError("Unexpected response received", http_res) + + def list_versions( + self, + *, + agent_id: str, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List all versions of an agent. + + Retrieve all versions for a specific agent with full agent context. Supports pagination. + + :param agent_id: + :param page: Page number (0-indexed) + :param page_size: Number of versions per page + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionsRequest( + agent_id=agent_id, + page=page, + page_size=page_size, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/versions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_versions", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, 
http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_versions_async( + self, + *, + agent_id: str, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List all versions of an agent. + + Retrieve all versions for a specific agent with full agent context. Supports pagination. + + :param agent_id: + :param page: Page number (0-indexed) + :param page_size: Number of versions per page + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionsRequest( + agent_id=agent_id, + page=page, + page_size=page_size, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/versions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_versions", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get_version( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve a specific version of an agent. + + Get a specific agent version by version number. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/version/{version}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_version_async( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve a specific version of an agent. + + Get a specific agent version by version number. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/version/{version}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = 
(retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index e69acaf8..c35b3d24 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -45,10 +45,18 @@ AgentsAPIV1AgentsDeleteRequest, AgentsAPIV1AgentsDeleteRequestTypedDict, ) + from .agents_api_v1_agents_get_versionop import ( + AgentsAPIV1AgentsGetVersionRequest, + AgentsAPIV1AgentsGetVersionRequestTypedDict, + ) from .agents_api_v1_agents_getop import ( AgentsAPIV1AgentsGetRequest, AgentsAPIV1AgentsGetRequestTypedDict, ) + from .agents_api_v1_agents_list_versionsop import ( + AgentsAPIV1AgentsListVersionsRequest, + AgentsAPIV1AgentsListVersionsRequestTypedDict, + ) from .agents_api_v1_agents_listop import ( AgentsAPIV1AgentsListRequest, 
AgentsAPIV1AgentsListRequestTypedDict, @@ -972,8 +980,12 @@ "AgentsAPIV1AgentsDeleteRequestTypedDict", "AgentsAPIV1AgentsGetRequest", "AgentsAPIV1AgentsGetRequestTypedDict", + "AgentsAPIV1AgentsGetVersionRequest", + "AgentsAPIV1AgentsGetVersionRequestTypedDict", "AgentsAPIV1AgentsListRequest", "AgentsAPIV1AgentsListRequestTypedDict", + "AgentsAPIV1AgentsListVersionsRequest", + "AgentsAPIV1AgentsListVersionsRequestTypedDict", "AgentsAPIV1AgentsUpdateRequest", "AgentsAPIV1AgentsUpdateRequestTypedDict", "AgentsAPIV1AgentsUpdateVersionRequest", @@ -1682,8 +1694,12 @@ "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", diff --git a/src/mistralai/models/agents_api_v1_agents_get_versionop.py b/src/mistralai/models/agents_api_v1_agents_get_versionop.py new file mode 100644 index 00000000..4463d3b2 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_get_versionop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict): + agent_id: str + version: int + + +class AgentsAPIV1AgentsGetVersionRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + version: Annotated[ + int, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/agents_api_v1_agents_list_versionsop.py b/src/mistralai/models/agents_api_v1_agents_list_versionsop.py new file mode 100644 index 00000000..cf988b3d --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_list_versionsop.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict): + agent_id: str + page: NotRequired[int] + r"""Page number (0-indexed)""" + page_size: NotRequired[int] + r"""Number of versions per page""" + + +class AgentsAPIV1AgentsListVersionsRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""Page number (0-indexed)""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 20 + r"""Number of versions per page""" diff --git a/src/mistralai/models/agents_api_v1_agents_listop.py 
b/src/mistralai/models/agents_api_v1_agents_listop.py index b3b8765c..88b5bad1 100644 --- a/src/mistralai/models/agents_api_v1_agents_listop.py +++ b/src/mistralai/models/agents_api_v1_agents_listop.py @@ -11,7 +11,9 @@ class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): page: NotRequired[int] + r"""Page number (0-indexed)""" page_size: NotRequired[int] + r"""Number of agents per page""" deployment_chat: NotRequired[Nullable[bool]] sources: NotRequired[Nullable[List[RequestSource]]] name: NotRequired[Nullable[str]] @@ -24,11 +26,13 @@ class AgentsAPIV1AgentsListRequest(BaseModel): Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 0 + r"""Page number (0-indexed)""" page_size: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 20 + r"""Number of agents per page""" deployment_chat: Annotated[ OptionalNullable[bool], diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py index 8bd13370..0e061ac9 100644 --- a/src/mistralai/models/ocrrequest.py +++ b/src/mistralai/models/ocrrequest.py @@ -45,6 +45,8 @@ class OCRRequestTypedDict(TypedDict): r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + document_annotation_prompt: NotRequired[Nullable[str]] + r"""Optional prompt to guide the model in extracting structured output from the entire document. 
A document_annotation_format must be provided.""" table_format: NotRequired[Nullable[TableFormat]] extract_header: NotRequired[bool] extract_footer: NotRequired[bool] @@ -76,6 +78,9 @@ class OCRRequest(BaseModel): document_annotation_format: OptionalNullable[ResponseFormat] = UNSET r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + document_annotation_prompt: OptionalNullable[str] = UNSET + r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" + table_format: OptionalNullable[TableFormat] = UNSET extract_header: Optional[bool] = None @@ -92,6 +97,7 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "document_annotation_prompt", "table_format", "extract_header", "extract_footer", @@ -104,6 +110,7 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "document_annotation_prompt", "table_format", ] null_default_fields = [] diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py index 8c2e478b..ceb7dd85 100644 --- a/src/mistralai/ocr.py +++ b/src/mistralai/ocr.py @@ -40,6 +40,7 @@ def process( models_responseformat.ResponseFormatTypedDict, ] ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, @@ -59,6 +60,7 @@ def process( :param image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. :param table_format: :param extract_header: :param extract_footer: @@ -91,6 +93,7 @@ def process( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + document_annotation_prompt=document_annotation_prompt, table_format=table_format, extract_header=extract_header, extract_footer=extract_footer, @@ -180,6 +183,7 @@ async def process_async( models_responseformat.ResponseFormatTypedDict, ] ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, @@ -199,6 +203,7 @@ async def process_async( :param image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. 
:param table_format: :param extract_header: :param extract_footer: @@ -231,6 +236,7 @@ async def process_async( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + document_annotation_prompt=document_annotation_prompt, table_format=table_format, extract_header=extract_header, extract_footer=extract_footer, From 47c641607fe9130891f04a7e1ea68aee488288af Mon Sep 17 00:00:00 2001 From: Guillaume Dumont Date: Wed, 28 Jan 2026 19:16:03 +0100 Subject: [PATCH 180/223] Bumping the pyproject.toml version to 1.11.1 (#333) This change was omitted in previous PR. --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2f58565d..680ae19b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.11.0" +version = "1.11.1" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" From ba8edee2499f7aa1b083d9d9cd78747f15ce7de6 Mon Sep 17 00:00:00 2001 From: Guillaume Dumont Date: Thu, 29 Jan 2026 14:01:38 +0100 Subject: [PATCH 181/223] Updating the version of speakeasy-api/sdk-generation-action actions (#334) This update should address the recent issues with the publishing workflow. 
--- .github/workflows/sdk_generation_mistralai_azure_sdk.yaml | 2 +- .github/workflows/sdk_generation_mistralai_gcp_sdk.yaml | 2 +- .github/workflows/sdk_generation_mistralai_sdk.yaml | 2 +- .github/workflows/sdk_publish_mistralai_sdk.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml index 167d8865..22af64aa 100644 --- a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -16,7 +16,7 @@ permissions: type: string jobs: generate: - uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@a658ca0a4a9b11bbcd7d3fb4e3063fa843afabff # v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 with: force: ${{ github.event.inputs.force }} mode: pr diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml index aa753830..bf1d19b1 100644 --- a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -16,7 +16,7 @@ permissions: type: string jobs: generate: - uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@a658ca0a4a9b11bbcd7d3fb4e3063fa843afabff # v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 with: force: ${{ github.event.inputs.force }} mode: pr diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index b199798b..cbe8f1e8 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -16,7 +16,7 @@ permissions: type: string jobs: generate: - uses: 
speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@a658ca0a4a9b11bbcd7d3fb4e3063fa843afabff # v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/workflow-executor.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 with: force: ${{ github.event.inputs.force }} mode: pr diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml index a457b6c1..0a225d70 100644 --- a/.github/workflows/sdk_publish_mistralai_sdk.yaml +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -14,7 +14,7 @@ permissions: - "*/RELEASES.md" jobs: publish: - uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@a658ca0a4a9b11bbcd7d3fb4e3063fa843afabff # v15 + uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} From a7783e307c6fb04a83612c601e21594535d8d218 Mon Sep 17 00:00:00 2001 From: speakeasybot Date: Sun, 1 Feb 2026 21:22:28 +0000 Subject: [PATCH 182/223] ## Python SDK Changes: * `mistral.beta.conversations.restart_stream()`: `request.agent_version` **Changed** **Breaking** :warning: * `mistral.beta.conversations.start()`: `request.agent_version` **Changed** **Breaking** :warning: * `mistral.beta.conversations.list()`: `response.[].[agent_conversation].agent_version` **Changed** **Breaking** :warning: * `mistral.beta.conversations.get()`: `response.[agent_conversation].agent_version` **Changed** **Breaking** :warning: * `mistral.beta.conversations.restart()`: `request.agent_version` **Changed** **Breaking** :warning: * `mistral.beta.conversations.start_stream()`: `request.agent_version` **Changed** **Breaking** :warning: * `mistral.beta.agents.get()`: `request.agent_version` **Changed** **Breaking** :warning: * `mistral.beta.agents.get_version()`: `request.version` **Changed** **Breaking** :warning: * 
`mistral.beta.agents.list_version_aliases()`: **Added** * `mistral.models.list()`: `response.data.[].[fine-tuned].capabilities.audio_transcription` **Added** * `mistral.models.retrieve()`: `response.[base].capabilities.audio_transcription` **Added** * `mistral.beta.agents.create_version_alias()`: **Added** * `mistral.files.list()`: `request.mimetypes` **Added** --- .speakeasy/gen.lock | 261 +++++++++--- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +- README.md | 4 +- RELEASES.md | 12 +- docs/models/agentaliasresponse.md | 11 + docs/models/agentconversation.md | 22 +- docs/models/agentconversationagentversion.md | 17 + ...tsapiv1agentscreateorupdatealiasrequest.md | 10 + docs/models/agentsapiv1agentsgetrequest.md | 8 +- .../agentsapiv1agentsgetversionrequest.md | 2 +- ...ntsapiv1agentslistversionaliasesrequest.md | 8 + docs/models/agentversion.md | 17 + docs/models/conversationrequest.md | 2 +- docs/models/conversationrestartrequest.md | 2 +- .../conversationrestartrequestagentversion.md | 19 + .../conversationrestartstreamrequest.md | 2 +- ...rsationrestartstreamrequestagentversion.md | 19 + docs/models/conversationstreamrequest.md | 2 +- .../conversationstreamrequestagentversion.md | 17 + docs/models/filesapirouteslistfilesrequest.md | 3 +- docs/models/message.md | 19 + docs/models/mistralpromptmode.md | 4 + docs/models/modelcapabilities.md | 23 +- docs/models/queryparamagentversion.md | 17 + docs/models/realtimetranscriptionerror.md | 9 + .../realtimetranscriptionerrordetail.md | 9 + docs/models/realtimetranscriptionsession.md | 10 + .../realtimetranscriptionsessioncreated.md | 9 + .../realtimetranscriptionsessionupdated.md | 9 + docs/sdks/conversations/README.md | 8 +- docs/sdks/files/README.md | 1 + docs/sdks/mistralagents/README.md | 104 ++++- src/mistralai/_version.py | 4 +- src/mistralai/conversations.py | 56 ++- src/mistralai/files.py | 6 + src/mistralai/mistral_agents.py | 399 +++++++++++++++++- src/mistralai/models/__init__.py | 103 +++++ 
src/mistralai/models/agentaliasresponse.py | 23 + src/mistralai/models/agentconversation.py | 18 +- ..._api_v1_agents_create_or_update_aliasop.py | 26 ++ .../agents_api_v1_agents_get_versionop.py | 4 +- .../models/agents_api_v1_agents_getop.py | 15 +- ...ts_api_v1_agents_list_version_aliasesop.py | 16 + src/mistralai/models/conversationrequest.py | 10 +- .../models/conversationrestartrequest.py | 22 +- .../conversationrestartstreamrequest.py | 24 +- .../models/conversationstreamrequest.py | 14 +- .../models/files_api_routes_list_filesop.py | 9 +- src/mistralai/models/mistralpromptmode.py | 4 + src/mistralai/models/modelcapabilities.py | 3 + .../models/realtimetranscriptionerror.py | 27 ++ .../realtimetranscriptionerrordetail.py | 29 ++ .../models/realtimetranscriptionsession.py | 20 + .../realtimetranscriptionsessioncreated.py | 30 ++ .../realtimetranscriptionsessionupdated.py | 30 ++ uv.lock | 2 +- 57 files changed, 1405 insertions(+), 163 deletions(-) create mode 100644 docs/models/agentaliasresponse.md create mode 100644 docs/models/agentconversationagentversion.md create mode 100644 docs/models/agentsapiv1agentscreateorupdatealiasrequest.md create mode 100644 docs/models/agentsapiv1agentslistversionaliasesrequest.md create mode 100644 docs/models/agentversion.md create mode 100644 docs/models/conversationrestartrequestagentversion.md create mode 100644 docs/models/conversationrestartstreamrequestagentversion.md create mode 100644 docs/models/conversationstreamrequestagentversion.md create mode 100644 docs/models/message.md create mode 100644 docs/models/queryparamagentversion.md create mode 100644 docs/models/realtimetranscriptionerror.md create mode 100644 docs/models/realtimetranscriptionerrordetail.md create mode 100644 docs/models/realtimetranscriptionsession.md create mode 100644 docs/models/realtimetranscriptionsessioncreated.md create mode 100644 docs/models/realtimetranscriptionsessionupdated.md create mode 100644 
src/mistralai/models/agentaliasresponse.py create mode 100644 src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py create mode 100644 src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py create mode 100644 src/mistralai/models/realtimetranscriptionerror.py create mode 100644 src/mistralai/models/realtimetranscriptionerrordetail.py create mode 100644 src/mistralai/models/realtimetranscriptionsession.py create mode 100644 src/mistralai/models/realtimetranscriptionsessioncreated.py create mode 100644 src/mistralai/models/realtimetranscriptionsessionupdated.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index b89ea6b3..9d51b30a 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,19 +1,19 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: a61cb56fd9942dd20124e0422444bac3 + docChecksum: cc385dce976ac06e6d062e992f0ee380 docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 - releaseVersion: 1.11.1 - configChecksum: 1a6d0af8e5d88c97b7e947763e633c3d + releaseVersion: 1.12.0 + configChecksum: 862d9a8667674972c091f9db84d42ba0 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 031e6fcc-162d-451f-a98c-f65bf3605643 - pristine_commit_hash: 08ac7141d4e4dffd4a3327da51bd2a70d50ff68f - pristine_tree_hash: aeb852eedd1ebeb4411a5c0f286d53884362af3b + generation_id: 8b0735b6-5924-48f1-ade2-47cb374c76bc + pristine_commit_hash: a9971b936f50486e2e4ceef95d0b2c4708633219 + pristine_tree_hash: 51b8a57de0bf62da607fe0023eec1124458ebee9 features: python: additionalDependencies: 1.0.0 @@ -63,10 +63,18 @@ trackedFiles: id: ffdbb4c53c87 last_write_checksum: sha1:ec6c799658040b3c75d6ae0572bb391c6aea3fd4 pristine_git_object: ee054dd349848eff144d7064c319c3c8434bdc6c + 
docs/models/agentaliasresponse.md: + id: 5ac4721d8947 + last_write_checksum: sha1:15dcc6820e89d2c6bb799e331463419ce29ec167 + pristine_git_object: aa531ec5d1464f95e3938f148c1e88efc30fa6a6 docs/models/agentconversation.md: id: 3590c1a566fa - last_write_checksum: sha1:a88c8e10a9de2bc99cabd38ab9fc775a2d33e9ef - pristine_git_object: 92fd673c0710889ae3f1d77f82c32113f39457b7 + last_write_checksum: sha1:264d78815c3999bac377ab3f8c08a264178baf43 + pristine_git_object: a2d617316f1965acfabf7d2fe74334de16213829 + docs/models/agentconversationagentversion.md: + id: 468e0d1614bb + last_write_checksum: sha1:6e60bf4a18d791d694e90c89bdb8cc38e43c324b + pristine_git_object: 668a8dc0f0c51a231a73aed51b2db13de243a038 docs/models/agentconversationobject.md: id: cfd35d9dd4f2 last_write_checksum: sha1:112552d4a241967cf0a7dcb981428e7e0715dc34 @@ -111,22 +119,30 @@ trackedFiles: id: ed24a6d647a0 last_write_checksum: sha1:ff5dfde6cc19f09c83afb5b4f0f103096df6691d pristine_git_object: 70e143b030d3041c7538ecdacb8f5f9f8d1b5c92 + docs/models/agentsapiv1agentscreateorupdatealiasrequest.md: + id: c09ec9946094 + last_write_checksum: sha1:0883217b4bad21f5d4f8162ca72005bf9105a93f + pristine_git_object: 79406434cc6ff3d1485089f35639d6284f66d6cb docs/models/agentsapiv1agentsdeleterequest.md: id: 0faaaa59add9 last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2 pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c docs/models/agentsapiv1agentsgetrequest.md: id: 01740ae62cff - last_write_checksum: sha1:0ed4bb58c94493e21826b38d33c2498de9150b98 - pristine_git_object: 825e03a02e14d03ce47022df840c118de8cd921f + last_write_checksum: sha1:9c4f6d88f29c39238757547da605ecb7106e76c2 + pristine_git_object: c71d4419afd3b51713e154b8021d4fe2b49d8af5 docs/models/agentsapiv1agentsgetversionrequest.md: id: 88ed22b85cde - last_write_checksum: sha1:c6706d79c9253829cf4285c99d49873fa56596bf - pristine_git_object: 7617d2748c86f537bf125d90e67f41df71c1e5cd + last_write_checksum: 
sha1:0ef23807c8efa2662144da66745045abdd2cb60a + pristine_git_object: 96a7358943a69e871a2bb7f0f30d6fe2bb8dff3d docs/models/agentsapiv1agentslistrequest.md: id: c2720c209527 last_write_checksum: sha1:cb599d1583ee9374d44695f5ee7efe79dbb8a503 pristine_git_object: 8cba13253d42a180b06eab8c10297ef362fb434d + docs/models/agentsapiv1agentslistversionaliasesrequest.md: + id: 69c8bce2c017 + last_write_checksum: sha1:4083fc80627b2cc04fd271df21393944730ef1ba + pristine_git_object: 3083bf92641404738948cd57306eac978b701551 docs/models/agentsapiv1agentslistversionsrequest.md: id: 0bc44ed8d6bb last_write_checksum: sha1:315790552fc5b2b3a6c4f7be2eb33100133abe18 @@ -223,6 +239,10 @@ trackedFiles: id: a39223b88fc9 last_write_checksum: sha1:925ef5852c2031c9bf2608577e55edbc36708730 pristine_git_object: 1752ee6861d23c6abaa6b748f4ff43e9545505ec + docs/models/agentversion.md: + id: b0aa02d6c085 + last_write_checksum: sha1:f6fcf351de43eed5345f88f5cb6a2bf928a594d9 + pristine_git_object: fd4b6a3ea4ade6c9f62594b377c8e791a50211e7 docs/models/apiendpoint.md: id: be613fd9b947 last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4 @@ -589,8 +609,8 @@ trackedFiles: pristine_git_object: db3a441bde0d086bccda4814ddfbf737539681a6 docs/models/conversationrequest.md: id: dd7f4d6807f2 - last_write_checksum: sha1:4ecca434753494ff0af66952655af92293690702 - pristine_git_object: 04378ae34c754f2ed67a34d14923c7b0d1605d4e + last_write_checksum: sha1:33dec32dbf20979ac04763e99a82e90ee474fef4 + pristine_git_object: 2b4ff8ef3398561d9b3e192a51ec22f64880389c docs/models/conversationresponse.md: id: 2eccf42d48af last_write_checksum: sha1:69059d02d5354897d23c9d9654d38a85c7e0afc6 @@ -601,24 +621,36 @@ trackedFiles: pristine_git_object: bea66e5277feca4358dd6447959ca945eff2171a docs/models/conversationrestartrequest.md: id: 558e9daa00bd - last_write_checksum: sha1:97c25a370411e1bce144c61272ca8f32066112be - pristine_git_object: f389a1e5c42cf0f73784d5563eaa6d0b29e0d69e + last_write_checksum: 
sha1:0e33f56f69313b9111b3394ecca693871d48acfa + pristine_git_object: d98653127fd522e35323b310d2342ccc08927962 + docs/models/conversationrestartrequestagentversion.md: + id: e6ea289c6b23 + last_write_checksum: sha1:a5abf95a81b7e080bd3cadf65c2db38ca458573f + pristine_git_object: 019ba301411729ec2c8078404adae998b3b9dacd docs/models/conversationrestartrequesthandoffexecution.md: id: faee86c7832c last_write_checksum: sha1:44728be55e96193e6f433e2f46f8f749f1671097 pristine_git_object: 5790624b82ce47ea99e5c25c825fbc25145bfb8e docs/models/conversationrestartstreamrequest.md: id: 01b92ab1b56d - last_write_checksum: sha1:90f0ab9aba1919cbc2b9cfc8e5ec9d80f8f3910c - pristine_git_object: d7358dc20b2b60cb287b3c4a1c174a7883871a54 + last_write_checksum: sha1:aa3d30800417e04f741324d60529f3190ea9cd16 + pristine_git_object: a5f8cbe73ed1ce28c82d76f0e9f933bda64f733c + docs/models/conversationrestartstreamrequestagentversion.md: + id: 395265f34ff6 + last_write_checksum: sha1:ebf4e89a478ab40e1f8cd3f9a000e179426bda47 + pristine_git_object: 9e0063003f1d8acce61cf4edda91ddbc23a3c69d docs/models/conversationrestartstreamrequesthandoffexecution.md: id: 3e9c4a9ab94d last_write_checksum: sha1:300e197f11ad5efc654b51198b75049890258eef pristine_git_object: 97266b43444f5ed50eeedf574abd99cb201199fd docs/models/conversationstreamrequest.md: id: 833f266c4f96 - last_write_checksum: sha1:b7196c9194bc5167d35d09774a3f26bc7d543790 - pristine_git_object: e403db68e7932f60b1343d9282e2c110414486ce + last_write_checksum: sha1:8d7400dcdb9525c2e45bdaa495df6ca7dcf7f992 + pristine_git_object: 299346f8aaa8ccddcbf7fd083389b74346ef2d4f + docs/models/conversationstreamrequestagentversion.md: + id: e99ccc842929 + last_write_checksum: sha1:0ba5fca217681cdc5e08e0d82db67884bed076a6 + pristine_git_object: 52ee96720abbb3fec822d0792dbde7020f9fb189 docs/models/conversationstreamrequesthandoffexecution.md: id: e6701e5f9f0c last_write_checksum: sha1:ef2ebe8f23f27144e7403f0a522326a7e4f25f50 @@ -745,8 +777,8 @@ trackedFiles: 
pristine_git_object: dbe3c801003c7bb8616f0c5be2dac2ab1e7e9fb1 docs/models/filesapirouteslistfilesrequest.md: id: 04bdf7c654bd - last_write_checksum: sha1:258317fd5c0738cff883f31e13393ac64f817a6f - pristine_git_object: 3801a96e19f149a665bde4890e26df54d7f07d77 + last_write_checksum: sha1:0a99755150c2ded8e5d59a96527021d29326b980 + pristine_git_object: 57d11722f1dba2640df97c22be2a91317c240608 docs/models/filesapiroutesretrievefilerequest.md: id: 2783bfd9c4b9 last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab @@ -1151,6 +1183,10 @@ trackedFiles: id: b071d5a509cc last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 + docs/models/message.md: + id: a9614076792b + last_write_checksum: sha1:9199637b21212e630336d0d513c6b799732dee54 + pristine_git_object: 752f04a8b5ec3bedb0b5c3e4fbf3e5c3fccc07cd docs/models/messageentries.md: id: 9af3a27b862b last_write_checksum: sha1:a3eb6e37b780644313738f84e6c5ac653b4686bc @@ -1225,12 +1261,12 @@ trackedFiles: pristine_git_object: 3c552bac2fa3a5a3783db994d47d255a94643110 docs/models/mistralpromptmode.md: id: d17d5db4d3b6 - last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 - pristine_git_object: 7416e2037c507d19ac02aed914da1208a2fed0a1 + last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 docs/models/modelcapabilities.md: id: 283fbc5fa32f - last_write_checksum: sha1:69312b751771ae8ffa0d1452e3c6c545fdbf52b7 - pristine_git_object: 646c8e94fd208cbf01df19ad6c9707ad235bc59b + last_write_checksum: sha1:8a221e2334193907f84cf241ebaf6b86512bbd8b + pristine_git_object: c7dd2710011451c2db15f53ebc659770e786c4ca docs/models/modelconversation.md: id: 497521ee9bd6 last_write_checksum: sha1:bd11f51f1b6fedbf8a1e1973889d1961086c164f @@ -1319,10 +1355,34 @@ trackedFiles: id: 83c8c59c1802 last_write_checksum: sha1:046375bb3035cc033d4484099cd7f5a4f53ce88c pristine_git_object: 
7b67583f4209778ac6f945631c0ee03ba1f4c663 + docs/models/queryparamagentversion.md: + id: 49d942f63049 + last_write_checksum: sha1:42557c6bf0afc1eabde48c4b6122f801608d8f05 + pristine_git_object: 3eb5ef1840299139bf969379cbfc3ed49127f176 docs/models/queryparamstatus.md: id: 15628120923d last_write_checksum: sha1:36f1c9b6a6af6f27fbf0190417abf95b4a0bc1b9 pristine_git_object: dcd2090861b16f72b0fb321714b4143bc14b7566 + docs/models/realtimetranscriptionerror.md: + id: 4bc5e819565b + last_write_checksum: sha1:c93e4b19a0aa68723ea69973a9f22a581c7b2ff6 + pristine_git_object: e01f2126b3084eade47a26ea092556f7f61142c9 + docs/models/realtimetranscriptionerrordetail.md: + id: ea137b1051f1 + last_write_checksum: sha1:43ae02b32b473d8ba1aaa3b336a40f706d6338d0 + pristine_git_object: 96420ada2ac94fca24a36ddacae9c876e14ccb7a + docs/models/realtimetranscriptionsession.md: + id: aeb0a0f87d6f + last_write_checksum: sha1:c3aa4050d9cc1b73df8496760f1c723d16183f3a + pristine_git_object: 94a0a89e8ca03866f8b09202a28c4e0f7c3af2e6 + docs/models/realtimetranscriptionsessioncreated.md: + id: aa2ae26192d6 + last_write_checksum: sha1:d13fec916d05300c86b52e951e81b1ceee230634 + pristine_git_object: 34e603fd0a1cbc8007eef06decb158213faebeed + docs/models/realtimetranscriptionsessionupdated.md: + id: 56ce3ae7e208 + last_write_checksum: sha1:833db566b2c8a6839b43cb4e760f2af53a2d7f57 + pristine_git_object: 7e2719957aae390ee18b699e61fbc7581242942f docs/models/referencechunk.md: id: 07895f9debfd last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 @@ -1737,8 +1797,8 @@ trackedFiles: pristine_git_object: e76efb79d8b1353208b42619f4cc5b688ef5d561 docs/sdks/conversations/README.md: id: e22a9d2c5424 - last_write_checksum: sha1:b4e49eadaf5a3bb50f5c3a88a759bc529db2584f - pristine_git_object: c488848cc4c18a098deae8f02c0d4a86d1d898db + last_write_checksum: sha1:06b7381c76c258e2a2dca3764456105929d98315 + pristine_git_object: ca383176a8b349cbaa757690b3f7a2cefe22cb1a docs/sdks/documents/README.md: id: 
9758e88a0a9d last_write_checksum: sha1:84791e86c3b9c15f8fd16d2a3df6bd3685023a69 @@ -1749,8 +1809,8 @@ trackedFiles: pristine_git_object: 4390b7bd999a75a608f324f685b2284a8fa277ec docs/sdks/files/README.md: id: e576d7a117f0 - last_write_checksum: sha1:88cd213e513854b8beee72b8ea751f74bf32a845 - pristine_git_object: f0dfd59364c06e84d9cce517594a2912e2b724c8 + last_write_checksum: sha1:99d15a4acce49d5eca853b5a08fd81e76581dc52 + pristine_git_object: 57b53fc75208f4f6361636690b91564148448633 docs/sdks/fim/README.md: id: 499b227bf6ca last_write_checksum: sha1:824f7d1b58ff0b650367737c0e9b91a9d2d14a45 @@ -1765,8 +1825,8 @@ trackedFiles: pristine_git_object: e672c190ad6ac4623f99357d7e59d52f6722518f docs/sdks/mistralagents/README.md: id: 20b3478ad16d - last_write_checksum: sha1:73c444aaf6e547439dafb8d099142fd0059fdf4f - pristine_git_object: 8021fa07d58f71765097d1b3cea7ac4a2d6224a1 + last_write_checksum: sha1:c4e73cd96136392d01b0ce2a57bf0854d05688c0 + pristine_git_object: bdd8d588d88f4929c3b33bcecd72bbb5fce7402d docs/sdks/mistraljobs/README.md: id: 71aafa44d228 last_write_checksum: sha1:255a4221b3b61ef247b39c9723a78408cda486d3 @@ -1809,8 +1869,8 @@ trackedFiles: pristine_git_object: 6d0f3e1166cb0271f89f5ba83441c88199d7a432 src/mistralai/_version.py: id: 37b53ba66d7f - last_write_checksum: sha1:c4d3183c7342cd3d37f1a2fb2a707b2cb76cafec - pristine_git_object: aae7598df33f9fc79d17c1cb19baf2b61539e9db + last_write_checksum: sha1:a4d76992b028e2d138e2f7f6d3087c2a606a21c7 + pristine_git_object: 6ee91593a9fbcd6c53eae810c1c2d0120f56262e src/mistralai/accesses.py: id: 98cb4addd052 last_write_checksum: sha1:5d9d495274d67b1343ba99d755c1c01c64c2ead1 @@ -1845,8 +1905,8 @@ trackedFiles: pristine_git_object: 7c32506ec03cc0fd88b786ff49d7690fd4283d2a src/mistralai/conversations.py: id: be58e57a6198 - last_write_checksum: sha1:76169b9954e645c9d7260b4d9e08be87de7ec643 - pristine_git_object: 93ed8c281a2f44e19f833309ec67b5f35cab1b53 + last_write_checksum: sha1:b9287bbe777a042b8258494cd5162d32e6a89c20 + 
pristine_git_object: 194cb4c0a629654b31bbcce8391baf48601d0eb7 src/mistralai/documents.py: id: 1945602083a8 last_write_checksum: sha1:14d1e6b5a95869d70a6fc89b07d5365c98aff5d7 @@ -1857,8 +1917,8 @@ trackedFiles: pristine_git_object: 7430f8042df4fec517288d0ddb0eb174e7e43a8e src/mistralai/files.py: id: 0e29db0e2269 - last_write_checksum: sha1:e4f833d390f1b3b682f073a76ffb6e29f89c55d1 - pristine_git_object: ab2c75a2f6774a99fe67ac5d3b0fa6544d093181 + last_write_checksum: sha1:d79d5b1785f441a46673a7efa108ddb98c44376a + pristine_git_object: 90ada0ff707521d59d329bebac74005eb68488d8 src/mistralai/fim.py: id: 71a865142baf last_write_checksum: sha1:7accf79c11a17fefbacde7f2b0f966f3716233df @@ -1881,24 +1941,28 @@ trackedFiles: pristine_git_object: 32648937feb79adf6155423cbe9bac4d7fe52224 src/mistralai/mistral_agents.py: id: 671c4985aaa1 - last_write_checksum: sha1:01d02e6ea96903bf0b9893d24115a154e078096d - pristine_git_object: e4abf6e4cba4cfedbe1d6bd93292318f641d49d0 + last_write_checksum: sha1:1fe4fb4f2828b532ac3ddf3b72e748a53d5099e9 + pristine_git_object: 7fb0ce259cb1c1a3847c567bdc992c176489add6 src/mistralai/mistral_jobs.py: id: 18065a449da0 last_write_checksum: sha1:fb205d962444f6aba163ecd3169c12489b3f0cc9 pristine_git_object: d1aeec8a014b22e44f4fe5e751206c3648e875af src/mistralai/models/__init__.py: id: 3228134f03e5 - last_write_checksum: sha1:0e6ec6d05cfd56d49d761a68e4f42f550695aa81 - pristine_git_object: c35b3d24abc3863d88e40b8d9e8bd2c1a35a4541 + last_write_checksum: sha1:cb1fb02e33b85bf82db7d6fd15b2cc3b109c5060 + pristine_git_object: 23e652220f29a882748661a8c0d21aa2830471bf src/mistralai/models/agent.py: id: ca4162a131b1 last_write_checksum: sha1:fe8a7c8c9c4ba59613d7d89f0c2e7a6958e25f85 pristine_git_object: eb30905b3de2b69ece35bdd40f390b2fa6ffc5a8 + src/mistralai/models/agentaliasresponse.py: + id: d329dd68429e + last_write_checksum: sha1:a3ebf39f159f7cd63dbabd9ff2c79df97e43e41f + pristine_git_object: c0928da9c65c588c515f3f1668ccfb69d3a23861 
src/mistralai/models/agentconversation.py: id: bd3035451c40 - last_write_checksum: sha1:2e4a6a5ae0da2e9ccbb588c8487b48077d561d93 - pristine_git_object: 625fb4fc6697860060dfdeb449986d89efc232d6 + last_write_checksum: sha1:724a256f4914116500fd962df4b3cfc79ea75c43 + pristine_git_object: 6007b5715fd4a463d25a244b716effafbeecace6 src/mistralai/models/agentcreationrequest.py: id: 87f33bd9ea58 last_write_checksum: sha1:a6885376d36a5a17273d8d8d8d45e3d6c3ee1b9f @@ -1915,18 +1979,26 @@ trackedFiles: id: ce8e306fa522 last_write_checksum: sha1:2b5bac2f628c0e7cdd6df73404f69f5d405e576c pristine_git_object: 11bfa918903f8de96f98f722eaaf9a70b4fca8c1 + src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py: + id: dd0e03fda847 + last_write_checksum: sha1:a0dd39bb4b0af3a15b1aa8427a6f07d1826c04dc + pristine_git_object: 6cf9d0e0644ce0afd5f673f18fdda9dcccb5f04c src/mistralai/models/agents_api_v1_agents_deleteop.py: id: 588791d168a1 last_write_checksum: sha1:2dae37c3b9778d688663550b9803d52111577f3e pristine_git_object: 38e04953cc320f503a2f6e77096985da60896f2a src/mistralai/models/agents_api_v1_agents_get_versionop.py: id: bdb81ef0e35a - last_write_checksum: sha1:dab21f6fae05e2794208baf3b4e43feeeaf9b3bd - pristine_git_object: 4463d3b25aedad4f3b96a9fb7174a598c843939f + last_write_checksum: sha1:372da3794afd45d442d56edd3ec3cc4907f88223 + pristine_git_object: fddb10dde6707b6641b035e372270991d349f4f3 src/mistralai/models/agents_api_v1_agents_getop.py: id: 2358eceee519 - last_write_checksum: sha1:362d0c781b2c79d829f6e4901e558aaca937b105 - pristine_git_object: dced6dbb49c31fe2981cbd3865c0d580082a1ade + last_write_checksum: sha1:dca59474f75a6636ecac8265cab1bb51d36df56a + pristine_git_object: 2b7d89a5b34f3e768a18f9edbdf712fbcf5c20e4 + src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py: + id: 51215b825530 + last_write_checksum: sha1:d24f8eff3bd19414c0a04e474b33e1c63861a1da + pristine_git_object: 650a7187a3ac419069440fe040a166a036835b37 
src/mistralai/models/agents_api_v1_agents_list_versionsop.py: id: 5f680df288a9 last_write_checksum: sha1:a236170f366d9701346b57f9ee4c788a9a2293e5 @@ -2197,24 +2269,24 @@ trackedFiles: pristine_git_object: 32ca9c20cb37ff65f7e9b126650a78a4b97e4b56 src/mistralai/models/conversationrequest.py: id: ceffcc288c2d - last_write_checksum: sha1:32e7b41c01d2d7accccb1f79248b9e1c56c816f3 - pristine_git_object: 09d934ed3db66ecbd5ab8e3406c3ffb8a1c3c606 + last_write_checksum: sha1:c4c62ef9cdf9bb08463bcb12919abd98ceb8d344 + pristine_git_object: 80581cc10a8e7555546e38c8b7068a2744eb552b src/mistralai/models/conversationresponse.py: id: 016ec02abd32 last_write_checksum: sha1:37c3f143b83939b369fe8637932974d163da3c37 pristine_git_object: ff318e35ee63e43c64e504301236327374442a16 src/mistralai/models/conversationrestartrequest.py: id: 2a8207f159f5 - last_write_checksum: sha1:8f53b5faba0b19d8fdf22388c72eb2580ee121f6 - pristine_git_object: a9c8410c7b1010780bf1d98b1580453aeef07509 + last_write_checksum: sha1:93cd4370afe6a06b375e0e54ca09225e02fc42d3 + pristine_git_object: 6f21d01267481b8b47d4d37609ac131c34c10a9b src/mistralai/models/conversationrestartstreamrequest.py: id: d98d3e0c8eed - last_write_checksum: sha1:cba039d9276869be283d83218659f4bf7537b958 - pristine_git_object: 0703bb5fe6566ff15677e5f604537ab9ae2b79bd + last_write_checksum: sha1:90f295ce27ba55d58899e06a29af223a464f5a4c + pristine_git_object: 2cec7958ab31378d480f0f93a5ed75ac8c624442 src/mistralai/models/conversationstreamrequest.py: id: f7051f125d44 - last_write_checksum: sha1:7ce5ab24500754f4c4f36fd07934fe992d7bbb2e - pristine_git_object: 6ff56e1786e7342284bac0fb4b669806cee55c0f + last_write_checksum: sha1:12bc85a14f110f5c8a3149540668bea178995fae + pristine_git_object: 1a481b77f706db7101521756c7c3476eaa1918c5 src/mistralai/models/conversationusageinfo.py: id: 922894aa994b last_write_checksum: sha1:0e0039421d7291ecbbf820ea843031c50371dd9e @@ -2309,8 +2381,8 @@ trackedFiles: pristine_git_object: 
708d40ab993f93227b9795c745383ab954c1c89c src/mistralai/models/files_api_routes_list_filesop.py: id: 865dd74c577c - last_write_checksum: sha1:df0af95515546660ec9ff343c17f0b2dfe8b0375 - pristine_git_object: 9b9422b405ba967d7f6ed84196fe8e1dc9c5d95f + last_write_checksum: sha1:d75afa1ee7e34cbcfb8da78e3b5c9384b684b89b + pristine_git_object: 84d61b9b4d7032a60e3055b683a396e53b625274 src/mistralai/models/files_api_routes_retrieve_fileop.py: id: d821f72ee198 last_write_checksum: sha1:d0d07123fd941bb99a00a36e87bc7ab4c21506a6 @@ -2613,12 +2685,12 @@ trackedFiles: pristine_git_object: 28cfd22dc3d567aa4ae55cc19ad89341fa9c96a1 src/mistralai/models/mistralpromptmode.py: id: b2580604c1fe - last_write_checksum: sha1:1ac4d9fb8fbf0b21958be5483a569da7f1f49ff0 - pristine_git_object: ee82fb6d056e2d9699628698750e68b4ab6ef851 + last_write_checksum: sha1:71cf04622681998b091f51e4157463109761333f + pristine_git_object: dfb6f2d2a76fd2749d91397752a38b333bae8b02 src/mistralai/models/modelcapabilities.py: id: a9589b97b15c - last_write_checksum: sha1:d7a7d530750418a54a5fc1698d855df7a519a45c - pristine_git_object: 4b5d5da7da9573f998e977e8a14a9b8f8cbf4f55 + last_write_checksum: sha1:56ea040fb631f0825e9ce2c7b32de2c90f6923a1 + pristine_git_object: 6edf8e5bf238b91a245db3489f09ae24506103f3 src/mistralai/models/modelconversation.py: id: 7d8b7b8d62a8 last_write_checksum: sha1:b76cc407f807c19c1ff5602f7dd1d0421db2486d @@ -2683,6 +2755,26 @@ trackedFiles: id: 54d1c125ef83 last_write_checksum: sha1:475749250ada2566c5a5d769eda1d350ddd8be8f pristine_git_object: e67bfa865dcf94656a67f8612a5420f8b43cc0ec + src/mistralai/models/realtimetranscriptionerror.py: + id: f869fd6faf74 + last_write_checksum: sha1:17f78beea9e1821eed90c8a2412aadf953e17774 + pristine_git_object: 0785f7001aeaba7904120a62d569a35b7ee88a80 + src/mistralai/models/realtimetranscriptionerrordetail.py: + id: d106a319e66b + last_write_checksum: sha1:16e0fea1a3be85dfea6f2c44a53a15a3dc322b4c + pristine_git_object: 
cb5d73f861ce053a17b66695d2b56bafe1eeb03e + src/mistralai/models/realtimetranscriptionsession.py: + id: 48c7076e6ede + last_write_checksum: sha1:ae722fc946adf7282fd79c3a2c80fb53acc70ef2 + pristine_git_object: bcd0cfe37600b80e59cd50bd0edac3444be34fdb + src/mistralai/models/realtimetranscriptionsessioncreated.py: + id: 24825bcd61b2 + last_write_checksum: sha1:81f840757637e678c4512765ba8fda060f5af8cb + pristine_git_object: 9a2c2860d1538f03e795c62754244131820e2d44 + src/mistralai/models/realtimetranscriptionsessionupdated.py: + id: 5575fb5d1980 + last_write_checksum: sha1:a2d8d5947ba6b46dcd9a0a1e377067dbb92bfdf1 + pristine_git_object: ad1b513364f5d8d2f92fbc012509bf7567fa4573 src/mistralai/models/referencechunk.py: id: 6cdbb4e60749 last_write_checksum: sha1:48a4dddda06aadd16f6ea34c58848430bd561432 @@ -3022,7 +3114,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "function_calling": true, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": true, "function_calling": true, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} "422": application/json: {} userExample: @@ -3031,7 +3123,7 @@ examples: model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" responses: "200": - application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": 
{"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Product Markets Facilitator", "root": "", "archived": false} delete_model_v1_models__model_id__delete: speakeasy-default-delete-model-v1-models-model-id-delete: parameters: @@ -3774,19 +3866,52 @@ examples: parameters: path: agent_id: "" - version: 788393 + version: "788393" responses: "200": application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Model 3", "name": "", "object": "agent", "id": "", "version": 377706, "versions": [658369, 642981], "created_at": "2024-10-02T23:01:15.980Z", "updated_at": "2026-12-22T00:55:26.568Z", "deployment_chat": false, "source": ""} "422": application/json: {} + agents_api_v1_agents_create_or_update_alias: + speakeasy-default-agents-api-v1-agents-create-or-update-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + version: 595141 + responses: + "200": + application/json: {"alias": "", "version": 768764, "created_at": "2026-12-28T00:40:21.715Z", "updated_at": "2025-09-01T12:54:58.254Z"} + "422": + application/json: {} + agents_api_v1_agents_list_version_aliases: + speakeasy-default-agents-api-v1-agents-list-version-aliases: + parameters: + path: + agent_id: "" + responses: + "200": + application/json: [{"alias": "", "version": 318290, "created_at": "2025-10-02T20:25:32.322Z", "updated_at": 
"2026-11-19T02:58:37.894Z"}] + "422": + application/json: {} examplesVersion: 1.0.2 generatedTests: {} releaseNotes: | ## Python SDK Changes: - * `mistral.beta.agents.list_versions()`: **Added** - * `mistral.beta.agents.get_version()`: **Added** - * `mistral.ocr.process()`: `request.document_annotation_prompt` **Added** + * `mistral.beta.conversations.restart_stream()`: `request.agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.conversations.start()`: `request.agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.conversations.list()`: `response.[].[agent_conversation].agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.conversations.get()`: `response.[agent_conversation].agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.conversations.restart()`: `request.agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.conversations.start_stream()`: `request.agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.agents.get()`: `request.agent_version` **Changed** **Breaking** :warning: + * `mistral.beta.agents.get_version()`: `request.version` **Changed** **Breaking** :warning: + * `mistral.beta.agents.list_version_aliases()`: **Added** + * `mistral.models.list()`: `response.data.[].[fine-tuned].capabilities.audio_transcription` **Added** + * `mistral.models.retrieve()`: `response.[base].capabilities.audio_transcription` **Added** + * `mistral.beta.agents.create_version_alias()`: **Added** + * `mistral.files.list()`: `request.mimetypes` **Added** generatedFiles: - .gitattributes - .vscode/settings.json diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 4f9a9747..0cc6f059 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -26,7 +26,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.11.1 + version: 1.12.0 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 
89c966c7..3bb067a0 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,11 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:fd94dc1f574f3bb88a839543675b2c3b3aa895519ec2797efb143ead830ae982 - sourceBlobDigest: sha256:2dd0ee9d000907ffe699cdd48a18163b88297d0ce63f2cdc05efa35cee136bc0 + sourceRevisionDigest: sha256:56bcbb02148ddfabd64cb2dced1a9efe0f00d0fa106435fdd0fb2a889c1c6fed + sourceBlobDigest: sha256:c014d9220f14e04b573acf291c173954b8d34d03d852877a91756afb68ccc65b tags: - latest - - speakeasy-sdk-regen-1768506286 + - speakeasy-sdk-regen-1769979831 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +37,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:fd94dc1f574f3bb88a839543675b2c3b3aa895519ec2797efb143ead830ae982 - sourceBlobDigest: sha256:2dd0ee9d000907ffe699cdd48a18163b88297d0ce63f2cdc05efa35cee136bc0 + sourceRevisionDigest: sha256:56bcbb02148ddfabd64cb2dced1a9efe0f00d0fa106435fdd0fb2a889c1c6fed + sourceBlobDigest: sha256:c014d9220f14e04b573acf291c173954b8d34d03d852877a91756afb68ccc65b codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:881a74af90c5678411207a0a9b0e370496d44b18174e96ba7c6812d400788637 + codeSamplesRevisionDigest: sha256:feb7bf2f6fab8456316453c7e14eda6201fe8649fe0ffcdb1eaa4580cc66a51e workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 diff --git a/README.md b/README.md index 2569d112..131ce557 100644 --- a/README.md +++ b/README.md @@ -475,6 +475,8 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. * [list_versions](docs/sdks/mistralagents/README.md#list_versions) - List all versions of an agent. * [get_version](docs/sdks/mistralagents/README.md#get_version) - Retrieve a specific version of an agent. 
+* [create_version_alias](docs/sdks/mistralagents/README.md#create_version_alias) - Create or update an agent version alias. +* [list_version_aliases](docs/sdks/mistralagents/README.md#list_version_aliases) - List all aliases for an agent. ### [Beta.Conversations](docs/sdks/conversations/README.md) @@ -752,7 +754,7 @@ with Mistral( **Inherit from [`MistralError`](./src/mistralai/models/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 50 of 72 methods.* +* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 52 of 74 methods.* * [`ResponseValidationError`](./src/mistralai/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. diff --git a/RELEASES.md b/RELEASES.md index 984e9145..90f534ef 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -358,4 +358,14 @@ Based on: ### Generated - [python v1.11.1] . ### Releases -- [PyPI v1.11.1] https://pypi.org/project/mistralai/1.11.1 - . \ No newline at end of file +- [PyPI v1.11.1] https://pypi.org/project/mistralai/1.11.1 - . + +## 2026-02-01 21:20:42 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.685.0 (2.794.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.12.0] . +### Releases +- [PyPI v1.12.0] https://pypi.org/project/mistralai/1.12.0 - . 
\ No newline at end of file diff --git a/docs/models/agentaliasresponse.md b/docs/models/agentaliasresponse.md new file mode 100644 index 00000000..aa531ec5 --- /dev/null +++ b/docs/models/agentaliasresponse.md @@ -0,0 +1,11 @@ +# AgentAliasResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversation.md b/docs/models/agentconversation.md index 92fd673c..a2d61731 100644 --- a/docs/models/agentconversation.md +++ b/docs/models/agentconversation.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| -| `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| +| `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentConversationAgentVersion]](../models/agentconversationagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentconversationagentversion.md b/docs/models/agentconversationagentversion.md new file mode 100644 index 00000000..668a8dc0 --- /dev/null +++ b/docs/models/agentconversationagentversion.md @@ -0,0 +1,17 @@ +# AgentConversationAgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md new file mode 100644 index 00000000..79406434 --- /dev/null +++ b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md @@ -0,0 +1,10 @@ +# AgentsAPIV1AgentsCreateOrUpdateAliasRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md index 825e03a0..c71d4419 100644 --- a/docs/models/agentsapiv1agentsgetrequest.md +++ 
b/docs/models/agentsapiv1agentsgetrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.QueryParamAgentVersion]](../models/queryparamagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetversionrequest.md b/docs/models/agentsapiv1agentsgetversionrequest.md index 7617d274..96a73589 100644 --- a/docs/models/agentsapiv1agentsgetversionrequest.md +++ b/docs/models/agentsapiv1agentsgetversionrequest.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ------------------ | ------------------ | ------------------ | ------------------ | | `agent_id` | *str* | :heavy_check_mark: | N/A | -| `version` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| `version` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistversionaliasesrequest.md b/docs/models/agentsapiv1agentslistversionaliasesrequest.md new file mode 100644 index 00000000..3083bf92 --- /dev/null +++ b/docs/models/agentsapiv1agentslistversionaliasesrequest.md @@ -0,0 +1,8 @@ +# AgentsAPIV1AgentsListVersionAliasesRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ 
| ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentversion.md b/docs/models/agentversion.md new file mode 100644 index 00000000..fd4b6a3e --- /dev/null +++ b/docs/models/agentversion.md @@ -0,0 +1,17 @@ +# AgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationrequest.md b/docs/models/conversationrequest.md index 04378ae3..2b4ff8ef 100644 --- a/docs/models/conversationrequest.md +++ b/docs/models/conversationrequest.md @@ -16,5 +16,5 @@ | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.AgentVersion]](../models/agentversion.md) | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrestartrequest.md b/docs/models/conversationrestartrequest.md index f389a1e5..d9865312 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -14,4 +14,4 @@ Request to restart a new conversation from a given entry in the conversation. | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. 
| \ No newline at end of file +| `agent_version` | [OptionalNullable[models.ConversationRestartRequestAgentVersion]](../models/conversationrestartrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | \ No newline at end of file diff --git a/docs/models/conversationrestartrequestagentversion.md b/docs/models/conversationrestartrequestagentversion.md new file mode 100644 index 00000000..019ba301 --- /dev/null +++ b/docs/models/conversationrestartrequestagentversion.md @@ -0,0 +1,19 @@ +# ConversationRestartRequestAgentVersion + +Specific version of the agent to use when restarting. If not provided, uses the current version. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md index d7358dc2..a5f8cbe7 100644 --- a/docs/models/conversationrestartstreamrequest.md +++ b/docs/models/conversationrestartstreamrequest.md @@ -14,4 +14,4 @@ Request to restart a new conversation from a given entry in the conversation. | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | \ No newline at end of file +| `agent_version` | [OptionalNullable[models.ConversationRestartStreamRequestAgentVersion]](../models/conversationrestartstreamrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. 
| \ No newline at end of file diff --git a/docs/models/conversationrestartstreamrequestagentversion.md b/docs/models/conversationrestartstreamrequestagentversion.md new file mode 100644 index 00000000..9e006300 --- /dev/null +++ b/docs/models/conversationrestartstreamrequestagentversion.md @@ -0,0 +1,19 @@ +# ConversationRestartStreamRequestAgentVersion + +Specific version of the agent to use when restarting. If not provided, uses the current version. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/conversationstreamrequest.md b/docs/models/conversationstreamrequest.md index e403db68..299346f8 100644 --- a/docs/models/conversationstreamrequest.md +++ b/docs/models/conversationstreamrequest.md @@ -16,5 +16,5 @@ | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationStreamRequestAgentVersion]](../models/conversationstreamrequestagentversion.md) | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationstreamrequestagentversion.md b/docs/models/conversationstreamrequestagentversion.md new file mode 100644 index 00000000..52ee9672 --- /dev/null +++ b/docs/models/conversationstreamrequestagentversion.md @@ -0,0 +1,17 @@ +# ConversationStreamRequestAgentVersion + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `int` + +```python +value: int = /* values here */ +``` + diff --git a/docs/models/filesapirouteslistfilesrequest.md b/docs/models/filesapirouteslistfilesrequest.md index 3801a96e..57d11722 100644 --- 
a/docs/models/filesapirouteslistfilesrequest.md +++ b/docs/models/filesapirouteslistfilesrequest.md @@ -11,4 +11,5 @@ | `sample_type` | List[[models.SampleType](../models/sampletype.md)] | :heavy_minus_sign: | N/A | | `source` | List[[models.Source](../models/source.md)] | :heavy_minus_sign: | N/A | | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `purpose` | [OptionalNullable[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `purpose` | [OptionalNullable[models.FilePurpose]](../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `mimetypes` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/message.md b/docs/models/message.md new file mode 100644 index 00000000..752f04a8 --- /dev/null +++ b/docs/models/message.md @@ -0,0 +1,19 @@ +# Message + +Human-readable error message. + + +## Supported Types + +### `str` + +```python +value: str = /* values here */ +``` + +### `Dict[str, Any]` + +```python +value: Dict[str, Any] = /* values here */ +``` + diff --git a/docs/models/mistralpromptmode.md b/docs/models/mistralpromptmode.md index 7416e203..c3409d03 100644 --- a/docs/models/mistralpromptmode.md +++ b/docs/models/mistralpromptmode.md @@ -1,5 +1,9 @@ # MistralPromptMode +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
+ ## Values diff --git a/docs/models/modelcapabilities.md b/docs/models/modelcapabilities.md index 646c8e94..c7dd2710 100644 --- a/docs/models/modelcapabilities.md +++ b/docs/models/modelcapabilities.md @@ -3,14 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `ocr` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `moderation` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `audio` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| --------------------- | --------------------- | --------------------- | --------------------- | +| `completion_chat` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `function_calling` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `completion_fim` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `fine_tuning` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `vision` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `ocr` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `classification` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `moderation` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `audio` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `audio_transcription` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/queryparamagentversion.md b/docs/models/queryparamagentversion.md new file mode 100644 index 00000000..3eb5ef18 --- /dev/null +++ b/docs/models/queryparamagentversion.md @@ -0,0 
+1,17 @@ +# QueryParamAgentVersion + + +## Supported Types + +### `int` + +```python +value: int = /* values here */ +``` + +### `str` + +```python +value: str = /* values here */ +``` + diff --git a/docs/models/realtimetranscriptionerror.md b/docs/models/realtimetranscriptionerror.md new file mode 100644 index 00000000..e01f2126 --- /dev/null +++ b/docs/models/realtimetranscriptionerror.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionError + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["error"]]* | :heavy_minus_sign: | N/A | +| `error` | [models.RealtimeTranscriptionErrorDetail](../models/realtimetranscriptionerrordetail.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionerrordetail.md b/docs/models/realtimetranscriptionerrordetail.md new file mode 100644 index 00000000..96420ada --- /dev/null +++ b/docs/models/realtimetranscriptionerrordetail.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionErrorDetail + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | +| `message` | [models.Message](../models/message.md) | :heavy_check_mark: | Human-readable error message. | +| `code` | *int* | :heavy_check_mark: | Internal error code for debugging. 
| \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsession.md b/docs/models/realtimetranscriptionsession.md new file mode 100644 index 00000000..94a0a89e --- /dev/null +++ b/docs/models/realtimetranscriptionsession.md @@ -0,0 +1,10 @@ +# RealtimeTranscriptionSession + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `request_id` | *str* | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `audio_format` | [models.AudioFormat](../models/audioformat.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsessioncreated.md b/docs/models/realtimetranscriptionsessioncreated.md new file mode 100644 index 00000000..34e603fd --- /dev/null +++ b/docs/models/realtimetranscriptionsessioncreated.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionCreated + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["session.created"]]* | :heavy_minus_sign: | N/A | +| `session` | [models.RealtimeTranscriptionSession](../models/realtimetranscriptionsession.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsessionupdated.md b/docs/models/realtimetranscriptionsessionupdated.md new file mode 100644 index 00000000..7e271995 --- /dev/null +++ b/docs/models/realtimetranscriptionsessionupdated.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionUpdated + + +## Fields + 
+| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["session.updated"]]* | :heavy_minus_sign: | N/A | +| `session` | [models.RealtimeTranscriptionSession](../models/realtimetranscriptionsession.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index c488848c..ca383176 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -60,7 +60,7 @@ with Mistral( | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.AgentVersion]](../../models/agentversion.md) | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | @@ -366,7 +366,7 @@ with Mistral( | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | +| `agent_version` | [OptionalNullable[models.ConversationRestartRequestAgentVersion]](../../models/conversationrestartrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -431,7 +431,7 @@ with Mistral( | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationStreamRequestAgentVersion]](../../models/conversationstreamrequestagentversion.md) | :heavy_minus_sign: | N/A | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | @@ -547,7 +547,7 @@ with Mistral( | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. 
| +| `agent_version` | [OptionalNullable[models.ConversationRestartStreamRequestAgentVersion]](../../models/conversationrestartstreamrequestagentversion.md) | :heavy_minus_sign: | Specific version of the agent to use when restarting. If not provided, uses the current version. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index f0dfd593..57b53fc7 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -95,6 +95,7 @@ with Mistral( | `source` | List[[models.Source](../../models/source.md)] | :heavy_minus_sign: | N/A | | `search` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `purpose` | [OptionalNullable[models.FilePurpose]](../../models/filepurpose.md) | :heavy_minus_sign: | N/A | +| `mimetypes` | List[*str*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index 8021fa07..bdd8d588 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -14,6 +14,8 @@ * [update_version](#update_version) - Update an agent version. * [list_versions](#list_versions) - List all versions of an agent. * [get_version](#get_version) - Retrieve a specific version of an agent. +* [create_version_alias](#create_version_alias) - Create or update an agent version alias. +* [list_version_aliases](#list_version_aliases) - List all aliases for an agent. ## create @@ -116,7 +118,7 @@ with Mistral( ## get -Given an agent retrieve an agent entity with its attributes. +Given an agent, retrieve an agent entity with its attributes. 
The agent_version parameter can be an integer version number or a string alias. ### Example Usage @@ -139,11 +141,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.QueryParamAgentVersion]](../../models/queryparamagentversion.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -337,7 +339,7 @@ Get a specific agent version by version number. 
### Example Usage - + ```python from mistralai import Mistral import os @@ -347,7 +349,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.get_version(agent_id="", version=788393) + res = mistral.beta.agents.get_version(agent_id="", version="788393") # Handle response print(res) @@ -359,7 +361,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `agent_id` | *str* | :heavy_check_mark: | N/A | -| `version` | *int* | :heavy_check_mark: | N/A | +| `version` | *str* | :heavy_check_mark: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -368,6 +370,90 @@ with Mistral( ### Errors +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## create_version_alias + +Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. 
+ +### Example Usage + + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=595141) + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `version` | *int* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[models.AgentAliasResponse](../../models/agentaliasresponse.md)** + +### Errors + +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## list_version_aliases + +Retrieve all version aliases for a specific agent. 
+ +### Example Usage + + +```python +from mistralai import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + res = mistral.beta.agents.list_version_aliases(agent_id="") + + # Handle response + print(res) + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | + +### Response + +**[List[models.AgentAliasResponse]](../../models/.md)** + +### Errors + | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index aae7598d..6ee91593 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.11.1" +__version__: str = "1.12.0" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 1.11.1 2.794.1 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 1.12.0 2.794.1 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py index 93ed8c28..194cb4c0 100644 --- a/src/mistralai/conversations.py +++ b/src/mistralai/conversations.py @@ -259,7 +259,12 @@ def start( description: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: 
OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrequest.AgentVersion, + models_conversationrequest.AgentVersionTypedDict, + ] + ] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -405,7 +410,12 @@ async def start_async( description: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrequest.AgentVersion, + models_conversationrequest.AgentVersionTypedDict, + ] + ] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -1711,7 +1721,12 @@ def restart( ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartrequest.ConversationRestartRequestAgentVersion, + models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1846,7 +1861,12 @@ async def restart_async( ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartrequest.ConversationRestartRequestAgentVersion, + models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1991,7 +2011,12 @@ def start_stream( description: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = 
UNSET, agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationstreamrequest.ConversationStreamRequestAgentVersion, + models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -2148,7 +2173,12 @@ async def start_stream_async( description: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationstreamrequest.ConversationStreamRequestAgentVersion, + models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, model: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -2561,7 +2591,12 @@ def restart_stream( ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -2703,7 +2738,12 @@ async def restart_stream_async( ] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, retries: 
OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, diff --git a/src/mistralai/files.py b/src/mistralai/files.py index ab2c75a2..90ada0ff 100644 --- a/src/mistralai/files.py +++ b/src/mistralai/files.py @@ -241,6 +241,7 @@ def list( source: OptionalNullable[List[models_source.Source]] = UNSET, search: OptionalNullable[str] = UNSET, purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -257,6 +258,7 @@ def list( :param source: :param search: :param purpose: + :param mimetypes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -280,6 +282,7 @@ def list( source=source, search=search, purpose=purpose, + mimetypes=mimetypes, ) req = self._build_request( @@ -343,6 +346,7 @@ async def list_async( source: OptionalNullable[List[models_source.Source]] = UNSET, search: OptionalNullable[str] = UNSET, purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -359,6 +363,7 @@ async def list_async( :param source: :param search: :param purpose: + :param mimetypes: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -382,6 +387,7 @@ async def list_async( source=source, search=search, purpose=purpose, + mimetypes=mimetypes, ) req = self._build_request_async( diff --git 
a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py index e4abf6e4..7fb0ce25 100644 --- a/src/mistralai/mistral_agents.py +++ b/src/mistralai/mistral_agents.py @@ -5,6 +5,7 @@ from mistralai._hooks import HookContext from mistralai.models import ( agentcreationrequest as models_agentcreationrequest, + agents_api_v1_agents_getop as models_agents_api_v1_agents_getop, agentupdaterequest as models_agentupdaterequest, completionargs as models_completionargs, requestsource as models_requestsource, @@ -494,7 +495,12 @@ def get( self, *, agent_id: str, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_agents_api_v1_agents_getop.QueryParamAgentVersion, + models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -502,7 +508,7 @@ def get( ) -> models.Agent: r"""Retrieve an agent entity. - Given an agent retrieve an agent entity with its attributes. + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. :param agent_id: :param agent_version: @@ -587,7 +593,12 @@ async def get_async( self, *, agent_id: str, - agent_version: OptionalNullable[int] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_agents_api_v1_agents_getop.QueryParamAgentVersion, + models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + ] + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -595,7 +606,7 @@ async def get_async( ) -> models.Agent: r"""Retrieve an agent entity. - Given an agent retrieve an agent entity with its attributes. + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. 
:param agent_id: :param agent_version: @@ -1514,7 +1525,7 @@ def get_version( self, *, agent_id: str, - version: int, + version: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1548,7 +1559,7 @@ def get_version( req = self._build_request( method="GET", - path="/v1/agents/{agent_id}/version/{version}", + path="/v1/agents/{agent_id}/versions/{version}", base_url=base_url, url_variables=url_variables, request=request, @@ -1607,7 +1618,7 @@ async def get_version_async( self, *, agent_id: str, - version: int, + version: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1641,7 +1652,7 @@ async def get_version_async( req = self._build_request_async( method="GET", - path="/v1/agents/{agent_id}/version/{version}", + path="/v1/agents/{agent_id}/versions/{version}", base_url=base_url, url_variables=url_variables, request=request, @@ -1695,3 +1706,375 @@ async def get_version_async( raise models.SDKError("API error occurred", http_res, http_res_text) raise models.SDKError("Unexpected response received", http_res) + + def create_version_alias( + self, + *, + agent_id: str, + alias: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentAliasResponse: + r"""Create or update an agent version alias. + + Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. 
+ + :param agent_id: + :param alias: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + agent_id=agent_id, + alias=alias, + version=version, + ) + + req = self._build_request( + method="PUT", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create_or_update_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
unmarshal_json_response(models.AgentAliasResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_version_alias_async( + self, + *, + agent_id: str, + alias: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentAliasResponse: + r"""Create or update an agent version alias. + + Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. + + :param agent_id: + :param alias: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + agent_id=agent_id, + alias=alias, + version=version, + ) + + req = self._build_request_async( + method="PUT", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create_or_update_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.AgentAliasResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list_version_aliases( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentAliasResponse]: + r"""List all aliases for an agent. + + Retrieve all version aliases for a specific agent. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_version_aliases", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.AgentAliasResponse], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_version_aliases_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentAliasResponse]: + r"""List all aliases for an agent. + + Retrieve all version aliases for a specific agent. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", 
"503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_version_aliases", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.AgentAliasResponse], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index c35b3d24..23e65222 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -14,8 +14,11 @@ AgentToolsTypedDict, AgentTypedDict, ) + from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict from .agentconversation import ( AgentConversation, + AgentConversationAgentVersion, + AgentConversationAgentVersionTypedDict, AgentConversationObject, AgentConversationTypedDict, ) @@ -41,6 +44,10 @@ AgentHandoffStartedEventType, AgentHandoffStartedEventTypedDict, ) + from .agents_api_v1_agents_create_or_update_aliasop import ( + AgentsAPIV1AgentsCreateOrUpdateAliasRequest, + AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, + 
) from .agents_api_v1_agents_deleteop import ( AgentsAPIV1AgentsDeleteRequest, AgentsAPIV1AgentsDeleteRequestTypedDict, @@ -52,6 +59,12 @@ from .agents_api_v1_agents_getop import ( AgentsAPIV1AgentsGetRequest, AgentsAPIV1AgentsGetRequestTypedDict, + QueryParamAgentVersion, + QueryParamAgentVersionTypedDict, + ) + from .agents_api_v1_agents_list_version_aliasesop import ( + AgentsAPIV1AgentsListVersionAliasesRequest, + AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, ) from .agents_api_v1_agents_list_versionsop import ( AgentsAPIV1AgentsListVersionsRequest, @@ -340,6 +353,8 @@ ConversationMessagesTypedDict, ) from .conversationrequest import ( + AgentVersion, + AgentVersionTypedDict, ConversationRequest, ConversationRequestTypedDict, HandoffExecution, @@ -355,16 +370,22 @@ ) from .conversationrestartrequest import ( ConversationRestartRequest, + ConversationRestartRequestAgentVersion, + ConversationRestartRequestAgentVersionTypedDict, ConversationRestartRequestHandoffExecution, ConversationRestartRequestTypedDict, ) from .conversationrestartstreamrequest import ( ConversationRestartStreamRequest, + ConversationRestartStreamRequestAgentVersion, + ConversationRestartStreamRequestAgentVersionTypedDict, ConversationRestartStreamRequestHandoffExecution, ConversationRestartStreamRequestTypedDict, ) from .conversationstreamrequest import ( ConversationStreamRequest, + ConversationStreamRequestAgentVersion, + ConversationStreamRequestAgentVersionTypedDict, ConversationStreamRequestHandoffExecution, ConversationStreamRequestTools, ConversationStreamRequestToolsTypedDict, @@ -758,6 +779,28 @@ from .paginationinfo import PaginationInfo, PaginationInfoTypedDict from .prediction import Prediction, PredictionTypedDict from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict + from .realtimetranscriptionerror import ( + RealtimeTranscriptionError, + RealtimeTranscriptionErrorTypedDict, + ) + from .realtimetranscriptionerrordetail import ( + Message, + 
MessageTypedDict, + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailTypedDict, + ) + from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, + ) + from .realtimetranscriptionsessioncreated import ( + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionCreatedTypedDict, + ) + from .realtimetranscriptionsessionupdated import ( + RealtimeTranscriptionSessionUpdated, + RealtimeTranscriptionSessionUpdatedTypedDict, + ) from .referencechunk import ( ReferenceChunk, ReferenceChunkType, @@ -951,7 +994,11 @@ __all__ = [ "APIEndpoint", "Agent", + "AgentAliasResponse", + "AgentAliasResponseTypedDict", "AgentConversation", + "AgentConversationAgentVersion", + "AgentConversationAgentVersionTypedDict", "AgentConversationObject", "AgentConversationTypedDict", "AgentCreationRequest", @@ -976,6 +1023,10 @@ "AgentUpdateRequestTools", "AgentUpdateRequestToolsTypedDict", "AgentUpdateRequestTypedDict", + "AgentVersion", + "AgentVersionTypedDict", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", "AgentsAPIV1AgentsDeleteRequest", "AgentsAPIV1AgentsDeleteRequestTypedDict", "AgentsAPIV1AgentsGetRequest", @@ -984,6 +1035,8 @@ "AgentsAPIV1AgentsGetVersionRequestTypedDict", "AgentsAPIV1AgentsListRequest", "AgentsAPIV1AgentsListRequestTypedDict", + "AgentsAPIV1AgentsListVersionAliasesRequest", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict", "AgentsAPIV1AgentsListVersionsRequest", "AgentsAPIV1AgentsListVersionsRequestTypedDict", "AgentsAPIV1AgentsUpdateRequest", @@ -1184,12 +1237,18 @@ "ConversationResponseObject", "ConversationResponseTypedDict", "ConversationRestartRequest", + "ConversationRestartRequestAgentVersion", + "ConversationRestartRequestAgentVersionTypedDict", "ConversationRestartRequestHandoffExecution", "ConversationRestartRequestTypedDict", "ConversationRestartStreamRequest", + 
"ConversationRestartStreamRequestAgentVersion", + "ConversationRestartStreamRequestAgentVersionTypedDict", "ConversationRestartStreamRequestHandoffExecution", "ConversationRestartStreamRequestTypedDict", "ConversationStreamRequest", + "ConversationStreamRequestAgentVersion", + "ConversationStreamRequestAgentVersionTypedDict", "ConversationStreamRequestHandoffExecution", "ConversationStreamRequestTools", "ConversationStreamRequestToolsTypedDict", @@ -1431,6 +1490,7 @@ "ListSharingOutTypedDict", "Loc", "LocTypedDict", + "Message", "MessageEntries", "MessageEntriesTypedDict", "MessageInputContentChunks", @@ -1456,6 +1516,7 @@ "MessageOutputEventRole", "MessageOutputEventType", "MessageOutputEventTypedDict", + "MessageTypedDict", "Messages", "MessagesTypedDict", "MetricOut", @@ -1506,7 +1567,19 @@ "PredictionTypedDict", "ProcessingStatusOut", "ProcessingStatusOutTypedDict", + "QueryParamAgentVersion", + "QueryParamAgentVersionTypedDict", "QueryParamStatus", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionErrorDetailTypedDict", + "RealtimeTranscriptionErrorTypedDict", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionCreatedTypedDict", + "RealtimeTranscriptionSessionTypedDict", + "RealtimeTranscriptionSessionUpdated", + "RealtimeTranscriptionSessionUpdatedTypedDict", "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", @@ -1675,7 +1748,11 @@ "AgentTools": ".agent", "AgentToolsTypedDict": ".agent", "AgentTypedDict": ".agent", + "AgentAliasResponse": ".agentaliasresponse", + "AgentAliasResponseTypedDict": ".agentaliasresponse", "AgentConversation": ".agentconversation", + "AgentConversationAgentVersion": ".agentconversation", + "AgentConversationAgentVersionTypedDict": ".agentconversation", "AgentConversationObject": ".agentconversation", "AgentConversationTypedDict": ".agentconversation", "AgentCreationRequest": ".agentcreationrequest", @@ -1692,12 +1769,18 
@@ "AgentHandoffStartedEvent": ".agenthandoffstartedevent", "AgentHandoffStartedEventType": ".agenthandoffstartedevent", "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", + "QueryParamAgentVersion": ".agents_api_v1_agents_getop", + "QueryParamAgentVersionTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", @@ -1913,6 +1996,8 @@ "ConversationMessages": ".conversationmessages", "ConversationMessagesObject": ".conversationmessages", "ConversationMessagesTypedDict": ".conversationmessages", + "AgentVersion": ".conversationrequest", + "AgentVersionTypedDict": ".conversationrequest", "ConversationRequest": ".conversationrequest", "ConversationRequestTypedDict": ".conversationrequest", "HandoffExecution": ".conversationrequest", @@ -1924,12 +2009,18 @@ "Outputs": ".conversationresponse", "OutputsTypedDict": ".conversationresponse", "ConversationRestartRequest": ".conversationrestartrequest", + 
"ConversationRestartRequestAgentVersion": ".conversationrestartrequest", + "ConversationRestartRequestAgentVersionTypedDict": ".conversationrestartrequest", "ConversationRestartRequestHandoffExecution": ".conversationrestartrequest", "ConversationRestartRequestTypedDict": ".conversationrestartrequest", "ConversationRestartStreamRequest": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersion": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersionTypedDict": ".conversationrestartstreamrequest", "ConversationRestartStreamRequestHandoffExecution": ".conversationrestartstreamrequest", "ConversationRestartStreamRequestTypedDict": ".conversationrestartstreamrequest", "ConversationStreamRequest": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersion": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersionTypedDict": ".conversationstreamrequest", "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", "ConversationStreamRequestTools": ".conversationstreamrequest", "ConversationStreamRequestToolsTypedDict": ".conversationstreamrequest", @@ -2237,6 +2328,18 @@ "PredictionTypedDict": ".prediction", "ProcessingStatusOut": ".processingstatusout", "ProcessingStatusOutTypedDict": ".processingstatusout", + "RealtimeTranscriptionError": ".realtimetranscriptionerror", + "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", + "Message": ".realtimetranscriptionerrordetail", + "MessageTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionSession": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionCreated": ".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionCreatedTypedDict": 
".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionUpdated": ".realtimetranscriptionsessionupdated", + "RealtimeTranscriptionSessionUpdatedTypedDict": ".realtimetranscriptionsessionupdated", "ReferenceChunk": ".referencechunk", "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", diff --git a/src/mistralai/models/agentaliasresponse.py b/src/mistralai/models/agentaliasresponse.py new file mode 100644 index 00000000..c0928da9 --- /dev/null +++ b/src/mistralai/models/agentaliasresponse.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.types import BaseModel +from typing_extensions import TypedDict + + +class AgentAliasResponseTypedDict(TypedDict): + alias: str + version: int + created_at: datetime + updated_at: datetime + + +class AgentAliasResponse(BaseModel): + alias: str + + version: int + + created_at: datetime + + updated_at: datetime diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py index 625fb4fc..6007b571 100644 --- a/src/mistralai/models/agentconversation.py +++ b/src/mistralai/models/agentconversation.py @@ -4,13 +4,23 @@ from datetime import datetime from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict AgentConversationObject = Literal["conversation",] +AgentConversationAgentVersionTypedDict = TypeAliasType( + "AgentConversationAgentVersionTypedDict", Union[str, int] +) + + +AgentConversationAgentVersion = TypeAliasType( + "AgentConversationAgentVersion", Union[str, int] +) + + class 
AgentConversationTypedDict(TypedDict): id: str created_at: datetime @@ -23,7 +33,7 @@ class AgentConversationTypedDict(TypedDict): metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" object: NotRequired[AgentConversationObject] - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[Nullable[AgentConversationAgentVersionTypedDict]] class AgentConversation(BaseModel): @@ -46,7 +56,7 @@ class AgentConversation(BaseModel): object: Optional[AgentConversationObject] = "conversation" - agent_version: OptionalNullable[int] = UNSET + agent_version: OptionalNullable[AgentConversationAgentVersion] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py b/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py new file mode 100644 index 00000000..6cf9d0e0 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict): + agent_id: str + alias: str + version: int + + +class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + alias: Annotated[ + str, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] + + version: Annotated[ + int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/models/agents_api_v1_agents_get_versionop.py b/src/mistralai/models/agents_api_v1_agents_get_versionop.py index 4463d3b2..fddb10dd 100644 --- a/src/mistralai/models/agents_api_v1_agents_get_versionop.py +++ b/src/mistralai/models/agents_api_v1_agents_get_versionop.py @@ -8,7 +8,7 @@ class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict): agent_id: str - version: int + version: str class AgentsAPIV1AgentsGetVersionRequest(BaseModel): @@ -17,5 +17,5 @@ class AgentsAPIV1AgentsGetVersionRequest(BaseModel): ] version: Annotated[ - int, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/models/agents_api_v1_agents_getop.py b/src/mistralai/models/agents_api_v1_agents_getop.py index dced6dbb..2b7d89a5 100644 --- a/src/mistralai/models/agents_api_v1_agents_getop.py +++ b/src/mistralai/models/agents_api_v1_agents_getop.py @@ -4,12 +4,21 @@ from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata from pydantic import model_serializer -from typing_extensions import Annotated, NotRequired, TypedDict +from 
typing import Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +QueryParamAgentVersionTypedDict = TypeAliasType( + "QueryParamAgentVersionTypedDict", Union[int, str] +) + + +QueryParamAgentVersion = TypeAliasType("QueryParamAgentVersion", Union[int, str]) class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): agent_id: str - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[Nullable[QueryParamAgentVersionTypedDict]] class AgentsAPIV1AgentsGetRequest(BaseModel): @@ -18,7 +27,7 @@ class AgentsAPIV1AgentsGetRequest(BaseModel): ] agent_version: Annotated[ - OptionalNullable[int], + OptionalNullable[QueryParamAgentVersion], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET diff --git a/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py b/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py new file mode 100644 index 00000000..650a7187 --- /dev/null +++ b/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from mistralai.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py index 09d934ed..80581cc1 100644 --- a/src/mistralai/models/conversationrequest.py +++ b/src/mistralai/models/conversationrequest.py @@ -48,6 +48,12 @@ ] +AgentVersionTypedDict = TypeAliasType("AgentVersionTypedDict", Union[str, int]) + + +AgentVersion = TypeAliasType("AgentVersion", Union[str, int]) + + class ConversationRequestTypedDict(TypedDict): inputs: ConversationInputsTypedDict stream: NotRequired[bool] @@ -61,7 +67,7 @@ class ConversationRequestTypedDict(TypedDict): description: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] agent_id: NotRequired[Nullable[str]] - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[Nullable[AgentVersionTypedDict]] model: NotRequired[Nullable[str]] @@ -89,7 +95,7 @@ class ConversationRequest(BaseModel): agent_id: OptionalNullable[str] = UNSET - agent_version: OptionalNullable[int] = UNSET + agent_version: OptionalNullable[AgentVersion] = UNSET model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py index a9c8410c..6f21d012 100644 --- a/src/mistralai/models/conversationrestartrequest.py +++ b/src/mistralai/models/conversationrestartrequest.py @@ -5,8 +5,8 @@ from .conversationinputs import ConversationInputs, ConversationInputsTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL 
from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ConversationRestartRequestHandoffExecution = Literal[ @@ -15,6 +15,18 @@ ] +ConversationRestartRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartRequestAgentVersion = TypeAliasType( + "ConversationRestartRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + class ConversationRestartRequestTypedDict(TypedDict): r"""Request to restart a new conversation from a given entry in the conversation.""" @@ -28,7 +40,9 @@ class ConversationRestartRequestTypedDict(TypedDict): r"""White-listed arguments from the completion API""" metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[ + Nullable[ConversationRestartRequestAgentVersionTypedDict] + ] r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" @@ -52,7 +66,7 @@ class ConversationRestartRequest(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET r"""Custom metadata for the conversation.""" - agent_version: OptionalNullable[int] = UNSET + agent_version: OptionalNullable[ConversationRestartRequestAgentVersion] = UNSET r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" @model_serializer(mode="wrap") diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py index 0703bb5f..2cec7958 100644 --- a/src/mistralai/models/conversationrestartstreamrequest.py +++ b/src/mistralai/models/conversationrestartstreamrequest.py @@ -5,8 +5,8 @@ from .conversationinputs import ConversationInputs, ConversationInputsTypedDict from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict ConversationRestartStreamRequestHandoffExecution = Literal[ @@ -15,6 +15,18 @@ ] +ConversationRestartStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartStreamRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartStreamRequestAgentVersion = TypeAliasType( + "ConversationRestartStreamRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + class ConversationRestartStreamRequestTypedDict(TypedDict): r"""Request to restart a new conversation from a given entry in the conversation.""" @@ -28,7 +40,9 @@ class ConversationRestartStreamRequestTypedDict(TypedDict): r"""White-listed arguments from the completion API""" metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[ + Nullable[ConversationRestartStreamRequestAgentVersionTypedDict] + ] r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" @@ -54,7 +68,9 @@ class ConversationRestartStreamRequest(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET r"""Custom metadata for the conversation.""" - agent_version: OptionalNullable[int] = UNSET + agent_version: OptionalNullable[ConversationRestartStreamRequestAgentVersion] = ( + UNSET + ) r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" @model_serializer(mode="wrap") diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py index 6ff56e17..1a481b77 100644 --- a/src/mistralai/models/conversationstreamrequest.py +++ b/src/mistralai/models/conversationstreamrequest.py @@ -48,6 +48,16 @@ ] +ConversationStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationStreamRequestAgentVersionTypedDict", Union[str, int] +) + + +ConversationStreamRequestAgentVersion = TypeAliasType( + "ConversationStreamRequestAgentVersion", Union[str, int] +) + + class ConversationStreamRequestTypedDict(TypedDict): inputs: ConversationInputsTypedDict stream: NotRequired[bool] @@ -61,7 +71,7 @@ class ConversationStreamRequestTypedDict(TypedDict): description: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] agent_id: NotRequired[Nullable[str]] - agent_version: NotRequired[Nullable[int]] + agent_version: NotRequired[Nullable[ConversationStreamRequestAgentVersionTypedDict]] model: NotRequired[Nullable[str]] @@ -91,7 +101,7 @@ class ConversationStreamRequest(BaseModel): agent_id: OptionalNullable[str] = UNSET - agent_version: OptionalNullable[int] = UNSET + agent_version: OptionalNullable[ConversationStreamRequestAgentVersion] = UNSET model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/models/files_api_routes_list_filesop.py b/src/mistralai/models/files_api_routes_list_filesop.py index 9b9422b4..84d61b9b 100644 --- a/src/mistralai/models/files_api_routes_list_filesop.py +++ 
b/src/mistralai/models/files_api_routes_list_filesop.py @@ -19,6 +19,7 @@ class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): source: NotRequired[Nullable[List[Source]]] search: NotRequired[Nullable[str]] purpose: NotRequired[Nullable[FilePurpose]] + mimetypes: NotRequired[Nullable[List[str]]] class FilesAPIRoutesListFilesRequest(BaseModel): @@ -57,6 +58,11 @@ class FilesAPIRoutesListFilesRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET + mimetypes: Annotated[ + OptionalNullable[List[str]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -67,8 +73,9 @@ def serialize_model(self, handler): "source", "search", "purpose", + "mimetypes", ] - nullable_fields = ["sample_type", "source", "search", "purpose"] + nullable_fields = ["sample_type", "source", "search", "purpose", "mimetypes"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/models/mistralpromptmode.py b/src/mistralai/models/mistralpromptmode.py index ee82fb6d..dfb6f2d2 100644 --- a/src/mistralai/models/mistralpromptmode.py +++ b/src/mistralai/models/mistralpromptmode.py @@ -6,3 +6,7 @@ MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
+""" diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py index 4b5d5da7..6edf8e5b 100644 --- a/src/mistralai/models/modelcapabilities.py +++ b/src/mistralai/models/modelcapabilities.py @@ -16,6 +16,7 @@ class ModelCapabilitiesTypedDict(TypedDict): classification: NotRequired[bool] moderation: NotRequired[bool] audio: NotRequired[bool] + audio_transcription: NotRequired[bool] class ModelCapabilities(BaseModel): @@ -36,3 +37,5 @@ class ModelCapabilities(BaseModel): moderation: Optional[bool] = False audio: Optional[bool] = False + + audio_transcription: Optional[bool] = False diff --git a/src/mistralai/models/realtimetranscriptionerror.py b/src/mistralai/models/realtimetranscriptionerror.py new file mode 100644 index 00000000..0785f700 --- /dev/null +++ b/src/mistralai/models/realtimetranscriptionerror.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .realtimetranscriptionerrordetail import ( + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionErrorTypedDict(TypedDict): + error: RealtimeTranscriptionErrorDetailTypedDict + type: Literal["error"] + + +class RealtimeTranscriptionError(BaseModel): + error: RealtimeTranscriptionErrorDetail + + TYPE: Annotated[ + Annotated[Optional[Literal["error"]], AfterValidator(validate_const("error"))], + pydantic.Field(alias="type"), + ] = "error" diff --git a/src/mistralai/models/realtimetranscriptionerrordetail.py b/src/mistralai/models/realtimetranscriptionerrordetail.py new file mode 100644 index 00000000..cb5d73f8 --- /dev/null +++ 
b/src/mistralai/models/realtimetranscriptionerrordetail.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +MessageTypedDict = TypeAliasType("MessageTypedDict", Union[str, Dict[str, Any]]) +r"""Human-readable error message.""" + + +Message = TypeAliasType("Message", Union[str, Dict[str, Any]]) +r"""Human-readable error message.""" + + +class RealtimeTranscriptionErrorDetailTypedDict(TypedDict): + message: MessageTypedDict + r"""Human-readable error message.""" + code: int + r"""Internal error code for debugging.""" + + +class RealtimeTranscriptionErrorDetail(BaseModel): + message: Message + r"""Human-readable error message.""" + + code: int + r"""Internal error code for debugging.""" diff --git a/src/mistralai/models/realtimetranscriptionsession.py b/src/mistralai/models/realtimetranscriptionsession.py new file mode 100644 index 00000000..bcd0cfe3 --- /dev/null +++ b/src/mistralai/models/realtimetranscriptionsession.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .audioformat import AudioFormat, AudioFormatTypedDict +from mistralai.types import BaseModel +from typing_extensions import TypedDict + + +class RealtimeTranscriptionSessionTypedDict(TypedDict): + request_id: str + model: str + audio_format: AudioFormatTypedDict + + +class RealtimeTranscriptionSession(BaseModel): + request_id: str + + model: str + + audio_format: AudioFormat diff --git a/src/mistralai/models/realtimetranscriptionsessioncreated.py b/src/mistralai/models/realtimetranscriptionsessioncreated.py new file mode 100644 index 00000000..9a2c2860 --- /dev/null +++ b/src/mistralai/models/realtimetranscriptionsessioncreated.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionCreatedTypedDict(TypedDict): + session: RealtimeTranscriptionSessionTypedDict + type: Literal["session.created"] + + +class RealtimeTranscriptionSessionCreated(BaseModel): + session: RealtimeTranscriptionSession + + TYPE: Annotated[ + Annotated[ + Optional[Literal["session.created"]], + AfterValidator(validate_const("session.created")), + ], + pydantic.Field(alias="type"), + ] = "session.created" diff --git a/src/mistralai/models/realtimetranscriptionsessionupdated.py b/src/mistralai/models/realtimetranscriptionsessionupdated.py new file mode 100644 index 00000000..ad1b5133 --- /dev/null +++ b/src/mistralai/models/realtimetranscriptionsessionupdated.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, +) +from mistralai.types import BaseModel +from mistralai.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionUpdatedTypedDict(TypedDict): + session: RealtimeTranscriptionSessionTypedDict + type: Literal["session.updated"] + + +class RealtimeTranscriptionSessionUpdated(BaseModel): + session: RealtimeTranscriptionSession + + TYPE: Annotated[ + Annotated[ + Optional[Literal["session.updated"]], + AfterValidator(validate_const("session.updated")), + ], + pydantic.Field(alias="type"), + ] = "session.updated" diff --git a/uv.lock b/uv.lock index dc8f42ea..efffa7ad 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "1.11.0" +version = "1.11.1" source = { editable = "." } dependencies = [ { name = "eval-type-backport" }, From caf71b23f165f202a81afa11fdbc9d51a1f34ea5 Mon Sep 17 00:00:00 2001 From: jean-malo Date: Sun, 1 Feb 2026 22:44:58 +0100 Subject: [PATCH 183/223] feat(realtime): add realtime audio transcription support This commit adds support for realtime audio transcription using WebSocket connections. The implementation includes: 1. New realtime transcription client in the extra module 2. Examples for microphone and file-based transcription 3. Support for audio format negotiation 4. Proper error handling and connection management The realtime transcription feature requires the websockets package (>=13.0) which is now added as an optional dependency. This implementation allows for streaming audio data to the Mistral API and receiving transcription results in realtime. 
The changes include new models for realtime events and connection management, as well as updated audio.py to expose the realtime functionality. --- ...async_realtime_transcription_microphone.py | 225 +++++++++++++++ .../async_realtime_transcription_stream.py | 144 ++++++++++ examples/mistral/audio/chat_base64.py | 19 +- examples/mistral/audio/chat_no_streaming.py | 19 +- examples/mistral/audio/chat_streaming.py | 24 +- .../audio/transcription_diarize_async.py | 28 ++ .../async_batch_job_chat_completion_inline.py | 1 - pyproject.toml | 3 + scripts/run_examples.sh | 3 + src/mistralai/audio.py | 20 ++ src/mistralai/extra/__init__.py | 48 ++++ src/mistralai/extra/exceptions.py | 53 +++- src/mistralai/extra/realtime/__init__.py | 25 ++ src/mistralai/extra/realtime/connection.py | 207 +++++++++++++ src/mistralai/extra/realtime/transcription.py | 271 ++++++++++++++++++ uv.lock | 74 ++++- 16 files changed, 1133 insertions(+), 31 deletions(-) create mode 100644 examples/mistral/audio/async_realtime_transcription_microphone.py create mode 100644 examples/mistral/audio/async_realtime_transcription_stream.py create mode 100644 examples/mistral/audio/transcription_diarize_async.py create mode 100644 src/mistralai/extra/realtime/__init__.py create mode 100644 src/mistralai/extra/realtime/connection.py create mode 100644 src/mistralai/extra/realtime/transcription.py diff --git a/examples/mistral/audio/async_realtime_transcription_microphone.py b/examples/mistral/audio/async_realtime_transcription_microphone.py new file mode 100644 index 00000000..748dbcaf --- /dev/null +++ b/examples/mistral/audio/async_realtime_transcription_microphone.py @@ -0,0 +1,225 @@ +#!/usr/bin/env python +# /// script +# requires-python = ">=3.9" +# dependencies = [ +# "mistralai[realtime]", +# "pyaudio", +# "rich", +# ] +# [tool.uv.sources] +# mistralai = { path = "../../..", editable = true } +# /// + +import argparse +import asyncio +import os +import sys +from typing import AsyncIterator + +from 
# Shared Rich console; the Live display below renders into it.
console = Console()


class TranscriptDisplay:
    """Manages the live transcript display.

    Holds the accumulated transcript text plus a one-line status, and
    renders both into a Rich ``Layout`` (header / transcript body / footer,
    with an optional error panel).
    """

    def __init__(self, model: str) -> None:
        # Model name shown in the header bar.
        self.model = model
        # Transcript accumulated from text deltas.
        self.transcript = ""
        # Human-readable connection status (emoji + text).
        self.status = "🔌 Connecting..."
        # Last error message, if any; when set, render() adds an error panel.
        self.error: str | None = None

    def set_listening(self) -> None:
        """Mark the session as connected and capturing audio."""
        self.status = "🎤 Listening..."

    def add_text(self, text: str) -> None:
        """Append a transcription text delta to the transcript."""
        self.transcript += text

    def set_done(self) -> None:
        """Mark the transcription as finished."""
        self.status = "✅ Done"

    def set_error(self, error: str) -> None:
        """Record an error message; shown in a dedicated red panel."""
        self.status = "❌ Error"
        self.error = error

    def render(self) -> Layout:
        """Build the full-screen layout for the current state."""
        layout = Layout()

        # Create minimal header
        header_text = Text()
        header_text.append("│ ", style="dim")
        header_text.append(self.model, style="dim")
        header_text.append(" │ ", style="dim")

        # Status colour is keyed off substrings of the status message above.
        if "Listening" in self.status:
            status_style = "green"
        elif "Connecting" in self.status:
            status_style = "yellow dim"
        elif "Done" in self.status or "Stopped" in self.status:
            status_style = "dim"
        else:
            status_style = "red"
        header_text.append(self.status, style=status_style)

        header = Align.left(header_text, vertical="middle", pad=False)

        # Create main transcript area - no title, minimal border
        transcript_text = Text(
            self.transcript or "...", style="white" if self.transcript else "dim"
        )
        transcript = Panel(
            Align.left(transcript_text, vertical="top"),
            border_style="dim",
            padding=(1, 2),
        )

        # Minimal footer
        footer_text = Text()
        footer_text.append("ctrl+c", style="dim")
        footer_text.append(" quit", style="dim italic")
        footer = Align.left(footer_text, vertical="middle", pad=False)

        # Handle error display: insert an extra red panel above the footer.
        if self.error:
            layout.split_column(
                Layout(header, name="header", size=1),
                Layout(transcript, name="body"),
                Layout(
                    Panel(Text(self.error, style="red"), border_style="red"),
                    name="error",
                    size=4,
                ),
                Layout(footer, name="footer", size=1),
            )
        else:
            layout.split_column(
                Layout(header, name="header", size=1),
                Layout(transcript, name="body"),
                Layout(footer, name="footer", size=1),
            )

        return layout


async def iter_microphone(
    *,
    sample_rate: int,
    chunk_duration_ms: int,
) -> AsyncIterator[bytes]:
    """
    Yield microphone PCM chunks using PyAudio (16-bit mono).
    Encoding is always pcm_s16le.
    """
    # Imported lazily so the module loads even without PyAudio installed.
    import pyaudio

    p = pyaudio.PyAudio()
    # Number of samples per chunk for the requested chunk duration.
    chunk_samples = int(sample_rate * chunk_duration_ms / 1000)

    stream = p.open(
        format=pyaudio.paInt16,  # 16-bit signed samples -> pcm_s16le bytes
        channels=1,
        rate=sample_rate,
        input=True,
        frames_per_buffer=chunk_samples,
    )

    loop = asyncio.get_running_loop()
    try:
        while True:
            # stream.read is blocking; run it off-thread
            # (third positional arg is exception_on_overflow=False: drop
            # overflowed frames instead of raising).
            data = await loop.run_in_executor(None, stream.read, chunk_samples, False)
            yield data
    finally:
        # Always release the audio device, even on cancellation.
        stream.stop_stream()
        stream.close()
        p.terminate()


def parse_args() -> argparse.Namespace:
    """Parse command-line options for the microphone transcription demo."""
    parser = argparse.ArgumentParser(description="Real-time microphone transcription.")
    parser.add_argument("--model", default="voxtral-mini-transcribe-realtime-2602", help="Model ID")
    parser.add_argument(
        "--sample-rate",
        type=int,
        default=16000,
        choices=[8000, 16000, 22050, 44100, 48000],
        help="Sample rate in Hz",
    )
    parser.add_argument(
        "--chunk-duration", type=int, default=10, help="Chunk duration in ms"
    )
    parser.add_argument(
        "--api-key", default=os.environ.get("MISTRAL_API_KEY"), help="Mistral API key"
    )
    parser.add_argument(
        "--base-url",
        # NOTE(review): a ws(s) scheme is expected here for the realtime
        # endpoint — confirm against the transcribe_stream URL handling.
        default=os.environ.get("MISTRAL_BASE_URL", "wss://api.mistral.ai"),
    )
    return parser.parse_args()


async def main() -> int:
    """Stream microphone audio to the realtime endpoint; returns exit code."""
    args = parse_args()
    # Raises KeyError if neither --api-key nor MISTRAL_API_KEY is provided.
    api_key = args.api_key or os.environ["MISTRAL_API_KEY"]

    client = Mistral(api_key=api_key, server_url=args.base_url)

    # microphone is always pcm_s16le here
    audio_format = AudioFormat(encoding="pcm_s16le", sample_rate=args.sample_rate)

    mic_stream = iter_microphone(
        sample_rate=args.sample_rate, chunk_duration_ms=args.chunk_duration
    )

    display = TranscriptDisplay(model=args.model)

    # screen=True gives a full-screen alternate buffer, restored on exit.
    with Live(
        display.render(), console=console, refresh_per_second=10, screen=True
    ) as live:
        try:
            async for event in client.audio.realtime.transcribe_stream(
                audio_stream=mic_stream,
                model=args.model,
                audio_format=audio_format,
            ):
                if isinstance(event, RealtimeTranscriptionSessionCreated):
                    display.set_listening()
                    live.update(display.render())
                elif isinstance(event, TranscriptionStreamTextDelta):
                    display.add_text(event.text)
                    live.update(display.render())
                elif isinstance(event, TranscriptionStreamDone):
                    display.set_done()
                    live.update(display.render())
                    break
                elif isinstance(event, RealtimeTranscriptionError):
                    display.set_error(str(event.error))
                    live.update(display.render())
                    return 1
                elif isinstance(event, UnknownRealtimeEvent):
                    # Ignore forward-compat events the SDK cannot parse.
                    continue
        except KeyboardInterrupt:
            display.status = "⏹️ Stopped"
            live.update(display.render())

    return 0


if __name__ == "__main__":
    sys.exit(asyncio.run(main()))
def convert_audio_to_pcm(
    input_path: Path,
    sample_rate: int = 16000,
) -> Path:
    """Convert *input_path* to raw 16-bit little-endian mono PCM via ffmpeg.

    The converted audio is written to a fresh temporary ``.pcm`` file whose
    path is returned; the caller is responsible for deleting it.

    Args:
        input_path: Source audio file in any format ffmpeg can decode.
        sample_rate: Target sample rate in Hz (default 16000, matching the
            realtime endpoint's default format).

    Raises:
        RuntimeError: if ffmpeg exits with a non-zero status (stderr is
            included in the message).
    """
    temp_file = tempfile.NamedTemporaryFile(suffix=".pcm", delete=False)
    temp_path = Path(temp_file.name)
    temp_file.close()

    cmd = [
        "ffmpeg",
        "-y",  # overwrite the (already created) temp file
        "-i",
        str(input_path),
        "-f",
        "s16le",  # raw signed 16-bit little-endian samples
        "-ar",
        str(sample_rate),
        "-ac",
        "1",  # downmix to mono
        str(temp_path),
    ]

    try:
        subprocess.run(cmd, check=True, capture_output=True, text=True)
    except subprocess.CalledProcessError as exc:
        # Don't leave a half-written temp file behind on failure.
        temp_path.unlink(missing_ok=True)
        raise RuntimeError(f"ffmpeg conversion failed: {exc.stderr}") from exc

    return temp_path


async def aiter_audio_file(
    path: Path,
    *,
    chunk_size: int = 4096,
    chunk_delay: float = 0.0,
) -> AsyncIterator[bytes]:
    """Yield the file at *path* as ``chunk_size``-byte chunks.

    Args:
        path: File to stream (opened in binary mode).
        chunk_size: Maximum bytes per chunk; the final chunk may be shorter.
        chunk_delay: Seconds to sleep after each chunk, to simulate a
            real-time capture rate; 0 disables throttling.
    """
    with open(path, "rb") as f:
        # Walrus read-loop: stops on the first empty (EOF) read.
        while chunk := f.read(chunk_size):
            yield chunk
            if chunk_delay > 0:
                await asyncio.sleep(chunk_delay)
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the file-streaming transcription demo."""
    parser = argparse.ArgumentParser(
        description="Real-time audio transcription via WebSocket (iterator-based)."
    )
    parser.add_argument("file", type=Path, help="Path to the audio file")
    parser.add_argument("--model", default="voxtral-mini-2601", help="Model ID")
    parser.add_argument(
        "--api-key",
        default=os.environ.get("MISTRAL_API_KEY"),
        help="Mistral API key",
    )
    parser.add_argument(
        "--base-url",
        default=os.environ.get("MISTRAL_BASE_URL", "https://api.mistral.ai"),
        help="API base URL (http/https/ws/wss)",
    )
    parser.add_argument(
        "--chunk-size", type=int, default=4096, help="Audio chunk size in bytes"
    )
    parser.add_argument(
        "--chunk-delay",
        type=float,
        default=0.01,
        help="Delay between chunks in seconds",
    )
    parser.add_argument(
        "--no-convert",
        action="store_true",
        help="Skip ffmpeg conversion (input must be raw PCM)",
    )
    return parser.parse_args()


async def main() -> int:
    """Stream a local audio file to the realtime endpoint; returns exit code."""
    args = parse_args()
    # Raises KeyError if neither --api-key nor MISTRAL_API_KEY is provided.
    api_key = args.api_key or os.environ["MISTRAL_API_KEY"]

    pcm_path = args.file
    # Tracks the ffmpeg-produced temp file (if any) so it can be deleted.
    temp_path = None

    # Convert to raw PCM unless disabled or the file already looks raw.
    if not args.no_convert and args.file.suffix.lower() not in (".pcm", ".raw"):
        pcm_path = convert_audio_to_pcm(args.file)
        temp_path = pcm_path

    client = Mistral(api_key=api_key, server_url=args.base_url)

    try:
        async for event in client.audio.realtime.transcribe_stream(
            audio_stream=aiter_audio_file(
                pcm_path,
                chunk_size=args.chunk_size,
                chunk_delay=args.chunk_delay,
            ),
            model=args.model,
            # Must match the ffmpeg output above (s16le @ 16 kHz mono).
            audio_format=AudioFormat(encoding="pcm_s16le", sample_rate=16000),
        ):
            if isinstance(event, TranscriptionStreamTextDelta):
                print(event.text, end="", flush=True)
            elif isinstance(event, TranscriptionStreamDone):
                print()
                break
            elif isinstance(event, RealtimeTranscriptionError):
                print(f"\nError: {event.error}", file=sys.stderr)
                break
            elif isinstance(event, UnknownRealtimeEvent):
                # ignore future / unknown events; keep going
                continue

    finally:
        # Clean up the temporary PCM file even on error/interrupt.
        if temp_path is not None:
            temp_path.unlink(missing_ok=True)

    return 0


if __name__ == "__main__":
    sys.exit(asyncio.run(main()))
sys.exit(asyncio.run(main())) diff --git a/examples/mistral/audio/chat_base64.py b/examples/mistral/audio/chat_base64.py index ea5ea79a..8468fbfb 100755 --- a/examples/mistral/audio/chat_base64.py +++ b/examples/mistral/audio/chat_base64.py @@ -6,7 +6,6 @@ from mistralai.models import UserMessage - def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "voxtral-small-latest" @@ -16,13 +15,17 @@ def main(): content = f.read() chat_response = client.chat.complete( model=model, - messages=[UserMessage(content=[ - {"type": "text", "text": "What's in this audio file?"}, - { - "type": "input_audio", - "input_audio": base64.b64encode(content).decode('utf-8'), - }, - ])], + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What's in this audio file?"}, + { + "type": "input_audio", + "input_audio": base64.b64encode(content).decode("utf-8"), + }, + ] + ) + ], ) print(chat_response.choices[0].message.content) diff --git a/examples/mistral/audio/chat_no_streaming.py b/examples/mistral/audio/chat_no_streaming.py index 2caebb25..f10240bd 100755 --- a/examples/mistral/audio/chat_no_streaming.py +++ b/examples/mistral/audio/chat_no_streaming.py @@ -6,7 +6,6 @@ from mistralai.models import UserMessage - def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "voxtral-small-latest" @@ -15,13 +14,17 @@ def main(): chat_response = client.chat.complete( model=model, - messages=[UserMessage(content=[ - {"type": "text", "text": "What is this audio about?"}, - { - "type": "input_audio", - "input_audio": "https://docs.mistral.ai/audio/bcn_weather.mp3", - }, - ])], + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What is this audio about?"}, + { + "type": "input_audio", + "input_audio": "https://docs.mistral.ai/audio/bcn_weather.mp3", + }, + ] + ) + ], ) print(chat_response.choices[0].message.content) diff --git a/examples/mistral/audio/chat_streaming.py 
b/examples/mistral/audio/chat_streaming.py index 060bfdd9..f9c913a0 100755 --- a/examples/mistral/audio/chat_streaming.py +++ b/examples/mistral/audio/chat_streaming.py @@ -6,26 +6,31 @@ from mistralai.models import UserMessage - def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "voxtral-small-latest" client = Mistral(api_key=api_key) with open("examples/fixtures/bcn_weather.mp3", "rb") as f: - file = client.files.upload(file=File(content=f, file_name=f.name), purpose="audio") + file = client.files.upload( + file=File(content=f, file_name=f.name), purpose="audio" + ) print(f"Uploaded audio file, id={file.id}") signed_url = client.files.get_signed_url(file_id=file.id) try: chat_response = client.chat.stream( model=model, - messages=[UserMessage(content=[ - {"type": "text", "text": "What is this audio about?"}, - { - "type": "input_audio", - "input_audio": signed_url.url, - }, - ])], + messages=[ + UserMessage( + content=[ + {"type": "text", "text": "What is this audio about?"}, + { + "type": "input_audio", + "input_audio": signed_url.url, + }, + ] + ) + ], ) for chunk in chat_response: print(chunk.data.choices[0].delta.content) @@ -33,5 +38,6 @@ def main(): client.files.delete(file_id=file.id) print(f"Deleted audio file, id={file.id}") + if __name__ == "__main__": main() diff --git a/examples/mistral/audio/transcription_diarize_async.py b/examples/mistral/audio/transcription_diarize_async.py new file mode 100644 index 00000000..ef5323f4 --- /dev/null +++ b/examples/mistral/audio/transcription_diarize_async.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python + +import os +import asyncio +from mistralai import Mistral, File + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + model = "voxtral-mini-2602" + + client = Mistral(api_key=api_key) + with open("examples/fixtures/bcn_weather.mp3", "rb") as f: + response = await client.audio.transcriptions.complete_async( + model=model, + file=File(content=f, file_name=f.name), + diarize=True, + 
timestamp_granularities=["segment"], + ) + for segment in response.segments: + speaker = segment.speaker_id or "unknown" + print( + f"[{segment.start:.1f}s → {segment.end:.1f}s] {speaker}: {segment.text.strip()}" + ) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py index 94a01c6f..e728b8fa 100644 --- a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py +++ b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py @@ -37,4 +37,3 @@ async def main(): if __name__ == "__main__": asyncio.run(main()) - diff --git a/pyproject.toml b/pyproject.toml index 680ae19b..dbb5d44a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,6 +29,9 @@ agents = [ "griffe >=1.7.3,<2.0", "authlib >=1.5.2,<2.0", ] +realtime = [ + "websockets >=13.0", +] [project.urls] Repository = "https://github.com/mistralai/client-python.git" diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 106c10b2..5191033a 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -40,6 +40,9 @@ exclude_files=( "examples/mistral/agents/async_conversation_run_stream.py" "examples/mistral/agents/async_conversation_run_mcp.py" "examples/mistral/agents/async_conversation_run_mcp_remote.py" + "examples/mistral/audio/async_realtime_transcription_microphone.py" + "examples/mistral/audio/async_realtime_transcription_stream.py" + "examples/mistral/audio/transcription_diarize_async.py" ) # Check if the no-extra-dep flag is set diff --git a/src/mistralai/audio.py b/src/mistralai/audio.py index 5687abdb..54430d49 100644 --- a/src/mistralai/audio.py +++ b/src/mistralai/audio.py @@ -1,5 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). 
    # region sdk-class-body
    @property
    def realtime(self) -> "RealtimeTranscription":
        """Returns a client for real-time audio transcription via WebSocket.

        Constructed lazily on first access and cached on the instance, so
        the optional realtime dependencies are only imported when used.
        """
        if not hasattr(self, "_realtime"):
            # Local import keeps `websockets` optional for non-realtime users.
            from mistralai.extra.realtime import RealtimeTranscription

            self._realtime = RealtimeTranscription(self.sdk_configuration)

        return self._realtime

    # endregion sdk-class-body
class MistralClientException(Exception):
    """Base exception for client errors."""


class RunException(MistralClientException):
    """Conversation run errors."""


class MCPException(MistralClientException):
    """MCP operation errors."""


class MCPAuthException(MCPException):
    """MCP authentication errors."""


class RealtimeTranscriptionException(MistralClientException):
    """Base realtime transcription exception.

    Carries an optional numeric error ``code`` and the server ``payload``
    (typed or raw) alongside the human-readable message.
    """

    def __init__(
        self,
        message: str,
        *,
        code: Optional[int] = None,
        payload: Optional[object] = None,
    ) -> None:
        super().__init__(message)
        self.code = code
        self.payload = payload


class RealtimeTranscriptionWSError(RealtimeTranscriptionException):
    """Error event received over the realtime transcription WebSocket."""

    def __init__(
        self,
        message: str,
        *,
        payload: Optional["RealtimeTranscriptionError"] = None,
        raw: Optional[object] = None,
    ) -> None:
        # Best-effort: pull a numeric code out of the typed payload; any
        # unexpected shape simply leaves the code unset.
        extracted: Optional[int] = None
        if payload is not None:
            try:
                candidate = getattr(payload.error, "code", None)
            except Exception:
                candidate = None
            if isinstance(candidate, int):
                extracted = candidate

        super().__init__(
            message, code=extracted, payload=payload if payload is not None else raw
        )
        # Keep both views available to callers.
        self.payload_typed = payload
        self.payload_raw = raw
from mistralai.models import (
    AudioFormat,
    RealtimeTranscriptionError,
    RealtimeTranscriptionSession,
    RealtimeTranscriptionSessionCreated,
    RealtimeTranscriptionSessionUpdated,
    TranscriptionStreamDone,
    TranscriptionStreamLanguage,
    TranscriptionStreamSegmentDelta,
    TranscriptionStreamTextDelta,
)


class UnknownRealtimeEvent(BaseModel):
    """
    Forward-compat fallback event:
    - unknown message type
    - invalid JSON payload
    - schema validation failure
    """

    # Server-sent event type, when one was present in the payload.
    type: Optional[str]
    # The raw payload (dict or text) that could not be parsed.
    content: Any
    # Human-readable reason the payload fell back to this type.
    error: Optional[str] = None


# Union of every event the realtime connection can yield.
RealtimeEvent = Union[
    # session lifecycle
    RealtimeTranscriptionSessionCreated,
    RealtimeTranscriptionSessionUpdated,
    # server errors
    RealtimeTranscriptionError,
    # transcription events
    TranscriptionStreamLanguage,
    TranscriptionStreamSegmentDelta,
    TranscriptionStreamTextDelta,
    TranscriptionStreamDone,
    # forward-compat fallback
    UnknownRealtimeEvent,
]


# Maps the wire-level "type" field to the pydantic model that parses it.
_MESSAGE_MODELS: dict[str, Any] = {
    "session.created": RealtimeTranscriptionSessionCreated,
    "session.updated": RealtimeTranscriptionSessionUpdated,
    "error": RealtimeTranscriptionError,
    "transcription.language": TranscriptionStreamLanguage,
    "transcription.segment": TranscriptionStreamSegmentDelta,
    "transcription.text.delta": TranscriptionStreamTextDelta,
    "transcription.done": TranscriptionStreamDone,
}


def parse_realtime_event(payload: Any) -> RealtimeEvent:
    """
    Tolerant parser:
    - unknown event type -> UnknownRealtimeEvent
    - validation failures -> UnknownRealtimeEvent (includes error string)
    - invalid payload -> UnknownRealtimeEvent
    """
    if not isinstance(payload, dict):
        return UnknownRealtimeEvent(
            type=None, content=payload, error="expected JSON object"
        )

    msg_type = payload.get("type")
    if not isinstance(msg_type, str):
        return UnknownRealtimeEvent(
            type=None, content=payload, error="missing/invalid 'type'"
        )

    model_cls = _MESSAGE_MODELS.get(msg_type)
    if model_cls is None:
        return UnknownRealtimeEvent(
            type=msg_type, content=payload, error="unknown event type"
        )
    try:
        parsed = model_cls.model_validate(payload)
        return parsed
    except ValidationError as exc:
        return UnknownRealtimeEvent(type=msg_type, content=payload, error=str(exc))


class RealtimeConnection:
    """An open realtime-transcription WebSocket session.

    Wraps the raw websocket with typed send helpers and an async event
    iterator. Events received before the connection was handed over
    (the handshake prelude) are replayed first by :meth:`events`.
    """

    def __init__(
        self,
        websocket: ClientConnection,
        session: RealtimeTranscriptionSession,
        *,
        initial_events: Optional[list[RealtimeEvent]] = None,
    ) -> None:
        self._websocket = websocket
        self._session = session
        self._audio_format = session.audio_format
        self._closed = False
        # Handshake events queued for lossless replay to the consumer.
        self._initial_events: Deque[RealtimeEvent] = deque(initial_events or [])

    @property
    def request_id(self) -> str:
        """Server-assigned request id of this session."""
        return self._session.request_id

    @property
    def session(self) -> RealtimeTranscriptionSession:
        """The latest session state (updated as session events arrive)."""
        return self._session

    @property
    def audio_format(self) -> AudioFormat:
        """The audio format the session currently expects."""
        return self._audio_format

    @property
    def is_closed(self) -> bool:
        return self._closed

    async def send_audio(
        self, audio_bytes: Union[bytes, bytearray, memoryview]
    ) -> None:
        """Send one chunk of raw audio, base64-encoded per the protocol.

        Raises:
            RuntimeError: if the connection has been closed.
        """
        if self._closed:
            raise RuntimeError("Connection is closed")

        message = {
            "type": "input_audio.append",
            "audio": base64.b64encode(bytes(audio_bytes)).decode("ascii"),
        }
        await self._websocket.send(json.dumps(message))

    async def update_session(self, audio_format: AudioFormat) -> None:
        """Ask the server to switch to *audio_format* for subsequent audio.

        Raises:
            RuntimeError: if the connection has been closed.
        """
        if self._closed:
            raise RuntimeError("Connection is closed")

        self._audio_format = audio_format
        message = {
            "type": "session.update",
            "session": {"audio_format": audio_format.model_dump(mode="json")},
        }
        await self._websocket.send(json.dumps(message))

    async def end_audio(self) -> None:
        """Signal end-of-audio; a no-op once the connection is closed."""
        if self._closed:
            return
        await self._websocket.send(json.dumps({"type": "input_audio.end"}))

    async def close(self, *, code: int = 1000, reason: str = "") -> None:
        """Close the websocket; idempotent."""
        if self._closed:
            return
        self._closed = True
        await self._websocket.close(code=code, reason=reason)

    async def __aenter__(self) -> "RealtimeConnection":
        return self

    async def __aexit__(self, exc_type, exc, tb) -> None:
        await self.close()

    def __aiter__(self) -> AsyncIterator[RealtimeEvent]:
        return self.events()

    async def events(self) -> AsyncIterator[RealtimeEvent]:
        """Yield parsed events until the socket closes or iteration stops.

        Always closes the connection on exit (including cancellation).
        """
        # replay any handshake/prelude events (including session.created)
        while self._initial_events:
            ev = self._initial_events.popleft()
            self._apply_session_updates(ev)
            yield ev

        try:
            async for msg in self._websocket:
                text = (
                    msg.decode("utf-8", errors="replace")
                    if isinstance(msg, (bytes, bytearray))
                    else msg
                )
                try:
                    data = json.loads(text)
                except Exception as exc:
                    # Malformed frames become fallback events, never raises.
                    yield UnknownRealtimeEvent(
                        type=None, content=text, error=f"invalid JSON: {exc}"
                    )
                    continue

                ev = parse_realtime_event(data)
                self._apply_session_updates(ev)
                yield ev
        except CancelledError:
            # Treat cancellation as a normal end of iteration.
            pass
        finally:
            await self.close()

    def _apply_session_updates(self, ev: RealtimeEvent) -> None:
        """Track the latest session/audio-format from lifecycle events."""
        if isinstance(
            ev,
            (RealtimeTranscriptionSessionCreated, RealtimeTranscriptionSessionUpdated),
        ):
            self._session = ev.session
            self._audio_format = ev.session.audio_format
class RealtimeTranscription:
    """Client for realtime transcription over WebSocket (websockets >= 13.0)."""

    def __init__(self, sdk_config: SDKConfiguration) -> None:
        self._sdk_config = sdk_config

    def _build_url(
        self,
        model: str,
        *,
        server_url: Optional[str],
        query_params: Mapping[str, str],
    ) -> str:
        """Build the realtime endpoint URL with model + security query params.

        ``query_params`` take precedence over the ``model`` parameter and
        over any query string already present in the base URL.
        """
        if server_url is not None:
            base_url = utils.remove_suffix(server_url, "/")
        else:
            base_url, _ = self._sdk_config.get_server_details()

        url = generate_url(base_url, "/v1/audio/transcriptions/realtime", None)

        # Merge: existing query string < model < caller-supplied params.
        parsed = urlparse(url)
        merged = dict(parse_qsl(parsed.query, keep_blank_values=True))
        merged["model"] = model
        merged.update(dict(query_params))

        return urlunparse(parsed._replace(query=urlencode(merged)))

    async def connect(
        self,
        model: str,
        audio_format: Optional[AudioFormat] = None,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> RealtimeConnection:
        """Open a realtime transcription WebSocket and await the handshake.

        Args:
            model: Model ID, sent as a query parameter.
            audio_format: If given, a session.update is sent right after
                the handshake to switch formats.
            server_url: Overrides the SDK's configured server URL.
            timeout_ms: Handshake/open timeout; defaults to the SDK's.
            http_headers: Extra headers merged over the security headers.

        Returns:
            An open :class:`RealtimeConnection` with handshake events queued.

        Raises:
            RealtimeTranscriptionException: on any connection failure
                (the websocket is closed before re-raising).
        """
        if timeout_ms is None:
            timeout_ms = self._sdk_config.timeout_ms

        # Security may be configured as a callable factory.
        security = self._sdk_config.security
        if security is not None and callable(security):
            security = security()

        resolved_security = get_security_from_env(security, models.Security)

        headers: dict[str, str] = {}
        query_params: dict[str, str] = {}

        if resolved_security is not None:
            security_headers, security_query = get_security(resolved_security)
            headers |= security_headers
            # Query-style security: keep only the last value per key.
            for key, values in security_query.items():
                if values:
                    query_params[key] = values[-1]

        if http_headers is not None:
            headers |= dict(http_headers)

        url = self._build_url(model, server_url=server_url, query_params=query_params)

        # Force a websocket scheme even if an http(s) base URL was supplied.
        parsed = urlparse(url)
        if parsed.scheme == "https":
            parsed = parsed._replace(scheme="wss")
        elif parsed.scheme == "http":
            parsed = parsed._replace(scheme="ws")
        ws_url = urlunparse(parsed)
        open_timeout = None if timeout_ms is None else timeout_ms / 1000.0
        user_agent = self._sdk_config.user_agent

        websocket: Optional[ClientConnection] = None
        try:
            websocket = await connect(
                ws_url,
                additional_headers=dict(headers),
                open_timeout=open_timeout,
                user_agent_header=user_agent,
            )

            # Wait for session.created; keep every prelude event for replay.
            session, initial_events = await _recv_handshake(
                websocket, timeout_ms=timeout_ms
            )
            connection = RealtimeConnection(
                websocket=websocket,
                session=session,
                initial_events=initial_events,
            )

            if audio_format is not None:
                await connection.update_session(audio_format)

            return connection

        except RealtimeTranscriptionException:
            # Already a typed error: close the socket and propagate as-is.
            if websocket is not None:
                await websocket.close()
            raise
        except Exception as exc:
            if websocket is not None:
                await websocket.close()
            raise RealtimeTranscriptionException(f"Failed to connect: {exc}") from exc

    async def transcribe_stream(
        self,
        audio_stream: AsyncIterator[bytes],
        model: str,
        audio_format: Optional[AudioFormat] = None,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> AsyncIterator[RealtimeEvent]:
        """
        Flow
        - opens connection
        - streams audio in background
        - yields events from the connection
        """
        async with await self.connect(
            model=model,
            audio_format=audio_format,
            server_url=server_url,
            timeout_ms=timeout_ms,
            http_headers=http_headers,
        ) as connection:

            async def _send() -> None:
                # Background sender: pump the caller's audio iterator, then
                # signal end-of-audio.
                async for chunk in audio_stream:
                    if connection.is_closed:
                        break
                    await connection.send_audio(chunk)
                await connection.end_audio()

            send_task = asyncio.create_task(_send())

            try:
                async for event in connection:
                    yield event

                    # stop early (caller still sees the terminating event)
                    if isinstance(event, RealtimeTranscriptionError):
                        break
                    if getattr(event, "type", None) == "transcription.done":
                        break
            finally:
                # Always stop the sender and close, even if the consumer
                # abandons the iterator.
                send_task.cancel()
                try:
                    await send_task
                except asyncio.CancelledError:
                    pass
                await connection.close()
_send() -> None: + async for chunk in audio_stream: + if connection.is_closed: + break + await connection.send_audio(chunk) + await connection.end_audio() + + send_task = asyncio.create_task(_send()) + + try: + async for event in connection: + yield event + + # stop early (caller still sees the terminating event) + if isinstance(event, RealtimeTranscriptionError): + break + if getattr(event, "type", None) == "transcription.done": + break + finally: + send_task.cancel() + try: + await send_task + except asyncio.CancelledError: + pass + await connection.close() + + +def _extract_error_message(payload: dict) -> str: + err = payload.get("error") + if isinstance(err, dict): + msg = err.get("message") + if isinstance(msg, str): + return msg + if isinstance(msg, dict): + detail = msg.get("detail") + if isinstance(detail, str): + return detail + return "Realtime transcription error" + + +async def _recv_handshake( + websocket: ClientConnection, + *, + timeout_ms: Optional[int], +) -> tuple[RealtimeTranscriptionSession, list[RealtimeEvent]]: + """ + Read messages until session.created or error. + Replay all messages read during handshake as initial events (lossless). 
+ """ + timeout_s = None if timeout_ms is None else timeout_ms / 1000.0 + deadline = None if timeout_s is None else (time.monotonic() + timeout_s) + + initial_events: list[RealtimeEvent] = [] + + def remaining() -> Optional[float]: + if deadline is None: + return None + return max(0.0, deadline - time.monotonic()) + + try: + while True: + raw = await asyncio.wait_for(websocket.recv(), timeout=remaining()) + text = ( + raw.decode("utf-8", errors="replace") + if isinstance(raw, (bytes, bytearray)) + else raw + ) + + try: + payload = json.loads(text) + except Exception as exc: + initial_events.append( + UnknownRealtimeEvent( + type=None, content=text, error=f"invalid JSON: {exc}" + ) + ) + continue + + msg_type = payload.get("type") if isinstance(payload, dict) else None + if msg_type == "error" and isinstance(payload, dict): + parsed = parse_realtime_event(payload) + initial_events.append(parsed) + if isinstance(parsed, RealtimeTranscriptionError): + raise RealtimeTranscriptionWSError( + _extract_error_message(payload), + payload=parsed, + raw=payload, + ) + raise RealtimeTranscriptionWSError( + _extract_error_message(payload), + payload=None, + raw=payload, + ) + + event = parse_realtime_event(payload) + initial_events.append(event) + + if isinstance(event, RealtimeTranscriptionSessionCreated): + return event.session, initial_events + + except asyncio.TimeoutError as exc: + raise RealtimeTranscriptionException( + "Timeout waiting for session creation." 
+ ) from exc + except RealtimeTranscriptionException: + raise + except Exception as exc: + raise RealtimeTranscriptionException( + f"Unexpected websocket handshake failure: {exc}" + ) from exc diff --git a/uv.lock b/uv.lock index efffa7ad..85e04bd9 100644 --- a/uv.lock +++ b/uv.lock @@ -589,6 +589,9 @@ gcp = [ { name = "google-auth" }, { name = "requests" }, ] +realtime = [ + { name = "websockets" }, +] [package.dev-dependencies] dev = [ @@ -627,8 +630,9 @@ requires-dist = [ { name = "pyyaml", specifier = ">=6.0.2,<7.0.0" }, { name = "requests", marker = "extra == 'gcp'", specifier = ">=2.32.3" }, { name = "typing-inspection", specifier = ">=0.4.0" }, + { name = "websockets", marker = "extra == 'realtime'", specifier = ">=13.0" }, ] -provides-extras = ["gcp", "agents"] +provides-extras = ["gcp", "agents", "realtime"] [package.metadata.requires-dev] dev = [ @@ -1562,6 +1566,74 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3d/d8/2083a1daa7439a66f3a48589a57d576aa117726762618f6bb09fe3798796/uvicorn-0.40.0-py3-none-any.whl", hash = "sha256:c6c8f55bc8bf13eb6fa9ff87ad62308bbbc33d0b67f84293151efe87e0d5f2ee", size = 68502, upload-time = "2025-12-21T14:16:21.041Z" }, ] +[[package]] +name = "websockets" +version = "16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/04/24/4b2031d72e840ce4c1ccb255f693b15c334757fc50023e4db9537080b8c4/websockets-16.0.tar.gz", hash = "sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5", size = 179346, upload-time = "2026-01-10T09:23:47.181Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/74/221f58decd852f4b59cc3354cccaf87e8ef695fede361d03dc9a7396573b/websockets-16.0-cp310-cp310-macosx_10_9_universal2.whl", hash = 
"sha256:04cdd5d2d1dacbad0a7bf36ccbcd3ccd5a30ee188f2560b7a62a30d14107b31a", size = 177343, upload-time = "2026-01-10T09:22:21.28Z" }, + { url = "https://files.pythonhosted.org/packages/19/0f/22ef6107ee52ab7f0b710d55d36f5a5d3ef19e8a205541a6d7ffa7994e5a/websockets-16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8ff32bb86522a9e5e31439a58addbb0166f0204d64066fb955265c4e214160f0", size = 175021, upload-time = "2026-01-10T09:22:22.696Z" }, + { url = "https://files.pythonhosted.org/packages/10/40/904a4cb30d9b61c0e278899bf36342e9b0208eb3c470324a9ecbaac2a30f/websockets-16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:583b7c42688636f930688d712885cf1531326ee05effd982028212ccc13e5957", size = 175320, upload-time = "2026-01-10T09:22:23.94Z" }, + { url = "https://files.pythonhosted.org/packages/9d/2f/4b3ca7e106bc608744b1cdae041e005e446124bebb037b18799c2d356864/websockets-16.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7d837379b647c0c4c2355c2499723f82f1635fd2c26510e1f587d89bc2199e72", size = 183815, upload-time = "2026-01-10T09:22:25.469Z" }, + { url = "https://files.pythonhosted.org/packages/86/26/d40eaa2a46d4302becec8d15b0fc5e45bdde05191e7628405a19cf491ccd/websockets-16.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:df57afc692e517a85e65b72e165356ed1df12386ecb879ad5693be08fac65dde", size = 185054, upload-time = "2026-01-10T09:22:27.101Z" }, + { url = "https://files.pythonhosted.org/packages/b0/ba/6500a0efc94f7373ee8fefa8c271acdfd4dca8bd49a90d4be7ccabfc397e/websockets-16.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2b9f1e0d69bc60a4a87349d50c09a037a2607918746f07de04df9e43252c77a3", size = 184565, upload-time = "2026-01-10T09:22:28.293Z" }, 
+ { url = "https://files.pythonhosted.org/packages/04/b4/96bf2cee7c8d8102389374a2616200574f5f01128d1082f44102140344cc/websockets-16.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:335c23addf3d5e6a8633f9f8eda77efad001671e80b95c491dd0924587ece0b3", size = 183848, upload-time = "2026-01-10T09:22:30.394Z" }, + { url = "https://files.pythonhosted.org/packages/02/8e/81f40fb00fd125357814e8c3025738fc4ffc3da4b6b4a4472a82ba304b41/websockets-16.0-cp310-cp310-win32.whl", hash = "sha256:37b31c1623c6605e4c00d466c9d633f9b812ea430c11c8a278774a1fde1acfa9", size = 178249, upload-time = "2026-01-10T09:22:32.083Z" }, + { url = "https://files.pythonhosted.org/packages/b4/5f/7e40efe8df57db9b91c88a43690ac66f7b7aa73a11aa6a66b927e44f26fa/websockets-16.0-cp310-cp310-win_amd64.whl", hash = "sha256:8e1dab317b6e77424356e11e99a432b7cb2f3ec8c5ab4dabbcee6add48f72b35", size = 178685, upload-time = "2026-01-10T09:22:33.345Z" }, + { url = "https://files.pythonhosted.org/packages/f2/db/de907251b4ff46ae804ad0409809504153b3f30984daf82a1d84a9875830/websockets-16.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:31a52addea25187bde0797a97d6fc3d2f92b6f72a9370792d65a6e84615ac8a8", size = 177340, upload-time = "2026-01-10T09:22:34.539Z" }, + { url = "https://files.pythonhosted.org/packages/f3/fa/abe89019d8d8815c8781e90d697dec52523fb8ebe308bf11664e8de1877e/websockets-16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:417b28978cdccab24f46400586d128366313e8a96312e4b9362a4af504f3bbad", size = 175022, upload-time = "2026-01-10T09:22:36.332Z" }, + { url = "https://files.pythonhosted.org/packages/58/5d/88ea17ed1ded2079358b40d31d48abe90a73c9e5819dbcde1606e991e2ad/websockets-16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:af80d74d4edfa3cb9ed973a0a5ba2b2a549371f8a741e0800cb07becdd20f23d", size = 175319, upload-time = "2026-01-10T09:22:37.602Z" }, + { url = "https://files.pythonhosted.org/packages/d2/ae/0ee92b33087a33632f37a635e11e1d99d429d3d323329675a6022312aac2/websockets-16.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:08d7af67b64d29823fed316505a89b86705f2b7981c07848fb5e3ea3020c1abe", size = 184631, upload-time = "2026-01-10T09:22:38.789Z" }, + { url = "https://files.pythonhosted.org/packages/c8/c5/27178df583b6c5b31b29f526ba2da5e2f864ecc79c99dae630a85d68c304/websockets-16.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7be95cfb0a4dae143eaed2bcba8ac23f4892d8971311f1b06f3c6b78952ee70b", size = 185870, upload-time = "2026-01-10T09:22:39.893Z" }, + { url = "https://files.pythonhosted.org/packages/87/05/536652aa84ddc1c018dbb7e2c4cbcd0db884580bf8e95aece7593fde526f/websockets-16.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d6297ce39ce5c2e6feb13c1a996a2ded3b6832155fcfc920265c76f24c7cceb5", size = 185361, upload-time = "2026-01-10T09:22:41.016Z" }, + { url = "https://files.pythonhosted.org/packages/6d/e2/d5332c90da12b1e01f06fb1b85c50cfc489783076547415bf9f0a659ec19/websockets-16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1c1b30e4f497b0b354057f3467f56244c603a79c0d1dafce1d16c283c25f6e64", size = 184615, upload-time = "2026-01-10T09:22:42.442Z" }, + { url = "https://files.pythonhosted.org/packages/77/fb/d3f9576691cae9253b51555f841bc6600bf0a983a461c79500ace5a5b364/websockets-16.0-cp311-cp311-win32.whl", hash = "sha256:5f451484aeb5cafee1ccf789b1b66f535409d038c56966d6101740c1614b86c6", size = 178246, upload-time = "2026-01-10T09:22:43.654Z" }, + { url 
= "https://files.pythonhosted.org/packages/54/67/eaff76b3dbaf18dcddabc3b8c1dba50b483761cccff67793897945b37408/websockets-16.0-cp311-cp311-win_amd64.whl", hash = "sha256:8d7f0659570eefb578dacde98e24fb60af35350193e4f56e11190787bee77dac", size = 178684, upload-time = "2026-01-10T09:22:44.941Z" }, + { url = "https://files.pythonhosted.org/packages/84/7b/bac442e6b96c9d25092695578dda82403c77936104b5682307bd4deb1ad4/websockets-16.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:71c989cbf3254fbd5e84d3bff31e4da39c43f884e64f2551d14bb3c186230f00", size = 177365, upload-time = "2026-01-10T09:22:46.787Z" }, + { url = "https://files.pythonhosted.org/packages/b0/fe/136ccece61bd690d9c1f715baaeefd953bb2360134de73519d5df19d29ca/websockets-16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8b6e209ffee39ff1b6d0fa7bfef6de950c60dfb91b8fcead17da4ee539121a79", size = 175038, upload-time = "2026-01-10T09:22:47.999Z" }, + { url = "https://files.pythonhosted.org/packages/40/1e/9771421ac2286eaab95b8575b0cb701ae3663abf8b5e1f64f1fd90d0a673/websockets-16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86890e837d61574c92a97496d590968b23c2ef0aeb8a9bc9421d174cd378ae39", size = 175328, upload-time = "2026-01-10T09:22:49.809Z" }, + { url = "https://files.pythonhosted.org/packages/18/29/71729b4671f21e1eaa5d6573031ab810ad2936c8175f03f97f3ff164c802/websockets-16.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9b5aca38b67492ef518a8ab76851862488a478602229112c4b0d58d63a7a4d5c", size = 184915, upload-time = "2026-01-10T09:22:51.071Z" }, + { url = 
"https://files.pythonhosted.org/packages/97/bb/21c36b7dbbafc85d2d480cd65df02a1dc93bf76d97147605a8e27ff9409d/websockets-16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e0334872c0a37b606418ac52f6ab9cfd17317ac26365f7f65e203e2d0d0d359f", size = 186152, upload-time = "2026-01-10T09:22:52.224Z" }, + { url = "https://files.pythonhosted.org/packages/4a/34/9bf8df0c0cf88fa7bfe36678dc7b02970c9a7d5e065a3099292db87b1be2/websockets-16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a0b31e0b424cc6b5a04b8838bbaec1688834b2383256688cf47eb97412531da1", size = 185583, upload-time = "2026-01-10T09:22:53.443Z" }, + { url = "https://files.pythonhosted.org/packages/47/88/4dd516068e1a3d6ab3c7c183288404cd424a9a02d585efbac226cb61ff2d/websockets-16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:485c49116d0af10ac698623c513c1cc01c9446c058a4e61e3bf6c19dff7335a2", size = 184880, upload-time = "2026-01-10T09:22:55.033Z" }, + { url = "https://files.pythonhosted.org/packages/91/d6/7d4553ad4bf1c0421e1ebd4b18de5d9098383b5caa1d937b63df8d04b565/websockets-16.0-cp312-cp312-win32.whl", hash = "sha256:eaded469f5e5b7294e2bdca0ab06becb6756ea86894a47806456089298813c89", size = 178261, upload-time = "2026-01-10T09:22:56.251Z" }, + { url = "https://files.pythonhosted.org/packages/c3/f0/f3a17365441ed1c27f850a80b2bc680a0fa9505d733fe152fdf5e98c1c0b/websockets-16.0-cp312-cp312-win_amd64.whl", hash = "sha256:5569417dc80977fc8c2d43a86f78e0a5a22fee17565d78621b6bb264a115d4ea", size = 178693, upload-time = "2026-01-10T09:22:57.478Z" }, + { url = 
"https://files.pythonhosted.org/packages/cc/9c/baa8456050d1c1b08dd0ec7346026668cbc6f145ab4e314d707bb845bf0d/websockets-16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:878b336ac47938b474c8f982ac2f7266a540adc3fa4ad74ae96fea9823a02cc9", size = 177364, upload-time = "2026-01-10T09:22:59.333Z" }, + { url = "https://files.pythonhosted.org/packages/7e/0c/8811fc53e9bcff68fe7de2bcbe75116a8d959ac699a3200f4847a8925210/websockets-16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:52a0fec0e6c8d9a784c2c78276a48a2bdf099e4ccc2a4cad53b27718dbfd0230", size = 175039, upload-time = "2026-01-10T09:23:01.171Z" }, + { url = "https://files.pythonhosted.org/packages/aa/82/39a5f910cb99ec0b59e482971238c845af9220d3ab9fa76dd9162cda9d62/websockets-16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e6578ed5b6981005df1860a56e3617f14a6c307e6a71b4fff8c48fdc50f3ed2c", size = 175323, upload-time = "2026-01-10T09:23:02.341Z" }, + { url = "https://files.pythonhosted.org/packages/bd/28/0a25ee5342eb5d5f297d992a77e56892ecb65e7854c7898fb7d35e9b33bd/websockets-16.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:95724e638f0f9c350bb1c2b0a7ad0e83d9cc0c9259f3ea94e40d7b02a2179ae5", size = 184975, upload-time = "2026-01-10T09:23:03.756Z" }, + { url = "https://files.pythonhosted.org/packages/f9/66/27ea52741752f5107c2e41fda05e8395a682a1e11c4e592a809a90c6a506/websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0204dc62a89dc9d50d682412c10b3542d748260d743500a85c13cd1ee4bde82", size = 186203, upload-time = "2026-01-10T09:23:05.01Z" }, + { url = 
"https://files.pythonhosted.org/packages/37/e5/8e32857371406a757816a2b471939d51c463509be73fa538216ea52b792a/websockets-16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:52ac480f44d32970d66763115edea932f1c5b1312de36df06d6b219f6741eed8", size = 185653, upload-time = "2026-01-10T09:23:06.301Z" }, + { url = "https://files.pythonhosted.org/packages/9b/67/f926bac29882894669368dc73f4da900fcdf47955d0a0185d60103df5737/websockets-16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6e5a82b677f8f6f59e8dfc34ec06ca6b5b48bc4fcda346acd093694cc2c24d8f", size = 184920, upload-time = "2026-01-10T09:23:07.492Z" }, + { url = "https://files.pythonhosted.org/packages/3c/a1/3d6ccdcd125b0a42a311bcd15a7f705d688f73b2a22d8cf1c0875d35d34a/websockets-16.0-cp313-cp313-win32.whl", hash = "sha256:abf050a199613f64c886ea10f38b47770a65154dc37181bfaff70c160f45315a", size = 178255, upload-time = "2026-01-10T09:23:09.245Z" }, + { url = "https://files.pythonhosted.org/packages/6b/ae/90366304d7c2ce80f9b826096a9e9048b4bb760e44d3b873bb272cba696b/websockets-16.0-cp313-cp313-win_amd64.whl", hash = "sha256:3425ac5cf448801335d6fdc7ae1eb22072055417a96cc6b31b3861f455fbc156", size = 178689, upload-time = "2026-01-10T09:23:10.483Z" }, + { url = "https://files.pythonhosted.org/packages/f3/1d/e88022630271f5bd349ed82417136281931e558d628dd52c4d8621b4a0b2/websockets-16.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0", size = 177406, upload-time = "2026-01-10T09:23:12.178Z" }, + { url = "https://files.pythonhosted.org/packages/f2/78/e63be1bf0724eeb4616efb1ae1c9044f7c3953b7957799abb5915bffd38e/websockets-16.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = 
"sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904", size = 175085, upload-time = "2026-01-10T09:23:13.511Z" }, + { url = "https://files.pythonhosted.org/packages/bb/f4/d3c9220d818ee955ae390cf319a7c7a467beceb24f05ee7aaaa2414345ba/websockets-16.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4", size = 175328, upload-time = "2026-01-10T09:23:14.727Z" }, + { url = "https://files.pythonhosted.org/packages/63/bc/d3e208028de777087e6fb2b122051a6ff7bbcca0d6df9d9c2bf1dd869ae9/websockets-16.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e", size = 185044, upload-time = "2026-01-10T09:23:15.939Z" }, + { url = "https://files.pythonhosted.org/packages/ad/6e/9a0927ac24bd33a0a9af834d89e0abc7cfd8e13bed17a86407a66773cc0e/websockets-16.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4", size = 186279, upload-time = "2026-01-10T09:23:17.148Z" }, + { url = "https://files.pythonhosted.org/packages/b9/ca/bf1c68440d7a868180e11be653c85959502efd3a709323230314fda6e0b3/websockets-16.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1", size = 185711, upload-time = "2026-01-10T09:23:18.372Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f8/fdc34643a989561f217bb477cbc47a3a07212cbda91c0e4389c43c296ebf/websockets-16.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3", size = 184982, upload-time = 
"2026-01-10T09:23:19.652Z" }, + { url = "https://files.pythonhosted.org/packages/dd/d1/574fa27e233764dbac9c52730d63fcf2823b16f0856b3329fc6268d6ae4f/websockets-16.0-cp314-cp314-win32.whl", hash = "sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8", size = 177915, upload-time = "2026-01-10T09:23:21.458Z" }, + { url = "https://files.pythonhosted.org/packages/8a/f1/ae6b937bf3126b5134ce1f482365fde31a357c784ac51852978768b5eff4/websockets-16.0-cp314-cp314-win_amd64.whl", hash = "sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d", size = 178381, upload-time = "2026-01-10T09:23:22.715Z" }, + { url = "https://files.pythonhosted.org/packages/06/9b/f791d1db48403e1f0a27577a6beb37afae94254a8c6f08be4a23e4930bc0/websockets-16.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244", size = 177737, upload-time = "2026-01-10T09:23:24.523Z" }, + { url = "https://files.pythonhosted.org/packages/bd/40/53ad02341fa33b3ce489023f635367a4ac98b73570102ad2cdd770dacc9a/websockets-16.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e", size = 175268, upload-time = "2026-01-10T09:23:25.781Z" }, + { url = "https://files.pythonhosted.org/packages/74/9b/6158d4e459b984f949dcbbb0c5d270154c7618e11c01029b9bbd1bb4c4f9/websockets-16.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641", size = 175486, upload-time = "2026-01-10T09:23:27.033Z" }, + { url = 
"https://files.pythonhosted.org/packages/e5/2d/7583b30208b639c8090206f95073646c2c9ffd66f44df967981a64f849ad/websockets-16.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8", size = 185331, upload-time = "2026-01-10T09:23:28.259Z" }, + { url = "https://files.pythonhosted.org/packages/45/b0/cce3784eb519b7b5ad680d14b9673a31ab8dcb7aad8b64d81709d2430aa8/websockets-16.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e", size = 186501, upload-time = "2026-01-10T09:23:29.449Z" }, + { url = "https://files.pythonhosted.org/packages/19/60/b8ebe4c7e89fb5f6cdf080623c9d92789a53636950f7abacfc33fe2b3135/websockets-16.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944", size = 186062, upload-time = "2026-01-10T09:23:31.368Z" }, + { url = "https://files.pythonhosted.org/packages/88/a8/a080593f89b0138b6cba1b28f8df5673b5506f72879322288b031337c0b8/websockets-16.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206", size = 185356, upload-time = "2026-01-10T09:23:32.627Z" }, + { url = "https://files.pythonhosted.org/packages/c2/b6/b9afed2afadddaf5ebb2afa801abf4b0868f42f8539bfe4b071b5266c9fe/websockets-16.0-cp314-cp314t-win32.whl", hash = "sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6", size = 178085, upload-time = "2026-01-10T09:23:33.816Z" }, + { url = 
"https://files.pythonhosted.org/packages/9f/3e/28135a24e384493fa804216b79a6a6759a38cc4ff59118787b9fb693df93/websockets-16.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd", size = 178531, upload-time = "2026-01-10T09:23:35.016Z" }, + { url = "https://files.pythonhosted.org/packages/72/07/c98a68571dcf256e74f1f816b8cc5eae6eb2d3d5cfa44d37f801619d9166/websockets-16.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:349f83cd6c9a415428ee1005cadb5c2c56f4389bc06a9af16103c3bc3dcc8b7d", size = 174947, upload-time = "2026-01-10T09:23:36.166Z" }, + { url = "https://files.pythonhosted.org/packages/7e/52/93e166a81e0305b33fe416338be92ae863563fe7bce446b0f687b9df5aea/websockets-16.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:4a1aba3340a8dca8db6eb5a7986157f52eb9e436b74813764241981ca4888f03", size = 175260, upload-time = "2026-01-10T09:23:37.409Z" }, + { url = "https://files.pythonhosted.org/packages/56/0c/2dbf513bafd24889d33de2ff0368190a0e69f37bcfa19009ef819fe4d507/websockets-16.0-pp311-pypy311_pp73-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f4a32d1bd841d4bcbffdcb3d2ce50c09c3909fbead375ab28d0181af89fd04da", size = 176071, upload-time = "2026-01-10T09:23:39.158Z" }, + { url = "https://files.pythonhosted.org/packages/a5/8f/aea9c71cc92bf9b6cc0f7f70df8f0b420636b6c96ef4feee1e16f80f75dd/websockets-16.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0298d07ee155e2e9fda5be8a9042200dd2e3bb0b8a38482156576f863a9d457c", size = 176968, upload-time = "2026-01-10T09:23:41.031Z" }, + { url = 
"https://files.pythonhosted.org/packages/9a/3f/f70e03f40ffc9a30d817eef7da1be72ee4956ba8d7255c399a01b135902a/websockets-16.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a653aea902e0324b52f1613332ddf50b00c06fdaf7e92624fbf8c77c78fa5767", size = 178735, upload-time = "2026-01-10T09:23:42.259Z" }, + { url = "https://files.pythonhosted.org/packages/6f/28/258ebab549c2bf3e64d2b0217b973467394a9cea8c42f70418ca2c5d0d2e/websockets-16.0-py3-none-any.whl", hash = "sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec", size = 171598, upload-time = "2026-01-10T09:23:45.395Z" }, +] + [[package]] name = "zipp" version = "3.23.0" From 102be7d9675189743d1806bc7ccc0965d47f8faa Mon Sep 17 00:00:00 2001 From: "alexandre.abouchahine" Date: Tue, 3 Feb 2026 15:40:36 +0100 Subject: [PATCH 184/223] add new audio params --- .speakeasy/gen.lock | 79 ++++++++----------- .speakeasy/workflow.lock | 11 ++- docs/models/audiotranscriptionrequest.md | 2 + .../models/audiotranscriptionrequeststream.md | 2 + docs/models/timestampgranularity.md | 3 +- docs/models/transcriptionsegmentchunk.md | 2 + .../models/transcriptionstreamsegmentdelta.md | 1 + docs/sdks/transcriptions/README.md | 8 +- .../audio/transcription_diarize_async.py | 4 +- pyproject.toml | 2 + scripts/run_examples.sh | 3 +- src/mistralai/audio.py | 14 ++-- .../models/audiotranscriptionrequest.py | 8 ++ .../models/audiotranscriptionrequeststream.py | 8 ++ src/mistralai/models/timestampgranularity.py | 5 +- .../models/transcriptionsegmentchunk.py | 43 +++++++++- .../models/transcriptionstreamsegmentdelta.py | 40 +++++++++- src/mistralai/transcriptions.py | 24 ++++++ uv.lock | 2 + 19 files changed, 190 insertions(+), 71 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 9d51b30a..f6c0f0a2 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,7 +1,7 @@ lockVersion: 2.0.0 id: 
2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: cc385dce976ac06e6d062e992f0ee380 + docChecksum: e4b3b07fe28f4666261325e923d6c5d9 docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 @@ -11,9 +11,9 @@ management: installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 8b0735b6-5924-48f1-ade2-47cb374c76bc - pristine_commit_hash: a9971b936f50486e2e4ceef95d0b2c4708633219 - pristine_tree_hash: 51b8a57de0bf62da607fe0023eec1124458ebee9 + generation_id: 00cab5ea-60fa-456d-ad3f-1ae32427d619 + pristine_commit_hash: b6e4b5c0cd6a42df18b2e7aa44ac696d48576d06 + pristine_tree_hash: b358b046bcef8a5f9b8898d98a4d9fbf82b52e6e features: python: additionalDependencies: 1.0.0 @@ -293,12 +293,12 @@ trackedFiles: pristine_git_object: d174ab9959cadde659f76db94ed87c743e0f6783 docs/models/audiotranscriptionrequest.md: id: ebf59641bc84 - last_write_checksum: sha1:b76d6e7ee3f1a0ca96e1064db61896e287027711 - pristine_git_object: f2e17dd35eda24a48b0c105ecce63a73d754e051 + last_write_checksum: sha1:c55c97a06726812323a031897beffbb160021c05 + pristine_git_object: d7f5bd51b1289f0eb481d86a71bb483ee50bbc40 docs/models/audiotranscriptionrequeststream.md: id: 79b5f721b753 - last_write_checksum: sha1:e8fc60f874bb7e8ee03c4e05bdf88b2db1afbfaf - pristine_git_object: 975e437a299efb27c069812f424a0107999de640 + last_write_checksum: sha1:df6825c05b5a02dcf904ebaa40fb97e9186248cc + pristine_git_object: 5d64964d1a635da912f2553c306fb8654ebfca2e docs/models/basemodelcard.md: id: 2f62bfbd650e last_write_checksum: sha1:7ee94bd9ceb6af84024863aa8183540bee7ffcce @@ -1533,8 +1533,8 @@ trackedFiles: pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f docs/models/timestampgranularity.md: id: eb4d5a8e6f08 - last_write_checksum: sha1:c2134d9f1f96d4eef48cedfe2b93eb061d5ea47f - pristine_git_object: 0d2a8054590463a167f69c36c00b8f2fc3c7906d + last_write_checksum: 
sha1:e256a5e8c6010d500841295b89d88d0eface3b88 + pristine_git_object: d20012ea9925446c16c9162304642ba48391d34d docs/models/tool.md: id: 8966139dbeed last_write_checksum: sha1:1725bf53fc9f1ca3f332322d91de24c9d58adc6a @@ -1653,8 +1653,8 @@ trackedFiles: pristine_git_object: 1bc0189c5d1833c946a71c9773346e21b08d2404 docs/models/transcriptionsegmentchunk.md: id: f09db8b2273e - last_write_checksum: sha1:c94ef1aa3dc2568ec77d186fa9061598f0ebccf1 - pristine_git_object: bebc9f72f521cf9cbd1818d53239cd632a025a31 + last_write_checksum: sha1:b89ee132a3c63e56806f3f395c98a9e7e5e9c7d0 + pristine_git_object: f620b96a75a0b9c6e015ae1f460dcccb80d113ee docs/models/transcriptionstreamdone.md: id: 2253923d93cf last_write_checksum: sha1:043ebcd284007f8c8536f2726ec5f525abffeb6b @@ -1685,8 +1685,8 @@ trackedFiles: pristine_git_object: e93521e10d43299676f44c8297608cc94c6106e6 docs/models/transcriptionstreamsegmentdelta.md: id: f59c3fb696f2 - last_write_checksum: sha1:d44b6c1359c0ed504f97edb46b3acf0145967fe7 - pristine_git_object: 3deeedf067c833cae8df1ab366a2e54b3f9e9186 + last_write_checksum: sha1:4d03e881a4ad9c3bed6075bb8e25d00af391652c + pristine_git_object: 2ab32f9783f6645bba7603279c03db4465c70fff docs/models/transcriptionstreamsegmentdeltatype.md: id: 03ee222a3afd last_write_checksum: sha1:d02b5f92cf2d8182aeaa8dd3428b988ab4fc0fad @@ -1841,8 +1841,8 @@ trackedFiles: pristine_git_object: efcb99314c7d07a3dc556c297333046fc5d9e097 docs/sdks/transcriptions/README.md: id: 089cf94ecf47 - last_write_checksum: sha1:fdf785e4cbab20aec41122735435a38f582f7f29 - pristine_git_object: 3243258c4debd94e10c98c2b18dcc47838143a5b + last_write_checksum: sha1:01e68371b7a94cb35d6435efd3ef9247e8c27a94 + pristine_git_object: dabab00e85a3f480c8dc3dd7b792e68420ae08b6 py.typed: id: 258c3ed47ae4 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 @@ -1881,8 +1881,8 @@ trackedFiles: pristine_git_object: 73e4ee3c885f7c3472a9dc5c0546c02d4e19a1c4 src/mistralai/audio.py: id: c398f6a11e24 - last_write_checksum: 
sha1:11f9713b4f970509cffe0e6122c61f9aeafc9e73 - pristine_git_object: 5687abdb5676903661a33a3bee115f289f5fe9df + last_write_checksum: sha1:aa75fa00e00d8059121d8de60844d70d50203661 + pristine_git_object: 3de29053f34654907c423ca6600f216f6b0dcbe0 src/mistralai/basesdk.py: id: 3127264590ce last_write_checksum: sha1:5340f1c5976fd87d3b17b285535b63bbbe7db120 @@ -2089,12 +2089,12 @@ trackedFiles: pristine_git_object: 48ab648c3525fcc9fe1c722b7beee0f649e30e7a src/mistralai/models/audiotranscriptionrequest.py: id: 4c6a6fee484a - last_write_checksum: sha1:d8fb192581056b4ae053f9e6919874850462cb03 - pristine_git_object: 308e2599f4ba8878b0fc20ee2660289b55ae7c9a + last_write_checksum: sha1:8dd41335ffd46dd1099bdb20baac32d043c5936c + pristine_git_object: 86417b4235292de3ab1d2b46116ce0ba94010087 src/mistralai/models/audiotranscriptionrequeststream.py: id: 863eca721e72 - last_write_checksum: sha1:a7ec74e5e05a705f2d61d1fe8a635178bcea3cd6 - pristine_git_object: 04374503f931f3964851d09def70535276bdf194 + last_write_checksum: sha1:010618236f3da1c99d63d334266622cf84e6b09f + pristine_git_object: 1f4087e8d33c8a3560d5ce58f2a1a7bc4627556b src/mistralai/models/basemodelcard.py: id: 5554644ee6f2 last_write_checksum: sha1:aa5af32cda04d45bcdf2c8fb380529c4fbc828aa @@ -2869,8 +2869,8 @@ trackedFiles: pristine_git_object: 627ae4883698696774b7a285a73326a4509c6828 src/mistralai/models/timestampgranularity.py: id: e0cb6c4efa2a - last_write_checksum: sha1:2b554048013632407c391444d972e29362751468 - pristine_git_object: 02816df67dd326a17d27dc815c49c6e1172693b8 + last_write_checksum: sha1:68ea11a4e27f23b2fcc976d0a8eeb95f6f28ba85 + pristine_git_object: 5bda890f500228ba0c3dc234edf09906b88cb522 src/mistralai/models/tool.py: id: c0a9b60b6cf1 last_write_checksum: sha1:805030012b6cf4d6159c1515b44e1c999ea2349a @@ -2929,8 +2929,8 @@ trackedFiles: pristine_git_object: 54a98a5ba7b83a6b7f6a39046b400a61e9889898 src/mistralai/models/transcriptionsegmentchunk.py: id: ccd6d5675b49 - last_write_checksum: 
sha1:367abd8a8182d9db9f2b19540aed2b974ad7bbe2 - pristine_git_object: aa30f053a624b25c7fd1739c05f406a81873ff60 + last_write_checksum: sha1:01b1c1c52a1e324c8f874586cdd0349fed35443c + pristine_git_object: 40ad20b3abc2f0b2c0d2d695ba89237f66cc0b2b src/mistralai/models/transcriptionstreamdone.py: id: 42177659bf0f last_write_checksum: sha1:5fda2b766b2af41749006835e45c95f708eddb28 @@ -2949,8 +2949,8 @@ trackedFiles: pristine_git_object: 15b7514415e536bb04fd1a69ccea20615b5b1fcf src/mistralai/models/transcriptionstreamsegmentdelta.py: id: 83d02b065099 - last_write_checksum: sha1:1f48714d450fff004f9cf24b81749848240fe722 - pristine_git_object: d779ed837913c8c13a4599a06a2ed75afa827a48 + last_write_checksum: sha1:3f70d4d58d8fedb784d056425662e7dc2f9ed244 + pristine_git_object: 550c83e7073bc99fdac6a0d59c5c30daa9d35f43 src/mistralai/models/transcriptionstreamtextdelta.py: id: ce0861d8affd last_write_checksum: sha1:84a3b6c6d84a896e59e2874de59d812d3db657a5 @@ -3017,8 +3017,8 @@ trackedFiles: pristine_git_object: 7e77925ddc9aa01c4e5f8ba2ca52aa7f32e89859 src/mistralai/transcriptions.py: id: ba6b040274f2 - last_write_checksum: sha1:079bcd1c4a6b1d74e97cc6d77bccf4eea1232cd7 - pristine_git_object: bdbeb1ccbb938c825e5c3371a0f761a90a6e17b8 + last_write_checksum: sha1:0cd336f14cccb581ff955feaf8bc6f7df185f27b + pristine_git_object: 90f2e58a3677e922cb5c8aac4b30d5e697ef2f05 src/mistralai/types/__init__.py: id: b89b8375c971 last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed @@ -3824,14 +3824,14 @@ examples: application/json: {"model": "Beetle", "text": "", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "language": ""} userExample: requestBody: - multipart/form-data: {"model": "voxtral-mini-latest", "stream": false} + multipart/form-data: {"model": "voxtral-mini-latest", "stream": false, "diarize": false} responses: "200": application/json: {"model": "voxtral-mini-2507", "text": "This week, I traveled to Chicago to deliver my final farewell address to the 
nation, following in the tradition of presidents before me. It was an opportunity to say thank you. Whether we've seen eye to eye or rarely agreed at all, my conversations with you, the American people, in living rooms, in schools, at farms and on factory floors, at diners and on distant military outposts, All these conversations are what have kept me honest, kept me inspired, and kept me going. Every day, I learned from you. You made me a better President, and you made me a better man.\nOver the course of these eight years, I've seen the goodness, the resilience, and the hope of the American people. I've seen neighbors looking out for each other as we rescued our economy from the worst crisis of our lifetimes. I've hugged cancer survivors who finally know the security of affordable health care. I've seen communities like Joplin rebuild from disaster, and cities like Boston show the world that no terrorist will ever break the American spirit. I've seen the hopeful faces of young graduates and our newest military officers. I've mourned with grieving families searching for answers. And I found grace in a Charleston church. I've seen our scientists help a paralyzed man regain his sense of touch, and our wounded warriors walk again. I've seen our doctors and volunteers rebuild after earthquakes and stop pandemics in their tracks. I've learned from students who are building robots and curing diseases, and who will change the world in ways we can't even imagine. I've seen the youngest of children remind us of our obligations to care for our refugees, to work in peace, and above all, to look out for each other.\nThat's what's possible when we come together in the slow, hard, sometimes frustrating, but always vital work of self-government. But we can't take our democracy for granted. All of us, regardless of party, should throw ourselves into the work of citizenship. Not just when there is an election. Not just when our own narrow interest is at stake. 
But over the full span of a lifetime. If you're tired of arguing with strangers on the Internet, try to talk with one in real life. If something needs fixing, lace up your shoes and do some organizing. If you're disappointed by your elected officials, then grab a clipboard, get some signatures, and run for office yourself.\nOur success depends on our participation, regardless of which way the pendulum of power swings. It falls on each of us to be guardians of our democracy, to embrace the joyous task we've been given to continually try to improve this great nation of ours. Because for all our outward differences, we all share the same proud title – citizen.\nIt has been the honor of my life to serve you as President. Eight years later, I am even more optimistic about our country's promise. And I look forward to working along your side as a citizen for all my days that remain.\nThanks, everybody. God bless you. And God bless the United States of America.\n", "segments": [], "usage": {"prompt_tokens": 4, "completion_tokens": 635, "total_tokens": 3264, "prompt_audio_seconds": 203}, "language": "en"} audio_api_v1_transcriptions_post_stream: speakeasy-default-audio-api-v1-transcriptions-post-stream: requestBody: - multipart/form-data: {"model": "Camry", "stream": true} + multipart/form-data: {"model": "Camry", "stream": true, "diarize": false} agents_api_v1_conversations_delete: speakeasy-default-agents-api-v1-conversations-delete: parameters: @@ -3897,21 +3897,6 @@ examples: application/json: {} examplesVersion: 1.0.2 generatedTests: {} -releaseNotes: | - ## Python SDK Changes: - * `mistral.beta.conversations.restart_stream()`: `request.agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.conversations.start()`: `request.agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.conversations.list()`: `response.[].[agent_conversation].agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.conversations.get()`: 
`response.[agent_conversation].agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.conversations.restart()`: `request.agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.conversations.start_stream()`: `request.agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.agents.get()`: `request.agent_version` **Changed** **Breaking** :warning: - * `mistral.beta.agents.get_version()`: `request.version` **Changed** **Breaking** :warning: - * `mistral.beta.agents.list_version_aliases()`: **Added** - * `mistral.models.list()`: `response.data.[].[fine-tuned].capabilities.audio_transcription` **Added** - * `mistral.models.retrieve()`: `response.[base].capabilities.audio_transcription` **Added** - * `mistral.beta.agents.create_version_alias()`: **Added** - * `mistral.files.list()`: `request.mimetypes` **Added** generatedFiles: - .gitattributes - .vscode/settings.json diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 3bb067a0..bb904c64 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -14,11 +14,10 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:56bcbb02148ddfabd64cb2dced1a9efe0f00d0fa106435fdd0fb2a889c1c6fed - sourceBlobDigest: sha256:c014d9220f14e04b573acf291c173954b8d34d03d852877a91756afb68ccc65b + sourceRevisionDigest: sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13 + sourceBlobDigest: sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56 tags: - latest - - speakeasy-sdk-regen-1769979831 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +36,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:56bcbb02148ddfabd64cb2dced1a9efe0f00d0fa106435fdd0fb2a889c1c6fed - sourceBlobDigest: sha256:c014d9220f14e04b573acf291c173954b8d34d03d852877a91756afb68ccc65b + sourceRevisionDigest: 
sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13 + sourceBlobDigest: sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:feb7bf2f6fab8456316453c7e14eda6201fe8649fe0ffcdb1eaa4580cc66a51e + codeSamplesRevisionDigest: sha256:8fa56ecd9dd6e5f831fb96c4cfd00c65f617a03ff67f876d75ecdf28cb5bbf3c workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 diff --git a/docs/models/audiotranscriptionrequest.md b/docs/models/audiotranscriptionrequest.md index f2e17dd3..d7f5bd51 100644 --- a/docs/models/audiotranscriptionrequest.md +++ b/docs/models/audiotranscriptionrequest.md @@ -12,4 +12,6 @@ | `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | | `stream` | *Optional[Literal[False]]* | :heavy_minus_sign: | N/A | | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | | `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | \ No newline at end of file diff --git a/docs/models/audiotranscriptionrequeststream.md b/docs/models/audiotranscriptionrequeststream.md index 975e437a..5d64964d 100644 --- a/docs/models/audiotranscriptionrequeststream.md +++ b/docs/models/audiotranscriptionrequeststream.md @@ -12,4 +12,6 @@ | `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. 
| | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | `stream` | *Optional[Literal[True]]* | :heavy_minus_sign: | N/A | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | `timestamp_granularities` | List[[models.TimestampGranularity](../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | \ No newline at end of file diff --git a/docs/models/timestampgranularity.md b/docs/models/timestampgranularity.md index 0d2a8054..d20012ea 100644 --- a/docs/models/timestampgranularity.md +++ b/docs/models/timestampgranularity.md @@ -5,4 +5,5 @@ | Name | Value | | --------- | --------- | -| `SEGMENT` | segment | \ No newline at end of file +| `SEGMENT` | segment | +| `WORD` | word | \ No newline at end of file diff --git a/docs/models/transcriptionsegmentchunk.md b/docs/models/transcriptionsegmentchunk.md index bebc9f72..f620b96a 100644 --- a/docs/models/transcriptionsegmentchunk.md +++ b/docs/models/transcriptionsegmentchunk.md @@ -8,5 +8,7 @@ | `text` | *str* | :heavy_check_mark: | N/A | | `start` | *float* | :heavy_check_mark: | N/A | | `end` | *float* | :heavy_check_mark: | N/A | +| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | | `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdelta.md b/docs/models/transcriptionstreamsegmentdelta.md index 3deeedf0..2ab32f97 100644 --- a/docs/models/transcriptionstreamsegmentdelta.md +++ b/docs/models/transcriptionstreamsegmentdelta.md @@ -8,5 +8,6 @@ | `text` | *str* | :heavy_check_mark: | N/A | | `start` | *float* | :heavy_check_mark: | N/A | | `end` | *float* | :heavy_check_mark: | N/A | +| `speaker_id` | 
*OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `type` | [Optional[models.TranscriptionStreamSegmentDeltaType]](../models/transcriptionstreamsegmentdeltatype.md) | :heavy_minus_sign: | N/A | | `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index 3243258c..dabab00e 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.audio.transcriptions.complete(model="Model X") + res = mistral.audio.transcriptions.complete(model="Model X", diarize=False) # Handle response print(res) @@ -42,6 +42,8 @@ with Mistral( | `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | | `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | | `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| | @@ -71,7 +73,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.audio.transcriptions.stream(model="Camry") + res = mistral.audio.transcriptions.stream(model="Camry", diarize=False) with res as event_stream: for event in event_stream: @@ -90,6 +92,8 @@ with Mistral( | `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | `language` | *OptionalNullable[str]* | :heavy_minus_sign: | Language of the audio, e.g. 'en'. Providing the language can boost accuracy. | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `diarize` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `context_bias` | List[*str*] | :heavy_minus_sign: | N/A | | `timestamp_granularities` | List[[models.TimestampGranularity](../../models/timestampgranularity.md)] | :heavy_minus_sign: | Granularities of timestamps to include in the response. | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| diff --git a/examples/mistral/audio/transcription_diarize_async.py b/examples/mistral/audio/transcription_diarize_async.py index ef5323f4..26754837 100644 --- a/examples/mistral/audio/transcription_diarize_async.py +++ b/examples/mistral/audio/transcription_diarize_async.py @@ -2,15 +2,17 @@ import os import asyncio +import pathlib from mistralai import Mistral, File +fixture_dir = pathlib.Path(__file__).parents[2] / "fixtures" async def main(): api_key = os.environ["MISTRAL_API_KEY"] model = "voxtral-mini-2602" client = Mistral(api_key=api_key) - with open("examples/fixtures/bcn_weather.mp3", "rb") as f: + with open(fixture_dir / "bcn_weather.mp3", "rb") as f: response = await client.audio.transcriptions.complete_async( model=model, file=File(content=f, file_name=f.name), diff --git a/pyproject.toml b/pyproject.toml index dbb5d44a..ef338022 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,6 +48,7 @@ dev = [ "mcp>=1.0,<2", "griffe>=1.7.3,<2", "authlib>=1.5.2,<2", + "websockets >=13.0", ] lint = [ "ruff>=0.11.10,<0.12", @@ -106,6 +107,7 @@ module = [ "jsonpath.*", "typing_inspect.*", "authlib.*", + "websockets.*", "mcp.*", "griffe.*" ] diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 5191033a..40c6d17e 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -39,10 +39,9 @@ exclude_files=( "examples/mistral/mcp_servers/stdio_server.py" "examples/mistral/agents/async_conversation_run_stream.py" "examples/mistral/agents/async_conversation_run_mcp.py" - "examples/mistral/agents/async_conversation_run_mcp_remote.py" + "examples/mistral/agents/async_conversation_run_mcp_remote.py" "examples/mistral/audio/async_realtime_transcription_microphone.py" "examples/mistral/audio/async_realtime_transcription_stream.py" - "examples/mistral/audio/transcription_diarize_async.py" ) # Check if the no-extra-dep flag is set diff --git a/src/mistralai/audio.py b/src/mistralai/audio.py index 54430d49..3de29053 100644 --- a/src/mistralai/audio.py 
+++ b/src/mistralai/audio.py @@ -1,5 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.transcriptions import Transcriptions +from typing import Optional + # region imports from typing import TYPE_CHECKING @@ -7,11 +12,6 @@ from mistralai.extra.realtime import RealtimeTranscription # endregion imports -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.transcriptions import Transcriptions -from typing import Optional - class Audio(BaseSDK): transcriptions: Transcriptions @@ -34,9 +34,9 @@ def _init_sdks(self): def realtime(self) -> "RealtimeTranscription": """Returns a client for real-time audio transcription via WebSocket.""" if not hasattr(self, "_realtime"): - from mistralai.extra.realtime import RealtimeTranscription + from mistralai.extra.realtime import RealtimeTranscription # pylint: disable=import-outside-toplevel - self._realtime = RealtimeTranscription(self.sdk_configuration) + self._realtime = RealtimeTranscription(self.sdk_configuration) # pylint: disable=attribute-defined-outside-init return self._realtime diff --git a/src/mistralai/models/audiotranscriptionrequest.py b/src/mistralai/models/audiotranscriptionrequest.py index 308e2599..86417b42 100644 --- a/src/mistralai/models/audiotranscriptionrequest.py +++ b/src/mistralai/models/audiotranscriptionrequest.py @@ -24,6 +24,8 @@ class AudioTranscriptionRequestTypedDict(TypedDict): r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" temperature: NotRequired[Nullable[float]] stream: Literal[False] + diarize: NotRequired[bool] + context_bias: NotRequired[List[str]] timestamp_granularities: NotRequired[List[TimestampGranularity]] r"""Granularities of timestamps to include in the response.""" @@ -55,6 +57,10 @@ class AudioTranscriptionRequest(BaseModel): FieldMetadata(multipart=True), ] = False + diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False + + context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None + timestamp_granularities: Annotated[ Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) ] = None @@ -69,6 +75,8 @@ def serialize_model(self, handler): "language", "temperature", "stream", + "diarize", + "context_bias", "timestamp_granularities", ] nullable_fields = ["file_url", "file_id", "language", "temperature"] diff --git a/src/mistralai/models/audiotranscriptionrequeststream.py b/src/mistralai/models/audiotranscriptionrequeststream.py index 04374503..1f4087e8 100644 --- a/src/mistralai/models/audiotranscriptionrequeststream.py +++ b/src/mistralai/models/audiotranscriptionrequeststream.py @@ -23,6 +23,8 @@ class AudioTranscriptionRequestStreamTypedDict(TypedDict): r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" temperature: NotRequired[Nullable[float]] stream: Literal[True] + diarize: NotRequired[bool] + context_bias: NotRequired[List[str]] timestamp_granularities: NotRequired[List[TimestampGranularity]] r"""Granularities of timestamps to include in the response.""" @@ -53,6 +55,10 @@ class AudioTranscriptionRequestStream(BaseModel): FieldMetadata(multipart=True), ] = True + diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False + + context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None + timestamp_granularities: Annotated[ Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) ] = None @@ -67,6 +73,8 @@ def serialize_model(self, handler): "language", "temperature", "stream", + "diarize", + "context_bias", "timestamp_granularities", ] nullable_fields = ["file_url", "file_id", "language", "temperature"] diff --git a/src/mistralai/models/timestampgranularity.py b/src/mistralai/models/timestampgranularity.py index 02816df6..5bda890f 100644 --- a/src/mistralai/models/timestampgranularity.py +++ b/src/mistralai/models/timestampgranularity.py @@ -4,4 +4,7 @@ from typing import Literal -TimestampGranularity = Literal["segment",] +TimestampGranularity = Literal[ + "segment", + "word", +] diff --git a/src/mistralai/models/transcriptionsegmentchunk.py b/src/mistralai/models/transcriptionsegmentchunk.py index aa30f053..40ad20b3 100644 --- a/src/mistralai/models/transcriptionsegmentchunk.py +++ b/src/mistralai/models/transcriptionsegmentchunk.py @@ -1,9 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic -from pydantic import ConfigDict +from pydantic import ConfigDict, model_serializer from typing import Any, Dict, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -15,6 +15,8 @@ class TranscriptionSegmentChunkTypedDict(TypedDict): text: str start: float end: float + score: NotRequired[Nullable[float]] + speaker_id: NotRequired[Nullable[str]] type: NotRequired[Type] @@ -30,6 +32,10 @@ class TranscriptionSegmentChunk(BaseModel): end: float + score: OptionalNullable[float] = UNSET + + speaker_id: OptionalNullable[str] = UNSET + type: Optional[Type] = "transcription_segment" @property @@ -39,3 +45,36 @@ def additional_properties(self): @additional_properties.setter def additional_properties(self, value): self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["score", "speaker_id", "type"] + nullable_fields = ["score", "speaker_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/models/transcriptionstreamsegmentdelta.py b/src/mistralai/models/transcriptionstreamsegmentdelta.py index d779ed83..550c83e7 100644 --- 
a/src/mistralai/models/transcriptionstreamsegmentdelta.py +++ b/src/mistralai/models/transcriptionstreamsegmentdelta.py @@ -1,9 +1,9 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL import pydantic -from pydantic import ConfigDict +from pydantic import ConfigDict, model_serializer from typing import Any, Dict, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -15,6 +15,7 @@ class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): text: str start: float end: float + speaker_id: NotRequired[Nullable[str]] type: NotRequired[TranscriptionStreamSegmentDeltaType] @@ -30,6 +31,8 @@ class TranscriptionStreamSegmentDelta(BaseModel): end: float + speaker_id: OptionalNullable[str] = UNSET + type: Optional[TranscriptionStreamSegmentDeltaType] = "transcription.segment" @property @@ -39,3 +42,36 @@ def additional_properties(self): @additional_properties.setter def additional_properties(self, value): self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["speaker_id", "type"] + nullable_fields = ["speaker_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = 
v + + return m diff --git a/src/mistralai/transcriptions.py b/src/mistralai/transcriptions.py index bdbeb1cc..90f2e58a 100644 --- a/src/mistralai/transcriptions.py +++ b/src/mistralai/transcriptions.py @@ -25,6 +25,8 @@ def complete( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[ List[models_timestampgranularity.TimestampGranularity] ] = None, @@ -41,6 +43,8 @@ def complete( :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. :param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -64,6 +68,8 @@ def complete( file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) @@ -130,6 +136,8 @@ async def complete_async( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[ List[models_timestampgranularity.TimestampGranularity] ] = None, @@ -146,6 +154,8 @@ async def complete_async( :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. :param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. 
:param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -169,6 +179,8 @@ async def complete_async( file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) @@ -235,6 +247,8 @@ def stream( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[ List[models_timestampgranularity.TimestampGranularity] ] = None, @@ -251,6 +265,8 @@ def stream( :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. :param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -274,6 +290,8 @@ def stream( file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) @@ -350,6 +368,8 @@ async def stream_async( file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, timestamp_granularities: Optional[ List[models_timestampgranularity.TimestampGranularity] ] = None, @@ -366,6 +386,8 @@ async def stream_async( :param file_id: ID of a file uploaded to /v1/files :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. 
:param temperature: + :param diarize: + :param context_bias: :param timestamp_granularities: Granularities of timestamps to include in the response. :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method @@ -389,6 +411,8 @@ async def stream_async( file_id=file_id, language=language, temperature=temperature, + diarize=diarize, + context_bias=context_bias, timestamp_granularities=timestamp_granularities, ) diff --git a/uv.lock b/uv.lock index 85e04bd9..7158ecbd 100644 --- a/uv.lock +++ b/uv.lock @@ -605,6 +605,7 @@ dev = [ { name = "types-authlib" }, { name = "types-python-dateutil" }, { name = "types-pyyaml" }, + { name = "websockets" }, ] lint = [ { name = "mypy" }, @@ -646,6 +647,7 @@ dev = [ { name = "types-authlib", specifier = ">=1.5.0.20250516,<2" }, { name = "types-python-dateutil", specifier = ">=2.9.0.20240316,<3" }, { name = "types-pyyaml", specifier = ">=6.0.12.20250516,<7" }, + { name = "websockets", specifier = ">=13.0" }, ] lint = [ { name = "mypy", specifier = "==1.15.0" }, From 494ac9b3efd8664ce5de381307d8389b24df828b Mon Sep 17 00:00:00 2001 From: "alexandre.abouchahine" Date: Tue, 3 Feb 2026 16:36:13 +0100 Subject: [PATCH 185/223] bump pyproject version --- .../mistral/agents/async_conversation_run.py | 2 +- pyproject.toml | 2 +- scripts/run_examples.sh | 16 +--------------- uv.lock | 2 +- 4 files changed, 4 insertions(+), 18 deletions(-) diff --git a/examples/mistral/agents/async_conversation_run.py b/examples/mistral/agents/async_conversation_run.py index 9e118037..27f9c870 100644 --- a/examples/mistral/agents/async_conversation_run.py +++ b/examples/mistral/agents/async_conversation_run.py @@ -6,7 +6,7 @@ from mistralai.extra.run.context import RunContext from mistralai.types import BaseModel -MODEL = "mistral-medium-latest" +MODEL = "mistral-medium-2505" def math_question_generator(question_num: int): diff --git a/pyproject.toml b/pyproject.toml index 
ef338022..2cb90876 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.11.1" +version = "1.12.0" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 40c6d17e..5bc6fc48 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -6,12 +6,8 @@ RETRY_COUNT=3 # Parse command line arguments while [[ $# -gt 0 ]]; do case $1 in - --no-extra-dep) - NO_EXTRA_DEP=true - shift - ;; --retry-count) - RETRY_COUNT="$2" + RETRY_COUNT="$1" shift 2 ;; --help) @@ -44,16 +40,6 @@ exclude_files=( "examples/mistral/audio/async_realtime_transcription_stream.py" ) -# Check if the no-extra-dep flag is set -if [ "$NO_EXTRA_DEP" = true ]; then - # Add more files to the exclude list - exclude_files+=( - "examples/mistral/agents/async_conversation_run_stream.py" - "examples/mistral/agents/async_conversation_run.py" - "examples/mistral/agents/async_multi_turn_conversation.py" - ) -fi - failed=0 echo "Skipping scripts" diff --git a/uv.lock b/uv.lock index 7158ecbd..fe22e76a 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "1.11.1" +version = "1.12.0" source = { editable = "." 
} dependencies = [ { name = "eval-type-backport" }, From 61f82f0a705c7d83d2fefd66d591e4b9cf45f9bc Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:11:08 +0100 Subject: [PATCH 186/223] chore: configure Speakeasy for mistralai.client module - Update version to 2.0.0a1 - Set moduleName to mistralai.client for PEP 420 namespace --- .speakeasy/gen.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 0cc6f059..b47a192d 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -26,7 +26,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.12.0 + version: 2.0.0a1 additionalDependencies: dev: pytest: ^8.2.2 @@ -63,7 +63,7 @@ python: license: "" maxMethodParams: 15 methodArguments: infer-optional-args - moduleName: "" + moduleName: mistralai.client multipartArrayFormat: legacy outputModelSuffix: output packageManager: uv From 79fa300722b6eb889142357a1f14f789c91ba5f5 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:22:38 +0100 Subject: [PATCH 187/223] chore: remove old generated SDK files Prepare for PEP 420 namespace migration by removing Speakeasy-generated files from src/mistralai/. Custom code in extra/ and _hooks/ is preserved. Speakeasy will regenerate the SDK under src/mistralai/client/. 
--- src/mistralai/__init__.py | 18 - src/mistralai/_hooks/__init__.py | 5 - src/mistralai/_hooks/sdkhooks.py | 76 - src/mistralai/_hooks/types.py | 113 - src/mistralai/_version.py | 15 - src/mistralai/accesses.py | 619 ---- src/mistralai/agents.py | 725 ----- src/mistralai/async_client.py | 15 - src/mistralai/audio.py | 43 - src/mistralai/basesdk.py | 370 --- src/mistralai/batch.py | 20 - src/mistralai/beta.py | 31 - src/mistralai/chat.py | 835 ----- src/mistralai/classifiers.py | 800 ----- src/mistralai/client.py | 14 - src/mistralai/conversations.py | 2865 ----------------- src/mistralai/documents.py | 1981 ------------ src/mistralai/embeddings.py | 240 -- src/mistralai/files.py | 1120 ------- src/mistralai/fim.py | 545 ---- src/mistralai/fine_tuning.py | 20 - src/mistralai/httpclient.py | 125 - src/mistralai/jobs.py | 1067 ------ src/mistralai/libraries.py | 946 ------ src/mistralai/mistral_agents.py | 2080 ------------ src/mistralai/mistral_jobs.py | 799 ----- src/mistralai/models/__init__.py | 2531 --------------- src/mistralai/models/agent.py | 142 - src/mistralai/models/agentaliasresponse.py | 23 - src/mistralai/models/agentconversation.py | 89 - src/mistralai/models/agentcreationrequest.py | 113 - src/mistralai/models/agenthandoffdoneevent.py | 33 - src/mistralai/models/agenthandoffentry.py | 76 - .../models/agenthandoffstartedevent.py | 33 - ..._api_v1_agents_create_or_update_aliasop.py | 26 - .../models/agents_api_v1_agents_deleteop.py | 16 - .../agents_api_v1_agents_get_versionop.py | 21 - .../models/agents_api_v1_agents_getop.py | 62 - ...ts_api_v1_agents_list_version_aliasesop.py | 16 - .../agents_api_v1_agents_list_versionsop.py | 33 - .../models/agents_api_v1_agents_listop.py | 98 - .../agents_api_v1_agents_update_versionop.py | 21 - .../models/agents_api_v1_agents_updateop.py | 23 - ...ts_api_v1_conversations_append_streamop.py | 28 - .../agents_api_v1_conversations_appendop.py | 28 - .../agents_api_v1_conversations_deleteop.py | 18 - 
.../agents_api_v1_conversations_getop.py | 35 - .../agents_api_v1_conversations_historyop.py | 18 - .../agents_api_v1_conversations_listop.py | 74 - .../agents_api_v1_conversations_messagesop.py | 18 - ...s_api_v1_conversations_restart_streamop.py | 28 - .../agents_api_v1_conversations_restartop.py | 28 - .../models/agentscompletionrequest.py | 192 -- .../models/agentscompletionstreamrequest.py | 190 -- src/mistralai/models/agentupdaterequest.py | 127 - src/mistralai/models/apiendpoint.py | 22 - src/mistralai/models/archiveftmodelout.py | 23 - src/mistralai/models/assistantmessage.py | 71 - src/mistralai/models/audiochunk.py | 20 - src/mistralai/models/audioencoding.py | 18 - src/mistralai/models/audioformat.py | 17 - .../models/audiotranscriptionrequest.py | 107 - .../models/audiotranscriptionrequeststream.py | 105 - src/mistralai/models/basemodelcard.py | 110 - src/mistralai/models/batcherror.py | 17 - src/mistralai/models/batchjobin.py | 82 - src/mistralai/models/batchjobout.py | 123 - src/mistralai/models/batchjobsout.py | 24 - src/mistralai/models/batchjobstatus.py | 15 - src/mistralai/models/batchrequest.py | 48 - src/mistralai/models/builtinconnectors.py | 13 - .../models/chatclassificationrequest.py | 20 - src/mistralai/models/chatcompletionchoice.py | 33 - src/mistralai/models/chatcompletionrequest.py | 215 -- .../models/chatcompletionresponse.py | 31 - .../models/chatcompletionstreamrequest.py | 217 -- src/mistralai/models/chatmoderationrequest.py | 83 - src/mistralai/models/checkpointout.py | 26 - src/mistralai/models/classificationrequest.py | 68 - .../models/classificationresponse.py | 24 - .../models/classificationtargetresult.py | 14 - .../models/classifierdetailedjobout.py | 158 - src/mistralai/models/classifierftmodelout.py | 108 - src/mistralai/models/classifierjobout.py | 167 - src/mistralai/models/classifiertargetin.py | 55 - src/mistralai/models/classifiertargetout.py | 24 - .../models/classifiertrainingparameters.py | 73 - 
.../models/classifiertrainingparametersin.py | 85 - src/mistralai/models/codeinterpretertool.py | 17 - src/mistralai/models/completionargs.py | 101 - src/mistralai/models/completionargsstop.py | 13 - src/mistralai/models/completionchunk.py | 34 - .../models/completiondetailedjobout.py | 165 - src/mistralai/models/completionevent.py | 14 - src/mistralai/models/completionftmodelout.py | 104 - src/mistralai/models/completionjobout.py | 178 - .../models/completionresponsestreamchoice.py | 63 - .../models/completiontrainingparameters.py | 78 - .../models/completiontrainingparametersin.py | 90 - src/mistralai/models/contentchunk.py | 42 - .../models/conversationappendrequest.py | 38 - .../models/conversationappendstreamrequest.py | 40 - src/mistralai/models/conversationevents.py | 78 - src/mistralai/models/conversationhistory.py | 59 - src/mistralai/models/conversationinputs.py | 14 - src/mistralai/models/conversationmessages.py | 28 - src/mistralai/models/conversationrequest.py | 154 - src/mistralai/models/conversationresponse.py | 52 - .../models/conversationrestartrequest.py | 107 - .../conversationrestartstreamrequest.py | 111 - .../models/conversationstreamrequest.py | 160 - src/mistralai/models/conversationusageinfo.py | 63 - ...elete_model_v1_models_model_id_deleteop.py | 18 - src/mistralai/models/deletefileout.py | 25 - src/mistralai/models/deletemodelout.py | 26 - src/mistralai/models/deltamessage.py | 61 - src/mistralai/models/documentlibrarytool.py | 22 - src/mistralai/models/documentout.py | 121 - src/mistralai/models/documenttextcontent.py | 13 - src/mistralai/models/documentupdatein.py | 65 - src/mistralai/models/documenturlchunk.py | 56 - src/mistralai/models/embeddingdtype.py | 13 - src/mistralai/models/embeddingrequest.py | 84 - src/mistralai/models/embeddingresponse.py | 28 - src/mistralai/models/embeddingresponsedata.py | 20 - src/mistralai/models/encodingformat.py | 10 - src/mistralai/models/entitytype.py | 16 - src/mistralai/models/eventout.py | 55 - 
src/mistralai/models/file.py | 33 - src/mistralai/models/filechunk.py | 23 - src/mistralai/models/filepurpose.py | 15 - .../models/files_api_routes_delete_fileop.py | 16 - .../files_api_routes_download_fileop.py | 16 - .../files_api_routes_get_signed_urlop.py | 25 - .../models/files_api_routes_list_filesop.py | 103 - .../files_api_routes_retrieve_fileop.py | 16 - .../models/files_api_routes_upload_fileop.py | 40 - src/mistralai/models/fileschema.py | 88 - src/mistralai/models/filesignedurl.py | 13 - src/mistralai/models/fimcompletionrequest.py | 124 - src/mistralai/models/fimcompletionresponse.py | 31 - .../models/fimcompletionstreamrequest.py | 122 - src/mistralai/models/finetuneablemodeltype.py | 10 - .../models/ftclassifierlossfunction.py | 10 - .../models/ftmodelcapabilitiesout.py | 26 - src/mistralai/models/ftmodelcard.py | 126 - src/mistralai/models/function.py | 23 - src/mistralai/models/functioncall.py | 23 - src/mistralai/models/functioncallentry.py | 77 - .../models/functioncallentryarguments.py | 15 - src/mistralai/models/functioncallevent.py | 36 - src/mistralai/models/functionname.py | 17 - src/mistralai/models/functionresultentry.py | 70 - src/mistralai/models/functiontool.py | 21 - src/mistralai/models/githubrepositoryin.py | 63 - src/mistralai/models/githubrepositoryout.py | 63 - src/mistralai/models/httpvalidationerror.py | 28 - src/mistralai/models/imagegenerationtool.py | 17 - src/mistralai/models/imageurl.py | 47 - src/mistralai/models/imageurlchunk.py | 33 - src/mistralai/models/inputentries.py | 37 - src/mistralai/models/inputs.py | 54 - src/mistralai/models/instructrequest.py | 42 - src/mistralai/models/jobin.py | 141 - src/mistralai/models/jobmetadataout.py | 78 - ...obs_api_routes_batch_cancel_batch_jobop.py | 16 - .../jobs_api_routes_batch_get_batch_jobop.py | 53 - .../jobs_api_routes_batch_get_batch_jobsop.py | 102 - ..._fine_tuning_archive_fine_tuned_modelop.py | 18 - ...es_fine_tuning_cancel_fine_tuning_jobop.py | 45 - 
...es_fine_tuning_create_fine_tuning_jobop.py | 38 - ...outes_fine_tuning_get_fine_tuning_jobop.py | 45 - ...utes_fine_tuning_get_fine_tuning_jobsop.py | 156 - ...tes_fine_tuning_start_fine_tuning_jobop.py | 43 - ...ine_tuning_unarchive_fine_tuned_modelop.py | 18 - ...s_fine_tuning_update_fine_tuned_modelop.py | 51 - src/mistralai/models/jobsout.py | 41 - src/mistralai/models/jsonschema.py | 55 - src/mistralai/models/legacyjobmetadataout.py | 119 - src/mistralai/models/libraries_delete_v1op.py | 16 - .../models/libraries_documents_delete_v1op.py | 21 - ...ents_get_extracted_text_signed_url_v1op.py | 21 - ...libraries_documents_get_signed_url_v1op.py | 21 - .../libraries_documents_get_status_v1op.py | 21 - ...braries_documents_get_text_content_v1op.py | 21 - .../models/libraries_documents_get_v1op.py | 21 - .../models/libraries_documents_list_v1op.py | 91 - .../libraries_documents_reprocess_v1op.py | 21 - .../models/libraries_documents_update_v1op.py | 28 - .../models/libraries_documents_upload_v1op.py | 56 - src/mistralai/models/libraries_get_v1op.py | 16 - .../models/libraries_share_create_v1op.py | 22 - .../models/libraries_share_delete_v1op.py | 23 - .../models/libraries_share_list_v1op.py | 16 - src/mistralai/models/libraries_update_v1op.py | 23 - src/mistralai/models/libraryin.py | 50 - src/mistralai/models/libraryinupdate.py | 47 - src/mistralai/models/libraryout.py | 110 - src/mistralai/models/listdocumentout.py | 19 - src/mistralai/models/listfilesout.py | 52 - src/mistralai/models/listlibraryout.py | 15 - src/mistralai/models/listsharingout.py | 15 - src/mistralai/models/messageentries.py | 18 - .../models/messageinputcontentchunks.py | 28 - src/mistralai/models/messageinputentry.py | 105 - .../models/messageoutputcontentchunks.py | 37 - src/mistralai/models/messageoutputentry.py | 103 - src/mistralai/models/messageoutputevent.py | 95 - src/mistralai/models/metricout.py | 54 - src/mistralai/models/mistralerror.py | 30 - 
src/mistralai/models/mistralpromptmode.py | 12 - src/mistralai/models/modelcapabilities.py | 41 - src/mistralai/models/modelconversation.py | 133 - src/mistralai/models/modellist.py | 34 - src/mistralai/models/moderationobject.py | 21 - src/mistralai/models/moderationresponse.py | 21 - src/mistralai/models/no_response_error.py | 17 - src/mistralai/models/ocrimageobject.py | 83 - src/mistralai/models/ocrpagedimensions.py | 25 - src/mistralai/models/ocrpageobject.py | 85 - src/mistralai/models/ocrrequest.py | 140 - src/mistralai/models/ocrresponse.py | 62 - src/mistralai/models/ocrtableobject.py | 34 - src/mistralai/models/ocrusageinfo.py | 51 - src/mistralai/models/outputcontentchunks.py | 37 - src/mistralai/models/paginationinfo.py | 25 - src/mistralai/models/prediction.py | 29 - src/mistralai/models/processingstatusout.py | 16 - .../models/realtimetranscriptionerror.py | 27 - .../realtimetranscriptionerrordetail.py | 29 - .../models/realtimetranscriptionsession.py | 20 - .../realtimetranscriptionsessioncreated.py | 30 - .../realtimetranscriptionsessionupdated.py | 30 - src/mistralai/models/referencechunk.py | 20 - src/mistralai/models/requestsource.py | 11 - src/mistralai/models/responsedoneevent.py | 25 - src/mistralai/models/responseerrorevent.py | 27 - src/mistralai/models/responseformat.py | 54 - src/mistralai/models/responseformats.py | 11 - src/mistralai/models/responsestartedevent.py | 24 - .../models/responsevalidationerror.py | 27 - ...retrieve_model_v1_models_model_id_getop.py | 38 - src/mistralai/models/retrievefileout.py | 91 - src/mistralai/models/sampletype.py | 17 - src/mistralai/models/sdkerror.py | 40 - src/mistralai/models/security.py | 25 - src/mistralai/models/shareenum.py | 14 - src/mistralai/models/sharingdelete.py | 55 - src/mistralai/models/sharingin.py | 59 - src/mistralai/models/sharingout.py | 59 - src/mistralai/models/source.py | 15 - src/mistralai/models/ssetypes.py | 19 - src/mistralai/models/systemmessage.py | 35 - 
.../models/systemmessagecontentchunks.py | 21 - src/mistralai/models/textchunk.py | 20 - src/mistralai/models/thinkchunk.py | 35 - src/mistralai/models/timestampgranularity.py | 10 - src/mistralai/models/tool.py | 19 - src/mistralai/models/toolcall.py | 25 - src/mistralai/models/toolchoice.py | 25 - src/mistralai/models/toolchoiceenum.py | 12 - .../models/toolexecutiondeltaevent.py | 44 - .../models/toolexecutiondoneevent.py | 44 - src/mistralai/models/toolexecutionentry.py | 80 - .../models/toolexecutionstartedevent.py | 44 - src/mistralai/models/toolfilechunk.py | 69 - src/mistralai/models/toolmessage.py | 66 - src/mistralai/models/toolreferencechunk.py | 74 - src/mistralai/models/tooltypes.py | 8 - src/mistralai/models/trainingfile.py | 17 - src/mistralai/models/transcriptionresponse.py | 79 - .../models/transcriptionsegmentchunk.py | 80 - .../models/transcriptionstreamdone.py | 85 - .../models/transcriptionstreamevents.py | 58 - .../models/transcriptionstreameventtypes.py | 12 - .../models/transcriptionstreamlanguage.py | 35 - .../models/transcriptionstreamsegmentdelta.py | 77 - .../models/transcriptionstreamtextdelta.py | 35 - src/mistralai/models/unarchiveftmodelout.py | 23 - src/mistralai/models/updateftmodelin.py | 47 - src/mistralai/models/uploadfileout.py | 88 - src/mistralai/models/usageinfo.py | 76 - src/mistralai/models/usermessage.py | 60 - src/mistralai/models/validationerror.py | 26 - src/mistralai/models/wandbintegration.py | 66 - src/mistralai/models/wandbintegrationout.py | 64 - src/mistralai/models/websearchpremiumtool.py | 17 - src/mistralai/models/websearchtool.py | 17 - src/mistralai/models_.py | 1063 ------ src/mistralai/ocr.py | 303 -- src/mistralai/py.typed | 1 - src/mistralai/sdk.py | 222 -- src/mistralai/sdkconfiguration.py | 53 - src/mistralai/transcriptions.py | 481 --- src/mistralai/types/__init__.py | 21 - src/mistralai/types/basemodel.py | 77 - src/mistralai/utils/__init__.py | 197 -- src/mistralai/utils/annotations.py | 79 - 
src/mistralai/utils/datetimes.py | 23 - src/mistralai/utils/enums.py | 134 - src/mistralai/utils/eventstreaming.py | 248 -- src/mistralai/utils/forms.py | 234 -- src/mistralai/utils/headers.py | 136 - src/mistralai/utils/logger.py | 27 - src/mistralai/utils/metadata.py | 118 - src/mistralai/utils/queryparams.py | 217 -- src/mistralai/utils/requestbodies.py | 66 - src/mistralai/utils/retries.py | 281 -- src/mistralai/utils/security.py | 192 -- src/mistralai/utils/serializers.py | 229 -- .../utils/unmarshal_json_response.py | 38 - src/mistralai/utils/url.py | 155 - src/mistralai/utils/values.py | 137 - 313 files changed, 35975 deletions(-) delete mode 100644 src/mistralai/__init__.py delete mode 100644 src/mistralai/_hooks/__init__.py delete mode 100644 src/mistralai/_hooks/sdkhooks.py delete mode 100644 src/mistralai/_hooks/types.py delete mode 100644 src/mistralai/_version.py delete mode 100644 src/mistralai/accesses.py delete mode 100644 src/mistralai/agents.py delete mode 100644 src/mistralai/async_client.py delete mode 100644 src/mistralai/audio.py delete mode 100644 src/mistralai/basesdk.py delete mode 100644 src/mistralai/batch.py delete mode 100644 src/mistralai/beta.py delete mode 100644 src/mistralai/chat.py delete mode 100644 src/mistralai/classifiers.py delete mode 100644 src/mistralai/client.py delete mode 100644 src/mistralai/conversations.py delete mode 100644 src/mistralai/documents.py delete mode 100644 src/mistralai/embeddings.py delete mode 100644 src/mistralai/files.py delete mode 100644 src/mistralai/fim.py delete mode 100644 src/mistralai/fine_tuning.py delete mode 100644 src/mistralai/httpclient.py delete mode 100644 src/mistralai/jobs.py delete mode 100644 src/mistralai/libraries.py delete mode 100644 src/mistralai/mistral_agents.py delete mode 100644 src/mistralai/mistral_jobs.py delete mode 100644 src/mistralai/models/__init__.py delete mode 100644 src/mistralai/models/agent.py delete mode 100644 src/mistralai/models/agentaliasresponse.py 
delete mode 100644 src/mistralai/models/agentconversation.py delete mode 100644 src/mistralai/models/agentcreationrequest.py delete mode 100644 src/mistralai/models/agenthandoffdoneevent.py delete mode 100644 src/mistralai/models/agenthandoffentry.py delete mode 100644 src/mistralai/models/agenthandoffstartedevent.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_deleteop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_get_versionop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_getop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_list_versionsop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_listop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_update_versionop.py delete mode 100644 src/mistralai/models/agents_api_v1_agents_updateop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_append_streamop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_appendop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_deleteop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_getop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_historyop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_listop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_messagesop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_restart_streamop.py delete mode 100644 src/mistralai/models/agents_api_v1_conversations_restartop.py delete mode 100644 src/mistralai/models/agentscompletionrequest.py delete mode 100644 src/mistralai/models/agentscompletionstreamrequest.py delete mode 100644 src/mistralai/models/agentupdaterequest.py delete mode 100644 
src/mistralai/models/apiendpoint.py delete mode 100644 src/mistralai/models/archiveftmodelout.py delete mode 100644 src/mistralai/models/assistantmessage.py delete mode 100644 src/mistralai/models/audiochunk.py delete mode 100644 src/mistralai/models/audioencoding.py delete mode 100644 src/mistralai/models/audioformat.py delete mode 100644 src/mistralai/models/audiotranscriptionrequest.py delete mode 100644 src/mistralai/models/audiotranscriptionrequeststream.py delete mode 100644 src/mistralai/models/basemodelcard.py delete mode 100644 src/mistralai/models/batcherror.py delete mode 100644 src/mistralai/models/batchjobin.py delete mode 100644 src/mistralai/models/batchjobout.py delete mode 100644 src/mistralai/models/batchjobsout.py delete mode 100644 src/mistralai/models/batchjobstatus.py delete mode 100644 src/mistralai/models/batchrequest.py delete mode 100644 src/mistralai/models/builtinconnectors.py delete mode 100644 src/mistralai/models/chatclassificationrequest.py delete mode 100644 src/mistralai/models/chatcompletionchoice.py delete mode 100644 src/mistralai/models/chatcompletionrequest.py delete mode 100644 src/mistralai/models/chatcompletionresponse.py delete mode 100644 src/mistralai/models/chatcompletionstreamrequest.py delete mode 100644 src/mistralai/models/chatmoderationrequest.py delete mode 100644 src/mistralai/models/checkpointout.py delete mode 100644 src/mistralai/models/classificationrequest.py delete mode 100644 src/mistralai/models/classificationresponse.py delete mode 100644 src/mistralai/models/classificationtargetresult.py delete mode 100644 src/mistralai/models/classifierdetailedjobout.py delete mode 100644 src/mistralai/models/classifierftmodelout.py delete mode 100644 src/mistralai/models/classifierjobout.py delete mode 100644 src/mistralai/models/classifiertargetin.py delete mode 100644 src/mistralai/models/classifiertargetout.py delete mode 100644 src/mistralai/models/classifiertrainingparameters.py delete mode 100644 
src/mistralai/models/classifiertrainingparametersin.py delete mode 100644 src/mistralai/models/codeinterpretertool.py delete mode 100644 src/mistralai/models/completionargs.py delete mode 100644 src/mistralai/models/completionargsstop.py delete mode 100644 src/mistralai/models/completionchunk.py delete mode 100644 src/mistralai/models/completiondetailedjobout.py delete mode 100644 src/mistralai/models/completionevent.py delete mode 100644 src/mistralai/models/completionftmodelout.py delete mode 100644 src/mistralai/models/completionjobout.py delete mode 100644 src/mistralai/models/completionresponsestreamchoice.py delete mode 100644 src/mistralai/models/completiontrainingparameters.py delete mode 100644 src/mistralai/models/completiontrainingparametersin.py delete mode 100644 src/mistralai/models/contentchunk.py delete mode 100644 src/mistralai/models/conversationappendrequest.py delete mode 100644 src/mistralai/models/conversationappendstreamrequest.py delete mode 100644 src/mistralai/models/conversationevents.py delete mode 100644 src/mistralai/models/conversationhistory.py delete mode 100644 src/mistralai/models/conversationinputs.py delete mode 100644 src/mistralai/models/conversationmessages.py delete mode 100644 src/mistralai/models/conversationrequest.py delete mode 100644 src/mistralai/models/conversationresponse.py delete mode 100644 src/mistralai/models/conversationrestartrequest.py delete mode 100644 src/mistralai/models/conversationrestartstreamrequest.py delete mode 100644 src/mistralai/models/conversationstreamrequest.py delete mode 100644 src/mistralai/models/conversationusageinfo.py delete mode 100644 src/mistralai/models/delete_model_v1_models_model_id_deleteop.py delete mode 100644 src/mistralai/models/deletefileout.py delete mode 100644 src/mistralai/models/deletemodelout.py delete mode 100644 src/mistralai/models/deltamessage.py delete mode 100644 src/mistralai/models/documentlibrarytool.py delete mode 100644 src/mistralai/models/documentout.py 
delete mode 100644 src/mistralai/models/documenttextcontent.py delete mode 100644 src/mistralai/models/documentupdatein.py delete mode 100644 src/mistralai/models/documenturlchunk.py delete mode 100644 src/mistralai/models/embeddingdtype.py delete mode 100644 src/mistralai/models/embeddingrequest.py delete mode 100644 src/mistralai/models/embeddingresponse.py delete mode 100644 src/mistralai/models/embeddingresponsedata.py delete mode 100644 src/mistralai/models/encodingformat.py delete mode 100644 src/mistralai/models/entitytype.py delete mode 100644 src/mistralai/models/eventout.py delete mode 100644 src/mistralai/models/file.py delete mode 100644 src/mistralai/models/filechunk.py delete mode 100644 src/mistralai/models/filepurpose.py delete mode 100644 src/mistralai/models/files_api_routes_delete_fileop.py delete mode 100644 src/mistralai/models/files_api_routes_download_fileop.py delete mode 100644 src/mistralai/models/files_api_routes_get_signed_urlop.py delete mode 100644 src/mistralai/models/files_api_routes_list_filesop.py delete mode 100644 src/mistralai/models/files_api_routes_retrieve_fileop.py delete mode 100644 src/mistralai/models/files_api_routes_upload_fileop.py delete mode 100644 src/mistralai/models/fileschema.py delete mode 100644 src/mistralai/models/filesignedurl.py delete mode 100644 src/mistralai/models/fimcompletionrequest.py delete mode 100644 src/mistralai/models/fimcompletionresponse.py delete mode 100644 src/mistralai/models/fimcompletionstreamrequest.py delete mode 100644 src/mistralai/models/finetuneablemodeltype.py delete mode 100644 src/mistralai/models/ftclassifierlossfunction.py delete mode 100644 src/mistralai/models/ftmodelcapabilitiesout.py delete mode 100644 src/mistralai/models/ftmodelcard.py delete mode 100644 src/mistralai/models/function.py delete mode 100644 src/mistralai/models/functioncall.py delete mode 100644 src/mistralai/models/functioncallentry.py delete mode 100644 src/mistralai/models/functioncallentryarguments.py 
delete mode 100644 src/mistralai/models/functioncallevent.py delete mode 100644 src/mistralai/models/functionname.py delete mode 100644 src/mistralai/models/functionresultentry.py delete mode 100644 src/mistralai/models/functiontool.py delete mode 100644 src/mistralai/models/githubrepositoryin.py delete mode 100644 src/mistralai/models/githubrepositoryout.py delete mode 100644 src/mistralai/models/httpvalidationerror.py delete mode 100644 src/mistralai/models/imagegenerationtool.py delete mode 100644 src/mistralai/models/imageurl.py delete mode 100644 src/mistralai/models/imageurlchunk.py delete mode 100644 src/mistralai/models/inputentries.py delete mode 100644 src/mistralai/models/inputs.py delete mode 100644 src/mistralai/models/instructrequest.py delete mode 100644 src/mistralai/models/jobin.py delete mode 100644 src/mistralai/models/jobmetadataout.py delete mode 100644 src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py delete mode 100644 src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py delete mode 100644 src/mistralai/models/jobsout.py delete mode 100644 src/mistralai/models/jsonschema.py delete 
mode 100644 src/mistralai/models/legacyjobmetadataout.py delete mode 100644 src/mistralai/models/libraries_delete_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_delete_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_get_signed_url_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_get_status_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_get_text_content_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_get_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_list_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_reprocess_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_update_v1op.py delete mode 100644 src/mistralai/models/libraries_documents_upload_v1op.py delete mode 100644 src/mistralai/models/libraries_get_v1op.py delete mode 100644 src/mistralai/models/libraries_share_create_v1op.py delete mode 100644 src/mistralai/models/libraries_share_delete_v1op.py delete mode 100644 src/mistralai/models/libraries_share_list_v1op.py delete mode 100644 src/mistralai/models/libraries_update_v1op.py delete mode 100644 src/mistralai/models/libraryin.py delete mode 100644 src/mistralai/models/libraryinupdate.py delete mode 100644 src/mistralai/models/libraryout.py delete mode 100644 src/mistralai/models/listdocumentout.py delete mode 100644 src/mistralai/models/listfilesout.py delete mode 100644 src/mistralai/models/listlibraryout.py delete mode 100644 src/mistralai/models/listsharingout.py delete mode 100644 src/mistralai/models/messageentries.py delete mode 100644 src/mistralai/models/messageinputcontentchunks.py delete mode 100644 src/mistralai/models/messageinputentry.py delete mode 100644 src/mistralai/models/messageoutputcontentchunks.py delete mode 100644 src/mistralai/models/messageoutputentry.py delete mode 100644 
src/mistralai/models/messageoutputevent.py delete mode 100644 src/mistralai/models/metricout.py delete mode 100644 src/mistralai/models/mistralerror.py delete mode 100644 src/mistralai/models/mistralpromptmode.py delete mode 100644 src/mistralai/models/modelcapabilities.py delete mode 100644 src/mistralai/models/modelconversation.py delete mode 100644 src/mistralai/models/modellist.py delete mode 100644 src/mistralai/models/moderationobject.py delete mode 100644 src/mistralai/models/moderationresponse.py delete mode 100644 src/mistralai/models/no_response_error.py delete mode 100644 src/mistralai/models/ocrimageobject.py delete mode 100644 src/mistralai/models/ocrpagedimensions.py delete mode 100644 src/mistralai/models/ocrpageobject.py delete mode 100644 src/mistralai/models/ocrrequest.py delete mode 100644 src/mistralai/models/ocrresponse.py delete mode 100644 src/mistralai/models/ocrtableobject.py delete mode 100644 src/mistralai/models/ocrusageinfo.py delete mode 100644 src/mistralai/models/outputcontentchunks.py delete mode 100644 src/mistralai/models/paginationinfo.py delete mode 100644 src/mistralai/models/prediction.py delete mode 100644 src/mistralai/models/processingstatusout.py delete mode 100644 src/mistralai/models/realtimetranscriptionerror.py delete mode 100644 src/mistralai/models/realtimetranscriptionerrordetail.py delete mode 100644 src/mistralai/models/realtimetranscriptionsession.py delete mode 100644 src/mistralai/models/realtimetranscriptionsessioncreated.py delete mode 100644 src/mistralai/models/realtimetranscriptionsessionupdated.py delete mode 100644 src/mistralai/models/referencechunk.py delete mode 100644 src/mistralai/models/requestsource.py delete mode 100644 src/mistralai/models/responsedoneevent.py delete mode 100644 src/mistralai/models/responseerrorevent.py delete mode 100644 src/mistralai/models/responseformat.py delete mode 100644 src/mistralai/models/responseformats.py delete mode 100644 
src/mistralai/models/responsestartedevent.py delete mode 100644 src/mistralai/models/responsevalidationerror.py delete mode 100644 src/mistralai/models/retrieve_model_v1_models_model_id_getop.py delete mode 100644 src/mistralai/models/retrievefileout.py delete mode 100644 src/mistralai/models/sampletype.py delete mode 100644 src/mistralai/models/sdkerror.py delete mode 100644 src/mistralai/models/security.py delete mode 100644 src/mistralai/models/shareenum.py delete mode 100644 src/mistralai/models/sharingdelete.py delete mode 100644 src/mistralai/models/sharingin.py delete mode 100644 src/mistralai/models/sharingout.py delete mode 100644 src/mistralai/models/source.py delete mode 100644 src/mistralai/models/ssetypes.py delete mode 100644 src/mistralai/models/systemmessage.py delete mode 100644 src/mistralai/models/systemmessagecontentchunks.py delete mode 100644 src/mistralai/models/textchunk.py delete mode 100644 src/mistralai/models/thinkchunk.py delete mode 100644 src/mistralai/models/timestampgranularity.py delete mode 100644 src/mistralai/models/tool.py delete mode 100644 src/mistralai/models/toolcall.py delete mode 100644 src/mistralai/models/toolchoice.py delete mode 100644 src/mistralai/models/toolchoiceenum.py delete mode 100644 src/mistralai/models/toolexecutiondeltaevent.py delete mode 100644 src/mistralai/models/toolexecutiondoneevent.py delete mode 100644 src/mistralai/models/toolexecutionentry.py delete mode 100644 src/mistralai/models/toolexecutionstartedevent.py delete mode 100644 src/mistralai/models/toolfilechunk.py delete mode 100644 src/mistralai/models/toolmessage.py delete mode 100644 src/mistralai/models/toolreferencechunk.py delete mode 100644 src/mistralai/models/tooltypes.py delete mode 100644 src/mistralai/models/trainingfile.py delete mode 100644 src/mistralai/models/transcriptionresponse.py delete mode 100644 src/mistralai/models/transcriptionsegmentchunk.py delete mode 100644 src/mistralai/models/transcriptionstreamdone.py delete 
mode 100644 src/mistralai/models/transcriptionstreamevents.py delete mode 100644 src/mistralai/models/transcriptionstreameventtypes.py delete mode 100644 src/mistralai/models/transcriptionstreamlanguage.py delete mode 100644 src/mistralai/models/transcriptionstreamsegmentdelta.py delete mode 100644 src/mistralai/models/transcriptionstreamtextdelta.py delete mode 100644 src/mistralai/models/unarchiveftmodelout.py delete mode 100644 src/mistralai/models/updateftmodelin.py delete mode 100644 src/mistralai/models/uploadfileout.py delete mode 100644 src/mistralai/models/usageinfo.py delete mode 100644 src/mistralai/models/usermessage.py delete mode 100644 src/mistralai/models/validationerror.py delete mode 100644 src/mistralai/models/wandbintegration.py delete mode 100644 src/mistralai/models/wandbintegrationout.py delete mode 100644 src/mistralai/models/websearchpremiumtool.py delete mode 100644 src/mistralai/models/websearchtool.py delete mode 100644 src/mistralai/models_.py delete mode 100644 src/mistralai/ocr.py delete mode 100644 src/mistralai/py.typed delete mode 100644 src/mistralai/sdk.py delete mode 100644 src/mistralai/sdkconfiguration.py delete mode 100644 src/mistralai/transcriptions.py delete mode 100644 src/mistralai/types/__init__.py delete mode 100644 src/mistralai/types/basemodel.py delete mode 100644 src/mistralai/utils/__init__.py delete mode 100644 src/mistralai/utils/annotations.py delete mode 100644 src/mistralai/utils/datetimes.py delete mode 100644 src/mistralai/utils/enums.py delete mode 100644 src/mistralai/utils/eventstreaming.py delete mode 100644 src/mistralai/utils/forms.py delete mode 100644 src/mistralai/utils/headers.py delete mode 100644 src/mistralai/utils/logger.py delete mode 100644 src/mistralai/utils/metadata.py delete mode 100644 src/mistralai/utils/queryparams.py delete mode 100644 src/mistralai/utils/requestbodies.py delete mode 100644 src/mistralai/utils/retries.py delete mode 100644 src/mistralai/utils/security.py delete mode 
100644 src/mistralai/utils/serializers.py delete mode 100644 src/mistralai/utils/unmarshal_json_response.py delete mode 100644 src/mistralai/utils/url.py delete mode 100644 src/mistralai/utils/values.py diff --git a/src/mistralai/__init__.py b/src/mistralai/__init__.py deleted file mode 100644 index dd02e42e..00000000 --- a/src/mistralai/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from ._version import ( - __title__, - __version__, - __openapi_doc_version__, - __gen_version__, - __user_agent__, -) -from .sdk import * -from .sdkconfiguration import * -from .models import * - - -VERSION: str = __version__ -OPENAPI_DOC_VERSION = __openapi_doc_version__ -SPEAKEASY_GENERATOR_VERSION = __gen_version__ -USER_AGENT = __user_agent__ diff --git a/src/mistralai/_hooks/__init__.py b/src/mistralai/_hooks/__init__.py deleted file mode 100644 index 2ee66cdd..00000000 --- a/src/mistralai/_hooks/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .sdkhooks import * -from .types import * -from .registration import * diff --git a/src/mistralai/_hooks/sdkhooks.py b/src/mistralai/_hooks/sdkhooks.py deleted file mode 100644 index 1f9a9316..00000000 --- a/src/mistralai/_hooks/sdkhooks.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import httpx -from .types import ( - SDKInitHook, - BeforeRequestContext, - BeforeRequestHook, - AfterSuccessContext, - AfterSuccessHook, - AfterErrorContext, - AfterErrorHook, - Hooks, -) -from .registration import init_hooks -from typing import List, Optional, Tuple -from mistralai.httpclient import HttpClient - - -class SDKHooks(Hooks): - def __init__(self) -> None: - self.sdk_init_hooks: List[SDKInitHook] = [] - self.before_request_hooks: List[BeforeRequestHook] = [] - self.after_success_hooks: List[AfterSuccessHook] = [] - self.after_error_hooks: List[AfterErrorHook] = [] - init_hooks(self) - - def register_sdk_init_hook(self, hook: SDKInitHook) -> None: - self.sdk_init_hooks.append(hook) - - def register_before_request_hook(self, hook: BeforeRequestHook) -> None: - self.before_request_hooks.append(hook) - - def register_after_success_hook(self, hook: AfterSuccessHook) -> None: - self.after_success_hooks.append(hook) - - def register_after_error_hook(self, hook: AfterErrorHook) -> None: - self.after_error_hooks.append(hook) - - def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: - for hook in self.sdk_init_hooks: - base_url, client = hook.sdk_init(base_url, client) - return base_url, client - - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> httpx.Request: - for hook in self.before_request_hooks: - out = hook.before_request(hook_ctx, request) - if isinstance(out, Exception): - raise out - request = out - - return request - - def after_success( - self, hook_ctx: AfterSuccessContext, response: httpx.Response - ) -> httpx.Response: - for hook in self.after_success_hooks: - out = hook.after_success(hook_ctx, response) - if isinstance(out, Exception): - raise out - response = out - return response - - def after_error( - self, - hook_ctx: AfterErrorContext, - response: Optional[httpx.Response], - error: Optional[Exception], - ) -> Tuple[Optional[httpx.Response], 
Optional[Exception]]: - for hook in self.after_error_hooks: - result = hook.after_error(hook_ctx, response, error) - if isinstance(result, Exception): - raise result - response, error = result - return response, error diff --git a/src/mistralai/_hooks/types.py b/src/mistralai/_hooks/types.py deleted file mode 100644 index 6d0f3e11..00000000 --- a/src/mistralai/_hooks/types.py +++ /dev/null @@ -1,113 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from abc import ABC, abstractmethod -import httpx -from mistralai.httpclient import HttpClient -from mistralai.sdkconfiguration import SDKConfiguration -from typing import Any, Callable, List, Optional, Tuple, Union - - -class HookContext: - config: SDKConfiguration - base_url: str - operation_id: str - oauth2_scopes: Optional[List[str]] = None - security_source: Optional[Union[Any, Callable[[], Any]]] = None - - def __init__( - self, - config: SDKConfiguration, - base_url: str, - operation_id: str, - oauth2_scopes: Optional[List[str]], - security_source: Optional[Union[Any, Callable[[], Any]]], - ): - self.config = config - self.base_url = base_url - self.operation_id = operation_id - self.oauth2_scopes = oauth2_scopes - self.security_source = security_source - - -class BeforeRequestContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.config, - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class AfterSuccessContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.config, - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) - - -class AfterErrorContext(HookContext): - def __init__(self, hook_ctx: HookContext): - super().__init__( - hook_ctx.config, - hook_ctx.base_url, - hook_ctx.operation_id, - hook_ctx.oauth2_scopes, - hook_ctx.security_source, - ) 
- - -class SDKInitHook(ABC): - @abstractmethod - def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: - pass - - -class BeforeRequestHook(ABC): - @abstractmethod - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - pass - - -class AfterSuccessHook(ABC): - @abstractmethod - def after_success( - self, hook_ctx: AfterSuccessContext, response: httpx.Response - ) -> Union[httpx.Response, Exception]: - pass - - -class AfterErrorHook(ABC): - @abstractmethod - def after_error( - self, - hook_ctx: AfterErrorContext, - response: Optional[httpx.Response], - error: Optional[Exception], - ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: - pass - - -class Hooks(ABC): - @abstractmethod - def register_sdk_init_hook(self, hook: SDKInitHook): - pass - - @abstractmethod - def register_before_request_hook(self, hook: BeforeRequestHook): - pass - - @abstractmethod - def register_after_success_hook(self, hook: AfterSuccessHook): - pass - - @abstractmethod - def register_after_error_hook(self, hook: AfterErrorHook): - pass diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py deleted file mode 100644 index 6ee91593..00000000 --- a/src/mistralai/_version.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import importlib.metadata - -__title__: str = "mistralai" -__version__: str = "1.12.0" -__openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 1.12.0 2.794.1 1.0.0 mistralai" - -try: - if __package__ is not None: - __version__ = importlib.metadata.version(__package__) -except importlib.metadata.PackageNotFoundError: - pass diff --git a/src/mistralai/accesses.py b/src/mistralai/accesses.py deleted file mode 100644 index be02ee5b..00000000 --- a/src/mistralai/accesses.py +++ /dev/null @@ -1,619 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - entitytype as models_entitytype, - shareenum as models_shareenum, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional - - -class Accesses(BaseSDK): - r"""(beta) Libraries API - manage access to a library.""" - - def list( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListSharingOut: - r"""List all of the access to this library. - - Given a library, list all of the Entity that have access and to what level. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareListV1Request( - library_id=library_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListSharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", 
"*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListSharingOut: - r"""List all of the access to this library. - - Given a library, list all of the Entity that have access and to what level. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareListV1Request( - library_id=library_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await 
self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListSharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update_or_create( - self, - *, - library_id: str, - level: models_shareenum.ShareEnum, - share_with_uuid: str, - share_with_type: models_entitytype.EntityType, - org_id: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: - r"""Create or update an access level. - - Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. 
- - :param library_id: - :param level: - :param share_with_uuid: The id of the entity (user, workspace or organization) to share with - :param share_with_type: The type of entity, used to share a library. - :param org_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareCreateV1Request( - library_id=library_id, - sharing_in=models.SharingIn( - org_id=org_id, - level=level, - share_with_uuid=share_with_uuid, - share_with_type=share_with_type, - ), - ) - - req = self._build_request( - method="PUT", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.sharing_in, False, False, "json", models.SharingIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - 
operation_id="libraries_share_create_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_or_create_async( - self, - *, - library_id: str, - level: models_shareenum.ShareEnum, - share_with_uuid: str, - share_with_type: models_entitytype.EntityType, - org_id: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: - r"""Create or update an access level. - - Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. - - :param library_id: - :param level: - :param share_with_uuid: The id of the entity (user, workspace or organization) to share with - :param share_with_type: The type of entity, used to share a library. 
- :param org_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareCreateV1Request( - library_id=library_id, - sharing_in=models.SharingIn( - org_id=org_id, - level=level, - share_with_uuid=share_with_uuid, - share_with_type=share_with_type, - ), - ) - - req = self._build_request_async( - method="PUT", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.sharing_in, False, False, "json", models.SharingIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_create_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", 
"5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - library_id: str, - share_with_uuid: str, - share_with_type: models_entitytype.EntityType, - org_id: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: - r"""Delete an access level. - - Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. - - :param library_id: - :param share_with_uuid: The id of the entity (user, workspace or organization) to share with - :param share_with_type: The type of entity, used to share a library. - :param org_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareDeleteV1Request( - library_id=library_id, - sharing_delete=models.SharingDelete( - org_id=org_id, - share_with_uuid=share_with_uuid, - share_with_type=share_with_type, - ), - ) - - req = self._build_request( - method="DELETE", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.sharing_delete, False, False, "json", models.SharingDelete - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise 
models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - library_id: str, - share_with_uuid: str, - share_with_type: models_entitytype.EntityType, - org_id: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.SharingOut: - r"""Delete an access level. - - Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. - - :param library_id: - :param share_with_uuid: The id of the entity (user, workspace or organization) to share with - :param share_with_type: The type of entity, used to share a library. - :param org_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesShareDeleteV1Request( - library_id=library_id, - sharing_delete=models.SharingDelete( - org_id=org_id, - share_with_uuid=share_with_uuid, - share_with_type=share_with_type, - ), - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/libraries/{library_id}/share", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.sharing_delete, False, False, "json", models.SharingDelete - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_share_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.SharingOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, 
http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/agents.py b/src/mistralai/agents.py deleted file mode 100644 index 73e4ee3c..00000000 --- a/src/mistralai/agents.py +++ /dev/null @@ -1,725 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - agentscompletionrequest as models_agentscompletionrequest, - agentscompletionstreamrequest as models_agentscompletionstreamrequest, - mistralpromptmode as models_mistralpromptmode, - prediction as models_prediction, - responseformat as models_responseformat, - tool as models_tool, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union - - -class Agents(BaseSDK): - r"""Agents API.""" - - def complete( - self, - *, - messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessages], - List[ - models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict - ], - ], - agent_id: str, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_agentscompletionrequest.AgentsCompletionRequestStop, - 
models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_agentscompletionrequest.AgentsCompletionRequestToolChoice, - models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ChatCompletionResponse: - r"""Agents Completion - - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param agent_id: The ID of the agent to use for this completion. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: - :param tool_choice: - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsCompletionRequest( - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.AgentsCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - agent_id=agent_id, - ) - - req = self._build_request( - method="POST", - path="/v1/agents/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentsCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if 
self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_completion_v1_agents_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ChatCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def complete_async( - self, - *, - messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessages], - List[ - models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict - ], - ], - agent_id: str, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_agentscompletionrequest.AgentsCompletionRequestStop, - models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, 
Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_agentscompletionrequest.AgentsCompletionRequestToolChoice, - models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ChatCompletionResponse: - r"""Agents Completion - - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param agent_id: The ID of the agent to use for this completion. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. 
If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: - :param tool_choice: - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsCompletionRequest( - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.AgentsCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - agent_id=agent_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/agents/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentsCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - 
base_url=base_url or "", - operation_id="agents_completion_v1_agents_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ChatCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def stream( - self, - *, - messages: Union[ - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages - ], - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict - ], - ], - agent_id: str, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = 
UNSET, - tool_choice: Optional[ - Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.CompletionEvent]: - r"""Stream Agents completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param agent_id: The ID of the agent to use for this completion. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. 
By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: - :param tool_choice: - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsCompletionStreamRequest( - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionStreamRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - agent_id=agent_id, - ) - - req = self._build_request( - method="POST", - path="/v1/agents/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentsCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, 
- base_url=base_url or "", - operation_id="stream_agents", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def stream_async( - self, - *, - messages: Union[ - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages - ], - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict - ], - ], - agent_id: str, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - 
models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: - r"""Stream Agents completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param agent_id: The ID of the agent to use for this completion. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: - :param tool_choice: - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsCompletionStreamRequest( - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionStreamRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - agent_id=agent_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/agents/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentsCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == 
UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_agents", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/async_client.py b/src/mistralai/async_client.py deleted file mode 100644 index f9522a28..00000000 --- a/src/mistralai/async_client.py +++ /dev/null @@ -1,15 +0,0 @@ -from typing import Optional - -from .client import MIGRATION_MESSAGE - - -class MistralAsyncClient: - 
def __init__( - self, - api_key: Optional[str] = None, - endpoint: str = "", - max_retries: int = 5, - timeout: int = 120, - max_concurrent_requests: int = 64, - ): - raise NotImplementedError(MIGRATION_MESSAGE) diff --git a/src/mistralai/audio.py b/src/mistralai/audio.py deleted file mode 100644 index 3de29053..00000000 --- a/src/mistralai/audio.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.transcriptions import Transcriptions -from typing import Optional - -# region imports -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from mistralai.extra.realtime import RealtimeTranscription -# endregion imports - - -class Audio(BaseSDK): - transcriptions: Transcriptions - r"""API for audio transcription.""" - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.transcriptions = Transcriptions( - self.sdk_configuration, parent_ref=self.parent_ref - ) - - # region sdk-class-body - @property - def realtime(self) -> "RealtimeTranscription": - """Returns a client for real-time audio transcription via WebSocket.""" - if not hasattr(self, "_realtime"): - from mistralai.extra.realtime import RealtimeTranscription # pylint: disable=import-outside-toplevel - - self._realtime = RealtimeTranscription(self.sdk_configuration) # pylint: disable=attribute-defined-outside-init - - return self._realtime - - # endregion sdk-class-body diff --git a/src/mistralai/basesdk.py b/src/mistralai/basesdk.py deleted file mode 100644 index c9a32aa1..00000000 --- a/src/mistralai/basesdk.py +++ /dev/null @@ -1,370 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from .sdkconfiguration import SDKConfiguration -import httpx -from mistralai import models, utils -from mistralai._hooks import ( - AfterErrorContext, - AfterSuccessContext, - BeforeRequestContext, -) -from mistralai.utils import RetryConfig, SerializedRequestBody, get_body_content -from typing import Callable, List, Mapping, Optional, Tuple -from urllib.parse import parse_qs, urlparse - - -class BaseSDK: - sdk_configuration: SDKConfiguration - parent_ref: Optional[object] = None - """ - Reference to the root SDK instance, if any. This will prevent it from - being garbage collected while there are active streams. - """ - - def __init__( - self, - sdk_config: SDKConfiguration, - parent_ref: Optional[object] = None, - ) -> None: - self.sdk_configuration = sdk_config - self.parent_ref = parent_ref - - def _get_url(self, base_url, url_variables): - sdk_url, sdk_variables = self.sdk_configuration.get_server_details() - - if base_url is None: - base_url = sdk_url - - if url_variables is None: - url_variables = sdk_variables - - return utils.template_url(base_url, url_variables) - - def _build_request_async( - self, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - allow_empty_value: Optional[List[str]] = None, - ) -> httpx.Request: - client = self.sdk_configuration.async_client - return self._build_request_with_client( - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - 
accept_header_value, - _globals, - security, - timeout_ms, - get_serialized_body, - url_override, - http_headers, - allow_empty_value, - ) - - def _build_request( - self, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - allow_empty_value: Optional[List[str]] = None, - ) -> httpx.Request: - client = self.sdk_configuration.client - return self._build_request_with_client( - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals, - security, - timeout_ms, - get_serialized_body, - url_override, - http_headers, - allow_empty_value, - ) - - def _build_request_with_client( - self, - client, - method, - path, - base_url, - url_variables, - request, - request_body_required, - request_has_path_params, - request_has_query_params, - user_agent_header, - accept_header_value, - _globals=None, - security=None, - timeout_ms: Optional[int] = None, - get_serialized_body: Optional[ - Callable[[], Optional[SerializedRequestBody]] - ] = None, - url_override: Optional[str] = None, - http_headers: Optional[Mapping[str, str]] = None, - allow_empty_value: Optional[List[str]] = None, - ) -> httpx.Request: - query_params = {} - - url = url_override - if url is None: - url = utils.generate_url( - self._get_url(base_url, url_variables), - path, - request if request_has_path_params else None, - _globals if request_has_path_params else None, - ) - - query_params = utils.get_query_params( - request if request_has_query_params else None, - _globals if 
request_has_query_params else None, - allow_empty_value, - ) - else: - # Pick up the query parameter from the override so they can be - # preserved when building the request later on (necessary as of - # httpx 0.28). - parsed_override = urlparse(str(url_override)) - query_params = parse_qs(parsed_override.query, keep_blank_values=True) - - headers = utils.get_headers(request, _globals) - headers["Accept"] = accept_header_value - headers[user_agent_header] = self.sdk_configuration.user_agent - - if security is not None: - if callable(security): - security = security() - security = utils.get_security_from_env(security, models.Security) - if security is not None: - security_headers, security_query_params = utils.get_security(security) - headers = {**headers, **security_headers} - query_params = {**query_params, **security_query_params} - - serialized_request_body = SerializedRequestBody() - if get_serialized_body is not None: - rb = get_serialized_body() - if request_body_required and rb is None: - raise ValueError("request body is required") - - if rb is not None: - serialized_request_body = rb - - if ( - serialized_request_body.media_type is not None - and serialized_request_body.media_type - not in ( - "multipart/form-data", - "multipart/mixed", - ) - ): - headers["content-type"] = serialized_request_body.media_type - - if http_headers is not None: - for header, value in http_headers.items(): - headers[header] = value - - timeout = timeout_ms / 1000 if timeout_ms is not None else None - - return client.build_request( - method, - url, - params=query_params, - content=serialized_request_body.content, - data=serialized_request_body.data, - files=serialized_request_body.files, - headers=headers, - timeout=timeout, - ) - - def do_request( - self, - hook_ctx, - request, - error_status_codes, - stream=False, - retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, - ) -> httpx.Response: - client = self.sdk_configuration.client - logger = 
self.sdk_configuration.debug_logger - - hooks = self.sdk_configuration.__dict__["_hooks"] - - def do(): - http_res = None - try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) - logger.debug( - "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", - req.method, - req.url, - req.headers, - get_body_content(req), - ) - - if client is None: - raise ValueError("client is required") - - http_res = client.send(req, stream=stream) - except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) - if e is not None: - logger.debug("Request Exception", exc_info=True) - raise e - - if http_res is None: - logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") - - logger.debug( - "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", - http_res.status_code, - http_res.url, - http_res.headers, - "" if stream else http_res.text, - ) - - if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None - ) - if err is not None: - logger.debug("Request Exception", exc_info=True) - raise err - if result is not None: - http_res = result - else: - logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) - - return http_res - - if retry_config is not None: - http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) - else: - http_res = do() - - if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) - - return http_res - - async def do_request_async( - self, - hook_ctx, - request, - error_status_codes, - stream=False, - retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, - ) -> httpx.Response: - client = self.sdk_configuration.async_client - logger = self.sdk_configuration.debug_logger - - hooks = 
self.sdk_configuration.__dict__["_hooks"] - - async def do(): - http_res = None - try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) - logger.debug( - "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", - req.method, - req.url, - req.headers, - get_body_content(req), - ) - - if client is None: - raise ValueError("client is required") - - http_res = await client.send(req, stream=stream) - except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) - if e is not None: - logger.debug("Request Exception", exc_info=True) - raise e - - if http_res is None: - logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") - - logger.debug( - "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", - http_res.status_code, - http_res.url, - http_res.headers, - "" if stream else http_res.text, - ) - - if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None - ) - if err is not None: - logger.debug("Request Exception", exc_info=True) - raise err - if result is not None: - http_res = result - else: - logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) - - return http_res - - if retry_config is not None: - http_res = await utils.retry_async( - do, utils.Retries(retry_config[0], retry_config[1]) - ) - else: - http_res = await do() - - if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) - - return http_res diff --git a/src/mistralai/batch.py b/src/mistralai/batch.py deleted file mode 100644 index 7ed7ccef..00000000 --- a/src/mistralai/batch.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.mistral_jobs import MistralJobs -from typing import Optional - - -class Batch(BaseSDK): - jobs: MistralJobs - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.jobs = MistralJobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/beta.py b/src/mistralai/beta.py deleted file mode 100644 index 4bbf1fa3..00000000 --- a/src/mistralai/beta.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.conversations import Conversations -from mistralai.libraries import Libraries -from mistralai.mistral_agents import MistralAgents -from typing import Optional - - -class Beta(BaseSDK): - conversations: Conversations - r"""(beta) Conversations API""" - agents: MistralAgents - r"""(beta) Agents API""" - libraries: Libraries - r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.conversations = Conversations( - self.sdk_configuration, parent_ref=self.parent_ref - ) - self.agents = MistralAgents(self.sdk_configuration, parent_ref=self.parent_ref) - self.libraries = Libraries(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py deleted file mode 100644 index 1528c4c9..00000000 --- 
a/src/mistralai/chat.py +++ /dev/null @@ -1,835 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - chatcompletionrequest as models_chatcompletionrequest, - chatcompletionstreamrequest as models_chatcompletionstreamrequest, - mistralpromptmode as models_mistralpromptmode, - prediction as models_prediction, - responseformat as models_responseformat, - tool as models_tool, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union - -# region imports -from typing import Type -from mistralai.extra import ( - convert_to_parsed_chat_completion_response, - response_format_from_pydantic_model, - CustomPydanticModel, - ParsedChatCompletionResponse, -) -# endregion imports - - -class Chat(BaseSDK): - r"""Chat Completion API.""" - - # region sdk-class-body - # Custom .parse methods for the Structure Outputs Feature. - - def parse( - self, response_format: Type[CustomPydanticModel], **kwargs: Any - ) -> ParsedChatCompletionResponse[CustomPydanticModel]: - """ - Parse the response using the provided response format. 
- :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .complete method - :return: The parsed response - """ - # Convert the input Pydantic Model to a strict JSON ready to be passed to chat.complete - json_response_format = response_format_from_pydantic_model(response_format) - # Run the inference - response = self.complete(**kwargs, response_format=json_response_format) - # Parse response back to the input pydantic model - parsed_response = convert_to_parsed_chat_completion_response( - response, response_format - ) - return parsed_response - - async def parse_async( - self, response_format: Type[CustomPydanticModel], **kwargs - ) -> ParsedChatCompletionResponse[CustomPydanticModel]: - """ - Asynchronously parse the response using the provided response format. - :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .complete method - :return: The parsed response - """ - json_response_format = response_format_from_pydantic_model(response_format) - response = await self.complete_async( # pylint: disable=E1125 - **kwargs, response_format=json_response_format - ) - parsed_response = convert_to_parsed_chat_completion_response( - response, response_format - ) - return parsed_response - - def parse_stream( - self, response_format: Type[CustomPydanticModel], **kwargs - ) -> eventstreaming.EventStream[models.CompletionEvent]: - """ - Parse the response using the provided response format. - For now the response will be in JSON format not in the input Pydantic model. 
- :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .stream method - :return: The JSON parsed response - """ - json_response_format = response_format_from_pydantic_model(response_format) - response = self.stream(**kwargs, response_format=json_response_format) - return response - - async def parse_stream_async( - self, response_format: Type[CustomPydanticModel], **kwargs - ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: - """ - Asynchronously parse the response using the provided response format. - For now the response will be in JSON format not in the input Pydantic model. - :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into - :param Any **kwargs Additional keyword arguments to pass to the .stream method - :return: The JSON parsed response - """ - json_response_format = response_format_from_pydantic_model(response_format) - response = await self.stream_async( # pylint: disable=E1125 - **kwargs, response_format=json_response_format - ) - return response - - # endregion sdk-class-body - - def complete( - self, - *, - model: str, - messages: Union[ - List[models_chatcompletionrequest.Messages], - List[models_chatcompletionrequest.MessagesTypedDict], - ], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_chatcompletionrequest.Stop, - models_chatcompletionrequest.StopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ 
- Union[ - models_chatcompletionrequest.ChatCompletionRequestToolChoice, - models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ChatCompletionResponse: - r"""Chat Completion - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. - :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request( - method="POST", - path="/v1/chat/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - 
base_url=base_url or "", - operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ChatCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def complete_async( - self, - *, - model: str, - messages: Union[ - List[models_chatcompletionrequest.Messages], - List[models_chatcompletionrequest.MessagesTypedDict], - ], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_chatcompletionrequest.Stop, - models_chatcompletionrequest.StopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - 
models_chatcompletionrequest.ChatCompletionRequestToolChoice, - models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ChatCompletionResponse: - r"""Chat Completion - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. - :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request_async( - method="POST", - path="/v1/chat/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - 
config=self.sdk_configuration, - base_url=base_url or "", - operation_id="chat_completion_v1_chat_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ChatCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def stream( - self, - *, - model: str, - messages: Union[ - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages - ], - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict - ], - ], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: 
OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.CompletionEvent]: - r"""Stream chat completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. - :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionStreamRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request( - method="POST", - path="/v1/chat/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - 
hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_chat", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def stream_async( - self, - *, - model: str, - messages: Union[ - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages - ], - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict - ], - ], - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = None, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: 
OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = None, - tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] - ] = UNSET, - tool_choice: Optional[ - Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, - ] - ] = None, - presence_penalty: Optional[float] = None, - frequency_penalty: Optional[float] = None, - n: OptionalNullable[int] = UNSET, - prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] - ] = None, - parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, - safe_prompt: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: - r"""Stream chat completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. - :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. 
- :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. - :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. - :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. - :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. - :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. - :param n: Number of completions to return for each request, input tokens are only billed once. - :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. - :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. - :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. - :param safe_prompt: Whether to inject a safety prompt before all conversations. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionStreamRequestMessages] - ), - response_format=utils.get_pydantic_model( - response_format, Optional[models.ResponseFormat] - ), - tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), - tool_choice=utils.get_pydantic_model( - tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] - ), - presence_penalty=presence_penalty, - frequency_penalty=frequency_penalty, - n=n, - prediction=utils.get_pydantic_model( - prediction, Optional[models.Prediction] - ), - parallel_tool_calls=parallel_tool_calls, - prompt_mode=prompt_mode, - safe_prompt=safe_prompt, - ) - - req = self._build_request_async( - method="POST", - path="/v1/chat/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await 
self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_chat", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/classifiers.py b/src/mistralai/classifiers.py deleted file mode 100644 index 7c32506e..00000000 --- a/src/mistralai/classifiers.py +++ /dev/null @@ -1,800 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - chatmoderationrequest as models_chatmoderationrequest, - classificationrequest as models_classificationrequest, - inputs as models_inputs, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, Mapping, Optional, Union - - -class Classifiers(BaseSDK): - r"""Classifiers API.""" - - def moderate( - self, - *, - model: str, - inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ModerationResponse: - r"""Moderations - - :param model: ID of the model to use. - :param inputs: Text to classify. - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ClassificationRequest( - model=model, - metadata=metadata, - inputs=inputs, - ) - - req = self._build_request( - method="POST", - path="/v1/moderations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="moderations_v1_moderations_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ModerationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def moderate_async( - self, - *, - model: str, - inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ModerationResponse: - r"""Moderations - - :param model: ID of the model to use. - :param inputs: Text to classify. - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ClassificationRequest( - model=model, - metadata=metadata, - inputs=inputs, - ) - - req = self._build_request_async( - method="POST", - path="/v1/moderations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="moderations_v1_moderations_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ModerationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - 
http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def moderate_chat( - self, - *, - inputs: Union[ - models_chatmoderationrequest.ChatModerationRequestInputs, - models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, - ], - model: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ModerationResponse: - r"""Chat Moderations - - :param inputs: Chat to classify - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatModerationRequest( - inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), - model=model, - ) - - req = self._build_request( - method="POST", - path="/v1/chat/moderations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatModerationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="chat_moderations_v1_chat_moderations_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ModerationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if 
utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def moderate_chat_async( - self, - *, - inputs: Union[ - models_chatmoderationrequest.ChatModerationRequestInputs, - models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, - ], - model: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ModerationResponse: - r"""Chat Moderations - - :param inputs: Chat to classify - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatModerationRequest( - inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), - model=model, - ) - - req = self._build_request_async( - method="POST", - path="/v1/chat/moderations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatModerationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="chat_moderations_v1_chat_moderations_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ModerationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, 
http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def classify( - self, - *, - model: str, - inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: - r"""Classifications - - :param model: ID of the model to use. - :param inputs: Text to classify. - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ClassificationRequest( - model=model, - metadata=metadata, - inputs=inputs, - ) - - req = self._build_request( - method="POST", - path="/v1/classifications", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="classifications_v1_classifications_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ClassificationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def classify_async( - self, - *, - model: str, - inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: - r"""Classifications - - :param model: ID of the model to use. - :param inputs: Text to classify. - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ClassificationRequest( - model=model, - metadata=metadata, - inputs=inputs, - ) - - req = self._build_request_async( - method="POST", - path="/v1/classifications", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="classifications_v1_classifications_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ClassificationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, 
"4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def classify_chat( - self, - *, - model: str, - inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: - r"""Chat Classifications - - :param model: - :param inputs: Chat to classify - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatClassificationRequest( - model=model, - inputs=utils.get_pydantic_model(inputs, models.Inputs), - ) - - req = self._build_request( - method="POST", - path="/v1/chat/classifications", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="chat_classifications_v1_chat_classifications_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ClassificationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if 
utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def classify_chat_async( - self, - *, - model: str, - inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ClassificationResponse: - r"""Chat Classifications - - :param model: - :param inputs: Chat to classify - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ChatClassificationRequest( - model=model, - inputs=utils.get_pydantic_model(inputs, models.Inputs), - ) - - req = self._build_request_async( - method="POST", - path="/v1/chat/classifications", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ChatClassificationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="chat_classifications_v1_chat_classifications_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ClassificationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, 
http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client.py b/src/mistralai/client.py deleted file mode 100644 index d3582f77..00000000 --- a/src/mistralai/client.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import Optional - -MIGRATION_MESSAGE = "This client is deprecated. To migrate to the new client, please refer to this guide: https://github.com/mistralai/client-python/blob/main/MIGRATION.md. If you need to use this client anyway, pin your version to 0.4.2." - - -class MistralClient: - def __init__( - self, - api_key: Optional[str] = None, - endpoint: str = "", - max_retries: int = 5, - timeout: int = 120, - ): - raise NotImplementedError(MIGRATION_MESSAGE) diff --git a/src/mistralai/conversations.py b/src/mistralai/conversations.py deleted file mode 100644 index 194cb4c0..00000000 --- a/src/mistralai/conversations.py +++ /dev/null @@ -1,2865 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - completionargs as models_completionargs, - conversationappendrequest as models_conversationappendrequest, - conversationappendstreamrequest as models_conversationappendstreamrequest, - conversationinputs as models_conversationinputs, - conversationrequest as models_conversationrequest, - conversationrestartrequest as models_conversationrestartrequest, - conversationrestartstreamrequest as models_conversationrestartstreamrequest, - conversationstreamrequest as models_conversationstreamrequest, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union - -# region imports -import typing -from typing import AsyncGenerator -import logging -from collections import defaultdict - -from mistralai.models import ( - ResponseStartedEvent, - ConversationEventsData, - InputEntries, -) -from mistralai.extra.run.result import ( - RunResult, - RunResultEvents, - FunctionResultEvent, - reconstitue_entries, -) -from mistralai.extra.run.utils import run_requirements -from mistralai.extra.observability.otel import GenAISpanEnum, get_or_create_otel_tracer - -logger = logging.getLogger(__name__) -tracing_enabled, tracer = get_or_create_otel_tracer() - -if typing.TYPE_CHECKING: - from mistralai.extra.run.context import RunContext - -# endregion imports - - -class Conversations(BaseSDK): - r"""(beta) Conversations API""" - - # region sdk-class-body - # Custom run code allowing client side execution of code - - @run_requirements - async def run_async( - self, - run_ctx: "RunContext", - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - instructions: OptionalNullable[str] = UNSET, - tools: 
OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, - completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> RunResult: - """Run a conversation with the given inputs and context. - - The execution of a run will only stop when no required local execution can be done.""" - from mistralai.beta import Beta - from mistralai.extra.run.context import _validate_run - from mistralai.extra.run.tools import get_function_calls - - with tracer.start_as_current_span(GenAISpanEnum.VALIDATE_RUN.value): - req, run_result, input_entries = await _validate_run( - beta_client=Beta(self.sdk_configuration), - run_ctx=run_ctx, - inputs=inputs, - instructions=instructions, - tools=tools, - completion_args=completion_args, - ) - - with tracer.start_as_current_span(GenAISpanEnum.CONVERSATION.value): - while True: - if run_ctx.conversation_id is None: - res = await self.start_async( - inputs=input_entries, - http_headers=http_headers, - name=name, - description=description, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - **req, # type: ignore - ) - run_result.conversation_id = res.conversation_id - run_ctx.conversation_id = res.conversation_id - logger.info( - f"Started Run with conversation with id {res.conversation_id}" - ) - else: - res = await self.append_async( - conversation_id=run_ctx.conversation_id, - inputs=input_entries, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - ) - run_ctx.request_count += 1 - run_result.output_entries.extend(res.outputs) - fcalls = get_function_calls(res.outputs) - if not fcalls: - logger.debug("No more function calls to execute") - break - else: - 
fresults = await run_ctx.execute_function_calls(fcalls) - run_result.output_entries.extend(fresults) - input_entries = typing.cast(list[InputEntries], fresults) - return run_result - - @run_requirements - async def run_stream_async( - self, - run_ctx: "RunContext", - inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], - instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] - ] = UNSET, - completion_args: OptionalNullable[ - Union[models.CompletionArgs, models.CompletionArgsTypedDict] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: - """Similar to `run_async` but returns a generator which streams events. 
- - The last streamed object is the RunResult object which summarises what happened in the run.""" - from mistralai.beta import Beta - from mistralai.extra.run.context import _validate_run - from mistralai.extra.run.tools import get_function_calls - - req, run_result, input_entries = await _validate_run( - beta_client=Beta(self.sdk_configuration), - run_ctx=run_ctx, - inputs=inputs, - instructions=instructions, - tools=tools, - completion_args=completion_args, - ) - - async def run_generator() -> ( - AsyncGenerator[Union[RunResultEvents, RunResult], None] - ): - current_entries = input_entries - while True: - received_event_tracker: defaultdict[ - int, list[ConversationEventsData] - ] = defaultdict(list) - if run_ctx.conversation_id is None: - res = await self.start_stream_async( - inputs=current_entries, - http_headers=http_headers, - name=name, - description=description, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - **req, # type: ignore - ) - else: - res = await self.append_stream_async( - conversation_id=run_ctx.conversation_id, - inputs=current_entries, - retries=retries, - server_url=server_url, - timeout_ms=timeout_ms, - ) - async for event in res: - if ( - isinstance(event.data, ResponseStartedEvent) - and run_ctx.conversation_id is None - ): - run_result.conversation_id = event.data.conversation_id - run_ctx.conversation_id = event.data.conversation_id - logger.info( - f"Started Run with conversation with id {run_ctx.conversation_id}" - ) - if ( - output_index := getattr(event.data, "output_index", None) - ) is not None: - received_event_tracker[output_index].append(event.data) - yield typing.cast(RunResultEvents, event) - run_ctx.request_count += 1 - outputs = reconstitue_entries(received_event_tracker) - run_result.output_entries.extend(outputs) - fcalls = get_function_calls(outputs) - if not fcalls: - logger.debug("No more function calls to execute") - break - else: - fresults = await run_ctx.execute_function_calls(fcalls) - 
run_result.output_entries.extend(fresults) - for fresult in fresults: - yield RunResultEvents( - event="function.result", - data=FunctionResultEvent( - type="function.result", - result=fresult.result, - tool_call_id=fresult.tool_call_id, - ), - ) - current_entries = typing.cast(list[InputEntries], fresults) - yield run_result - - return run_generator() - - # endregion sdk-class-body - - def start( - self, - *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = False, - store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[ - models_conversationrequest.HandoffExecution - ] = UNSET, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_conversationrequest.Tools], - List[models_conversationrequest.ToolsTypedDict], - ] - ] = None, - completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationrequest.AgentVersion, - models_conversationrequest.AgentVersionTypedDict, - ] - ] = UNSET, - model: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Create a conversation and append entries to it. - - Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. 
- - :param inputs: - :param stream: - :param store: - :param handoff_execution: - :param instructions: - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: - :param name: - :param description: - :param metadata: - :param agent_id: - :param agent_version: - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ConversationRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - instructions=instructions, - tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), - completion_args=utils.get_pydantic_model( - completion_args, OptionalNullable[models.CompletionArgs] - ), - name=name, - description=description, - metadata=metadata, - agent_id=agent_id, - agent_version=agent_version, - model=model, - ) - - req = self._build_request( - method="POST", - path="/v1/conversations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ConversationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries 
== UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_start", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def start_async( - self, - *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = False, - store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[ - models_conversationrequest.HandoffExecution - ] = UNSET, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_conversationrequest.Tools], - List[models_conversationrequest.ToolsTypedDict], - ] - ] = None, - completion_args: OptionalNullable[ - Union[ - 
models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationrequest.AgentVersion, - models_conversationrequest.AgentVersionTypedDict, - ] - ] = UNSET, - model: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Create a conversation and append entries to it. - - Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. - - :param inputs: - :param stream: - :param store: - :param handoff_execution: - :param instructions: - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: - :param name: - :param description: - :param metadata: - :param agent_id: - :param agent_version: - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ConversationRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - instructions=instructions, - tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), - completion_args=utils.get_pydantic_model( - completion_args, OptionalNullable[models.CompletionArgs] - ), - name=name, - description=description, - metadata=metadata, - agent_id=agent_id, - agent_version=agent_version, - model=model, - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ConversationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_start", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - 
response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ResponseBody]: - r"""List all created conversations. - - Retrieve a list of conversation entities sorted by creation time. - - :param page: - :param page_size: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsListRequest( - page=page, - page_size=page_size, - metadata=metadata, - ) - - req = self._build_request( - method="GET", - path="/v1/conversations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_list", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.ResponseBody], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if 
utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ResponseBody]: - r"""List all created conversations. - - Retrieve a list of conversation entities sorted by creation time. - - :param page: - :param page_size: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsListRequest( - page=page, - page_size=page_size, - metadata=metadata, - ) - - req = self._build_request_async( - method="GET", - path="/v1/conversations", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_list", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.ResponseBody], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", 
http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: - r"""Retrieve a conversation information. - - Given a conversation_id retrieve a conversation entity with its attributes. - - :param conversation_id: ID of the conversation from which we are fetching metadata. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsGetRequest( - conversation_id=conversation_id, - ) - - req = self._build_request( - method="GET", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error 
occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_async( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: - r"""Retrieve a conversation information. - - Given a conversation_id retrieve a conversation entity with its attributes. - - :param conversation_id: ID of the conversation from which we are fetching metadata. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsGetRequest( - conversation_id=conversation_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete a conversation. - - Delete a conversation given a conversation_id. - - :param conversation_id: ID of the conversation from which we are fetching metadata. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsDeleteRequest( - conversation_id=conversation_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_delete", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete a conversation. - - Delete a conversation given a conversation_id. - - :param conversation_id: ID of the conversation from which we are fetching metadata. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsDeleteRequest( - conversation_id=conversation_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await 
self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_delete", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def append( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = False, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationappendrequest.ConversationAppendRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Append new entries to an existing conversation. - - Run completion on the history of the conversation and the user entries. Return the new created entries. 
- - :param conversation_id: ID of the conversation to which we append entries. - :param inputs: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsAppendRequest( - conversation_id=conversation_id, - conversation_append_request=models.ConversationAppendRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - ), - ) - - req = self._build_request( - method="POST", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_append_request, - False, - False, - "json", - models.ConversationAppendRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = 
self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_append", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def append_async( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = False, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationappendrequest.ConversationAppendRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - 
http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Append new entries to an existing conversation. - - Run completion on the history of the conversation and the user entries. Return the new created entries. - - :param conversation_id: ID of the conversation to which we append entries. - :param inputs: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsAppendRequest( - conversation_id=conversation_id, - conversation_append_request=models.ConversationAppendRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations/{conversation_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - 
request.conversation_append_request, - False, - False, - "json", - models.ConversationAppendRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_append", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get_history( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationHistory: - r"""Retrieve all entries in a conversation. 
- - Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. - - :param conversation_id: ID of the conversation from which we are fetching entries. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsHistoryRequest( - conversation_id=conversation_id, - ) - - req = self._build_request( - method="GET", - path="/v1/conversations/{conversation_id}/history", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_history", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - 
error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationHistory, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_history_async( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationHistory: - r"""Retrieve all entries in a conversation. - - Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. - - :param conversation_id: ID of the conversation from which we are fetching entries. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsHistoryRequest( - conversation_id=conversation_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/conversations/{conversation_id}/history", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_history", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationHistory, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error 
occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get_messages( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationMessages: - r"""Retrieve all messages in a conversation. - - Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. - - :param conversation_id: ID of the conversation from which we are fetching messages. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsMessagesRequest( - conversation_id=conversation_id, - ) - - req = self._build_request( - method="GET", - path="/v1/conversations/{conversation_id}/messages", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_messages", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationMessages, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, 
http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_messages_async( - self, - *, - conversation_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationMessages: - r"""Retrieve all messages in a conversation. - - Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. - - :param conversation_id: ID of the conversation from which we are fetching messages. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsMessagesRequest( - conversation_id=conversation_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/conversations/{conversation_id}/messages", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_messages", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationMessages, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API 
error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def restart( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - from_entry_id: str, - stream: Optional[bool] = False, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationrestartrequest.ConversationRestartRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationrestartrequest.ConversationRestartRequestAgentVersion, - models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Restart a conversation starting from a given entry. - - Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - - :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: - :param from_entry_id: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param metadata: Custom metadata for the conversation. - :param agent_version: Specific version of the agent to use when restarting. 
If not provided, uses the current version. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsRestartRequest( - conversation_id=conversation_id, - conversation_restart_request=models.ConversationRestartRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - metadata=metadata, - from_entry_id=from_entry_id, - agent_version=agent_version, - ), - ) - - req = self._build_request( - method="POST", - path="/v1/conversations/{conversation_id}/restart", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_restart_request, - False, - False, - "json", - models.ConversationRestartRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - 
- http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def restart_async( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - from_entry_id: str, - stream: Optional[bool] = False, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationrestartrequest.ConversationRestartRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationrestartrequest.ConversationRestartRequestAgentVersion, - models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, - ] - ] = UNSET, - 
retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ConversationResponse: - r"""Restart a conversation starting from a given entry. - - Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - - :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: - :param from_entry_id: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param metadata: Custom metadata for the conversation. - :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsRestartRequest( - conversation_id=conversation_id, - conversation_restart_request=models.ConversationRestartRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - metadata=metadata, - from_entry_id=from_entry_id, - agent_version=agent_version, - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations/{conversation_id}/restart", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_restart_request, - False, - False, - "json", - models.ConversationRestartRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", 
"5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ConversationResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def start_stream( - self, - *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = True, - store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[ - models_conversationstreamrequest.ConversationStreamRequestHandoffExecution - ] = UNSET, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTools], - List[ - models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict - ], - ] - ] = None, - completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationstreamrequest.ConversationStreamRequestAgentVersion, - 
models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, - ] - ] = UNSET, - model: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.ConversationEvents]: - r"""Create a conversation and append entries to it. - - Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. - - :param inputs: - :param stream: - :param store: - :param handoff_execution: - :param instructions: - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: - :param name: - :param description: - :param metadata: - :param agent_id: - :param agent_version: - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ConversationStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.ConversationStreamRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, OptionalNullable[models.CompletionArgs] - ), - name=name, - description=description, - metadata=metadata, - agent_id=agent_id, - agent_version=agent_version, - model=model, - ) - - req = self._build_request( - method="POST", - path="/v1/conversations#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ConversationStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_start_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", 
"5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def start_stream_async( - self, - *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = True, - store: OptionalNullable[bool] = UNSET, - handoff_execution: OptionalNullable[ - models_conversationstreamrequest.ConversationStreamRequestHandoffExecution - ] = UNSET, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTools], - List[ - models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict - ], - ] - ] = None, - completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_id: 
OptionalNullable[str] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationstreamrequest.ConversationStreamRequestAgentVersion, - models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, - ] - ] = UNSET, - model: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: - r"""Create a conversation and append entries to it. - - Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. - - :param inputs: - :param stream: - :param store: - :param handoff_execution: - :param instructions: - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: - :param name: - :param description: - :param metadata: - :param agent_id: - :param agent_version: - :param model: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.ConversationStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.ConversationStreamRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, OptionalNullable[models.CompletionArgs] - ), - name=name, - description=description, - metadata=metadata, - agent_id=agent_id, - agent_version=agent_version, - model=model, - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.ConversationStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_start_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - 
error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - def append_stream( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = True, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.ConversationEvents]: - r"""Append new entries to an existing conversation. 
- - Run completion on the history of the conversation and the user entries. Return the new created entries. - - :param conversation_id: ID of the conversation to which we append entries. - :param inputs: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsAppendStreamRequest( - conversation_id=conversation_id, - conversation_append_stream_request=models.ConversationAppendStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - ), - ) - - req = self._build_request( - method="POST", - path="/v1/conversations/{conversation_id}#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_append_stream_request, - False, - False, - "json", - models.ConversationAppendStreamRequest, - ), - 
allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_append_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def append_stream_async( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - stream: Optional[bool] = True, - store: Optional[bool] = True, - 
handoff_execution: Optional[ - models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: - r"""Append new entries to an existing conversation. - - Run completion on the history of the conversation and the user entries. Return the new created entries. - - :param conversation_id: ID of the conversation to which we append entries. - :param inputs: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsAppendStreamRequest( - conversation_id=conversation_id, - conversation_append_stream_request=models.ConversationAppendStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations/{conversation_id}#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_append_stream_request, - False, - False, - "json", - models.ConversationAppendStreamRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_append_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, 
- ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - def restart_stream( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - from_entry_id: str, - stream: Optional[bool] = True, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = 
None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.ConversationEvents]: - r"""Restart a conversation starting from a given entry. - - Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - - :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: - :param from_entry_id: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param metadata: Custom metadata for the conversation. - :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsRestartStreamRequest( - conversation_id=conversation_id, - conversation_restart_stream_request=models.ConversationRestartStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - metadata=metadata, - from_entry_id=from_entry_id, - agent_version=agent_version, - ), - ) - - req = self._build_request( - method="POST", - path="/v1/conversations/{conversation_id}/restart#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_restart_stream_request, - False, - False, - "json", - models.ConversationRestartStreamRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - 
error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def restart_stream_async( - self, - *, - conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], - from_entry_id: str, - stream: Optional[bool] = True, - store: Optional[bool] = True, - handoff_execution: Optional[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution - ] = "server", - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - agent_version: OptionalNullable[ - Union[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, - ] - ] = UNSET, - retries: 
OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: - r"""Restart a conversation starting from a given entry. - - Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. - - :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: - :param from_entry_id: - :param stream: - :param store: Whether to store the results into our servers or not. - :param handoff_execution: - :param completion_args: White-listed arguments from the completion API - :param metadata: Custom metadata for the conversation. - :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1ConversationsRestartStreamRequest( - conversation_id=conversation_id, - conversation_restart_stream_request=models.ConversationRestartStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), - stream=stream, - store=store, - handoff_execution=handoff_execution, - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - metadata=metadata, - from_entry_id=from_entry_id, - agent_version=agent_version, - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/conversations/{conversation_id}/restart#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.conversation_restart_stream_request, - False, - False, - "json", - models.ConversationRestartStreamRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - 
request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/documents.py b/src/mistralai/documents.py deleted file mode 100644 index fac58fdb..00000000 --- a/src/mistralai/documents.py +++ /dev/null @@ -1,1981 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - documentupdatein as models_documentupdatein, - file as models_file, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, Mapping, Optional, Union - - -class Documents(BaseSDK): - r"""(beta) Libraries API - manage documents in a library.""" - - def list( - self, - *, - library_id: str, - search: OptionalNullable[str] = UNSET, - page_size: Optional[int] = 100, - page: Optional[int] = 0, - filters_attributes: OptionalNullable[str] = UNSET, - sort_by: Optional[str] = "created_at", - sort_order: Optional[str] = "desc", - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListDocumentOut: - r"""List documents in a given library. - - Given a library, lists the document that have been uploaded to that library. - - :param library_id: - :param search: - :param page_size: - :param page: - :param filters_attributes: - :param sort_by: - :param sort_order: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsListV1Request( - library_id=library_id, - search=search, - page_size=page_size, - page=page, - filters_attributes=filters_attributes, - sort_by=sort_by, - sort_order=sort_order, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListDocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - library_id: str, - search: OptionalNullable[str] = UNSET, - page_size: Optional[int] = 100, - page: Optional[int] = 0, - filters_attributes: OptionalNullable[str] = UNSET, - sort_by: Optional[str] = "created_at", - sort_order: Optional[str] = "desc", - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListDocumentOut: - r"""List documents in a given library. - - Given a library, lists the document that have been uploaded to that library. - - :param library_id: - :param search: - :param page_size: - :param page: - :param filters_attributes: - :param sort_by: - :param sort_order: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsListV1Request( - library_id=library_id, - search=search, - page_size=page_size, - page=page, - filters_attributes=filters_attributes, - sort_by=sort_by, - sort_order=sort_order, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListDocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", 
"*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def upload( - self, - *, - library_id: str, - file: Union[models_file.File, models_file.FileTypedDict], - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Upload a new document. - - Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search - - :param library_id: - :param file: The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsUploadV1Request( - library_id=library_id, - request_body=models.LibrariesDocumentsUploadV1DocumentUpload( - file=utils.get_pydantic_model(file, models.File), - ), - ) - - req = self._build_request( - method="POST", - path="/v1/libraries/{library_id}/documents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.request_body, - False, - False, - "multipart", - models.LibrariesDocumentsUploadV1DocumentUpload, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_upload_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, ["200", "201"], "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - 
models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def upload_async( - self, - *, - library_id: str, - file: Union[models_file.File, models_file.FileTypedDict], - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Upload a new document. - - Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search - - :param library_id: - :param file: The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsUploadV1Request( - library_id=library_id, - request_body=models.LibrariesDocumentsUploadV1DocumentUpload( - file=utils.get_pydantic_model(file, models.File), - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/libraries/{library_id}/documents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.request_body, - False, - False, - "multipart", - models.LibrariesDocumentsUploadV1DocumentUpload, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_upload_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, ["200", "201"], "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = 
unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Retrieve the metadata of a specific document. - - Given a library and a document in this library, you can retrieve the metadata of that document. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, 
http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Retrieve the metadata of a specific document. - - Given a library and a document in this library, you can retrieve the metadata of that document. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error 
occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update( - self, - *, - library_id: str, - document_id: str, - name: OptionalNullable[str] = UNSET, - attributes: OptionalNullable[ - Union[ - Dict[str, models_documentupdatein.Attributes], - Dict[str, models_documentupdatein.AttributesTypedDict], - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Update the metadata of a specific document. - - Given a library and a document in that library, update the name of that document. - - :param library_id: - :param document_id: - :param name: - :param attributes: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsUpdateV1Request( - library_id=library_id, - document_id=document_id, - document_update_in=models.DocumentUpdateIn( - name=name, - attributes=attributes, - ), - ) - - req = self._build_request( - method="PUT", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.document_update_in, - False, - False, - "json", - models.DocumentUpdateIn, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_update_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - 
models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_async( - self, - *, - library_id: str, - document_id: str, - name: OptionalNullable[str] = UNSET, - attributes: OptionalNullable[ - Union[ - Dict[str, models_documentupdatein.Attributes], - Dict[str, models_documentupdatein.AttributesTypedDict], - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: - r"""Update the metadata of a specific document. - - Given a library and a document in that library, update the name of that document. - - :param library_id: - :param document_id: - :param name: - :param attributes: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsUpdateV1Request( - library_id=library_id, - document_id=document_id, - document_update_in=models.DocumentUpdateIn( - name=name, - attributes=attributes, - ), - ) - - req = self._build_request_async( - method="PUT", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.document_update_in, - False, - False, - "json", - models.DocumentUpdateIn, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_update_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - 
models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete a document. - - Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsDeleteV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete a document. - - Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsDeleteV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/libraries/{library_id}/documents/{document_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, 
utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def text_content( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentTextContent: - r"""Retrieve the text content of a specific document. - - Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. 
- - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetTextContentV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/text_content", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_text_content_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return 
unmarshal_json_response(models.DocumentTextContent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def text_content_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentTextContent: - r"""Retrieve the text content of a specific document. - - Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetTextContentV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/text_content", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_text_content_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentTextContent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def status( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ProcessingStatusOut: - r"""Retrieve the processing status of a specific document. - - Given a library and a document in that library, retrieve the processing status of that document. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetStatusV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/status", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_status_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ProcessingStatusOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error 
occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def status_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ProcessingStatusOut: - r"""Retrieve the processing status of a specific document. - - Given a library and a document in that library, retrieve the processing status of that document. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetStatusV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/status", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_status_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ProcessingStatusOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get_signed_url( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> str: - r"""Retrieve the signed URL of a specific document. - - Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetSignedURLV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_signed_url_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(str, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", 
http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_signed_url_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> str: - r"""Retrieve the signed URL of a specific document. - - Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetSignedURLV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_signed_url_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(str, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def extracted_text_signed_url( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> str: - r"""Retrieve the signed URL of text extracted from a given document. - - Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_extracted_text_signed_url_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(str, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def extracted_text_signed_url_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> str: - r"""Retrieve the signed URL of text extracted from a given document. - - Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_get_extracted_text_signed_url_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(str, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def reprocess( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Reprocess a document. - - Given a library and a document in that library, reprocess that document, it will be billed again. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsReprocessV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request( - method="POST", - path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_reprocess_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", 
"*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def reprocess_async( - self, - *, - library_id: str, - document_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Reprocess a document. - - Given a library and a document in that library, reprocess that document, it will be billed again. - - :param library_id: - :param document_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDocumentsReprocessV1Request( - library_id=library_id, - document_id=document_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, 
utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_documents_reprocess_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/embeddings.py b/src/mistralai/embeddings.py deleted file mode 100644 index 7430f804..00000000 --- a/src/mistralai/embeddings.py +++ /dev/null @@ -1,240 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - embeddingdtype as models_embeddingdtype, - embeddingrequest as models_embeddingrequest, - encodingformat as models_encodingformat, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, Mapping, Optional, Union - - -class Embeddings(BaseSDK): - r"""Embeddings API.""" - - def create( - self, - *, - model: str, - inputs: Union[ - models_embeddingrequest.EmbeddingRequestInputs, - models_embeddingrequest.EmbeddingRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - output_dimension: OptionalNullable[int] = UNSET, - output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, - encoding_format: Optional[models_encodingformat.EncodingFormat] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.EmbeddingResponse: - r"""Embeddings - - Embeddings - - :param model: The ID of the model to be used for embedding. - :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. - :param metadata: - :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. - :param output_dtype: - :param encoding_format: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.EmbeddingRequest( - model=model, - metadata=metadata, - inputs=inputs, - output_dimension=output_dimension, - output_dtype=output_dtype, - encoding_format=encoding_format, - ) - - req = self._build_request( - method="POST", - path="/v1/embeddings", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.EmbeddingRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="embeddings_v1_embeddings_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.EmbeddingResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise 
models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_async( - self, - *, - model: str, - inputs: Union[ - models_embeddingrequest.EmbeddingRequestInputs, - models_embeddingrequest.EmbeddingRequestInputsTypedDict, - ], - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - output_dimension: OptionalNullable[int] = UNSET, - output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, - encoding_format: Optional[models_encodingformat.EncodingFormat] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.EmbeddingResponse: - r"""Embeddings - - Embeddings - - :param model: The ID of the model to be used for embedding. - :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. - :param metadata: - :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. - :param output_dtype: - :param encoding_format: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.EmbeddingRequest( - model=model, - metadata=metadata, - inputs=inputs, - output_dimension=output_dimension, - output_dtype=output_dtype, - encoding_format=encoding_format, - ) - - req = self._build_request_async( - method="POST", - path="/v1/embeddings", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.EmbeddingRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="embeddings_v1_embeddings_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.EmbeddingResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise 
models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/files.py b/src/mistralai/files.py deleted file mode 100644 index 90ada0ff..00000000 --- a/src/mistralai/files.py +++ /dev/null @@ -1,1120 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -import httpx -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - file as models_file, - filepurpose as models_filepurpose, - sampletype as models_sampletype, - source as models_source, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import List, Mapping, Optional, Union - - -class Files(BaseSDK): - r"""Files API""" - - def upload( - self, - *, - file: Union[models_file.File, models_file.FileTypedDict], - purpose: Optional[models_filepurpose.FilePurpose] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UploadFileOut: - r"""Upload File - - Upload a file that can be used across various endpoints. - - The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. - - Please contact us if you need to increase these storage limits. 
- - :param file: The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - :param purpose: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( - purpose=purpose, - file=utils.get_pydantic_model(file, models.File), - ) - - req = self._build_request( - method="POST", - path="/v1/files", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.FilesAPIRoutesUploadFileMultiPartBodyParams, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - 
config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_upload_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UploadFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def upload_async( - self, - *, - file: Union[models_file.File, models_file.FileTypedDict], - purpose: Optional[models_filepurpose.FilePurpose] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UploadFileOut: - r"""Upload File - - Upload a file that can be used across various endpoints. - - The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. - - Please contact us if you need to increase these storage limits. - - :param file: The File object (not file name) to be uploaded. 
- To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - :param purpose: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( - purpose=purpose, - file=utils.get_pydantic_model(file, models.File), - ) - - req = self._build_request_async( - method="POST", - path="/v1/files", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.FilesAPIRoutesUploadFileMultiPartBodyParams, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - 
operation_id="files_api_routes_upload_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UploadFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - include_total: Optional[bool] = True, - sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, - source: OptionalNullable[List[models_source.Source]] = UNSET, - search: OptionalNullable[str] = UNSET, - purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, - mimetypes: OptionalNullable[List[str]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListFilesOut: - r"""List Files - - Returns a list of files that belong to the user's organization. 
- - :param page: - :param page_size: - :param include_total: - :param sample_type: - :param source: - :param search: - :param purpose: - :param mimetypes: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesListFilesRequest( - page=page, - page_size=page_size, - include_total=include_total, - sample_type=sample_type, - source=source, - search=search, - purpose=purpose, - mimetypes=mimetypes, - ) - - req = self._build_request( - method="GET", - path="/v1/files", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_list_files", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - 
retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListFilesOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - include_total: Optional[bool] = True, - sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, - source: OptionalNullable[List[models_source.Source]] = UNSET, - search: OptionalNullable[str] = UNSET, - purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, - mimetypes: OptionalNullable[List[str]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListFilesOut: - r"""List Files - - Returns a list of files that belong to the user's organization. - - :param page: - :param page_size: - :param include_total: - :param sample_type: - :param source: - :param search: - :param purpose: - :param mimetypes: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesListFilesRequest( - page=page, - page_size=page_size, - include_total=include_total, - sample_type=sample_type, - source=source, - search=search, - purpose=purpose, - mimetypes=mimetypes, - ) - - req = self._build_request_async( - method="GET", - path="/v1/files", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_list_files", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListFilesOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def retrieve( - self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveFileOut: - r"""Retrieve File - - Returns information about a specific file. - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesRetrieveFileRequest( - file_id=file_id, - ) - - req = self._build_request( - method="GET", - path="/v1/files/{file_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_retrieve_file", - 
oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.RetrieveFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def retrieve_async( - self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveFileOut: - r"""Retrieve File - - Returns information about a specific file. - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesRetrieveFileRequest( - file_id=file_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/files/{file_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_retrieve_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.RetrieveFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - 
self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteFileOut: - r"""Delete File - - Delete a file. - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesDeleteFileRequest( - file_id=file_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/files/{file_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_delete_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - 
retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteFileOut: - r"""Delete File - - Delete a file. - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesDeleteFileRequest( - file_id=file_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/files/{file_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_delete_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteFileOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def download( - 
self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Response: - r"""Download File - - Download a file - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesDownloadFileRequest( - file_id=file_id, - ) - - req = self._build_request( - method="GET", - path="/v1/files/{file_id}/content", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/octet-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_download_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - 
stream=True, - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/octet-stream"): - return http_res - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def download_async( - self, - *, - file_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> httpx.Response: - r"""Download File - - Download a file - - :param file_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesDownloadFileRequest( - file_id=file_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/files/{file_id}/content", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/octet-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_download_file", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/octet-stream"): - return http_res - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected 
response received", http_res, http_res_text) - - def get_signed_url( - self, - *, - file_id: str, - expiry: Optional[int] = 24, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FileSignedURL: - r"""Get Signed Url - - :param file_id: - :param expiry: Number of hours before the url becomes invalid. Defaults to 24h - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesGetSignedURLRequest( - file_id=file_id, - expiry=expiry, - ) - - req = self._build_request( - method="GET", - path="/v1/files/{file_id}/url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_get_signed_url", - 
oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FileSignedURL, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_signed_url_async( - self, - *, - file_id: str, - expiry: Optional[int] = 24, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FileSignedURL: - r"""Get Signed Url - - :param file_id: - :param expiry: Number of hours before the url becomes invalid. Defaults to 24h - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FilesAPIRoutesGetSignedURLRequest( - file_id=file_id, - expiry=expiry, - ) - - req = self._build_request_async( - method="GET", - path="/v1/files/{file_id}/url", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="files_api_routes_get_signed_url", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FileSignedURL, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) 
diff --git a/src/mistralai/fim.py b/src/mistralai/fim.py deleted file mode 100644 index 53109c70..00000000 --- a/src/mistralai/fim.py +++ /dev/null @@ -1,545 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - fimcompletionrequest as models_fimcompletionrequest, - fimcompletionstreamrequest as models_fimcompletionstreamrequest, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, Mapping, Optional, Union - - -class Fim(BaseSDK): - r"""Fill-in-the-middle API.""" - - def complete( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_fimcompletionrequest.FIMCompletionRequestStop, - models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FIMCompletionResponse: - r"""Fim Completion - - FIM completion. - - :param model: ID of the model with FIM to use. - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. 
Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request( - method="POST", - path="/v1/fim/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", 
"4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FIMCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def complete_async( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = False, - stop: Optional[ - Union[ - models_fimcompletionrequest.FIMCompletionRequestStop, - models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FIMCompletionResponse: - r"""Fim Completion - - FIM completion. - - :param model: ID of the model with FIM to use. - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fim/completions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="fim_completion_v1_fim_completions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FIMCompletionResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = 
unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def stream( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.CompletionEvent]: - r"""Stream fim completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model with FIM to use. - :param prompt: The text/code to complete. 
- :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request( - method="POST", - path="/v1/fim/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_fim", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if 
utils.match_response(http_res, "422", "application/json"): - http_res_text = utils.stream_to_text(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def stream_async( - self, - *, - model: str, - prompt: str, - temperature: OptionalNullable[float] = UNSET, - top_p: Optional[float] = 1, - max_tokens: OptionalNullable[int] = UNSET, - stream: Optional[bool] = True, - stop: Optional[ - Union[ - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, - ] - ] = None, - random_seed: OptionalNullable[int] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - min_tokens: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: - r"""Stream fim completion - - Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. 
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. - - :param model: ID of the model with FIM to use. - :param prompt: The text/code to complete. - :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. - :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. - :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. - :param stream: - :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array - :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. - :param metadata: - :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. - :param min_tokens: The minimum number of tokens to generate in the completion. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.FIMCompletionStreamRequest( - model=model, - temperature=temperature, - top_p=top_p, - max_tokens=max_tokens, - stream=stream, - stop=stop, - random_seed=random_seed, - metadata=metadata, - prompt=prompt, - suffix=suffix, - min_tokens=min_tokens, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fim/completions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.FIMCompletionStreamRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="stream_fim", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - 
error_status_codes=["422", "4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), - sentinel="[DONE]", - client_ref=self, - ) - if utils.match_response(http_res, "422", "application/json"): - http_res_text = await utils.stream_to_text_async(http_res) - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text - ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/fine_tuning.py b/src/mistralai/fine_tuning.py deleted file mode 100644 index 8ed5788a..00000000 --- a/src/mistralai/fine_tuning.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai.jobs import Jobs -from typing import Optional - - -class FineTuning(BaseSDK): - jobs: Jobs - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.jobs = Jobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py deleted file mode 100644 index 89560b56..00000000 --- a/src/mistralai/httpclient.py +++ /dev/null @@ -1,125 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -# pyright: reportReturnType = false -import asyncio -from typing_extensions import Protocol, runtime_checkable -import httpx -from typing import Any, Optional, Union - - -@runtime_checkable -class HttpClient(Protocol): - def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - pass - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - pass 
- - def close(self) -> None: - pass - - -@runtime_checkable -class AsyncHttpClient(Protocol): - async def send( - self, - request: httpx.Request, - *, - stream: bool = False, - auth: Union[ - httpx._types.AuthTypes, httpx._client.UseClientDefault, None - ] = httpx.USE_CLIENT_DEFAULT, - follow_redirects: Union[ - bool, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - ) -> httpx.Response: - pass - - def build_request( - self, - method: str, - url: httpx._types.URLTypes, - *, - content: Optional[httpx._types.RequestContent] = None, - data: Optional[httpx._types.RequestData] = None, - files: Optional[httpx._types.RequestFiles] = None, - json: Optional[Any] = None, - params: Optional[httpx._types.QueryParamTypes] = None, - headers: Optional[httpx._types.HeaderTypes] = None, - cookies: Optional[httpx._types.CookieTypes] = None, - timeout: Union[ - httpx._types.TimeoutTypes, httpx._client.UseClientDefault - ] = httpx.USE_CLIENT_DEFAULT, - extensions: Optional[httpx._types.RequestExtensions] = None, - ) -> httpx.Request: - pass - - async def aclose(self) -> None: - pass - - -class ClientOwner(Protocol): - client: Union[HttpClient, None] - async_client: Union[AsyncHttpClient, None] - - -def close_clients( - owner: ClientOwner, - sync_client: Union[HttpClient, None], - sync_client_supplied: bool, - async_client: Union[AsyncHttpClient, None], - async_client_supplied: bool, -) -> None: - """ - A finalizer function that is meant to be used with weakref.finalize to close - httpx clients used by an SDK so that underlying resources can be garbage - collected. - """ - - # Unset the client/async_client properties so there are no more references - # to them from the owning SDK instance and they can be reaped. 
- owner.client = None - owner.async_client = None - if sync_client is not None and not sync_client_supplied: - try: - sync_client.close() - except Exception: - pass - - if async_client is not None and not async_client_supplied: - try: - loop = asyncio.get_running_loop() - asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) - except RuntimeError: - try: - asyncio.run(async_client.aclose()) - except RuntimeError: - # best effort - pass diff --git a/src/mistralai/jobs.py b/src/mistralai/jobs.py deleted file mode 100644 index df8ae4d3..00000000 --- a/src/mistralai/jobs.py +++ /dev/null @@ -1,1067 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from datetime import datetime -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - classifiertargetin as models_classifiertargetin, - finetuneablemodeltype as models_finetuneablemodeltype, - jobin as models_jobin, - jobs_api_routes_fine_tuning_get_fine_tuning_jobsop as models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop, - trainingfile as models_trainingfile, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import List, Mapping, Optional, Union - - -class Jobs(BaseSDK): - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_before: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[ - models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus - ] = UNSET, - wandb_project: OptionalNullable[str] = UNSET, - wandb_name: OptionalNullable[str] = UNSET, - suffix: OptionalNullable[str] = UNSET, - retries: 
OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsOut: - r"""Get Fine Tuning Jobs - - Get a list of fine-tuning jobs for your organization and user. - - :param page: The page number of the results to be returned. - :param page_size: The number of items to return per page. - :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. - :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. - :param created_before: - :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. - :param status: The current job state to filter on. When set, the other results are not displayed. - :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. - :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. - :param suffix: The model suffix to filter on. When set, the other results are not displayed. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( - page=page, - page_size=page_size, - model=model, - created_after=created_after, - created_before=created_before, - created_by_me=created_by_me, - status=status, - wandb_project=wandb_project, - wandb_name=wandb_name, - suffix=suffix, - ) - - req = self._build_request( - method="GET", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.JobsOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_before: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[ - models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus - ] = UNSET, - wandb_project: OptionalNullable[str] = UNSET, - wandb_name: OptionalNullable[str] = UNSET, - suffix: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsOut: - r"""Get Fine Tuning Jobs - - Get a list of fine-tuning jobs for your organization and user. - - :param page: The page number of the results to be returned. - :param page_size: The number of items to return per page. - :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. - :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. - :param created_before: - :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. - :param status: The current job state to filter on. When set, the other results are not displayed. - :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. - :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. - :param suffix: The model suffix to filter on. When set, the other results are not displayed. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( - page=page, - page_size=page_size, - model=model, - created_after=created_after, - created_before=created_before, - created_by_me=created_by_me, - status=status, - wandb_project=wandb_project, - wandb_name=wandb_name, - suffix=suffix, - ) - - req = self._build_request_async( - method="GET", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if 
utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.JobsOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def create( - self, - *, - model: str, - hyperparameters: Union[ - models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict - ], - training_files: Optional[ - Union[ - List[models_trainingfile.TrainingFile], - List[models_trainingfile.TrainingFileTypedDict], - ] - ] = None, - validation_files: OptionalNullable[List[str]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[ - Union[ - List[models_jobin.JobInIntegrations], - List[models_jobin.JobInIntegrationsTypedDict], - ] - ] = UNSET, - auto_start: Optional[bool] = None, - invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[ - models_finetuneablemodeltype.FineTuneableModelType - ] = UNSET, - repositories: OptionalNullable[ - Union[ - List[models_jobin.JobInRepositories], - List[models_jobin.JobInRepositoriesTypedDict], - ] - ] = UNSET, - classifier_targets: OptionalNullable[ - Union[ - List[models_classifiertargetin.ClassifierTargetIn], - List[models_classifiertargetin.ClassifierTargetInTypedDict], - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: - r"""Create Fine Tuning Job - - Create a new fine-tuning job, it will be queued for processing. 
- - :param model: The name of the model to fine-tune. - :param hyperparameters: - :param training_files: - :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. - :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` - :param integrations: A list of integrations to enable for your fine-tuning job. - :param auto_start: This field will be required in a future release. - :param invalid_sample_skip_percentage: - :param job_type: - :param repositories: - :param classifier_targets: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobIn( - model=model, - training_files=utils.get_pydantic_model( - training_files, Optional[List[models.TrainingFile]] - ), - validation_files=validation_files, - suffix=suffix, - integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegrations]] - ), - auto_start=auto_start, - invalid_sample_skip_percentage=invalid_sample_skip_percentage, - job_type=job_type, - hyperparameters=utils.get_pydantic_model( - hyperparameters, models.Hyperparameters - ), - repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepositories]] - ), - classifier_targets=utils.get_pydantic_model( - classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] - ), - ) - - req = self._build_request( - method="POST", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.JobIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", - 
oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_async( - self, - *, - model: str, - hyperparameters: Union[ - models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict - ], - training_files: Optional[ - Union[ - List[models_trainingfile.TrainingFile], - List[models_trainingfile.TrainingFileTypedDict], - ] - ] = None, - validation_files: OptionalNullable[List[str]] = UNSET, - suffix: OptionalNullable[str] = UNSET, - integrations: OptionalNullable[ - Union[ - List[models_jobin.JobInIntegrations], - List[models_jobin.JobInIntegrationsTypedDict], - ] - ] = UNSET, - auto_start: Optional[bool] = None, - invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[ - models_finetuneablemodeltype.FineTuneableModelType - ] = UNSET, - repositories: OptionalNullable[ - Union[ - List[models_jobin.JobInRepositories], - List[models_jobin.JobInRepositoriesTypedDict], - ] - ] = UNSET, - classifier_targets: OptionalNullable[ - Union[ - List[models_classifiertargetin.ClassifierTargetIn], - List[models_classifiertargetin.ClassifierTargetInTypedDict], - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: 
Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: - r"""Create Fine Tuning Job - - Create a new fine-tuning job, it will be queued for processing. - - :param model: The name of the model to fine-tune. - :param hyperparameters: - :param training_files: - :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. - :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` - :param integrations: A list of integrations to enable for your fine-tuning job. - :param auto_start: This field will be required in a future release. - :param invalid_sample_skip_percentage: - :param job_type: - :param repositories: - :param classifier_targets: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobIn( - model=model, - training_files=utils.get_pydantic_model( - training_files, Optional[List[models.TrainingFile]] - ), - validation_files=validation_files, - suffix=suffix, - integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegrations]] - ), - auto_start=auto_start, - invalid_sample_skip_percentage=invalid_sample_skip_percentage, - job_type=job_type, - hyperparameters=utils.get_pydantic_model( - hyperparameters, models.Hyperparameters - ), - repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepositories]] - ), - classifier_targets=utils.get_pydantic_model( - classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] - ), - ) - - req = self._build_request_async( - method="POST", - path="/v1/fine_tuning/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.JobIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - 
operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: - r"""Get Fine Tuning Job - - Get a fine-tuned job details by its UUID. - - :param job_id: The ID of the job to analyse. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="GET", - path="/v1/fine_tuning/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response 
received", http_res) - - async def get_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: - r"""Get Fine Tuning Job - - Get a fine-tuned job details by its UUID. - - :param job_id: The ID of the job to analyse. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/fine_tuning/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - 
operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def cancel( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: - r"""Cancel Fine Tuning Job - - Request the cancellation of a fine tuning job. - - :param job_id: The ID of the job to cancel. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected 
response received", http_res) - - async def cancel_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: - r"""Cancel Fine Tuning Job - - Request the cancellation of a fine tuning job. - - :param job_id: The ID of the job to cancel. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - 
operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def start( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: - r"""Start Fine Tuning Job - - Request the start of a validated fine tuning job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/start", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected 
response received", http_res) - - async def start_async( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: - r"""Start Fine Tuning Job - - Request the start of a validated fine tuning job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fine_tuning/jobs/{job_id}/start", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - 
operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/libraries.py b/src/mistralai/libraries.py deleted file mode 100644 index 32648937..00000000 --- a/src/mistralai/libraries.py +++ /dev/null @@ -1,946 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .sdkconfiguration import SDKConfiguration -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.accesses import Accesses -from mistralai.documents import Documents -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional - - -class Libraries(BaseSDK): - r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" - - documents: Documents - r"""(beta) Libraries API - manage documents in a library.""" - accesses: Accesses - r"""(beta) Libraries API - manage access to a library.""" - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.documents = Documents(self.sdk_configuration, parent_ref=self.parent_ref) - self.accesses = Accesses(self.sdk_configuration, parent_ref=self.parent_ref) - - def list( - self, - *, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListLibraryOut: - r"""List all libraries you have access to. - - List all libraries that you have created or have been shared with you. - - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - req = self._build_request( - method="GET", - path="/v1/libraries", - base_url=base_url, - url_variables=url_variables, - request=None, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListLibraryOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = 
None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListLibraryOut: - r"""List all libraries you have access to. - - List all libraries that you have created or have been shared with you. - - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - req = self._build_request_async( - method="GET", - path="/v1/libraries", - base_url=base_url, - url_variables=url_variables, - request=None, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_list_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListLibraryOut, http_res) - if 
utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def create( - self, - *, - name: str, - description: OptionalNullable[str] = UNSET, - chunk_size: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Create a new Library. - - Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. - - :param name: - :param description: - :param chunk_size: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibraryIn( - name=name, - description=description, - chunk_size=chunk_size, - ) - - req = self._build_request( - method="POST", - path="/v1/libraries", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.LibraryIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_create_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "201", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - 
raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_async( - self, - *, - name: str, - description: OptionalNullable[str] = UNSET, - chunk_size: OptionalNullable[int] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Create a new Library. - - Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. - - :param name: - :param description: - :param chunk_size: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibraryIn( - name=name, - description=description, - chunk_size=chunk_size, - ) - - req = self._build_request_async( - method="POST", - path="/v1/libraries", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.LibraryIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_create_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "201", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Detailed information about a specific Library. - - Given a library id, details information about that Library. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesGetV1Request( - library_id=library_id, - ) - - req = self._build_request( - method="GET", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_get_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_async( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Detailed information about a specific Library. - - Given a library id, details information about that Library. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesGetV1Request( - library_id=library_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - 
config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_get_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Delete a library and all of it's document. - - Given a library id, deletes it together with all documents that have been uploaded to that library. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDeleteV1Request( - library_id=library_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - library_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Delete a library and all of it's document. - - Given a library id, deletes it together with all documents that have been uploaded to that library. - - :param library_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesDeleteV1Request( - library_id=library_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res 
= await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_delete_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update( - self, - *, - library_id: str, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Update a library. - - Given a library id, you can update the name and description. - - :param library_id: - :param name: - :param description: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesUpdateV1Request( - library_id=library_id, - library_in_update=models.LibraryInUpdate( - name=name, - description=description, - ), - ) - - req = self._build_request( - method="PUT", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.library_in_update, False, False, "json", models.LibraryInUpdate - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_update_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if 
utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_async( - self, - *, - library_id: str, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: - r"""Update a library. - - Given a library id, you can update the name and description. - - :param library_id: - :param name: - :param description: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.LibrariesUpdateV1Request( - library_id=library_id, - library_in_update=models.LibraryInUpdate( - name=name, - description=description, - ), - ) - - req = self._build_request_async( - method="PUT", - path="/v1/libraries/{library_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.library_in_update, False, False, "json", models.LibraryInUpdate - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="libraries_update_v1", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise 
models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/mistral_agents.py b/src/mistralai/mistral_agents.py deleted file mode 100644 index 7fb0ce25..00000000 --- a/src/mistralai/mistral_agents.py +++ /dev/null @@ -1,2080 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - agentcreationrequest as models_agentcreationrequest, - agents_api_v1_agents_getop as models_agents_api_v1_agents_getop, - agentupdaterequest as models_agentupdaterequest, - completionargs as models_completionargs, - requestsource as models_requestsource, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union - - -class MistralAgents(BaseSDK): - r"""(beta) Agents API""" - - def create( - self, - *, - model: str, - name: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_agentcreationrequest.AgentCreationRequestTools], - List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - description: OptionalNullable[str] = UNSET, - handoffs: 
OptionalNullable[List[str]] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Create a agent that can be used within a conversation. - - Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. - - :param model: - :param name: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: White-listed arguments from the completion API - :param description: - :param handoffs: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentCreationRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - metadata=metadata, - ) - - req = self._build_request( - method="POST", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentCreationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_create", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, 
"422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_async( - self, - *, - model: str, - name: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_agentcreationrequest.AgentCreationRequestTools], - List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - description: OptionalNullable[str] = UNSET, - handoffs: OptionalNullable[List[str]] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Create a agent that can be used within a conversation. - - Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. - - :param model: - :param name: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. 
- :param completion_args: White-listed arguments from the completion API - :param description: - :param handoffs: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentCreationRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - metadata=metadata, - ) - - req = self._build_request_async( - method="POST", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentCreationRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - 
base_url=base_url or "", - operation_id="agents_api_v1_agents_create", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 20, - deployment_chat: OptionalNullable[bool] = UNSET, - sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, - name: OptionalNullable[str] = UNSET, - id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.Agent]: - r"""List agent entities. - - Retrieve a list of agent entities sorted by creation time. 
- - :param page: Page number (0-indexed) - :param page_size: Number of agents per page - :param deployment_chat: - :param sources: - :param name: - :param id: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListRequest( - page=page, - page_size=page_size, - deployment_chat=deployment_chat, - sources=sources, - name=name, - id=id, - metadata=metadata, - ) - - req = self._build_request( - method="GET", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) 
- - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.Agent], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 20, - deployment_chat: OptionalNullable[bool] = UNSET, - sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, - name: OptionalNullable[str] = UNSET, - id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.Agent]: - r"""List agent entities. - - Retrieve a list of agent entities sorted by creation time. - - :param page: Page number (0-indexed) - :param page_size: Number of agents per page - :param deployment_chat: - :param sources: - :param name: - :param id: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListRequest( - page=page, - page_size=page_size, - deployment_chat=deployment_chat, - sources=sources, - name=name, - id=id, - metadata=metadata, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.Agent], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) 
- raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - agent_id: str, - agent_version: OptionalNullable[ - Union[ - models_agents_api_v1_agents_getop.QueryParamAgentVersion, - models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Retrieve an agent entity. - - Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. - - :param agent_id: - :param agent_version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsGetRequest( - agent_id=agent_id, - agent_version=agent_version, - ) - - req = self._build_request( - method="GET", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, 
"5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_async( - self, - *, - agent_id: str, - agent_version: OptionalNullable[ - Union[ - models_agents_api_v1_agents_getop.QueryParamAgentVersion, - models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, - ] - ] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Retrieve an agent entity. - - Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. - - :param agent_id: - :param agent_version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsGetRequest( - agent_id=agent_id, - agent_version=agent_version, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if 
utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update( - self, - *, - agent_id: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_agentupdaterequest.AgentUpdateRequestTools], - List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - model: OptionalNullable[str] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - handoffs: OptionalNullable[List[str]] = UNSET, - deployment_chat: OptionalNullable[bool] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent entity. - - Update an agent attributes and create a new version. - - :param agent_id: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. - :param completion_args: White-listed arguments from the completion API - :param model: - :param name: - :param description: - :param handoffs: - :param deployment_chat: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateRequest( - agent_id=agent_id, - agent_update_request=models.AgentUpdateRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - deployment_chat=deployment_chat, - metadata=metadata, - ), - ) - - req = self._build_request( - method="PATCH", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.agent_update_request, - False, - False, - "json", - models.AgentUpdateRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None 
- if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_async( - self, - *, - agent_id: str, - instructions: OptionalNullable[str] = UNSET, - tools: Optional[ - Union[ - List[models_agentupdaterequest.AgentUpdateRequestTools], - List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], - ] - ] = None, - completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] - ] = None, - model: OptionalNullable[str] = UNSET, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - handoffs: OptionalNullable[List[str]] = UNSET, - deployment_chat: OptionalNullable[bool] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent entity. - - Update an agent attributes and create a new version. - - :param agent_id: - :param instructions: Instruction prompt the model will follow during the conversation. - :param tools: List of tools which are available to the model during the conversation. 
- :param completion_args: White-listed arguments from the completion API - :param model: - :param name: - :param description: - :param handoffs: - :param deployment_chat: - :param metadata: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateRequest( - agent_id=agent_id, - agent_update_request=models.AgentUpdateRequest( - instructions=instructions, - tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTools]] - ), - completion_args=utils.get_pydantic_model( - completion_args, Optional[models.CompletionArgs] - ), - model=model, - name=name, - description=description, - handoffs=handoffs, - deployment_chat=deployment_chat, - metadata=metadata, - ), - ) - - req = self._build_request_async( - method="PATCH", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.agent_update_request, - False, - False, - "json", - models.AgentUpdateRequest, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None 
- if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - agent_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete an agent entity. - - :param agent_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsDeleteRequest( - agent_id=agent_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_delete", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - agent_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ): - r"""Delete an agent entity. - - :param agent_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsDeleteRequest( - agent_id=agent_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/agents/{agent_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_delete", - oauth2_scopes=None, - 
security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "204", "*"): - return - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update_version( - self, - *, - agent_id: str, - version: int, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent version. - - Switch the version of an agent. - - :param agent_id: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateVersionRequest( - agent_id=agent_id, - version=version, - ) - - req = self._build_request( - method="PATCH", - path="/v1/agents/{agent_id}/version", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update_version", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if 
utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_version_async( - self, - *, - agent_id: str, - version: int, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Update an agent version. - - Switch the version of an agent. - - :param agent_id: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsUpdateVersionRequest( - agent_id=agent_id, - version=version, - ) - - req = self._build_request_async( - method="PATCH", - path="/v1/agents/{agent_id}/version", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", 
"503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_update_version", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def list_versions( - self, - *, - agent_id: str, - page: Optional[int] = 0, - page_size: Optional[int] = 20, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.Agent]: - r"""List all versions of an agent. - - Retrieve all versions for a specific agent with full agent context. Supports pagination. 
- - :param agent_id: - :param page: Page number (0-indexed) - :param page_size: Number of versions per page - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListVersionsRequest( - agent_id=agent_id, - page=page, - page_size=page_size, - ) - - req = self._build_request( - method="GET", - path="/v1/agents/{agent_id}/versions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list_versions", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return 
unmarshal_json_response(List[models.Agent], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_versions_async( - self, - *, - agent_id: str, - page: Optional[int] = 0, - page_size: Optional[int] = 20, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.Agent]: - r"""List all versions of an agent. - - Retrieve all versions for a specific agent with full agent context. Supports pagination. - - :param agent_id: - :param page: Page number (0-indexed) - :param page_size: Number of versions per page - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListVersionsRequest( - agent_id=agent_id, - page=page, - page_size=page_size, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents/{agent_id}/versions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list_versions", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.Agent], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error 
occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get_version( - self, - *, - agent_id: str, - version: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Retrieve a specific version of an agent. - - Get a specific agent version by version number. - - :param agent_id: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsGetVersionRequest( - agent_id=agent_id, - version=version, - ) - - req = self._build_request( - method="GET", - path="/v1/agents/{agent_id}/versions/{version}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_get_version", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if 
utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_version_async( - self, - *, - agent_id: str, - version: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.Agent: - r"""Retrieve a specific version of an agent. - - Get a specific agent version by version number. - - :param agent_id: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsGetVersionRequest( - agent_id=agent_id, - version=version, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents/{agent_id}/versions/{version}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = 
(retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_get_version", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.Agent, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def create_version_alias( - self, - *, - agent_id: str, - alias: str, - version: int, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentAliasResponse: - r"""Create or update an agent version alias. - - Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. 
- - :param agent_id: - :param alias: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( - agent_id=agent_id, - alias=alias, - version=version, - ) - - req = self._build_request( - method="PUT", - path="/v1/agents/{agent_id}/aliases", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_create_or_update_alias", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return 
unmarshal_json_response(models.AgentAliasResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_version_alias_async( - self, - *, - agent_id: str, - alias: str, - version: int, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentAliasResponse: - r"""Create or update an agent version alias. - - Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. - - :param agent_id: - :param alias: - :param version: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( - agent_id=agent_id, - alias=alias, - version=version, - ) - - req = self._build_request_async( - method="PUT", - path="/v1/agents/{agent_id}/aliases", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_create_or_update_alias", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.AgentAliasResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def list_version_aliases( - self, - *, - agent_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.AgentAliasResponse]: - r"""List all aliases for an agent. - - Retrieve all version aliases for a specific agent. - - :param agent_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListVersionAliasesRequest( - agent_id=agent_id, - ) - - req = self._build_request( - method="GET", - path="/v1/agents/{agent_id}/aliases", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list_version_aliases", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.AgentAliasResponse], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if 
utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_version_aliases_async( - self, - *, - agent_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.AgentAliasResponse]: - r"""List all aliases for an agent. - - Retrieve all version aliases for a specific agent. - - :param agent_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AgentsAPIV1AgentsListVersionAliasesRequest( - agent_id=agent_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/agents/{agent_id}/aliases", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", 
"503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="agents_api_v1_agents_list_version_aliases", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.AgentAliasResponse], http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/mistral_jobs.py b/src/mistralai/mistral_jobs.py deleted file mode 100644 index d1aeec8a..00000000 --- a/src/mistralai/mistral_jobs.py +++ /dev/null @@ -1,799 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from datetime import datetime -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - apiendpoint as models_apiendpoint, - batchjobstatus as models_batchjobstatus, - batchrequest as models_batchrequest, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union - - -class MistralJobs(BaseSDK): - def list( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobsOut: - r"""Get Batch Jobs - - Get a list of batch jobs for your organization and user. - - :param page: - :param page_size: - :param model: - :param agent_id: - :param metadata: - :param created_after: - :param created_by_me: - :param status: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobsRequest( - page=page, - page_size=page_size, - model=model, - agent_id=agent_id, - metadata=metadata, - created_after=created_after, - created_by_me=created_by_me, - status=status, - ) - - req = self._build_request( - method="GET", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_jobs", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobsOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", 
http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - page: Optional[int] = 0, - page_size: Optional[int] = 100, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, Any]] = UNSET, - created_after: OptionalNullable[datetime] = UNSET, - created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobsOut: - r"""Get Batch Jobs - - Get a list of batch jobs for your organization and user. - - :param page: - :param page_size: - :param model: - :param agent_id: - :param metadata: - :param created_after: - :param created_by_me: - :param status: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobsRequest( - page=page, - page_size=page_size, - model=model, - agent_id=agent_id, - metadata=metadata, - created_after=created_after, - created_by_me=created_by_me, - status=status, - ) - - req = self._build_request_async( - method="GET", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_jobs", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobsOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - 
raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def create( - self, - *, - endpoint: models_apiendpoint.APIEndpoint, - input_files: OptionalNullable[List[str]] = UNSET, - requests: OptionalNullable[ - Union[ - List[models_batchrequest.BatchRequest], - List[models_batchrequest.BatchRequestTypedDict], - ] - ] = UNSET, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, str]] = UNSET, - timeout_hours: Optional[int] = 24, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Create Batch Job - - Create a new batch job, it will be queued for processing. - - :param endpoint: - :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` - :param requests: - :param model: The model to be used for batch inference. - :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. - :param metadata: The metadata of your choice to be associated with the batch inference job. - :param timeout_hours: The timeout in hours for the batch inference job. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.BatchJobIn( - input_files=input_files, - requests=utils.get_pydantic_model( - requests, OptionalNullable[List[models.BatchRequest]] - ), - endpoint=endpoint, - model=model, - agent_id=agent_id, - metadata=metadata, - timeout_hours=timeout_hours, - ) - - req = self._build_request( - method="POST", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.BatchJobIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_create_batch_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - 
retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def create_async( - self, - *, - endpoint: models_apiendpoint.APIEndpoint, - input_files: OptionalNullable[List[str]] = UNSET, - requests: OptionalNullable[ - Union[ - List[models_batchrequest.BatchRequest], - List[models_batchrequest.BatchRequestTypedDict], - ] - ] = UNSET, - model: OptionalNullable[str] = UNSET, - agent_id: OptionalNullable[str] = UNSET, - metadata: OptionalNullable[Dict[str, str]] = UNSET, - timeout_hours: Optional[int] = 24, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Create Batch Job - - Create a new batch job, it will be queued for processing. - - :param endpoint: - :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` - :param requests: - :param model: The model to be used for batch inference. 
- :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. - :param metadata: The metadata of your choice to be associated with the batch inference job. - :param timeout_hours: The timeout in hours for the batch inference job. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.BatchJobIn( - input_files=input_files, - requests=utils.get_pydantic_model( - requests, OptionalNullable[List[models.BatchRequest]] - ), - endpoint=endpoint, - model=model, - agent_id=agent_id, - metadata=metadata, - timeout_hours=timeout_hours, - ) - - req = self._build_request_async( - method="POST", - path="/v1/batch/jobs", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.BatchJobIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await 
self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_create_batch_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def get( - self, - *, - job_id: str, - inline: OptionalNullable[bool] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Get Batch Job - - Get a batch job details by its UUID. - - Args: - inline: If True, return results inline in the response. - - :param job_id: - :param inline: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobRequest( - job_id=job_id, - inline=inline, - ) - - req = self._build_request( - method="GET", - path="/v1/batch/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def get_async( - self, - *, - 
job_id: str, - inline: OptionalNullable[bool] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Get Batch Job - - Get a batch job details by its UUID. - - Args: - inline: If True, return results inline in the response. - - :param job_id: - :param inline: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchGetBatchJobRequest( - job_id=job_id, - inline=inline, - ) - - req = self._build_request_async( - method="GET", - path="/v1/batch/jobs/{job_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_job", - 
oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def cancel( - self, - *, - job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Cancel Batch Job - - Request the cancellation of a batch job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchCancelBatchJobRequest( - job_id=job_id, - ) - - req = self._build_request( - method="POST", - path="/v1/batch/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_cancel_batch_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def cancel_async( - self, - *, - 
job_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: - r"""Cancel Batch Job - - Request the cancellation of a batch job. - - :param job_id: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesBatchCancelBatchJobRequest( - job_id=job_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/batch/jobs/{job_id}/cancel", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_batch_cancel_batch_job", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - 
error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py deleted file mode 100644 index 23e65222..00000000 --- a/src/mistralai/models/__init__.py +++ /dev/null @@ -1,2531 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .mistralerror import MistralError -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys - -if TYPE_CHECKING: - from .agent import ( - Agent, - AgentObject, - AgentTools, - AgentToolsTypedDict, - AgentTypedDict, - ) - from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict - from .agentconversation import ( - AgentConversation, - AgentConversationAgentVersion, - AgentConversationAgentVersionTypedDict, - AgentConversationObject, - AgentConversationTypedDict, - ) - from .agentcreationrequest import ( - AgentCreationRequest, - AgentCreationRequestTools, - AgentCreationRequestToolsTypedDict, - AgentCreationRequestTypedDict, - ) - from .agenthandoffdoneevent import ( - AgentHandoffDoneEvent, - AgentHandoffDoneEventType, - AgentHandoffDoneEventTypedDict, - ) - from .agenthandoffentry import ( - AgentHandoffEntry, - AgentHandoffEntryObject, - AgentHandoffEntryType, - AgentHandoffEntryTypedDict, - ) - from .agenthandoffstartedevent import ( - AgentHandoffStartedEvent, - 
AgentHandoffStartedEventType, - AgentHandoffStartedEventTypedDict, - ) - from .agents_api_v1_agents_create_or_update_aliasop import ( - AgentsAPIV1AgentsCreateOrUpdateAliasRequest, - AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, - ) - from .agents_api_v1_agents_deleteop import ( - AgentsAPIV1AgentsDeleteRequest, - AgentsAPIV1AgentsDeleteRequestTypedDict, - ) - from .agents_api_v1_agents_get_versionop import ( - AgentsAPIV1AgentsGetVersionRequest, - AgentsAPIV1AgentsGetVersionRequestTypedDict, - ) - from .agents_api_v1_agents_getop import ( - AgentsAPIV1AgentsGetRequest, - AgentsAPIV1AgentsGetRequestTypedDict, - QueryParamAgentVersion, - QueryParamAgentVersionTypedDict, - ) - from .agents_api_v1_agents_list_version_aliasesop import ( - AgentsAPIV1AgentsListVersionAliasesRequest, - AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, - ) - from .agents_api_v1_agents_list_versionsop import ( - AgentsAPIV1AgentsListVersionsRequest, - AgentsAPIV1AgentsListVersionsRequestTypedDict, - ) - from .agents_api_v1_agents_listop import ( - AgentsAPIV1AgentsListRequest, - AgentsAPIV1AgentsListRequestTypedDict, - ) - from .agents_api_v1_agents_update_versionop import ( - AgentsAPIV1AgentsUpdateVersionRequest, - AgentsAPIV1AgentsUpdateVersionRequestTypedDict, - ) - from .agents_api_v1_agents_updateop import ( - AgentsAPIV1AgentsUpdateRequest, - AgentsAPIV1AgentsUpdateRequestTypedDict, - ) - from .agents_api_v1_conversations_append_streamop import ( - AgentsAPIV1ConversationsAppendStreamRequest, - AgentsAPIV1ConversationsAppendStreamRequestTypedDict, - ) - from .agents_api_v1_conversations_appendop import ( - AgentsAPIV1ConversationsAppendRequest, - AgentsAPIV1ConversationsAppendRequestTypedDict, - ) - from .agents_api_v1_conversations_deleteop import ( - AgentsAPIV1ConversationsDeleteRequest, - AgentsAPIV1ConversationsDeleteRequestTypedDict, - ) - from .agents_api_v1_conversations_getop import ( - AgentsAPIV1ConversationsGetRequest, - 
AgentsAPIV1ConversationsGetRequestTypedDict, - AgentsAPIV1ConversationsGetResponseV1ConversationsGet, - AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict, - ) - from .agents_api_v1_conversations_historyop import ( - AgentsAPIV1ConversationsHistoryRequest, - AgentsAPIV1ConversationsHistoryRequestTypedDict, - ) - from .agents_api_v1_conversations_listop import ( - AgentsAPIV1ConversationsListRequest, - AgentsAPIV1ConversationsListRequestTypedDict, - ResponseBody, - ResponseBodyTypedDict, - ) - from .agents_api_v1_conversations_messagesop import ( - AgentsAPIV1ConversationsMessagesRequest, - AgentsAPIV1ConversationsMessagesRequestTypedDict, - ) - from .agents_api_v1_conversations_restart_streamop import ( - AgentsAPIV1ConversationsRestartStreamRequest, - AgentsAPIV1ConversationsRestartStreamRequestTypedDict, - ) - from .agents_api_v1_conversations_restartop import ( - AgentsAPIV1ConversationsRestartRequest, - AgentsAPIV1ConversationsRestartRequestTypedDict, - ) - from .agentscompletionrequest import ( - AgentsCompletionRequest, - AgentsCompletionRequestMessages, - AgentsCompletionRequestMessagesTypedDict, - AgentsCompletionRequestStop, - AgentsCompletionRequestStopTypedDict, - AgentsCompletionRequestToolChoice, - AgentsCompletionRequestToolChoiceTypedDict, - AgentsCompletionRequestTypedDict, - ) - from .agentscompletionstreamrequest import ( - AgentsCompletionStreamRequest, - AgentsCompletionStreamRequestMessages, - AgentsCompletionStreamRequestMessagesTypedDict, - AgentsCompletionStreamRequestStop, - AgentsCompletionStreamRequestStopTypedDict, - AgentsCompletionStreamRequestToolChoice, - AgentsCompletionStreamRequestToolChoiceTypedDict, - AgentsCompletionStreamRequestTypedDict, - ) - from .agentupdaterequest import ( - AgentUpdateRequest, - AgentUpdateRequestTools, - AgentUpdateRequestToolsTypedDict, - AgentUpdateRequestTypedDict, - ) - from .apiendpoint import APIEndpoint - from .archiveftmodelout import ( - ArchiveFTModelOut, - ArchiveFTModelOutObject, 
- ArchiveFTModelOutTypedDict, - ) - from .assistantmessage import ( - AssistantMessage, - AssistantMessageContent, - AssistantMessageContentTypedDict, - AssistantMessageRole, - AssistantMessageTypedDict, - ) - from .audiochunk import AudioChunk, AudioChunkType, AudioChunkTypedDict - from .audioencoding import AudioEncoding - from .audioformat import AudioFormat, AudioFormatTypedDict - from .audiotranscriptionrequest import ( - AudioTranscriptionRequest, - AudioTranscriptionRequestTypedDict, - ) - from .audiotranscriptionrequeststream import ( - AudioTranscriptionRequestStream, - AudioTranscriptionRequestStreamTypedDict, - ) - from .basemodelcard import BaseModelCard, BaseModelCardType, BaseModelCardTypedDict - from .batcherror import BatchError, BatchErrorTypedDict - from .batchjobin import BatchJobIn, BatchJobInTypedDict - from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict - from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict - from .batchjobstatus import BatchJobStatus - from .batchrequest import BatchRequest, BatchRequestTypedDict - from .builtinconnectors import BuiltInConnectors - from .chatclassificationrequest import ( - ChatClassificationRequest, - ChatClassificationRequestTypedDict, - ) - from .chatcompletionchoice import ( - ChatCompletionChoice, - ChatCompletionChoiceTypedDict, - FinishReason, - ) - from .chatcompletionrequest import ( - ChatCompletionRequest, - ChatCompletionRequestToolChoice, - ChatCompletionRequestToolChoiceTypedDict, - ChatCompletionRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, - ) - from .chatcompletionresponse import ( - ChatCompletionResponse, - ChatCompletionResponseTypedDict, - ) - from .chatcompletionstreamrequest import ( - ChatCompletionStreamRequest, - ChatCompletionStreamRequestMessages, - ChatCompletionStreamRequestMessagesTypedDict, - ChatCompletionStreamRequestStop, - ChatCompletionStreamRequestStopTypedDict, - 
ChatCompletionStreamRequestToolChoice, - ChatCompletionStreamRequestToolChoiceTypedDict, - ChatCompletionStreamRequestTypedDict, - ) - from .chatmoderationrequest import ( - ChatModerationRequest, - ChatModerationRequestInputs, - ChatModerationRequestInputsTypedDict, - ChatModerationRequestTypedDict, - One, - OneTypedDict, - Two, - TwoTypedDict, - ) - from .checkpointout import CheckpointOut, CheckpointOutTypedDict - from .classificationrequest import ( - ClassificationRequest, - ClassificationRequestInputs, - ClassificationRequestInputsTypedDict, - ClassificationRequestTypedDict, - ) - from .classificationresponse import ( - ClassificationResponse, - ClassificationResponseTypedDict, - ) - from .classificationtargetresult import ( - ClassificationTargetResult, - ClassificationTargetResultTypedDict, - ) - from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutIntegrations, - ClassifierDetailedJobOutIntegrationsTypedDict, - ClassifierDetailedJobOutJobType, - ClassifierDetailedJobOutObject, - ClassifierDetailedJobOutStatus, - ClassifierDetailedJobOutTypedDict, - ) - from .classifierftmodelout import ( - ClassifierFTModelOut, - ClassifierFTModelOutModelType, - ClassifierFTModelOutObject, - ClassifierFTModelOutTypedDict, - ) - from .classifierjobout import ( - ClassifierJobOut, - ClassifierJobOutIntegrations, - ClassifierJobOutIntegrationsTypedDict, - ClassifierJobOutJobType, - ClassifierJobOutObject, - ClassifierJobOutStatus, - ClassifierJobOutTypedDict, - ) - from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict - from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict - from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, - ) - from .classifiertrainingparametersin import ( - ClassifierTrainingParametersIn, - ClassifierTrainingParametersInTypedDict, - ) - from .codeinterpretertool import ( - CodeInterpreterTool, - 
CodeInterpreterToolType, - CodeInterpreterToolTypedDict, - ) - from .completionargs import CompletionArgs, CompletionArgsTypedDict - from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict - from .completionchunk import CompletionChunk, CompletionChunkTypedDict - from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutIntegrations, - CompletionDetailedJobOutIntegrationsTypedDict, - CompletionDetailedJobOutJobType, - CompletionDetailedJobOutObject, - CompletionDetailedJobOutRepositories, - CompletionDetailedJobOutRepositoriesTypedDict, - CompletionDetailedJobOutStatus, - CompletionDetailedJobOutTypedDict, - ) - from .completionevent import CompletionEvent, CompletionEventTypedDict - from .completionftmodelout import ( - CompletionFTModelOut, - CompletionFTModelOutObject, - CompletionFTModelOutTypedDict, - ModelType, - ) - from .completionjobout import ( - CompletionJobOut, - CompletionJobOutObject, - CompletionJobOutTypedDict, - Integrations, - IntegrationsTypedDict, - JobType, - Repositories, - RepositoriesTypedDict, - Status, - ) - from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceFinishReason, - CompletionResponseStreamChoiceTypedDict, - ) - from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, - ) - from .completiontrainingparametersin import ( - CompletionTrainingParametersIn, - CompletionTrainingParametersInTypedDict, - ) - from .contentchunk import ContentChunk, ContentChunkTypedDict - from .conversationappendrequest import ( - ConversationAppendRequest, - ConversationAppendRequestHandoffExecution, - ConversationAppendRequestTypedDict, - ) - from .conversationappendstreamrequest import ( - ConversationAppendStreamRequest, - ConversationAppendStreamRequestHandoffExecution, - ConversationAppendStreamRequestTypedDict, - ) - from .conversationevents import ( - ConversationEvents, 
- ConversationEventsData, - ConversationEventsDataTypedDict, - ConversationEventsTypedDict, - ) - from .conversationhistory import ( - ConversationHistory, - ConversationHistoryObject, - ConversationHistoryTypedDict, - Entries, - EntriesTypedDict, - ) - from .conversationinputs import ConversationInputs, ConversationInputsTypedDict - from .conversationmessages import ( - ConversationMessages, - ConversationMessagesObject, - ConversationMessagesTypedDict, - ) - from .conversationrequest import ( - AgentVersion, - AgentVersionTypedDict, - ConversationRequest, - ConversationRequestTypedDict, - HandoffExecution, - Tools, - ToolsTypedDict, - ) - from .conversationresponse import ( - ConversationResponse, - ConversationResponseObject, - ConversationResponseTypedDict, - Outputs, - OutputsTypedDict, - ) - from .conversationrestartrequest import ( - ConversationRestartRequest, - ConversationRestartRequestAgentVersion, - ConversationRestartRequestAgentVersionTypedDict, - ConversationRestartRequestHandoffExecution, - ConversationRestartRequestTypedDict, - ) - from .conversationrestartstreamrequest import ( - ConversationRestartStreamRequest, - ConversationRestartStreamRequestAgentVersion, - ConversationRestartStreamRequestAgentVersionTypedDict, - ConversationRestartStreamRequestHandoffExecution, - ConversationRestartStreamRequestTypedDict, - ) - from .conversationstreamrequest import ( - ConversationStreamRequest, - ConversationStreamRequestAgentVersion, - ConversationStreamRequestAgentVersionTypedDict, - ConversationStreamRequestHandoffExecution, - ConversationStreamRequestTools, - ConversationStreamRequestToolsTypedDict, - ConversationStreamRequestTypedDict, - ) - from .conversationusageinfo import ( - ConversationUsageInfo, - ConversationUsageInfoTypedDict, - ) - from .delete_model_v1_models_model_id_deleteop import ( - DeleteModelV1ModelsModelIDDeleteRequest, - DeleteModelV1ModelsModelIDDeleteRequestTypedDict, - ) - from .deletefileout import DeleteFileOut, 
DeleteFileOutTypedDict - from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict - from .deltamessage import ( - Content, - ContentTypedDict, - DeltaMessage, - DeltaMessageTypedDict, - ) - from .documentlibrarytool import ( - DocumentLibraryTool, - DocumentLibraryToolType, - DocumentLibraryToolTypedDict, - ) - from .documentout import DocumentOut, DocumentOutTypedDict - from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict - from .documentupdatein import ( - Attributes, - AttributesTypedDict, - DocumentUpdateIn, - DocumentUpdateInTypedDict, - ) - from .documenturlchunk import ( - DocumentURLChunk, - DocumentURLChunkType, - DocumentURLChunkTypedDict, - ) - from .embeddingdtype import EmbeddingDtype - from .embeddingrequest import ( - EmbeddingRequest, - EmbeddingRequestInputs, - EmbeddingRequestInputsTypedDict, - EmbeddingRequestTypedDict, - ) - from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict - from .embeddingresponsedata import ( - EmbeddingResponseData, - EmbeddingResponseDataTypedDict, - ) - from .encodingformat import EncodingFormat - from .entitytype import EntityType - from .eventout import EventOut, EventOutTypedDict - from .file import File, FileTypedDict - from .filechunk import FileChunk, FileChunkTypedDict - from .filepurpose import FilePurpose - from .files_api_routes_delete_fileop import ( - FilesAPIRoutesDeleteFileRequest, - FilesAPIRoutesDeleteFileRequestTypedDict, - ) - from .files_api_routes_download_fileop import ( - FilesAPIRoutesDownloadFileRequest, - FilesAPIRoutesDownloadFileRequestTypedDict, - ) - from .files_api_routes_get_signed_urlop import ( - FilesAPIRoutesGetSignedURLRequest, - FilesAPIRoutesGetSignedURLRequestTypedDict, - ) - from .files_api_routes_list_filesop import ( - FilesAPIRoutesListFilesRequest, - FilesAPIRoutesListFilesRequestTypedDict, - ) - from .files_api_routes_retrieve_fileop import ( - FilesAPIRoutesRetrieveFileRequest, - 
FilesAPIRoutesRetrieveFileRequestTypedDict, - ) - from .files_api_routes_upload_fileop import ( - FilesAPIRoutesUploadFileMultiPartBodyParams, - FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, - ) - from .fileschema import FileSchema, FileSchemaTypedDict - from .filesignedurl import FileSignedURL, FileSignedURLTypedDict - from .fimcompletionrequest import ( - FIMCompletionRequest, - FIMCompletionRequestStop, - FIMCompletionRequestStopTypedDict, - FIMCompletionRequestTypedDict, - ) - from .fimcompletionresponse import ( - FIMCompletionResponse, - FIMCompletionResponseTypedDict, - ) - from .fimcompletionstreamrequest import ( - FIMCompletionStreamRequest, - FIMCompletionStreamRequestStop, - FIMCompletionStreamRequestStopTypedDict, - FIMCompletionStreamRequestTypedDict, - ) - from .finetuneablemodeltype import FineTuneableModelType - from .ftclassifierlossfunction import FTClassifierLossFunction - from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, - ) - from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict - from .function import Function, FunctionTypedDict - from .functioncall import ( - Arguments, - ArgumentsTypedDict, - FunctionCall, - FunctionCallTypedDict, - ) - from .functioncallentry import ( - FunctionCallEntry, - FunctionCallEntryObject, - FunctionCallEntryType, - FunctionCallEntryTypedDict, - ) - from .functioncallentryarguments import ( - FunctionCallEntryArguments, - FunctionCallEntryArgumentsTypedDict, - ) - from .functioncallevent import ( - FunctionCallEvent, - FunctionCallEventType, - FunctionCallEventTypedDict, - ) - from .functionname import FunctionName, FunctionNameTypedDict - from .functionresultentry import ( - FunctionResultEntry, - FunctionResultEntryObject, - FunctionResultEntryType, - FunctionResultEntryTypedDict, - ) - from .functiontool import FunctionTool, FunctionToolType, FunctionToolTypedDict - from .githubrepositoryin import ( - GithubRepositoryIn, - 
GithubRepositoryInType, - GithubRepositoryInTypedDict, - ) - from .githubrepositoryout import ( - GithubRepositoryOut, - GithubRepositoryOutType, - GithubRepositoryOutTypedDict, - ) - from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData - from .imagegenerationtool import ( - ImageGenerationTool, - ImageGenerationToolType, - ImageGenerationToolTypedDict, - ) - from .imageurl import ImageURL, ImageURLTypedDict - from .imageurlchunk import ( - ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, - ImageURLChunkTypedDict, - ) - from .inputentries import InputEntries, InputEntriesTypedDict - from .inputs import ( - Inputs, - InputsTypedDict, - InstructRequestInputs, - InstructRequestInputsMessages, - InstructRequestInputsMessagesTypedDict, - InstructRequestInputsTypedDict, - ) - from .instructrequest import ( - InstructRequest, - InstructRequestMessages, - InstructRequestMessagesTypedDict, - InstructRequestTypedDict, - ) - from .jobin import ( - Hyperparameters, - HyperparametersTypedDict, - JobIn, - JobInIntegrations, - JobInIntegrationsTypedDict, - JobInRepositories, - JobInRepositoriesTypedDict, - JobInTypedDict, - ) - from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict - from .jobs_api_routes_batch_cancel_batch_jobop import ( - JobsAPIRoutesBatchCancelBatchJobRequest, - JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, - ) - from .jobs_api_routes_batch_get_batch_jobop import ( - JobsAPIRoutesBatchGetBatchJobRequest, - JobsAPIRoutesBatchGetBatchJobRequestTypedDict, - ) - from .jobs_api_routes_batch_get_batch_jobsop import ( - JobsAPIRoutesBatchGetBatchJobsRequest, - JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, - ) - from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, - JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, - ) - from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( - 
JobsAPIRoutesFineTuningCancelFineTuningJobRequest, - JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningCancelFineTuningJobResponse, - JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningCreateFineTuningJobResponse, - JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, - Response1, - Response1TypedDict, - ) - from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningGetFineTuningJobRequest, - JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningGetFineTuningJobResponse, - JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( - JobsAPIRoutesFineTuningGetFineTuningJobsRequest, - JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, - QueryParamStatus, - ) - from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningStartFineTuningJobRequest, - JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningStartFineTuningJobResponse, - JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, - JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, - ) - from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, - JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, - JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, - JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, - ) - from .jobsout import ( - JobsOut, - JobsOutData, - JobsOutDataTypedDict, - JobsOutObject, - JobsOutTypedDict, - ) - from .jsonschema import JSONSchema, JSONSchemaTypedDict - from .legacyjobmetadataout import ( - 
LegacyJobMetadataOut, - LegacyJobMetadataOutObject, - LegacyJobMetadataOutTypedDict, - ) - from .libraries_delete_v1op import ( - LibrariesDeleteV1Request, - LibrariesDeleteV1RequestTypedDict, - ) - from .libraries_documents_delete_v1op import ( - LibrariesDocumentsDeleteV1Request, - LibrariesDocumentsDeleteV1RequestTypedDict, - ) - from .libraries_documents_get_extracted_text_signed_url_v1op import ( - LibrariesDocumentsGetExtractedTextSignedURLV1Request, - LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict, - ) - from .libraries_documents_get_signed_url_v1op import ( - LibrariesDocumentsGetSignedURLV1Request, - LibrariesDocumentsGetSignedURLV1RequestTypedDict, - ) - from .libraries_documents_get_status_v1op import ( - LibrariesDocumentsGetStatusV1Request, - LibrariesDocumentsGetStatusV1RequestTypedDict, - ) - from .libraries_documents_get_text_content_v1op import ( - LibrariesDocumentsGetTextContentV1Request, - LibrariesDocumentsGetTextContentV1RequestTypedDict, - ) - from .libraries_documents_get_v1op import ( - LibrariesDocumentsGetV1Request, - LibrariesDocumentsGetV1RequestTypedDict, - ) - from .libraries_documents_list_v1op import ( - LibrariesDocumentsListV1Request, - LibrariesDocumentsListV1RequestTypedDict, - ) - from .libraries_documents_reprocess_v1op import ( - LibrariesDocumentsReprocessV1Request, - LibrariesDocumentsReprocessV1RequestTypedDict, - ) - from .libraries_documents_update_v1op import ( - LibrariesDocumentsUpdateV1Request, - LibrariesDocumentsUpdateV1RequestTypedDict, - ) - from .libraries_documents_upload_v1op import ( - LibrariesDocumentsUploadV1DocumentUpload, - LibrariesDocumentsUploadV1DocumentUploadTypedDict, - LibrariesDocumentsUploadV1Request, - LibrariesDocumentsUploadV1RequestTypedDict, - ) - from .libraries_get_v1op import ( - LibrariesGetV1Request, - LibrariesGetV1RequestTypedDict, - ) - from .libraries_share_create_v1op import ( - LibrariesShareCreateV1Request, - LibrariesShareCreateV1RequestTypedDict, - ) - from 
.libraries_share_delete_v1op import ( - LibrariesShareDeleteV1Request, - LibrariesShareDeleteV1RequestTypedDict, - ) - from .libraries_share_list_v1op import ( - LibrariesShareListV1Request, - LibrariesShareListV1RequestTypedDict, - ) - from .libraries_update_v1op import ( - LibrariesUpdateV1Request, - LibrariesUpdateV1RequestTypedDict, - ) - from .libraryin import LibraryIn, LibraryInTypedDict - from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict - from .libraryout import LibraryOut, LibraryOutTypedDict - from .listdocumentout import ListDocumentOut, ListDocumentOutTypedDict - from .listfilesout import ListFilesOut, ListFilesOutTypedDict - from .listlibraryout import ListLibraryOut, ListLibraryOutTypedDict - from .listsharingout import ListSharingOut, ListSharingOutTypedDict - from .messageentries import MessageEntries, MessageEntriesTypedDict - from .messageinputcontentchunks import ( - MessageInputContentChunks, - MessageInputContentChunksTypedDict, - ) - from .messageinputentry import ( - MessageInputEntry, - MessageInputEntryContent, - MessageInputEntryContentTypedDict, - MessageInputEntryRole, - MessageInputEntryType, - MessageInputEntryTypedDict, - Object, - ) - from .messageoutputcontentchunks import ( - MessageOutputContentChunks, - MessageOutputContentChunksTypedDict, - ) - from .messageoutputentry import ( - MessageOutputEntry, - MessageOutputEntryContent, - MessageOutputEntryContentTypedDict, - MessageOutputEntryObject, - MessageOutputEntryRole, - MessageOutputEntryType, - MessageOutputEntryTypedDict, - ) - from .messageoutputevent import ( - MessageOutputEvent, - MessageOutputEventContent, - MessageOutputEventContentTypedDict, - MessageOutputEventRole, - MessageOutputEventType, - MessageOutputEventTypedDict, - ) - from .metricout import MetricOut, MetricOutTypedDict - from .mistralpromptmode import MistralPromptMode - from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict - from .modelconversation import ( - 
ModelConversation, - ModelConversationObject, - ModelConversationTools, - ModelConversationToolsTypedDict, - ModelConversationTypedDict, - ) - from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict - from .moderationobject import ModerationObject, ModerationObjectTypedDict - from .moderationresponse import ModerationResponse, ModerationResponseTypedDict - from .no_response_error import NoResponseError - from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict - from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict - from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict - from .ocrrequest import ( - Document, - DocumentTypedDict, - OCRRequest, - OCRRequestTypedDict, - TableFormat, - ) - from .ocrresponse import OCRResponse, OCRResponseTypedDict - from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict - from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict - from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict - from .paginationinfo import PaginationInfo, PaginationInfoTypedDict - from .prediction import Prediction, PredictionTypedDict - from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict - from .realtimetranscriptionerror import ( - RealtimeTranscriptionError, - RealtimeTranscriptionErrorTypedDict, - ) - from .realtimetranscriptionerrordetail import ( - Message, - MessageTypedDict, - RealtimeTranscriptionErrorDetail, - RealtimeTranscriptionErrorDetailTypedDict, - ) - from .realtimetranscriptionsession import ( - RealtimeTranscriptionSession, - RealtimeTranscriptionSessionTypedDict, - ) - from .realtimetranscriptionsessioncreated import ( - RealtimeTranscriptionSessionCreated, - RealtimeTranscriptionSessionCreatedTypedDict, - ) - from .realtimetranscriptionsessionupdated import ( - RealtimeTranscriptionSessionUpdated, - RealtimeTranscriptionSessionUpdatedTypedDict, - ) - from .referencechunk import ( - 
ReferenceChunk, - ReferenceChunkType, - ReferenceChunkTypedDict, - ) - from .requestsource import RequestSource - from .responsedoneevent import ( - ResponseDoneEvent, - ResponseDoneEventType, - ResponseDoneEventTypedDict, - ) - from .responseerrorevent import ( - ResponseErrorEvent, - ResponseErrorEventType, - ResponseErrorEventTypedDict, - ) - from .responseformat import ResponseFormat, ResponseFormatTypedDict - from .responseformats import ResponseFormats - from .responsestartedevent import ( - ResponseStartedEvent, - ResponseStartedEventType, - ResponseStartedEventTypedDict, - ) - from .responsevalidationerror import ResponseValidationError - from .retrieve_model_v1_models_model_id_getop import ( - RetrieveModelV1ModelsModelIDGetRequest, - RetrieveModelV1ModelsModelIDGetRequestTypedDict, - RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict, - ) - from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict - from .sampletype import SampleType - from .sdkerror import SDKError - from .security import Security, SecurityTypedDict - from .shareenum import ShareEnum - from .sharingdelete import SharingDelete, SharingDeleteTypedDict - from .sharingin import SharingIn, SharingInTypedDict - from .sharingout import SharingOut, SharingOutTypedDict - from .source import Source - from .ssetypes import SSETypes - from .systemmessage import ( - Role, - SystemMessage, - SystemMessageContent, - SystemMessageContentTypedDict, - SystemMessageTypedDict, - ) - from .systemmessagecontentchunks import ( - SystemMessageContentChunks, - SystemMessageContentChunksTypedDict, - ) - from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict - from .thinkchunk import ( - ThinkChunk, - ThinkChunkType, - ThinkChunkTypedDict, - Thinking, - ThinkingTypedDict, - ) - from .timestampgranularity import TimestampGranularity - from .tool import Tool, ToolTypedDict - from .toolcall 
import ToolCall, ToolCallTypedDict - from .toolchoice import ToolChoice, ToolChoiceTypedDict - from .toolchoiceenum import ToolChoiceEnum - from .toolexecutiondeltaevent import ( - ToolExecutionDeltaEvent, - ToolExecutionDeltaEventName, - ToolExecutionDeltaEventNameTypedDict, - ToolExecutionDeltaEventType, - ToolExecutionDeltaEventTypedDict, - ) - from .toolexecutiondoneevent import ( - ToolExecutionDoneEvent, - ToolExecutionDoneEventName, - ToolExecutionDoneEventNameTypedDict, - ToolExecutionDoneEventType, - ToolExecutionDoneEventTypedDict, - ) - from .toolexecutionentry import ( - Name, - NameTypedDict, - ToolExecutionEntry, - ToolExecutionEntryObject, - ToolExecutionEntryType, - ToolExecutionEntryTypedDict, - ) - from .toolexecutionstartedevent import ( - ToolExecutionStartedEvent, - ToolExecutionStartedEventName, - ToolExecutionStartedEventNameTypedDict, - ToolExecutionStartedEventType, - ToolExecutionStartedEventTypedDict, - ) - from .toolfilechunk import ( - ToolFileChunk, - ToolFileChunkTool, - ToolFileChunkToolTypedDict, - ToolFileChunkType, - ToolFileChunkTypedDict, - ) - from .toolmessage import ( - ToolMessage, - ToolMessageContent, - ToolMessageContentTypedDict, - ToolMessageRole, - ToolMessageTypedDict, - ) - from .toolreferencechunk import ( - ToolReferenceChunk, - ToolReferenceChunkTool, - ToolReferenceChunkToolTypedDict, - ToolReferenceChunkType, - ToolReferenceChunkTypedDict, - ) - from .tooltypes import ToolTypes - from .trainingfile import TrainingFile, TrainingFileTypedDict - from .transcriptionresponse import ( - TranscriptionResponse, - TranscriptionResponseTypedDict, - ) - from .transcriptionsegmentchunk import ( - TranscriptionSegmentChunk, - TranscriptionSegmentChunkTypedDict, - Type, - ) - from .transcriptionstreamdone import ( - TranscriptionStreamDone, - TranscriptionStreamDoneType, - TranscriptionStreamDoneTypedDict, - ) - from .transcriptionstreamevents import ( - TranscriptionStreamEvents, - TranscriptionStreamEventsData, - 
TranscriptionStreamEventsDataTypedDict, - TranscriptionStreamEventsTypedDict, - ) - from .transcriptionstreameventtypes import TranscriptionStreamEventTypes - from .transcriptionstreamlanguage import ( - TranscriptionStreamLanguage, - TranscriptionStreamLanguageType, - TranscriptionStreamLanguageTypedDict, - ) - from .transcriptionstreamsegmentdelta import ( - TranscriptionStreamSegmentDelta, - TranscriptionStreamSegmentDeltaType, - TranscriptionStreamSegmentDeltaTypedDict, - ) - from .transcriptionstreamtextdelta import ( - TranscriptionStreamTextDelta, - TranscriptionStreamTextDeltaType, - TranscriptionStreamTextDeltaTypedDict, - ) - from .unarchiveftmodelout import ( - UnarchiveFTModelOut, - UnarchiveFTModelOutObject, - UnarchiveFTModelOutTypedDict, - ) - from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict - from .uploadfileout import UploadFileOut, UploadFileOutTypedDict - from .usageinfo import UsageInfo, UsageInfoTypedDict - from .usermessage import ( - UserMessage, - UserMessageContent, - UserMessageContentTypedDict, - UserMessageRole, - UserMessageTypedDict, - ) - from .validationerror import ( - Loc, - LocTypedDict, - ValidationError, - ValidationErrorTypedDict, - ) - from .wandbintegration import ( - WandbIntegration, - WandbIntegrationType, - WandbIntegrationTypedDict, - ) - from .wandbintegrationout import ( - WandbIntegrationOut, - WandbIntegrationOutType, - WandbIntegrationOutTypedDict, - ) - from .websearchpremiumtool import ( - WebSearchPremiumTool, - WebSearchPremiumToolType, - WebSearchPremiumToolTypedDict, - ) - from .websearchtool import WebSearchTool, WebSearchToolType, WebSearchToolTypedDict - -__all__ = [ - "APIEndpoint", - "Agent", - "AgentAliasResponse", - "AgentAliasResponseTypedDict", - "AgentConversation", - "AgentConversationAgentVersion", - "AgentConversationAgentVersionTypedDict", - "AgentConversationObject", - "AgentConversationTypedDict", - "AgentCreationRequest", - "AgentCreationRequestTools", - 
"AgentCreationRequestToolsTypedDict", - "AgentCreationRequestTypedDict", - "AgentHandoffDoneEvent", - "AgentHandoffDoneEventType", - "AgentHandoffDoneEventTypedDict", - "AgentHandoffEntry", - "AgentHandoffEntryObject", - "AgentHandoffEntryType", - "AgentHandoffEntryTypedDict", - "AgentHandoffStartedEvent", - "AgentHandoffStartedEventType", - "AgentHandoffStartedEventTypedDict", - "AgentObject", - "AgentTools", - "AgentToolsTypedDict", - "AgentTypedDict", - "AgentUpdateRequest", - "AgentUpdateRequestTools", - "AgentUpdateRequestToolsTypedDict", - "AgentUpdateRequestTypedDict", - "AgentVersion", - "AgentVersionTypedDict", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", - "AgentsAPIV1AgentsDeleteRequest", - "AgentsAPIV1AgentsDeleteRequestTypedDict", - "AgentsAPIV1AgentsGetRequest", - "AgentsAPIV1AgentsGetRequestTypedDict", - "AgentsAPIV1AgentsGetVersionRequest", - "AgentsAPIV1AgentsGetVersionRequestTypedDict", - "AgentsAPIV1AgentsListRequest", - "AgentsAPIV1AgentsListRequestTypedDict", - "AgentsAPIV1AgentsListVersionAliasesRequest", - "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict", - "AgentsAPIV1AgentsListVersionsRequest", - "AgentsAPIV1AgentsListVersionsRequestTypedDict", - "AgentsAPIV1AgentsUpdateRequest", - "AgentsAPIV1AgentsUpdateRequestTypedDict", - "AgentsAPIV1AgentsUpdateVersionRequest", - "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", - "AgentsAPIV1ConversationsAppendRequest", - "AgentsAPIV1ConversationsAppendRequestTypedDict", - "AgentsAPIV1ConversationsAppendStreamRequest", - "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", - "AgentsAPIV1ConversationsDeleteRequest", - "AgentsAPIV1ConversationsDeleteRequestTypedDict", - "AgentsAPIV1ConversationsGetRequest", - "AgentsAPIV1ConversationsGetRequestTypedDict", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", - "AgentsAPIV1ConversationsHistoryRequest", - 
"AgentsAPIV1ConversationsHistoryRequestTypedDict", - "AgentsAPIV1ConversationsListRequest", - "AgentsAPIV1ConversationsListRequestTypedDict", - "AgentsAPIV1ConversationsMessagesRequest", - "AgentsAPIV1ConversationsMessagesRequestTypedDict", - "AgentsAPIV1ConversationsRestartRequest", - "AgentsAPIV1ConversationsRestartRequestTypedDict", - "AgentsAPIV1ConversationsRestartStreamRequest", - "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", - "AgentsCompletionRequest", - "AgentsCompletionRequestMessages", - "AgentsCompletionRequestMessagesTypedDict", - "AgentsCompletionRequestStop", - "AgentsCompletionRequestStopTypedDict", - "AgentsCompletionRequestToolChoice", - "AgentsCompletionRequestToolChoiceTypedDict", - "AgentsCompletionRequestTypedDict", - "AgentsCompletionStreamRequest", - "AgentsCompletionStreamRequestMessages", - "AgentsCompletionStreamRequestMessagesTypedDict", - "AgentsCompletionStreamRequestStop", - "AgentsCompletionStreamRequestStopTypedDict", - "AgentsCompletionStreamRequestToolChoice", - "AgentsCompletionStreamRequestToolChoiceTypedDict", - "AgentsCompletionStreamRequestTypedDict", - "ArchiveFTModelOut", - "ArchiveFTModelOutObject", - "ArchiveFTModelOutTypedDict", - "Arguments", - "ArgumentsTypedDict", - "AssistantMessage", - "AssistantMessageContent", - "AssistantMessageContentTypedDict", - "AssistantMessageRole", - "AssistantMessageTypedDict", - "Attributes", - "AttributesTypedDict", - "AudioChunk", - "AudioChunkType", - "AudioChunkTypedDict", - "AudioEncoding", - "AudioFormat", - "AudioFormatTypedDict", - "AudioTranscriptionRequest", - "AudioTranscriptionRequestStream", - "AudioTranscriptionRequestStreamTypedDict", - "AudioTranscriptionRequestTypedDict", - "BaseModelCard", - "BaseModelCardType", - "BaseModelCardTypedDict", - "BatchError", - "BatchErrorTypedDict", - "BatchJobIn", - "BatchJobInTypedDict", - "BatchJobOut", - "BatchJobOutObject", - "BatchJobOutTypedDict", - "BatchJobStatus", - "BatchJobsOut", - "BatchJobsOutObject", - 
"BatchJobsOutTypedDict", - "BatchRequest", - "BatchRequestTypedDict", - "BuiltInConnectors", - "ChatClassificationRequest", - "ChatClassificationRequestTypedDict", - "ChatCompletionChoice", - "ChatCompletionChoiceTypedDict", - "ChatCompletionRequest", - "ChatCompletionRequestToolChoice", - "ChatCompletionRequestToolChoiceTypedDict", - "ChatCompletionRequestTypedDict", - "ChatCompletionResponse", - "ChatCompletionResponseTypedDict", - "ChatCompletionStreamRequest", - "ChatCompletionStreamRequestMessages", - "ChatCompletionStreamRequestMessagesTypedDict", - "ChatCompletionStreamRequestStop", - "ChatCompletionStreamRequestStopTypedDict", - "ChatCompletionStreamRequestToolChoice", - "ChatCompletionStreamRequestToolChoiceTypedDict", - "ChatCompletionStreamRequestTypedDict", - "ChatModerationRequest", - "ChatModerationRequestInputs", - "ChatModerationRequestInputsTypedDict", - "ChatModerationRequestTypedDict", - "CheckpointOut", - "CheckpointOutTypedDict", - "ClassificationRequest", - "ClassificationRequestInputs", - "ClassificationRequestInputsTypedDict", - "ClassificationRequestTypedDict", - "ClassificationResponse", - "ClassificationResponseTypedDict", - "ClassificationTargetResult", - "ClassificationTargetResultTypedDict", - "ClassifierDetailedJobOut", - "ClassifierDetailedJobOutIntegrations", - "ClassifierDetailedJobOutIntegrationsTypedDict", - "ClassifierDetailedJobOutJobType", - "ClassifierDetailedJobOutObject", - "ClassifierDetailedJobOutStatus", - "ClassifierDetailedJobOutTypedDict", - "ClassifierFTModelOut", - "ClassifierFTModelOutModelType", - "ClassifierFTModelOutObject", - "ClassifierFTModelOutTypedDict", - "ClassifierJobOut", - "ClassifierJobOutIntegrations", - "ClassifierJobOutIntegrationsTypedDict", - "ClassifierJobOutJobType", - "ClassifierJobOutObject", - "ClassifierJobOutStatus", - "ClassifierJobOutTypedDict", - "ClassifierTargetIn", - "ClassifierTargetInTypedDict", - "ClassifierTargetOut", - "ClassifierTargetOutTypedDict", - 
"ClassifierTrainingParameters", - "ClassifierTrainingParametersIn", - "ClassifierTrainingParametersInTypedDict", - "ClassifierTrainingParametersTypedDict", - "CodeInterpreterTool", - "CodeInterpreterToolType", - "CodeInterpreterToolTypedDict", - "CompletionArgs", - "CompletionArgsStop", - "CompletionArgsStopTypedDict", - "CompletionArgsTypedDict", - "CompletionChunk", - "CompletionChunkTypedDict", - "CompletionDetailedJobOut", - "CompletionDetailedJobOutIntegrations", - "CompletionDetailedJobOutIntegrationsTypedDict", - "CompletionDetailedJobOutJobType", - "CompletionDetailedJobOutObject", - "CompletionDetailedJobOutRepositories", - "CompletionDetailedJobOutRepositoriesTypedDict", - "CompletionDetailedJobOutStatus", - "CompletionDetailedJobOutTypedDict", - "CompletionEvent", - "CompletionEventTypedDict", - "CompletionFTModelOut", - "CompletionFTModelOutObject", - "CompletionFTModelOutTypedDict", - "CompletionJobOut", - "CompletionJobOutObject", - "CompletionJobOutTypedDict", - "CompletionResponseStreamChoice", - "CompletionResponseStreamChoiceFinishReason", - "CompletionResponseStreamChoiceTypedDict", - "CompletionTrainingParameters", - "CompletionTrainingParametersIn", - "CompletionTrainingParametersInTypedDict", - "CompletionTrainingParametersTypedDict", - "Content", - "ContentChunk", - "ContentChunkTypedDict", - "ContentTypedDict", - "ConversationAppendRequest", - "ConversationAppendRequestHandoffExecution", - "ConversationAppendRequestTypedDict", - "ConversationAppendStreamRequest", - "ConversationAppendStreamRequestHandoffExecution", - "ConversationAppendStreamRequestTypedDict", - "ConversationEvents", - "ConversationEventsData", - "ConversationEventsDataTypedDict", - "ConversationEventsTypedDict", - "ConversationHistory", - "ConversationHistoryObject", - "ConversationHistoryTypedDict", - "ConversationInputs", - "ConversationInputsTypedDict", - "ConversationMessages", - "ConversationMessagesObject", - "ConversationMessagesTypedDict", - "ConversationRequest", - 
"ConversationRequestTypedDict", - "ConversationResponse", - "ConversationResponseObject", - "ConversationResponseTypedDict", - "ConversationRestartRequest", - "ConversationRestartRequestAgentVersion", - "ConversationRestartRequestAgentVersionTypedDict", - "ConversationRestartRequestHandoffExecution", - "ConversationRestartRequestTypedDict", - "ConversationRestartStreamRequest", - "ConversationRestartStreamRequestAgentVersion", - "ConversationRestartStreamRequestAgentVersionTypedDict", - "ConversationRestartStreamRequestHandoffExecution", - "ConversationRestartStreamRequestTypedDict", - "ConversationStreamRequest", - "ConversationStreamRequestAgentVersion", - "ConversationStreamRequestAgentVersionTypedDict", - "ConversationStreamRequestHandoffExecution", - "ConversationStreamRequestTools", - "ConversationStreamRequestToolsTypedDict", - "ConversationStreamRequestTypedDict", - "ConversationUsageInfo", - "ConversationUsageInfoTypedDict", - "Data", - "DataTypedDict", - "DeleteFileOut", - "DeleteFileOutTypedDict", - "DeleteModelOut", - "DeleteModelOutTypedDict", - "DeleteModelV1ModelsModelIDDeleteRequest", - "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", - "DeltaMessage", - "DeltaMessageTypedDict", - "Document", - "DocumentLibraryTool", - "DocumentLibraryToolType", - "DocumentLibraryToolTypedDict", - "DocumentOut", - "DocumentOutTypedDict", - "DocumentTextContent", - "DocumentTextContentTypedDict", - "DocumentTypedDict", - "DocumentURLChunk", - "DocumentURLChunkType", - "DocumentURLChunkTypedDict", - "DocumentUpdateIn", - "DocumentUpdateInTypedDict", - "EmbeddingDtype", - "EmbeddingRequest", - "EmbeddingRequestInputs", - "EmbeddingRequestInputsTypedDict", - "EmbeddingRequestTypedDict", - "EmbeddingResponse", - "EmbeddingResponseData", - "EmbeddingResponseDataTypedDict", - "EmbeddingResponseTypedDict", - "EncodingFormat", - "EntityType", - "Entries", - "EntriesTypedDict", - "EventOut", - "EventOutTypedDict", - "FIMCompletionRequest", - "FIMCompletionRequestStop", - 
"FIMCompletionRequestStopTypedDict", - "FIMCompletionRequestTypedDict", - "FIMCompletionResponse", - "FIMCompletionResponseTypedDict", - "FIMCompletionStreamRequest", - "FIMCompletionStreamRequestStop", - "FIMCompletionStreamRequestStopTypedDict", - "FIMCompletionStreamRequestTypedDict", - "FTClassifierLossFunction", - "FTModelCapabilitiesOut", - "FTModelCapabilitiesOutTypedDict", - "FTModelCard", - "FTModelCardType", - "FTModelCardTypedDict", - "File", - "FileChunk", - "FileChunkTypedDict", - "FilePurpose", - "FileSchema", - "FileSchemaTypedDict", - "FileSignedURL", - "FileSignedURLTypedDict", - "FileTypedDict", - "FilesAPIRoutesDeleteFileRequest", - "FilesAPIRoutesDeleteFileRequestTypedDict", - "FilesAPIRoutesDownloadFileRequest", - "FilesAPIRoutesDownloadFileRequestTypedDict", - "FilesAPIRoutesGetSignedURLRequest", - "FilesAPIRoutesGetSignedURLRequestTypedDict", - "FilesAPIRoutesListFilesRequest", - "FilesAPIRoutesListFilesRequestTypedDict", - "FilesAPIRoutesRetrieveFileRequest", - "FilesAPIRoutesRetrieveFileRequestTypedDict", - "FilesAPIRoutesUploadFileMultiPartBodyParams", - "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", - "FineTuneableModelType", - "FinishReason", - "Format", - "Function", - "FunctionCall", - "FunctionCallEntry", - "FunctionCallEntryArguments", - "FunctionCallEntryArgumentsTypedDict", - "FunctionCallEntryObject", - "FunctionCallEntryType", - "FunctionCallEntryTypedDict", - "FunctionCallEvent", - "FunctionCallEventType", - "FunctionCallEventTypedDict", - "FunctionCallTypedDict", - "FunctionName", - "FunctionNameTypedDict", - "FunctionResultEntry", - "FunctionResultEntryObject", - "FunctionResultEntryType", - "FunctionResultEntryTypedDict", - "FunctionTool", - "FunctionToolType", - "FunctionToolTypedDict", - "FunctionTypedDict", - "GithubRepositoryIn", - "GithubRepositoryInType", - "GithubRepositoryInTypedDict", - "GithubRepositoryOut", - "GithubRepositoryOutType", - "GithubRepositoryOutTypedDict", - "HTTPValidationError", - 
"HTTPValidationErrorData", - "HandoffExecution", - "Hyperparameters", - "HyperparametersTypedDict", - "ImageGenerationTool", - "ImageGenerationToolType", - "ImageGenerationToolTypedDict", - "ImageURL", - "ImageURLChunk", - "ImageURLChunkImageURL", - "ImageURLChunkImageURLTypedDict", - "ImageURLChunkType", - "ImageURLChunkTypedDict", - "ImageURLTypedDict", - "InputEntries", - "InputEntriesTypedDict", - "Inputs", - "InputsTypedDict", - "InstructRequest", - "InstructRequestInputs", - "InstructRequestInputsMessages", - "InstructRequestInputsMessagesTypedDict", - "InstructRequestInputsTypedDict", - "InstructRequestMessages", - "InstructRequestMessagesTypedDict", - "InstructRequestTypedDict", - "Integrations", - "IntegrationsTypedDict", - "JSONSchema", - "JSONSchemaTypedDict", - "JobIn", - "JobInIntegrations", - "JobInIntegrationsTypedDict", - "JobInRepositories", - "JobInRepositoriesTypedDict", - "JobInTypedDict", - "JobMetadataOut", - "JobMetadataOutTypedDict", - "JobType", - "JobsAPIRoutesBatchCancelBatchJobRequest", - "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", - "JobsAPIRoutesBatchGetBatchJobRequest", - "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", - "JobsAPIRoutesBatchGetBatchJobsRequest", - "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobRequest", - "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobResponse", - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", - 
"JobsAPIRoutesFineTuningGetFineTuningJobsRequest", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", - "JobsAPIRoutesFineTuningStartFineTuningJobRequest", - "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningStartFineTuningJobResponse", - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", - "JobsOut", - "JobsOutData", - "JobsOutDataTypedDict", - "JobsOutObject", - "JobsOutTypedDict", - "LegacyJobMetadataOut", - "LegacyJobMetadataOutObject", - "LegacyJobMetadataOutTypedDict", - "LibrariesDeleteV1Request", - "LibrariesDeleteV1RequestTypedDict", - "LibrariesDocumentsDeleteV1Request", - "LibrariesDocumentsDeleteV1RequestTypedDict", - "LibrariesDocumentsGetExtractedTextSignedURLV1Request", - "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", - "LibrariesDocumentsGetSignedURLV1Request", - "LibrariesDocumentsGetSignedURLV1RequestTypedDict", - "LibrariesDocumentsGetStatusV1Request", - "LibrariesDocumentsGetStatusV1RequestTypedDict", - "LibrariesDocumentsGetTextContentV1Request", - "LibrariesDocumentsGetTextContentV1RequestTypedDict", - "LibrariesDocumentsGetV1Request", - "LibrariesDocumentsGetV1RequestTypedDict", - "LibrariesDocumentsListV1Request", - "LibrariesDocumentsListV1RequestTypedDict", - "LibrariesDocumentsReprocessV1Request", - "LibrariesDocumentsReprocessV1RequestTypedDict", - "LibrariesDocumentsUpdateV1Request", - "LibrariesDocumentsUpdateV1RequestTypedDict", - "LibrariesDocumentsUploadV1DocumentUpload", - "LibrariesDocumentsUploadV1DocumentUploadTypedDict", - "LibrariesDocumentsUploadV1Request", - 
"LibrariesDocumentsUploadV1RequestTypedDict", - "LibrariesGetV1Request", - "LibrariesGetV1RequestTypedDict", - "LibrariesShareCreateV1Request", - "LibrariesShareCreateV1RequestTypedDict", - "LibrariesShareDeleteV1Request", - "LibrariesShareDeleteV1RequestTypedDict", - "LibrariesShareListV1Request", - "LibrariesShareListV1RequestTypedDict", - "LibrariesUpdateV1Request", - "LibrariesUpdateV1RequestTypedDict", - "LibraryIn", - "LibraryInTypedDict", - "LibraryInUpdate", - "LibraryInUpdateTypedDict", - "LibraryOut", - "LibraryOutTypedDict", - "ListDocumentOut", - "ListDocumentOutTypedDict", - "ListFilesOut", - "ListFilesOutTypedDict", - "ListLibraryOut", - "ListLibraryOutTypedDict", - "ListSharingOut", - "ListSharingOutTypedDict", - "Loc", - "LocTypedDict", - "Message", - "MessageEntries", - "MessageEntriesTypedDict", - "MessageInputContentChunks", - "MessageInputContentChunksTypedDict", - "MessageInputEntry", - "MessageInputEntryContent", - "MessageInputEntryContentTypedDict", - "MessageInputEntryRole", - "MessageInputEntryType", - "MessageInputEntryTypedDict", - "MessageOutputContentChunks", - "MessageOutputContentChunksTypedDict", - "MessageOutputEntry", - "MessageOutputEntryContent", - "MessageOutputEntryContentTypedDict", - "MessageOutputEntryObject", - "MessageOutputEntryRole", - "MessageOutputEntryType", - "MessageOutputEntryTypedDict", - "MessageOutputEvent", - "MessageOutputEventContent", - "MessageOutputEventContentTypedDict", - "MessageOutputEventRole", - "MessageOutputEventType", - "MessageOutputEventTypedDict", - "MessageTypedDict", - "Messages", - "MessagesTypedDict", - "MetricOut", - "MetricOutTypedDict", - "MistralError", - "MistralPromptMode", - "ModelCapabilities", - "ModelCapabilitiesTypedDict", - "ModelConversation", - "ModelConversationObject", - "ModelConversationTools", - "ModelConversationToolsTypedDict", - "ModelConversationTypedDict", - "ModelList", - "ModelListTypedDict", - "ModelType", - "ModerationObject", - "ModerationObjectTypedDict", - 
"ModerationResponse", - "ModerationResponseTypedDict", - "Name", - "NameTypedDict", - "NoResponseError", - "OCRImageObject", - "OCRImageObjectTypedDict", - "OCRPageDimensions", - "OCRPageDimensionsTypedDict", - "OCRPageObject", - "OCRPageObjectTypedDict", - "OCRRequest", - "OCRRequestTypedDict", - "OCRResponse", - "OCRResponseTypedDict", - "OCRTableObject", - "OCRTableObjectTypedDict", - "OCRUsageInfo", - "OCRUsageInfoTypedDict", - "Object", - "One", - "OneTypedDict", - "OutputContentChunks", - "OutputContentChunksTypedDict", - "Outputs", - "OutputsTypedDict", - "PaginationInfo", - "PaginationInfoTypedDict", - "Prediction", - "PredictionTypedDict", - "ProcessingStatusOut", - "ProcessingStatusOutTypedDict", - "QueryParamAgentVersion", - "QueryParamAgentVersionTypedDict", - "QueryParamStatus", - "RealtimeTranscriptionError", - "RealtimeTranscriptionErrorDetail", - "RealtimeTranscriptionErrorDetailTypedDict", - "RealtimeTranscriptionErrorTypedDict", - "RealtimeTranscriptionSession", - "RealtimeTranscriptionSessionCreated", - "RealtimeTranscriptionSessionCreatedTypedDict", - "RealtimeTranscriptionSessionTypedDict", - "RealtimeTranscriptionSessionUpdated", - "RealtimeTranscriptionSessionUpdatedTypedDict", - "ReferenceChunk", - "ReferenceChunkType", - "ReferenceChunkTypedDict", - "Repositories", - "RepositoriesTypedDict", - "RequestSource", - "Response1", - "Response1TypedDict", - "ResponseBody", - "ResponseBodyTypedDict", - "ResponseDoneEvent", - "ResponseDoneEventType", - "ResponseDoneEventTypedDict", - "ResponseErrorEvent", - "ResponseErrorEventType", - "ResponseErrorEventTypedDict", - "ResponseFormat", - "ResponseFormatTypedDict", - "ResponseFormats", - "ResponseStartedEvent", - "ResponseStartedEventType", - "ResponseStartedEventTypedDict", - "ResponseValidationError", - "RetrieveFileOut", - "RetrieveFileOutTypedDict", - "RetrieveModelV1ModelsModelIDGetRequest", - "RetrieveModelV1ModelsModelIDGetRequestTypedDict", - 
"RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", - "Role", - "SDKError", - "SSETypes", - "SampleType", - "Security", - "SecurityTypedDict", - "ShareEnum", - "SharingDelete", - "SharingDeleteTypedDict", - "SharingIn", - "SharingInTypedDict", - "SharingOut", - "SharingOutTypedDict", - "Source", - "Status", - "Stop", - "StopTypedDict", - "SystemMessage", - "SystemMessageContent", - "SystemMessageContentChunks", - "SystemMessageContentChunksTypedDict", - "SystemMessageContentTypedDict", - "SystemMessageTypedDict", - "TableFormat", - "TextChunk", - "TextChunkType", - "TextChunkTypedDict", - "ThinkChunk", - "ThinkChunkType", - "ThinkChunkTypedDict", - "Thinking", - "ThinkingTypedDict", - "TimestampGranularity", - "Tool", - "ToolCall", - "ToolCallTypedDict", - "ToolChoice", - "ToolChoiceEnum", - "ToolChoiceTypedDict", - "ToolExecutionDeltaEvent", - "ToolExecutionDeltaEventName", - "ToolExecutionDeltaEventNameTypedDict", - "ToolExecutionDeltaEventType", - "ToolExecutionDeltaEventTypedDict", - "ToolExecutionDoneEvent", - "ToolExecutionDoneEventName", - "ToolExecutionDoneEventNameTypedDict", - "ToolExecutionDoneEventType", - "ToolExecutionDoneEventTypedDict", - "ToolExecutionEntry", - "ToolExecutionEntryObject", - "ToolExecutionEntryType", - "ToolExecutionEntryTypedDict", - "ToolExecutionStartedEvent", - "ToolExecutionStartedEventName", - "ToolExecutionStartedEventNameTypedDict", - "ToolExecutionStartedEventType", - "ToolExecutionStartedEventTypedDict", - "ToolFileChunk", - "ToolFileChunkTool", - "ToolFileChunkToolTypedDict", - "ToolFileChunkType", - "ToolFileChunkTypedDict", - "ToolMessage", - "ToolMessageContent", - "ToolMessageContentTypedDict", - "ToolMessageRole", - "ToolMessageTypedDict", - "ToolReferenceChunk", - "ToolReferenceChunkTool", - "ToolReferenceChunkToolTypedDict", - "ToolReferenceChunkType", - "ToolReferenceChunkTypedDict", - "ToolTypedDict", - 
"ToolTypes", - "Tools", - "ToolsTypedDict", - "TrainingFile", - "TrainingFileTypedDict", - "TranscriptionResponse", - "TranscriptionResponseTypedDict", - "TranscriptionSegmentChunk", - "TranscriptionSegmentChunkTypedDict", - "TranscriptionStreamDone", - "TranscriptionStreamDoneType", - "TranscriptionStreamDoneTypedDict", - "TranscriptionStreamEventTypes", - "TranscriptionStreamEvents", - "TranscriptionStreamEventsData", - "TranscriptionStreamEventsDataTypedDict", - "TranscriptionStreamEventsTypedDict", - "TranscriptionStreamLanguage", - "TranscriptionStreamLanguageType", - "TranscriptionStreamLanguageTypedDict", - "TranscriptionStreamSegmentDelta", - "TranscriptionStreamSegmentDeltaType", - "TranscriptionStreamSegmentDeltaTypedDict", - "TranscriptionStreamTextDelta", - "TranscriptionStreamTextDeltaType", - "TranscriptionStreamTextDeltaTypedDict", - "Two", - "TwoTypedDict", - "Type", - "UnarchiveFTModelOut", - "UnarchiveFTModelOutObject", - "UnarchiveFTModelOutTypedDict", - "UpdateFTModelIn", - "UpdateFTModelInTypedDict", - "UploadFileOut", - "UploadFileOutTypedDict", - "UsageInfo", - "UsageInfoTypedDict", - "UserMessage", - "UserMessageContent", - "UserMessageContentTypedDict", - "UserMessageRole", - "UserMessageTypedDict", - "ValidationError", - "ValidationErrorTypedDict", - "WandbIntegration", - "WandbIntegrationOut", - "WandbIntegrationOutType", - "WandbIntegrationOutTypedDict", - "WandbIntegrationType", - "WandbIntegrationTypedDict", - "WebSearchPremiumTool", - "WebSearchPremiumToolType", - "WebSearchPremiumToolTypedDict", - "WebSearchTool", - "WebSearchToolType", - "WebSearchToolTypedDict", -] - -_dynamic_imports: dict[str, str] = { - "Agent": ".agent", - "AgentObject": ".agent", - "AgentTools": ".agent", - "AgentToolsTypedDict": ".agent", - "AgentTypedDict": ".agent", - "AgentAliasResponse": ".agentaliasresponse", - "AgentAliasResponseTypedDict": ".agentaliasresponse", - "AgentConversation": ".agentconversation", - "AgentConversationAgentVersion": 
".agentconversation", - "AgentConversationAgentVersionTypedDict": ".agentconversation", - "AgentConversationObject": ".agentconversation", - "AgentConversationTypedDict": ".agentconversation", - "AgentCreationRequest": ".agentcreationrequest", - "AgentCreationRequestTools": ".agentcreationrequest", - "AgentCreationRequestToolsTypedDict": ".agentcreationrequest", - "AgentCreationRequestTypedDict": ".agentcreationrequest", - "AgentHandoffDoneEvent": ".agenthandoffdoneevent", - "AgentHandoffDoneEventType": ".agenthandoffdoneevent", - "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", - "AgentHandoffEntry": ".agenthandoffentry", - "AgentHandoffEntryObject": ".agenthandoffentry", - "AgentHandoffEntryType": ".agenthandoffentry", - "AgentHandoffEntryTypedDict": ".agenthandoffentry", - "AgentHandoffStartedEvent": ".agenthandoffstartedevent", - "AgentHandoffStartedEventType": ".agenthandoffstartedevent", - "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", - "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", - "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", - "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", - "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", - "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", - "QueryParamAgentVersion": ".agents_api_v1_agents_getop", - "QueryParamAgentVersionTypedDict": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", - "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", - 
"AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", - "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", - "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", - "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", - "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", - "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", - "AgentsAPIV1AgentsUpdateRequest": ".agents_api_v1_agents_updateop", - "AgentsAPIV1AgentsUpdateRequestTypedDict": ".agents_api_v1_agents_updateop", - "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", - "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", - "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", - "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", - "AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", - "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", - "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", - "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", - "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", - "ResponseBody": ".agents_api_v1_conversations_listop", - 
"ResponseBodyTypedDict": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", - "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", - "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", - "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", - "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", - "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", - "AgentsCompletionRequest": ".agentscompletionrequest", - "AgentsCompletionRequestMessages": ".agentscompletionrequest", - "AgentsCompletionRequestMessagesTypedDict": ".agentscompletionrequest", - "AgentsCompletionRequestStop": ".agentscompletionrequest", - "AgentsCompletionRequestStopTypedDict": ".agentscompletionrequest", - "AgentsCompletionRequestToolChoice": ".agentscompletionrequest", - "AgentsCompletionRequestToolChoiceTypedDict": ".agentscompletionrequest", - "AgentsCompletionRequestTypedDict": ".agentscompletionrequest", - "AgentsCompletionStreamRequest": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestMessages": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestMessagesTypedDict": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestStop": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestStopTypedDict": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", - "AgentUpdateRequest": ".agentupdaterequest", - "AgentUpdateRequestTools": ".agentupdaterequest", - "AgentUpdateRequestToolsTypedDict": ".agentupdaterequest", - "AgentUpdateRequestTypedDict": 
".agentupdaterequest", - "APIEndpoint": ".apiendpoint", - "ArchiveFTModelOut": ".archiveftmodelout", - "ArchiveFTModelOutObject": ".archiveftmodelout", - "ArchiveFTModelOutTypedDict": ".archiveftmodelout", - "AssistantMessage": ".assistantmessage", - "AssistantMessageContent": ".assistantmessage", - "AssistantMessageContentTypedDict": ".assistantmessage", - "AssistantMessageRole": ".assistantmessage", - "AssistantMessageTypedDict": ".assistantmessage", - "AudioChunk": ".audiochunk", - "AudioChunkType": ".audiochunk", - "AudioChunkTypedDict": ".audiochunk", - "AudioEncoding": ".audioencoding", - "AudioFormat": ".audioformat", - "AudioFormatTypedDict": ".audioformat", - "AudioTranscriptionRequest": ".audiotranscriptionrequest", - "AudioTranscriptionRequestTypedDict": ".audiotranscriptionrequest", - "AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", - "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", - "BaseModelCard": ".basemodelcard", - "BaseModelCardType": ".basemodelcard", - "BaseModelCardTypedDict": ".basemodelcard", - "BatchError": ".batcherror", - "BatchErrorTypedDict": ".batcherror", - "BatchJobIn": ".batchjobin", - "BatchJobInTypedDict": ".batchjobin", - "BatchJobOut": ".batchjobout", - "BatchJobOutObject": ".batchjobout", - "BatchJobOutTypedDict": ".batchjobout", - "BatchJobsOut": ".batchjobsout", - "BatchJobsOutObject": ".batchjobsout", - "BatchJobsOutTypedDict": ".batchjobsout", - "BatchJobStatus": ".batchjobstatus", - "BatchRequest": ".batchrequest", - "BatchRequestTypedDict": ".batchrequest", - "BuiltInConnectors": ".builtinconnectors", - "ChatClassificationRequest": ".chatclassificationrequest", - "ChatClassificationRequestTypedDict": ".chatclassificationrequest", - "ChatCompletionChoice": ".chatcompletionchoice", - "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", - "FinishReason": ".chatcompletionchoice", - "ChatCompletionRequest": ".chatcompletionrequest", - "ChatCompletionRequestToolChoice": 
".chatcompletionrequest", - "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", - "ChatCompletionRequestTypedDict": ".chatcompletionrequest", - "Messages": ".chatcompletionrequest", - "MessagesTypedDict": ".chatcompletionrequest", - "Stop": ".chatcompletionrequest", - "StopTypedDict": ".chatcompletionrequest", - "ChatCompletionResponse": ".chatcompletionresponse", - "ChatCompletionResponseTypedDict": ".chatcompletionresponse", - "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestMessages": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestMessagesTypedDict": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", - "ChatModerationRequest": ".chatmoderationrequest", - "ChatModerationRequestInputs": ".chatmoderationrequest", - "ChatModerationRequestInputsTypedDict": ".chatmoderationrequest", - "ChatModerationRequestTypedDict": ".chatmoderationrequest", - "One": ".chatmoderationrequest", - "OneTypedDict": ".chatmoderationrequest", - "Two": ".chatmoderationrequest", - "TwoTypedDict": ".chatmoderationrequest", - "CheckpointOut": ".checkpointout", - "CheckpointOutTypedDict": ".checkpointout", - "ClassificationRequest": ".classificationrequest", - "ClassificationRequestInputs": ".classificationrequest", - "ClassificationRequestInputsTypedDict": ".classificationrequest", - "ClassificationRequestTypedDict": ".classificationrequest", - "ClassificationResponse": ".classificationresponse", - "ClassificationResponseTypedDict": ".classificationresponse", - "ClassificationTargetResult": ".classificationtargetresult", - "ClassificationTargetResultTypedDict": 
".classificationtargetresult", - "ClassifierDetailedJobOut": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegrations": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegrationsTypedDict": ".classifierdetailedjobout", - "ClassifierDetailedJobOutJobType": ".classifierdetailedjobout", - "ClassifierDetailedJobOutObject": ".classifierdetailedjobout", - "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", - "ClassifierDetailedJobOutTypedDict": ".classifierdetailedjobout", - "ClassifierFTModelOut": ".classifierftmodelout", - "ClassifierFTModelOutModelType": ".classifierftmodelout", - "ClassifierFTModelOutObject": ".classifierftmodelout", - "ClassifierFTModelOutTypedDict": ".classifierftmodelout", - "ClassifierJobOut": ".classifierjobout", - "ClassifierJobOutIntegrations": ".classifierjobout", - "ClassifierJobOutIntegrationsTypedDict": ".classifierjobout", - "ClassifierJobOutJobType": ".classifierjobout", - "ClassifierJobOutObject": ".classifierjobout", - "ClassifierJobOutStatus": ".classifierjobout", - "ClassifierJobOutTypedDict": ".classifierjobout", - "ClassifierTargetIn": ".classifiertargetin", - "ClassifierTargetInTypedDict": ".classifiertargetin", - "ClassifierTargetOut": ".classifiertargetout", - "ClassifierTargetOutTypedDict": ".classifiertargetout", - "ClassifierTrainingParameters": ".classifiertrainingparameters", - "ClassifierTrainingParametersTypedDict": ".classifiertrainingparameters", - "ClassifierTrainingParametersIn": ".classifiertrainingparametersin", - "ClassifierTrainingParametersInTypedDict": ".classifiertrainingparametersin", - "CodeInterpreterTool": ".codeinterpretertool", - "CodeInterpreterToolType": ".codeinterpretertool", - "CodeInterpreterToolTypedDict": ".codeinterpretertool", - "CompletionArgs": ".completionargs", - "CompletionArgsTypedDict": ".completionargs", - "CompletionArgsStop": ".completionargsstop", - "CompletionArgsStopTypedDict": ".completionargsstop", - "CompletionChunk": ".completionchunk", - 
"CompletionChunkTypedDict": ".completionchunk", - "CompletionDetailedJobOut": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrations": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrationsTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutJobType": ".completiondetailedjobout", - "CompletionDetailedJobOutObject": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositories": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositoriesTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutStatus": ".completiondetailedjobout", - "CompletionDetailedJobOutTypedDict": ".completiondetailedjobout", - "CompletionEvent": ".completionevent", - "CompletionEventTypedDict": ".completionevent", - "CompletionFTModelOut": ".completionftmodelout", - "CompletionFTModelOutObject": ".completionftmodelout", - "CompletionFTModelOutTypedDict": ".completionftmodelout", - "ModelType": ".completionftmodelout", - "CompletionJobOut": ".completionjobout", - "CompletionJobOutObject": ".completionjobout", - "CompletionJobOutTypedDict": ".completionjobout", - "Integrations": ".completionjobout", - "IntegrationsTypedDict": ".completionjobout", - "JobType": ".completionjobout", - "Repositories": ".completionjobout", - "RepositoriesTypedDict": ".completionjobout", - "Status": ".completionjobout", - "CompletionResponseStreamChoice": ".completionresponsestreamchoice", - "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", - "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", - "CompletionTrainingParameters": ".completiontrainingparameters", - "CompletionTrainingParametersTypedDict": ".completiontrainingparameters", - "CompletionTrainingParametersIn": ".completiontrainingparametersin", - "CompletionTrainingParametersInTypedDict": ".completiontrainingparametersin", - "ContentChunk": ".contentchunk", - "ContentChunkTypedDict": ".contentchunk", - "ConversationAppendRequest": 
".conversationappendrequest", - "ConversationAppendRequestHandoffExecution": ".conversationappendrequest", - "ConversationAppendRequestTypedDict": ".conversationappendrequest", - "ConversationAppendStreamRequest": ".conversationappendstreamrequest", - "ConversationAppendStreamRequestHandoffExecution": ".conversationappendstreamrequest", - "ConversationAppendStreamRequestTypedDict": ".conversationappendstreamrequest", - "ConversationEvents": ".conversationevents", - "ConversationEventsData": ".conversationevents", - "ConversationEventsDataTypedDict": ".conversationevents", - "ConversationEventsTypedDict": ".conversationevents", - "ConversationHistory": ".conversationhistory", - "ConversationHistoryObject": ".conversationhistory", - "ConversationHistoryTypedDict": ".conversationhistory", - "Entries": ".conversationhistory", - "EntriesTypedDict": ".conversationhistory", - "ConversationInputs": ".conversationinputs", - "ConversationInputsTypedDict": ".conversationinputs", - "ConversationMessages": ".conversationmessages", - "ConversationMessagesObject": ".conversationmessages", - "ConversationMessagesTypedDict": ".conversationmessages", - "AgentVersion": ".conversationrequest", - "AgentVersionTypedDict": ".conversationrequest", - "ConversationRequest": ".conversationrequest", - "ConversationRequestTypedDict": ".conversationrequest", - "HandoffExecution": ".conversationrequest", - "Tools": ".conversationrequest", - "ToolsTypedDict": ".conversationrequest", - "ConversationResponse": ".conversationresponse", - "ConversationResponseObject": ".conversationresponse", - "ConversationResponseTypedDict": ".conversationresponse", - "Outputs": ".conversationresponse", - "OutputsTypedDict": ".conversationresponse", - "ConversationRestartRequest": ".conversationrestartrequest", - "ConversationRestartRequestAgentVersion": ".conversationrestartrequest", - "ConversationRestartRequestAgentVersionTypedDict": ".conversationrestartrequest", - "ConversationRestartRequestHandoffExecution": 
".conversationrestartrequest", - "ConversationRestartRequestTypedDict": ".conversationrestartrequest", - "ConversationRestartStreamRequest": ".conversationrestartstreamrequest", - "ConversationRestartStreamRequestAgentVersion": ".conversationrestartstreamrequest", - "ConversationRestartStreamRequestAgentVersionTypedDict": ".conversationrestartstreamrequest", - "ConversationRestartStreamRequestHandoffExecution": ".conversationrestartstreamrequest", - "ConversationRestartStreamRequestTypedDict": ".conversationrestartstreamrequest", - "ConversationStreamRequest": ".conversationstreamrequest", - "ConversationStreamRequestAgentVersion": ".conversationstreamrequest", - "ConversationStreamRequestAgentVersionTypedDict": ".conversationstreamrequest", - "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", - "ConversationStreamRequestTools": ".conversationstreamrequest", - "ConversationStreamRequestToolsTypedDict": ".conversationstreamrequest", - "ConversationStreamRequestTypedDict": ".conversationstreamrequest", - "ConversationUsageInfo": ".conversationusageinfo", - "ConversationUsageInfoTypedDict": ".conversationusageinfo", - "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", - "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", - "DeleteFileOut": ".deletefileout", - "DeleteFileOutTypedDict": ".deletefileout", - "DeleteModelOut": ".deletemodelout", - "DeleteModelOutTypedDict": ".deletemodelout", - "Content": ".deltamessage", - "ContentTypedDict": ".deltamessage", - "DeltaMessage": ".deltamessage", - "DeltaMessageTypedDict": ".deltamessage", - "DocumentLibraryTool": ".documentlibrarytool", - "DocumentLibraryToolType": ".documentlibrarytool", - "DocumentLibraryToolTypedDict": ".documentlibrarytool", - "DocumentOut": ".documentout", - "DocumentOutTypedDict": ".documentout", - "DocumentTextContent": ".documenttextcontent", - "DocumentTextContentTypedDict": 
".documenttextcontent", - "Attributes": ".documentupdatein", - "AttributesTypedDict": ".documentupdatein", - "DocumentUpdateIn": ".documentupdatein", - "DocumentUpdateInTypedDict": ".documentupdatein", - "DocumentURLChunk": ".documenturlchunk", - "DocumentURLChunkType": ".documenturlchunk", - "DocumentURLChunkTypedDict": ".documenturlchunk", - "EmbeddingDtype": ".embeddingdtype", - "EmbeddingRequest": ".embeddingrequest", - "EmbeddingRequestInputs": ".embeddingrequest", - "EmbeddingRequestInputsTypedDict": ".embeddingrequest", - "EmbeddingRequestTypedDict": ".embeddingrequest", - "EmbeddingResponse": ".embeddingresponse", - "EmbeddingResponseTypedDict": ".embeddingresponse", - "EmbeddingResponseData": ".embeddingresponsedata", - "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", - "EncodingFormat": ".encodingformat", - "EntityType": ".entitytype", - "EventOut": ".eventout", - "EventOutTypedDict": ".eventout", - "File": ".file", - "FileTypedDict": ".file", - "FileChunk": ".filechunk", - "FileChunkTypedDict": ".filechunk", - "FilePurpose": ".filepurpose", - "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", - "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", - "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", - "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", - "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", - "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", - "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", - "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", - "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", - "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", - "FilesAPIRoutesUploadFileMultiPartBodyParams": ".files_api_routes_upload_fileop", - 
"FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", - "FileSchema": ".fileschema", - "FileSchemaTypedDict": ".fileschema", - "FileSignedURL": ".filesignedurl", - "FileSignedURLTypedDict": ".filesignedurl", - "FIMCompletionRequest": ".fimcompletionrequest", - "FIMCompletionRequestStop": ".fimcompletionrequest", - "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", - "FIMCompletionRequestTypedDict": ".fimcompletionrequest", - "FIMCompletionResponse": ".fimcompletionresponse", - "FIMCompletionResponseTypedDict": ".fimcompletionresponse", - "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", - "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", - "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", - "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", - "FineTuneableModelType": ".finetuneablemodeltype", - "FTClassifierLossFunction": ".ftclassifierlossfunction", - "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", - "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", - "FTModelCard": ".ftmodelcard", - "FTModelCardType": ".ftmodelcard", - "FTModelCardTypedDict": ".ftmodelcard", - "Function": ".function", - "FunctionTypedDict": ".function", - "Arguments": ".functioncall", - "ArgumentsTypedDict": ".functioncall", - "FunctionCall": ".functioncall", - "FunctionCallTypedDict": ".functioncall", - "FunctionCallEntry": ".functioncallentry", - "FunctionCallEntryObject": ".functioncallentry", - "FunctionCallEntryType": ".functioncallentry", - "FunctionCallEntryTypedDict": ".functioncallentry", - "FunctionCallEntryArguments": ".functioncallentryarguments", - "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", - "FunctionCallEvent": ".functioncallevent", - "FunctionCallEventType": ".functioncallevent", - "FunctionCallEventTypedDict": ".functioncallevent", - "FunctionName": ".functionname", - "FunctionNameTypedDict": ".functionname", - 
"FunctionResultEntry": ".functionresultentry", - "FunctionResultEntryObject": ".functionresultentry", - "FunctionResultEntryType": ".functionresultentry", - "FunctionResultEntryTypedDict": ".functionresultentry", - "FunctionTool": ".functiontool", - "FunctionToolType": ".functiontool", - "FunctionToolTypedDict": ".functiontool", - "GithubRepositoryIn": ".githubrepositoryin", - "GithubRepositoryInType": ".githubrepositoryin", - "GithubRepositoryInTypedDict": ".githubrepositoryin", - "GithubRepositoryOut": ".githubrepositoryout", - "GithubRepositoryOutType": ".githubrepositoryout", - "GithubRepositoryOutTypedDict": ".githubrepositoryout", - "HTTPValidationError": ".httpvalidationerror", - "HTTPValidationErrorData": ".httpvalidationerror", - "ImageGenerationTool": ".imagegenerationtool", - "ImageGenerationToolType": ".imagegenerationtool", - "ImageGenerationToolTypedDict": ".imagegenerationtool", - "ImageURL": ".imageurl", - "ImageURLTypedDict": ".imageurl", - "ImageURLChunk": ".imageurlchunk", - "ImageURLChunkImageURL": ".imageurlchunk", - "ImageURLChunkImageURLTypedDict": ".imageurlchunk", - "ImageURLChunkType": ".imageurlchunk", - "ImageURLChunkTypedDict": ".imageurlchunk", - "InputEntries": ".inputentries", - "InputEntriesTypedDict": ".inputentries", - "Inputs": ".inputs", - "InputsTypedDict": ".inputs", - "InstructRequestInputs": ".inputs", - "InstructRequestInputsMessages": ".inputs", - "InstructRequestInputsMessagesTypedDict": ".inputs", - "InstructRequestInputsTypedDict": ".inputs", - "InstructRequest": ".instructrequest", - "InstructRequestMessages": ".instructrequest", - "InstructRequestMessagesTypedDict": ".instructrequest", - "InstructRequestTypedDict": ".instructrequest", - "Hyperparameters": ".jobin", - "HyperparametersTypedDict": ".jobin", - "JobIn": ".jobin", - "JobInIntegrations": ".jobin", - "JobInIntegrationsTypedDict": ".jobin", - "JobInRepositories": ".jobin", - "JobInRepositoriesTypedDict": ".jobin", - "JobInTypedDict": ".jobin", - 
"JobMetadataOut": ".jobmetadataout", - "JobMetadataOutTypedDict": ".jobmetadataout", - "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", - "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", - "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response1": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response1TypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - 
"JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "QueryParamStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsOut": ".jobsout", - "JobsOutData": ".jobsout", - "JobsOutDataTypedDict": ".jobsout", - "JobsOutObject": ".jobsout", - "JobsOutTypedDict": ".jobsout", - "JSONSchema": ".jsonschema", - "JSONSchemaTypedDict": ".jsonschema", - "LegacyJobMetadataOut": ".legacyjobmetadataout", - 
"LegacyJobMetadataOutObject": ".legacyjobmetadataout", - "LegacyJobMetadataOutTypedDict": ".legacyjobmetadataout", - "LibrariesDeleteV1Request": ".libraries_delete_v1op", - "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", - "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", - "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", - "LibrariesDocumentsGetExtractedTextSignedURLV1Request": ".libraries_documents_get_extracted_text_signed_url_v1op", - "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", - "LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", - "LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", - "LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", - "LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", - "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", - "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", - "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", - "LibrariesDocumentsGetV1RequestTypedDict": ".libraries_documents_get_v1op", - "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", - "LibrariesDocumentsListV1RequestTypedDict": ".libraries_documents_list_v1op", - "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", - "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", - "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", - "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", - "LibrariesDocumentsUploadV1DocumentUpload": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1DocumentUploadTypedDict": ".libraries_documents_upload_v1op", - 
"LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", - "LibrariesGetV1Request": ".libraries_get_v1op", - "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", - "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", - "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", - "LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op", - "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", - "LibrariesShareListV1Request": ".libraries_share_list_v1op", - "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", - "LibrariesUpdateV1Request": ".libraries_update_v1op", - "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", - "LibraryIn": ".libraryin", - "LibraryInTypedDict": ".libraryin", - "LibraryInUpdate": ".libraryinupdate", - "LibraryInUpdateTypedDict": ".libraryinupdate", - "LibraryOut": ".libraryout", - "LibraryOutTypedDict": ".libraryout", - "ListDocumentOut": ".listdocumentout", - "ListDocumentOutTypedDict": ".listdocumentout", - "ListFilesOut": ".listfilesout", - "ListFilesOutTypedDict": ".listfilesout", - "ListLibraryOut": ".listlibraryout", - "ListLibraryOutTypedDict": ".listlibraryout", - "ListSharingOut": ".listsharingout", - "ListSharingOutTypedDict": ".listsharingout", - "MessageEntries": ".messageentries", - "MessageEntriesTypedDict": ".messageentries", - "MessageInputContentChunks": ".messageinputcontentchunks", - "MessageInputContentChunksTypedDict": ".messageinputcontentchunks", - "MessageInputEntry": ".messageinputentry", - "MessageInputEntryContent": ".messageinputentry", - "MessageInputEntryContentTypedDict": ".messageinputentry", - "MessageInputEntryRole": ".messageinputentry", - "MessageInputEntryType": ".messageinputentry", - "MessageInputEntryTypedDict": ".messageinputentry", - "Object": ".messageinputentry", - "MessageOutputContentChunks": 
".messageoutputcontentchunks", - "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", - "MessageOutputEntry": ".messageoutputentry", - "MessageOutputEntryContent": ".messageoutputentry", - "MessageOutputEntryContentTypedDict": ".messageoutputentry", - "MessageOutputEntryObject": ".messageoutputentry", - "MessageOutputEntryRole": ".messageoutputentry", - "MessageOutputEntryType": ".messageoutputentry", - "MessageOutputEntryTypedDict": ".messageoutputentry", - "MessageOutputEvent": ".messageoutputevent", - "MessageOutputEventContent": ".messageoutputevent", - "MessageOutputEventContentTypedDict": ".messageoutputevent", - "MessageOutputEventRole": ".messageoutputevent", - "MessageOutputEventType": ".messageoutputevent", - "MessageOutputEventTypedDict": ".messageoutputevent", - "MetricOut": ".metricout", - "MetricOutTypedDict": ".metricout", - "MistralPromptMode": ".mistralpromptmode", - "ModelCapabilities": ".modelcapabilities", - "ModelCapabilitiesTypedDict": ".modelcapabilities", - "ModelConversation": ".modelconversation", - "ModelConversationObject": ".modelconversation", - "ModelConversationTools": ".modelconversation", - "ModelConversationToolsTypedDict": ".modelconversation", - "ModelConversationTypedDict": ".modelconversation", - "Data": ".modellist", - "DataTypedDict": ".modellist", - "ModelList": ".modellist", - "ModelListTypedDict": ".modellist", - "ModerationObject": ".moderationobject", - "ModerationObjectTypedDict": ".moderationobject", - "ModerationResponse": ".moderationresponse", - "ModerationResponseTypedDict": ".moderationresponse", - "NoResponseError": ".no_response_error", - "OCRImageObject": ".ocrimageobject", - "OCRImageObjectTypedDict": ".ocrimageobject", - "OCRPageDimensions": ".ocrpagedimensions", - "OCRPageDimensionsTypedDict": ".ocrpagedimensions", - "OCRPageObject": ".ocrpageobject", - "OCRPageObjectTypedDict": ".ocrpageobject", - "Document": ".ocrrequest", - "DocumentTypedDict": ".ocrrequest", - "OCRRequest": 
".ocrrequest", - "OCRRequestTypedDict": ".ocrrequest", - "TableFormat": ".ocrrequest", - "OCRResponse": ".ocrresponse", - "OCRResponseTypedDict": ".ocrresponse", - "Format": ".ocrtableobject", - "OCRTableObject": ".ocrtableobject", - "OCRTableObjectTypedDict": ".ocrtableobject", - "OCRUsageInfo": ".ocrusageinfo", - "OCRUsageInfoTypedDict": ".ocrusageinfo", - "OutputContentChunks": ".outputcontentchunks", - "OutputContentChunksTypedDict": ".outputcontentchunks", - "PaginationInfo": ".paginationinfo", - "PaginationInfoTypedDict": ".paginationinfo", - "Prediction": ".prediction", - "PredictionTypedDict": ".prediction", - "ProcessingStatusOut": ".processingstatusout", - "ProcessingStatusOutTypedDict": ".processingstatusout", - "RealtimeTranscriptionError": ".realtimetranscriptionerror", - "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", - "Message": ".realtimetranscriptionerrordetail", - "MessageTypedDict": ".realtimetranscriptionerrordetail", - "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", - "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", - "RealtimeTranscriptionSession": ".realtimetranscriptionsession", - "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", - "RealtimeTranscriptionSessionCreated": ".realtimetranscriptionsessioncreated", - "RealtimeTranscriptionSessionCreatedTypedDict": ".realtimetranscriptionsessioncreated", - "RealtimeTranscriptionSessionUpdated": ".realtimetranscriptionsessionupdated", - "RealtimeTranscriptionSessionUpdatedTypedDict": ".realtimetranscriptionsessionupdated", - "ReferenceChunk": ".referencechunk", - "ReferenceChunkType": ".referencechunk", - "ReferenceChunkTypedDict": ".referencechunk", - "RequestSource": ".requestsource", - "ResponseDoneEvent": ".responsedoneevent", - "ResponseDoneEventType": ".responsedoneevent", - "ResponseDoneEventTypedDict": ".responsedoneevent", - "ResponseErrorEvent": ".responseerrorevent", - 
"ResponseErrorEventType": ".responseerrorevent", - "ResponseErrorEventTypedDict": ".responseerrorevent", - "ResponseFormat": ".responseformat", - "ResponseFormatTypedDict": ".responseformat", - "ResponseFormats": ".responseformats", - "ResponseStartedEvent": ".responsestartedevent", - "ResponseStartedEventType": ".responsestartedevent", - "ResponseStartedEventTypedDict": ".responsestartedevent", - "ResponseValidationError": ".responsevalidationerror", - "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", - "RetrieveFileOut": ".retrievefileout", - "RetrieveFileOutTypedDict": ".retrievefileout", - "SampleType": ".sampletype", - "SDKError": ".sdkerror", - "Security": ".security", - "SecurityTypedDict": ".security", - "ShareEnum": ".shareenum", - "SharingDelete": ".sharingdelete", - "SharingDeleteTypedDict": ".sharingdelete", - "SharingIn": ".sharingin", - "SharingInTypedDict": ".sharingin", - "SharingOut": ".sharingout", - "SharingOutTypedDict": ".sharingout", - "Source": ".source", - "SSETypes": ".ssetypes", - "Role": ".systemmessage", - "SystemMessage": ".systemmessage", - "SystemMessageContent": ".systemmessage", - "SystemMessageContentTypedDict": ".systemmessage", - "SystemMessageTypedDict": ".systemmessage", - "SystemMessageContentChunks": ".systemmessagecontentchunks", - "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", - "TextChunk": ".textchunk", - "TextChunkType": ".textchunk", - "TextChunkTypedDict": ".textchunk", - "ThinkChunk": ".thinkchunk", - "ThinkChunkType": ".thinkchunk", - "ThinkChunkTypedDict": ".thinkchunk", - "Thinking": ".thinkchunk", - 
"ThinkingTypedDict": ".thinkchunk", - "TimestampGranularity": ".timestampgranularity", - "Tool": ".tool", - "ToolTypedDict": ".tool", - "ToolCall": ".toolcall", - "ToolCallTypedDict": ".toolcall", - "ToolChoice": ".toolchoice", - "ToolChoiceTypedDict": ".toolchoice", - "ToolChoiceEnum": ".toolchoiceenum", - "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventType": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", - "ToolExecutionDoneEvent": ".toolexecutiondoneevent", - "ToolExecutionDoneEventName": ".toolexecutiondoneevent", - "ToolExecutionDoneEventNameTypedDict": ".toolexecutiondoneevent", - "ToolExecutionDoneEventType": ".toolexecutiondoneevent", - "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", - "Name": ".toolexecutionentry", - "NameTypedDict": ".toolexecutionentry", - "ToolExecutionEntry": ".toolexecutionentry", - "ToolExecutionEntryObject": ".toolexecutionentry", - "ToolExecutionEntryType": ".toolexecutionentry", - "ToolExecutionEntryTypedDict": ".toolexecutionentry", - "ToolExecutionStartedEvent": ".toolexecutionstartedevent", - "ToolExecutionStartedEventName": ".toolexecutionstartedevent", - "ToolExecutionStartedEventNameTypedDict": ".toolexecutionstartedevent", - "ToolExecutionStartedEventType": ".toolexecutionstartedevent", - "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", - "ToolFileChunk": ".toolfilechunk", - "ToolFileChunkTool": ".toolfilechunk", - "ToolFileChunkToolTypedDict": ".toolfilechunk", - "ToolFileChunkType": ".toolfilechunk", - "ToolFileChunkTypedDict": ".toolfilechunk", - "ToolMessage": ".toolmessage", - "ToolMessageContent": ".toolmessage", - "ToolMessageContentTypedDict": ".toolmessage", - "ToolMessageRole": ".toolmessage", - "ToolMessageTypedDict": ".toolmessage", - "ToolReferenceChunk": 
".toolreferencechunk", - "ToolReferenceChunkTool": ".toolreferencechunk", - "ToolReferenceChunkToolTypedDict": ".toolreferencechunk", - "ToolReferenceChunkType": ".toolreferencechunk", - "ToolReferenceChunkTypedDict": ".toolreferencechunk", - "ToolTypes": ".tooltypes", - "TrainingFile": ".trainingfile", - "TrainingFileTypedDict": ".trainingfile", - "TranscriptionResponse": ".transcriptionresponse", - "TranscriptionResponseTypedDict": ".transcriptionresponse", - "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", - "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", - "Type": ".transcriptionsegmentchunk", - "TranscriptionStreamDone": ".transcriptionstreamdone", - "TranscriptionStreamDoneType": ".transcriptionstreamdone", - "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", - "TranscriptionStreamEvents": ".transcriptionstreamevents", - "TranscriptionStreamEventsData": ".transcriptionstreamevents", - "TranscriptionStreamEventsDataTypedDict": ".transcriptionstreamevents", - "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", - "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", - "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", - "TranscriptionStreamLanguageType": ".transcriptionstreamlanguage", - "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", - "TranscriptionStreamSegmentDelta": ".transcriptionstreamsegmentdelta", - "TranscriptionStreamSegmentDeltaType": ".transcriptionstreamsegmentdelta", - "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", - "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", - "TranscriptionStreamTextDeltaType": ".transcriptionstreamtextdelta", - "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", - "UnarchiveFTModelOut": ".unarchiveftmodelout", - "UnarchiveFTModelOutObject": ".unarchiveftmodelout", - "UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", - "UpdateFTModelIn": 
".updateftmodelin", - "UpdateFTModelInTypedDict": ".updateftmodelin", - "UploadFileOut": ".uploadfileout", - "UploadFileOutTypedDict": ".uploadfileout", - "UsageInfo": ".usageinfo", - "UsageInfoTypedDict": ".usageinfo", - "UserMessage": ".usermessage", - "UserMessageContent": ".usermessage", - "UserMessageContentTypedDict": ".usermessage", - "UserMessageRole": ".usermessage", - "UserMessageTypedDict": ".usermessage", - "Loc": ".validationerror", - "LocTypedDict": ".validationerror", - "ValidationError": ".validationerror", - "ValidationErrorTypedDict": ".validationerror", - "WandbIntegration": ".wandbintegration", - "WandbIntegrationType": ".wandbintegration", - "WandbIntegrationTypedDict": ".wandbintegration", - "WandbIntegrationOut": ".wandbintegrationout", - "WandbIntegrationOutType": ".wandbintegrationout", - "WandbIntegrationOutTypedDict": ".wandbintegrationout", - "WebSearchPremiumTool": ".websearchpremiumtool", - "WebSearchPremiumToolType": ".websearchpremiumtool", - "WebSearchPremiumToolTypedDict": ".websearchpremiumtool", - "WebSearchTool": ".websearchtool", - "WebSearchToolType": ".websearchtool", - "WebSearchToolTypedDict": ".websearchtool", -} - - -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - result = getattr(module, attr_name) - return result - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: 
- raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e - - -def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) diff --git a/src/mistralai/models/agent.py b/src/mistralai/models/agent.py deleted file mode 100644 index eb30905b..00000000 --- a/src/mistralai/models/agent.py +++ /dev/null @@ -1,142 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentToolsTypedDict = TypeAliasType( - "AgentToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -AgentTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - 
Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -AgentObject = Literal["agent",] - - -class AgentTypedDict(TypedDict): - model: str - name: str - id: str - version: int - versions: List[int] - created_at: datetime - updated_at: datetime - deployment_chat: bool - source: str - instructions: NotRequired[Nullable[str]] - r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - description: NotRequired[Nullable[str]] - handoffs: NotRequired[Nullable[List[str]]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - object: NotRequired[AgentObject] - - -class Agent(BaseModel): - model: str - - name: str - - id: str - - version: int - - versions: List[int] - - created_at: datetime - - updated_at: datetime - - deployment_chat: bool - - source: str - - instructions: OptionalNullable[str] = UNSET - r"""Instruction prompt the model will follow during the conversation.""" - - tools: Optional[List[AgentTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - description: OptionalNullable[str] = UNSET - - handoffs: OptionalNullable[List[str]] = UNSET - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - object: Optional[AgentObject] = "agent" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "description", - "handoffs", - "metadata", - "object", - ] - nullable_fields = ["instructions", "description", "handoffs", "metadata"] - 
null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agentaliasresponse.py b/src/mistralai/models/agentaliasresponse.py deleted file mode 100644 index c0928da9..00000000 --- a/src/mistralai/models/agentaliasresponse.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class AgentAliasResponseTypedDict(TypedDict): - alias: str - version: int - created_at: datetime - updated_at: datetime - - -class AgentAliasResponse(BaseModel): - alias: str - - version: int - - created_at: datetime - - updated_at: datetime diff --git a/src/mistralai/models/agentconversation.py b/src/mistralai/models/agentconversation.py deleted file mode 100644 index 6007b571..00000000 --- a/src/mistralai/models/agentconversation.py +++ /dev/null @@ -1,89 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -AgentConversationObject = Literal["conversation",] - - -AgentConversationAgentVersionTypedDict = TypeAliasType( - "AgentConversationAgentVersionTypedDict", Union[str, int] -) - - -AgentConversationAgentVersion = TypeAliasType( - "AgentConversationAgentVersion", Union[str, int] -) - - -class AgentConversationTypedDict(TypedDict): - id: str - created_at: datetime - updated_at: datetime - agent_id: str - name: NotRequired[Nullable[str]] - r"""Name given to the conversation.""" - description: NotRequired[Nullable[str]] - r"""Description of the what the conversation is about.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - r"""Custom metadata for the conversation.""" - object: NotRequired[AgentConversationObject] - agent_version: NotRequired[Nullable[AgentConversationAgentVersionTypedDict]] - - -class AgentConversation(BaseModel): - id: str - - created_at: datetime - - updated_at: datetime - - agent_id: str - - name: OptionalNullable[str] = UNSET - r"""Name given to the conversation.""" - - description: OptionalNullable[str] = UNSET - r"""Description of the what the conversation is about.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - r"""Custom metadata for the conversation.""" - - object: Optional[AgentConversationObject] = "conversation" - - agent_version: OptionalNullable[AgentConversationAgentVersion] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "description", "metadata", "object", "agent_version"] - nullable_fields = ["name", "description", "metadata", "agent_version"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - 
for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agentcreationrequest.py b/src/mistralai/models/agentcreationrequest.py deleted file mode 100644 index 6a14201e..00000000 --- a/src/mistralai/models/agentcreationrequest.py +++ /dev/null @@ -1,113 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentCreationRequestToolsTypedDict = TypeAliasType( - "AgentCreationRequestToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - 
FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -AgentCreationRequestTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class AgentCreationRequestTypedDict(TypedDict): - model: str - name: str - instructions: NotRequired[Nullable[str]] - r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentCreationRequestToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - description: NotRequired[Nullable[str]] - handoffs: NotRequired[Nullable[List[str]]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - - -class AgentCreationRequest(BaseModel): - model: str - - name: str - - instructions: OptionalNullable[str] = UNSET - r"""Instruction prompt the model will follow during the conversation.""" - - tools: Optional[List[AgentCreationRequestTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - description: OptionalNullable[str] = UNSET - - handoffs: OptionalNullable[List[str]] = UNSET - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "description", - "handoffs", - "metadata", - ] - nullable_fields = ["instructions", "description", "handoffs", "metadata"] - null_default_fields = 
[] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agenthandoffdoneevent.py b/src/mistralai/models/agenthandoffdoneevent.py deleted file mode 100644 index 1cdbf456..00000000 --- a/src/mistralai/models/agenthandoffdoneevent.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffDoneEventType = Literal["agent.handoff.done",] - - -class AgentHandoffDoneEventTypedDict(TypedDict): - id: str - next_agent_id: str - next_agent_name: str - type: NotRequired[AgentHandoffDoneEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class AgentHandoffDoneEvent(BaseModel): - id: str - - next_agent_id: str - - next_agent_name: str - - type: Optional[AgentHandoffDoneEventType] = "agent.handoff.done" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/agenthandoffentry.py b/src/mistralai/models/agenthandoffentry.py deleted file mode 100644 index 66136256..00000000 --- a/src/mistralai/models/agenthandoffentry.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffEntryObject = Literal["entry",] - - -AgentHandoffEntryType = Literal["agent.handoff",] - - -class AgentHandoffEntryTypedDict(TypedDict): - previous_agent_id: str - previous_agent_name: str - next_agent_id: str - next_agent_name: str - object: NotRequired[AgentHandoffEntryObject] - type: NotRequired[AgentHandoffEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - - -class AgentHandoffEntry(BaseModel): - previous_agent_id: str - - previous_agent_name: str - - next_agent_id: str - - next_agent_name: str - - object: Optional[AgentHandoffEntryObject] = "entry" - - type: Optional[AgentHandoffEntryType] = "agent.handoff" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agenthandoffstartedevent.py 
b/src/mistralai/models/agenthandoffstartedevent.py deleted file mode 100644 index 11bfa918..00000000 --- a/src/mistralai/models/agenthandoffstartedevent.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffStartedEventType = Literal["agent.handoff.started",] - - -class AgentHandoffStartedEventTypedDict(TypedDict): - id: str - previous_agent_id: str - previous_agent_name: str - type: NotRequired[AgentHandoffStartedEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class AgentHandoffStartedEvent(BaseModel): - id: str - - previous_agent_id: str - - previous_agent_name: str - - type: Optional[AgentHandoffStartedEventType] = "agent.handoff.started" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py b/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py deleted file mode 100644 index 6cf9d0e0..00000000 --- a/src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict): - agent_id: str - alias: str - version: int - - -class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - alias: Annotated[ - str, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) - ] - - version: Annotated[ - int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) - ] diff --git a/src/mistralai/models/agents_api_v1_agents_deleteop.py b/src/mistralai/models/agents_api_v1_agents_deleteop.py deleted file mode 100644 index 38e04953..00000000 --- a/src/mistralai/models/agents_api_v1_agents_deleteop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict): - agent_id: str - - -class AgentsAPIV1AgentsDeleteRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/agents_api_v1_agents_get_versionop.py b/src/mistralai/models/agents_api_v1_agents_get_versionop.py deleted file mode 100644 index fddb10dd..00000000 --- a/src/mistralai/models/agents_api_v1_agents_get_versionop.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict): - agent_id: str - version: str - - -class AgentsAPIV1AgentsGetVersionRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - version: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/agents_api_v1_agents_getop.py b/src/mistralai/models/agents_api_v1_agents_getop.py deleted file mode 100644 index 2b7d89a5..00000000 --- a/src/mistralai/models/agents_api_v1_agents_getop.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -QueryParamAgentVersionTypedDict = TypeAliasType( - "QueryParamAgentVersionTypedDict", Union[int, str] -) - - -QueryParamAgentVersion = TypeAliasType("QueryParamAgentVersion", Union[int, str]) - - -class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): - agent_id: str - agent_version: NotRequired[Nullable[QueryParamAgentVersionTypedDict]] - - -class AgentsAPIV1AgentsGetRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - agent_version: Annotated[ - OptionalNullable[QueryParamAgentVersion], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - @model_serializer(mode="wrap") - def 
serialize_model(self, handler): - optional_fields = ["agent_version"] - nullable_fields = ["agent_version"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py b/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py deleted file mode 100644 index 650a7187..00000000 --- a/src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict): - agent_id: str - - -class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/agents_api_v1_agents_list_versionsop.py b/src/mistralai/models/agents_api_v1_agents_list_versionsop.py deleted file mode 100644 index cf988b3d..00000000 --- a/src/mistralai/models/agents_api_v1_agents_list_versionsop.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict): - agent_id: str - page: NotRequired[int] - r"""Page number (0-indexed)""" - page_size: NotRequired[int] - r"""Number of versions per page""" - - -class AgentsAPIV1AgentsListVersionsRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - r"""Page number (0-indexed)""" - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 20 - r"""Number of versions per page""" diff --git a/src/mistralai/models/agents_api_v1_agents_listop.py b/src/mistralai/models/agents_api_v1_agents_listop.py deleted file mode 100644 index 88b5bad1..00000000 --- a/src/mistralai/models/agents_api_v1_agents_listop.py +++ /dev/null @@ -1,98 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .requestsource import RequestSource -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Any, Dict, List, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): - page: NotRequired[int] - r"""Page number (0-indexed)""" - page_size: NotRequired[int] - r"""Number of agents per page""" - deployment_chat: NotRequired[Nullable[bool]] - sources: NotRequired[Nullable[List[RequestSource]]] - name: NotRequired[Nullable[str]] - id: NotRequired[Nullable[str]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - - -class AgentsAPIV1AgentsListRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - r"""Page number (0-indexed)""" - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 20 - r"""Number of agents per page""" - - deployment_chat: Annotated[ - OptionalNullable[bool], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - sources: Annotated[ - OptionalNullable[List[RequestSource]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - name: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - id: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - metadata: Annotated[ - OptionalNullable[Dict[str, Any]], - FieldMetadata(query=QueryParamMetadata(serialization="json")), - ] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "deployment_chat", - "sources", - 
"name", - "id", - "metadata", - ] - nullable_fields = ["deployment_chat", "sources", "name", "id", "metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agents_api_v1_agents_update_versionop.py b/src/mistralai/models/agents_api_v1_agents_update_versionop.py deleted file mode 100644 index 5e4b97b3..00000000 --- a/src/mistralai/models/agents_api_v1_agents_update_versionop.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1AgentsUpdateVersionRequestTypedDict(TypedDict): - agent_id: str - version: int - - -class AgentsAPIV1AgentsUpdateVersionRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - version: Annotated[ - int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) - ] diff --git a/src/mistralai/models/agents_api_v1_agents_updateop.py b/src/mistralai/models/agents_api_v1_agents_updateop.py deleted file mode 100644 index 32696fbe..00000000 --- a/src/mistralai/models/agents_api_v1_agents_updateop.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .agentupdaterequest import AgentUpdateRequest, AgentUpdateRequestTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): - agent_id: str - agent_update_request: AgentUpdateRequestTypedDict - - -class AgentsAPIV1AgentsUpdateRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - agent_update_request: Annotated[ - AgentUpdateRequest, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/agents_api_v1_conversations_append_streamop.py b/src/mistralai/models/agents_api_v1_conversations_append_streamop.py deleted file mode 100644 index d2489ffb..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_append_streamop.py +++ /dev/null @@ -1,28 +0,0 @@ 
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .conversationappendstreamrequest import ( - ConversationAppendStreamRequest, - ConversationAppendStreamRequestTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1ConversationsAppendStreamRequestTypedDict(TypedDict): - conversation_id: str - r"""ID of the conversation to which we append entries.""" - conversation_append_stream_request: ConversationAppendStreamRequestTypedDict - - -class AgentsAPIV1ConversationsAppendStreamRequest(BaseModel): - conversation_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""ID of the conversation to which we append entries.""" - - conversation_append_stream_request: Annotated[ - ConversationAppendStreamRequest, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/agents_api_v1_conversations_appendop.py b/src/mistralai/models/agents_api_v1_conversations_appendop.py deleted file mode 100644 index ba37697e..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_appendop.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .conversationappendrequest import ( - ConversationAppendRequest, - ConversationAppendRequestTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1ConversationsAppendRequestTypedDict(TypedDict): - conversation_id: str - r"""ID of the conversation to which we append entries.""" - conversation_append_request: ConversationAppendRequestTypedDict - - -class AgentsAPIV1ConversationsAppendRequest(BaseModel): - conversation_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""ID of the conversation to which we append entries.""" - - conversation_append_request: Annotated[ - ConversationAppendRequest, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/agents_api_v1_conversations_deleteop.py b/src/mistralai/models/agents_api_v1_conversations_deleteop.py deleted file mode 100644 index 94126cae..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_deleteop.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict): - conversation_id: str - r"""ID of the conversation from which we are fetching metadata.""" - - -class AgentsAPIV1ConversationsDeleteRequest(BaseModel): - conversation_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""ID of the conversation from which we are fetching metadata.""" diff --git a/src/mistralai/models/agents_api_v1_conversations_getop.py b/src/mistralai/models/agents_api_v1_conversations_getop.py deleted file mode 100644 index a37a61ba..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_getop.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .agentconversation import AgentConversation, AgentConversationTypedDict -from .modelconversation import ModelConversation, ModelConversationTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): - conversation_id: str - r"""ID of the conversation from which we are fetching metadata.""" - - -class AgentsAPIV1ConversationsGetRequest(BaseModel): - conversation_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""ID of the conversation from which we are fetching metadata.""" - - -AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType( - "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", - Union[AgentConversationTypedDict, 
ModelConversationTypedDict], -) -r"""Successful Response""" - - -AgentsAPIV1ConversationsGetResponseV1ConversationsGet = TypeAliasType( - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", - Union[AgentConversation, ModelConversation], -) -r"""Successful Response""" diff --git a/src/mistralai/models/agents_api_v1_conversations_historyop.py b/src/mistralai/models/agents_api_v1_conversations_historyop.py deleted file mode 100644 index b8c33d1b..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_historyop.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): - conversation_id: str - r"""ID of the conversation from which we are fetching entries.""" - - -class AgentsAPIV1ConversationsHistoryRequest(BaseModel): - conversation_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""ID of the conversation from which we are fetching entries.""" diff --git a/src/mistralai/models/agents_api_v1_conversations_listop.py b/src/mistralai/models/agents_api_v1_conversations_listop.py deleted file mode 100644 index d314f838..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_listop.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .agentconversation import AgentConversation, AgentConversationTypedDict -from .modelconversation import ModelConversation, ModelConversationTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Any, Dict, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): - page: NotRequired[int] - page_size: NotRequired[int] - metadata: NotRequired[Nullable[Dict[str, Any]]] - - -class AgentsAPIV1ConversationsListRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - - metadata: Annotated[ - OptionalNullable[Dict[str, Any]], - FieldMetadata(query=QueryParamMetadata(serialization="json")), - ] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["page", "page_size", "metadata"] - nullable_fields = ["metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m - - -ResponseBodyTypedDict = TypeAliasType( - "ResponseBodyTypedDict", - Union[AgentConversationTypedDict, 
ModelConversationTypedDict], -) - - -ResponseBody = TypeAliasType( - "ResponseBody", Union[AgentConversation, ModelConversation] -) diff --git a/src/mistralai/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/models/agents_api_v1_conversations_messagesop.py deleted file mode 100644 index f0dac8bf..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_messagesop.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): - conversation_id: str - r"""ID of the conversation from which we are fetching messages.""" - - -class AgentsAPIV1ConversationsMessagesRequest(BaseModel): - conversation_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""ID of the conversation from which we are fetching messages.""" diff --git a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py deleted file mode 100644 index f39b74eb..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_restart_streamop.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .conversationrestartstreamrequest import ( - ConversationRestartStreamRequest, - ConversationRestartStreamRequestTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): - conversation_id: str - r"""ID of the original conversation which is being restarted.""" - conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict - - -class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): - conversation_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""ID of the original conversation which is being restarted.""" - - conversation_restart_stream_request: Annotated[ - ConversationRestartStreamRequest, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/agents_api_v1_conversations_restartop.py b/src/mistralai/models/agents_api_v1_conversations_restartop.py deleted file mode 100644 index f706c066..00000000 --- a/src/mistralai/models/agents_api_v1_conversations_restartop.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .conversationrestartrequest import ( - ConversationRestartRequest, - ConversationRestartRequestTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): - conversation_id: str - r"""ID of the original conversation which is being restarted.""" - conversation_restart_request: ConversationRestartRequestTypedDict - - -class AgentsAPIV1ConversationsRestartRequest(BaseModel): - conversation_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""ID of the original conversation which is being restarted.""" - - conversation_restart_request: Annotated[ - ConversationRestartRequest, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/agentscompletionrequest.py b/src/mistralai/models/agentscompletionrequest.py deleted file mode 100644 index cc07a6bd..00000000 --- a/src/mistralai/models/agentscompletionrequest.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentsCompletionRequestStopTypedDict = TypeAliasType( - "AgentsCompletionRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionRequestStop = TypeAliasType( - "AgentsCompletionRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionRequestMessagesTypedDict = TypeAliasType( - "AgentsCompletionRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -AgentsCompletionRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -AgentsCompletionRequestToolChoiceTypedDict = TypeAliasType( - "AgentsCompletionRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -AgentsCompletionRequestToolChoice = TypeAliasType( - "AgentsCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class AgentsCompletionRequestTypedDict(TypedDict): - messages: List[AgentsCompletionRequestMessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - agent_id: str - r"""The ID of the agent to use for this completion.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[AgentsCompletionRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - response_format: NotRequired[ResponseFormatTypedDict] - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - parallel_tool_calls: NotRequired[bool] - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" - - -class AgentsCompletionRequest(BaseModel): - messages: List[AgentsCompletionRequestMessages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - agent_id: str - r"""The ID of the agent to use for this completion.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[AgentsCompletionRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - response_format: Optional[ResponseFormat] = None - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[AgentsCompletionRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - parallel_tool_calls: Optional[bool] = None - - prompt_mode: OptionalNullable[MistralPromptMode] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agentscompletionstreamrequest.py b/src/mistralai/models/agentscompletionstreamrequest.py deleted file mode 100644 index d6a887be..00000000 --- a/src/mistralai/models/agentscompletionstreamrequest.py +++ /dev/null @@ -1,190 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentsCompletionStreamRequestStopTypedDict = TypeAliasType( - "AgentsCompletionStreamRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionStreamRequestStop = TypeAliasType( - "AgentsCompletionStreamRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -AgentsCompletionStreamRequestMessagesTypedDict = TypeAliasType( - "AgentsCompletionStreamRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -AgentsCompletionStreamRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -AgentsCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( - "AgentsCompletionStreamRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) - - -AgentsCompletionStreamRequestToolChoice = TypeAliasType( - "AgentsCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) - - -class AgentsCompletionStreamRequestTypedDict(TypedDict): - messages: List[AgentsCompletionStreamRequestMessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - agent_id: str - r"""The ID of the agent to use for this completion.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[AgentsCompletionStreamRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - response_format: NotRequired[ResponseFormatTypedDict] - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - tools: NotRequired[Nullable[List[ToolTypedDict]]] - tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict] - presence_penalty: NotRequired[float] - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - parallel_tool_calls: NotRequired[bool] - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" - - -class AgentsCompletionStreamRequest(BaseModel): - messages: List[AgentsCompletionStreamRequestMessages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - agent_id: str - r"""The ID of the agent to use for this completion.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[AgentsCompletionStreamRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - response_format: Optional[ResponseFormat] = None - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - tools: OptionalNullable[List[Tool]] = UNSET - - tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None - - presence_penalty: Optional[float] = None - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - parallel_tool_calls: Optional[bool] = None - - prompt_mode: OptionalNullable[MistralPromptMode] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and 
is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/agentupdaterequest.py b/src/mistralai/models/agentupdaterequest.py deleted file mode 100644 index e496907c..00000000 --- a/src/mistralai/models/agentupdaterequest.py +++ /dev/null @@ -1,127 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -AgentUpdateRequestToolsTypedDict = TypeAliasType( - "AgentUpdateRequestToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -AgentUpdateRequestTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: 
get_discriminator(m, "type", "type")), -] - - -class AgentUpdateRequestTypedDict(TypedDict): - instructions: NotRequired[Nullable[str]] - r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentUpdateRequestToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - model: NotRequired[Nullable[str]] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - handoffs: NotRequired[Nullable[List[str]]] - deployment_chat: NotRequired[Nullable[bool]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - - -class AgentUpdateRequest(BaseModel): - instructions: OptionalNullable[str] = UNSET - r"""Instruction prompt the model will follow during the conversation.""" - - tools: Optional[List[AgentUpdateRequestTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - model: OptionalNullable[str] = UNSET - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - handoffs: OptionalNullable[List[str]] = UNSET - - deployment_chat: OptionalNullable[bool] = UNSET - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "model", - "name", - "description", - "handoffs", - "deployment_chat", - "metadata", - ] - nullable_fields = [ - "instructions", - "model", - "name", - "description", - "handoffs", - "deployment_chat", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k 
in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/apiendpoint.py b/src/mistralai/models/apiendpoint.py deleted file mode 100644 index 0ad9366f..00000000 --- a/src/mistralai/models/apiendpoint.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -APIEndpoint = Union[ - Literal[ - "/v1/chat/completions", - "/v1/embeddings", - "/v1/fim/completions", - "/v1/moderations", - "/v1/chat/moderations", - "/v1/ocr", - "/v1/classifications", - "/v1/chat/classifications", - "/v1/conversations", - "/v1/audio/transcriptions", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/archiveftmodelout.py b/src/mistralai/models/archiveftmodelout.py deleted file mode 100644 index 0f753cfc..00000000 --- a/src/mistralai/models/archiveftmodelout.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ArchiveFTModelOutObject = Literal["model",] - - -class ArchiveFTModelOutTypedDict(TypedDict): - id: str - object: NotRequired[ArchiveFTModelOutObject] - archived: NotRequired[bool] - - -class ArchiveFTModelOut(BaseModel): - id: str - - object: Optional[ArchiveFTModelOutObject] = "model" - - archived: Optional[bool] = True diff --git a/src/mistralai/models/assistantmessage.py b/src/mistralai/models/assistantmessage.py deleted file mode 100644 index a38a10c4..00000000 --- a/src/mistralai/models/assistantmessage.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -AssistantMessageContentTypedDict = TypeAliasType( - "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -AssistantMessageContent = TypeAliasType( - "AssistantMessageContent", Union[str, List[ContentChunk]] -) - - -AssistantMessageRole = Literal["assistant",] - - -class AssistantMessageTypedDict(TypedDict): - content: NotRequired[Nullable[AssistantMessageContentTypedDict]] - tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - prefix: NotRequired[bool] - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: NotRequired[AssistantMessageRole] - - -class AssistantMessage(BaseModel): - content: OptionalNullable[AssistantMessageContent] = UNSET - - tool_calls: OptionalNullable[List[ToolCall]] = UNSET - - prefix: Optional[bool] = False - r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - - role: Optional[AssistantMessageRole] = "assistant" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["content", "tool_calls", "prefix", "role"] - nullable_fields = ["content", "tool_calls"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/audiochunk.py b/src/mistralai/models/audiochunk.py deleted file mode 100644 index 64fc43ff..00000000 --- a/src/mistralai/models/audiochunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AudioChunkType = Literal["input_audio",] - - -class AudioChunkTypedDict(TypedDict): - input_audio: str - type: NotRequired[AudioChunkType] - - -class AudioChunk(BaseModel): - input_audio: str - - type: Optional[AudioChunkType] = "input_audio" diff --git a/src/mistralai/models/audioencoding.py b/src/mistralai/models/audioencoding.py deleted file mode 100644 index 13eb6d15..00000000 --- a/src/mistralai/models/audioencoding.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -AudioEncoding = Union[ - Literal[ - "pcm_s16le", - "pcm_s32le", - "pcm_f16le", - "pcm_f32le", - "pcm_mulaw", - "pcm_alaw", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/audioformat.py b/src/mistralai/models/audioformat.py deleted file mode 100644 index 48ab648c..00000000 --- a/src/mistralai/models/audioformat.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .audioencoding import AudioEncoding -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class AudioFormatTypedDict(TypedDict): - encoding: AudioEncoding - sample_rate: int - - -class AudioFormat(BaseModel): - encoding: AudioEncoding - - sample_rate: int diff --git a/src/mistralai/models/audiotranscriptionrequest.py b/src/mistralai/models/audiotranscriptionrequest.py deleted file mode 100644 index 86417b42..00000000 --- a/src/mistralai/models/audiotranscriptionrequest.py +++ /dev/null @@ -1,107 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .file import File, FileTypedDict -from .timestampgranularity import TimestampGranularity -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class AudioTranscriptionRequestTypedDict(TypedDict): - model: str - r"""ID of the model to be used.""" - file: NotRequired[FileTypedDict] - file_url: NotRequired[Nullable[str]] - r"""Url of a file to be transcribed""" - file_id: NotRequired[Nullable[str]] - r"""ID of a file uploaded to /v1/files""" - language: NotRequired[Nullable[str]] - r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" - temperature: NotRequired[Nullable[float]] - stream: Literal[False] - diarize: NotRequired[bool] - context_bias: NotRequired[List[str]] - timestamp_granularities: NotRequired[List[TimestampGranularity]] - r"""Granularities of timestamps to include in the response.""" - - -class AudioTranscriptionRequest(BaseModel): - model: Annotated[str, FieldMetadata(multipart=True)] - r"""ID of the model to be used.""" - - file: Annotated[ - Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) - ] = None - - file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Url of a file to be transcribed""" - - file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""ID of a file uploaded to /v1/files""" - - language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" - - temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( - UNSET - ) - - STREAM: Annotated[ - Annotated[Optional[Literal[False]], AfterValidator(validate_const(False))], - pydantic.Field(alias="stream"), - FieldMetadata(multipart=True), - ] = False - - diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False - - context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None - - timestamp_granularities: Annotated[ - Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) - ] = None - r"""Granularities of timestamps to include in the response.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "file", - "file_url", - "file_id", - "language", - "temperature", - "stream", - "diarize", - "context_bias", - "timestamp_granularities", - ] - nullable_fields = ["file_url", "file_id", "language", "temperature"] - null_default_fields = [] - - serialized = handler(self) - - m = 
{} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/audiotranscriptionrequeststream.py b/src/mistralai/models/audiotranscriptionrequeststream.py deleted file mode 100644 index 1f4087e8..00000000 --- a/src/mistralai/models/audiotranscriptionrequeststream.py +++ /dev/null @@ -1,105 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .file import File, FileTypedDict -from .timestampgranularity import TimestampGranularity -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, MultipartFormMetadata, validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class AudioTranscriptionRequestStreamTypedDict(TypedDict): - model: str - file: NotRequired[FileTypedDict] - file_url: NotRequired[Nullable[str]] - r"""Url of a file to be transcribed""" - file_id: NotRequired[Nullable[str]] - r"""ID of a file uploaded to /v1/files""" - language: NotRequired[Nullable[str]] - r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" - temperature: NotRequired[Nullable[float]] - stream: Literal[True] - diarize: NotRequired[bool] - context_bias: NotRequired[List[str]] - timestamp_granularities: NotRequired[List[TimestampGranularity]] - r"""Granularities of timestamps to include in the response.""" - - -class AudioTranscriptionRequestStream(BaseModel): - model: Annotated[str, FieldMetadata(multipart=True)] - - file: Annotated[ - Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) - ] = None - - file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Url of a file to be transcribed""" - - file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""ID of a file uploaded to /v1/files""" - - language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET - r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" - - temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( - UNSET - ) - - STREAM: Annotated[ - Annotated[Optional[Literal[True]], AfterValidator(validate_const(True))], - pydantic.Field(alias="stream"), - FieldMetadata(multipart=True), - ] = True - - diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False - - context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None - - timestamp_granularities: Annotated[ - Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) - ] = None - r"""Granularities of timestamps to include in the response.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "file", - "file_url", - "file_id", - "language", - "temperature", - "stream", - "diarize", - "context_bias", - "timestamp_granularities", - ] - nullable_fields = ["file_url", "file_id", "language", "temperature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in 
type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/basemodelcard.py b/src/mistralai/models/basemodelcard.py deleted file mode 100644 index 706841b7..00000000 --- a/src/mistralai/models/basemodelcard.py +++ /dev/null @@ -1,110 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -BaseModelCardType = Literal["base",] - - -class BaseModelCardTypedDict(TypedDict): - id: str - capabilities: ModelCapabilitiesTypedDict - object: NotRequired[str] - created: NotRequired[int] - owned_by: NotRequired[str] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - deprecation: NotRequired[Nullable[datetime]] - deprecation_replacement_model: NotRequired[Nullable[str]] - default_model_temperature: NotRequired[Nullable[float]] - type: BaseModelCardType - - -class BaseModelCard(BaseModel): - id: str - - capabilities: ModelCapabilities - - object: 
Optional[str] = "model" - - created: Optional[int] = None - - owned_by: Optional[str] = "mistralai" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - deprecation: OptionalNullable[datetime] = UNSET - - deprecation_replacement_model: OptionalNullable[str] = UNSET - - default_model_temperature: OptionalNullable[float] = UNSET - - TYPE: Annotated[ - Annotated[Optional[BaseModelCardType], AfterValidator(validate_const("base"))], - pydantic.Field(alias="type"), - ] = "base" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "created", - "owned_by", - "name", - "description", - "max_context_length", - "aliases", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - "type", - ] - nullable_fields = [ - "name", - "description", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/batcherror.py b/src/mistralai/models/batcherror.py deleted file mode 100644 index 4f823446..00000000 --- a/src/mistralai/models/batcherror.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class BatchErrorTypedDict(TypedDict): - message: str - count: NotRequired[int] - - -class BatchError(BaseModel): - message: str - - count: Optional[int] = 1 diff --git a/src/mistralai/models/batchjobin.py b/src/mistralai/models/batchjobin.py deleted file mode 100644 index 839a9b3c..00000000 --- a/src/mistralai/models/batchjobin.py +++ /dev/null @@ -1,82 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .apiendpoint import APIEndpoint -from .batchrequest import BatchRequest, BatchRequestTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Dict, List, Optional -from typing_extensions import NotRequired, TypedDict - - -class BatchJobInTypedDict(TypedDict): - endpoint: APIEndpoint - input_files: NotRequired[Nullable[List[str]]] - r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" - requests: NotRequired[Nullable[List[BatchRequestTypedDict]]] - model: NotRequired[Nullable[str]] - r"""The model to be used for batch inference.""" - agent_id: NotRequired[Nullable[str]] - r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" - metadata: NotRequired[Nullable[Dict[str, str]]] - r"""The metadata of your choice to be associated with the batch inference job.""" - timeout_hours: NotRequired[int] - r"""The timeout in hours for the batch inference job.""" - - -class BatchJobIn(BaseModel): - endpoint: APIEndpoint - - input_files: OptionalNullable[List[str]] = UNSET - r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" - - requests: OptionalNullable[List[BatchRequest]] = UNSET - - model: OptionalNullable[str] = UNSET - r"""The model to be used for batch inference.""" - - agent_id: OptionalNullable[str] = UNSET - r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" - - metadata: OptionalNullable[Dict[str, str]] = UNSET - r"""The metadata of your choice to be associated with the batch inference job.""" - - timeout_hours: Optional[int] = 24 - r"""The timeout in hours for the batch inference job.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "input_files", - "requests", - "model", - "agent_id", - "metadata", - "timeout_hours", - ] - nullable_fields = ["input_files", "requests", "model", "agent_id", "metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/batchjobout.py b/src/mistralai/models/batchjobout.py deleted file mode 100644 index 904cd349..00000000 --- a/src/mistralai/models/batchjobout.py +++ /dev/null @@ -1,123 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .batcherror import BatchError, BatchErrorTypedDict -from .batchjobstatus import BatchJobStatus -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -BatchJobOutObject = Literal["batch",] - - -class BatchJobOutTypedDict(TypedDict): - id: str - input_files: List[str] - endpoint: str - errors: List[BatchErrorTypedDict] - status: BatchJobStatus - created_at: int - total_requests: int - completed_requests: int - succeeded_requests: int - failed_requests: int - object: NotRequired[BatchJobOutObject] - metadata: NotRequired[Nullable[Dict[str, Any]]] - model: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - output_file: NotRequired[Nullable[str]] - error_file: NotRequired[Nullable[str]] - outputs: NotRequired[Nullable[List[Dict[str, Any]]]] - started_at: NotRequired[Nullable[int]] - completed_at: NotRequired[Nullable[int]] - - -class BatchJobOut(BaseModel): - id: str - - input_files: List[str] - - endpoint: str - - errors: List[BatchError] - - status: BatchJobStatus - - created_at: int - - total_requests: int - - completed_requests: int - - succeeded_requests: int - - failed_requests: int - - object: Optional[BatchJobOutObject] = "batch" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - model: OptionalNullable[str] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - output_file: OptionalNullable[str] = UNSET - - error_file: OptionalNullable[str] = UNSET - - outputs: OptionalNullable[List[Dict[str, Any]]] = UNSET - - started_at: OptionalNullable[int] = UNSET - - completed_at: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "metadata", - 
"model", - "agent_id", - "output_file", - "error_file", - "outputs", - "started_at", - "completed_at", - ] - nullable_fields = [ - "metadata", - "model", - "agent_id", - "output_file", - "error_file", - "outputs", - "started_at", - "completed_at", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/batchjobsout.py b/src/mistralai/models/batchjobsout.py deleted file mode 100644 index a1eba5db..00000000 --- a/src/mistralai/models/batchjobsout.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .batchjobout import BatchJobOut, BatchJobOutTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -BatchJobsOutObject = Literal["list",] - - -class BatchJobsOutTypedDict(TypedDict): - total: int - data: NotRequired[List[BatchJobOutTypedDict]] - object: NotRequired[BatchJobsOutObject] - - -class BatchJobsOut(BaseModel): - total: int - - data: Optional[List[BatchJobOut]] = None - - object: Optional[BatchJobsOutObject] = "list" diff --git a/src/mistralai/models/batchjobstatus.py b/src/mistralai/models/batchjobstatus.py deleted file mode 100644 index 4b28059b..00000000 --- a/src/mistralai/models/batchjobstatus.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -BatchJobStatus = Literal[ - "QUEUED", - "RUNNING", - "SUCCESS", - "FAILED", - "TIMEOUT_EXCEEDED", - "CANCELLATION_REQUESTED", - "CANCELLED", -] diff --git a/src/mistralai/models/batchrequest.py b/src/mistralai/models/batchrequest.py deleted file mode 100644 index 3d1e98f7..00000000 --- a/src/mistralai/models/batchrequest.py +++ /dev/null @@ -1,48 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict -from typing_extensions import NotRequired, TypedDict - - -class BatchRequestTypedDict(TypedDict): - body: Dict[str, Any] - custom_id: NotRequired[Nullable[str]] - - -class BatchRequest(BaseModel): - body: Dict[str, Any] - - custom_id: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["custom_id"] - nullable_fields = ["custom_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/builtinconnectors.py b/src/mistralai/models/builtinconnectors.py deleted file mode 100644 index 6a3b2476..00000000 --- a/src/mistralai/models/builtinconnectors.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -BuiltInConnectors = Literal[ - "web_search", - "web_search_premium", - "code_interpreter", - "image_generation", - "document_library", -] diff --git a/src/mistralai/models/chatclassificationrequest.py b/src/mistralai/models/chatclassificationrequest.py deleted file mode 100644 index f06f4f34..00000000 --- a/src/mistralai/models/chatclassificationrequest.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .inputs import Inputs, InputsTypedDict -from mistralai.types import BaseModel -import pydantic -from typing_extensions import Annotated, TypedDict - - -class ChatClassificationRequestTypedDict(TypedDict): - model: str - inputs: InputsTypedDict - r"""Chat to classify""" - - -class ChatClassificationRequest(BaseModel): - model: str - - inputs: Annotated[Inputs, pydantic.Field(alias="input")] - r"""Chat to classify""" diff --git a/src/mistralai/models/chatcompletionchoice.py b/src/mistralai/models/chatcompletionchoice.py deleted file mode 100644 index f2057ab4..00000000 --- a/src/mistralai/models/chatcompletionchoice.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai.types import BaseModel, UnrecognizedStr -from typing import Literal, Union -from typing_extensions import TypedDict - - -FinishReason = Union[ - Literal[ - "stop", - "length", - "model_length", - "error", - "tool_calls", - ], - UnrecognizedStr, -] - - -class ChatCompletionChoiceTypedDict(TypedDict): - index: int - message: AssistantMessageTypedDict - finish_reason: FinishReason - - -class ChatCompletionChoice(BaseModel): - index: int - - message: AssistantMessage - - finish_reason: FinishReason diff --git a/src/mistralai/models/chatcompletionrequest.py b/src/mistralai/models/chatcompletionrequest.py deleted file mode 100644 index ad8b5428..00000000 --- a/src/mistralai/models/chatcompletionrequest.py +++ /dev/null @@ -1,215 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -StopTypedDict = TypeAliasType("StopTypedDict", 
Union[str, List[str]]) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -Stop = TypeAliasType("Stop", Union[str, List[str]]) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -MessagesTypedDict = TypeAliasType( - "MessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -Messages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( - "ChatCompletionRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) -r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - -ChatCompletionRequestToolChoice = TypeAliasType( - "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) -r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - -class ChatCompletionRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[MessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[StopTypedDict] - r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - response_format: NotRequired[ResponseFormatTypedDict] - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - tools: NotRequired[Nullable[List[ToolTypedDict]]] - r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" - tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] - r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - presence_penalty: NotRequired[float] - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - parallel_tool_calls: NotRequired[bool] - r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - safe_prompt: NotRequired[bool] - r"""Whether to inject a safety prompt before all conversations.""" - - -class ChatCompletionRequest(BaseModel): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - - messages: List[Messages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = None - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[Stop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - response_format: Optional[ResponseFormat] = None - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - tools: OptionalNullable[List[Tool]] = UNSET - r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" - - tool_choice: Optional[ChatCompletionRequestToolChoice] = None - r"""Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - presence_penalty: Optional[float] = None - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - parallel_tool_calls: Optional[bool] = None - r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - - prompt_mode: OptionalNullable[MistralPromptMode] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" - - safe_prompt: Optional[bool] = None - r"""Whether to inject a safety prompt before all conversations.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/chatcompletionresponse.py b/src/mistralai/models/chatcompletionresponse.py deleted file mode 100644 index 3d03b126..00000000 --- a/src/mistralai/models/chatcompletionresponse.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ChatCompletionResponseTypedDict(TypedDict): - id: str - object: str - model: str - usage: UsageInfoTypedDict - created: int - choices: List[ChatCompletionChoiceTypedDict] - - -class ChatCompletionResponse(BaseModel): - id: str - - object: str - - model: str - - usage: UsageInfo - - created: int - - choices: List[ChatCompletionChoice] diff --git a/src/mistralai/models/chatcompletionstreamrequest.py b/src/mistralai/models/chatcompletionstreamrequest.py deleted file mode 100644 index 10f97e5f..00000000 --- a/src/mistralai/models/chatcompletionstreamrequest.py +++ /dev/null @@ -1,217 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .mistralpromptmode import MistralPromptMode -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .tool import Tool, ToolTypedDict -from .toolchoice import ToolChoice, ToolChoiceTypedDict -from .toolchoiceenum import ToolChoiceEnum -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ChatCompletionStreamRequestStopTypedDict = TypeAliasType( - "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -ChatCompletionStreamRequestStop = TypeAliasType( - "ChatCompletionStreamRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -ChatCompletionStreamRequestMessagesTypedDict = TypeAliasType( - "ChatCompletionStreamRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -ChatCompletionStreamRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( - "ChatCompletionStreamRequestToolChoiceTypedDict", - Union[ToolChoiceTypedDict, ToolChoiceEnum], -) -r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - -ChatCompletionStreamRequestToolChoice = TypeAliasType( - "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] -) -r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - -class ChatCompletionStreamRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionStreamRequestMessagesTypedDict] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - response_format: NotRequired[ResponseFormatTypedDict] - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - tools: NotRequired[Nullable[List[ToolTypedDict]]] - r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" - tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] - r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - presence_penalty: NotRequired[float] - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - frequency_penalty: NotRequired[float] - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - n: NotRequired[Nullable[int]] - r"""Number of completions to return for each request, input tokens are only billed once.""" - prediction: NotRequired[PredictionTypedDict] - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - parallel_tool_calls: NotRequired[bool] - r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - prompt_mode: NotRequired[Nullable[MistralPromptMode]] - r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" - safe_prompt: NotRequired[bool] - r"""Whether to inject a safety prompt before all conversations.""" - - -class ChatCompletionStreamRequest(BaseModel): - model: str - r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - - messages: List[ChatCompletionStreamRequestMessages] - r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = None - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[ChatCompletionStreamRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - response_format: Optional[ResponseFormat] = None - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - tools: OptionalNullable[List[Tool]] = UNSET - r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" - - tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None - r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" - - presence_penalty: Optional[float] = None - r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" - - frequency_penalty: Optional[float] = None - r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" - - n: OptionalNullable[int] = UNSET - r"""Number of completions to return for each request, input tokens are only billed once.""" - - prediction: Optional[Prediction] = None - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - parallel_tool_calls: Optional[bool] = None - r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" - - prompt_mode: OptionalNullable[MistralPromptMode] = UNSET - r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" - - safe_prompt: Optional[bool] = None - r"""Whether to inject a safety prompt before all conversations.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/chatmoderationrequest.py b/src/mistralai/models/chatmoderationrequest.py deleted file mode 100644 index 2f58d52f..00000000 --- a/src/mistralai/models/chatmoderationrequest.py +++ /dev/null @@ -1,83 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -import pydantic -from pydantic import Discriminator, Tag -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -TwoTypedDict = TypeAliasType( - "TwoTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -Two = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -OneTypedDict = TypeAliasType( - "OneTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -One = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -ChatModerationRequestInputsTypedDict = TypeAliasType( - "ChatModerationRequestInputsTypedDict", - Union[List[OneTypedDict], List[List[TwoTypedDict]]], -) -r"""Chat to classify""" - - -ChatModerationRequestInputs = TypeAliasType( - "ChatModerationRequestInputs", Union[List[One], List[List[Two]]] -) -r"""Chat to classify""" - - -class ChatModerationRequestTypedDict(TypedDict): - inputs: ChatModerationRequestInputsTypedDict - r"""Chat to classify""" - model: str - - -class ChatModerationRequest(BaseModel): - inputs: 
Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")] - r"""Chat to classify""" - - model: str diff --git a/src/mistralai/models/checkpointout.py b/src/mistralai/models/checkpointout.py deleted file mode 100644 index aefb7731..00000000 --- a/src/mistralai/models/checkpointout.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .metricout import MetricOut, MetricOutTypedDict -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class CheckpointOutTypedDict(TypedDict): - metrics: MetricOutTypedDict - r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - step_number: int - r"""The step number that the checkpoint was created at.""" - created_at: int - r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" - - -class CheckpointOut(BaseModel): - metrics: MetricOut - r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - - step_number: int - r"""The step number that the checkpoint was created at.""" - - created_at: int - r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" diff --git a/src/mistralai/models/classificationrequest.py b/src/mistralai/models/classificationrequest.py deleted file mode 100644 index 8a354378..00000000 --- a/src/mistralai/models/classificationrequest.py +++ /dev/null @@ -1,68 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, List, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ClassificationRequestInputsTypedDict = TypeAliasType( - "ClassificationRequestInputsTypedDict", Union[str, List[str]] -) -r"""Text to classify.""" - - -ClassificationRequestInputs = TypeAliasType( - "ClassificationRequestInputs", Union[str, List[str]] -) -r"""Text to classify.""" - - -class ClassificationRequestTypedDict(TypedDict): - model: str - r"""ID of the model to use.""" - inputs: ClassificationRequestInputsTypedDict - r"""Text to classify.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - - -class ClassificationRequest(BaseModel): - model: str - r"""ID of the model to use.""" - - inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] - r"""Text to classify.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["metadata"] - nullable_fields = ["metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classificationresponse.py b/src/mistralai/models/classificationresponse.py deleted file mode 100644 index b7741f37..00000000 --- 
a/src/mistralai/models/classificationresponse.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classificationtargetresult import ( - ClassificationTargetResult, - ClassificationTargetResultTypedDict, -) -from mistralai.types import BaseModel -from typing import Dict, List -from typing_extensions import TypedDict - - -class ClassificationResponseTypedDict(TypedDict): - id: str - model: str - results: List[Dict[str, ClassificationTargetResultTypedDict]] - - -class ClassificationResponse(BaseModel): - id: str - - model: str - - results: List[Dict[str, ClassificationTargetResult]] diff --git a/src/mistralai/models/classificationtargetresult.py b/src/mistralai/models/classificationtargetresult.py deleted file mode 100644 index 60c5a51b..00000000 --- a/src/mistralai/models/classificationtargetresult.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Dict -from typing_extensions import TypedDict - - -class ClassificationTargetResultTypedDict(TypedDict): - scores: Dict[str, float] - - -class ClassificationTargetResult(BaseModel): - scores: Dict[str, float] diff --git a/src/mistralai/models/classifierdetailedjobout.py b/src/mistralai/models/classifierdetailedjobout.py deleted file mode 100644 index 701aee6e..00000000 --- a/src/mistralai/models/classifierdetailedjobout.py +++ /dev/null @@ -1,158 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict -from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, -) -from .eventout import EventOut, EventOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierDetailedJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] - - -ClassifierDetailedJobOutObject = Literal["job",] - - -ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict - - -ClassifierDetailedJobOutIntegrations = WandbIntegrationOut - - -ClassifierDetailedJobOutJobType = Literal["classifier",] - - -class ClassifierDetailedJobOutTypedDict(TypedDict): - id: str - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: ClassifierDetailedJobOutStatus - created_at: int - modified_at: int - training_files: List[str] - hyperparameters: ClassifierTrainingParametersTypedDict - classifier_targets: List[ClassifierTargetOutTypedDict] - validation_files: NotRequired[Nullable[List[str]]] - object: NotRequired[ClassifierDetailedJobOutObject] - fine_tuned_model: NotRequired[Nullable[str]] - suffix: NotRequired[Nullable[str]] - integrations: NotRequired[ - Nullable[List[ClassifierDetailedJobOutIntegrationsTypedDict]] - ] - trained_tokens: NotRequired[Nullable[int]] - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: 
NotRequired[ClassifierDetailedJobOutJobType] - events: NotRequired[List[EventOutTypedDict]] - r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" - checkpoints: NotRequired[List[CheckpointOutTypedDict]] - - -class ClassifierDetailedJobOut(BaseModel): - id: str - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: ClassifierDetailedJobOutStatus - - created_at: int - - modified_at: int - - training_files: List[str] - - hyperparameters: ClassifierTrainingParameters - - classifier_targets: List[ClassifierTargetOut] - - validation_files: OptionalNullable[List[str]] = UNSET - - object: Optional[ClassifierDetailedJobOutObject] = "job" - - fine_tuned_model: OptionalNullable[str] = UNSET - - suffix: OptionalNullable[str] = UNSET - - integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegrations]] = UNSET - - trained_tokens: OptionalNullable[int] = UNSET - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[ClassifierDetailedJobOutJobType] = "classifier" - - events: Optional[List[EventOut]] = None - r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" - - checkpoints: Optional[List[CheckpointOut]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - "events", - "checkpoints", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifierftmodelout.py b/src/mistralai/models/classifierftmodelout.py deleted file mode 100644 index d2a31fae..00000000 --- a/src/mistralai/models/classifierftmodelout.py +++ /dev/null @@ -1,108 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict -from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, -) -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierFTModelOutObject = Literal["model",] - - -ClassifierFTModelOutModelType = Literal["classifier",] - - -class ClassifierFTModelOutTypedDict(TypedDict): - id: str - created: int - owned_by: str - workspace_id: str - root: str - root_version: str - archived: bool - capabilities: FTModelCapabilitiesOutTypedDict - job: str - classifier_targets: List[ClassifierTargetOutTypedDict] - object: NotRequired[ClassifierFTModelOutObject] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - model_type: NotRequired[ClassifierFTModelOutModelType] - - -class ClassifierFTModelOut(BaseModel): - id: str - - created: int - - owned_by: str - - workspace_id: str - - root: str - - root_version: str - - archived: bool - - capabilities: FTModelCapabilitiesOut - - job: str - - classifier_targets: List[ClassifierTargetOut] - - object: Optional[ClassifierFTModelOutObject] = "model" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - model_type: Optional[ClassifierFTModelOutModelType] = "classifier" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "name", - "description", - "max_context_length", - "aliases", - "model_type", - ] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - 
for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifierjobout.py b/src/mistralai/models/classifierjobout.py deleted file mode 100644 index a2f7cc08..00000000 --- a/src/mistralai/models/classifierjobout.py +++ /dev/null @@ -1,167 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, -) -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] -r"""The current status of the fine-tuning job.""" - - -ClassifierJobOutObject = Literal["job",] -r"""The object type of the fine-tuning job.""" - - -ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict - - -ClassifierJobOutIntegrations = WandbIntegrationOut - - -ClassifierJobOutJobType = Literal["classifier",] -r"""The type of job (`FT` for fine-tuning).""" - - -class 
ClassifierJobOutTypedDict(TypedDict): - id: str - r"""The ID of the job.""" - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: ClassifierJobOutStatus - r"""The current status of the fine-tuning job.""" - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - hyperparameters: ClassifierTrainingParametersTypedDict - validation_files: NotRequired[Nullable[List[str]]] - r"""A list containing the IDs of uploaded files that contain validation data.""" - object: NotRequired[ClassifierJobOutObject] - r"""The object type of the fine-tuning job.""" - fine_tuned_model: NotRequired[Nullable[str]] - r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationsTypedDict]]] - r"""A list of integrations enabled for your fine-tuning job.""" - trained_tokens: NotRequired[Nullable[int]] - r"""Total number of tokens trained.""" - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[ClassifierJobOutJobType] - r"""The type of job (`FT` for fine-tuning).""" - - -class ClassifierJobOut(BaseModel): - id: str - r"""The ID of the job.""" - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: ClassifierJobOutStatus - r"""The current status of the fine-tuning job.""" - - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - - hyperparameters: ClassifierTrainingParameters - - validation_files: OptionalNullable[List[str]] = UNSET - r"""A list containing the IDs of uploaded files that contain validation data.""" - - object: Optional[ClassifierJobOutObject] = "job" - r"""The object type of the fine-tuning job.""" - - fine_tuned_model: OptionalNullable[str] = UNSET - r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - integrations: OptionalNullable[List[ClassifierJobOutIntegrations]] = UNSET - r"""A list of integrations enabled for your fine-tuning job.""" - - trained_tokens: OptionalNullable[int] = UNSET - r"""Total number of tokens trained.""" - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[ClassifierJobOutJobType] = "classifier" - r"""The type of job (`FT` for fine-tuning).""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifiertargetin.py b/src/mistralai/models/classifiertargetin.py deleted file mode 100644 index d8a060e4..00000000 --- a/src/mistralai/models/classifiertargetin.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .ftclassifierlossfunction import FTClassifierLossFunction -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class ClassifierTargetInTypedDict(TypedDict): - name: str - labels: List[str] - weight: NotRequired[float] - loss_function: NotRequired[Nullable[FTClassifierLossFunction]] - - -class ClassifierTargetIn(BaseModel): - name: str - - labels: List[str] - - weight: Optional[float] = 1 - - loss_function: OptionalNullable[FTClassifierLossFunction] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["weight", "loss_function"] - nullable_fields = ["loss_function"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifiertargetout.py b/src/mistralai/models/classifiertargetout.py deleted file mode 100644 index ddc587f4..00000000 --- a/src/mistralai/models/classifiertargetout.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .ftclassifierlossfunction import FTClassifierLossFunction -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ClassifierTargetOutTypedDict(TypedDict): - name: str - labels: List[str] - weight: float - loss_function: FTClassifierLossFunction - - -class ClassifierTargetOut(BaseModel): - name: str - - labels: List[str] - - weight: float - - loss_function: FTClassifierLossFunction diff --git a/src/mistralai/models/classifiertrainingparameters.py b/src/mistralai/models/classifiertrainingparameters.py deleted file mode 100644 index 718beeac..00000000 --- a/src/mistralai/models/classifiertrainingparameters.py +++ /dev/null @@ -1,73 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ClassifierTrainingParametersTypedDict(TypedDict): - training_steps: NotRequired[Nullable[int]] - learning_rate: NotRequired[float] - weight_decay: NotRequired[Nullable[float]] - warmup_fraction: NotRequired[Nullable[float]] - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - - -class ClassifierTrainingParameters(BaseModel): - training_steps: OptionalNullable[int] = UNSET - - learning_rate: Optional[float] = 0.0001 - - weight_decay: OptionalNullable[float] = UNSET - - warmup_fraction: OptionalNullable[float] = UNSET - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - nullable_fields = 
[ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/classifiertrainingparametersin.py b/src/mistralai/models/classifiertrainingparametersin.py deleted file mode 100644 index 9868843f..00000000 --- a/src/mistralai/models/classifiertrainingparametersin.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ClassifierTrainingParametersInTypedDict(TypedDict): - r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" - - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - learning_rate: NotRequired[float] - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - weight_decay: NotRequired[Nullable[float]] - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - warmup_fraction: NotRequired[Nullable[float]] - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - - -class ClassifierTrainingParametersIn(BaseModel): - r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - - learning_rate: Optional[float] = 0.0001 - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - - weight_decay: OptionalNullable[float] = UNSET - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. 
This term reduces the magnitude of the weights and prevents them from growing too large.""" - - warmup_fraction: OptionalNullable[float] = UNSET - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/codeinterpretertool.py b/src/mistralai/models/codeinterpretertool.py deleted file mode 100644 index 48b74ee8..00000000 --- a/src/mistralai/models/codeinterpretertool.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CodeInterpreterToolType = Literal["code_interpreter",] - - -class CodeInterpreterToolTypedDict(TypedDict): - type: NotRequired[CodeInterpreterToolType] - - -class CodeInterpreterTool(BaseModel): - type: Optional[CodeInterpreterToolType] = "code_interpreter" diff --git a/src/mistralai/models/completionargs.py b/src/mistralai/models/completionargs.py deleted file mode 100644 index 40aa0314..00000000 --- a/src/mistralai/models/completionargs.py +++ /dev/null @@ -1,101 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict -from .prediction import Prediction, PredictionTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from .toolchoiceenum import ToolChoiceEnum -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionArgsTypedDict(TypedDict): - r"""White-listed arguments from the completion API""" - - stop: NotRequired[Nullable[CompletionArgsStopTypedDict]] - presence_penalty: NotRequired[Nullable[float]] - frequency_penalty: NotRequired[Nullable[float]] - temperature: NotRequired[Nullable[float]] - top_p: NotRequired[Nullable[float]] - max_tokens: NotRequired[Nullable[int]] - random_seed: NotRequired[Nullable[int]] - prediction: NotRequired[Nullable[PredictionTypedDict]] - response_format: NotRequired[Nullable[ResponseFormatTypedDict]] - tool_choice: NotRequired[ToolChoiceEnum] - - -class CompletionArgs(BaseModel): - r"""White-listed arguments from the completion API""" - - stop: 
OptionalNullable[CompletionArgsStop] = UNSET - - presence_penalty: OptionalNullable[float] = UNSET - - frequency_penalty: OptionalNullable[float] = UNSET - - temperature: OptionalNullable[float] = UNSET - - top_p: OptionalNullable[float] = UNSET - - max_tokens: OptionalNullable[int] = UNSET - - random_seed: OptionalNullable[int] = UNSET - - prediction: OptionalNullable[Prediction] = UNSET - - response_format: OptionalNullable[ResponseFormat] = UNSET - - tool_choice: Optional[ToolChoiceEnum] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stop", - "presence_penalty", - "frequency_penalty", - "temperature", - "top_p", - "max_tokens", - "random_seed", - "prediction", - "response_format", - "tool_choice", - ] - nullable_fields = [ - "stop", - "presence_penalty", - "frequency_penalty", - "temperature", - "top_p", - "max_tokens", - "random_seed", - "prediction", - "response_format", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completionargsstop.py b/src/mistralai/models/completionargsstop.py deleted file mode 100644 index de7a0956..00000000 --- a/src/mistralai/models/completionargsstop.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import List, Union -from typing_extensions import TypeAliasType - - -CompletionArgsStopTypedDict = TypeAliasType( - "CompletionArgsStopTypedDict", Union[str, List[str]] -) - - -CompletionArgsStop = TypeAliasType("CompletionArgsStop", Union[str, List[str]]) diff --git a/src/mistralai/models/completionchunk.py b/src/mistralai/models/completionchunk.py deleted file mode 100644 index 4d1fcfbf..00000000 --- a/src/mistralai/models/completionchunk.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .completionresponsestreamchoice import ( - CompletionResponseStreamChoice, - CompletionResponseStreamChoiceTypedDict, -) -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionChunkTypedDict(TypedDict): - id: str - model: str - choices: List[CompletionResponseStreamChoiceTypedDict] - object: NotRequired[str] - created: NotRequired[int] - usage: NotRequired[UsageInfoTypedDict] - - -class CompletionChunk(BaseModel): - id: str - - model: str - - choices: List[CompletionResponseStreamChoice] - - object: Optional[str] = None - - created: Optional[int] = None - - usage: Optional[UsageInfo] = None diff --git a/src/mistralai/models/completiondetailedjobout.py b/src/mistralai/models/completiondetailedjobout.py deleted file mode 100644 index df41bc2a..00000000 --- a/src/mistralai/models/completiondetailedjobout.py +++ /dev/null @@ -1,165 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, -) -from .eventout import EventOut, EventOutTypedDict -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CompletionDetailedJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] - - -CompletionDetailedJobOutObject = Literal["job",] - - -CompletionDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict - - -CompletionDetailedJobOutIntegrations = WandbIntegrationOut - - -CompletionDetailedJobOutJobType = Literal["completion",] - - -CompletionDetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict - - -CompletionDetailedJobOutRepositories = GithubRepositoryOut - - -class CompletionDetailedJobOutTypedDict(TypedDict): - id: str - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: CompletionDetailedJobOutStatus - created_at: int - modified_at: int - training_files: List[str] - hyperparameters: CompletionTrainingParametersTypedDict - validation_files: NotRequired[Nullable[List[str]]] - object: NotRequired[CompletionDetailedJobOutObject] - fine_tuned_model: NotRequired[Nullable[str]] - suffix: NotRequired[Nullable[str]] - integrations: NotRequired[ - Nullable[List[CompletionDetailedJobOutIntegrationsTypedDict]] - ] - trained_tokens: 
NotRequired[Nullable[int]] - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[CompletionDetailedJobOutJobType] - repositories: NotRequired[List[CompletionDetailedJobOutRepositoriesTypedDict]] - events: NotRequired[List[EventOutTypedDict]] - r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" - checkpoints: NotRequired[List[CheckpointOutTypedDict]] - - -class CompletionDetailedJobOut(BaseModel): - id: str - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: CompletionDetailedJobOutStatus - - created_at: int - - modified_at: int - - training_files: List[str] - - hyperparameters: CompletionTrainingParameters - - validation_files: OptionalNullable[List[str]] = UNSET - - object: Optional[CompletionDetailedJobOutObject] = "job" - - fine_tuned_model: OptionalNullable[str] = UNSET - - suffix: OptionalNullable[str] = UNSET - - integrations: OptionalNullable[List[CompletionDetailedJobOutIntegrations]] = UNSET - - trained_tokens: OptionalNullable[int] = UNSET - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[CompletionDetailedJobOutJobType] = "completion" - - repositories: Optional[List[CompletionDetailedJobOutRepositories]] = None - - events: Optional[List[EventOut]] = None - r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" - - checkpoints: Optional[List[CheckpointOut]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - "repositories", - "events", - "checkpoints", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completionevent.py b/src/mistralai/models/completionevent.py deleted file mode 100644 index cc859910..00000000 --- a/src/mistralai/models/completionevent.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class CompletionEventTypedDict(TypedDict): - data: CompletionChunkTypedDict - - -class CompletionEvent(BaseModel): - data: CompletionChunk diff --git a/src/mistralai/models/completionftmodelout.py b/src/mistralai/models/completionftmodelout.py deleted file mode 100644 index 7b6520de..00000000 --- a/src/mistralai/models/completionftmodelout.py +++ /dev/null @@ -1,104 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, -) -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CompletionFTModelOutObject = Literal["model",] - - -ModelType = Literal["completion",] - - -class CompletionFTModelOutTypedDict(TypedDict): - id: str - created: int - owned_by: str - workspace_id: str - root: str - root_version: str - archived: bool - capabilities: FTModelCapabilitiesOutTypedDict - job: str - object: NotRequired[CompletionFTModelOutObject] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - model_type: NotRequired[ModelType] - - -class CompletionFTModelOut(BaseModel): - id: str - - created: int - - owned_by: str - - workspace_id: str - - root: str - - root_version: str - - archived: bool - - capabilities: FTModelCapabilitiesOut - - job: str - - object: Optional[CompletionFTModelOutObject] = "model" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - 
max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - model_type: Optional[ModelType] = "completion" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "name", - "description", - "max_context_length", - "aliases", - "model_type", - ] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completionjobout.py b/src/mistralai/models/completionjobout.py deleted file mode 100644 index 70995d2a..00000000 --- a/src/mistralai/models/completionjobout.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, -) -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Status = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] -r"""The current status of the fine-tuning job.""" - - -CompletionJobOutObject = Literal["job",] -r"""The object type of the fine-tuning job.""" - - -IntegrationsTypedDict = WandbIntegrationOutTypedDict - - -Integrations = WandbIntegrationOut - - -JobType = Literal["completion",] -r"""The type of job (`FT` for fine-tuning).""" - - -RepositoriesTypedDict = GithubRepositoryOutTypedDict - - -Repositories = GithubRepositoryOut - - -class CompletionJobOutTypedDict(TypedDict): - id: str - r"""The ID of the job.""" - auto_start: bool - model: str - r"""The name of the model to fine-tune.""" - status: Status - r"""The current status of the fine-tuning job.""" - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - hyperparameters: CompletionTrainingParametersTypedDict - validation_files: NotRequired[Nullable[List[str]]] - r"""A list containing the IDs of uploaded files that contain validation data.""" - 
object: NotRequired[CompletionJobOutObject] - r"""The object type of the fine-tuning job.""" - fine_tuned_model: NotRequired[Nullable[str]] - r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[IntegrationsTypedDict]]] - r"""A list of integrations enabled for your fine-tuning job.""" - trained_tokens: NotRequired[Nullable[int]] - r"""Total number of tokens trained.""" - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[JobType] - r"""The type of job (`FT` for fine-tuning).""" - repositories: NotRequired[List[RepositoriesTypedDict]] - - -class CompletionJobOut(BaseModel): - id: str - r"""The ID of the job.""" - - auto_start: bool - - model: str - r"""The name of the model to fine-tune.""" - - status: Status - r"""The current status of the fine-tuning job.""" - - created_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" - - modified_at: int - r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" - - training_files: List[str] - r"""A list containing the IDs of uploaded files that contain training data.""" - - hyperparameters: CompletionTrainingParameters - - validation_files: OptionalNullable[List[str]] = UNSET - r"""A list containing the IDs of uploaded files that contain validation data.""" - - object: Optional[CompletionJobOutObject] = "job" - r"""The object type of the fine-tuning job.""" - - fine_tuned_model: OptionalNullable[str] = UNSET - r"""The name of the fine-tuned model that is being created. 
The value will be `null` if the fine-tuning job is still running.""" - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - integrations: OptionalNullable[List[Integrations]] = UNSET - r"""A list of integrations enabled for your fine-tuning job.""" - - trained_tokens: OptionalNullable[int] = UNSET - r"""Total number of tokens trained.""" - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - job_type: Optional[JobType] = "completion" - r"""The type of job (`FT` for fine-tuning).""" - - repositories: Optional[List[Repositories]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "job_type", - "repositories", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completionresponsestreamchoice.py b/src/mistralai/models/completionresponsestreamchoice.py deleted file mode 100644 index 80f63987..00000000 --- a/src/mistralai/models/completionresponsestreamchoice.py +++ /dev/null @@ -1,63 +0,0 
@@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr -from pydantic import model_serializer -from typing import Literal, Union -from typing_extensions import TypedDict - - -CompletionResponseStreamChoiceFinishReason = Union[ - Literal[ - "stop", - "length", - "error", - "tool_calls", - ], - UnrecognizedStr, -] - - -class CompletionResponseStreamChoiceTypedDict(TypedDict): - index: int - delta: DeltaMessageTypedDict - finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] - - -class CompletionResponseStreamChoice(BaseModel): - index: int - - delta: DeltaMessage - - finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["finish_reason"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completiontrainingparameters.py b/src/mistralai/models/completiontrainingparameters.py deleted file mode 100644 index 0200e81c..00000000 --- a/src/mistralai/models/completiontrainingparameters.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionTrainingParametersTypedDict(TypedDict): - training_steps: NotRequired[Nullable[int]] - learning_rate: NotRequired[float] - weight_decay: NotRequired[Nullable[float]] - warmup_fraction: NotRequired[Nullable[float]] - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - fim_ratio: NotRequired[Nullable[float]] - - -class CompletionTrainingParameters(BaseModel): - training_steps: OptionalNullable[int] = UNSET - - learning_rate: Optional[float] = 0.0001 - - weight_decay: OptionalNullable[float] = UNSET - - warmup_fraction: OptionalNullable[float] = UNSET - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - fim_ratio: OptionalNullable[float] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/completiontrainingparametersin.py 
b/src/mistralai/models/completiontrainingparametersin.py deleted file mode 100644 index 1f74bb9d..00000000 --- a/src/mistralai/models/completiontrainingparametersin.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionTrainingParametersInTypedDict(TypedDict): - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - learning_rate: NotRequired[float] - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - weight_decay: NotRequired[Nullable[float]] - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - warmup_fraction: NotRequired[Nullable[float]] - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - fim_ratio: NotRequired[Nullable[float]] - - -class CompletionTrainingParametersIn(BaseModel): - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - - learning_rate: Optional[float] = 0.0001 - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - - weight_decay: OptionalNullable[float] = UNSET - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - - warmup_fraction: OptionalNullable[float] = UNSET - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - fim_ratio: OptionalNullable[float] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/contentchunk.py b/src/mistralai/models/contentchunk.py deleted file mode 100644 index 47170eef..00000000 --- a/src/mistralai/models/contentchunk.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .audiochunk import AudioChunk, AudioChunkTypedDict -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .filechunk import FileChunk, FileChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType - - -ContentChunkTypedDict = TypeAliasType( - "ContentChunkTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - ReferenceChunkTypedDict, - FileChunkTypedDict, - AudioChunkTypedDict, - DocumentURLChunkTypedDict, - ThinkChunkTypedDict, - ], -) - - -ContentChunk = Annotated[ - Union[ - Annotated[ImageURLChunk, Tag("image_url")], - Annotated[DocumentURLChunk, Tag("document_url")], - Annotated[TextChunk, Tag("text")], - Annotated[ReferenceChunk, Tag("reference")], - Annotated[FileChunk, Tag("file")], - Annotated[ThinkChunk, Tag("thinking")], - Annotated[AudioChunk, Tag("input_audio")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] diff --git a/src/mistralai/models/conversationappendrequest.py b/src/mistralai/models/conversationappendrequest.py deleted file mode 100644 index 15cbc687..00000000 --- a/src/mistralai/models/conversationappendrequest.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationAppendRequestHandoffExecution = Literal[ - "client", - "server", -] - - -class ConversationAppendRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - - -class ConversationAppendRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = False - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationAppendRequestHandoffExecution] = "server" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationappendstreamrequest.py b/src/mistralai/models/conversationappendstreamrequest.py deleted file mode 100644 index 8cecf89d..00000000 --- a/src/mistralai/models/conversationappendstreamrequest.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationAppendStreamRequestHandoffExecution = Literal[ - "client", - "server", -] - - -class ConversationAppendStreamRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - - -class ConversationAppendStreamRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = True - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationAppendStreamRequestHandoffExecution] = ( - "server" - ) - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/models/conversationevents.py b/src/mistralai/models/conversationevents.py deleted file mode 100644 index ba4c628c..00000000 --- a/src/mistralai/models/conversationevents.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict -from .agenthandoffstartedevent import ( - AgentHandoffStartedEvent, - AgentHandoffStartedEventTypedDict, -) -from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict -from .messageoutputevent import MessageOutputEvent, MessageOutputEventTypedDict -from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict -from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict -from .responsestartedevent import ResponseStartedEvent, ResponseStartedEventTypedDict -from .ssetypes import SSETypes -from .toolexecutiondeltaevent import ( - ToolExecutionDeltaEvent, - ToolExecutionDeltaEventTypedDict, -) -from .toolexecutiondoneevent import ( - ToolExecutionDoneEvent, - ToolExecutionDoneEventTypedDict, -) -from .toolexecutionstartedevent import ( - ToolExecutionStartedEvent, - ToolExecutionStartedEventTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -ConversationEventsDataTypedDict = TypeAliasType( - "ConversationEventsDataTypedDict", - Union[ - ResponseStartedEventTypedDict, - ResponseDoneEventTypedDict, - ResponseErrorEventTypedDict, - ToolExecutionStartedEventTypedDict, - ToolExecutionDeltaEventTypedDict, - ToolExecutionDoneEventTypedDict, - AgentHandoffStartedEventTypedDict, - AgentHandoffDoneEventTypedDict, - FunctionCallEventTypedDict, - MessageOutputEventTypedDict, - ], -) - - -ConversationEventsData = Annotated[ - Union[ - Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], - Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], - Annotated[ResponseDoneEvent, Tag("conversation.response.done")], - Annotated[ResponseErrorEvent, Tag("conversation.response.error")], - 
Annotated[ResponseStartedEvent, Tag("conversation.response.started")], - Annotated[FunctionCallEvent, Tag("function.call.delta")], - Annotated[MessageOutputEvent, Tag("message.output.delta")], - Annotated[ToolExecutionDeltaEvent, Tag("tool.execution.delta")], - Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], - Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class ConversationEventsTypedDict(TypedDict): - event: SSETypes - r"""Server side events sent when streaming a conversation response.""" - data: ConversationEventsDataTypedDict - - -class ConversationEvents(BaseModel): - event: SSETypes - r"""Server side events sent when streaming a conversation response.""" - - data: ConversationEventsData diff --git a/src/mistralai/models/conversationhistory.py b/src/mistralai/models/conversationhistory.py deleted file mode 100644 index d5206a57..00000000 --- a/src/mistralai/models/conversationhistory.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict -from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict -from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict -from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict -from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict -from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationHistoryObject = Literal["conversation.history",] - - -EntriesTypedDict = TypeAliasType( - "EntriesTypedDict", - Union[ - FunctionResultEntryTypedDict, - MessageInputEntryTypedDict, - FunctionCallEntryTypedDict, - ToolExecutionEntryTypedDict, - MessageOutputEntryTypedDict, - AgentHandoffEntryTypedDict, - ], -) - - -Entries = TypeAliasType( - "Entries", - Union[ - FunctionResultEntry, - MessageInputEntry, - FunctionCallEntry, - ToolExecutionEntry, - MessageOutputEntry, - AgentHandoffEntry, - ], -) - - -class ConversationHistoryTypedDict(TypedDict): - r"""Retrieve all entries in a conversation.""" - - conversation_id: str - entries: List[EntriesTypedDict] - object: NotRequired[ConversationHistoryObject] - - -class ConversationHistory(BaseModel): - r"""Retrieve all entries in a conversation.""" - - conversation_id: str - - entries: List[Entries] - - object: Optional[ConversationHistoryObject] = "conversation.history" diff --git a/src/mistralai/models/conversationinputs.py b/src/mistralai/models/conversationinputs.py deleted file mode 100644 index 4d30cd76..00000000 --- a/src/mistralai/models/conversationinputs.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .inputentries import InputEntries, InputEntriesTypedDict -from typing import List, Union -from typing_extensions import TypeAliasType - - -ConversationInputsTypedDict = TypeAliasType( - "ConversationInputsTypedDict", Union[str, List[InputEntriesTypedDict]] -) - - -ConversationInputs = TypeAliasType("ConversationInputs", Union[str, List[InputEntries]]) diff --git a/src/mistralai/models/conversationmessages.py b/src/mistralai/models/conversationmessages.py deleted file mode 100644 index 32ca9c20..00000000 --- a/src/mistralai/models/conversationmessages.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .messageentries import MessageEntries, MessageEntriesTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationMessagesObject = Literal["conversation.messages",] - - -class ConversationMessagesTypedDict(TypedDict): - r"""Similar to the conversation history but only keep the messages""" - - conversation_id: str - messages: List[MessageEntriesTypedDict] - object: NotRequired[ConversationMessagesObject] - - -class ConversationMessages(BaseModel): - r"""Similar to the conversation history but only keep the messages""" - - conversation_id: str - - messages: List[MessageEntries] - - object: Optional[ConversationMessagesObject] = "conversation.messages" diff --git a/src/mistralai/models/conversationrequest.py b/src/mistralai/models/conversationrequest.py deleted file mode 100644 index 80581cc1..00000000 --- a/src/mistralai/models/conversationrequest.py +++ /dev/null @@ -1,154 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -HandoffExecution = Literal[ - "client", - "server", -] - - -ToolsTypedDict = TypeAliasType( - "ToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -Tools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -AgentVersionTypedDict = TypeAliasType("AgentVersionTypedDict", Union[str, int]) - - -AgentVersion = TypeAliasType("AgentVersion", Union[str, int]) - - -class ConversationRequestTypedDict(TypedDict): - inputs: 
ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[Nullable[bool]] - handoff_execution: NotRequired[Nullable[HandoffExecution]] - instructions: NotRequired[Nullable[str]] - tools: NotRequired[List[ToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - agent_id: NotRequired[Nullable[str]] - agent_version: NotRequired[Nullable[AgentVersionTypedDict]] - model: NotRequired[Nullable[str]] - - -class ConversationRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = False - - store: OptionalNullable[bool] = UNSET - - handoff_execution: OptionalNullable[HandoffExecution] = UNSET - - instructions: OptionalNullable[str] = UNSET - - tools: Optional[List[Tools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: OptionalNullable[CompletionArgs] = UNSET - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - agent_version: OptionalNullable[AgentVersion] = UNSET - - model: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - nullable_fields = [ - "store", - "handoff_execution", - "instructions", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = 
serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/conversationresponse.py b/src/mistralai/models/conversationresponse.py deleted file mode 100644 index ff318e35..00000000 --- a/src/mistralai/models/conversationresponse.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict -from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict -from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict -from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict -from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationResponseObject = Literal["conversation.response",] - - -OutputsTypedDict = TypeAliasType( - "OutputsTypedDict", - Union[ - ToolExecutionEntryTypedDict, - FunctionCallEntryTypedDict, - MessageOutputEntryTypedDict, - AgentHandoffEntryTypedDict, - ], -) - - -Outputs = TypeAliasType( - "Outputs", - Union[ToolExecutionEntry, FunctionCallEntry, MessageOutputEntry, AgentHandoffEntry], -) - - -class ConversationResponseTypedDict(TypedDict): - r"""The response after appending new entries to the conversation.""" - - conversation_id: str - outputs: List[OutputsTypedDict] - usage: 
ConversationUsageInfoTypedDict - object: NotRequired[ConversationResponseObject] - - -class ConversationResponse(BaseModel): - r"""The response after appending new entries to the conversation.""" - - conversation_id: str - - outputs: List[Outputs] - - usage: ConversationUsageInfo - - object: Optional[ConversationResponseObject] = "conversation.response" diff --git a/src/mistralai/models/conversationrestartrequest.py b/src/mistralai/models/conversationrestartrequest.py deleted file mode 100644 index 6f21d012..00000000 --- a/src/mistralai/models/conversationrestartrequest.py +++ /dev/null @@ -1,107 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationRestartRequestHandoffExecution = Literal[ - "client", - "server", -] - - -ConversationRestartRequestAgentVersionTypedDict = TypeAliasType( - "ConversationRestartRequestAgentVersionTypedDict", Union[str, int] -) -r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" - - -ConversationRestartRequestAgentVersion = TypeAliasType( - "ConversationRestartRequestAgentVersion", Union[str, int] -) -r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" - - -class ConversationRestartRequestTypedDict(TypedDict): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputsTypedDict - from_entry_id: str - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - r"""Custom metadata for the conversation.""" - agent_version: NotRequired[ - Nullable[ConversationRestartRequestAgentVersionTypedDict] - ] - r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" - - -class ConversationRestartRequest(BaseModel): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputs - - from_entry_id: str - - stream: Optional[bool] = False - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationRestartRequestHandoffExecution] = "server" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - r"""Custom metadata for the conversation.""" - - agent_version: OptionalNullable[ConversationRestartRequestAgentVersion] = UNSET - r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "completion_args", - "metadata", - "agent_version", - ] - nullable_fields = ["metadata", "agent_version"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/conversationrestartstreamrequest.py b/src/mistralai/models/conversationrestartstreamrequest.py deleted file mode 100644 index 2cec7958..00000000 --- a/src/mistralai/models/conversationrestartstreamrequest.py +++ /dev/null @@ -1,111 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationRestartStreamRequestHandoffExecution = Literal[ - "client", - "server", -] - - -ConversationRestartStreamRequestAgentVersionTypedDict = TypeAliasType( - "ConversationRestartStreamRequestAgentVersionTypedDict", Union[str, int] -) -r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" - - -ConversationRestartStreamRequestAgentVersion = TypeAliasType( - "ConversationRestartStreamRequestAgentVersion", Union[str, int] -) -r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" - - -class ConversationRestartStreamRequestTypedDict(TypedDict): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputsTypedDict - from_entry_id: str - stream: NotRequired[bool] - store: NotRequired[bool] - r"""Whether to store the results into our servers or not.""" - handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - r"""Custom metadata for the conversation.""" - agent_version: NotRequired[ - Nullable[ConversationRestartStreamRequestAgentVersionTypedDict] - ] - r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" - - -class ConversationRestartStreamRequest(BaseModel): - r"""Request to restart a new conversation from a given entry in the conversation.""" - - inputs: ConversationInputs - - from_entry_id: str - - stream: Optional[bool] = True - - store: Optional[bool] = True - r"""Whether to store the results into our servers or not.""" - - handoff_execution: Optional[ConversationRestartStreamRequestHandoffExecution] = ( - "server" - ) - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - r"""Custom metadata for the conversation.""" - - agent_version: OptionalNullable[ConversationRestartStreamRequestAgentVersion] = ( - UNSET - ) - r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "completion_args", - "metadata", - "agent_version", - ] - nullable_fields = ["metadata", "agent_version"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/conversationstreamrequest.py b/src/mistralai/models/conversationstreamrequest.py deleted file mode 100644 index 1a481b77..00000000 --- a/src/mistralai/models/conversationstreamrequest.py +++ /dev/null @@ -1,160 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ConversationStreamRequestHandoffExecution = Literal[ - "client", - "server", -] - - -ConversationStreamRequestToolsTypedDict = TypeAliasType( - "ConversationStreamRequestToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -ConversationStreamRequestTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -ConversationStreamRequestAgentVersionTypedDict = TypeAliasType( - 
"ConversationStreamRequestAgentVersionTypedDict", Union[str, int] -) - - -ConversationStreamRequestAgentVersion = TypeAliasType( - "ConversationStreamRequestAgentVersion", Union[str, int] -) - - -class ConversationStreamRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict - stream: NotRequired[bool] - store: NotRequired[Nullable[bool]] - handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] - instructions: NotRequired[Nullable[str]] - tools: NotRequired[List[ConversationStreamRequestToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - agent_id: NotRequired[Nullable[str]] - agent_version: NotRequired[Nullable[ConversationStreamRequestAgentVersionTypedDict]] - model: NotRequired[Nullable[str]] - - -class ConversationStreamRequest(BaseModel): - inputs: ConversationInputs - - stream: Optional[bool] = True - - store: OptionalNullable[bool] = UNSET - - handoff_execution: OptionalNullable[ConversationStreamRequestHandoffExecution] = ( - UNSET - ) - - instructions: OptionalNullable[str] = UNSET - - tools: Optional[List[ConversationStreamRequestTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: OptionalNullable[CompletionArgs] = UNSET - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - agent_version: OptionalNullable[ConversationStreamRequestAgentVersion] = UNSET - - model: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - 
"name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - nullable_fields = [ - "store", - "handoff_execution", - "instructions", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/conversationusageinfo.py b/src/mistralai/models/conversationusageinfo.py deleted file mode 100644 index 9ae6f4fb..00000000 --- a/src/mistralai/models/conversationusageinfo.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class ConversationUsageInfoTypedDict(TypedDict): - prompt_tokens: NotRequired[int] - completion_tokens: NotRequired[int] - total_tokens: NotRequired[int] - connector_tokens: NotRequired[Nullable[int]] - connectors: NotRequired[Nullable[Dict[str, int]]] - - -class ConversationUsageInfo(BaseModel): - prompt_tokens: Optional[int] = 0 - - completion_tokens: Optional[int] = 0 - - total_tokens: Optional[int] = 0 - - connector_tokens: OptionalNullable[int] = UNSET - - connectors: OptionalNullable[Dict[str, int]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "connector_tokens", - "connectors", - ] - nullable_fields = ["connector_tokens", "connectors"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py deleted file mode 100644 index 4acb8d53..00000000 --- a/src/mistralai/models/delete_model_v1_models_model_id_deleteop.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to delete.""" - - -class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to delete.""" diff --git a/src/mistralai/models/deletefileout.py b/src/mistralai/models/deletefileout.py deleted file mode 100644 index 2b346ec4..00000000 --- a/src/mistralai/models/deletefileout.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class DeleteFileOutTypedDict(TypedDict): - id: str - r"""The ID of the deleted file.""" - object: str - r"""The object type that was deleted""" - deleted: bool - r"""The deletion status.""" - - -class DeleteFileOut(BaseModel): - id: str - r"""The ID of the deleted file.""" - - object: str - r"""The object type that was deleted""" - - deleted: bool - r"""The deletion status.""" diff --git a/src/mistralai/models/deletemodelout.py b/src/mistralai/models/deletemodelout.py deleted file mode 100644 index c1b1effc..00000000 --- a/src/mistralai/models/deletemodelout.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class DeleteModelOutTypedDict(TypedDict): - id: str - r"""The ID of the deleted model.""" - object: NotRequired[str] - r"""The object type that was deleted""" - deleted: NotRequired[bool] - r"""The deletion status""" - - -class DeleteModelOut(BaseModel): - id: str - r"""The ID of the deleted model.""" - - object: Optional[str] = "model" - r"""The object type that was deleted""" - - deleted: Optional[bool] = True - r"""The deletion status""" diff --git a/src/mistralai/models/deltamessage.py b/src/mistralai/models/deltamessage.py deleted file mode 100644 index 88aefe7f..00000000 --- a/src/mistralai/models/deltamessage.py +++ /dev/null @@ -1,61 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from .toolcall import ToolCall, ToolCallTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ContentTypedDict = TypeAliasType( - "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) - - -class DeltaMessageTypedDict(TypedDict): - role: NotRequired[Nullable[str]] - content: NotRequired[Nullable[ContentTypedDict]] - tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] - - -class DeltaMessage(BaseModel): - role: OptionalNullable[str] = UNSET - - content: OptionalNullable[Content] = UNSET - - tool_calls: OptionalNullable[List[ToolCall]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["role", "content", 
"tool_calls"] - nullable_fields = ["role", "content", "tool_calls"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/documentlibrarytool.py b/src/mistralai/models/documentlibrarytool.py deleted file mode 100644 index 8d4c122b..00000000 --- a/src/mistralai/models/documentlibrarytool.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentLibraryToolType = Literal["document_library",] - - -class DocumentLibraryToolTypedDict(TypedDict): - library_ids: List[str] - r"""Ids of the library in which to search.""" - type: NotRequired[DocumentLibraryToolType] - - -class DocumentLibraryTool(BaseModel): - library_ids: List[str] - r"""Ids of the library in which to search.""" - - type: Optional[DocumentLibraryToolType] = "document_library" diff --git a/src/mistralai/models/documentout.py b/src/mistralai/models/documentout.py deleted file mode 100644 index 81d9605f..00000000 --- a/src/mistralai/models/documentout.py +++ /dev/null @@ -1,121 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict -from typing_extensions import NotRequired, TypedDict - - -class DocumentOutTypedDict(TypedDict): - id: str - library_id: str - hash: Nullable[str] - mime_type: Nullable[str] - extension: Nullable[str] - size: Nullable[int] - name: str - created_at: datetime - processing_status: str - uploaded_by_id: Nullable[str] - uploaded_by_type: str - tokens_processing_total: int - summary: NotRequired[Nullable[str]] - last_processed_at: NotRequired[Nullable[datetime]] - number_of_pages: NotRequired[Nullable[int]] - tokens_processing_main_content: NotRequired[Nullable[int]] - tokens_processing_summary: NotRequired[Nullable[int]] - url: NotRequired[Nullable[str]] - attributes: NotRequired[Nullable[Dict[str, Any]]] - - -class DocumentOut(BaseModel): - id: str - - library_id: str - - hash: Nullable[str] - - mime_type: Nullable[str] - - extension: Nullable[str] - - size: Nullable[int] - - name: str - - created_at: datetime - - processing_status: str - - uploaded_by_id: Nullable[str] - - uploaded_by_type: str - - tokens_processing_total: int - - summary: OptionalNullable[str] = UNSET - - last_processed_at: OptionalNullable[datetime] = UNSET - - number_of_pages: OptionalNullable[int] = UNSET - - tokens_processing_main_content: OptionalNullable[int] = UNSET - - tokens_processing_summary: OptionalNullable[int] = UNSET - - url: OptionalNullable[str] = UNSET - - attributes: OptionalNullable[Dict[str, Any]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "summary", - "last_processed_at", - "number_of_pages", - "tokens_processing_main_content", - "tokens_processing_summary", - "url", - "attributes", - ] - nullable_fields = [ - "hash", - "mime_type", - "extension", - "size", - "summary", - 
"last_processed_at", - "number_of_pages", - "uploaded_by_id", - "tokens_processing_main_content", - "tokens_processing_summary", - "url", - "attributes", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/documenttextcontent.py b/src/mistralai/models/documenttextcontent.py deleted file mode 100644 index c02528c2..00000000 --- a/src/mistralai/models/documenttextcontent.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class DocumentTextContentTypedDict(TypedDict): - text: str - - -class DocumentTextContent(BaseModel): - text: str diff --git a/src/mistralai/models/documentupdatein.py b/src/mistralai/models/documentupdatein.py deleted file mode 100644 index bd89ff47..00000000 --- a/src/mistralai/models/documentupdatein.py +++ /dev/null @@ -1,65 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Dict, List, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -AttributesTypedDict = TypeAliasType( - "AttributesTypedDict", - Union[ - bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] - ], -) - - -Attributes = TypeAliasType( - "Attributes", - Union[ - bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] - ], -) - - -class DocumentUpdateInTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - attributes: NotRequired[Nullable[Dict[str, AttributesTypedDict]]] - - -class DocumentUpdateIn(BaseModel): - name: OptionalNullable[str] = UNSET - - attributes: OptionalNullable[Dict[str, Attributes]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "attributes"] - nullable_fields = ["name", "attributes"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/documenturlchunk.py b/src/mistralai/models/documenturlchunk.py deleted file mode 100644 index 6d0b1dc6..00000000 --- a/src/mistralai/models/documenturlchunk.py +++ /dev/null @@ -1,56 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentURLChunkType = Literal["document_url",] - - -class DocumentURLChunkTypedDict(TypedDict): - document_url: str - document_name: NotRequired[Nullable[str]] - r"""The filename of the document""" - type: NotRequired[DocumentURLChunkType] - - -class DocumentURLChunk(BaseModel): - document_url: str - - document_name: OptionalNullable[str] = UNSET - r"""The filename of the document""" - - type: Optional[DocumentURLChunkType] = "document_url" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["document_name", "type"] - nullable_fields = ["document_name"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/embeddingdtype.py b/src/mistralai/models/embeddingdtype.py deleted file mode 100644 index 26eee779..00000000 --- a/src/mistralai/models/embeddingdtype.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -EmbeddingDtype = Literal[ - "float", - "int8", - "uint8", - "binary", - "ubinary", -] diff --git a/src/mistralai/models/embeddingrequest.py b/src/mistralai/models/embeddingrequest.py deleted file mode 100644 index 44797bfa..00000000 --- a/src/mistralai/models/embeddingrequest.py +++ /dev/null @@ -1,84 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .embeddingdtype import EmbeddingDtype -from .encodingformat import EncodingFormat -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -EmbeddingRequestInputsTypedDict = TypeAliasType( - "EmbeddingRequestInputsTypedDict", Union[str, List[str]] -) -r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" - - -EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) -r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" - - -class EmbeddingRequestTypedDict(TypedDict): - model: str - r"""The ID of the model to be used for embedding.""" - inputs: EmbeddingRequestInputsTypedDict - r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - output_dimension: NotRequired[Nullable[int]] - r"""The dimension of the output embeddings when feature available. 
If not provided, a default output dimension will be used.""" - output_dtype: NotRequired[EmbeddingDtype] - encoding_format: NotRequired[EncodingFormat] - - -class EmbeddingRequest(BaseModel): - model: str - r"""The ID of the model to be used for embedding.""" - - inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] - r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - output_dimension: OptionalNullable[int] = UNSET - r"""The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used.""" - - output_dtype: Optional[EmbeddingDtype] = None - - encoding_format: Optional[EncodingFormat] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "metadata", - "output_dimension", - "output_dtype", - "encoding_format", - ] - nullable_fields = ["metadata", "output_dimension"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/embeddingresponse.py b/src/mistralai/models/embeddingresponse.py deleted file mode 100644 index aae6fa60..00000000 --- a/src/mistralai/models/embeddingresponse.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class EmbeddingResponseTypedDict(TypedDict): - id: str - object: str - model: str - usage: UsageInfoTypedDict - data: List[EmbeddingResponseDataTypedDict] - - -class EmbeddingResponse(BaseModel): - id: str - - object: str - - model: str - - usage: UsageInfo - - data: List[EmbeddingResponseData] diff --git a/src/mistralai/models/embeddingresponsedata.py b/src/mistralai/models/embeddingresponsedata.py deleted file mode 100644 index 01e2765f..00000000 --- a/src/mistralai/models/embeddingresponsedata.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class EmbeddingResponseDataTypedDict(TypedDict): - object: NotRequired[str] - embedding: NotRequired[List[float]] - index: NotRequired[int] - - -class EmbeddingResponseData(BaseModel): - object: Optional[str] = None - - embedding: Optional[List[float]] = None - - index: Optional[int] = None diff --git a/src/mistralai/models/encodingformat.py b/src/mistralai/models/encodingformat.py deleted file mode 100644 index be6c1a14..00000000 --- a/src/mistralai/models/encodingformat.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -EncodingFormat = Literal[ - "float", - "base64", -] diff --git a/src/mistralai/models/entitytype.py b/src/mistralai/models/entitytype.py deleted file mode 100644 index 8d2d4bbe..00000000 --- a/src/mistralai/models/entitytype.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -EntityType = Union[ - Literal[ - "User", - "Workspace", - "Org", - ], - UnrecognizedStr, -] -r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/models/eventout.py b/src/mistralai/models/eventout.py deleted file mode 100644 index 32819034..00000000 --- a/src/mistralai/models/eventout.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict -from typing_extensions import NotRequired, TypedDict - - -class EventOutTypedDict(TypedDict): - name: str - r"""The name of the event.""" - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - data: NotRequired[Nullable[Dict[str, Any]]] - - -class EventOut(BaseModel): - name: str - r"""The name of the event.""" - - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - - data: OptionalNullable[Dict[str, Any]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["data"] - nullable_fields = ["data"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/file.py b/src/mistralai/models/file.py deleted file mode 100644 index 682d7f6e..00000000 --- a/src/mistralai/models/file.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -import io -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, MultipartFormMetadata -import pydantic -from typing import IO, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FileTypedDict(TypedDict): - file_name: str - content: Union[bytes, IO[bytes], io.BufferedReader] - content_type: NotRequired[str] - - -class File(BaseModel): - file_name: Annotated[ - str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) - ] - - content: Annotated[ - Union[bytes, IO[bytes], io.BufferedReader], - pydantic.Field(alias=""), - FieldMetadata(multipart=MultipartFormMetadata(content=True)), - ] - - content_type: Annotated[ - Optional[str], - pydantic.Field(alias="Content-Type"), - FieldMetadata(multipart=True), - ] = None diff --git a/src/mistralai/models/filechunk.py b/src/mistralai/models/filechunk.py deleted file mode 100644 index 83e60cef..00000000 --- a/src/mistralai/models/filechunk.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict - - -class FileChunkTypedDict(TypedDict): - file_id: str - type: Literal["file"] - - -class FileChunk(BaseModel): - file_id: str - - TYPE: Annotated[ - Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], - pydantic.Field(alias="type"), - ] = "file" diff --git a/src/mistralai/models/filepurpose.py b/src/mistralai/models/filepurpose.py deleted file mode 100644 index b109b350..00000000 --- a/src/mistralai/models/filepurpose.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -FilePurpose = Union[ - Literal[ - "fine-tune", - "batch", - "ocr", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/files_api_routes_delete_fileop.py b/src/mistralai/models/files_api_routes_delete_fileop.py deleted file mode 100644 index a84a7a8e..00000000 --- a/src/mistralai/models/files_api_routes_delete_fileop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): - file_id: str - - -class FilesAPIRoutesDeleteFileRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/files_api_routes_download_fileop.py b/src/mistralai/models/files_api_routes_download_fileop.py deleted file mode 100644 index 168a7fa6..00000000 --- a/src/mistralai/models/files_api_routes_download_fileop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): - file_id: str - - -class FilesAPIRoutesDownloadFileRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/files_api_routes_get_signed_urlop.py b/src/mistralai/models/files_api_routes_get_signed_urlop.py deleted file mode 100644 index 708d40ab..00000000 --- a/src/mistralai/models/files_api_routes_get_signed_urlop.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict): - file_id: str - expiry: NotRequired[int] - r"""Number of hours before the url becomes invalid. Defaults to 24h""" - - -class FilesAPIRoutesGetSignedURLRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - expiry: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 24 - r"""Number of hours before the url becomes invalid. Defaults to 24h""" diff --git a/src/mistralai/models/files_api_routes_list_filesop.py b/src/mistralai/models/files_api_routes_list_filesop.py deleted file mode 100644 index 84d61b9b..00000000 --- a/src/mistralai/models/files_api_routes_list_filesop.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .filepurpose import FilePurpose -from .sampletype import SampleType -from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import List, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): - page: NotRequired[int] - page_size: NotRequired[int] - include_total: NotRequired[bool] - sample_type: NotRequired[Nullable[List[SampleType]]] - source: NotRequired[Nullable[List[Source]]] - search: NotRequired[Nullable[str]] - purpose: NotRequired[Nullable[FilePurpose]] - mimetypes: NotRequired[Nullable[List[str]]] - - -class FilesAPIRoutesListFilesRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - - include_total: Annotated[ - Optional[bool], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = True - - sample_type: Annotated[ - OptionalNullable[List[SampleType]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - source: Annotated[ - OptionalNullable[List[Source]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - search: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - purpose: Annotated[ - OptionalNullable[FilePurpose], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - mimetypes: Annotated[ - OptionalNullable[List[str]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - @model_serializer(mode="wrap") - def 
serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "include_total", - "sample_type", - "source", - "search", - "purpose", - "mimetypes", - ] - nullable_fields = ["sample_type", "source", "search", "purpose", "mimetypes"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/files_api_routes_retrieve_fileop.py b/src/mistralai/models/files_api_routes_retrieve_fileop.py deleted file mode 100644 index 0c2a95ef..00000000 --- a/src/mistralai/models/files_api_routes_retrieve_fileop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): - file_id: str - - -class FilesAPIRoutesRetrieveFileRequest(BaseModel): - file_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/files_api_routes_upload_fileop.py b/src/mistralai/models/files_api_routes_upload_fileop.py deleted file mode 100644 index aeefe842..00000000 --- a/src/mistralai/models/files_api_routes_upload_fileop.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .file import File, FileTypedDict -from .filepurpose import FilePurpose -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, MultipartFormMetadata -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): - file: FileTypedDict - r"""The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - """ - purpose: NotRequired[FilePurpose] - - -class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): - file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] - r"""The File object (not file name) to be uploaded. 
- To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - """ - - purpose: Annotated[Optional[FilePurpose], FieldMetadata(multipart=True)] = None diff --git a/src/mistralai/models/fileschema.py b/src/mistralai/models/fileschema.py deleted file mode 100644 index 9a88f1bb..00000000 --- a/src/mistralai/models/fileschema.py +++ /dev/null @@ -1,88 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .filepurpose import FilePurpose -from .sampletype import SampleType -from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing_extensions import Annotated, NotRequired, TypedDict - - -class FileSchemaTypedDict(TypedDict): - id: str - r"""The unique identifier of the file.""" - object: str - r"""The object type, which is always \"file\".""" - size_bytes: int - r"""The size of the file, in bytes.""" - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - filename: str - r"""The name of the uploaded file.""" - purpose: FilePurpose - sample_type: SampleType - source: Source - num_lines: NotRequired[Nullable[int]] - mimetype: NotRequired[Nullable[str]] - signature: NotRequired[Nullable[str]] - - -class FileSchema(BaseModel): - id: str - r"""The unique identifier of the file.""" - - object: str - r"""The object type, which is always \"file\".""" - - size_bytes: Annotated[int, pydantic.Field(alias="bytes")] - r"""The size of the file, in bytes.""" - - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - - filename: str - r"""The name of the uploaded file.""" - - purpose: FilePurpose - - 
sample_type: SampleType - - source: Source - - num_lines: OptionalNullable[int] = UNSET - - mimetype: OptionalNullable[str] = UNSET - - signature: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", "signature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/filesignedurl.py b/src/mistralai/models/filesignedurl.py deleted file mode 100644 index 092be7f8..00000000 --- a/src/mistralai/models/filesignedurl.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class FileSignedURLTypedDict(TypedDict): - url: str - - -class FileSignedURL(BaseModel): - url: str diff --git a/src/mistralai/models/fimcompletionrequest.py b/src/mistralai/models/fimcompletionrequest.py deleted file mode 100644 index 801a358b..00000000 --- a/src/mistralai/models/fimcompletionrequest.py +++ /dev/null @@ -1,124 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -FIMCompletionRequestStopTypedDict = TypeAliasType( - "FIMCompletionRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -FIMCompletionRequestStop = TypeAliasType( - "FIMCompletionRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -class FIMCompletionRequestTypedDict(TypedDict): - model: str - r"""ID of the model with FIM to use.""" - prompt: str - r"""The text/code to complete.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - r"""Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[FIMCompletionRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" - - -class FIMCompletionRequest(BaseModel): - model: str - r"""ID of the model with FIM to use.""" - - prompt: str - r"""The text/code to complete.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = 1 - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = False - r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - - stop: Optional[FIMCompletionRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/fimcompletionresponse.py b/src/mistralai/models/fimcompletionresponse.py deleted file mode 100644 index f27972b9..00000000 --- a/src/mistralai/models/fimcompletionresponse.py +++ /dev/null @@ -1,31 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class FIMCompletionResponseTypedDict(TypedDict): - id: str - object: str - model: str - usage: UsageInfoTypedDict - created: int - choices: List[ChatCompletionChoiceTypedDict] - - -class FIMCompletionResponse(BaseModel): - id: str - - object: str - - model: str - - usage: UsageInfo - - created: int - - choices: List[ChatCompletionChoice] diff --git a/src/mistralai/models/fimcompletionstreamrequest.py b/src/mistralai/models/fimcompletionstreamrequest.py deleted file mode 100644 index 2e8e6db2..00000000 --- a/src/mistralai/models/fimcompletionstreamrequest.py +++ /dev/null @@ -1,122 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -FIMCompletionStreamRequestStopTypedDict = TypeAliasType( - "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] -) -r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - -FIMCompletionStreamRequestStop = TypeAliasType( - "FIMCompletionStreamRequestStop", Union[str, List[str]] -) -r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" - - -class FIMCompletionStreamRequestTypedDict(TypedDict): - model: str - r"""ID of the model with FIM to use.""" - prompt: str - r"""The text/code to complete.""" - temperature: NotRequired[Nullable[float]] - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - top_p: NotRequired[float] - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - max_tokens: NotRequired[Nullable[int]] - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - stream: NotRequired[bool] - stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - random_seed: NotRequired[Nullable[int]] - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - suffix: NotRequired[Nullable[str]] - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - min_tokens: NotRequired[Nullable[int]] - r"""The minimum number of tokens to generate in the completion.""" - - -class FIMCompletionStreamRequest(BaseModel): - model: str - r"""ID of the model with FIM to use.""" - - prompt: str - r"""The text/code to complete.""" - - temperature: OptionalNullable[float] = UNSET - r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" - - top_p: Optional[float] = 1 - r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" - - max_tokens: OptionalNullable[int] = UNSET - r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" - - stream: Optional[bool] = True - - stop: Optional[FIMCompletionStreamRequestStop] = None - r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" - - random_seed: OptionalNullable[int] = UNSET - r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - - suffix: OptionalNullable[str] = UNSET - r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - - min_tokens: OptionalNullable[int] = UNSET - r"""The minimum number of tokens to generate in the completion.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/finetuneablemodeltype.py b/src/mistralai/models/finetuneablemodeltype.py deleted file mode 100644 index f5b8b2ed..00000000 --- a/src/mistralai/models/finetuneablemodeltype.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -FineTuneableModelType = Literal[ - "completion", - "classifier", -] diff --git a/src/mistralai/models/ftclassifierlossfunction.py b/src/mistralai/models/ftclassifierlossfunction.py deleted file mode 100644 index c4ef66e0..00000000 --- a/src/mistralai/models/ftclassifierlossfunction.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -FTClassifierLossFunction = Literal[ - "single_class", - "multi_class", -] diff --git a/src/mistralai/models/ftmodelcapabilitiesout.py b/src/mistralai/models/ftmodelcapabilitiesout.py deleted file mode 100644 index 7f3aa18b..00000000 --- a/src/mistralai/models/ftmodelcapabilitiesout.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class FTModelCapabilitiesOutTypedDict(TypedDict): - completion_chat: NotRequired[bool] - completion_fim: NotRequired[bool] - function_calling: NotRequired[bool] - fine_tuning: NotRequired[bool] - classification: NotRequired[bool] - - -class FTModelCapabilitiesOut(BaseModel): - completion_chat: Optional[bool] = True - - completion_fim: Optional[bool] = False - - function_calling: Optional[bool] = False - - fine_tuning: Optional[bool] = False - - classification: Optional[bool] = False diff --git a/src/mistralai/models/ftmodelcard.py b/src/mistralai/models/ftmodelcard.py deleted file mode 100644 index 1c3bd04d..00000000 --- a/src/mistralai/models/ftmodelcard.py +++ /dev/null @@ -1,126 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -FTModelCardType = Literal["fine-tuned",] - - -class FTModelCardTypedDict(TypedDict): - r"""Extra fields for fine-tuned models.""" - - id: str - capabilities: ModelCapabilitiesTypedDict - job: str - root: str - object: NotRequired[str] - created: NotRequired[int] - owned_by: NotRequired[str] - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - max_context_length: NotRequired[int] - aliases: NotRequired[List[str]] - deprecation: NotRequired[Nullable[datetime]] - deprecation_replacement_model: NotRequired[Nullable[str]] - default_model_temperature: NotRequired[Nullable[float]] - type: FTModelCardType - archived: NotRequired[bool] - - -class FTModelCard(BaseModel): - r"""Extra fields for fine-tuned models.""" - - id: str - - capabilities: ModelCapabilities - - job: str - - root: str - - object: Optional[str] = "model" - - created: Optional[int] = None - - owned_by: Optional[str] = "mistralai" - - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - max_context_length: Optional[int] = 32768 - - aliases: Optional[List[str]] = None - - deprecation: OptionalNullable[datetime] = UNSET - - deprecation_replacement_model: OptionalNullable[str] = UNSET - - default_model_temperature: OptionalNullable[float] = UNSET - - TYPE: Annotated[ - Annotated[ - Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) - ], - pydantic.Field(alias="type"), - ] = "fine-tuned" - - archived: Optional[bool] 
= False - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "created", - "owned_by", - "name", - "description", - "max_context_length", - "aliases", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - "type", - "archived", - ] - nullable_fields = [ - "name", - "description", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/function.py b/src/mistralai/models/function.py deleted file mode 100644 index 7d40cf75..00000000 --- a/src/mistralai/models/function.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Any, Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class FunctionTypedDict(TypedDict): - name: str - parameters: Dict[str, Any] - description: NotRequired[str] - strict: NotRequired[bool] - - -class Function(BaseModel): - name: str - - parameters: Dict[str, Any] - - description: Optional[str] = None - - strict: Optional[bool] = None diff --git a/src/mistralai/models/functioncall.py b/src/mistralai/models/functioncall.py deleted file mode 100644 index 0cce622a..00000000 --- a/src/mistralai/models/functioncall.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Any, Dict, Union -from typing_extensions import TypeAliasType, TypedDict - - -ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) - - -Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) - - -class FunctionCallTypedDict(TypedDict): - name: str - arguments: ArgumentsTypedDict - - -class FunctionCall(BaseModel): - name: str - - arguments: Arguments diff --git a/src/mistralai/models/functioncallentry.py b/src/mistralai/models/functioncallentry.py deleted file mode 100644 index 4ea62c4f..00000000 --- a/src/mistralai/models/functioncallentry.py +++ /dev/null @@ -1,77 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .functioncallentryarguments import ( - FunctionCallEntryArguments, - FunctionCallEntryArgumentsTypedDict, -) -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionCallEntryObject = Literal["entry",] - - -FunctionCallEntryType = Literal["function.call",] - - -class FunctionCallEntryTypedDict(TypedDict): - tool_call_id: str - name: str - arguments: FunctionCallEntryArgumentsTypedDict - object: NotRequired[FunctionCallEntryObject] - type: NotRequired[FunctionCallEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - - -class FunctionCallEntry(BaseModel): - tool_call_id: str - - name: str - - arguments: FunctionCallEntryArguments - - object: Optional[FunctionCallEntryObject] = "entry" - - type: Optional[FunctionCallEntryType] = "function.call" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git 
a/src/mistralai/models/functioncallentryarguments.py b/src/mistralai/models/functioncallentryarguments.py deleted file mode 100644 index ac9e6227..00000000 --- a/src/mistralai/models/functioncallentryarguments.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Any, Dict, Union -from typing_extensions import TypeAliasType - - -FunctionCallEntryArgumentsTypedDict = TypeAliasType( - "FunctionCallEntryArgumentsTypedDict", Union[Dict[str, Any], str] -) - - -FunctionCallEntryArguments = TypeAliasType( - "FunctionCallEntryArguments", Union[Dict[str, Any], str] -) diff --git a/src/mistralai/models/functioncallevent.py b/src/mistralai/models/functioncallevent.py deleted file mode 100644 index e3992cf1..00000000 --- a/src/mistralai/models/functioncallevent.py +++ /dev/null @@ -1,36 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionCallEventType = Literal["function.call.delta",] - - -class FunctionCallEventTypedDict(TypedDict): - id: str - name: str - tool_call_id: str - arguments: str - type: NotRequired[FunctionCallEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class FunctionCallEvent(BaseModel): - id: str - - name: str - - tool_call_id: str - - arguments: str - - type: Optional[FunctionCallEventType] = "function.call.delta" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/functionname.py b/src/mistralai/models/functionname.py deleted file mode 100644 index 0a6c0b14..00000000 --- a/src/mistralai/models/functionname.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class FunctionNameTypedDict(TypedDict): - r"""this restriction of `Function` is used to select a specific function to call""" - - name: str - - -class FunctionName(BaseModel): - r"""this restriction of `Function` is used to select a specific function to call""" - - name: str diff --git a/src/mistralai/models/functionresultentry.py b/src/mistralai/models/functionresultentry.py deleted file mode 100644 index 1c61395a..00000000 --- a/src/mistralai/models/functionresultentry.py +++ /dev/null @@ -1,70 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionResultEntryObject = Literal["entry",] - - -FunctionResultEntryType = Literal["function.result",] - - -class FunctionResultEntryTypedDict(TypedDict): - tool_call_id: str - result: str - object: NotRequired[FunctionResultEntryObject] - type: NotRequired[FunctionResultEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - - -class FunctionResultEntry(BaseModel): - tool_call_id: str - - result: str - - object: Optional[FunctionResultEntryObject] = "entry" - - type: Optional[FunctionResultEntryType] = "function.result" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/functiontool.py b/src/mistralai/models/functiontool.py deleted file mode 100644 index 009fe280..00000000 --- a/src/mistralai/models/functiontool.py +++ /dev/null @@ -1,21 +0,0 @@ 
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .function import Function, FunctionTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionToolType = Literal["function",] - - -class FunctionToolTypedDict(TypedDict): - function: FunctionTypedDict - type: NotRequired[FunctionToolType] - - -class FunctionTool(BaseModel): - function: Function - - type: Optional[FunctionToolType] = "function" diff --git a/src/mistralai/models/githubrepositoryin.py b/src/mistralai/models/githubrepositoryin.py deleted file mode 100644 index b16ce0d2..00000000 --- a/src/mistralai/models/githubrepositoryin.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -GithubRepositoryInType = Literal["github",] - - -class GithubRepositoryInTypedDict(TypedDict): - name: str - owner: str - token: str - type: NotRequired[GithubRepositoryInType] - ref: NotRequired[Nullable[str]] - weight: NotRequired[float] - - -class GithubRepositoryIn(BaseModel): - name: str - - owner: str - - token: str - - type: Optional[GithubRepositoryInType] = "github" - - ref: OptionalNullable[str] = UNSET - - weight: Optional[float] = 1 - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "ref", "weight"] - nullable_fields = ["ref"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - 
- optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/githubrepositoryout.py b/src/mistralai/models/githubrepositoryout.py deleted file mode 100644 index 372477c1..00000000 --- a/src/mistralai/models/githubrepositoryout.py +++ /dev/null @@ -1,63 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -GithubRepositoryOutType = Literal["github",] - - -class GithubRepositoryOutTypedDict(TypedDict): - name: str - owner: str - commit_id: str - type: NotRequired[GithubRepositoryOutType] - ref: NotRequired[Nullable[str]] - weight: NotRequired[float] - - -class GithubRepositoryOut(BaseModel): - name: str - - owner: str - - commit_id: str - - type: Optional[GithubRepositoryOutType] = "github" - - ref: OptionalNullable[str] = UNSET - - weight: Optional[float] = 1 - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "ref", "weight"] - nullable_fields = ["ref"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val 
is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/httpvalidationerror.py b/src/mistralai/models/httpvalidationerror.py deleted file mode 100644 index d467577a..00000000 --- a/src/mistralai/models/httpvalidationerror.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .validationerror import ValidationError -from dataclasses import dataclass, field -import httpx -from mistralai.models import MistralError -from mistralai.types import BaseModel -from typing import List, Optional - - -class HTTPValidationErrorData(BaseModel): - detail: Optional[List[ValidationError]] = None - - -@dataclass(unsafe_hash=True) -class HTTPValidationError(MistralError): - data: HTTPValidationErrorData = field(hash=False) - - def __init__( - self, - data: HTTPValidationErrorData, - raw_response: httpx.Response, - body: Optional[str] = None, - ): - message = body or raw_response.text - super().__init__(message, raw_response, body) - object.__setattr__(self, "data", data) diff --git a/src/mistralai/models/imagegenerationtool.py b/src/mistralai/models/imagegenerationtool.py deleted file mode 100644 index a92335db..00000000 --- a/src/mistralai/models/imagegenerationtool.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ImageGenerationToolType = Literal["image_generation",] - - -class ImageGenerationToolTypedDict(TypedDict): - type: NotRequired[ImageGenerationToolType] - - -class ImageGenerationTool(BaseModel): - type: Optional[ImageGenerationToolType] = "image_generation" diff --git a/src/mistralai/models/imageurl.py b/src/mistralai/models/imageurl.py deleted file mode 100644 index 6f077b69..00000000 --- a/src/mistralai/models/imageurl.py +++ /dev/null @@ -1,47 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class ImageURLTypedDict(TypedDict): - url: str - detail: NotRequired[Nullable[str]] - - -class ImageURL(BaseModel): - url: str - - detail: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["detail"] - nullable_fields = ["detail"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/imageurlchunk.py b/src/mistralai/models/imageurlchunk.py deleted file mode 100644 index 
8e8aac42..00000000 --- a/src/mistralai/models/imageurlchunk.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .imageurl import ImageURL, ImageURLTypedDict -from mistralai.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ImageURLChunkImageURLTypedDict = TypeAliasType( - "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] -) - - -ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) - - -ImageURLChunkType = Literal["image_url",] - - -class ImageURLChunkTypedDict(TypedDict): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURLTypedDict - type: NotRequired[ImageURLChunkType] - - -class ImageURLChunk(BaseModel): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURL - - type: Optional[ImageURLChunkType] = "image_url" diff --git a/src/mistralai/models/inputentries.py b/src/mistralai/models/inputentries.py deleted file mode 100644 index 8ae29837..00000000 --- a/src/mistralai/models/inputentries.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict -from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict -from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict -from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict -from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict -from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -InputEntriesTypedDict = TypeAliasType( - "InputEntriesTypedDict", - Union[ - FunctionResultEntryTypedDict, - MessageInputEntryTypedDict, - FunctionCallEntryTypedDict, - ToolExecutionEntryTypedDict, - MessageOutputEntryTypedDict, - AgentHandoffEntryTypedDict, - ], -) - - -InputEntries = TypeAliasType( - "InputEntries", - Union[ - FunctionResultEntry, - MessageInputEntry, - FunctionCallEntry, - ToolExecutionEntry, - MessageOutputEntry, - AgentHandoffEntry, - ], -) diff --git a/src/mistralai/models/inputs.py b/src/mistralai/models/inputs.py deleted file mode 100644 index 34d20f34..00000000 --- a/src/mistralai/models/inputs.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .instructrequest import InstructRequest, InstructRequestTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -InstructRequestInputsMessagesTypedDict = TypeAliasType( - "InstructRequestInputsMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -InstructRequestInputsMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -class InstructRequestInputsTypedDict(TypedDict): - messages: List[InstructRequestInputsMessagesTypedDict] - - -class InstructRequestInputs(BaseModel): - messages: List[InstructRequestInputsMessages] - - -InputsTypedDict = TypeAliasType( - "InputsTypedDict", - Union[InstructRequestInputsTypedDict, List[InstructRequestTypedDict]], -) -r"""Chat to classify""" - - -Inputs = TypeAliasType("Inputs", Union[InstructRequestInputs, List[InstructRequest]]) -r"""Chat to classify""" diff --git a/src/mistralai/models/instructrequest.py b/src/mistralai/models/instructrequest.py deleted file mode 100644 index dddbda00..00000000 --- a/src/mistralai/models/instructrequest.py +++ /dev/null @@ -1,42 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict -from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -InstructRequestMessagesTypedDict = TypeAliasType( - "InstructRequestMessagesTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -InstructRequestMessages = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -class InstructRequestTypedDict(TypedDict): - messages: List[InstructRequestMessagesTypedDict] - - -class InstructRequest(BaseModel): - messages: List[InstructRequestMessages] diff --git a/src/mistralai/models/jobin.py b/src/mistralai/models/jobin.py deleted file mode 100644 index aa0cd06c..00000000 --- a/src/mistralai/models/jobin.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict -from .classifiertrainingparametersin import ( - ClassifierTrainingParametersIn, - ClassifierTrainingParametersInTypedDict, -) -from .completiontrainingparametersin import ( - CompletionTrainingParametersIn, - CompletionTrainingParametersInTypedDict, -) -from .finetuneablemodeltype import FineTuneableModelType -from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict -from .trainingfile import TrainingFile, TrainingFileTypedDict -from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -JobInIntegrationsTypedDict = WandbIntegrationTypedDict - - -JobInIntegrations = WandbIntegration - - -HyperparametersTypedDict = TypeAliasType( - "HyperparametersTypedDict", - Union[ - ClassifierTrainingParametersInTypedDict, CompletionTrainingParametersInTypedDict - ], -) - - -Hyperparameters = TypeAliasType( - "Hyperparameters", - Union[ClassifierTrainingParametersIn, CompletionTrainingParametersIn], -) - - -JobInRepositoriesTypedDict = GithubRepositoryInTypedDict - - -JobInRepositories = GithubRepositoryIn - - -class JobInTypedDict(TypedDict): - model: str - r"""The name of the model to fine-tune.""" - hyperparameters: HyperparametersTypedDict - training_files: NotRequired[List[TrainingFileTypedDict]] - validation_files: NotRequired[Nullable[List[str]]] - r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. 
The same data should not be present in both train and validation files.""" - suffix: NotRequired[Nullable[str]] - r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - integrations: NotRequired[Nullable[List[JobInIntegrationsTypedDict]]] - r"""A list of integrations to enable for your fine-tuning job.""" - auto_start: NotRequired[bool] - r"""This field will be required in a future release.""" - invalid_sample_skip_percentage: NotRequired[float] - job_type: NotRequired[Nullable[FineTuneableModelType]] - repositories: NotRequired[Nullable[List[JobInRepositoriesTypedDict]]] - classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]] - - -class JobIn(BaseModel): - model: str - r"""The name of the model to fine-tune.""" - - hyperparameters: Hyperparameters - - training_files: Optional[List[TrainingFile]] = None - - validation_files: OptionalNullable[List[str]] = UNSET - r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" - - suffix: OptionalNullable[str] = UNSET - r"""A string that will be added to your fine-tuning model name. 
For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - - integrations: OptionalNullable[List[JobInIntegrations]] = UNSET - r"""A list of integrations to enable for your fine-tuning job.""" - - auto_start: Optional[bool] = None - r"""This field will be required in a future release.""" - - invalid_sample_skip_percentage: Optional[float] = 0 - - job_type: OptionalNullable[FineTuneableModelType] = UNSET - - repositories: OptionalNullable[List[JobInRepositories]] = UNSET - - classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_files", - "validation_files", - "suffix", - "integrations", - "auto_start", - "invalid_sample_skip_percentage", - "job_type", - "repositories", - "classifier_targets", - ] - nullable_fields = [ - "validation_files", - "suffix", - "integrations", - "job_type", - "repositories", - "classifier_targets", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobmetadataout.py b/src/mistralai/models/jobmetadataout.py deleted file mode 100644 index 10ef781e..00000000 --- a/src/mistralai/models/jobmetadataout.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class JobMetadataOutTypedDict(TypedDict): - expected_duration_seconds: NotRequired[Nullable[int]] - cost: NotRequired[Nullable[float]] - cost_currency: NotRequired[Nullable[str]] - train_tokens_per_step: NotRequired[Nullable[int]] - train_tokens: NotRequired[Nullable[int]] - data_tokens: NotRequired[Nullable[int]] - estimated_start_time: NotRequired[Nullable[int]] - - -class JobMetadataOut(BaseModel): - expected_duration_seconds: OptionalNullable[int] = UNSET - - cost: OptionalNullable[float] = UNSET - - cost_currency: OptionalNullable[str] = UNSET - - train_tokens_per_step: OptionalNullable[int] = UNSET - - train_tokens: OptionalNullable[int] = UNSET - - data_tokens: OptionalNullable[int] = UNSET - - estimated_start_time: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - ] - nullable_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m 
diff --git a/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py b/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py deleted file mode 100644 index 5b83d534..00000000 --- a/src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): - job_id: str - - -class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py deleted file mode 100644 index 9bfaf9c5..00000000 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): - job_id: str - inline: NotRequired[Nullable[bool]] - - -class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - inline: Annotated[ - OptionalNullable[bool], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["inline"] - nullable_fields = ["inline"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py deleted file mode 100644 index c48246d5..00000000 --- a/src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +++ /dev/null @@ -1,102 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .batchjobstatus import BatchJobStatus -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Any, Dict, List, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): - page: NotRequired[int] - page_size: NotRequired[int] - model: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - metadata: NotRequired[Nullable[Dict[str, Any]]] - created_after: NotRequired[Nullable[datetime]] - created_by_me: NotRequired[bool] - status: NotRequired[Nullable[List[BatchJobStatus]]] - - -class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - - model: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - agent_id: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - metadata: Annotated[ - OptionalNullable[Dict[str, Any]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - created_after: Annotated[ - OptionalNullable[datetime], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - created_by_me: Annotated[ - Optional[bool], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = False - - status: Annotated[ - OptionalNullable[List[BatchJobStatus]], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - @model_serializer(mode="wrap") - def 
serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "model", - "agent_id", - "metadata", - "created_after", - "created_by_me", - "status", - ] - nullable_fields = ["model", "agent_id", "metadata", "created_after", "status"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py deleted file mode 100644 index d728efd1..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to archive.""" - - -class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to archive.""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py deleted file mode 100644 index ceb19a69..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ /dev/null @@ -1,45 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): - job_id: str - r"""The ID of the job to cancel.""" - - -class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the job to cancel.""" - - -JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( - 
"JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] -r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py deleted file mode 100644 index 39af3ea6..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict -from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict -from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType - - -Response1TypedDict = TypeAliasType( - "Response1TypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] -) - - -Response1 = Annotated[ - Union[ - Annotated[ClassifierJobOut, Tag("classifier")], - Annotated[CompletionJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] - - -JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - Union[LegacyJobMetadataOutTypedDict, Response1TypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( - 
"JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - Union[LegacyJobMetadataOut, Response1], -) -r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py deleted file mode 100644 index be99dd2d..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ /dev/null @@ -1,45 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): - job_id: str - r"""The ID of the job to analyse.""" - - -class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the job to analyse.""" - - -JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] -r"""OK""" diff --git 
a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py deleted file mode 100644 index 9aec8eb2..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ /dev/null @@ -1,156 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -QueryParamStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", -] -r"""The current job state to filter on. When set, the other results are not displayed.""" - - -class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): - page: NotRequired[int] - r"""The page number of the results to be returned.""" - page_size: NotRequired[int] - r"""The number of items to return per page.""" - model: NotRequired[Nullable[str]] - r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" - created_after: NotRequired[Nullable[datetime]] - r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" - created_before: NotRequired[Nullable[datetime]] - created_by_me: NotRequired[bool] - r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - status: NotRequired[Nullable[QueryParamStatus]] - r"""The current job state to filter on. 
When set, the other results are not displayed.""" - wandb_project: NotRequired[Nullable[str]] - r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" - wandb_name: NotRequired[Nullable[str]] - r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" - suffix: NotRequired[Nullable[str]] - r"""The model suffix to filter on. When set, the other results are not displayed.""" - - -class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): - page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - r"""The page number of the results to be returned.""" - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - r"""The number of items to return per page.""" - - model: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" - - created_after: Annotated[ - OptionalNullable[datetime], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" - - created_before: Annotated[ - OptionalNullable[datetime], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - created_by_me: Annotated[ - Optional[bool], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = False - r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - - status: Annotated[ - OptionalNullable[QueryParamStatus], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The current job state to filter on. 
When set, the other results are not displayed.""" - - wandb_project: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" - - wandb_name: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" - - suffix: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - r"""The model suffix to filter on. When set, the other results are not displayed.""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "model", - "created_after", - "created_before", - "created_by_me", - "status", - "wandb_project", - "wandb_name", - "suffix", - ] - nullable_fields = [ - "model", - "created_after", - "created_before", - "status", - "wandb_project", - "wandb_name", - "suffix", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py deleted file mode 100644 index 8103b67b..00000000 --- 
a/src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): - job_id: str - - -class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - -JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] -r"""OK""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py deleted file mode 100644 index a84274ff..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to unarchive.""" - - -class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to unarchive.""" diff --git a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py deleted file mode 100644 index a10528ca..00000000 --- a/src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifierftmodelout import ClassifierFTModelOut, ClassifierFTModelOutTypedDict -from .completionftmodelout import CompletionFTModelOut, CompletionFTModelOutTypedDict -from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict -from mistralai.types import BaseModel -from mistralai.utils import ( - FieldMetadata, - PathParamMetadata, - RequestMetadata, - get_discriminator, -) -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to update.""" - update_ft_model_in: UpdateFTModelInTypedDict - - -class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to update.""" - - update_ft_model_in: Annotated[ - UpdateFTModelIn, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] - - -JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", - Union[CompletionFTModelOutTypedDict, ClassifierFTModelOutTypedDict], -) -r"""OK""" - - -JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ - Union[ - Annotated[ClassifierFTModelOut, Tag("classifier")], - Annotated[CompletionFTModelOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "model_type", "model_type")), -] -r"""OK""" diff --git a/src/mistralai/models/jobsout.py b/src/mistralai/models/jobsout.py deleted file mode 100644 index 680b1d58..00000000 --- a/src/mistralai/models/jobsout.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict -from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -JobsOutDataTypedDict = TypeAliasType( - "JobsOutDataTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] -) - - -JobsOutData = Annotated[ - Union[ - Annotated[ClassifierJobOut, Tag("classifier")], - Annotated[CompletionJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), -] - - -JobsOutObject = Literal["list",] - - -class JobsOutTypedDict(TypedDict): - total: int - data: NotRequired[List[JobsOutDataTypedDict]] - object: NotRequired[JobsOutObject] - - -class JobsOut(BaseModel): - total: int - - data: Optional[List[JobsOutData]] = None - - object: Optional[JobsOutObject] = "list" diff --git a/src/mistralai/models/jsonschema.py b/src/mistralai/models/jsonschema.py deleted file mode 100644 index e2b6a45e..00000000 --- a/src/mistralai/models/jsonschema.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing import Any, Dict, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class JSONSchemaTypedDict(TypedDict): - name: str - schema_definition: Dict[str, Any] - description: NotRequired[Nullable[str]] - strict: NotRequired[bool] - - -class JSONSchema(BaseModel): - name: str - - schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] - - description: OptionalNullable[str] = UNSET - - strict: Optional[bool] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/legacyjobmetadataout.py b/src/mistralai/models/legacyjobmetadataout.py deleted file mode 100644 index 49951219..00000000 --- a/src/mistralai/models/legacyjobmetadataout.py +++ /dev/null @@ -1,119 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -LegacyJobMetadataOutObject = Literal["job.metadata",] - - -class LegacyJobMetadataOutTypedDict(TypedDict): - details: str - expected_duration_seconds: NotRequired[Nullable[int]] - r"""The approximated time (in seconds) for the fine-tuning process to complete.""" - cost: NotRequired[Nullable[float]] - r"""The cost of the fine-tuning job.""" - cost_currency: NotRequired[Nullable[str]] - r"""The currency used for the fine-tuning job cost.""" - train_tokens_per_step: NotRequired[Nullable[int]] - r"""The number of tokens consumed by one training step.""" - train_tokens: NotRequired[Nullable[int]] - r"""The total number of tokens used during the fine-tuning process.""" - data_tokens: NotRequired[Nullable[int]] - r"""The total number of tokens in the training dataset.""" - estimated_start_time: NotRequired[Nullable[int]] - deprecated: NotRequired[bool] - epochs: NotRequired[Nullable[float]] - r"""The number of complete passes through the entire training dataset.""" - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - object: NotRequired[LegacyJobMetadataOutObject] - - -class LegacyJobMetadataOut(BaseModel): - details: str - - expected_duration_seconds: OptionalNullable[int] = UNSET - r"""The approximated time (in seconds) for the fine-tuning process to complete.""" - - cost: OptionalNullable[float] = UNSET - r"""The cost of the fine-tuning job.""" - - cost_currency: OptionalNullable[str] = UNSET - r"""The currency used for the fine-tuning job cost.""" - - train_tokens_per_step: OptionalNullable[int] = UNSET - r"""The number of tokens consumed by one training step.""" - - train_tokens: OptionalNullable[int] = UNSET - r"""The total number of tokens used during the fine-tuning process.""" - - data_tokens: OptionalNullable[int] = UNSET - r"""The total number of tokens in the training dataset.""" - - estimated_start_time: OptionalNullable[int] = UNSET - - deprecated: Optional[bool] = True - - epochs: OptionalNullable[float] = UNSET - r"""The number of complete passes through the entire training dataset.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - - object: Optional[LegacyJobMetadataOutObject] = "job.metadata" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - "deprecated", - "epochs", - "training_steps", - "object", - ] - nullable_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - "epochs", - "training_steps", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraries_delete_v1op.py b/src/mistralai/models/libraries_delete_v1op.py deleted file mode 100644 index 56f8f8a8..00000000 --- a/src/mistralai/models/libraries_delete_v1op.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDeleteV1RequestTypedDict(TypedDict): - library_id: str - - -class LibrariesDeleteV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_delete_v1op.py b/src/mistralai/models/libraries_documents_delete_v1op.py deleted file mode 100644 index c33710b0..00000000 --- a/src/mistralai/models/libraries_documents_delete_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsDeleteV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsDeleteV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py b/src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py deleted file mode 100644 index e2459c1c..00000000 --- a/src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsGetExtractedTextSignedURLV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_get_signed_url_v1op.py b/src/mistralai/models/libraries_documents_get_signed_url_v1op.py deleted file mode 100644 index bc913ba5..00000000 --- a/src/mistralai/models/libraries_documents_get_signed_url_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsGetSignedURLV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsGetSignedURLV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_get_status_v1op.py b/src/mistralai/models/libraries_documents_get_status_v1op.py deleted file mode 100644 index 08992d7c..00000000 --- a/src/mistralai/models/libraries_documents_get_status_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsGetStatusV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsGetStatusV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_get_text_content_v1op.py b/src/mistralai/models/libraries_documents_get_text_content_v1op.py deleted file mode 100644 index 21a131ad..00000000 --- a/src/mistralai/models/libraries_documents_get_text_content_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsGetTextContentV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsGetTextContentV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_get_v1op.py b/src/mistralai/models/libraries_documents_get_v1op.py deleted file mode 100644 index ff2bdedb..00000000 --- a/src/mistralai/models/libraries_documents_get_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsGetV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsGetV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_list_v1op.py b/src/mistralai/models/libraries_documents_list_v1op.py deleted file mode 100644 index e6ff29cf..00000000 --- a/src/mistralai/models/libraries_documents_list_v1op.py +++ /dev/null @@ -1,91 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class LibrariesDocumentsListV1RequestTypedDict(TypedDict): - library_id: str - search: NotRequired[Nullable[str]] - page_size: NotRequired[int] - page: NotRequired[int] - filters_attributes: NotRequired[Nullable[str]] - sort_by: NotRequired[str] - sort_order: NotRequired[str] - - -class LibrariesDocumentsListV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - search: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - page_size: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 100 - - 
page: Annotated[ - Optional[int], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = 0 - - filters_attributes: Annotated[ - OptionalNullable[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - sort_by: Annotated[ - Optional[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = "created_at" - - sort_order: Annotated[ - Optional[str], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = "desc" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "search", - "page_size", - "page", - "filters_attributes", - "sort_by", - "sort_order", - ] - nullable_fields = ["search", "filters_attributes"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraries_documents_reprocess_v1op.py b/src/mistralai/models/libraries_documents_reprocess_v1op.py deleted file mode 100644 index 861993e7..00000000 --- a/src/mistralai/models/libraries_documents_reprocess_v1op.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsReprocessV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class LibrariesDocumentsReprocessV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_documents_update_v1op.py b/src/mistralai/models/libraries_documents_update_v1op.py deleted file mode 100644 index 5551d5ee..00000000 --- a/src/mistralai/models/libraries_documents_update_v1op.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): - library_id: str - document_id: str - document_update_in: DocumentUpdateInTypedDict - - -class LibrariesDocumentsUpdateV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_update_in: Annotated[ - DocumentUpdateIn, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/libraries_documents_upload_v1op.py b/src/mistralai/models/libraries_documents_upload_v1op.py deleted file mode 100644 index 51f536cc..00000000 --- 
a/src/mistralai/models/libraries_documents_upload_v1op.py +++ /dev/null @@ -1,56 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .file import File, FileTypedDict -from mistralai.types import BaseModel -from mistralai.utils import ( - FieldMetadata, - MultipartFormMetadata, - PathParamMetadata, - RequestMetadata, -) -from typing_extensions import Annotated, TypedDict - - -class LibrariesDocumentsUploadV1DocumentUploadTypedDict(TypedDict): - file: FileTypedDict - r"""The File object (not file name) to be uploaded. - To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - """ - - -class LibrariesDocumentsUploadV1DocumentUpload(BaseModel): - file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] - r"""The File object (not file name) to be uploaded. 
- To upload a file and specify a custom file name you should format your request as such: - ```bash - file=@path/to/your/file.jsonl;filename=custom_name.jsonl - ``` - Otherwise, you can just keep the original file name: - ```bash - file=@path/to/your/file.jsonl - ``` - """ - - -class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): - library_id: str - request_body: LibrariesDocumentsUploadV1DocumentUploadTypedDict - - -class LibrariesDocumentsUploadV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - request_body: Annotated[ - LibrariesDocumentsUploadV1DocumentUpload, - FieldMetadata(request=RequestMetadata(media_type="multipart/form-data")), - ] diff --git a/src/mistralai/models/libraries_get_v1op.py b/src/mistralai/models/libraries_get_v1op.py deleted file mode 100644 index b87090f6..00000000 --- a/src/mistralai/models/libraries_get_v1op.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesGetV1RequestTypedDict(TypedDict): - library_id: str - - -class LibrariesGetV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_share_create_v1op.py b/src/mistralai/models/libraries_share_create_v1op.py deleted file mode 100644 index a8b0e35d..00000000 --- a/src/mistralai/models/libraries_share_create_v1op.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .sharingin import SharingIn, SharingInTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesShareCreateV1RequestTypedDict(TypedDict): - library_id: str - sharing_in: SharingInTypedDict - - -class LibrariesShareCreateV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - sharing_in: Annotated[ - SharingIn, FieldMetadata(request=RequestMetadata(media_type="application/json")) - ] diff --git a/src/mistralai/models/libraries_share_delete_v1op.py b/src/mistralai/models/libraries_share_delete_v1op.py deleted file mode 100644 index e29d556a..00000000 --- a/src/mistralai/models/libraries_share_delete_v1op.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .sharingdelete import SharingDelete, SharingDeleteTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesShareDeleteV1RequestTypedDict(TypedDict): - library_id: str - sharing_delete: SharingDeleteTypedDict - - -class LibrariesShareDeleteV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - sharing_delete: Annotated[ - SharingDelete, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/libraries_share_list_v1op.py b/src/mistralai/models/libraries_share_list_v1op.py deleted file mode 100644 index b276d756..00000000 --- a/src/mistralai/models/libraries_share_list_v1op.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesShareListV1RequestTypedDict(TypedDict): - library_id: str - - -class LibrariesShareListV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/models/libraries_update_v1op.py b/src/mistralai/models/libraries_update_v1op.py deleted file mode 100644 index c93895d9..00000000 --- a/src/mistralai/models/libraries_update_v1op.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from typing_extensions import Annotated, TypedDict - - -class LibrariesUpdateV1RequestTypedDict(TypedDict): - library_id: str - library_in_update: LibraryInUpdateTypedDict - - -class LibrariesUpdateV1Request(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - library_in_update: Annotated[ - LibraryInUpdate, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] diff --git a/src/mistralai/models/libraryin.py b/src/mistralai/models/libraryin.py deleted file mode 100644 index 872d494d..00000000 --- a/src/mistralai/models/libraryin.py +++ /dev/null @@ -1,50 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class LibraryInTypedDict(TypedDict): - name: str - description: NotRequired[Nullable[str]] - chunk_size: NotRequired[Nullable[int]] - - -class LibraryIn(BaseModel): - name: str - - description: OptionalNullable[str] = UNSET - - chunk_size: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["description", "chunk_size"] - nullable_fields = ["description", "chunk_size"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraryinupdate.py b/src/mistralai/models/libraryinupdate.py deleted file mode 100644 index 6e8ab81a..00000000 --- a/src/mistralai/models/libraryinupdate.py +++ /dev/null @@ -1,47 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class LibraryInUpdateTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - - -class LibraryInUpdate(BaseModel): - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "description"] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/libraryout.py b/src/mistralai/models/libraryout.py deleted file mode 100644 index d3bc36f9..00000000 --- a/src/mistralai/models/libraryout.py +++ /dev/null @@ -1,110 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class LibraryOutTypedDict(TypedDict): - id: str - name: str - created_at: datetime - updated_at: datetime - owner_id: Nullable[str] - owner_type: str - total_size: int - nb_documents: int - chunk_size: Nullable[int] - emoji: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - generated_description: NotRequired[Nullable[str]] - explicit_user_members_count: NotRequired[Nullable[int]] - explicit_workspace_members_count: NotRequired[Nullable[int]] - org_sharing_role: NotRequired[Nullable[str]] - generated_name: NotRequired[Nullable[str]] - r"""Generated Name""" - - -class LibraryOut(BaseModel): - id: str - - name: str - - created_at: datetime - - updated_at: datetime - - owner_id: Nullable[str] - - owner_type: str - - total_size: int - - nb_documents: int - - chunk_size: Nullable[int] - - emoji: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - generated_description: OptionalNullable[str] = UNSET - - explicit_user_members_count: OptionalNullable[int] = UNSET - - explicit_workspace_members_count: OptionalNullable[int] = UNSET - - org_sharing_role: OptionalNullable[str] = UNSET - - generated_name: OptionalNullable[str] = UNSET - r"""Generated Name""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "emoji", - "description", - "generated_description", - "explicit_user_members_count", - "explicit_workspace_members_count", - "org_sharing_role", - "generated_name", - ] - nullable_fields = [ - "owner_id", - "chunk_size", - "emoji", - "description", - "generated_description", - "explicit_user_members_count", - "explicit_workspace_members_count", - "org_sharing_role", - "generated_name", - ] - null_default_fields 
= [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/listdocumentout.py b/src/mistralai/models/listdocumentout.py deleted file mode 100644 index 9d39e087..00000000 --- a/src/mistralai/models/listdocumentout.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .documentout import DocumentOut, DocumentOutTypedDict -from .paginationinfo import PaginationInfo, PaginationInfoTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListDocumentOutTypedDict(TypedDict): - pagination: PaginationInfoTypedDict - data: List[DocumentOutTypedDict] - - -class ListDocumentOut(BaseModel): - pagination: PaginationInfo - - data: List[DocumentOut] diff --git a/src/mistralai/models/listfilesout.py b/src/mistralai/models/listfilesout.py deleted file mode 100644 index 2f82b37d..00000000 --- a/src/mistralai/models/listfilesout.py +++ /dev/null @@ -1,52 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .fileschema import FileSchema, FileSchemaTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List -from typing_extensions import NotRequired, TypedDict - - -class ListFilesOutTypedDict(TypedDict): - data: List[FileSchemaTypedDict] - object: str - total: NotRequired[Nullable[int]] - - -class ListFilesOut(BaseModel): - data: List[FileSchema] - - object: str - - total: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["total"] - nullable_fields = ["total"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/listlibraryout.py b/src/mistralai/models/listlibraryout.py deleted file mode 100644 index 1e647fe1..00000000 --- a/src/mistralai/models/listlibraryout.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .libraryout import LibraryOut, LibraryOutTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListLibraryOutTypedDict(TypedDict): - data: List[LibraryOutTypedDict] - - -class ListLibraryOut(BaseModel): - data: List[LibraryOut] diff --git a/src/mistralai/models/listsharingout.py b/src/mistralai/models/listsharingout.py deleted file mode 100644 index 38c0dbe0..00000000 --- a/src/mistralai/models/listsharingout.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .sharingout import SharingOut, SharingOutTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListSharingOutTypedDict(TypedDict): - data: List[SharingOutTypedDict] - - -class ListSharingOut(BaseModel): - data: List[SharingOut] diff --git a/src/mistralai/models/messageentries.py b/src/mistralai/models/messageentries.py deleted file mode 100644 index 9b1706de..00000000 --- a/src/mistralai/models/messageentries.py +++ /dev/null @@ -1,18 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict -from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -MessageEntriesTypedDict = TypeAliasType( - "MessageEntriesTypedDict", - Union[MessageInputEntryTypedDict, MessageOutputEntryTypedDict], -) - - -MessageEntries = TypeAliasType( - "MessageEntries", Union[MessageInputEntry, MessageOutputEntry] -) diff --git a/src/mistralai/models/messageinputcontentchunks.py b/src/mistralai/models/messageinputcontentchunks.py deleted file mode 100644 index e90d8aa0..00000000 --- a/src/mistralai/models/messageinputcontentchunks.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -MessageInputContentChunksTypedDict = TypeAliasType( - "MessageInputContentChunksTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - DocumentURLChunkTypedDict, - ThinkChunkTypedDict, - ToolFileChunkTypedDict, - ], -) - - -MessageInputContentChunks = TypeAliasType( - "MessageInputContentChunks", - Union[TextChunk, ImageURLChunk, DocumentURLChunk, ThinkChunk, ToolFileChunk], -) diff --git a/src/mistralai/models/messageinputentry.py b/src/mistralai/models/messageinputentry.py deleted file mode 100644 index edf05631..00000000 --- a/src/mistralai/models/messageinputentry.py +++ /dev/null @@ -1,105 +0,0 @@ -"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .messageinputcontentchunks import ( - MessageInputContentChunks, - MessageInputContentChunksTypedDict, -) -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -Object = Literal["entry",] - - -MessageInputEntryType = Literal["message.input",] - - -MessageInputEntryRole = Literal[ - "assistant", - "user", -] - - -MessageInputEntryContentTypedDict = TypeAliasType( - "MessageInputEntryContentTypedDict", - Union[str, List[MessageInputContentChunksTypedDict]], -) - - -MessageInputEntryContent = TypeAliasType( - "MessageInputEntryContent", Union[str, List[MessageInputContentChunks]] -) - - -class MessageInputEntryTypedDict(TypedDict): - r"""Representation of an input message inside the conversation.""" - - role: MessageInputEntryRole - content: MessageInputEntryContentTypedDict - object: NotRequired[Object] - type: NotRequired[MessageInputEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - prefix: NotRequired[bool] - - -class MessageInputEntry(BaseModel): - r"""Representation of an input message inside the conversation.""" - - role: MessageInputEntryRole - - content: MessageInputEntryContent - - object: Optional[Object] = "entry" - - type: Optional[MessageInputEntryType] = "message.input" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - prefix: Optional[bool] = False - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "type", - "created_at", - "completed_at", - "id", - "prefix", - ] - nullable_fields = ["completed_at"] 
- null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/messageoutputcontentchunks.py b/src/mistralai/models/messageoutputcontentchunks.py deleted file mode 100644 index 136a7608..00000000 --- a/src/mistralai/models/messageoutputcontentchunks.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict -from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -MessageOutputContentChunksTypedDict = TypeAliasType( - "MessageOutputContentChunksTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - DocumentURLChunkTypedDict, - ThinkChunkTypedDict, - ToolFileChunkTypedDict, - ToolReferenceChunkTypedDict, - ], -) - - -MessageOutputContentChunks = TypeAliasType( - "MessageOutputContentChunks", - Union[ - TextChunk, - ImageURLChunk, - DocumentURLChunk, - ThinkChunk, - ToolFileChunk, - ToolReferenceChunk, - ], -) diff --git a/src/mistralai/models/messageoutputentry.py 
b/src/mistralai/models/messageoutputentry.py deleted file mode 100644 index 0e2df81e..00000000 --- a/src/mistralai/models/messageoutputentry.py +++ /dev/null @@ -1,103 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .messageoutputcontentchunks import ( - MessageOutputContentChunks, - MessageOutputContentChunksTypedDict, -) -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -MessageOutputEntryObject = Literal["entry",] - - -MessageOutputEntryType = Literal["message.output",] - - -MessageOutputEntryRole = Literal["assistant",] - - -MessageOutputEntryContentTypedDict = TypeAliasType( - "MessageOutputEntryContentTypedDict", - Union[str, List[MessageOutputContentChunksTypedDict]], -) - - -MessageOutputEntryContent = TypeAliasType( - "MessageOutputEntryContent", Union[str, List[MessageOutputContentChunks]] -) - - -class MessageOutputEntryTypedDict(TypedDict): - content: MessageOutputEntryContentTypedDict - object: NotRequired[MessageOutputEntryObject] - type: NotRequired[MessageOutputEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - agent_id: NotRequired[Nullable[str]] - model: NotRequired[Nullable[str]] - role: NotRequired[MessageOutputEntryRole] - - -class MessageOutputEntry(BaseModel): - content: MessageOutputEntryContent - - object: Optional[MessageOutputEntryObject] = "entry" - - type: Optional[MessageOutputEntryType] = "message.output" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - agent_id: OptionalNullable[str] = UNSET - - model: OptionalNullable[str] = UNSET - - role: 
Optional[MessageOutputEntryRole] = "assistant" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "object", - "type", - "created_at", - "completed_at", - "id", - "agent_id", - "model", - "role", - ] - nullable_fields = ["completed_at", "agent_id", "model"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/messageoutputevent.py b/src/mistralai/models/messageoutputevent.py deleted file mode 100644 index 751767a3..00000000 --- a/src/mistralai/models/messageoutputevent.py +++ /dev/null @@ -1,95 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -MessageOutputEventType = Literal["message.output.delta",] - - -MessageOutputEventRole = Literal["assistant",] - - -MessageOutputEventContentTypedDict = TypeAliasType( - "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] -) - - -MessageOutputEventContent = TypeAliasType( - "MessageOutputEventContent", Union[str, OutputContentChunks] -) - - -class MessageOutputEventTypedDict(TypedDict): - id: str - content: MessageOutputEventContentTypedDict - type: NotRequired[MessageOutputEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - content_index: NotRequired[int] - model: NotRequired[Nullable[str]] - agent_id: NotRequired[Nullable[str]] - role: NotRequired[MessageOutputEventRole] - - -class MessageOutputEvent(BaseModel): - id: str - - content: MessageOutputEventContent - - type: Optional[MessageOutputEventType] = "message.output.delta" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 - - content_index: Optional[int] = 0 - - model: OptionalNullable[str] = UNSET - - agent_id: OptionalNullable[str] = UNSET - - role: Optional[MessageOutputEventRole] = "assistant" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "type", - "created_at", - "output_index", - "content_index", - "model", - "agent_id", - "role", - ] - nullable_fields = ["model", "agent_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) 
- - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/metricout.py b/src/mistralai/models/metricout.py deleted file mode 100644 index 930b5c21..00000000 --- a/src/mistralai/models/metricout.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class MetricOutTypedDict(TypedDict): - r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - - train_loss: NotRequired[Nullable[float]] - valid_loss: NotRequired[Nullable[float]] - valid_mean_token_accuracy: NotRequired[Nullable[float]] - - -class MetricOut(BaseModel): - r"""Metrics at the step number during the fine-tuning job. 
Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" - - train_loss: OptionalNullable[float] = UNSET - - valid_loss: OptionalNullable[float] = UNSET - - valid_mean_token_accuracy: OptionalNullable[float] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] - nullable_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/mistralerror.py b/src/mistralai/models/mistralerror.py deleted file mode 100644 index 28cfd22d..00000000 --- a/src/mistralai/models/mistralerror.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import httpx -from typing import Optional -from dataclasses import dataclass, field - - -@dataclass(unsafe_hash=True) -class MistralError(Exception): - """The base class for all HTTP error responses.""" - - message: str - status_code: int - body: str - headers: httpx.Headers = field(hash=False) - raw_response: httpx.Response = field(hash=False) - - def __init__( - self, message: str, raw_response: httpx.Response, body: Optional[str] = None - ): - object.__setattr__(self, "message", message) - object.__setattr__(self, "status_code", raw_response.status_code) - object.__setattr__( - self, "body", body if body is not None else raw_response.text - ) - object.__setattr__(self, "headers", raw_response.headers) - object.__setattr__(self, "raw_response", raw_response) - - def __str__(self): - return self.message diff --git a/src/mistralai/models/mistralpromptmode.py b/src/mistralai/models/mistralpromptmode.py deleted file mode 100644 index dfb6f2d2..00000000 --- a/src/mistralai/models/mistralpromptmode.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] -r"""Available options to the prompt_mode argument on the chat completion endpoint. -Values represent high-level intent. Assignment to actual SPs is handled internally. -System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. -""" diff --git a/src/mistralai/models/modelcapabilities.py b/src/mistralai/models/modelcapabilities.py deleted file mode 100644 index 6edf8e5b..00000000 --- a/src/mistralai/models/modelcapabilities.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ModelCapabilitiesTypedDict(TypedDict): - completion_chat: NotRequired[bool] - function_calling: NotRequired[bool] - completion_fim: NotRequired[bool] - fine_tuning: NotRequired[bool] - vision: NotRequired[bool] - ocr: NotRequired[bool] - classification: NotRequired[bool] - moderation: NotRequired[bool] - audio: NotRequired[bool] - audio_transcription: NotRequired[bool] - - -class ModelCapabilities(BaseModel): - completion_chat: Optional[bool] = False - - function_calling: Optional[bool] = False - - completion_fim: Optional[bool] = False - - fine_tuning: Optional[bool] = False - - vision: Optional[bool] = False - - ocr: Optional[bool] = False - - classification: Optional[bool] = False - - moderation: Optional[bool] = False - - audio: Optional[bool] = False - - audio_transcription: Optional[bool] = False diff --git a/src/mistralai/models/modelconversation.py b/src/mistralai/models/modelconversation.py deleted file mode 100644 index 8eca4f97..00000000 --- a/src/mistralai/models/modelconversation.py +++ /dev/null @@ -1,133 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict -from .completionargs import CompletionArgs, CompletionArgsTypedDict -from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict -from .functiontool import FunctionTool, FunctionToolTypedDict -from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict -from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict -from .websearchtool import WebSearchTool, WebSearchToolTypedDict -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer -from typing import Any, Dict, List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -ModelConversationToolsTypedDict = TypeAliasType( - "ModelConversationToolsTypedDict", - Union[ - WebSearchToolTypedDict, - WebSearchPremiumToolTypedDict, - CodeInterpreterToolTypedDict, - ImageGenerationToolTypedDict, - FunctionToolTypedDict, - DocumentLibraryToolTypedDict, - ], -) - - -ModelConversationTools = Annotated[ - Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -ModelConversationObject = Literal["conversation",] - - -class ModelConversationTypedDict(TypedDict): - id: str - created_at: datetime - updated_at: datetime - model: str - instructions: NotRequired[Nullable[str]] - r"""Instruction prompt the model will follow during the conversation.""" - 
tools: NotRequired[List[ModelConversationToolsTypedDict]] - r"""List of tools which are available to the model during the conversation.""" - completion_args: NotRequired[CompletionArgsTypedDict] - r"""White-listed arguments from the completion API""" - name: NotRequired[Nullable[str]] - r"""Name given to the conversation.""" - description: NotRequired[Nullable[str]] - r"""Description of the what the conversation is about.""" - metadata: NotRequired[Nullable[Dict[str, Any]]] - r"""Custom metadata for the conversation.""" - object: NotRequired[ModelConversationObject] - - -class ModelConversation(BaseModel): - id: str - - created_at: datetime - - updated_at: datetime - - model: str - - instructions: OptionalNullable[str] = UNSET - r"""Instruction prompt the model will follow during the conversation.""" - - tools: Optional[List[ModelConversationTools]] = None - r"""List of tools which are available to the model during the conversation.""" - - completion_args: Optional[CompletionArgs] = None - r"""White-listed arguments from the completion API""" - - name: OptionalNullable[str] = UNSET - r"""Name given to the conversation.""" - - description: OptionalNullable[str] = UNSET - r"""Description of the what the conversation is about.""" - - metadata: OptionalNullable[Dict[str, Any]] = UNSET - r"""Custom metadata for the conversation.""" - - object: Optional[ModelConversationObject] = "conversation" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "name", - "description", - "metadata", - "object", - ] - nullable_fields = ["instructions", "name", "description", "metadata"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - 
self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/modellist.py b/src/mistralai/models/modellist.py deleted file mode 100644 index 394cb3fa..00000000 --- a/src/mistralai/models/modellist.py +++ /dev/null @@ -1,34 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict -from .ftmodelcard import FTModelCard, FTModelCardTypedDict -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import List, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -DataTypedDict = TypeAliasType( - "DataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] -) - - -Data = Annotated[ - Union[ - Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class ModelListTypedDict(TypedDict): - object: NotRequired[str] - data: NotRequired[List[DataTypedDict]] - - -class ModelList(BaseModel): - object: Optional[str] = "list" - - data: Optional[List[Data]] = None diff --git a/src/mistralai/models/moderationobject.py b/src/mistralai/models/moderationobject.py deleted file mode 100644 index 5eff2d2a..00000000 --- a/src/mistralai/models/moderationobject.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class ModerationObjectTypedDict(TypedDict): - categories: NotRequired[Dict[str, bool]] - r"""Moderation result thresholds""" - category_scores: NotRequired[Dict[str, float]] - r"""Moderation result""" - - -class ModerationObject(BaseModel): - categories: Optional[Dict[str, bool]] = None - r"""Moderation result thresholds""" - - category_scores: Optional[Dict[str, float]] = None - r"""Moderation result""" diff --git a/src/mistralai/models/moderationresponse.py b/src/mistralai/models/moderationresponse.py deleted file mode 100644 index ed13cd6b..00000000 --- a/src/mistralai/models/moderationresponse.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .moderationobject import ModerationObject, ModerationObjectTypedDict -from mistralai.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ModerationResponseTypedDict(TypedDict): - id: str - model: str - results: List[ModerationObjectTypedDict] - - -class ModerationResponse(BaseModel): - id: str - - model: str - - results: List[ModerationObject] diff --git a/src/mistralai/models/no_response_error.py b/src/mistralai/models/no_response_error.py deleted file mode 100644 index 1deab64b..00000000 --- a/src/mistralai/models/no_response_error.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from dataclasses import dataclass - - -@dataclass(unsafe_hash=True) -class NoResponseError(Exception): - """Error raised when no HTTP response is received from the server.""" - - message: str - - def __init__(self, message: str = "No response received"): - object.__setattr__(self, "message", message) - super().__init__(message) - - def __str__(self): - return self.message diff --git a/src/mistralai/models/ocrimageobject.py b/src/mistralai/models/ocrimageobject.py deleted file mode 100644 index cec0acf4..00000000 --- a/src/mistralai/models/ocrimageobject.py +++ /dev/null @@ -1,83 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class OCRImageObjectTypedDict(TypedDict): - id: str - r"""Image ID for extracted image in a page""" - top_left_x: Nullable[int] - r"""X coordinate of top-left corner of the extracted image""" - top_left_y: Nullable[int] - r"""Y coordinate of top-left corner of the extracted image""" - bottom_right_x: Nullable[int] - r"""X coordinate of bottom-right corner of the extracted image""" - bottom_right_y: Nullable[int] - r"""Y coordinate of bottom-right corner of the extracted image""" - image_base64: NotRequired[Nullable[str]] - r"""Base64 string of the extracted image""" - image_annotation: NotRequired[Nullable[str]] - r"""Annotation of the extracted image in json str""" - - -class OCRImageObject(BaseModel): - id: str - r"""Image ID for extracted image in a page""" - - top_left_x: Nullable[int] - r"""X coordinate of top-left corner of the extracted image""" - - top_left_y: Nullable[int] - r"""Y coordinate of top-left corner of the extracted image""" - - bottom_right_x: Nullable[int] - r"""X coordinate of bottom-right corner of the 
extracted image""" - - bottom_right_y: Nullable[int] - r"""Y coordinate of bottom-right corner of the extracted image""" - - image_base64: OptionalNullable[str] = UNSET - r"""Base64 string of the extracted image""" - - image_annotation: OptionalNullable[str] = UNSET - r"""Annotation of the extracted image in json str""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["image_base64", "image_annotation"] - nullable_fields = [ - "top_left_x", - "top_left_y", - "bottom_right_x", - "bottom_right_y", - "image_base64", - "image_annotation", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrpagedimensions.py b/src/mistralai/models/ocrpagedimensions.py deleted file mode 100644 index d1aeb54d..00000000 --- a/src/mistralai/models/ocrpagedimensions.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class OCRPageDimensionsTypedDict(TypedDict): - dpi: int - r"""Dots per inch of the page-image""" - height: int - r"""Height of the image in pixels""" - width: int - r"""Width of the image in pixels""" - - -class OCRPageDimensions(BaseModel): - dpi: int - r"""Dots per inch of the page-image""" - - height: int - r"""Height of the image in pixels""" - - width: int - r"""Width of the image in pixels""" diff --git a/src/mistralai/models/ocrpageobject.py b/src/mistralai/models/ocrpageobject.py deleted file mode 100644 index 737defba..00000000 --- a/src/mistralai/models/ocrpageobject.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict -from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict -from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Optional -from typing_extensions import NotRequired, TypedDict - - -class OCRPageObjectTypedDict(TypedDict): - index: int - r"""The page index in a pdf document starting from 0""" - markdown: str - r"""The markdown string response of the page""" - images: List[OCRImageObjectTypedDict] - r"""List of all extracted images in the page""" - dimensions: Nullable[OCRPageDimensionsTypedDict] - r"""The dimensions of the PDF Page's screenshot image""" - tables: NotRequired[List[OCRTableObjectTypedDict]] - r"""List of all extracted tables in the page""" - hyperlinks: NotRequired[List[str]] - r"""List of all hyperlinks in the page""" - header: NotRequired[Nullable[str]] - r"""Header of the page""" - footer: 
NotRequired[Nullable[str]] - r"""Footer of the page""" - - -class OCRPageObject(BaseModel): - index: int - r"""The page index in a pdf document starting from 0""" - - markdown: str - r"""The markdown string response of the page""" - - images: List[OCRImageObject] - r"""List of all extracted images in the page""" - - dimensions: Nullable[OCRPageDimensions] - r"""The dimensions of the PDF Page's screenshot image""" - - tables: Optional[List[OCRTableObject]] = None - r"""List of all extracted tables in the page""" - - hyperlinks: Optional[List[str]] = None - r"""List of all hyperlinks in the page""" - - header: OptionalNullable[str] = UNSET - r"""Header of the page""" - - footer: OptionalNullable[str] = UNSET - r"""Footer of the page""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["tables", "hyperlinks", "header", "footer"] - nullable_fields = ["header", "footer", "dimensions"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrrequest.py b/src/mistralai/models/ocrrequest.py deleted file mode 100644 index 0e061ac9..00000000 --- a/src/mistralai/models/ocrrequest.py +++ /dev/null @@ -1,140 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .filechunk import FileChunk, FileChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .responseformat import ResponseFormat, ResponseFormatTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -DocumentTypedDict = TypeAliasType( - "DocumentTypedDict", - Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], -) -r"""Document to run OCR on""" - - -Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk]) -r"""Document to run OCR on""" - - -TableFormat = Literal[ - "markdown", - "html", -] - - -class OCRRequestTypedDict(TypedDict): - model: Nullable[str] - document: DocumentTypedDict - r"""Document to run OCR on""" - id: NotRequired[str] - pages: NotRequired[Nullable[List[int]]] - r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" - include_image_base64: NotRequired[Nullable[bool]] - r"""Include image URLs in response""" - image_limit: NotRequired[Nullable[int]] - r"""Max images to extract""" - image_min_size: NotRequired[Nullable[int]] - r"""Minimum height and width of image to extract""" - bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] - r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" - document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] - r"""Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field""" - document_annotation_prompt: NotRequired[Nullable[str]] - r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" - table_format: NotRequired[Nullable[TableFormat]] - extract_header: NotRequired[bool] - extract_footer: NotRequired[bool] - - -class OCRRequest(BaseModel): - model: Nullable[str] - - document: Document - r"""Document to run OCR on""" - - id: Optional[str] = None - - pages: OptionalNullable[List[int]] = UNSET - r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" - - include_image_base64: OptionalNullable[bool] = UNSET - r"""Include image URLs in response""" - - image_limit: OptionalNullable[int] = UNSET - r"""Max images to extract""" - - image_min_size: OptionalNullable[int] = UNSET - r"""Minimum height and width of image to extract""" - - bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET - r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" - - document_annotation_format: OptionalNullable[ResponseFormat] = UNSET - r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" - - document_annotation_prompt: OptionalNullable[str] = UNSET - r"""Optional prompt to guide the model in extracting structured output from the entire document. 
A document_annotation_format must be provided.""" - - table_format: OptionalNullable[TableFormat] = UNSET - - extract_header: Optional[bool] = None - - extract_footer: Optional[bool] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "id", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - "extract_header", - "extract_footer", - ] - nullable_fields = [ - "model", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrresponse.py b/src/mistralai/models/ocrresponse.py deleted file mode 100644 index 7b65bee7..00000000 --- a/src/mistralai/models/ocrresponse.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict -from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List -from typing_extensions import NotRequired, TypedDict - - -class OCRResponseTypedDict(TypedDict): - pages: List[OCRPageObjectTypedDict] - r"""List of OCR info for pages.""" - model: str - r"""The model used to generate the OCR.""" - usage_info: OCRUsageInfoTypedDict - document_annotation: NotRequired[Nullable[str]] - r"""Formatted response in the request_format if provided in json str""" - - -class OCRResponse(BaseModel): - pages: List[OCRPageObject] - r"""List of OCR info for pages.""" - - model: str - r"""The model used to generate the OCR.""" - - usage_info: OCRUsageInfo - - document_annotation: OptionalNullable[str] = UNSET - r"""Formatted response in the request_format if provided in json str""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["document_annotation"] - nullable_fields = ["document_annotation"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/ocrtableobject.py b/src/mistralai/models/ocrtableobject.py deleted file mode 100644 index 5f30ab5e..00000000 --- a/src/mistralai/models/ocrtableobject.py +++ /dev/null @@ -1,34 +0,0 @@ 
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -import pydantic -from typing import Literal -from typing_extensions import Annotated, TypedDict - - -Format = Literal[ - "markdown", - "html", -] -r"""Format of the table""" - - -class OCRTableObjectTypedDict(TypedDict): - id: str - r"""Table ID for extracted table in a page""" - content: str - r"""Content of the table in the given format""" - format_: Format - r"""Format of the table""" - - -class OCRTableObject(BaseModel): - id: str - r"""Table ID for extracted table in a page""" - - content: str - r"""Content of the table in the given format""" - - format_: Annotated[Format, pydantic.Field(alias="format")] - r"""Format of the table""" diff --git a/src/mistralai/models/ocrusageinfo.py b/src/mistralai/models/ocrusageinfo.py deleted file mode 100644 index 36c9f826..00000000 --- a/src/mistralai/models/ocrusageinfo.py +++ /dev/null @@ -1,51 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class OCRUsageInfoTypedDict(TypedDict): - pages_processed: int - r"""Number of pages processed""" - doc_size_bytes: NotRequired[Nullable[int]] - r"""Document size in bytes""" - - -class OCRUsageInfo(BaseModel): - pages_processed: int - r"""Number of pages processed""" - - doc_size_bytes: OptionalNullable[int] = UNSET - r"""Document size in bytes""" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["doc_size_bytes"] - nullable_fields = ["doc_size_bytes"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/outputcontentchunks.py b/src/mistralai/models/outputcontentchunks.py deleted file mode 100644 index ad0c087e..00000000 --- a/src/mistralai/models/outputcontentchunks.py +++ /dev/null @@ -1,37 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict -from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict -from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict -from typing import Union -from typing_extensions import TypeAliasType - - -OutputContentChunksTypedDict = TypeAliasType( - "OutputContentChunksTypedDict", - Union[ - TextChunkTypedDict, - ImageURLChunkTypedDict, - DocumentURLChunkTypedDict, - ThinkChunkTypedDict, - ToolFileChunkTypedDict, - ToolReferenceChunkTypedDict, - ], -) - - -OutputContentChunks = TypeAliasType( - "OutputContentChunks", - Union[ - TextChunk, - ImageURLChunk, - DocumentURLChunk, - ThinkChunk, - ToolFileChunk, - ToolReferenceChunk, - ], -) diff --git a/src/mistralai/models/paginationinfo.py b/src/mistralai/models/paginationinfo.py deleted file mode 100644 index 00d4f1ec..00000000 --- a/src/mistralai/models/paginationinfo.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class PaginationInfoTypedDict(TypedDict): - total_items: int - total_pages: int - current_page: int - page_size: int - has_more: bool - - -class PaginationInfo(BaseModel): - total_items: int - - total_pages: int - - current_page: int - - page_size: int - - has_more: bool diff --git a/src/mistralai/models/prediction.py b/src/mistralai/models/prediction.py deleted file mode 100644 index 582d8789..00000000 --- a/src/mistralai/models/prediction.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class PredictionTypedDict(TypedDict): - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - type: Literal["content"] - content: NotRequired[str] - - -class Prediction(BaseModel): - r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - - TYPE: Annotated[ - Annotated[ - Optional[Literal["content"]], AfterValidator(validate_const("content")) - ], - pydantic.Field(alias="type"), - ] = "content" - - content: Optional[str] = "" diff --git a/src/mistralai/models/processingstatusout.py b/src/mistralai/models/processingstatusout.py deleted file mode 100644 index e67bfa86..00000000 --- a/src/mistralai/models/processingstatusout.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class ProcessingStatusOutTypedDict(TypedDict): - document_id: str - processing_status: str - - -class ProcessingStatusOut(BaseModel): - document_id: str - - processing_status: str diff --git a/src/mistralai/models/realtimetranscriptionerror.py b/src/mistralai/models/realtimetranscriptionerror.py deleted file mode 100644 index 0785f700..00000000 --- a/src/mistralai/models/realtimetranscriptionerror.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .realtimetranscriptionerrordetail import ( - RealtimeTranscriptionErrorDetail, - RealtimeTranscriptionErrorDetailTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict - - -class RealtimeTranscriptionErrorTypedDict(TypedDict): - error: RealtimeTranscriptionErrorDetailTypedDict - type: Literal["error"] - - -class RealtimeTranscriptionError(BaseModel): - error: RealtimeTranscriptionErrorDetail - - TYPE: Annotated[ - Annotated[Optional[Literal["error"]], AfterValidator(validate_const("error"))], - pydantic.Field(alias="type"), - ] = "error" diff --git a/src/mistralai/models/realtimetranscriptionerrordetail.py b/src/mistralai/models/realtimetranscriptionerrordetail.py deleted file mode 100644 index cb5d73f8..00000000 --- a/src/mistralai/models/realtimetranscriptionerrordetail.py +++ /dev/null @@ -1,29 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Any, Dict, Union -from typing_extensions import TypeAliasType, TypedDict - - -MessageTypedDict = TypeAliasType("MessageTypedDict", Union[str, Dict[str, Any]]) -r"""Human-readable error message.""" - - -Message = TypeAliasType("Message", Union[str, Dict[str, Any]]) -r"""Human-readable error message.""" - - -class RealtimeTranscriptionErrorDetailTypedDict(TypedDict): - message: MessageTypedDict - r"""Human-readable error message.""" - code: int - r"""Internal error code for debugging.""" - - -class RealtimeTranscriptionErrorDetail(BaseModel): - message: Message - r"""Human-readable error message.""" - - code: int - r"""Internal error code for debugging.""" diff --git a/src/mistralai/models/realtimetranscriptionsession.py b/src/mistralai/models/realtimetranscriptionsession.py deleted file mode 100644 index bcd0cfe3..00000000 --- a/src/mistralai/models/realtimetranscriptionsession.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .audioformat import AudioFormat, AudioFormatTypedDict -from mistralai.types import BaseModel -from typing_extensions import TypedDict - - -class RealtimeTranscriptionSessionTypedDict(TypedDict): - request_id: str - model: str - audio_format: AudioFormatTypedDict - - -class RealtimeTranscriptionSession(BaseModel): - request_id: str - - model: str - - audio_format: AudioFormat diff --git a/src/mistralai/models/realtimetranscriptionsessioncreated.py b/src/mistralai/models/realtimetranscriptionsessioncreated.py deleted file mode 100644 index 9a2c2860..00000000 --- a/src/mistralai/models/realtimetranscriptionsessioncreated.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .realtimetranscriptionsession import ( - RealtimeTranscriptionSession, - RealtimeTranscriptionSessionTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict - - -class RealtimeTranscriptionSessionCreatedTypedDict(TypedDict): - session: RealtimeTranscriptionSessionTypedDict - type: Literal["session.created"] - - -class RealtimeTranscriptionSessionCreated(BaseModel): - session: RealtimeTranscriptionSession - - TYPE: Annotated[ - Annotated[ - Optional[Literal["session.created"]], - AfterValidator(validate_const("session.created")), - ], - pydantic.Field(alias="type"), - ] = "session.created" diff --git a/src/mistralai/models/realtimetranscriptionsessionupdated.py b/src/mistralai/models/realtimetranscriptionsessionupdated.py deleted file mode 100644 index ad1b5133..00000000 --- a/src/mistralai/models/realtimetranscriptionsessionupdated.py +++ /dev/null @@ -1,30 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .realtimetranscriptionsession import ( - RealtimeTranscriptionSession, - RealtimeTranscriptionSessionTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, TypedDict - - -class RealtimeTranscriptionSessionUpdatedTypedDict(TypedDict): - session: RealtimeTranscriptionSessionTypedDict - type: Literal["session.updated"] - - -class RealtimeTranscriptionSessionUpdated(BaseModel): - session: RealtimeTranscriptionSession - - TYPE: Annotated[ - Annotated[ - Optional[Literal["session.updated"]], - AfterValidator(validate_const("session.updated")), - ], - pydantic.Field(alias="type"), - ] = "session.updated" diff --git a/src/mistralai/models/referencechunk.py b/src/mistralai/models/referencechunk.py deleted file mode 100644 index 1864ac79..00000000 --- a/src/mistralai/models/referencechunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ReferenceChunkType = Literal["reference",] - - -class ReferenceChunkTypedDict(TypedDict): - reference_ids: List[int] - type: NotRequired[ReferenceChunkType] - - -class ReferenceChunk(BaseModel): - reference_ids: List[int] - - type: Optional[ReferenceChunkType] = "reference" diff --git a/src/mistralai/models/requestsource.py b/src/mistralai/models/requestsource.py deleted file mode 100644 index 7b0a35c4..00000000 --- a/src/mistralai/models/requestsource.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -RequestSource = Literal[ - "api", - "playground", - "agent_builder_v1", -] diff --git a/src/mistralai/models/responsedoneevent.py b/src/mistralai/models/responsedoneevent.py deleted file mode 100644 index 5a3a3dfb..00000000 --- a/src/mistralai/models/responsedoneevent.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseDoneEventType = Literal["conversation.response.done",] - - -class ResponseDoneEventTypedDict(TypedDict): - usage: ConversationUsageInfoTypedDict - type: NotRequired[ResponseDoneEventType] - created_at: NotRequired[datetime] - - -class ResponseDoneEvent(BaseModel): - usage: ConversationUsageInfo - - type: Optional[ResponseDoneEventType] = "conversation.response.done" - - created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responseerrorevent.py b/src/mistralai/models/responseerrorevent.py deleted file mode 100644 index 6cb1b268..00000000 --- a/src/mistralai/models/responseerrorevent.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseErrorEventType = Literal["conversation.response.error",] - - -class ResponseErrorEventTypedDict(TypedDict): - message: str - code: int - type: NotRequired[ResponseErrorEventType] - created_at: NotRequired[datetime] - - -class ResponseErrorEvent(BaseModel): - message: str - - code: int - - type: Optional[ResponseErrorEventType] = "conversation.response.error" - - created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py deleted file mode 100644 index 92284017..00000000 --- a/src/mistralai/models/responseformat.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .jsonschema import JSONSchema, JSONSchemaTypedDict -from .responseformats import ResponseFormats -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ResponseFormatTypedDict(TypedDict): - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - type: NotRequired[ResponseFormats] - json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] - - -class ResponseFormat(BaseModel): - r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" - - type: Optional[ResponseFormats] = None - - json_schema: OptionalNullable[JSONSchema] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/responseformats.py b/src/mistralai/models/responseformats.py deleted file mode 100644 index cbf83ce7..00000000 --- a/src/mistralai/models/responseformats.py +++ /dev/null @@ -1,11 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -ResponseFormats = Literal[ - "text", - "json_object", - "json_schema", -] diff --git a/src/mistralai/models/responsestartedevent.py b/src/mistralai/models/responsestartedevent.py deleted file mode 100644 index d14d45ef..00000000 --- a/src/mistralai/models/responsestartedevent.py +++ /dev/null @@ -1,24 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseStartedEventType = Literal["conversation.response.started",] - - -class ResponseStartedEventTypedDict(TypedDict): - conversation_id: str - type: NotRequired[ResponseStartedEventType] - created_at: NotRequired[datetime] - - -class ResponseStartedEvent(BaseModel): - conversation_id: str - - type: Optional[ResponseStartedEventType] = "conversation.response.started" - - created_at: Optional[datetime] = None diff --git a/src/mistralai/models/responsevalidationerror.py b/src/mistralai/models/responsevalidationerror.py deleted file mode 100644 index ed301655..00000000 --- a/src/mistralai/models/responsevalidationerror.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import httpx -from typing import Optional -from dataclasses import dataclass - -from mistralai.models import MistralError - - -@dataclass(unsafe_hash=True) -class ResponseValidationError(MistralError): - """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" - - def __init__( - self, - message: str, - raw_response: httpx.Response, - cause: Exception, - body: Optional[str] = None, - ): - message = f"{message}: {cause}" - super().__init__(message, raw_response, body) - - @property - def cause(self): - """Normally the Pydantic ValidationError""" - return self.__cause__ diff --git a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py deleted file mode 100644 index bfe62474..00000000 --- a/src/mistralai/models/retrieve_model_v1_models_model_id_getop.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict -from .ftmodelcard import FTModelCard, FTModelCardTypedDict -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to retrieve.""" - - -class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to retrieve.""" - - -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", - Union[BaseModelCardTypedDict, FTModelCardTypedDict], -) -r"""Successful Response""" - - -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet = Annotated[ - Union[ - Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] -r"""Successful Response""" diff --git a/src/mistralai/models/retrievefileout.py b/src/mistralai/models/retrievefileout.py deleted file mode 100644 index 94540083..00000000 --- a/src/mistralai/models/retrievefileout.py +++ /dev/null @@ -1,91 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .filepurpose import FilePurpose -from .sampletype import SampleType -from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing_extensions import Annotated, NotRequired, TypedDict - - -class RetrieveFileOutTypedDict(TypedDict): - id: str - r"""The unique identifier of the file.""" - object: str - r"""The object type, which is always \"file\".""" - size_bytes: int - r"""The size of the file, in bytes.""" - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - filename: str - r"""The name of the uploaded file.""" - purpose: FilePurpose - sample_type: SampleType - source: Source - deleted: bool - num_lines: NotRequired[Nullable[int]] - mimetype: NotRequired[Nullable[str]] - signature: NotRequired[Nullable[str]] - - -class RetrieveFileOut(BaseModel): - id: str - r"""The unique identifier of the file.""" - - object: str - r"""The object type, which is always \"file\".""" - - size_bytes: Annotated[int, pydantic.Field(alias="bytes")] - r"""The size of the file, in bytes.""" - - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - - filename: str - r"""The name of the uploaded file.""" - - purpose: FilePurpose - - sample_type: SampleType - - source: Source - - deleted: bool - - num_lines: OptionalNullable[int] = UNSET - - mimetype: OptionalNullable[str] = UNSET - - signature: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", "signature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - 
is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/sampletype.py b/src/mistralai/models/sampletype.py deleted file mode 100644 index efb43e9b..00000000 --- a/src/mistralai/models/sampletype.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -SampleType = Union[ - Literal[ - "pretrain", - "instruct", - "batch_request", - "batch_result", - "batch_error", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/sdkerror.py b/src/mistralai/models/sdkerror.py deleted file mode 100644 index 65c45cf1..00000000 --- a/src/mistralai/models/sdkerror.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import httpx -from typing import Optional -from dataclasses import dataclass - -from mistralai.models import MistralError - -MAX_MESSAGE_LEN = 10_000 - - -@dataclass(unsafe_hash=True) -class SDKError(MistralError): - """The fallback error class if no more specific error class is matched.""" - - def __init__( - self, message: str, raw_response: httpx.Response, body: Optional[str] = None - ): - body_display = body or raw_response.text or '""' - - if message: - message += ": " - message += f"Status {raw_response.status_code}" - - headers = raw_response.headers - content_type = headers.get("content-type", '""') - if content_type != "application/json": - if " " in content_type: - content_type = f'"{content_type}"' - message += f" Content-Type {content_type}" - - if len(body_display) > MAX_MESSAGE_LEN: - truncated = body_display[:MAX_MESSAGE_LEN] - remaining = len(body_display) - MAX_MESSAGE_LEN - body_display = f"{truncated}...and {remaining} more chars" - - message += f". Body: {body_display}" - message = message.strip() - - super().__init__(message, raw_response, body) diff --git a/src/mistralai/models/security.py b/src/mistralai/models/security.py deleted file mode 100644 index cf05ba8f..00000000 --- a/src/mistralai/models/security.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from mistralai.utils import FieldMetadata, SecurityMetadata -from typing import Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class SecurityTypedDict(TypedDict): - api_key: NotRequired[str] - - -class Security(BaseModel): - api_key: Annotated[ - Optional[str], - FieldMetadata( - security=SecurityMetadata( - scheme=True, - scheme_type="http", - sub_type="bearer", - field_name="Authorization", - ) - ), - ] = None diff --git a/src/mistralai/models/shareenum.py b/src/mistralai/models/shareenum.py deleted file mode 100644 index 634ba4b7..00000000 --- a/src/mistralai/models/shareenum.py +++ /dev/null @@ -1,14 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -ShareEnum = Union[ - Literal[ - "Viewer", - "Editor", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/sharingdelete.py b/src/mistralai/models/sharingdelete.py deleted file mode 100644 index ebcdbab5..00000000 --- a/src/mistralai/models/sharingdelete.py +++ /dev/null @@ -1,55 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .entitytype import EntityType -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class SharingDeleteTypedDict(TypedDict): - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - share_with_type: EntityType - r"""The type of entity, used to share a library.""" - org_id: NotRequired[Nullable[str]] - - -class SharingDelete(BaseModel): - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - - share_with_type: EntityType - r"""The type of entity, used to share a library.""" - - org_id: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["org_id"] - nullable_fields = ["org_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/sharingin.py b/src/mistralai/models/sharingin.py deleted file mode 100644 index f7bb89ca..00000000 --- a/src/mistralai/models/sharingin.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .entitytype import EntityType -from .shareenum import ShareEnum -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class SharingInTypedDict(TypedDict): - level: ShareEnum - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - share_with_type: EntityType - r"""The type of entity, used to share a library.""" - org_id: NotRequired[Nullable[str]] - - -class SharingIn(BaseModel): - level: ShareEnum - - share_with_uuid: str - r"""The id of the entity (user, workspace or organization) to share with""" - - share_with_type: EntityType - r"""The type of entity, used to share a library.""" - - org_id: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["org_id"] - nullable_fields = ["org_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/sharingout.py b/src/mistralai/models/sharingout.py deleted file mode 100644 index 12455818..00000000 --- a/src/mistralai/models/sharingout.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class SharingOutTypedDict(TypedDict): - library_id: str - org_id: str - role: str - share_with_type: str - share_with_uuid: Nullable[str] - user_id: NotRequired[Nullable[str]] - - -class SharingOut(BaseModel): - library_id: str - - org_id: str - - role: str - - share_with_type: str - - share_with_uuid: Nullable[str] - - user_id: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["user_id"] - nullable_fields = ["user_id", "share_with_uuid"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/source.py b/src/mistralai/models/source.py deleted file mode 100644 index cc3abce2..00000000 --- a/src/mistralai/models/source.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -Source = Union[ - Literal[ - "upload", - "repository", - "mistral", - ], - UnrecognizedStr, -] diff --git a/src/mistralai/models/ssetypes.py b/src/mistralai/models/ssetypes.py deleted file mode 100644 index 796f0327..00000000 --- a/src/mistralai/models/ssetypes.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -SSETypes = Literal[ - "conversation.response.started", - "conversation.response.done", - "conversation.response.error", - "message.output.delta", - "tool.execution.started", - "tool.execution.delta", - "tool.execution.done", - "agent.handoff.started", - "agent.handoff.done", - "function.call.delta", -] -r"""Server side events sent when streaming a conversation response.""" diff --git a/src/mistralai/models/systemmessage.py b/src/mistralai/models/systemmessage.py deleted file mode 100644 index 2b34607b..00000000 --- a/src/mistralai/models/systemmessage.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .systemmessagecontentchunks import ( - SystemMessageContentChunks, - SystemMessageContentChunksTypedDict, -) -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -SystemMessageContentTypedDict = TypeAliasType( - "SystemMessageContentTypedDict", - Union[str, List[SystemMessageContentChunksTypedDict]], -) - - -SystemMessageContent = TypeAliasType( - "SystemMessageContent", Union[str, List[SystemMessageContentChunks]] -) - - -Role = Literal["system",] - - -class SystemMessageTypedDict(TypedDict): - content: SystemMessageContentTypedDict - role: NotRequired[Role] - - -class SystemMessage(BaseModel): - content: SystemMessageContent - - role: Optional[Role] = "system" diff --git a/src/mistralai/models/systemmessagecontentchunks.py b/src/mistralai/models/systemmessagecontentchunks.py deleted file mode 100644 index a1f04d1e..00000000 --- a/src/mistralai/models/systemmessagecontentchunks.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType - - -SystemMessageContentChunksTypedDict = TypeAliasType( - "SystemMessageContentChunksTypedDict", - Union[TextChunkTypedDict, ThinkChunkTypedDict], -) - - -SystemMessageContentChunks = Annotated[ - Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] diff --git a/src/mistralai/models/textchunk.py b/src/mistralai/models/textchunk.py deleted file mode 100644 index 6052686e..00000000 --- a/src/mistralai/models/textchunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TextChunkType = Literal["text",] - - -class TextChunkTypedDict(TypedDict): - text: str - type: NotRequired[TextChunkType] - - -class TextChunk(BaseModel): - text: str - - type: Optional[TextChunkType] = "text" diff --git a/src/mistralai/models/thinkchunk.py b/src/mistralai/models/thinkchunk.py deleted file mode 100644 index 627ae488..00000000 --- a/src/mistralai/models/thinkchunk.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict -from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ThinkingTypedDict = TypeAliasType( - "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] -) - - -Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) - - -ThinkChunkType = Literal["thinking",] - - -class ThinkChunkTypedDict(TypedDict): - thinking: List[ThinkingTypedDict] - closed: NotRequired[bool] - r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - type: NotRequired[ThinkChunkType] - - -class ThinkChunk(BaseModel): - thinking: List[Thinking] - - closed: Optional[bool] = None - r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - - type: Optional[ThinkChunkType] = "thinking" diff --git a/src/mistralai/models/timestampgranularity.py b/src/mistralai/models/timestampgranularity.py deleted file mode 100644 index 5bda890f..00000000 --- a/src/mistralai/models/timestampgranularity.py +++ /dev/null @@ -1,10 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -TimestampGranularity = Literal[ - "segment", - "word", -] diff --git a/src/mistralai/models/tool.py b/src/mistralai/models/tool.py deleted file mode 100644 index b14a6adf..00000000 --- a/src/mistralai/models/tool.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .function import Function, FunctionTypedDict -from .tooltypes import ToolTypes -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ToolTypedDict(TypedDict): - function: FunctionTypedDict - type: NotRequired[ToolTypes] - - -class Tool(BaseModel): - function: Function - - type: Optional[ToolTypes] = None diff --git a/src/mistralai/models/toolcall.py b/src/mistralai/models/toolcall.py deleted file mode 100644 index 1f367924..00000000 --- a/src/mistralai/models/toolcall.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .functioncall import FunctionCall, FunctionCallTypedDict -from .tooltypes import ToolTypes -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ToolCallTypedDict(TypedDict): - function: FunctionCallTypedDict - id: NotRequired[str] - type: NotRequired[ToolTypes] - index: NotRequired[int] - - -class ToolCall(BaseModel): - function: FunctionCall - - id: Optional[str] = "null" - - type: Optional[ToolTypes] = None - - index: Optional[int] = 0 diff --git a/src/mistralai/models/toolchoice.py b/src/mistralai/models/toolchoice.py deleted file mode 100644 index f8e1b486..00000000 --- a/src/mistralai/models/toolchoice.py +++ /dev/null @@ -1,25 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .functionname import FunctionName, FunctionNameTypedDict -from .tooltypes import ToolTypes -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ToolChoiceTypedDict(TypedDict): - r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - - function: FunctionNameTypedDict - r"""this restriction of `Function` is used to select a specific function to call""" - type: NotRequired[ToolTypes] - - -class ToolChoice(BaseModel): - r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice""" - - function: FunctionName - r"""this restriction of `Function` is used to select a specific function to call""" - - type: Optional[ToolTypes] = None diff --git a/src/mistralai/models/toolchoiceenum.py b/src/mistralai/models/toolchoiceenum.py deleted file mode 100644 index 01f6f677..00000000 --- a/src/mistralai/models/toolchoiceenum.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -ToolChoiceEnum = Literal[ - "auto", - "none", - "any", - "required", -] diff --git a/src/mistralai/models/toolexecutiondeltaevent.py b/src/mistralai/models/toolexecutiondeltaevent.py deleted file mode 100644 index 4fca46a8..00000000 --- a/src/mistralai/models/toolexecutiondeltaevent.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionDeltaEventType = Literal["tool.execution.delta",] - - -ToolExecutionDeltaEventNameTypedDict = TypeAliasType( - "ToolExecutionDeltaEventNameTypedDict", Union[BuiltInConnectors, str] -) - - -ToolExecutionDeltaEventName = TypeAliasType( - "ToolExecutionDeltaEventName", Union[BuiltInConnectors, str] -) - - -class ToolExecutionDeltaEventTypedDict(TypedDict): - id: str - name: ToolExecutionDeltaEventNameTypedDict - arguments: str - type: NotRequired[ToolExecutionDeltaEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class ToolExecutionDeltaEvent(BaseModel): - id: str - - name: ToolExecutionDeltaEventName - - arguments: str - - type: Optional[ToolExecutionDeltaEventType] = "tool.execution.delta" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/toolexecutiondoneevent.py b/src/mistralai/models/toolexecutiondoneevent.py deleted file mode 100644 index 621d5571..00000000 --- a/src/mistralai/models/toolexecutiondoneevent.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from datetime import datetime -from mistralai.types import BaseModel -from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionDoneEventType = Literal["tool.execution.done",] - - -ToolExecutionDoneEventNameTypedDict = TypeAliasType( - "ToolExecutionDoneEventNameTypedDict", Union[BuiltInConnectors, str] -) - - -ToolExecutionDoneEventName = TypeAliasType( - "ToolExecutionDoneEventName", Union[BuiltInConnectors, str] -) - - -class ToolExecutionDoneEventTypedDict(TypedDict): - id: str - name: ToolExecutionDoneEventNameTypedDict - type: NotRequired[ToolExecutionDoneEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - info: NotRequired[Dict[str, Any]] - - -class ToolExecutionDoneEvent(BaseModel): - id: str - - name: ToolExecutionDoneEventName - - type: Optional[ToolExecutionDoneEventType] = "tool.execution.done" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 - - info: Optional[Dict[str, Any]] = None diff --git a/src/mistralai/models/toolexecutionentry.py b/src/mistralai/models/toolexecutionentry.py deleted file mode 100644 index 9f70a63b..00000000 --- a/src/mistralai/models/toolexecutionentry.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from datetime import datetime -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionEntryObject = Literal["entry",] - - -ToolExecutionEntryType = Literal["tool.execution",] - - -NameTypedDict = TypeAliasType("NameTypedDict", Union[BuiltInConnectors, str]) - - -Name = TypeAliasType("Name", Union[BuiltInConnectors, str]) - - -class ToolExecutionEntryTypedDict(TypedDict): - name: NameTypedDict - arguments: str - object: NotRequired[ToolExecutionEntryObject] - type: NotRequired[ToolExecutionEntryType] - created_at: NotRequired[datetime] - completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] - info: NotRequired[Dict[str, Any]] - - -class ToolExecutionEntry(BaseModel): - name: Name - - arguments: str - - object: Optional[ToolExecutionEntryObject] = "entry" - - type: Optional[ToolExecutionEntryType] = "tool.execution" - - created_at: Optional[datetime] = None - - completed_at: OptionalNullable[datetime] = UNSET - - id: Optional[str] = None - - info: Optional[Dict[str, Any]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id", "info"] - nullable_fields = ["completed_at"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and 
( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/toolexecutionstartedevent.py b/src/mistralai/models/toolexecutionstartedevent.py deleted file mode 100644 index 80dd5e97..00000000 --- a/src/mistralai/models/toolexecutionstartedevent.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from datetime import datetime -from mistralai.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionStartedEventType = Literal["tool.execution.started",] - - -ToolExecutionStartedEventNameTypedDict = TypeAliasType( - "ToolExecutionStartedEventNameTypedDict", Union[BuiltInConnectors, str] -) - - -ToolExecutionStartedEventName = TypeAliasType( - "ToolExecutionStartedEventName", Union[BuiltInConnectors, str] -) - - -class ToolExecutionStartedEventTypedDict(TypedDict): - id: str - name: ToolExecutionStartedEventNameTypedDict - arguments: str - type: NotRequired[ToolExecutionStartedEventType] - created_at: NotRequired[datetime] - output_index: NotRequired[int] - - -class ToolExecutionStartedEvent(BaseModel): - id: str - - name: ToolExecutionStartedEventName - - arguments: str - - type: Optional[ToolExecutionStartedEventType] = "tool.execution.started" - - created_at: Optional[datetime] = None - - output_index: Optional[int] = 0 diff --git a/src/mistralai/models/toolfilechunk.py b/src/mistralai/models/toolfilechunk.py deleted file mode 100644 index 87bc822c..00000000 --- a/src/mistralai/models/toolfilechunk.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolFileChunkType = Literal["tool_file",] - - -ToolFileChunkToolTypedDict = TypeAliasType( - "ToolFileChunkToolTypedDict", Union[BuiltInConnectors, str] -) - - -ToolFileChunkTool = TypeAliasType("ToolFileChunkTool", Union[BuiltInConnectors, str]) - - -class ToolFileChunkTypedDict(TypedDict): - tool: ToolFileChunkToolTypedDict - file_id: str - type: NotRequired[ToolFileChunkType] - file_name: NotRequired[Nullable[str]] - file_type: NotRequired[Nullable[str]] - - -class ToolFileChunk(BaseModel): - tool: ToolFileChunkTool - - file_id: str - - type: Optional[ToolFileChunkType] = "tool_file" - - file_name: OptionalNullable[str] = UNSET - - file_type: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "file_name", "file_type"] - nullable_fields = ["file_name", "file_type"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/toolmessage.py b/src/mistralai/models/toolmessage.py deleted file mode 100644 index ef917c43..00000000 --- a/src/mistralai/models/toolmessage.py +++ /dev/null @@ -1,66 +0,0 @@ 
-"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolMessageContentTypedDict = TypeAliasType( - "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) - - -ToolMessageRole = Literal["tool",] - - -class ToolMessageTypedDict(TypedDict): - content: Nullable[ToolMessageContentTypedDict] - tool_call_id: NotRequired[Nullable[str]] - name: NotRequired[Nullable[str]] - role: NotRequired[ToolMessageRole] - - -class ToolMessage(BaseModel): - content: Nullable[ToolMessageContent] - - tool_call_id: OptionalNullable[str] = UNSET - - name: OptionalNullable[str] = UNSET - - role: Optional[ToolMessageRole] = "tool" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name", "role"] - nullable_fields = ["content", "tool_call_id", "name"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/toolreferencechunk.py b/src/mistralai/models/toolreferencechunk.py 
deleted file mode 100644 index 2a751cb0..00000000 --- a/src/mistralai/models/toolreferencechunk.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .builtinconnectors import BuiltInConnectors -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolReferenceChunkType = Literal["tool_reference",] - - -ToolReferenceChunkToolTypedDict = TypeAliasType( - "ToolReferenceChunkToolTypedDict", Union[BuiltInConnectors, str] -) - - -ToolReferenceChunkTool = TypeAliasType( - "ToolReferenceChunkTool", Union[BuiltInConnectors, str] -) - - -class ToolReferenceChunkTypedDict(TypedDict): - tool: ToolReferenceChunkToolTypedDict - title: str - type: NotRequired[ToolReferenceChunkType] - url: NotRequired[Nullable[str]] - favicon: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - - -class ToolReferenceChunk(BaseModel): - tool: ToolReferenceChunkTool - - title: str - - type: Optional[ToolReferenceChunkType] = "tool_reference" - - url: OptionalNullable[str] = UNSET - - favicon: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "url", "favicon", "description"] - nullable_fields = ["url", "favicon", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None 
and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/tooltypes.py b/src/mistralai/models/tooltypes.py deleted file mode 100644 index f54893c2..00000000 --- a/src/mistralai/models/tooltypes.py +++ /dev/null @@ -1,8 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import UnrecognizedStr -from typing import Literal, Union - - -ToolTypes = Union[Literal["function",], UnrecognizedStr] diff --git a/src/mistralai/models/trainingfile.py b/src/mistralai/models/trainingfile.py deleted file mode 100644 index 99bd49dd..00000000 --- a/src/mistralai/models/trainingfile.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class TrainingFileTypedDict(TypedDict): - file_id: str - weight: NotRequired[float] - - -class TrainingFile(BaseModel): - file_id: str - - weight: Optional[float] = 1 diff --git a/src/mistralai/models/transcriptionresponse.py b/src/mistralai/models/transcriptionresponse.py deleted file mode 100644 index 54a98a5b..00000000 --- a/src/mistralai/models/transcriptionresponse.py +++ /dev/null @@ -1,79 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .transcriptionsegmentchunk import ( - TranscriptionSegmentChunk, - TranscriptionSegmentChunkTypedDict, -) -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL -import pydantic -from pydantic import ConfigDict, model_serializer -from typing import Any, Dict, List, Optional -from typing_extensions import NotRequired, TypedDict - - -class TranscriptionResponseTypedDict(TypedDict): - model: str - text: str - usage: UsageInfoTypedDict - language: Nullable[str] - segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] - - -class TranscriptionResponse(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - model: str - - text: str - - usage: UsageInfo - - language: Nullable[str] - - segments: Optional[List[TranscriptionSegmentChunk]] = None - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["segments"] - nullable_fields = ["language"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - for k, v in serialized.items(): - m[k] = v - 
- return m diff --git a/src/mistralai/models/transcriptionsegmentchunk.py b/src/mistralai/models/transcriptionsegmentchunk.py deleted file mode 100644 index 40ad20b3..00000000 --- a/src/mistralai/models/transcriptionsegmentchunk.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import ConfigDict, model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Type = Literal["transcription_segment",] - - -class TranscriptionSegmentChunkTypedDict(TypedDict): - text: str - start: float - end: float - score: NotRequired[Nullable[float]] - speaker_id: NotRequired[Nullable[str]] - type: NotRequired[Type] - - -class TranscriptionSegmentChunk(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - text: str - - start: float - - end: float - - score: OptionalNullable[float] = UNSET - - speaker_id: OptionalNullable[str] = UNSET - - type: Optional[Type] = "transcription_segment" - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["score", "speaker_id", "type"] - nullable_fields = ["score", "speaker_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in 
nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - for k, v in serialized.items(): - m[k] = v - - return m diff --git a/src/mistralai/models/transcriptionstreamdone.py b/src/mistralai/models/transcriptionstreamdone.py deleted file mode 100644 index e1b1ab3d..00000000 --- a/src/mistralai/models/transcriptionstreamdone.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .transcriptionsegmentchunk import ( - TranscriptionSegmentChunk, - TranscriptionSegmentChunkTypedDict, -) -from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL -import pydantic -from pydantic import ConfigDict, model_serializer -from typing import Any, Dict, List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamDoneType = Literal["transcription.done",] - - -class TranscriptionStreamDoneTypedDict(TypedDict): - model: str - text: str - usage: UsageInfoTypedDict - language: Nullable[str] - segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] - type: NotRequired[TranscriptionStreamDoneType] - - -class TranscriptionStreamDone(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - model: str - - text: str - - usage: UsageInfo - - language: Nullable[str] - - segments: Optional[List[TranscriptionSegmentChunk]] = None - - type: Optional[TranscriptionStreamDoneType] = "transcription.done" - - @property - def additional_properties(self): - return 
self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["segments", "type"] - nullable_fields = ["language"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - for k, v in serialized.items(): - m[k] = v - - return m diff --git a/src/mistralai/models/transcriptionstreamevents.py b/src/mistralai/models/transcriptionstreamevents.py deleted file mode 100644 index 8207c03f..00000000 --- a/src/mistralai/models/transcriptionstreamevents.py +++ /dev/null @@ -1,58 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .transcriptionstreamdone import ( - TranscriptionStreamDone, - TranscriptionStreamDoneTypedDict, -) -from .transcriptionstreameventtypes import TranscriptionStreamEventTypes -from .transcriptionstreamlanguage import ( - TranscriptionStreamLanguage, - TranscriptionStreamLanguageTypedDict, -) -from .transcriptionstreamsegmentdelta import ( - TranscriptionStreamSegmentDelta, - TranscriptionStreamSegmentDeltaTypedDict, -) -from .transcriptionstreamtextdelta import ( - TranscriptionStreamTextDelta, - TranscriptionStreamTextDeltaTypedDict, -) -from mistralai.types import BaseModel -from mistralai.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -TranscriptionStreamEventsDataTypedDict = TypeAliasType( - "TranscriptionStreamEventsDataTypedDict", - Union[ - TranscriptionStreamTextDeltaTypedDict, - TranscriptionStreamLanguageTypedDict, - TranscriptionStreamSegmentDeltaTypedDict, - TranscriptionStreamDoneTypedDict, - ], -) - - -TranscriptionStreamEventsData = Annotated[ - Union[ - Annotated[TranscriptionStreamDone, Tag("transcription.done")], - Annotated[TranscriptionStreamLanguage, Tag("transcription.language")], - Annotated[TranscriptionStreamSegmentDelta, Tag("transcription.segment")], - Annotated[TranscriptionStreamTextDelta, Tag("transcription.text.delta")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), -] - - -class TranscriptionStreamEventsTypedDict(TypedDict): - event: TranscriptionStreamEventTypes - data: TranscriptionStreamEventsDataTypedDict - - -class TranscriptionStreamEvents(BaseModel): - event: TranscriptionStreamEventTypes - - data: TranscriptionStreamEventsData diff --git a/src/mistralai/models/transcriptionstreameventtypes.py b/src/mistralai/models/transcriptionstreameventtypes.py deleted file mode 100644 index 4a910f0a..00000000 --- 
a/src/mistralai/models/transcriptionstreameventtypes.py +++ /dev/null @@ -1,12 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from typing import Literal - - -TranscriptionStreamEventTypes = Literal[ - "transcription.language", - "transcription.segment", - "transcription.text.delta", - "transcription.done", -] diff --git a/src/mistralai/models/transcriptionstreamlanguage.py b/src/mistralai/models/transcriptionstreamlanguage.py deleted file mode 100644 index 15b75144..00000000 --- a/src/mistralai/models/transcriptionstreamlanguage.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -import pydantic -from pydantic import ConfigDict -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamLanguageType = Literal["transcription.language",] - - -class TranscriptionStreamLanguageTypedDict(TypedDict): - audio_language: str - type: NotRequired[TranscriptionStreamLanguageType] - - -class TranscriptionStreamLanguage(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - audio_language: str - - type: Optional[TranscriptionStreamLanguageType] = "transcription.language" - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/transcriptionstreamsegmentdelta.py b/src/mistralai/models/transcriptionstreamsegmentdelta.py deleted file mode 100644 index 550c83e7..00000000 --- 
a/src/mistralai/models/transcriptionstreamsegmentdelta.py +++ /dev/null @@ -1,77 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import ConfigDict, model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamSegmentDeltaType = Literal["transcription.segment",] - - -class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): - text: str - start: float - end: float - speaker_id: NotRequired[Nullable[str]] - type: NotRequired[TranscriptionStreamSegmentDeltaType] - - -class TranscriptionStreamSegmentDelta(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - text: str - - start: float - - end: float - - speaker_id: OptionalNullable[str] = UNSET - - type: Optional[TranscriptionStreamSegmentDeltaType] = "transcription.segment" - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["speaker_id", "type"] - nullable_fields = ["speaker_id"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != 
UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - for k, v in serialized.items(): - m[k] = v - - return m diff --git a/src/mistralai/models/transcriptionstreamtextdelta.py b/src/mistralai/models/transcriptionstreamtextdelta.py deleted file mode 100644 index daee151f..00000000 --- a/src/mistralai/models/transcriptionstreamtextdelta.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -import pydantic -from pydantic import ConfigDict -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamTextDeltaType = Literal["transcription.text.delta",] - - -class TranscriptionStreamTextDeltaTypedDict(TypedDict): - text: str - type: NotRequired[TranscriptionStreamTextDeltaType] - - -class TranscriptionStreamTextDelta(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - text: str - - type: Optional[TranscriptionStreamTextDeltaType] = "transcription.text.delta" - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/models/unarchiveftmodelout.py b/src/mistralai/models/unarchiveftmodelout.py deleted file mode 100644 index 55c0ea8a..00000000 --- a/src/mistralai/models/unarchiveftmodelout.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -UnarchiveFTModelOutObject = Literal["model",] - - -class UnarchiveFTModelOutTypedDict(TypedDict): - id: str - object: NotRequired[UnarchiveFTModelOutObject] - archived: NotRequired[bool] - - -class UnarchiveFTModelOut(BaseModel): - id: str - - object: Optional[UnarchiveFTModelOutObject] = "model" - - archived: Optional[bool] = False diff --git a/src/mistralai/models/updateftmodelin.py b/src/mistralai/models/updateftmodelin.py deleted file mode 100644 index 1bd0eaf2..00000000 --- a/src/mistralai/models/updateftmodelin.py +++ /dev/null @@ -1,47 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class UpdateFTModelInTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - - -class UpdateFTModelIn(BaseModel): - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "description"] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in 
optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/uploadfileout.py b/src/mistralai/models/uploadfileout.py deleted file mode 100644 index f235fdcd..00000000 --- a/src/mistralai/models/uploadfileout.py +++ /dev/null @@ -1,88 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .filepurpose import FilePurpose -from .sampletype import SampleType -from .source import Source -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import model_serializer -from typing_extensions import Annotated, NotRequired, TypedDict - - -class UploadFileOutTypedDict(TypedDict): - id: str - r"""The unique identifier of the file.""" - object: str - r"""The object type, which is always \"file\".""" - size_bytes: int - r"""The size of the file, in bytes.""" - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - filename: str - r"""The name of the uploaded file.""" - purpose: FilePurpose - sample_type: SampleType - source: Source - num_lines: NotRequired[Nullable[int]] - mimetype: NotRequired[Nullable[str]] - signature: NotRequired[Nullable[str]] - - -class UploadFileOut(BaseModel): - id: str - r"""The unique identifier of the file.""" - - object: str - r"""The object type, which is always \"file\".""" - - size_bytes: Annotated[int, pydantic.Field(alias="bytes")] - r"""The size of the file, in bytes.""" - - created_at: int - r"""The UNIX timestamp (in seconds) of the event.""" - - filename: str - r"""The name of the uploaded file.""" - - purpose: FilePurpose - - sample_type: SampleType - - source: Source - - num_lines: OptionalNullable[int] = UNSET - - mimetype: OptionalNullable[str] = UNSET - - signature: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = 
["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", "signature"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/usageinfo.py b/src/mistralai/models/usageinfo.py deleted file mode 100644 index cedad5c1..00000000 --- a/src/mistralai/models/usageinfo.py +++ /dev/null @@ -1,76 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -import pydantic -from pydantic import ConfigDict, model_serializer -from typing import Any, Dict, Optional -from typing_extensions import NotRequired, TypedDict - - -class UsageInfoTypedDict(TypedDict): - prompt_tokens: NotRequired[int] - completion_tokens: NotRequired[int] - total_tokens: NotRequired[int] - prompt_audio_seconds: NotRequired[Nullable[int]] - - -class UsageInfo(BaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, extra="allow" - ) - __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) - - prompt_tokens: Optional[int] = 0 - - completion_tokens: Optional[int] = 0 - - total_tokens: Optional[int] = 0 - - prompt_audio_seconds: OptionalNullable[int] = UNSET - - @property - def additional_properties(self): - return self.__pydantic_extra__ - - @additional_properties.setter - def additional_properties(self, 
value): - self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "prompt_audio_seconds", - ] - nullable_fields = ["prompt_audio_seconds"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - for k, v in serialized.items(): - m[k] = v - - return m diff --git a/src/mistralai/models/usermessage.py b/src/mistralai/models/usermessage.py deleted file mode 100644 index 61590bed..00000000 --- a/src/mistralai/models/usermessage.py +++ /dev/null @@ -1,60 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai.types import BaseModel, Nullable, UNSET_SENTINEL -from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -UserMessageContentTypedDict = TypeAliasType( - "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] -) - - -UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) - - -UserMessageRole = Literal["user",] - - -class UserMessageTypedDict(TypedDict): - content: Nullable[UserMessageContentTypedDict] - role: NotRequired[UserMessageRole] - - -class UserMessage(BaseModel): - content: Nullable[UserMessageContent] - - role: Optional[UserMessageRole] = "user" - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["role"] - nullable_fields = ["content"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/validationerror.py b/src/mistralai/models/validationerror.py deleted file mode 100644 index e971e016..00000000 --- a/src/mistralai/models/validationerror.py +++ /dev/null @@ -1,26 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import List, Union -from typing_extensions import TypeAliasType, TypedDict - - -LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) - - -Loc = TypeAliasType("Loc", Union[str, int]) - - -class ValidationErrorTypedDict(TypedDict): - loc: List[LocTypedDict] - msg: str - type: str - - -class ValidationError(BaseModel): - loc: List[Loc] - - msg: str - - type: str diff --git a/src/mistralai/models/wandbintegration.py b/src/mistralai/models/wandbintegration.py deleted file mode 100644 index 69053896..00000000 --- a/src/mistralai/models/wandbintegration.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WandbIntegrationType = Literal["wandb",] - - -class WandbIntegrationTypedDict(TypedDict): - project: str - r"""The name of the project that the new run will be created under.""" - api_key: str - r"""The WandB API key to use for authentication.""" - type: NotRequired[WandbIntegrationType] - name: NotRequired[Nullable[str]] - r"""A display name to set for the run. If not set, will use the job ID as the name.""" - run_name: NotRequired[Nullable[str]] - - -class WandbIntegration(BaseModel): - project: str - r"""The name of the project that the new run will be created under.""" - - api_key: str - r"""The WandB API key to use for authentication.""" - - type: Optional[WandbIntegrationType] = "wandb" - - name: OptionalNullable[str] = UNSET - r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" - - run_name: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "name", "run_name"] - nullable_fields = ["name", "run_name"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/wandbintegrationout.py b/src/mistralai/models/wandbintegrationout.py deleted file mode 100644 index f5a9ba80..00000000 --- a/src/mistralai/models/wandbintegrationout.py +++ /dev/null @@ -1,64 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL -from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WandbIntegrationOutType = Literal["wandb",] - - -class WandbIntegrationOutTypedDict(TypedDict): - project: str - r"""The name of the project that the new run will be created under.""" - type: NotRequired[WandbIntegrationOutType] - name: NotRequired[Nullable[str]] - r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" - run_name: NotRequired[Nullable[str]] - url: NotRequired[Nullable[str]] - - -class WandbIntegrationOut(BaseModel): - project: str - r"""The name of the project that the new run will be created under.""" - - type: Optional[WandbIntegrationOutType] = "wandb" - - name: OptionalNullable[str] = UNSET - r"""A display name to set for the run. If not set, will use the job ID as the name.""" - - run_name: OptionalNullable[str] = UNSET - - url: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["type", "name", "run_name", "url"] - nullable_fields = ["name", "run_name", "url"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/models/websearchpremiumtool.py b/src/mistralai/models/websearchpremiumtool.py deleted file mode 100644 index 3bbe753a..00000000 --- a/src/mistralai/models/websearchpremiumtool.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WebSearchPremiumToolType = Literal["web_search_premium",] - - -class WebSearchPremiumToolTypedDict(TypedDict): - type: NotRequired[WebSearchPremiumToolType] - - -class WebSearchPremiumTool(BaseModel): - type: Optional[WebSearchPremiumToolType] = "web_search_premium" diff --git a/src/mistralai/models/websearchtool.py b/src/mistralai/models/websearchtool.py deleted file mode 100644 index eeafecb4..00000000 --- a/src/mistralai/models/websearchtool.py +++ /dev/null @@ -1,17 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WebSearchToolType = Literal["web_search",] - - -class WebSearchToolTypedDict(TypedDict): - type: NotRequired[WebSearchToolType] - - -class WebSearchTool(BaseModel): - type: Optional[WebSearchToolType] = "web_search" diff --git a/src/mistralai/models_.py b/src/mistralai/models_.py deleted file mode 100644 index d44930a0..00000000 --- a/src/mistralai/models_.py +++ /dev/null @@ -1,1063 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Mapping, Optional - - -class Models(BaseSDK): - r"""Model Management API""" - - def list( - self, - *, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ModelList: - r"""List Models - - List all models available to the user. - - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - req = self._build_request( - method="GET", - path="/v1/models", - base_url=base_url, - url_variables=url_variables, - request=None, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="list_models_v1_models_get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ModelList, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def list_async( - self, - *, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = 
None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ModelList: - r"""List Models - - List all models available to the user. - - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - req = self._build_request_async( - method="GET", - path="/v1/models", - base_url=base_url, - url_variables=url_variables, - request=None, - request_body_required=False, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="list_models_v1_models_get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ModelList, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def retrieve( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: - r"""Retrieve Model - - Retrieve information about a model. - - :param model_id: The ID of the model to retrieve. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.RetrieveModelV1ModelsModelIDGetRequest( - model_id=model_id, - ) - - req = self._build_request( - method="GET", - path="/v1/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="retrieve_model_v1_models__model_id__get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - http_res, - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error 
occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def retrieve_async( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: - r"""Retrieve Model - - Retrieve information about a model. - - :param model_id: The ID of the model to retrieve. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.RetrieveModelV1ModelsModelIDGetRequest( - model_id=model_id, - ) - - req = self._build_request_async( - method="GET", - path="/v1/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="retrieve_model_v1_models__model_id__get", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - http_res, - ) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def delete( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteModelOut: - r"""Delete Model - - Delete a fine-tuned model. - - :param model_id: The ID of the model to delete. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.DeleteModelV1ModelsModelIDDeleteRequest( - model_id=model_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", 
"500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="delete_model_v1_models__model_id__delete", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteModelOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def delete_async( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteModelOut: - r"""Delete Model - - Delete a fine-tuned model. - - :param model_id: The ID of the model to delete. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.DeleteModelV1ModelsModelIDDeleteRequest( - model_id=model_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="delete_model_v1_models__model_id__delete", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteModelOut, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, 
http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def update( - self, - *, - model_id: str, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: - r"""Update Fine Tuned Model - - Update a model name or description. - - :param model_id: The ID of the model to update. - :param name: - :param description: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( - model_id=model_id, - update_ft_model_in=models.UpdateFTModelIn( - name=name, - description=description, - ), - ) - - req = self._build_request( - method="PATCH", - path="/v1/fine_tuning/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error 
occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def update_async( - self, - *, - model_id: str, - name: OptionalNullable[str] = UNSET, - description: OptionalNullable[str] = UNSET, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: - r"""Update Fine Tuned Model - - Update a model name or description. - - :param model_id: The ID of the model to update. - :param name: - :param description: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( - model_id=model_id, - update_ft_model_in=models.UpdateFTModelIn( - name=name, - description=description, - ), - ) - - req = self._build_request_async( - method="PATCH", - path="/v1/fine_tuning/models/{model_id}", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def archive( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ArchiveFTModelOut: - r"""Archive Fine Tuned Model - - Archive a fine-tuned model. - - :param model_id: The ID of the model to archive. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( - model_id=model_id, - ) - - req = self._build_request( - method="POST", - path="/v1/fine_tuning/models/{model_id}/archive", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ArchiveFTModelOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", 
http_res) - - async def archive_async( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ArchiveFTModelOut: - r"""Archive Fine Tuned Model - - Archive a fine-tuned model. - - :param model_id: The ID of the model to archive. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( - model_id=model_id, - ) - - req = self._build_request_async( - method="POST", - path="/v1/fine_tuning/models/{model_id}/archive", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", - 
oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ArchiveFTModelOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def unarchive( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UnarchiveFTModelOut: - r"""Unarchive Fine Tuned Model - - Un-archive a fine-tuned model. - - :param model_id: The ID of the model to unarchive. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( - model_id=model_id, - ) - - req = self._build_request( - method="DELETE", - path="/v1/fine_tuning/models/{model_id}/archive", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response 
received", http_res) - - async def unarchive_async( - self, - *, - model_id: str, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UnarchiveFTModelOut: - r"""Unarchive Fine Tuned Model - - Un-archive a fine-tuned model. - - :param model_id: The ID of the model to unarchive. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( - model_id=model_id, - ) - - req = self._build_request_async( - method="DELETE", - path="/v1/fine_tuning/models/{model_id}/archive", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=False, - request_has_path_params=True, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - 
operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/ocr.py b/src/mistralai/ocr.py deleted file mode 100644 index ceb7dd85..00000000 --- a/src/mistralai/ocr.py +++ /dev/null @@ -1,303 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - ocrrequest as models_ocrrequest, - responseformat as models_responseformat, -) -from mistralai.types import Nullable, OptionalNullable, UNSET -from mistralai.utils import get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, List, Mapping, Optional, Union - - -class Ocr(BaseSDK): - r"""OCR API""" - - def process( - self, - *, - model: Nullable[str], - document: Union[ - models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict - ], - id: Optional[str] = None, - pages: OptionalNullable[List[int]] = UNSET, - include_image_base64: OptionalNullable[bool] = UNSET, - image_limit: OptionalNullable[int] = UNSET, - image_min_size: OptionalNullable[int] = UNSET, - bbox_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = UNSET, - document_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = UNSET, - document_annotation_prompt: OptionalNullable[str] = UNSET, - table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, - extract_header: Optional[bool] = None, - extract_footer: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.OCRResponse: - r"""OCR - - :param model: - :param document: Document to run OCR on - :param id: - :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 - :param include_image_base64: Include image URLs in response - :param image_limit: Max images to extract - :param image_min_size: Minimum height and width of image to extract - :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field - :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field - :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. - :param table_format: - :param extract_header: - :param extract_footer: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.OCRRequest( - model=model, - id=id, - document=utils.get_pydantic_model(document, models.Document), - pages=pages, - include_image_base64=include_image_base64, - image_limit=image_limit, - image_min_size=image_min_size, - bbox_annotation_format=utils.get_pydantic_model( - bbox_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_format=utils.get_pydantic_model( - document_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_prompt=document_annotation_prompt, - table_format=table_format, - extract_header=extract_header, - extract_footer=extract_footer, - ) - - req = self._build_request( - method="POST", - path="/v1/ocr", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.OCRRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="ocr_v1_ocr_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - 
error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.OCRResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def process_async( - self, - *, - model: Nullable[str], - document: Union[ - models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict - ], - id: Optional[str] = None, - pages: OptionalNullable[List[int]] = UNSET, - include_image_base64: OptionalNullable[bool] = UNSET, - image_limit: OptionalNullable[int] = UNSET, - image_min_size: OptionalNullable[int] = UNSET, - bbox_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = UNSET, - document_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] - ] = UNSET, - document_annotation_prompt: OptionalNullable[str] = UNSET, - table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, - extract_header: Optional[bool] = None, - extract_footer: Optional[bool] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.OCRResponse: - r"""OCR - - :param 
model: - :param document: Document to run OCR on - :param id: - :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 - :param include_image_base64: Include image URLs in response - :param image_limit: Max images to extract - :param image_min_size: Minimum height and width of image to extract - :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field - :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field - :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. - :param table_format: - :param extract_header: - :param extract_footer: - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. 
- """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.OCRRequest( - model=model, - id=id, - document=utils.get_pydantic_model(document, models.Document), - pages=pages, - include_image_base64=include_image_base64, - image_limit=image_limit, - image_min_size=image_min_size, - bbox_annotation_format=utils.get_pydantic_model( - bbox_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_format=utils.get_pydantic_model( - document_annotation_format, OptionalNullable[models.ResponseFormat] - ), - document_annotation_prompt=document_annotation_prompt, - table_format=table_format, - extract_header=extract_header, - extract_footer=extract_footer, - ) - - req = self._build_request_async( - method="POST", - path="/v1/ocr", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.OCRRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="ocr_v1_ocr_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - 
request=req, - error_status_codes=["422", "4XX", "5XX"], - retry_config=retry_config, - ) - - response_data: Any = None - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.OCRResponse, http_res) - if utils.match_response(http_res, "422", "application/json"): - response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res - ) - raise models.HTTPValidationError(response_data, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/py.typed b/src/mistralai/py.typed deleted file mode 100644 index 3e38f1a9..00000000 --- a/src/mistralai/py.typed +++ /dev/null @@ -1 +0,0 @@ -# Marker file for PEP 561. The package enables type hints. diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py deleted file mode 100644 index c83b53e0..00000000 --- a/src/mistralai/sdk.py +++ /dev/null @@ -1,222 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients -from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, get_default_logger -from .utils.retries import RetryConfig -import httpx -import importlib -from mistralai import models, utils -from mistralai._hooks import SDKHooks -from mistralai.types import OptionalNullable, UNSET -import sys -from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast -import weakref - -if TYPE_CHECKING: - from mistralai.agents import Agents - from mistralai.audio import Audio - from mistralai.batch import Batch - from mistralai.beta import Beta - from mistralai.chat import Chat - from mistralai.classifiers import Classifiers - from mistralai.embeddings import Embeddings - from mistralai.files import Files - from mistralai.fim import Fim - from mistralai.fine_tuning import FineTuning - from mistralai.models_ import Models - from mistralai.ocr import Ocr - - -class Mistral(BaseSDK): - r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" - - models: "Models" - r"""Model Management API""" - beta: "Beta" - files: "Files" - r"""Files API""" - fine_tuning: "FineTuning" - batch: "Batch" - chat: "Chat" - r"""Chat Completion API.""" - fim: "Fim" - r"""Fill-in-the-middle API.""" - agents: "Agents" - r"""Agents API.""" - embeddings: "Embeddings" - r"""Embeddings API.""" - classifiers: "Classifiers" - r"""Classifiers API.""" - ocr: "Ocr" - r"""OCR API""" - audio: "Audio" - _sub_sdk_map = { - "models": ("mistralai.models_", "Models"), - "beta": ("mistralai.beta", "Beta"), - "files": ("mistralai.files", "Files"), - "fine_tuning": ("mistralai.fine_tuning", "FineTuning"), - "batch": ("mistralai.batch", "Batch"), - "chat": ("mistralai.chat", "Chat"), - "fim": ("mistralai.fim", "Fim"), - "agents": ("mistralai.agents", "Agents"), - "embeddings": ("mistralai.embeddings", "Embeddings"), - "classifiers": ("mistralai.classifiers", "Classifiers"), - "ocr": ("mistralai.ocr", "Ocr"), - "audio": ("mistralai.audio", "Audio"), - } - - def __init__( - self, - api_key: Optional[Union[Optional[str], Callable[[], Optional[str]]]] = None, - server: Optional[str] = None, - server_url: Optional[str] = None, - url_params: Optional[Dict[str, str]] = None, - client: Optional[HttpClient] = None, - async_client: Optional[AsyncHttpClient] = None, - retry_config: OptionalNullable[RetryConfig] = UNSET, - timeout_ms: Optional[int] = None, - debug_logger: Optional[Logger] = None, - ) -> None: - r"""Instantiates the SDK configuring it with the provided parameters. 
- - :param api_key: The api_key required for authentication - :param server: The server by name to use for all methods - :param server_url: The server URL to use for all methods - :param url_params: Parameters to optionally template the server URL with - :param client: The HTTP client to use for all synchronous methods - :param async_client: The Async HTTP client to use for all asynchronous methods - :param retry_config: The retry configuration to use for all supported methods - :param timeout_ms: Optional request timeout applied to each operation in milliseconds - """ - client_supplied = True - if client is None: - client = httpx.Client(follow_redirects=True) - client_supplied = False - - assert issubclass( - type(client), HttpClient - ), "The provided client must implement the HttpClient protocol." - - async_client_supplied = True - if async_client is None: - async_client = httpx.AsyncClient(follow_redirects=True) - async_client_supplied = False - - if debug_logger is None: - debug_logger = get_default_logger() - - assert issubclass( - type(async_client), AsyncHttpClient - ), "The provided async_client must implement the AsyncHttpClient protocol." 
- - security: Any = None - if callable(api_key): - # pylint: disable=unnecessary-lambda-assignment - security = lambda: models.Security(api_key=api_key()) - else: - security = models.Security(api_key=api_key) - - if server_url is not None: - if url_params is not None: - server_url = utils.template_url(server_url, url_params) - - BaseSDK.__init__( - self, - SDKConfiguration( - client=client, - client_supplied=client_supplied, - async_client=async_client, - async_client_supplied=async_client_supplied, - security=security, - server_url=server_url, - server=server, - retry_config=retry_config, - timeout_ms=timeout_ms, - debug_logger=debug_logger, - ), - parent_ref=self, - ) - - hooks = SDKHooks() - - # pylint: disable=protected-access - self.sdk_configuration.__dict__["_hooks"] = hooks - - current_server_url, *_ = self.sdk_configuration.get_server_details() - server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client - ) - if current_server_url != server_url: - self.sdk_configuration.server_url = server_url - - weakref.finalize( - self, - close_clients, - cast(ClientOwner, self.sdk_configuration), - self.sdk_configuration.client, - self.sdk_configuration.client_supplied, - self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, - ) - - def dynamic_import(self, modname, retries=3): - for attempt in range(retries): - try: - return importlib.import_module(modname) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - def __getattr__(self, name: str): - if name in self._sub_sdk_map: - module_path, class_name = self._sub_sdk_map[name] - try: - module = self.dynamic_import(module_path) - klass = getattr(module, class_name) - instance = klass(self.sdk_configuration, parent_ref=self) - setattr(self, name, instance) - return instance - except ImportError 
as e: - raise AttributeError( - f"Failed to import module {module_path} for attribute {name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" - ) from e - - raise AttributeError( - f"'{type(self).__name__}' object has no attribute '{name}'" - ) - - def __dir__(self): - default_attrs = list(super().__dir__()) - lazy_attrs = list(self._sub_sdk_map.keys()) - return sorted(list(set(default_attrs + lazy_attrs))) - - def __enter__(self): - return self - - async def __aenter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): - self.sdk_configuration.client.close() - self.sdk_configuration.client = None - - async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): - await self.sdk_configuration.async_client.aclose() - self.sdk_configuration.async_client = None diff --git a/src/mistralai/sdkconfiguration.py b/src/mistralai/sdkconfiguration.py deleted file mode 100644 index 7e77925d..00000000 --- a/src/mistralai/sdkconfiguration.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from ._version import ( - __gen_version__, - __openapi_doc_version__, - __user_agent__, - __version__, -) -from .httpclient import AsyncHttpClient, HttpClient -from .utils import Logger, RetryConfig, remove_suffix -from dataclasses import dataclass -from mistralai import models -from mistralai.types import OptionalNullable, UNSET -from pydantic import Field -from typing import Callable, Dict, Optional, Tuple, Union - - -SERVER_EU = "eu" -r"""EU Production server""" -SERVERS = { - SERVER_EU: "https://api.mistral.ai", -} -"""Contains the list of servers available to the SDK""" - - -@dataclass -class SDKConfiguration: - client: Union[HttpClient, None] - client_supplied: bool - async_client: Union[AsyncHttpClient, None] - async_client_supplied: bool - debug_logger: Logger - security: Optional[Union[models.Security, Callable[[], models.Security]]] = None - server_url: Optional[str] = "" - server: Optional[str] = "" - language: str = "python" - openapi_doc_version: str = __openapi_doc_version__ - sdk_version: str = __version__ - gen_version: str = __gen_version__ - user_agent: str = __user_agent__ - retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) - timeout_ms: Optional[int] = None - - def get_server_details(self) -> Tuple[str, Dict[str, str]]: - if self.server_url is not None and self.server_url: - return remove_suffix(self.server_url, "/"), {} - if not self.server: - self.server = SERVER_EU - - if self.server not in SERVERS: - raise ValueError(f'Invalid server "{self.server}"') - - return SERVERS[self.server], {} diff --git a/src/mistralai/transcriptions.py b/src/mistralai/transcriptions.py deleted file mode 100644 index 90f2e58a..00000000 --- a/src/mistralai/transcriptions.py +++ /dev/null @@ -1,481 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from .basesdk import BaseSDK -from mistralai import models, utils -from mistralai._hooks import HookContext -from mistralai.models import ( - file as models_file, - timestampgranularity as models_timestampgranularity, -) -from mistralai.types import OptionalNullable, UNSET -from mistralai.utils import eventstreaming, get_security_from_env -from mistralai.utils.unmarshal_json_response import unmarshal_json_response -from typing import List, Mapping, Optional, Union - - -class Transcriptions(BaseSDK): - r"""API for audio transcription.""" - - def complete( - self, - *, - model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, - file_url: OptionalNullable[str] = UNSET, - file_id: OptionalNullable[str] = UNSET, - language: OptionalNullable[str] = UNSET, - temperature: OptionalNullable[float] = UNSET, - diarize: Optional[bool] = False, - context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.TranscriptionResponse: - r"""Create Transcription - - :param model: ID of the model to be used. - :param file: - :param file_url: Url of a file to be transcribed - :param file_id: ID of a file uploaded to /v1/files - :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. - :param temperature: - :param diarize: - :param context_bias: - :param timestamp_granularities: Granularities of timestamps to include in the response. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AudioTranscriptionRequest( - model=model, - file=utils.get_pydantic_model(file, Optional[models.File]), - file_url=file_url, - file_id=file_id, - language=language, - temperature=temperature, - diarize=diarize, - context_bias=context_bias, - timestamp_granularities=timestamp_granularities, - ) - - req = self._build_request( - method="POST", - path="/v1/audio/transcriptions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "multipart", models.AudioTranscriptionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="audio_api_v1_transcriptions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, 
models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.TranscriptionResponse, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - async def complete_async( - self, - *, - model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, - file_url: OptionalNullable[str] = UNSET, - file_id: OptionalNullable[str] = UNSET, - language: OptionalNullable[str] = UNSET, - temperature: OptionalNullable[float] = UNSET, - diarize: Optional[bool] = False, - context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> models.TranscriptionResponse: - r"""Create Transcription - - :param model: ID of the model to be used. - :param file: - :param file_url: Url of a file to be transcribed - :param file_id: ID of a file uploaded to /v1/files - :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. - :param temperature: - :param diarize: - :param context_bias: - :param timestamp_granularities: Granularities of timestamps to include in the response. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AudioTranscriptionRequest( - model=model, - file=utils.get_pydantic_model(file, Optional[models.File]), - file_url=file_url, - file_id=file_id, - language=language, - temperature=temperature, - diarize=diarize, - context_bias=context_bias, - timestamp_granularities=timestamp_granularities, - ) - - req = self._build_request_async( - method="POST", - path="/v1/audio/transcriptions", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="application/json", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "multipart", models.AudioTranscriptionRequest - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="audio_api_v1_transcriptions_post", - oauth2_scopes=None, - security_source=get_security_from_env( - 
self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.TranscriptionResponse, http_res) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - raise models.SDKError("Unexpected response received", http_res) - - def stream( - self, - *, - model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, - file_url: OptionalNullable[str] = UNSET, - file_id: OptionalNullable[str] = UNSET, - language: OptionalNullable[str] = UNSET, - temperature: OptionalNullable[float] = UNSET, - diarize: Optional[bool] = False, - context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStream[models.TranscriptionStreamEvents]: - r"""Create Streaming Transcription (SSE) - - :param model: - :param file: - :param file_url: Url of a file to be transcribed - :param file_id: ID of a file uploaded to /v1/files - :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. - :param temperature: - :param diarize: - :param context_bias: - :param timestamp_granularities: Granularities of timestamps to include in the response. 
- :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AudioTranscriptionRequestStream( - model=model, - file=utils.get_pydantic_model(file, Optional[models.File]), - file_url=file_url, - file_id=file_id, - language=language, - temperature=temperature, - diarize=diarize, - context_bias=context_bias, - timestamp_granularities=timestamp_granularities, - ) - - req = self._build_request( - method="POST", - path="/v1/audio/transcriptions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.AudioTranscriptionRequestStream, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = self.do_request( - hook_ctx=HookContext( - config=self.sdk_configuration, - base_url=base_url or "", - operation_id="audio_api_v1_transcriptions_post_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - 
self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStream( - http_res, - lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), - client_ref=self, - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) - - async def stream_async( - self, - *, - model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, - file_url: OptionalNullable[str] = UNSET, - file_id: OptionalNullable[str] = UNSET, - language: OptionalNullable[str] = UNSET, - temperature: OptionalNullable[float] = UNSET, - diarize: Optional[bool] = False, - context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, - retries: OptionalNullable[utils.RetryConfig] = UNSET, - server_url: Optional[str] = None, - timeout_ms: Optional[int] = None, - http_headers: Optional[Mapping[str, str]] = None, - ) -> eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]: - r"""Create Streaming Transcription (SSE) - - :param model: - :param file: - :param file_url: Url of a file to be transcribed - :param file_id: ID of a file uploaded to /v1/files - :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. 
- :param temperature: - :param diarize: - :param context_bias: - :param timestamp_granularities: Granularities of timestamps to include in the response. - :param retries: Override the default retry configuration for this method - :param server_url: Override the default server URL for this method - :param timeout_ms: Override the default request timeout configuration for this method in milliseconds - :param http_headers: Additional headers to set or replace on requests. - """ - base_url = None - url_variables = None - if timeout_ms is None: - timeout_ms = self.sdk_configuration.timeout_ms - - if server_url is not None: - base_url = server_url - else: - base_url = self._get_url(base_url, url_variables) - - request = models.AudioTranscriptionRequestStream( - model=model, - file=utils.get_pydantic_model(file, Optional[models.File]), - file_url=file_url, - file_id=file_id, - language=language, - temperature=temperature, - diarize=diarize, - context_bias=context_bias, - timestamp_granularities=timestamp_granularities, - ) - - req = self._build_request_async( - method="POST", - path="/v1/audio/transcriptions#stream", - base_url=base_url, - url_variables=url_variables, - request=request, - request_body_required=True, - request_has_path_params=False, - request_has_query_params=True, - user_agent_header="user-agent", - accept_header_value="text/event-stream", - http_headers=http_headers, - security=self.sdk_configuration.security, - get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.AudioTranscriptionRequestStream, - ), - allow_empty_value=None, - timeout_ms=timeout_ms, - ) - - if retries == UNSET: - if self.sdk_configuration.retry_config is not UNSET: - retries = self.sdk_configuration.retry_config - - retry_config = None - if isinstance(retries, utils.RetryConfig): - retry_config = (retries, ["429", "500", "502", "503", "504"]) - - http_res = await self.do_request_async( - hook_ctx=HookContext( - 
config=self.sdk_configuration, - base_url=base_url or "", - operation_id="audio_api_v1_transcriptions_post_stream", - oauth2_scopes=None, - security_source=get_security_from_env( - self.sdk_configuration.security, models.Security - ), - ), - request=req, - error_status_codes=["4XX", "5XX"], - stream=True, - retry_config=retry_config, - ) - - if utils.match_response(http_res, "200", "text/event-stream"): - return eventstreaming.EventStreamAsync( - http_res, - lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), - client_ref=self, - ) - if utils.match_response(http_res, "4XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - if utils.match_response(http_res, "5XX", "*"): - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) - - http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/types/__init__.py b/src/mistralai/types/__init__.py deleted file mode 100644 index fc76fe0c..00000000 --- a/src/mistralai/types/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from .basemodel import ( - BaseModel, - Nullable, - OptionalNullable, - UnrecognizedInt, - UnrecognizedStr, - UNSET, - UNSET_SENTINEL, -) - -__all__ = [ - "BaseModel", - "Nullable", - "OptionalNullable", - "UnrecognizedInt", - "UnrecognizedStr", - "UNSET", - "UNSET_SENTINEL", -] diff --git a/src/mistralai/types/basemodel.py b/src/mistralai/types/basemodel.py deleted file mode 100644 index a9a640a1..00000000 --- a/src/mistralai/types/basemodel.py +++ /dev/null @@ -1,77 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from pydantic import ConfigDict, model_serializer -from pydantic import BaseModel as PydanticBaseModel -from pydantic_core import core_schema -from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union -from typing_extensions import TypeAliasType, TypeAlias - - -class BaseModel(PydanticBaseModel): - model_config = ConfigDict( - populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() - ) - - -class Unset(BaseModel): - @model_serializer(mode="plain") - def serialize_model(self): - return UNSET_SENTINEL - - def __bool__(self) -> Literal[False]: - return False - - -UNSET = Unset() -UNSET_SENTINEL = "~?~unset~?~sentinel~?~" - - -T = TypeVar("T") -if TYPE_CHECKING: - Nullable: TypeAlias = Union[T, None] - OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] -else: - Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) - OptionalNullable = TypeAliasType( - "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) - ) - - -class UnrecognizedStr(str): - @classmethod - def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: - # Make UnrecognizedStr only work in lax mode, not strict mode - # This makes it a "fallback" option when more specific types (like Literals) don't match - def validate_lax(v: Any) -> 'UnrecognizedStr': - if isinstance(v, cls): - return v - return cls(str(v)) - - # Use lax_or_strict_schema where strict always fails - # This forces Pydantic to prefer other union members in strict mode - # and only fall back to UnrecognizedStr in lax mode - return core_schema.lax_or_strict_schema( - lax_schema=core_schema.chain_schema([ - core_schema.str_schema(), - core_schema.no_info_plain_validator_function(validate_lax) - ]), - strict_schema=core_schema.none_schema(), # Always fails in strict mode - ) - - -class UnrecognizedInt(int): - @classmethod - def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> 
core_schema.CoreSchema: - # Make UnrecognizedInt only work in lax mode, not strict mode - # This makes it a "fallback" option when more specific types (like Literals) don't match - def validate_lax(v: Any) -> 'UnrecognizedInt': - if isinstance(v, cls): - return v - return cls(int(v)) - return core_schema.lax_or_strict_schema( - lax_schema=core_schema.chain_schema([ - core_schema.int_schema(), - core_schema.no_info_plain_validator_function(validate_lax) - ]), - strict_schema=core_schema.none_schema(), # Always fails in strict mode - ) diff --git a/src/mistralai/utils/__init__.py b/src/mistralai/utils/__init__.py deleted file mode 100644 index f9c2edce..00000000 --- a/src/mistralai/utils/__init__.py +++ /dev/null @@ -1,197 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys - -if TYPE_CHECKING: - from .annotations import get_discriminator - from .datetimes import parse_datetime - from .enums import OpenEnumMeta - from .headers import get_headers, get_response_headers - from .metadata import ( - FieldMetadata, - find_metadata, - FormMetadata, - HeaderMetadata, - MultipartFormMetadata, - PathParamMetadata, - QueryParamMetadata, - RequestMetadata, - SecurityMetadata, - ) - from .queryparams import get_query_params - from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig - from .requestbodies import serialize_request_body, SerializedRequestBody - from .security import get_security, get_security_from_env - - from .serializers import ( - get_pydantic_model, - marshal_json, - unmarshal, - unmarshal_json, - serialize_decimal, - serialize_float, - serialize_int, - stream_to_text, - stream_to_text_async, - stream_to_bytes, - stream_to_bytes_async, - validate_const, - validate_decimal, - validate_float, - validate_int, - ) - from .url import generate_url, template_url, remove_suffix 
- from .values import ( - get_global_from_env, - match_content_type, - match_status_codes, - match_response, - cast_partial, - ) - from .logger import Logger, get_body_content, get_default_logger - -__all__ = [ - "BackoffStrategy", - "FieldMetadata", - "find_metadata", - "FormMetadata", - "generate_url", - "get_body_content", - "get_default_logger", - "get_discriminator", - "parse_datetime", - "get_global_from_env", - "get_headers", - "get_pydantic_model", - "get_query_params", - "get_response_headers", - "get_security", - "get_security_from_env", - "HeaderMetadata", - "Logger", - "marshal_json", - "match_content_type", - "match_status_codes", - "match_response", - "MultipartFormMetadata", - "OpenEnumMeta", - "PathParamMetadata", - "QueryParamMetadata", - "remove_suffix", - "Retries", - "retry", - "retry_async", - "RetryConfig", - "RequestMetadata", - "SecurityMetadata", - "serialize_decimal", - "serialize_float", - "serialize_int", - "serialize_request_body", - "SerializedRequestBody", - "stream_to_text", - "stream_to_text_async", - "stream_to_bytes", - "stream_to_bytes_async", - "template_url", - "unmarshal", - "unmarshal_json", - "validate_decimal", - "validate_const", - "validate_float", - "validate_int", - "cast_partial", -] - -_dynamic_imports: dict[str, str] = { - "BackoffStrategy": ".retries", - "FieldMetadata": ".metadata", - "find_metadata": ".metadata", - "FormMetadata": ".metadata", - "generate_url": ".url", - "get_body_content": ".logger", - "get_default_logger": ".logger", - "get_discriminator": ".annotations", - "parse_datetime": ".datetimes", - "get_global_from_env": ".values", - "get_headers": ".headers", - "get_pydantic_model": ".serializers", - "get_query_params": ".queryparams", - "get_response_headers": ".headers", - "get_security": ".security", - "get_security_from_env": ".security", - "HeaderMetadata": ".metadata", - "Logger": ".logger", - "marshal_json": ".serializers", - "match_content_type": ".values", - "match_status_codes": ".values", - 
"match_response": ".values", - "MultipartFormMetadata": ".metadata", - "OpenEnumMeta": ".enums", - "PathParamMetadata": ".metadata", - "QueryParamMetadata": ".metadata", - "remove_suffix": ".url", - "Retries": ".retries", - "retry": ".retries", - "retry_async": ".retries", - "RetryConfig": ".retries", - "RequestMetadata": ".metadata", - "SecurityMetadata": ".metadata", - "serialize_decimal": ".serializers", - "serialize_float": ".serializers", - "serialize_int": ".serializers", - "serialize_request_body": ".requestbodies", - "SerializedRequestBody": ".requestbodies", - "stream_to_text": ".serializers", - "stream_to_text_async": ".serializers", - "stream_to_bytes": ".serializers", - "stream_to_bytes_async": ".serializers", - "template_url": ".url", - "unmarshal": ".serializers", - "unmarshal_json": ".serializers", - "validate_decimal": ".serializers", - "validate_const": ".serializers", - "validate_float": ".serializers", - "validate_int": ".serializers", - "cast_partial": ".values", -} - - -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - return getattr(module, attr_name) - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e - - -def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return 
builtins.sorted(lazy_attrs) diff --git a/src/mistralai/utils/annotations.py b/src/mistralai/utils/annotations.py deleted file mode 100644 index 12e0aa4f..00000000 --- a/src/mistralai/utils/annotations.py +++ /dev/null @@ -1,79 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from enum import Enum -from typing import Any, Optional - - -def get_discriminator(model: Any, fieldname: str, key: str) -> str: - """ - Recursively search for the discriminator attribute in a model. - - Args: - model (Any): The model to search within. - fieldname (str): The name of the field to search for. - key (str): The key to search for in dictionaries. - - Returns: - str: The name of the discriminator attribute. - - Raises: - ValueError: If the discriminator attribute is not found. - """ - upper_fieldname = fieldname.upper() - - def get_field_discriminator(field: Any) -> Optional[str]: - """Search for the discriminator attribute in a given field.""" - - if isinstance(field, dict): - if key in field: - return f"{field[key]}" - - if hasattr(field, fieldname): - attr = getattr(field, fieldname) - if isinstance(attr, Enum): - return f"{attr.value}" - return f"{attr}" - - if hasattr(field, upper_fieldname): - attr = getattr(field, upper_fieldname) - if isinstance(attr, Enum): - return f"{attr.value}" - return f"{attr}" - - return None - - def search_nested_discriminator(obj: Any) -> Optional[str]: - """Recursively search for discriminator in nested structures.""" - # First try direct field lookup - discriminator = get_field_discriminator(obj) - if discriminator is not None: - return discriminator - - # If it's a dict, search in nested values - if isinstance(obj, dict): - for value in obj.values(): - if isinstance(value, list): - # Search in list items - for item in value: - nested_discriminator = search_nested_discriminator(item) - if nested_discriminator is not None: - return nested_discriminator - elif 
isinstance(value, dict): - # Search in nested dict - nested_discriminator = search_nested_discriminator(value) - if nested_discriminator is not None: - return nested_discriminator - - return None - - if isinstance(model, list): - for field in model: - discriminator = search_nested_discriminator(field) - if discriminator is not None: - return discriminator - - discriminator = search_nested_discriminator(model) - if discriminator is not None: - return discriminator - - raise ValueError(f"Could not find discriminator field {fieldname} in {model}") diff --git a/src/mistralai/utils/datetimes.py b/src/mistralai/utils/datetimes.py deleted file mode 100644 index a6c52cd6..00000000 --- a/src/mistralai/utils/datetimes.py +++ /dev/null @@ -1,23 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from datetime import datetime -import sys - - -def parse_datetime(datetime_string: str) -> datetime: - """ - Convert a RFC 3339 / ISO 8601 formatted string into a datetime object. - Python versions 3.11 and later support parsing RFC 3339 directly with - datetime.fromisoformat(), but for earlier versions, this function - encapsulates the necessary extra logic. - """ - # Python 3.11 and later can parse RFC 3339 directly - if sys.version_info >= (3, 11): - return datetime.fromisoformat(datetime_string) - - # For Python 3.10 and earlier, a common ValueError is trailing 'Z' suffix, - # so fix that upfront. - if datetime_string.endswith("Z"): - datetime_string = datetime_string[:-1] + "+00:00" - - return datetime.fromisoformat(datetime_string) diff --git a/src/mistralai/utils/enums.py b/src/mistralai/utils/enums.py deleted file mode 100644 index 3324e1bc..00000000 --- a/src/mistralai/utils/enums.py +++ /dev/null @@ -1,134 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import enum -import sys -from typing import Any - -from pydantic_core import core_schema - - -class OpenEnumMeta(enum.EnumMeta): - # The __call__ method `boundary` kwarg was added in 3.11 and must be present - # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 - # pylint: disable=unexpected-keyword-arg - # The __call__ method `values` varg must be named for pyright. - # pylint: disable=keyword-arg-before-vararg - - if sys.version_info >= (3, 11): - def __call__( - cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. - # pylint: disable=redefined-builtin - - if names is not None: - return super().__call__( - value, - names=names, - *values, - module=module, - qualname=qualname, - type=type, - start=start, - boundary=boundary, - ) - - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - *values, - module=module, - qualname=qualname, - type=type, - start=start, - boundary=boundary, - ) - except ValueError: - return value - else: - def __call__( - cls, value, names=None, *, module=None, qualname=None, type=None, start=1 - ): - # The `type` kwarg also happens to be a built-in that pylint flags as - # redeclared. Safe to ignore this lint rule with this scope. 
- # pylint: disable=redefined-builtin - - if names is not None: - return super().__call__( - value, - names=names, - module=module, - qualname=qualname, - type=type, - start=start, - ) - - try: - return super().__call__( - value, - names=names, # pyright: ignore[reportArgumentType] - module=module, - qualname=qualname, - type=type, - start=start, - ) - except ValueError: - return value - - def __new__(mcs, name, bases, namespace, **kwargs): - cls = super().__new__(mcs, name, bases, namespace, **kwargs) - - # Add __get_pydantic_core_schema__ to make open enums work correctly - # in union discrimination. In strict mode (used by Pydantic for unions), - # only known enum values match. In lax mode, unknown values are accepted. - def __get_pydantic_core_schema__( - cls_inner: Any, _source_type: Any, _handler: Any - ) -> core_schema.CoreSchema: - # Create a validator that only accepts known enum values (for strict mode) - def validate_strict(v: Any) -> Any: - if isinstance(v, cls_inner): - return v - # Use the parent EnumMeta's __call__ which raises ValueError for unknown values - return enum.EnumMeta.__call__(cls_inner, v) - - # Create a lax validator that accepts unknown values - def validate_lax(v: Any) -> Any: - if isinstance(v, cls_inner): - return v - try: - return enum.EnumMeta.__call__(cls_inner, v) - except ValueError: - # Return the raw value for unknown enum values - return v - - # Determine the base type schema (str or int) - is_int_enum = False - for base in cls_inner.__mro__: - if base is int: - is_int_enum = True - break - if base is str: - break - - base_schema = ( - core_schema.int_schema() - if is_int_enum - else core_schema.str_schema() - ) - - # Use lax_or_strict_schema: - # - strict mode: only known enum values match (raises ValueError for unknown) - # - lax mode: accept any value, return enum member or raw value - return core_schema.lax_or_strict_schema( - lax_schema=core_schema.chain_schema( - [base_schema, 
core_schema.no_info_plain_validator_function(validate_lax)] - ), - strict_schema=core_schema.chain_schema( - [base_schema, core_schema.no_info_plain_validator_function(validate_strict)] - ), - ) - - setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__)) - return cls diff --git a/src/mistralai/utils/eventstreaming.py b/src/mistralai/utils/eventstreaming.py deleted file mode 100644 index 0969899b..00000000 --- a/src/mistralai/utils/eventstreaming.py +++ /dev/null @@ -1,248 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import re -import json -from typing import ( - Callable, - Generic, - TypeVar, - Optional, - Generator, - AsyncGenerator, - Tuple, -) -import httpx - -T = TypeVar("T") - - -class EventStream(Generic[T]): - # Holds a reference to the SDK client to avoid it being garbage collected - # and cause termination of the underlying httpx client. - client_ref: Optional[object] - response: httpx.Response - generator: Generator[T, None, None] - - def __init__( - self, - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, - client_ref: Optional[object] = None, - ): - self.response = response - self.generator = stream_events(response, decoder, sentinel) - self.client_ref = client_ref - - def __iter__(self): - return self - - def __next__(self): - return next(self.generator) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.response.close() - - -class EventStreamAsync(Generic[T]): - # Holds a reference to the SDK client to avoid it being garbage collected - # and cause termination of the underlying httpx client. 
- client_ref: Optional[object] - response: httpx.Response - generator: AsyncGenerator[T, None] - - def __init__( - self, - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, - client_ref: Optional[object] = None, - ): - self.response = response - self.generator = stream_events_async(response, decoder, sentinel) - self.client_ref = client_ref - - def __aiter__(self): - return self - - async def __anext__(self): - return await self.generator.__anext__() - - async def __aenter__(self): - return self - - async def __aexit__(self, exc_type, exc_val, exc_tb): - await self.response.aclose() - - -class ServerEvent: - id: Optional[str] = None - event: Optional[str] = None - data: Optional[str] = None - retry: Optional[int] = None - - -MESSAGE_BOUNDARIES = [ - b"\r\n\r\n", - b"\n\n", - b"\r\r", -] - - -async def stream_events_async( - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, -) -> AsyncGenerator[T, None]: - buffer = bytearray() - position = 0 - discard = False - async for chunk in response.aiter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - - buffer += chunk - for i in range(position, len(buffer)): - char = buffer[i : i + 1] - seq: Optional[bytes] = None - if char in [b"\r", b"\n"]: - for boundary in MESSAGE_BOUNDARIES: - seq = _peek_sequence(i, buffer, boundary) - if seq is not None: - break - if seq is None: - continue - - block = buffer[position:i] - position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) - if event is not None: - yield event - - if position > 0: - buffer = buffer[position:] - position = 0 - - event, discard = _parse_event(buffer, decoder, sentinel) - if event is not None: - yield event - - -def stream_events( - response: httpx.Response, - decoder: Callable[[str], T], - sentinel: Optional[str] = None, -) -> Generator[T, None, None]: - buffer = bytearray() - position = 0 - discard = False - for chunk in response.iter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - - buffer += chunk - for i in range(position, len(buffer)): - char = buffer[i : i + 1] - seq: Optional[bytes] = None - if char in [b"\r", b"\n"]: - for boundary in MESSAGE_BOUNDARIES: - seq = _peek_sequence(i, buffer, boundary) - if seq is not None: - break - if seq is None: - continue - - block = buffer[position:i] - position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) - if event is not None: - yield event - - if position > 0: - buffer = buffer[position:] - position = 0 - - event, discard = _parse_event(buffer, decoder, sentinel) - if event is not None: - yield event - - -def _parse_event( - raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None -) -> Tuple[Optional[T], bool]: - block = raw.decode() - lines = re.split(r"\r?\n|\r", block) - publish = False - event = ServerEvent() - data = "" - for line in lines: - if not line: - continue - - delim = line.find(":") - if delim <= 0: - continue - - field = line[0:delim] - value = line[delim + 1 :] if delim < len(line) - 1 else "" - if len(value) and value[0] == " ": - value = value[1:] - - if field == "event": - event.event = value - publish = True - elif field == "data": - data += value + "\n" - publish = True - elif field == "id": - event.id = value - publish = True - elif field == "retry": - event.retry = int(value) if value.isdigit() else None - publish = True - - if sentinel and data == f"{sentinel}\n": - return None, True - - if data: - data = data[:-1] - event.data = data - - data_is_primitive = ( - data.isnumeric() or data == "true" or data == "false" or data == "null" - ) - data_is_json = ( - data.startswith("{") or data.startswith("[") or data.startswith('"') - ) - - if data_is_primitive or data_is_json: - try: - event.data = json.loads(data) - except Exception: - pass - - out = None - if publish: - out = decoder(json.dumps(event.__dict__)) - - return out, False - - -def _peek_sequence(position: int, buffer: bytearray, sequence: 
bytes): - if len(sequence) > (len(buffer) - position): - return None - - for i, seq in enumerate(sequence): - if buffer[position + i] != seq: - return None - - return sequence diff --git a/src/mistralai/utils/forms.py b/src/mistralai/utils/forms.py deleted file mode 100644 index f961e76b..00000000 --- a/src/mistralai/utils/forms.py +++ /dev/null @@ -1,234 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - get_type_hints, - List, - Tuple, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .serializers import marshal_json - -from .metadata import ( - FormMetadata, - MultipartFormMetadata, - find_field_metadata, -) -from .values import _is_set, _val_to_string - - -def _populate_form( - field_name: str, - explode: bool, - obj: Any, - delimiter: str, - form: Dict[str, List[str]], -): - if not _is_set(obj): - return form - - if isinstance(obj, BaseModel): - items = [] - - obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - obj_field_name = obj_field.alias if obj_field.alias is not None else name - if obj_field_name == "": - continue - - val = getattr(obj, name) - if not _is_set(val): - continue - - if explode: - form[obj_field_name] = [_val_to_string(val)] - else: - items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") - - if len(items) > 0: - form[field_name] = [delimiter.join(items)] - elif isinstance(obj, Dict): - items = [] - for key, value in obj.items(): - if not _is_set(value): - continue - - if explode: - form[key] = [_val_to_string(value)] - else: - items.append(f"{key}{delimiter}{_val_to_string(value)}") - - if len(items) > 0: - form[field_name] = [delimiter.join(items)] - elif isinstance(obj, List): - items = [] - - for value in obj: - if not _is_set(value): - continue - - if explode: - if not field_name in form: - form[field_name] = [] - 
form[field_name].append(_val_to_string(value)) - else: - items.append(_val_to_string(value)) - - if len(items) > 0: - form[field_name] = [delimiter.join([str(item) for item in items])] - else: - form[field_name] = [_val_to_string(obj)] - - return form - - -def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: - """Extract file name, content, and content type from a file object.""" - file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields - - file_name = "" - content = None - content_type = None - - for file_field_name in file_fields: - file_field = file_fields[file_field_name] - - file_metadata = find_field_metadata(file_field, MultipartFormMetadata) - if file_metadata is None: - continue - - if file_metadata.content: - content = getattr(file_obj, file_field_name, None) - elif file_field_name == "content_type": - content_type = getattr(file_obj, file_field_name, None) - else: - file_name = getattr(file_obj, file_field_name) - - if file_name == "" or content is None: - raise ValueError("invalid multipart/form-data file") - - return file_name, content, content_type - - -def serialize_multipart_form( - media_type: str, request: Any -) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: - form: Dict[str, Any] = {} - files: List[Tuple[str, Any]] = [] - - if not isinstance(request, BaseModel): - raise TypeError("invalid request body type") - - request_fields: Dict[str, FieldInfo] = request.__class__.model_fields - request_field_types = get_type_hints(request.__class__) - - for name in request_fields: - field = request_fields[name] - - val = getattr(request, name) - if not _is_set(val): - continue - - field_metadata = find_field_metadata(field, MultipartFormMetadata) - if not field_metadata: - continue - - f_name = field.alias if field.alias else name - - if field_metadata.file: - if isinstance(val, List): - # Handle array of files - array_field_name = f_name + "[]" - for file_obj in val: - if not _is_set(file_obj): - continue - - file_name, 
content, content_type = _extract_file_properties( - file_obj - ) - - if content_type is not None: - files.append( - (array_field_name, (file_name, content, content_type)) - ) - else: - files.append((array_field_name, (file_name, content))) - else: - # Handle single file - file_name, content, content_type = _extract_file_properties(val) - - if content_type is not None: - files.append((f_name, (file_name, content, content_type))) - else: - files.append((f_name, (file_name, content))) - elif field_metadata.json: - files.append( - ( - f_name, - ( - None, - marshal_json(val, request_field_types[name]), - "application/json", - ), - ) - ) - else: - if isinstance(val, List): - values = [] - - for value in val: - if not _is_set(value): - continue - values.append(_val_to_string(value)) - - array_field_name = f_name + "[]" - form[array_field_name] = values - else: - form[f_name] = _val_to_string(val) - return media_type, form, files - - -def serialize_form_data(data: Any) -> Dict[str, Any]: - form: Dict[str, List[str]] = {} - - if isinstance(data, BaseModel): - data_fields: Dict[str, FieldInfo] = data.__class__.model_fields - data_field_types = get_type_hints(data.__class__) - for name in data_fields: - field = data_fields[name] - - val = getattr(data, name) - if not _is_set(val): - continue - - metadata = find_field_metadata(field, FormMetadata) - if metadata is None: - continue - - f_name = field.alias if field.alias is not None else name - - if metadata.json: - form[f_name] = [marshal_json(val, data_field_types[name])] - else: - if metadata.style == "form": - _populate_form( - f_name, - metadata.explode, - val, - ",", - form, - ) - else: - raise ValueError(f"Invalid form style for field {name}") - elif isinstance(data, Dict): - for key, value in data.items(): - if _is_set(value): - form[key] = [_val_to_string(value)] - else: - raise TypeError(f"Invalid request body type {type(data)} for form data") - - return form diff --git a/src/mistralai/utils/headers.py 
b/src/mistralai/utils/headers.py deleted file mode 100644 index 37864cbb..00000000 --- a/src/mistralai/utils/headers.py +++ /dev/null @@ -1,136 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - List, - Optional, -) -from httpx import Headers -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - HeaderMetadata, - find_field_metadata, -) - -from .values import _is_set, _populate_from_globals, _val_to_string - - -def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: - headers: Dict[str, str] = {} - - globals_already_populated = [] - if _is_set(headers_params): - globals_already_populated = _populate_headers(headers_params, gbls, headers, []) - if _is_set(gbls): - _populate_headers(gbls, None, headers, globals_already_populated) - - return headers - - -def _populate_headers( - headers_params: Any, - gbls: Any, - header_values: Dict[str, str], - skip_fields: List[str], -) -> List[str]: - globals_already_populated: List[str] = [] - - if not isinstance(headers_params, BaseModel): - return globals_already_populated - - param_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields - for name in param_fields: - if name in skip_fields: - continue - - field = param_fields[name] - f_name = field.alias if field.alias is not None else name - - metadata = find_field_metadata(field, HeaderMetadata) - if metadata is None: - continue - - value, global_found = _populate_from_globals( - name, getattr(headers_params, name), HeaderMetadata, gbls - ) - if global_found: - globals_already_populated.append(name) - value = _serialize_header(metadata.explode, value) - - if value != "": - header_values[f_name] = value - - return globals_already_populated - - -def _serialize_header(explode: bool, obj: Any) -> str: - if not _is_set(obj): - return "" - - if isinstance(obj, BaseModel): - items = [] 
- obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - obj_param_metadata = find_field_metadata(obj_field, HeaderMetadata) - - if not obj_param_metadata: - continue - - f_name = obj_field.alias if obj_field.alias is not None else name - - val = getattr(obj, name) - if not _is_set(val): - continue - - if explode: - items.append(f"{f_name}={_val_to_string(val)}") - else: - items.append(f_name) - items.append(_val_to_string(val)) - - if len(items) > 0: - return ",".join(items) - elif isinstance(obj, Dict): - items = [] - - for key, value in obj.items(): - if not _is_set(value): - continue - - if explode: - items.append(f"{key}={_val_to_string(value)}") - else: - items.append(key) - items.append(_val_to_string(value)) - - if len(items) > 0: - return ",".join([str(item) for item in items]) - elif isinstance(obj, List): - items = [] - - for value in obj: - if not _is_set(value): - continue - - items.append(_val_to_string(value)) - - if len(items) > 0: - return ",".join(items) - elif _is_set(obj): - return f"{_val_to_string(obj)}" - - return "" - - -def get_response_headers(headers: Headers) -> Dict[str, List[str]]: - res: Dict[str, List[str]] = {} - for k, v in headers.items(): - if not k in res: - res[k] = [] - - res[k].append(v) - return res diff --git a/src/mistralai/utils/logger.py b/src/mistralai/utils/logger.py deleted file mode 100644 index cc089307..00000000 --- a/src/mistralai/utils/logger.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import httpx -import logging -import os -from typing import Any, Protocol - - -class Logger(Protocol): - def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: - pass - - -class NoOpLogger: - def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: - pass - - -def get_body_content(req: httpx.Request) -> str: - return "" if not hasattr(req, "_content") else str(req.content) - - -def get_default_logger() -> Logger: - if os.getenv("MISTRAL_DEBUG"): - logging.basicConfig(level=logging.DEBUG) - return logging.getLogger("mistralai") - return NoOpLogger() diff --git a/src/mistralai/utils/metadata.py b/src/mistralai/utils/metadata.py deleted file mode 100644 index 173b3e5c..00000000 --- a/src/mistralai/utils/metadata.py +++ /dev/null @@ -1,118 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import Optional, Type, TypeVar, Union -from dataclasses import dataclass -from pydantic.fields import FieldInfo - - -T = TypeVar("T") - - -@dataclass -class SecurityMetadata: - option: bool = False - scheme: bool = False - scheme_type: Optional[str] = None - sub_type: Optional[str] = None - field_name: Optional[str] = None - - def get_field_name(self, default: str) -> str: - return self.field_name or default - - -@dataclass -class ParamMetadata: - serialization: Optional[str] = None - style: str = "simple" - explode: bool = False - - -@dataclass -class PathParamMetadata(ParamMetadata): - pass - - -@dataclass -class QueryParamMetadata(ParamMetadata): - style: str = "form" - explode: bool = True - - -@dataclass -class HeaderMetadata(ParamMetadata): - pass - - -@dataclass -class RequestMetadata: - media_type: str = "application/octet-stream" - - -@dataclass -class MultipartFormMetadata: - file: bool = False - content: bool = False - json: bool = False - - -@dataclass -class FormMetadata: - json: bool = False - style: str = "form" - explode: bool = True - - -class 
FieldMetadata: - security: Optional[SecurityMetadata] = None - path: Optional[PathParamMetadata] = None - query: Optional[QueryParamMetadata] = None - header: Optional[HeaderMetadata] = None - request: Optional[RequestMetadata] = None - form: Optional[FormMetadata] = None - multipart: Optional[MultipartFormMetadata] = None - - def __init__( - self, - security: Optional[SecurityMetadata] = None, - path: Optional[Union[PathParamMetadata, bool]] = None, - query: Optional[Union[QueryParamMetadata, bool]] = None, - header: Optional[Union[HeaderMetadata, bool]] = None, - request: Optional[Union[RequestMetadata, bool]] = None, - form: Optional[Union[FormMetadata, bool]] = None, - multipart: Optional[Union[MultipartFormMetadata, bool]] = None, - ): - self.security = security - self.path = PathParamMetadata() if isinstance(path, bool) else path - self.query = QueryParamMetadata() if isinstance(query, bool) else query - self.header = HeaderMetadata() if isinstance(header, bool) else header - self.request = RequestMetadata() if isinstance(request, bool) else request - self.form = FormMetadata() if isinstance(form, bool) else form - self.multipart = ( - MultipartFormMetadata() if isinstance(multipart, bool) else multipart - ) - - -def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: - metadata = find_metadata(field_info, FieldMetadata) - if not metadata: - return None - - fields = metadata.__dict__ - - for field in fields: - if isinstance(fields[field], metadata_type): - return fields[field] - - return None - - -def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: - metadata = field_info.metadata - if not metadata: - return None - - for md in metadata: - if isinstance(md, metadata_type): - return md - - return None diff --git a/src/mistralai/utils/queryparams.py b/src/mistralai/utils/queryparams.py deleted file mode 100644 index c04e0db8..00000000 --- a/src/mistralai/utils/queryparams.py +++ /dev/null @@ -1,217 +0,0 
@@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from typing import ( - Any, - Dict, - get_type_hints, - List, - Optional, -) - -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - QueryParamMetadata, - find_field_metadata, -) -from .values import ( - _get_serialized_params, - _is_set, - _populate_from_globals, - _val_to_string, -) -from .forms import _populate_form - - -def get_query_params( - query_params: Any, - gbls: Optional[Any] = None, - allow_empty_value: Optional[List[str]] = None, -) -> Dict[str, List[str]]: - params: Dict[str, List[str]] = {} - - globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value) - if _is_set(gbls): - _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value) - - return params - - -def _populate_query_params( - query_params: Any, - gbls: Any, - query_param_values: Dict[str, List[str]], - skip_fields: List[str], - allow_empty_value: Optional[List[str]] = None, -) -> List[str]: - globals_already_populated: List[str] = [] - - if not isinstance(query_params, BaseModel): - return globals_already_populated - - param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields - param_field_types = get_type_hints(query_params.__class__) - for name in param_fields: - if name in skip_fields: - continue - - field = param_fields[name] - - metadata = find_field_metadata(field, QueryParamMetadata) - if not metadata: - continue - - value = getattr(query_params, name) if _is_set(query_params) else None - - value, global_found = _populate_from_globals( - name, value, QueryParamMetadata, gbls - ) - if global_found: - globals_already_populated.append(name) - - f_name = field.alias if field.alias is not None else name - - allow_empty_set = set(allow_empty_value or []) - should_include_empty = f_name in allow_empty_set and ( - value is None or value == 
[] or value == "" - ) - - if should_include_empty: - query_param_values[f_name] = [""] - continue - - serialization = metadata.serialization - if serialization is not None: - serialized_parms = _get_serialized_params( - metadata, f_name, value, param_field_types[name] - ) - for key, value in serialized_parms.items(): - if key in query_param_values: - query_param_values[key].extend(value) - else: - query_param_values[key] = [value] - else: - style = metadata.style - if style == "deepObject": - _populate_deep_object_query_params(f_name, value, query_param_values) - elif style == "form": - _populate_delimited_query_params( - metadata, f_name, value, ",", query_param_values - ) - elif style == "pipeDelimited": - _populate_delimited_query_params( - metadata, f_name, value, "|", query_param_values - ) - else: - raise NotImplementedError( - f"query param style {style} not yet supported" - ) - - return globals_already_populated - - -def _populate_deep_object_query_params( - field_name: str, - obj: Any, - params: Dict[str, List[str]], -): - if not _is_set(obj): - return - - if isinstance(obj, BaseModel): - _populate_deep_object_query_params_basemodel(field_name, obj, params) - elif isinstance(obj, Dict): - _populate_deep_object_query_params_dict(field_name, obj, params) - - -def _populate_deep_object_query_params_basemodel( - prior_params_key: str, - obj: Any, - params: Dict[str, List[str]], -): - if not _is_set(obj) or not isinstance(obj, BaseModel): - return - - obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields - for name in obj_fields: - obj_field = obj_fields[name] - - f_name = obj_field.alias if obj_field.alias is not None else name - - params_key = f"{prior_params_key}[{f_name}]" - - obj_param_metadata = find_field_metadata(obj_field, QueryParamMetadata) - if not _is_set(obj_param_metadata): - continue - - obj_val = getattr(obj, name) - if not _is_set(obj_val): - continue - - if isinstance(obj_val, BaseModel): - 
_populate_deep_object_query_params_basemodel(params_key, obj_val, params) - elif isinstance(obj_val, Dict): - _populate_deep_object_query_params_dict(params_key, obj_val, params) - elif isinstance(obj_val, List): - _populate_deep_object_query_params_list(params_key, obj_val, params) - else: - params[params_key] = [_val_to_string(obj_val)] - - -def _populate_deep_object_query_params_dict( - prior_params_key: str, - value: Dict, - params: Dict[str, List[str]], -): - if not _is_set(value): - return - - for key, val in value.items(): - if not _is_set(val): - continue - - params_key = f"{prior_params_key}[{key}]" - - if isinstance(val, BaseModel): - _populate_deep_object_query_params_basemodel(params_key, val, params) - elif isinstance(val, Dict): - _populate_deep_object_query_params_dict(params_key, val, params) - elif isinstance(val, List): - _populate_deep_object_query_params_list(params_key, val, params) - else: - params[params_key] = [_val_to_string(val)] - - -def _populate_deep_object_query_params_list( - params_key: str, - value: List, - params: Dict[str, List[str]], -): - if not _is_set(value): - return - - for val in value: - if not _is_set(val): - continue - - if params.get(params_key) is None: - params[params_key] = [] - - params[params_key].append(_val_to_string(val)) - - -def _populate_delimited_query_params( - metadata: QueryParamMetadata, - field_name: str, - obj: Any, - delimiter: str, - query_param_values: Dict[str, List[str]], -): - _populate_form( - field_name, - metadata.explode, - obj, - delimiter, - query_param_values, - ) diff --git a/src/mistralai/utils/requestbodies.py b/src/mistralai/utils/requestbodies.py deleted file mode 100644 index 1de32b6d..00000000 --- a/src/mistralai/utils/requestbodies.py +++ /dev/null @@ -1,66 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import io -from dataclasses import dataclass -import re -from typing import ( - Any, - Optional, -) - -from .forms import serialize_form_data, serialize_multipart_form - -from .serializers import marshal_json - -SERIALIZATION_METHOD_TO_CONTENT_TYPE = { - "json": "application/json", - "form": "application/x-www-form-urlencoded", - "multipart": "multipart/form-data", - "raw": "application/octet-stream", - "string": "text/plain", -} - - -@dataclass -class SerializedRequestBody: - media_type: Optional[str] = None - content: Optional[Any] = None - data: Optional[Any] = None - files: Optional[Any] = None - - -def serialize_request_body( - request_body: Any, - nullable: bool, - optional: bool, - serialization_method: str, - request_body_type, -) -> Optional[SerializedRequestBody]: - if request_body is None: - if not nullable and optional: - return None - - media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] - - serialized_request_body = SerializedRequestBody(media_type) - - if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None: - serialized_request_body.content = marshal_json(request_body, request_body_type) - elif re.match(r"^multipart\/.*", media_type) is not None: - ( - serialized_request_body.media_type, - serialized_request_body.data, - serialized_request_body.files, - ) = serialize_multipart_form(media_type, request_body) - elif re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None: - serialized_request_body.data = serialize_form_data(request_body) - elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): - serialized_request_body.content = request_body - elif isinstance(request_body, str): - serialized_request_body.content = request_body - else: - raise TypeError( - f"invalid request body type {type(request_body)} for mediaType {media_type}" - ) - - return serialized_request_body diff --git a/src/mistralai/utils/retries.py b/src/mistralai/utils/retries.py 
deleted file mode 100644 index 88a91b10..00000000 --- a/src/mistralai/utils/retries.py +++ /dev/null @@ -1,281 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -import asyncio -import random -import time -from datetime import datetime -from email.utils import parsedate_to_datetime -from typing import List, Optional - -import httpx - - -class BackoffStrategy: - initial_interval: int - max_interval: int - exponent: float - max_elapsed_time: int - - def __init__( - self, - initial_interval: int, - max_interval: int, - exponent: float, - max_elapsed_time: int, - ): - self.initial_interval = initial_interval - self.max_interval = max_interval - self.exponent = exponent - self.max_elapsed_time = max_elapsed_time - - -class RetryConfig: - strategy: str - backoff: BackoffStrategy - retry_connection_errors: bool - - def __init__( - self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool - ): - self.strategy = strategy - self.backoff = backoff - self.retry_connection_errors = retry_connection_errors - - -class Retries: - config: RetryConfig - status_codes: List[str] - - def __init__(self, config: RetryConfig, status_codes: List[str]): - self.config = config - self.status_codes = status_codes - - -class TemporaryError(Exception): - response: httpx.Response - retry_after: Optional[int] - - def __init__(self, response: httpx.Response): - self.response = response - self.retry_after = _parse_retry_after_header(response) - - -class PermanentError(Exception): - inner: Exception - - def __init__(self, inner: Exception): - self.inner = inner - - -def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: - """Parse Retry-After header from response. - - Returns: - Retry interval in milliseconds, or None if header is missing or invalid. 
- """ - retry_after_header = response.headers.get("retry-after") - if not retry_after_header: - return None - - try: - seconds = float(retry_after_header) - return round(seconds * 1000) - except ValueError: - pass - - try: - retry_date = parsedate_to_datetime(retry_after_header) - delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() - return round(max(0, delta) * 1000) - except (ValueError, TypeError): - pass - - return None - - -def _get_sleep_interval( - exception: Exception, - initial_interval: int, - max_interval: int, - exponent: float, - retries: int, -) -> float: - """Get sleep interval for retry with exponential backoff. - - Args: - exception: The exception that triggered the retry. - initial_interval: Initial retry interval in milliseconds. - max_interval: Maximum retry interval in milliseconds. - exponent: Base for exponential backoff calculation. - retries: Current retry attempt count. - - Returns: - Sleep interval in seconds. - """ - if ( - isinstance(exception, TemporaryError) - and exception.retry_after is not None - and exception.retry_after > 0 - ): - return exception.retry_after / 1000 - - sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) - return min(sleep, max_interval / 1000) - - -def retry(func, retries: Retries): - if retries.config.strategy == "backoff": - - def do_request() -> httpx.Response: - res: httpx.Response - try: - res = func() - - for code in retries.status_codes: - if "X" in code.upper(): - code_range = int(code[0]) - - status_major = res.status_code / 100 - - if code_range <= status_major < code_range + 1: - raise TemporaryError(res) - else: - parsed_code = int(code) - - if res.status_code == parsed_code: - raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: - if retries.config.retry_connection_errors: - raise - - raise 
PermanentError(exception) from exception - except TemporaryError: - raise - except Exception as exception: - raise PermanentError(exception) from exception - - return res - - return retry_with_backoff( - do_request, - retries.config.backoff.initial_interval, - retries.config.backoff.max_interval, - retries.config.backoff.exponent, - retries.config.backoff.max_elapsed_time, - ) - - return func() - - -async def retry_async(func, retries: Retries): - if retries.config.strategy == "backoff": - - async def do_request() -> httpx.Response: - res: httpx.Response - try: - res = await func() - - for code in retries.status_codes: - if "X" in code.upper(): - code_range = int(code[0]) - - status_major = res.status_code / 100 - - if code_range <= status_major < code_range + 1: - raise TemporaryError(res) - else: - parsed_code = int(code) - - if res.status_code == parsed_code: - raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except TemporaryError: - raise - except Exception as exception: - raise PermanentError(exception) from exception - - return res - - return await retry_with_backoff_async( - do_request, - retries.config.backoff.initial_interval, - retries.config.backoff.max_interval, - retries.config.backoff.exponent, - retries.config.backoff.max_elapsed_time, - ) - - return await func() - - -def retry_with_backoff( - func, - initial_interval=500, - max_interval=60000, - exponent=1.5, - max_elapsed_time=3600000, -): - start = round(time.time() * 1000) - retries = 0 - - while True: - try: - return func() - except PermanentError as exception: - raise exception.inner - except Exception as exception: # pylint: disable=broad-exception-caught - now = round(time.time() * 1000) - if now - start > 
max_elapsed_time: - if isinstance(exception, TemporaryError): - return exception.response - - raise - - sleep = _get_sleep_interval( - exception, initial_interval, max_interval, exponent, retries - ) - time.sleep(sleep) - retries += 1 - - -async def retry_with_backoff_async( - func, - initial_interval=500, - max_interval=60000, - exponent=1.5, - max_elapsed_time=3600000, -): - start = round(time.time() * 1000) - retries = 0 - - while True: - try: - return await func() - except PermanentError as exception: - raise exception.inner - except Exception as exception: # pylint: disable=broad-exception-caught - now = round(time.time() * 1000) - if now - start > max_elapsed_time: - if isinstance(exception, TemporaryError): - return exception.response - - raise - - sleep = _get_sleep_interval( - exception, initial_interval, max_interval, exponent, retries - ) - await asyncio.sleep(sleep) - retries += 1 diff --git a/src/mistralai/utils/security.py b/src/mistralai/utils/security.py deleted file mode 100644 index 3b8526bf..00000000 --- a/src/mistralai/utils/security.py +++ /dev/null @@ -1,192 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import base64 - -from typing import ( - Any, - Dict, - List, - Optional, - Tuple, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - SecurityMetadata, - find_field_metadata, -) -import os - - -def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: - headers: Dict[str, str] = {} - query_params: Dict[str, List[str]] = {} - - if security is None: - return headers, query_params - - if not isinstance(security, BaseModel): - raise TypeError("security must be a pydantic model") - - sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields - for name in sec_fields: - sec_field = sec_fields[name] - - value = getattr(security, name) - if value is None: - continue - - metadata = find_field_metadata(sec_field, SecurityMetadata) - if metadata is None: - continue - if metadata.option: - _parse_security_option(headers, query_params, value) - return headers, query_params - if metadata.scheme: - # Special case for basic auth or custom auth which could be a flattened model - if metadata.sub_type in ["basic", "custom"] and not isinstance( - value, BaseModel - ): - _parse_security_scheme(headers, query_params, metadata, name, security) - else: - _parse_security_scheme(headers, query_params, metadata, name, value) - - return headers, query_params - - -def get_security_from_env(security: Any, security_class: Any) -> Optional[BaseModel]: - if security is not None: - return security - - if not issubclass(security_class, BaseModel): - raise TypeError("security_class must be a pydantic model class") - - security_dict: Any = {} - - if os.getenv("MISTRAL_API_KEY"): - security_dict["api_key"] = os.getenv("MISTRAL_API_KEY") - - return security_class(**security_dict) if security_dict else None - - -def _parse_security_option( - headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any -): - if not isinstance(option, BaseModel): - raise TypeError("security option must be a 
pydantic model") - - opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields - for name in opt_fields: - opt_field = opt_fields[name] - - metadata = find_field_metadata(opt_field, SecurityMetadata) - if metadata is None or not metadata.scheme: - continue - _parse_security_scheme( - headers, query_params, metadata, name, getattr(option, name) - ) - - -def _parse_security_scheme( - headers: Dict[str, str], - query_params: Dict[str, List[str]], - scheme_metadata: SecurityMetadata, - field_name: str, - scheme: Any, -): - scheme_type = scheme_metadata.scheme_type - sub_type = scheme_metadata.sub_type - - if isinstance(scheme, BaseModel): - if scheme_type == "http": - if sub_type == "basic": - _parse_basic_auth_scheme(headers, scheme) - return - if sub_type == "custom": - return - - scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields - for name in scheme_fields: - scheme_field = scheme_fields[name] - - metadata = find_field_metadata(scheme_field, SecurityMetadata) - if metadata is None or metadata.field_name is None: - continue - - value = getattr(scheme, name) - - _parse_security_scheme_value( - headers, query_params, scheme_metadata, metadata, name, value - ) - else: - _parse_security_scheme_value( - headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme - ) - - -def _parse_security_scheme_value( - headers: Dict[str, str], - query_params: Dict[str, List[str]], - scheme_metadata: SecurityMetadata, - security_metadata: SecurityMetadata, - field_name: str, - value: Any, -): - scheme_type = scheme_metadata.scheme_type - sub_type = scheme_metadata.sub_type - - header_name = security_metadata.get_field_name(field_name) - - if scheme_type == "apiKey": - if sub_type == "header": - headers[header_name] = value - elif sub_type == "query": - query_params[header_name] = [value] - else: - raise ValueError("sub type {sub_type} not supported") - elif scheme_type == "openIdConnect": - headers[header_name] = _apply_bearer(value) - elif 
scheme_type == "oauth2": - if sub_type != "client_credentials": - headers[header_name] = _apply_bearer(value) - elif scheme_type == "http": - if sub_type == "bearer": - headers[header_name] = _apply_bearer(value) - elif sub_type == "custom": - return - else: - raise ValueError("sub type {sub_type} not supported") - else: - raise ValueError("scheme type {scheme_type} not supported") - - -def _apply_bearer(token: str) -> str: - return token.lower().startswith("bearer ") and token or f"Bearer {token}" - - -def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): - username = "" - password = "" - - if not isinstance(scheme, BaseModel): - raise TypeError("basic auth scheme must be a pydantic model") - - scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields - for name in scheme_fields: - scheme_field = scheme_fields[name] - - metadata = find_field_metadata(scheme_field, SecurityMetadata) - if metadata is None or metadata.field_name is None: - continue - - field_name = metadata.field_name - value = getattr(scheme, name) - - if field_name == "username": - username = value - if field_name == "password": - password = value - - data = f"{username}:{password}".encode() - headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/src/mistralai/utils/serializers.py b/src/mistralai/utils/serializers.py deleted file mode 100644 index 14321eb4..00000000 --- a/src/mistralai/utils/serializers.py +++ /dev/null @@ -1,229 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from decimal import Decimal -import functools -import json -import typing -from typing import Any, Dict, List, Tuple, Union, get_args -import typing_extensions -from typing_extensions import get_origin - -import httpx -from pydantic import ConfigDict, create_model -from pydantic_core import from_json - -from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset - - -def serialize_decimal(as_str: bool): - def serialize(d): - # Optional[T] is a Union[T, None] - if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: - return None - if isinstance(d, Unset): - return d - - if not isinstance(d, Decimal): - raise ValueError("Expected Decimal object") - - return str(d) if as_str else float(d) - - return serialize - - -def validate_decimal(d): - if d is None: - return None - - if isinstance(d, (Decimal, Unset)): - return d - - if not isinstance(d, (str, int, float)): - raise ValueError("Expected string, int or float") - - return Decimal(str(d)) - - -def serialize_float(as_str: bool): - def serialize(f): - # Optional[T] is a Union[T, None] - if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: - return None - if isinstance(f, Unset): - return f - - if not isinstance(f, float): - raise ValueError("Expected float") - - return str(f) if as_str else f - - return serialize - - -def validate_float(f): - if f is None: - return None - - if isinstance(f, (float, Unset)): - return f - - if not isinstance(f, str): - raise ValueError("Expected string") - - return float(f) - - -def serialize_int(as_str: bool): - def serialize(i): - # Optional[T] is a Union[T, None] - if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: - return None - if isinstance(i, Unset): - return i - - if not isinstance(i, int): - raise ValueError("Expected int") - - return str(i) if as_str else i - - return serialize - - -def validate_int(b): - if b is None: - return None - - if isinstance(b, (int, Unset)): - return b - - if 
not isinstance(b, str): - raise ValueError("Expected string") - - return int(b) - - -def validate_const(v): - def validate(c): - # Optional[T] is a Union[T, None] - if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: - return None - - if v != c: - raise ValueError(f"Expected {v}") - - return c - - return validate - - -def unmarshal_json(raw, typ: Any) -> Any: - return unmarshal(from_json(raw), typ) - - -def unmarshal(val, typ: Any) -> Any: - unmarshaller = create_model( - "Unmarshaller", - body=(typ, ...), - __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), - ) - - m = unmarshaller(body=val) - - # pyright: ignore[reportAttributeAccessIssue] - return m.body # type: ignore - - -def marshal_json(val, typ): - if is_nullable(typ) and val is None: - return "null" - - marshaller = create_model( - "Marshaller", - body=(typ, ...), - __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), - ) - - m = marshaller(body=val) - - d = m.model_dump(by_alias=True, mode="json", exclude_none=True) - - if len(d) == 0: - return "" - - return json.dumps(d[next(iter(d))], separators=(",", ":")) - - -def is_nullable(field): - origin = get_origin(field) - if origin is Nullable or origin is OptionalNullable: - return True - - if not origin is Union or type(None) not in get_args(field): - return False - - for arg in get_args(field): - if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: - return True - - return False - - -def is_union(obj: object) -> bool: - """ - Returns True if the given object is a typing.Union or typing_extensions.Union. 
- """ - return any( - obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") - ) - - -def stream_to_text(stream: httpx.Response) -> str: - return "".join(stream.iter_text()) - - -async def stream_to_text_async(stream: httpx.Response) -> str: - return "".join([chunk async for chunk in stream.aiter_text()]) - - -def stream_to_bytes(stream: httpx.Response) -> bytes: - return stream.content - - -async def stream_to_bytes_async(stream: httpx.Response) -> bytes: - return await stream.aread() - - -def get_pydantic_model(data: Any, typ: Any) -> Any: - if not _contains_pydantic_model(data): - return unmarshal(data, typ) - - return data - - -def _contains_pydantic_model(data: Any) -> bool: - if isinstance(data, BaseModel): - return True - if isinstance(data, List): - return any(_contains_pydantic_model(item) for item in data) - if isinstance(data, Dict): - return any(_contains_pydantic_model(value) for value in data.values()) - - return False - - -@functools.cache -def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: - """ - Get typing objects by name from typing and typing_extensions. - Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types - """ - result = tuple( - getattr(module, name) - for module in (typing, typing_extensions) - if hasattr(module, name) - ) - if not result: - raise ValueError( - f"Neither typing nor typing_extensions has an object called {name!r}" - ) - return result diff --git a/src/mistralai/utils/unmarshal_json_response.py b/src/mistralai/utils/unmarshal_json_response.py deleted file mode 100644 index 64d0b3a6..00000000 --- a/src/mistralai/utils/unmarshal_json_response.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from typing import Any, Optional, Type, TypeVar, overload - -import httpx - -from .serializers import unmarshal_json -from mistralai import models - -T = TypeVar("T") - - -@overload -def unmarshal_json_response( - typ: Type[T], http_res: httpx.Response, body: Optional[str] = None -) -> T: ... - - -@overload -def unmarshal_json_response( - typ: Any, http_res: httpx.Response, body: Optional[str] = None -) -> Any: ... - - -def unmarshal_json_response( - typ: Any, http_res: httpx.Response, body: Optional[str] = None -) -> Any: - if body is None: - body = http_res.text - try: - return unmarshal_json(body, typ) - except Exception as e: - raise models.ResponseValidationError( - "Response validation failed", - http_res, - e, - body, - ) from e diff --git a/src/mistralai/utils/url.py b/src/mistralai/utils/url.py deleted file mode 100644 index c78ccbae..00000000 --- a/src/mistralai/utils/url.py +++ /dev/null @@ -1,155 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from decimal import Decimal -from typing import ( - Any, - Dict, - get_type_hints, - List, - Optional, - Union, - get_args, - get_origin, -) -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from .metadata import ( - PathParamMetadata, - find_field_metadata, -) -from .values import ( - _get_serialized_params, - _is_set, - _populate_from_globals, - _val_to_string, -) - - -def generate_url( - server_url: str, - path: str, - path_params: Any, - gbls: Optional[Any] = None, -) -> str: - path_param_values: Dict[str, str] = {} - - globals_already_populated = _populate_path_params( - path_params, gbls, path_param_values, [] - ) - if _is_set(gbls): - _populate_path_params(gbls, None, path_param_values, globals_already_populated) - - for key, value in path_param_values.items(): - path = path.replace("{" + key + "}", value, 1) - - return remove_suffix(server_url, "/") + path - - -def _populate_path_params( - path_params: Any, - gbls: Any, - path_param_values: Dict[str, str], - skip_fields: List[str], -) -> List[str]: - globals_already_populated: List[str] = [] - - if not isinstance(path_params, BaseModel): - return globals_already_populated - - path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields - path_param_field_types = get_type_hints(path_params.__class__) - for name in path_param_fields: - if name in skip_fields: - continue - - field = path_param_fields[name] - - param_metadata = find_field_metadata(field, PathParamMetadata) - if param_metadata is None: - continue - - param = getattr(path_params, name) if _is_set(path_params) else None - param, global_found = _populate_from_globals( - name, param, PathParamMetadata, gbls - ) - if global_found: - globals_already_populated.append(name) - - if not _is_set(param): - continue - - f_name = field.alias if field.alias is not None else name - serialization = param_metadata.serialization - if serialization is not None: - serialized_params = _get_serialized_params( 
- param_metadata, f_name, param, path_param_field_types[name] - ) - for key, value in serialized_params.items(): - path_param_values[key] = value - else: - pp_vals: List[str] = [] - if param_metadata.style == "simple": - if isinstance(param, List): - for pp_val in param: - if not _is_set(pp_val): - continue - pp_vals.append(_val_to_string(pp_val)) - path_param_values[f_name] = ",".join(pp_vals) - elif isinstance(param, Dict): - for pp_key in param: - if not _is_set(param[pp_key]): - continue - if param_metadata.explode: - pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") - else: - pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") - path_param_values[f_name] = ",".join(pp_vals) - elif not isinstance(param, (str, int, float, complex, bool, Decimal)): - param_fields: Dict[str, FieldInfo] = param.__class__.model_fields - for name in param_fields: - param_field = param_fields[name] - - param_value_metadata = find_field_metadata( - param_field, PathParamMetadata - ) - if param_value_metadata is None: - continue - - param_name = ( - param_field.alias if param_field.alias is not None else name - ) - - param_field_val = getattr(param, name) - if not _is_set(param_field_val): - continue - if param_metadata.explode: - pp_vals.append( - f"{param_name}={_val_to_string(param_field_val)}" - ) - else: - pp_vals.append( - f"{param_name},{_val_to_string(param_field_val)}" - ) - path_param_values[f_name] = ",".join(pp_vals) - elif _is_set(param): - path_param_values[f_name] = _val_to_string(param) - - return globals_already_populated - - -def is_optional(field): - return get_origin(field) is Union and type(None) in get_args(field) - - -def template_url(url_with_params: str, params: Dict[str, str]) -> str: - for key, value in params.items(): - url_with_params = url_with_params.replace("{" + key + "}", value) - - return url_with_params - - -def remove_suffix(input_string, suffix): - if suffix and input_string.endswith(suffix): - return input_string[: -len(suffix)] - 
return input_string diff --git a/src/mistralai/utils/values.py b/src/mistralai/utils/values.py deleted file mode 100644 index dae01a44..00000000 --- a/src/mistralai/utils/values.py +++ /dev/null @@ -1,137 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from datetime import datetime -from enum import Enum -from email.message import Message -from functools import partial -import os -from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast - -from httpx import Response -from pydantic import BaseModel -from pydantic.fields import FieldInfo - -from ..types.basemodel import Unset - -from .serializers import marshal_json - -from .metadata import ParamMetadata, find_field_metadata - - -def match_content_type(content_type: str, pattern: str) -> bool: - if pattern in (content_type, "*", "*/*"): - return True - - msg = Message() - msg["content-type"] = content_type - media_type = msg.get_content_type() - - if media_type == pattern: - return True - - parts = media_type.split("/") - if len(parts) == 2: - if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): - return True - - return False - - -def match_status_codes(status_codes: List[str], status_code: int) -> bool: - if "default" in status_codes: - return True - - for code in status_codes: - if code == str(status_code): - return True - - if code.endswith("XX") and code.startswith(str(status_code)[:1]): - return True - return False - - -T = TypeVar("T") - -def cast_partial(typ): - return partial(cast, typ) - -def get_global_from_env( - value: Optional[T], env_key: str, type_cast: Callable[[str], T] -) -> Optional[T]: - if value is not None: - return value - env_value = os.getenv(env_key) - if env_value is not None: - try: - return type_cast(env_value) - except ValueError: - pass - return None - - -def match_response( - response: Response, code: Union[str, List[str]], content_type: str -) -> bool: - codes = code if 
isinstance(code, list) else [code] - return match_status_codes(codes, response.status_code) and match_content_type( - response.headers.get("content-type", "application/octet-stream"), content_type - ) - - -def _populate_from_globals( - param_name: str, value: Any, param_metadata_type: type, gbls: Any -) -> Tuple[Any, bool]: - if gbls is None: - return value, False - - if not isinstance(gbls, BaseModel): - raise TypeError("globals must be a pydantic model") - - global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields - found = False - for name in global_fields: - field = global_fields[name] - if name is not param_name: - continue - - found = True - - if value is not None: - return value, True - - global_value = getattr(gbls, name) - - param_metadata = find_field_metadata(field, param_metadata_type) - if param_metadata is None: - return value, True - - return global_value, True - - return value, found - - -def _val_to_string(val) -> str: - if isinstance(val, bool): - return str(val).lower() - if isinstance(val, datetime): - return str(val.isoformat().replace("+00:00", "Z")) - if isinstance(val, Enum): - return str(val.value) - - return str(val) - - -def _get_serialized_params( - metadata: ParamMetadata, field_name: str, obj: Any, typ: type -) -> Dict[str, str]: - params: Dict[str, str] = {} - - serialization = metadata.serialization - if serialization == "json": - params[field_name] = marshal_json(obj, typ) - - return params - - -def _is_set(value: Any) -> bool: - return value is not None and not isinstance(value, Unset) From ea79059477079c012e0e3a00cc7c0250bf72d914 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:23:58 +0100 Subject: [PATCH 188/223] chore: update pyproject.toml for v2.0.0a1 and namespace packages - Update version to 2.0.0a1 - Update py.typed paths for new client/ location - Add mypy namespace_packages and explicit_package_bases settings --- pyproject.toml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff 
--git a/pyproject.toml b/pyproject.toml index 2cb90876..c9003a1e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "1.12.0" +version = "2.0.0a1" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" @@ -63,7 +63,7 @@ default-groups = [ ] [tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai/py.typed"] +"*" = ["py.typed", "src/mistralai/client/py.typed"] [tool.hatch.build.targets.sdist] include = [ @@ -74,7 +74,7 @@ include = [ [tool.hatch.build.targets.sdist.force-include] "py.typed" = "py.typed" -"src/mistralai/py.typed" = "src/mistralai/py.typed" +"src/mistralai/client/py.typed" = "src/mistralai/client/py.typed" [tool.hatch.build.targets.wheel] include = [ @@ -97,6 +97,9 @@ pythonpath = ["src"] [tool.mypy] disable_error_code = "misc" +namespace_packages = true +explicit_package_bases = true +mypy_path = "src" [[tool.mypy.overrides]] module = "typing_inspect" From 1b84d96935a3bbec597cde11b46599ca42e26205 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:25:12 +0100 Subject: [PATCH 189/223] feat: regenerate SDK under mistralai.client namespace Generated by Speakeasy with moduleName=mistralai.client. All SDK code now lives under src/mistralai/client/. 
--- .speakeasy/gen.lock | 2391 +++++++-------- .speakeasy/workflow.lock | 2 +- README.md | 64 +- USAGE.md | 16 +- docs/sdks/accesses/README.md | 6 +- docs/sdks/agents/README.md | 4 +- docs/sdks/chat/README.md | 4 +- docs/sdks/classifiers/README.md | 8 +- docs/sdks/conversations/README.md | 22 +- docs/sdks/documents/README.md | 20 +- docs/sdks/embeddings/README.md | 2 +- docs/sdks/files/README.md | 12 +- docs/sdks/fim/README.md | 4 +- docs/sdks/jobs/README.md | 10 +- docs/sdks/libraries/README.md | 10 +- docs/sdks/mistralagents/README.md | 20 +- docs/sdks/mistraljobs/README.md | 8 +- docs/sdks/models/README.md | 12 +- docs/sdks/ocr/README.md | 2 +- docs/sdks/transcriptions/README.md | 4 +- src/mistralai/client/__init__.py | 18 + src/mistralai/client/_hooks/__init__.py | 5 + src/mistralai/client/_hooks/registration.py | 13 + src/mistralai/client/_hooks/sdkhooks.py | 76 + src/mistralai/client/_hooks/types.py | 113 + src/mistralai/client/_version.py | 15 + src/mistralai/client/accesses.py | 619 ++++ src/mistralai/client/agents.py | 725 +++++ src/mistralai/client/audio.py | 23 + src/mistralai/client/basesdk.py | 370 +++ src/mistralai/client/batch.py | 20 + src/mistralai/client/beta.py | 31 + src/mistralai/client/chat.py | 753 +++++ src/mistralai/client/classifiers.py | 800 +++++ src/mistralai/client/conversations.py | 2657 +++++++++++++++++ src/mistralai/client/documents.py | 1981 ++++++++++++ src/mistralai/client/embeddings.py | 240 ++ src/mistralai/client/files.py | 1120 +++++++ src/mistralai/client/fim.py | 545 ++++ src/mistralai/client/fine_tuning.py | 20 + src/mistralai/client/httpclient.py | 125 + src/mistralai/client/jobs.py | 1067 +++++++ src/mistralai/client/libraries.py | 946 ++++++ src/mistralai/client/mistral_agents.py | 2080 +++++++++++++ src/mistralai/client/mistral_jobs.py | 799 +++++ src/mistralai/client/models/__init__.py | 2531 ++++++++++++++++ src/mistralai/client/models/agent.py | 148 + .../client/models/agentaliasresponse.py | 23 + 
.../client/models/agentconversation.py | 95 + .../client/models/agentcreationrequest.py | 119 + .../client/models/agenthandoffdoneevent.py | 33 + .../client/models/agenthandoffentry.py | 82 + .../client/models/agenthandoffstartedevent.py | 33 + ..._api_v1_agents_create_or_update_aliasop.py | 26 + .../models/agents_api_v1_agents_deleteop.py | 16 + .../agents_api_v1_agents_get_versionop.py | 21 + .../models/agents_api_v1_agents_getop.py | 68 + ...ts_api_v1_agents_list_version_aliasesop.py | 16 + .../agents_api_v1_agents_list_versionsop.py | 33 + .../models/agents_api_v1_agents_listop.py | 104 + .../agents_api_v1_agents_update_versionop.py | 21 + .../models/agents_api_v1_agents_updateop.py | 23 + ...ts_api_v1_conversations_append_streamop.py | 28 + .../agents_api_v1_conversations_appendop.py | 28 + .../agents_api_v1_conversations_deleteop.py | 18 + .../agents_api_v1_conversations_getop.py | 35 + .../agents_api_v1_conversations_historyop.py | 18 + .../agents_api_v1_conversations_listop.py | 80 + .../agents_api_v1_conversations_messagesop.py | 18 + ...s_api_v1_conversations_restart_streamop.py | 28 + .../agents_api_v1_conversations_restartop.py | 28 + .../client/models/agentscompletionrequest.py | 198 ++ .../models/agentscompletionstreamrequest.py | 196 ++ .../client/models/agentupdaterequest.py | 133 + src/mistralai/client/models/apiendpoint.py | 22 + .../client/models/archiveftmodelout.py | 23 + .../client/models/assistantmessage.py | 77 + src/mistralai/client/models/audiochunk.py | 20 + src/mistralai/client/models/audioencoding.py | 18 + src/mistralai/client/models/audioformat.py | 17 + .../models/audiotranscriptionrequest.py | 113 + .../models/audiotranscriptionrequeststream.py | 111 + src/mistralai/client/models/basemodelcard.py | 116 + src/mistralai/client/models/batcherror.py | 17 + src/mistralai/client/models/batchjobin.py | 88 + src/mistralai/client/models/batchjobout.py | 129 + src/mistralai/client/models/batchjobsout.py | 24 + 
src/mistralai/client/models/batchjobstatus.py | 15 + src/mistralai/client/models/batchrequest.py | 54 + .../client/models/builtinconnectors.py | 13 + .../models/chatclassificationrequest.py | 20 + .../client/models/chatcompletionchoice.py | 33 + .../client/models/chatcompletionrequest.py | 221 ++ .../client/models/chatcompletionresponse.py | 31 + .../models/chatcompletionstreamrequest.py | 223 ++ .../client/models/chatmoderationrequest.py | 83 + src/mistralai/client/models/checkpointout.py | 26 + .../client/models/classificationrequest.py | 74 + .../client/models/classificationresponse.py | 24 + .../models/classificationtargetresult.py | 14 + .../client/models/classifierdetailedjobout.py | 164 + .../client/models/classifierftmodelout.py | 114 + .../client/models/classifierjobout.py | 173 ++ .../client/models/classifiertargetin.py | 61 + .../client/models/classifiertargetout.py | 24 + .../models/classifiertrainingparameters.py | 79 + .../models/classifiertrainingparametersin.py | 91 + .../client/models/codeinterpretertool.py | 17 + src/mistralai/client/models/completionargs.py | 107 + .../client/models/completionargsstop.py | 13 + .../client/models/completionchunk.py | 34 + .../client/models/completiondetailedjobout.py | 171 ++ .../client/models/completionevent.py | 14 + .../client/models/completionftmodelout.py | 110 + .../client/models/completionjobout.py | 184 ++ .../models/completionresponsestreamchoice.py | 63 + .../models/completiontrainingparameters.py | 84 + .../models/completiontrainingparametersin.py | 96 + src/mistralai/client/models/contentchunk.py | 42 + .../models/conversationappendrequest.py | 38 + .../models/conversationappendstreamrequest.py | 40 + .../client/models/conversationevents.py | 78 + .../client/models/conversationhistory.py | 59 + .../client/models/conversationinputs.py | 14 + .../client/models/conversationmessages.py | 28 + .../client/models/conversationrequest.py | 160 + .../client/models/conversationresponse.py | 52 + 
.../models/conversationrestartrequest.py | 113 + .../conversationrestartstreamrequest.py | 117 + .../models/conversationstreamrequest.py | 166 + .../client/models/conversationusageinfo.py | 69 + ...elete_model_v1_models_model_id_deleteop.py | 18 + src/mistralai/client/models/deletefileout.py | 25 + src/mistralai/client/models/deletemodelout.py | 26 + src/mistralai/client/models/deltamessage.py | 67 + .../client/models/documentlibrarytool.py | 22 + src/mistralai/client/models/documentout.py | 127 + .../client/models/documenttextcontent.py | 13 + .../client/models/documentupdatein.py | 71 + .../client/models/documenturlchunk.py | 62 + src/mistralai/client/models/embeddingdtype.py | 13 + .../client/models/embeddingrequest.py | 90 + .../client/models/embeddingresponse.py | 28 + .../client/models/embeddingresponsedata.py | 20 + src/mistralai/client/models/encodingformat.py | 10 + src/mistralai/client/models/entitytype.py | 16 + src/mistralai/client/models/eventout.py | 61 + src/mistralai/client/models/file.py | 33 + src/mistralai/client/models/filechunk.py | 23 + src/mistralai/client/models/filepurpose.py | 15 + .../models/files_api_routes_delete_fileop.py | 16 + .../files_api_routes_download_fileop.py | 16 + .../files_api_routes_get_signed_urlop.py | 25 + .../models/files_api_routes_list_filesop.py | 109 + .../files_api_routes_retrieve_fileop.py | 16 + .../models/files_api_routes_upload_fileop.py | 40 + src/mistralai/client/models/fileschema.py | 94 + src/mistralai/client/models/filesignedurl.py | 13 + .../client/models/fimcompletionrequest.py | 130 + .../client/models/fimcompletionresponse.py | 31 + .../models/fimcompletionstreamrequest.py | 128 + .../client/models/finetuneablemodeltype.py | 10 + .../client/models/ftclassifierlossfunction.py | 10 + .../client/models/ftmodelcapabilitiesout.py | 26 + src/mistralai/client/models/ftmodelcard.py | 132 + src/mistralai/client/models/function.py | 23 + src/mistralai/client/models/functioncall.py | 23 + 
.../client/models/functioncallentry.py | 83 + .../models/functioncallentryarguments.py | 15 + .../client/models/functioncallevent.py | 36 + src/mistralai/client/models/functionname.py | 17 + .../client/models/functionresultentry.py | 76 + src/mistralai/client/models/functiontool.py | 21 + .../client/models/githubrepositoryin.py | 69 + .../client/models/githubrepositoryout.py | 69 + .../client/models/httpvalidationerror.py | 28 + .../client/models/imagegenerationtool.py | 17 + src/mistralai/client/models/imageurl.py | 53 + src/mistralai/client/models/imageurlchunk.py | 33 + src/mistralai/client/models/inputentries.py | 37 + src/mistralai/client/models/inputs.py | 54 + .../client/models/instructrequest.py | 42 + src/mistralai/client/models/jobin.py | 147 + src/mistralai/client/models/jobmetadataout.py | 84 + ...obs_api_routes_batch_cancel_batch_jobop.py | 16 + .../jobs_api_routes_batch_get_batch_jobop.py | 59 + .../jobs_api_routes_batch_get_batch_jobsop.py | 108 + ..._fine_tuning_archive_fine_tuned_modelop.py | 18 + ...es_fine_tuning_cancel_fine_tuning_jobop.py | 45 + ...es_fine_tuning_create_fine_tuning_jobop.py | 38 + ...outes_fine_tuning_get_fine_tuning_jobop.py | 45 + ...utes_fine_tuning_get_fine_tuning_jobsop.py | 162 + ...tes_fine_tuning_start_fine_tuning_jobop.py | 43 + ...ine_tuning_unarchive_fine_tuned_modelop.py | 18 + ...s_fine_tuning_update_fine_tuned_modelop.py | 51 + src/mistralai/client/models/jobsout.py | 41 + src/mistralai/client/models/jsonschema.py | 61 + .../client/models/legacyjobmetadataout.py | 125 + .../client/models/libraries_delete_v1op.py | 16 + .../models/libraries_documents_delete_v1op.py | 21 + ...ents_get_extracted_text_signed_url_v1op.py | 21 + ...libraries_documents_get_signed_url_v1op.py | 21 + .../libraries_documents_get_status_v1op.py | 21 + ...braries_documents_get_text_content_v1op.py | 21 + .../models/libraries_documents_get_v1op.py | 21 + .../models/libraries_documents_list_v1op.py | 97 + 
.../libraries_documents_reprocess_v1op.py | 21 + .../models/libraries_documents_update_v1op.py | 28 + .../models/libraries_documents_upload_v1op.py | 56 + .../client/models/libraries_get_v1op.py | 16 + .../models/libraries_share_create_v1op.py | 22 + .../models/libraries_share_delete_v1op.py | 23 + .../models/libraries_share_list_v1op.py | 16 + .../client/models/libraries_update_v1op.py | 23 + src/mistralai/client/models/libraryin.py | 56 + .../client/models/libraryinupdate.py | 53 + src/mistralai/client/models/libraryout.py | 116 + .../client/models/listdocumentout.py | 19 + src/mistralai/client/models/listfilesout.py | 58 + src/mistralai/client/models/listlibraryout.py | 15 + src/mistralai/client/models/listsharingout.py | 15 + src/mistralai/client/models/messageentries.py | 18 + .../models/messageinputcontentchunks.py | 28 + .../client/models/messageinputentry.py | 111 + .../models/messageoutputcontentchunks.py | 37 + .../client/models/messageoutputentry.py | 109 + .../client/models/messageoutputevent.py | 101 + src/mistralai/client/models/metricout.py | 60 + src/mistralai/client/models/mistralerror.py | 30 + .../client/models/mistralpromptmode.py | 12 + .../client/models/modelcapabilities.py | 41 + .../client/models/modelconversation.py | 139 + src/mistralai/client/models/modellist.py | 34 + .../client/models/moderationobject.py | 21 + .../client/models/moderationresponse.py | 21 + .../client/models/no_response_error.py | 17 + src/mistralai/client/models/ocrimageobject.py | 89 + .../client/models/ocrpagedimensions.py | 25 + src/mistralai/client/models/ocrpageobject.py | 91 + src/mistralai/client/models/ocrrequest.py | 146 + src/mistralai/client/models/ocrresponse.py | 68 + src/mistralai/client/models/ocrtableobject.py | 34 + src/mistralai/client/models/ocrusageinfo.py | 57 + .../client/models/outputcontentchunks.py | 37 + src/mistralai/client/models/paginationinfo.py | 25 + src/mistralai/client/models/prediction.py | 29 + 
.../client/models/processingstatusout.py | 16 + .../models/realtimetranscriptionerror.py | 27 + .../realtimetranscriptionerrordetail.py | 29 + .../models/realtimetranscriptionsession.py | 20 + .../realtimetranscriptionsessioncreated.py | 30 + .../realtimetranscriptionsessionupdated.py | 30 + src/mistralai/client/models/referencechunk.py | 20 + src/mistralai/client/models/requestsource.py | 11 + .../client/models/responsedoneevent.py | 25 + .../client/models/responseerrorevent.py | 27 + src/mistralai/client/models/responseformat.py | 60 + .../client/models/responseformats.py | 11 + .../client/models/responsestartedevent.py | 24 + .../client/models/responsevalidationerror.py | 27 + ...retrieve_model_v1_models_model_id_getop.py | 38 + .../client/models/retrievefileout.py | 97 + src/mistralai/client/models/sampletype.py | 17 + src/mistralai/client/models/sdkerror.py | 40 + src/mistralai/client/models/security.py | 25 + src/mistralai/client/models/shareenum.py | 14 + src/mistralai/client/models/sharingdelete.py | 61 + src/mistralai/client/models/sharingin.py | 65 + src/mistralai/client/models/sharingout.py | 65 + src/mistralai/client/models/source.py | 15 + src/mistralai/client/models/ssetypes.py | 19 + src/mistralai/client/models/systemmessage.py | 35 + .../models/systemmessagecontentchunks.py | 21 + src/mistralai/client/models/textchunk.py | 20 + src/mistralai/client/models/thinkchunk.py | 35 + .../client/models/timestampgranularity.py | 10 + src/mistralai/client/models/tool.py | 19 + src/mistralai/client/models/toolcall.py | 25 + src/mistralai/client/models/toolchoice.py | 25 + src/mistralai/client/models/toolchoiceenum.py | 12 + .../client/models/toolexecutiondeltaevent.py | 44 + .../client/models/toolexecutiondoneevent.py | 44 + .../client/models/toolexecutionentry.py | 86 + .../models/toolexecutionstartedevent.py | 44 + src/mistralai/client/models/toolfilechunk.py | 75 + src/mistralai/client/models/toolmessage.py | 72 + .../client/models/toolreferencechunk.py | 80 
+ src/mistralai/client/models/tooltypes.py | 8 + src/mistralai/client/models/trainingfile.py | 17 + .../client/models/transcriptionresponse.py | 79 + .../models/transcriptionsegmentchunk.py | 86 + .../client/models/transcriptionstreamdone.py | 85 + .../models/transcriptionstreamevents.py | 58 + .../models/transcriptionstreameventtypes.py | 12 + .../models/transcriptionstreamlanguage.py | 35 + .../models/transcriptionstreamsegmentdelta.py | 83 + .../models/transcriptionstreamtextdelta.py | 35 + .../client/models/unarchiveftmodelout.py | 23 + .../client/models/updateftmodelin.py | 53 + src/mistralai/client/models/uploadfileout.py | 94 + src/mistralai/client/models/usageinfo.py | 82 + src/mistralai/client/models/usermessage.py | 60 + .../client/models/validationerror.py | 26 + .../client/models/wandbintegration.py | 72 + .../client/models/wandbintegrationout.py | 70 + .../client/models/websearchpremiumtool.py | 17 + src/mistralai/client/models/websearchtool.py | 17 + src/mistralai/client/models_.py | 1063 +++++++ src/mistralai/client/ocr.py | 303 ++ src/mistralai/client/py.typed | 1 + src/mistralai/client/sdk.py | 222 ++ src/mistralai/client/sdkconfiguration.py | 53 + src/mistralai/client/transcriptions.py | 481 +++ src/mistralai/client/types/__init__.py | 21 + src/mistralai/client/types/basemodel.py | 77 + src/mistralai/client/utils/__init__.py | 197 ++ src/mistralai/client/utils/annotations.py | 79 + src/mistralai/client/utils/datetimes.py | 23 + src/mistralai/client/utils/enums.py | 134 + src/mistralai/client/utils/eventstreaming.py | 248 ++ src/mistralai/client/utils/forms.py | 234 ++ src/mistralai/client/utils/headers.py | 136 + src/mistralai/client/utils/logger.py | 27 + src/mistralai/client/utils/metadata.py | 118 + src/mistralai/client/utils/queryparams.py | 217 ++ src/mistralai/client/utils/requestbodies.py | 66 + src/mistralai/client/utils/retries.py | 281 ++ src/mistralai/client/utils/security.py | 192 ++ src/mistralai/client/utils/serializers.py | 229 ++ 
.../client/utils/unmarshal_json_response.py | 38 + src/mistralai/client/utils/url.py | 155 + src/mistralai/client/utils/values.py | 137 + uv.lock | 2 +- 333 files changed, 37507 insertions(+), 1311 deletions(-) create mode 100644 src/mistralai/client/__init__.py create mode 100644 src/mistralai/client/_hooks/__init__.py create mode 100644 src/mistralai/client/_hooks/registration.py create mode 100644 src/mistralai/client/_hooks/sdkhooks.py create mode 100644 src/mistralai/client/_hooks/types.py create mode 100644 src/mistralai/client/_version.py create mode 100644 src/mistralai/client/accesses.py create mode 100644 src/mistralai/client/agents.py create mode 100644 src/mistralai/client/audio.py create mode 100644 src/mistralai/client/basesdk.py create mode 100644 src/mistralai/client/batch.py create mode 100644 src/mistralai/client/beta.py create mode 100644 src/mistralai/client/chat.py create mode 100644 src/mistralai/client/classifiers.py create mode 100644 src/mistralai/client/conversations.py create mode 100644 src/mistralai/client/documents.py create mode 100644 src/mistralai/client/embeddings.py create mode 100644 src/mistralai/client/files.py create mode 100644 src/mistralai/client/fim.py create mode 100644 src/mistralai/client/fine_tuning.py create mode 100644 src/mistralai/client/httpclient.py create mode 100644 src/mistralai/client/jobs.py create mode 100644 src/mistralai/client/libraries.py create mode 100644 src/mistralai/client/mistral_agents.py create mode 100644 src/mistralai/client/mistral_jobs.py create mode 100644 src/mistralai/client/models/__init__.py create mode 100644 src/mistralai/client/models/agent.py create mode 100644 src/mistralai/client/models/agentaliasresponse.py create mode 100644 src/mistralai/client/models/agentconversation.py create mode 100644 src/mistralai/client/models/agentcreationrequest.py create mode 100644 src/mistralai/client/models/agenthandoffdoneevent.py create mode 100644 
src/mistralai/client/models/agenthandoffentry.py create mode 100644 src/mistralai/client/models/agenthandoffstartedevent.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_deleteop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_get_versionop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_getop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_listop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_update_versionop.py create mode 100644 src/mistralai/client/models/agents_api_v1_agents_updateop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_appendop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_deleteop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_getop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_historyop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_listop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_messagesop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py create mode 100644 src/mistralai/client/models/agents_api_v1_conversations_restartop.py create mode 100644 src/mistralai/client/models/agentscompletionrequest.py create mode 100644 src/mistralai/client/models/agentscompletionstreamrequest.py create mode 100644 src/mistralai/client/models/agentupdaterequest.py create mode 100644 src/mistralai/client/models/apiendpoint.py create mode 100644 
src/mistralai/client/models/archiveftmodelout.py create mode 100644 src/mistralai/client/models/assistantmessage.py create mode 100644 src/mistralai/client/models/audiochunk.py create mode 100644 src/mistralai/client/models/audioencoding.py create mode 100644 src/mistralai/client/models/audioformat.py create mode 100644 src/mistralai/client/models/audiotranscriptionrequest.py create mode 100644 src/mistralai/client/models/audiotranscriptionrequeststream.py create mode 100644 src/mistralai/client/models/basemodelcard.py create mode 100644 src/mistralai/client/models/batcherror.py create mode 100644 src/mistralai/client/models/batchjobin.py create mode 100644 src/mistralai/client/models/batchjobout.py create mode 100644 src/mistralai/client/models/batchjobsout.py create mode 100644 src/mistralai/client/models/batchjobstatus.py create mode 100644 src/mistralai/client/models/batchrequest.py create mode 100644 src/mistralai/client/models/builtinconnectors.py create mode 100644 src/mistralai/client/models/chatclassificationrequest.py create mode 100644 src/mistralai/client/models/chatcompletionchoice.py create mode 100644 src/mistralai/client/models/chatcompletionrequest.py create mode 100644 src/mistralai/client/models/chatcompletionresponse.py create mode 100644 src/mistralai/client/models/chatcompletionstreamrequest.py create mode 100644 src/mistralai/client/models/chatmoderationrequest.py create mode 100644 src/mistralai/client/models/checkpointout.py create mode 100644 src/mistralai/client/models/classificationrequest.py create mode 100644 src/mistralai/client/models/classificationresponse.py create mode 100644 src/mistralai/client/models/classificationtargetresult.py create mode 100644 src/mistralai/client/models/classifierdetailedjobout.py create mode 100644 src/mistralai/client/models/classifierftmodelout.py create mode 100644 src/mistralai/client/models/classifierjobout.py create mode 100644 src/mistralai/client/models/classifiertargetin.py create mode 100644 
src/mistralai/client/models/classifiertargetout.py create mode 100644 src/mistralai/client/models/classifiertrainingparameters.py create mode 100644 src/mistralai/client/models/classifiertrainingparametersin.py create mode 100644 src/mistralai/client/models/codeinterpretertool.py create mode 100644 src/mistralai/client/models/completionargs.py create mode 100644 src/mistralai/client/models/completionargsstop.py create mode 100644 src/mistralai/client/models/completionchunk.py create mode 100644 src/mistralai/client/models/completiondetailedjobout.py create mode 100644 src/mistralai/client/models/completionevent.py create mode 100644 src/mistralai/client/models/completionftmodelout.py create mode 100644 src/mistralai/client/models/completionjobout.py create mode 100644 src/mistralai/client/models/completionresponsestreamchoice.py create mode 100644 src/mistralai/client/models/completiontrainingparameters.py create mode 100644 src/mistralai/client/models/completiontrainingparametersin.py create mode 100644 src/mistralai/client/models/contentchunk.py create mode 100644 src/mistralai/client/models/conversationappendrequest.py create mode 100644 src/mistralai/client/models/conversationappendstreamrequest.py create mode 100644 src/mistralai/client/models/conversationevents.py create mode 100644 src/mistralai/client/models/conversationhistory.py create mode 100644 src/mistralai/client/models/conversationinputs.py create mode 100644 src/mistralai/client/models/conversationmessages.py create mode 100644 src/mistralai/client/models/conversationrequest.py create mode 100644 src/mistralai/client/models/conversationresponse.py create mode 100644 src/mistralai/client/models/conversationrestartrequest.py create mode 100644 src/mistralai/client/models/conversationrestartstreamrequest.py create mode 100644 src/mistralai/client/models/conversationstreamrequest.py create mode 100644 src/mistralai/client/models/conversationusageinfo.py create mode 100644 
src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py create mode 100644 src/mistralai/client/models/deletefileout.py create mode 100644 src/mistralai/client/models/deletemodelout.py create mode 100644 src/mistralai/client/models/deltamessage.py create mode 100644 src/mistralai/client/models/documentlibrarytool.py create mode 100644 src/mistralai/client/models/documentout.py create mode 100644 src/mistralai/client/models/documenttextcontent.py create mode 100644 src/mistralai/client/models/documentupdatein.py create mode 100644 src/mistralai/client/models/documenturlchunk.py create mode 100644 src/mistralai/client/models/embeddingdtype.py create mode 100644 src/mistralai/client/models/embeddingrequest.py create mode 100644 src/mistralai/client/models/embeddingresponse.py create mode 100644 src/mistralai/client/models/embeddingresponsedata.py create mode 100644 src/mistralai/client/models/encodingformat.py create mode 100644 src/mistralai/client/models/entitytype.py create mode 100644 src/mistralai/client/models/eventout.py create mode 100644 src/mistralai/client/models/file.py create mode 100644 src/mistralai/client/models/filechunk.py create mode 100644 src/mistralai/client/models/filepurpose.py create mode 100644 src/mistralai/client/models/files_api_routes_delete_fileop.py create mode 100644 src/mistralai/client/models/files_api_routes_download_fileop.py create mode 100644 src/mistralai/client/models/files_api_routes_get_signed_urlop.py create mode 100644 src/mistralai/client/models/files_api_routes_list_filesop.py create mode 100644 src/mistralai/client/models/files_api_routes_retrieve_fileop.py create mode 100644 src/mistralai/client/models/files_api_routes_upload_fileop.py create mode 100644 src/mistralai/client/models/fileschema.py create mode 100644 src/mistralai/client/models/filesignedurl.py create mode 100644 src/mistralai/client/models/fimcompletionrequest.py create mode 100644 src/mistralai/client/models/fimcompletionresponse.py 
create mode 100644 src/mistralai/client/models/fimcompletionstreamrequest.py create mode 100644 src/mistralai/client/models/finetuneablemodeltype.py create mode 100644 src/mistralai/client/models/ftclassifierlossfunction.py create mode 100644 src/mistralai/client/models/ftmodelcapabilitiesout.py create mode 100644 src/mistralai/client/models/ftmodelcard.py create mode 100644 src/mistralai/client/models/function.py create mode 100644 src/mistralai/client/models/functioncall.py create mode 100644 src/mistralai/client/models/functioncallentry.py create mode 100644 src/mistralai/client/models/functioncallentryarguments.py create mode 100644 src/mistralai/client/models/functioncallevent.py create mode 100644 src/mistralai/client/models/functionname.py create mode 100644 src/mistralai/client/models/functionresultentry.py create mode 100644 src/mistralai/client/models/functiontool.py create mode 100644 src/mistralai/client/models/githubrepositoryin.py create mode 100644 src/mistralai/client/models/githubrepositoryout.py create mode 100644 src/mistralai/client/models/httpvalidationerror.py create mode 100644 src/mistralai/client/models/imagegenerationtool.py create mode 100644 src/mistralai/client/models/imageurl.py create mode 100644 src/mistralai/client/models/imageurlchunk.py create mode 100644 src/mistralai/client/models/inputentries.py create mode 100644 src/mistralai/client/models/inputs.py create mode 100644 src/mistralai/client/models/instructrequest.py create mode 100644 src/mistralai/client/models/jobin.py create mode 100644 src/mistralai/client/models/jobmetadataout.py create mode 100644 src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py create mode 100644 
src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py create mode 100644 src/mistralai/client/models/jobsout.py create mode 100644 src/mistralai/client/models/jsonschema.py create mode 100644 src/mistralai/client/models/legacyjobmetadataout.py create mode 100644 src/mistralai/client/models/libraries_delete_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_delete_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_get_status_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_get_text_content_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_get_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_list_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_reprocess_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_update_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_upload_v1op.py create mode 100644 src/mistralai/client/models/libraries_get_v1op.py create mode 100644 src/mistralai/client/models/libraries_share_create_v1op.py create mode 100644 
src/mistralai/client/models/libraries_share_delete_v1op.py create mode 100644 src/mistralai/client/models/libraries_share_list_v1op.py create mode 100644 src/mistralai/client/models/libraries_update_v1op.py create mode 100644 src/mistralai/client/models/libraryin.py create mode 100644 src/mistralai/client/models/libraryinupdate.py create mode 100644 src/mistralai/client/models/libraryout.py create mode 100644 src/mistralai/client/models/listdocumentout.py create mode 100644 src/mistralai/client/models/listfilesout.py create mode 100644 src/mistralai/client/models/listlibraryout.py create mode 100644 src/mistralai/client/models/listsharingout.py create mode 100644 src/mistralai/client/models/messageentries.py create mode 100644 src/mistralai/client/models/messageinputcontentchunks.py create mode 100644 src/mistralai/client/models/messageinputentry.py create mode 100644 src/mistralai/client/models/messageoutputcontentchunks.py create mode 100644 src/mistralai/client/models/messageoutputentry.py create mode 100644 src/mistralai/client/models/messageoutputevent.py create mode 100644 src/mistralai/client/models/metricout.py create mode 100644 src/mistralai/client/models/mistralerror.py create mode 100644 src/mistralai/client/models/mistralpromptmode.py create mode 100644 src/mistralai/client/models/modelcapabilities.py create mode 100644 src/mistralai/client/models/modelconversation.py create mode 100644 src/mistralai/client/models/modellist.py create mode 100644 src/mistralai/client/models/moderationobject.py create mode 100644 src/mistralai/client/models/moderationresponse.py create mode 100644 src/mistralai/client/models/no_response_error.py create mode 100644 src/mistralai/client/models/ocrimageobject.py create mode 100644 src/mistralai/client/models/ocrpagedimensions.py create mode 100644 src/mistralai/client/models/ocrpageobject.py create mode 100644 src/mistralai/client/models/ocrrequest.py create mode 100644 src/mistralai/client/models/ocrresponse.py create mode 
100644 src/mistralai/client/models/ocrtableobject.py create mode 100644 src/mistralai/client/models/ocrusageinfo.py create mode 100644 src/mistralai/client/models/outputcontentchunks.py create mode 100644 src/mistralai/client/models/paginationinfo.py create mode 100644 src/mistralai/client/models/prediction.py create mode 100644 src/mistralai/client/models/processingstatusout.py create mode 100644 src/mistralai/client/models/realtimetranscriptionerror.py create mode 100644 src/mistralai/client/models/realtimetranscriptionerrordetail.py create mode 100644 src/mistralai/client/models/realtimetranscriptionsession.py create mode 100644 src/mistralai/client/models/realtimetranscriptionsessioncreated.py create mode 100644 src/mistralai/client/models/realtimetranscriptionsessionupdated.py create mode 100644 src/mistralai/client/models/referencechunk.py create mode 100644 src/mistralai/client/models/requestsource.py create mode 100644 src/mistralai/client/models/responsedoneevent.py create mode 100644 src/mistralai/client/models/responseerrorevent.py create mode 100644 src/mistralai/client/models/responseformat.py create mode 100644 src/mistralai/client/models/responseformats.py create mode 100644 src/mistralai/client/models/responsestartedevent.py create mode 100644 src/mistralai/client/models/responsevalidationerror.py create mode 100644 src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py create mode 100644 src/mistralai/client/models/retrievefileout.py create mode 100644 src/mistralai/client/models/sampletype.py create mode 100644 src/mistralai/client/models/sdkerror.py create mode 100644 src/mistralai/client/models/security.py create mode 100644 src/mistralai/client/models/shareenum.py create mode 100644 src/mistralai/client/models/sharingdelete.py create mode 100644 src/mistralai/client/models/sharingin.py create mode 100644 src/mistralai/client/models/sharingout.py create mode 100644 src/mistralai/client/models/source.py create mode 100644 
src/mistralai/client/models/ssetypes.py create mode 100644 src/mistralai/client/models/systemmessage.py create mode 100644 src/mistralai/client/models/systemmessagecontentchunks.py create mode 100644 src/mistralai/client/models/textchunk.py create mode 100644 src/mistralai/client/models/thinkchunk.py create mode 100644 src/mistralai/client/models/timestampgranularity.py create mode 100644 src/mistralai/client/models/tool.py create mode 100644 src/mistralai/client/models/toolcall.py create mode 100644 src/mistralai/client/models/toolchoice.py create mode 100644 src/mistralai/client/models/toolchoiceenum.py create mode 100644 src/mistralai/client/models/toolexecutiondeltaevent.py create mode 100644 src/mistralai/client/models/toolexecutiondoneevent.py create mode 100644 src/mistralai/client/models/toolexecutionentry.py create mode 100644 src/mistralai/client/models/toolexecutionstartedevent.py create mode 100644 src/mistralai/client/models/toolfilechunk.py create mode 100644 src/mistralai/client/models/toolmessage.py create mode 100644 src/mistralai/client/models/toolreferencechunk.py create mode 100644 src/mistralai/client/models/tooltypes.py create mode 100644 src/mistralai/client/models/trainingfile.py create mode 100644 src/mistralai/client/models/transcriptionresponse.py create mode 100644 src/mistralai/client/models/transcriptionsegmentchunk.py create mode 100644 src/mistralai/client/models/transcriptionstreamdone.py create mode 100644 src/mistralai/client/models/transcriptionstreamevents.py create mode 100644 src/mistralai/client/models/transcriptionstreameventtypes.py create mode 100644 src/mistralai/client/models/transcriptionstreamlanguage.py create mode 100644 src/mistralai/client/models/transcriptionstreamsegmentdelta.py create mode 100644 src/mistralai/client/models/transcriptionstreamtextdelta.py create mode 100644 src/mistralai/client/models/unarchiveftmodelout.py create mode 100644 src/mistralai/client/models/updateftmodelin.py create mode 100644 
src/mistralai/client/models/uploadfileout.py create mode 100644 src/mistralai/client/models/usageinfo.py create mode 100644 src/mistralai/client/models/usermessage.py create mode 100644 src/mistralai/client/models/validationerror.py create mode 100644 src/mistralai/client/models/wandbintegration.py create mode 100644 src/mistralai/client/models/wandbintegrationout.py create mode 100644 src/mistralai/client/models/websearchpremiumtool.py create mode 100644 src/mistralai/client/models/websearchtool.py create mode 100644 src/mistralai/client/models_.py create mode 100644 src/mistralai/client/ocr.py create mode 100644 src/mistralai/client/py.typed create mode 100644 src/mistralai/client/sdk.py create mode 100644 src/mistralai/client/sdkconfiguration.py create mode 100644 src/mistralai/client/transcriptions.py create mode 100644 src/mistralai/client/types/__init__.py create mode 100644 src/mistralai/client/types/basemodel.py create mode 100644 src/mistralai/client/utils/__init__.py create mode 100644 src/mistralai/client/utils/annotations.py create mode 100644 src/mistralai/client/utils/datetimes.py create mode 100644 src/mistralai/client/utils/enums.py create mode 100644 src/mistralai/client/utils/eventstreaming.py create mode 100644 src/mistralai/client/utils/forms.py create mode 100644 src/mistralai/client/utils/headers.py create mode 100644 src/mistralai/client/utils/logger.py create mode 100644 src/mistralai/client/utils/metadata.py create mode 100644 src/mistralai/client/utils/queryparams.py create mode 100644 src/mistralai/client/utils/requestbodies.py create mode 100644 src/mistralai/client/utils/retries.py create mode 100644 src/mistralai/client/utils/security.py create mode 100644 src/mistralai/client/utils/serializers.py create mode 100644 src/mistralai/client/utils/unmarshal_json_response.py create mode 100644 src/mistralai/client/utils/url.py create mode 100644 src/mistralai/client/utils/values.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 
f6c0f0a2..7aae1acb 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,19 +5,20 @@ management: docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 - releaseVersion: 1.12.0 - configChecksum: 862d9a8667674972c091f9db84d42ba0 + releaseVersion: 2.0.0a1 + configChecksum: d5e0f55b62bca3e8aab33c7955415e61 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 00cab5ea-60fa-456d-ad3f-1ae32427d619 - pristine_commit_hash: b6e4b5c0cd6a42df18b2e7aa44ac696d48576d06 - pristine_tree_hash: b358b046bcef8a5f9b8898d98a4d9fbf82b52e6e + generation_id: edcb81a1-4bcb-439e-bfcb-f30eaac48c6a + pristine_commit_hash: b192b65dd75820612c5c672593ed322d420d2c73 + pristine_tree_hash: 869c5c810e502634a018e5792d4c2efe2686dbad features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 + configurableModuleName: 0.2.0 constsAndDefaults: 1.0.5 core: 5.23.18 customCodeRegions: 0.1.1 @@ -57,8 +58,8 @@ trackedFiles: pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae USAGE.md: id: 3aed33ce6e6f - last_write_checksum: sha1:4b34a680cd5a2b2acbadc41d0b309b3f30c1dfe5 - pristine_git_object: a31d502f33508216f686f4328cbbc8c14f8170ee + last_write_checksum: sha1:b1cf4cc87111df10c55731b3f5abad22890387a2 + pristine_git_object: 1810386448a440cfc5f7b8579695b228ae40460d docs/models/agent.md: id: ffdbb4c53c87 last_write_checksum: sha1:ec6c799658040b3c75d6ae0572bb391c6aea3fd4 @@ -1781,68 +1782,68 @@ trackedFiles: pristine_git_object: 57b6acbbd3b85aae5a9b7e2f754689637c01a912 docs/sdks/accesses/README.md: id: 2ea167c2eff2 - last_write_checksum: sha1:ac4ec473f9991ea2ca3e66838f8f791a54d881e3 - pristine_git_object: 040bc24c6acb9153296e105009ac4ef251cc2dd4 + last_write_checksum: sha1:22bd7a11d44295c2f433955604d3578292f26c99 + pristine_git_object: 
64a1e749aeb6f2c32497a72a649ecc2b7549c077 docs/sdks/agents/README.md: id: 5965d8232fd8 - last_write_checksum: sha1:f368d2c40ad72aa9e8de04809bd300e935dbb63b - pristine_git_object: 173925eead663741af81d5f624c2964278bde979 + last_write_checksum: sha1:34e01f46c1a32020fa3eeb40fe80c3c0e8de0983 + pristine_git_object: 75efc492c4114417c22a796824ee971e9180104e docs/sdks/chat/README.md: id: 393193527c2c - last_write_checksum: sha1:931ab91704f496b220c7da1aa985cea14d969784 - pristine_git_object: 5bb24baa3444d72faace5473d0a775a0e5ad403e + last_write_checksum: sha1:7bc2201f585bea247c0bb148ecdea220bcb384e1 + pristine_git_object: 89c4fffbb777427723307b13c124668601ff5839 docs/sdks/classifiers/README.md: id: 74eb09b8d620 - last_write_checksum: sha1:d047af486fd4acd7f813232b20164eab11541c2d - pristine_git_object: e76efb79d8b1353208b42619f4cc5b688ef5d561 + last_write_checksum: sha1:f424721545e683e230ee0c612765be2bdb9897cd + pristine_git_object: 634ee419f3334ba50dd25f0e2340c32db1ec40b3 docs/sdks/conversations/README.md: id: e22a9d2c5424 - last_write_checksum: sha1:06b7381c76c258e2a2dca3764456105929d98315 - pristine_git_object: ca383176a8b349cbaa757690b3f7a2cefe22cb1a + last_write_checksum: sha1:5ed03d60808cff2539e0e83df4714b3a274208a0 + pristine_git_object: acd43cdb63edd23665e808aaccc6ab3a4dc3dc85 docs/sdks/documents/README.md: id: 9758e88a0a9d - last_write_checksum: sha1:84791e86c3b9c15f8fd16d2a3df6bd3685023a69 - pristine_git_object: d3f5a9757c2327dab8e5b1962542b37c5e2551af + last_write_checksum: sha1:d9bcb4bf6c2189c282844f81b456fb29654e384c + pristine_git_object: d90e7ee7aab234cb992a904088cbbf2e57dd0baa docs/sdks/embeddings/README.md: id: 15b5b04486c1 - last_write_checksum: sha1:4da183aaf0df15d3a027077784903d93d8ea58e0 - pristine_git_object: 4390b7bd999a75a608f324f685b2284a8fa277ec + last_write_checksum: sha1:46e57c7808ce9c24dd54c3562379d2ff3e0526e8 + pristine_git_object: 0be7ea6dcace678d12d7e7e4f8e88daf7570df5d docs/sdks/files/README.md: id: e576d7a117f0 - last_write_checksum: 
sha1:99d15a4acce49d5eca853b5a08fd81e76581dc52 - pristine_git_object: 57b53fc75208f4f6361636690b91564148448633 + last_write_checksum: sha1:22298532be84a02d4fc8a524d6baa4fab0adcec4 + pristine_git_object: 44c39f8a3bd783b5c592e4f22c453bd76cef434a docs/sdks/fim/README.md: id: 499b227bf6ca - last_write_checksum: sha1:824f7d1b58ff0b650367737c0e9b91a9d2d14a45 - pristine_git_object: db6f2e1b65866e1309d94e852fa0a1e82d2606fd + last_write_checksum: sha1:34ff7167b0597bf668ef75ede016cb8884372d1b + pristine_git_object: 3c8c59c79db12c916577d6c064ddb16a511513fd docs/sdks/jobs/README.md: id: 7371cdc8b89a - last_write_checksum: sha1:5117aebda0558e7b82150f0b91480e3362687a89 - pristine_git_object: 666224a728cc433bca9520437d36a2b526ac2df6 + last_write_checksum: sha1:5dcd708cfcbb00d0ab9d41311c363c6fdae101b0 + pristine_git_object: 9c44be7559e2b7127d43ff50777fd32c7cf8b6ee docs/sdks/libraries/README.md: id: df9a982905a3 - last_write_checksum: sha1:8769d4b43f93c744fca43c34a7d7e9d99122c886 - pristine_git_object: e672c190ad6ac4623f99357d7e59d52f6722518f + last_write_checksum: sha1:0c710c0395906333b85bedd516cfca7dcb3b9b42 + pristine_git_object: bbdacf0538c6c055fef0c0109aac163e987a3dd5 docs/sdks/mistralagents/README.md: id: 20b3478ad16d - last_write_checksum: sha1:c4e73cd96136392d01b0ce2a57bf0854d05688c0 - pristine_git_object: bdd8d588d88f4929c3b33bcecd72bbb5fce7402d + last_write_checksum: sha1:b2dcb1516dd05dc38e0e0305969de248994aade4 + pristine_git_object: fe0f6e35a445e17ccedc2031c4b4204f5cc4d650 docs/sdks/mistraljobs/README.md: id: 71aafa44d228 - last_write_checksum: sha1:255a4221b3b61ef247b39c9723a78408cda486d3 - pristine_git_object: f1aa3f61973b1ee48777afb7fecc4bdf459882a0 + last_write_checksum: sha1:212bc82280a58f896172d173e5be516b926bc11c + pristine_git_object: 8f2358de28e88ffd1e3750292488c486f7bb893b docs/sdks/models/README.md: id: b35bdf4bc7ed - last_write_checksum: sha1:8e256360d014fc3384256a9f155c6382f8e16a6d - pristine_git_object: d51866b6cff74932bf86c266f75773c2d3e74fd0 + 
last_write_checksum: sha1:ca13e994ae31ddf37628eba9cc68cf8f64b48404 + pristine_git_object: 6fa28ca2e25c0b2f3fbf044b706d19f01193fc3c docs/sdks/ocr/README.md: id: 545e35d2613e - last_write_checksum: sha1:25846e2fe16ecb69d94c0d53edb74c22419c49aa - pristine_git_object: efcb99314c7d07a3dc556c297333046fc5d9e097 + last_write_checksum: sha1:a8d22a86b79a0166ecec26a3e9379fa110d49b73 + pristine_git_object: 9fd9d6fc14c5874dbb819239ea677a171a26969b docs/sdks/transcriptions/README.md: id: 089cf94ecf47 - last_write_checksum: sha1:01e68371b7a94cb35d6435efd3ef9247e8c27a94 - pristine_git_object: dabab00e85a3f480c8dc3dd7b792e68420ae08b6 + last_write_checksum: sha1:493070fcce7cec1a627b04daa31c38a6745659e7 + pristine_git_object: 9691b81d3a7eb27d7b2b489408d32513859646c9 py.typed: id: 258c3ed47ae4 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 @@ -1851,1248 +1852,1248 @@ trackedFiles: id: fe273b08f514 last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 - src/mistralai/__init__.py: - id: 7aaa1403a9fc + src/mistralai/client/__init__.py: + id: f1b791f9d2a5 last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c - src/mistralai/_hooks/__init__.py: - id: 89bd3648c8ca + src/mistralai/client/_hooks/__init__.py: + id: cef9ff97efd7 last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 - src/mistralai/_hooks/sdkhooks.py: - id: a085b78b3f45 - last_write_checksum: sha1:1d9666df503110a00569c2a79886ac3be49a3ffb - pristine_git_object: 1f9a9316c430821226ada4db2b37f87083f1c326 - src/mistralai/_hooks/types.py: - id: 066b285c9341 - last_write_checksum: sha1:16bf3c53068c38ba0f838172787178c883551283 - pristine_git_object: 6d0f3e1166cb0271f89f5ba83441c88199d7a432 - src/mistralai/_version.py: - id: 37b53ba66d7f - last_write_checksum: 
sha1:a4d76992b028e2d138e2f7f6d3087c2a606a21c7 - pristine_git_object: 6ee91593a9fbcd6c53eae810c1c2d0120f56262e - src/mistralai/accesses.py: - id: 98cb4addd052 - last_write_checksum: sha1:5d9d495274d67b1343ba99d755c1c01c64c2ead1 - pristine_git_object: be02ee5bafa1b10a52e79d1ad5481fa80908d99a - src/mistralai/agents.py: - id: aa07ea92bffb - last_write_checksum: sha1:2a760562daf1a01a66e5250658dffc5043e3c8ea - pristine_git_object: 73e4ee3c885f7c3472a9dc5c0546c02d4e19a1c4 - src/mistralai/audio.py: - id: c398f6a11e24 - last_write_checksum: sha1:aa75fa00e00d8059121d8de60844d70d50203661 - pristine_git_object: 3de29053f34654907c423ca6600f216f6b0dcbe0 - src/mistralai/basesdk.py: - id: 3127264590ce - last_write_checksum: sha1:5340f1c5976fd87d3b17b285535b63bbbe7db120 - pristine_git_object: c9a32aa13eae485d0159632dadbfbb2452978709 - src/mistralai/batch.py: - id: 60df0c5efce3 - last_write_checksum: sha1:9d463fd6ac747635ab2b0e61c918a098aae5a370 - pristine_git_object: 7ed7ccefdaab2368dc7bb9fa8c718a05dcec3ca6 - src/mistralai/beta.py: - id: 7d1c8d453249 - last_write_checksum: sha1:780b45086f215d1f04983d1ea6c89acc16475cfc - pristine_git_object: 4bbf1fa36053c6754026285f3a149911b653d735 - src/mistralai/chat.py: - id: cb76f81a1426 - last_write_checksum: sha1:cf0a3b1b2d1163cb96c0c57d4cf0bede556c02b1 - pristine_git_object: 1528c4c93fc8b5f5d02976db836a1cefda4d1e57 - src/mistralai/classifiers.py: - id: a8f7d4c1c787 - last_write_checksum: sha1:6eabb0ba04fdf77d4bb9b45399c6f2ce55fe8317 - pristine_git_object: 7c32506ec03cc0fd88b786ff49d7690fd4283d2a - src/mistralai/conversations.py: - id: be58e57a6198 - last_write_checksum: sha1:b9287bbe777a042b8258494cd5162d32e6a89c20 - pristine_git_object: 194cb4c0a629654b31bbcce8391baf48601d0eb7 - src/mistralai/documents.py: - id: 1945602083a8 - last_write_checksum: sha1:14d1e6b5a95869d70a6fc89b07d5365c98aff5d7 - pristine_git_object: fac58fdb2e76668911fc6c59918b1b444aed0bd5 - src/mistralai/embeddings.py: - id: 2bbb9b5427d7 - last_write_checksum: 
sha1:842f784ab976936902be23331b672bdba8c88bc9 - pristine_git_object: 7430f8042df4fec517288d0ddb0eb174e7e43a8e - src/mistralai/files.py: - id: 0e29db0e2269 - last_write_checksum: sha1:d79d5b1785f441a46673a7efa108ddb98c44376a - pristine_git_object: 90ada0ff707521d59d329bebac74005eb68488d8 - src/mistralai/fim.py: - id: 71a865142baf - last_write_checksum: sha1:7accf79c11a17fefbacde7f2b0f966f3716233df - pristine_git_object: 53109c70f0ad9844a4c445a5ed674f675b24d274 - src/mistralai/fine_tuning.py: - id: 12578f7d13a6 - last_write_checksum: sha1:e48227f7ea5b51d837e7619f59582e663eb94ed1 - pristine_git_object: 8ed5788a58ab2e9d1125b30624c734a602084294 - src/mistralai/httpclient.py: - id: dcfb0dd6b386 + src/mistralai/client/_hooks/sdkhooks.py: + id: ed1e485b2153 + last_write_checksum: sha1:5688b56bf910f5f176bcacc58f4ad440ac2fa169 + pristine_git_object: c9318db481df2293b37e9b964da417ee5de86911 + src/mistralai/client/_hooks/types.py: + id: 85cfedfb7582 + last_write_checksum: sha1:ea20450ab595abb6ad744ecbd58927e8fa1ce520 + pristine_git_object: e7e1bb7f61527de6095357e4f2ab11e342a4af87 + src/mistralai/client/_version.py: + id: cc807b30de19 + last_write_checksum: sha1:e654adbd2f066332b48c68d97e995dcc8f7dde84 + pristine_git_object: 8c5d6e54860c69881bf976887910fc32d183c6e5 + src/mistralai/client/accesses.py: + id: 76fc53bfcf59 + last_write_checksum: sha1:da6c930bfec52d4cc344408f0aaef2874705fa68 + pristine_git_object: 307c7156626e735c802c149ea3547648ea03da09 + src/mistralai/client/agents.py: + id: e946546e3eaa + last_write_checksum: sha1:4a2bc22e5a6d9aee56d04d2800084eb326ef9ba7 + pristine_git_object: c04abd21b5b7cb9b8ddfdb52ec67fffa7d21759a + src/mistralai/client/audio.py: + id: 7a8ed2e90d61 + last_write_checksum: sha1:9ecd271eedf02703b45e6bc4280df10ed2edbbc8 + pristine_git_object: 28ccda1b533b4cef31844bddae2289268b459a24 + src/mistralai/client/basesdk.py: + id: 7518c67b81ea + last_write_checksum: sha1:c10ba4619b8c70ff876304a93b432d4466cb7112 + pristine_git_object: 
bddc9012f28f7881b75a720a07a3ad60845e472e + src/mistralai/client/batch.py: + id: cffe114c7ac7 + last_write_checksum: sha1:b7236249d2a6235fc3834b2c3bba3feda838013e + pristine_git_object: d53a45fbcbeb7b1d8fb29c373101c9e2a586b877 + src/mistralai/client/beta.py: + id: 981417f45147 + last_write_checksum: sha1:2cf61e620e0e0e969e951d100e42c8c9b8facd27 + pristine_git_object: b30003eae52be5e79838fe994cda8474068a43dc + src/mistralai/client/chat.py: + id: 7eba0f088d47 + last_write_checksum: sha1:46321214352946f2077a0f60c4c903c354a42da1 + pristine_git_object: 9c50bce81c264c70256b2ff8716e88216a78535f + src/mistralai/client/classifiers.py: + id: 26e773725732 + last_write_checksum: sha1:b3bed5a404f8837cc12e516f3fb85f47fd37518a + pristine_git_object: 537e2438afcb570a3e436ab4dd8b7d604b35b627 + src/mistralai/client/conversations.py: + id: 40692a878064 + last_write_checksum: sha1:fc75dc4099891c8cbfbcc72284bf8e7dbbb834a5 + pristine_git_object: 9caf42214daf262b15bac5b36467700ee17cd7d1 + src/mistralai/client/documents.py: + id: bcc17286c31c + last_write_checksum: sha1:82287ef513f2f5ee1acb9ffe8323f2dad0fc86f4 + pristine_git_object: 009a604f1c2fa367d14df7fb9f4078083c4be501 + src/mistralai/client/embeddings.py: + id: f9c17258207e + last_write_checksum: sha1:a3fa049388bf794ed764a1a8b6736f6c29136c83 + pristine_git_object: 359f2f621d1628536b89f66115726064db34e51b + src/mistralai/client/files.py: + id: f12df4b2ce43 + last_write_checksum: sha1:72c1fda19adff9042461f498d5859bae62d4603a + pristine_git_object: 97817eab1a4b0a0649a128b06a9f3ff4077dffa5 + src/mistralai/client/fim.py: + id: 217bea5d701d + last_write_checksum: sha1:d62f3bee1322a41aefc0cc01aa8313e8b7e3ae1b + pristine_git_object: 4a834fe93a9b9a8af30f681c9541a7cef0a513e1 + src/mistralai/client/fine_tuning.py: + id: 5d5079bbd54e + last_write_checksum: sha1:e8061f6bb9912d668249c3c20235e9778345d23b + pristine_git_object: c57425fdf3225eaeccee47a17db198a3974995a3 + src/mistralai/client/httpclient.py: + id: 3e46bde74327 last_write_checksum: 
sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 pristine_git_object: 89560b566073785535643e694c112bedbd3db13d - src/mistralai/jobs.py: - id: 6869267a98bf - last_write_checksum: sha1:e771ca001a64cc3be33964e95393495a16ab3d8c - pristine_git_object: df8ae4d3489f2791586ac6399bfe6039522f09b4 - src/mistralai/libraries.py: - id: e5b244f28b27 - last_write_checksum: sha1:7084d7b61238494f834fe20dcf387810e77f3eb0 - pristine_git_object: 32648937feb79adf6155423cbe9bac4d7fe52224 - src/mistralai/mistral_agents.py: - id: 671c4985aaa1 - last_write_checksum: sha1:1fe4fb4f2828b532ac3ddf3b72e748a53d5099e9 - pristine_git_object: 7fb0ce259cb1c1a3847c567bdc992c176489add6 - src/mistralai/mistral_jobs.py: - id: 18065a449da0 - last_write_checksum: sha1:fb205d962444f6aba163ecd3169c12489b3f0cc9 - pristine_git_object: d1aeec8a014b22e44f4fe5e751206c3648e875af - src/mistralai/models/__init__.py: - id: 3228134f03e5 + src/mistralai/client/jobs.py: + id: 22e6e695e52b + last_write_checksum: sha1:a040fec9c1a50ec603e2cd22284db526c177a55b + pristine_git_object: 848926eaca286f74b5cfd4b0f0f72a8e2222c52f + src/mistralai/client/libraries.py: + id: d43a5f78045f + last_write_checksum: sha1:5264a24b973f49b4ea7252868f4a76baba9093b4 + pristine_git_object: 03a547410e042c19329ea9a91eef1bf25ecdcbe1 + src/mistralai/client/mistral_agents.py: + id: bd22ff89d9bb + last_write_checksum: sha1:7b6d1ac9256c1f958bbc9cf18355b4407f0cffc4 + pristine_git_object: 2ac7a29e4d7ab72c5fa29d13e7a8e4648906ead0 + src/mistralai/client/mistral_jobs.py: + id: e925bb9b27ce + last_write_checksum: sha1:b1d8ecfe998d64637089eb4a5a4cfdf4735717d1 + pristine_git_object: eae4403326ecfdf432a1ca7feb260ffe8ec251cf + src/mistralai/client/models/__init__.py: + id: e0e8dad92725 last_write_checksum: sha1:cb1fb02e33b85bf82db7d6fd15b2cc3b109c5060 pristine_git_object: 23e652220f29a882748661a8c0d21aa2830471bf - src/mistralai/models/agent.py: - id: ca4162a131b1 - last_write_checksum: sha1:fe8a7c8c9c4ba59613d7d89f0c2e7a6958e25f85 - pristine_git_object: 
eb30905b3de2b69ece35bdd40f390b2fa6ffc5a8 - src/mistralai/models/agentaliasresponse.py: - id: d329dd68429e - last_write_checksum: sha1:a3ebf39f159f7cd63dbabd9ff2c79df97e43e41f - pristine_git_object: c0928da9c65c588c515f3f1668ccfb69d3a23861 - src/mistralai/models/agentconversation.py: - id: bd3035451c40 - last_write_checksum: sha1:724a256f4914116500fd962df4b3cfc79ea75c43 - pristine_git_object: 6007b5715fd4a463d25a244b716effafbeecace6 - src/mistralai/models/agentcreationrequest.py: - id: 87f33bd9ea58 - last_write_checksum: sha1:a6885376d36a5a17273d8d8d8d45e3d6c3ee1b9f - pristine_git_object: 6a14201eca82f26871ab4f87e547a5e9bcf3b933 - src/mistralai/models/agenthandoffdoneevent.py: - id: 496685a9343b - last_write_checksum: sha1:f03d37569960b56155e977aa68fbbaad8e25f687 - pristine_git_object: 1cdbf45652ff70d045c650734ab6bdc0eca97734 - src/mistralai/models/agenthandoffentry.py: - id: 836045caeb8f - last_write_checksum: sha1:e5c6b73014cd6859a47cb5958cdfa7b105e3aa3e - pristine_git_object: 66136256215caf7c1f174deec70ab9fbfff634fc - src/mistralai/models/agenthandoffstartedevent.py: - id: ce8e306fa522 - last_write_checksum: sha1:2b5bac2f628c0e7cdd6df73404f69f5d405e576c - pristine_git_object: 11bfa918903f8de96f98f722eaaf9a70b4fca8c1 - src/mistralai/models/agents_api_v1_agents_create_or_update_aliasop.py: - id: dd0e03fda847 - last_write_checksum: sha1:a0dd39bb4b0af3a15b1aa8427a6f07d1826c04dc - pristine_git_object: 6cf9d0e0644ce0afd5f673f18fdda9dcccb5f04c - src/mistralai/models/agents_api_v1_agents_deleteop.py: - id: 588791d168a1 - last_write_checksum: sha1:2dae37c3b9778d688663550b9803d52111577f3e - pristine_git_object: 38e04953cc320f503a2f6e77096985da60896f2a - src/mistralai/models/agents_api_v1_agents_get_versionop.py: - id: bdb81ef0e35a - last_write_checksum: sha1:372da3794afd45d442d56edd3ec3cc4907f88223 - pristine_git_object: fddb10dde6707b6641b035e372270991d349f4f3 - src/mistralai/models/agents_api_v1_agents_getop.py: - id: 2358eceee519 - last_write_checksum: 
sha1:dca59474f75a6636ecac8265cab1bb51d36df56a - pristine_git_object: 2b7d89a5b34f3e768a18f9edbdf712fbcf5c20e4 - src/mistralai/models/agents_api_v1_agents_list_version_aliasesop.py: - id: 51215b825530 - last_write_checksum: sha1:d24f8eff3bd19414c0a04e474b33e1c63861a1da - pristine_git_object: 650a7187a3ac419069440fe040a166a036835b37 - src/mistralai/models/agents_api_v1_agents_list_versionsop.py: - id: 5f680df288a9 - last_write_checksum: sha1:a236170f366d9701346b57f9ee4c788a9a2293e5 - pristine_git_object: cf988b3d3b5130ff49f7ec0accb30a8e9dbfe4e1 - src/mistralai/models/agents_api_v1_agents_listop.py: - id: 15579851e4fe - last_write_checksum: sha1:1268af12d397f86e0486c42ec8115103e29ee137 - pristine_git_object: 88b5bad107d28943de8f25cb26c6597da2eba31d - src/mistralai/models/agents_api_v1_agents_update_versionop.py: - id: 262e7a2f05e3 - last_write_checksum: sha1:faa5550d08ddbb8223e8e6f2fcea6f09408bd228 - pristine_git_object: 5e4b97b3b175a8485fd04adc5b92a4870a46bda9 - src/mistralai/models/agents_api_v1_agents_updateop.py: - id: 72f9d6466691 - last_write_checksum: sha1:9c99959045d9d182a9814954dcd769b294267165 - pristine_git_object: 32696fbe60f17067520bf574bac8144abeb7af3f - src/mistralai/models/agents_api_v1_conversations_append_streamop.py: - id: 89a020d8fdfd - last_write_checksum: sha1:ec2fbbc5017a2374ab3f75a33592399b83fcc5f6 - pristine_git_object: d2489ffb2e01dc6a4e93aee931723be55261ca6c - src/mistralai/models/agents_api_v1_conversations_appendop.py: - id: fd73b0582d26 - last_write_checksum: sha1:22f62e8277ae5845e2b3c41d81d962edc3592090 - pristine_git_object: ba37697ea506fe08ecee5ed7585a1deee56a0827 - src/mistralai/models/agents_api_v1_conversations_deleteop.py: - id: ecd0a5c14be5 - last_write_checksum: sha1:bd894dcef52e02541fa09ae0d51755dad946e3c2 - pristine_git_object: 94126cae1a7a4cd09037d8224cd79f63935a2636 - src/mistralai/models/agents_api_v1_conversations_getop.py: - id: 600a28e887fe - last_write_checksum: sha1:b2dbccf934677ed646bb9ad6e947787bb6c4235b - 
pristine_git_object: a37a61babd146035d51095143f8781c0d94be0c3 - src/mistralai/models/agents_api_v1_conversations_historyop.py: - id: 5e3db049c234 - last_write_checksum: sha1:fde97f139a93c4723abc4f08ebcf20afcdf67d54 - pristine_git_object: b8c33d1b1b18b0a0c6b263962efc1d84d066021a - src/mistralai/models/agents_api_v1_conversations_listop.py: - id: 3cf4a3751a1c - last_write_checksum: sha1:ac8ae982fc23123b8b3ce3c1ba58980a1c6e2119 - pristine_git_object: d314f83853dbef74fa2e5ce2b5a800843110cc14 - src/mistralai/models/agents_api_v1_conversations_messagesop.py: - id: c7eb683e873e - last_write_checksum: sha1:d96c4e78c4ce75b668bc23aec91be399a0d26541 - pristine_git_object: f0dac8bf6a58882b55c88b12e039357c5ff7dfe4 - src/mistralai/models/agents_api_v1_conversations_restart_streamop.py: - id: c9d4d80d68d5 - last_write_checksum: sha1:8a96d0ccbe2918a13e022f629ea62120e9ed5c0d - pristine_git_object: f39b74eb6358938de7fddf7d1fd92eb4fb011f6b - src/mistralai/models/agents_api_v1_conversations_restartop.py: - id: 9dadcde20152 - last_write_checksum: sha1:44a127399dfcbc7c07af3c686469bcbb6e798b40 - pristine_git_object: f706c066d1de93cf03c9a7829fc3ea79eddfc8ad - src/mistralai/models/agentscompletionrequest.py: - id: 843813a24928 - last_write_checksum: sha1:f84d77c55787a07c5a8f7cb25d13dc02762e5c80 - pristine_git_object: cc07a6bdd38e221e66ca4162ef74354ef1c9f5e2 - src/mistralai/models/agentscompletionstreamrequest.py: - id: 6be8367d3443 - last_write_checksum: sha1:7bc5fd554e4adf8d8eb0a8f81aae32266b174932 - pristine_git_object: d6a887be8f33db56ae0eec47b5300a3a29736067 - src/mistralai/models/agentupdaterequest.py: - id: 24e7a9fdb507 - last_write_checksum: sha1:a5bb4a17ff80a3471321d38faa1e6605ebe541a4 - pristine_git_object: e496907c084f0a6cf90de6ebbf508d3137699bf0 - src/mistralai/models/apiendpoint.py: - id: b26effd643dc - last_write_checksum: sha1:07ba583784d9099e6a24e94805a405112e2fcb41 - pristine_git_object: 0ad9366f0efbcf989f63fa66750dce2ecc5bb56a - src/mistralai/models/archiveftmodelout.py: - 
id: 48fc1069be95 - last_write_checksum: sha1:c3c6b5ae470f23805201cd5565fca095bc9b7a74 - pristine_git_object: 0f753cfc948282f4ee5004fe463c091ed99e83a7 - src/mistralai/models/assistantmessage.py: - id: e73f1d43e4ad - last_write_checksum: sha1:b5d1d0a77b9a4e2f7272ff9fe7e319c2bc1bdb25 - pristine_git_object: a38a10c4968634d64f4bdb58d74f4955b29a92a8 - src/mistralai/models/audiochunk.py: - id: ad7cf79b2cca - last_write_checksum: sha1:c13008582708d368c3dee398cc4226f747b5a9d0 - pristine_git_object: 64fc43ff4c4ebb99b7a6c7aa3090b13ba4a2bdbc - src/mistralai/models/audioencoding.py: - id: f4713d60f468 - last_write_checksum: sha1:ffd1fd54680ea0bab343bdb22145b9eabc25c68d - pristine_git_object: 13eb6d1567f768da3753a73ddba9fa5e3ebfa7b3 - src/mistralai/models/audioformat.py: - id: 3572f5e8c65b - last_write_checksum: sha1:7259b46ebe4044633c0251eea5b3c88dedcc76a6 - pristine_git_object: 48ab648c3525fcc9fe1c722b7beee0f649e30e7a - src/mistralai/models/audiotranscriptionrequest.py: - id: 4c6a6fee484a - last_write_checksum: sha1:8dd41335ffd46dd1099bdb20baac32d043c5936c - pristine_git_object: 86417b4235292de3ab1d2b46116ce0ba94010087 - src/mistralai/models/audiotranscriptionrequeststream.py: - id: 863eca721e72 - last_write_checksum: sha1:010618236f3da1c99d63d334266622cf84e6b09f - pristine_git_object: 1f4087e8d33c8a3560d5ce58f2a1a7bc4627556b - src/mistralai/models/basemodelcard.py: - id: 5554644ee6f2 - last_write_checksum: sha1:aa5af32cda04d45bcdf2c8fb380529c4fbc828aa - pristine_git_object: 706841b7fc71051890201445050b5383c4b0e998 - src/mistralai/models/batcherror.py: - id: 657a766ed6c7 - last_write_checksum: sha1:5d727f59bbc23e36747af5e95ce20fcbf4ab3f7c - pristine_git_object: 4f8234465c57779d026fe65e131ba4cbe2746d40 - src/mistralai/models/batchjobin.py: - id: 7229d3fdd93b - last_write_checksum: sha1:074e8efd2474a1bf0949a7abcb90d3504a742f94 - pristine_git_object: 839a9b3cadb96986537422bc2a49532fcf9c2029 - src/mistralai/models/batchjobout.py: - id: 420d2a600dfe - last_write_checksum: 
sha1:486ecb38d44e9e3f8509504e30fe902f6869da1b - pristine_git_object: 904cd3496134ca38b8e53772f7b30e812bb92e65 - src/mistralai/models/batchjobsout.py: - id: 7bd4a7b41c82 - last_write_checksum: sha1:838e36e981a3dedb54663a32d8657d2a6ffaa364 - pristine_git_object: a1eba5db0ab8d8308b9e933352b55e32b80f33c7 - src/mistralai/models/batchjobstatus.py: - id: ee3393d6b301 + src/mistralai/client/models/agent.py: + id: 1336849c84fb + last_write_checksum: sha1:68609569847b9d638d948deba9563d5460c17b9f + pristine_git_object: 3bedb3a3a71c116f5ccb0294bc9f3ce6690e47b2 + src/mistralai/client/models/agentaliasresponse.py: + id: 3899a98a55dd + last_write_checksum: sha1:6dfa55d4b61a543382fab8e3a6e6d824feb5cfc7 + pristine_git_object: 4bc8225c0217f741328d52ef7df38f7a9c77af21 + src/mistralai/client/models/agentconversation.py: + id: 1b7d73eddf51 + last_write_checksum: sha1:2624deece37e8819cb0f60bbacbbf1922aa2c99c + pristine_git_object: 5dfa8c3137c59be90c655ba8cf8afb8a3966c93a + src/mistralai/client/models/agentcreationrequest.py: + id: 35b7f4933b3e + last_write_checksum: sha1:60caa3dfa2425ac3ff4e64d81ac9d18df0774157 + pristine_git_object: 61a5aff554f830ab9057ce9ceafc2ce78380290f + src/mistralai/client/models/agenthandoffdoneevent.py: + id: 82628bb5fcea + last_write_checksum: sha1:79de1153a3fce681ee547cc1d3bd0fd8fc5598d2 + pristine_git_object: c826aa5e1f2324cddb740b3ffc05095ff26c666d + src/mistralai/client/models/agenthandoffentry.py: + id: 5030bcaa3a07 + last_write_checksum: sha1:86622620c14e2aacbdcc47b9772a3b9bb4127018 + pristine_git_object: 0b0de13f8840e9ab221ea233040ca03241cba8b7 + src/mistralai/client/models/agenthandoffstartedevent.py: + id: 2f6093d9b222 + last_write_checksum: sha1:772bc7b396285560cdafd7d7fb4bc4ece79179ad + pristine_git_object: 4b8ff1e5e3639fb94b55c0a417e9478d5a4252b2 + src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py: + id: 23a832f8f175 + last_write_checksum: sha1:9ca9a0be2db68005fc0dec3f24d24fccf8d0c631 + pristine_git_object: 
33da325cadf36ce8162bac11f1576872bcbbdbd6 + src/mistralai/client/models/agents_api_v1_agents_deleteop.py: + id: 95adb6768908 + last_write_checksum: sha1:9118fb084668440cec39ddd47b613fb4cd796c8d + pristine_git_object: 58fe902f0a51b50db869dfa760f1a3a4cba36342 + src/mistralai/client/models/agents_api_v1_agents_get_versionop.py: + id: ef9914284afb + last_write_checksum: sha1:d9b429cd8ea7d20050c0bc2077eec0084ed916b6 + pristine_git_object: edcccda19d5c3e784a227c6356285ee48be3d7f2 + src/mistralai/client/models/agents_api_v1_agents_getop.py: + id: f5918c34f1c7 + last_write_checksum: sha1:412df95a1ac4b4f6a59e4391fd1226f2e26e4537 + pristine_git_object: d4817457a33d49ddaa09e8d41f3b03b69e8e491e + src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py: + id: a04815e6c798 + last_write_checksum: sha1:7bd6ba32e2aeeee4c34f02bab1d460eb384f9229 + pristine_git_object: b9770fffe5be41579f12d76f41a049e8b41b3ef8 + src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py: + id: 19e3310c3907 + last_write_checksum: sha1:62b3b94ad3ed412f74cfc75572a91b7f3cd6b39b + pristine_git_object: 813335f9e972c976f0e887d1f26be3c224b36b0c + src/mistralai/client/models/agents_api_v1_agents_listop.py: + id: 25a6460a6e19 + last_write_checksum: sha1:586ad2257e4a2c70bdb6d0044afe7d1b20f23d93 + pristine_git_object: 119f51236dda0769ab3dc41a9dbbb11b5d5e935d + src/mistralai/client/models/agents_api_v1_agents_update_versionop.py: + id: 63f61b8891bf + last_write_checksum: sha1:b214f6850347e4c98930ef6f019fdad52668c8c0 + pristine_git_object: 116f952b2ba2a7dca47975a339267c85122cd29a + src/mistralai/client/models/agents_api_v1_agents_updateop.py: + id: bb55993c932d + last_write_checksum: sha1:28cd6d0b729745b2e16d91a5e005d59a6d3be124 + pristine_git_object: 116acaa741f79123e682db0be2adbb98cf8283d8 + src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py: + id: ec00e0905f15 + last_write_checksum: sha1:67967a775c3a1ec139ccd6991465ea15327e3ba7 + pristine_git_object: 
9f00ffd4b484f03dae6e670d019f61a4392afc85 + src/mistralai/client/models/agents_api_v1_conversations_appendop.py: + id: 39c6125e850c + last_write_checksum: sha1:93621c5ea8fbc5c038c92596b7d4c0aef0a01e2f + pristine_git_object: 13d07ba91207f82dcea8f58c238cc743cd6c3964 + src/mistralai/client/models/agents_api_v1_conversations_deleteop.py: + id: 0792e6abbdcb + last_write_checksum: sha1:dc60f272fed790bec27c654da0fb185aab27ff82 + pristine_git_object: 81066f90302d79bc2083d1e31aa13656c27cc65f + src/mistralai/client/models/agents_api_v1_conversations_getop.py: + id: c530f2fc64d0 + last_write_checksum: sha1:28cab443af4d623a22e836ab876da20d84eb8a41 + pristine_git_object: c919f99e38148fb9b2d51816d0dd231ee828b11d + src/mistralai/client/models/agents_api_v1_conversations_historyop.py: + id: 2f5ca33768aa + last_write_checksum: sha1:9f33f183cd07b823b4727662ea305c74853049c5 + pristine_git_object: ba1f8890c1083947e4d6882dff2b50b3987be738 + src/mistralai/client/models/agents_api_v1_conversations_listop.py: + id: 936e36181d36 + last_write_checksum: sha1:b338f793707c25ce9703266d8b7f6f560051b057 + pristine_git_object: bb3c7127c4b43019405689dc2ae10f5933c763bc + src/mistralai/client/models/agents_api_v1_conversations_messagesop.py: + id: b5141764a708 + last_write_checksum: sha1:0be49e2ad8a3edb079ce4b1f092654c7a6b7e309 + pristine_git_object: e05728f2c2c0a350bdaf72fe9dc488c923230ab7 + src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py: + id: c284a1711148 + last_write_checksum: sha1:ef22ebf2e217ab41ce0b69cf388122ee18ad7b05 + pristine_git_object: 9b489ab46486cc37349d64a4fc685f1355afb79a + src/mistralai/client/models/agents_api_v1_conversations_restartop.py: + id: 3ba234e5a8fc + last_write_checksum: sha1:e7e22098d8b31f5cc5cb0e8fafebe515842c2f88 + pristine_git_object: 8bce3ce519a69a6d1cb36383b22fb801768c4868 + src/mistralai/client/models/agentscompletionrequest.py: + id: 3960bc4c545f + last_write_checksum: sha1:7f2176c96916c85ac43278f3ac23fe5e3da35aca + 
pristine_git_object: 22368e44adb1b3ecff58d2b92592710335a062b9 + src/mistralai/client/models/agentscompletionstreamrequest.py: + id: 1b73f90befc2 + last_write_checksum: sha1:8126924507b41754ec1d4a10613cf189f5ea0aea + pristine_git_object: 37d46c79d8964d799679413e14122a5146799eb6 + src/mistralai/client/models/agentupdaterequest.py: + id: 2d5a3a437819 + last_write_checksum: sha1:97509eeb4cd25d31a0e1f3b4de1288580cb9a5cb + pristine_git_object: 261ac069ce4e2b630d39080edf47bf2ad510ffb4 + src/mistralai/client/models/apiendpoint.py: + id: 00b34ce0a24d + last_write_checksum: sha1:0a1a08e7faaa7be804de952248b4f715c942af9a + pristine_git_object: a6072d568e08ab1f5e010d5924794adfb2188920 + src/mistralai/client/models/archiveftmodelout.py: + id: bab499599d30 + last_write_checksum: sha1:352eb0aca8368d29ef1b68820540363e8fa69be4 + pristine_git_object: 6108c7e153abecfc85be93b6fa1f9f22480f6d9b + src/mistralai/client/models/assistantmessage.py: + id: 2b49546e0742 + last_write_checksum: sha1:235a0f8d14b3100f5c498a9784ddda1f824a77a9 + pristine_git_object: 3ba14ce78e01c92458477bb025b9e5ded074fd4d + src/mistralai/client/models/audiochunk.py: + id: ce5dce4dced2 + last_write_checksum: sha1:6d8ed87fd3f114b2b04aa15dd24d0dd5b1837215 + pristine_git_object: 80d836f27ae65f30c6ca0e1d4d5d585bbf498cfd + src/mistralai/client/models/audioencoding.py: + id: b14e6a50f730 + last_write_checksum: sha1:8c8d6c1da3958200bf774313c485189426439545 + pristine_git_object: 557f53ed7a90f05e5c457f8b217d3df07e113e0b + src/mistralai/client/models/audioformat.py: + id: c8655712c218 + last_write_checksum: sha1:baef21b264f77117bbaa1336d7efefae916b9119 + pristine_git_object: 7ea10b3ad610aa1500fd25500ff942988ea0e1db + src/mistralai/client/models/audiotranscriptionrequest.py: + id: e4148b4d23e7 + last_write_checksum: sha1:52c245a739864ca838d4c4ef4bdf74e7b0c60f2e + pristine_git_object: 78a3797882841a6fd1251d72756f6b75f6d01006 + src/mistralai/client/models/audiotranscriptionrequeststream.py: + id: 33a07317a3b3 + 
last_write_checksum: sha1:e468052c9ab8681ff0e1121e61aff406fc4427fc + pristine_git_object: 350643614e23002bc55e99e2d1807bedd80a0613 + src/mistralai/client/models/basemodelcard.py: + id: 556ebdc33276 + last_write_checksum: sha1:f524e61a160af83b20f7901afc585f61bfad6e05 + pristine_git_object: 8ce7f139b6018c4a7358a21534532cd3e741fa8a + src/mistralai/client/models/batcherror.py: + id: 1563e2a576ec + last_write_checksum: sha1:239f9c44477941c45a3e7fe863828299d36267d6 + pristine_git_object: a9c8362bfa08ab4727f08a6dd2b44a71040560f7 + src/mistralai/client/models/batchjobin.py: + id: 72b25c2038d4 + last_write_checksum: sha1:0064f199b6f27b5101f6a9abf0532f61c522e2c8 + pristine_git_object: 39cf70b5bdf8db8adaa5c9d1dd8a227b2365879b + src/mistralai/client/models/batchjobout.py: + id: cbf1d872a46e + last_write_checksum: sha1:44a92b4f427b77db29294a3b6d375f8622660ee1 + pristine_git_object: 008d43b4340cf8853fac751fb6f15525f765fe39 + src/mistralai/client/models/batchjobsout.py: + id: 20b2516e7efa + last_write_checksum: sha1:7d4223363e861137b9bce0dc78460c732a63c90b + pristine_git_object: 2654dac04c126a933f6d045f43f16a30263750dc + src/mistralai/client/models/batchjobstatus.py: + id: 61e08cf5eea9 last_write_checksum: sha1:9e042ccd0901fe4fc08fcc8abe5a3f3e1ffe9cbb pristine_git_object: 4b28059ba71b394d91f32dba3ba538a73c9af7a5 - src/mistralai/models/batchrequest.py: - id: 6b77bb906183 - last_write_checksum: sha1:5f4b2f5804c689e3468fe93e2b7855f2f164bbe8 - pristine_git_object: 3d1e98f7a1162abadd37d6661841727d33dbafd7 - src/mistralai/models/builtinconnectors.py: - id: 611d5b9f6fa4 + src/mistralai/client/models/batchrequest.py: + id: 6f36819eeb46 + last_write_checksum: sha1:0ce0e6982c96933e73a31c6ebfb29f78b6ebf13b + pristine_git_object: 24f50a9af9a74f6bec7e8903a966d114966a36d3 + src/mistralai/client/models/builtinconnectors.py: + id: 2d276ce938dc last_write_checksum: sha1:4e94744e3854d4cdc9d1272e4f1d9371f9829a5f pristine_git_object: 6a3b2476d54096722eb3e7a271629d108028bd35 - 
src/mistralai/models/chatclassificationrequest.py: - id: 7fee7b849791 - last_write_checksum: sha1:22d8e106c165c9a16f220dc242b9165e5dcd6963 - pristine_git_object: f06f4f34d264d5bd049ced125d8675434c4fab96 - src/mistralai/models/chatcompletionchoice.py: - id: 362cbbc2f932 - last_write_checksum: sha1:6d66a95497493bff71ed75954e7eb9965370a3a4 - pristine_git_object: f2057ab4addf806d0458c40cb8bdf1f823da51f2 - src/mistralai/models/chatcompletionrequest.py: - id: ed77c35d0007 - last_write_checksum: sha1:e40cfe95a97a04addf2b37e6ba8df61ab3c1e199 - pristine_git_object: ad8b542863fd4158c1966e839d4ca9992982c2f8 - src/mistralai/models/chatcompletionresponse.py: - id: 227c368abb96 - last_write_checksum: sha1:1f8d263cc3388507fcec7a0e2419d755433a1e3e - pristine_git_object: 3d03b1265f4c41b6e11d10edcff0e4f9fea1e434 - src/mistralai/models/chatcompletionstreamrequest.py: - id: d01414c359f7 - last_write_checksum: sha1:76c0d6dcd9d1e50208c8906f3ae29e0bea39a71b - pristine_git_object: 10f97e5f006c904d37aa9bb1584030196c53ed98 - src/mistralai/models/chatmoderationrequest.py: - id: 9146b8de3702 - last_write_checksum: sha1:c0465d837b1517e061036f69faa0f40464873ff6 - pristine_git_object: 2f58d52fd00e2a1003445a1e524e3856dd8ad4c7 - src/mistralai/models/checkpointout.py: - id: ee97be8b74d3 - last_write_checksum: sha1:55cd36289696fa4da06a06812a62859bac83479f - pristine_git_object: aefb7731d0dfc71db4647509ef4e0ad1d70a3a95 - src/mistralai/models/classificationrequest.py: - id: fbb8aaa182b6 - last_write_checksum: sha1:300492b338cc354bee820a3b27fae7ad9900af5c - pristine_git_object: 8a3543785599e49df7f54069c98dedecbc545e12 - src/mistralai/models/classificationresponse.py: - id: b73b192344cb - last_write_checksum: sha1:0fa30f6b7eba3cbf1951bd45724d99b1ff023bb1 - pristine_git_object: b7741f373f062d552a67550dcd30e0592805ce93 - src/mistralai/models/classificationtargetresult.py: - id: 718124fab7ab - last_write_checksum: sha1:de004f490ec6da5bee26590697a97c68d7db9168 - pristine_git_object: 
60c5a51b0a5e3f2b248f1df04ba12ec5075556eb - src/mistralai/models/classifierdetailedjobout.py: - id: aebdcce0d168 - last_write_checksum: sha1:5d16ca3b3c375a899ee25fc9ce74d877d71b7be1 - pristine_git_object: 701aee6e638ee8ca3e43500abce790a6f76df0c7 - src/mistralai/models/classifierftmodelout.py: - id: 12437ddfc64e - last_write_checksum: sha1:2436c401d49eb7fa0440fca6f09045f20bb52da1 - pristine_git_object: d2a31fae8c534b1008b96c8d4f1e22d69b85c6f3 - src/mistralai/models/classifierjobout.py: - id: aa6ee49244f8 - last_write_checksum: sha1:0c2fe0e01ccfa25686565bc836d3745313f61498 - pristine_git_object: a2f7cc08b35152a1b56bbfbaa49f9231df651719 - src/mistralai/models/classifiertargetin.py: - id: 0439c322ce64 - last_write_checksum: sha1:92b7928166f1a0ed8a52c6ccd7523119690d9a35 - pristine_git_object: d8a060e4896cbe9ccf27be91a44a84a3a84589f7 - src/mistralai/models/classifiertargetout.py: - id: 1c9447805aaa - last_write_checksum: sha1:bf961d9be0bd5239032a612eb822ad8adcee6d99 - pristine_git_object: ddc587f46a3bc78df5d88793c768431429ccf409 - src/mistralai/models/classifiertrainingparameters.py: - id: 8d7d510cb1a1 - last_write_checksum: sha1:72c19293d514c684e1bd4a432b34382f4d674e26 - pristine_git_object: 718beeac3aa1fc2b8af52d61510f34414bcab990 - src/mistralai/models/classifiertrainingparametersin.py: - id: 3da8da32eac4 - last_write_checksum: sha1:ae5088ac22014504b3d3494db46869b87716342b - pristine_git_object: 9868843fbb81cc45657980b36c3c9409d386114d - src/mistralai/models/codeinterpretertool.py: - id: 8c90fc7cca85 - last_write_checksum: sha1:d0e3832422493176bcb29b4edec0aa40c34faa12 - pristine_git_object: 48b74ee85c897179f6f2855d6737e34031b6c0f8 - src/mistralai/models/completionargs.py: - id: 6673897ce695 - last_write_checksum: sha1:a6b22e1abc324b8adceb65cbf990c0a0ab34b603 - pristine_git_object: 40aa0314895b5b2e9b598d05f9987d39518a6c60 - src/mistralai/models/completionargsstop.py: - id: d3cf548dde2f + src/mistralai/client/models/chatclassificationrequest.py: + id: afd9cdc71834 + 
last_write_checksum: sha1:84cc02714fe8ae408a526ab68c143b9b51ea5279 + pristine_git_object: 450810225bb43bbd1539768e291840a210489f0f + src/mistralai/client/models/chatcompletionchoice.py: + id: 7e6a512f6a04 + last_write_checksum: sha1:bc3fb866e2eb661b1619f118af459d18ba545d40 + pristine_git_object: 5d888cfd73b82097d647f2f5ecdbdf8beee2e098 + src/mistralai/client/models/chatcompletionrequest.py: + id: 9979805d8c38 + last_write_checksum: sha1:ccd9f3908c71d6fc3ad57f41301348918b977a6f + pristine_git_object: 30fce28d5e071797a7180753f2825d39cfeac362 + src/mistralai/client/models/chatcompletionresponse.py: + id: 669d996b8e82 + last_write_checksum: sha1:af8071e660b09437a32482cdb25fd07096edc080 + pristine_git_object: 60a1f561ff29c3bc28ee6aea69b60b9d47c51471 + src/mistralai/client/models/chatcompletionstreamrequest.py: + id: 18cb2b2415d4 + last_write_checksum: sha1:a067cc25d2e8c5feb146bdb0b69fb5186e77c416 + pristine_git_object: 21dad38bb83e9b334850645ffa24e1099b121f6c + src/mistralai/client/models/chatmoderationrequest.py: + id: 057aecb07275 + last_write_checksum: sha1:f93d1758dd8c0f123d8c52d162e3b4c8681bf121 + pristine_git_object: 631c914d1a4f4453024665eb0a8233ec7a070332 + src/mistralai/client/models/checkpointout.py: + id: 3866fe32cd7c + last_write_checksum: sha1:c2b57fe880c75290b100904c26afaadd356fbe88 + pristine_git_object: 89189ed19dc521bc862da0aec1997bba0854def7 + src/mistralai/client/models/classificationrequest.py: + id: 6942fe3de24a + last_write_checksum: sha1:3b99dba1f7383defed1254fba60433808184e8e7 + pristine_git_object: c724ff534f60022599f34db09b517f853ae7968d + src/mistralai/client/models/classificationresponse.py: + id: eaf279db1109 + last_write_checksum: sha1:0e09986f5db869df04601cec3793552d17e7ed04 + pristine_git_object: 4bc21a58f0fb5b5f29357f2729250030b7d961bc + src/mistralai/client/models/classificationtargetresult.py: + id: 2445f12b2a57 + last_write_checksum: sha1:9325f4db4e098c3bf7e24cfc487788e272a5896f + pristine_git_object: 
89a137c374efc0f8b3ee49f3434f264705f69639 + src/mistralai/client/models/classifierdetailedjobout.py: + id: d8daeb39ef9f + last_write_checksum: sha1:d33e6a4672b33b6092caec50cc957d98e32058f7 + pristine_git_object: 1de4534fcb12440a004e94bc0eced7483952581d + src/mistralai/client/models/classifierftmodelout.py: + id: 2903a7123b06 + last_write_checksum: sha1:4662ec585ade8347aeda4f020b7d31978bf8f9bb + pristine_git_object: a4572108674ea9c209b6224597878d5e824af686 + src/mistralai/client/models/classifierjobout.py: + id: e19e9c4416cc + last_write_checksum: sha1:0239761cb318518641281f584783bd2b42ec3340 + pristine_git_object: ab1e261d573a30714042af3f20ed439ddbf1d819 + src/mistralai/client/models/classifiertargetin.py: + id: ed021de1c06c + last_write_checksum: sha1:cd1c0b8425c752815825abaedab8f4e2589cbc8f + pristine_git_object: 231ee21e61f8df491057767eac1450c60e8c706a + src/mistralai/client/models/classifiertargetout.py: + id: 5131f55abefe + last_write_checksum: sha1:4d9f66e3739f99ff1ea6f3468fe029d664541d58 + pristine_git_object: 957104a7bcc880d84ddefe39e58969b20f36d24c + src/mistralai/client/models/classifiertrainingparameters.py: + id: 4000b05e3b8d + last_write_checksum: sha1:a9d4eecd716bd078065531198f5a57b189caeb79 + pristine_git_object: 60f53c374ece9a5d336e8ab20c05c2d2c2d931f9 + src/mistralai/client/models/classifiertrainingparametersin.py: + id: 4b33d5cf0345 + last_write_checksum: sha1:f50e68c14be4655d5cf80f6c98366d32bbd01869 + pristine_git_object: e24c9ddecf60c38e146b8f94ad35be95b3ea2609 + src/mistralai/client/models/codeinterpretertool.py: + id: 950cd8f4ad49 + last_write_checksum: sha1:533ae809df90e14e4ef6e4e993e20e37f969f39f + pristine_git_object: faf5b0b78f2d9981bb02eee0c28bba1fdba795b9 + src/mistralai/client/models/completionargs.py: + id: 3db008bcddca + last_write_checksum: sha1:4b4f444b06a286098ce4e5018ffef74b3abf5b91 + pristine_git_object: 010910f6f00a85b706a185ca5770fe70cc998905 + src/mistralai/client/models/completionargsstop.py: + id: 5f339214501d 
last_write_checksum: sha1:99912f7a10e92419308cf3c112c36f023de3fc11 pristine_git_object: de7a09564540daaa6819f06195c347b3e01914f7 - src/mistralai/models/completionchunk.py: - id: d3dba36f2e47 - last_write_checksum: sha1:e93199f69c09b0f7c5c169c90c990a7e7439b64a - pristine_git_object: 4d1fcfbf2e46382cc1b8bbe760efa66ceb4207b3 - src/mistralai/models/completiondetailedjobout.py: - id: 7e46c1d1597b - last_write_checksum: sha1:4ef7f96a2ac505891fec22e4fe491ea21da67e0b - pristine_git_object: df41bc2ab5bf484d755d31fa132158bd1dc5b489 - src/mistralai/models/completionevent.py: - id: 7d9b2ff555f0 - last_write_checksum: sha1:268f8b79bf33e0113d1146577827fe10e47d3078 - pristine_git_object: cc8599103944b8eebead6b315098a823e4d086e3 - src/mistralai/models/completionftmodelout.py: - id: 20e6aae7163d - last_write_checksum: sha1:8272d246489fe8d3743d28b37b49b660ca832ea1 - pristine_git_object: 7b6520de657363e984eef8efd870b4b841dc52e0 - src/mistralai/models/completionjobout.py: - id: 36ce54765988 - last_write_checksum: sha1:c167fae08705eccd65ec30e99046276bdcdd1b97 - pristine_git_object: 70995d2a8e45ac5bf9a4b870d7b745e07f09856f - src/mistralai/models/completionresponsestreamchoice.py: - id: a5323819cf5b - last_write_checksum: sha1:dfb9c108006fc3ac0f1d0bbe8e379792f90fac19 - pristine_git_object: 80f63987d3d41512b8a12f452aab41c97d2691b0 - src/mistralai/models/completiontrainingparameters.py: - id: 701db02d1d12 - last_write_checksum: sha1:bb6d3ca605c585e6281d85363e374923ed6ddd33 - pristine_git_object: 0200e81c35f05863eee7753e530d9c2290c56404 - src/mistralai/models/completiontrainingparametersin.py: - id: 0858706b6fc7 - last_write_checksum: sha1:0c8735e28dc6c27bf759a6bd93e8f1cf0919b382 - pristine_git_object: 1f74bb9da85bd721c8f11521b916ae986cd473eb - src/mistralai/models/contentchunk.py: - id: f753f1e60f3b - last_write_checksum: sha1:af68b3ca874420a034d7e116a67974da125d5a30 - pristine_git_object: 47170eefb0ed04399548d254896fa616b24ec258 - src/mistralai/models/conversationappendrequest.py: - id: 
ddbd85dab2db - last_write_checksum: sha1:c8ca45ad5b8340531a469e9847ee64f80c8db4c3 - pristine_git_object: 15cbc687396ee59eee742d65e490c354fdbf0688 - src/mistralai/models/conversationappendstreamrequest.py: - id: 7d9c85747963 - last_write_checksum: sha1:ada1cbcad5ce2dd6a6bc268b30f78dc69901ff6c - pristine_git_object: 8cecf89d3342be9a94066716863f4fa121b29012 - src/mistralai/models/conversationevents.py: - id: f543ca03cde2 - last_write_checksum: sha1:7e6ac7ea6f4e216071af7460133b6c7791f9ce65 - pristine_git_object: ba4c628c9de7fb85b1dcd5a47282f97df62a3730 - src/mistralai/models/conversationhistory.py: - id: ab4d51ae0094 - last_write_checksum: sha1:1d85aa48d019ce003e2d151477e0c5925bd619e7 - pristine_git_object: d5206a571e865e80981ebfcc99e65859b0dc1ad1 - src/mistralai/models/conversationinputs.py: - id: 50986036d205 + src/mistralai/client/models/completionchunk.py: + id: d786b44926f4 + last_write_checksum: sha1:e38d856ffefd3b72ff7034fa030ca0071caa0996 + pristine_git_object: 9790db6fe35e0043f3240c0f7e8172d36dee96f5 + src/mistralai/client/models/completiondetailedjobout.py: + id: 9bc38dcfbddf + last_write_checksum: sha1:df43d27716d99b6886a2b2a389e4c7b8c0b61630 + pristine_git_object: 85c0c803cf809338900b7b8dcde774d731b67f8f + src/mistralai/client/models/completionevent.py: + id: c68817e7e190 + last_write_checksum: sha1:c29f7e8a5b357e15606a01ad23e21341292b9c5e + pristine_git_object: 52db911eeb62ec7906b396d6936e3c7a0908bb76 + src/mistralai/client/models/completionftmodelout.py: + id: 0f5277833b3e + last_write_checksum: sha1:d125468e84529042a19e29d1c34aef70318ddf54 + pristine_git_object: ccecbb6a59f2994051708e66bce7ece3598a786f + src/mistralai/client/models/completionjobout.py: + id: 712e6c524f9a + last_write_checksum: sha1:4ca927d2eb17e2f2fe588fd22f6aaa32a4025b07 + pristine_git_object: ecd95bb9c93412b222659e6f369d3ff7e13c8bb2 + src/mistralai/client/models/completionresponsestreamchoice.py: + id: 5969a6bc07f3 + last_write_checksum: sha1:aa04c99a8bca998752b44fc3e2f2d5e24434a9bf + 
pristine_git_object: 1b8d6faccbe917aaf751b4efa676bf51c1dcd3ff + src/mistralai/client/models/completiontrainingparameters.py: + id: be202ea0d5a6 + last_write_checksum: sha1:fa4a0f44afeb3994c9273c5b4c9203eef810b957 + pristine_git_object: 36b285ab4f41209c71687a14c8650c0db52e165f + src/mistralai/client/models/completiontrainingparametersin.py: + id: 0df22b873b5f + last_write_checksum: sha1:109503fabafd24174c671f2caa0566af2d46800e + pristine_git_object: d0315d9984575cb6c02bc6e38cedde3deef77b9a + src/mistralai/client/models/contentchunk.py: + id: c007f5ee0325 + last_write_checksum: sha1:a319b67206f4d0132544607482e685b46e2dce8c + pristine_git_object: 0a25423f9f9a95ced75d817ad7712747ce0915ae + src/mistralai/client/models/conversationappendrequest.py: + id: 81ce529e0865 + last_write_checksum: sha1:4f38d4aa2b792b113ef34ce54df3ac9b2efca5e1 + pristine_git_object: 867c0a414c1340033af7f6d03ea8cef2dcb8ff4a + src/mistralai/client/models/conversationappendstreamrequest.py: + id: 27ada745e6ad + last_write_checksum: sha1:41dcb9467d562bcc8feb885a56f73ac8d013c2d8 + pristine_git_object: f51407bf2a363f705b0b61ed7be4ef6249525af5 + src/mistralai/client/models/conversationevents.py: + id: 8c8b08d853f6 + last_write_checksum: sha1:e0d920578ca14fa186b3efeee69ed03f7a2aa119 + pristine_git_object: 308588a1f094631935e4229f5538c5092f435d2c + src/mistralai/client/models/conversationhistory.py: + id: 60a51ff1682b + last_write_checksum: sha1:ed60e311224c3ada9c3768335394a5b338342433 + pristine_git_object: 40bd1e7220160f54b0ab938b3627c77fb4d4f9ef + src/mistralai/client/models/conversationinputs.py: + id: 711b769f2c40 last_write_checksum: sha1:3e8c4650808b8059c3a0e9b1db60136ba35942df pristine_git_object: 4d30cd76d14358e12c3d30c22e3c95078ecde4bd - src/mistralai/models/conversationmessages.py: - id: be3ced2d07e7 - last_write_checksum: sha1:410317f1b45f395faa66a9becd7bb2398511ba60 - pristine_git_object: 32ca9c20cb37ff65f7e9b126650a78a4b97e4b56 - src/mistralai/models/conversationrequest.py: - id: 
ceffcc288c2d - last_write_checksum: sha1:c4c62ef9cdf9bb08463bcb12919abd98ceb8d344 - pristine_git_object: 80581cc10a8e7555546e38c8b7068a2744eb552b - src/mistralai/models/conversationresponse.py: - id: 016ec02abd32 - last_write_checksum: sha1:37c3f143b83939b369fe8637932974d163da3c37 - pristine_git_object: ff318e35ee63e43c64e504301236327374442a16 - src/mistralai/models/conversationrestartrequest.py: - id: 2a8207f159f5 - last_write_checksum: sha1:93cd4370afe6a06b375e0e54ca09225e02fc42d3 - pristine_git_object: 6f21d01267481b8b47d4d37609ac131c34c10a9b - src/mistralai/models/conversationrestartstreamrequest.py: - id: d98d3e0c8eed - last_write_checksum: sha1:90f295ce27ba55d58899e06a29af223a464f5a4c - pristine_git_object: 2cec7958ab31378d480f0f93a5ed75ac8c624442 - src/mistralai/models/conversationstreamrequest.py: - id: f7051f125d44 - last_write_checksum: sha1:12bc85a14f110f5c8a3149540668bea178995fae - pristine_git_object: 1a481b77f706db7101521756c7c3476eaa1918c5 - src/mistralai/models/conversationusageinfo.py: - id: 922894aa994b - last_write_checksum: sha1:0e0039421d7291ecbbf820ea843031c50371dd9e - pristine_git_object: 9ae6f4fb6a7b4fd056c677c2152625de422b490a - src/mistralai/models/delete_model_v1_models_model_id_deleteop.py: - id: 409899d6ca23 - last_write_checksum: sha1:2d1e5b8947b56abba06363358973032e196c8139 - pristine_git_object: 4acb8d5373f25d7200378d0b8a767451978aa5a9 - src/mistralai/models/deletefileout.py: - id: d51d0de32738 - last_write_checksum: sha1:da9e95bb804820dea4977f65f62c08e491d9bb4b - pristine_git_object: 2b346ec4879c8811f824c7e6bde9fef922f37382 - src/mistralai/models/deletemodelout.py: - id: 8dcf3427f17b - last_write_checksum: sha1:8243b0bcf735a67d4cffb254fe9de95f130a0d8a - pristine_git_object: c1b1effcbe3b093f7dede49684cf88aa0a9b27a7 - src/mistralai/models/deltamessage.py: - id: 43ee8a48546e - last_write_checksum: sha1:8bc50b7943d5ae4725eb57b7ca21a4c1217e4c0d - pristine_git_object: 88aefe7f652296c02377714586d38b8e318a419d - 
src/mistralai/models/documentlibrarytool.py: - id: 24c1c0293181 - last_write_checksum: sha1:7ec74875595149f433ee1b8a95d8183aa1cf8738 - pristine_git_object: 8d4c122b0412682a792c754a06e10809bfd8c25c - src/mistralai/models/documentout.py: - id: 205cb7721dfa - last_write_checksum: sha1:9316ed725bd9d7a2ef1f4e856f61def684442bd7 - pristine_git_object: 81d9605f38e40a703911fefc15731ec102c74ccb - src/mistralai/models/documenttextcontent.py: - id: 685680d8640b - last_write_checksum: sha1:dafce4998fa5964ac6833e71f7cb4f23455c14e6 - pristine_git_object: c02528c2052d535f7c815fb1165df451d49fef79 - src/mistralai/models/documentupdatein.py: - id: 6d69a91f40bd - last_write_checksum: sha1:dcbc51f1a1192bb99732405420e57fedb32dd1de - pristine_git_object: bd89ff4793e4fd78a4bae1c9f5aad716011ecbfd - src/mistralai/models/documenturlchunk.py: - id: 34a86f25f54f - last_write_checksum: sha1:1496b3d587fd2c5dc1c3f18de1ac59a29c324849 - pristine_git_object: 6d0b1dc6c9f6ebca8638e0c8991a9aa6df2b7e48 - src/mistralai/models/embeddingdtype.py: - id: bca8ae3779ed + src/mistralai/client/models/conversationmessages.py: + id: 011c39501c26 + last_write_checksum: sha1:f71e85febab797d5c17b58ef8a1318545c974ed2 + pristine_git_object: 1ea05369b95fdaa7d7ae75398669f88826e5bb26 + src/mistralai/client/models/conversationrequest.py: + id: 58e3ae67f149 + last_write_checksum: sha1:20339231abbf60fb160f2dc24941860304c702fd + pristine_git_object: e3211c4c7b20c162473e619fad6dc0c6cea6b571 + src/mistralai/client/models/conversationresponse.py: + id: ad7a8472c7bf + last_write_checksum: sha1:50fdea156c2f2ce3116d41034094c071a3e136fa + pristine_git_object: 32d0f28f101f51a3ca79e4d57f4913b1c420b189 + src/mistralai/client/models/conversationrestartrequest.py: + id: 681d90d50514 + last_write_checksum: sha1:76c5393b280e263a38119d98bdcac917afe36881 + pristine_git_object: aa2bf7b0dcdf5e343a47787c4acd00fe3f8bd405 + src/mistralai/client/models/conversationrestartstreamrequest.py: + id: 521c2b5bfb2b + last_write_checksum: 
sha1:5ba78bf9048b1e954c45242f1843eb310b306a94 + pristine_git_object: 689815ebcfe577a1698938c9ccbf100b5d7995f8 + src/mistralai/client/models/conversationstreamrequest.py: + id: 58d633507527 + last_write_checksum: sha1:9cb79120c78867e12825ac4d504aa55ee5827168 + pristine_git_object: 219230a2a8dd7d42cc7f5613ca22cec5fa872750 + src/mistralai/client/models/conversationusageinfo.py: + id: 6685e3b50b50 + last_write_checksum: sha1:7fa37776d7f7da6b3a7874c6f398d6f607c01b52 + pristine_git_object: 7a818c89a102fe88eebc8fec78a0e195e26cf85d + src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py: + id: 767aba526e43 + last_write_checksum: sha1:9a8f9917fc5de154e8a6fdb44a1dd7db55bb1de5 + pristine_git_object: 1cd36128a231a6d4be328fde53d1f048ff7c2ccd + src/mistralai/client/models/deletefileout.py: + id: 5578701e7327 + last_write_checksum: sha1:76d209f8b3bba5e4bc984700fe3d8981c9d6142b + pristine_git_object: b25538bee35dedaae221ea064defb576339402c8 + src/mistralai/client/models/deletemodelout.py: + id: ef6a1671c739 + last_write_checksum: sha1:ef2f6774eaf33c1c78368cd92bc4108ecccd9a6c + pristine_git_object: 5aa8b68fe3680d3b51127d6a6b6068b1303756e8 + src/mistralai/client/models/deltamessage.py: + id: 68f53d67a140 + last_write_checksum: sha1:52296fa6d7fc3788b64dcb47aadd0818bcb86e11 + pristine_git_object: 0ae56da86f645e5a0db2a0aa4579342610243300 + src/mistralai/client/models/documentlibrarytool.py: + id: 3eb3c218f457 + last_write_checksum: sha1:af01ec63a1c5eb7c332b82b3ec1d3553891614c2 + pristine_git_object: 861a58d38125ca5af11772ebde39a7c57c39ad9c + src/mistralai/client/models/documentout.py: + id: 7a85b9dca506 + last_write_checksum: sha1:2de0e0f9be3a2362fbd7a49ff664b43e4c29a262 + pristine_git_object: 39d0aa2a5a77d3eb3349ae5e7b02271c2584fe56 + src/mistralai/client/models/documenttextcontent.py: + id: e730005e44cb + last_write_checksum: sha1:ad7e836b5f885d703fd5f09c09aba0628d77e05b + pristine_git_object: b1c1aa073dff4dcdc59d070058221b67ce9e36f9 + 
src/mistralai/client/models/documentupdatein.py: + id: d19c1b26a875 + last_write_checksum: sha1:bad1cee0906961f555784e03c23f345194959077 + pristine_git_object: 02022b89ef2b87349e0d1dc4cccc3d1908a2d1aa + src/mistralai/client/models/documenturlchunk.py: + id: 4309807f6048 + last_write_checksum: sha1:1253bdbe1233481622b76e340413ffb1d8996f0e + pristine_git_object: 00eb55357f19ac4534446e0ee761bdbccfb471e2 + src/mistralai/client/models/embeddingdtype.py: + id: 77f9526a78df last_write_checksum: sha1:962f629fa4ee8a36e731d33f8f730d5741a9e772 pristine_git_object: 26eee779e12ae8114a90d3f18f99f3dd50e46b9e - src/mistralai/models/embeddingrequest.py: - id: ccb2b16068c8 - last_write_checksum: sha1:bf7877e386362d6187ffb284a1ceee1dea4cc5b7 - pristine_git_object: 44797bfad1b76ba809fab3791bffa2c78791e27b - src/mistralai/models/embeddingresponse.py: - id: c38279b9f663 - last_write_checksum: sha1:369740f705b08fede21edc04adf86505e55c9b76 - pristine_git_object: aae6fa60e131d4378bc631576b18f4d8a47f2770 - src/mistralai/models/embeddingresponsedata.py: - id: b73c5696eb71 - last_write_checksum: sha1:9709503bdde0a61603237fe6e84c410467e7e9f4 - pristine_git_object: 01e2765fb206b0ee36dfeb51cf3066613c74ac13 - src/mistralai/models/encodingformat.py: - id: 9f4fad7d5a9e + src/mistralai/client/models/embeddingrequest.py: + id: eadbe3f9040c + last_write_checksum: sha1:c4f85f5b768afb0e01c9a9519b58286804cfbd6b + pristine_git_object: 1dfe97c8fa2162719d2a68e7a0ef2f348efa1f88 + src/mistralai/client/models/embeddingresponse.py: + id: f7d790e84b65 + last_write_checksum: sha1:285531abf3a45de3193ed3c8b07818faac97eb32 + pristine_git_object: 64a28ea9f1c57ed6e69e1d49c5c83f63fa38fd36 + src/mistralai/client/models/embeddingresponsedata.py: + id: 6d6ead6f3803 + last_write_checksum: sha1:ed821591832ebfa03acd0ce0a3ca5a0521e6fa53 + pristine_git_object: ebd0bf7b29e0a1aee442337fd02ce562fb2c5a3d + src/mistralai/client/models/encodingformat.py: + id: b51ec296cc92 last_write_checksum: 
sha1:f9a3568cd008edb02f475a860e5849d9a40d0246 pristine_git_object: be6c1a14e4680e24e70b8bbda018759056b784ca - src/mistralai/models/entitytype.py: - id: 4d056950d537 - last_write_checksum: sha1:7087fb7ad2886188380cd692997b2850c950a6b8 - pristine_git_object: 8d2d4bbe837da3e21988548e09710ab629d1aacd - src/mistralai/models/eventout.py: - id: 2601c7113273 - last_write_checksum: sha1:93ba178c3f6459dbc638e49c3eddcc188c7ff5d0 - pristine_git_object: 3281903429b154eb095a7c41b1751cfef97e497d - src/mistralai/models/file.py: - id: 7c1aa0c610c0 - last_write_checksum: sha1:3735ec925554b397e36fd2322062f555fbcde270 - pristine_git_object: 682d7f6e24b736dabd0566ab1b45b20dae5ea019 - src/mistralai/models/filechunk.py: - id: ea6a1ad435e8 - last_write_checksum: sha1:56d91860c1c91c40662313ea6f156db886bb55b6 - pristine_git_object: 83e60cef29045ced5ae48b68481bce3317690b8e - src/mistralai/models/filepurpose.py: - id: 3928b3171a09 - last_write_checksum: sha1:2ffb9fd99624b7b9997f826526045a9a956fde14 - pristine_git_object: b109b35017d5aa086ac964d78163f41e64277874 - src/mistralai/models/files_api_routes_delete_fileop.py: - id: fa02d4d126c7 - last_write_checksum: sha1:c96b106d6496087673f6d1b914e748c49ec13755 - pristine_git_object: a84a7a8eee4b6895bb2e835f82376126b3e423ec - src/mistralai/models/files_api_routes_download_fileop.py: - id: 1dc2e2823a00 - last_write_checksum: sha1:6001bcf871ab76635abcb3f081b029c8154a191e - pristine_git_object: 168a7fa6701578b77876fe0bddeb1003d06f33b7 - src/mistralai/models/files_api_routes_get_signed_urlop.py: - id: 628ed2f82ce4 - last_write_checksum: sha1:c970025b1e453ad67298d12611542abb46ded54d - pristine_git_object: 708d40ab993f93227b9795c745383ab954c1c89c - src/mistralai/models/files_api_routes_list_filesop.py: - id: 865dd74c577c - last_write_checksum: sha1:d75afa1ee7e34cbcfb8da78e3b5c9384b684b89b - pristine_git_object: 84d61b9b4d7032a60e3055b683a396e53b625274 - src/mistralai/models/files_api_routes_retrieve_fileop.py: - id: d821f72ee198 - last_write_checksum: 
sha1:d0d07123fd941bb99a00a36e87bc7ab4c21506a6 - pristine_git_object: 0c2a95ef590f179fe60a19340e34adb230dd8901 - src/mistralai/models/files_api_routes_upload_fileop.py: - id: ccca25a2fe91 - last_write_checksum: sha1:64b1d3c3fe9323d40096798760c546dc1c30a57d - pristine_git_object: aeefe842b327c89c0a78ba3d6e4a1ccb8d4a25fe - src/mistralai/models/fileschema.py: - id: 8a02ff440be5 - last_write_checksum: sha1:55120d1d9322e9381d92f33b23597f5ed0e20e4c - pristine_git_object: 9a88f1bbdf34ffb619794be9c041635ff333e489 - src/mistralai/models/filesignedurl.py: - id: 6fe55959eedd - last_write_checksum: sha1:afbe1cdfbdf2f760fc996a5065c70fa271a35885 - pristine_git_object: 092be7f8090272bdebfea6cbda7b87d9877d59e8 - src/mistralai/models/fimcompletionrequest.py: - id: a54284b7041a - last_write_checksum: sha1:7e477e032b3a48fe08610dd5dc50dee0948950e9 - pristine_git_object: 801a358b02441b7537f4bae64e93b4308c720040 - src/mistralai/models/fimcompletionresponse.py: - id: 15f25c04c5dd - last_write_checksum: sha1:b7787a7dc82b31ed851a52ae2f0828cc8746d61e - pristine_git_object: f27972b9e6e2f9dc7837be7278fda4910755f1f4 - src/mistralai/models/fimcompletionstreamrequest.py: - id: ba6b92828dc7 - last_write_checksum: sha1:a8f2c6cbd5a41ad85b7d0faced90d8f05b29f646 - pristine_git_object: 2e8e6db2a21a86ffd7cc61f92fed5c55f19e2e50 - src/mistralai/models/finetuneablemodeltype.py: - id: cbd439e85b18 + src/mistralai/client/models/entitytype.py: + id: 62d6a6a13288 + last_write_checksum: sha1:baefd3e820f1682bbd75ab195d1a47ccb3d16a19 + pristine_git_object: 9c16f4a1c0e61f8ffaee790de181572891db3f89 + src/mistralai/client/models/eventout.py: + id: da8ad645a9cb + last_write_checksum: sha1:326b575403d313c1739077ad6eb9047ded15a6f5 + pristine_git_object: 5e118d4599e935bcd6196a7cbc1baae8f4a82752 + src/mistralai/client/models/file.py: + id: f972c39edfcf + last_write_checksum: sha1:40ddf9b7e6d3e9a77899cd9d32a9ac921c531c87 + pristine_git_object: a8bbc6fab46a49e7171cabbef143a9bbb48e763c + 
src/mistralai/client/models/filechunk.py: + id: ff3c2d33ab1e + last_write_checksum: sha1:9ae8d68bfcb6695cce828af08e1c9a9ce779f1f3 + pristine_git_object: d8b96f69285ea967397813ae53722ca38e8d6443 + src/mistralai/client/models/filepurpose.py: + id: a11e7f9f2d45 + last_write_checksum: sha1:154a721dbd5e0c951757a596a96e5d880ecf4982 + pristine_git_object: eef1b08999956fd45fe23f2c03bb24546207b4e3 + src/mistralai/client/models/files_api_routes_delete_fileop.py: + id: 2f385cc6138f + last_write_checksum: sha1:e7b7ad30a08b1033ecd5433da694f69a91029bfc + pristine_git_object: b71748669906990998cc79345f789ed50865e110 + src/mistralai/client/models/files_api_routes_download_fileop.py: + id: 8184ee3577c3 + last_write_checksum: sha1:7781932cc271d47a2965217184e1dd35a187de3f + pristine_git_object: fa9e491a95625dbedde33bc9ea344aaebf992902 + src/mistralai/client/models/files_api_routes_get_signed_urlop.py: + id: 0a1a18c6431e + last_write_checksum: sha1:797201cde755cf8e349b71dc2ff7ce56d1eabb73 + pristine_git_object: a05f826232396957a3f65cb1c38c2ae13944d43b + src/mistralai/client/models/files_api_routes_list_filesop.py: + id: b2e92f2a29b4 + last_write_checksum: sha1:711cc470b8dedefd2c2c7e2ae7dfa6c4601e0f30 + pristine_git_object: ace996318446667b2da3ca2d37bd2b25bcfbb7a7 + src/mistralai/client/models/files_api_routes_retrieve_fileop.py: + id: 5d5dbb8d5f7a + last_write_checksum: sha1:ea34337ee17bdb99ad89c0c6742fb80cb0b67c13 + pristine_git_object: 4a9678e5aa7405cbe09f59ffbdb6c7927396f06a + src/mistralai/client/models/files_api_routes_upload_fileop.py: + id: f13b84de6fa7 + last_write_checksum: sha1:3dc679de7b41abb4b0710ade631e818621b6f3bc + pristine_git_object: 723c6cc264613b3670ac999829e66131b8424849 + src/mistralai/client/models/fileschema.py: + id: 19cde41ca32a + last_write_checksum: sha1:29fe7d4321fc2b20ae5fa349f30492aeb155c329 + pristine_git_object: 9ecde454f0dac17997ef75e5cdb850cccc8020fe + src/mistralai/client/models/filesignedurl.py: + id: a1754c725163 + last_write_checksum: 
sha1:0987cc364694efd61c62ba15a57cfb74aa0d0cc8 + pristine_git_object: cbca9847568ab7871d05b6bb416f230d3c9cddfc + src/mistralai/client/models/fimcompletionrequest.py: + id: cf3558adc3ab + last_write_checksum: sha1:a62845c9f60c8d4df4bfaa12e4edbb39dcc5dcb7 + pristine_git_object: c9eca0af3ccacfd815bfb8b11768e289b4828f4e + src/mistralai/client/models/fimcompletionresponse.py: + id: b860d2ba771e + last_write_checksum: sha1:00b5b7146932f412f8230da7164e5157d267a817 + pristine_git_object: 8a2eda0ced48f382b79e5c6d7b64b0c5f0b16c15 + src/mistralai/client/models/fimcompletionstreamrequest.py: + id: 1d1ee09f1913 + last_write_checksum: sha1:9260ae9a12c37b23d7dfa8ec6d3029d1d8a133ed + pristine_git_object: 2954380238dec5540e321012b8aa6609e404114c + src/mistralai/client/models/finetuneablemodeltype.py: + id: 05e097395df3 last_write_checksum: sha1:8694f016e8a4758308225b92b57bee162accf9d7 pristine_git_object: f5b8b2ed45b56d25b387da44c398ae79f3a52c73 - src/mistralai/models/ftclassifierlossfunction.py: - id: 95255316968d + src/mistralai/client/models/ftclassifierlossfunction.py: + id: d21e2a36ab1f last_write_checksum: sha1:69e08ab728e095b8e3846ed2dc142aa1e82a864a pristine_git_object: c4ef66e0fe69edace4912f2708f69a6e606c0654 - src/mistralai/models/ftmodelcapabilitiesout.py: - id: 1bc9230e1852 - last_write_checksum: sha1:c841f76ba219c82e3324b69ad8eba4abd522d0b9 - pristine_git_object: 7f3aa18b982c11fb6463e96333250b632dd195c8 - src/mistralai/models/ftmodelcard.py: - id: 4f25bcf18e86 - last_write_checksum: sha1:f1d80e6aa664e63b4a23a6365465d42415fc4bbb - pristine_git_object: 1c3bd04da0cc2bc86bec97d7890ad6594879b334 - src/mistralai/models/function.py: - id: 66b7b7ab8fc4 - last_write_checksum: sha1:5da05a98ca5a68c175bd212dd41127ef98013da6 - pristine_git_object: 7d40cf758ffbb3b6b4e62b50274829bd1c809a9c - src/mistralai/models/functioncall.py: - id: 5e03760bb753 - last_write_checksum: sha1:20d2a8196b6ccaffe490b188b1482a309b2dce79 - pristine_git_object: 0cce622a4835fcbd9425928b115a707848c65f54 - 
src/mistralai/models/functioncallentry.py: - id: 1d5c6cef6e92 - last_write_checksum: sha1:f357b1fde226c52c0dc2b105df66aeb6d17ab1bf - pristine_git_object: 4ea62c4ffc671b20d35cd967f3da0f1a34c92e2e - src/mistralai/models/functioncallentryarguments.py: - id: bd63a10181da + src/mistralai/client/models/ftmodelcapabilitiesout.py: + id: f70517be97d4 + last_write_checksum: sha1:44260fefae93bc44a099ff64eeae7657c489005c + pristine_git_object: be31aa3c14fb8fe9154ad8f54e9bf43f586951c7 + src/mistralai/client/models/ftmodelcard.py: + id: c4f15eed2ca2 + last_write_checksum: sha1:a6a71ce4a89688cb4780697e299a4274f7323e24 + pristine_git_object: 36cb723df8bcde355e19a55105932298a8e2e33a + src/mistralai/client/models/function.py: + id: 32275a9d8fee + last_write_checksum: sha1:f98db69c2fb49bbd6cff36fb4a25e348db6cd660 + pristine_git_object: 6e2b52edbd8d7cb6f7654eb76b7ca920636349cf + src/mistralai/client/models/functioncall.py: + id: 393fca552632 + last_write_checksum: sha1:ef22d048ddb5390f370fcf3405f4d46fa82ed574 + pristine_git_object: 6cb6f26e6c69bc134bcb45f53156e15e362b8a63 + src/mistralai/client/models/functioncallentry.py: + id: cd058446c0aa + last_write_checksum: sha1:661372b1ff4505cf7039ece11f12bb1866688bed + pristine_git_object: fce4d387df89a9fa484b0c7cc57556ea13278469 + src/mistralai/client/models/functioncallentryarguments.py: + id: 3df3767a7b93 last_write_checksum: sha1:6beb9aca5bfc2719f357f47a5627c9edccef051f pristine_git_object: ac9e6227647b28bfd135c35bd32ca792d8dd414b - src/mistralai/models/functioncallevent.py: - id: 868025c914c8 - last_write_checksum: sha1:4eb5b07218c9ab923cbe689e3de116d14281a422 - pristine_git_object: e3992cf173907a485ced9ec12323a680613e9e6a - src/mistralai/models/functionname.py: - id: 46a9b195fef5 - last_write_checksum: sha1:2219be87b06033dad9933b2f4efd99a4758179f1 - pristine_git_object: 0a6c0b1411b6f9194453c9fe22d52d035eb80c4f - src/mistralai/models/functionresultentry.py: - id: d617bbe28e36 - last_write_checksum: 
sha1:a781805577eb871b4595bae235c1d25e2e483fdc - pristine_git_object: 1c61395a82830dc689f2e011b9e6c86eba58cda3 - src/mistralai/models/functiontool.py: - id: e1b3d619ef0b - last_write_checksum: sha1:31e375a2222079e9e70459c55ff27a8b3add869d - pristine_git_object: 009fe28008a166d551566378e3c2730963aca591 - src/mistralai/models/githubrepositoryin.py: - id: e7f21180a768 - last_write_checksum: sha1:b4f630e15057e4ff8bfc5fb7ba2f0085a76c5f06 - pristine_git_object: b16ce0d2898b000f08e3d960a3411941a2324473 - src/mistralai/models/githubrepositoryout.py: - id: a3e494bbd813 - last_write_checksum: sha1:00a9bc4d6308cd960077fb639b1778723a71f583 - pristine_git_object: 372477c106a37b1b9d5cec02751c63fb08abcf53 - src/mistralai/models/httpvalidationerror.py: - id: 224ee4b3f0f0 - last_write_checksum: sha1:3f8d51b670993863fcd17421d1ace72e8621fd51 - pristine_git_object: d467577af04921f5d9bfa906ae6f4e06055a8785 - src/mistralai/models/imagegenerationtool.py: - id: 63bbe395acb2 - last_write_checksum: sha1:404e9cbabada212b87cc2e0b8799a18ff1cecf95 - pristine_git_object: a92335dbd2d0d03be5c2df4132df1cc26eaf38dd - src/mistralai/models/imageurl.py: - id: 20116779b5a0 - last_write_checksum: sha1:2d6090577370f5eb2e364029a11bb61bd86ef226 - pristine_git_object: 6f077b69019fbc598ddc402ba991c83f8a047632 - src/mistralai/models/imageurlchunk.py: - id: 0a6e87c96993 - last_write_checksum: sha1:0b7e4c0d5129698b1b01608eb59b27513f6a9818 - pristine_git_object: 8e8aac4238381527d9156fcb72288b28a82f9689 - src/mistralai/models/inputentries.py: - id: cbf378d5b92a + src/mistralai/client/models/functioncallevent.py: + id: 23b120b8f122 + last_write_checksum: sha1:535874a4593ce1f40f9683fa85159e4c4274f3ee + pristine_git_object: 4e040585285985cebc7e26ac402b6df8f4c063bb + src/mistralai/client/models/functionname.py: + id: 000acafdb0c0 + last_write_checksum: sha1:03d7b26a37311602ae52a3f6467fe2c306c468c1 + pristine_git_object: 2a05c1de42a6ff5775af5509c106eaa7b391778e + src/mistralai/client/models/functionresultentry.py: + id: 
213df39bd5e6 + last_write_checksum: sha1:7e6d951cfd333f9677f4c651054f32658794cc48 + pristine_git_object: a843bf9bdd82b5cf3907e2172ed793a391c5cba2 + src/mistralai/client/models/functiontool.py: + id: 2e9ef5800117 + last_write_checksum: sha1:8ab806567a2ab6c2e04cb4ce394cbff2ae7aad50 + pristine_git_object: 74b50d1bcd2bc0af658bf5293c8cc7f328644fa1 + src/mistralai/client/models/githubrepositoryin.py: + id: eef26fbd2876 + last_write_checksum: sha1:3b64fb4f34e748ef71fa92241ecdd1c73aa9485a + pristine_git_object: e56fef9ba187792238991cc9373a7d2ccf0b8c0d + src/mistralai/client/models/githubrepositoryout.py: + id: d2434a167623 + last_write_checksum: sha1:d2be5c474d3a789491cad50b95e3f25933b0c66a + pristine_git_object: e3aa9ebc52e8613b15e3ff92a03593e2169dc935 + src/mistralai/client/models/httpvalidationerror.py: + id: 4099f568a6f8 + last_write_checksum: sha1:81432fd45c6faac14a6b48c6d7c85bbc908b175c + pristine_git_object: 34d9b54307db818e51118bc448032e0476688a35 + src/mistralai/client/models/imagegenerationtool.py: + id: e1532275faa0 + last_write_checksum: sha1:7eaea320c1b602df2e761405644361820ca57d33 + pristine_git_object: e09dba81314da940b2be64164e9b02d51e72f7b4 + src/mistralai/client/models/imageurl.py: + id: e4bbf5881fbf + last_write_checksum: sha1:d300e69742936f6e6583f580091827ada7da6c20 + pristine_git_object: 6e61d1ae2ec745774345c36e605748cf7733687b + src/mistralai/client/models/imageurlchunk.py: + id: 746fde62f637 + last_write_checksum: sha1:2311445f8c12347eab646f1b9ff7c4202642c907 + pristine_git_object: f967a3c8ced6d5fb4b274454100134e41c5b7a5c + src/mistralai/client/models/inputentries.py: + id: 44727997dacb last_write_checksum: sha1:afc03830974af11516c0b997f1cd181218ee4fb0 pristine_git_object: 8ae29837a6c090fbe1998562684d3a372a9bdc31 - src/mistralai/models/inputs.py: - id: a53031bc9cb6 - last_write_checksum: sha1:94290a72cb6cfa40813bc79a66a463978ae9ae1c - pristine_git_object: 34d20f3428a5d994c4a199c411dc8097b3c259d7 - src/mistralai/models/instructrequest.py: - id: 
d23d1da148c8 - last_write_checksum: sha1:2c4f4babc9944f90bc725bb0c460c8de85b3d75e - pristine_git_object: dddbda00a418629462e3df12a61a6b1c56c1d2bd - src/mistralai/models/jobin.py: - id: 42f6df34c72e - last_write_checksum: sha1:e5a78c9a2cd48fb1d7d062ec2f8d54f8d3ac493e - pristine_git_object: aa0cd06c704902919f672e263e969630df783ef6 - src/mistralai/models/jobmetadataout.py: - id: eaa2e54e2e2b - last_write_checksum: sha1:90afd144e2f9ec77c3be2694db1d96e4bc23fecb - pristine_git_object: 10ef781ebbba4c5eaab6f40f5d5f9f828944c983 - src/mistralai/models/jobs_api_routes_batch_cancel_batch_jobop.py: - id: 5d3a14d60da7 - last_write_checksum: sha1:4925f408587e91581c0181baf9acd1dcb5a50768 - pristine_git_object: 5b83d534d7efd25c0bc47406c79dfd59e22ec1d6 - src/mistralai/models/jobs_api_routes_batch_get_batch_jobop.py: - id: 74c718778882 - last_write_checksum: sha1:92a89c2d0384b2251636a61113310c84da0001bf - pristine_git_object: 9bfaf9c5230e4a1cc0187faeedc78ebcaaf38b98 - src/mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py: - id: 072c77cfbaa5 - last_write_checksum: sha1:f890bc21fa71e33a930d48cdbf18fd503419406c - pristine_git_object: c48246d54c696bd85fbe67348d5eef1a2a1944db - src/mistralai/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py: - id: db002a822be0 - last_write_checksum: sha1:3a1019f200193556df61cbe3786b03c2dbab431f - pristine_git_object: d728efd175f1df6b59b74d0b2fa602c0e0199897 - src/mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: - id: ad69f51c764d - last_write_checksum: sha1:c84477987738a389ddf88546060263ecfb46506a - pristine_git_object: ceb19a69131958a2de6c3e678c40a1ca5d35fd73 - src/mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: - id: a5c2c6e89b85 - last_write_checksum: sha1:dfb755d386e7c93540f42392f18efae7f61c4625 - pristine_git_object: 39af3ea6fab66941faf7718d616ff2a386e8219b - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: - id: 221ec5d0482f - last_write_checksum: 
sha1:f2ce2c6a8924deda372d749ea2a09a2526b8da44 - pristine_git_object: be99dd2d329f5921513ba3ad6e5c5a9807d1a363 - src/mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: - id: bd0fd94f34fc - last_write_checksum: sha1:48390cf76ffc1d712e33bd0bcece8dea956e75cb - pristine_git_object: 9aec8eb25c54e8fecedd9dd9e823ccf32c1a36b8 - src/mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: - id: cba224459ae6 - last_write_checksum: sha1:238eeb9b7f48ff4e3262cc0cc5e55d96fe565073 - pristine_git_object: 8103b67b55eab0f9197cd9fb421e6ea4ca10e76e - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: - id: ecc5a3420980 - last_write_checksum: sha1:8e026bc610fead1e55886c741f6b38817bb6b2ff - pristine_git_object: a84274ff5b2c45f2adc2c0234db090c498decc51 - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: - id: 3e8d8e70d526 - last_write_checksum: sha1:a5538fcb4248fd83749dc303f9585d7354ff8b92 - pristine_git_object: a10528ca0f7056ef82e0aeae8f4262c65e47791d - src/mistralai/models/jobsout.py: - id: bb1000b03e73 - last_write_checksum: sha1:d06d7b33e5630d45795efc2a8443ae3070866b07 - pristine_git_object: 680b1d582bc8fbce17a381be8364333dd87ce333 - src/mistralai/models/jsonschema.py: - id: 4bcf195c31bb - last_write_checksum: sha1:a0d2b72f809e321fc8abf740e57ec39a384c09d4 - pristine_git_object: e2b6a45e5e5e68b6f562dc39519ab12ffca50322 - src/mistralai/models/legacyjobmetadataout.py: - id: 172ade2efb26 - last_write_checksum: sha1:bf608218a88f7e59cd6c9d0958940b68a200ba0d - pristine_git_object: 499512197a9f9600ac9f7cee43f024dde67fd775 - src/mistralai/models/libraries_delete_v1op.py: - id: ef50051027ec - last_write_checksum: sha1:2a9632da75355679918714a68b96e3ddf88fa5d3 - pristine_git_object: 56f8f8a8706b7aac67cf9b156a2e8710a4fdef36 - src/mistralai/models/libraries_documents_delete_v1op.py: - id: e18557420efe - last_write_checksum: sha1:6904ea388795a0b5f523959c979cf9b3a2c3ef4e - pristine_git_object: 
c33710b0e29664594891055c36199ea4846516dc - src/mistralai/models/libraries_documents_get_extracted_text_signed_url_v1op.py: - id: c8df3283cb98 - last_write_checksum: sha1:fefde9e22a010f900bd9012a2d438f909d54815f - pristine_git_object: e2459c1c68c81eb67983ac76de23dd8609420291 - src/mistralai/models/libraries_documents_get_signed_url_v1op.py: - id: 279ac5d9f945 - last_write_checksum: sha1:8ee5b6386f98d2af619f070e83e1f3772c07e199 - pristine_git_object: bc913ba56bd98d9937ddd5516837b5a8ead10454 - src/mistralai/models/libraries_documents_get_status_v1op.py: - id: ded8f142264f - last_write_checksum: sha1:ac1f85ecb74ef43e6e831794badbbd57e99f7028 - pristine_git_object: 08992d7c9ee5ba85ef97971fa6e06af465e39fa9 - src/mistralai/models/libraries_documents_get_text_content_v1op.py: - id: 497b693d0ba6 - last_write_checksum: sha1:11eeb61bab8b745ba22f2087393ba0cf91b76180 - pristine_git_object: 21a131ad6448597a996f7d96723f6bc8cf12ddf0 - src/mistralai/models/libraries_documents_get_v1op.py: - id: 7b1e6957ca40 - last_write_checksum: sha1:a3e3d1dee18ee2900417db836b1f8b49a14e0501 - pristine_git_object: ff2bdedbcaa8cf4c8e31091ed529274bf5d3ec04 - src/mistralai/models/libraries_documents_list_v1op.py: - id: d5cc573ae1a0 - last_write_checksum: sha1:43b6af0f23ff88d6e13f48acf12baa01a03eb243 - pristine_git_object: e6ff29cf4edb7b269cd66c5299b7531b13973dd2 - src/mistralai/models/libraries_documents_reprocess_v1op.py: - id: 3e832394e71b - last_write_checksum: sha1:36ced698b57573338eb95f5d70983ba4b9dcb0e0 - pristine_git_object: 861993e7e0fd06576e878758a44029613d381a4c - src/mistralai/models/libraries_documents_update_v1op.py: - id: 902a2c649e04 - last_write_checksum: sha1:c8ba64250a66dbdd9ac409ffeccb6bb75ba619c2 - pristine_git_object: 5551d5eec7961a5cc0fa9018ba680304e1f99d57 - src/mistralai/models/libraries_documents_upload_v1op.py: - id: a4586d35c41c - last_write_checksum: sha1:83c40a6b1a790d292c72c90847926d458ea73d83 - pristine_git_object: 51f536cca6141b0243d3c3fff8da3224a0c51ea5 - 
src/mistralai/models/libraries_get_v1op.py: - id: ed8ae2dc35b4 - last_write_checksum: sha1:c9dc682319790ec77c3827b44e3e8937de0de17f - pristine_git_object: b87090f6bb56c7f7d019483c0e979f9f2fdc3378 - src/mistralai/models/libraries_share_create_v1op.py: - id: 6a5d94d8a3dc - last_write_checksum: sha1:312ec2ea1635e86da293a0f402498031591c9854 - pristine_git_object: a8b0e35db9a452a62dbc0893009a9708684d2a23 - src/mistralai/models/libraries_share_delete_v1op.py: - id: 474f847642a7 - last_write_checksum: sha1:557000669df73a160d83bcaaf456579890fa7f92 - pristine_git_object: e29d556a73a87a6f799948f05517a50545dfd79e - src/mistralai/models/libraries_share_list_v1op.py: - id: 5ccdc4491119 - last_write_checksum: sha1:c3ca37074f14aad02a9d01099fe7134204d5520e - pristine_git_object: b276d756e95e9e7dc53cd7ff5da857052c055046 - src/mistralai/models/libraries_update_v1op.py: - id: 6de043d02383 - last_write_checksum: sha1:0936d1273af7659d7283c1defc2094178bc58003 - pristine_git_object: c93895d97f165d4fa4cc33097f6b772b55337623 - src/mistralai/models/libraryin.py: - id: 0277ef6b7a58 - last_write_checksum: sha1:56e033aef199fd831da7efff829c266206134f99 - pristine_git_object: 872d494d66abde55130a6d2a6c30de950f51232c - src/mistralai/models/libraryinupdate.py: - id: 96904d836434 - last_write_checksum: sha1:50c13a51aee5fc6c562090dad803ca6b3a1a5bed - pristine_git_object: 6e8ab81acae479e5fb999c91bfc55f6e1cbee5cc - src/mistralai/models/libraryout.py: - id: e483109c6e21 - last_write_checksum: sha1:6394431205bd4c308de4ee600e839ac0c6624fc0 - pristine_git_object: d3bc36f94735fbabb23d6c19ff481e404227f548 - src/mistralai/models/listdocumentout.py: - id: 872891f10a41 - last_write_checksum: sha1:61f444f7318e20921ddda1efd1e63e9bbec1d93d - pristine_git_object: 9d39e0873f463cce5fca723a3c85f47cf0f6ddeb - src/mistralai/models/listfilesout.py: - id: 43a961a42ca8 - last_write_checksum: sha1:d3e0d056a8337adaffced63e2ed5b4b37a60927d - pristine_git_object: 2f82b37db7f3cb69d68ab097f9f75488939f66c8 - 
src/mistralai/models/listlibraryout.py: - id: dcd1a940efe5 - last_write_checksum: sha1:7dc2876bf50861c8e94079859725cadf2d7b14c4 - pristine_git_object: 1e647fe1db65421d73ba6e0f35cc580e99ea7212 - src/mistralai/models/listsharingout.py: - id: c04e23806a57 - last_write_checksum: sha1:efd9e780445bdcf4a4e7794cd1aedaa85067f904 - pristine_git_object: 38c0dbe0ab9aeb3c977e38f2bf95d84297456980 - src/mistralai/models/messageentries.py: - id: 2e456a2494da + src/mistralai/client/models/inputs.py: + id: 84a8007518c7 + last_write_checksum: sha1:3ecd986b0f5a0de3a4c88f06758cfa51068253e9 + pristine_git_object: fb0674760c1191f04e07f066e84ae9684a1431e3 + src/mistralai/client/models/instructrequest.py: + id: 6d3ad9f896c7 + last_write_checksum: sha1:5f8857f8fffe0b858cfc7bec268480003b562303 + pristine_git_object: 1b2f269359700582687fdf4492ea3cef64da48bb + src/mistralai/client/models/jobin.py: + id: f4d176123ccc + last_write_checksum: sha1:c1ec4b9ea0930612aea1b1c5c5cd419379ab0687 + pristine_git_object: dc7684fcbecd558fc6e3e3f17c4000ec217285c1 + src/mistralai/client/models/jobmetadataout.py: + id: 805f41e3292a + last_write_checksum: sha1:5f84c58dab92d76de8d74f2e02cdf7b2b4c9cc12 + pristine_git_object: f91e30c09232b5227972b3b02ba5efbde22ac387 + src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py: + id: b56cb6c17c95 + last_write_checksum: sha1:e5e2c422bb211bb4af3e8c1a4b48e491d0fdf5a4 + pristine_git_object: 21a04f7313b3594a204395ca080b76e2a4958c63 + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py: + id: 36b5a6b3ceee + last_write_checksum: sha1:5ada7f2b7a666f985c856a6d9cab1969928c9488 + pristine_git_object: 32e34281cd188f4d6d23d100fe0d45002030c56b + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py: + id: d8f0af99c94d + last_write_checksum: sha1:3026ea0231866e792dd3cf83eb2b2bac93eda61b + pristine_git_object: 3557e773860e94d85f7a528d000f03adfcc60c2f + src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py: 
+ id: 34f89d2af0ec + last_write_checksum: sha1:2a7225666b02d42be0d3455a249a962948feadf9 + pristine_git_object: 4536b738442ec9710ddf67f2faf7d30b094d8cd5 + src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: + id: d175c6e32ecb + last_write_checksum: sha1:07bfc80146492e3608a5c1683e4530de296c0938 + pristine_git_object: b36d3c3ef5abb30abc886876bb66384ea41bab9e + src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: + id: 81651291187a + last_write_checksum: sha1:eb265e749cc076b2d39c103df48ceeeda6da7f5a + pristine_git_object: ece0d15a0654ec759904276ad5d95c5619ff016f + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: + id: d910fd8fe2d6 + last_write_checksum: sha1:7ee82991b49a615517b3323abbfc0e5928419890 + pristine_git_object: aa5a26098e084885e8c2f63944e7549969899d3c + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: + id: cf43028824bf + last_write_checksum: sha1:3fd6b5c7c9ae24d662abd5d3c7ea9699e295e5ff + pristine_git_object: 7e399b31354e4f09c43efbe9ffe3d938f6af0d8c + src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: + id: e7ff4a4a4edb + last_write_checksum: sha1:176fef64d07c58da36ca6672ce5440508787dc84 + pristine_git_object: ed5938b039be719169e62e033b7735bde7e72503 + src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: + id: 7cc1c80335a9 + last_write_checksum: sha1:4270cb52e5aef807ec2d8a9ab1ca1065b0cf8a10 + pristine_git_object: e1be0ac00af889a38647b5f7e4f9d26ed09ee7c4 + src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: + id: 6d9dc624aafd + last_write_checksum: sha1:1a8054c02cd8fd3c48954812e153e97efa58aaef + pristine_git_object: a2b70b37e349c7f5fc6c687fbad015eb218de952 + src/mistralai/client/models/jobsout.py: + id: 22e91e9631a9 + last_write_checksum: sha1:f2a5aa117953410f0743c2dd024e4a462a0be105 + pristine_git_object: 
9087704f0660e39f662efbd36f39713202598c43 + src/mistralai/client/models/jsonschema.py: + id: e1fc1d8a434a + last_write_checksum: sha1:6289875b78fab12efa9e3a4aa4bebdb08a95d332 + pristine_git_object: db2fa55ba9001bd3715451c15e9661a87ff7501a + src/mistralai/client/models/legacyjobmetadataout.py: + id: 4f44aa38c864 + last_write_checksum: sha1:b6aba9032bb250c5a23f2ff2a8521b7bddcd1a06 + pristine_git_object: 155ecea78cb94fc1a3ffaccc4af104a8a81c5d44 + src/mistralai/client/models/libraries_delete_v1op.py: + id: b2e8bbd19baa + last_write_checksum: sha1:566db1febc40c73476af31a27201a208b64bc32a + pristine_git_object: fa447de067518abb355b958954ff9a3ee9b2cf6d + src/mistralai/client/models/libraries_documents_delete_v1op.py: + id: 81eb34382a3d + last_write_checksum: sha1:c7bd801e5f75d1716101721cd3e711be978cb7c5 + pristine_git_object: bc5ec6e5443b32d47e570c4f43c43827928a3e39 + src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py: + id: a7417ebd6040 + last_write_checksum: sha1:a298e22d9a68de87288419717b03273c1a26de6e + pristine_git_object: 24ed897d305cfccdc2b9717e214da901479cc70e + src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py: + id: d4b7b47913ba + last_write_checksum: sha1:0855bb39a09514fb5709bd3674eb5fcc618299f1 + pristine_git_object: 350c8e73992583b7890889c5ff252096a8df7fbd + src/mistralai/client/models/libraries_documents_get_status_v1op.py: + id: f314f73e909c + last_write_checksum: sha1:ca4679fbdc833b42e35b4c015ddf8434321d86eb + pristine_git_object: 92b077d3b5850985cac73ee880de7eab31a5b8fd + src/mistralai/client/models/libraries_documents_get_text_content_v1op.py: + id: 1ca4e0c41321 + last_write_checksum: sha1:8dbd91ab145d4c01e91502c9349477e1f98551d7 + pristine_git_object: 68f9725a1a390028e3118611bb0df1b4ab103943 + src/mistralai/client/models/libraries_documents_get_v1op.py: + id: 26ff35f0c69d + last_write_checksum: sha1:208b7ca22416295d27f51513e3fe58947e1549c7 + pristine_git_object: 
a67e687eaffebbee81654bbbb78ad00bcc28999c + src/mistralai/client/models/libraries_documents_list_v1op.py: + id: 756f26de3cbe + last_write_checksum: sha1:a742a58c137ecf1cfd7446d5f2f60211ff087751 + pristine_git_object: 5dec33858719e713c0fa07538aa0dfcab8d69dad + src/mistralai/client/models/libraries_documents_reprocess_v1op.py: + id: dbbeb02fc336 + last_write_checksum: sha1:516691f61c18e18b96738360a85acd34ba415ca0 + pristine_git_object: 8aee75522f7677e9f6fc49e2f8c5a75124db3dc7 + src/mistralai/client/models/libraries_documents_update_v1op.py: + id: 734ba6c19f5f + last_write_checksum: sha1:929f437a1c366b6cbecfc86b43436767712327f8 + pristine_git_object: f677b4ddc96b51ecd777240844800b2634ca4358 + src/mistralai/client/models/libraries_documents_upload_v1op.py: + id: "744466971862" + last_write_checksum: sha1:63b6f82a3ed8b0655d3b5dea1811699553d62cb0 + pristine_git_object: e2d59d9f1556ca77c0666b2bba3213ef5386f82a + src/mistralai/client/models/libraries_get_v1op.py: + id: d493f39e7ebb + last_write_checksum: sha1:d61166f6c399516d905c7376fabe56c102265747 + pristine_git_object: 83ae377d245e5c93a4a9118dd049a9096e9f3074 + src/mistralai/client/models/libraries_share_create_v1op.py: + id: feaacfd46dd3 + last_write_checksum: sha1:66ddb6685924e1702cfc40dbcb9a0d2e525cb57d + pristine_git_object: d0313bd01acd6e5403402d0d80a604a6c2812e19 + src/mistralai/client/models/libraries_share_delete_v1op.py: + id: 7f3a679ca384 + last_write_checksum: sha1:3ac568a5e09a6c74bc6779cd9c0bc3df36b24785 + pristine_git_object: 620527d50c15f5b14307e7735b429fe194469ed5 + src/mistralai/client/models/libraries_share_list_v1op.py: + id: 8f0af379bf1c + last_write_checksum: sha1:3d764be7232233229dc79079101270ace179e65f + pristine_git_object: fd5d9d33ce4b757b369d191621a727f71b5d2e35 + src/mistralai/client/models/libraries_update_v1op.py: + id: 92c8d4132252 + last_write_checksum: sha1:482c5b78278a6e729ed980191c6c1b94dbd890e6 + pristine_git_object: c434ab7a8be94042e6add582520dba11dc9d8d01 + 
src/mistralai/client/models/libraryin.py: + id: 6147d5df71d9 + last_write_checksum: sha1:5b7fe7a4bde80032bd36fad27f5854ad4bb1832f + pristine_git_object: a7b36158a165ab5586cba26cc1f96ab6fe938501 + src/mistralai/client/models/libraryinupdate.py: + id: 300a6bb02e6e + last_write_checksum: sha1:95060dfcdafbfe2deb96f450b128cd5d6f4e0e5a + pristine_git_object: f0241ba17f95b2c30a102bf1d09ac094c6e757e5 + src/mistralai/client/models/libraryout.py: + id: 4e608c7aafc4 + last_write_checksum: sha1:4089ffe9adc8e561b9ec093330c276de653bff7f + pristine_git_object: d1953f16490d40876d05cdd615a3ae8cbcbfd9f6 + src/mistralai/client/models/listdocumentout.py: + id: b2c96075ce00 + last_write_checksum: sha1:13c5461b89970ae00cdce8b80045ed586fd113b7 + pristine_git_object: 24969a0f6dc3d2e0badd650a2694d1ffa0062988 + src/mistralai/client/models/listfilesout.py: + id: ae5fa21b141c + last_write_checksum: sha1:2ef7f78253cde73c3baae6aebeda6568bcb96c0d + pristine_git_object: 1db17c406778ac201dfcc1fd348a3e1176f05977 + src/mistralai/client/models/listlibraryout.py: + id: cb78c529e763 + last_write_checksum: sha1:044d3d17138c3af1feba6b980f92f8db7bd64578 + pristine_git_object: 24aaa1a9874d0e2054f6a49efe0f70101cec2fb2 + src/mistralai/client/models/listsharingout.py: + id: ee708a7ccdad + last_write_checksum: sha1:0644f080e93a533f40579b8c59e5039dea4ee02d + pristine_git_object: f139813f54e97810502d658ad924911de646ab09 + src/mistralai/client/models/messageentries.py: + id: e13f9009902b last_write_checksum: sha1:7c2503f4a1be5e5dde5640dd7413fed06aee09b4 pristine_git_object: 9b1706dee5fbb5cae18674708a1571b187bf0576 - src/mistralai/models/messageinputcontentchunks.py: - id: 344669e96a85 + src/mistralai/client/models/messageinputcontentchunks.py: + id: 01025c12866a last_write_checksum: sha1:740e82b72d5472f0cc967e745c07393e2df8ae38 pristine_git_object: e90d8aa0317e553bfc0cceb4a356cf9994ecfb60 - src/mistralai/models/messageinputentry.py: - id: 2e0500be6230 - last_write_checksum: 
sha1:118ffb7715993d7c103be5d26894ce33d8437f8a - pristine_git_object: edf05631be8d89002fd3a3bfb3034a143b12ed21 - src/mistralai/models/messageoutputcontentchunks.py: - id: e8bb72ef0c0f + src/mistralai/client/models/messageinputentry.py: + id: c0a4b5179095 + last_write_checksum: sha1:def6a5ce05756f76f7da6504bfc25eea166b21ab + pristine_git_object: 12a31097a88e90645c67a30451a379427cd4fcd3 + src/mistralai/client/models/messageoutputcontentchunks.py: + id: 2ed248515035 last_write_checksum: sha1:f239151ae206f6e82ee3096d357ff33cf9a08138 pristine_git_object: 136a7608e7e2a612d48271a7c257e2bb383584f3 - src/mistralai/models/messageoutputentry.py: - id: 0113bf848952 - last_write_checksum: sha1:3a1569ef7b3efadb87418d3ed38a6df0710cca1b - pristine_git_object: 0e2df81e3e75841d31bafd200697e9fd236b6fbe - src/mistralai/models/messageoutputevent.py: - id: d194af351767 - last_write_checksum: sha1:b9c4bf8db3d22d6b01d79044258729b5daafc050 - pristine_git_object: 751767a31666e839ec35d722707d97db605be25f - src/mistralai/models/metricout.py: - id: "369168426763" - last_write_checksum: sha1:d245a65254d0a142a154ee0f453cd7b64677e666 - pristine_git_object: 930b5c2181d4c5c5d89474b66fc1a4eef7ca7865 - src/mistralai/models/mistralerror.py: - id: 89288c78040b + src/mistralai/client/models/messageoutputentry.py: + id: a07577d2268d + last_write_checksum: sha1:d0ca07d6bf6445a16761889bf04a5851abe21ea3 + pristine_git_object: d52e4e3e722ef221f565a0bd40f505385974a0e1 + src/mistralai/client/models/messageoutputevent.py: + id: a2bbf63615c6 + last_write_checksum: sha1:19dda725e29108b2110903e7883ce442e4e90bd4 + pristine_git_object: 3db7f5a0908a72f75f6f7303af4ad426a4909d84 + src/mistralai/client/models/metricout.py: + id: 92d33621dda7 + last_write_checksum: sha1:056f6e7e76182df649804034d722c5ad2e43294f + pristine_git_object: f8027a69235861ae8f04ccc185d61fa13cc8cc14 + src/mistralai/client/models/mistralerror.py: + id: 68ffd8394c2e last_write_checksum: sha1:07fce1e971a25d95ffa8c8f3624d62cdf96e353a 
pristine_git_object: 28cfd22dc3d567aa4ae55cc19ad89341fa9c96a1 - src/mistralai/models/mistralpromptmode.py: - id: b2580604c1fe - last_write_checksum: sha1:71cf04622681998b091f51e4157463109761333f - pristine_git_object: dfb6f2d2a76fd2749d91397752a38b333bae8b02 - src/mistralai/models/modelcapabilities.py: - id: a9589b97b15c - last_write_checksum: sha1:56ea040fb631f0825e9ce2c7b32de2c90f6923a1 - pristine_git_object: 6edf8e5bf238b91a245db3489f09ae24506103f3 - src/mistralai/models/modelconversation.py: - id: 7d8b7b8d62a8 - last_write_checksum: sha1:b76cc407f807c19c1ff5602f7dd1d0421db2486d - pristine_git_object: 8eca4f973cd20e8bcb70a519f8dc3749878f04a2 - src/mistralai/models/modellist.py: - id: 22085995d513 - last_write_checksum: sha1:f753c11b430f8dd4daffb60bef467c6fa20f5e52 - pristine_git_object: 394cb3fa66a8881b013f78f1c8ee5440c9933427 - src/mistralai/models/moderationobject.py: - id: de835c5cd36e - last_write_checksum: sha1:24befa2934888192a12d9954749b8e591eb22582 - pristine_git_object: 5eff2d2a100c96eb7491ca99716fc9523fb74643 - src/mistralai/models/moderationresponse.py: - id: 831711e73705 - last_write_checksum: sha1:a96af206b8cd7c161c77cde0d3720880f20cf7f8 - pristine_git_object: ed13cd6bc226e8e505ef248760374c795705440f - src/mistralai/models/no_response_error.py: - id: 3102fe819ad6 + src/mistralai/client/models/mistralpromptmode.py: + id: 95abc4ec799a + last_write_checksum: sha1:ed0b87853d373d830b6572cbdf99d64f167b1d48 + pristine_git_object: 7008fc055bd1031096b7a486a17bf9a5b7841a4c + src/mistralai/client/models/modelcapabilities.py: + id: 64d8a422ea29 + last_write_checksum: sha1:3857f4b989eeed681dffe387d48d66f880537db6 + pristine_git_object: a6db80e73189addcb1e1951a093297e0523f5fa4 + src/mistralai/client/models/modelconversation.py: + id: fea0a651f888 + last_write_checksum: sha1:35fec41b1dac4a83bdf229de5dd0436916b144c8 + pristine_git_object: 574f053d4186288980754ead28bb6ce19b414064 + src/mistralai/client/models/modellist.py: + id: 00693c7eec60 + last_write_checksum: 
sha1:4b9cdd48439f0ebc1aa6637cc93f445fc3e8a424 + pristine_git_object: 6a5209fa6dac59539be338e9ac6ffbefd18057ee + src/mistralai/client/models/moderationobject.py: + id: 132faad0549a + last_write_checksum: sha1:d108ea519d2f491ddbc2e99ab5b8cc02e6987cf8 + pristine_git_object: a6b44b96f00f47c168cd1b2339b7aa44e6ca139e + src/mistralai/client/models/moderationresponse.py: + id: 06bab279cb31 + last_write_checksum: sha1:d31313c2164ecbc5a5714435a52b6f0dda87b8fe + pristine_git_object: 288c8d82d87a9944ae6d7a417bb92e558c6dcc0f + src/mistralai/client/models/no_response_error.py: + id: 2849e0a482e2 last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 - src/mistralai/models/ocrimageobject.py: - id: 44523566cf03 - last_write_checksum: sha1:75bb3b2eec938bd59052ea85244130770d787cbf - pristine_git_object: cec0acf4104ba7153270a1130ac2ac58a171b147 - src/mistralai/models/ocrpagedimensions.py: - id: 0d8589f80c1a - last_write_checksum: sha1:d62f216c61756592e6cde4a5d72b68eedeaddcc5 - pristine_git_object: d1aeb54d869545aec3ecaad1240f1be2059280f1 - src/mistralai/models/ocrpageobject.py: - id: 2dfef21e786f - last_write_checksum: sha1:667013bdfafb5ed0867fa9cd350455f66fee3e90 - pristine_git_object: 737defbaea323e0f3ccd95c2a721f57acc9f43a0 - src/mistralai/models/ocrrequest.py: - id: 7dbc4bb7cafb - last_write_checksum: sha1:2f49cf3d70f2aa11cf2e7ac9f7cc262901387eb5 - pristine_git_object: 0e061ac95f2d92d0d8bb14a2d27b64d01bb4e962 - src/mistralai/models/ocrresponse.py: - id: a187e70d8c2e - last_write_checksum: sha1:0c09aee803a5e1a3ba7c7f5d0ce46e96ee3339ca - pristine_git_object: 7b65bee7e6c0fffc7019f7843dcf88c0b5fade4e - src/mistralai/models/ocrtableobject.py: - id: 1be0c3cc027f - last_write_checksum: sha1:804d15ad21276f47f5ea9beccab9e471840ac32e - pristine_git_object: 5f30ab5e15dabf6a96498f46cf6178dca7fdb906 - src/mistralai/models/ocrusageinfo.py: - id: 91ab3d4cd57a - last_write_checksum: 
sha1:018eaf85ebffbb3392ed3c6688a41882a0893015 - pristine_git_object: 36c9f826cc64f67b254bdd07b00ad77857a91e1c - src/mistralai/models/outputcontentchunks.py: - id: 25ae74f4c9b8 + src/mistralai/client/models/ocrimageobject.py: + id: 685faeb41a80 + last_write_checksum: sha1:93f3d24c4b7513fffef60d5590f3e5a4a0b6e1e4 + pristine_git_object: e97fa8df46c6e39775b3c938c7e1862a507090d2 + src/mistralai/client/models/ocrpagedimensions.py: + id: 02f763afbc9f + last_write_checksum: sha1:28e91a96916711bce831e7fa33a69f0e10298eed + pristine_git_object: f4fc11e0952f59b70c49e00d9f1890d9dd93a0df + src/mistralai/client/models/ocrpageobject.py: + id: 07a099f89487 + last_write_checksum: sha1:367035d07f306aa5ce73fc77635d061a75612a68 + pristine_git_object: f8b43601e7a3dd4fae554c763d3ed1ee6f2927a3 + src/mistralai/client/models/ocrrequest.py: + id: 36f204c64074 + last_write_checksum: sha1:d4b7a8bf70efe5828d04d773f4b82284a18656f1 + pristine_git_object: 03a6028c5cc298b3ed66ae5f31c310d573a954e5 + src/mistralai/client/models/ocrresponse.py: + id: 2fdfc881ca56 + last_write_checksum: sha1:fb848d5f5c1456028a1e04b9e4f5be3234fa073f + pristine_git_object: 2813a1ca4c94d690f248a318a9e35d655d80600c + src/mistralai/client/models/ocrtableobject.py: + id: d74dd0d2ddac + last_write_checksum: sha1:6821e39003e2ca46dc31384c2635e59763fddb98 + pristine_git_object: 0c9091de8975d8bd8e673aadbb69a619b96d77e8 + src/mistralai/client/models/ocrusageinfo.py: + id: 272b7e1785d5 + last_write_checksum: sha1:b466bdd22ad5fa5f08c8aa51e3a6ff5e2fcbf749 + pristine_git_object: 62f07fd4fafa4c16a8cf80a9f52754904943272a + src/mistralai/client/models/outputcontentchunks.py: + id: 9ad9741f4975 last_write_checksum: sha1:47d74d212ebcb68ff75a547b3221f2aee3e07bfe pristine_git_object: ad0c087e0dcbe302dd9c73d1ea03e109e9a66ef7 - src/mistralai/models/paginationinfo.py: - id: 7e6919dfd6b1 - last_write_checksum: sha1:5ae05b383e9381862b8a980d83e73765b726294d - pristine_git_object: 00d4f1ec906e8485fdcb3e4b16a0b01acfa2be4b - 
src/mistralai/models/prediction.py: - id: ad77ec075e6d - last_write_checksum: sha1:d359ab3a37229212459228329219a1ec26a0381d - pristine_git_object: 582d87896b477de867cadf5e85d58ee71c445df3 - src/mistralai/models/processingstatusout.py: - id: 54d1c125ef83 - last_write_checksum: sha1:475749250ada2566c5a5d769eda1d350ddd8be8f - pristine_git_object: e67bfa865dcf94656a67f8612a5420f8b43cc0ec - src/mistralai/models/realtimetranscriptionerror.py: - id: f869fd6faf74 - last_write_checksum: sha1:17f78beea9e1821eed90c8a2412aadf953e17774 - pristine_git_object: 0785f7001aeaba7904120a62d569a35b7ee88a80 - src/mistralai/models/realtimetranscriptionerrordetail.py: - id: d106a319e66b - last_write_checksum: sha1:16e0fea1a3be85dfea6f2c44a53a15a3dc322b4c - pristine_git_object: cb5d73f861ce053a17b66695d2b56bafe1eeb03e - src/mistralai/models/realtimetranscriptionsession.py: - id: 48c7076e6ede - last_write_checksum: sha1:ae722fc946adf7282fd79c3a2c80fb53acc70ef2 - pristine_git_object: bcd0cfe37600b80e59cd50bd0edac3444be34fdb - src/mistralai/models/realtimetranscriptionsessioncreated.py: - id: 24825bcd61b2 - last_write_checksum: sha1:81f840757637e678c4512765ba8fda060f5af8cb - pristine_git_object: 9a2c2860d1538f03e795c62754244131820e2d44 - src/mistralai/models/realtimetranscriptionsessionupdated.py: - id: 5575fb5d1980 - last_write_checksum: sha1:a2d8d5947ba6b46dcd9a0a1e377067dbb92bfdf1 - pristine_git_object: ad1b513364f5d8d2f92fbc012509bf7567fa4573 - src/mistralai/models/referencechunk.py: - id: 6cdbb4e60749 - last_write_checksum: sha1:48a4dddda06aadd16f6ea34c58848430bd561432 - pristine_git_object: 1864ac794d4e637556003cbb2bf91c10832d90f9 - src/mistralai/models/requestsource.py: - id: 1836766b9e81 + src/mistralai/client/models/paginationinfo.py: + id: 48851e82d67e + last_write_checksum: sha1:b17cc84c592706882d5819b1a706c9a206de9198 + pristine_git_object: 0252f4482f50b34a35f52911b4b57b6899751b42 + src/mistralai/client/models/prediction.py: + id: 1cc842a069a5 + last_write_checksum: 
sha1:d9bd04d22d58e7e1be0195aaed218a4f407db9c0 + pristine_git_object: f2c5d9c60c50c6e397d7df9ce71ccff957b0e058 + src/mistralai/client/models/processingstatusout.py: + id: 3df842c4140f + last_write_checksum: sha1:83fbbccf635fabf60452dfa8dcac696033c3d436 + pristine_git_object: 031f386fb4381b8e2ead1bd22f7f53e59e37f6bb + src/mistralai/client/models/realtimetranscriptionerror.py: + id: 8c2267378f48 + last_write_checksum: sha1:671be287639964cc6ac7efbed41998f225845e2e + pristine_git_object: e6a889de576f9e36db551a44d4ed3cf0c032e599 + src/mistralai/client/models/realtimetranscriptionerrordetail.py: + id: 5bd25cdf9c7a + last_write_checksum: sha1:49ff15eb41e8964ba3b150e2fca70f6529dee58f + pristine_git_object: 27bb8d872792723b06238b3f0eebed815948fd63 + src/mistralai/client/models/realtimetranscriptionsession.py: + id: 02517fa5411a + last_write_checksum: sha1:a6db31662165d3df47a5da11efd1923121d1593e + pristine_git_object: 3a3306513c111125c71871024caa650176360c1b + src/mistralai/client/models/realtimetranscriptionsessioncreated.py: + id: 4e3731f63a3c + last_write_checksum: sha1:5d2e0541b58a3c647ded25d6a0cf8590f64cf0db + pristine_git_object: cc6d5028f221e1794c723dedac5c73564ddb61f7 + src/mistralai/client/models/realtimetranscriptionsessionupdated.py: + id: 686dc4f2450f + last_write_checksum: sha1:2311bf0107f0f957c48ee1841cc95369269a6105 + pristine_git_object: 3da23595291cd49e42d30646288f4f39da6f8c00 + src/mistralai/client/models/referencechunk.py: + id: 921acd3a224a + last_write_checksum: sha1:abfc5818dbe9e40be5d71436f2ffd1a9b53bd4ab + pristine_git_object: 4c703b8165329a55343c20b5080670168327afc4 + src/mistralai/client/models/requestsource.py: + id: 3f2774d9e609 last_write_checksum: sha1:31aae791bf737ad123fe189227d113838204ed42 pristine_git_object: 7b0a35c44050b6fca868479e261805a77f33e230 - src/mistralai/models/responsedoneevent.py: - id: 6300eaecde3c - last_write_checksum: sha1:693d832a480e943ff9c3e4f6822bea8358750ee1 - pristine_git_object: 
5a3a3dfb8630713a618cc23f97660840e4fbbeca - src/mistralai/models/responseerrorevent.py: - id: 88185105876c - last_write_checksum: sha1:5adfc1acdba4035f1a646a7678dd09e16d05e747 - pristine_git_object: 6cb1b26885ad9ded4f75f226b0ce713206cb0a49 - src/mistralai/models/responseformat.py: - id: 6d5e093fdba8 - last_write_checksum: sha1:4c4a801671419f403263caafbd90dbae6e2203da - pristine_git_object: 92284017b5b895673e510a739bc5c5ed104de4af - src/mistralai/models/responseformats.py: - id: e5fccecf2b70 + src/mistralai/client/models/responsedoneevent.py: + id: cf8a686bf82c + last_write_checksum: sha1:1fa63522f52a48a8e328dc5b3fe2c6f5206b04cc + pristine_git_object: 5405625692cb22c60a7b5f5a6f1b58cee5676576 + src/mistralai/client/models/responseerrorevent.py: + id: b286d74e8724 + last_write_checksum: sha1:f570a02791afb3fe60e99cbb4993c2d1f8dc476d + pristine_git_object: c9ef95a04c91c32e7a7973309e2174b7e776f099 + src/mistralai/client/models/responseformat.py: + id: 6ab8bc8d22c0 + last_write_checksum: sha1:ad0489488713a977dbf4eac739ce2734c8280350 + pristine_git_object: 5899b0175cefd4159eb680a3715a72fa78577ba4 + src/mistralai/client/models/responseformats.py: + id: c4462a05fb08 last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 - src/mistralai/models/responsestartedevent.py: - id: 37fbb3e37d75 - last_write_checksum: sha1:1d1eb4b486b2b92d167367d6525a8ea709d00c15 - pristine_git_object: d14d45ef8aa0d4e6dfa5893c52ae292f1f9a5780 - src/mistralai/models/responsevalidationerror.py: - id: 4b46e43f015b - last_write_checksum: sha1:c90231f7d7d3e93d6a36972ec4bead76fcb9ac47 - pristine_git_object: ed30165511c209289a030c5e9d9af1d2ad93d77c - src/mistralai/models/retrieve_model_v1_models_model_id_getop.py: - id: 81db6b688ded - last_write_checksum: sha1:8a7f0585855118e73fcd8f7213757172ac94c6fc - pristine_git_object: bfe62474610239f6e1ac0b5a4dc4b6ee9d321bd6 - src/mistralai/models/retrievefileout.py: - id: 5cf73a0007f0 - 
last_write_checksum: sha1:04abbd25f8757b7d9763a2c0aaca561a78960fbd - pristine_git_object: 94540083c22b330dc48428e0d80f1cf2292b93ab - src/mistralai/models/sampletype.py: - id: d1558bd8d355 - last_write_checksum: sha1:fbfdf1616eb6b64d785c11f11a33fca794de19eb - pristine_git_object: efb43e9be278aa00cda9828c5c8cb3edabc68d0f - src/mistralai/models/sdkerror.py: - id: d3c914c3c63a - last_write_checksum: sha1:6d6dafaf73210b86ef2fea441e2e864752242737 - pristine_git_object: 65c45cf1c2cb4047e3cce21538890e5f62136f0f - src/mistralai/models/security.py: - id: 88dd24d389d4 - last_write_checksum: sha1:3d460b276d68380a64d8d91947981ce27d92e552 - pristine_git_object: cf05ba8fbce8d7b9199396c41ccd4c218d71998b - src/mistralai/models/shareenum.py: - id: 371f676fce97 - last_write_checksum: sha1:9061b04c7b26435911ea18b095d76400e1ab1698 - pristine_git_object: 634ba4b7e800e134f209fa851391b1a49cd6fc97 - src/mistralai/models/sharingdelete.py: - id: 334b4a8820ae - last_write_checksum: sha1:e21d1a3cd972b02beecd3a2d3ed3ebf70ea9c414 - pristine_git_object: ebcdbab517d524cf4f2056fb253acb713e042d58 - src/mistralai/models/sharingin.py: - id: b762157651b7 - last_write_checksum: sha1:479261e2c4ad827b878b66afa5dfaec49df4573a - pristine_git_object: f7bb89ca1b670cfa9d66b3135e762e04ba6454a4 - src/mistralai/models/sharingout.py: - id: "198686162036" - last_write_checksum: sha1:ae269a353d6733ac81ab6a4f3ea3368eef2a99ec - pristine_git_object: 12455818a5c1f44538696015bee079bce9567cdc - src/mistralai/models/source.py: - id: 6f2e7cd2285e - last_write_checksum: sha1:b0fe76d6566e4573317ad4c862ddc11423a8bde7 - pristine_git_object: cc3abce298c4b817081610238e489d4023ca6f3f - src/mistralai/models/ssetypes.py: - id: 7817469fd731 + src/mistralai/client/models/responsestartedevent.py: + id: 24f54ee8b0f2 + last_write_checksum: sha1:5f7a4fad7c13f89b6e3672e422d5ef902aa5bf03 + pristine_git_object: dc6a10f91e2bb0d13a582ed03e7db2089b75bcf7 + src/mistralai/client/models/responsevalidationerror.py: + id: c244a88981e0 + 
last_write_checksum: sha1:2687c9ca7df0763384030719e5c1447d83f511b3 + pristine_git_object: bab5d0b70e0bb2ea567a16a1a7c5db839651836f + src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py: + id: 6fefa90ca351 + last_write_checksum: sha1:c34e2f55663cafe353e628fbd978a6be7ca6a467 + pristine_git_object: 7fdcd37d5879aaca158f459df830a5a4dc55bfa0 + src/mistralai/client/models/retrievefileout.py: + id: 8bb5859aa0d0 + last_write_checksum: sha1:9d182b5b20c8edef9b98a42036b13afd98031fd5 + pristine_git_object: ffd0617a1c6465a5f8080eb65e382e7a9169eef4 + src/mistralai/client/models/sampletype.py: + id: a9309422fed7 + last_write_checksum: sha1:1eb21a68c138e9a0d39b4dd14bcffc9e3ff0784f + pristine_git_object: e0727b028c790a62da67784965f825436dead4f8 + src/mistralai/client/models/sdkerror.py: + id: 12f991dad510 + last_write_checksum: sha1:9ee3f2dfd9977ce77957d60116db7d04740a4eed + pristine_git_object: ceb03c4868f9c9111007d6c16411f5da1954f211 + src/mistralai/client/models/security.py: + id: c2ca0e2a36b7 + last_write_checksum: sha1:415802794c6a3f22c58e863be0f633727f681600 + pristine_git_object: 1b67229bee0b64f3a9e8fc3600a7b0c9c13c0a2d + src/mistralai/client/models/shareenum.py: + id: a0e2a7a16bf8 + last_write_checksum: sha1:0beaa4472ed607142b485c9e208441f9050746b9 + pristine_git_object: ca1b96245e81327aa830f07c0588dccdc1ee518e + src/mistralai/client/models/sharingdelete.py: + id: f5ecce372e06 + last_write_checksum: sha1:c943bfc24aa0f2035a1b5261d29efb5f3518a555 + pristine_git_object: d659342f1330d73354d557a45bc1a16015a38d8b + src/mistralai/client/models/sharingin.py: + id: e953dda09c02 + last_write_checksum: sha1:996c17a8db2c61daed285ee5cafd44481fbd1483 + pristine_git_object: 630f4c70552167237735797f6b64d3f1df5ea214 + src/mistralai/client/models/sharingout.py: + id: 0b8804effb5c + last_write_checksum: sha1:b3356792affd50e062bb1f1a84d835bbcfeb50ab + pristine_git_object: 195701d111514fe9aebfedce05dbb4bafab67fed + src/mistralai/client/models/source.py: + id: fcee60a4ea0d + 
last_write_checksum: sha1:6f3ea355c62280e1fc6008da69ed0b987f53fd72 + pristine_git_object: 181b327ea73a9bcf9fb90f95633da71cee96e599 + src/mistralai/client/models/ssetypes.py: + id: 1733e4765106 last_write_checksum: sha1:1901bf6feee92ac100113e0a98dc0abe6e769375 pristine_git_object: 796f0327cbb1372c1b2a817a7db39f8f185a59be - src/mistralai/models/systemmessage.py: - id: 0f0c7d12c400 - last_write_checksum: sha1:6886cc2f9603aabf75289ccc895e23ad45e65dc7 - pristine_git_object: 2b34607b39a1a99d6569985818a89d9e973f3cdd - src/mistralai/models/systemmessagecontentchunks.py: - id: 5a051e10f9df - last_write_checksum: sha1:bef0630a287d9000595a26049290b978c0816ddc - pristine_git_object: a1f04d1e5802521d4913b9ec1978c3b9d77ac38f - src/mistralai/models/textchunk.py: - id: 7dee31ce6ec3 - last_write_checksum: sha1:5ae5f498eaf03aa99354509c7558de42f7933c0c - pristine_git_object: 6052686ee52d3713ddce08f22c042bab2569f4da - src/mistralai/models/thinkchunk.py: - id: 8d0ee5d8ba9c - last_write_checksum: sha1:34f0cc91e66cb0ad46331b4e0385534d13b9ee1c - pristine_git_object: 627ae4883698696774b7a285a73326a4509c6828 - src/mistralai/models/timestampgranularity.py: - id: e0cb6c4efa2a + src/mistralai/client/models/systemmessage.py: + id: 500ef6e85ba1 + last_write_checksum: sha1:0e8e34fa66e4bb8bf1128b3007ef72bf33690e1e + pristine_git_object: 9e01bc57bd17a5ecf6be5fee3383bbb9e03a8ab5 + src/mistralai/client/models/systemmessagecontentchunks.py: + id: 297e8905d5af + last_write_checksum: sha1:4581a28c592708bf51dbc75b28fe9f7bddde3c70 + pristine_git_object: 7a79737964b79e39b760ef833cce24e411f5aa90 + src/mistralai/client/models/textchunk.py: + id: 9c96fb86a9ab + last_write_checksum: sha1:8abd7cb3d8149458d95268eea8f18d5096e77fb0 + pristine_git_object: 4207ce7e46141aed94cf0f8726bb2433709101ca + src/mistralai/client/models/thinkchunk.py: + id: 294bfce193a4 + last_write_checksum: sha1:a6cd3efbf01dc0a72818675893594179addcfd12 + pristine_git_object: b1560806b88b733bf3b574c3e0d45e93df892548 + 
src/mistralai/client/models/timestampgranularity.py: + id: 68ddf8d702ea last_write_checksum: sha1:68ea11a4e27f23b2fcc976d0a8eeb95f6f28ba85 pristine_git_object: 5bda890f500228ba0c3dc234edf09906b88cb522 - src/mistralai/models/tool.py: - id: c0a9b60b6cf1 - last_write_checksum: sha1:805030012b6cf4d6159c1515b44e1c999ea2349a - pristine_git_object: b14a6adf2a804153e071c28b7e225594278b7443 - src/mistralai/models/toolcall.py: - id: 08f53b1090d7 - last_write_checksum: sha1:3b876a5d90066ebc4a337e7ba90b0607d9028c9e - pristine_git_object: 1f36792484f22af884a2b651442dbf1086e36f53 - src/mistralai/models/toolchoice.py: - id: de7498a868da - last_write_checksum: sha1:ec3178ff2a398b569ea6161e37006a349b75e94f - pristine_git_object: f8e1b48621527ca86f07efd4500089d339ddeb6a - src/mistralai/models/toolchoiceenum.py: - id: 580f382c7857 + src/mistralai/client/models/tool.py: + id: 48b4f6f50fe9 + last_write_checksum: sha1:5f80f78858fb50e0688123f8dd1478eeb0e7c5af + pristine_git_object: 4b29f575a3604d83fd6b492c26327f36e6e5a681 + src/mistralai/client/models/toolcall.py: + id: fb34a1a3f3c2 + last_write_checksum: sha1:f4c5de640f5b942f180062388be187a910067a1b + pristine_git_object: 558b49bfaec7c306c093b97a4bbf722fe9f4b6b1 + src/mistralai/client/models/toolchoice.py: + id: 14f7e4cc35b6 + last_write_checksum: sha1:f833d01b307437a83705b9b669b0d95eab4c01e0 + pristine_git_object: 2c7f6cbf6ebfbbdcce7d82b885b5e07b6b52d066 + src/mistralai/client/models/toolchoiceenum.py: + id: c7798801f860 last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 - src/mistralai/models/toolexecutiondeltaevent.py: - id: 674ab6adad2e - last_write_checksum: sha1:002e73c21df7e785268d77bad00b7967a514ede7 - pristine_git_object: 4fca46a80810a9976a0de70fef9e895be82fa921 - src/mistralai/models/toolexecutiondoneevent.py: - id: 86a2329a500d - last_write_checksum: sha1:00174f618358d49546ff8725a6dc3a9aebe5926c - pristine_git_object: 
621d55718957c766c796f6f98814ed917ccbaadc - src/mistralai/models/toolexecutionentry.py: - id: 41e2484af138 - last_write_checksum: sha1:c05c9f72cf939d4da334489be57e952b2fbd68f9 - pristine_git_object: 9f70a63b720b120283adc1292188f1f0dd8086a1 - src/mistralai/models/toolexecutionstartedevent.py: - id: 0987fdd1cd45 - last_write_checksum: sha1:beab5d913fb60fc98ec81dffb4636143e23286ec - pristine_git_object: 80dd5e97084cdedcdb2752491a61d8b2aadb091a - src/mistralai/models/toolfilechunk.py: - id: 275d194f5a7b - last_write_checksum: sha1:0ecb2b0ef96d57084c19f43553fdfafdf209ec16 - pristine_git_object: 87bc822c091f1b0c1896f0da16764e225e3f324c - src/mistralai/models/toolmessage.py: - id: dff99c41aecf - last_write_checksum: sha1:19fbda605416fcc20f842b6d3067f64de2691246 - pristine_git_object: ef917c4369a7459e70f04da2c20ed62b9316d9bc - src/mistralai/models/toolreferencechunk.py: - id: 5e3482e21a7e - last_write_checksum: sha1:21038657452d30fd80b5204451b7b7bfbbce6cf6 - pristine_git_object: 2a751cb08f1442ca5f91ab0b688db822c6f72dd7 - src/mistralai/models/tooltypes.py: - id: c4ef111ec45b - last_write_checksum: sha1:f9cd152556d95e9e197ac0c10f65303789e28bcb - pristine_git_object: f54893c259518313218d9ee307669c291a8c0cf8 - src/mistralai/models/trainingfile.py: - id: 150e9031690e - last_write_checksum: sha1:f20266317087b92eb74ed8cd48e7477666faf9a8 - pristine_git_object: 99bd49dd760960558be40adf138f9b4b95ee62d9 - src/mistralai/models/transcriptionresponse.py: - id: b50f2e392e31 - last_write_checksum: sha1:79d57bf44dbad0f364ac57ad967642271b7a7526 - pristine_git_object: 54a98a5ba7b83a6b7f6a39046b400a61e9889898 - src/mistralai/models/transcriptionsegmentchunk.py: - id: ccd6d5675b49 - last_write_checksum: sha1:01b1c1c52a1e324c8f874586cdd0349fed35443c - pristine_git_object: 40ad20b3abc2f0b2c0d2d695ba89237f66cc0b2b - src/mistralai/models/transcriptionstreamdone.py: - id: 42177659bf0f - last_write_checksum: sha1:5fda2b766b2af41749006835e45c95f708eddb28 - pristine_git_object: 
e1b1ab3d6f257786a5180f6876f47d47414e7e72 - src/mistralai/models/transcriptionstreamevents.py: - id: 9593874b7574 - last_write_checksum: sha1:ace344cfbec0af2ad43b0b61ae444e34f9e9da99 - pristine_git_object: 8207c03fef9d76ca7405b85d93c2f462eae22329 - src/mistralai/models/transcriptionstreameventtypes.py: - id: e2e35365ad39 + src/mistralai/client/models/toolexecutiondeltaevent.py: + id: df8f17cf3e07 + last_write_checksum: sha1:32257ebf812efe05763df71e498018d53884a32d + pristine_git_object: 0268e6a0d9b3c25afe1022e61a630e926a50f135 + src/mistralai/client/models/toolexecutiondoneevent.py: + id: 514fdee7d99f + last_write_checksum: sha1:e99be4db8d87bb3aa9383c062846d35923721292 + pristine_git_object: 854baee98a119caf237ca0f39e4ddd7a36577771 + src/mistralai/client/models/toolexecutionentry.py: + id: 76db69eebe41 + last_write_checksum: sha1:1577af968f800b28a3da2006c44016a901532591 + pristine_git_object: 839709fb8ea63cc358de9f5e71180bf9e94cf5a5 + src/mistralai/client/models/toolexecutionstartedevent.py: + id: 40fadb8e49a1 + last_write_checksum: sha1:49922a41c52e7f25eab26c8a34ec481c319c62b4 + pristine_git_object: 66438cfc33b171868f597ff3f80a82a40d1396f4 + src/mistralai/client/models/toolfilechunk.py: + id: 26c8aadf416a + last_write_checksum: sha1:753db4dd27eea752066a04774094cba73aeb8ca0 + pristine_git_object: 62b5ffeda19a7fa614ccc5e390450f2452dd119d + src/mistralai/client/models/toolmessage.py: + id: 15f1af161031 + last_write_checksum: sha1:47b4b3426ecde263ce4f2918ff98135952447b40 + pristine_git_object: eae2d2aef69dc4134f42714d69625e7b6c43e8c9 + src/mistralai/client/models/toolreferencechunk.py: + id: 822e9f3e70de + last_write_checksum: sha1:bf6b77aff4de13f4f374513e85785a1c6b17b87b + pristine_git_object: 882b1563a44cbc77256b6f44b1f41d602956d0b4 + src/mistralai/client/models/tooltypes.py: + id: 86c3b54272fd + last_write_checksum: sha1:94cd31b4a170bde0983bc48e8c1148693c3d67e0 + pristine_git_object: abb26c258280a889d784e662b45ed486fc648817 + 
src/mistralai/client/models/trainingfile.py: + id: 2edf9bce227d + last_write_checksum: sha1:12257eadce20511a4f3e3f3424e3bca112510f5f + pristine_git_object: 1d9763e0fd8e44f9b6e05254c5abb5a81fdf0b17 + src/mistralai/client/models/transcriptionresponse.py: + id: 60896dbc6345 + last_write_checksum: sha1:1f3066c34b7e76acc46ddb1e69869f3c62bfb841 + pristine_git_object: 24c0b92e424e91d40972c0826553a7d344a8f932 + src/mistralai/client/models/transcriptionsegmentchunk.py: + id: d1e6f3bdc74b + last_write_checksum: sha1:5f16b05debe943432b69d390844216a703adf71a + pristine_git_object: c89d84fcf3842da23e1f710309446b4c592ceeb3 + src/mistralai/client/models/transcriptionstreamdone.py: + id: 066a9158ed09 + last_write_checksum: sha1:1f9a29e826dcc91ed0c7f08b69aaa81987d810b7 + pristine_git_object: add17f562385c3befc2932b16448901154372ca6 + src/mistralai/client/models/transcriptionstreamevents.py: + id: b50b3d74f16f + last_write_checksum: sha1:38d2ff40e9d4f5d09fa24eef0925d306cf434bf0 + pristine_git_object: caaf943a4662ecccab96183f63c226eaefee2882 + src/mistralai/client/models/transcriptionstreameventtypes.py: + id: 6f71f6fbf4c5 last_write_checksum: sha1:38b7334aebf400e1abb2b20b0f2890880f0fc2f7 pristine_git_object: 4a910f0abca2912746cac60fd5a16bd5464f2457 - src/mistralai/models/transcriptionstreamlanguage.py: - id: 635759ec85f3 - last_write_checksum: sha1:93e389c2c8b41e378cfe7f88f05d8312236024e6 - pristine_git_object: 15b7514415e536bb04fd1a69ccea20615b5b1fcf - src/mistralai/models/transcriptionstreamsegmentdelta.py: - id: 83d02b065099 - last_write_checksum: sha1:3f70d4d58d8fedb784d056425662e7dc2f9ed244 - pristine_git_object: 550c83e7073bc99fdac6a0d59c5c30daa9d35f43 - src/mistralai/models/transcriptionstreamtextdelta.py: - id: ce0861d8affd - last_write_checksum: sha1:84a3b6c6d84a896e59e2874de59d812d3db657a5 - pristine_git_object: daee151f4ceaaee6c224b6dd078b4dfb680495b3 - src/mistralai/models/unarchiveftmodelout.py: - id: d758d3dee216 - last_write_checksum: 
sha1:b60e3292d2c4e6bf1456649184eaef4c75732cfc - pristine_git_object: 55c0ea8aa841ecef08f64020f099353efbdbcf7d - src/mistralai/models/updateftmodelin.py: - id: dbf79e18efd0 - last_write_checksum: sha1:aab40882f622a32054d73e33ca2be279bb880080 - pristine_git_object: 1bd0eaf2eb9b3427da6f4581b36d4316c0d129bf - src/mistralai/models/uploadfileout.py: - id: 1fa81af96888 - last_write_checksum: sha1:ebd3800e23e32b7f95665393db9a8e955c2912ea - pristine_git_object: f235fdcdf23d39d408d20a43597652f8daf677b0 - src/mistralai/models/usageinfo.py: - id: 62e303fb96aa - last_write_checksum: sha1:7f81b8c11fb5076e03a9fa40865382c9b45b700e - pristine_git_object: cedad5c12a96418567294e91812bfd96dce875bf - src/mistralai/models/usermessage.py: - id: dd10edab3b81 - last_write_checksum: sha1:a22b667ed90d8e34923d36422ef7ea6ae83d2dd7 - pristine_git_object: 61590bed06e1a397a1166a04a0b2405b833d19ff - src/mistralai/models/validationerror.py: - id: 0c6798c22859 - last_write_checksum: sha1:be4e31bc68c0eed17cd16679064760ac1f035d7b - pristine_git_object: e971e016d64237f24d86c171222f66575152fd1f - src/mistralai/models/wandbintegration.py: - id: a2f0944d8dbd - last_write_checksum: sha1:43a3c6f8d77cde042cfa129954f48c419d3fe1b9 - pristine_git_object: 690538963550d6adaf291fab8344f317c3c9080e - src/mistralai/models/wandbintegrationout.py: - id: bfae63e4ff4c - last_write_checksum: sha1:843e286ce58f072f27e8cb67b4c4f35001ffe0f0 - pristine_git_object: f5a9ba802b489f595bfc2578b9f3456b5230bdb3 - src/mistralai/models/websearchpremiumtool.py: - id: "710695472090" - last_write_checksum: sha1:85a562f976a03e9a3a659018caa78d2e26caeef9 - pristine_git_object: 3bbe753acb99f74f8eb7aa63a387f35714b0a259 - src/mistralai/models/websearchtool.py: - id: d8f773002c11 - last_write_checksum: sha1:1e48212c4cc43bf937a3d21837878a1722666a30 - pristine_git_object: eeafecb4847e66075b64dc34512aaca7a045900b - src/mistralai/models_.py: - id: dfcd71fd4c33 - last_write_checksum: sha1:076e72b91c364f1a4905092b02e2ad7ebf7765c6 - 
pristine_git_object: d44930a0db06117ba538424273935016a133e0ae - src/mistralai/ocr.py: - id: e23da68c9ae8 - last_write_checksum: sha1:ce13d4ac0fc3cc52b2a76480c570d89cfe71c002 - pristine_git_object: ceb7dd85f958452aeb55868c65746ccf6ec200a5 - src/mistralai/py.typed: - id: 3923b7c50c56 + src/mistralai/client/models/transcriptionstreamlanguage.py: + id: e94333e4bc27 + last_write_checksum: sha1:9427411056a6239956ed3963af53c452e6fc4705 + pristine_git_object: b47024adfca2d8da6f1f01ce573bcc339cbbc63a + src/mistralai/client/models/transcriptionstreamsegmentdelta.py: + id: c0a882ce57e5 + last_write_checksum: sha1:3cc8664a90c67c412fc3c58e6841571c476697ea + pristine_git_object: 7cfffb63f31d10a247e066c8f422e4f6af2cf489 + src/mistralai/client/models/transcriptionstreamtextdelta.py: + id: 6086dc081147 + last_write_checksum: sha1:d68e4b6cefa3a1492b461fbe17cff5c5216b58f5 + pristine_git_object: ce279cf67ffc4e225ce37490f4ffd0c0d64fe993 + src/mistralai/client/models/unarchiveftmodelout.py: + id: 9dbc3bfb71ed + last_write_checksum: sha1:b2a1f9af7a5a7f5cbcda3256c46d02926e0cf2da + pristine_git_object: 511c390b4192cf85ec86150c7dad84543c68e031 + src/mistralai/client/models/updateftmodelin.py: + id: 39e2d678e651 + last_write_checksum: sha1:dd8dda798b804c4927505ac1fcbd13787f32a25d + pristine_git_object: 0471a15458f3cff4939360d3891af0fdee9ec251 + src/mistralai/client/models/uploadfileout.py: + id: 42466f2bebfb + last_write_checksum: sha1:db43df223f848a25a1526624cd3722ef3014e700 + pristine_git_object: 55e56504db280fdb4772bb061128742866555e82 + src/mistralai/client/models/usageinfo.py: + id: 54adb9a3af16 + last_write_checksum: sha1:a5f57f73d176aa8f4a9ad91daefe8e6257398abc + pristine_git_object: f1186d97357320f4bfc9d3f2a626f58d2b1a38d0 + src/mistralai/client/models/usermessage.py: + id: cb583483acf4 + last_write_checksum: sha1:1c15371710f18d7ed8f612cc450f4873f83f1eb9 + pristine_git_object: 8d92cea803368e996d68dc2f3a2dadd1d06a4675 + src/mistralai/client/models/validationerror.py: + id: 
15df3c7368ab + last_write_checksum: sha1:de86af94be29bd8bfd5fa2708eeb3dda3032423d + pristine_git_object: 352409be88a1175073e5438d6da86fc9a54896fc + src/mistralai/client/models/wandbintegration.py: + id: 4823c1e80942 + last_write_checksum: sha1:a76661e93fd3b6d8a3d210ef610a40ff1da203f7 + pristine_git_object: 89489fb4527c6515a609bcb533ef59ab516c7a38 + src/mistralai/client/models/wandbintegrationout.py: + id: 6b103d74195c + last_write_checksum: sha1:e648c37d559f8cec36b3c8e06979d8ac053a2ad6 + pristine_git_object: a7f9afeb6683a173115371a686af5f95e2d29056 + src/mistralai/client/models/websearchpremiumtool.py: + id: bfe88af887e3 + last_write_checksum: sha1:af6e2fae78c2f22b98d58ab55b365d1688dba8cb + pristine_git_object: 8d2d4b5dfea50a34ac744181790bf5db84809b1c + src/mistralai/client/models/websearchtool.py: + id: 26b0903423e5 + last_write_checksum: sha1:49295d52d59e914620dedf9d22fb2290896039cf + pristine_git_object: ba4cc09f84faebb438a631db6ac328fea2ced609 + src/mistralai/client/models_.py: + id: 1d277958a843 + last_write_checksum: sha1:8f76c2395cb534e94366033007df24bf56c43ac7 + pristine_git_object: 5ef9da096e58023aaa582f31717b4ee7a4b720b0 + src/mistralai/client/ocr.py: + id: 2f804a12fc62 + last_write_checksum: sha1:877f0c2db0319ea6b5ccf3d92f35bf633df10eda + pristine_git_object: ce7e2126dda2bc2b12cefb96e955edd3c7d4b6ab + src/mistralai/client/py.typed: + id: d95cd1565e33 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 - src/mistralai/sdk.py: - id: b2a76476b492 - last_write_checksum: sha1:f0ce70fdd61fc69a6afb59a46b42719c14e429d8 - pristine_git_object: c83b53e0445788e27d0e451030807f1c6b86560b - src/mistralai/sdkconfiguration.py: - id: e6e7f1fb8b52 - last_write_checksum: sha1:63a0ae64777a9d39debeb6ef36ac6d71dadc6d80 - pristine_git_object: 7e77925ddc9aa01c4e5f8ba2ca52aa7f32e89859 - src/mistralai/transcriptions.py: - id: ba6b040274f2 - last_write_checksum: 
sha1:0cd336f14cccb581ff955feaf8bc6f7df185f27b - pristine_git_object: 90f2e58a3677e922cb5c8aac4b30d5e697ef2f05 - src/mistralai/types/__init__.py: - id: b89b8375c971 + src/mistralai/client/sdk.py: + id: 48edbcb38d7e + last_write_checksum: sha1:831d2d1fee16c8d970c946f80ec56ba965e4f0ca + pristine_git_object: 9957940005a1150762e9fc284993cefeb2e8831a + src/mistralai/client/sdkconfiguration.py: + id: b7dd68a0235e + last_write_checksum: sha1:a24763668db44bf36ca35d1efa4873e2495dd716 + pristine_git_object: df50d16fa502e8b4c2a4567f3541fd48bfc1e324 + src/mistralai/client/transcriptions.py: + id: 75b45780c978 + last_write_checksum: sha1:5c305412b646fa70232fd141e93378b3b4d4b3c4 + pristine_git_object: 455010243710d56d033861b1440cc1e30924d40c + src/mistralai/client/types/__init__.py: + id: 000b943f821c last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c - src/mistralai/types/basemodel.py: - id: 18149749a011 + src/mistralai/client/types/basemodel.py: + id: 7ec465a1d3ff last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee - src/mistralai/utils/__init__.py: - id: 6f6ad3db2456 + src/mistralai/client/utils/__init__.py: + id: b69505f4b269 last_write_checksum: sha1:c7c1ee47be7ac3774b042c8aee439143493ed3ce pristine_git_object: f9c2edce8ecf2d2a4ab0ad36129ac70afd3d1f2f - src/mistralai/utils/annotations.py: - id: 76966ef1943a + src/mistralai/client/utils/annotations.py: + id: 1ffdedfc66a2 last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 - src/mistralai/utils/datetimes.py: - id: a0aa72e39d40 + src/mistralai/client/utils/datetimes.py: + id: c40066d868c9 last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 - src/mistralai/utils/enums.py: - id: 400af6d98484 + 
src/mistralai/client/utils/enums.py: + id: a0735873b5ac last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 - src/mistralai/utils/eventstreaming.py: - id: 7b58f8ceb28e + src/mistralai/client/utils/eventstreaming.py: + id: 3263d7502030 last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc - src/mistralai/utils/forms.py: - id: a584268d234f + src/mistralai/client/utils/forms.py: + id: 58842e905fce last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 - src/mistralai/utils/headers.py: - id: 3b4141506f5a + src/mistralai/client/utils/headers.py: + id: 9066de2ead8b last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a - src/mistralai/utils/logger.py: - id: e35e15a1b67e - last_write_checksum: sha1:23efbe8d8d3b9412877f3cd35b37477d0e460a2f - pristine_git_object: cc08930715f6f03a559a2f30c3a9482071a3e1e2 - src/mistralai/utils/metadata.py: - id: 617f23c58d0d + src/mistralai/client/utils/logger.py: + id: 745023607a1f + last_write_checksum: sha1:3212454c3047548e8f9099366dc0e7c37e5918ac + pristine_git_object: 2ef27ee5bb8cd37d9aa66b076c449fd9c80e2627 + src/mistralai/client/utils/metadata.py: + id: d49d535ae52c last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d - src/mistralai/utils/queryparams.py: - id: 6d86b06d25db + src/mistralai/client/utils/queryparams.py: + id: bb77d4664844 last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 - src/mistralai/utils/requestbodies.py: - id: 09529564c402 + src/mistralai/client/utils/requestbodies.py: + id: 946cfcd26ee4 last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 
pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c - src/mistralai/utils/retries.py: - id: 3c8dad479e7d + src/mistralai/client/utils/retries.py: + id: 5f1a5b90423c last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 - src/mistralai/utils/security.py: - id: e8a6622acc38 + src/mistralai/client/utils/security.py: + id: 1acb7c006265 last_write_checksum: sha1:d86d2fd73cbb4a77f72395c10fff1c700efcf42e pristine_git_object: 3b8526bfdf5c9c871ed184a2ec785f7bc1ebe57e - src/mistralai/utils/serializers.py: - id: e3688f9815db + src/mistralai/client/utils/serializers.py: + id: 53c57c7f29a8 last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 - src/mistralai/utils/unmarshal_json_response.py: - id: 3bc4add4e1b6 - last_write_checksum: sha1:0b7b57b8a97ff6bfbb4dea22d59b8aade9a487f2 - pristine_git_object: 64d0b3a6c59921ac0a5fb05d52ba47d0b696ae0e - src/mistralai/utils/url.py: - id: 8aa618817e83 + src/mistralai/client/utils/unmarshal_json_response.py: + id: b13585fc5626 + last_write_checksum: sha1:4df16054b0c28b043d248dd8f56992574156bcd0 + pristine_git_object: 6d43d6e44056d64e272f60a466c47391a60c792d + src/mistralai/client/utils/url.py: + id: 3c6496c17510 last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 - src/mistralai/utils/values.py: - id: 3b1394457cf4 + src/mistralai/client/utils/values.py: + id: bb6ade7a7f82 last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 examples: diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index bb904c64..38b7899c 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -39,7 +39,7 @@ targets: sourceRevisionDigest: sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13 sourceBlobDigest: 
sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:8fa56ecd9dd6e5f831fb96c4cfd00c65f617a03ff67f876d75ecdf28cb5bbf3c + codeSamplesRevisionDigest: sha256:deaa27e908bb7bee4f2ad753a92beb5749805f3f160eb56c5988b336d31a531c workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 diff --git a/README.md b/README.md index 131ce557..e71b1a19 100644 --- a/README.md +++ b/README.md @@ -103,7 +103,7 @@ It's also possible to write a standalone Python script without needing to set up # ] # /// -from mistralai import Mistral +from mistralai.client import Mistral sdk = Mistral( # SDK arguments @@ -136,7 +136,7 @@ This example shows how to create chat completions. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -164,7 +164,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -194,7 +194,7 @@ This example shows how to upload a file. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -218,7 +218,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -244,7 +244,7 @@ This example shows how to create agents completions. 
```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -272,7 +272,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -302,7 +302,7 @@ This example shows how to create embedding request. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -326,7 +326,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -586,7 +586,7 @@ The stream is also a [Context Manager][context-manager] and can be used with the underlying connection when the context is exited. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -630,7 +630,7 @@ Certain SDK methods accept file objects as part of a request body or multi-part > ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -656,8 +656,8 @@ Some of the endpoints in this SDK support retries. 
If you use the SDK without an To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: ```python -from mistralai import Mistral -from mistralai.utils import BackoffStrategy, RetryConfig +from mistralai.client import Mistral +from mistralai.client.utils import BackoffStrategy, RetryConfig import os @@ -675,8 +675,8 @@ with Mistral( If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: ```python -from mistralai import Mistral -from mistralai.utils import BackoffStrategy, RetryConfig +from mistralai.client import Mistral +from mistralai.client.utils import BackoffStrategy, RetryConfig import os @@ -696,7 +696,7 @@ with Mistral( ## Error Handling -[`MistralError`](./src/mistralai/models/mistralerror.py) is the base class for all HTTP error responses. It has the following properties: +[`MistralError`](./src/mistralai/client/models/mistralerror.py) is the base class for all HTTP error responses. It has the following properties: | Property | Type | Description | | ------------------ | ---------------- | --------------------------------------------------------------------------------------- | @@ -709,8 +709,8 @@ with Mistral( ### Example ```python -import mistralai -from mistralai import Mistral, models +import mistralai.client +from mistralai.client import Mistral, models import os @@ -736,12 +736,12 @@ with Mistral( # Depending on the method different errors may be thrown if isinstance(e, models.HTTPValidationError): - print(e.data.detail) # Optional[List[mistralai.ValidationError]] + print(e.data.detail) # Optional[List[mistralai.client.ValidationError]] ``` ### Error Classes **Primary error:** -* [`MistralError`](./src/mistralai/models/mistralerror.py): The base class for HTTP error responses. +* [`MistralError`](./src/mistralai/client/models/mistralerror.py): The base class for HTTP error responses.
Less common errors (6) @@ -753,9 +753,9 @@ with Mistral( * [`httpx.TimeoutException`](https://www.python-httpx.org/exceptions/#httpx.TimeoutException): HTTP request timed out. -**Inherit from [`MistralError`](./src/mistralai/models/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 52 of 74 methods.* -* [`ResponseValidationError`](./src/mistralai/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. +**Inherit from [`MistralError`](./src/mistralai/client/models/mistralerror.py)**: +* [`HTTPValidationError`](./src/mistralai/client/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 52 of 74 methods.* +* [`ResponseValidationError`](./src/mistralai/client/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute.
@@ -776,7 +776,7 @@ You can override the default server globally by passing a server name to the `se #### Example ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -796,7 +796,7 @@ with Mistral( The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example: ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -822,7 +822,7 @@ This allows you to wrap the client with your own custom logic, such as adding cu For example, you could specify a header for every request that this sdk makes as follows: ```python -from mistralai import Mistral +from mistralai.client import Mistral import httpx http_client = httpx.Client(headers={"x-custom-header": "someValue"}) @@ -831,8 +831,8 @@ s = Mistral(client=http_client) or you could wrap the client with your own custom logic: ```python -from mistralai import Mistral -from mistralai.httpclient import AsyncHttpClient +from mistralai.client import Mistral +from mistralai.client.httpclient import AsyncHttpClient import httpx class CustomClient(AsyncHttpClient): @@ -907,7 +907,7 @@ This SDK supports the following security scheme globally: To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -931,7 +931,7 @@ The `Mistral` class implements the context manager protocol and registers a fina [context-manager]: https://docs.python.org/3/reference/datamodel.html#context-managers ```python -from mistralai import Mistral +from mistralai.client import Mistral import os def main(): @@ -958,11 +958,11 @@ You can setup your SDK to emit debug logs for SDK requests and responses. You can pass your own logger class directly into your SDK. 
```python -from mistralai import Mistral +from mistralai.client import Mistral import logging logging.basicConfig(level=logging.DEBUG) -s = Mistral(debug_logger=logging.getLogger("mistralai")) +s = Mistral(debug_logger=logging.getLogger("mistralai.client")) ``` You can also enable a default debug logger by setting an environment variable `MISTRAL_DEBUG` to true. diff --git a/USAGE.md b/USAGE.md index a31d502f..18103864 100644 --- a/USAGE.md +++ b/USAGE.md @@ -5,7 +5,7 @@ This example shows how to create chat completions. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -33,7 +33,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -63,7 +63,7 @@ This example shows how to upload a file. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -87,7 +87,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -113,7 +113,7 @@ This example shows how to create agents completions. ```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -141,7 +141,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): @@ -171,7 +171,7 @@ This example shows how to create embedding request. 
```python # Synchronous Example -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -195,7 +195,7 @@ The same SDK client can also be used to make asynchronous requests by importing ```python # Asynchronous Example import asyncio -from mistralai import Mistral +from mistralai.client import Mistral import os async def main(): diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index 040bc24c..64a1e749 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -18,7 +18,7 @@ Given a library, list all of the Entity that have access and to what level. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -59,7 +59,7 @@ Given a library id, you can create or update the access level of an entity. You ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -104,7 +104,7 @@ Given a library id, you can delete the access level of an entity. An owner canno ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 173925ee..75efc492 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -17,7 +17,7 @@ Agents Completion ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -80,7 +80,7 @@ Mistral AI provides the ability to stream responses back to a client in order to ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 5bb24baa..89c4fffb 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -17,7 +17,7 @@ Chat Completion ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -83,7 +83,7 @@ Mistral AI provides the ability to stream responses back to a client in order to ```python -from mistralai import Mistral +from 
mistralai.client import Mistral import os diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index e76efb79..634ee419 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -19,7 +19,7 @@ Moderations ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -65,7 +65,7 @@ Chat Moderations ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -112,7 +112,7 @@ Classifications ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -157,7 +157,7 @@ Chat Classifications ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index ca383176..acd43cdb 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -26,7 +26,7 @@ Create a new conversation, using a base model or an agent and append entries. Co ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -83,7 +83,7 @@ Retrieve a list of conversation entities sorted by creation time. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -126,7 +126,7 @@ Given a conversation_id retrieve a conversation entity with its attributes. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -167,7 +167,7 @@ Delete a conversation given a conversation_id. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -203,7 +203,7 @@ Run completion on the history of the conversation and the user entries. Return t ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -253,7 +253,7 @@ Given a conversation_id retrieve all the entries belonging to that conversation. 
```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -294,7 +294,7 @@ Given a conversation_id retrieve all the messages belonging to that conversation ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -335,7 +335,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -388,7 +388,7 @@ Create a new conversation, using a base model or an agent and append entries. Co ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -454,7 +454,7 @@ Run completion on the history of the conversation and the user entries. Return t ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -506,7 +506,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index d3f5a975..d90e7ee7 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -25,7 +25,7 @@ Given a library, lists the document that have been uploaded to that library. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -72,7 +72,7 @@ Given a library, upload a new document to that library. 
It is queued for process ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -117,7 +117,7 @@ Given a library and a document in this library, you can retrieve the metadata of ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -159,7 +159,7 @@ Given a library and a document in that library, update the name of that document ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -203,7 +203,7 @@ Given a library and a document in that library, delete that document. The docume ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -240,7 +240,7 @@ Given a library and a document in that library, you can retrieve the text conten ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -282,7 +282,7 @@ Given a library and a document in that library, retrieve the processing status o ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -324,7 +324,7 @@ Given a library and a document in that library, retrieve the signed URL of a spe ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -366,7 +366,7 @@ Given a library and a document in that library, retrieve the signed URL of text ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -408,7 +408,7 @@ Given a library and a document in that library, reprocess that document, it will ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 4390b7bd..0be7ea6d 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -16,7 +16,7 @@ Embeddings ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/files/README.md 
b/docs/sdks/files/README.md index 57b53fc7..44c39f8a 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -25,7 +25,7 @@ Please contact us if you need to increase these storage limits. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -69,7 +69,7 @@ Returns a list of files that belong to the user's organization. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -116,7 +116,7 @@ Returns information about a specific file. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -156,7 +156,7 @@ Delete a file. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -196,7 +196,7 @@ Download a file ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -236,7 +236,7 @@ Get Signed Url ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index db6f2e1b..3c8c59c7 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -17,7 +17,7 @@ FIM completion. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -68,7 +68,7 @@ Mistral AI provides the ability to stream responses back to a client in order to ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/jobs/README.md b/docs/sdks/jobs/README.md index 666224a7..9c44be75 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/jobs/README.md @@ -18,7 +18,7 @@ Get a list of fine-tuning jobs for your organization and user. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -67,7 +67,7 @@ Create a new fine-tuning job, it will be queued for processing. 
```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -119,7 +119,7 @@ Get a fine-tuned job details by its UUID. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -159,7 +159,7 @@ Request the cancellation of a fine tuning job. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -199,7 +199,7 @@ Request the start of a validated fine tuning job. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index e672c190..bbdacf05 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -20,7 +20,7 @@ List all libraries that you have created or have been shared with you. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -59,7 +59,7 @@ Create a new Library, you will be marked as the owner and only you will have the ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -102,7 +102,7 @@ Given a library id, details information about that Library. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -143,7 +143,7 @@ Given a library id, deletes it together with all documents that have been upload ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -184,7 +184,7 @@ Given a library id, you can update the name and description. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/mistralagents/README.md index bdd8d588..fe0f6e35 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/mistralagents/README.md @@ -25,7 +25,7 @@ Create a new agent giving it instructions, tools, description. 
The agent is then ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -77,7 +77,7 @@ Retrieve a list of agent entities sorted by creation time. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -124,7 +124,7 @@ Given an agent, retrieve an agent entity with its attributes. The agent_version ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -166,7 +166,7 @@ Update an agent attributes and create a new version. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -220,7 +220,7 @@ Delete an agent entity. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -256,7 +256,7 @@ Switch the version of an agent. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -298,7 +298,7 @@ Retrieve all versions for a specific agent with full agent context. Supports pag ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -341,7 +341,7 @@ Get a specific agent version by version number. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -383,7 +383,7 @@ Create a new alias or update an existing alias to point to a specific version. A ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -426,7 +426,7 @@ Retrieve all version aliases for a specific agent. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/mistraljobs/README.md index f1aa3f61..8f2358de 100644 --- a/docs/sdks/mistraljobs/README.md +++ b/docs/sdks/mistraljobs/README.md @@ -17,7 +17,7 @@ Get a list of batch jobs for your organization and user. 
```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -64,7 +64,7 @@ Create a new batch job, it will be queued for processing. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -113,7 +113,7 @@ Args: ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -154,7 +154,7 @@ Request the cancellation of a batch job. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index d51866b6..6fa28ca2 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -21,7 +21,7 @@ List all models available to the user. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -60,7 +60,7 @@ Retrieve information about a model. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -101,7 +101,7 @@ Delete a fine-tuned model. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -142,7 +142,7 @@ Update a model name or description. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -184,7 +184,7 @@ Archive a fine-tuned model. ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -224,7 +224,7 @@ Un-archive a fine-tuned model. 
```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index efcb9931..9fd9d6fc 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -16,7 +16,7 @@ OCR ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index dabab00e..9691b81d 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -17,7 +17,7 @@ Create Transcription ```python -from mistralai import Mistral +from mistralai.client import Mistral import os @@ -65,7 +65,7 @@ Create Streaming Transcription (SSE) ```python -from mistralai import Mistral +from mistralai.client import Mistral import os diff --git a/src/mistralai/client/__init__.py b/src/mistralai/client/__init__.py new file mode 100644 index 00000000..dd02e42e --- /dev/null +++ b/src/mistralai/client/__init__.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from ._version import ( + __title__, + __version__, + __openapi_doc_version__, + __gen_version__, + __user_agent__, +) +from .sdk import * +from .sdkconfiguration import * +from .models import * + + +VERSION: str = __version__ +OPENAPI_DOC_VERSION = __openapi_doc_version__ +SPEAKEASY_GENERATOR_VERSION = __gen_version__ +USER_AGENT = __user_agent__ diff --git a/src/mistralai/client/_hooks/__init__.py b/src/mistralai/client/_hooks/__init__.py new file mode 100644 index 00000000..2ee66cdd --- /dev/null +++ b/src/mistralai/client/_hooks/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .sdkhooks import * +from .types import * +from .registration import * diff --git a/src/mistralai/client/_hooks/registration.py b/src/mistralai/client/_hooks/registration.py new file mode 100644 index 00000000..cab47787 --- /dev/null +++ b/src/mistralai/client/_hooks/registration.py @@ -0,0 +1,13 @@ +from .types import Hooks + + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. + + +def init_hooks(hooks: Hooks): + # pylint: disable=unused-argument + """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook + with an instance of a hook that implements that specific Hook interface + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance""" diff --git a/src/mistralai/client/_hooks/sdkhooks.py b/src/mistralai/client/_hooks/sdkhooks.py new file mode 100644 index 00000000..c9318db4 --- /dev/null +++ b/src/mistralai/client/_hooks/sdkhooks.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from .types import ( + SDKInitHook, + BeforeRequestContext, + BeforeRequestHook, + AfterSuccessContext, + AfterSuccessHook, + AfterErrorContext, + AfterErrorHook, + Hooks, +) +from .registration import init_hooks +from typing import List, Optional, Tuple +from mistralai.client.httpclient import HttpClient + + +class SDKHooks(Hooks): + def __init__(self) -> None: + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + init_hooks(self) + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> httpx.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Tuple[Optional[httpx.Response], 
Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/src/mistralai/client/_hooks/types.py b/src/mistralai/client/_hooks/types.py new file mode 100644 index 00000000..e7e1bb7f --- /dev/null +++ b/src/mistralai/client/_hooks/types.py @@ -0,0 +1,113 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from abc import ABC, abstractmethod +import httpx +from mistralai.client.httpclient import HttpClient +from mistralai.client.sdkconfiguration import SDKConfiguration +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + config: SDKConfiguration + base_url: str + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__( + self, + config: SDKConfiguration, + base_url: str, + operation_id: str, + oauth2_scopes: Optional[List[str]], + security_source: Optional[Union[Any, Callable[[], Any]]], + ): + self.config = config + self.base_url = base_url + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + hook_ctx.security_source, + ) + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__( + hook_ctx.config, + hook_ctx.base_url, + hook_ctx.operation_id, + hook_ctx.oauth2_scopes, + 
hook_ctx.security_source, + ) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: HttpClient) -> Tuple[str, HttpClient]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> Union[httpx.Request, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success( + self, hook_ctx: AfterSuccessContext, response: httpx.Response + ) -> Union[httpx.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error( + self, + hook_ctx: AfterErrorContext, + response: Optional[httpx.Response], + error: Optional[Exception], + ) -> Union[Tuple[Optional[httpx.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py new file mode 100644 index 00000000..8c5d6e54 --- /dev/null +++ b/src/mistralai/client/_version.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import importlib.metadata + +__title__: str = "mistralai" +__version__: str = "2.0.0a1" +__openapi_doc_version__: str = "1.0.0" +__gen_version__: str = "2.794.1" +__user_agent__: str = "speakeasy-sdk/python 2.0.0a1 2.794.1 1.0.0 mistralai" + +try: + if __package__ is not None: + __version__ = importlib.metadata.version(__package__) +except importlib.metadata.PackageNotFoundError: + pass diff --git a/src/mistralai/client/accesses.py b/src/mistralai/client/accesses.py new file mode 100644 index 00000000..307c7156 --- /dev/null +++ b/src/mistralai/client/accesses.py @@ -0,0 +1,619 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + entitytype as models_entitytype, + shareenum as models_shareenum, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Mapping, Optional + + +class Accesses(BaseSDK): + r"""(beta) Libraries API - manage access to a library.""" + + def list( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListSharingOut: + r"""List all of the access to this library. + + Given a library, list all of the Entity that have access and to what level. 
+ + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareListV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListSharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListSharingOut: + r"""List all of the access to this library. + + Given a library, list all of the Entity that have access and to what level. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareListV1Request( + library_id=library_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListSharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update_or_create( + self, + *, + library_id: str, + level: models_shareenum.ShareEnum, + share_with_uuid: str, + share_with_type: models_entitytype.EntityType, + org_id: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Create or update an access level. + + Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. + + :param library_id: + :param level: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param org_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareCreateV1Request( + library_id=library_id, + sharing_in=models.SharingIn( + org_id=org_id, + level=level, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_in, False, False, "json", models.SharingIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise 
models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def update_or_create_async( + self, + *, + library_id: str, + level: models_shareenum.ShareEnum, + share_with_uuid: str, + share_with_type: models_entitytype.EntityType, + org_id: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Create or update an access level. + + Given a library id, you can create or update the access level of an entity. You have to be owner of the library to share a library. An owner cannot change their own role. A library cannot be shared outside of the organization. + + :param library_id: + :param level: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param org_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareCreateV1Request( + library_id=library_id, + sharing_in=models.SharingIn( + org_id=org_id, + level=level, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_in, False, False, "json", models.SharingIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, 
http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + library_id: str, + share_with_uuid: str, + share_with_type: models_entitytype.EntityType, + org_id: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Delete an access level. + + Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. + + :param library_id: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param org_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareDeleteV1Request( + library_id=library_id, + sharing_delete=models.SharingDelete( + org_id=org_id, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_delete, False, False, "json", models.SharingDelete + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise 
models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + library_id: str, + share_with_uuid: str, + share_with_type: models_entitytype.EntityType, + org_id: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.SharingOut: + r"""Delete an access level. + + Given a library id, you can delete the access level of an entity. An owner cannot delete it's own access. You have to be the owner of the library to delete an acces other than yours. + + :param library_id: + :param share_with_uuid: The id of the entity (user, workspace or organization) to share with + :param share_with_type: The type of entity, used to share a library. + :param org_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesShareDeleteV1Request( + library_id=library_id, + sharing_delete=models.SharingDelete( + org_id=org_id, + share_with_uuid=share_with_uuid, + share_with_type=share_with_type, + ), + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/libraries/{library_id}/share", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.sharing_delete, False, False, "json", models.SharingDelete + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_share_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.SharingOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, 
http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/agents.py b/src/mistralai/client/agents.py new file mode 100644 index 00000000..c04abd21 --- /dev/null +++ b/src/mistralai/client/agents.py @@ -0,0 +1,725 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + agentscompletionrequest as models_agentscompletionrequest, + agentscompletionstreamrequest as models_agentscompletionstreamrequest, + mistralpromptmode as models_mistralpromptmode, + prediction as models_prediction, + responseformat as models_responseformat, + tool as models_tool, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class Agents(BaseSDK): + r"""Agents API.""" + + def complete( + self, + *, + messages: Union[ + List[models_agentscompletionrequest.AgentsCompletionRequestMessages], + List[ + models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict + ], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models_agentscompletionrequest.AgentsCompletionRequestStop, + 
models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_agentscompletionrequest.AgentsCompletionRequestToolChoice, + models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Agents Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request( + method="POST", + path="/v1/agents/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if 
self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_completion_v1_agents_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + messages: Union[ + List[models_agentscompletionrequest.AgentsCompletionRequestMessages], + List[ + models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict + ], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = False, + stop: Optional[ + Union[ + models_agentscompletionrequest.AgentsCompletionRequestStop, + models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, 
Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_agentscompletionrequest.AgentsCompletionRequestToolChoice, + models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ChatCompletionResponse: + r"""Agents Completion + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. 
If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/agents/completions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + 
base_url=base_url or "", + operation_id="agents_completion_v1_agents_completions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ChatCompletionResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def stream( + self, + *, + messages: Union[ + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages + ], + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict + ], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = 
UNSET, + tool_choice: Optional[ + Union[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream Agents completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. 
By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionStreamRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request( + method="POST", + path="/v1/agents/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, 
+ base_url=base_url or "", + operation_id="stream_agents", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + messages: Union[ + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages + ], + List[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict + ], + ], + agent_id: str, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + 
models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream Agents completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param agent_id: The ID of the agent to use for this completion. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: + :param tool_choice: + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsCompletionStreamRequest( + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.AgentsCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.AgentsCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + agent_id=agent_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/agents/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentsCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == 
UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_agents", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/audio.py b/src/mistralai/client/audio.py new file mode 100644 index 00000000..28ccda1b --- /dev/null +++ b/src/mistralai/client/audio.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .basesdk import BaseSDK
from .sdkconfiguration import SDKConfiguration
from mistralai.client.transcriptions import Transcriptions
from typing import Optional


class Audio(BaseSDK):
    transcriptions: Transcriptions
    r"""API for audio transcription."""

    def __init__(
        self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
    ) -> None:
        # Share one configuration object across this sub-SDK and its children,
        # then eagerly build the nested SDKs.
        super().__init__(sdk_config, parent_ref=parent_ref)
        self.sdk_configuration = sdk_config
        self._init_sdks()

    def _init_sdks(self):
        # parent_ref is forwarded so the root SDK instance is not garbage
        # collected while nested SDKs (or their streams) are still in use.
        cfg = self.sdk_configuration
        self.transcriptions = Transcriptions(cfg, parent_ref=self.parent_ref)
+ """ + + def __init__( + self, + sdk_config: SDKConfiguration, + parent_ref: Optional[object] = None, + ) -> None: + self.sdk_configuration = sdk_config + self.parent_ref = parent_ref + + def _get_url(self, base_url, url_variables): + sdk_url, sdk_variables = self.sdk_configuration.get_server_details() + + if base_url is None: + base_url = sdk_url + + if url_variables is None: + url_variables = sdk_variables + + return utils.template_url(base_url, url_variables) + + def _build_request_async( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.async_client + return self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request( + self, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + client = self.sdk_configuration.client + return 
self._build_request_with_client( + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals, + security, + timeout_ms, + get_serialized_body, + url_override, + http_headers, + allow_empty_value, + ) + + def _build_request_with_client( + self, + client, + method, + path, + base_url, + url_variables, + request, + request_body_required, + request_has_path_params, + request_has_query_params, + user_agent_header, + accept_header_value, + _globals=None, + security=None, + timeout_ms: Optional[int] = None, + get_serialized_body: Optional[ + Callable[[], Optional[SerializedRequestBody]] + ] = None, + url_override: Optional[str] = None, + http_headers: Optional[Mapping[str, str]] = None, + allow_empty_value: Optional[List[str]] = None, + ) -> httpx.Request: + query_params = {} + + url = url_override + if url is None: + url = utils.generate_url( + self._get_url(base_url, url_variables), + path, + request if request_has_path_params else None, + _globals if request_has_path_params else None, + ) + + query_params = utils.get_query_params( + request if request_has_query_params else None, + _globals if request_has_query_params else None, + allow_empty_value, + ) + else: + # Pick up the query parameter from the override so they can be + # preserved when building the request later on (necessary as of + # httpx 0.28). 
+ parsed_override = urlparse(str(url_override)) + query_params = parse_qs(parsed_override.query, keep_blank_values=True) + + headers = utils.get_headers(request, _globals) + headers["Accept"] = accept_header_value + headers[user_agent_header] = self.sdk_configuration.user_agent + + if security is not None: + if callable(security): + security = security() + security = utils.get_security_from_env(security, models.Security) + if security is not None: + security_headers, security_query_params = utils.get_security(security) + headers = {**headers, **security_headers} + query_params = {**query_params, **security_query_params} + + serialized_request_body = SerializedRequestBody() + if get_serialized_body is not None: + rb = get_serialized_body() + if request_body_required and rb is None: + raise ValueError("request body is required") + + if rb is not None: + serialized_request_body = rb + + if ( + serialized_request_body.media_type is not None + and serialized_request_body.media_type + not in ( + "multipart/form-data", + "multipart/mixed", + ) + ): + headers["content-type"] = serialized_request_body.media_type + + if http_headers is not None: + for header, value in http_headers.items(): + headers[header] = value + + timeout = timeout_ms / 1000 if timeout_ms is not None else None + + return client.build_request( + method, + url, + params=query_params, + content=serialized_request_body.content, + data=serialized_request_body.data, + files=serialized_request_body.files, + headers=headers, + timeout=timeout, + ) + + def do_request( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.client + logger = self.sdk_configuration.debug_logger + + hooks = self.sdk_configuration.__dict__["_hooks"] + + def do(): + http_res = None + try: + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + logger.debug( + "Request:\nMethod: %s\nURL: 
%s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + get_body_content(req), + ) + + if client is None: + raise ValueError("client is required") + + http_res = client.send(req, stream=stream) + except Exception as e: + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = hooks.after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = utils.retry(do, utils.Retries(retry_config[0], retry_config[1])) + else: + http_res = do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + + return http_res + + async def do_request_async( + self, + hook_ctx, + request, + error_status_codes, + stream=False, + retry_config: Optional[Tuple[RetryConfig, List[str]]] = None, + ) -> httpx.Response: + client = self.sdk_configuration.async_client + logger = self.sdk_configuration.debug_logger + + hooks = self.sdk_configuration.__dict__["_hooks"] + + async def do(): + http_res = None + try: + req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + logger.debug( + "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", + req.method, + req.url, + req.headers, + 
get_body_content(req), + ) + + if client is None: + raise ValueError("client is required") + + http_res = await client.send(req, stream=stream) + except Exception as e: + _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + logger.debug("Request Exception", exc_info=True) + raise e + + if http_res is None: + logger.debug("Raising no response SDK error") + raise models.NoResponseError("No response received") + + logger.debug( + "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", + http_res.status_code, + http_res.url, + http_res.headers, + "" if stream else http_res.text, + ) + + if utils.match_status_codes(error_status_codes, http_res.status_code): + result, err = hooks.after_error( + AfterErrorContext(hook_ctx), http_res, None + ) + if err is not None: + logger.debug("Request Exception", exc_info=True) + raise err + if result is not None: + http_res = result + else: + logger.debug("Raising unexpected SDK error") + raise models.SDKError("Unexpected error occurred", http_res) + + return http_res + + if retry_config is not None: + http_res = await utils.retry_async( + do, utils.Retries(retry_config[0], retry_config[1]) + ) + else: + http_res = await do() + + if not utils.match_status_codes(error_status_codes, http_res.status_code): + http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + + return http_res diff --git a/src/mistralai/client/batch.py b/src/mistralai/client/batch.py new file mode 100644 index 00000000..d53a45fb --- /dev/null +++ b/src/mistralai/client/batch.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .basesdk import BaseSDK
from .sdkconfiguration import SDKConfiguration
from mistralai.client.mistral_jobs import MistralJobs
from typing import Optional


class Batch(BaseSDK):
    jobs: MistralJobs

    def __init__(
        self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
    ) -> None:
        # Share the configuration with the nested jobs SDK.
        super().__init__(sdk_config, parent_ref=parent_ref)
        self.sdk_configuration = sdk_config
        self._init_sdks()

    def _init_sdks(self):
        # parent_ref is forwarded so the root SDK stays alive while the
        # nested SDK is in use.
        self.jobs = MistralJobs(self.sdk_configuration, parent_ref=self.parent_ref)


"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .basesdk import BaseSDK
from .sdkconfiguration import SDKConfiguration
from mistralai.client.conversations import Conversations
from mistralai.client.libraries import Libraries
from mistralai.client.mistral_agents import MistralAgents
from typing import Optional


class Beta(BaseSDK):
    conversations: Conversations
    r"""(beta) Conversations API"""
    agents: MistralAgents
    r"""(beta) Agents API"""
    libraries: Libraries
    r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities."""

    def __init__(
        self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None
    ) -> None:
        # Share the configuration with all nested beta sub-SDKs.
        super().__init__(sdk_config, parent_ref=parent_ref)
        self.sdk_configuration = sdk_config
        self._init_sdks()

    def _init_sdks(self):
        # parent_ref is forwarded so the root SDK stays alive while any
        # nested SDK (or a stream created through one) is in use.
        cfg = self.sdk_configuration
        ref = self.parent_ref
        self.conversations = Conversations(cfg, parent_ref=ref)
        self.agents = MistralAgents(cfg, parent_ref=ref)
        self.libraries = Libraries(cfg, parent_ref=ref)
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .basesdk import BaseSDK
from mistralai.client import models, utils
from mistralai.client._hooks import HookContext
from mistralai.client.models import (
    chatcompletionrequest as models_chatcompletionrequest,
    chatcompletionstreamrequest as models_chatcompletionstreamrequest,
    mistralpromptmode as models_mistralpromptmode,
    prediction as models_prediction,
    responseformat as models_responseformat,
    tool as models_tool,
)
from mistralai.client.types import OptionalNullable, UNSET
from mistralai.client.utils import eventstreaming, get_security_from_env
from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
from typing import Any, Dict, List, Mapping, Optional, Union


class Chat(BaseSDK):
    r"""Chat Completion API."""

    def complete(
        self,
        *,
        model: str,
        messages: Union[
            List[models_chatcompletionrequest.Messages],
            List[models_chatcompletionrequest.MessagesTypedDict],
        ],
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = None,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = False,
        stop: Optional[
            Union[
                models_chatcompletionrequest.Stop,
                models_chatcompletionrequest.StopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        response_format: Optional[
            Union[
                models_responseformat.ResponseFormat,
                models_responseformat.ResponseFormatTypedDict,
            ]
        ] = None,
        tools: OptionalNullable[
            Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]]
        ] = UNSET,
        tool_choice: Optional[
            Union[
                models_chatcompletionrequest.ChatCompletionRequestToolChoice,
                models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict,
            ]
        ] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        n: OptionalNullable[int] = UNSET,
        prediction: Optional[
            Union[models_prediction.Prediction, models_prediction.PredictionTypedDict]
        ] = None,
        parallel_tool_calls: Optional[bool] = None,
        prompt_mode: OptionalNullable[
            models_mistralpromptmode.MistralPromptMode
        ] = UNSET,
        safe_prompt: Optional[bool] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ChatCompletionResponse:
        r"""Chat Completion

        Synchronously create a chat completion for ``messages`` with ``model``.

        :param model: ID of the model to use; see the List Available Models API
            for the models available to you.
        :param messages: The prompt(s), encoded as a list of messages with role
            and content.
        :param temperature: Sampling temperature (recommended 0.0-0.7); higher
            values are more random. Alter this or ``top_p``, not both.
        :param top_p: Nucleus sampling probability mass (e.g. 0.1 keeps the top
            10%). Alter this or ``temperature``, not both.
        :param max_tokens: Maximum tokens to generate; prompt + ``max_tokens``
            cannot exceed the model's context length.
        :param stream: Whether to stream partial progress as data-only
            server-sent events terminated by ``data: [DONE]``.
        :param stop: Token(s) that stop generation when detected.
        :param random_seed: Seed for deterministic sampling across calls.
        :param metadata: Arbitrary request metadata.
        :param response_format: Output format: ``text`` (default),
            ``json_object`` (JSON mode), or ``json_schema`` (schema-constrained
            JSON).
        :param tools: Tools (functions) the model may generate JSON inputs for.
        :param tool_choice: Tool-calling policy: ``none``, ``auto``, ``any`` /
            ``required``, or a specific function reference.
        :param presence_penalty: Penalizes repetition of words or phrases,
            encouraging more varied output.
        :param frequency_penalty: Penalizes words by their frequency so far,
            reducing repetition.
        :param n: Number of completions to return; input tokens billed once.
        :param prediction: Expected completion used to speed up responses.
        :param parallel_tool_calls: Allow multiple tool calls in parallel.
        :param prompt_mode: ``reasoning`` enables the reasoning system prompt.
        :param safe_prompt: Inject a safety prompt before the conversation.
        :param retries: Override the default retry configuration for this call.
        :param server_url: Override the default server URL for this call.
        :param timeout_ms: Per-request timeout in milliseconds.
        :param http_headers: Extra headers to set or replace on the request.
        :raises models.HTTPValidationError: On a 422 response.
        :raises models.SDKError: On other 4XX/5XX or unexpected responses.
        """
        # Per-call overrides fall back to the SDK-wide configuration.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Coerce TypedDict/dict-style arguments into their pydantic models.
        request = models.ChatCompletionRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            messages=utils.get_pydantic_model(messages, List[models.Messages]),
            response_format=utils.get_pydantic_model(
                response_format, Optional[models.ResponseFormat]
            ),
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
            tool_choice=utils.get_pydantic_model(
                tool_choice, Optional[models.ChatCompletionRequestToolChoice]
            ),
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            n=n,
            prediction=utils.get_pydantic_model(
                prediction, Optional[models.Prediction]
            ),
            parallel_tool_calls=parallel_tool_calls,
            prompt_mode=prompt_mode,
            safe_prompt=safe_prompt,
        )

        req = self._build_request(
            method="POST",
            path="/v1/chat/completions",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatCompletionRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="chat_completion_v1_chat_completions_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on the response's status code + content type.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ChatCompletionResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                models.HTTPValidationErrorData, http_res
            )
            raise models.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        raise models.SDKError("Unexpected response received", http_res)

    async def complete_async(
        self,
        *,
        model: str,
        messages: Union[
            List[models_chatcompletionrequest.Messages],
            List[models_chatcompletionrequest.MessagesTypedDict],
        ],
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = None,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = False,
        stop: Optional[
            Union[
                models_chatcompletionrequest.Stop,
                models_chatcompletionrequest.StopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        response_format: Optional[
            Union[
                models_responseformat.ResponseFormat,
                models_responseformat.ResponseFormatTypedDict,
            ]
        ] = None,
        tools: OptionalNullable[
            Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]]
        ] = UNSET,
        tool_choice: Optional[
            Union[
                models_chatcompletionrequest.ChatCompletionRequestToolChoice,
                models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict,
            ]
        ] = None,
        presence_penalty: Optional[float] = None,
        frequency_penalty: Optional[float] = None,
        n: OptionalNullable[int] = UNSET,
        prediction: Optional[
            Union[models_prediction.Prediction, models_prediction.PredictionTypedDict]
        ] = None,
        parallel_tool_calls: Optional[bool] = None,
        prompt_mode: OptionalNullable[
            models_mistralpromptmode.MistralPromptMode
        ] = UNSET,
        safe_prompt: Optional[bool] = None,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.ChatCompletionResponse:
        r"""Chat Completion (async)

        Async counterpart of :meth:`complete`; identical parameters, request
        construction, and error mapping, using the async client.

        :raises models.HTTPValidationError: On a 422 response.
        :raises models.SDKError: On other 4XX/5XX or unexpected responses.
        """
        # Per-call overrides fall back to the SDK-wide configuration.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        # Coerce TypedDict/dict-style arguments into their pydantic models.
        request = models.ChatCompletionRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            messages=utils.get_pydantic_model(messages, List[models.Messages]),
            response_format=utils.get_pydantic_model(
                response_format, Optional[models.ResponseFormat]
            ),
            tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]),
            tool_choice=utils.get_pydantic_model(
                tool_choice, Optional[models.ChatCompletionRequestToolChoice]
            ),
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            n=n,
            prediction=utils.get_pydantic_model(
                prediction, Optional[models.Prediction]
            ),
            parallel_tool_calls=parallel_tool_calls,
            prompt_mode=prompt_mode,
            safe_prompt=safe_prompt,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/chat/completions",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.ChatCompletionRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retries override the SDK-wide retry configuration.
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="chat_completion_v1_chat_completions_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        # Dispatch on the response's status code + content type.
        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.ChatCompletionResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                models.HTTPValidationErrorData, http_res
            )
            raise models.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        raise models.SDKError("Unexpected response received", http_res)
OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request( + method="POST", + path="/v1/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + 
hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + model: str, + messages: Union[ + List[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages + ], + List[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict + ], + ], + temperature: OptionalNullable[float] = UNSET, + top_p: Optional[float] = None, + max_tokens: OptionalNullable[int] = UNSET, + stream: Optional[bool] = True, + stop: Optional[ + Union[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, + random_seed: 
OptionalNullable[int] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + response_format: Optional[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = None, + tools: OptionalNullable[ + Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + ] = UNSET, + tool_choice: Optional[ + Union[ + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, + models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, + ] + ] = None, + presence_penalty: Optional[float] = None, + frequency_penalty: Optional[float] = None, + n: OptionalNullable[int] = UNSET, + prediction: Optional[ + Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + ] = None, + parallel_tool_calls: Optional[bool] = None, + prompt_mode: OptionalNullable[ + models_mistralpromptmode.MistralPromptMode + ] = UNSET, + safe_prompt: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + r"""Stream chat completion + + Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. + :param messages: The prompt(s) to generate completions for, encoded as a list of dict with role and content. 
+ :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param response_format: Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. + :param tools: A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. + :param tool_choice: Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. 
`auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + :param presence_penalty: The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. + :param frequency_penalty: The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition. + :param n: Number of completions to return for each request, input tokens are only billed once. + :param prediction: Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content. + :param parallel_tool_calls: Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel. + :param prompt_mode: Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used. + :param safe_prompt: Whether to inject a safety prompt before all conversations. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessages] + ), + response_format=utils.get_pydantic_model( + response_format, Optional[models.ResponseFormat] + ), + tools=utils.get_pydantic_model(tools, OptionalNullable[List[models.Tool]]), + tool_choice=utils.get_pydantic_model( + tool_choice, Optional[models.ChatCompletionStreamRequestToolChoice] + ), + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + n=n, + prediction=utils.get_pydantic_model( + prediction, Optional[models.Prediction] + ), + parallel_tool_calls=parallel_tool_calls, + prompt_mode=prompt_mode, + safe_prompt=safe_prompt, + ) + + req = self._build_request_async( + method="POST", + path="/v1/chat/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await 
self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_chat", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/classifiers.py b/src/mistralai/client/classifiers.py new file mode 100644 index 00000000..537e2438 --- /dev/null +++ b/src/mistralai/client/classifiers.py @@ -0,0 +1,800 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + chatmoderationrequest as models_chatmoderationrequest, + classificationrequest as models_classificationrequest, + inputs as models_inputs, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Classifiers(BaseSDK): + r"""Classifiers API.""" + + def moderate( + self, + *, + model: str, + inputs: Union[ + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModerationResponse: + r"""Moderations + + :param model: ID of the model to use. + :param inputs: Text to classify. + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + metadata=metadata, + inputs=inputs, + ) + + req = self._build_request( + method="POST", + path="/v1/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="moderations_v1_moderations_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModerationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def moderate_async( + self, + *, + model: str, + inputs: Union[ + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModerationResponse: + r"""Moderations + + :param model: ID of the model to use. + :param inputs: Text to classify. + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + metadata=metadata, + inputs=inputs, + ) + + req = self._build_request_async( + method="POST", + path="/v1/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="moderations_v1_moderations_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModerationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + 
http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def moderate_chat( + self, + *, + inputs: Union[ + models_chatmoderationrequest.ChatModerationRequestInputs, + models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, + ], + model: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModerationResponse: + r"""Chat Moderations + + :param inputs: Chat to classify + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatModerationRequest( + inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + model=model, + ) + + req = self._build_request( + method="POST", + path="/v1/chat/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatModerationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_moderations_v1_chat_moderations_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModerationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def moderate_chat_async( + self, + *, + inputs: Union[ + models_chatmoderationrequest.ChatModerationRequestInputs, + models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, + ], + model: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModerationResponse: + r"""Chat Moderations + + :param inputs: Chat to classify + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatModerationRequest( + inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/chat/moderations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatModerationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_moderations_v1_chat_moderations_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModerationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, 
http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def classify( + self, + *, + model: str, + inputs: Union[ + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Classifications + + :param model: ID of the model to use. + :param inputs: Text to classify. + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + metadata=metadata, + inputs=inputs, + ) + + req = self._build_request( + method="POST", + path="/v1/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="classifications_v1_classifications_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ClassificationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def classify_async( + self, + *, + model: str, + inputs: Union[ + models_classificationrequest.ClassificationRequestInputs, + models_classificationrequest.ClassificationRequestInputsTypedDict, + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Classifications + + :param model: ID of the model to use. + :param inputs: Text to classify. + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ClassificationRequest( + model=model, + metadata=metadata, + inputs=inputs, + ) + + req = self._build_request_async( + method="POST", + path="/v1/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="classifications_v1_classifications_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ClassificationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, 
"4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def classify_chat( + self, + *, + model: str, + inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Chat Classifications + + :param model: + :param inputs: Chat to classify + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatClassificationRequest( + model=model, + inputs=utils.get_pydantic_model(inputs, models.Inputs), + ) + + req = self._build_request( + method="POST", + path="/v1/chat/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_classifications_v1_chat_classifications_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ClassificationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def classify_chat_async( + self, + *, + model: str, + inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ClassificationResponse: + r"""Chat Classifications + + :param model: + :param inputs: Chat to classify + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ChatClassificationRequest( + model=model, + inputs=utils.get_pydantic_model(inputs, models.Inputs), + ) + + req = self._build_request_async( + method="POST", + path="/v1/chat/classifications", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ChatClassificationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="chat_classifications_v1_chat_classifications_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ClassificationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, 
http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py new file mode 100644 index 00000000..9caf4221 --- /dev/null +++ b/src/mistralai/client/conversations.py @@ -0,0 +1,2657 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + completionargs as models_completionargs, + conversationappendrequest as models_conversationappendrequest, + conversationappendstreamrequest as models_conversationappendstreamrequest, + conversationinputs as models_conversationinputs, + conversationrequest as models_conversationrequest, + conversationrestartrequest as models_conversationrestartrequest, + conversationrestartstreamrequest as models_conversationrestartstreamrequest, + conversationstreamrequest as models_conversationstreamrequest, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class Conversations(BaseSDK): + r"""(beta) Conversations API""" + + def start( + self, + *, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = False, + store: 
OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models_conversationrequest.HandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_conversationrequest.Tools], + List[models_conversationrequest.ToolsTypedDict], + ] + ] = None, + completion_args: OptionalNullable[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrequest.AgentVersion, + models_conversationrequest.AgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request( + method="POST", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any 
= None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def start_async( + self, + *, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = False, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models_conversationrequest.HandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_conversationrequest.Tools], + List[models_conversationrequest.ToolsTypedDict], + ] + ] = None, + completion_args: OptionalNullable[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrequest.AgentVersion, + models_conversationrequest.AgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + 
) -> models.ConversationResponse: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + 
user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + 
server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.ResponseBody]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + metadata=metadata, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + 
request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.ResponseBody], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.ResponseBody]: + r"""List all created conversations. + + Retrieve a list of conversation entities sorted by creation time. + + :param page: + :param page_size: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsListRequest( + page=page, + page_size=page_size, + metadata=metadata, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.ResponseBody], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", 
http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + r"""Retrieve a conversation information. + + Given a conversation_id retrieve a conversation entity with its attributes. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + r"""Retrieve a conversation information. + + Given a conversation_id retrieve a conversation entity with its attributes. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsGetRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a conversation. + + Delete a conversation given a conversation_id. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a conversation. + + Delete a conversation given a conversation_id. + + :param conversation_id: ID of the conversation from which we are fetching metadata. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsDeleteRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await 
self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def append( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationappendrequest.ConversationAppendRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. 
+ + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendRequest( + conversation_id=conversation_id, + conversation_append_request=models.ConversationAppendRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_request, + False, + False, + "json", + models.ConversationAppendRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = 
self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def append_async( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationappendrequest.ConversationAppendRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + 
http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendRequest( + conversation_id=conversation_id, + conversation_append_request=models.ConversationAppendRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + 
request.conversation_append_request, + False, + False, + "json", + models.ConversationAppendRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get_history( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationHistory: + r"""Retrieve all entries in a conversation. 
+ + Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. + + :param conversation_id: ID of the conversation from which we are fetching entries. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsHistoryRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}/history", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_history", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationHistory, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_history_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationHistory: + r"""Retrieve all entries in a conversation. + + Given a conversation_id retrieve all the entries belonging to that conversation. The entries are sorted in the order they were appended, those can be messages, connectors or function_call. + + :param conversation_id: ID of the conversation from which we are fetching entries. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsHistoryRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}/history", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_history", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationHistory, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get_messages( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationMessages: + r"""Retrieve all messages in a conversation. + + Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. + + :param conversation_id: ID of the conversation from which we are fetching messages. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsMessagesRequest( + conversation_id=conversation_id, + ) + + req = self._build_request( + method="GET", + path="/v1/conversations/{conversation_id}/messages", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_messages", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationMessages, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, 
http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_messages_async( + self, + *, + conversation_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationMessages: + r"""Retrieve all messages in a conversation. + + Given a conversation_id retrieve all the messages belonging to that conversation. This is similar to retrieving all entries except we filter the messages only. + + :param conversation_id: ID of the conversation from which we are fetching messages. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsMessagesRequest( + conversation_id=conversation_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/conversations/{conversation_id}/messages", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_messages", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationMessages, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API 
error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def restart( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + from_entry_id: str, + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationrestartrequest.ConversationRestartRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartrequest.ConversationRestartRequestAgentVersion, + models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param inputs: + :param from_entry_id: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. 
If not provided, uses the current version. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartRequest( + conversation_id=conversation_id, + conversation_restart_request=models.ConversationRestartRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}/restart", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_request, + False, + False, + "json", + models.ConversationRestartRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + 
+ http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def restart_async( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + from_entry_id: str, + stream: Optional[bool] = False, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationrestartrequest.ConversationRestartRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartrequest.ConversationRestartRequestAgentVersion, + models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + ] + ] = UNSET, + 
retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ConversationResponse: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param inputs: + :param from_entry_id: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartRequest( + conversation_id=conversation_id, + conversation_restart_request=models.ConversationRestartRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}/restart", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_request, + False, + False, + "json", + models.ConversationRestartRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", 
"5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ConversationResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def start_stream( + self, + *, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = True, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models_conversationstreamrequest.ConversationStreamRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_conversationstreamrequest.ConversationStreamRequestTools], + List[ + models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict + ], + ] + ] = None, + completion_args: OptionalNullable[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationstreamrequest.ConversationStreamRequestAgentVersion, + 
models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationStreamRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request( + method="POST", + path="/v1/conversations#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", 
"5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + async def start_stream_async( + self, + *, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = True, + store: OptionalNullable[bool] = UNSET, + handoff_execution: OptionalNullable[ + models_conversationstreamrequest.ConversationStreamRequestHandoffExecution + ] = UNSET, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_conversationstreamrequest.ConversationStreamRequestTools], + List[ + models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict + ], + ] + ] = None, + completion_args: OptionalNullable[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_id: 
OptionalNullable[str] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationstreamrequest.ConversationStreamRequestAgentVersion, + models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Create a conversation and append entries to it. + + Create a new conversation, using a base model or an agent and append entries. Completion and tool executions are run and the response is appended to the conversation.Use the returned conversation_id to continue the conversation. + + :param inputs: + :param stream: + :param store: + :param handoff_execution: + :param instructions: + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: + :param name: + :param description: + :param metadata: + :param agent_id: + :param agent_version: + :param model: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.ConversationStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationStreamRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, OptionalNullable[models.CompletionArgs] + ), + name=name, + description=description, + metadata=metadata, + agent_id=agent_id, + agent_version=agent_version, + model=model, + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.ConversationStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_start_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + def append_stream( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Append new entries to an existing conversation. 
+ + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendStreamRequest( + conversation_id=conversation_id, + conversation_append_stream_request=models.ConversationAppendStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_stream_request, + False, + False, + "json", + models.ConversationAppendStreamRequest, + ), + 
allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + async def append_stream_async( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + stream: Optional[bool] = True, + store: Optional[bool] = True, + 
handoff_execution: Optional[ + models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Append new entries to an existing conversation. + + Run completion on the history of the conversation and the user entries. Return the new created entries. + + :param conversation_id: ID of the conversation to which we append entries. + :param inputs: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsAppendStreamRequest( + conversation_id=conversation_id, + conversation_append_stream_request=models.ConversationAppendStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_append_stream_request, + False, + False, + "json", + models.ConversationAppendStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_append_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, 
+ ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + def restart_stream( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + from_entry_id: str, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = 
None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.ConversationEvents]: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param inputs: + :param from_entry_id: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartStreamRequest( + conversation_id=conversation_id, + conversation_restart_stream_request=models.ConversationRestartStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request( + method="POST", + path="/v1/conversations/{conversation_id}/restart#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_stream_request, + False, + False, + "json", + models.ConversationRestartStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = utils.stream_to_text(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + async def restart_stream_async( + self, + *, + conversation_id: str, + inputs: Union[ + models_conversationinputs.ConversationInputs, + models_conversationinputs.ConversationInputsTypedDict, + ], + from_entry_id: str, + stream: Optional[bool] = True, + store: Optional[bool] = True, + handoff_execution: Optional[ + models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution + ] = "server", + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + agent_version: OptionalNullable[ + Union[ + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, + models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, + ] + ] = UNSET, + retries: 
OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.ConversationEvents]: + r"""Restart a conversation starting from a given entry. + + Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. + + :param conversation_id: ID of the original conversation which is being restarted. + :param inputs: + :param from_entry_id: + :param stream: + :param store: Whether to store the results into our servers or not. + :param handoff_execution: + :param completion_args: White-listed arguments from the completion API + :param metadata: Custom metadata for the conversation. + :param agent_version: Specific version of the agent to use when restarting. If not provided, uses the current version. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1ConversationsRestartStreamRequest( + conversation_id=conversation_id, + conversation_restart_stream_request=models.ConversationRestartStreamRequest( + inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + stream=stream, + store=store, + handoff_execution=handoff_execution, + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + metadata=metadata, + from_entry_id=from_entry_id, + agent_version=agent_version, + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/conversations/{conversation_id}/restart#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.conversation_restart_stream_request, + False, + False, + "json", + models.ConversationRestartStreamRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_conversations_restart_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + 
request=req, + error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.ConversationEvents), + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/documents.py b/src/mistralai/client/documents.py new file mode 100644 index 00000000..009a604f --- /dev/null +++ b/src/mistralai/client/documents.py @@ -0,0 +1,1981 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + documentupdatein as models_documentupdatein, + file as models_file, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Documents(BaseSDK): + r"""(beta) Libraries API - manage documents in a library.""" + + def list( + self, + *, + library_id: str, + search: OptionalNullable[str] = UNSET, + page_size: Optional[int] = 100, + page: Optional[int] = 0, + filters_attributes: OptionalNullable[str] = UNSET, + sort_by: Optional[str] = "created_at", + sort_order: Optional[str] = "desc", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListDocumentOut: + r"""List documents in a given library. + + Given a library, lists the document that have been uploaded to that library. + + :param library_id: + :param search: + :param page_size: + :param page: + :param filters_attributes: + :param sort_by: + :param sort_order: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsListV1Request( + library_id=library_id, + search=search, + page_size=page_size, + page=page, + filters_attributes=filters_attributes, + sort_by=sort_by, + sort_order=sort_order, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListDocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + library_id: str, + search: OptionalNullable[str] = UNSET, + page_size: Optional[int] = 100, + page: Optional[int] = 0, + filters_attributes: OptionalNullable[str] = UNSET, + sort_by: Optional[str] = "created_at", + sort_order: Optional[str] = "desc", + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListDocumentOut: + r"""List documents in a given library. + + Given a library, lists the document that have been uploaded to that library. + + :param library_id: + :param search: + :param page_size: + :param page: + :param filters_attributes: + :param sort_by: + :param sort_order: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsListV1Request( + library_id=library_id, + search=search, + page_size=page_size, + page=page, + filters_attributes=filters_attributes, + sort_by=sort_by, + sort_order=sort_order, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListDocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", 
"*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def upload( + self, + *, + library_id: str, + file: Union[models_file.File, models_file.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Upload a new document. + + Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search + + :param library_id: + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUploadV1Request( + library_id=library_id, + request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + file=utils.get_pydantic_model(file, models.File), + ), + ) + + req = self._build_request( + method="POST", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.request_body, + False, + False, + "multipart", + models.LibrariesDocumentsUploadV1DocumentUpload, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_upload_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, ["200", "201"], "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + 
models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def upload_async( + self, + *, + library_id: str, + file: Union[models_file.File, models_file.FileTypedDict], + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Upload a new document. + + Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search + + :param library_id: + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUploadV1Request( + library_id=library_id, + request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + file=utils.get_pydantic_model(file, models.File), + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries/{library_id}/documents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.request_body, + False, + False, + "multipart", + models.LibrariesDocumentsUploadV1DocumentUpload, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_upload_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, ["200", "201"], "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = 
unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Retrieve the metadata of a specific document. + + Given a library and a document in this library, you can retrieve the metadata of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, 
http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Retrieve the metadata of a specific document. + + Given a library and a document in this library, you can retrieve the metadata of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + library_id: str, + document_id: str, + name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[ + Dict[str, models_documentupdatein.Attributes], + Dict[str, models_documentupdatein.AttributesTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Update the metadata of a specific document. + + Given a library and a document in that library, update the name of that document. + + :param library_id: + :param document_id: + :param name: + :param attributes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUpdateV1Request( + library_id=library_id, + document_id=document_id, + document_update_in=models.DocumentUpdateIn( + name=name, + attributes=attributes, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.document_update_in, + False, + False, + "json", + models.DocumentUpdateIn, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + 
models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + library_id: str, + document_id: str, + name: OptionalNullable[str] = UNSET, + attributes: OptionalNullable[ + Union[ + Dict[str, models_documentupdatein.Attributes], + Dict[str, models_documentupdatein.AttributesTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentOut: + r"""Update the metadata of a specific document. + + Given a library and a document in that library, update the name of that document. + + :param library_id: + :param document_id: + :param name: + :param attributes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsUpdateV1Request( + library_id=library_id, + document_id=document_id, + document_update_in=models.DocumentUpdateIn( + name=name, + attributes=attributes, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.document_update_in, + False, + False, + "json", + models.DocumentUpdateIn, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + 
models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a document. + + Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsDeleteV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete a document. + + Given a library and a document in that library, delete that document. The document will be deleted from the library and the search index. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsDeleteV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/libraries/{library_id}/documents/{document_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, 
utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def text_content( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentTextContent: + r"""Retrieve the text content of a specific document. + + Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. 
+ + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetTextContentV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/text_content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_text_content_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
unmarshal_json_response(models.DocumentTextContent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def text_content_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DocumentTextContent: + r"""Retrieve the text content of a specific document. + + Given a library and a document in that library, you can retrieve the text content of that document if it exists. For documents like pdf, docx and pptx the text content results from our processing using Mistral OCR. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetTextContentV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/text_content", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_text_content_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DocumentTextContent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def status( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ProcessingStatusOut: + r"""Retrieve the processing status of a specific document. + + Given a library and a document in that library, retrieve the processing status of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetStatusV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/status", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_status_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ProcessingStatusOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def status_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ProcessingStatusOut: + r"""Retrieve the processing status of a specific document. + + Given a library and a document in that library, retrieve the processing status of that document. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetStatusV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/status", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_status_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ProcessingStatusOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get_signed_url( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of a specific document. + + Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", 
http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_signed_url_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of a specific document. + + Given a library and a document in that library, retrieve the signed URL of a specific document.The url will expire after 30 minutes and can be accessed by anyone with the link. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def extracted_text_signed_url( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of text extracted from a given document. + + Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def extracted_text_signed_url_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> str: + r"""Retrieve the signed URL of text extracted from a given document. + + Given a library and a document in that library, retrieve the signed URL of text extracted. For documents that are sent to the OCR this returns the result of the OCR queries. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}/documents/{document_id}/extracted-text-signed-url", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(str, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def reprocess( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Reprocess a document. + + Given a library and a document in that library, reprocess that document, it will be billed again. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsReprocessV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request( + method="POST", + path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_reprocess_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", 
"*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def reprocess_async( + self, + *, + library_id: str, + document_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Reprocess a document. + + Given a library and a document in that library, reprocess that document, it will be billed again. + + :param library_id: + :param document_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDocumentsReprocessV1Request( + library_id=library_id, + document_id=document_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries/{library_id}/documents/{document_id}/reprocess", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, 
utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_documents_reprocess_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/embeddings.py b/src/mistralai/client/embeddings.py new file mode 100644 index 00000000..359f2f62 --- /dev/null +++ b/src/mistralai/client/embeddings.py @@ -0,0 +1,240 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + embeddingdtype as models_embeddingdtype, + embeddingrequest as models_embeddingrequest, + encodingformat as models_encodingformat, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, Mapping, Optional, Union + + +class Embeddings(BaseSDK): + r"""Embeddings API.""" + + def create( + self, + *, + model: str, + inputs: Union[ + models_embeddingrequest.EmbeddingRequestInputs, + models_embeddingrequest.EmbeddingRequestInputsTypedDict, + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + output_dimension: OptionalNullable[int] = UNSET, + output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, + encoding_format: Optional[models_encodingformat.EncodingFormat] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.EmbeddingResponse: + r"""Embeddings + + Embeddings + + :param model: The ID of the model to be used for embedding. + :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param metadata: + :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. 
+ :param output_dtype: + :param encoding_format: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.EmbeddingRequest( + model=model, + metadata=metadata, + inputs=inputs, + output_dimension=output_dimension, + output_dtype=output_dtype, + encoding_format=encoding_format, + ) + + req = self._build_request( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + 
retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.EmbeddingResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + inputs: Union[ + models_embeddingrequest.EmbeddingRequestInputs, + models_embeddingrequest.EmbeddingRequestInputsTypedDict, + ], + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + output_dimension: OptionalNullable[int] = UNSET, + output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, + encoding_format: Optional[models_encodingformat.EncodingFormat] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.EmbeddingResponse: + r"""Embeddings + + Embeddings + + :param model: The ID of the model to be used for embedding. + :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param metadata: + :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. 
+ :param output_dtype: + :param encoding_format: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.EmbeddingRequest( + model=model, + metadata=metadata, + inputs=inputs, + output_dimension=output_dimension, + output_dtype=output_dtype, + encoding_format=encoding_format, + ) + + req = self._build_request_async( + method="POST", + path="/v1/embeddings", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.EmbeddingRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="embeddings_v1_embeddings_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + 
retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.EmbeddingResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/files.py b/src/mistralai/client/files.py new file mode 100644 index 00000000..97817eab --- /dev/null +++ b/src/mistralai/client/files.py @@ -0,0 +1,1120 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +import httpx +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + file as models_file, + filepurpose as models_filepurpose, + sampletype as models_sampletype, + source as models_source, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import List, Mapping, Optional, Union + + +class Files(BaseSDK): + r"""Files API""" + + def upload( + self, + *, + file: Union[models_file.File, models_file.FileTypedDict], + purpose: Optional[models_filepurpose.FilePurpose] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UploadFileOut: + r"""Upload File + + Upload a file that can be used across various endpoints. + + The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + + Please contact us if you need to increase these storage limits. + + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param purpose: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + purpose=purpose, + file=utils.get_pydantic_model(file, models.File), + ) + + req = self._build_request( + method="POST", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.FilesAPIRoutesUploadFileMultiPartBodyParams, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_upload_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.UploadFileOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def upload_async( + self, + *, + file: Union[models_file.File, models_file.FileTypedDict], + purpose: Optional[models_filepurpose.FilePurpose] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UploadFileOut: + r"""Upload File + + Upload a file that can be used across various endpoints. + + The size of individual files can be a maximum of 512 MB. The Fine-tuning API only supports .jsonl files. + + Please contact us if you need to increase these storage limits. + + :param file: The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + :param purpose: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + purpose=purpose, + file=utils.get_pydantic_model(file, models.File), + ) + + req = self._build_request_async( + method="POST", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.FilesAPIRoutesUploadFileMultiPartBodyParams, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_upload_file", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.UploadFileOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + include_total: Optional[bool] = True, + sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, + source: OptionalNullable[List[models_source.Source]] = UNSET, + search: OptionalNullable[str] = UNSET, + purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListFilesOut: + r"""List Files + + Returns a list of files that belong to the user's organization. + + :param page: + :param page_size: + :param include_total: + :param sample_type: + :param source: + :param search: + :param purpose: + :param mimetypes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesListFilesRequest( + page=page, + page_size=page_size, + include_total=include_total, + sample_type=sample_type, + source=source, + search=search, + purpose=purpose, + mimetypes=mimetypes, + ) + + req = self._build_request( + method="GET", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_list_files", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListFilesOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, 
http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + include_total: Optional[bool] = True, + sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, + source: OptionalNullable[List[models_source.Source]] = UNSET, + search: OptionalNullable[str] = UNSET, + purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + mimetypes: OptionalNullable[List[str]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListFilesOut: + r"""List Files + + Returns a list of files that belong to the user's organization. + + :param page: + :param page_size: + :param include_total: + :param sample_type: + :param source: + :param search: + :param purpose: + :param mimetypes: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FilesAPIRoutesListFilesRequest( + page=page, + page_size=page_size, + include_total=include_total, + sample_type=sample_type, + source=source, + search=search, + purpose=purpose, + mimetypes=mimetypes, + ) + + req = self._build_request_async( + method="GET", + path="/v1/files", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="files_api_routes_list_files", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListFilesOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
            # NOTE(review): the statement below is truncated by the chunk
            # boundary — its leading keyword (presumably `raise`, matching the
            # identical pattern in every sibling method) sits outside this
            # view; kept verbatim. TODO confirm against the full file.
            models.SDKError("API error occurred", http_res, http_res_text)

        raise models.SDKError("Unexpected response received", http_res)

    def retrieve(
        self,
        *,
        file_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.RetrieveFileOut:
        r"""Retrieve File

        Returns information about a specific file.

        :param file_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout override falls back to the client-wide setting.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url wins over the SDK-configured server.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.FilesAPIRoutesRetrieveFileRequest(
            file_id=file_id,
        )

        req = self._build_request(
            method="GET",
            path="/v1/files/{file_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry override; UNSET means "use the client-wide config".
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on throttling and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="files_api_routes_retrieve_file",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.RetrieveFileOut, http_res)
        # Non-2xx: read the body text and surface it in the raised error.
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        raise models.SDKError("Unexpected response received", http_res)

    async def retrieve_async(
        self,
        *,
        file_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.RetrieveFileOut:
        r"""Retrieve File

        Returns information about a specific file.

        :param file_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of retrieve(): identical request/response flow with
        # awaited I/O primitives.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.FilesAPIRoutesRetrieveFileRequest(
            file_id=file_id,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/files/{file_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="files_api_routes_retrieve_file",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.RetrieveFileOut, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        raise models.SDKError("Unexpected response received", http_res)

    def delete(
        self,
        *,
        file_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.DeleteFileOut:
        r"""Delete File

        Delete a file.

        :param file_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Same flow as retrieve(), but issues DELETE and unmarshals
        # DeleteFileOut.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.FilesAPIRoutesDeleteFileRequest(
            file_id=file_id,
        )

        req = self._build_request(
            method="DELETE",
            path="/v1/files/{file_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="files_api_routes_delete_file",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.DeleteFileOut, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        raise models.SDKError("Unexpected response received", http_res)

    async def delete_async(
        self,
        *,
        file_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.DeleteFileOut:
        r"""Delete File

        Delete a file.

        :param file_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of delete().
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.FilesAPIRoutesDeleteFileRequest(
            file_id=file_id,
        )

        req = self._build_request_async(
            method="DELETE",
            path="/v1/files/{file_id}",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="files_api_routes_delete_file",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.DeleteFileOut, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        raise models.SDKError("Unexpected response received", http_res)

    def download(
        self,
        *,
        file_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> httpx.Response:
        r"""Download File

        Download a file

        :param file_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.FilesAPIRoutesDownloadFileRequest(
            file_id=file_id,
        )

        req = self._build_request(
            method="GET",
            path="/v1/files/{file_id}/content",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/octet-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="files_api_routes_download_file",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            # stream=True leaves the body unread so the caller can consume the
            # raw httpx.Response returned on success as a stream.
            stream=True,
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/octet-stream"):
            return http_res
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError("Unexpected response received", http_res, http_res_text)

    async def download_async(
        self,
        *,
        file_id: str,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> httpx.Response:
        r"""Download File

        Download a file

        :param file_id:
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of download(); success returns the streaming response
        # unconsumed.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.FilesAPIRoutesDownloadFileRequest(
            file_id=file_id,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/files/{file_id}/content",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/octet-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="files_api_routes_download_file",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            stream=True,
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/octet-stream"):
            return http_res
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        http_res_text = await utils.stream_to_text_async(http_res)
        raise models.SDKError("Unexpected response received", http_res, http_res_text)

    def get_signed_url(
        self,
        *,
        file_id: str,
        expiry: Optional[int] = 24,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.FileSignedURL:
        r"""Get Signed Url

        :param file_id:
        :param expiry: Number of hours before the url becomes invalid. Defaults to 24h
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.FilesAPIRoutesGetSignedURLRequest(
            file_id=file_id,
            expiry=expiry,
        )

        req = self._build_request(
            method="GET",
            path="/v1/files/{file_id}/url",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="files_api_routes_get_signed_url",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.FileSignedURL, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        raise models.SDKError("Unexpected response received", http_res)

    async def get_signed_url_async(
        self,
        *,
        file_id: str,
        expiry: Optional[int] = 24,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.FileSignedURL:
        r"""Get Signed Url

        :param file_id:
        :param expiry: Number of hours before the url becomes invalid. Defaults to 24h
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of get_signed_url().
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.FilesAPIRoutesGetSignedURLRequest(
            file_id=file_id,
            expiry=expiry,
        )

        req = self._build_request_async(
            method="GET",
            path="/v1/files/{file_id}/url",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=False,
            request_has_path_params=True,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="files_api_routes_get_signed_url",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["4XX", "5XX"],
            retry_config=retry_config,
        )

        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.FileSignedURL, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        raise models.SDKError("Unexpected response received", http_res)
diff --git a/src/mistralai/client/fim.py b/src/mistralai/client/fim.py
new file mode 100644
index 00000000..4a834fe9
--- /dev/null
+++ b/src/mistralai/client/fim.py
@@ -0,0 +1,545 @@
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from .basesdk import BaseSDK
from mistralai.client import models, utils
from mistralai.client._hooks import HookContext
from mistralai.client.models import (
    fimcompletionrequest as models_fimcompletionrequest,
    fimcompletionstreamrequest as models_fimcompletionstreamrequest,
)
from mistralai.client.types import OptionalNullable, UNSET
from mistralai.client.utils import eventstreaming, get_security_from_env
from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response
from typing import Any, Dict, Mapping, Optional, Union


class Fim(BaseSDK):
    r"""Fill-in-the-middle API."""

    def complete(
        self,
        *,
        model: str,
        prompt: str,
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = False,
        stop: Optional[
            Union[
                models_fimcompletionrequest.FIMCompletionRequestStop,
                models_fimcompletionrequest.FIMCompletionRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        suffix: OptionalNullable[str] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.FIMCompletionResponse:
        r"""Fim Completion

        FIM completion.

        :param model: ID of the model with FIM to use.
        :param prompt: The text/code to complete.
        :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
        :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
        :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
        :param metadata:
        :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
        :param min_tokens: The minimum number of tokens to generate in the completion.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        # Per-call timeout override falls back to the client-wide setting.
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        # An explicit server_url wins over the SDK-configured server.
        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.FIMCompletionRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            prompt=prompt,
            suffix=suffix,
            min_tokens=min_tokens,
        )

        req = self._build_request(
            method="POST",
            path="/v1/fim/completions",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            # Lazily serialized so hooks can still mutate `request` first.
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.FIMCompletionRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        # Per-call retry override; UNSET means "use the client-wide config".
        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            # Retry only on throttling and transient server errors.
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="fim_completion_v1_fim_completions_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.FIMCompletionResponse, http_res)
        # 422: structured validation error from the API.
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                models.HTTPValidationErrorData, http_res
            )
            raise models.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        raise models.SDKError("Unexpected response received", http_res)

    async def complete_async(
        self,
        *,
        model: str,
        prompt: str,
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = False,
        stop: Optional[
            Union[
                models_fimcompletionrequest.FIMCompletionRequestStop,
                models_fimcompletionrequest.FIMCompletionRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        suffix: OptionalNullable[str] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> models.FIMCompletionResponse:
        r"""Fim Completion

        FIM completion.

        :param model: ID of the model with FIM to use.
        :param prompt: The text/code to complete.
        :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
        :param stream: Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.
        :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
        :param metadata:
        :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
        :param min_tokens: The minimum number of tokens to generate in the completion.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        # Async twin of complete(): identical flow with awaited I/O.
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.FIMCompletionRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            prompt=prompt,
            suffix=suffix,
            min_tokens=min_tokens,
        )

        req = self._build_request_async(
            method="POST",
            path="/v1/fim/completions",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="application/json",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.FIMCompletionRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = await self.do_request_async(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="fim_completion_v1_fim_completions_post",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "application/json"):
            return unmarshal_json_response(models.FIMCompletionResponse, http_res)
        if utils.match_response(http_res, "422", "application/json"):
            response_data = unmarshal_json_response(
                models.HTTPValidationErrorData, http_res
            )
            raise models.HTTPValidationError(response_data, http_res)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = await utils.stream_to_text_async(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        raise models.SDKError("Unexpected response received", http_res)

    def stream(
        self,
        *,
        model: str,
        prompt: str,
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = True,
        stop: Optional[
            Union[
                models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop,
                models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        suffix: OptionalNullable[str] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStream[models.CompletionEvent]:
        r"""Stream fim completion

        Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.

        :param model: ID of the model with FIM to use.
        :param prompt: The text/code to complete.
        :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.
        :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
        :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
        :param stream:
        :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array
        :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results.
        :param metadata:
        :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.
        :param min_tokens: The minimum number of tokens to generate in the completion.
        :param retries: Override the default retry configuration for this method
        :param server_url: Override the default server URL for this method
        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
        :param http_headers: Additional headers to set or replace on requests.
        """
        base_url = None
        url_variables = None
        if timeout_ms is None:
            timeout_ms = self.sdk_configuration.timeout_ms

        if server_url is not None:
            base_url = server_url
        else:
            base_url = self._get_url(base_url, url_variables)

        request = models.FIMCompletionStreamRequest(
            model=model,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=stream,
            stop=stop,
            random_seed=random_seed,
            metadata=metadata,
            prompt=prompt,
            suffix=suffix,
            min_tokens=min_tokens,
        )

        req = self._build_request(
            method="POST",
            # "#stream" fragment disambiguates this operation from the
            # non-streaming POST to the same endpoint; it is not sent on the wire.
            path="/v1/fim/completions#stream",
            base_url=base_url,
            url_variables=url_variables,
            request=request,
            request_body_required=True,
            request_has_path_params=False,
            request_has_query_params=True,
            user_agent_header="user-agent",
            accept_header_value="text/event-stream",
            http_headers=http_headers,
            security=self.sdk_configuration.security,
            get_serialized_body=lambda: utils.serialize_request_body(
                request, False, False, "json", models.FIMCompletionStreamRequest
            ),
            allow_empty_value=None,
            timeout_ms=timeout_ms,
        )

        if retries == UNSET:
            if self.sdk_configuration.retry_config is not UNSET:
                retries = self.sdk_configuration.retry_config

        retry_config = None
        if isinstance(retries, utils.RetryConfig):
            retry_config = (retries, ["429", "500", "502", "503", "504"])

        http_res = self.do_request(
            hook_ctx=HookContext(
                config=self.sdk_configuration,
                base_url=base_url or "",
                operation_id="stream_fim",
                oauth2_scopes=None,
                security_source=get_security_from_env(
                    self.sdk_configuration.security, models.Security
                ),
            ),
            request=req,
            error_status_codes=["422", "4XX", "5XX"],
            # Keep the body unread so SSE events can be consumed incrementally.
            stream=True,
            retry_config=retry_config,
        )

        response_data: Any = None
        if utils.match_response(http_res, "200", "text/event-stream"):
            # Wrap the raw response in an EventStream that unmarshals each SSE
            # chunk and stops at the "[DONE]" sentinel.
            return eventstreaming.EventStream(
                http_res,
                lambda raw: utils.unmarshal_json(raw, models.CompletionEvent),
                sentinel="[DONE]",
                client_ref=self,
            )
        if utils.match_response(http_res, "422", "application/json"):
            http_res_text = utils.stream_to_text(http_res)
            response_data = unmarshal_json_response(
                models.HTTPValidationErrorData, http_res, http_res_text
            )
            raise models.HTTPValidationError(response_data, http_res, http_res_text)
        if utils.match_response(http_res, "4XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)
        if utils.match_response(http_res, "5XX", "*"):
            http_res_text = utils.stream_to_text(http_res)
            raise models.SDKError("API error occurred", http_res, http_res_text)

        http_res_text = utils.stream_to_text(http_res)
        raise models.SDKError("Unexpected response received", http_res, http_res_text)

    # NOTE(review): stream_async is truncated by the chunk boundary — only its
    # signature and the start of its docstring are visible here; the body lies
    # outside this view.
    async def stream_async(
        self,
        *,
        model: str,
        prompt: str,
        temperature: OptionalNullable[float] = UNSET,
        top_p: Optional[float] = 1,
        max_tokens: OptionalNullable[int] = UNSET,
        stream: Optional[bool] = True,
        stop: Optional[
            Union[
                models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop,
                models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict,
            ]
        ] = None,
        random_seed: OptionalNullable[int] = UNSET,
        metadata: OptionalNullable[Dict[str, Any]] = UNSET,
        suffix: OptionalNullable[str] = UNSET,
        min_tokens: OptionalNullable[int] = UNSET,
        retries: OptionalNullable[utils.RetryConfig] = UNSET,
        server_url: Optional[str] = None,
        timeout_ms: Optional[int] = None,
        http_headers: Optional[Mapping[str, str]] = None,
    ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]:
        r"""Stream fim completion

        Mistral AI provides the ability to stream responses back to a client in order to allow partial results for certain requests. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.
Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. + + :param model: ID of the model with FIM to use. + :param prompt: The text/code to complete. + :param temperature: What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. + :param top_p: Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. + :param max_tokens: The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. + :param stream: + :param stop: Stop generation if this token is detected. Or if one of these tokens is detected when providing an array + :param random_seed: The seed to use for random sampling. If set, different calls will generate deterministic results. + :param metadata: + :param suffix: Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. + :param min_tokens: The minimum number of tokens to generate in the completion. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.FIMCompletionStreamRequest( + model=model, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + stream=stream, + stop=stop, + random_seed=random_seed, + metadata=metadata, + prompt=prompt, + suffix=suffix, + min_tokens=min_tokens, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fim/completions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.FIMCompletionStreamRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="stream_fim", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.CompletionEvent), + sentinel="[DONE]", + client_ref=self, + ) + if utils.match_response(http_res, "422", "application/json"): + http_res_text = await utils.stream_to_text_async(http_res) + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res, http_res_text + ) + raise models.HTTPValidationError(response_data, http_res, http_res_text) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/fine_tuning.py b/src/mistralai/client/fine_tuning.py new file mode 100644 index 00000000..c57425fd --- /dev/null +++ b/src/mistralai/client/fine_tuning.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client.jobs import Jobs +from typing import Optional + + +class FineTuning(BaseSDK): + jobs: Jobs + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.jobs = Jobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/httpclient.py b/src/mistralai/client/httpclient.py new file mode 100644 index 00000000..89560b56 --- /dev/null +++ b/src/mistralai/client/httpclient.py @@ -0,0 +1,125 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +# pyright: reportReturnType = false +import asyncio +from typing_extensions import Protocol, runtime_checkable +import httpx +from typing import Any, Optional, Union + + +@runtime_checkable +class HttpClient(Protocol): + def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) 
-> httpx.Request: + pass + + def close(self) -> None: + pass + + +@runtime_checkable +class AsyncHttpClient(Protocol): + async def send( + self, + request: httpx.Request, + *, + stream: bool = False, + auth: Union[ + httpx._types.AuthTypes, httpx._client.UseClientDefault, None + ] = httpx.USE_CLIENT_DEFAULT, + follow_redirects: Union[ + bool, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + ) -> httpx.Response: + pass + + def build_request( + self, + method: str, + url: httpx._types.URLTypes, + *, + content: Optional[httpx._types.RequestContent] = None, + data: Optional[httpx._types.RequestData] = None, + files: Optional[httpx._types.RequestFiles] = None, + json: Optional[Any] = None, + params: Optional[httpx._types.QueryParamTypes] = None, + headers: Optional[httpx._types.HeaderTypes] = None, + cookies: Optional[httpx._types.CookieTypes] = None, + timeout: Union[ + httpx._types.TimeoutTypes, httpx._client.UseClientDefault + ] = httpx.USE_CLIENT_DEFAULT, + extensions: Optional[httpx._types.RequestExtensions] = None, + ) -> httpx.Request: + pass + + async def aclose(self) -> None: + pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + sync_client_supplied: bool, + async_client: Union[AsyncHttpClient, None], + async_client_supplied: bool, +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. 
+ owner.client = None + owner.async_client = None + if sync_client is not None and not sync_client_supplied: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None and not async_client_supplied: + try: + loop = asyncio.get_running_loop() + asyncio.run_coroutine_threadsafe(async_client.aclose(), loop) + except RuntimeError: + try: + asyncio.run(async_client.aclose()) + except RuntimeError: + # best effort + pass diff --git a/src/mistralai/client/jobs.py b/src/mistralai/client/jobs.py new file mode 100644 index 00000000..848926ea --- /dev/null +++ b/src/mistralai/client/jobs.py @@ -0,0 +1,1067 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from datetime import datetime +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + classifiertargetin as models_classifiertargetin, + finetuneablemodeltype as models_finetuneablemodeltype, + jobin as models_jobin, + jobs_api_routes_fine_tuning_get_fine_tuning_jobsop as models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop, + trainingfile as models_trainingfile, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import List, Mapping, Optional, Union + + +class Jobs(BaseSDK): + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[ + models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus + ] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, 
+ suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsOut: + r"""Get Fine Tuning Jobs + + Get a list of fine-tuning jobs for your organization and user. + + :param page: The page number of the results to be returned. + :param page_size: The number of items to return per page. + :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. + :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_before: + :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. + :param status: The current job state to filter on. When set, the other results are not displayed. + :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. + :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. + :param suffix: The model suffix to filter on. When set, the other results are not displayed. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( + page=page, + page_size=page_size, + model=model, + created_after=created_after, + created_before=created_before, + created_by_me=created_by_me, + status=status, + wandb_project=wandb_project, + wandb_name=wandb_name, + suffix=suffix, + ) + + req = self._build_request( + method="GET", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.JobsOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_before: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[ + models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus + ] = UNSET, + wandb_project: OptionalNullable[str] = UNSET, + wandb_name: OptionalNullable[str] = UNSET, + suffix: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsOut: + r"""Get Fine Tuning Jobs + + Get a list of fine-tuning jobs for your organization and user. + + :param page: The page number of the results to be returned. + :param page_size: The number of items to return per page. + :param model: The model name used for fine-tuning to filter on. When set, the other results are not displayed. + :param created_after: The date/time to filter on. When set, the results for previous creation times are not displayed. + :param created_before: + :param created_by_me: When set, only return results for jobs created by the API caller. Other results are not displayed. + :param status: The current job state to filter on. When set, the other results are not displayed. + :param wandb_project: The Weights and Biases project to filter on. When set, the other results are not displayed. + :param wandb_name: The Weight and Biases run name to filter on. When set, the other results are not displayed. + :param suffix: The model suffix to filter on. When set, the other results are not displayed. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( + page=page, + page_size=page_size, + model=model, + created_after=created_after, + created_before=created_before, + created_by_me=created_by_me, + status=status, + wandb_project=wandb_project, + wandb_name=wandb_name, + suffix=suffix, + ) + + req = self._build_request_async( + method="GET", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if 
utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.JobsOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def create( + self, + *, + model: str, + hyperparameters: Union[ + models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict + ], + training_files: Optional[ + Union[ + List[models_trainingfile.TrainingFile], + List[models_trainingfile.TrainingFileTypedDict], + ] + ] = None, + validation_files: OptionalNullable[List[str]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + integrations: OptionalNullable[ + Union[ + List[models_jobin.JobInIntegrations], + List[models_jobin.JobInIntegrationsTypedDict], + ] + ] = UNSET, + auto_start: Optional[bool] = None, + invalid_sample_skip_percentage: Optional[float] = 0, + job_type: OptionalNullable[ + models_finetuneablemodeltype.FineTuneableModelType + ] = UNSET, + repositories: OptionalNullable[ + Union[ + List[models_jobin.JobInRepositories], + List[models_jobin.JobInRepositoriesTypedDict], + ] + ] = UNSET, + classifier_targets: OptionalNullable[ + Union[ + List[models_classifiertargetin.ClassifierTargetIn], + List[models_classifiertargetin.ClassifierTargetInTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: + r"""Create Fine Tuning Job + + Create a new fine-tuning job, it will be queued for processing. 
+ + :param model: The name of the model to fine-tune. + :param hyperparameters: + :param training_files: + :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. + :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` + :param integrations: A list of integrations to enable for your fine-tuning job. + :param auto_start: This field will be required in a future release. + :param invalid_sample_skip_percentage: + :param job_type: + :param repositories: + :param classifier_targets: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobIn( + model=model, + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), + validation_files=validation_files, + suffix=suffix, + integrations=utils.get_pydantic_model( + integrations, OptionalNullable[List[models.JobInIntegrations]] + ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), + repositories=utils.get_pydantic_model( + repositories, OptionalNullable[List[models.JobInRepositories]] + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] + ), + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.JobIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + 
oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + hyperparameters: Union[ + models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict + ], + training_files: Optional[ + Union[ + List[models_trainingfile.TrainingFile], + List[models_trainingfile.TrainingFileTypedDict], + ] + ] = None, + validation_files: OptionalNullable[List[str]] = UNSET, + suffix: OptionalNullable[str] = UNSET, + integrations: OptionalNullable[ + Union[ + List[models_jobin.JobInIntegrations], + List[models_jobin.JobInIntegrationsTypedDict], + ] + ] = UNSET, + auto_start: Optional[bool] = None, + invalid_sample_skip_percentage: Optional[float] = 0, + job_type: OptionalNullable[ + models_finetuneablemodeltype.FineTuneableModelType + ] = UNSET, + repositories: OptionalNullable[ + Union[ + List[models_jobin.JobInRepositories], + List[models_jobin.JobInRepositoriesTypedDict], + ] + ] = UNSET, + classifier_targets: OptionalNullable[ + Union[ + List[models_classifiertargetin.ClassifierTargetIn], + List[models_classifiertargetin.ClassifierTargetInTypedDict], + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: 
Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: + r"""Create Fine Tuning Job + + Create a new fine-tuning job, it will be queued for processing. + + :param model: The name of the model to fine-tune. + :param hyperparameters: + :param training_files: + :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. + :param suffix: A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` + :param integrations: A list of integrations to enable for your fine-tuning job. + :param auto_start: This field will be required in a future release. + :param invalid_sample_skip_percentage: + :param job_type: + :param repositories: + :param classifier_targets: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobIn( + model=model, + training_files=utils.get_pydantic_model( + training_files, Optional[List[models.TrainingFile]] + ), + validation_files=validation_files, + suffix=suffix, + integrations=utils.get_pydantic_model( + integrations, OptionalNullable[List[models.JobInIntegrations]] + ), + auto_start=auto_start, + invalid_sample_skip_percentage=invalid_sample_skip_percentage, + job_type=job_type, + hyperparameters=utils.get_pydantic_model( + hyperparameters, models.Hyperparameters + ), + repositories=utils.get_pydantic_model( + repositories, OptionalNullable[List[models.JobInRepositories]] + ), + classifier_targets=utils.get_pydantic_model( + classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] + ), + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.JobIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response 
received", http_res) + + async def get_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + r"""Get Fine Tuning Job + + Get a fine-tuned job details by its UUID. + + :param job_id: The ID of the job to analyse. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/fine_tuning/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def cancel( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected 
response received", http_res) + + async def cancel_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + r"""Cancel Fine Tuning Job + + Request the cancellation of a fine tuning job. + + :param job_id: The ID of the job to cancel. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def start( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected 
response received", http_res) + + async def start_async( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + r"""Start Fine Tuning Job + + Request the start of a validated fine tuning job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/jobs/{job_id}/start", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/libraries.py b/src/mistralai/client/libraries.py new file mode 100644 index 00000000..03a54741 --- /dev/null +++ b/src/mistralai/client/libraries.py @@ -0,0 +1,946 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .sdkconfiguration import SDKConfiguration +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.accesses import Accesses +from mistralai.client.documents import Documents +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Mapping, Optional + + +class Libraries(BaseSDK): + r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" + + documents: Documents + r"""(beta) Libraries API - manage documents in a library.""" + accesses: Accesses + r"""(beta) Libraries API - manage access to a library.""" + + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.documents = Documents(self.sdk_configuration, parent_ref=self.parent_ref) + self.accesses = Accesses(self.sdk_configuration, parent_ref=self.parent_ref) + + def list( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListLibraryOut: + r"""List all libraries you have access to. + + List all libraries that you have created or have been shared with you. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request( + method="GET", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListLibraryOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = 
None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ListLibraryOut: + r"""List all libraries you have access to. + + List all libraries that you have created or have been shared with you. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request_async( + method="GET", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_list_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ListLibraryOut, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def create( + self, + *, + name: str, + description: OptionalNullable[str] = UNSET, + chunk_size: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Create a new Library. + + Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. + + :param name: + :param description: + :param chunk_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibraryIn( + name=name, + description=description, + chunk_size=chunk_size, + ) + + req = self._build_request( + method="POST", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.LibraryIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + 
raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + name: str, + description: OptionalNullable[str] = UNSET, + chunk_size: OptionalNullable[int] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Create a new Library. + + Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. + + :param name: + :param description: + :param chunk_size: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibraryIn( + name=name, + description=description, + chunk_size=chunk_size, + ) + + req = self._build_request_async( + method="POST", + path="/v1/libraries", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.LibraryIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_create_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "201", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Detailed information about a specific Library. + + Given a library id, details information about that Library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesGetV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="GET", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = 
utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Detailed information about a specific Library. + + Given a library id, details information about that Library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesGetV1Request( + library_id=library_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_get_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Delete a library and all of it's document. + + Given a library id, deletes it together with all documents that have been uploaded to that library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDeleteV1Request( + library_id=library_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + 
http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + library_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Delete a library and all of it's document. + + Given a library id, deletes it together with all documents that have been uploaded to that library. + + :param library_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesDeleteV1Request( + library_id=library_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res 
= await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_delete_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + library_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Update a library. + + Given a library id, you can update the name and description. + + :param library_id: + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesUpdateV1Request( + library_id=library_id, + library_in_update=models.LibraryInUpdate( + name=name, + description=description, + ), + ) + + req = self._build_request( + method="PUT", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.library_in_update, False, False, "json", models.LibraryInUpdate + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if 
utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + library_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.LibraryOut: + r"""Update a library. + + Given a library id, you can update the name and description. + + :param library_id: + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.LibrariesUpdateV1Request( + library_id=library_id, + library_in_update=models.LibraryInUpdate( + name=name, + description=description, + ), + ) + + req = self._build_request_async( + method="PUT", + path="/v1/libraries/{library_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.library_in_update, False, False, "json", models.LibraryInUpdate + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="libraries_update_v1", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.LibraryOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise 
models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/mistral_agents.py b/src/mistralai/client/mistral_agents.py new file mode 100644 index 00000000..2ac7a29e --- /dev/null +++ b/src/mistralai/client/mistral_agents.py @@ -0,0 +1,2080 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + agentcreationrequest as models_agentcreationrequest, + agents_api_v1_agents_getop as models_agents_api_v1_agents_getop, + agentupdaterequest as models_agentupdaterequest, + completionargs as models_completionargs, + requestsource as models_requestsource, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class MistralAgents(BaseSDK): + r"""(beta) Agents API""" + + def create( + self, + *, + model: str, + name: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_agentcreationrequest.AgentCreationRequestTools], + List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + 
description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Create a agent that can be used within a conversation. + + Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. + + :param model: + :param name: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: White-listed arguments from the completion API + :param description: + :param handoffs: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentCreationRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentCreationRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + metadata=metadata, + ) + + req = self._build_request( + method="POST", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentCreationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, 
"422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + model: str, + name: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_agentcreationrequest.AgentCreationRequestTools], + List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Create a agent that can be used within a conversation. + + Create a new agent giving it instructions, tools, description. The agent is then available to be used as a regular assistant in a conversation or as part of an agent pool from which it can be used. + + :param model: + :param name: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. 
+ :param completion_args: White-listed arguments from the completion API + :param description: + :param handoffs: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentCreationRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentCreationRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + metadata=metadata, + ) + + req = self._build_request_async( + method="POST", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.AgentCreationRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + 
base_url=base_url or "", + operation_id="agents_api_v1_agents_create", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = UNSET, + sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, + name: OptionalNullable[str] = UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List agent entities. + + Retrieve a list of agent entities sorted by creation time. 
+ + :param page: Page number (0-indexed) + :param page_size: Number of agents per page + :param deployment_chat: + :param sources: + :param name: + :param id: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListRequest( + page=page, + page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + id=id, + metadata=metadata, + ) + + req = self._build_request( + method="GET", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) 
+ + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + deployment_chat: OptionalNullable[bool] = UNSET, + sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, + name: OptionalNullable[str] = UNSET, + id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List agent entities. + + Retrieve a list of agent entities sorted by creation time. + + :param page: Page number (0-indexed) + :param page_size: Number of agents per page + :param deployment_chat: + :param sources: + :param name: + :param id: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListRequest( + page=page, + page_size=page_size, + deployment_chat=deployment_chat, + sources=sources, + name=name, + id=id, + metadata=metadata, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) 
+ raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + agent_id: str, + agent_version: OptionalNullable[ + Union[ + models_agents_api_v1_agents_getop.QueryParamAgentVersion, + models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve an agent entity. + + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. + + :param agent_id: + :param agent_version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetRequest( + agent_id=agent_id, + agent_version=agent_version, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, 
"5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + agent_id: str, + agent_version: OptionalNullable[ + Union[ + models_agents_api_v1_agents_getop.QueryParamAgentVersion, + models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + ] + ] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve an agent entity. + + Given an agent, retrieve an agent entity with its attributes. The agent_version parameter can be an integer version number or a string alias. + + :param agent_id: + :param agent_version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetRequest( + agent_id=agent_id, + agent_version=agent_version, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + agent_id: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_agentupdaterequest.AgentUpdateRequestTools], + List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + model: OptionalNullable[str] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + deployment_chat: OptionalNullable[bool] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent entity. + + Update an agent attributes and create a new version. + + :param agent_id: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. + :param completion_args: White-listed arguments from the completion API + :param model: + :param name: + :param description: + :param handoffs: + :param deployment_chat: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateRequest( + agent_id=agent_id, + agent_update_request=models.AgentUpdateRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentUpdateRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + deployment_chat=deployment_chat, + metadata=metadata, + ), + ) + + req = self._build_request( + method="PATCH", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.agent_update_request, + False, + False, + "json", + models.AgentUpdateRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None 
+ if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + agent_id: str, + instructions: OptionalNullable[str] = UNSET, + tools: Optional[ + Union[ + List[models_agentupdaterequest.AgentUpdateRequestTools], + List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], + ] + ] = None, + completion_args: Optional[ + Union[ + models_completionargs.CompletionArgs, + models_completionargs.CompletionArgsTypedDict, + ] + ] = None, + model: OptionalNullable[str] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + handoffs: OptionalNullable[List[str]] = UNSET, + deployment_chat: OptionalNullable[bool] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent entity. + + Update an agent attributes and create a new version. + + :param agent_id: + :param instructions: Instruction prompt the model will follow during the conversation. + :param tools: List of tools which are available to the model during the conversation. 
+ :param completion_args: White-listed arguments from the completion API + :param model: + :param name: + :param description: + :param handoffs: + :param deployment_chat: + :param metadata: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateRequest( + agent_id=agent_id, + agent_update_request=models.AgentUpdateRequest( + instructions=instructions, + tools=utils.get_pydantic_model( + tools, Optional[List[models.AgentUpdateRequestTools]] + ), + completion_args=utils.get_pydantic_model( + completion_args, Optional[models.CompletionArgs] + ), + model=model, + name=name, + description=description, + handoffs=handoffs, + deployment_chat=deployment_chat, + metadata=metadata, + ), + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.agent_update_request, + False, + False, + "json", + models.AgentUpdateRequest, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None 
+ if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent entity. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsDeleteRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/agents/{agent_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_delete", + oauth2_scopes=None, + 
security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update_version( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def update_version_async( + self, + *, + agent_id: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Update an agent version. + + Switch the version of an agent. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsUpdateVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/agents/{agent_id}/version", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", 
"503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_update_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list_versions( + self, + *, + agent_id: str, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List all versions of an agent. + + Retrieve all versions for a specific agent with full agent context. Supports pagination. 
+ + :param agent_id: + :param page: Page number (0-indexed) + :param page_size: Number of versions per page + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionsRequest( + agent_id=agent_id, + page=page, + page_size=page_size, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/versions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_versions", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_versions_async( + self, + *, + agent_id: str, + page: Optional[int] = 0, + page_size: Optional[int] = 20, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.Agent]: + r"""List all versions of an agent. + + Retrieve all versions for a specific agent with full agent context. Supports pagination. + + :param agent_id: + :param page: Page number (0-indexed) + :param page_size: Number of versions per page + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionsRequest( + agent_id=agent_id, + page=page, + page_size=page_size, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/versions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_versions", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.Agent], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get_version( + self, + *, + agent_id: str, + version: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve a specific version of an agent. + + Get a specific agent version by version number. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/versions/{version}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_version_async( + self, + *, + agent_id: str, + version: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.Agent: + r"""Retrieve a specific version of an agent. + + Get a specific agent version by version number. + + :param agent_id: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsGetVersionRequest( + agent_id=agent_id, + version=version, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/versions/{version}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = 
(retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_get_version", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.Agent, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def create_version_alias( + self, + *, + agent_id: str, + alias: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentAliasResponse: + r"""Create or update an agent version alias. + + Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. 
+ + :param agent_id: + :param alias: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + agent_id=agent_id, + alias=alias, + version=version, + ) + + req = self._build_request( + method="PUT", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create_or_update_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return 
unmarshal_json_response(models.AgentAliasResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_version_alias_async( + self, + *, + agent_id: str, + alias: str, + version: int, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.AgentAliasResponse: + r"""Create or update an agent version alias. + + Create a new alias or update an existing alias to point to a specific version. Aliases are unique per agent and can be reassigned to different versions. + + :param agent_id: + :param alias: + :param version: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + agent_id=agent_id, + alias=alias, + version=version, + ) + + req = self._build_request_async( + method="PUT", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_create_or_update_alias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.AgentAliasResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def list_version_aliases( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentAliasResponse]: + r"""List all aliases for an agent. + + Retrieve all version aliases for a specific agent. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + agent_id=agent_id, + ) + + req = self._build_request( + method="GET", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_version_aliases", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.AgentAliasResponse], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if 
utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_version_aliases_async( + self, + *, + agent_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> List[models.AgentAliasResponse]: + r"""List all aliases for an agent. + + Retrieve all version aliases for a specific agent. + + :param agent_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + agent_id=agent_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", 
"503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="agents_api_v1_agents_list_version_aliases", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(List[models.AgentAliasResponse], http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/mistral_jobs.py b/src/mistralai/client/mistral_jobs.py new file mode 100644 index 00000000..eae44033 --- /dev/null +++ b/src/mistralai/client/mistral_jobs.py @@ -0,0 +1,799 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from datetime import datetime +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + apiendpoint as models_apiendpoint, + batchjobstatus as models_batchjobstatus, + batchrequest as models_batchrequest, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Dict, List, Mapping, Optional, Union + + +class MistralJobs(BaseSDK): + def list( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobsOut: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. + + :param page: + :param page_size: + :param model: + :param agent_id: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + page=page, + page_size=page_size, + model=model, + agent_id=agent_id, + metadata=metadata, + created_after=created_after, + created_by_me=created_by_me, + status=status, + ) + + req = self._build_request( + method="GET", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobsOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", 
http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + page: Optional[int] = 0, + page_size: Optional[int] = 100, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, Any]] = UNSET, + created_after: OptionalNullable[datetime] = UNSET, + created_by_me: Optional[bool] = False, + status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobsOut: + r"""Get Batch Jobs + + Get a list of batch jobs for your organization and user. + + :param page: + :param page_size: + :param model: + :param agent_id: + :param metadata: + :param created_after: + :param created_by_me: + :param status: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + page=page, + page_size=page_size, + model=model, + agent_id=agent_id, + metadata=metadata, + created_after=created_after, + created_by_me=created_by_me, + status=status, + ) + + req = self._build_request_async( + method="GET", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_jobs", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobsOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + 
raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def create( + self, + *, + endpoint: models_apiendpoint.APIEndpoint, + input_files: OptionalNullable[List[str]] = UNSET, + requests: OptionalNullable[ + Union[ + List[models_batchrequest.BatchRequest], + List[models_batchrequest.BatchRequestTypedDict], + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, str]] = UNSET, + timeout_hours: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobOut: + r"""Create Batch Job + + Create a new batch job, it will be queued for processing. + + :param endpoint: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` + :param requests: + :param model: The model to be used for batch inference. + :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. + :param metadata: The metadata of your choice to be associated with the batch inference job. + :param timeout_hours: The timeout in hours for the batch inference job. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.BatchJobIn( + input_files=input_files, + requests=utils.get_pydantic_model( + requests, OptionalNullable[List[models.BatchRequest]] + ), + endpoint=endpoint, + model=model, + agent_id=agent_id, + metadata=metadata, + timeout_hours=timeout_hours, + ) + + req = self._build_request( + method="POST", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.BatchJobIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_create_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + 
retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def create_async( + self, + *, + endpoint: models_apiendpoint.APIEndpoint, + input_files: OptionalNullable[List[str]] = UNSET, + requests: OptionalNullable[ + Union[ + List[models_batchrequest.BatchRequest], + List[models_batchrequest.BatchRequestTypedDict], + ] + ] = UNSET, + model: OptionalNullable[str] = UNSET, + agent_id: OptionalNullable[str] = UNSET, + metadata: OptionalNullable[Dict[str, str]] = UNSET, + timeout_hours: Optional[int] = 24, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobOut: + r"""Create Batch Job + + Create a new batch job, it will be queued for processing. + + :param endpoint: + :param input_files: The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ``` + :param requests: + :param model: The model to be used for batch inference. 
+ :param agent_id: In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. + :param metadata: The metadata of your choice to be associated with the batch inference job. + :param timeout_hours: The timeout in hours for the batch inference job. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.BatchJobIn( + input_files=input_files, + requests=utils.get_pydantic_model( + requests, OptionalNullable[List[models.BatchRequest]] + ), + endpoint=endpoint, + model=model, + agent_id=agent_id, + metadata=metadata, + timeout_hours=timeout_hours, + ) + + req = self._build_request_async( + method="POST", + path="/v1/batch/jobs", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.BatchJobIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await 
self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_create_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def get( + self, + *, + job_id: str, + inline: OptionalNullable[bool] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobOut: + r"""Get Batch Job + + Get a batch job details by its UUID. + + Args: + inline: If True, return results inline in the response. + + :param job_id: + :param inline: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobRequest( + job_id=job_id, + inline=inline, + ) + + req = self._build_request( + method="GET", + path="/v1/batch/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def get_async( + self, + *, + 
job_id: str, + inline: OptionalNullable[bool] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobOut: + r"""Get Batch Job + + Get a batch job details by its UUID. + + Args: + inline: If True, return results inline in the response. + + :param job_id: + :param inline: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchGetBatchJobRequest( + job_id=job_id, + inline=inline, + ) + + req = self._build_request_async( + method="GET", + path="/v1/batch/jobs/{job_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_get_batch_job", + 
oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def cancel( + self, + *, + job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobOut: + r"""Cancel Batch Job + + Request the cancellation of a batch job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self._build_request( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def cancel_async( + self, + *, + 
job_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.BatchJobOut: + r"""Cancel Batch Job + + Request the cancellation of a batch job. + + :param job_id: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + job_id=job_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/batch/jobs/{job_id}/cancel", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_batch_cancel_batch_job", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.BatchJobOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py new file mode 100644 index 00000000..23e65222 --- /dev/null +++ b/src/mistralai/client/models/__init__.py @@ -0,0 +1,2531 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .mistralerror import MistralError +from typing import TYPE_CHECKING +from importlib import import_module +import builtins +import sys + +if TYPE_CHECKING: + from .agent import ( + Agent, + AgentObject, + AgentTools, + AgentToolsTypedDict, + AgentTypedDict, + ) + from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict + from .agentconversation import ( + AgentConversation, + AgentConversationAgentVersion, + AgentConversationAgentVersionTypedDict, + AgentConversationObject, + AgentConversationTypedDict, + ) + from .agentcreationrequest import ( + AgentCreationRequest, + AgentCreationRequestTools, + AgentCreationRequestToolsTypedDict, + AgentCreationRequestTypedDict, + ) + from .agenthandoffdoneevent import ( + AgentHandoffDoneEvent, + AgentHandoffDoneEventType, + AgentHandoffDoneEventTypedDict, + ) + from .agenthandoffentry import ( + AgentHandoffEntry, + AgentHandoffEntryObject, + AgentHandoffEntryType, + AgentHandoffEntryTypedDict, + ) + from .agenthandoffstartedevent import ( + 
AgentHandoffStartedEvent, + AgentHandoffStartedEventType, + AgentHandoffStartedEventTypedDict, + ) + from .agents_api_v1_agents_create_or_update_aliasop import ( + AgentsAPIV1AgentsCreateOrUpdateAliasRequest, + AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, + ) + from .agents_api_v1_agents_deleteop import ( + AgentsAPIV1AgentsDeleteRequest, + AgentsAPIV1AgentsDeleteRequestTypedDict, + ) + from .agents_api_v1_agents_get_versionop import ( + AgentsAPIV1AgentsGetVersionRequest, + AgentsAPIV1AgentsGetVersionRequestTypedDict, + ) + from .agents_api_v1_agents_getop import ( + AgentsAPIV1AgentsGetRequest, + AgentsAPIV1AgentsGetRequestTypedDict, + QueryParamAgentVersion, + QueryParamAgentVersionTypedDict, + ) + from .agents_api_v1_agents_list_version_aliasesop import ( + AgentsAPIV1AgentsListVersionAliasesRequest, + AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, + ) + from .agents_api_v1_agents_list_versionsop import ( + AgentsAPIV1AgentsListVersionsRequest, + AgentsAPIV1AgentsListVersionsRequestTypedDict, + ) + from .agents_api_v1_agents_listop import ( + AgentsAPIV1AgentsListRequest, + AgentsAPIV1AgentsListRequestTypedDict, + ) + from .agents_api_v1_agents_update_versionop import ( + AgentsAPIV1AgentsUpdateVersionRequest, + AgentsAPIV1AgentsUpdateVersionRequestTypedDict, + ) + from .agents_api_v1_agents_updateop import ( + AgentsAPIV1AgentsUpdateRequest, + AgentsAPIV1AgentsUpdateRequestTypedDict, + ) + from .agents_api_v1_conversations_append_streamop import ( + AgentsAPIV1ConversationsAppendStreamRequest, + AgentsAPIV1ConversationsAppendStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_appendop import ( + AgentsAPIV1ConversationsAppendRequest, + AgentsAPIV1ConversationsAppendRequestTypedDict, + ) + from .agents_api_v1_conversations_deleteop import ( + AgentsAPIV1ConversationsDeleteRequest, + AgentsAPIV1ConversationsDeleteRequestTypedDict, + ) + from .agents_api_v1_conversations_getop import ( + AgentsAPIV1ConversationsGetRequest, + 
AgentsAPIV1ConversationsGetRequestTypedDict, + AgentsAPIV1ConversationsGetResponseV1ConversationsGet, + AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict, + ) + from .agents_api_v1_conversations_historyop import ( + AgentsAPIV1ConversationsHistoryRequest, + AgentsAPIV1ConversationsHistoryRequestTypedDict, + ) + from .agents_api_v1_conversations_listop import ( + AgentsAPIV1ConversationsListRequest, + AgentsAPIV1ConversationsListRequestTypedDict, + ResponseBody, + ResponseBodyTypedDict, + ) + from .agents_api_v1_conversations_messagesop import ( + AgentsAPIV1ConversationsMessagesRequest, + AgentsAPIV1ConversationsMessagesRequestTypedDict, + ) + from .agents_api_v1_conversations_restart_streamop import ( + AgentsAPIV1ConversationsRestartStreamRequest, + AgentsAPIV1ConversationsRestartStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_restartop import ( + AgentsAPIV1ConversationsRestartRequest, + AgentsAPIV1ConversationsRestartRequestTypedDict, + ) + from .agentscompletionrequest import ( + AgentsCompletionRequest, + AgentsCompletionRequestMessages, + AgentsCompletionRequestMessagesTypedDict, + AgentsCompletionRequestStop, + AgentsCompletionRequestStopTypedDict, + AgentsCompletionRequestToolChoice, + AgentsCompletionRequestToolChoiceTypedDict, + AgentsCompletionRequestTypedDict, + ) + from .agentscompletionstreamrequest import ( + AgentsCompletionStreamRequest, + AgentsCompletionStreamRequestMessages, + AgentsCompletionStreamRequestMessagesTypedDict, + AgentsCompletionStreamRequestStop, + AgentsCompletionStreamRequestStopTypedDict, + AgentsCompletionStreamRequestToolChoice, + AgentsCompletionStreamRequestToolChoiceTypedDict, + AgentsCompletionStreamRequestTypedDict, + ) + from .agentupdaterequest import ( + AgentUpdateRequest, + AgentUpdateRequestTools, + AgentUpdateRequestToolsTypedDict, + AgentUpdateRequestTypedDict, + ) + from .apiendpoint import APIEndpoint + from .archiveftmodelout import ( + ArchiveFTModelOut, + ArchiveFTModelOutObject, 
+ ArchiveFTModelOutTypedDict, + ) + from .assistantmessage import ( + AssistantMessage, + AssistantMessageContent, + AssistantMessageContentTypedDict, + AssistantMessageRole, + AssistantMessageTypedDict, + ) + from .audiochunk import AudioChunk, AudioChunkType, AudioChunkTypedDict + from .audioencoding import AudioEncoding + from .audioformat import AudioFormat, AudioFormatTypedDict + from .audiotranscriptionrequest import ( + AudioTranscriptionRequest, + AudioTranscriptionRequestTypedDict, + ) + from .audiotranscriptionrequeststream import ( + AudioTranscriptionRequestStream, + AudioTranscriptionRequestStreamTypedDict, + ) + from .basemodelcard import BaseModelCard, BaseModelCardType, BaseModelCardTypedDict + from .batcherror import BatchError, BatchErrorTypedDict + from .batchjobin import BatchJobIn, BatchJobInTypedDict + from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict + from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict + from .batchjobstatus import BatchJobStatus + from .batchrequest import BatchRequest, BatchRequestTypedDict + from .builtinconnectors import BuiltInConnectors + from .chatclassificationrequest import ( + ChatClassificationRequest, + ChatClassificationRequestTypedDict, + ) + from .chatcompletionchoice import ( + ChatCompletionChoice, + ChatCompletionChoiceTypedDict, + FinishReason, + ) + from .chatcompletionrequest import ( + ChatCompletionRequest, + ChatCompletionRequestToolChoice, + ChatCompletionRequestToolChoiceTypedDict, + ChatCompletionRequestTypedDict, + Messages, + MessagesTypedDict, + Stop, + StopTypedDict, + ) + from .chatcompletionresponse import ( + ChatCompletionResponse, + ChatCompletionResponseTypedDict, + ) + from .chatcompletionstreamrequest import ( + ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessages, + ChatCompletionStreamRequestMessagesTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, + 
ChatCompletionStreamRequestToolChoice, + ChatCompletionStreamRequestToolChoiceTypedDict, + ChatCompletionStreamRequestTypedDict, + ) + from .chatmoderationrequest import ( + ChatModerationRequest, + ChatModerationRequestInputs, + ChatModerationRequestInputsTypedDict, + ChatModerationRequestTypedDict, + One, + OneTypedDict, + Two, + TwoTypedDict, + ) + from .checkpointout import CheckpointOut, CheckpointOutTypedDict + from .classificationrequest import ( + ClassificationRequest, + ClassificationRequestInputs, + ClassificationRequestInputsTypedDict, + ClassificationRequestTypedDict, + ) + from .classificationresponse import ( + ClassificationResponse, + ClassificationResponseTypedDict, + ) + from .classificationtargetresult import ( + ClassificationTargetResult, + ClassificationTargetResultTypedDict, + ) + from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutIntegrations, + ClassifierDetailedJobOutIntegrationsTypedDict, + ClassifierDetailedJobOutJobType, + ClassifierDetailedJobOutObject, + ClassifierDetailedJobOutStatus, + ClassifierDetailedJobOutTypedDict, + ) + from .classifierftmodelout import ( + ClassifierFTModelOut, + ClassifierFTModelOutModelType, + ClassifierFTModelOutObject, + ClassifierFTModelOutTypedDict, + ) + from .classifierjobout import ( + ClassifierJobOut, + ClassifierJobOutIntegrations, + ClassifierJobOutIntegrationsTypedDict, + ClassifierJobOutJobType, + ClassifierJobOutObject, + ClassifierJobOutStatus, + ClassifierJobOutTypedDict, + ) + from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict + from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict + from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, + ) + from .classifiertrainingparametersin import ( + ClassifierTrainingParametersIn, + ClassifierTrainingParametersInTypedDict, + ) + from .codeinterpretertool import ( + CodeInterpreterTool, + 
CodeInterpreterToolType, + CodeInterpreterToolTypedDict, + ) + from .completionargs import CompletionArgs, CompletionArgsTypedDict + from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict + from .completionchunk import CompletionChunk, CompletionChunkTypedDict + from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutIntegrations, + CompletionDetailedJobOutIntegrationsTypedDict, + CompletionDetailedJobOutJobType, + CompletionDetailedJobOutObject, + CompletionDetailedJobOutRepositories, + CompletionDetailedJobOutRepositoriesTypedDict, + CompletionDetailedJobOutStatus, + CompletionDetailedJobOutTypedDict, + ) + from .completionevent import CompletionEvent, CompletionEventTypedDict + from .completionftmodelout import ( + CompletionFTModelOut, + CompletionFTModelOutObject, + CompletionFTModelOutTypedDict, + ModelType, + ) + from .completionjobout import ( + CompletionJobOut, + CompletionJobOutObject, + CompletionJobOutTypedDict, + Integrations, + IntegrationsTypedDict, + JobType, + Repositories, + RepositoriesTypedDict, + Status, + ) + from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, + CompletionResponseStreamChoiceTypedDict, + ) + from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, + ) + from .completiontrainingparametersin import ( + CompletionTrainingParametersIn, + CompletionTrainingParametersInTypedDict, + ) + from .contentchunk import ContentChunk, ContentChunkTypedDict + from .conversationappendrequest import ( + ConversationAppendRequest, + ConversationAppendRequestHandoffExecution, + ConversationAppendRequestTypedDict, + ) + from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestHandoffExecution, + ConversationAppendStreamRequestTypedDict, + ) + from .conversationevents import ( + ConversationEvents, 
+ ConversationEventsData, + ConversationEventsDataTypedDict, + ConversationEventsTypedDict, + ) + from .conversationhistory import ( + ConversationHistory, + ConversationHistoryObject, + ConversationHistoryTypedDict, + Entries, + EntriesTypedDict, + ) + from .conversationinputs import ConversationInputs, ConversationInputsTypedDict + from .conversationmessages import ( + ConversationMessages, + ConversationMessagesObject, + ConversationMessagesTypedDict, + ) + from .conversationrequest import ( + AgentVersion, + AgentVersionTypedDict, + ConversationRequest, + ConversationRequestTypedDict, + HandoffExecution, + Tools, + ToolsTypedDict, + ) + from .conversationresponse import ( + ConversationResponse, + ConversationResponseObject, + ConversationResponseTypedDict, + Outputs, + OutputsTypedDict, + ) + from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestAgentVersion, + ConversationRestartRequestAgentVersionTypedDict, + ConversationRestartRequestHandoffExecution, + ConversationRestartRequestTypedDict, + ) + from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestAgentVersion, + ConversationRestartStreamRequestAgentVersionTypedDict, + ConversationRestartStreamRequestHandoffExecution, + ConversationRestartStreamRequestTypedDict, + ) + from .conversationstreamrequest import ( + ConversationStreamRequest, + ConversationStreamRequestAgentVersion, + ConversationStreamRequestAgentVersionTypedDict, + ConversationStreamRequestHandoffExecution, + ConversationStreamRequestTools, + ConversationStreamRequestToolsTypedDict, + ConversationStreamRequestTypedDict, + ) + from .conversationusageinfo import ( + ConversationUsageInfo, + ConversationUsageInfoTypedDict, + ) + from .delete_model_v1_models_model_id_deleteop import ( + DeleteModelV1ModelsModelIDDeleteRequest, + DeleteModelV1ModelsModelIDDeleteRequestTypedDict, + ) + from .deletefileout import DeleteFileOut, 
DeleteFileOutTypedDict + from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict + from .deltamessage import ( + Content, + ContentTypedDict, + DeltaMessage, + DeltaMessageTypedDict, + ) + from .documentlibrarytool import ( + DocumentLibraryTool, + DocumentLibraryToolType, + DocumentLibraryToolTypedDict, + ) + from .documentout import DocumentOut, DocumentOutTypedDict + from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict + from .documentupdatein import ( + Attributes, + AttributesTypedDict, + DocumentUpdateIn, + DocumentUpdateInTypedDict, + ) + from .documenturlchunk import ( + DocumentURLChunk, + DocumentURLChunkType, + DocumentURLChunkTypedDict, + ) + from .embeddingdtype import EmbeddingDtype + from .embeddingrequest import ( + EmbeddingRequest, + EmbeddingRequestInputs, + EmbeddingRequestInputsTypedDict, + EmbeddingRequestTypedDict, + ) + from .embeddingresponse import EmbeddingResponse, EmbeddingResponseTypedDict + from .embeddingresponsedata import ( + EmbeddingResponseData, + EmbeddingResponseDataTypedDict, + ) + from .encodingformat import EncodingFormat + from .entitytype import EntityType + from .eventout import EventOut, EventOutTypedDict + from .file import File, FileTypedDict + from .filechunk import FileChunk, FileChunkTypedDict + from .filepurpose import FilePurpose + from .files_api_routes_delete_fileop import ( + FilesAPIRoutesDeleteFileRequest, + FilesAPIRoutesDeleteFileRequestTypedDict, + ) + from .files_api_routes_download_fileop import ( + FilesAPIRoutesDownloadFileRequest, + FilesAPIRoutesDownloadFileRequestTypedDict, + ) + from .files_api_routes_get_signed_urlop import ( + FilesAPIRoutesGetSignedURLRequest, + FilesAPIRoutesGetSignedURLRequestTypedDict, + ) + from .files_api_routes_list_filesop import ( + FilesAPIRoutesListFilesRequest, + FilesAPIRoutesListFilesRequestTypedDict, + ) + from .files_api_routes_retrieve_fileop import ( + FilesAPIRoutesRetrieveFileRequest, + 
FilesAPIRoutesRetrieveFileRequestTypedDict, + ) + from .files_api_routes_upload_fileop import ( + FilesAPIRoutesUploadFileMultiPartBodyParams, + FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, + ) + from .fileschema import FileSchema, FileSchemaTypedDict + from .filesignedurl import FileSignedURL, FileSignedURLTypedDict + from .fimcompletionrequest import ( + FIMCompletionRequest, + FIMCompletionRequestStop, + FIMCompletionRequestStopTypedDict, + FIMCompletionRequestTypedDict, + ) + from .fimcompletionresponse import ( + FIMCompletionResponse, + FIMCompletionResponseTypedDict, + ) + from .fimcompletionstreamrequest import ( + FIMCompletionStreamRequest, + FIMCompletionStreamRequestStop, + FIMCompletionStreamRequestStopTypedDict, + FIMCompletionStreamRequestTypedDict, + ) + from .finetuneablemodeltype import FineTuneableModelType + from .ftclassifierlossfunction import FTClassifierLossFunction + from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, + ) + from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict + from .function import Function, FunctionTypedDict + from .functioncall import ( + Arguments, + ArgumentsTypedDict, + FunctionCall, + FunctionCallTypedDict, + ) + from .functioncallentry import ( + FunctionCallEntry, + FunctionCallEntryObject, + FunctionCallEntryType, + FunctionCallEntryTypedDict, + ) + from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, + ) + from .functioncallevent import ( + FunctionCallEvent, + FunctionCallEventType, + FunctionCallEventTypedDict, + ) + from .functionname import FunctionName, FunctionNameTypedDict + from .functionresultentry import ( + FunctionResultEntry, + FunctionResultEntryObject, + FunctionResultEntryType, + FunctionResultEntryTypedDict, + ) + from .functiontool import FunctionTool, FunctionToolType, FunctionToolTypedDict + from .githubrepositoryin import ( + GithubRepositoryIn, + 
GithubRepositoryInType, + GithubRepositoryInTypedDict, + ) + from .githubrepositoryout import ( + GithubRepositoryOut, + GithubRepositoryOutType, + GithubRepositoryOutTypedDict, + ) + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imagegenerationtool import ( + ImageGenerationTool, + ImageGenerationToolType, + ImageGenerationToolTypedDict, + ) + from .imageurl import ImageURL, ImageURLTypedDict + from .imageurlchunk import ( + ImageURLChunk, + ImageURLChunkImageURL, + ImageURLChunkImageURLTypedDict, + ImageURLChunkType, + ImageURLChunkTypedDict, + ) + from .inputentries import InputEntries, InputEntriesTypedDict + from .inputs import ( + Inputs, + InputsTypedDict, + InstructRequestInputs, + InstructRequestInputsMessages, + InstructRequestInputsMessagesTypedDict, + InstructRequestInputsTypedDict, + ) + from .instructrequest import ( + InstructRequest, + InstructRequestMessages, + InstructRequestMessagesTypedDict, + InstructRequestTypedDict, + ) + from .jobin import ( + Hyperparameters, + HyperparametersTypedDict, + JobIn, + JobInIntegrations, + JobInIntegrationsTypedDict, + JobInRepositories, + JobInRepositoriesTypedDict, + JobInTypedDict, + ) + from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict + from .jobs_api_routes_batch_cancel_batch_jobop import ( + JobsAPIRoutesBatchCancelBatchJobRequest, + JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobop import ( + JobsAPIRoutesBatchGetBatchJobRequest, + JobsAPIRoutesBatchGetBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobsop import ( + JobsAPIRoutesBatchGetBatchJobsRequest, + JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( + 
JobsAPIRoutesFineTuningCancelFineTuningJobRequest, + JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningCancelFineTuningJobResponse, + JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, + ) + from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCreateFineTuningJobResponse, + JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, + Response1, + Response1TypedDict, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningGetFineTuningJobRequest, + JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobResponse, + JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( + JobsAPIRoutesFineTuningGetFineTuningJobsRequest, + JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, + QueryParamStatus, + ) + from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningStartFineTuningJobRequest, + JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningStartFineTuningJobResponse, + JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, + ) + from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, + JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, + ) + from .jobsout import ( + JobsOut, + JobsOutData, + JobsOutDataTypedDict, + JobsOutObject, + JobsOutTypedDict, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .legacyjobmetadataout import ( + 
LegacyJobMetadataOut, + LegacyJobMetadataOutObject, + LegacyJobMetadataOutTypedDict, + ) + from .libraries_delete_v1op import ( + LibrariesDeleteV1Request, + LibrariesDeleteV1RequestTypedDict, + ) + from .libraries_documents_delete_v1op import ( + LibrariesDocumentsDeleteV1Request, + LibrariesDocumentsDeleteV1RequestTypedDict, + ) + from .libraries_documents_get_extracted_text_signed_url_v1op import ( + LibrariesDocumentsGetExtractedTextSignedURLV1Request, + LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_signed_url_v1op import ( + LibrariesDocumentsGetSignedURLV1Request, + LibrariesDocumentsGetSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_status_v1op import ( + LibrariesDocumentsGetStatusV1Request, + LibrariesDocumentsGetStatusV1RequestTypedDict, + ) + from .libraries_documents_get_text_content_v1op import ( + LibrariesDocumentsGetTextContentV1Request, + LibrariesDocumentsGetTextContentV1RequestTypedDict, + ) + from .libraries_documents_get_v1op import ( + LibrariesDocumentsGetV1Request, + LibrariesDocumentsGetV1RequestTypedDict, + ) + from .libraries_documents_list_v1op import ( + LibrariesDocumentsListV1Request, + LibrariesDocumentsListV1RequestTypedDict, + ) + from .libraries_documents_reprocess_v1op import ( + LibrariesDocumentsReprocessV1Request, + LibrariesDocumentsReprocessV1RequestTypedDict, + ) + from .libraries_documents_update_v1op import ( + LibrariesDocumentsUpdateV1Request, + LibrariesDocumentsUpdateV1RequestTypedDict, + ) + from .libraries_documents_upload_v1op import ( + LibrariesDocumentsUploadV1DocumentUpload, + LibrariesDocumentsUploadV1DocumentUploadTypedDict, + LibrariesDocumentsUploadV1Request, + LibrariesDocumentsUploadV1RequestTypedDict, + ) + from .libraries_get_v1op import ( + LibrariesGetV1Request, + LibrariesGetV1RequestTypedDict, + ) + from .libraries_share_create_v1op import ( + LibrariesShareCreateV1Request, + LibrariesShareCreateV1RequestTypedDict, + ) + from 
.libraries_share_delete_v1op import ( + LibrariesShareDeleteV1Request, + LibrariesShareDeleteV1RequestTypedDict, + ) + from .libraries_share_list_v1op import ( + LibrariesShareListV1Request, + LibrariesShareListV1RequestTypedDict, + ) + from .libraries_update_v1op import ( + LibrariesUpdateV1Request, + LibrariesUpdateV1RequestTypedDict, + ) + from .libraryin import LibraryIn, LibraryInTypedDict + from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict + from .libraryout import LibraryOut, LibraryOutTypedDict + from .listdocumentout import ListDocumentOut, ListDocumentOutTypedDict + from .listfilesout import ListFilesOut, ListFilesOutTypedDict + from .listlibraryout import ListLibraryOut, ListLibraryOutTypedDict + from .listsharingout import ListSharingOut, ListSharingOutTypedDict + from .messageentries import MessageEntries, MessageEntriesTypedDict + from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, + ) + from .messageinputentry import ( + MessageInputEntry, + MessageInputEntryContent, + MessageInputEntryContentTypedDict, + MessageInputEntryRole, + MessageInputEntryType, + MessageInputEntryTypedDict, + Object, + ) + from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, + ) + from .messageoutputentry import ( + MessageOutputEntry, + MessageOutputEntryContent, + MessageOutputEntryContentTypedDict, + MessageOutputEntryObject, + MessageOutputEntryRole, + MessageOutputEntryType, + MessageOutputEntryTypedDict, + ) + from .messageoutputevent import ( + MessageOutputEvent, + MessageOutputEventContent, + MessageOutputEventContentTypedDict, + MessageOutputEventRole, + MessageOutputEventType, + MessageOutputEventTypedDict, + ) + from .metricout import MetricOut, MetricOutTypedDict + from .mistralpromptmode import MistralPromptMode + from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict + from .modelconversation import ( + 
ModelConversation, + ModelConversationObject, + ModelConversationTools, + ModelConversationToolsTypedDict, + ModelConversationTypedDict, + ) + from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict + from .moderationobject import ModerationObject, ModerationObjectTypedDict + from .moderationresponse import ModerationResponse, ModerationResponseTypedDict + from .no_response_error import NoResponseError + from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict + from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict + from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict + from .ocrrequest import ( + Document, + DocumentTypedDict, + OCRRequest, + OCRRequestTypedDict, + TableFormat, + ) + from .ocrresponse import OCRResponse, OCRResponseTypedDict + from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict + from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict + from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict + from .paginationinfo import PaginationInfo, PaginationInfoTypedDict + from .prediction import Prediction, PredictionTypedDict + from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict + from .realtimetranscriptionerror import ( + RealtimeTranscriptionError, + RealtimeTranscriptionErrorTypedDict, + ) + from .realtimetranscriptionerrordetail import ( + Message, + MessageTypedDict, + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailTypedDict, + ) + from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, + ) + from .realtimetranscriptionsessioncreated import ( + RealtimeTranscriptionSessionCreated, + RealtimeTranscriptionSessionCreatedTypedDict, + ) + from .realtimetranscriptionsessionupdated import ( + RealtimeTranscriptionSessionUpdated, + RealtimeTranscriptionSessionUpdatedTypedDict, + ) + from .referencechunk import ( + 
ReferenceChunk, + ReferenceChunkType, + ReferenceChunkTypedDict, + ) + from .requestsource import RequestSource + from .responsedoneevent import ( + ResponseDoneEvent, + ResponseDoneEventType, + ResponseDoneEventTypedDict, + ) + from .responseerrorevent import ( + ResponseErrorEvent, + ResponseErrorEventType, + ResponseErrorEventTypedDict, + ) + from .responseformat import ResponseFormat, ResponseFormatTypedDict + from .responseformats import ResponseFormats + from .responsestartedevent import ( + ResponseStartedEvent, + ResponseStartedEventType, + ResponseStartedEventTypedDict, + ) + from .responsevalidationerror import ResponseValidationError + from .retrieve_model_v1_models_model_id_getop import ( + RetrieveModelV1ModelsModelIDGetRequest, + RetrieveModelV1ModelsModelIDGetRequestTypedDict, + RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict, + ) + from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict + from .sampletype import SampleType + from .sdkerror import SDKError + from .security import Security, SecurityTypedDict + from .shareenum import ShareEnum + from .sharingdelete import SharingDelete, SharingDeleteTypedDict + from .sharingin import SharingIn, SharingInTypedDict + from .sharingout import SharingOut, SharingOutTypedDict + from .source import Source + from .ssetypes import SSETypes + from .systemmessage import ( + Role, + SystemMessage, + SystemMessageContent, + SystemMessageContentTypedDict, + SystemMessageTypedDict, + ) + from .systemmessagecontentchunks import ( + SystemMessageContentChunks, + SystemMessageContentChunksTypedDict, + ) + from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict + from .thinkchunk import ( + ThinkChunk, + ThinkChunkType, + ThinkChunkTypedDict, + Thinking, + ThinkingTypedDict, + ) + from .timestampgranularity import TimestampGranularity + from .tool import Tool, ToolTypedDict + from .toolcall 
import ToolCall, ToolCallTypedDict + from .toolchoice import ToolChoice, ToolChoiceTypedDict + from .toolchoiceenum import ToolChoiceEnum + from .toolexecutiondeltaevent import ( + ToolExecutionDeltaEvent, + ToolExecutionDeltaEventName, + ToolExecutionDeltaEventNameTypedDict, + ToolExecutionDeltaEventType, + ToolExecutionDeltaEventTypedDict, + ) + from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventName, + ToolExecutionDoneEventNameTypedDict, + ToolExecutionDoneEventType, + ToolExecutionDoneEventTypedDict, + ) + from .toolexecutionentry import ( + Name, + NameTypedDict, + ToolExecutionEntry, + ToolExecutionEntryObject, + ToolExecutionEntryType, + ToolExecutionEntryTypedDict, + ) + from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventName, + ToolExecutionStartedEventNameTypedDict, + ToolExecutionStartedEventType, + ToolExecutionStartedEventTypedDict, + ) + from .toolfilechunk import ( + ToolFileChunk, + ToolFileChunkTool, + ToolFileChunkToolTypedDict, + ToolFileChunkType, + ToolFileChunkTypedDict, + ) + from .toolmessage import ( + ToolMessage, + ToolMessageContent, + ToolMessageContentTypedDict, + ToolMessageRole, + ToolMessageTypedDict, + ) + from .toolreferencechunk import ( + ToolReferenceChunk, + ToolReferenceChunkTool, + ToolReferenceChunkToolTypedDict, + ToolReferenceChunkType, + ToolReferenceChunkTypedDict, + ) + from .tooltypes import ToolTypes + from .trainingfile import TrainingFile, TrainingFileTypedDict + from .transcriptionresponse import ( + TranscriptionResponse, + TranscriptionResponseTypedDict, + ) + from .transcriptionsegmentchunk import ( + TranscriptionSegmentChunk, + TranscriptionSegmentChunkTypedDict, + Type, + ) + from .transcriptionstreamdone import ( + TranscriptionStreamDone, + TranscriptionStreamDoneType, + TranscriptionStreamDoneTypedDict, + ) + from .transcriptionstreamevents import ( + TranscriptionStreamEvents, + TranscriptionStreamEventsData, + 
TranscriptionStreamEventsDataTypedDict, + TranscriptionStreamEventsTypedDict, + ) + from .transcriptionstreameventtypes import TranscriptionStreamEventTypes + from .transcriptionstreamlanguage import ( + TranscriptionStreamLanguage, + TranscriptionStreamLanguageType, + TranscriptionStreamLanguageTypedDict, + ) + from .transcriptionstreamsegmentdelta import ( + TranscriptionStreamSegmentDelta, + TranscriptionStreamSegmentDeltaType, + TranscriptionStreamSegmentDeltaTypedDict, + ) + from .transcriptionstreamtextdelta import ( + TranscriptionStreamTextDelta, + TranscriptionStreamTextDeltaType, + TranscriptionStreamTextDeltaTypedDict, + ) + from .unarchiveftmodelout import ( + UnarchiveFTModelOut, + UnarchiveFTModelOutObject, + UnarchiveFTModelOutTypedDict, + ) + from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict + from .uploadfileout import UploadFileOut, UploadFileOutTypedDict + from .usageinfo import UsageInfo, UsageInfoTypedDict + from .usermessage import ( + UserMessage, + UserMessageContent, + UserMessageContentTypedDict, + UserMessageRole, + UserMessageTypedDict, + ) + from .validationerror import ( + Loc, + LocTypedDict, + ValidationError, + ValidationErrorTypedDict, + ) + from .wandbintegration import ( + WandbIntegration, + WandbIntegrationType, + WandbIntegrationTypedDict, + ) + from .wandbintegrationout import ( + WandbIntegrationOut, + WandbIntegrationOutType, + WandbIntegrationOutTypedDict, + ) + from .websearchpremiumtool import ( + WebSearchPremiumTool, + WebSearchPremiumToolType, + WebSearchPremiumToolTypedDict, + ) + from .websearchtool import WebSearchTool, WebSearchToolType, WebSearchToolTypedDict + +__all__ = [ + "APIEndpoint", + "Agent", + "AgentAliasResponse", + "AgentAliasResponseTypedDict", + "AgentConversation", + "AgentConversationAgentVersion", + "AgentConversationAgentVersionTypedDict", + "AgentConversationObject", + "AgentConversationTypedDict", + "AgentCreationRequest", + "AgentCreationRequestTools", + 
"AgentCreationRequestToolsTypedDict", + "AgentCreationRequestTypedDict", + "AgentHandoffDoneEvent", + "AgentHandoffDoneEventType", + "AgentHandoffDoneEventTypedDict", + "AgentHandoffEntry", + "AgentHandoffEntryObject", + "AgentHandoffEntryType", + "AgentHandoffEntryTypedDict", + "AgentHandoffStartedEvent", + "AgentHandoffStartedEventType", + "AgentHandoffStartedEventTypedDict", + "AgentObject", + "AgentTools", + "AgentToolsTypedDict", + "AgentTypedDict", + "AgentUpdateRequest", + "AgentUpdateRequestTools", + "AgentUpdateRequestToolsTypedDict", + "AgentUpdateRequestTypedDict", + "AgentVersion", + "AgentVersionTypedDict", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", + "AgentsAPIV1AgentsDeleteRequest", + "AgentsAPIV1AgentsDeleteRequestTypedDict", + "AgentsAPIV1AgentsGetRequest", + "AgentsAPIV1AgentsGetRequestTypedDict", + "AgentsAPIV1AgentsGetVersionRequest", + "AgentsAPIV1AgentsGetVersionRequestTypedDict", + "AgentsAPIV1AgentsListRequest", + "AgentsAPIV1AgentsListRequestTypedDict", + "AgentsAPIV1AgentsListVersionAliasesRequest", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict", + "AgentsAPIV1AgentsListVersionsRequest", + "AgentsAPIV1AgentsListVersionsRequestTypedDict", + "AgentsAPIV1AgentsUpdateRequest", + "AgentsAPIV1AgentsUpdateRequestTypedDict", + "AgentsAPIV1AgentsUpdateVersionRequest", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", + "AgentsAPIV1ConversationsAppendRequest", + "AgentsAPIV1ConversationsAppendRequestTypedDict", + "AgentsAPIV1ConversationsAppendStreamRequest", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", + "AgentsAPIV1ConversationsDeleteRequest", + "AgentsAPIV1ConversationsDeleteRequestTypedDict", + "AgentsAPIV1ConversationsGetRequest", + "AgentsAPIV1ConversationsGetRequestTypedDict", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", + "AgentsAPIV1ConversationsHistoryRequest", + 
"AgentsAPIV1ConversationsHistoryRequestTypedDict", + "AgentsAPIV1ConversationsListRequest", + "AgentsAPIV1ConversationsListRequestTypedDict", + "AgentsAPIV1ConversationsMessagesRequest", + "AgentsAPIV1ConversationsMessagesRequestTypedDict", + "AgentsAPIV1ConversationsRestartRequest", + "AgentsAPIV1ConversationsRestartRequestTypedDict", + "AgentsAPIV1ConversationsRestartStreamRequest", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", + "AgentsCompletionRequest", + "AgentsCompletionRequestMessages", + "AgentsCompletionRequestMessagesTypedDict", + "AgentsCompletionRequestStop", + "AgentsCompletionRequestStopTypedDict", + "AgentsCompletionRequestToolChoice", + "AgentsCompletionRequestToolChoiceTypedDict", + "AgentsCompletionRequestTypedDict", + "AgentsCompletionStreamRequest", + "AgentsCompletionStreamRequestMessages", + "AgentsCompletionStreamRequestMessagesTypedDict", + "AgentsCompletionStreamRequestStop", + "AgentsCompletionStreamRequestStopTypedDict", + "AgentsCompletionStreamRequestToolChoice", + "AgentsCompletionStreamRequestToolChoiceTypedDict", + "AgentsCompletionStreamRequestTypedDict", + "ArchiveFTModelOut", + "ArchiveFTModelOutObject", + "ArchiveFTModelOutTypedDict", + "Arguments", + "ArgumentsTypedDict", + "AssistantMessage", + "AssistantMessageContent", + "AssistantMessageContentTypedDict", + "AssistantMessageRole", + "AssistantMessageTypedDict", + "Attributes", + "AttributesTypedDict", + "AudioChunk", + "AudioChunkType", + "AudioChunkTypedDict", + "AudioEncoding", + "AudioFormat", + "AudioFormatTypedDict", + "AudioTranscriptionRequest", + "AudioTranscriptionRequestStream", + "AudioTranscriptionRequestStreamTypedDict", + "AudioTranscriptionRequestTypedDict", + "BaseModelCard", + "BaseModelCardType", + "BaseModelCardTypedDict", + "BatchError", + "BatchErrorTypedDict", + "BatchJobIn", + "BatchJobInTypedDict", + "BatchJobOut", + "BatchJobOutObject", + "BatchJobOutTypedDict", + "BatchJobStatus", + "BatchJobsOut", + "BatchJobsOutObject", + 
"BatchJobsOutTypedDict", + "BatchRequest", + "BatchRequestTypedDict", + "BuiltInConnectors", + "ChatClassificationRequest", + "ChatClassificationRequestTypedDict", + "ChatCompletionChoice", + "ChatCompletionChoiceTypedDict", + "ChatCompletionRequest", + "ChatCompletionRequestToolChoice", + "ChatCompletionRequestToolChoiceTypedDict", + "ChatCompletionRequestTypedDict", + "ChatCompletionResponse", + "ChatCompletionResponseTypedDict", + "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessages", + "ChatCompletionStreamRequestMessagesTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", + "ChatCompletionStreamRequestToolChoice", + "ChatCompletionStreamRequestToolChoiceTypedDict", + "ChatCompletionStreamRequestTypedDict", + "ChatModerationRequest", + "ChatModerationRequestInputs", + "ChatModerationRequestInputsTypedDict", + "ChatModerationRequestTypedDict", + "CheckpointOut", + "CheckpointOutTypedDict", + "ClassificationRequest", + "ClassificationRequestInputs", + "ClassificationRequestInputsTypedDict", + "ClassificationRequestTypedDict", + "ClassificationResponse", + "ClassificationResponseTypedDict", + "ClassificationTargetResult", + "ClassificationTargetResultTypedDict", + "ClassifierDetailedJobOut", + "ClassifierDetailedJobOutIntegrations", + "ClassifierDetailedJobOutIntegrationsTypedDict", + "ClassifierDetailedJobOutJobType", + "ClassifierDetailedJobOutObject", + "ClassifierDetailedJobOutStatus", + "ClassifierDetailedJobOutTypedDict", + "ClassifierFTModelOut", + "ClassifierFTModelOutModelType", + "ClassifierFTModelOutObject", + "ClassifierFTModelOutTypedDict", + "ClassifierJobOut", + "ClassifierJobOutIntegrations", + "ClassifierJobOutIntegrationsTypedDict", + "ClassifierJobOutJobType", + "ClassifierJobOutObject", + "ClassifierJobOutStatus", + "ClassifierJobOutTypedDict", + "ClassifierTargetIn", + "ClassifierTargetInTypedDict", + "ClassifierTargetOut", + "ClassifierTargetOutTypedDict", + 
"ClassifierTrainingParameters", + "ClassifierTrainingParametersIn", + "ClassifierTrainingParametersInTypedDict", + "ClassifierTrainingParametersTypedDict", + "CodeInterpreterTool", + "CodeInterpreterToolType", + "CodeInterpreterToolTypedDict", + "CompletionArgs", + "CompletionArgsStop", + "CompletionArgsStopTypedDict", + "CompletionArgsTypedDict", + "CompletionChunk", + "CompletionChunkTypedDict", + "CompletionDetailedJobOut", + "CompletionDetailedJobOutIntegrations", + "CompletionDetailedJobOutIntegrationsTypedDict", + "CompletionDetailedJobOutJobType", + "CompletionDetailedJobOutObject", + "CompletionDetailedJobOutRepositories", + "CompletionDetailedJobOutRepositoriesTypedDict", + "CompletionDetailedJobOutStatus", + "CompletionDetailedJobOutTypedDict", + "CompletionEvent", + "CompletionEventTypedDict", + "CompletionFTModelOut", + "CompletionFTModelOutObject", + "CompletionFTModelOutTypedDict", + "CompletionJobOut", + "CompletionJobOutObject", + "CompletionJobOutTypedDict", + "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", + "CompletionResponseStreamChoiceTypedDict", + "CompletionTrainingParameters", + "CompletionTrainingParametersIn", + "CompletionTrainingParametersInTypedDict", + "CompletionTrainingParametersTypedDict", + "Content", + "ContentChunk", + "ContentChunkTypedDict", + "ContentTypedDict", + "ConversationAppendRequest", + "ConversationAppendRequestHandoffExecution", + "ConversationAppendRequestTypedDict", + "ConversationAppendStreamRequest", + "ConversationAppendStreamRequestHandoffExecution", + "ConversationAppendStreamRequestTypedDict", + "ConversationEvents", + "ConversationEventsData", + "ConversationEventsDataTypedDict", + "ConversationEventsTypedDict", + "ConversationHistory", + "ConversationHistoryObject", + "ConversationHistoryTypedDict", + "ConversationInputs", + "ConversationInputsTypedDict", + "ConversationMessages", + "ConversationMessagesObject", + "ConversationMessagesTypedDict", + "ConversationRequest", + 
"ConversationRequestTypedDict", + "ConversationResponse", + "ConversationResponseObject", + "ConversationResponseTypedDict", + "ConversationRestartRequest", + "ConversationRestartRequestAgentVersion", + "ConversationRestartRequestAgentVersionTypedDict", + "ConversationRestartRequestHandoffExecution", + "ConversationRestartRequestTypedDict", + "ConversationRestartStreamRequest", + "ConversationRestartStreamRequestAgentVersion", + "ConversationRestartStreamRequestAgentVersionTypedDict", + "ConversationRestartStreamRequestHandoffExecution", + "ConversationRestartStreamRequestTypedDict", + "ConversationStreamRequest", + "ConversationStreamRequestAgentVersion", + "ConversationStreamRequestAgentVersionTypedDict", + "ConversationStreamRequestHandoffExecution", + "ConversationStreamRequestTools", + "ConversationStreamRequestToolsTypedDict", + "ConversationStreamRequestTypedDict", + "ConversationUsageInfo", + "ConversationUsageInfoTypedDict", + "Data", + "DataTypedDict", + "DeleteFileOut", + "DeleteFileOutTypedDict", + "DeleteModelOut", + "DeleteModelOutTypedDict", + "DeleteModelV1ModelsModelIDDeleteRequest", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", + "DeltaMessage", + "DeltaMessageTypedDict", + "Document", + "DocumentLibraryTool", + "DocumentLibraryToolType", + "DocumentLibraryToolTypedDict", + "DocumentOut", + "DocumentOutTypedDict", + "DocumentTextContent", + "DocumentTextContentTypedDict", + "DocumentTypedDict", + "DocumentURLChunk", + "DocumentURLChunkType", + "DocumentURLChunkTypedDict", + "DocumentUpdateIn", + "DocumentUpdateInTypedDict", + "EmbeddingDtype", + "EmbeddingRequest", + "EmbeddingRequestInputs", + "EmbeddingRequestInputsTypedDict", + "EmbeddingRequestTypedDict", + "EmbeddingResponse", + "EmbeddingResponseData", + "EmbeddingResponseDataTypedDict", + "EmbeddingResponseTypedDict", + "EncodingFormat", + "EntityType", + "Entries", + "EntriesTypedDict", + "EventOut", + "EventOutTypedDict", + "FIMCompletionRequest", + "FIMCompletionRequestStop", + 
"FIMCompletionRequestStopTypedDict", + "FIMCompletionRequestTypedDict", + "FIMCompletionResponse", + "FIMCompletionResponseTypedDict", + "FIMCompletionStreamRequest", + "FIMCompletionStreamRequestStop", + "FIMCompletionStreamRequestStopTypedDict", + "FIMCompletionStreamRequestTypedDict", + "FTClassifierLossFunction", + "FTModelCapabilitiesOut", + "FTModelCapabilitiesOutTypedDict", + "FTModelCard", + "FTModelCardType", + "FTModelCardTypedDict", + "File", + "FileChunk", + "FileChunkTypedDict", + "FilePurpose", + "FileSchema", + "FileSchemaTypedDict", + "FileSignedURL", + "FileSignedURLTypedDict", + "FileTypedDict", + "FilesAPIRoutesDeleteFileRequest", + "FilesAPIRoutesDeleteFileRequestTypedDict", + "FilesAPIRoutesDownloadFileRequest", + "FilesAPIRoutesDownloadFileRequestTypedDict", + "FilesAPIRoutesGetSignedURLRequest", + "FilesAPIRoutesGetSignedURLRequestTypedDict", + "FilesAPIRoutesListFilesRequest", + "FilesAPIRoutesListFilesRequestTypedDict", + "FilesAPIRoutesRetrieveFileRequest", + "FilesAPIRoutesRetrieveFileRequestTypedDict", + "FilesAPIRoutesUploadFileMultiPartBodyParams", + "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", + "FineTuneableModelType", + "FinishReason", + "Format", + "Function", + "FunctionCall", + "FunctionCallEntry", + "FunctionCallEntryArguments", + "FunctionCallEntryArgumentsTypedDict", + "FunctionCallEntryObject", + "FunctionCallEntryType", + "FunctionCallEntryTypedDict", + "FunctionCallEvent", + "FunctionCallEventType", + "FunctionCallEventTypedDict", + "FunctionCallTypedDict", + "FunctionName", + "FunctionNameTypedDict", + "FunctionResultEntry", + "FunctionResultEntryObject", + "FunctionResultEntryType", + "FunctionResultEntryTypedDict", + "FunctionTool", + "FunctionToolType", + "FunctionToolTypedDict", + "FunctionTypedDict", + "GithubRepositoryIn", + "GithubRepositoryInType", + "GithubRepositoryInTypedDict", + "GithubRepositoryOut", + "GithubRepositoryOutType", + "GithubRepositoryOutTypedDict", + "HTTPValidationError", + 
"HTTPValidationErrorData", + "HandoffExecution", + "Hyperparameters", + "HyperparametersTypedDict", + "ImageGenerationTool", + "ImageGenerationToolType", + "ImageGenerationToolTypedDict", + "ImageURL", + "ImageURLChunk", + "ImageURLChunkImageURL", + "ImageURLChunkImageURLTypedDict", + "ImageURLChunkType", + "ImageURLChunkTypedDict", + "ImageURLTypedDict", + "InputEntries", + "InputEntriesTypedDict", + "Inputs", + "InputsTypedDict", + "InstructRequest", + "InstructRequestInputs", + "InstructRequestInputsMessages", + "InstructRequestInputsMessagesTypedDict", + "InstructRequestInputsTypedDict", + "InstructRequestMessages", + "InstructRequestMessagesTypedDict", + "InstructRequestTypedDict", + "Integrations", + "IntegrationsTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", + "JobIn", + "JobInIntegrations", + "JobInIntegrationsTypedDict", + "JobInRepositories", + "JobInRepositoriesTypedDict", + "JobInTypedDict", + "JobMetadataOut", + "JobMetadataOutTypedDict", + "JobType", + "JobsAPIRoutesBatchCancelBatchJobRequest", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobRequest", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobsRequest", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + 
"JobsAPIRoutesFineTuningGetFineTuningJobsRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + "JobsOut", + "JobsOutData", + "JobsOutDataTypedDict", + "JobsOutObject", + "JobsOutTypedDict", + "LegacyJobMetadataOut", + "LegacyJobMetadataOutObject", + "LegacyJobMetadataOutTypedDict", + "LibrariesDeleteV1Request", + "LibrariesDeleteV1RequestTypedDict", + "LibrariesDocumentsDeleteV1Request", + "LibrariesDocumentsDeleteV1RequestTypedDict", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetSignedURLV1Request", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetStatusV1Request", + "LibrariesDocumentsGetStatusV1RequestTypedDict", + "LibrariesDocumentsGetTextContentV1Request", + "LibrariesDocumentsGetTextContentV1RequestTypedDict", + "LibrariesDocumentsGetV1Request", + "LibrariesDocumentsGetV1RequestTypedDict", + "LibrariesDocumentsListV1Request", + "LibrariesDocumentsListV1RequestTypedDict", + "LibrariesDocumentsReprocessV1Request", + "LibrariesDocumentsReprocessV1RequestTypedDict", + "LibrariesDocumentsUpdateV1Request", + "LibrariesDocumentsUpdateV1RequestTypedDict", + "LibrariesDocumentsUploadV1DocumentUpload", + "LibrariesDocumentsUploadV1DocumentUploadTypedDict", + "LibrariesDocumentsUploadV1Request", + 
"LibrariesDocumentsUploadV1RequestTypedDict", + "LibrariesGetV1Request", + "LibrariesGetV1RequestTypedDict", + "LibrariesShareCreateV1Request", + "LibrariesShareCreateV1RequestTypedDict", + "LibrariesShareDeleteV1Request", + "LibrariesShareDeleteV1RequestTypedDict", + "LibrariesShareListV1Request", + "LibrariesShareListV1RequestTypedDict", + "LibrariesUpdateV1Request", + "LibrariesUpdateV1RequestTypedDict", + "LibraryIn", + "LibraryInTypedDict", + "LibraryInUpdate", + "LibraryInUpdateTypedDict", + "LibraryOut", + "LibraryOutTypedDict", + "ListDocumentOut", + "ListDocumentOutTypedDict", + "ListFilesOut", + "ListFilesOutTypedDict", + "ListLibraryOut", + "ListLibraryOutTypedDict", + "ListSharingOut", + "ListSharingOutTypedDict", + "Loc", + "LocTypedDict", + "Message", + "MessageEntries", + "MessageEntriesTypedDict", + "MessageInputContentChunks", + "MessageInputContentChunksTypedDict", + "MessageInputEntry", + "MessageInputEntryContent", + "MessageInputEntryContentTypedDict", + "MessageInputEntryRole", + "MessageInputEntryType", + "MessageInputEntryTypedDict", + "MessageOutputContentChunks", + "MessageOutputContentChunksTypedDict", + "MessageOutputEntry", + "MessageOutputEntryContent", + "MessageOutputEntryContentTypedDict", + "MessageOutputEntryObject", + "MessageOutputEntryRole", + "MessageOutputEntryType", + "MessageOutputEntryTypedDict", + "MessageOutputEvent", + "MessageOutputEventContent", + "MessageOutputEventContentTypedDict", + "MessageOutputEventRole", + "MessageOutputEventType", + "MessageOutputEventTypedDict", + "MessageTypedDict", + "Messages", + "MessagesTypedDict", + "MetricOut", + "MetricOutTypedDict", + "MistralError", + "MistralPromptMode", + "ModelCapabilities", + "ModelCapabilitiesTypedDict", + "ModelConversation", + "ModelConversationObject", + "ModelConversationTools", + "ModelConversationToolsTypedDict", + "ModelConversationTypedDict", + "ModelList", + "ModelListTypedDict", + "ModelType", + "ModerationObject", + "ModerationObjectTypedDict", + 
"ModerationResponse", + "ModerationResponseTypedDict", + "Name", + "NameTypedDict", + "NoResponseError", + "OCRImageObject", + "OCRImageObjectTypedDict", + "OCRPageDimensions", + "OCRPageDimensionsTypedDict", + "OCRPageObject", + "OCRPageObjectTypedDict", + "OCRRequest", + "OCRRequestTypedDict", + "OCRResponse", + "OCRResponseTypedDict", + "OCRTableObject", + "OCRTableObjectTypedDict", + "OCRUsageInfo", + "OCRUsageInfoTypedDict", + "Object", + "One", + "OneTypedDict", + "OutputContentChunks", + "OutputContentChunksTypedDict", + "Outputs", + "OutputsTypedDict", + "PaginationInfo", + "PaginationInfoTypedDict", + "Prediction", + "PredictionTypedDict", + "ProcessingStatusOut", + "ProcessingStatusOutTypedDict", + "QueryParamAgentVersion", + "QueryParamAgentVersionTypedDict", + "QueryParamStatus", + "RealtimeTranscriptionError", + "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionErrorDetailTypedDict", + "RealtimeTranscriptionErrorTypedDict", + "RealtimeTranscriptionSession", + "RealtimeTranscriptionSessionCreated", + "RealtimeTranscriptionSessionCreatedTypedDict", + "RealtimeTranscriptionSessionTypedDict", + "RealtimeTranscriptionSessionUpdated", + "RealtimeTranscriptionSessionUpdatedTypedDict", + "ReferenceChunk", + "ReferenceChunkType", + "ReferenceChunkTypedDict", + "Repositories", + "RepositoriesTypedDict", + "RequestSource", + "Response1", + "Response1TypedDict", + "ResponseBody", + "ResponseBodyTypedDict", + "ResponseDoneEvent", + "ResponseDoneEventType", + "ResponseDoneEventTypedDict", + "ResponseErrorEvent", + "ResponseErrorEventType", + "ResponseErrorEventTypedDict", + "ResponseFormat", + "ResponseFormatTypedDict", + "ResponseFormats", + "ResponseStartedEvent", + "ResponseStartedEventType", + "ResponseStartedEventTypedDict", + "ResponseValidationError", + "RetrieveFileOut", + "RetrieveFileOutTypedDict", + "RetrieveModelV1ModelsModelIDGetRequest", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict", + 
"RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", + "Role", + "SDKError", + "SSETypes", + "SampleType", + "Security", + "SecurityTypedDict", + "ShareEnum", + "SharingDelete", + "SharingDeleteTypedDict", + "SharingIn", + "SharingInTypedDict", + "SharingOut", + "SharingOutTypedDict", + "Source", + "Status", + "Stop", + "StopTypedDict", + "SystemMessage", + "SystemMessageContent", + "SystemMessageContentChunks", + "SystemMessageContentChunksTypedDict", + "SystemMessageContentTypedDict", + "SystemMessageTypedDict", + "TableFormat", + "TextChunk", + "TextChunkType", + "TextChunkTypedDict", + "ThinkChunk", + "ThinkChunkType", + "ThinkChunkTypedDict", + "Thinking", + "ThinkingTypedDict", + "TimestampGranularity", + "Tool", + "ToolCall", + "ToolCallTypedDict", + "ToolChoice", + "ToolChoiceEnum", + "ToolChoiceTypedDict", + "ToolExecutionDeltaEvent", + "ToolExecutionDeltaEventName", + "ToolExecutionDeltaEventNameTypedDict", + "ToolExecutionDeltaEventType", + "ToolExecutionDeltaEventTypedDict", + "ToolExecutionDoneEvent", + "ToolExecutionDoneEventName", + "ToolExecutionDoneEventNameTypedDict", + "ToolExecutionDoneEventType", + "ToolExecutionDoneEventTypedDict", + "ToolExecutionEntry", + "ToolExecutionEntryObject", + "ToolExecutionEntryType", + "ToolExecutionEntryTypedDict", + "ToolExecutionStartedEvent", + "ToolExecutionStartedEventName", + "ToolExecutionStartedEventNameTypedDict", + "ToolExecutionStartedEventType", + "ToolExecutionStartedEventTypedDict", + "ToolFileChunk", + "ToolFileChunkTool", + "ToolFileChunkToolTypedDict", + "ToolFileChunkType", + "ToolFileChunkTypedDict", + "ToolMessage", + "ToolMessageContent", + "ToolMessageContentTypedDict", + "ToolMessageRole", + "ToolMessageTypedDict", + "ToolReferenceChunk", + "ToolReferenceChunkTool", + "ToolReferenceChunkToolTypedDict", + "ToolReferenceChunkType", + "ToolReferenceChunkTypedDict", + "ToolTypedDict", + 
"ToolTypes", + "Tools", + "ToolsTypedDict", + "TrainingFile", + "TrainingFileTypedDict", + "TranscriptionResponse", + "TranscriptionResponseTypedDict", + "TranscriptionSegmentChunk", + "TranscriptionSegmentChunkTypedDict", + "TranscriptionStreamDone", + "TranscriptionStreamDoneType", + "TranscriptionStreamDoneTypedDict", + "TranscriptionStreamEventTypes", + "TranscriptionStreamEvents", + "TranscriptionStreamEventsData", + "TranscriptionStreamEventsDataTypedDict", + "TranscriptionStreamEventsTypedDict", + "TranscriptionStreamLanguage", + "TranscriptionStreamLanguageType", + "TranscriptionStreamLanguageTypedDict", + "TranscriptionStreamSegmentDelta", + "TranscriptionStreamSegmentDeltaType", + "TranscriptionStreamSegmentDeltaTypedDict", + "TranscriptionStreamTextDelta", + "TranscriptionStreamTextDeltaType", + "TranscriptionStreamTextDeltaTypedDict", + "Two", + "TwoTypedDict", + "Type", + "UnarchiveFTModelOut", + "UnarchiveFTModelOutObject", + "UnarchiveFTModelOutTypedDict", + "UpdateFTModelIn", + "UpdateFTModelInTypedDict", + "UploadFileOut", + "UploadFileOutTypedDict", + "UsageInfo", + "UsageInfoTypedDict", + "UserMessage", + "UserMessageContent", + "UserMessageContentTypedDict", + "UserMessageRole", + "UserMessageTypedDict", + "ValidationError", + "ValidationErrorTypedDict", + "WandbIntegration", + "WandbIntegrationOut", + "WandbIntegrationOutType", + "WandbIntegrationOutTypedDict", + "WandbIntegrationType", + "WandbIntegrationTypedDict", + "WebSearchPremiumTool", + "WebSearchPremiumToolType", + "WebSearchPremiumToolTypedDict", + "WebSearchTool", + "WebSearchToolType", + "WebSearchToolTypedDict", +] + +_dynamic_imports: dict[str, str] = { + "Agent": ".agent", + "AgentObject": ".agent", + "AgentTools": ".agent", + "AgentToolsTypedDict": ".agent", + "AgentTypedDict": ".agent", + "AgentAliasResponse": ".agentaliasresponse", + "AgentAliasResponseTypedDict": ".agentaliasresponse", + "AgentConversation": ".agentconversation", + "AgentConversationAgentVersion": 
".agentconversation", + "AgentConversationAgentVersionTypedDict": ".agentconversation", + "AgentConversationObject": ".agentconversation", + "AgentConversationTypedDict": ".agentconversation", + "AgentCreationRequest": ".agentcreationrequest", + "AgentCreationRequestTools": ".agentcreationrequest", + "AgentCreationRequestToolsTypedDict": ".agentcreationrequest", + "AgentCreationRequestTypedDict": ".agentcreationrequest", + "AgentHandoffDoneEvent": ".agenthandoffdoneevent", + "AgentHandoffDoneEventType": ".agenthandoffdoneevent", + "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", + "AgentHandoffEntry": ".agenthandoffentry", + "AgentHandoffEntryObject": ".agenthandoffentry", + "AgentHandoffEntryType": ".agenthandoffentry", + "AgentHandoffEntryTypedDict": ".agenthandoffentry", + "AgentHandoffStartedEvent": ".agenthandoffstartedevent", + "AgentHandoffStartedEventType": ".agenthandoffstartedevent", + "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", + "QueryParamAgentVersion": ".agents_api_v1_agents_getop", + "QueryParamAgentVersionTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", + 
"AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateRequest": ".agents_api_v1_agents_updateop", + "AgentsAPIV1AgentsUpdateRequestTypedDict": ".agents_api_v1_agents_updateop", + "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", + "ResponseBody": ".agents_api_v1_conversations_listop", + 
"ResponseBodyTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", + "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", + "AgentsCompletionRequest": ".agentscompletionrequest", + "AgentsCompletionRequestMessages": ".agentscompletionrequest", + "AgentsCompletionRequestMessagesTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestStop": ".agentscompletionrequest", + "AgentsCompletionRequestStopTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestToolChoice": ".agentscompletionrequest", + "AgentsCompletionRequestToolChoiceTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestTypedDict": ".agentscompletionrequest", + "AgentsCompletionStreamRequest": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessages": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessagesTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestStop": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestStopTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", + "AgentUpdateRequest": ".agentupdaterequest", + "AgentUpdateRequestTools": ".agentupdaterequest", + "AgentUpdateRequestToolsTypedDict": ".agentupdaterequest", + "AgentUpdateRequestTypedDict": 
".agentupdaterequest", + "APIEndpoint": ".apiendpoint", + "ArchiveFTModelOut": ".archiveftmodelout", + "ArchiveFTModelOutObject": ".archiveftmodelout", + "ArchiveFTModelOutTypedDict": ".archiveftmodelout", + "AssistantMessage": ".assistantmessage", + "AssistantMessageContent": ".assistantmessage", + "AssistantMessageContentTypedDict": ".assistantmessage", + "AssistantMessageRole": ".assistantmessage", + "AssistantMessageTypedDict": ".assistantmessage", + "AudioChunk": ".audiochunk", + "AudioChunkType": ".audiochunk", + "AudioChunkTypedDict": ".audiochunk", + "AudioEncoding": ".audioencoding", + "AudioFormat": ".audioformat", + "AudioFormatTypedDict": ".audioformat", + "AudioTranscriptionRequest": ".audiotranscriptionrequest", + "AudioTranscriptionRequestTypedDict": ".audiotranscriptionrequest", + "AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", + "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", + "BaseModelCard": ".basemodelcard", + "BaseModelCardType": ".basemodelcard", + "BaseModelCardTypedDict": ".basemodelcard", + "BatchError": ".batcherror", + "BatchErrorTypedDict": ".batcherror", + "BatchJobIn": ".batchjobin", + "BatchJobInTypedDict": ".batchjobin", + "BatchJobOut": ".batchjobout", + "BatchJobOutObject": ".batchjobout", + "BatchJobOutTypedDict": ".batchjobout", + "BatchJobsOut": ".batchjobsout", + "BatchJobsOutObject": ".batchjobsout", + "BatchJobsOutTypedDict": ".batchjobsout", + "BatchJobStatus": ".batchjobstatus", + "BatchRequest": ".batchrequest", + "BatchRequestTypedDict": ".batchrequest", + "BuiltInConnectors": ".builtinconnectors", + "ChatClassificationRequest": ".chatclassificationrequest", + "ChatClassificationRequestTypedDict": ".chatclassificationrequest", + "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", + "FinishReason": ".chatcompletionchoice", + "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestToolChoice": 
".chatcompletionrequest", + "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestTypedDict": ".chatcompletionrequest", + "Messages": ".chatcompletionrequest", + "MessagesTypedDict": ".chatcompletionrequest", + "Stop": ".chatcompletionrequest", + "StopTypedDict": ".chatcompletionrequest", + "ChatCompletionResponse": ".chatcompletionresponse", + "ChatCompletionResponseTypedDict": ".chatcompletionresponse", + "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessages": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessagesTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", + "ChatModerationRequest": ".chatmoderationrequest", + "ChatModerationRequestInputs": ".chatmoderationrequest", + "ChatModerationRequestInputsTypedDict": ".chatmoderationrequest", + "ChatModerationRequestTypedDict": ".chatmoderationrequest", + "One": ".chatmoderationrequest", + "OneTypedDict": ".chatmoderationrequest", + "Two": ".chatmoderationrequest", + "TwoTypedDict": ".chatmoderationrequest", + "CheckpointOut": ".checkpointout", + "CheckpointOutTypedDict": ".checkpointout", + "ClassificationRequest": ".classificationrequest", + "ClassificationRequestInputs": ".classificationrequest", + "ClassificationRequestInputsTypedDict": ".classificationrequest", + "ClassificationRequestTypedDict": ".classificationrequest", + "ClassificationResponse": ".classificationresponse", + "ClassificationResponseTypedDict": ".classificationresponse", + "ClassificationTargetResult": ".classificationtargetresult", + "ClassificationTargetResultTypedDict": 
".classificationtargetresult", + "ClassifierDetailedJobOut": ".classifierdetailedjobout", + "ClassifierDetailedJobOutIntegrations": ".classifierdetailedjobout", + "ClassifierDetailedJobOutIntegrationsTypedDict": ".classifierdetailedjobout", + "ClassifierDetailedJobOutJobType": ".classifierdetailedjobout", + "ClassifierDetailedJobOutObject": ".classifierdetailedjobout", + "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", + "ClassifierDetailedJobOutTypedDict": ".classifierdetailedjobout", + "ClassifierFTModelOut": ".classifierftmodelout", + "ClassifierFTModelOutModelType": ".classifierftmodelout", + "ClassifierFTModelOutObject": ".classifierftmodelout", + "ClassifierFTModelOutTypedDict": ".classifierftmodelout", + "ClassifierJobOut": ".classifierjobout", + "ClassifierJobOutIntegrations": ".classifierjobout", + "ClassifierJobOutIntegrationsTypedDict": ".classifierjobout", + "ClassifierJobOutJobType": ".classifierjobout", + "ClassifierJobOutObject": ".classifierjobout", + "ClassifierJobOutStatus": ".classifierjobout", + "ClassifierJobOutTypedDict": ".classifierjobout", + "ClassifierTargetIn": ".classifiertargetin", + "ClassifierTargetInTypedDict": ".classifiertargetin", + "ClassifierTargetOut": ".classifiertargetout", + "ClassifierTargetOutTypedDict": ".classifiertargetout", + "ClassifierTrainingParameters": ".classifiertrainingparameters", + "ClassifierTrainingParametersTypedDict": ".classifiertrainingparameters", + "ClassifierTrainingParametersIn": ".classifiertrainingparametersin", + "ClassifierTrainingParametersInTypedDict": ".classifiertrainingparametersin", + "CodeInterpreterTool": ".codeinterpretertool", + "CodeInterpreterToolType": ".codeinterpretertool", + "CodeInterpreterToolTypedDict": ".codeinterpretertool", + "CompletionArgs": ".completionargs", + "CompletionArgsTypedDict": ".completionargs", + "CompletionArgsStop": ".completionargsstop", + "CompletionArgsStopTypedDict": ".completionargsstop", + "CompletionChunk": ".completionchunk", + 
"CompletionChunkTypedDict": ".completionchunk", + "CompletionDetailedJobOut": ".completiondetailedjobout", + "CompletionDetailedJobOutIntegrations": ".completiondetailedjobout", + "CompletionDetailedJobOutIntegrationsTypedDict": ".completiondetailedjobout", + "CompletionDetailedJobOutJobType": ".completiondetailedjobout", + "CompletionDetailedJobOutObject": ".completiondetailedjobout", + "CompletionDetailedJobOutRepositories": ".completiondetailedjobout", + "CompletionDetailedJobOutRepositoriesTypedDict": ".completiondetailedjobout", + "CompletionDetailedJobOutStatus": ".completiondetailedjobout", + "CompletionDetailedJobOutTypedDict": ".completiondetailedjobout", + "CompletionEvent": ".completionevent", + "CompletionEventTypedDict": ".completionevent", + "CompletionFTModelOut": ".completionftmodelout", + "CompletionFTModelOutObject": ".completionftmodelout", + "CompletionFTModelOutTypedDict": ".completionftmodelout", + "ModelType": ".completionftmodelout", + "CompletionJobOut": ".completionjobout", + "CompletionJobOutObject": ".completionjobout", + "CompletionJobOutTypedDict": ".completionjobout", + "Integrations": ".completionjobout", + "IntegrationsTypedDict": ".completionjobout", + "JobType": ".completionjobout", + "Repositories": ".completionjobout", + "RepositoriesTypedDict": ".completionjobout", + "Status": ".completionjobout", + "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", + "CompletionTrainingParameters": ".completiontrainingparameters", + "CompletionTrainingParametersTypedDict": ".completiontrainingparameters", + "CompletionTrainingParametersIn": ".completiontrainingparametersin", + "CompletionTrainingParametersInTypedDict": ".completiontrainingparametersin", + "ContentChunk": ".contentchunk", + "ContentChunkTypedDict": ".contentchunk", + "ConversationAppendRequest": 
".conversationappendrequest", + "ConversationAppendRequestHandoffExecution": ".conversationappendrequest", + "ConversationAppendRequestTypedDict": ".conversationappendrequest", + "ConversationAppendStreamRequest": ".conversationappendstreamrequest", + "ConversationAppendStreamRequestHandoffExecution": ".conversationappendstreamrequest", + "ConversationAppendStreamRequestTypedDict": ".conversationappendstreamrequest", + "ConversationEvents": ".conversationevents", + "ConversationEventsData": ".conversationevents", + "ConversationEventsDataTypedDict": ".conversationevents", + "ConversationEventsTypedDict": ".conversationevents", + "ConversationHistory": ".conversationhistory", + "ConversationHistoryObject": ".conversationhistory", + "ConversationHistoryTypedDict": ".conversationhistory", + "Entries": ".conversationhistory", + "EntriesTypedDict": ".conversationhistory", + "ConversationInputs": ".conversationinputs", + "ConversationInputsTypedDict": ".conversationinputs", + "ConversationMessages": ".conversationmessages", + "ConversationMessagesObject": ".conversationmessages", + "ConversationMessagesTypedDict": ".conversationmessages", + "AgentVersion": ".conversationrequest", + "AgentVersionTypedDict": ".conversationrequest", + "ConversationRequest": ".conversationrequest", + "ConversationRequestTypedDict": ".conversationrequest", + "HandoffExecution": ".conversationrequest", + "Tools": ".conversationrequest", + "ToolsTypedDict": ".conversationrequest", + "ConversationResponse": ".conversationresponse", + "ConversationResponseObject": ".conversationresponse", + "ConversationResponseTypedDict": ".conversationresponse", + "Outputs": ".conversationresponse", + "OutputsTypedDict": ".conversationresponse", + "ConversationRestartRequest": ".conversationrestartrequest", + "ConversationRestartRequestAgentVersion": ".conversationrestartrequest", + "ConversationRestartRequestAgentVersionTypedDict": ".conversationrestartrequest", + "ConversationRestartRequestHandoffExecution": 
".conversationrestartrequest", + "ConversationRestartRequestTypedDict": ".conversationrestartrequest", + "ConversationRestartStreamRequest": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersion": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestAgentVersionTypedDict": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestHandoffExecution": ".conversationrestartstreamrequest", + "ConversationRestartStreamRequestTypedDict": ".conversationrestartstreamrequest", + "ConversationStreamRequest": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersion": ".conversationstreamrequest", + "ConversationStreamRequestAgentVersionTypedDict": ".conversationstreamrequest", + "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", + "ConversationStreamRequestTools": ".conversationstreamrequest", + "ConversationStreamRequestToolsTypedDict": ".conversationstreamrequest", + "ConversationStreamRequestTypedDict": ".conversationstreamrequest", + "ConversationUsageInfo": ".conversationusageinfo", + "ConversationUsageInfoTypedDict": ".conversationusageinfo", + "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", + "DeleteFileOut": ".deletefileout", + "DeleteFileOutTypedDict": ".deletefileout", + "DeleteModelOut": ".deletemodelout", + "DeleteModelOutTypedDict": ".deletemodelout", + "Content": ".deltamessage", + "ContentTypedDict": ".deltamessage", + "DeltaMessage": ".deltamessage", + "DeltaMessageTypedDict": ".deltamessage", + "DocumentLibraryTool": ".documentlibrarytool", + "DocumentLibraryToolType": ".documentlibrarytool", + "DocumentLibraryToolTypedDict": ".documentlibrarytool", + "DocumentOut": ".documentout", + "DocumentOutTypedDict": ".documentout", + "DocumentTextContent": ".documenttextcontent", + "DocumentTextContentTypedDict": 
".documenttextcontent", + "Attributes": ".documentupdatein", + "AttributesTypedDict": ".documentupdatein", + "DocumentUpdateIn": ".documentupdatein", + "DocumentUpdateInTypedDict": ".documentupdatein", + "DocumentURLChunk": ".documenturlchunk", + "DocumentURLChunkType": ".documenturlchunk", + "DocumentURLChunkTypedDict": ".documenturlchunk", + "EmbeddingDtype": ".embeddingdtype", + "EmbeddingRequest": ".embeddingrequest", + "EmbeddingRequestInputs": ".embeddingrequest", + "EmbeddingRequestInputsTypedDict": ".embeddingrequest", + "EmbeddingRequestTypedDict": ".embeddingrequest", + "EmbeddingResponse": ".embeddingresponse", + "EmbeddingResponseTypedDict": ".embeddingresponse", + "EmbeddingResponseData": ".embeddingresponsedata", + "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", + "EncodingFormat": ".encodingformat", + "EntityType": ".entitytype", + "EventOut": ".eventout", + "EventOutTypedDict": ".eventout", + "File": ".file", + "FileTypedDict": ".file", + "FileChunk": ".filechunk", + "FileChunkTypedDict": ".filechunk", + "FilePurpose": ".filepurpose", + "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", + "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", + "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", + "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", + "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", + "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", + "FilesAPIRoutesUploadFileMultiPartBodyParams": ".files_api_routes_upload_fileop", + 
"FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", + "FileSchema": ".fileschema", + "FileSchemaTypedDict": ".fileschema", + "FileSignedURL": ".filesignedurl", + "FileSignedURLTypedDict": ".filesignedurl", + "FIMCompletionRequest": ".fimcompletionrequest", + "FIMCompletionRequestStop": ".fimcompletionrequest", + "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", + "FIMCompletionRequestTypedDict": ".fimcompletionrequest", + "FIMCompletionResponse": ".fimcompletionresponse", + "FIMCompletionResponseTypedDict": ".fimcompletionresponse", + "FIMCompletionStreamRequest": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStop": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", + "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", + "FineTuneableModelType": ".finetuneablemodeltype", + "FTClassifierLossFunction": ".ftclassifierlossfunction", + "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", + "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", + "FTModelCard": ".ftmodelcard", + "FTModelCardType": ".ftmodelcard", + "FTModelCardTypedDict": ".ftmodelcard", + "Function": ".function", + "FunctionTypedDict": ".function", + "Arguments": ".functioncall", + "ArgumentsTypedDict": ".functioncall", + "FunctionCall": ".functioncall", + "FunctionCallTypedDict": ".functioncall", + "FunctionCallEntry": ".functioncallentry", + "FunctionCallEntryObject": ".functioncallentry", + "FunctionCallEntryType": ".functioncallentry", + "FunctionCallEntryTypedDict": ".functioncallentry", + "FunctionCallEntryArguments": ".functioncallentryarguments", + "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", + "FunctionCallEvent": ".functioncallevent", + "FunctionCallEventType": ".functioncallevent", + "FunctionCallEventTypedDict": ".functioncallevent", + "FunctionName": ".functionname", + "FunctionNameTypedDict": ".functionname", + 
"FunctionResultEntry": ".functionresultentry", + "FunctionResultEntryObject": ".functionresultentry", + "FunctionResultEntryType": ".functionresultentry", + "FunctionResultEntryTypedDict": ".functionresultentry", + "FunctionTool": ".functiontool", + "FunctionToolType": ".functiontool", + "FunctionToolTypedDict": ".functiontool", + "GithubRepositoryIn": ".githubrepositoryin", + "GithubRepositoryInType": ".githubrepositoryin", + "GithubRepositoryInTypedDict": ".githubrepositoryin", + "GithubRepositoryOut": ".githubrepositoryout", + "GithubRepositoryOutType": ".githubrepositoryout", + "GithubRepositoryOutTypedDict": ".githubrepositoryout", + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "ImageGenerationTool": ".imagegenerationtool", + "ImageGenerationToolType": ".imagegenerationtool", + "ImageGenerationToolTypedDict": ".imagegenerationtool", + "ImageURL": ".imageurl", + "ImageURLTypedDict": ".imageurl", + "ImageURLChunk": ".imageurlchunk", + "ImageURLChunkImageURL": ".imageurlchunk", + "ImageURLChunkImageURLTypedDict": ".imageurlchunk", + "ImageURLChunkType": ".imageurlchunk", + "ImageURLChunkTypedDict": ".imageurlchunk", + "InputEntries": ".inputentries", + "InputEntriesTypedDict": ".inputentries", + "Inputs": ".inputs", + "InputsTypedDict": ".inputs", + "InstructRequestInputs": ".inputs", + "InstructRequestInputsMessages": ".inputs", + "InstructRequestInputsMessagesTypedDict": ".inputs", + "InstructRequestInputsTypedDict": ".inputs", + "InstructRequest": ".instructrequest", + "InstructRequestMessages": ".instructrequest", + "InstructRequestMessagesTypedDict": ".instructrequest", + "InstructRequestTypedDict": ".instructrequest", + "Hyperparameters": ".jobin", + "HyperparametersTypedDict": ".jobin", + "JobIn": ".jobin", + "JobInIntegrations": ".jobin", + "JobInIntegrationsTypedDict": ".jobin", + "JobInRepositories": ".jobin", + "JobInRepositoriesTypedDict": ".jobin", + "JobInTypedDict": ".jobin", + 
"JobMetadataOut": ".jobmetadataout", + "JobMetadataOutTypedDict": ".jobmetadataout", + "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "Response1": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "Response1TypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + 
"JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "QueryParamStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsOut": ".jobsout", + "JobsOutData": ".jobsout", + "JobsOutDataTypedDict": ".jobsout", + "JobsOutObject": ".jobsout", + "JobsOutTypedDict": ".jobsout", + "JSONSchema": ".jsonschema", + "JSONSchemaTypedDict": ".jsonschema", + "LegacyJobMetadataOut": ".legacyjobmetadataout", + 
"LegacyJobMetadataOutObject": ".legacyjobmetadataout", + "LegacyJobMetadataOutTypedDict": ".legacyjobmetadataout", + "LibrariesDeleteV1Request": ".libraries_delete_v1op", + "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", + "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", + "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", + "LibrariesDocumentsGetV1RequestTypedDict": ".libraries_documents_get_v1op", + "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", + "LibrariesDocumentsListV1RequestTypedDict": ".libraries_documents_list_v1op", + "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", + "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", + "LibrariesDocumentsUploadV1DocumentUpload": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1DocumentUploadTypedDict": ".libraries_documents_upload_v1op", + 
"LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", + "LibrariesGetV1Request": ".libraries_get_v1op", + "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", + "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", + "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", + "LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op", + "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", + "LibrariesShareListV1Request": ".libraries_share_list_v1op", + "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", + "LibrariesUpdateV1Request": ".libraries_update_v1op", + "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", + "LibraryIn": ".libraryin", + "LibraryInTypedDict": ".libraryin", + "LibraryInUpdate": ".libraryinupdate", + "LibraryInUpdateTypedDict": ".libraryinupdate", + "LibraryOut": ".libraryout", + "LibraryOutTypedDict": ".libraryout", + "ListDocumentOut": ".listdocumentout", + "ListDocumentOutTypedDict": ".listdocumentout", + "ListFilesOut": ".listfilesout", + "ListFilesOutTypedDict": ".listfilesout", + "ListLibraryOut": ".listlibraryout", + "ListLibraryOutTypedDict": ".listlibraryout", + "ListSharingOut": ".listsharingout", + "ListSharingOutTypedDict": ".listsharingout", + "MessageEntries": ".messageentries", + "MessageEntriesTypedDict": ".messageentries", + "MessageInputContentChunks": ".messageinputcontentchunks", + "MessageInputContentChunksTypedDict": ".messageinputcontentchunks", + "MessageInputEntry": ".messageinputentry", + "MessageInputEntryContent": ".messageinputentry", + "MessageInputEntryContentTypedDict": ".messageinputentry", + "MessageInputEntryRole": ".messageinputentry", + "MessageInputEntryType": ".messageinputentry", + "MessageInputEntryTypedDict": ".messageinputentry", + "Object": ".messageinputentry", + "MessageOutputContentChunks": 
".messageoutputcontentchunks", + "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", + "MessageOutputEntry": ".messageoutputentry", + "MessageOutputEntryContent": ".messageoutputentry", + "MessageOutputEntryContentTypedDict": ".messageoutputentry", + "MessageOutputEntryObject": ".messageoutputentry", + "MessageOutputEntryRole": ".messageoutputentry", + "MessageOutputEntryType": ".messageoutputentry", + "MessageOutputEntryTypedDict": ".messageoutputentry", + "MessageOutputEvent": ".messageoutputevent", + "MessageOutputEventContent": ".messageoutputevent", + "MessageOutputEventContentTypedDict": ".messageoutputevent", + "MessageOutputEventRole": ".messageoutputevent", + "MessageOutputEventType": ".messageoutputevent", + "MessageOutputEventTypedDict": ".messageoutputevent", + "MetricOut": ".metricout", + "MetricOutTypedDict": ".metricout", + "MistralPromptMode": ".mistralpromptmode", + "ModelCapabilities": ".modelcapabilities", + "ModelCapabilitiesTypedDict": ".modelcapabilities", + "ModelConversation": ".modelconversation", + "ModelConversationObject": ".modelconversation", + "ModelConversationTools": ".modelconversation", + "ModelConversationToolsTypedDict": ".modelconversation", + "ModelConversationTypedDict": ".modelconversation", + "Data": ".modellist", + "DataTypedDict": ".modellist", + "ModelList": ".modellist", + "ModelListTypedDict": ".modellist", + "ModerationObject": ".moderationobject", + "ModerationObjectTypedDict": ".moderationobject", + "ModerationResponse": ".moderationresponse", + "ModerationResponseTypedDict": ".moderationresponse", + "NoResponseError": ".no_response_error", + "OCRImageObject": ".ocrimageobject", + "OCRImageObjectTypedDict": ".ocrimageobject", + "OCRPageDimensions": ".ocrpagedimensions", + "OCRPageDimensionsTypedDict": ".ocrpagedimensions", + "OCRPageObject": ".ocrpageobject", + "OCRPageObjectTypedDict": ".ocrpageobject", + "Document": ".ocrrequest", + "DocumentTypedDict": ".ocrrequest", + "OCRRequest": 
".ocrrequest", + "OCRRequestTypedDict": ".ocrrequest", + "TableFormat": ".ocrrequest", + "OCRResponse": ".ocrresponse", + "OCRResponseTypedDict": ".ocrresponse", + "Format": ".ocrtableobject", + "OCRTableObject": ".ocrtableobject", + "OCRTableObjectTypedDict": ".ocrtableobject", + "OCRUsageInfo": ".ocrusageinfo", + "OCRUsageInfoTypedDict": ".ocrusageinfo", + "OutputContentChunks": ".outputcontentchunks", + "OutputContentChunksTypedDict": ".outputcontentchunks", + "PaginationInfo": ".paginationinfo", + "PaginationInfoTypedDict": ".paginationinfo", + "Prediction": ".prediction", + "PredictionTypedDict": ".prediction", + "ProcessingStatusOut": ".processingstatusout", + "ProcessingStatusOutTypedDict": ".processingstatusout", + "RealtimeTranscriptionError": ".realtimetranscriptionerror", + "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", + "Message": ".realtimetranscriptionerrordetail", + "MessageTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionSession": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", + "RealtimeTranscriptionSessionCreated": ".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionCreatedTypedDict": ".realtimetranscriptionsessioncreated", + "RealtimeTranscriptionSessionUpdated": ".realtimetranscriptionsessionupdated", + "RealtimeTranscriptionSessionUpdatedTypedDict": ".realtimetranscriptionsessionupdated", + "ReferenceChunk": ".referencechunk", + "ReferenceChunkType": ".referencechunk", + "ReferenceChunkTypedDict": ".referencechunk", + "RequestSource": ".requestsource", + "ResponseDoneEvent": ".responsedoneevent", + "ResponseDoneEventType": ".responsedoneevent", + "ResponseDoneEventTypedDict": ".responsedoneevent", + "ResponseErrorEvent": ".responseerrorevent", + 
"ResponseErrorEventType": ".responseerrorevent", + "ResponseErrorEventTypedDict": ".responseerrorevent", + "ResponseFormat": ".responseformat", + "ResponseFormatTypedDict": ".responseformat", + "ResponseFormats": ".responseformats", + "ResponseStartedEvent": ".responsestartedevent", + "ResponseStartedEventType": ".responsestartedevent", + "ResponseStartedEventTypedDict": ".responsestartedevent", + "ResponseValidationError": ".responsevalidationerror", + "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RetrieveFileOut": ".retrievefileout", + "RetrieveFileOutTypedDict": ".retrievefileout", + "SampleType": ".sampletype", + "SDKError": ".sdkerror", + "Security": ".security", + "SecurityTypedDict": ".security", + "ShareEnum": ".shareenum", + "SharingDelete": ".sharingdelete", + "SharingDeleteTypedDict": ".sharingdelete", + "SharingIn": ".sharingin", + "SharingInTypedDict": ".sharingin", + "SharingOut": ".sharingout", + "SharingOutTypedDict": ".sharingout", + "Source": ".source", + "SSETypes": ".ssetypes", + "Role": ".systemmessage", + "SystemMessage": ".systemmessage", + "SystemMessageContent": ".systemmessage", + "SystemMessageContentTypedDict": ".systemmessage", + "SystemMessageTypedDict": ".systemmessage", + "SystemMessageContentChunks": ".systemmessagecontentchunks", + "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", + "TextChunk": ".textchunk", + "TextChunkType": ".textchunk", + "TextChunkTypedDict": ".textchunk", + "ThinkChunk": ".thinkchunk", + "ThinkChunkType": ".thinkchunk", + "ThinkChunkTypedDict": ".thinkchunk", + "Thinking": ".thinkchunk", + 
"ThinkingTypedDict": ".thinkchunk", + "TimestampGranularity": ".timestampgranularity", + "Tool": ".tool", + "ToolTypedDict": ".tool", + "ToolCall": ".toolcall", + "ToolCallTypedDict": ".toolcall", + "ToolChoice": ".toolchoice", + "ToolChoiceTypedDict": ".toolchoice", + "ToolChoiceEnum": ".toolchoiceenum", + "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventType": ".toolexecutiondeltaevent", + "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", + "ToolExecutionDoneEvent": ".toolexecutiondoneevent", + "ToolExecutionDoneEventName": ".toolexecutiondoneevent", + "ToolExecutionDoneEventNameTypedDict": ".toolexecutiondoneevent", + "ToolExecutionDoneEventType": ".toolexecutiondoneevent", + "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", + "Name": ".toolexecutionentry", + "NameTypedDict": ".toolexecutionentry", + "ToolExecutionEntry": ".toolexecutionentry", + "ToolExecutionEntryObject": ".toolexecutionentry", + "ToolExecutionEntryType": ".toolexecutionentry", + "ToolExecutionEntryTypedDict": ".toolexecutionentry", + "ToolExecutionStartedEvent": ".toolexecutionstartedevent", + "ToolExecutionStartedEventName": ".toolexecutionstartedevent", + "ToolExecutionStartedEventNameTypedDict": ".toolexecutionstartedevent", + "ToolExecutionStartedEventType": ".toolexecutionstartedevent", + "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", + "ToolFileChunk": ".toolfilechunk", + "ToolFileChunkTool": ".toolfilechunk", + "ToolFileChunkToolTypedDict": ".toolfilechunk", + "ToolFileChunkType": ".toolfilechunk", + "ToolFileChunkTypedDict": ".toolfilechunk", + "ToolMessage": ".toolmessage", + "ToolMessageContent": ".toolmessage", + "ToolMessageContentTypedDict": ".toolmessage", + "ToolMessageRole": ".toolmessage", + "ToolMessageTypedDict": ".toolmessage", + "ToolReferenceChunk": 
".toolreferencechunk", + "ToolReferenceChunkTool": ".toolreferencechunk", + "ToolReferenceChunkToolTypedDict": ".toolreferencechunk", + "ToolReferenceChunkType": ".toolreferencechunk", + "ToolReferenceChunkTypedDict": ".toolreferencechunk", + "ToolTypes": ".tooltypes", + "TrainingFile": ".trainingfile", + "TrainingFileTypedDict": ".trainingfile", + "TranscriptionResponse": ".transcriptionresponse", + "TranscriptionResponseTypedDict": ".transcriptionresponse", + "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", + "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", + "Type": ".transcriptionsegmentchunk", + "TranscriptionStreamDone": ".transcriptionstreamdone", + "TranscriptionStreamDoneType": ".transcriptionstreamdone", + "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", + "TranscriptionStreamEvents": ".transcriptionstreamevents", + "TranscriptionStreamEventsData": ".transcriptionstreamevents", + "TranscriptionStreamEventsDataTypedDict": ".transcriptionstreamevents", + "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", + "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", + "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", + "TranscriptionStreamLanguageType": ".transcriptionstreamlanguage", + "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", + "TranscriptionStreamSegmentDelta": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamSegmentDeltaType": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", + "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", + "TranscriptionStreamTextDeltaType": ".transcriptionstreamtextdelta", + "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", + "UnarchiveFTModelOut": ".unarchiveftmodelout", + "UnarchiveFTModelOutObject": ".unarchiveftmodelout", + "UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", + "UpdateFTModelIn": 
".updateftmodelin", + "UpdateFTModelInTypedDict": ".updateftmodelin", + "UploadFileOut": ".uploadfileout", + "UploadFileOutTypedDict": ".uploadfileout", + "UsageInfo": ".usageinfo", + "UsageInfoTypedDict": ".usageinfo", + "UserMessage": ".usermessage", + "UserMessageContent": ".usermessage", + "UserMessageContentTypedDict": ".usermessage", + "UserMessageRole": ".usermessage", + "UserMessageTypedDict": ".usermessage", + "Loc": ".validationerror", + "LocTypedDict": ".validationerror", + "ValidationError": ".validationerror", + "ValidationErrorTypedDict": ".validationerror", + "WandbIntegration": ".wandbintegration", + "WandbIntegrationType": ".wandbintegration", + "WandbIntegrationTypedDict": ".wandbintegration", + "WandbIntegrationOut": ".wandbintegrationout", + "WandbIntegrationOutType": ".wandbintegrationout", + "WandbIntegrationOutTypedDict": ".wandbintegrationout", + "WebSearchPremiumTool": ".websearchpremiumtool", + "WebSearchPremiumToolType": ".websearchpremiumtool", + "WebSearchPremiumToolTypedDict": ".websearchpremiumtool", + "WebSearchTool": ".websearchtool", + "WebSearchToolType": ".websearchtool", + "WebSearchToolTypedDict": ".websearchtool", +} + + +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " + ) + + try: + module = dynamic_import(module_name) + result = getattr(module, attr_name) + return result + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: 
+ raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = builtins.list(_dynamic_imports.keys()) + return builtins.sorted(lazy_attrs) diff --git a/src/mistralai/client/models/agent.py b/src/mistralai/client/models/agent.py new file mode 100644 index 00000000..3bedb3a3 --- /dev/null +++ b/src/mistralai/client/models/agent.py @@ -0,0 +1,148 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentToolsTypedDict = TypeAliasType( + "AgentToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, 
Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +AgentObject = Literal["agent",] + + +class AgentTypedDict(TypedDict): + model: str + name: str + id: str + version: int + versions: List[int] + created_at: datetime + updated_at: datetime + deployment_chat: bool + source: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + object: NotRequired[AgentObject] + + +class Agent(BaseModel): + model: str + + name: str + + id: str + + version: int + + versions: List[int] + + created_at: datetime + + updated_at: datetime + + deployment_chat: bool + + source: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + object: Optional[AgentObject] = "agent" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "metadata", + "object", + ] + nullable_fields = ["instructions", "description", "handoffs", 
"metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agentaliasresponse.py b/src/mistralai/client/models/agentaliasresponse.py new file mode 100644 index 00000000..4bc8225c --- /dev/null +++ b/src/mistralai/client/models/agentaliasresponse.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class AgentAliasResponseTypedDict(TypedDict): + alias: str + version: int + created_at: datetime + updated_at: datetime + + +class AgentAliasResponse(BaseModel): + alias: str + + version: int + + created_at: datetime + + updated_at: datetime diff --git a/src/mistralai/client/models/agentconversation.py b/src/mistralai/client/models/agentconversation.py new file mode 100644 index 00000000..5dfa8c31 --- /dev/null +++ b/src/mistralai/client/models/agentconversation.py @@ -0,0 +1,95 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +AgentConversationObject = Literal["conversation",] + + +AgentConversationAgentVersionTypedDict = TypeAliasType( + "AgentConversationAgentVersionTypedDict", Union[str, int] +) + + +AgentConversationAgentVersion = TypeAliasType( + "AgentConversationAgentVersion", Union[str, int] +) + + +class AgentConversationTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + agent_id: str + name: NotRequired[Nullable[str]] + r"""Name given to the conversation.""" + description: NotRequired[Nullable[str]] + r"""Description of the what the conversation is about.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + object: NotRequired[AgentConversationObject] + agent_version: NotRequired[Nullable[AgentConversationAgentVersionTypedDict]] + + +class AgentConversation(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + agent_id: str + + name: OptionalNullable[str] = UNSET + r"""Name given to the conversation.""" + + description: OptionalNullable[str] = UNSET + r"""Description of the what the conversation is about.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + object: Optional[AgentConversationObject] = "conversation" + + agent_version: OptionalNullable[AgentConversationAgentVersion] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "description", "metadata", "object", "agent_version"] + nullable_fields = ["name", "description", "metadata", "agent_version"] + null_default_fields = [] + + serialized = 
handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agentcreationrequest.py b/src/mistralai/client/models/agentcreationrequest.py new file mode 100644 index 00000000..61a5aff5 --- /dev/null +++ b/src/mistralai/client/models/agentcreationrequest.py @@ -0,0 +1,119 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentCreationRequestToolsTypedDict = TypeAliasType( + "AgentCreationRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, 
+ CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentCreationRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class AgentCreationRequestTypedDict(TypedDict): + model: str + name: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentCreationRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class AgentCreationRequest(BaseModel): + model: str + + name: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentCreationRequestTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "metadata", + ] + nullable_fields = 
["instructions", "description", "handoffs", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agenthandoffdoneevent.py b/src/mistralai/client/models/agenthandoffdoneevent.py new file mode 100644 index 00000000..c826aa5e --- /dev/null +++ b/src/mistralai/client/models/agenthandoffdoneevent.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffDoneEventType = Literal["agent.handoff.done",] + + +class AgentHandoffDoneEventTypedDict(TypedDict): + id: str + next_agent_id: str + next_agent_name: str + type: NotRequired[AgentHandoffDoneEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class AgentHandoffDoneEvent(BaseModel): + id: str + + next_agent_id: str + + next_agent_name: str + + type: Optional[AgentHandoffDoneEventType] = "agent.handoff.done" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/client/models/agenthandoffentry.py b/src/mistralai/client/models/agenthandoffentry.py new file mode 100644 index 00000000..0b0de13f --- /dev/null +++ b/src/mistralai/client/models/agenthandoffentry.py @@ -0,0 +1,82 @@ 
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffEntryObject = Literal["entry",] + + +AgentHandoffEntryType = Literal["agent.handoff",] + + +class AgentHandoffEntryTypedDict(TypedDict): + previous_agent_id: str + previous_agent_name: str + next_agent_id: str + next_agent_name: str + object: NotRequired[AgentHandoffEntryObject] + type: NotRequired[AgentHandoffEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class AgentHandoffEntry(BaseModel): + previous_agent_id: str + + previous_agent_name: str + + next_agent_id: str + + next_agent_name: str + + object: Optional[AgentHandoffEntryObject] = "entry" + + type: Optional[AgentHandoffEntryType] = "agent.handoff" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = 
val + + return m diff --git a/src/mistralai/client/models/agenthandoffstartedevent.py b/src/mistralai/client/models/agenthandoffstartedevent.py new file mode 100644 index 00000000..4b8ff1e5 --- /dev/null +++ b/src/mistralai/client/models/agenthandoffstartedevent.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AgentHandoffStartedEventType = Literal["agent.handoff.started",] + + +class AgentHandoffStartedEventTypedDict(TypedDict): + id: str + previous_agent_id: str + previous_agent_name: str + type: NotRequired[AgentHandoffStartedEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class AgentHandoffStartedEvent(BaseModel): + id: str + + previous_agent_id: str + + previous_agent_name: str + + type: Optional[AgentHandoffStartedEventType] = "agent.handoff.started" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py b/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py new file mode 100644 index 00000000..33da325c --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict): + agent_id: str + alias: str + version: int + + +class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + alias: Annotated[ + str, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] + + version: Annotated[ + int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_deleteop.py b/src/mistralai/client/models/agents_api_v1_agents_deleteop.py new file mode 100644 index 00000000..58fe902f --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_deleteop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsDeleteRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py b/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py new file mode 100644 index 00000000..edcccda1 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict): + agent_id: str + version: str + + +class AgentsAPIV1AgentsGetVersionRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + version: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_getop.py b/src/mistralai/client/models/agents_api_v1_agents_getop.py new file mode 100644 index 00000000..d4817457 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_getop.py @@ -0,0 +1,68 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +QueryParamAgentVersionTypedDict = TypeAliasType( + "QueryParamAgentVersionTypedDict", Union[int, str] +) + + +QueryParamAgentVersion = TypeAliasType("QueryParamAgentVersion", Union[int, str]) + + +class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): + agent_id: str + agent_version: NotRequired[Nullable[QueryParamAgentVersionTypedDict]] + + +class AgentsAPIV1AgentsGetRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + agent_version: Annotated[ + OptionalNullable[QueryParamAgentVersion], + FieldMetadata(query=QueryParamMetadata(style="form", 
explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["agent_version"] + nullable_fields = ["agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py b/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py new file mode 100644 index 00000000..b9770fff --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict): + agent_id: str + + +class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py b/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py new file mode 100644 index 00000000..813335f9 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict): + agent_id: str + page: NotRequired[int] + r"""Page number (0-indexed)""" + page_size: NotRequired[int] + r"""Number of versions per page""" + + +class AgentsAPIV1AgentsListVersionsRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""Page number (0-indexed)""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 20 + r"""Number of versions per page""" diff --git a/src/mistralai/client/models/agents_api_v1_agents_listop.py b/src/mistralai/client/models/agents_api_v1_agents_listop.py new file mode 
100644 index 00000000..119f5123 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_listop.py @@ -0,0 +1,104 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .requestsource import RequestSource +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): + page: NotRequired[int] + r"""Page number (0-indexed)""" + page_size: NotRequired[int] + r"""Number of agents per page""" + deployment_chat: NotRequired[Nullable[bool]] + sources: NotRequired[Nullable[List[RequestSource]]] + name: NotRequired[Nullable[str]] + id: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class AgentsAPIV1AgentsListRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""Page number (0-indexed)""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 20 + r"""Number of agents per page""" + + deployment_chat: Annotated[ + OptionalNullable[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + sources: Annotated[ + OptionalNullable[List[RequestSource]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + 
OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(serialization="json")), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "deployment_chat", + "sources", + "name", + "id", + "metadata", + ] + nullable_fields = ["deployment_chat", "sources", "name", "id", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py new file mode 100644 index 00000000..116f952b --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsUpdateVersionRequestTypedDict(TypedDict): + agent_id: str + version: int + + +class AgentsAPIV1AgentsUpdateVersionRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + version: Annotated[ + int, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_updateop.py b/src/mistralai/client/models/agents_api_v1_agents_updateop.py new file mode 100644 index 00000000..116acaa7 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_updateop.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agentupdaterequest import AgentUpdateRequest, AgentUpdateRequestTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): + agent_id: str + agent_update_request: AgentUpdateRequestTypedDict + + +class AgentsAPIV1AgentsUpdateRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + agent_update_request: Annotated[ + AgentUpdateRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py new file mode 100644 index 00000000..9f00ffd4 --- /dev/null +++ 
b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .conversationappendstreamrequest import ( + ConversationAppendStreamRequest, + ConversationAppendStreamRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsAppendStreamRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation to which we append entries.""" + conversation_append_stream_request: ConversationAppendStreamRequestTypedDict + + +class AgentsAPIV1ConversationsAppendStreamRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation to which we append entries.""" + + conversation_append_stream_request: Annotated[ + ConversationAppendStreamRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_appendop.py b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py new file mode 100644 index 00000000..13d07ba9 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationappendrequest import ( + ConversationAppendRequest, + ConversationAppendRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsAppendRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation to which we append entries.""" + conversation_append_request: ConversationAppendRequestTypedDict + + +class AgentsAPIV1ConversationsAppendRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation to which we append entries.""" + + conversation_append_request: Annotated[ + ConversationAppendRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py new file mode 100644 index 00000000..81066f90 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" + + +class AgentsAPIV1ConversationsDeleteRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching metadata.""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_getop.py b/src/mistralai/client/models/agents_api_v1_conversations_getop.py new file mode 100644 index 00000000..c919f99e --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_getop.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching metadata.""" + + +class AgentsAPIV1ConversationsGetRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching metadata.""" + + +AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType( + "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", + Union[AgentConversationTypedDict, ModelConversationTypedDict], +) +r"""Successful Response""" + + +AgentsAPIV1ConversationsGetResponseV1ConversationsGet = TypeAliasType( + "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", + Union[AgentConversation, ModelConversation], +) +r"""Successful Response""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_historyop.py b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py new file mode 100644 index 00000000..ba1f8890 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching entries.""" + + +class AgentsAPIV1ConversationsHistoryRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching entries.""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_listop.py b/src/mistralai/client/models/agents_api_v1_conversations_listop.py new file mode 100644 index 00000000..bb3c7127 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_listop.py @@ -0,0 +1,80 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .agentconversation import AgentConversation, AgentConversationTypedDict +from .modelconversation import ModelConversation, ModelConversationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class AgentsAPIV1ConversationsListRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(serialization="json")), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["page", "page_size", "metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m + + +ResponseBodyTypedDict = TypeAliasType( + "ResponseBodyTypedDict", + 
Union[AgentConversationTypedDict, ModelConversationTypedDict], +) + + +ResponseBody = TypeAliasType( + "ResponseBody", Union[AgentConversation, ModelConversation] +) diff --git a/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py new file mode 100644 index 00000000..e05728f2 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the conversation from which we are fetching messages.""" + + +class AgentsAPIV1ConversationsMessagesRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the conversation from which we are fetching messages.""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py new file mode 100644 index 00000000..9b489ab4 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationrestartstreamrequest import ( + ConversationRestartStreamRequest, + ConversationRestartStreamRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the original conversation which is being restarted.""" + conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict + + +class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the original conversation which is being restarted.""" + + conversation_restart_stream_request: Annotated[ + ConversationRestartStreamRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_restartop.py b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py new file mode 100644 index 00000000..8bce3ce5 --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationrestartrequest import ( + ConversationRestartRequest, + ConversationRestartRequestTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): + conversation_id: str + r"""ID of the original conversation which is being restarted.""" + conversation_restart_request: ConversationRestartRequestTypedDict + + +class AgentsAPIV1ConversationsRestartRequest(BaseModel): + conversation_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""ID of the original conversation which is being restarted.""" + + conversation_restart_request: Annotated[ + ConversationRestartRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/agentscompletionrequest.py b/src/mistralai/client/models/agentscompletionrequest.py new file mode 100644 index 00000000..22368e44 --- /dev/null +++ b/src/mistralai/client/models/agentscompletionrequest.py @@ -0,0 +1,198 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentsCompletionRequestStopTypedDict = TypeAliasType( + "AgentsCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestStop = TypeAliasType( + "AgentsCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionRequestMessagesTypedDict = TypeAliasType( + "AgentsCompletionRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +AgentsCompletionRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +AgentsCompletionRequestToolChoiceTypedDict = TypeAliasType( + "AgentsCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) + + +AgentsCompletionRequestToolChoice = TypeAliasType( + "AgentsCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) + + +class AgentsCompletionRequestTypedDict(TypedDict): + messages: List[AgentsCompletionRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[AgentsCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[AgentsCompletionRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + +class AgentsCompletionRequest(BaseModel): + messages: List[AgentsCompletionRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + agent_id: str + r"""The ID of the agent to use for this completion.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[AgentsCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + + tool_choice: Optional[AgentsCompletionRequestToolChoice] = None + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agentscompletionstreamrequest.py b/src/mistralai/client/models/agentscompletionstreamrequest.py new file mode 100644 index 00000000..37d46c79 --- /dev/null +++ b/src/mistralai/client/models/agentscompletionstreamrequest.py @@ -0,0 +1,196 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentsCompletionStreamRequestStopTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionStreamRequestStop = TypeAliasType( + "AgentsCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +AgentsCompletionStreamRequestMessagesTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +AgentsCompletionStreamRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +AgentsCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) + + +AgentsCompletionStreamRequestToolChoice = TypeAliasType( + "AgentsCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) + + +class AgentsCompletionStreamRequestTypedDict(TypedDict): + messages: List[AgentsCompletionStreamRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + agent_id: str + r"""The ID of the agent to use for this completion.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[AgentsCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. 
Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + tool_choice: NotRequired[AgentsCompletionStreamRequestToolChoiceTypedDict] + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + +class AgentsCompletionStreamRequest(BaseModel): + messages: List[AgentsCompletionStreamRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + agent_id: str + r"""The ID of the agent to use for this completion.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[AgentsCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + + tool_choice: Optional[AgentsCompletionStreamRequestToolChoice] = None + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. 
A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + nullable_fields = [ + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and 
is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/agentupdaterequest.py b/src/mistralai/client/models/agentupdaterequest.py new file mode 100644 index 00000000..261ac069 --- /dev/null +++ b/src/mistralai/client/models/agentupdaterequest.py @@ -0,0 +1,133 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentUpdateRequestToolsTypedDict = TypeAliasType( + "AgentUpdateRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +AgentUpdateRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, 
Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class AgentUpdateRequestTypedDict(TypedDict): + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow during the conversation.""" + tools: NotRequired[List[AgentUpdateRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + model: NotRequired[Nullable[str]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + handoffs: NotRequired[Nullable[List[str]]] + deployment_chat: NotRequired[Nullable[bool]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class AgentUpdateRequest(BaseModel): + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[AgentUpdateRequestTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + model: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + handoffs: OptionalNullable[List[str]] = UNSET + + deployment_chat: OptionalNullable[bool] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", + ] + nullable_fields = [ + "instructions", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = 
serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/apiendpoint.py b/src/mistralai/client/models/apiendpoint.py new file mode 100644 index 00000000..a6072d56 --- /dev/null +++ b/src/mistralai/client/models/apiendpoint.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +APIEndpoint = Union[ + Literal[ + "/v1/chat/completions", + "/v1/embeddings", + "/v1/fim/completions", + "/v1/moderations", + "/v1/chat/moderations", + "/v1/ocr", + "/v1/classifications", + "/v1/chat/classifications", + "/v1/conversations", + "/v1/audio/transcriptions", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/archiveftmodelout.py b/src/mistralai/client/models/archiveftmodelout.py new file mode 100644 index 00000000..6108c7e1 --- /dev/null +++ b/src/mistralai/client/models/archiveftmodelout.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ArchiveFTModelOutObject = Literal["model",] + + +class ArchiveFTModelOutTypedDict(TypedDict): + id: str + object: NotRequired[ArchiveFTModelOutObject] + archived: NotRequired[bool] + + +class ArchiveFTModelOut(BaseModel): + id: str + + object: Optional[ArchiveFTModelOutObject] = "model" + + archived: Optional[bool] = True diff --git a/src/mistralai/client/models/assistantmessage.py b/src/mistralai/client/models/assistantmessage.py new file mode 100644 index 00000000..3ba14ce7 --- /dev/null +++ b/src/mistralai/client/models/assistantmessage.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +AssistantMessageContentTypedDict = TypeAliasType( + "AssistantMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +AssistantMessageContent = TypeAliasType( + "AssistantMessageContent", Union[str, List[ContentChunk]] +) + + +AssistantMessageRole = Literal["assistant",] + + +class AssistantMessageTypedDict(TypedDict): + content: NotRequired[Nullable[AssistantMessageContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + prefix: NotRequired[bool] + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" + role: NotRequired[AssistantMessageRole] + + +class AssistantMessage(BaseModel): + content: OptionalNullable[AssistantMessageContent] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + + prefix: Optional[bool] = False + r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" + + role: Optional[AssistantMessageRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["content", "tool_calls", "prefix", "role"] + nullable_fields = ["content", "tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/audiochunk.py b/src/mistralai/client/models/audiochunk.py new file mode 100644 index 00000000..80d836f2 --- /dev/null +++ b/src/mistralai/client/models/audiochunk.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +AudioChunkType = Literal["input_audio",] + + +class AudioChunkTypedDict(TypedDict): + input_audio: str + type: NotRequired[AudioChunkType] + + +class AudioChunk(BaseModel): + input_audio: str + + type: Optional[AudioChunkType] = "input_audio" diff --git a/src/mistralai/client/models/audioencoding.py b/src/mistralai/client/models/audioencoding.py new file mode 100644 index 00000000..557f53ed --- /dev/null +++ b/src/mistralai/client/models/audioencoding.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +AudioEncoding = Union[ + Literal[ + "pcm_s16le", + "pcm_s32le", + "pcm_f16le", + "pcm_f32le", + "pcm_mulaw", + "pcm_alaw", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/audioformat.py b/src/mistralai/client/models/audioformat.py new file mode 100644 index 00000000..7ea10b3a --- /dev/null +++ b/src/mistralai/client/models/audioformat.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .audioencoding import AudioEncoding +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class AudioFormatTypedDict(TypedDict): + encoding: AudioEncoding + sample_rate: int + + +class AudioFormat(BaseModel): + encoding: AudioEncoding + + sample_rate: int diff --git a/src/mistralai/client/models/audiotranscriptionrequest.py b/src/mistralai/client/models/audiotranscriptionrequest.py new file mode 100644 index 00000000..78a37978 --- /dev/null +++ b/src/mistralai/client/models/audiotranscriptionrequest.py @@ -0,0 +1,113 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .file import File, FileTypedDict +from .timestampgranularity import TimestampGranularity +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata, validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AudioTranscriptionRequestTypedDict(TypedDict): + model: str + r"""ID of the model to be used.""" + file: NotRequired[FileTypedDict] + file_url: NotRequired[Nullable[str]] + r"""Url of a file to be transcribed""" + file_id: NotRequired[Nullable[str]] + r"""ID of a file uploaded to /v1/files""" + language: NotRequired[Nullable[str]] + r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" + temperature: NotRequired[Nullable[float]] + stream: Literal[False] + diarize: NotRequired[bool] + context_bias: NotRequired[List[str]] + timestamp_granularities: NotRequired[List[TimestampGranularity]] + r"""Granularities of timestamps to include in the response.""" + + +class AudioTranscriptionRequest(BaseModel): + model: Annotated[str, FieldMetadata(multipart=True)] + r"""ID of the model to be used.""" + + file: Annotated[ + Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) + ] = None + + file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Url of a file to be transcribed""" + + file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""ID of a file uploaded to /v1/files""" + + language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" + + temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( + UNSET + ) + + STREAM: Annotated[ + Annotated[Optional[Literal[False]], AfterValidator(validate_const(False))], + pydantic.Field(alias="stream"), + FieldMetadata(multipart=True), + ] = False + + diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False + + context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None + + timestamp_granularities: Annotated[ + Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) + ] = None + r"""Granularities of timestamps to include in the response.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "diarize", + "context_bias", + "timestamp_granularities", + ] + nullable_fields = ["file_url", "file_id", "language", "temperature"] + null_default_fields = [] + + serialized = handler(self) + + m = 
{} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/audiotranscriptionrequeststream.py b/src/mistralai/client/models/audiotranscriptionrequeststream.py new file mode 100644 index 00000000..35064361 --- /dev/null +++ b/src/mistralai/client/models/audiotranscriptionrequeststream.py @@ -0,0 +1,111 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .file import File, FileTypedDict +from .timestampgranularity import TimestampGranularity +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata, validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class AudioTranscriptionRequestStreamTypedDict(TypedDict): + model: str + file: NotRequired[FileTypedDict] + file_url: NotRequired[Nullable[str]] + r"""Url of a file to be transcribed""" + file_id: NotRequired[Nullable[str]] + r"""ID of a file uploaded to /v1/files""" + language: NotRequired[Nullable[str]] + r"""Language of the audio, e.g. 'en'. 
Providing the language can boost accuracy.""" + temperature: NotRequired[Nullable[float]] + stream: Literal[True] + diarize: NotRequired[bool] + context_bias: NotRequired[List[str]] + timestamp_granularities: NotRequired[List[TimestampGranularity]] + r"""Granularities of timestamps to include in the response.""" + + +class AudioTranscriptionRequestStream(BaseModel): + model: Annotated[str, FieldMetadata(multipart=True)] + + file: Annotated[ + Optional[File], FieldMetadata(multipart=MultipartFormMetadata(file=True)) + ] = None + + file_url: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Url of a file to be transcribed""" + + file_id: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""ID of a file uploaded to /v1/files""" + + language: Annotated[OptionalNullable[str], FieldMetadata(multipart=True)] = UNSET + r"""Language of the audio, e.g. 'en'. Providing the language can boost accuracy.""" + + temperature: Annotated[OptionalNullable[float], FieldMetadata(multipart=True)] = ( + UNSET + ) + + STREAM: Annotated[ + Annotated[Optional[Literal[True]], AfterValidator(validate_const(True))], + pydantic.Field(alias="stream"), + FieldMetadata(multipart=True), + ] = True + + diarize: Annotated[Optional[bool], FieldMetadata(multipart=True)] = False + + context_bias: Annotated[Optional[List[str]], FieldMetadata(multipart=True)] = None + + timestamp_granularities: Annotated[ + Optional[List[TimestampGranularity]], FieldMetadata(multipart=True) + ] = None + r"""Granularities of timestamps to include in the response.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "diarize", + "context_bias", + "timestamp_granularities", + ] + nullable_fields = ["file_url", "file_id", "language", "temperature"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in 
type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/basemodelcard.py b/src/mistralai/client/models/basemodelcard.py new file mode 100644 index 00000000..8ce7f139 --- /dev/null +++ b/src/mistralai/client/models/basemodelcard.py @@ -0,0 +1,116 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +BaseModelCardType = Literal["base",] + + +class BaseModelCardTypedDict(TypedDict): + id: str + capabilities: ModelCapabilitiesTypedDict + object: NotRequired[str] + created: NotRequired[int] + owned_by: NotRequired[str] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] + default_model_temperature: NotRequired[Nullable[float]] + type: BaseModelCardType + + +class BaseModelCard(BaseModel): + id: str + + 
capabilities: ModelCapabilities + + object: Optional[str] = "model" + + created: Optional[int] = None + + owned_by: Optional[str] = "mistralai" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + deprecation: OptionalNullable[datetime] = UNSET + + deprecation_replacement_model: OptionalNullable[str] = UNSET + + default_model_temperature: OptionalNullable[float] = UNSET + + TYPE: Annotated[ + Annotated[Optional[BaseModelCardType], AfterValidator(validate_const("base"))], + pydantic.Field(alias="type"), + ] = "base" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + "type", + ] + nullable_fields = [ + "name", + "description", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/batcherror.py b/src/mistralai/client/models/batcherror.py new file mode 100644 index 00000000..a9c8362b --- /dev/null +++ b/src/mistralai/client/models/batcherror.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class BatchErrorTypedDict(TypedDict): + message: str + count: NotRequired[int] + + +class BatchError(BaseModel): + message: str + + count: Optional[int] = 1 diff --git a/src/mistralai/client/models/batchjobin.py b/src/mistralai/client/models/batchjobin.py new file mode 100644 index 00000000..39cf70b5 --- /dev/null +++ b/src/mistralai/client/models/batchjobin.py @@ -0,0 +1,88 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .apiendpoint import APIEndpoint +from .batchrequest import BatchRequest, BatchRequestTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Dict, List, Optional +from typing_extensions import NotRequired, TypedDict + + +class BatchJobInTypedDict(TypedDict): + endpoint: APIEndpoint + input_files: NotRequired[Nullable[List[str]]] + r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" + requests: NotRequired[Nullable[List[BatchRequestTypedDict]]] + model: NotRequired[Nullable[str]] + r"""The model to be used for batch inference.""" + agent_id: NotRequired[Nullable[str]] + r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" + metadata: NotRequired[Nullable[Dict[str, str]]] + r"""The metadata of your choice to be associated with the batch inference job.""" + timeout_hours: NotRequired[int] + r"""The timeout in hours for the batch inference job.""" + + +class BatchJobIn(BaseModel): + endpoint: APIEndpoint + + input_files: OptionalNullable[List[str]] = UNSET + r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" + + requests: OptionalNullable[List[BatchRequest]] = UNSET + + model: OptionalNullable[str] = UNSET + r"""The model to be used for batch inference.""" + + agent_id: OptionalNullable[str] = UNSET + r"""In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here.""" + + metadata: OptionalNullable[Dict[str, str]] = UNSET + r"""The metadata of your choice to be associated with the batch inference job.""" + + timeout_hours: Optional[int] = 24 + r"""The timeout in hours for the batch inference job.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "input_files", + "requests", + "model", + "agent_id", + "metadata", + "timeout_hours", + ] + nullable_fields = ["input_files", "requests", "model", "agent_id", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/batchjobout.py b/src/mistralai/client/models/batchjobout.py new file mode 100644 index 00000000..008d43b4 --- /dev/null +++ b/src/mistralai/client/models/batchjobout.py @@ -0,0 +1,129 @@ +"""Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .batcherror import BatchError, BatchErrorTypedDict +from .batchjobstatus import BatchJobStatus +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +BatchJobOutObject = Literal["batch",] + + +class BatchJobOutTypedDict(TypedDict): + id: str + input_files: List[str] + endpoint: str + errors: List[BatchErrorTypedDict] + status: BatchJobStatus + created_at: int + total_requests: int + completed_requests: int + succeeded_requests: int + failed_requests: int + object: NotRequired[BatchJobOutObject] + metadata: NotRequired[Nullable[Dict[str, Any]]] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + output_file: NotRequired[Nullable[str]] + error_file: NotRequired[Nullable[str]] + outputs: NotRequired[Nullable[List[Dict[str, Any]]]] + started_at: NotRequired[Nullable[int]] + completed_at: NotRequired[Nullable[int]] + + +class BatchJobOut(BaseModel): + id: str + + input_files: List[str] + + endpoint: str + + errors: List[BatchError] + + status: BatchJobStatus + + created_at: int + + total_requests: int + + completed_requests: int + + succeeded_requests: int + + failed_requests: int + + object: Optional[BatchJobOutObject] = "batch" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + output_file: OptionalNullable[str] = UNSET + + error_file: OptionalNullable[str] = UNSET + + outputs: OptionalNullable[List[Dict[str, Any]]] = UNSET + + started_at: OptionalNullable[int] = UNSET + + completed_at: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + 
optional_fields = [ + "object", + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + nullable_fields = [ + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/batchjobsout.py b/src/mistralai/client/models/batchjobsout.py new file mode 100644 index 00000000..2654dac0 --- /dev/null +++ b/src/mistralai/client/models/batchjobsout.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .batchjobout import BatchJobOut, BatchJobOutTypedDict +from mistralai.client.types import BaseModel +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +BatchJobsOutObject = Literal["list",] + + +class BatchJobsOutTypedDict(TypedDict): + total: int + data: NotRequired[List[BatchJobOutTypedDict]] + object: NotRequired[BatchJobsOutObject] + + +class BatchJobsOut(BaseModel): + total: int + + data: Optional[List[BatchJobOut]] = None + + object: Optional[BatchJobsOutObject] = "list" diff --git a/src/mistralai/client/models/batchjobstatus.py b/src/mistralai/client/models/batchjobstatus.py new file mode 100644 index 00000000..4b28059b --- /dev/null +++ b/src/mistralai/client/models/batchjobstatus.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +BatchJobStatus = Literal[ + "QUEUED", + "RUNNING", + "SUCCESS", + "FAILED", + "TIMEOUT_EXCEEDED", + "CANCELLATION_REQUESTED", + "CANCELLED", +] diff --git a/src/mistralai/client/models/batchrequest.py b/src/mistralai/client/models/batchrequest.py new file mode 100644 index 00000000..24f50a9a --- /dev/null +++ b/src/mistralai/client/models/batchrequest.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class BatchRequestTypedDict(TypedDict): + body: Dict[str, Any] + custom_id: NotRequired[Nullable[str]] + + +class BatchRequest(BaseModel): + body: Dict[str, Any] + + custom_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["custom_id"] + nullable_fields = ["custom_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/builtinconnectors.py b/src/mistralai/client/models/builtinconnectors.py new file mode 100644 index 00000000..6a3b2476 --- /dev/null +++ b/src/mistralai/client/models/builtinconnectors.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +BuiltInConnectors = Literal[ + "web_search", + "web_search_premium", + "code_interpreter", + "image_generation", + "document_library", +] diff --git a/src/mistralai/client/models/chatclassificationrequest.py b/src/mistralai/client/models/chatclassificationrequest.py new file mode 100644 index 00000000..45081022 --- /dev/null +++ b/src/mistralai/client/models/chatclassificationrequest.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .inputs import Inputs, InputsTypedDict +from mistralai.client.types import BaseModel +import pydantic +from typing_extensions import Annotated, TypedDict + + +class ChatClassificationRequestTypedDict(TypedDict): + model: str + inputs: InputsTypedDict + r"""Chat to classify""" + + +class ChatClassificationRequest(BaseModel): + model: str + + inputs: Annotated[Inputs, pydantic.Field(alias="input")] + r"""Chat to classify""" diff --git a/src/mistralai/client/models/chatcompletionchoice.py b/src/mistralai/client/models/chatcompletionchoice.py new file mode 100644 index 00000000..5d888cfd --- /dev/null +++ b/src/mistralai/client/models/chatcompletionchoice.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from mistralai.client.types import BaseModel, UnrecognizedStr +from typing import Literal, Union +from typing_extensions import TypedDict + + +FinishReason = Union[ + Literal[ + "stop", + "length", + "model_length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class ChatCompletionChoiceTypedDict(TypedDict): + index: int + message: AssistantMessageTypedDict + finish_reason: FinishReason + + +class ChatCompletionChoice(BaseModel): + index: int + + message: AssistantMessage + + finish_reason: FinishReason diff --git a/src/mistralai/client/models/chatcompletionrequest.py b/src/mistralai/client/models/chatcompletionrequest.py new file mode 100644 index 00000000..30fce28d --- /dev/null +++ b/src/mistralai/client/models/chatcompletionrequest.py @@ -0,0 +1,221 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +Stop = TypeAliasType("Stop", Union[str, List[str]]) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +MessagesTypedDict = TypeAliasType( + "MessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +Messages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionRequestToolChoice = TypeAliasType( + "ChatCompletionRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[MessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[StopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[Messages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[Stop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. 
`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + safe_prompt: Optional[bool] = None + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/chatcompletionresponse.py b/src/mistralai/client/models/chatcompletionresponse.py new file mode 100644 index 00000000..60a1f561 --- /dev/null +++ b/src/mistralai/client/models/chatcompletionresponse.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ChatCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class ChatCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/src/mistralai/client/models/chatcompletionstreamrequest.py b/src/mistralai/client/models/chatcompletionstreamrequest.py new file mode 100644 index 00000000..21dad38b --- /dev/null +++ b/src/mistralai/client/models/chatcompletionstreamrequest.py @@ -0,0 +1,223 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .mistralpromptmode import MistralPromptMode +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .tool import Tool, ToolTypedDict +from .toolchoice import ToolChoice, ToolChoiceTypedDict +from .toolchoiceenum import ToolChoiceEnum +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +ChatCompletionStreamRequestMessagesTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +ChatCompletionStreamRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatCompletionStreamRequestToolChoiceTypedDict = TypeAliasType( + "ChatCompletionStreamRequestToolChoiceTypedDict", + Union[ToolChoiceTypedDict, ToolChoiceEnum], +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +ChatCompletionStreamRequestToolChoice = TypeAliasType( + "ChatCompletionStreamRequestToolChoice", Union[ToolChoice, ToolChoiceEnum] +) +r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + +class ChatCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + messages: List[ChatCompletionStreamRequestMessagesTypedDict] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + response_format: NotRequired[ResponseFormatTypedDict] + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + tools: NotRequired[Nullable[List[ToolTypedDict]]] + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + tool_choice: NotRequired[ChatCompletionStreamRequestToolChoiceTypedDict] + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + presence_penalty: NotRequired[float] + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + frequency_penalty: NotRequired[float] + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. 
A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + n: NotRequired[Nullable[int]] + r"""Number of completions to return for each request, input tokens are only billed once.""" + prediction: NotRequired[PredictionTypedDict] + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + parallel_tool_calls: NotRequired[bool] + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + prompt_mode: NotRequired[Nullable[MistralPromptMode]] + r"""Allows toggling between the reasoning mode and no system prompt. When set to `reasoning` the system prompt for reasoning models will be used.""" + safe_prompt: NotRequired[bool] + r"""Whether to inject a safety prompt before all conversations.""" + + +class ChatCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" + + messages: List[ChatCompletionStreamRequestMessages] + r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = None + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[ChatCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + response_format: Optional[ResponseFormat] = None + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + tools: OptionalNullable[List[Tool]] = UNSET + r"""A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for.""" + + tool_choice: Optional[ChatCompletionStreamRequestToolChoice] = None + r"""Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. 
Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.""" + + presence_penalty: Optional[float] = None + r"""The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative.""" + + frequency_penalty: Optional[float] = None + r"""The `frequency_penalty` penalizes the repetition of words based on their frequency in the generated text. A higher frequency penalty discourages the model from repeating words that have already appeared frequently in the output, promoting diversity and reducing repetition.""" + + n: OptionalNullable[int] = UNSET + r"""Number of completions to return for each request, input tokens are only billed once.""" + + prediction: Optional[Prediction] = None + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + parallel_tool_calls: Optional[bool] = None + r"""Whether to enable parallel function calling during tool use, when enabled the model can call multiple tools in parallel.""" + + prompt_mode: OptionalNullable[MistralPromptMode] = UNSET + r"""Allows toggling between the reasoning mode and no system prompt. 
When set to `reasoning` the system prompt for reasoning models will be used.""" + + safe_prompt: Optional[bool] = None + r"""Whether to inject a safety prompt before all conversations.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/chatmoderationrequest.py b/src/mistralai/client/models/chatmoderationrequest.py new file mode 100644 index 00000000..631c914d --- /dev/null +++ b/src/mistralai/client/models/chatmoderationrequest.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +import pydantic +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +TwoTypedDict = TypeAliasType( + "TwoTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +Two = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +OneTypedDict = TypeAliasType( + "OneTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +One = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +ChatModerationRequestInputsTypedDict = TypeAliasType( + "ChatModerationRequestInputsTypedDict", + Union[List[OneTypedDict], List[List[TwoTypedDict]]], +) +r"""Chat to classify""" + + +ChatModerationRequestInputs = TypeAliasType( + "ChatModerationRequestInputs", Union[List[One], List[List[Two]]] +) +r"""Chat to classify""" + + +class ChatModerationRequestTypedDict(TypedDict): + inputs: ChatModerationRequestInputsTypedDict + r"""Chat to classify""" + model: str + + +class ChatModerationRequest(BaseModel): + inputs: 
Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")] + r"""Chat to classify""" + + model: str diff --git a/src/mistralai/client/models/checkpointout.py b/src/mistralai/client/models/checkpointout.py new file mode 100644 index 00000000..89189ed1 --- /dev/null +++ b/src/mistralai/client/models/checkpointout.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .metricout import MetricOut, MetricOutTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class CheckpointOutTypedDict(TypedDict): + metrics: MetricOutTypedDict + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + step_number: int + r"""The step number that the checkpoint was created at.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" + + +class CheckpointOut(BaseModel): + metrics: MetricOut + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + step_number: int + r"""The step number that the checkpoint was created at.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" diff --git a/src/mistralai/client/models/classificationrequest.py b/src/mistralai/client/models/classificationrequest.py new file mode 100644 index 00000000..c724ff53 --- /dev/null +++ b/src/mistralai/client/models/classificationrequest.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, List, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ClassificationRequestInputsTypedDict = TypeAliasType( + "ClassificationRequestInputsTypedDict", Union[str, List[str]] +) +r"""Text to classify.""" + + +ClassificationRequestInputs = TypeAliasType( + "ClassificationRequestInputs", Union[str, List[str]] +) +r"""Text to classify.""" + + +class ClassificationRequestTypedDict(TypedDict): + model: str + r"""ID of the model to use.""" + inputs: ClassificationRequestInputsTypedDict + r"""Text to classify.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + + +class ClassificationRequest(BaseModel): + model: str + r"""ID of the model to use.""" + + inputs: Annotated[ClassificationRequestInputs, pydantic.Field(alias="input")] + r"""Text to classify.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["metadata"] + nullable_fields = ["metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classificationresponse.py b/src/mistralai/client/models/classificationresponse.py new file mode 100644 index 00000000..4bc21a58 --- 
/dev/null +++ b/src/mistralai/client/models/classificationresponse.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .classificationtargetresult import ( + ClassificationTargetResult, + ClassificationTargetResultTypedDict, +) +from mistralai.client.types import BaseModel +from typing import Dict, List +from typing_extensions import TypedDict + + +class ClassificationResponseTypedDict(TypedDict): + id: str + model: str + results: List[Dict[str, ClassificationTargetResultTypedDict]] + + +class ClassificationResponse(BaseModel): + id: str + + model: str + + results: List[Dict[str, ClassificationTargetResult]] diff --git a/src/mistralai/client/models/classificationtargetresult.py b/src/mistralai/client/models/classificationtargetresult.py new file mode 100644 index 00000000..89a137c3 --- /dev/null +++ b/src/mistralai/client/models/classificationtargetresult.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Dict +from typing_extensions import TypedDict + + +class ClassificationTargetResultTypedDict(TypedDict): + scores: Dict[str, float] + + +class ClassificationTargetResult(BaseModel): + scores: Dict[str, float] diff --git a/src/mistralai/client/models/classifierdetailedjobout.py b/src/mistralai/client/models/classifierdetailedjobout.py new file mode 100644 index 00000000..1de4534f --- /dev/null +++ b/src/mistralai/client/models/classifierdetailedjobout.py @@ -0,0 +1,164 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .eventout import EventOut, EventOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierDetailedJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] + + +ClassifierDetailedJobOutObject = Literal["job",] + + +ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict + + +ClassifierDetailedJobOutIntegrations = WandbIntegrationOut + + +ClassifierDetailedJobOutJobType = Literal["classifier",] + + +class ClassifierDetailedJobOutTypedDict(TypedDict): + id: str + auto_start: bool + model: str + r"""The name of the model to fine-tune.""" + status: ClassifierDetailedJobOutStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: ClassifierTrainingParametersTypedDict + classifier_targets: List[ClassifierTargetOutTypedDict] + validation_files: NotRequired[Nullable[List[str]]] + object: NotRequired[ClassifierDetailedJobOutObject] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[ClassifierDetailedJobOutIntegrationsTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: 
NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[ClassifierDetailedJobOutJobType] + events: NotRequired[List[EventOutTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointOutTypedDict]] + + +class ClassifierDetailedJobOut(BaseModel): + id: str + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: ClassifierDetailedJobOutStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: ClassifierTrainingParameters + + classifier_targets: List[ClassifierTargetOut] + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Optional[ClassifierDetailedJobOutObject] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegrations]] = UNSET + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[ClassifierDetailedJobOutJobType] = "classifier" + + events: Optional[List[EventOut]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[CheckpointOut]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + "events", + "checkpoints", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classifierftmodelout.py b/src/mistralai/client/models/classifierftmodelout.py new file mode 100644 index 00000000..a4572108 --- /dev/null +++ b/src/mistralai/client/models/classifierftmodelout.py @@ -0,0 +1,114 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict +from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierFTModelOutObject = Literal["model",] + + +ClassifierFTModelOutModelType = Literal["classifier",] + + +class ClassifierFTModelOutTypedDict(TypedDict): + id: str + created: int + owned_by: str + workspace_id: str + root: str + root_version: str + archived: bool + capabilities: FTModelCapabilitiesOutTypedDict + job: str + classifier_targets: List[ClassifierTargetOutTypedDict] + object: NotRequired[ClassifierFTModelOutObject] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + model_type: NotRequired[ClassifierFTModelOutModelType] + + +class ClassifierFTModelOut(BaseModel): + id: str + + created: int + + owned_by: str + + workspace_id: str + + root: str + + root_version: str + + archived: bool + + capabilities: FTModelCapabilitiesOut + + job: str + + classifier_targets: List[ClassifierTargetOut] + + object: Optional[ClassifierFTModelOutObject] = "model" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + model_type: Optional[ClassifierFTModelOutModelType] = "classifier" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "name", + "description", + "max_context_length", + "aliases", + "model_type", + ] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = 
handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classifierjobout.py b/src/mistralai/client/models/classifierjobout.py new file mode 100644 index 00000000..ab1e261d --- /dev/null +++ b/src/mistralai/client/models/classifierjobout.py @@ -0,0 +1,173 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ClassifierJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] +r"""The current status of the fine-tuning job.""" + + +ClassifierJobOutObject = Literal["job",] +r"""The object type of the fine-tuning job.""" + + +ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict + + +ClassifierJobOutIntegrations = WandbIntegrationOut + + +ClassifierJobOutJobType = Literal["classifier",] +r"""The type of 
job (`FT` for fine-tuning).""" + + +class ClassifierJobOutTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + model: str + r"""The name of the model to fine-tune.""" + status: ClassifierJobOutStatus + r"""The current status of the fine-tuning job.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: ClassifierTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data.""" + object: NotRequired[ClassifierJobOutObject] + r"""The object type of the fine-tuning job.""" + fine_tuned_model: NotRequired[Nullable[str]] + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationsTypedDict]]] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[ClassifierJobOutJobType] + r"""The type of job (`FT` for fine-tuning).""" + + +class ClassifierJobOut(BaseModel): + id: str + r"""The ID of the job.""" + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: ClassifierJobOutStatus + r"""The current status of the fine-tuning job.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + + hyperparameters: ClassifierTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + + object: Optional[ClassifierJobOutObject] = "job" + r"""The object type of the fine-tuning job.""" + + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + integrations: OptionalNullable[List[ClassifierJobOutIntegrations]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[ClassifierJobOutJobType] = "classifier" + r"""The type of job (`FT` for fine-tuning).""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classifiertargetin.py b/src/mistralai/client/models/classifiertargetin.py new file mode 100644 index 00000000..231ee21e --- /dev/null +++ b/src/mistralai/client/models/classifiertargetin.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTargetInTypedDict(TypedDict): + name: str + labels: List[str] + weight: NotRequired[float] + loss_function: NotRequired[Nullable[FTClassifierLossFunction]] + + +class ClassifierTargetIn(BaseModel): + name: str + + labels: List[str] + + weight: Optional[float] = 1 + + loss_function: OptionalNullable[FTClassifierLossFunction] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["weight", "loss_function"] + nullable_fields = ["loss_function"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classifiertargetout.py b/src/mistralai/client/models/classifiertargetout.py new file mode 100644 index 00000000..957104a7 --- /dev/null +++ b/src/mistralai/client/models/classifiertargetout.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ftclassifierlossfunction import FTClassifierLossFunction +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ClassifierTargetOutTypedDict(TypedDict): + name: str + labels: List[str] + weight: float + loss_function: FTClassifierLossFunction + + +class ClassifierTargetOut(BaseModel): + name: str + + labels: List[str] + + weight: float + + loss_function: FTClassifierLossFunction diff --git a/src/mistralai/client/models/classifiertrainingparameters.py b/src/mistralai/client/models/classifiertrainingparameters.py new file mode 100644 index 00000000..60f53c37 --- /dev/null +++ b/src/mistralai/client/models/classifiertrainingparameters.py @@ -0,0 +1,79 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + weight_decay: NotRequired[Nullable[float]] + warmup_fraction: NotRequired[Nullable[float]] + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + + +class ClassifierTrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + + learning_rate: Optional[float] = 0.0001 + + weight_decay: OptionalNullable[float] = UNSET + + warmup_fraction: OptionalNullable[float] = UNSET + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + 
"epochs", + "seq_len", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/classifiertrainingparametersin.py b/src/mistralai/client/models/classifiertrainingparametersin.py new file mode 100644 index 00000000..e24c9dde --- /dev/null +++ b/src/mistralai/client/models/classifiertrainingparametersin.py @@ -0,0 +1,91 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ClassifierTrainingParametersInTypedDict(TypedDict): + r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" + + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: NotRequired[float] + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + weight_decay: NotRequired[Nullable[float]] + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" + warmup_fraction: NotRequired[Nullable[float]] + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + + +class ClassifierTrainingParametersIn(BaseModel): + r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + + learning_rate: Optional[float] = 0.0001 + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + + weight_decay: OptionalNullable[float] = UNSET + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. 
This term reduces the magnitude of the weights and prevents them from growing too large.""" + + warmup_fraction: OptionalNullable[float] = UNSET + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/codeinterpretertool.py b/src/mistralai/client/models/codeinterpretertool.py new file mode 100644 index 00000000..faf5b0b7 --- /dev/null +++ b/src/mistralai/client/models/codeinterpretertool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +CodeInterpreterToolType = Literal["code_interpreter",] + + +class CodeInterpreterToolTypedDict(TypedDict): + type: NotRequired[CodeInterpreterToolType] + + +class CodeInterpreterTool(BaseModel): + type: Optional[CodeInterpreterToolType] = "code_interpreter" diff --git a/src/mistralai/client/models/completionargs.py b/src/mistralai/client/models/completionargs.py new file mode 100644 index 00000000..010910f6 --- /dev/null +++ b/src/mistralai/client/models/completionargs.py @@ -0,0 +1,107 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict +from .prediction import Prediction, PredictionTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from .toolchoiceenum import ToolChoiceEnum +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionArgsTypedDict(TypedDict): + r"""White-listed arguments from the completion API""" + + stop: NotRequired[Nullable[CompletionArgsStopTypedDict]] + presence_penalty: NotRequired[Nullable[float]] + frequency_penalty: NotRequired[Nullable[float]] + temperature: NotRequired[Nullable[float]] + top_p: NotRequired[Nullable[float]] + max_tokens: NotRequired[Nullable[int]] + random_seed: NotRequired[Nullable[int]] + prediction: NotRequired[Nullable[PredictionTypedDict]] + response_format: NotRequired[Nullable[ResponseFormatTypedDict]] + tool_choice: NotRequired[ToolChoiceEnum] + + +class CompletionArgs(BaseModel): + r"""White-listed arguments from 
the completion API""" + + stop: OptionalNullable[CompletionArgsStop] = UNSET + + presence_penalty: OptionalNullable[float] = UNSET + + frequency_penalty: OptionalNullable[float] = UNSET + + temperature: OptionalNullable[float] = UNSET + + top_p: OptionalNullable[float] = UNSET + + max_tokens: OptionalNullable[int] = UNSET + + random_seed: OptionalNullable[int] = UNSET + + prediction: OptionalNullable[Prediction] = UNSET + + response_format: OptionalNullable[ResponseFormat] = UNSET + + tool_choice: Optional[ToolChoiceEnum] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + "tool_choice", + ] + nullable_fields = [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/completionargsstop.py b/src/mistralai/client/models/completionargsstop.py new file mode 100644 index 00000000..de7a0956 --- /dev/null +++ b/src/mistralai/client/models/completionargsstop.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import List, Union +from typing_extensions import TypeAliasType + + +CompletionArgsStopTypedDict = TypeAliasType( + "CompletionArgsStopTypedDict", Union[str, List[str]] +) + + +CompletionArgsStop = TypeAliasType("CompletionArgsStop", Union[str, List[str]]) diff --git a/src/mistralai/client/models/completionchunk.py b/src/mistralai/client/models/completionchunk.py new file mode 100644 index 00000000..9790db6f --- /dev/null +++ b/src/mistralai/client/models/completionchunk.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionresponsestreamchoice import ( + CompletionResponseStreamChoice, + CompletionResponseStreamChoiceTypedDict, +) +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionChunkTypedDict(TypedDict): + id: str + model: str + choices: List[CompletionResponseStreamChoiceTypedDict] + object: NotRequired[str] + created: NotRequired[int] + usage: NotRequired[UsageInfoTypedDict] + + +class CompletionChunk(BaseModel): + id: str + + model: str + + choices: List[CompletionResponseStreamChoice] + + object: Optional[str] = None + + created: Optional[int] = None + + usage: Optional[UsageInfo] = None diff --git a/src/mistralai/client/models/completiondetailedjobout.py b/src/mistralai/client/models/completiondetailedjobout.py new file mode 100644 index 00000000..85c0c803 --- /dev/null +++ b/src/mistralai/client/models/completiondetailedjobout.py @@ -0,0 +1,171 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .checkpointout import CheckpointOut, CheckpointOutTypedDict +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .eventout import EventOut, EventOutTypedDict +from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +CompletionDetailedJobOutStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] + + +CompletionDetailedJobOutObject = Literal["job",] + + +CompletionDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict + + +CompletionDetailedJobOutIntegrations = WandbIntegrationOut + + +CompletionDetailedJobOutJobType = Literal["completion",] + + +CompletionDetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict + + +CompletionDetailedJobOutRepositories = GithubRepositoryOut + + +class CompletionDetailedJobOutTypedDict(TypedDict): + id: str + auto_start: bool + model: str + r"""The name of the model to fine-tune.""" + status: CompletionDetailedJobOutStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: CompletionTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + object: NotRequired[CompletionDetailedJobOutObject] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[CompletionDetailedJobOutIntegrationsTypedDict]] + ] + 
trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[CompletionDetailedJobOutJobType] + repositories: NotRequired[List[CompletionDetailedJobOutRepositoriesTypedDict]] + events: NotRequired[List[EventOutTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointOutTypedDict]] + + +class CompletionDetailedJobOut(BaseModel): + id: str + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: CompletionDetailedJobOutStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: CompletionTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Optional[CompletionDetailedJobOutObject] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[CompletionDetailedJobOutIntegrations]] = UNSET + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[CompletionDetailedJobOutJobType] = "completion" + + repositories: Optional[List[CompletionDetailedJobOutRepositories]] = None + + events: Optional[List[EventOut]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[CheckpointOut]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + "repositories", + "events", + "checkpoints", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/completionevent.py b/src/mistralai/client/models/completionevent.py new file mode 100644 index 00000000..52db911e --- /dev/null +++ b/src/mistralai/client/models/completionevent.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionchunk import CompletionChunk, CompletionChunkTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class CompletionEventTypedDict(TypedDict): + data: CompletionChunkTypedDict + + +class CompletionEvent(BaseModel): + data: CompletionChunk diff --git a/src/mistralai/client/models/completionftmodelout.py b/src/mistralai/client/models/completionftmodelout.py new file mode 100644 index 00000000..ccecbb6a --- /dev/null +++ b/src/mistralai/client/models/completionftmodelout.py @@ -0,0 +1,110 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .ftmodelcapabilitiesout import ( + FTModelCapabilitiesOut, + FTModelCapabilitiesOutTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +CompletionFTModelOutObject = Literal["model",] + + +ModelType = Literal["completion",] + + +class CompletionFTModelOutTypedDict(TypedDict): + id: str + created: int + owned_by: str + workspace_id: str + root: str + root_version: str + archived: bool + capabilities: FTModelCapabilitiesOutTypedDict + job: str + object: NotRequired[CompletionFTModelOutObject] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + model_type: NotRequired[ModelType] + + +class CompletionFTModelOut(BaseModel): + id: str + + created: int + + owned_by: str + + workspace_id: str + + root: str + + root_version: str + + archived: bool + + capabilities: FTModelCapabilitiesOut + + job: str + + object: Optional[CompletionFTModelOutObject] = "model" + + name: OptionalNullable[str] = UNSET + + 
description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + model_type: Optional[ModelType] = "completion" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "name", + "description", + "max_context_length", + "aliases", + "model_type", + ] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/completionjobout.py b/src/mistralai/client/models/completionjobout.py new file mode 100644 index 00000000..ecd95bb9 --- /dev/null +++ b/src/mistralai/client/models/completionjobout.py @@ -0,0 +1,184 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict +from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict +from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +Status = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] +r"""The current status of the fine-tuning job.""" + + +CompletionJobOutObject = Literal["job",] +r"""The object type of the fine-tuning job.""" + + +IntegrationsTypedDict = WandbIntegrationOutTypedDict + + +Integrations = WandbIntegrationOut + + +JobType = Literal["completion",] +r"""The type of job (`FT` for fine-tuning).""" + + +RepositoriesTypedDict = GithubRepositoryOutTypedDict + + +Repositories = GithubRepositoryOut + + +class CompletionJobOutTypedDict(TypedDict): + id: str + r"""The ID of the job.""" + auto_start: bool + model: str + r"""The name of the model to fine-tune.""" + status: Status + r"""The current status of the fine-tuning job.""" + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + hyperparameters: CompletionTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain 
validation data.""" + object: NotRequired[CompletionJobOutObject] + r"""The object type of the fine-tuning job.""" + fine_tuned_model: NotRequired[Nullable[str]] + r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + integrations: NotRequired[Nullable[List[IntegrationsTypedDict]]] + r"""A list of integrations enabled for your fine-tuning job.""" + trained_tokens: NotRequired[Nullable[int]] + r"""Total number of tokens trained.""" + metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + job_type: NotRequired[JobType] + r"""The type of job (`FT` for fine-tuning).""" + repositories: NotRequired[List[RepositoriesTypedDict]] + + +class CompletionJobOut(BaseModel): + id: str + r"""The ID of the job.""" + + auto_start: bool + + model: str + r"""The name of the model to fine-tune.""" + + status: Status + r"""The current status of the fine-tuning job.""" + + created_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" + + modified_at: int + r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified.""" + + training_files: List[str] + r"""A list containing the IDs of uploaded files that contain training data.""" + + hyperparameters: CompletionTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data.""" + + object: Optional[CompletionJobOutObject] = "job" + r"""The object type of the fine-tuning job.""" + + fine_tuned_model: OptionalNullable[str] = UNSET + r"""The name of the fine-tuned model that is being created. 
The value will be `null` if the fine-tuning job is still running.""" + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + integrations: OptionalNullable[List[Integrations]] = UNSET + r"""A list of integrations enabled for your fine-tuning job.""" + + trained_tokens: OptionalNullable[int] = UNSET + r"""Total number of tokens trained.""" + + metadata: OptionalNullable[JobMetadataOut] = UNSET + + job_type: Optional[JobType] = "completion" + r"""The type of job (`FT` for fine-tuning).""" + + repositories: Optional[List[Repositories]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "job_type", + "repositories", + ] + nullable_fields = [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/completionresponsestreamchoice.py b/src/mistralai/client/models/completionresponsestreamchoice.py new file mode 100644 index 00000000..1b8d6fac --- /dev/null +++ 
b/src/mistralai/client/models/completionresponsestreamchoice.py @@ -0,0 +1,63 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .deltamessage import DeltaMessage, DeltaMessageTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from pydantic import model_serializer +from typing import Literal, Union +from typing_extensions import TypedDict + + +CompletionResponseStreamChoiceFinishReason = Union[ + Literal[ + "stop", + "length", + "error", + "tool_calls", + ], + UnrecognizedStr, +] + + +class CompletionResponseStreamChoiceTypedDict(TypedDict): + index: int + delta: DeltaMessageTypedDict + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + +class CompletionResponseStreamChoice(BaseModel): + index: int + + delta: DeltaMessage + + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [] + nullable_fields = ["finish_reason"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/completiontrainingparameters.py b/src/mistralai/client/models/completiontrainingparameters.py new file mode 100644 index 00000000..36b285ab --- /dev/null +++ b/src/mistralai/client/models/completiontrainingparameters.py @@ -0,0 +1,84 @@ +"""Code generated by 
Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionTrainingParametersTypedDict(TypedDict): + training_steps: NotRequired[Nullable[int]] + learning_rate: NotRequired[float] + weight_decay: NotRequired[Nullable[float]] + warmup_fraction: NotRequired[Nullable[float]] + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + fim_ratio: NotRequired[Nullable[float]] + + +class CompletionTrainingParameters(BaseModel): + training_steps: OptionalNullable[int] = UNSET + + learning_rate: Optional[float] = 0.0001 + + weight_decay: OptionalNullable[float] = UNSET + + warmup_fraction: OptionalNullable[float] = UNSET + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + fim_ratio: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = 
val + + return m diff --git a/src/mistralai/client/models/completiontrainingparametersin.py b/src/mistralai/client/models/completiontrainingparametersin.py new file mode 100644 index 00000000..d0315d99 --- /dev/null +++ b/src/mistralai/client/models/completiontrainingparametersin.py @@ -0,0 +1,96 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class CompletionTrainingParametersInTypedDict(TypedDict): + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + learning_rate: NotRequired[float] + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + weight_decay: NotRequired[Nullable[float]] + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" + warmup_fraction: NotRequired[Nullable[float]] + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + epochs: NotRequired[Nullable[float]] + seq_len: NotRequired[Nullable[int]] + fim_ratio: NotRequired[Nullable[float]] + + +class CompletionTrainingParametersIn(BaseModel): + r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" + + learning_rate: Optional[float] = 0.0001 + r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" + + weight_decay: OptionalNullable[float] = UNSET + r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" + + warmup_fraction: OptionalNullable[float] = UNSET + r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" + + epochs: OptionalNullable[float] = UNSET + + seq_len: OptionalNullable[int] = UNSET + + fim_ratio: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + nullable_fields = [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/contentchunk.py b/src/mistralai/client/models/contentchunk.py new file mode 100644 index 00000000..0a25423f --- /dev/null +++ b/src/mistralai/client/models/contentchunk.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .audiochunk import AudioChunk, AudioChunkTypedDict +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +ContentChunkTypedDict = TypeAliasType( + "ContentChunkTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + ReferenceChunkTypedDict, + FileChunkTypedDict, + AudioChunkTypedDict, + DocumentURLChunkTypedDict, + ThinkChunkTypedDict, + ], +) + + +ContentChunk = Annotated[ + Union[ + Annotated[ImageURLChunk, Tag("image_url")], + Annotated[DocumentURLChunk, Tag("document_url")], + Annotated[TextChunk, Tag("text")], + Annotated[ReferenceChunk, Tag("reference")], + Annotated[FileChunk, Tag("file")], + Annotated[ThinkChunk, Tag("thinking")], + Annotated[AudioChunk, Tag("input_audio")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] diff --git a/src/mistralai/client/models/conversationappendrequest.py b/src/mistralai/client/models/conversationappendrequest.py new file mode 100644 index 00000000..867c0a41 --- /dev/null +++ b/src/mistralai/client/models/conversationappendrequest.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendRequestHandoffExecution = Literal[ + "client", + "server", +] + + +class ConversationAppendRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationAppendRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationAppendRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/client/models/conversationappendstreamrequest.py b/src/mistralai/client/models/conversationappendstreamrequest.py new file mode 100644 index 00000000..f51407bf --- /dev/null +++ b/src/mistralai/client/models/conversationappendstreamrequest.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationAppendStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +class ConversationAppendStreamRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + + +class ConversationAppendStreamRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationAppendStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" diff --git a/src/mistralai/client/models/conversationevents.py b/src/mistralai/client/models/conversationevents.py new file mode 100644 index 00000000..308588a1 --- /dev/null +++ b/src/mistralai/client/models/conversationevents.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict +from .agenthandoffstartedevent import ( + AgentHandoffStartedEvent, + AgentHandoffStartedEventTypedDict, +) +from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict +from .messageoutputevent import MessageOutputEvent, MessageOutputEventTypedDict +from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict +from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict +from .responsestartedevent import ResponseStartedEvent, ResponseStartedEventTypedDict +from .ssetypes import SSETypes +from .toolexecutiondeltaevent import ( + ToolExecutionDeltaEvent, + ToolExecutionDeltaEventTypedDict, +) +from .toolexecutiondoneevent import ( + ToolExecutionDoneEvent, + ToolExecutionDoneEventTypedDict, +) +from .toolexecutionstartedevent import ( + ToolExecutionStartedEvent, + ToolExecutionStartedEventTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ConversationEventsDataTypedDict = TypeAliasType( + "ConversationEventsDataTypedDict", + Union[ + ResponseStartedEventTypedDict, + ResponseDoneEventTypedDict, + ResponseErrorEventTypedDict, + ToolExecutionStartedEventTypedDict, + ToolExecutionDeltaEventTypedDict, + ToolExecutionDoneEventTypedDict, + AgentHandoffStartedEventTypedDict, + AgentHandoffDoneEventTypedDict, + FunctionCallEventTypedDict, + MessageOutputEventTypedDict, + ], +) + + +ConversationEventsData = Annotated[ + Union[ + Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], + Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], + Annotated[ResponseDoneEvent, Tag("conversation.response.done")], + Annotated[ResponseErrorEvent, 
Tag("conversation.response.error")], + Annotated[ResponseStartedEvent, Tag("conversation.response.started")], + Annotated[FunctionCallEvent, Tag("function.call.delta")], + Annotated[MessageOutputEvent, Tag("message.output.delta")], + Annotated[ToolExecutionDeltaEvent, Tag("tool.execution.delta")], + Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], + Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class ConversationEventsTypedDict(TypedDict): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + data: ConversationEventsDataTypedDict + + +class ConversationEvents(BaseModel): + event: SSETypes + r"""Server side events sent when streaming a conversation response.""" + + data: ConversationEventsData diff --git a/src/mistralai/client/models/conversationhistory.py b/src/mistralai/client/models/conversationhistory.py new file mode 100644 index 00000000..40bd1e72 --- /dev/null +++ b/src/mistralai/client/models/conversationhistory.py @@ -0,0 +1,59 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.client.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationHistoryObject = Literal["conversation.history",] + + +EntriesTypedDict = TypeAliasType( + "EntriesTypedDict", + Union[ + FunctionResultEntryTypedDict, + MessageInputEntryTypedDict, + FunctionCallEntryTypedDict, + ToolExecutionEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], +) + + +Entries = TypeAliasType( + "Entries", + Union[ + FunctionResultEntry, + MessageInputEntry, + FunctionCallEntry, + ToolExecutionEntry, + MessageOutputEntry, + AgentHandoffEntry, + ], +) + + +class ConversationHistoryTypedDict(TypedDict): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + entries: List[EntriesTypedDict] + object: NotRequired[ConversationHistoryObject] + + +class ConversationHistory(BaseModel): + r"""Retrieve all entries in a conversation.""" + + conversation_id: str + + entries: List[Entries] + + object: Optional[ConversationHistoryObject] = "conversation.history" diff --git a/src/mistralai/client/models/conversationinputs.py b/src/mistralai/client/models/conversationinputs.py new file mode 100644 index 00000000..4d30cd76 --- /dev/null +++ b/src/mistralai/client/models/conversationinputs.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .inputentries import InputEntries, InputEntriesTypedDict +from typing import List, Union +from typing_extensions import TypeAliasType + + +ConversationInputsTypedDict = TypeAliasType( + "ConversationInputsTypedDict", Union[str, List[InputEntriesTypedDict]] +) + + +ConversationInputs = TypeAliasType("ConversationInputs", Union[str, List[InputEntries]]) diff --git a/src/mistralai/client/models/conversationmessages.py b/src/mistralai/client/models/conversationmessages.py new file mode 100644 index 00000000..1ea05369 --- /dev/null +++ b/src/mistralai/client/models/conversationmessages.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .messageentries import MessageEntries, MessageEntriesTypedDict +from mistralai.client.types import BaseModel +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ConversationMessagesObject = Literal["conversation.messages",] + + +class ConversationMessagesTypedDict(TypedDict): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + messages: List[MessageEntriesTypedDict] + object: NotRequired[ConversationMessagesObject] + + +class ConversationMessages(BaseModel): + r"""Similar to the conversation history but only keep the messages""" + + conversation_id: str + + messages: List[MessageEntries] + + object: Optional[ConversationMessagesObject] = "conversation.messages" diff --git a/src/mistralai/client/models/conversationrequest.py b/src/mistralai/client/models/conversationrequest.py new file mode 100644 index 00000000..e3211c4c --- /dev/null +++ b/src/mistralai/client/models/conversationrequest.py @@ -0,0 +1,160 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +HandoffExecution = Literal[ + "client", + "server", +] + + +ToolsTypedDict = TypeAliasType( + "ToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +Tools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +AgentVersionTypedDict = TypeAliasType("AgentVersionTypedDict", Union[str, int]) + + +AgentVersion = TypeAliasType("AgentVersion", Union[str, int]) + + +class ConversationRequestTypedDict(TypedDict): + 
inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[Nullable[bool]] + handoff_execution: NotRequired[Nullable[HandoffExecution]] + instructions: NotRequired[Nullable[str]] + tools: NotRequired[List[ToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[AgentVersionTypedDict]] + model: NotRequired[Nullable[str]] + + +class ConversationRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = False + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[HandoffExecution] = UNSET + + instructions: OptionalNullable[str] = UNSET + + tools: Optional[List[Tools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + agent_version: OptionalNullable[AgentVersion] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + nullable_fields = [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = 
serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationresponse.py b/src/mistralai/client/models/conversationresponse.py new file mode 100644 index 00000000..32d0f28f --- /dev/null +++ b/src/mistralai/client/models/conversationresponse.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from mistralai.client.types import BaseModel +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationResponseObject = Literal["conversation.response",] + + +OutputsTypedDict = TypeAliasType( + "OutputsTypedDict", + Union[ + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], +) + + +Outputs = TypeAliasType( + "Outputs", + Union[ToolExecutionEntry, FunctionCallEntry, MessageOutputEntry, AgentHandoffEntry], +) + + +class ConversationResponseTypedDict(TypedDict): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + outputs: List[OutputsTypedDict] + 
usage: ConversationUsageInfoTypedDict + object: NotRequired[ConversationResponseObject] + + +class ConversationResponse(BaseModel): + r"""The response after appending new entries to the conversation.""" + + conversation_id: str + + outputs: List[Outputs] + + usage: ConversationUsageInfo + + object: Optional[ConversationResponseObject] = "conversation.response" diff --git a/src/mistralai/client/models/conversationrestartrequest.py b/src/mistralai/client/models/conversationrestartrequest.py new file mode 100644 index 00000000..aa2bf7b0 --- /dev/null +++ b/src/mistralai/client/models/conversationrestartrequest.py @@ -0,0 +1,113 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationRestartRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationRestartRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartRequestAgentVersion = TypeAliasType( + "ConversationRestartRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" + + +class ConversationRestartRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputsTypedDict + from_entry_id: str + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + agent_version: NotRequired[ + Nullable[ConversationRestartRequestAgentVersionTypedDict] + ] + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +class ConversationRestartRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputs + + from_entry_id: str + + stream: Optional[bool] = False + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartRequestHandoffExecution] = "server" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[ConversationRestartRequestAgentVersion] = UNSET + r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + nullable_fields = ["metadata", "agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationrestartstreamrequest.py b/src/mistralai/client/models/conversationrestartstreamrequest.py new file mode 100644 index 00000000..689815eb --- /dev/null +++ b/src/mistralai/client/models/conversationrestartstreamrequest.py @@ -0,0 +1,117 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ConversationRestartStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationRestartStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRestartStreamRequestAgentVersionTypedDict", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +ConversationRestartStreamRequestAgentVersion = TypeAliasType( + "ConversationRestartStreamRequestAgentVersion", Union[str, int] +) +r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + +class ConversationRestartStreamRequestTypedDict(TypedDict): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputsTypedDict + from_entry_id: str + stream: NotRequired[bool] + store: NotRequired[bool] + r"""Whether to store the results into our servers or not.""" + handoff_execution: NotRequired[ConversationRestartStreamRequestHandoffExecution] + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + agent_version: NotRequired[ + Nullable[ConversationRestartStreamRequestAgentVersionTypedDict] + ] + r"""Specific version of the agent to use when restarting. 
If not provided, uses the current version.""" + + +class ConversationRestartStreamRequest(BaseModel): + r"""Request to restart a new conversation from a given entry in the conversation.""" + + inputs: ConversationInputs + + from_entry_id: str + + stream: Optional[bool] = True + + store: Optional[bool] = True + r"""Whether to store the results into our servers or not.""" + + handoff_execution: Optional[ConversationRestartStreamRequestHandoffExecution] = ( + "server" + ) + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + agent_version: OptionalNullable[ConversationRestartStreamRequestAgentVersion] = ( + UNSET + ) + r"""Specific version of the agent to use when restarting. If not provided, uses the current version.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + nullable_fields = ["metadata", "agent_version"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationstreamrequest.py b/src/mistralai/client/models/conversationstreamrequest.py new file mode 100644 index 00000000..219230a2 --- /dev/null +++ b/src/mistralai/client/models/conversationstreamrequest.py @@ -0,0 +1,166 @@ +"""Code generated 
by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .conversationinputs import ConversationInputs, ConversationInputsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationStreamRequestHandoffExecution = Literal[ + "client", + "server", +] + + +ConversationStreamRequestToolsTypedDict = TypeAliasType( + "ConversationStreamRequestToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ConversationStreamRequestTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + 
+ConversationStreamRequestAgentVersionTypedDict = TypeAliasType( + "ConversationStreamRequestAgentVersionTypedDict", Union[str, int] +) + + +ConversationStreamRequestAgentVersion = TypeAliasType( + "ConversationStreamRequestAgentVersion", Union[str, int] +) + + +class ConversationStreamRequestTypedDict(TypedDict): + inputs: ConversationInputsTypedDict + stream: NotRequired[bool] + store: NotRequired[Nullable[bool]] + handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] + instructions: NotRequired[Nullable[str]] + tools: NotRequired[List[ConversationStreamRequestToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + agent_id: NotRequired[Nullable[str]] + agent_version: NotRequired[Nullable[ConversationStreamRequestAgentVersionTypedDict]] + model: NotRequired[Nullable[str]] + + +class ConversationStreamRequest(BaseModel): + inputs: ConversationInputs + + stream: Optional[bool] = True + + store: OptionalNullable[bool] = UNSET + + handoff_execution: OptionalNullable[ConversationStreamRequestHandoffExecution] = ( + UNSET + ) + + instructions: OptionalNullable[str] = UNSET + + tools: Optional[List[ConversationStreamRequestTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: OptionalNullable[CompletionArgs] = UNSET + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + agent_version: OptionalNullable[ConversationStreamRequestAgentVersion] = UNSET + + model: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "stream", + "store", + 
"handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + nullable_fields = [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationusageinfo.py b/src/mistralai/client/models/conversationusageinfo.py new file mode 100644 index 00000000..7a818c89 --- /dev/null +++ b/src/mistralai/client/models/conversationusageinfo.py @@ -0,0 +1,69 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ConversationUsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + connector_tokens: NotRequired[Nullable[int]] + connectors: NotRequired[Nullable[Dict[str, int]]] + + +class ConversationUsageInfo(BaseModel): + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + connector_tokens: OptionalNullable[int] = UNSET + + connectors: OptionalNullable[Dict[str, int]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "connector_tokens", + "connectors", + ] + nullable_fields = ["connector_tokens", "connectors"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py new file mode 100644 index 00000000..1cd36128 --- /dev/null +++ b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to delete.""" + + +class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to delete.""" diff --git a/src/mistralai/client/models/deletefileout.py b/src/mistralai/client/models/deletefileout.py new file mode 100644 index 00000000..b25538be --- /dev/null +++ b/src/mistralai/client/models/deletefileout.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class DeleteFileOutTypedDict(TypedDict): + id: str + r"""The ID of the deleted file.""" + object: str + r"""The object type that was deleted""" + deleted: bool + r"""The deletion status.""" + + +class DeleteFileOut(BaseModel): + id: str + r"""The ID of the deleted file.""" + + object: str + r"""The object type that was deleted""" + + deleted: bool + r"""The deletion status.""" diff --git a/src/mistralai/client/models/deletemodelout.py b/src/mistralai/client/models/deletemodelout.py new file mode 100644 index 00000000..5aa8b68f --- /dev/null +++ b/src/mistralai/client/models/deletemodelout.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class DeleteModelOutTypedDict(TypedDict): + id: str + r"""The ID of the deleted model.""" + object: NotRequired[str] + r"""The object type that was deleted""" + deleted: NotRequired[bool] + r"""The deletion status""" + + +class DeleteModelOut(BaseModel): + id: str + r"""The ID of the deleted model.""" + + object: Optional[str] = "model" + r"""The object type that was deleted""" + + deleted: Optional[bool] = True + r"""The deletion status""" diff --git a/src/mistralai/client/models/deltamessage.py b/src/mistralai/client/models/deltamessage.py new file mode 100644 index 00000000..0ae56da8 --- /dev/null +++ b/src/mistralai/client/models/deltamessage.py @@ -0,0 +1,67 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from .toolcall import ToolCall, ToolCallTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ContentTypedDict = TypeAliasType( + "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) + + +class DeltaMessageTypedDict(TypedDict): + role: NotRequired[Nullable[str]] + content: NotRequired[Nullable[ContentTypedDict]] + tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] + + +class DeltaMessage(BaseModel): + role: OptionalNullable[str] = UNSET + + content: OptionalNullable[Content] = UNSET + + tool_calls: OptionalNullable[List[ToolCall]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, 
handler): + optional_fields = ["role", "content", "tool_calls"] + nullable_fields = ["role", "content", "tool_calls"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/documentlibrarytool.py b/src/mistralai/client/models/documentlibrarytool.py new file mode 100644 index 00000000..861a58d3 --- /dev/null +++ b/src/mistralai/client/models/documentlibrarytool.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +DocumentLibraryToolType = Literal["document_library",] + + +class DocumentLibraryToolTypedDict(TypedDict): + library_ids: List[str] + r"""Ids of the library in which to search.""" + type: NotRequired[DocumentLibraryToolType] + + +class DocumentLibraryTool(BaseModel): + library_ids: List[str] + r"""Ids of the library in which to search.""" + + type: Optional[DocumentLibraryToolType] = "document_library" diff --git a/src/mistralai/client/models/documentout.py b/src/mistralai/client/models/documentout.py new file mode 100644 index 00000000..39d0aa2a --- /dev/null +++ b/src/mistralai/client/models/documentout.py @@ -0,0 +1,127 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class DocumentOutTypedDict(TypedDict): + id: str + library_id: str + hash: Nullable[str] + mime_type: Nullable[str] + extension: Nullable[str] + size: Nullable[int] + name: str + created_at: datetime + processing_status: str + uploaded_by_id: Nullable[str] + uploaded_by_type: str + tokens_processing_total: int + summary: NotRequired[Nullable[str]] + last_processed_at: NotRequired[Nullable[datetime]] + number_of_pages: NotRequired[Nullable[int]] + tokens_processing_main_content: NotRequired[Nullable[int]] + tokens_processing_summary: NotRequired[Nullable[int]] + url: NotRequired[Nullable[str]] + attributes: NotRequired[Nullable[Dict[str, Any]]] + + +class DocumentOut(BaseModel): + id: str + + library_id: str + + hash: Nullable[str] + + mime_type: Nullable[str] + + extension: Nullable[str] + + size: Nullable[int] + + name: str + + created_at: datetime + + processing_status: str + + uploaded_by_id: Nullable[str] + + uploaded_by_type: str + + tokens_processing_total: int + + summary: OptionalNullable[str] = UNSET + + last_processed_at: OptionalNullable[datetime] = UNSET + + number_of_pages: OptionalNullable[int] = UNSET + + tokens_processing_main_content: OptionalNullable[int] = UNSET + + tokens_processing_summary: OptionalNullable[int] = UNSET + + url: OptionalNullable[str] = UNSET + + attributes: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "summary", + "last_processed_at", + "number_of_pages", + "tokens_processing_main_content", + "tokens_processing_summary", + "url", + "attributes", + ] + nullable_fields = [ + "hash", + "mime_type", + "extension", + 
"size", + "summary", + "last_processed_at", + "number_of_pages", + "uploaded_by_id", + "tokens_processing_main_content", + "tokens_processing_summary", + "url", + "attributes", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/documenttextcontent.py b/src/mistralai/client/models/documenttextcontent.py new file mode 100644 index 00000000..b1c1aa07 --- /dev/null +++ b/src/mistralai/client/models/documenttextcontent.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class DocumentTextContentTypedDict(TypedDict): + text: str + + +class DocumentTextContent(BaseModel): + text: str diff --git a/src/mistralai/client/models/documentupdatein.py b/src/mistralai/client/models/documentupdatein.py new file mode 100644 index 00000000..02022b89 --- /dev/null +++ b/src/mistralai/client/models/documentupdatein.py @@ -0,0 +1,71 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Dict, List, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +AttributesTypedDict = TypeAliasType( + "AttributesTypedDict", + Union[ + bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] + ], +) + + +Attributes = TypeAliasType( + "Attributes", + Union[ + bool, str, int, float, datetime, List[str], List[int], List[float], List[bool] + ], +) + + +class DocumentUpdateInTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + attributes: NotRequired[Nullable[Dict[str, AttributesTypedDict]]] + + +class DocumentUpdateIn(BaseModel): + name: OptionalNullable[str] = UNSET + + attributes: OptionalNullable[Dict[str, Attributes]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "attributes"] + nullable_fields = ["name", "attributes"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/documenturlchunk.py b/src/mistralai/client/models/documenturlchunk.py new file mode 100644 index 00000000..00eb5535 --- /dev/null +++ b/src/mistralai/client/models/documenturlchunk.py @@ -0,0 +1,62 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +DocumentURLChunkType = Literal["document_url",] + + +class DocumentURLChunkTypedDict(TypedDict): + document_url: str + document_name: NotRequired[Nullable[str]] + r"""The filename of the document""" + type: NotRequired[DocumentURLChunkType] + + +class DocumentURLChunk(BaseModel): + document_url: str + + document_name: OptionalNullable[str] = UNSET + r"""The filename of the document""" + + type: Optional[DocumentURLChunkType] = "document_url" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["document_name", "type"] + nullable_fields = ["document_name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/embeddingdtype.py b/src/mistralai/client/models/embeddingdtype.py new file mode 100644 index 00000000..26eee779 --- /dev/null +++ b/src/mistralai/client/models/embeddingdtype.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +EmbeddingDtype = Literal[ + "float", + "int8", + "uint8", + "binary", + "ubinary", +] diff --git a/src/mistralai/client/models/embeddingrequest.py b/src/mistralai/client/models/embeddingrequest.py new file mode 100644 index 00000000..1dfe97c8 --- /dev/null +++ b/src/mistralai/client/models/embeddingrequest.py @@ -0,0 +1,90 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .embeddingdtype import EmbeddingDtype +from .encodingformat import EncodingFormat +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +EmbeddingRequestInputsTypedDict = TypeAliasType( + "EmbeddingRequestInputsTypedDict", Union[str, List[str]] +) +r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + + +EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) +r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + + +class EmbeddingRequestTypedDict(TypedDict): + model: str + r"""The ID of the model to be used for embedding.""" + inputs: EmbeddingRequestInputsTypedDict + r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + output_dimension: NotRequired[Nullable[int]] + r"""The dimension of the output embeddings when feature available. 
If not provided, a default output dimension will be used.""" + output_dtype: NotRequired[EmbeddingDtype] + encoding_format: NotRequired[EncodingFormat] + + +class EmbeddingRequest(BaseModel): + model: str + r"""The ID of the model to be used for embedding.""" + + inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] + r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + output_dimension: OptionalNullable[int] = UNSET + r"""The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used.""" + + output_dtype: Optional[EmbeddingDtype] = None + + encoding_format: Optional[EncodingFormat] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "metadata", + "output_dimension", + "output_dtype", + "encoding_format", + ] + nullable_fields = ["metadata", "output_dimension"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/embeddingresponse.py b/src/mistralai/client/models/embeddingresponse.py new file mode 100644 index 00000000..64a28ea9 --- /dev/null +++ b/src/mistralai/client/models/embeddingresponse.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class EmbeddingResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + data: List[EmbeddingResponseDataTypedDict] + + +class EmbeddingResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + data: List[EmbeddingResponseData] diff --git a/src/mistralai/client/models/embeddingresponsedata.py b/src/mistralai/client/models/embeddingresponsedata.py new file mode 100644 index 00000000..ebd0bf7b --- /dev/null +++ b/src/mistralai/client/models/embeddingresponsedata.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class EmbeddingResponseDataTypedDict(TypedDict): + object: NotRequired[str] + embedding: NotRequired[List[float]] + index: NotRequired[int] + + +class EmbeddingResponseData(BaseModel): + object: Optional[str] = None + + embedding: Optional[List[float]] = None + + index: Optional[int] = None diff --git a/src/mistralai/client/models/encodingformat.py b/src/mistralai/client/models/encodingformat.py new file mode 100644 index 00000000..be6c1a14 --- /dev/null +++ b/src/mistralai/client/models/encodingformat.py @@ -0,0 +1,10 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +EncodingFormat = Literal[ + "float", + "base64", +] diff --git a/src/mistralai/client/models/entitytype.py b/src/mistralai/client/models/entitytype.py new file mode 100644 index 00000000..9c16f4a1 --- /dev/null +++ b/src/mistralai/client/models/entitytype.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +EntityType = Union[ + Literal[ + "User", + "Workspace", + "Org", + ], + UnrecognizedStr, +] +r"""The type of entity, used to share a library.""" diff --git a/src/mistralai/client/models/eventout.py b/src/mistralai/client/models/eventout.py new file mode 100644 index 00000000..5e118d45 --- /dev/null +++ b/src/mistralai/client/models/eventout.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict +from typing_extensions import NotRequired, TypedDict + + +class EventOutTypedDict(TypedDict): + name: str + r"""The name of the event.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + data: NotRequired[Nullable[Dict[str, Any]]] + + +class EventOut(BaseModel): + name: str + r"""The name of the event.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + data: OptionalNullable[Dict[str, Any]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["data"] + nullable_fields = ["data"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/file.py b/src/mistralai/client/models/file.py new file mode 100644 index 00000000..a8bbc6fa --- /dev/null +++ b/src/mistralai/client/models/file.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +import io +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata +import pydantic +from typing import IO, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FileTypedDict(TypedDict): + file_name: str + content: Union[bytes, IO[bytes], io.BufferedReader] + content_type: NotRequired[str] + + +class File(BaseModel): + file_name: Annotated[ + str, pydantic.Field(alias="fileName"), FieldMetadata(multipart=True) + ] + + content: Annotated[ + Union[bytes, IO[bytes], io.BufferedReader], + pydantic.Field(alias=""), + FieldMetadata(multipart=MultipartFormMetadata(content=True)), + ] + + content_type: Annotated[ + Optional[str], + pydantic.Field(alias="Content-Type"), + FieldMetadata(multipart=True), + ] = None diff --git a/src/mistralai/client/models/filechunk.py b/src/mistralai/client/models/filechunk.py new file mode 100644 index 00000000..d8b96f69 --- /dev/null +++ b/src/mistralai/client/models/filechunk.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class FileChunkTypedDict(TypedDict): + file_id: str + type: Literal["file"] + + +class FileChunk(BaseModel): + file_id: str + + TYPE: Annotated[ + Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], + pydantic.Field(alias="type"), + ] = "file" diff --git a/src/mistralai/client/models/filepurpose.py b/src/mistralai/client/models/filepurpose.py new file mode 100644 index 00000000..eef1b089 --- /dev/null +++ b/src/mistralai/client/models/filepurpose.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +FilePurpose = Union[ + Literal[ + "fine-tune", + "batch", + "ocr", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/files_api_routes_delete_fileop.py b/src/mistralai/client/models/files_api_routes_delete_fileop.py new file mode 100644 index 00000000..b7174866 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_delete_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDeleteFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_download_fileop.py b/src/mistralai/client/models/files_api_routes_download_fileop.py new file mode 100644 index 00000000..fa9e491a --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_download_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesDownloadFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_get_signed_urlop.py b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py new file mode 100644 index 00000000..a05f8262 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict): + file_id: str + expiry: NotRequired[int] + r"""Number of hours before the url becomes invalid. Defaults to 24h""" + + +class FilesAPIRoutesGetSignedURLRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + expiry: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 24 + r"""Number of hours before the url becomes invalid. Defaults to 24h""" diff --git a/src/mistralai/client/models/files_api_routes_list_filesop.py b/src/mistralai/client/models/files_api_routes_list_filesop.py new file mode 100644 index 00000000..ace99631 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_list_filesop.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + include_total: NotRequired[bool] + sample_type: NotRequired[Nullable[List[SampleType]]] + source: NotRequired[Nullable[List[Source]]] + search: NotRequired[Nullable[str]] + purpose: NotRequired[Nullable[FilePurpose]] + mimetypes: NotRequired[Nullable[List[str]]] + + +class FilesAPIRoutesListFilesRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + include_total: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = True + + sample_type: Annotated[ + OptionalNullable[List[SampleType]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + source: Annotated[ + OptionalNullable[List[Source]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + purpose: Annotated[ + OptionalNullable[FilePurpose], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + mimetypes: Annotated[ + OptionalNullable[List[str]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + 
@model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "include_total", + "sample_type", + "source", + "search", + "purpose", + "mimetypes", + ] + nullable_fields = ["sample_type", "source", "search", "purpose", "mimetypes"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/files_api_routes_retrieve_fileop.py b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py new file mode 100644 index 00000000..4a9678e5 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): + file_id: str + + +class FilesAPIRoutesRetrieveFileRequest(BaseModel): + file_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/files_api_routes_upload_fileop.py b/src/mistralai/client/models/files_api_routes_upload_fileop.py new file mode 100644 index 00000000..723c6cc2 --- /dev/null +++ b/src/mistralai/client/models/files_api_routes_upload_fileop.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .file import File, FileTypedDict +from .filepurpose import FilePurpose +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, MultipartFormMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): + file: FileTypedDict + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + purpose: NotRequired[FilePurpose] + + +class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): + file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] + r"""The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + purpose: Annotated[Optional[FilePurpose], FieldMetadata(multipart=True)] = None diff --git a/src/mistralai/client/models/fileschema.py b/src/mistralai/client/models/fileschema.py new file mode 100644 index 00000000..9ecde454 --- /dev/null +++ b/src/mistralai/client/models/fileschema.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class FileSchemaTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + size_bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + purpose: FilePurpose + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] + + +class FileSchema(BaseModel): + id: str + r"""The unique identifier of the file.""" + + object: str + r"""The object type, which is always \"file\".""" + + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] + r"""The size of the file, in bytes.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + filename: str + r"""The name of the uploaded file.""" 
+ + purpose: FilePurpose + + sample_type: SampleType + + source: Source + + num_lines: OptionalNullable[int] = UNSET + + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["num_lines", "mimetype", "signature"] + nullable_fields = ["num_lines", "mimetype", "signature"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/filesignedurl.py b/src/mistralai/client/models/filesignedurl.py new file mode 100644 index 00000000..cbca9847 --- /dev/null +++ b/src/mistralai/client/models/filesignedurl.py @@ -0,0 +1,13 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class FileSignedURLTypedDict(TypedDict): + url: str + + +class FileSignedURL(BaseModel): + url: str diff --git a/src/mistralai/client/models/fimcompletionrequest.py b/src/mistralai/client/models/fimcompletionrequest.py new file mode 100644 index 00000000..c9eca0af --- /dev/null +++ b/src/mistralai/client/models/fimcompletionrequest.py @@ -0,0 +1,130 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionRequestStopTypedDict = TypeAliasType( + "FIMCompletionRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionRequestStop = TypeAliasType( + "FIMCompletionRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + r"""Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + stop: NotRequired[FIMCompletionRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = False + r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" + + stop: Optional[FIMCompletionRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/fimcompletionresponse.py b/src/mistralai/client/models/fimcompletionresponse.py new file mode 100644 index 00000000..8a2eda0c --- /dev/null +++ b/src/mistralai/client/models/fimcompletionresponse.py @@ -0,0 +1,31 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict +from .usageinfo import UsageInfo, UsageInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class FIMCompletionResponseTypedDict(TypedDict): + id: str + object: str + model: str + usage: UsageInfoTypedDict + created: int + choices: List[ChatCompletionChoiceTypedDict] + + +class FIMCompletionResponse(BaseModel): + id: str + + object: str + + model: str + + usage: UsageInfo + + created: int + + choices: List[ChatCompletionChoice] diff --git a/src/mistralai/client/models/fimcompletionstreamrequest.py b/src/mistralai/client/models/fimcompletionstreamrequest.py new file mode 100644 index 00000000..29543802 --- /dev/null +++ b/src/mistralai/client/models/fimcompletionstreamrequest.py @@ -0,0 +1,128 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Any, Dict, List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +FIMCompletionStreamRequestStopTypedDict = TypeAliasType( + "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) +r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + +FIMCompletionStreamRequestStop = TypeAliasType( + "FIMCompletionStreamRequestStop", Union[str, List[str]] +) +r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" + + +class FIMCompletionStreamRequestTypedDict(TypedDict): + model: str + r"""ID of the model with FIM to use.""" + prompt: str + r"""The text/code to complete.""" + temperature: NotRequired[Nullable[float]] + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + top_p: NotRequired[float] + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + max_tokens: NotRequired[Nullable[int]] + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + stream: NotRequired[bool] + stop: NotRequired[FIMCompletionStreamRequestStopTypedDict] + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + random_seed: NotRequired[Nullable[int]] + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + suffix: NotRequired[Nullable[str]] + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + min_tokens: NotRequired[Nullable[int]] + r"""The minimum number of tokens to generate in the completion.""" + + +class FIMCompletionStreamRequest(BaseModel): + model: str + r"""ID of the model with FIM to use.""" + + prompt: str + r"""The text/code to complete.""" + + temperature: OptionalNullable[float] = UNSET + r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" + + top_p: Optional[float] = 1 + r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.""" + + max_tokens: OptionalNullable[int] = UNSET + r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" + + stream: Optional[bool] = True + + stop: Optional[FIMCompletionStreamRequestStop] = None + r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" + + random_seed: OptionalNullable[int] = UNSET + r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + + suffix: OptionalNullable[str] = UNSET + r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" + + min_tokens: OptionalNullable[int] = UNSET + r"""The minimum number of tokens to generate in the completion.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + nullable_fields = [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/finetuneablemodeltype.py b/src/mistralai/client/models/finetuneablemodeltype.py new file mode 100644 index 00000000..f5b8b2ed --- /dev/null +++ b/src/mistralai/client/models/finetuneablemodeltype.py @@ -0,0 +1,10 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +FineTuneableModelType = Literal[ + "completion", + "classifier", +] diff --git a/src/mistralai/client/models/ftclassifierlossfunction.py b/src/mistralai/client/models/ftclassifierlossfunction.py new file mode 100644 index 00000000..c4ef66e0 --- /dev/null +++ b/src/mistralai/client/models/ftclassifierlossfunction.py @@ -0,0 +1,10 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +FTClassifierLossFunction = Literal[ + "single_class", + "multi_class", +] diff --git a/src/mistralai/client/models/ftmodelcapabilitiesout.py b/src/mistralai/client/models/ftmodelcapabilitiesout.py new file mode 100644 index 00000000..be31aa3c --- /dev/null +++ b/src/mistralai/client/models/ftmodelcapabilitiesout.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class FTModelCapabilitiesOutTypedDict(TypedDict): + completion_chat: NotRequired[bool] + completion_fim: NotRequired[bool] + function_calling: NotRequired[bool] + fine_tuning: NotRequired[bool] + classification: NotRequired[bool] + + +class FTModelCapabilitiesOut(BaseModel): + completion_chat: Optional[bool] = True + + completion_fim: Optional[bool] = False + + function_calling: Optional[bool] = False + + fine_tuning: Optional[bool] = False + + classification: Optional[bool] = False diff --git a/src/mistralai/client/models/ftmodelcard.py b/src/mistralai/client/models/ftmodelcard.py new file mode 100644 index 00000000..36cb723d --- /dev/null +++ b/src/mistralai/client/models/ftmodelcard.py @@ -0,0 +1,132 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +FTModelCardType = Literal["fine-tuned",] + + +class FTModelCardTypedDict(TypedDict): + r"""Extra fields for fine-tuned models.""" + + id: str + capabilities: ModelCapabilitiesTypedDict + job: str + root: str + object: NotRequired[str] + created: NotRequired[int] + owned_by: NotRequired[str] + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + max_context_length: NotRequired[int] + aliases: NotRequired[List[str]] + deprecation: NotRequired[Nullable[datetime]] + deprecation_replacement_model: NotRequired[Nullable[str]] + default_model_temperature: NotRequired[Nullable[float]] + type: FTModelCardType + archived: NotRequired[bool] + + +class FTModelCard(BaseModel): + r"""Extra fields for fine-tuned models.""" + + id: str + + capabilities: ModelCapabilities + + job: str + + root: str + + object: Optional[str] = "model" + + created: Optional[int] = None + + owned_by: Optional[str] = "mistralai" + + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + max_context_length: Optional[int] = 32768 + + aliases: Optional[List[str]] = None + + deprecation: OptionalNullable[datetime] = UNSET + + deprecation_replacement_model: OptionalNullable[str] = UNSET + + default_model_temperature: OptionalNullable[float] = UNSET + + TYPE: Annotated[ + Annotated[ + Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) + ], + pydantic.Field(alias="type"), + ] = 
"fine-tuned" + + archived: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + "type", + "archived", + ] + nullable_fields = [ + "name", + "description", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/function.py b/src/mistralai/client/models/function.py new file mode 100644 index 00000000..6e2b52ed --- /dev/null +++ b/src/mistralai/client/models/function.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class FunctionTypedDict(TypedDict): + name: str + parameters: Dict[str, Any] + description: NotRequired[str] + strict: NotRequired[bool] + + +class Function(BaseModel): + name: str + + parameters: Dict[str, Any] + + description: Optional[str] = None + + strict: Optional[bool] = None diff --git a/src/mistralai/client/models/functioncall.py b/src/mistralai/client/models/functioncall.py new file mode 100644 index 00000000..6cb6f26e --- /dev/null +++ b/src/mistralai/client/models/functioncall.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +ArgumentsTypedDict = TypeAliasType("ArgumentsTypedDict", Union[Dict[str, Any], str]) + + +Arguments = TypeAliasType("Arguments", Union[Dict[str, Any], str]) + + +class FunctionCallTypedDict(TypedDict): + name: str + arguments: ArgumentsTypedDict + + +class FunctionCall(BaseModel): + name: str + + arguments: Arguments diff --git a/src/mistralai/client/models/functioncallentry.py b/src/mistralai/client/models/functioncallentry.py new file mode 100644 index 00000000..fce4d387 --- /dev/null +++ b/src/mistralai/client/models/functioncallentry.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .functioncallentryarguments import ( + FunctionCallEntryArguments, + FunctionCallEntryArgumentsTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionCallEntryObject = Literal["entry",] + + +FunctionCallEntryType = Literal["function.call",] + + +class FunctionCallEntryTypedDict(TypedDict): + tool_call_id: str + name: str + arguments: FunctionCallEntryArgumentsTypedDict + object: NotRequired[FunctionCallEntryObject] + type: NotRequired[FunctionCallEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class FunctionCallEntry(BaseModel): + tool_call_id: str + + name: str + + arguments: FunctionCallEntryArguments + + object: Optional[FunctionCallEntryObject] = "entry" + + type: Optional[FunctionCallEntryType] = "function.call" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff 
--git a/src/mistralai/client/models/functioncallentryarguments.py b/src/mistralai/client/models/functioncallentryarguments.py new file mode 100644 index 00000000..ac9e6227 --- /dev/null +++ b/src/mistralai/client/models/functioncallentryarguments.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType + + +FunctionCallEntryArgumentsTypedDict = TypeAliasType( + "FunctionCallEntryArgumentsTypedDict", Union[Dict[str, Any], str] +) + + +FunctionCallEntryArguments = TypeAliasType( + "FunctionCallEntryArguments", Union[Dict[str, Any], str] +) diff --git a/src/mistralai/client/models/functioncallevent.py b/src/mistralai/client/models/functioncallevent.py new file mode 100644 index 00000000..4e040585 --- /dev/null +++ b/src/mistralai/client/models/functioncallevent.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionCallEventType = Literal["function.call.delta",] + + +class FunctionCallEventTypedDict(TypedDict): + id: str + name: str + tool_call_id: str + arguments: str + type: NotRequired[FunctionCallEventType] + created_at: NotRequired[datetime] + output_index: NotRequired[int] + + +class FunctionCallEvent(BaseModel): + id: str + + name: str + + tool_call_id: str + + arguments: str + + type: Optional[FunctionCallEventType] = "function.call.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 diff --git a/src/mistralai/client/models/functionname.py b/src/mistralai/client/models/functionname.py new file mode 100644 index 00000000..2a05c1de --- /dev/null +++ b/src/mistralai/client/models/functionname.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class FunctionNameTypedDict(TypedDict): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str + + +class FunctionName(BaseModel): + r"""this restriction of `Function` is used to select a specific function to call""" + + name: str diff --git a/src/mistralai/client/models/functionresultentry.py b/src/mistralai/client/models/functionresultentry.py new file mode 100644 index 00000000..a843bf9b --- /dev/null +++ b/src/mistralai/client/models/functionresultentry.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionResultEntryObject = Literal["entry",] + + +FunctionResultEntryType = Literal["function.result",] + + +class FunctionResultEntryTypedDict(TypedDict): + tool_call_id: str + result: str + object: NotRequired[FunctionResultEntryObject] + type: NotRequired[FunctionResultEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + + +class FunctionResultEntry(BaseModel): + tool_call_id: str + + result: str + + object: Optional[FunctionResultEntryObject] = "entry" + + type: Optional[FunctionResultEntryType] = "function.result" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["object", "type", "created_at", "completed_at", "id"] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/functiontool.py b/src/mistralai/client/models/functiontool.py new file mode 100644 index 00000000..74b50d1b --- /dev/null +++ 
b/src/mistralai/client/models/functiontool.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .function import Function, FunctionTypedDict +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +FunctionToolType = Literal["function",] + + +class FunctionToolTypedDict(TypedDict): + function: FunctionTypedDict + type: NotRequired[FunctionToolType] + + +class FunctionTool(BaseModel): + function: Function + + type: Optional[FunctionToolType] = "function" diff --git a/src/mistralai/client/models/githubrepositoryin.py b/src/mistralai/client/models/githubrepositoryin.py new file mode 100644 index 00000000..e56fef9b --- /dev/null +++ b/src/mistralai/client/models/githubrepositoryin.py @@ -0,0 +1,69 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +GithubRepositoryInType = Literal["github",] + + +class GithubRepositoryInTypedDict(TypedDict): + name: str + owner: str + token: str + type: NotRequired[GithubRepositoryInType] + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepositoryIn(BaseModel): + name: str + + owner: str + + token: str + + type: Optional[GithubRepositoryInType] = "github" + + ref: OptionalNullable[str] = UNSET + + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "ref", "weight"] + nullable_fields = ["ref"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/githubrepositoryout.py b/src/mistralai/client/models/githubrepositoryout.py new file mode 100644 index 00000000..e3aa9ebc --- /dev/null +++ b/src/mistralai/client/models/githubrepositoryout.py @@ -0,0 +1,69 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +GithubRepositoryOutType = Literal["github",] + + +class GithubRepositoryOutTypedDict(TypedDict): + name: str + owner: str + commit_id: str + type: NotRequired[GithubRepositoryOutType] + ref: NotRequired[Nullable[str]] + weight: NotRequired[float] + + +class GithubRepositoryOut(BaseModel): + name: str + + owner: str + + commit_id: str + + type: Optional[GithubRepositoryOutType] = "github" + + ref: OptionalNullable[str] = UNSET + + weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "ref", "weight"] + nullable_fields = ["ref"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/httpvalidationerror.py b/src/mistralai/client/models/httpvalidationerror.py new file mode 100644 index 00000000..34d9b543 --- /dev/null +++ b/src/mistralai/client/models/httpvalidationerror.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .validationerror import ValidationError +from dataclasses import dataclass, field +import httpx +from mistralai.client.models import MistralError +from mistralai.client.types import BaseModel +from typing import List, Optional + + +class HTTPValidationErrorData(BaseModel): + detail: Optional[List[ValidationError]] = None + + +@dataclass(unsafe_hash=True) +class HTTPValidationError(MistralError): + data: HTTPValidationErrorData = field(hash=False) + + def __init__( + self, + data: HTTPValidationErrorData, + raw_response: httpx.Response, + body: Optional[str] = None, + ): + message = body or raw_response.text + super().__init__(message, raw_response, body) + object.__setattr__(self, "data", data) diff --git a/src/mistralai/client/models/imagegenerationtool.py b/src/mistralai/client/models/imagegenerationtool.py new file mode 100644 index 00000000..e09dba81 --- /dev/null +++ b/src/mistralai/client/models/imagegenerationtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ImageGenerationToolType = Literal["image_generation",] + + +class ImageGenerationToolTypedDict(TypedDict): + type: NotRequired[ImageGenerationToolType] + + +class ImageGenerationTool(BaseModel): + type: Optional[ImageGenerationToolType] = "image_generation" diff --git a/src/mistralai/client/models/imageurl.py b/src/mistralai/client/models/imageurl.py new file mode 100644 index 00000000..6e61d1ae --- /dev/null +++ b/src/mistralai/client/models/imageurl.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class ImageURLTypedDict(TypedDict): + url: str + detail: NotRequired[Nullable[str]] + + +class ImageURL(BaseModel): + url: str + + detail: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["detail"] + nullable_fields = ["detail"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/imageurlchunk.py b/src/mistralai/client/models/imageurlchunk.py new file mode 100644 index 00000000..f967a3c8 --- /dev/null +++ b/src/mistralai/client/models/imageurlchunk.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.client.types import BaseModel +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +ImageURLChunkImageURLTypedDict = TypeAliasType( + "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) + + +ImageURLChunkType = Literal["image_url",] + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLChunkImageURLTypedDict + type: NotRequired[ImageURLChunkType] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLChunkImageURL + + type: Optional[ImageURLChunkType] = "image_url" diff --git a/src/mistralai/client/models/inputentries.py b/src/mistralai/client/models/inputentries.py new file mode 100644 index 00000000..8ae29837 --- /dev/null +++ b/src/mistralai/client/models/inputentries.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict +from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict +from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +InputEntriesTypedDict = TypeAliasType( + "InputEntriesTypedDict", + Union[ + FunctionResultEntryTypedDict, + MessageInputEntryTypedDict, + FunctionCallEntryTypedDict, + ToolExecutionEntryTypedDict, + MessageOutputEntryTypedDict, + AgentHandoffEntryTypedDict, + ], +) + + +InputEntries = TypeAliasType( + "InputEntries", + Union[ + FunctionResultEntry, + MessageInputEntry, + FunctionCallEntry, + ToolExecutionEntry, + MessageOutputEntry, + AgentHandoffEntry, + ], +) diff --git a/src/mistralai/client/models/inputs.py b/src/mistralai/client/models/inputs.py new file mode 100644 index 00000000..fb067476 --- /dev/null +++ b/src/mistralai/client/models/inputs.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .instructrequest import InstructRequest, InstructRequestTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +InstructRequestInputsMessagesTypedDict = TypeAliasType( + "InstructRequestInputsMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +InstructRequestInputsMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +class InstructRequestInputsTypedDict(TypedDict): + messages: List[InstructRequestInputsMessagesTypedDict] + + +class InstructRequestInputs(BaseModel): + messages: List[InstructRequestInputsMessages] + + +InputsTypedDict = TypeAliasType( + "InputsTypedDict", + Union[InstructRequestInputsTypedDict, List[InstructRequestTypedDict]], +) +r"""Chat to classify""" + + +Inputs = TypeAliasType("Inputs", Union[InstructRequestInputs, List[InstructRequest]]) +r"""Chat to classify""" diff --git a/src/mistralai/client/models/instructrequest.py b/src/mistralai/client/models/instructrequest.py new file mode 100644 index 00000000..1b2f2693 --- /dev/null +++ b/src/mistralai/client/models/instructrequest.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .assistantmessage import AssistantMessage, AssistantMessageTypedDict +from .systemmessage import SystemMessage, SystemMessageTypedDict +from .toolmessage import ToolMessage, ToolMessageTypedDict +from .usermessage import UserMessage, UserMessageTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +InstructRequestMessagesTypedDict = TypeAliasType( + "InstructRequestMessagesTypedDict", + Union[ + SystemMessageTypedDict, + UserMessageTypedDict, + AssistantMessageTypedDict, + ToolMessageTypedDict, + ], +) + + +InstructRequestMessages = Annotated[ + Union[ + Annotated[AssistantMessage, Tag("assistant")], + Annotated[SystemMessage, Tag("system")], + Annotated[ToolMessage, Tag("tool")], + Annotated[UserMessage, Tag("user")], + ], + Discriminator(lambda m: get_discriminator(m, "role", "role")), +] + + +class InstructRequestTypedDict(TypedDict): + messages: List[InstructRequestMessagesTypedDict] + + +class InstructRequest(BaseModel): + messages: List[InstructRequestMessages] diff --git a/src/mistralai/client/models/jobin.py b/src/mistralai/client/models/jobin.py new file mode 100644 index 00000000..dc7684fc --- /dev/null +++ b/src/mistralai/client/models/jobin.py @@ -0,0 +1,147 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict +from .classifiertrainingparametersin import ( + ClassifierTrainingParametersIn, + ClassifierTrainingParametersInTypedDict, +) +from .completiontrainingparametersin import ( + CompletionTrainingParametersIn, + CompletionTrainingParametersInTypedDict, +) +from .finetuneablemodeltype import FineTuneableModelType +from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict +from .trainingfile import TrainingFile, TrainingFileTypedDict +from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +JobInIntegrationsTypedDict = WandbIntegrationTypedDict + + +JobInIntegrations = WandbIntegration + + +HyperparametersTypedDict = TypeAliasType( + "HyperparametersTypedDict", + Union[ + ClassifierTrainingParametersInTypedDict, CompletionTrainingParametersInTypedDict + ], +) + + +Hyperparameters = TypeAliasType( + "Hyperparameters", + Union[ClassifierTrainingParametersIn, CompletionTrainingParametersIn], +) + + +JobInRepositoriesTypedDict = GithubRepositoryInTypedDict + + +JobInRepositories = GithubRepositoryIn + + +class JobInTypedDict(TypedDict): + model: str + r"""The name of the model to fine-tune.""" + hyperparameters: HyperparametersTypedDict + training_files: NotRequired[List[TrainingFileTypedDict]] + validation_files: NotRequired[Nullable[List[str]]] + r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. 
The same data should not be present in both train and validation files.""" + suffix: NotRequired[Nullable[str]] + r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + integrations: NotRequired[Nullable[List[JobInIntegrationsTypedDict]]] + r"""A list of integrations to enable for your fine-tuning job.""" + auto_start: NotRequired[bool] + r"""This field will be required in a future release.""" + invalid_sample_skip_percentage: NotRequired[float] + job_type: NotRequired[Nullable[FineTuneableModelType]] + repositories: NotRequired[Nullable[List[JobInRepositoriesTypedDict]]] + classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]] + + +class JobIn(BaseModel): + model: str + r"""The name of the model to fine-tune.""" + + hyperparameters: Hyperparameters + + training_files: Optional[List[TrainingFile]] = None + + validation_files: OptionalNullable[List[str]] = UNSET + r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" + + suffix: OptionalNullable[str] = UNSET + r"""A string that will be added to your fine-tuning model name. 
For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" + + integrations: OptionalNullable[List[JobInIntegrations]] = UNSET + r"""A list of integrations to enable for your fine-tuning job.""" + + auto_start: Optional[bool] = None + r"""This field will be required in a future release.""" + + invalid_sample_skip_percentage: Optional[float] = 0 + + job_type: OptionalNullable[FineTuneableModelType] = UNSET + + repositories: OptionalNullable[List[JobInRepositories]] = UNSET + + classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "training_files", + "validation_files", + "suffix", + "integrations", + "auto_start", + "invalid_sample_skip_percentage", + "job_type", + "repositories", + "classifier_targets", + ] + nullable_fields = [ + "validation_files", + "suffix", + "integrations", + "job_type", + "repositories", + "classifier_targets", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobmetadataout.py b/src/mistralai/client/models/jobmetadataout.py new file mode 100644 index 00000000..f91e30c0 --- /dev/null +++ b/src/mistralai/client/models/jobmetadataout.py @@ -0,0 +1,84 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class JobMetadataOutTypedDict(TypedDict): + expected_duration_seconds: NotRequired[Nullable[int]] + cost: NotRequired[Nullable[float]] + cost_currency: NotRequired[Nullable[str]] + train_tokens_per_step: NotRequired[Nullable[int]] + train_tokens: NotRequired[Nullable[int]] + data_tokens: NotRequired[Nullable[int]] + estimated_start_time: NotRequired[Nullable[int]] + + +class JobMetadataOut(BaseModel): + expected_duration_seconds: OptionalNullable[int] = UNSET + + cost: OptionalNullable[float] = UNSET + + cost_currency: OptionalNullable[str] = UNSET + + train_tokens_per_step: OptionalNullable[int] = UNSET + + train_tokens: OptionalNullable[int] = UNSET + + data_tokens: OptionalNullable[int] = UNSET + + estimated_start_time: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + nullable_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + 
m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py new file mode 100644 index 00000000..21a04f73 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py new file mode 100644 index 00000000..32e34281 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py @@ -0,0 +1,59 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): + job_id: str + inline: NotRequired[Nullable[bool]] + + +class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + inline: Annotated[ + OptionalNullable[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["inline"] + nullable_fields = ["inline"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py new file mode 100644 index 00000000..3557e773 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -0,0 +1,108 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .batchjobstatus import BatchJobStatus +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Any, Dict, List, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + page_size: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + metadata: NotRequired[Nullable[Dict[str, Any]]] + created_after: NotRequired[Nullable[datetime]] + created_by_me: NotRequired[bool] + status: NotRequired[Nullable[List[BatchJobStatus]]] + + +class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + agent_id: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + metadata: Annotated[ + OptionalNullable[Dict[str, Any]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + + status: Annotated[ + OptionalNullable[List[BatchJobStatus]], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + 
@model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "model", + "agent_id", + "metadata", + "created_after", + "created_by_me", + "status", + ] + nullable_fields = ["model", "agent_id", "metadata", "created_after", "status"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py new file mode 100644 index 00000000..4536b738 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to archive.""" + + +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to archive.""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py new file mode 100644 index 00000000..b36d3c3e --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -0,0 +1,45 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to cancel.""" + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to cancel.""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py new file mode 100644 index 00000000..ece0d15a --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict +from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict +from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType + + +Response1TypedDict = TypeAliasType( + "Response1TypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] +) + + +Response1 = Annotated[ + Union[ + Annotated[ClassifierJobOut, Tag("classifier")], + Annotated[CompletionJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + Union[LegacyJobMetadataOutTypedDict, Response1TypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + Union[LegacyJobMetadataOut, Response1], +) +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py new file mode 100644 index 00000000..aa5a2609 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -0,0 +1,45 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to analyse.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to analyse.""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py new file mode 100644 index 00000000..7e399b31 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -0,0 +1,162 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +QueryParamStatus = Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", +] +r"""The current job state to filter on. When set, the other results are not displayed.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): + page: NotRequired[int] + r"""The page number of the results to be returned.""" + page_size: NotRequired[int] + r"""The number of items to return per page.""" + model: NotRequired[Nullable[str]] + r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" + created_after: NotRequired[Nullable[datetime]] + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + created_before: NotRequired[Nullable[datetime]] + created_by_me: NotRequired[bool] + r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" + status: NotRequired[Nullable[QueryParamStatus]] + r"""The current job state to filter on. When set, the other results are not displayed.""" + wandb_project: NotRequired[Nullable[str]] + r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" + wandb_name: NotRequired[Nullable[str]] + r"""The Weight and Biases run name to filter on. When set, the other results are not displayed.""" + suffix: NotRequired[Nullable[str]] + r"""The model suffix to filter on. 
When set, the other results are not displayed.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + r"""The page number of the results to be returned.""" + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + r"""The number of items to return per page.""" + + model: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The model name used for fine-tuning to filter on. When set, the other results are not displayed.""" + + created_after: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The date/time to filter on. When set, the results for previous creation times are not displayed.""" + + created_before: Annotated[ + OptionalNullable[datetime], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + created_by_me: Annotated[ + Optional[bool], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = False + r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" + + status: Annotated[ + OptionalNullable[QueryParamStatus], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The current job state to filter on. When set, the other results are not displayed.""" + + wandb_project: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" + + wandb_name: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The Weight and Biases run name to filter on. 
When set, the other results are not displayed.""" + + suffix: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""The model suffix to filter on. When set, the other results are not displayed.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "page", + "page_size", + "model", + "created_after", + "created_before", + "created_by_me", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + nullable_fields = [ + "model", + "created_after", + "created_before", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py new file mode 100644 index 00000000..ed5938b0 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -0,0 +1,43 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifierdetailedjobout import ( + ClassifierDetailedJobOut, + ClassifierDetailedJobOutTypedDict, +) +from .completiondetailedjobout import ( + CompletionDetailedJobOut, + CompletionDetailedJobOutTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + +JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ + Union[ + Annotated[ClassifierDetailedJobOut, Tag("classifier")], + Annotated[CompletionDetailedJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py new file mode 100644 index 00000000..e1be0ac0 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to unarchive.""" + + +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to unarchive.""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py new file mode 100644 index 00000000..a2b70b37 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifierftmodelout import ClassifierFTModelOut, ClassifierFTModelOutTypedDict +from .completionftmodelout import CompletionFTModelOut, CompletionFTModelOutTypedDict +from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import ( + FieldMetadata, + PathParamMetadata, + RequestMetadata, + get_discriminator, +) +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to update.""" + update_ft_model_in: UpdateFTModelInTypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to update.""" + + update_ft_model_in: Annotated[ + UpdateFTModelIn, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + Union[CompletionFTModelOutTypedDict, ClassifierFTModelOutTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ + Union[ + Annotated[ClassifierFTModelOut, Tag("classifier")], + Annotated[CompletionFTModelOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "model_type", "model_type")), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobsout.py b/src/mistralai/client/models/jobsout.py new file mode 100644 index 00000000..9087704f --- /dev/null +++ b/src/mistralai/client/models/jobsout.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict +from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +JobsOutDataTypedDict = TypeAliasType( + "JobsOutDataTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] +) + + +JobsOutData = Annotated[ + Union[ + Annotated[ClassifierJobOut, Tag("classifier")], + Annotated[CompletionJobOut, Tag("completion")], + ], + Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +] + + +JobsOutObject = Literal["list",] + + +class JobsOutTypedDict(TypedDict): + total: int + data: NotRequired[List[JobsOutDataTypedDict]] + object: NotRequired[JobsOutObject] + + +class JobsOut(BaseModel): + total: int + + data: Optional[List[JobsOutData]] = None + + object: Optional[JobsOutObject] = "list" diff --git a/src/mistralai/client/models/jsonschema.py b/src/mistralai/client/models/jsonschema.py new file mode 100644 index 00000000..db2fa55b --- /dev/null +++ b/src/mistralai/client/models/jsonschema.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "strict"] + nullable_fields = ["description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/legacyjobmetadataout.py b/src/mistralai/client/models/legacyjobmetadataout.py new file mode 100644 index 00000000..155ecea7 --- /dev/null +++ b/src/mistralai/client/models/legacyjobmetadataout.py @@ -0,0 +1,125 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +LegacyJobMetadataOutObject = Literal["job.metadata",] + + +class LegacyJobMetadataOutTypedDict(TypedDict): + details: str + expected_duration_seconds: NotRequired[Nullable[int]] + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + cost: NotRequired[Nullable[float]] + r"""The cost of the fine-tuning job.""" + cost_currency: NotRequired[Nullable[str]] + r"""The currency used for the fine-tuning job cost.""" + train_tokens_per_step: NotRequired[Nullable[int]] + r"""The number of tokens consumed by one training step.""" + train_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens used during the fine-tuning process.""" + data_tokens: NotRequired[Nullable[int]] + r"""The total number of tokens in the training dataset.""" + estimated_start_time: NotRequired[Nullable[int]] + deprecated: NotRequired[bool] + epochs: NotRequired[Nullable[float]] + r"""The number of complete passes through the entire training dataset.""" + training_steps: NotRequired[Nullable[int]] + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + object: NotRequired[LegacyJobMetadataOutObject] + + +class LegacyJobMetadataOut(BaseModel): + details: str + + expected_duration_seconds: OptionalNullable[int] = UNSET + r"""The approximated time (in seconds) for the fine-tuning process to complete.""" + + cost: OptionalNullable[float] = UNSET + r"""The cost of the fine-tuning job.""" + + cost_currency: OptionalNullable[str] = UNSET + r"""The currency used for the fine-tuning job cost.""" + + train_tokens_per_step: OptionalNullable[int] = UNSET + r"""The number of tokens consumed by one training step.""" + + train_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens used during the fine-tuning process.""" + + data_tokens: OptionalNullable[int] = UNSET + r"""The total number of tokens in the training dataset.""" + + estimated_start_time: OptionalNullable[int] = UNSET + + deprecated: Optional[bool] = True + + epochs: OptionalNullable[float] = UNSET + r"""The number of complete passes through the entire training dataset.""" + + training_steps: OptionalNullable[int] = UNSET + r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" + + object: Optional[LegacyJobMetadataOutObject] = "job.metadata" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "deprecated", + "epochs", + "training_steps", + "object", + ] + nullable_fields = [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "epochs", + "training_steps", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/libraries_delete_v1op.py b/src/mistralai/client/models/libraries_delete_v1op.py new file mode 100644 index 00000000..fa447de0 --- /dev/null +++ b/src/mistralai/client/models/libraries_delete_v1op.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDeleteV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_delete_v1op.py b/src/mistralai/client/models/libraries_documents_delete_v1op.py new file mode 100644 index 00000000..bc5ec6e5 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_delete_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsDeleteV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py new file mode 100644 index 00000000..24ed897d --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetExtractedTextSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py new file mode 100644 index 00000000..350c8e73 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_status_v1op.py b/src/mistralai/client/models/libraries_documents_get_status_v1op.py new file mode 100644 index 00000000..92b077d3 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_status_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetStatusV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetStatusV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py new file mode 100644 index 00000000..68f9725a --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetTextContentV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetTextContentV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_v1op.py b/src/mistralai/client/models/libraries_documents_get_v1op.py new file mode 100644 index 00000000..a67e687e --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_list_v1op.py b/src/mistralai/client/models/libraries_documents_list_v1op.py new file mode 100644 index 00000000..5dec3385 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_list_v1op.py @@ -0,0 +1,97 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class LibrariesDocumentsListV1RequestTypedDict(TypedDict): + library_id: str + search: NotRequired[Nullable[str]] + page_size: NotRequired[int] + page: NotRequired[int] + filters_attributes: NotRequired[Nullable[str]] + sort_by: NotRequired[str] + sort_order: NotRequired[str] + + +class LibrariesDocumentsListV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + page_size: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 100 + + page: Annotated[ + Optional[int], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = 0 + + filters_attributes: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + sort_by: Annotated[ + Optional[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "created_at" + + sort_order: Annotated[ + Optional[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "desc" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "search", + "page_size", + "page", + "filters_attributes", + "sort_by", + "sort_order", + ] + nullable_fields = ["search", "filters_attributes"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = 
serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/libraries_documents_reprocess_v1op.py b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py new file mode 100644 index 00000000..8aee7552 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsReprocessV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsReprocessV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_update_v1op.py b/src/mistralai/client/models/libraries_documents_update_v1op.py new file mode 100644 index 00000000..f677b4dd --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_update_v1op.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + document_update_in: DocumentUpdateInTypedDict + + +class LibrariesDocumentsUpdateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_update_in: Annotated[ + DocumentUpdateIn, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/libraries_documents_upload_v1op.py b/src/mistralai/client/models/libraries_documents_upload_v1op.py new file mode 100644 index 00000000..e2d59d9f --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_upload_v1op.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .file import File, FileTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import ( + FieldMetadata, + MultipartFormMetadata, + PathParamMetadata, + RequestMetadata, +) +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsUploadV1DocumentUploadTypedDict(TypedDict): + file: FileTypedDict + r"""The File object (not file name) to be uploaded. 
+ To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + +class LibrariesDocumentsUploadV1DocumentUpload(BaseModel): + file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] + r"""The File object (not file name) to be uploaded. + To upload a file and specify a custom file name you should format your request as such: + ```bash + file=@path/to/your/file.jsonl;filename=custom_name.jsonl + ``` + Otherwise, you can just keep the original file name: + ```bash + file=@path/to/your/file.jsonl + ``` + """ + + +class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): + library_id: str + request_body: LibrariesDocumentsUploadV1DocumentUploadTypedDict + + +class LibrariesDocumentsUploadV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + request_body: Annotated[ + LibrariesDocumentsUploadV1DocumentUpload, + FieldMetadata(request=RequestMetadata(media_type="multipart/form-data")), + ] diff --git a/src/mistralai/client/models/libraries_get_v1op.py b/src/mistralai/client/models/libraries_get_v1op.py new file mode 100644 index 00000000..83ae377d --- /dev/null +++ b/src/mistralai/client/models/libraries_get_v1op.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesGetV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesGetV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_share_create_v1op.py b/src/mistralai/client/models/libraries_share_create_v1op.py new file mode 100644 index 00000000..d0313bd0 --- /dev/null +++ b/src/mistralai/client/models/libraries_share_create_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .sharingin import SharingIn, SharingInTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareCreateV1RequestTypedDict(TypedDict): + library_id: str + sharing_in: SharingInTypedDict + + +class LibrariesShareCreateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + sharing_in: Annotated[ + SharingIn, FieldMetadata(request=RequestMetadata(media_type="application/json")) + ] diff --git a/src/mistralai/client/models/libraries_share_delete_v1op.py b/src/mistralai/client/models/libraries_share_delete_v1op.py new file mode 100644 index 00000000..620527d5 --- /dev/null +++ b/src/mistralai/client/models/libraries_share_delete_v1op.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .sharingdelete import SharingDelete, SharingDeleteTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareDeleteV1RequestTypedDict(TypedDict): + library_id: str + sharing_delete: SharingDeleteTypedDict + + +class LibrariesShareDeleteV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + sharing_delete: Annotated[ + SharingDelete, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/libraries_share_list_v1op.py b/src/mistralai/client/models/libraries_share_list_v1op.py new file mode 100644 index 00000000..fd5d9d33 --- /dev/null +++ b/src/mistralai/client/models/libraries_share_list_v1op.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesShareListV1RequestTypedDict(TypedDict): + library_id: str + + +class LibrariesShareListV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_update_v1op.py b/src/mistralai/client/models/libraries_update_v1op.py new file mode 100644 index 00000000..c434ab7a --- /dev/null +++ b/src/mistralai/client/models/libraries_update_v1op.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesUpdateV1RequestTypedDict(TypedDict): + library_id: str + library_in_update: LibraryInUpdateTypedDict + + +class LibrariesUpdateV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + library_in_update: Annotated[ + LibraryInUpdate, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] diff --git a/src/mistralai/client/models/libraryin.py b/src/mistralai/client/models/libraryin.py new file mode 100644 index 00000000..a7b36158 --- /dev/null +++ b/src/mistralai/client/models/libraryin.py @@ -0,0 +1,56 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class LibraryInTypedDict(TypedDict): + name: str + description: NotRequired[Nullable[str]] + chunk_size: NotRequired[Nullable[int]] + + +class LibraryIn(BaseModel): + name: str + + description: OptionalNullable[str] = UNSET + + chunk_size: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "chunk_size"] + nullable_fields = ["description", "chunk_size"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/libraryinupdate.py b/src/mistralai/client/models/libraryinupdate.py new file mode 100644 index 00000000..f0241ba1 --- /dev/null +++ b/src/mistralai/client/models/libraryinupdate.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class LibraryInUpdateTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class LibraryInUpdate(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "description"] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/libraryout.py b/src/mistralai/client/models/libraryout.py new file mode 100644 index 00000000..d1953f16 --- /dev/null +++ b/src/mistralai/client/models/libraryout.py @@ -0,0 +1,116 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class LibraryOutTypedDict(TypedDict): + id: str + name: str + created_at: datetime + updated_at: datetime + owner_id: Nullable[str] + owner_type: str + total_size: int + nb_documents: int + chunk_size: Nullable[int] + emoji: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + generated_description: NotRequired[Nullable[str]] + explicit_user_members_count: NotRequired[Nullable[int]] + explicit_workspace_members_count: NotRequired[Nullable[int]] + org_sharing_role: NotRequired[Nullable[str]] + generated_name: NotRequired[Nullable[str]] + r"""Generated Name""" + + +class LibraryOut(BaseModel): + id: str + + name: str + + created_at: datetime + + updated_at: datetime + + owner_id: Nullable[str] + + owner_type: str + + total_size: int + + nb_documents: int + + chunk_size: Nullable[int] + + emoji: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + generated_description: OptionalNullable[str] = UNSET + + explicit_user_members_count: OptionalNullable[int] = UNSET + + explicit_workspace_members_count: OptionalNullable[int] = UNSET + + org_sharing_role: OptionalNullable[str] = UNSET + + generated_name: OptionalNullable[str] = UNSET + r"""Generated Name""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + "org_sharing_role", + "generated_name", + ] + nullable_fields = [ + "owner_id", + "chunk_size", + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + "org_sharing_role", + "generated_name", + ] 
+ null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/listdocumentout.py b/src/mistralai/client/models/listdocumentout.py new file mode 100644 index 00000000..24969a0f --- /dev/null +++ b/src/mistralai/client/models/listdocumentout.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documentout import DocumentOut, DocumentOutTypedDict +from .paginationinfo import PaginationInfo, PaginationInfoTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListDocumentOutTypedDict(TypedDict): + pagination: PaginationInfoTypedDict + data: List[DocumentOutTypedDict] + + +class ListDocumentOut(BaseModel): + pagination: PaginationInfo + + data: List[DocumentOut] diff --git a/src/mistralai/client/models/listfilesout.py b/src/mistralai/client/models/listfilesout.py new file mode 100644 index 00000000..1db17c40 --- /dev/null +++ b/src/mistralai/client/models/listfilesout.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .fileschema import FileSchema, FileSchemaTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class ListFilesOutTypedDict(TypedDict): + data: List[FileSchemaTypedDict] + object: str + total: NotRequired[Nullable[int]] + + +class ListFilesOut(BaseModel): + data: List[FileSchema] + + object: str + + total: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["total"] + nullable_fields = ["total"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/listlibraryout.py b/src/mistralai/client/models/listlibraryout.py new file mode 100644 index 00000000..24aaa1a9 --- /dev/null +++ b/src/mistralai/client/models/listlibraryout.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .libraryout import LibraryOut, LibraryOutTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListLibraryOutTypedDict(TypedDict): + data: List[LibraryOutTypedDict] + + +class ListLibraryOut(BaseModel): + data: List[LibraryOut] diff --git a/src/mistralai/client/models/listsharingout.py b/src/mistralai/client/models/listsharingout.py new file mode 100644 index 00000000..f139813f --- /dev/null +++ b/src/mistralai/client/models/listsharingout.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .sharingout import SharingOut, SharingOutTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListSharingOutTypedDict(TypedDict): + data: List[SharingOutTypedDict] + + +class ListSharingOut(BaseModel): + data: List[SharingOut] diff --git a/src/mistralai/client/models/messageentries.py b/src/mistralai/client/models/messageentries.py new file mode 100644 index 00000000..9b1706de --- /dev/null +++ b/src/mistralai/client/models/messageentries.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict +from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageEntriesTypedDict = TypeAliasType( + "MessageEntriesTypedDict", + Union[MessageInputEntryTypedDict, MessageOutputEntryTypedDict], +) + + +MessageEntries = TypeAliasType( + "MessageEntries", Union[MessageInputEntry, MessageOutputEntry] +) diff --git a/src/mistralai/client/models/messageinputcontentchunks.py b/src/mistralai/client/models/messageinputcontentchunks.py new file mode 100644 index 00000000..e90d8aa0 --- /dev/null +++ b/src/mistralai/client/models/messageinputcontentchunks.py @@ -0,0 +1,28 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageInputContentChunksTypedDict = TypeAliasType( + "MessageInputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ThinkChunkTypedDict, + ToolFileChunkTypedDict, + ], +) + + +MessageInputContentChunks = TypeAliasType( + "MessageInputContentChunks", + Union[TextChunk, ImageURLChunk, DocumentURLChunk, ThinkChunk, ToolFileChunk], +) diff --git a/src/mistralai/client/models/messageinputentry.py b/src/mistralai/client/models/messageinputentry.py new file mode 100644 index 00000000..12a31097 --- /dev/null +++ b/src/mistralai/client/models/messageinputentry.py @@ -0,0 +1,111 @@ 
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .messageinputcontentchunks import ( + MessageInputContentChunks, + MessageInputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +Object = Literal["entry",] + + +MessageInputEntryType = Literal["message.input",] + + +MessageInputEntryRole = Literal[ + "assistant", + "user", +] + + +MessageInputEntryContentTypedDict = TypeAliasType( + "MessageInputEntryContentTypedDict", + Union[str, List[MessageInputContentChunksTypedDict]], +) + + +MessageInputEntryContent = TypeAliasType( + "MessageInputEntryContent", Union[str, List[MessageInputContentChunks]] +) + + +class MessageInputEntryTypedDict(TypedDict): + r"""Representation of an input message inside the conversation.""" + + role: MessageInputEntryRole + content: MessageInputEntryContentTypedDict + object: NotRequired[Object] + type: NotRequired[MessageInputEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + prefix: NotRequired[bool] + + +class MessageInputEntry(BaseModel): + r"""Representation of an input message inside the conversation.""" + + role: MessageInputEntryRole + + content: MessageInputEntryContent + + object: Optional[Object] = "entry" + + type: Optional[MessageInputEntryType] = "message.input" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + prefix: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "type", + "created_at", + "completed_at", + 
"id", + "prefix", + ] + nullable_fields = ["completed_at"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/messageoutputcontentchunks.py b/src/mistralai/client/models/messageoutputcontentchunks.py new file mode 100644 index 00000000..136a7608 --- /dev/null +++ b/src/mistralai/client/models/messageoutputcontentchunks.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +MessageOutputContentChunksTypedDict = TypeAliasType( + "MessageOutputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ThinkChunkTypedDict, + ToolFileChunkTypedDict, + ToolReferenceChunkTypedDict, + ], +) + + +MessageOutputContentChunks = TypeAliasType( + "MessageOutputContentChunks", + Union[ + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ThinkChunk, + ToolFileChunk, + ToolReferenceChunk, + ], +) diff --git a/src/mistralai/client/models/messageoutputentry.py b/src/mistralai/client/models/messageoutputentry.py new file mode 100644 index 00000000..d52e4e3e --- /dev/null +++ b/src/mistralai/client/models/messageoutputentry.py @@ -0,0 +1,109 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .messageoutputcontentchunks import ( + MessageOutputContentChunks, + MessageOutputContentChunksTypedDict, +) +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +MessageOutputEntryObject = Literal["entry",] + + +MessageOutputEntryType = Literal["message.output",] + + +MessageOutputEntryRole = Literal["assistant",] + + +MessageOutputEntryContentTypedDict = TypeAliasType( + "MessageOutputEntryContentTypedDict", + Union[str, List[MessageOutputContentChunksTypedDict]], +) + + +MessageOutputEntryContent = TypeAliasType( + "MessageOutputEntryContent", Union[str, List[MessageOutputContentChunks]] +) + + +class MessageOutputEntryTypedDict(TypedDict): + content: MessageOutputEntryContentTypedDict + object: NotRequired[MessageOutputEntryObject] + type: NotRequired[MessageOutputEntryType] + created_at: NotRequired[datetime] + completed_at: NotRequired[Nullable[datetime]] + id: NotRequired[str] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] + role: NotRequired[MessageOutputEntryRole] + + +class MessageOutputEntry(BaseModel): + content: MessageOutputEntryContent + + object: Optional[MessageOutputEntryObject] = "entry" + + type: Optional[MessageOutputEntryType] = "message.output" + + created_at: Optional[datetime] = None + + completed_at: OptionalNullable[datetime] = UNSET + + id: Optional[str] = None + + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + + role: Optional[MessageOutputEntryRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "object", + "type", + "created_at", + "completed_at", + "id", + "agent_id", + "model", + "role", + ] + 
nullable_fields = ["completed_at", "agent_id", "model"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/messageoutputevent.py b/src/mistralai/client/models/messageoutputevent.py new file mode 100644 index 00000000..3db7f5a0 --- /dev/null +++ b/src/mistralai/client/models/messageoutputevent.py @@ -0,0 +1,101 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +MessageOutputEventType = Literal["message.output.delta",] + + +MessageOutputEventRole = Literal["assistant",] + + +MessageOutputEventContentTypedDict = TypeAliasType( + "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] +) + + +MessageOutputEventContent = TypeAliasType( + "MessageOutputEventContent", Union[str, OutputContentChunks] +) + + +class MessageOutputEventTypedDict(TypedDict): + id: str + content: MessageOutputEventContentTypedDict + type: NotRequired[MessageOutputEventType] + created_at: NotRequired[datetime] + output_index: 
NotRequired[int] + content_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + role: NotRequired[MessageOutputEventRole] + + +class MessageOutputEvent(BaseModel): + id: str + + content: MessageOutputEventContent + + type: Optional[MessageOutputEventType] = "message.output.delta" + + created_at: Optional[datetime] = None + + output_index: Optional[int] = 0 + + content_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + role: Optional[MessageOutputEventRole] = "assistant" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "type", + "created_at", + "output_index", + "content_index", + "model", + "agent_id", + "role", + ] + nullable_fields = ["model", "agent_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/metricout.py b/src/mistralai/client/models/metricout.py new file mode 100644 index 00000000..f8027a69 --- /dev/null +++ b/src/mistralai/client/models/metricout.py @@ -0,0 +1,60 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class MetricOutTypedDict(TypedDict): + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: NotRequired[Nullable[float]] + valid_loss: NotRequired[Nullable[float]] + valid_mean_token_accuracy: NotRequired[Nullable[float]] + + +class MetricOut(BaseModel): + r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" + + train_loss: OptionalNullable[float] = UNSET + + valid_loss: OptionalNullable[float] = UNSET + + valid_mean_token_accuracy: OptionalNullable[float] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] + nullable_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/mistralerror.py b/src/mistralai/client/models/mistralerror.py new file mode 100644 index 00000000..28cfd22d --- /dev/null +++ 
b/src/mistralai/client/models/mistralerror.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass, field + + +@dataclass(unsafe_hash=True) +class MistralError(Exception): + """The base class for all HTTP error responses.""" + + message: str + status_code: int + body: str + headers: httpx.Headers = field(hash=False) + raw_response: httpx.Response = field(hash=False) + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + object.__setattr__(self, "message", message) + object.__setattr__(self, "status_code", raw_response.status_code) + object.__setattr__( + self, "body", body if body is not None else raw_response.text + ) + object.__setattr__(self, "headers", raw_response.headers) + object.__setattr__(self, "raw_response", raw_response) + + def __str__(self): + return self.message diff --git a/src/mistralai/client/models/mistralpromptmode.py b/src/mistralai/client/models/mistralpromptmode.py new file mode 100644 index 00000000..7008fc05 --- /dev/null +++ b/src/mistralai/client/models/mistralpromptmode.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
+""" diff --git a/src/mistralai/client/models/modelcapabilities.py b/src/mistralai/client/models/modelcapabilities.py new file mode 100644 index 00000000..a6db80e7 --- /dev/null +++ b/src/mistralai/client/models/modelcapabilities.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ModelCapabilitiesTypedDict(TypedDict): + completion_chat: NotRequired[bool] + function_calling: NotRequired[bool] + completion_fim: NotRequired[bool] + fine_tuning: NotRequired[bool] + vision: NotRequired[bool] + ocr: NotRequired[bool] + classification: NotRequired[bool] + moderation: NotRequired[bool] + audio: NotRequired[bool] + audio_transcription: NotRequired[bool] + + +class ModelCapabilities(BaseModel): + completion_chat: Optional[bool] = False + + function_calling: Optional[bool] = False + + completion_fim: Optional[bool] = False + + fine_tuning: Optional[bool] = False + + vision: Optional[bool] = False + + ocr: Optional[bool] = False + + classification: Optional[bool] = False + + moderation: Optional[bool] = False + + audio: Optional[bool] = False + + audio_transcription: Optional[bool] = False diff --git a/src/mistralai/client/models/modelconversation.py b/src/mistralai/client/models/modelconversation.py new file mode 100644 index 00000000..574f053d --- /dev/null +++ b/src/mistralai/client/models/modelconversation.py @@ -0,0 +1,139 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict +from .completionargs import CompletionArgs, CompletionArgsTypedDict +from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict +from .functiontool import FunctionTool, FunctionToolTypedDict +from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict +from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict +from .websearchtool import WebSearchTool, WebSearchToolTypedDict +from datetime import datetime +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag, model_serializer +from typing import Any, Dict, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ModelConversationToolsTypedDict = TypeAliasType( + "ModelConversationToolsTypedDict", + Union[ + WebSearchToolTypedDict, + WebSearchPremiumToolTypedDict, + CodeInterpreterToolTypedDict, + ImageGenerationToolTypedDict, + FunctionToolTypedDict, + DocumentLibraryToolTypedDict, + ], +) + + +ModelConversationTools = Annotated[ + Union[ + Annotated[CodeInterpreterTool, Tag("code_interpreter")], + Annotated[DocumentLibraryTool, Tag("document_library")], + Annotated[FunctionTool, Tag("function")], + Annotated[ImageGenerationTool, Tag("image_generation")], + Annotated[WebSearchTool, Tag("web_search")], + Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +ModelConversationObject = Literal["conversation",] + + +class ModelConversationTypedDict(TypedDict): + id: str + created_at: datetime + updated_at: datetime + model: str + instructions: NotRequired[Nullable[str]] + r"""Instruction prompt the model will follow 
during the conversation.""" + tools: NotRequired[List[ModelConversationToolsTypedDict]] + r"""List of tools which are available to the model during the conversation.""" + completion_args: NotRequired[CompletionArgsTypedDict] + r"""White-listed arguments from the completion API""" + name: NotRequired[Nullable[str]] + r"""Name given to the conversation.""" + description: NotRequired[Nullable[str]] + r"""Description of the what the conversation is about.""" + metadata: NotRequired[Nullable[Dict[str, Any]]] + r"""Custom metadata for the conversation.""" + object: NotRequired[ModelConversationObject] + + +class ModelConversation(BaseModel): + id: str + + created_at: datetime + + updated_at: datetime + + model: str + + instructions: OptionalNullable[str] = UNSET + r"""Instruction prompt the model will follow during the conversation.""" + + tools: Optional[List[ModelConversationTools]] = None + r"""List of tools which are available to the model during the conversation.""" + + completion_args: Optional[CompletionArgs] = None + r"""White-listed arguments from the completion API""" + + name: OptionalNullable[str] = UNSET + r"""Name given to the conversation.""" + + description: OptionalNullable[str] = UNSET + r"""Description of the what the conversation is about.""" + + metadata: OptionalNullable[Dict[str, Any]] = UNSET + r"""Custom metadata for the conversation.""" + + object: Optional[ModelConversationObject] = "conversation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "object", + ] + nullable_fields = ["instructions", "name", "description", "metadata"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + 
self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/modellist.py b/src/mistralai/client/models/modellist.py new file mode 100644 index 00000000..6a5209fa --- /dev/null +++ b/src/mistralai/client/models/modellist.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import get_discriminator +from pydantic import Discriminator, Tag +from typing import List, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +DataTypedDict = TypeAliasType( + "DataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] +) + + +Data = Annotated[ + Union[ + Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] + + +class ModelListTypedDict(TypedDict): + object: NotRequired[str] + data: NotRequired[List[DataTypedDict]] + + +class ModelList(BaseModel): + object: Optional[str] = "list" + + data: Optional[List[Data]] = None diff --git a/src/mistralai/client/models/moderationobject.py b/src/mistralai/client/models/moderationobject.py new file mode 100644 index 00000000..a6b44b96 --- /dev/null +++ b/src/mistralai/client/models/moderationobject.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class ModerationObjectTypedDict(TypedDict): + categories: NotRequired[Dict[str, bool]] + r"""Moderation result thresholds""" + category_scores: NotRequired[Dict[str, float]] + r"""Moderation result""" + + +class ModerationObject(BaseModel): + categories: Optional[Dict[str, bool]] = None + r"""Moderation result thresholds""" + + category_scores: Optional[Dict[str, float]] = None + r"""Moderation result""" diff --git a/src/mistralai/client/models/moderationresponse.py b/src/mistralai/client/models/moderationresponse.py new file mode 100644 index 00000000..288c8d82 --- /dev/null +++ b/src/mistralai/client/models/moderationresponse.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .moderationobject import ModerationObject, ModerationObjectTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ModerationResponseTypedDict(TypedDict): + id: str + model: str + results: List[ModerationObjectTypedDict] + + +class ModerationResponse(BaseModel): + id: str + + model: str + + results: List[ModerationObject] diff --git a/src/mistralai/client/models/no_response_error.py b/src/mistralai/client/models/no_response_error.py new file mode 100644 index 00000000..1deab64b --- /dev/null +++ b/src/mistralai/client/models/no_response_error.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from dataclasses import dataclass + + +@dataclass(unsafe_hash=True) +class NoResponseError(Exception): + """Error raised when no HTTP response is received from the server.""" + + message: str + + def __init__(self, message: str = "No response received"): + object.__setattr__(self, "message", message) + super().__init__(message) + + def __str__(self): + return self.message diff --git a/src/mistralai/client/models/ocrimageobject.py b/src/mistralai/client/models/ocrimageobject.py new file mode 100644 index 00000000..e97fa8df --- /dev/null +++ b/src/mistralai/client/models/ocrimageobject.py @@ -0,0 +1,89 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRImageObjectTypedDict(TypedDict): + id: str + r"""Image ID for extracted image in a page""" + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + bottom_right_x: Nullable[int] + r"""X coordinate of bottom-right corner of the extracted image""" + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + image_base64: NotRequired[Nullable[str]] + r"""Base64 string of the extracted image""" + image_annotation: NotRequired[Nullable[str]] + r"""Annotation of the extracted image in json str""" + + +class OCRImageObject(BaseModel): + id: str + r"""Image ID for extracted image in a page""" + + top_left_x: Nullable[int] + r"""X coordinate of top-left corner of the extracted image""" + + top_left_y: Nullable[int] + r"""Y coordinate of top-left corner of the extracted image""" + + bottom_right_x: Nullable[int] + r"""X 
coordinate of bottom-right corner of the extracted image""" + + bottom_right_y: Nullable[int] + r"""Y coordinate of bottom-right corner of the extracted image""" + + image_base64: OptionalNullable[str] = UNSET + r"""Base64 string of the extracted image""" + + image_annotation: OptionalNullable[str] = UNSET + r"""Annotation of the extracted image in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["image_base64", "image_annotation"] + nullable_fields = [ + "top_left_x", + "top_left_y", + "bottom_right_x", + "bottom_right_y", + "image_base64", + "image_annotation", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrpagedimensions.py b/src/mistralai/client/models/ocrpagedimensions.py new file mode 100644 index 00000000..f4fc11e0 --- /dev/null +++ b/src/mistralai/client/models/ocrpagedimensions.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class OCRPageDimensionsTypedDict(TypedDict): + dpi: int + r"""Dots per inch of the page-image""" + height: int + r"""Height of the image in pixels""" + width: int + r"""Width of the image in pixels""" + + +class OCRPageDimensions(BaseModel): + dpi: int + r"""Dots per inch of the page-image""" + + height: int + r"""Height of the image in pixels""" + + width: int + r"""Width of the image in pixels""" diff --git a/src/mistralai/client/models/ocrpageobject.py b/src/mistralai/client/models/ocrpageobject.py new file mode 100644 index 00000000..f8b43601 --- /dev/null +++ b/src/mistralai/client/models/ocrpageobject.py @@ -0,0 +1,91 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict +from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict +from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Optional +from typing_extensions import NotRequired, TypedDict + + +class OCRPageObjectTypedDict(TypedDict): + index: int + r"""The page index in a pdf document starting from 0""" + markdown: str + r"""The markdown string response of the page""" + images: List[OCRImageObjectTypedDict] + r"""List of all extracted images in the page""" + dimensions: Nullable[OCRPageDimensionsTypedDict] + r"""The dimensions of the PDF Page's screenshot image""" + tables: NotRequired[List[OCRTableObjectTypedDict]] + r"""List of all extracted tables in the page""" + hyperlinks: NotRequired[List[str]] + r"""List of all hyperlinks in the page""" + header: NotRequired[Nullable[str]] + 
r"""Header of the page""" + footer: NotRequired[Nullable[str]] + r"""Footer of the page""" + + +class OCRPageObject(BaseModel): + index: int + r"""The page index in a pdf document starting from 0""" + + markdown: str + r"""The markdown string response of the page""" + + images: List[OCRImageObject] + r"""List of all extracted images in the page""" + + dimensions: Nullable[OCRPageDimensions] + r"""The dimensions of the PDF Page's screenshot image""" + + tables: Optional[List[OCRTableObject]] = None + r"""List of all extracted tables in the page""" + + hyperlinks: Optional[List[str]] = None + r"""List of all hyperlinks in the page""" + + header: OptionalNullable[str] = UNSET + r"""Header of the page""" + + footer: OptionalNullable[str] = UNSET + r"""Footer of the page""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["tables", "hyperlinks", "header", "footer"] + nullable_fields = ["header", "footer", "dimensions"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrrequest.py b/src/mistralai/client/models/ocrrequest.py new file mode 100644 index 00000000..03a6028c --- /dev/null +++ b/src/mistralai/client/models/ocrrequest.py @@ -0,0 +1,146 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .filechunk import FileChunk, FileChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .responseformat import ResponseFormat, ResponseFormatTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +DocumentTypedDict = TypeAliasType( + "DocumentTypedDict", + Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], +) +r"""Document to run OCR on""" + + +Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk]) +r"""Document to run OCR on""" + + +TableFormat = Literal[ + "markdown", + "html", +] + + +class OCRRequestTypedDict(TypedDict): + model: Nullable[str] + document: DocumentTypedDict + r"""Document to run OCR on""" + id: NotRequired[str] + pages: NotRequired[Nullable[List[int]]] + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + include_image_base64: NotRequired[Nullable[bool]] + r"""Include image URLs in response""" + image_limit: NotRequired[Nullable[int]] + r"""Max images to extract""" + image_min_size: NotRequired[Nullable[int]] + r"""Minimum height and width of image to extract""" + bbox_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] + r"""Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field""" + document_annotation_prompt: NotRequired[Nullable[str]] + r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" + table_format: NotRequired[Nullable[TableFormat]] + extract_header: NotRequired[bool] + extract_footer: NotRequired[bool] + + +class OCRRequest(BaseModel): + model: Nullable[str] + + document: Document + r"""Document to run OCR on""" + + id: Optional[str] = None + + pages: OptionalNullable[List[int]] = UNSET + r"""Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0""" + + include_image_base64: OptionalNullable[bool] = UNSET + r"""Include image URLs in response""" + + image_limit: OptionalNullable[int] = UNSET + r"""Max images to extract""" + + image_min_size: OptionalNullable[int] = UNSET + r"""Minimum height and width of image to extract""" + + bbox_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" + + document_annotation_format: OptionalNullable[ResponseFormat] = UNSET + r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + + document_annotation_prompt: OptionalNullable[str] = UNSET + r"""Optional prompt to guide the model in extracting structured output from the entire document. 
A document_annotation_format must be provided.""" + + table_format: OptionalNullable[TableFormat] = UNSET + + extract_header: Optional[bool] = None + + extract_footer: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + "extract_header", + "extract_footer", + ] + nullable_fields = [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + ] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrresponse.py b/src/mistralai/client/models/ocrresponse.py new file mode 100644 index 00000000..2813a1ca --- /dev/null +++ b/src/mistralai/client/models/ocrresponse.py @@ -0,0 +1,68 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict +from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class OCRResponseTypedDict(TypedDict): + pages: List[OCRPageObjectTypedDict] + r"""List of OCR info for pages.""" + model: str + r"""The model used to generate the OCR.""" + usage_info: OCRUsageInfoTypedDict + document_annotation: NotRequired[Nullable[str]] + r"""Formatted response in the request_format if provided in json str""" + + +class OCRResponse(BaseModel): + pages: List[OCRPageObject] + r"""List of OCR info for pages.""" + + model: str + r"""The model used to generate the OCR.""" + + usage_info: OCRUsageInfo + + document_annotation: OptionalNullable[str] = UNSET + r"""Formatted response in the request_format if provided in json str""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["document_annotation"] + nullable_fields = ["document_annotation"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrtableobject.py b/src/mistralai/client/models/ocrtableobject.py new file mode 100644 index 00000000..0c9091de --- /dev/null +++ 
b/src/mistralai/client/models/ocrtableobject.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +import pydantic +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +Format = Literal[ + "markdown", + "html", +] +r"""Format of the table""" + + +class OCRTableObjectTypedDict(TypedDict): + id: str + r"""Table ID for extracted table in a page""" + content: str + r"""Content of the table in the given format""" + format_: Format + r"""Format of the table""" + + +class OCRTableObject(BaseModel): + id: str + r"""Table ID for extracted table in a page""" + + content: str + r"""Content of the table in the given format""" + + format_: Annotated[Format, pydantic.Field(alias="format")] + r"""Format of the table""" diff --git a/src/mistralai/client/models/ocrusageinfo.py b/src/mistralai/client/models/ocrusageinfo.py new file mode 100644 index 00000000..62f07fd4 --- /dev/null +++ b/src/mistralai/client/models/ocrusageinfo.py @@ -0,0 +1,57 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class OCRUsageInfoTypedDict(TypedDict): + pages_processed: int + r"""Number of pages processed""" + doc_size_bytes: NotRequired[Nullable[int]] + r"""Document size in bytes""" + + +class OCRUsageInfo(BaseModel): + pages_processed: int + r"""Number of pages processed""" + + doc_size_bytes: OptionalNullable[int] = UNSET + r"""Document size in bytes""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["doc_size_bytes"] + nullable_fields = ["doc_size_bytes"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/outputcontentchunks.py b/src/mistralai/client/models/outputcontentchunks.py new file mode 100644 index 00000000..ad0c087e --- /dev/null +++ b/src/mistralai/client/models/outputcontentchunks.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict +from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict +from .textchunk import TextChunk, TextChunkTypedDict +from .thinkchunk import ThinkChunk, ThinkChunkTypedDict +from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from typing import Union +from typing_extensions import TypeAliasType + + +OutputContentChunksTypedDict = TypeAliasType( + "OutputContentChunksTypedDict", + Union[ + TextChunkTypedDict, + ImageURLChunkTypedDict, + DocumentURLChunkTypedDict, + ThinkChunkTypedDict, + ToolFileChunkTypedDict, + ToolReferenceChunkTypedDict, + ], +) + + +OutputContentChunks = TypeAliasType( + "OutputContentChunks", + Union[ + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ThinkChunk, + ToolFileChunk, + ToolReferenceChunk, + ], +) diff --git a/src/mistralai/client/models/paginationinfo.py b/src/mistralai/client/models/paginationinfo.py new file mode 100644 index 00000000..0252f448 --- /dev/null +++ b/src/mistralai/client/models/paginationinfo.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class PaginationInfoTypedDict(TypedDict): + total_items: int + total_pages: int + current_page: int + page_size: int + has_more: bool + + +class PaginationInfo(BaseModel): + total_items: int + + total_pages: int + + current_page: int + + page_size: int + + has_more: bool diff --git a/src/mistralai/client/models/prediction.py b/src/mistralai/client/models/prediction.py new file mode 100644 index 00000000..f2c5d9c6 --- /dev/null +++ b/src/mistralai/client/models/prediction.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class PredictionTypedDict(TypedDict): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + type: Literal["content"] + content: NotRequired[str] + + +class Prediction(BaseModel): + r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" + + TYPE: Annotated[ + Annotated[ + Optional[Literal["content"]], AfterValidator(validate_const("content")) + ], + pydantic.Field(alias="type"), + ] = "content" + + content: Optional[str] = "" diff --git a/src/mistralai/client/models/processingstatusout.py b/src/mistralai/client/models/processingstatusout.py new file mode 100644 index 00000000..031f386f --- /dev/null +++ b/src/mistralai/client/models/processingstatusout.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class ProcessingStatusOutTypedDict(TypedDict): + document_id: str + processing_status: str + + +class ProcessingStatusOut(BaseModel): + document_id: str + + processing_status: str diff --git a/src/mistralai/client/models/realtimetranscriptionerror.py b/src/mistralai/client/models/realtimetranscriptionerror.py new file mode 100644 index 00000000..e6a889de --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionerror.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .realtimetranscriptionerrordetail import ( + RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionErrorTypedDict(TypedDict): + error: RealtimeTranscriptionErrorDetailTypedDict + type: Literal["error"] + + +class RealtimeTranscriptionError(BaseModel): + error: RealtimeTranscriptionErrorDetail + + TYPE: Annotated[ + Annotated[Optional[Literal["error"]], AfterValidator(validate_const("error"))], + pydantic.Field(alias="type"), + ] = "error" diff --git a/src/mistralai/client/models/realtimetranscriptionerrordetail.py b/src/mistralai/client/models/realtimetranscriptionerrordetail.py new file mode 100644 index 00000000..27bb8d87 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionerrordetail.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Any, Dict, Union +from typing_extensions import TypeAliasType, TypedDict + + +MessageTypedDict = TypeAliasType("MessageTypedDict", Union[str, Dict[str, Any]]) +r"""Human-readable error message.""" + + +Message = TypeAliasType("Message", Union[str, Dict[str, Any]]) +r"""Human-readable error message.""" + + +class RealtimeTranscriptionErrorDetailTypedDict(TypedDict): + message: MessageTypedDict + r"""Human-readable error message.""" + code: int + r"""Internal error code for debugging.""" + + +class RealtimeTranscriptionErrorDetail(BaseModel): + message: Message + r"""Human-readable error message.""" + + code: int + r"""Internal error code for debugging.""" diff --git a/src/mistralai/client/models/realtimetranscriptionsession.py b/src/mistralai/client/models/realtimetranscriptionsession.py new file mode 100644 index 00000000..3a330651 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsession.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .audioformat import AudioFormat, AudioFormatTypedDict +from mistralai.client.types import BaseModel +from typing_extensions import TypedDict + + +class RealtimeTranscriptionSessionTypedDict(TypedDict): + request_id: str + model: str + audio_format: AudioFormatTypedDict + + +class RealtimeTranscriptionSession(BaseModel): + request_id: str + + model: str + + audio_format: AudioFormat diff --git a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py new file mode 100644 index 00000000..cc6d5028 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionCreatedTypedDict(TypedDict): + session: RealtimeTranscriptionSessionTypedDict + type: Literal["session.created"] + + +class RealtimeTranscriptionSessionCreated(BaseModel): + session: RealtimeTranscriptionSession + + TYPE: Annotated[ + Annotated[ + Optional[Literal["session.created"]], + AfterValidator(validate_const("session.created")), + ], + pydantic.Field(alias="type"), + ] = "session.created" diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py new file mode 100644 index 00000000..3da23595 --- /dev/null +++ 
b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py @@ -0,0 +1,30 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .realtimetranscriptionsession import ( + RealtimeTranscriptionSession, + RealtimeTranscriptionSessionTypedDict, +) +from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionUpdatedTypedDict(TypedDict): + session: RealtimeTranscriptionSessionTypedDict + type: Literal["session.updated"] + + +class RealtimeTranscriptionSessionUpdated(BaseModel): + session: RealtimeTranscriptionSession + + TYPE: Annotated[ + Annotated[ + Optional[Literal["session.updated"]], + AfterValidator(validate_const("session.updated")), + ], + pydantic.Field(alias="type"), + ] = "session.updated" diff --git a/src/mistralai/client/models/referencechunk.py b/src/mistralai/client/models/referencechunk.py new file mode 100644 index 00000000..4c703b81 --- /dev/null +++ b/src/mistralai/client/models/referencechunk.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ReferenceChunkType = Literal["reference",] + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: NotRequired[ReferenceChunkType] + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + type: Optional[ReferenceChunkType] = "reference" diff --git a/src/mistralai/client/models/requestsource.py b/src/mistralai/client/models/requestsource.py new file mode 100644 index 00000000..7b0a35c4 --- /dev/null +++ b/src/mistralai/client/models/requestsource.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +RequestSource = Literal[ + "api", + "playground", + "agent_builder_v1", +] diff --git a/src/mistralai/client/models/responsedoneevent.py b/src/mistralai/client/models/responsedoneevent.py new file mode 100644 index 00000000..54056256 --- /dev/null +++ b/src/mistralai/client/models/responsedoneevent.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict +from datetime import datetime +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ResponseDoneEventType = Literal["conversation.response.done",] + + +class ResponseDoneEventTypedDict(TypedDict): + usage: ConversationUsageInfoTypedDict + type: NotRequired[ResponseDoneEventType] + created_at: NotRequired[datetime] + + +class ResponseDoneEvent(BaseModel): + usage: ConversationUsageInfo + + type: Optional[ResponseDoneEventType] = "conversation.response.done" + + created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/responseerrorevent.py b/src/mistralai/client/models/responseerrorevent.py new file mode 100644 index 00000000..c9ef95a0 --- /dev/null +++ b/src/mistralai/client/models/responseerrorevent.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ResponseErrorEventType = Literal["conversation.response.error",] + + +class ResponseErrorEventTypedDict(TypedDict): + message: str + code: int + type: NotRequired[ResponseErrorEventType] + created_at: NotRequired[datetime] + + +class ResponseErrorEvent(BaseModel): + message: str + + code: int + + type: Optional[ResponseErrorEventType] = "conversation.response.error" + + created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/responseformat.py b/src/mistralai/client/models/responseformat.py new file mode 100644 index 00000000..5899b017 --- /dev/null +++ b/src/mistralai/client/models/responseformat.py @@ -0,0 +1,60 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict +from .responseformats import ResponseFormats +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class ResponseFormatTypedDict(TypedDict): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. 
Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: NotRequired[ResponseFormats] + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] + + +class ResponseFormat(BaseModel): + r"""Specify the format that the model must output. By default it will use `{ \"type\": \"text\" }`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ \"type\": \"json_schema\" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide.""" + + type: Optional[ResponseFormats] = None + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "json_schema"] + nullable_fields = ["json_schema"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/responseformats.py b/src/mistralai/client/models/responseformats.py new file mode 100644 index 00000000..cbf83ce7 --- /dev/null +++ b/src/mistralai/client/models/responseformats.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +ResponseFormats = Literal[ + "text", + "json_object", + "json_schema", +] diff --git a/src/mistralai/client/models/responsestartedevent.py b/src/mistralai/client/models/responsestartedevent.py new file mode 100644 index 00000000..dc6a10f9 --- /dev/null +++ b/src/mistralai/client/models/responsestartedevent.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from datetime import datetime +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +ResponseStartedEventType = Literal["conversation.response.started",] + + +class ResponseStartedEventTypedDict(TypedDict): + conversation_id: str + type: NotRequired[ResponseStartedEventType] + created_at: NotRequired[datetime] + + +class ResponseStartedEvent(BaseModel): + conversation_id: str + + type: Optional[ResponseStartedEventType] = "conversation.response.started" + + created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/responsevalidationerror.py b/src/mistralai/client/models/responsevalidationerror.py new file mode 100644 index 00000000..bab5d0b7 --- /dev/null +++ b/src/mistralai/client/models/responsevalidationerror.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai.client.models import MistralError + + +@dataclass(unsafe_hash=True) +class ResponseValidationError(MistralError): + """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" + + def __init__( + self, + message: str, + raw_response: httpx.Response, + cause: Exception, + body: Optional[str] = None, + ): + message = f"{message}: {cause}" + super().__init__(message, raw_response, body) + + @property + def cause(self): + """Normally the Pydantic ValidationError""" + return self.__cause__ diff --git a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py new file mode 100644 index 00000000..7fdcd37d --- /dev/null +++ b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator +from pydantic import Discriminator, Tag +from typing import Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to retrieve.""" + + +class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to retrieve.""" + + +RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( + "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", + Union[BaseModelCardTypedDict, FTModelCardTypedDict], +) +r"""Successful Response""" + + +RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet = Annotated[ + Union[ + Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] + ], + Discriminator(lambda m: get_discriminator(m, "type", "type")), +] +r"""Successful Response""" diff --git a/src/mistralai/client/models/retrievefileout.py b/src/mistralai/client/models/retrievefileout.py new file mode 100644 index 00000000..ffd0617a --- /dev/null +++ b/src/mistralai/client/models/retrievefileout.py @@ -0,0 +1,97 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class RetrieveFileOutTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + size_bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + purpose: FilePurpose + sample_type: SampleType + source: Source + deleted: bool + num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] + + +class RetrieveFileOut(BaseModel): + id: str + r"""The unique identifier of the file.""" + + object: str + r"""The object type, which is always \"file\".""" + + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] + r"""The size of the file, in bytes.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + filename: str + r"""The name of the uploaded file.""" + + purpose: FilePurpose + + sample_type: SampleType + + source: Source + + deleted: bool + + num_lines: OptionalNullable[int] = UNSET + + mimetype: OptionalNullable[str] = UNSET + + signature: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["num_lines", "mimetype", "signature"] + nullable_fields = ["num_lines", "mimetype", "signature"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k 
in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/sampletype.py b/src/mistralai/client/models/sampletype.py new file mode 100644 index 00000000..e0727b02 --- /dev/null +++ b/src/mistralai/client/models/sampletype.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +SampleType = Union[ + Literal[ + "pretrain", + "instruct", + "batch_request", + "batch_result", + "batch_error", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/sdkerror.py b/src/mistralai/client/models/sdkerror.py new file mode 100644 index 00000000..ceb03c48 --- /dev/null +++ b/src/mistralai/client/models/sdkerror.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +from typing import Optional +from dataclasses import dataclass + +from mistralai.client.models import MistralError + +MAX_MESSAGE_LEN = 10_000 + + +@dataclass(unsafe_hash=True) +class SDKError(MistralError): + """The fallback error class if no more specific error class is matched.""" + + def __init__( + self, message: str, raw_response: httpx.Response, body: Optional[str] = None + ): + body_display = body or raw_response.text or '""' + + if message: + message += ": " + message += f"Status {raw_response.status_code}" + + headers = raw_response.headers + content_type = headers.get("content-type", '""') + if content_type != "application/json": + if " " in content_type: + content_type = f'"{content_type}"' + message += f" Content-Type {content_type}" + + if len(body_display) > MAX_MESSAGE_LEN: + truncated = body_display[:MAX_MESSAGE_LEN] + remaining = len(body_display) - MAX_MESSAGE_LEN + body_display = f"{truncated}...and {remaining} more chars" + + message += f". Body: {body_display}" + message = message.strip() + + super().__init__(message, raw_response, body) diff --git a/src/mistralai/client/models/security.py b/src/mistralai/client/models/security.py new file mode 100644 index 00000000..1b67229b --- /dev/null +++ b/src/mistralai/client/models/security.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, SecurityMetadata +from typing import Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class SecurityTypedDict(TypedDict): + api_key: NotRequired[str] + + +class Security(BaseModel): + api_key: Annotated[ + Optional[str], + FieldMetadata( + security=SecurityMetadata( + scheme=True, + scheme_type="http", + sub_type="bearer", + field_name="Authorization", + ) + ), + ] = None diff --git a/src/mistralai/client/models/shareenum.py b/src/mistralai/client/models/shareenum.py new file mode 100644 index 00000000..ca1b9624 --- /dev/null +++ b/src/mistralai/client/models/shareenum.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ShareEnum = Union[ + Literal[ + "Viewer", + "Editor", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/sharingdelete.py b/src/mistralai/client/models/sharingdelete.py new file mode 100644 index 00000000..d659342f --- /dev/null +++ b/src/mistralai/client/models/sharingdelete.py @@ -0,0 +1,61 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .entitytype import EntityType +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class SharingDeleteTypedDict(TypedDict): + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + org_id: NotRequired[Nullable[str]] + + +class SharingDelete(BaseModel): + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + + org_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["org_id"] + nullable_fields = ["org_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/sharingin.py b/src/mistralai/client/models/sharingin.py new file mode 100644 index 00000000..630f4c70 --- /dev/null +++ b/src/mistralai/client/models/sharingin.py @@ -0,0 +1,65 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from .entitytype import EntityType +from .shareenum import ShareEnum +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class SharingInTypedDict(TypedDict): + level: ShareEnum + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + org_id: NotRequired[Nullable[str]] + + +class SharingIn(BaseModel): + level: ShareEnum + + share_with_uuid: str + r"""The id of the entity (user, workspace or organization) to share with""" + + share_with_type: EntityType + r"""The type of entity, used to share a library.""" + + org_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["org_id"] + nullable_fields = ["org_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/sharingout.py b/src/mistralai/client/models/sharingout.py new file mode 100644 index 00000000..195701d1 --- /dev/null +++ b/src/mistralai/client/models/sharingout.py @@ -0,0 +1,65 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class SharingOutTypedDict(TypedDict): + library_id: str + org_id: str + role: str + share_with_type: str + share_with_uuid: Nullable[str] + user_id: NotRequired[Nullable[str]] + + +class SharingOut(BaseModel): + library_id: str + + org_id: str + + role: str + + share_with_type: str + + share_with_uuid: Nullable[str] + + user_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["user_id"] + nullable_fields = ["user_id", "share_with_uuid"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/source.py b/src/mistralai/client/models/source.py new file mode 100644 index 00000000..181b327e --- /dev/null +++ b/src/mistralai/client/models/source.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +Source = Union[ + Literal[ + "upload", + "repository", + "mistral", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/ssetypes.py b/src/mistralai/client/models/ssetypes.py new file mode 100644 index 00000000..796f0327 --- /dev/null +++ b/src/mistralai/client/models/ssetypes.py @@ -0,0 +1,19 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from typing import Literal + + +SSETypes = Literal[ + "conversation.response.started", + "conversation.response.done", + "conversation.response.error", + "message.output.delta", + "tool.execution.started", + "tool.execution.delta", + "tool.execution.done", + "agent.handoff.started", + "agent.handoff.done", + "function.call.delta", +] +r"""Server side events sent when streaming a conversation response.""" diff --git a/src/mistralai/client/models/systemmessage.py b/src/mistralai/client/models/systemmessage.py new file mode 100644 index 00000000..9e01bc57 --- /dev/null +++ b/src/mistralai/client/models/systemmessage.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .systemmessagecontentchunks import (
    SystemMessageContentChunks,
    SystemMessageContentChunksTypedDict,
)
from mistralai.client.types import BaseModel
from typing import List, Literal, Optional, Union
from typing_extensions import NotRequired, TypeAliasType, TypedDict


# Content is either a plain string or a list of typed content chunks.
SystemMessageContentTypedDict = TypeAliasType(
    "SystemMessageContentTypedDict",
    Union[str, List[SystemMessageContentChunksTypedDict]],
)


SystemMessageContent = TypeAliasType(
    "SystemMessageContent", Union[str, List[SystemMessageContentChunks]]
)


Role = Literal["system",]


class SystemMessageTypedDict(TypedDict):
    """Plain-dict mirror of ``SystemMessage``."""

    content: SystemMessageContentTypedDict
    role: NotRequired[Role]


class SystemMessage(BaseModel):
    """Chat message with the fixed role ``"system"``."""

    content: SystemMessageContent
    role: Optional[Role] = "system"
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .textchunk import TextChunk, TextChunkTypedDict
from .thinkchunk import ThinkChunk, ThinkChunkTypedDict
from mistralai.client.utils import get_discriminator
from pydantic import Discriminator, Tag
from typing import Literal, Optional, Union
from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


SystemMessageContentChunksTypedDict = TypeAliasType(
    "SystemMessageContentChunksTypedDict",
    Union[TextChunkTypedDict, ThinkChunkTypedDict],
)


# Tagged union discriminated on each chunk's "type" field.
SystemMessageContentChunks = Annotated[
    Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]],
    Discriminator(lambda m: get_discriminator(m, "type", "type")),
]


TextChunkType = Literal["text",]


class TextChunkTypedDict(TypedDict):
    """Plain-dict mirror of ``TextChunk``."""

    text: str
    type: NotRequired[TextChunkType]


class TextChunk(BaseModel):
    """Content chunk carrying raw text; ``type`` defaults to ``"text"``."""

    text: str
    type: Optional[TextChunkType] = "text"
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict
from .textchunk import TextChunk, TextChunkTypedDict
from mistralai.client.types import BaseModel
from typing import List, Literal, Optional, Union
from typing_extensions import NotRequired, TypeAliasType, TypedDict


ThinkingTypedDict = TypeAliasType(
    "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict]
)


Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk])


ThinkChunkType = Literal["thinking",]


class ThinkChunkTypedDict(TypedDict):
    """Plain-dict mirror of ``ThinkChunk``."""

    thinking: List[ThinkingTypedDict]
    closed: NotRequired[bool]
    r"""Whether the thinking chunk is closed or not. Currently only used for prefixing."""
    type: NotRequired[ThinkChunkType]


class ThinkChunk(BaseModel):
    """Content chunk wrapping a list of reference/text "thinking" items."""

    thinking: List[Thinking]
    closed: Optional[bool] = None
    r"""Whether the thinking chunk is closed or not. Currently only used for prefixing."""
    type: Optional[ThinkChunkType] = "thinking"


TimestampGranularity = Literal[
    "segment",
    "word",
]
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .function import Function, FunctionTypedDict
from .functioncall import FunctionCall, FunctionCallTypedDict
from .tooltypes import ToolTypes
from mistralai.client.types import BaseModel
from typing import Optional
from typing_extensions import NotRequired, TypedDict


class ToolTypedDict(TypedDict):
    """Plain-dict mirror of ``Tool``."""

    function: FunctionTypedDict
    type: NotRequired[ToolTypes]


class Tool(BaseModel):
    """A callable tool definition wrapping a ``Function`` spec."""

    function: Function
    type: Optional[ToolTypes] = None


class ToolCallTypedDict(TypedDict):
    """Plain-dict mirror of ``ToolCall``."""

    function: FunctionCallTypedDict
    id: NotRequired[str]
    type: NotRequired[ToolTypes]
    index: NotRequired[int]


class ToolCall(BaseModel):
    """A concrete invocation of a tool, as emitted by the model."""

    function: FunctionCall
    # NOTE(review): the default is the literal string "null", not None —
    # this matches the generated schema; confirm upstream before changing.
    id: Optional[str] = "null"
    type: Optional[ToolTypes] = None
    index: Optional[int] = 0
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .functionname import FunctionName, FunctionNameTypedDict
from .tooltypes import ToolTypes
from mistralai.client.types import BaseModel
from typing import Literal, Optional
from typing_extensions import NotRequired, TypedDict


class ToolChoiceTypedDict(TypedDict):
    r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice"""

    function: FunctionNameTypedDict
    r"""this restriction of `Function` is used to select a specific function to call"""
    type: NotRequired[ToolTypes]


class ToolChoice(BaseModel):
    r"""ToolChoice is either a ToolChoiceEnum or a ToolChoice"""

    function: FunctionName
    r"""this restriction of `Function` is used to select a specific function to call"""

    type: Optional[ToolTypes] = None


ToolChoiceEnum = Literal[
    "auto",
    "none",
    "any",
    "required",
]
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .builtinconnectors import BuiltInConnectors
from datetime import datetime
from mistralai.client.types import BaseModel
from typing import Literal, Optional, Union
from typing_extensions import NotRequired, TypeAliasType, TypedDict


ToolExecutionDeltaEventType = Literal["tool.execution.delta",]


# Tool name: either a built-in connector or a free-form string.
ToolExecutionDeltaEventNameTypedDict = TypeAliasType(
    "ToolExecutionDeltaEventNameTypedDict", Union[BuiltInConnectors, str]
)


ToolExecutionDeltaEventName = TypeAliasType(
    "ToolExecutionDeltaEventName", Union[BuiltInConnectors, str]
)


class ToolExecutionDeltaEventTypedDict(TypedDict):
    """Plain-dict mirror of ``ToolExecutionDeltaEvent``."""

    id: str
    name: ToolExecutionDeltaEventNameTypedDict
    arguments: str
    type: NotRequired[ToolExecutionDeltaEventType]
    created_at: NotRequired[datetime]
    output_index: NotRequired[int]


class ToolExecutionDeltaEvent(BaseModel):
    """Streaming event with an incremental slice of tool-call arguments."""

    id: str
    name: ToolExecutionDeltaEventName
    arguments: str
    type: Optional[ToolExecutionDeltaEventType] = "tool.execution.delta"
    created_at: Optional[datetime] = None
    output_index: Optional[int] = 0
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .builtinconnectors import BuiltInConnectors
from datetime import datetime
from mistralai.client.types import BaseModel
from typing import Any, Dict, Literal, Optional, Union
from typing_extensions import NotRequired, TypeAliasType, TypedDict


ToolExecutionDoneEventType = Literal["tool.execution.done",]


# Tool name: either a built-in connector or a free-form string.
ToolExecutionDoneEventNameTypedDict = TypeAliasType(
    "ToolExecutionDoneEventNameTypedDict", Union[BuiltInConnectors, str]
)


ToolExecutionDoneEventName = TypeAliasType(
    "ToolExecutionDoneEventName", Union[BuiltInConnectors, str]
)


class ToolExecutionDoneEventTypedDict(TypedDict):
    """Plain-dict mirror of ``ToolExecutionDoneEvent``."""

    id: str
    name: ToolExecutionDoneEventNameTypedDict
    type: NotRequired[ToolExecutionDoneEventType]
    created_at: NotRequired[datetime]
    output_index: NotRequired[int]
    info: NotRequired[Dict[str, Any]]


class ToolExecutionDoneEvent(BaseModel):
    """Streaming event signalling a tool execution has finished."""

    id: str
    name: ToolExecutionDoneEventName
    type: Optional[ToolExecutionDoneEventType] = "tool.execution.done"
    created_at: Optional[datetime] = None
    output_index: Optional[int] = 0
    info: Optional[Dict[str, Any]] = None
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .builtinconnectors import BuiltInConnectors
from datetime import datetime
from mistralai.client.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from pydantic import model_serializer
from typing import Any, Dict, Literal, Optional, Union
from typing_extensions import NotRequired, TypeAliasType, TypedDict


ToolExecutionEntryObject = Literal["entry",]


ToolExecutionEntryType = Literal["tool.execution",]


NameTypedDict = TypeAliasType("NameTypedDict", Union[BuiltInConnectors, str])


Name = TypeAliasType("Name", Union[BuiltInConnectors, str])


class ToolExecutionEntryTypedDict(TypedDict):
    """Plain-dict mirror of ``ToolExecutionEntry``."""

    name: NameTypedDict
    arguments: str
    object: NotRequired[ToolExecutionEntryObject]
    type: NotRequired[ToolExecutionEntryType]
    created_at: NotRequired[datetime]
    completed_at: NotRequired[Nullable[datetime]]
    id: NotRequired[str]
    info: NotRequired[Dict[str, Any]]


class ToolExecutionEntry(BaseModel):
    """Conversation entry recording one tool execution."""

    name: Name
    arguments: str
    object: Optional[ToolExecutionEntryObject] = "entry"
    type: Optional[ToolExecutionEntryType] = "tool.execution"
    created_at: Optional[datetime] = None
    completed_at: OptionalNullable[datetime] = UNSET
    id: Optional[str] = None
    info: Optional[Dict[str, Any]] = None

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Re-filter pydantic's output so optional/nullable fields are only
        # emitted when they carry a meaningful (explicitly set) value.
        optional_fields = ["object", "type", "created_at", "completed_at", "id", "info"]
        nullable_fields = ["completed_at"]
        null_default_fields = []

        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.pop(key, None)

            is_optional_nullable = key in optional_fields and key in nullable_fields
            was_explicitly_set = (
                self.__pydantic_fields_set__.intersection({field_name})
                or key in null_default_fields
            )  # pylint: disable=no-member

            if value is not None and value != UNSET_SENTINEL:
                out[key] = value
            elif value != UNSET_SENTINEL and (
                key not in optional_fields
                or (is_optional_nullable and was_explicitly_set)
            ):
                out[key] = value

        return out


ToolExecutionStartedEventType = Literal["tool.execution.started",]


ToolExecutionStartedEventNameTypedDict = TypeAliasType(
    "ToolExecutionStartedEventNameTypedDict", Union[BuiltInConnectors, str]
)


ToolExecutionStartedEventName = TypeAliasType(
    "ToolExecutionStartedEventName", Union[BuiltInConnectors, str]
)


class ToolExecutionStartedEventTypedDict(TypedDict):
    """Plain-dict mirror of ``ToolExecutionStartedEvent``."""

    id: str
    name: ToolExecutionStartedEventNameTypedDict
    arguments: str
    type: NotRequired[ToolExecutionStartedEventType]
    created_at: NotRequired[datetime]
    output_index: NotRequired[int]


class ToolExecutionStartedEvent(BaseModel):
    """Streaming event signalling a tool execution has begun."""

    id: str
    name: ToolExecutionStartedEventName
    arguments: str
    type: Optional[ToolExecutionStartedEventType] = "tool.execution.started"
    created_at: Optional[datetime] = None
    output_index: Optional[int] = 0
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .builtinconnectors import BuiltInConnectors
from mistralai.client.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from pydantic import model_serializer
from typing import Literal, Optional, Union
from typing_extensions import NotRequired, TypeAliasType, TypedDict


ToolFileChunkType = Literal["tool_file",]


ToolFileChunkToolTypedDict = TypeAliasType(
    "ToolFileChunkToolTypedDict", Union[BuiltInConnectors, str]
)


ToolFileChunkTool = TypeAliasType("ToolFileChunkTool", Union[BuiltInConnectors, str])


class ToolFileChunkTypedDict(TypedDict):
    """Plain-dict mirror of ``ToolFileChunk``."""

    tool: ToolFileChunkToolTypedDict
    file_id: str
    type: NotRequired[ToolFileChunkType]
    file_name: NotRequired[Nullable[str]]
    file_type: NotRequired[Nullable[str]]


class ToolFileChunk(BaseModel):
    """Content chunk referencing a file produced by a tool."""

    tool: ToolFileChunkTool
    file_id: str
    type: Optional[ToolFileChunkType] = "tool_file"
    file_name: OptionalNullable[str] = UNSET
    file_type: OptionalNullable[str] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Re-filter pydantic's output so optional/nullable fields are only
        # emitted when they carry a meaningful (explicitly set) value.
        optional_fields = ["type", "file_name", "file_type"]
        nullable_fields = ["file_name", "file_type"]
        null_default_fields = []

        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.pop(key, None)

            is_optional_nullable = key in optional_fields and key in nullable_fields
            was_explicitly_set = (
                self.__pydantic_fields_set__.intersection({field_name})
                or key in null_default_fields
            )  # pylint: disable=no-member

            if value is not None and value != UNSET_SENTINEL:
                out[key] = value
            elif value != UNSET_SENTINEL and (
                key not in optional_fields
                or (is_optional_nullable and was_explicitly_set)
            ):
                out[key] = value

        return out
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .contentchunk import ContentChunk, ContentChunkTypedDict
from mistralai.client.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from pydantic import model_serializer
from typing import List, Literal, Optional, Union
from typing_extensions import NotRequired, TypeAliasType, TypedDict


ToolMessageContentTypedDict = TypeAliasType(
    "ToolMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]]
)


ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]])


ToolMessageRole = Literal["tool",]


class ToolMessageTypedDict(TypedDict):
    """Plain-dict mirror of ``ToolMessage``."""

    content: Nullable[ToolMessageContentTypedDict]
    tool_call_id: NotRequired[Nullable[str]]
    name: NotRequired[Nullable[str]]
    role: NotRequired[ToolMessageRole]


class ToolMessage(BaseModel):
    """Chat message with the fixed role ``"tool"`` carrying a tool result."""

    content: Nullable[ToolMessageContent]
    tool_call_id: OptionalNullable[str] = UNSET
    name: OptionalNullable[str] = UNSET
    role: Optional[ToolMessageRole] = "tool"

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Re-filter pydantic's output so optional/nullable fields are only
        # emitted when they carry a meaningful (explicitly set) value.
        optional_fields = ["tool_call_id", "name", "role"]
        nullable_fields = ["content", "tool_call_id", "name"]
        null_default_fields = []

        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.pop(key, None)

            is_optional_nullable = key in optional_fields and key in nullable_fields
            was_explicitly_set = (
                self.__pydantic_fields_set__.intersection({field_name})
                or key in null_default_fields
            )  # pylint: disable=no-member

            if value is not None and value != UNSET_SENTINEL:
                out[key] = value
            elif value != UNSET_SENTINEL and (
                key not in optional_fields
                or (is_optional_nullable and was_explicitly_set)
            ):
                out[key] = value

        return out
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .builtinconnectors import BuiltInConnectors
from mistralai.client.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
    UnrecognizedStr,
)
from pydantic import model_serializer
from typing import Literal, Optional, Union
from typing_extensions import NotRequired, TypeAliasType, TypedDict


ToolReferenceChunkType = Literal["tool_reference",]


ToolReferenceChunkToolTypedDict = TypeAliasType(
    "ToolReferenceChunkToolTypedDict", Union[BuiltInConnectors, str]
)


ToolReferenceChunkTool = TypeAliasType(
    "ToolReferenceChunkTool", Union[BuiltInConnectors, str]
)


class ToolReferenceChunkTypedDict(TypedDict):
    """Plain-dict mirror of ``ToolReferenceChunk``."""

    tool: ToolReferenceChunkToolTypedDict
    title: str
    type: NotRequired[ToolReferenceChunkType]
    url: NotRequired[Nullable[str]]
    favicon: NotRequired[Nullable[str]]
    description: NotRequired[Nullable[str]]


class ToolReferenceChunk(BaseModel):
    """Content chunk citing an external reference produced by a tool."""

    tool: ToolReferenceChunkTool
    title: str
    type: Optional[ToolReferenceChunkType] = "tool_reference"
    url: OptionalNullable[str] = UNSET
    favicon: OptionalNullable[str] = UNSET
    description: OptionalNullable[str] = UNSET

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Re-filter pydantic's output so optional/nullable fields are only
        # emitted when they carry a meaningful (explicitly set) value.
        optional_fields = ["type", "url", "favicon", "description"]
        nullable_fields = ["url", "favicon", "description"]
        null_default_fields = []

        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.pop(key, None)

            is_optional_nullable = key in optional_fields and key in nullable_fields
            was_explicitly_set = (
                self.__pydantic_fields_set__.intersection({field_name})
                or key in null_default_fields
            )  # pylint: disable=no-member

            if value is not None and value != UNSET_SENTINEL:
                out[key] = value
            elif value != UNSET_SENTINEL and (
                key not in optional_fields
                or (is_optional_nullable and was_explicitly_set)
            ):
                out[key] = value

        return out


# Tool kind; UnrecognizedStr preserves forward compatibility.
ToolTypes = Union[Literal["function",], UnrecognizedStr]


class TrainingFileTypedDict(TypedDict):
    """Plain-dict mirror of ``TrainingFile``."""

    file_id: str
    weight: NotRequired[float]


class TrainingFile(BaseModel):
    """A training file reference with an optional sampling weight."""

    file_id: str
    weight: Optional[float] = 1
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .transcriptionsegmentchunk import (
    TranscriptionSegmentChunk,
    TranscriptionSegmentChunkTypedDict,
)
from .usageinfo import UsageInfo, UsageInfoTypedDict
from mistralai.client.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
import pydantic
from pydantic import ConfigDict, model_serializer
from typing import Any, Dict, List, Literal, Optional
from typing_extensions import NotRequired, TypedDict


class TranscriptionResponseTypedDict(TypedDict):
    """Plain-dict mirror of ``TranscriptionResponse``."""

    model: str
    text: str
    usage: UsageInfoTypedDict
    language: Nullable[str]
    segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]]


class TranscriptionResponse(BaseModel):
    """Full transcription result; extra server keys are kept (extra="allow")."""

    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
    )
    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)

    model: str
    text: str
    usage: UsageInfo
    language: Nullable[str]
    segments: Optional[List[TranscriptionSegmentChunk]] = None

    @property
    def additional_properties(self):
        """Extra keys captured beyond the declared fields."""
        return self.__pydantic_extra__

    @additional_properties.setter
    def additional_properties(self, value):
        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Re-filter pydantic's output so optional/nullable fields are only
        # emitted when meaningful; extra keys are passed through untouched.
        optional_fields = ["segments"]
        nullable_fields = ["language"]
        null_default_fields = []

        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.pop(key, None)

            is_optional_nullable = key in optional_fields and key in nullable_fields
            was_explicitly_set = (
                self.__pydantic_fields_set__.intersection({field_name})
                or key in null_default_fields
            )  # pylint: disable=no-member

            if value is not None and value != UNSET_SENTINEL:
                out[key] = value
            elif value != UNSET_SENTINEL and (
                key not in optional_fields
                or (is_optional_nullable and was_explicitly_set)
            ):
                out[key] = value

        out.update(serialized)
        return out


Type = Literal["transcription_segment",]


class TranscriptionSegmentChunkTypedDict(TypedDict):
    """Plain-dict mirror of ``TranscriptionSegmentChunk``."""

    text: str
    start: float
    end: float
    score: NotRequired[Nullable[float]]
    speaker_id: NotRequired[Nullable[str]]
    type: NotRequired[Type]


class TranscriptionSegmentChunk(BaseModel):
    """One time-bounded transcription segment; extra keys kept (extra="allow")."""

    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
    )
    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)

    text: str
    start: float
    end: float
    score: OptionalNullable[float] = UNSET
    speaker_id: OptionalNullable[str] = UNSET
    type: Optional[Type] = "transcription_segment"

    @property
    def additional_properties(self):
        """Extra keys captured beyond the declared fields."""
        return self.__pydantic_extra__

    @additional_properties.setter
    def additional_properties(self, value):
        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Same filtering as TranscriptionResponse.serialize_model above.
        optional_fields = ["score", "speaker_id", "type"]
        nullable_fields = ["score", "speaker_id"]
        null_default_fields = []

        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.pop(key, None)

            is_optional_nullable = key in optional_fields and key in nullable_fields
            was_explicitly_set = (
                self.__pydantic_fields_set__.intersection({field_name})
                or key in null_default_fields
            )  # pylint: disable=no-member

            if value is not None and value != UNSET_SENTINEL:
                out[key] = value
            elif value != UNSET_SENTINEL and (
                key not in optional_fields
                or (is_optional_nullable and was_explicitly_set)
            ):
                out[key] = value

        out.update(serialized)
        return out


TranscriptionStreamDoneType = Literal["transcription.done",]


class TranscriptionStreamDoneTypedDict(TypedDict):
    """Plain-dict mirror of ``TranscriptionStreamDone``."""

    model: str
    text: str
    usage: UsageInfoTypedDict
    language: Nullable[str]
    segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]]
    type: NotRequired[TranscriptionStreamDoneType]


class TranscriptionStreamDone(BaseModel):
    """Terminal streaming event with the finished transcription."""

    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
    )
    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)

    model: str
    text: str
    usage: UsageInfo
    language: Nullable[str]
    segments: Optional[List[TranscriptionSegmentChunk]] = None
    type: Optional[TranscriptionStreamDoneType] = "transcription.done"

    @property
    def additional_properties(self):
        """Extra keys captured beyond the declared fields."""
        return self.__pydantic_extra__

    @additional_properties.setter
    def additional_properties(self, value):
        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Same filtering as TranscriptionResponse.serialize_model above.
        optional_fields = ["segments", "type"]
        nullable_fields = ["language"]
        null_default_fields = []

        serialized = handler(self)
        out = {}

        for field_name, field_info in type(self).model_fields.items():
            key = field_info.alias or field_name
            value = serialized.pop(key, None)

            is_optional_nullable = key in optional_fields and key in nullable_fields
            was_explicitly_set = (
                self.__pydantic_fields_set__.intersection({field_name})
                or key in null_default_fields
            )  # pylint: disable=no-member

            if value is not None and value != UNSET_SENTINEL:
                out[key] = value
            elif value != UNSET_SENTINEL and (
                key not in optional_fields
                or (is_optional_nullable and was_explicitly_set)
            ):
                out[key] = value

        out.update(serialized)
        return out
"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""

from __future__ import annotations
from .transcriptionstreamdone import (
    TranscriptionStreamDone,
    TranscriptionStreamDoneTypedDict,
)
from .transcriptionstreameventtypes import TranscriptionStreamEventTypes
from .transcriptionstreamlanguage import (
    TranscriptionStreamLanguage,
    TranscriptionStreamLanguageTypedDict,
)
from .transcriptionstreamsegmentdelta import (
    TranscriptionStreamSegmentDelta,
    TranscriptionStreamSegmentDeltaTypedDict,
)
from mistralai.client.types import BaseModel
from mistralai.client.utils import get_discriminator
import pydantic
from pydantic import ConfigDict, Discriminator, Tag
from typing import Any, Dict, Literal, Optional, Union
from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict


TranscriptionStreamEventsDataTypedDict = TypeAliasType(
    "TranscriptionStreamEventsDataTypedDict",
    Union[
        TranscriptionStreamTextDeltaTypedDict,
        TranscriptionStreamLanguageTypedDict,
        TranscriptionStreamSegmentDeltaTypedDict,
        TranscriptionStreamDoneTypedDict,
    ],
)


# Tagged union discriminated on each payload's "type" field.
TranscriptionStreamEventsData = Annotated[
    Union[
        Annotated[TranscriptionStreamDone, Tag("transcription.done")],
        Annotated[TranscriptionStreamLanguage, Tag("transcription.language")],
        Annotated[TranscriptionStreamSegmentDelta, Tag("transcription.segment")],
        Annotated[TranscriptionStreamTextDelta, Tag("transcription.text.delta")],
    ],
    Discriminator(lambda m: get_discriminator(m, "type", "type")),
]


class TranscriptionStreamEventsTypedDict(TypedDict):
    """Plain-dict mirror of ``TranscriptionStreamEvents``."""

    event: TranscriptionStreamEventTypes
    data: TranscriptionStreamEventsDataTypedDict


class TranscriptionStreamEvents(BaseModel):
    """Envelope pairing a stream event type with its typed payload."""

    event: TranscriptionStreamEventTypes
    data: TranscriptionStreamEventsData


TranscriptionStreamEventTypes = Literal[
    "transcription.language",
    "transcription.segment",
    "transcription.text.delta",
    "transcription.done",
]


TranscriptionStreamLanguageType = Literal["transcription.language",]


class TranscriptionStreamLanguageTypedDict(TypedDict):
    """Plain-dict mirror of ``TranscriptionStreamLanguage``."""

    audio_language: str
    type: NotRequired[TranscriptionStreamLanguageType]


class TranscriptionStreamLanguage(BaseModel):
    """Streaming event carrying the detected audio language."""

    model_config = ConfigDict(
        populate_by_name=True, arbitrary_types_allowed=True, extra="allow"
    )
    __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False)

    audio_language: str
    type: Optional[TranscriptionStreamLanguageType] = "transcription.language"

    @property
    def additional_properties(self):
        """Extra keys captured beyond the declared fields."""
        return self.__pydantic_extra__

    @additional_properties.setter
    def additional_properties(self, value):
        self.__pydantic_extra__ = value  # pyright: ignore[reportIncompatibleVariableOverride]
100644 index 00000000..7cfffb63 --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamSegmentDeltaType = Literal["transcription.segment",] + + +class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): + text: str + start: float + end: float + speaker_id: NotRequired[Nullable[str]] + type: NotRequired[TranscriptionStreamSegmentDeltaType] + + +class TranscriptionStreamSegmentDelta(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + start: float + + end: float + + speaker_id: OptionalNullable[str] = UNSET + + type: Optional[TranscriptionStreamSegmentDeltaType] = "transcription.segment" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["speaker_id", "type"] + nullable_fields = ["speaker_id"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # 
pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff --git a/src/mistralai/client/models/transcriptionstreamtextdelta.py b/src/mistralai/client/models/transcriptionstreamtextdelta.py new file mode 100644 index 00000000..ce279cf6 --- /dev/null +++ b/src/mistralai/client/models/transcriptionstreamtextdelta.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +import pydantic +from pydantic import ConfigDict +from typing import Any, Dict, Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +TranscriptionStreamTextDeltaType = Literal["transcription.text.delta",] + + +class TranscriptionStreamTextDeltaTypedDict(TypedDict): + text: str + type: NotRequired[TranscriptionStreamTextDeltaType] + + +class TranscriptionStreamTextDelta(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + text: str + + type: Optional[TranscriptionStreamTextDeltaType] = "transcription.text.delta" + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] diff --git a/src/mistralai/client/models/unarchiveftmodelout.py b/src/mistralai/client/models/unarchiveftmodelout.py new file mode 100644 index 00000000..511c390b --- /dev/null +++ b/src/mistralai/client/models/unarchiveftmodelout.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +UnarchiveFTModelOutObject = Literal["model",] + + +class UnarchiveFTModelOutTypedDict(TypedDict): + id: str + object: NotRequired[UnarchiveFTModelOutObject] + archived: NotRequired[bool] + + +class UnarchiveFTModelOut(BaseModel): + id: str + + object: Optional[UnarchiveFTModelOutObject] = "model" + + archived: Optional[bool] = False diff --git a/src/mistralai/client/models/updateftmodelin.py b/src/mistralai/client/models/updateftmodelin.py new file mode 100644 index 00000000..0471a154 --- /dev/null +++ b/src/mistralai/client/models/updateftmodelin.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class UpdateFTModelInTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class UpdateFTModelIn(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["name", "description"] + nullable_fields = ["name", "description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if 
val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/uploadfileout.py b/src/mistralai/client/models/uploadfileout.py new file mode 100644 index 00000000..55e56504 --- /dev/null +++ b/src/mistralai/client/models/uploadfileout.py @@ -0,0 +1,94 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .filepurpose import FilePurpose +from .sampletype import SampleType +from .source import Source +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import model_serializer +from typing_extensions import Annotated, NotRequired, TypedDict + + +class UploadFileOutTypedDict(TypedDict): + id: str + r"""The unique identifier of the file.""" + object: str + r"""The object type, which is always \"file\".""" + size_bytes: int + r"""The size of the file, in bytes.""" + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + filename: str + r"""The name of the uploaded file.""" + purpose: FilePurpose + sample_type: SampleType + source: Source + num_lines: NotRequired[Nullable[int]] + mimetype: NotRequired[Nullable[str]] + signature: NotRequired[Nullable[str]] + + +class UploadFileOut(BaseModel): + id: str + r"""The unique identifier of the file.""" + + object: str + r"""The object type, which is always \"file\".""" + + size_bytes: Annotated[int, pydantic.Field(alias="bytes")] + r"""The size of the file, in bytes.""" + + created_at: int + r"""The UNIX timestamp (in seconds) of the event.""" + + filename: str + r"""The name of the uploaded file.""" + + purpose: FilePurpose + + sample_type: SampleType + + source: Source + + num_lines: OptionalNullable[int] = UNSET + + mimetype: OptionalNullable[str] = 
UNSET + + signature: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["num_lines", "mimetype", "signature"] + nullable_fields = ["num_lines", "mimetype", "signature"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/usageinfo.py b/src/mistralai/client/models/usageinfo.py new file mode 100644 index 00000000..f1186d97 --- /dev/null +++ b/src/mistralai/client/models/usageinfo.py @@ -0,0 +1,82 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +import pydantic +from pydantic import ConfigDict, model_serializer +from typing import Any, Dict, Optional +from typing_extensions import NotRequired, TypedDict + + +class UsageInfoTypedDict(TypedDict): + prompt_tokens: NotRequired[int] + completion_tokens: NotRequired[int] + total_tokens: NotRequired[int] + prompt_audio_seconds: NotRequired[Nullable[int]] + + +class UsageInfo(BaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, extra="allow" + ) + __pydantic_extra__: Dict[str, Any] = pydantic.Field(init=False) + + prompt_tokens: Optional[int] = 0 + + completion_tokens: Optional[int] = 0 + + total_tokens: Optional[int] = 0 + + prompt_audio_seconds: OptionalNullable[int] = UNSET + + @property + def additional_properties(self): + return self.__pydantic_extra__ + + @additional_properties.setter + def additional_properties(self, value): + self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + nullable_fields = ["prompt_audio_seconds"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + for k, v in serialized.items(): + m[k] = v + + return m diff 
--git a/src/mistralai/client/models/usermessage.py b/src/mistralai/client/models/usermessage.py new file mode 100644 index 00000000..8d92cea8 --- /dev/null +++ b/src/mistralai/client/models/usermessage.py @@ -0,0 +1,60 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .contentchunk import ContentChunk, ContentChunkTypedDict +from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from pydantic import model_serializer +from typing import List, Literal, Optional, Union +from typing_extensions import NotRequired, TypeAliasType, TypedDict + + +UserMessageContentTypedDict = TypeAliasType( + "UserMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] +) + + +UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) + + +UserMessageRole = Literal["user",] + + +class UserMessageTypedDict(TypedDict): + content: Nullable[UserMessageContentTypedDict] + role: NotRequired[UserMessageRole] + + +class UserMessage(BaseModel): + content: Nullable[UserMessageContent] + + role: Optional[UserMessageRole] = "user" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["role"] + nullable_fields = ["content"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/validationerror.py b/src/mistralai/client/models/validationerror.py new 
file mode 100644 index 00000000..352409be --- /dev/null +++ b/src/mistralai/client/models/validationerror.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import List, Union +from typing_extensions import TypeAliasType, TypedDict + + +LocTypedDict = TypeAliasType("LocTypedDict", Union[str, int]) + + +Loc = TypeAliasType("Loc", Union[str, int]) + + +class ValidationErrorTypedDict(TypedDict): + loc: List[LocTypedDict] + msg: str + type: str + + +class ValidationError(BaseModel): + loc: List[Loc] + + msg: str + + type: str diff --git a/src/mistralai/client/models/wandbintegration.py b/src/mistralai/client/models/wandbintegration.py new file mode 100644 index 00000000..89489fb4 --- /dev/null +++ b/src/mistralai/client/models/wandbintegration.py @@ -0,0 +1,72 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WandbIntegrationType = Literal["wandb",] + + +class WandbIntegrationTypedDict(TypedDict): + project: str + r"""The name of the project that the new run will be created under.""" + api_key: str + r"""The WandB API key to use for authentication.""" + type: NotRequired[WandbIntegrationType] + name: NotRequired[Nullable[str]] + r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" + run_name: NotRequired[Nullable[str]] + + +class WandbIntegration(BaseModel): + project: str + r"""The name of the project that the new run will be created under.""" + + api_key: str + r"""The WandB API key to use for authentication.""" + + type: Optional[WandbIntegrationType] = "wandb" + + name: OptionalNullable[str] = UNSET + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + + run_name: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "name", "run_name"] + nullable_fields = ["name", "run_name"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/wandbintegrationout.py b/src/mistralai/client/models/wandbintegrationout.py new file mode 100644 index 00000000..a7f9afeb --- /dev/null +++ b/src/mistralai/client/models/wandbintegrationout.py @@ -0,0 +1,70 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WandbIntegrationOutType = Literal["wandb",] + + +class WandbIntegrationOutTypedDict(TypedDict): + project: str + r"""The name of the project that the new run will be created under.""" + type: NotRequired[WandbIntegrationOutType] + name: NotRequired[Nullable[str]] + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + run_name: NotRequired[Nullable[str]] + url: NotRequired[Nullable[str]] + + +class WandbIntegrationOut(BaseModel): + project: str + r"""The name of the project that the new run will be created under.""" + + type: Optional[WandbIntegrationOutType] = "wandb" + + name: OptionalNullable[str] = UNSET + r"""A display name to set for the run. If not set, will use the job ID as the name.""" + + run_name: OptionalNullable[str] = UNSET + + url: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "name", "run_name", "url"] + nullable_fields = ["name", "run_name", "url"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/websearchpremiumtool.py b/src/mistralai/client/models/websearchpremiumtool.py new 
file mode 100644 index 00000000..8d2d4b5d --- /dev/null +++ b/src/mistralai/client/models/websearchpremiumtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WebSearchPremiumToolType = Literal["web_search_premium",] + + +class WebSearchPremiumToolTypedDict(TypedDict): + type: NotRequired[WebSearchPremiumToolType] + + +class WebSearchPremiumTool(BaseModel): + type: Optional[WebSearchPremiumToolType] = "web_search_premium" diff --git a/src/mistralai/client/models/websearchtool.py b/src/mistralai/client/models/websearchtool.py new file mode 100644 index 00000000..ba4cc09f --- /dev/null +++ b/src/mistralai/client/models/websearchtool.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal, Optional +from typing_extensions import NotRequired, TypedDict + + +WebSearchToolType = Literal["web_search",] + + +class WebSearchToolTypedDict(TypedDict): + type: NotRequired[WebSearchToolType] + + +class WebSearchTool(BaseModel): + type: Optional[WebSearchToolType] = "web_search" diff --git a/src/mistralai/client/models_.py b/src/mistralai/client/models_.py new file mode 100644 index 00000000..5ef9da09 --- /dev/null +++ b/src/mistralai/client/models_.py @@ -0,0 +1,1063 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, Mapping, Optional + + +class Models(BaseSDK): + r"""Model Management API""" + + def list( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModelList: + r"""List Models + + List all models available to the user. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request( + method="GET", + path="/v1/models", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="list_models_v1_models_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModelList, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def list_async( + self, + *, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = 
None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ModelList: + r"""List Models + + List all models available to the user. + + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + req = self._build_request_async( + method="GET", + path="/v1/models", + base_url=base_url, + url_variables=url_variables, + request=None, + request_body_required=False, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="list_models_v1_models_get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ModelList, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def retrieve( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: + r"""Retrieve Model + + Retrieve information about a model. + + :param model_id: The ID of the model to retrieve. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.RetrieveModelV1ModelsModelIDGetRequest( + model_id=model_id, + ) + + req = self._build_request( + method="GET", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="retrieve_model_v1_models__model_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + http_res, + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def retrieve_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: + r"""Retrieve Model + + Retrieve information about a model. + + :param model_id: The ID of the model to retrieve. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.RetrieveModelV1ModelsModelIDGetRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="GET", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="retrieve_model_v1_models__model_id__get", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, + http_res, + ) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def delete( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DeleteModelOut: + r"""Delete Model + + Delete a fine-tuned model. + + :param model_id: The ID of the model to delete. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteModelV1ModelsModelIDDeleteRequest( + model_id=model_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", 
"500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_model_v1_models__model_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DeleteModelOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.DeleteModelOut: + r"""Delete Model + + Delete a fine-tuned model. + + :param model_id: The ID of the model to delete. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteModelV1ModelsModelIDDeleteRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="delete_model_v1_models__model_id__delete", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.DeleteModelOut, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, 
http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def update( + self, + *, + model_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: + r"""Update Fine Tuned Model + + Update a model name or description. + + :param model_id: The ID of the model to update. + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + model_id=model_id, + update_ft_model_in=models.UpdateFTModelIn( + name=name, + description=description, + ), + ) + + req = self._build_request( + method="PATCH", + path="/v1/fine_tuning/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error 
occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def update_async( + self, + *, + model_id: str, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: + r"""Update Fine Tuned Model + + Update a model name or description. + + :param model_id: The ID of the model to update. + :param name: + :param description: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + model_id=model_id, + update_ft_model_in=models.UpdateFTModelIn( + name=name, + description=description, + ), + ) + + req = self._build_request_async( + method="PATCH", + path="/v1/fine_tuning/models/{model_id}", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise 
models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def archive( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ArchiveFTModelOut: + r"""Archive Fine Tuned Model + + Archive a fine-tuned model. + + :param model_id: The ID of the model to archive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request( + method="POST", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ArchiveFTModelOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", 
http_res) + + async def archive_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.ArchiveFTModelOut: + r"""Archive Fine Tuned Model + + Archive a fine-tuned model. + + :param model_id: The ID of the model to archive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="POST", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + 
oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.ArchiveFTModelOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def unarchive( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UnarchiveFTModelOut: + r"""Unarchive Fine Tuned Model + + Un-archive a fine-tuned model. + + :param model_id: The ID of the model to unarchive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request( + method="DELETE", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response 
received", http_res) + + async def unarchive_async( + self, + *, + model_id: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.UnarchiveFTModelOut: + r"""Unarchive Fine Tuned Model + + Un-archive a fine-tuned model. + + :param model_id: The ID of the model to unarchive. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + model_id=model_id, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/fine_tuning/models/{model_id}/archive", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + 
operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/ocr.py b/src/mistralai/client/ocr.py new file mode 100644 index 00000000..ce7e2126 --- /dev/null +++ b/src/mistralai/client/ocr.py @@ -0,0 +1,303 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + ocrrequest as models_ocrrequest, + responseformat as models_responseformat, +) +from mistralai.client.types import Nullable, OptionalNullable, UNSET +from mistralai.client.utils import get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import Any, List, Mapping, Optional, Union + + +class Ocr(BaseSDK): + r"""OCR API""" + + def process( + self, + *, + model: Nullable[str], + document: Union[ + models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict + ], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, + table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. + :param table_format: + :param extract_header: + :param extract_footer: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.Document), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_prompt=document_annotation_prompt, + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, + ) + + req = self._build_request( + method="POST", + path="/v1/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + 
error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.OCRResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def process_async( + self, + *, + model: Nullable[str], + document: Union[ + models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict + ], + id: Optional[str] = None, + pages: OptionalNullable[List[int]] = UNSET, + include_image_base64: OptionalNullable[bool] = UNSET, + image_limit: OptionalNullable[int] = UNSET, + image_min_size: OptionalNullable[int] = UNSET, + bbox_annotation_format: OptionalNullable[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = UNSET, + document_annotation_format: OptionalNullable[ + Union[ + models_responseformat.ResponseFormat, + models_responseformat.ResponseFormatTypedDict, + ] + ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, + table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, + extract_header: Optional[bool] = None, + extract_footer: Optional[bool] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.OCRResponse: + r"""OCR + + :param 
model: + :param document: Document to run OCR on + :param id: + :param pages: Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 + :param include_image_base64: Include image URLs in response + :param image_limit: Max images to extract + :param image_min_size: Minimum height and width of image to extract + :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field + :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. + :param table_format: + :param extract_header: + :param extract_footer: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.OCRRequest( + model=model, + id=id, + document=utils.get_pydantic_model(document, models.Document), + pages=pages, + include_image_base64=include_image_base64, + image_limit=image_limit, + image_min_size=image_min_size, + bbox_annotation_format=utils.get_pydantic_model( + bbox_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_format=utils.get_pydantic_model( + document_annotation_format, OptionalNullable[models.ResponseFormat] + ), + document_annotation_prompt=document_annotation_prompt, + table_format=table_format, + extract_header=extract_header, + extract_footer=extract_footer, + ) + + req = self._build_request_async( + method="POST", + path="/v1/ocr", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "json", models.OCRRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="ocr_v1_ocr_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + 
request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.OCRResponse, http_res) + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/py.typed b/src/mistralai/client/py.typed new file mode 100644 index 00000000..3e38f1a9 --- /dev/null +++ b/src/mistralai/client/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/src/mistralai/client/sdk.py b/src/mistralai/client/sdk.py new file mode 100644 index 00000000..99579400 --- /dev/null +++ b/src/mistralai/client/sdk.py @@ -0,0 +1,222 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, get_default_logger +from .utils.retries import RetryConfig +import httpx +import importlib +from mistralai.client import models, utils +from mistralai.client._hooks import SDKHooks +from mistralai.client.types import OptionalNullable, UNSET +import sys +from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast +import weakref + +if TYPE_CHECKING: + from mistralai.client.agents import Agents + from mistralai.client.audio import Audio + from mistralai.client.batch import Batch + from mistralai.client.beta import Beta + from mistralai.client.chat import Chat + from mistralai.client.classifiers import Classifiers + from mistralai.client.embeddings import Embeddings + from mistralai.client.files import Files + from mistralai.client.fim import Fim + from mistralai.client.fine_tuning import FineTuning + from mistralai.client.models_ import Models + from mistralai.client.ocr import Ocr + + +class Mistral(BaseSDK): + r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + + models: "Models" + r"""Model Management API""" + beta: "Beta" + files: "Files" + r"""Files API""" + fine_tuning: "FineTuning" + batch: "Batch" + chat: "Chat" + r"""Chat Completion API.""" + fim: "Fim" + r"""Fill-in-the-middle API.""" + agents: "Agents" + r"""Agents API.""" + embeddings: "Embeddings" + r"""Embeddings API.""" + classifiers: "Classifiers" + r"""Classifiers API.""" + ocr: "Ocr" + r"""OCR API""" + audio: "Audio" + _sub_sdk_map = { + "models": ("mistralai.client.models_", "Models"), + "beta": ("mistralai.client.beta", "Beta"), + "files": ("mistralai.client.files", "Files"), + "fine_tuning": ("mistralai.client.fine_tuning", "FineTuning"), + "batch": ("mistralai.client.batch", "Batch"), + "chat": ("mistralai.client.chat", "Chat"), + "fim": ("mistralai.client.fim", "Fim"), + "agents": ("mistralai.client.agents", "Agents"), + "embeddings": ("mistralai.client.embeddings", "Embeddings"), + "classifiers": ("mistralai.client.classifiers", "Classifiers"), + "ocr": ("mistralai.client.ocr", "Ocr"), + "audio": ("mistralai.client.audio", "Audio"), + } + + def __init__( + self, + api_key: Optional[Union[Optional[str], Callable[[], Optional[str]]]] = None, + server: Optional[str] = None, + server_url: Optional[str] = None, + url_params: Optional[Dict[str, str]] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, + debug_logger: Optional[Logger] = None, + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters. 
+ + :param api_key: The api_key required for authentication + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods + :param url_params: Parameters to optionally template the server URL with + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds + """ + client_supplied = True + if client is None: + client = httpx.Client(follow_redirects=True) + client_supplied = False + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." + + async_client_supplied = True + if async_client is None: + async_client = httpx.AsyncClient(follow_redirects=True) + async_client_supplied = False + + if debug_logger is None: + debug_logger = get_default_logger() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol." 
+ + security: Any = None + if callable(api_key): + # pylint: disable=unnecessary-lambda-assignment + security = lambda: models.Security(api_key=api_key()) + else: + security = models.Security(api_key=api_key) + + if server_url is not None: + if url_params is not None: + server_url = utils.template_url(server_url, url_params) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + client_supplied=client_supplied, + async_client=async_client, + async_client_supplied=async_client_supplied, + security=security, + server_url=server_url, + server=server, + retry_config=retry_config, + timeout_ms=timeout_ms, + debug_logger=debug_logger, + ), + parent_ref=self, + ) + + hooks = SDKHooks() + + # pylint: disable=protected-access + self.sdk_configuration.__dict__["_hooks"] = hooks + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, client + ) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + + def dynamic_import(self, modname, retries=3): + for attempt in range(retries): + try: + return importlib.import_module(modname) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + def __getattr__(self, name: str): + if name in self._sub_sdk_map: + module_path, class_name = self._sub_sdk_map[name] + try: + module = self.dynamic_import(module_path) + klass = getattr(module, class_name) + instance = klass(self.sdk_configuration, parent_ref=self) + setattr(self, name, instance) + return instance + except ImportError 
as e: + raise AttributeError( + f"Failed to import module {module_path} for attribute {name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" + ) from e + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'" + ) + + def __dir__(self): + default_attrs = list(super().__dir__()) + lazy_attrs = list(self._sub_sdk_map.keys()) + return sorted(list(set(default_attrs + lazy_attrs))) + + def __enter__(self): + return self + + async def __aenter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + + async def __aexit__(self, exc_type, exc_val, exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/src/mistralai/client/sdkconfiguration.py b/src/mistralai/client/sdkconfiguration.py new file mode 100644 index 00000000..df50d16f --- /dev/null +++ b/src/mistralai/client/sdkconfiguration.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from ._version import ( + __gen_version__, + __openapi_doc_version__, + __user_agent__, + __version__, +) +from .httpclient import AsyncHttpClient, HttpClient +from .utils import Logger, RetryConfig, remove_suffix +from dataclasses import dataclass +from mistralai.client import models +from mistralai.client.types import OptionalNullable, UNSET +from pydantic import Field +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVER_EU = "eu" +r"""EU Production server""" +SERVERS = { + SERVER_EU: "https://api.mistral.ai", +} +"""Contains the list of servers available to the SDK""" + + +@dataclass +class SDKConfiguration: + client: Union[HttpClient, None] + client_supplied: bool + async_client: Union[AsyncHttpClient, None] + async_client_supplied: bool + debug_logger: Logger + security: Optional[Union[models.Security, Callable[[], models.Security]]] = None + server_url: Optional[str] = "" + server: Optional[str] = "" + language: str = "python" + openapi_doc_version: str = __openapi_doc_version__ + sdk_version: str = __version__ + gen_version: str = __gen_version__ + user_agent: str = __user_agent__ + retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET) + timeout_ms: Optional[int] = None + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url: + return remove_suffix(self.server_url, "/"), {} + if not self.server: + self.server = SERVER_EU + + if self.server not in SERVERS: + raise ValueError(f'Invalid server "{self.server}"') + + return SERVERS[self.server], {} diff --git a/src/mistralai/client/transcriptions.py b/src/mistralai/client/transcriptions.py new file mode 100644 index 00000000..45501024 --- /dev/null +++ b/src/mistralai/client/transcriptions.py @@ -0,0 +1,481 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .basesdk import BaseSDK +from mistralai.client import models, utils +from mistralai.client._hooks import HookContext +from mistralai.client.models import ( + file as models_file, + timestampgranularity as models_timestampgranularity, +) +from mistralai.client.types import OptionalNullable, UNSET +from mistralai.client.utils import eventstreaming, get_security_from_env +from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response +from typing import List, Mapping, Optional, Union + + +class Transcriptions(BaseSDK): + r"""API for audio transcription.""" + + def complete( + self, + *, + model: str, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.TranscriptionResponse: + r"""Create Transcription + + :param model: ID of the model to be used. + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequest( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request( + method="POST", + path="/v1/audio/transcriptions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "multipart", models.AudioTranscriptionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, 
models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.TranscriptionResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def complete_async( + self, + *, + model: str, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> models.TranscriptionResponse: + r"""Create Transcription + + :param model: ID of the model to be used. + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequest( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request_async( + method="POST", + path="/v1/audio/transcriptions", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, False, False, "multipart", models.AudioTranscriptionRequest + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post", + oauth2_scopes=None, + security_source=get_security_from_env( + 
self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "application/json"): + return unmarshal_json_response(models.TranscriptionResponse, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + def stream( + self, + *, + model: str, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStream[models.TranscriptionStreamEvents]: + r"""Create Streaming Transcription (SSE) + + :param model: + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. + :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. 
+ :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequestStream( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request( + method="POST", + path="/v1/audio/transcriptions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.AudioTranscriptionRequestStream, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + 
self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStream( + http_res, + lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), + client_ref=self, + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) + + async def stream_async( + self, + *, + model: str, + file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file_url: OptionalNullable[str] = UNSET, + file_id: OptionalNullable[str] = UNSET, + language: OptionalNullable[str] = UNSET, + temperature: OptionalNullable[float] = UNSET, + diarize: Optional[bool] = False, + context_bias: Optional[List[str]] = None, + timestamp_granularities: Optional[ + List[models_timestampgranularity.TimestampGranularity] + ] = None, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> eventstreaming.EventStreamAsync[models.TranscriptionStreamEvents]: + r"""Create Streaming Transcription (SSE) + + :param model: + :param file: + :param file_url: Url of a file to be transcribed + :param file_id: ID of a file uploaded to /v1/files + :param language: Language of the audio, e.g. 'en'. Providing the language can boost accuracy. 
+ :param temperature: + :param diarize: + :param context_bias: + :param timestamp_granularities: Granularities of timestamps to include in the response. + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.AudioTranscriptionRequestStream( + model=model, + file=utils.get_pydantic_model(file, Optional[models.File]), + file_url=file_url, + file_id=file_id, + language=language, + temperature=temperature, + diarize=diarize, + context_bias=context_bias, + timestamp_granularities=timestamp_granularities, + ) + + req = self._build_request_async( + method="POST", + path="/v1/audio/transcriptions#stream", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=True, + request_has_path_params=False, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="text/event-stream", + http_headers=http_headers, + security=self.sdk_configuration.security, + get_serialized_body=lambda: utils.serialize_request_body( + request, + False, + False, + "multipart", + models.AudioTranscriptionRequestStream, + ), + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="audio_api_v1_transcriptions_post_stream", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["4XX", "5XX"], + stream=True, + retry_config=retry_config, + ) + + if utils.match_response(http_res, "200", "text/event-stream"): + return eventstreaming.EventStreamAsync( + http_res, + lambda raw: utils.unmarshal_json(raw, models.TranscriptionStreamEvents), + client_ref=self, + ) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/types/__init__.py b/src/mistralai/client/types/__init__.py new file mode 100644 index 00000000..fc76fe0c --- /dev/null +++ b/src/mistralai/client/types/__init__.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basemodel import ( + BaseModel, + Nullable, + OptionalNullable, + UnrecognizedInt, + UnrecognizedStr, + UNSET, + UNSET_SENTINEL, +) + +__all__ = [ + "BaseModel", + "Nullable", + "OptionalNullable", + "UnrecognizedInt", + "UnrecognizedStr", + "UNSET", + "UNSET_SENTINEL", +] diff --git a/src/mistralai/client/types/basemodel.py b/src/mistralai/client/types/basemodel.py new file mode 100644 index 00000000..a9a640a1 --- /dev/null +++ b/src/mistralai/client/types/basemodel.py @@ -0,0 +1,77 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from pydantic import ConfigDict, model_serializer +from pydantic import BaseModel as PydanticBaseModel +from pydantic_core import core_schema +from typing import TYPE_CHECKING, Any, Literal, Optional, TypeVar, Union +from typing_extensions import TypeAliasType, TypeAlias + + +class BaseModel(PydanticBaseModel): + model_config = ConfigDict( + populate_by_name=True, arbitrary_types_allowed=True, protected_namespaces=() + ) + + +class Unset(BaseModel): + @model_serializer(mode="plain") + def serialize_model(self): + return UNSET_SENTINEL + + def __bool__(self) -> Literal[False]: + return False + + +UNSET = Unset() +UNSET_SENTINEL = "~?~unset~?~sentinel~?~" + + +T = TypeVar("T") +if TYPE_CHECKING: + Nullable: TypeAlias = Union[T, None] + OptionalNullable: TypeAlias = Union[Optional[Nullable[T]], Unset] +else: + Nullable = TypeAliasType("Nullable", Union[T, None], type_params=(T,)) + OptionalNullable = TypeAliasType( + "OptionalNullable", Union[Optional[Nullable[T]], Unset], type_params=(T,) + ) + + +class UnrecognizedStr(str): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> core_schema.CoreSchema: + # Make UnrecognizedStr only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedStr': + if isinstance(v, cls): + return v + return cls(str(v)) + + # Use lax_or_strict_schema where strict always fails + # This forces Pydantic to prefer other union members in strict mode + # and only fall back to UnrecognizedStr in lax mode + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.str_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) + + +class UnrecognizedInt(int): + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> 
core_schema.CoreSchema: + # Make UnrecognizedInt only work in lax mode, not strict mode + # This makes it a "fallback" option when more specific types (like Literals) don't match + def validate_lax(v: Any) -> 'UnrecognizedInt': + if isinstance(v, cls): + return v + return cls(int(v)) + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema([ + core_schema.int_schema(), + core_schema.no_info_plain_validator_function(validate_lax) + ]), + strict_schema=core_schema.none_schema(), # Always fails in strict mode + ) diff --git a/src/mistralai/client/utils/__init__.py b/src/mistralai/client/utils/__init__.py new file mode 100644 index 00000000..f9c2edce --- /dev/null +++ b/src/mistralai/client/utils/__init__.py @@ -0,0 +1,197 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import TYPE_CHECKING +from importlib import import_module +import builtins +import sys + +if TYPE_CHECKING: + from .annotations import get_discriminator + from .datetimes import parse_datetime + from .enums import OpenEnumMeta + from .headers import get_headers, get_response_headers + from .metadata import ( + FieldMetadata, + find_metadata, + FormMetadata, + HeaderMetadata, + MultipartFormMetadata, + PathParamMetadata, + QueryParamMetadata, + RequestMetadata, + SecurityMetadata, + ) + from .queryparams import get_query_params + from .retries import BackoffStrategy, Retries, retry, retry_async, RetryConfig + from .requestbodies import serialize_request_body, SerializedRequestBody + from .security import get_security, get_security_from_env + + from .serializers import ( + get_pydantic_model, + marshal_json, + unmarshal, + unmarshal_json, + serialize_decimal, + serialize_float, + serialize_int, + stream_to_text, + stream_to_text_async, + stream_to_bytes, + stream_to_bytes_async, + validate_const, + validate_decimal, + validate_float, + validate_int, + ) + from .url import generate_url, 
template_url, remove_suffix + from .values import ( + get_global_from_env, + match_content_type, + match_status_codes, + match_response, + cast_partial, + ) + from .logger import Logger, get_body_content, get_default_logger + +__all__ = [ + "BackoffStrategy", + "FieldMetadata", + "find_metadata", + "FormMetadata", + "generate_url", + "get_body_content", + "get_default_logger", + "get_discriminator", + "parse_datetime", + "get_global_from_env", + "get_headers", + "get_pydantic_model", + "get_query_params", + "get_response_headers", + "get_security", + "get_security_from_env", + "HeaderMetadata", + "Logger", + "marshal_json", + "match_content_type", + "match_status_codes", + "match_response", + "MultipartFormMetadata", + "OpenEnumMeta", + "PathParamMetadata", + "QueryParamMetadata", + "remove_suffix", + "Retries", + "retry", + "retry_async", + "RetryConfig", + "RequestMetadata", + "SecurityMetadata", + "serialize_decimal", + "serialize_float", + "serialize_int", + "serialize_request_body", + "SerializedRequestBody", + "stream_to_text", + "stream_to_text_async", + "stream_to_bytes", + "stream_to_bytes_async", + "template_url", + "unmarshal", + "unmarshal_json", + "validate_decimal", + "validate_const", + "validate_float", + "validate_int", + "cast_partial", +] + +_dynamic_imports: dict[str, str] = { + "BackoffStrategy": ".retries", + "FieldMetadata": ".metadata", + "find_metadata": ".metadata", + "FormMetadata": ".metadata", + "generate_url": ".url", + "get_body_content": ".logger", + "get_default_logger": ".logger", + "get_discriminator": ".annotations", + "parse_datetime": ".datetimes", + "get_global_from_env": ".values", + "get_headers": ".headers", + "get_pydantic_model": ".serializers", + "get_query_params": ".queryparams", + "get_response_headers": ".headers", + "get_security": ".security", + "get_security_from_env": ".security", + "HeaderMetadata": ".metadata", + "Logger": ".logger", + "marshal_json": ".serializers", + "match_content_type": ".values", + 
"match_status_codes": ".values", + "match_response": ".values", + "MultipartFormMetadata": ".metadata", + "OpenEnumMeta": ".enums", + "PathParamMetadata": ".metadata", + "QueryParamMetadata": ".metadata", + "remove_suffix": ".url", + "Retries": ".retries", + "retry": ".retries", + "retry_async": ".retries", + "RetryConfig": ".retries", + "RequestMetadata": ".metadata", + "SecurityMetadata": ".metadata", + "serialize_decimal": ".serializers", + "serialize_float": ".serializers", + "serialize_int": ".serializers", + "serialize_request_body": ".requestbodies", + "SerializedRequestBody": ".requestbodies", + "stream_to_text": ".serializers", + "stream_to_text_async": ".serializers", + "stream_to_bytes": ".serializers", + "stream_to_bytes_async": ".serializers", + "template_url": ".url", + "unmarshal": ".serializers", + "unmarshal_json": ".serializers", + "validate_decimal": ".serializers", + "validate_const": ".serializers", + "validate_float": ".serializers", + "validate_int": ".serializers", + "cast_partial": ".values", +} + + +def dynamic_import(modname, retries=3): + for attempt in range(retries): + try: + return import_module(modname, __package__) + except KeyError: + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def __getattr__(attr_name: str) -> object: + module_name = _dynamic_imports.get(attr_name) + if module_name is None: + raise AttributeError( + f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " + ) + + try: + module = dynamic_import(module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + +def __dir__(): + lazy_attrs = 
def get_discriminator(model: Any, fieldname: str, key: str) -> str:
    """
    Recursively search for the discriminator attribute in a model.

    Args:
        model (Any): The model to search within.
        fieldname (str): The name of the field to search for.
        key (str): The key to search for in dictionaries.

    Returns:
        str: The name of the discriminator attribute.

    Raises:
        ValueError: If the discriminator attribute is not found.
    """
    upper_fieldname = fieldname.upper()

    def direct_lookup(candidate: Any) -> Optional[str]:
        """Check one object/dict for the discriminator, without recursing."""
        if isinstance(candidate, dict) and key in candidate:
            return f"{candidate[key]}"

        # Try both the given attribute name and its upper-case variant.
        for attr_name in (fieldname, upper_fieldname):
            if hasattr(candidate, attr_name):
                attr = getattr(candidate, attr_name)
                return f"{attr.value}" if isinstance(attr, Enum) else f"{attr}"

        return None

    def deep_lookup(node: Any) -> Optional[str]:
        """Depth-first search through nested dicts and lists held in dicts."""
        found = direct_lookup(node)
        if found is not None:
            return found

        if isinstance(node, dict):
            for child in node.values():
                if isinstance(child, list):
                    for element in child:
                        found = deep_lookup(element)
                        if found is not None:
                            return found
                elif isinstance(child, dict):
                    found = deep_lookup(child)
                    if found is not None:
                        return found

        return None

    # Lists of candidates are searched element by element first.
    if isinstance(model, list):
        for entry in model:
            found = deep_lookup(entry)
            if found is not None:
                return found

    found = deep_lookup(model)
    if found is not None:
        return found

    raise ValueError(f"Could not find discriminator field {fieldname} in {model}")
+ if datetime_string.endswith("Z"): + datetime_string = datetime_string[:-1] + "+00:00" + + return datetime.fromisoformat(datetime_string) diff --git a/src/mistralai/client/utils/enums.py b/src/mistralai/client/utils/enums.py new file mode 100644 index 00000000..3324e1bc --- /dev/null +++ b/src/mistralai/client/utils/enums.py @@ -0,0 +1,134 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import enum +import sys +from typing import Any + +from pydantic_core import core_schema + + +class OpenEnumMeta(enum.EnumMeta): + # The __call__ method `boundary` kwarg was added in 3.11 and must be present + # for pyright. Refer also: https://github.com/pylint-dev/pylint/issues/9622 + # pylint: disable=unexpected-keyword-arg + # The __call__ method `values` varg must be named for pyright. + # pylint: disable=keyword-arg-before-vararg + + if sys.version_info >= (3, 11): + def __call__( + cls, value, names=None, *values, module=None, qualname=None, type=None, start=1, boundary=None + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. + # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + *values, + module=module, + qualname=qualname, + type=type, + start=start, + boundary=boundary, + ) + except ValueError: + return value + else: + def __call__( + cls, value, names=None, *, module=None, qualname=None, type=None, start=1 + ): + # The `type` kwarg also happens to be a built-in that pylint flags as + # redeclared. Safe to ignore this lint rule with this scope. 
+ # pylint: disable=redefined-builtin + + if names is not None: + return super().__call__( + value, + names=names, + module=module, + qualname=qualname, + type=type, + start=start, + ) + + try: + return super().__call__( + value, + names=names, # pyright: ignore[reportArgumentType] + module=module, + qualname=qualname, + type=type, + start=start, + ) + except ValueError: + return value + + def __new__(mcs, name, bases, namespace, **kwargs): + cls = super().__new__(mcs, name, bases, namespace, **kwargs) + + # Add __get_pydantic_core_schema__ to make open enums work correctly + # in union discrimination. In strict mode (used by Pydantic for unions), + # only known enum values match. In lax mode, unknown values are accepted. + def __get_pydantic_core_schema__( + cls_inner: Any, _source_type: Any, _handler: Any + ) -> core_schema.CoreSchema: + # Create a validator that only accepts known enum values (for strict mode) + def validate_strict(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + # Use the parent EnumMeta's __call__ which raises ValueError for unknown values + return enum.EnumMeta.__call__(cls_inner, v) + + # Create a lax validator that accepts unknown values + def validate_lax(v: Any) -> Any: + if isinstance(v, cls_inner): + return v + try: + return enum.EnumMeta.__call__(cls_inner, v) + except ValueError: + # Return the raw value for unknown enum values + return v + + # Determine the base type schema (str or int) + is_int_enum = False + for base in cls_inner.__mro__: + if base is int: + is_int_enum = True + break + if base is str: + break + + base_schema = ( + core_schema.int_schema() + if is_int_enum + else core_schema.str_schema() + ) + + # Use lax_or_strict_schema: + # - strict mode: only known enum values match (raises ValueError for unknown) + # - lax mode: accept any value, return enum member or raw value + return core_schema.lax_or_strict_schema( + lax_schema=core_schema.chain_schema( + [base_schema, 
core_schema.no_info_plain_validator_function(validate_lax)] + ), + strict_schema=core_schema.chain_schema( + [base_schema, core_schema.no_info_plain_validator_function(validate_strict)] + ), + ) + + setattr(cls, "__get_pydantic_core_schema__", classmethod(__get_pydantic_core_schema__)) + return cls diff --git a/src/mistralai/client/utils/eventstreaming.py b/src/mistralai/client/utils/eventstreaming.py new file mode 100644 index 00000000..0969899b --- /dev/null +++ b/src/mistralai/client/utils/eventstreaming.py @@ -0,0 +1,248 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import re +import json +from typing import ( + Callable, + Generic, + TypeVar, + Optional, + Generator, + AsyncGenerator, + Tuple, +) +import httpx + +T = TypeVar("T") + + +class EventStream(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. + client_ref: Optional[object] + response: httpx.Response + generator: Generator[T, None, None] + + def __init__( + self, + response: httpx.Response, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + client_ref: Optional[object] = None, + ): + self.response = response + self.generator = stream_events(response, decoder, sentinel) + self.client_ref = client_ref + + def __iter__(self): + return self + + def __next__(self): + return next(self.generator) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.response.close() + + +class EventStreamAsync(Generic[T]): + # Holds a reference to the SDK client to avoid it being garbage collected + # and cause termination of the underlying httpx client. 
class ServerEvent:
    """One parsed server-sent event (SSE) record.

    Fields default to None at class level; the parser assigns only the
    fields that actually appear on the wire, and serialization later reads
    the instance ``__dict__``, so absent fields stay absent.
    """

    id: Optional[str] = None
    event: Optional[str] = None
    data: Optional[str] = None
    retry: Optional[int] = None


# An SSE message ends at the first blank line; cover CRLF, LF and CR framings.
MESSAGE_BOUNDARIES = [
    b"\r\n\r\n",
    b"\n\n",
    b"\r\r",
]


async def stream_events_async(
    response: httpx.Response,
    decoder: Callable[[str], T],
    sentinel: Optional[str] = None,
) -> AsyncGenerator[T, None]:
    """Incrementally parse *response* as an SSE stream, yielding decoded events.

    Args:
        response: streaming httpx response to read bytes from.
        decoder: callback that turns a JSON-serialized event into ``T``.
        sentinel: optional data payload marking end-of-stream; once seen,
            all further bytes are discarded until the server closes.
    """
    buffer = bytearray()
    position = 0  # index of the first unconsumed byte in `buffer`
    discard = False
    async for chunk in response.aiter_bytes():
        # We've encountered the sentinel value and should no longer process
        # incoming data. Instead we throw new data away until the server closes
        # the connection.
        if discard:
            continue

        buffer += chunk
        # Scan for a message boundary; a candidate is any CR or LF byte.
        for i in range(position, len(buffer)):
            char = buffer[i : i + 1]
            seq: Optional[bytes] = None
            if char in [b"\r", b"\n"]:
                for boundary in MESSAGE_BOUNDARIES:
                    seq = _peek_sequence(i, buffer, boundary)
                    if seq is not None:
                        break
            if seq is None:
                continue

            # Complete message: parse everything up to the boundary and
            # advance past the delimiter.
            block = buffer[position:i]
            position = i + len(seq)
            event, discard = _parse_event(block, decoder, sentinel)
            if event is not None:
                yield event

        # Drop consumed bytes so the buffer does not grow without bound.
        if position > 0:
            buffer = buffer[position:]
            position = 0

    # Flush whatever remains after the server closes the stream.
    event, discard = _parse_event(buffer, decoder, sentinel)
    if event is not None:
        yield event
+ if discard: + continue + + buffer += chunk + for i in range(position, len(buffer)): + char = buffer[i : i + 1] + seq: Optional[bytes] = None + if char in [b"\r", b"\n"]: + for boundary in MESSAGE_BOUNDARIES: + seq = _peek_sequence(i, buffer, boundary) + if seq is not None: + break + if seq is None: + continue + + block = buffer[position:i] + position = i + len(seq) + event, discard = _parse_event(block, decoder, sentinel) + if event is not None: + yield event + + if position > 0: + buffer = buffer[position:] + position = 0 + + event, discard = _parse_event(buffer, decoder, sentinel) + if event is not None: + yield event + + +def _parse_event( + raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None +) -> Tuple[Optional[T], bool]: + block = raw.decode() + lines = re.split(r"\r?\n|\r", block) + publish = False + event = ServerEvent() + data = "" + for line in lines: + if not line: + continue + + delim = line.find(":") + if delim <= 0: + continue + + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] + + if field == "event": + event.event = value + publish = True + elif field == "data": + data += value + "\n" + publish = True + elif field == "id": + event.id = value + publish = True + elif field == "retry": + event.retry = int(value) if value.isdigit() else None + publish = True + + if sentinel and data == f"{sentinel}\n": + return None, True + + if data: + data = data[:-1] + event.data = data + + data_is_primitive = ( + data.isnumeric() or data == "true" or data == "false" or data == "null" + ) + data_is_json = ( + data.startswith("{") or data.startswith("[") or data.startswith('"') + ) + + if data_is_primitive or data_is_json: + try: + event.data = json.loads(data) + except Exception: + pass + + out = None + if publish: + out = decoder(json.dumps(event.__dict__)) + + return out, False + + +def _peek_sequence(position: int, buffer: bytearray, sequence: 
bytes): + if len(sequence) > (len(buffer) - position): + return None + + for i, seq in enumerate(sequence): + if buffer[position + i] != seq: + return None + + return sequence diff --git a/src/mistralai/client/utils/forms.py b/src/mistralai/client/utils/forms.py new file mode 100644 index 00000000..f961e76b --- /dev/null +++ b/src/mistralai/client/utils/forms.py @@ -0,0 +1,234 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + get_type_hints, + List, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .serializers import marshal_json + +from .metadata import ( + FormMetadata, + MultipartFormMetadata, + find_field_metadata, +) +from .values import _is_set, _val_to_string + + +def _populate_form( + field_name: str, + explode: bool, + obj: Any, + delimiter: str, + form: Dict[str, List[str]], +): + if not _is_set(obj): + return form + + if isinstance(obj, BaseModel): + items = [] + + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_field_name = obj_field.alias if obj_field.alias is not None else name + if obj_field_name == "": + continue + + val = getattr(obj, name) + if not _is_set(val): + continue + + if explode: + form[obj_field_name] = [_val_to_string(val)] + else: + items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, Dict): + items = [] + for key, value in obj.items(): + if not _is_set(value): + continue + + if explode: + form[key] = [_val_to_string(value)] + else: + items.append(f"{key}{delimiter}{_val_to_string(value)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, List): + items = [] + + for value in obj: + if not _is_set(value): + continue + + if explode: + if not field_name in form: + 
form[field_name] = [] + form[field_name].append(_val_to_string(value)) + else: + items.append(_val_to_string(value)) + + if len(items) > 0: + form[field_name] = [delimiter.join([str(item) for item in items])] + else: + form[field_name] = [_val_to_string(obj)] + + return form + + +def _extract_file_properties(file_obj: Any) -> Tuple[str, Any, Any]: + """Extract file name, content, and content type from a file object.""" + file_fields: Dict[str, FieldInfo] = file_obj.__class__.model_fields + + file_name = "" + content = None + content_type = None + + for file_field_name in file_fields: + file_field = file_fields[file_field_name] + + file_metadata = find_field_metadata(file_field, MultipartFormMetadata) + if file_metadata is None: + continue + + if file_metadata.content: + content = getattr(file_obj, file_field_name, None) + elif file_field_name == "content_type": + content_type = getattr(file_obj, file_field_name, None) + else: + file_name = getattr(file_obj, file_field_name) + + if file_name == "" or content is None: + raise ValueError("invalid multipart/form-data file") + + return file_name, content, content_type + + +def serialize_multipart_form( + media_type: str, request: Any +) -> Tuple[str, Dict[str, Any], List[Tuple[str, Any]]]: + form: Dict[str, Any] = {} + files: List[Tuple[str, Any]] = [] + + if not isinstance(request, BaseModel): + raise TypeError("invalid request body type") + + request_fields: Dict[str, FieldInfo] = request.__class__.model_fields + request_field_types = get_type_hints(request.__class__) + + for name in request_fields: + field = request_fields[name] + + val = getattr(request, name) + if not _is_set(val): + continue + + field_metadata = find_field_metadata(field, MultipartFormMetadata) + if not field_metadata: + continue + + f_name = field.alias if field.alias else name + + if field_metadata.file: + if isinstance(val, List): + # Handle array of files + array_field_name = f_name + "[]" + for file_obj in val: + if not _is_set(file_obj): + 
continue + + file_name, content, content_type = _extract_file_properties( + file_obj + ) + + if content_type is not None: + files.append( + (array_field_name, (file_name, content, content_type)) + ) + else: + files.append((array_field_name, (file_name, content))) + else: + # Handle single file + file_name, content, content_type = _extract_file_properties(val) + + if content_type is not None: + files.append((f_name, (file_name, content, content_type))) + else: + files.append((f_name, (file_name, content))) + elif field_metadata.json: + files.append( + ( + f_name, + ( + None, + marshal_json(val, request_field_types[name]), + "application/json", + ), + ) + ) + else: + if isinstance(val, List): + values = [] + + for value in val: + if not _is_set(value): + continue + values.append(_val_to_string(value)) + + array_field_name = f_name + "[]" + form[array_field_name] = values + else: + form[f_name] = _val_to_string(val) + return media_type, form, files + + +def serialize_form_data(data: Any) -> Dict[str, Any]: + form: Dict[str, List[str]] = {} + + if isinstance(data, BaseModel): + data_fields: Dict[str, FieldInfo] = data.__class__.model_fields + data_field_types = get_type_hints(data.__class__) + for name in data_fields: + field = data_fields[name] + + val = getattr(data, name) + if not _is_set(val): + continue + + metadata = find_field_metadata(field, FormMetadata) + if metadata is None: + continue + + f_name = field.alias if field.alias is not None else name + + if metadata.json: + form[f_name] = [marshal_json(val, data_field_types[name])] + else: + if metadata.style == "form": + _populate_form( + f_name, + metadata.explode, + val, + ",", + form, + ) + else: + raise ValueError(f"Invalid form style for field {name}") + elif isinstance(data, Dict): + for key, value in data.items(): + if _is_set(value): + form[key] = [_val_to_string(value)] + else: + raise TypeError(f"Invalid request body type {type(data)} for form data") + + return form diff --git 
a/src/mistralai/client/utils/headers.py b/src/mistralai/client/utils/headers.py new file mode 100644 index 00000000..37864cbb --- /dev/null +++ b/src/mistralai/client/utils/headers.py @@ -0,0 +1,136 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import ( + Any, + Dict, + List, + Optional, +) +from httpx import Headers +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + HeaderMetadata, + find_field_metadata, +) + +from .values import _is_set, _populate_from_globals, _val_to_string + + +def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: + headers: Dict[str, str] = {} + + globals_already_populated = [] + if _is_set(headers_params): + globals_already_populated = _populate_headers(headers_params, gbls, headers, []) + if _is_set(gbls): + _populate_headers(gbls, None, headers, globals_already_populated) + + return headers + + +def _populate_headers( + headers_params: Any, + gbls: Any, + header_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(headers_params, BaseModel): + return globals_already_populated + + param_fields: Dict[str, FieldInfo] = headers_params.__class__.model_fields + for name in param_fields: + if name in skip_fields: + continue + + field = param_fields[name] + f_name = field.alias if field.alias is not None else name + + metadata = find_field_metadata(field, HeaderMetadata) + if metadata is None: + continue + + value, global_found = _populate_from_globals( + name, getattr(headers_params, name), HeaderMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + value = _serialize_header(metadata.explode, value) + + if value != "": + header_values[f_name] = value + + return globals_already_populated + + +def _serialize_header(explode: bool, obj: Any) -> str: + if not _is_set(obj): + return 
"" + + if isinstance(obj, BaseModel): + items = [] + obj_fields: Dict[str, FieldInfo] = obj.__class__.model_fields + for name in obj_fields: + obj_field = obj_fields[name] + obj_param_metadata = find_field_metadata(obj_field, HeaderMetadata) + + if not obj_param_metadata: + continue + + f_name = obj_field.alias if obj_field.alias is not None else name + + val = getattr(obj, name) + if not _is_set(val): + continue + + if explode: + items.append(f"{f_name}={_val_to_string(val)}") + else: + items.append(f_name) + items.append(_val_to_string(val)) + + if len(items) > 0: + return ",".join(items) + elif isinstance(obj, Dict): + items = [] + + for key, value in obj.items(): + if not _is_set(value): + continue + + if explode: + items.append(f"{key}={_val_to_string(value)}") + else: + items.append(key) + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join([str(item) for item in items]) + elif isinstance(obj, List): + items = [] + + for value in obj: + if not _is_set(value): + continue + + items.append(_val_to_string(value)) + + if len(items) > 0: + return ",".join(items) + elif _is_set(obj): + return f"{_val_to_string(obj)}" + + return "" + + +def get_response_headers(headers: Headers) -> Dict[str, List[str]]: + res: Dict[str, List[str]] = {} + for k, v in headers.items(): + if not k in res: + res[k] = [] + + res[k].append(v) + return res diff --git a/src/mistralai/client/utils/logger.py b/src/mistralai/client/utils/logger.py new file mode 100644 index 00000000..2ef27ee5 --- /dev/null +++ b/src/mistralai/client/utils/logger.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import httpx +import logging +import os +from typing import Any, Protocol + + +class Logger(Protocol): + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + + +class NoOpLogger: + def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: + pass + + +def get_body_content(req: httpx.Request) -> str: + return "" if not hasattr(req, "_content") else str(req.content) + + +def get_default_logger() -> Logger: + if os.getenv("MISTRAL_DEBUG"): + logging.basicConfig(level=logging.DEBUG) + return logging.getLogger("mistralai.client") + return NoOpLogger() diff --git a/src/mistralai/client/utils/metadata.py b/src/mistralai/client/utils/metadata.py new file mode 100644 index 00000000..173b3e5c --- /dev/null +++ b/src/mistralai/client/utils/metadata.py @@ -0,0 +1,118 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Optional, Type, TypeVar, Union +from dataclasses import dataclass +from pydantic.fields import FieldInfo + + +T = TypeVar("T") + + +@dataclass +class SecurityMetadata: + option: bool = False + scheme: bool = False + scheme_type: Optional[str] = None + sub_type: Optional[str] = None + field_name: Optional[str] = None + + def get_field_name(self, default: str) -> str: + return self.field_name or default + + +@dataclass +class ParamMetadata: + serialization: Optional[str] = None + style: str = "simple" + explode: bool = False + + +@dataclass +class PathParamMetadata(ParamMetadata): + pass + + +@dataclass +class QueryParamMetadata(ParamMetadata): + style: str = "form" + explode: bool = True + + +@dataclass +class HeaderMetadata(ParamMetadata): + pass + + +@dataclass +class RequestMetadata: + media_type: str = "application/octet-stream" + + +@dataclass +class MultipartFormMetadata: + file: bool = False + content: bool = False + json: bool = False + + +@dataclass +class FormMetadata: + json: bool = False + style: str = "form" + explode: bool 
= True + + +class FieldMetadata: + security: Optional[SecurityMetadata] = None + path: Optional[PathParamMetadata] = None + query: Optional[QueryParamMetadata] = None + header: Optional[HeaderMetadata] = None + request: Optional[RequestMetadata] = None + form: Optional[FormMetadata] = None + multipart: Optional[MultipartFormMetadata] = None + + def __init__( + self, + security: Optional[SecurityMetadata] = None, + path: Optional[Union[PathParamMetadata, bool]] = None, + query: Optional[Union[QueryParamMetadata, bool]] = None, + header: Optional[Union[HeaderMetadata, bool]] = None, + request: Optional[Union[RequestMetadata, bool]] = None, + form: Optional[Union[FormMetadata, bool]] = None, + multipart: Optional[Union[MultipartFormMetadata, bool]] = None, + ): + self.security = security + self.path = PathParamMetadata() if isinstance(path, bool) else path + self.query = QueryParamMetadata() if isinstance(query, bool) else query + self.header = HeaderMetadata() if isinstance(header, bool) else header + self.request = RequestMetadata() if isinstance(request, bool) else request + self.form = FormMetadata() if isinstance(form, bool) else form + self.multipart = ( + MultipartFormMetadata() if isinstance(multipart, bool) else multipart + ) + + +def find_field_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = find_metadata(field_info, FieldMetadata) + if not metadata: + return None + + fields = metadata.__dict__ + + for field in fields: + if isinstance(fields[field], metadata_type): + return fields[field] + + return None + + +def find_metadata(field_info: FieldInfo, metadata_type: Type[T]) -> Optional[T]: + metadata = field_info.metadata + if not metadata: + return None + + for md in metadata: + if isinstance(md, metadata_type): + return md + + return None diff --git a/src/mistralai/client/utils/queryparams.py b/src/mistralai/client/utils/queryparams.py new file mode 100644 index 00000000..c04e0db8 --- /dev/null +++ 
def get_query_params(
    query_params: Any,
    gbls: Optional[Any] = None,
    allow_empty_value: Optional[List[str]] = None,
) -> Dict[str, List[str]]:
    """Serialize a query-params model (plus optional global params) into a
    name -> list-of-values mapping ready for URL encoding.

    Fields satisfied from *gbls* on the first pass are skipped when the
    globals themselves are serialized, so explicit values win.
    """
    params: Dict[str, List[str]] = {}

    globals_already_populated = _populate_query_params(query_params, gbls, params, [], allow_empty_value)
    if _is_set(gbls):
        _populate_query_params(gbls, None, params, globals_already_populated, allow_empty_value)

    return params


def _populate_query_params(
    query_params: Any,
    gbls: Any,
    query_param_values: Dict[str, List[str]],
    skip_fields: List[str],
    allow_empty_value: Optional[List[str]] = None,
) -> List[str]:
    """Serialize each query-annotated field of *query_params* into
    *query_param_values*; returns the field names filled from globals."""
    globals_already_populated: List[str] = []

    if not isinstance(query_params, BaseModel):
        return globals_already_populated

    param_fields: Dict[str, FieldInfo] = query_params.__class__.model_fields
    param_field_types = get_type_hints(query_params.__class__)
    for name in param_fields:
        if name in skip_fields:
            continue

        field = param_fields[name]

        metadata = find_field_metadata(field, QueryParamMetadata)
        if not metadata:
            continue

        value = getattr(query_params, name) if _is_set(query_params) else None

        value, global_found = _populate_from_globals(
            name, value, QueryParamMetadata, gbls
        )
        if global_found:
            globals_already_populated.append(name)

        f_name = field.alias if field.alias is not None else name

        # Parameters listed in allow_empty_value are emitted as "" even when
        # unset or empty, instead of being dropped.
        allow_empty_set = set(allow_empty_value or [])
        should_include_empty = f_name in allow_empty_set and (
            value is None or value == [] or value == ""
        )

        if should_include_empty:
            query_param_values[f_name] = [""]
            continue

        serialization = metadata.serialization
        if serialization is not None:
            # Custom serialization produces ready-made key/value pairs that
            # are merged into the output.
            serialized_parms = _get_serialized_params(
                metadata, f_name, value, param_field_types[name]
            )
            for key, value in serialized_parms.items():
                # NOTE(review): the merge is asymmetric — the first occurrence
                # stores [value] but later ones call extend(value); if value is
                # a plain string, extend() adds it character-by-character.
                # Confirm whether append(value) was intended. The loop variable
                # also shadows the field `value` from the enclosing scope.
                if key in query_param_values:
                    query_param_values[key].extend(value)
                else:
                    query_param_values[key] = [value]
        else:
            # Dispatch on the OpenAPI serialization style.
            style = metadata.style
            if style == "deepObject":
                _populate_deep_object_query_params(f_name, value, query_param_values)
            elif style == "form":
                _populate_delimited_query_params(
                    metadata, f_name, value, ",", query_param_values
                )
            elif style == "pipeDelimited":
                _populate_delimited_query_params(
                    metadata, f_name, value, "|", query_param_values
                )
            else:
                raise NotImplementedError(
                    f"query param style {style} not yet supported"
                )

    return globals_already_populated


def _populate_deep_object_query_params(
    field_name: str,
    obj: Any,
    params: Dict[str, List[str]],
):
    """Entry point for deepObject serialization: dispatch on the top-level
    container type (model vs dict); other types are silently ignored."""
    if not _is_set(obj):
        return

    if isinstance(obj, BaseModel):
        _populate_deep_object_query_params_basemodel(field_name, obj, params)
    elif isinstance(obj, Dict):
        _populate_deep_object_query_params_dict(field_name, obj, params)
isinstance(obj_val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, obj_val, params) + elif isinstance(obj_val, Dict): + _populate_deep_object_query_params_dict(params_key, obj_val, params) + elif isinstance(obj_val, List): + _populate_deep_object_query_params_list(params_key, obj_val, params) + else: + params[params_key] = [_val_to_string(obj_val)] + + +def _populate_deep_object_query_params_dict( + prior_params_key: str, + value: Dict, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for key, val in value.items(): + if not _is_set(val): + continue + + params_key = f"{prior_params_key}[{key}]" + + if isinstance(val, BaseModel): + _populate_deep_object_query_params_basemodel(params_key, val, params) + elif isinstance(val, Dict): + _populate_deep_object_query_params_dict(params_key, val, params) + elif isinstance(val, List): + _populate_deep_object_query_params_list(params_key, val, params) + else: + params[params_key] = [_val_to_string(val)] + + +def _populate_deep_object_query_params_list( + params_key: str, + value: List, + params: Dict[str, List[str]], +): + if not _is_set(value): + return + + for val in value: + if not _is_set(val): + continue + + if params.get(params_key) is None: + params[params_key] = [] + + params[params_key].append(_val_to_string(val)) + + +def _populate_delimited_query_params( + metadata: QueryParamMetadata, + field_name: str, + obj: Any, + delimiter: str, + query_param_values: Dict[str, List[str]], +): + _populate_form( + field_name, + metadata.explode, + obj, + delimiter, + query_param_values, + ) diff --git a/src/mistralai/client/utils/requestbodies.py b/src/mistralai/client/utils/requestbodies.py new file mode 100644 index 00000000..1de32b6d --- /dev/null +++ b/src/mistralai/client/utils/requestbodies.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import io +from dataclasses import dataclass +import re +from typing import ( + Any, + Optional, +) + +from .forms import serialize_form_data, serialize_multipart_form + +from .serializers import marshal_json + +SERIALIZATION_METHOD_TO_CONTENT_TYPE = { + "json": "application/json", + "form": "application/x-www-form-urlencoded", + "multipart": "multipart/form-data", + "raw": "application/octet-stream", + "string": "text/plain", +} + + +@dataclass +class SerializedRequestBody: + media_type: Optional[str] = None + content: Optional[Any] = None + data: Optional[Any] = None + files: Optional[Any] = None + + +def serialize_request_body( + request_body: Any, + nullable: bool, + optional: bool, + serialization_method: str, + request_body_type, +) -> Optional[SerializedRequestBody]: + if request_body is None: + if not nullable and optional: + return None + + media_type = SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method] + + serialized_request_body = SerializedRequestBody(media_type) + + if re.match(r"^(application|text)\/([^+]+\+)*json.*", media_type) is not None: + serialized_request_body.content = marshal_json(request_body, request_body_type) + elif re.match(r"^multipart\/.*", media_type) is not None: + ( + serialized_request_body.media_type, + serialized_request_body.data, + serialized_request_body.files, + ) = serialize_multipart_form(media_type, request_body) + elif re.match(r"^application\/x-www-form-urlencoded.*", media_type) is not None: + serialized_request_body.data = serialize_form_data(request_body) + elif isinstance(request_body, (bytes, bytearray, io.BytesIO, io.BufferedReader)): + serialized_request_body.content = request_body + elif isinstance(request_body, str): + serialized_request_body.content = request_body + else: + raise TypeError( + f"invalid request body type {type(request_body)} for mediaType {media_type}" + ) + + return serialized_request_body diff --git a/src/mistralai/client/utils/retries.py 
b/src/mistralai/client/utils/retries.py new file mode 100644 index 00000000..88a91b10 --- /dev/null +++ b/src/mistralai/client/utils/retries.py @@ -0,0 +1,281 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +import asyncio +import random +import time +from datetime import datetime +from email.utils import parsedate_to_datetime +from typing import List, Optional + +import httpx + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__( + self, + initial_interval: int, + max_interval: int, + exponent: float, + max_elapsed_time: int, + ): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__( + self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool + ): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + response: httpx.Response + retry_after: Optional[int] + + def __init__(self, response: httpx.Response): + self.response = response + self.retry_after = _parse_retry_after_header(response) + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def _parse_retry_after_header(response: httpx.Response) -> Optional[int]: + """Parse Retry-After header from response. + + Returns: + Retry interval in milliseconds, or None if header is missing or invalid. 
+ """ + retry_after_header = response.headers.get("retry-after") + if not retry_after_header: + return None + + try: + seconds = float(retry_after_header) + return round(seconds * 1000) + except ValueError: + pass + + try: + retry_date = parsedate_to_datetime(retry_after_header) + delta = (retry_date - datetime.now(retry_date.tzinfo)).total_seconds() + return round(max(0, delta) * 1000) + except (ValueError, TypeError): + pass + + return None + + +def _get_sleep_interval( + exception: Exception, + initial_interval: int, + max_interval: int, + exponent: float, + retries: int, +) -> float: + """Get sleep interval for retry with exponential backoff. + + Args: + exception: The exception that triggered the retry. + initial_interval: Initial retry interval in milliseconds. + max_interval: Maximum retry interval in milliseconds. + exponent: Base for exponential backoff calculation. + retries: Current retry attempt count. + + Returns: + Sleep interval in seconds. + """ + if ( + isinstance(exception, TemporaryError) + and exception.retry_after is not None + and exception.retry_after > 0 + ): + return exception.retry_after / 1000 + + sleep = (initial_interval / 1000) * exponent**retries + random.uniform(0, 1) + return min(sleep, max_interval / 1000) + + +def retry(func, retries: Retries): + if retries.config.strategy == "backoff": + + def do_request() -> httpx.Response: + res: httpx.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise 
PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return retry_with_backoff( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return func() + + +async def retry_async(func, retries: Retries): + if retries.config.strategy == "backoff": + + async def do_request() -> httpx.Response: + res: httpx.Response + try: + res = await func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if code_range <= status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except httpx.ConnectError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except httpx.TimeoutException as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return await retry_with_backoff_async( + do_request, + retries.config.backoff.initial_interval, + retries.config.backoff.max_interval, + retries.config.backoff.exponent, + retries.config.backoff.max_elapsed_time, + ) + + return await func() + + +def retry_with_backoff( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > 
max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + time.sleep(sleep) + retries += 1 + + +async def retry_with_backoff_async( + func, + initial_interval=500, + max_interval=60000, + exponent=1.5, + max_elapsed_time=3600000, +): + start = round(time.time() * 1000) + retries = 0 + + while True: + try: + return await func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time() * 1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + + sleep = _get_sleep_interval( + exception, initial_interval, max_interval, exponent, retries + ) + await asyncio.sleep(sleep) + retries += 1 diff --git a/src/mistralai/client/utils/security.py b/src/mistralai/client/utils/security.py new file mode 100644 index 00000000..3b8526bf --- /dev/null +++ b/src/mistralai/client/utils/security.py @@ -0,0 +1,192 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +import base64 + +from typing import ( + Any, + Dict, + List, + Optional, + Tuple, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + SecurityMetadata, + find_field_metadata, +) +import os + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, List[str]]]: + headers: Dict[str, str] = {} + query_params: Dict[str, List[str]] = {} + + if security is None: + return headers, query_params + + if not isinstance(security, BaseModel): + raise TypeError("security must be a pydantic model") + + sec_fields: Dict[str, FieldInfo] = security.__class__.model_fields + for name in sec_fields: + sec_field = sec_fields[name] + + value = getattr(security, name) + if value is None: + continue + + metadata = find_field_metadata(sec_field, SecurityMetadata) + if metadata is None: + continue + if metadata.option: + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.scheme: + # Special case for basic auth or custom auth which could be a flattened model + if metadata.sub_type in ["basic", "custom"] and not isinstance( + value, BaseModel + ): + _parse_security_scheme(headers, query_params, metadata, name, security) + else: + _parse_security_scheme(headers, query_params, metadata, name, value) + + return headers, query_params + + +def get_security_from_env(security: Any, security_class: Any) -> Optional[BaseModel]: + if security is not None: + return security + + if not issubclass(security_class, BaseModel): + raise TypeError("security_class must be a pydantic model class") + + security_dict: Any = {} + + if os.getenv("MISTRAL_API_KEY"): + security_dict["api_key"] = os.getenv("MISTRAL_API_KEY") + + return security_class(**security_dict) if security_dict else None + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, List[str]], option: Any +): + if not isinstance(option, BaseModel): + raise TypeError("security option must be a 
pydantic model") + + opt_fields: Dict[str, FieldInfo] = option.__class__.model_fields + for name in opt_fields: + opt_field = opt_fields[name] + + metadata = find_field_metadata(opt_field, SecurityMetadata) + if metadata is None or not metadata.scheme: + continue + _parse_security_scheme( + headers, query_params, metadata, name, getattr(option, name) + ) + + +def _parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + field_name: str, + scheme: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + if isinstance(scheme, BaseModel): + if scheme_type == "http": + if sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + if sub_type == "custom": + return + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + value = getattr(scheme, name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, name, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, field_name, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, List[str]], + scheme_metadata: SecurityMetadata, + security_metadata: SecurityMetadata, + field_name: str, + value: Any, +): + scheme_type = scheme_metadata.scheme_type + sub_type = scheme_metadata.sub_type + + header_name = security_metadata.get_field_name(field_name) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = [value] + else: + raise ValueError("sub type {sub_type} not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif 
scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + elif sub_type == "custom": + return + else: + raise ValueError("sub type {sub_type} not supported") + else: + raise ValueError("scheme type {scheme_type} not supported") + + +def _apply_bearer(token: str) -> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + if not isinstance(scheme, BaseModel): + raise TypeError("basic auth scheme must be a pydantic model") + + scheme_fields: Dict[str, FieldInfo] = scheme.__class__.model_fields + for name in scheme_fields: + scheme_field = scheme_fields[name] + + metadata = find_field_metadata(scheme_field, SecurityMetadata) + if metadata is None or metadata.field_name is None: + continue + + field_name = metadata.field_name + value = getattr(scheme, name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" diff --git a/src/mistralai/client/utils/serializers.py b/src/mistralai/client/utils/serializers.py new file mode 100644 index 00000000..14321eb4 --- /dev/null +++ b/src/mistralai/client/utils/serializers.py @@ -0,0 +1,229 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from decimal import Decimal +import functools +import json +import typing +from typing import Any, Dict, List, Tuple, Union, get_args +import typing_extensions +from typing_extensions import get_origin + +import httpx +from pydantic import ConfigDict, create_model +from pydantic_core import from_json + +from ..types.basemodel import BaseModel, Nullable, OptionalNullable, Unset + + +def serialize_decimal(as_str: bool): + def serialize(d): + # Optional[T] is a Union[T, None] + if is_union(type(d)) and type(None) in get_args(type(d)) and d is None: + return None + if isinstance(d, Unset): + return d + + if not isinstance(d, Decimal): + raise ValueError("Expected Decimal object") + + return str(d) if as_str else float(d) + + return serialize + + +def validate_decimal(d): + if d is None: + return None + + if isinstance(d, (Decimal, Unset)): + return d + + if not isinstance(d, (str, int, float)): + raise ValueError("Expected string, int or float") + + return Decimal(str(d)) + + +def serialize_float(as_str: bool): + def serialize(f): + # Optional[T] is a Union[T, None] + if is_union(type(f)) and type(None) in get_args(type(f)) and f is None: + return None + if isinstance(f, Unset): + return f + + if not isinstance(f, float): + raise ValueError("Expected float") + + return str(f) if as_str else f + + return serialize + + +def validate_float(f): + if f is None: + return None + + if isinstance(f, (float, Unset)): + return f + + if not isinstance(f, str): + raise ValueError("Expected string") + + return float(f) + + +def serialize_int(as_str: bool): + def serialize(i): + # Optional[T] is a Union[T, None] + if is_union(type(i)) and type(None) in get_args(type(i)) and i is None: + return None + if isinstance(i, Unset): + return i + + if not isinstance(i, int): + raise ValueError("Expected int") + + return str(i) if as_str else i + + return serialize + + +def validate_int(b): + if b is None: + return None + + if isinstance(b, (int, Unset)): + return b + + if 
not isinstance(b, str): + raise ValueError("Expected string") + + return int(b) + + +def validate_const(v): + def validate(c): + # Optional[T] is a Union[T, None] + if is_union(type(c)) and type(None) in get_args(type(c)) and c is None: + return None + + if v != c: + raise ValueError(f"Expected {v}") + + return c + + return validate + + +def unmarshal_json(raw, typ: Any) -> Any: + return unmarshal(from_json(raw), typ) + + +def unmarshal(val, typ: Any) -> Any: + unmarshaller = create_model( + "Unmarshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = unmarshaller(body=val) + + # pyright: ignore[reportAttributeAccessIssue] + return m.body # type: ignore + + +def marshal_json(val, typ): + if is_nullable(typ) and val is None: + return "null" + + marshaller = create_model( + "Marshaller", + body=(typ, ...), + __config__=ConfigDict(populate_by_name=True, arbitrary_types_allowed=True), + ) + + m = marshaller(body=val) + + d = m.model_dump(by_alias=True, mode="json", exclude_none=True) + + if len(d) == 0: + return "" + + return json.dumps(d[next(iter(d))], separators=(",", ":")) + + +def is_nullable(field): + origin = get_origin(field) + if origin is Nullable or origin is OptionalNullable: + return True + + if not origin is Union or type(None) not in get_args(field): + return False + + for arg in get_args(field): + if get_origin(arg) is Nullable or get_origin(arg) is OptionalNullable: + return True + + return False + + +def is_union(obj: object) -> bool: + """ + Returns True if the given object is a typing.Union or typing_extensions.Union. 
+ """ + return any( + obj is typing_obj for typing_obj in _get_typing_objects_by_name_of("Union") + ) + + +def stream_to_text(stream: httpx.Response) -> str: + return "".join(stream.iter_text()) + + +async def stream_to_text_async(stream: httpx.Response) -> str: + return "".join([chunk async for chunk in stream.aiter_text()]) + + +def stream_to_bytes(stream: httpx.Response) -> bytes: + return stream.content + + +async def stream_to_bytes_async(stream: httpx.Response) -> bytes: + return await stream.aread() + + +def get_pydantic_model(data: Any, typ: Any) -> Any: + if not _contains_pydantic_model(data): + return unmarshal(data, typ) + + return data + + +def _contains_pydantic_model(data: Any) -> bool: + if isinstance(data, BaseModel): + return True + if isinstance(data, List): + return any(_contains_pydantic_model(item) for item in data) + if isinstance(data, Dict): + return any(_contains_pydantic_model(value) for value in data.values()) + + return False + + +@functools.cache +def _get_typing_objects_by_name_of(name: str) -> Tuple[Any, ...]: + """ + Get typing objects by name from typing and typing_extensions. + Reference: https://typing-extensions.readthedocs.io/en/latest/#runtime-use-of-types + """ + result = tuple( + getattr(module, name) + for module in (typing, typing_extensions) + if hasattr(module, name) + ) + if not result: + raise ValueError( + f"Neither typing nor typing_extensions has an object called {name!r}" + ) + return result diff --git a/src/mistralai/client/utils/unmarshal_json_response.py b/src/mistralai/client/utils/unmarshal_json_response.py new file mode 100644 index 00000000..6d43d6e4 --- /dev/null +++ b/src/mistralai/client/utils/unmarshal_json_response.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Any, Optional, Type, TypeVar, overload + +import httpx + +from .serializers import unmarshal_json +from mistralai.client import models + +T = TypeVar("T") + + +@overload +def unmarshal_json_response( + typ: Type[T], http_res: httpx.Response, body: Optional[str] = None +) -> T: ... + + +@overload +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: ... + + +def unmarshal_json_response( + typ: Any, http_res: httpx.Response, body: Optional[str] = None +) -> Any: + if body is None: + body = http_res.text + try: + return unmarshal_json(body, typ) + except Exception as e: + raise models.ResponseValidationError( + "Response validation failed", + http_res, + e, + body, + ) from e diff --git a/src/mistralai/client/utils/url.py b/src/mistralai/client/utils/url.py new file mode 100644 index 00000000..c78ccbae --- /dev/null +++ b/src/mistralai/client/utils/url.py @@ -0,0 +1,155 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from decimal import Decimal +from typing import ( + Any, + Dict, + get_type_hints, + List, + Optional, + Union, + get_args, + get_origin, +) +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from .metadata import ( + PathParamMetadata, + find_field_metadata, +) +from .values import ( + _get_serialized_params, + _is_set, + _populate_from_globals, + _val_to_string, +) + + +def generate_url( + server_url: str, + path: str, + path_params: Any, + gbls: Optional[Any] = None, +) -> str: + path_param_values: Dict[str, str] = {} + + globals_already_populated = _populate_path_params( + path_params, gbls, path_param_values, [] + ) + if _is_set(gbls): + _populate_path_params(gbls, None, path_param_values, globals_already_populated) + + for key, value in path_param_values.items(): + path = path.replace("{" + key + "}", value, 1) + + return remove_suffix(server_url, "/") + path + + +def _populate_path_params( + path_params: Any, + gbls: Any, + path_param_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + if not isinstance(path_params, BaseModel): + return globals_already_populated + + path_param_fields: Dict[str, FieldInfo] = path_params.__class__.model_fields + path_param_field_types = get_type_hints(path_params.__class__) + for name in path_param_fields: + if name in skip_fields: + continue + + field = path_param_fields[name] + + param_metadata = find_field_metadata(field, PathParamMetadata) + if param_metadata is None: + continue + + param = getattr(path_params, name) if _is_set(path_params) else None + param, global_found = _populate_from_globals( + name, param, PathParamMetadata, gbls + ) + if global_found: + globals_already_populated.append(name) + + if not _is_set(param): + continue + + f_name = field.alias if field.alias is not None else name + serialization = param_metadata.serialization + if serialization is not None: + serialized_params = _get_serialized_params( 
+ param_metadata, f_name, param, path_param_field_types[name] + ) + for key, value in serialized_params.items(): + path_param_values[key] = value + else: + pp_vals: List[str] = [] + if param_metadata.style == "simple": + if isinstance(param, List): + for pp_val in param: + if not _is_set(pp_val): + continue + pp_vals.append(_val_to_string(pp_val)) + path_param_values[f_name] = ",".join(pp_vals) + elif isinstance(param, Dict): + for pp_key in param: + if not _is_set(param[pp_key]): + continue + if param_metadata.explode: + pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") + else: + pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") + path_param_values[f_name] = ",".join(pp_vals) + elif not isinstance(param, (str, int, float, complex, bool, Decimal)): + param_fields: Dict[str, FieldInfo] = param.__class__.model_fields + for name in param_fields: + param_field = param_fields[name] + + param_value_metadata = find_field_metadata( + param_field, PathParamMetadata + ) + if param_value_metadata is None: + continue + + param_name = ( + param_field.alias if param_field.alias is not None else name + ) + + param_field_val = getattr(param, name) + if not _is_set(param_field_val): + continue + if param_metadata.explode: + pp_vals.append( + f"{param_name}={_val_to_string(param_field_val)}" + ) + else: + pp_vals.append( + f"{param_name},{_val_to_string(param_field_val)}" + ) + path_param_values[f_name] = ",".join(pp_vals) + elif _is_set(param): + path_param_values[f_name] = _val_to_string(param) + + return globals_already_populated + + +def is_optional(field): + return get_origin(field) is Union and type(None) in get_args(field) + + +def template_url(url_with_params: str, params: Dict[str, str]) -> str: + for key, value in params.items(): + url_with_params = url_with_params.replace("{" + key + "}", value) + + return url_with_params + + +def remove_suffix(input_string, suffix): + if suffix and input_string.endswith(suffix): + return input_string[: -len(suffix)] + 
return input_string diff --git a/src/mistralai/client/utils/values.py b/src/mistralai/client/utils/values.py new file mode 100644 index 00000000..dae01a44 --- /dev/null +++ b/src/mistralai/client/utils/values.py @@ -0,0 +1,137 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from datetime import datetime +from enum import Enum +from email.message import Message +from functools import partial +import os +from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union, cast + +from httpx import Response +from pydantic import BaseModel +from pydantic.fields import FieldInfo + +from ..types.basemodel import Unset + +from .serializers import marshal_json + +from .metadata import ParamMetadata, find_field_metadata + + +def match_content_type(content_type: str, pattern: str) -> bool: + if pattern in (content_type, "*", "*/*"): + return True + + msg = Message() + msg["content-type"] = content_type + media_type = msg.get_content_type() + + if media_type == pattern: + return True + + parts = media_type.split("/") + if len(parts) == 2: + if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"): + return True + + return False + + +def match_status_codes(status_codes: List[str], status_code: int) -> bool: + if "default" in status_codes: + return True + + for code in status_codes: + if code == str(status_code): + return True + + if code.endswith("XX") and code.startswith(str(status_code)[:1]): + return True + return False + + +T = TypeVar("T") + +def cast_partial(typ): + return partial(cast, typ) + +def get_global_from_env( + value: Optional[T], env_key: str, type_cast: Callable[[str], T] +) -> Optional[T]: + if value is not None: + return value + env_value = os.getenv(env_key) + if env_value is not None: + try: + return type_cast(env_value) + except ValueError: + pass + return None + + +def match_response( + response: Response, code: Union[str, List[str]], content_type: str +) -> bool: + codes = code 
if isinstance(code, list) else [code] + return match_status_codes(codes, response.status_code) and match_content_type( + response.headers.get("content-type", "application/octet-stream"), content_type + ) + + +def _populate_from_globals( + param_name: str, value: Any, param_metadata_type: type, gbls: Any +) -> Tuple[Any, bool]: + if gbls is None: + return value, False + + if not isinstance(gbls, BaseModel): + raise TypeError("globals must be a pydantic model") + + global_fields: Dict[str, FieldInfo] = gbls.__class__.model_fields + found = False + for name in global_fields: + field = global_fields[name] + if name is not param_name: + continue + + found = True + + if value is not None: + return value, True + + global_value = getattr(gbls, name) + + param_metadata = find_field_metadata(field, param_metadata_type) + if param_metadata is None: + return value, True + + return global_value, True + + return value, found + + +def _val_to_string(val) -> str: + if isinstance(val, bool): + return str(val).lower() + if isinstance(val, datetime): + return str(val.isoformat().replace("+00:00", "Z")) + if isinstance(val, Enum): + return str(val.value) + + return str(val) + + +def _get_serialized_params( + metadata: ParamMetadata, field_name: str, obj: Any, typ: type +) -> Dict[str, str]: + params: Dict[str, str] = {} + + serialization = metadata.serialization + if serialization == "json": + params[field_name] = marshal_json(obj, typ) + + return params + + +def _is_set(value: Any) -> bool: + return value is not None and not isinstance(value, Unset) diff --git a/uv.lock b/uv.lock index fe22e76a..4b1890b2 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "1.12.0" +version = "2.0.0a1" source = { editable = "." 
} dependencies = [ { name = "eval-type-backport" }, From 233c672feb2c34145db71eac13c6923a5d76dd04 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:32:00 +0100 Subject: [PATCH 190/223] fix: migrate custom hooks to client/_hooks/ - Move custom_user_agent.py, deprecation_warning.py, tracing.py - Update tracing.py to use absolute import for mistralai.extra - Update registration.py to register all custom hooks --- src/mistralai/_hooks/registration.py | 22 ------------------- .../{ => client}/_hooks/custom_user_agent.py | 0 .../_hooks/deprecation_warning.py | 0 src/mistralai/client/_hooks/registration.py | 13 +++++++++-- src/mistralai/{ => client}/_hooks/tracing.py | 2 +- 5 files changed, 12 insertions(+), 25 deletions(-) delete mode 100644 src/mistralai/_hooks/registration.py rename src/mistralai/{ => client}/_hooks/custom_user_agent.py (100%) rename src/mistralai/{ => client}/_hooks/deprecation_warning.py (100%) rename src/mistralai/{ => client}/_hooks/tracing.py (98%) diff --git a/src/mistralai/_hooks/registration.py b/src/mistralai/_hooks/registration.py deleted file mode 100644 index 58bebab0..00000000 --- a/src/mistralai/_hooks/registration.py +++ /dev/null @@ -1,22 +0,0 @@ -from .custom_user_agent import CustomUserAgentHook -from .deprecation_warning import DeprecationWarningHook -from .tracing import TracingHook -from .types import Hooks - -# This file is only ever generated once on the first generation and then is free to be modified. -# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them -# in this file or in separate files in the hooks folder. 
- - -def init_hooks(hooks: Hooks): - # pylint: disable=unused-argument - """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook - with an instance of a hook that implements that specific Hook interface - Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance - """ - tracing_hook = TracingHook() - hooks.register_before_request_hook(CustomUserAgentHook()) - hooks.register_after_success_hook(DeprecationWarningHook()) - hooks.register_after_success_hook(tracing_hook) - hooks.register_before_request_hook(tracing_hook) - hooks.register_after_error_hook(tracing_hook) diff --git a/src/mistralai/_hooks/custom_user_agent.py b/src/mistralai/client/_hooks/custom_user_agent.py similarity index 100% rename from src/mistralai/_hooks/custom_user_agent.py rename to src/mistralai/client/_hooks/custom_user_agent.py diff --git a/src/mistralai/_hooks/deprecation_warning.py b/src/mistralai/client/_hooks/deprecation_warning.py similarity index 100% rename from src/mistralai/_hooks/deprecation_warning.py rename to src/mistralai/client/_hooks/deprecation_warning.py diff --git a/src/mistralai/client/_hooks/registration.py b/src/mistralai/client/_hooks/registration.py index cab47787..58bebab0 100644 --- a/src/mistralai/client/_hooks/registration.py +++ b/src/mistralai/client/_hooks/registration.py @@ -1,6 +1,8 @@ +from .custom_user_agent import CustomUserAgentHook +from .deprecation_warning import DeprecationWarningHook +from .tracing import TracingHook from .types import Hooks - # This file is only ever generated once on the first generation and then is free to be modified. # Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them # in this file or in separate files in the hooks folder. 
@@ -10,4 +12,11 @@ def init_hooks(hooks: Hooks): # pylint: disable=unused-argument """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook with an instance of a hook that implements that specific Hook interface - Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance""" + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance + """ + tracing_hook = TracingHook() + hooks.register_before_request_hook(CustomUserAgentHook()) + hooks.register_after_success_hook(DeprecationWarningHook()) + hooks.register_after_success_hook(tracing_hook) + hooks.register_before_request_hook(tracing_hook) + hooks.register_after_error_hook(tracing_hook) diff --git a/src/mistralai/_hooks/tracing.py b/src/mistralai/client/_hooks/tracing.py similarity index 98% rename from src/mistralai/_hooks/tracing.py rename to src/mistralai/client/_hooks/tracing.py index fc4656fd..b353d9bd 100644 --- a/src/mistralai/_hooks/tracing.py +++ b/src/mistralai/client/_hooks/tracing.py @@ -4,7 +4,7 @@ import httpx from opentelemetry.trace import Span -from ..extra.observability.otel import ( +from mistralai.extra.observability.otel import ( get_or_create_otel_tracer, get_response_and_error, get_traced_request_and_span, From 20305b37e6015172ba1bdbf1a9a37d41454ba614 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:34:27 +0100 Subject: [PATCH 191/223] fix: update extra/ imports for new namespace Update all imports in src/mistralai/extra/ from: - mistralai.models -> mistralai.client.models - mistralai.types -> mistralai.client.types - mistralai.utils -> mistralai.client.utils - mistralai.sdkconfiguration -> mistralai.client.sdkconfiguration --- src/mistralai/extra/exceptions.py | 2 +- src/mistralai/extra/mcp/auth.py | 2 +- src/mistralai/extra/mcp/base.py | 2 +- src/mistralai/extra/mcp/sse.py | 2 +- src/mistralai/extra/realtime/__init__.py | 2 +- src/mistralai/extra/realtime/connection.py | 2 
+- src/mistralai/extra/realtime/transcription.py | 8 ++++---- src/mistralai/extra/run/context.py | 6 +++--- src/mistralai/extra/run/result.py | 4 ++-- src/mistralai/extra/run/tools.py | 2 +- src/mistralai/extra/struct_chat.py | 2 +- src/mistralai/extra/tests/test_struct_chat.py | 2 +- src/mistralai/extra/tests/test_utils.py | 4 ++-- src/mistralai/extra/utils/response_format.py | 2 +- 14 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/mistralai/extra/exceptions.py b/src/mistralai/extra/exceptions.py index ee107698..d2cd3e79 100644 --- a/src/mistralai/extra/exceptions.py +++ b/src/mistralai/extra/exceptions.py @@ -1,7 +1,7 @@ from typing import Optional, TYPE_CHECKING if TYPE_CHECKING: - from mistralai.models import RealtimeTranscriptionError + from mistralai.client.models import RealtimeTranscriptionError class MistralClientException(Exception): diff --git a/src/mistralai/extra/mcp/auth.py b/src/mistralai/extra/mcp/auth.py index f2b2db8a..8a61ddab 100644 --- a/src/mistralai/extra/mcp/auth.py +++ b/src/mistralai/extra/mcp/auth.py @@ -4,7 +4,7 @@ from authlib.integrations.httpx_client import AsyncOAuth2Client as AsyncOAuth2ClientBase from authlib.oauth2.rfc8414 import AuthorizationServerMetadata -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel logger = logging.getLogger(__name__) diff --git a/src/mistralai/extra/mcp/base.py b/src/mistralai/extra/mcp/base.py index bbda67d5..1048c54f 100644 --- a/src/mistralai/extra/mcp/base.py +++ b/src/mistralai/extra/mcp/base.py @@ -11,7 +11,7 @@ ) from mistralai.extra.exceptions import MCPException -from mistralai.models import ( +from mistralai.client.models import ( FunctionTool, Function, SystemMessageTypedDict, diff --git a/src/mistralai/extra/mcp/sse.py b/src/mistralai/extra/mcp/sse.py index ba49fd1a..b4929c54 100644 --- a/src/mistralai/extra/mcp/sse.py +++ b/src/mistralai/extra/mcp/sse.py @@ -16,7 +16,7 @@ ) from mistralai.extra.mcp.auth import OAuthParams, 
AsyncOAuth2Client -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel logger = logging.getLogger(__name__) diff --git a/src/mistralai/extra/realtime/__init__.py b/src/mistralai/extra/realtime/__init__.py index 85bf1d88..7b80e045 100644 --- a/src/mistralai/extra/realtime/__init__.py +++ b/src/mistralai/extra/realtime/__init__.py @@ -1,4 +1,4 @@ -from mistralai.models import ( +from mistralai.client.models import ( AudioEncoding, AudioFormat, RealtimeTranscriptionError, diff --git a/src/mistralai/extra/realtime/connection.py b/src/mistralai/extra/realtime/connection.py index 042854ab..ffbbc735 100644 --- a/src/mistralai/extra/realtime/connection.py +++ b/src/mistralai/extra/realtime/connection.py @@ -16,7 +16,7 @@ "Install with: pip install 'mistralai[realtime]'" ) from exc -from mistralai.models import ( +from mistralai.client.models import ( AudioFormat, RealtimeTranscriptionError, RealtimeTranscriptionSession, diff --git a/src/mistralai/extra/realtime/transcription.py b/src/mistralai/extra/realtime/transcription.py index de117645..655fd9c1 100644 --- a/src/mistralai/extra/realtime/transcription.py +++ b/src/mistralai/extra/realtime/transcription.py @@ -17,15 +17,15 @@ "Install with: pip install 'mistralai[realtime]'" ) from exc -from mistralai import models, utils -from mistralai.models import ( +from mistralai.client import models, utils +from mistralai.client.models import ( AudioFormat, RealtimeTranscriptionError, RealtimeTranscriptionSession, RealtimeTranscriptionSessionCreated, ) -from mistralai.sdkconfiguration import SDKConfiguration -from mistralai.utils import generate_url, get_security, get_security_from_env +from mistralai.client.sdkconfiguration import SDKConfiguration +from mistralai.client.utils import generate_url, get_security, get_security_from_env from ..exceptions import RealtimeTranscriptionException, RealtimeTranscriptionWSError from .connection import ( diff --git a/src/mistralai/extra/run/context.py 
b/src/mistralai/extra/run/context.py index 0d78352a..8e570e41 100644 --- a/src/mistralai/extra/run/context.py +++ b/src/mistralai/extra/run/context.py @@ -21,7 +21,7 @@ create_function_result, create_tool_call, ) -from mistralai.models import ( +from mistralai.client.models import ( CompletionArgs, CompletionArgsTypedDict, ConversationInputs, @@ -35,10 +35,10 @@ Tools, ToolsTypedDict, ) -from mistralai.types.basemodel import BaseModel, OptionalNullable, UNSET +from mistralai.client.types.basemodel import BaseModel, OptionalNullable, UNSET if typing.TYPE_CHECKING: - from mistralai import Beta, OptionalNullable + from mistralai.client import Beta, OptionalNullable logger = getLogger(__name__) diff --git a/src/mistralai/extra/run/result.py b/src/mistralai/extra/run/result.py index 0af48ee7..6e2bcc8a 100644 --- a/src/mistralai/extra/run/result.py +++ b/src/mistralai/extra/run/result.py @@ -7,7 +7,7 @@ from pydantic import BaseModel, Discriminator, Tag from mistralai.extra.utils.response_format import pydantic_model_from_json -from mistralai.models import ( +from mistralai.client.models import ( FunctionResultEntry, FunctionCallEntry, MessageOutputEntry, @@ -34,7 +34,7 @@ ToolReferenceChunk, FunctionCallEntryArguments, ) -from mistralai.utils import get_discriminator +from mistralai.client.utils import get_discriminator RunOutputEntries = ( MessageOutputEntry diff --git a/src/mistralai/extra/run/tools.py b/src/mistralai/extra/run/tools.py index b117fdea..94ef2852 100644 --- a/src/mistralai/extra/run/tools.py +++ b/src/mistralai/extra/run/tools.py @@ -21,7 +21,7 @@ from mistralai.extra.mcp.base import MCPClientProtocol from mistralai.extra.observability.otel import GenAISpanEnum, MistralAIAttributes, set_available_attributes from mistralai.extra.run.result import RunOutputEntries -from mistralai.models import ( +from mistralai.client.models import ( FunctionResultEntry, FunctionTool, Function, diff --git a/src/mistralai/extra/struct_chat.py 
b/src/mistralai/extra/struct_chat.py index 773cbb6c..d3fd3f5a 100644 --- a/src/mistralai/extra/struct_chat.py +++ b/src/mistralai/extra/struct_chat.py @@ -1,7 +1,7 @@ import json from typing import Generic -from ..models import AssistantMessage, ChatCompletionChoice, ChatCompletionResponse +from mistralai.client.models import AssistantMessage, ChatCompletionChoice, ChatCompletionResponse from .utils.response_format import CustomPydanticModel, pydantic_model_from_json diff --git a/src/mistralai/extra/tests/test_struct_chat.py b/src/mistralai/extra/tests/test_struct_chat.py index dd529ba5..7b79bf77 100644 --- a/src/mistralai/extra/tests/test_struct_chat.py +++ b/src/mistralai/extra/tests/test_struct_chat.py @@ -5,7 +5,7 @@ ParsedChatCompletionChoice, ParsedAssistantMessage, ) -from ...models import ( +from mistralai.client.models import ( ChatCompletionResponse, UsageInfo, ChatCompletionChoice, diff --git a/src/mistralai/extra/tests/test_utils.py b/src/mistralai/extra/tests/test_utils.py index 41fa53e3..35523fbd 100644 --- a/src/mistralai/extra/tests/test_utils.py +++ b/src/mistralai/extra/tests/test_utils.py @@ -5,8 +5,8 @@ ) from pydantic import BaseModel, ValidationError -from ...models import ResponseFormat, JSONSchema -from ...types.basemodel import Unset +from mistralai.client.models import ResponseFormat, JSONSchema +from mistralai.client.types.basemodel import Unset import unittest diff --git a/src/mistralai/extra/utils/response_format.py b/src/mistralai/extra/utils/response_format.py index 10bff89f..2378b562 100644 --- a/src/mistralai/extra/utils/response_format.py +++ b/src/mistralai/extra/utils/response_format.py @@ -1,7 +1,7 @@ from typing import Any, TypeVar from pydantic import BaseModel -from ...models import JSONSchema, ResponseFormat +from mistralai.client.models import JSONSchema, ResponseFormat from ._pydantic_helper import rec_strict_json_schema CustomPydanticModel = TypeVar("CustomPydanticModel", bound=BaseModel) From 
cae72da0ae1b1c9ce64ae79e3399624df13f602a Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:35:29 +0100 Subject: [PATCH 192/223] docs: update example imports for new namespace Update all examples to use new import paths: - from mistralai import -> from mistralai.client import - from mistralai.models -> from mistralai.client.models - from mistralai.types -> from mistralai.client.types --- examples/mistral/agents/async_agents_no_streaming.py | 4 ++-- examples/mistral/agents/async_conversation_agent.py | 2 +- examples/mistral/agents/async_conversation_run.py | 4 ++-- examples/mistral/agents/async_conversation_run_mcp.py | 4 ++-- .../agents/async_conversation_run_mcp_remote.py | 2 +- .../agents/async_conversation_run_mcp_remote_auth.py | 2 +- .../mistral/agents/async_conversation_run_stream.py | 4 ++-- .../mistral/agents/async_multi_turn_conversation.py | 2 +- .../audio/async_realtime_transcription_microphone.py | 4 ++-- .../audio/async_realtime_transcription_stream.py | 4 ++-- examples/mistral/audio/chat_base64.py | 4 ++-- examples/mistral/audio/chat_no_streaming.py | 4 ++-- examples/mistral/audio/chat_streaming.py | 4 ++-- examples/mistral/audio/transcription_async.py | 2 +- examples/mistral/audio/transcription_diarize_async.py | 2 +- examples/mistral/audio/transcription_segments.py | 2 +- .../mistral/audio/transcription_segments_stream.py | 2 +- examples/mistral/audio/transcription_stream_async.py | 2 +- examples/mistral/audio/transcription_url.py | 2 +- examples/mistral/chat/async_chat_no_streaming.py | 4 ++-- .../mistral/chat/async_chat_with_image_no_streaming.py | 4 ++-- examples/mistral/chat/async_chat_with_streaming.py | 4 ++-- examples/mistral/chat/async_structured_outputs.py | 2 +- examples/mistral/chat/chat_no_streaming.py | 4 ++-- examples/mistral/chat/chat_prediction.py | 4 ++-- examples/mistral/chat/chat_with_streaming.py | 4 ++-- examples/mistral/chat/chatbot_with_streaming.py | 4 ++-- examples/mistral/chat/completion_with_streaming.py | 
2 +- examples/mistral/chat/function_calling.py | 10 +++++----- examples/mistral/chat/json_format.py | 4 ++-- examples/mistral/chat/structured_outputs.py | 2 +- .../chat/structured_outputs_with_json_schema.py | 2 +- .../mistral/chat/structured_outputs_with_pydantic.py | 2 +- examples/mistral/classifier/async_classifier.py | 2 +- examples/mistral/embeddings/async_embeddings.py | 2 +- examples/mistral/embeddings/embeddings.py | 2 +- examples/mistral/fim/async_code_completion.py | 2 +- examples/mistral/fim/code_completion.py | 2 +- .../jobs/async_batch_job_chat_completion_inline.py | 2 +- examples/mistral/jobs/async_files.py | 4 ++-- examples/mistral/jobs/async_jobs.py | 4 ++-- examples/mistral/jobs/async_jobs_chat.py | 4 ++-- .../mistral/jobs/async_jobs_ocr_batch_annotation.py | 4 ++-- examples/mistral/jobs/dry_run_job.py | 4 ++-- examples/mistral/jobs/files.py | 4 ++-- examples/mistral/jobs/jobs.py | 4 ++-- examples/mistral/libraries/async_libraries.py | 4 ++-- examples/mistral/libraries/libraries.py | 4 ++-- examples/mistral/models/async_list_models.py | 2 +- examples/mistral/models/list_models.py | 2 +- examples/mistral/ocr/ocr_process_from_file.py | 2 +- examples/mistral/ocr/ocr_process_from_url.py | 2 +- 52 files changed, 82 insertions(+), 82 deletions(-) diff --git a/examples/mistral/agents/async_agents_no_streaming.py b/examples/mistral/agents/async_agents_no_streaming.py index 45f300ac..6041cad3 100755 --- a/examples/mistral/agents/async_agents_no_streaming.py +++ b/examples/mistral/agents/async_agents_no_streaming.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/agents/async_conversation_agent.py b/examples/mistral/agents/async_conversation_agent.py index 54f002ac..981f13c7 100644 --- a/examples/mistral/agents/async_conversation_agent.py +++ 
b/examples/mistral/agents/async_conversation_agent.py @@ -2,7 +2,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral MODEL = "mistral-medium-latest" diff --git a/examples/mistral/agents/async_conversation_run.py b/examples/mistral/agents/async_conversation_run.py index 27f9c870..10c81d77 100644 --- a/examples/mistral/agents/async_conversation_run.py +++ b/examples/mistral/agents/async_conversation_run.py @@ -2,9 +2,9 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel MODEL = "mistral-medium-2505" diff --git a/examples/mistral/agents/async_conversation_run_mcp.py b/examples/mistral/agents/async_conversation_run_mcp.py index 0e373715..52550004 100644 --- a/examples/mistral/agents/async_conversation_run_mcp.py +++ b/examples/mistral/agents/async_conversation_run_mcp.py @@ -3,7 +3,7 @@ import os import random -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mcp import StdioServerParameters from mistralai.extra.mcp.stdio import ( @@ -11,7 +11,7 @@ ) from pathlib import Path -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel cwd = Path(__file__).parent MODEL = "mistral-medium-latest" diff --git a/examples/mistral/agents/async_conversation_run_mcp_remote.py b/examples/mistral/agents/async_conversation_run_mcp_remote.py index 7b2f46a6..d6fac492 100644 --- a/examples/mistral/agents/async_conversation_run_mcp_remote.py +++ b/examples/mistral/agents/async_conversation_run_mcp_remote.py @@ -2,7 +2,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mistralai.extra.mcp.sse import ( diff --git 
a/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py b/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py index f69d8096..c255895e 100644 --- a/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py +++ b/examples/mistral/agents/async_conversation_run_mcp_remote_auth.py @@ -5,7 +5,7 @@ import threading import webbrowser -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mistralai.extra.mcp.sse import ( diff --git a/examples/mistral/agents/async_conversation_run_stream.py b/examples/mistral/agents/async_conversation_run_stream.py index 1e6ad87b..431b9cc9 100644 --- a/examples/mistral/agents/async_conversation_run_stream.py +++ b/examples/mistral/agents/async_conversation_run_stream.py @@ -3,14 +3,14 @@ import os import random -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext from mcp import StdioServerParameters from mistralai.extra.mcp.stdio import MCPClientSTDIO from pathlib import Path from mistralai.extra.run.result import RunResult -from mistralai.types import BaseModel +from mistralai.client.types import BaseModel cwd = Path(__file__).parent MODEL = "mistral-medium-latest" diff --git a/examples/mistral/agents/async_multi_turn_conversation.py b/examples/mistral/agents/async_multi_turn_conversation.py index d24443c0..26c2378f 100644 --- a/examples/mistral/agents/async_multi_turn_conversation.py +++ b/examples/mistral/agents/async_multi_turn_conversation.py @@ -1,5 +1,5 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.run.context import RunContext import logging diff --git a/examples/mistral/audio/async_realtime_transcription_microphone.py b/examples/mistral/audio/async_realtime_transcription_microphone.py index 748dbcaf..191a21e4 100644 --- a/examples/mistral/audio/async_realtime_transcription_microphone.py +++ 
b/examples/mistral/audio/async_realtime_transcription_microphone.py @@ -23,9 +23,9 @@ from rich.panel import Panel from rich.text import Text -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.realtime import UnknownRealtimeEvent -from mistralai.models import ( +from mistralai.client.models import ( AudioFormat, RealtimeTranscriptionError, RealtimeTranscriptionSessionCreated, diff --git a/examples/mistral/audio/async_realtime_transcription_stream.py b/examples/mistral/audio/async_realtime_transcription_stream.py index 6dbcd103..0a0ac609 100644 --- a/examples/mistral/audio/async_realtime_transcription_stream.py +++ b/examples/mistral/audio/async_realtime_transcription_stream.py @@ -9,9 +9,9 @@ from pathlib import Path from typing import AsyncIterator -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra.realtime.connection import UnknownRealtimeEvent -from mistralai.models import ( +from mistralai.client.models import ( AudioFormat, RealtimeTranscriptionError, TranscriptionStreamDone, diff --git a/examples/mistral/audio/chat_base64.py b/examples/mistral/audio/chat_base64.py index 8468fbfb..d6afb2ab 100755 --- a/examples/mistral/audio/chat_base64.py +++ b/examples/mistral/audio/chat_base64.py @@ -2,8 +2,8 @@ import base64 import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/audio/chat_no_streaming.py b/examples/mistral/audio/chat_no_streaming.py index f10240bd..87237ec0 100755 --- a/examples/mistral/audio/chat_no_streaming.py +++ b/examples/mistral/audio/chat_no_streaming.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/audio/chat_streaming.py 
b/examples/mistral/audio/chat_streaming.py index f9c913a0..a9ab2323 100755 --- a/examples/mistral/audio/chat_streaming.py +++ b/examples/mistral/audio/chat_streaming.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral, File -from mistralai.models import UserMessage +from mistralai.client import Mistral, File +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/audio/transcription_async.py b/examples/mistral/audio/transcription_async.py index 9092fc03..c8fd9ae6 100644 --- a/examples/mistral/audio/transcription_async.py +++ b/examples/mistral/audio/transcription_async.py @@ -2,7 +2,7 @@ import os import asyncio -from mistralai import Mistral, File +from mistralai.client import Mistral, File async def main(): diff --git a/examples/mistral/audio/transcription_diarize_async.py b/examples/mistral/audio/transcription_diarize_async.py index 26754837..cbdf3512 100644 --- a/examples/mistral/audio/transcription_diarize_async.py +++ b/examples/mistral/audio/transcription_diarize_async.py @@ -3,7 +3,7 @@ import os import asyncio import pathlib -from mistralai import Mistral, File +from mistralai.client import Mistral, File fixture_dir = pathlib.Path(__file__).parents[2] / "fixtures" diff --git a/examples/mistral/audio/transcription_segments.py b/examples/mistral/audio/transcription_segments.py index 626b83e2..3d691711 100644 --- a/examples/mistral/audio/transcription_segments.py +++ b/examples/mistral/audio/transcription_segments.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/audio/transcription_segments_stream.py b/examples/mistral/audio/transcription_segments_stream.py index bedfbd40..32edf951 100644 --- a/examples/mistral/audio/transcription_segments_stream.py +++ b/examples/mistral/audio/transcription_segments_stream.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git 
a/examples/mistral/audio/transcription_stream_async.py b/examples/mistral/audio/transcription_stream_async.py index b7f553b3..6e64dcf7 100644 --- a/examples/mistral/audio/transcription_stream_async.py +++ b/examples/mistral/audio/transcription_stream_async.py @@ -2,7 +2,7 @@ import asyncio import os -from mistralai import Mistral, File +from mistralai.client import Mistral, File async def main(): diff --git a/examples/mistral/audio/transcription_url.py b/examples/mistral/audio/transcription_url.py index b194b50c..907f830d 100644 --- a/examples/mistral/audio/transcription_url.py +++ b/examples/mistral/audio/transcription_url.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/chat/async_chat_no_streaming.py b/examples/mistral/chat/async_chat_no_streaming.py index 9448f09d..ad45d0fd 100755 --- a/examples/mistral/chat/async_chat_no_streaming.py +++ b/examples/mistral/chat/async_chat_no_streaming.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/chat/async_chat_with_image_no_streaming.py b/examples/mistral/chat/async_chat_with_image_no_streaming.py index efadff89..5d2cbdaa 100755 --- a/examples/mistral/chat/async_chat_with_image_no_streaming.py +++ b/examples/mistral/chat/async_chat_with_image_no_streaming.py @@ -4,8 +4,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/chat/async_chat_with_streaming.py b/examples/mistral/chat/async_chat_with_streaming.py index 1ef500ae..1642ea41 100755 --- a/examples/mistral/chat/async_chat_with_streaming.py +++ b/examples/mistral/chat/async_chat_with_streaming.py @@ -3,8 +3,8 @@ 
import asyncio import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage async def main(): diff --git a/examples/mistral/chat/async_structured_outputs.py b/examples/mistral/chat/async_structured_outputs.py index a512d38f..09ed5737 100644 --- a/examples/mistral/chat/async_structured_outputs.py +++ b/examples/mistral/chat/async_structured_outputs.py @@ -4,7 +4,7 @@ import os from pydantic import BaseModel -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/chat/chat_no_streaming.py b/examples/mistral/chat/chat_no_streaming.py index 72506dd9..5f6968ca 100755 --- a/examples/mistral/chat/chat_no_streaming.py +++ b/examples/mistral/chat/chat_no_streaming.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/chat_prediction.py b/examples/mistral/chat/chat_prediction.py index 1ff87e3f..88c57e77 100644 --- a/examples/mistral/chat/chat_prediction.py +++ b/examples/mistral/chat/chat_prediction.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/chat_with_streaming.py b/examples/mistral/chat/chat_with_streaming.py index 66b167f1..94a3e29c 100755 --- a/examples/mistral/chat/chat_with_streaming.py +++ b/examples/mistral/chat/chat_with_streaming.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/chatbot_with_streaming.py 
b/examples/mistral/chat/chatbot_with_streaming.py index 8d47deb5..bbc3881f 100755 --- a/examples/mistral/chat/chatbot_with_streaming.py +++ b/examples/mistral/chat/chatbot_with_streaming.py @@ -8,8 +8,8 @@ import readline import sys -from mistralai import Mistral -from mistralai.models import AssistantMessage, SystemMessage, UserMessage +from mistralai.client import Mistral +from mistralai.client.models import AssistantMessage, SystemMessage, UserMessage MODEL_LIST = [ "mistral-small-latest", diff --git a/examples/mistral/chat/completion_with_streaming.py b/examples/mistral/chat/completion_with_streaming.py index 5bee2033..399e8638 100644 --- a/examples/mistral/chat/completion_with_streaming.py +++ b/examples/mistral/chat/completion_with_streaming.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/chat/function_calling.py b/examples/mistral/chat/function_calling.py index aba7d671..f0eb9e70 100644 --- a/examples/mistral/chat/function_calling.py +++ b/examples/mistral/chat/function_calling.py @@ -3,11 +3,11 @@ import os from typing import Dict, List -from mistralai import Mistral -from mistralai.models.assistantmessage import AssistantMessage -from mistralai.models.function import Function -from mistralai.models.toolmessage import ToolMessage -from mistralai.models.usermessage import UserMessage +from mistralai.client import Mistral +from mistralai.client.models.assistantmessage import AssistantMessage +from mistralai.client.models.function import Function +from mistralai.client.models.toolmessage import ToolMessage +from mistralai.client.models.usermessage import UserMessage # Assuming we have the following data data = { diff --git a/examples/mistral/chat/json_format.py b/examples/mistral/chat/json_format.py index 23c38680..8fa1416a 100755 --- a/examples/mistral/chat/json_format.py +++ b/examples/mistral/chat/json_format.py @@ -2,8 +2,8 @@ import os -from 
mistralai import Mistral -from mistralai.models import UserMessage +from mistralai.client import Mistral +from mistralai.client.models import UserMessage def main(): diff --git a/examples/mistral/chat/structured_outputs.py b/examples/mistral/chat/structured_outputs.py index bc4a5e18..64521f46 100644 --- a/examples/mistral/chat/structured_outputs.py +++ b/examples/mistral/chat/structured_outputs.py @@ -3,7 +3,7 @@ import os from pydantic import BaseModel -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/chat/structured_outputs_with_json_schema.py b/examples/mistral/chat/structured_outputs_with_json_schema.py index 69ac9690..2f99f747 100644 --- a/examples/mistral/chat/structured_outputs_with_json_schema.py +++ b/examples/mistral/chat/structured_outputs_with_json_schema.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/chat/structured_outputs_with_pydantic.py b/examples/mistral/chat/structured_outputs_with_pydantic.py index 299f7509..ded9d52d 100644 --- a/examples/mistral/chat/structured_outputs_with_pydantic.py +++ b/examples/mistral/chat/structured_outputs_with_pydantic.py @@ -3,7 +3,7 @@ import os from pydantic import BaseModel -from mistralai import Mistral +from mistralai.client import Mistral from typing import List diff --git a/examples/mistral/classifier/async_classifier.py b/examples/mistral/classifier/async_classifier.py index 10c8bb76..d5ee6cc1 100644 --- a/examples/mistral/classifier/async_classifier.py +++ b/examples/mistral/classifier/async_classifier.py @@ -2,7 +2,7 @@ from pprint import pprint import asyncio -from mistralai import Mistral, TrainingFile, ClassifierTrainingParametersIn +from mistralai.client import Mistral, TrainingFile, ClassifierTrainingParametersIn import os diff --git a/examples/mistral/embeddings/async_embeddings.py b/examples/mistral/embeddings/async_embeddings.py index 
781e87af..413769f3 100755 --- a/examples/mistral/embeddings/async_embeddings.py +++ b/examples/mistral/embeddings/async_embeddings.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/embeddings/embeddings.py b/examples/mistral/embeddings/embeddings.py index 046c87d4..64301ca0 100755 --- a/examples/mistral/embeddings/embeddings.py +++ b/examples/mistral/embeddings/embeddings.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/fim/async_code_completion.py b/examples/mistral/fim/async_code_completion.py index a6bc5717..cb6db241 100644 --- a/examples/mistral/fim/async_code_completion.py +++ b/examples/mistral/fim/async_code_completion.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/fim/code_completion.py b/examples/mistral/fim/code_completion.py index f3d70a68..4f25c59c 100644 --- a/examples/mistral/fim/code_completion.py +++ b/examples/mistral/fim/code_completion.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py index e728b8fa..8a1d8774 100644 --- a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py +++ b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py @@ -1,4 +1,4 @@ -from mistralai import Mistral, BatchRequest, UserMessage +from mistralai.client import Mistral, BatchRequest, UserMessage import os import asyncio diff --git a/examples/mistral/jobs/async_files.py b/examples/mistral/jobs/async_files.py index 4dc21542..4bec5237 100644 --- a/examples/mistral/jobs/async_files.py +++ b/examples/mistral/jobs/async_files.py 
@@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import File +from mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/jobs/async_jobs.py b/examples/mistral/jobs/async_jobs.py index 44a58af1..12f9035e 100644 --- a/examples/mistral/jobs/async_jobs.py +++ b/examples/mistral/jobs/async_jobs.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import File, CompletionTrainingParametersIn +from mistralai.client import Mistral +from mistralai.client.models import File, CompletionTrainingParametersIn async def main(): diff --git a/examples/mistral/jobs/async_jobs_chat.py b/examples/mistral/jobs/async_jobs_chat.py index 80e598c7..f14fb833 100644 --- a/examples/mistral/jobs/async_jobs_chat.py +++ b/examples/mistral/jobs/async_jobs_chat.py @@ -5,8 +5,8 @@ import random from pathlib import Path -from mistralai import Mistral -from mistralai.models import ( +from mistralai.client import Mistral +from mistralai.client.models import ( File, CompletionTrainingParametersIn, ) diff --git a/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py b/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py index e62bca17..f209507d 100644 --- a/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py +++ b/examples/mistral/jobs/async_jobs_ocr_batch_annotation.py @@ -7,9 +7,9 @@ import httpx from pydantic import BaseModel, Field -from mistralai import Mistral +from mistralai.client import Mistral from mistralai.extra import response_format_from_pydantic_model -from mistralai.models import File +from mistralai.client.models import File SAMPLE_PDF_URL = "https://arxiv.org/pdf/2401.04088" diff --git a/examples/mistral/jobs/dry_run_job.py b/examples/mistral/jobs/dry_run_job.py index 84a2d0ce..d4280836 100644 --- a/examples/mistral/jobs/dry_run_job.py +++ 
b/examples/mistral/jobs/dry_run_job.py @@ -3,8 +3,8 @@ import asyncio import os -from mistralai import Mistral -from mistralai.models import CompletionTrainingParametersIn +from mistralai.client import Mistral +from mistralai.client.models import CompletionTrainingParametersIn async def main(): diff --git a/examples/mistral/jobs/files.py b/examples/mistral/jobs/files.py index 5dce880b..50f6472c 100644 --- a/examples/mistral/jobs/files.py +++ b/examples/mistral/jobs/files.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import File +from mistralai.client import Mistral +from mistralai.client.models import File def main(): diff --git a/examples/mistral/jobs/jobs.py b/examples/mistral/jobs/jobs.py index f65fda8e..be3a821f 100644 --- a/examples/mistral/jobs/jobs.py +++ b/examples/mistral/jobs/jobs.py @@ -1,8 +1,8 @@ #!/usr/bin/env python import os -from mistralai import Mistral -from mistralai.models import File, CompletionTrainingParametersIn +from mistralai.client import Mistral +from mistralai.client.models import File, CompletionTrainingParametersIn def main(): diff --git a/examples/mistral/libraries/async_libraries.py b/examples/mistral/libraries/async_libraries.py index b2f9d4c4..fc5e6541 100644 --- a/examples/mistral/libraries/async_libraries.py +++ b/examples/mistral/libraries/async_libraries.py @@ -3,8 +3,8 @@ import os import asyncio -from mistralai import Mistral -from mistralai.models import File +from mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/libraries/libraries.py b/examples/mistral/libraries/libraries.py index 88436540..8e4b2998 100644 --- a/examples/mistral/libraries/libraries.py +++ b/examples/mistral/libraries/libraries.py @@ -2,8 +2,8 @@ import os -from mistralai import Mistral -from mistralai.models import File +from mistralai.client import Mistral +from mistralai.client.models import File def main(): diff --git 
a/examples/mistral/models/async_list_models.py b/examples/mistral/models/async_list_models.py index 4243d862..8b1ac503 100755 --- a/examples/mistral/models/async_list_models.py +++ b/examples/mistral/models/async_list_models.py @@ -3,7 +3,7 @@ import asyncio import os -from mistralai import Mistral +from mistralai.client import Mistral async def main(): diff --git a/examples/mistral/models/list_models.py b/examples/mistral/models/list_models.py index c6c0c855..9b68f806 100755 --- a/examples/mistral/models/list_models.py +++ b/examples/mistral/models/list_models.py @@ -2,7 +2,7 @@ import os -from mistralai import Mistral +from mistralai.client import Mistral def main(): diff --git a/examples/mistral/ocr/ocr_process_from_file.py b/examples/mistral/ocr/ocr_process_from_file.py index 84a7b4d8..9368ceeb 100644 --- a/examples/mistral/ocr/ocr_process_from_file.py +++ b/examples/mistral/ocr/ocr_process_from_file.py @@ -1,4 +1,4 @@ -from mistralai import Mistral +from mistralai.client import Mistral import os import json from pathlib import Path diff --git a/examples/mistral/ocr/ocr_process_from_url.py b/examples/mistral/ocr/ocr_process_from_url.py index 55f31282..4f3b0224 100644 --- a/examples/mistral/ocr/ocr_process_from_url.py +++ b/examples/mistral/ocr/ocr_process_from_url.py @@ -1,7 +1,7 @@ import json import os -from mistralai import Mistral +from mistralai.client import Mistral MISTRAL_7B_PDF_URL = "https://arxiv.org/pdf/2310.06825.pdf" From a7f5e1c0446baca8cc13084e2c364fa5a692b661 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:35:56 +0100 Subject: [PATCH 193/223] ci: update lint script paths and add namespace guard - Update hooks path from _hooks/ to client/_hooks/ - Add check that src/mistralai/__init__.py must not exist (PEP 420) --- scripts/lint_custom_code.sh | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh 
index 7c084463..5bf9d675 100755 --- a/scripts/lint_custom_code.sh +++ b/scripts/lint_custom_code.sh @@ -2,13 +2,21 @@ ERRORS=0 +echo "Checking PEP 420 namespace integrity..." +if [ -f src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK" +fi + echo "Running mypy..." # TODO: Uncomment once the examples are fixed # uv run mypy examples/ || ERRORS=1 echo "-> running on extra" uv run mypy src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -uv run mypy src/mistralai/_hooks/ \ +uv run mypy src/mistralai/client/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 echo "-> running on scripts" uv run mypy scripts/ || ERRORS=1 @@ -19,7 +27,7 @@ echo "Running pyright..." echo "-> running on extra" uv run pyright src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -uv run pyright src/mistralai/_hooks/ || ERRORS=1 +uv run pyright src/mistralai/client/_hooks/ || ERRORS=1 echo "-> running on scripts" uv run pyright scripts/ || ERRORS=1 @@ -29,7 +37,7 @@ uv run ruff check examples/ || ERRORS=1 echo "-> running on extra" uv run ruff check src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" -uv run ruff check src/mistralai/_hooks/ \ +uv run ruff check src/mistralai/client/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 echo "-> running on scripts" uv run ruff check scripts/ || ERRORS=1 From 9d290ad34abc9e99f9d6ca5ef3296081809f98b6 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:39:36 +0100 Subject: [PATCH 194/223] docs: add MIGRATION.md for v1 to v2 upgrade --- MIGRATION.md | 247 ++++----------------------------------------------- 1 file changed, 18 insertions(+), 229 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index 7ccdf9c0..3333f6ba 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,242 +1,31 @@ +# Migration Guide: v1 to v2 -# Migration Guide for 
MistralAI Client from 0.\*.\* to 1.0.0 +## Import Changes -We have made significant changes to the `mistralai` library to improve its usability and consistency. This guide will help you migrate your code from the old client to the new one. +### Main Client -## Major Changes - -1. **Unified Client Class**: - - The `MistralClient` and `MistralAsyncClient` classes have been consolidated into a single `Mistral` class. - - This simplifies the API by providing a single entry point for both synchronous and asynchronous operations. - -2. **Method Names and Structure**: - - The method names and structure have been updated for better clarity and consistency. - - For example: - - `client.chat` is now `client.chat.complete` for non-streaming calls - - `client.chat_stream` is now `client.chat.stream` for streaming calls - - Async `client.chat` is now `client.chat.complete_async` for async non-streaming calls - - Async `client.chat_stream` is now `client.chat.stream_async` for async streaming calls - - -## Method changes - -### Sync - -| Old Methods | New Methods | -| -------------------------- | -------------------------------- | -| `MistralCLient` | `Mistral` | -| `client.chat` | `client.chat.complete` | -| `client.chat_stream` | `client.chat.stream` | -| `client.completions` | `client.fim.complete` | -| `client.completions_stream`| `client.fim.stream` | -| `client.embeddings` | `client.embeddings.create` | -| `client.list_models` | `client.models.list` | -| `client.delete_model` | `client.models.delete` | -| `client.files.create` | `client.files.upload` | -| `client.files.list` | `client.files.list` | -| `client.files.retrieve` | `client.files.retrieve` | -| `client.files.delete` | `client.files.delete` | -| `client.jobs.create` | `client.fine_tuning.jobs.create` | -| `client.jobs.list` | `client.fine_tuning.jobs.list` | -| `client.jobs.retrieve` | `client.fine_tuning.jobs.get` | -| `client.jobs.cancel` | `client.fine_tuning.jobs.cancel` | - -### Async - -| Old Methods | New 
Methods | -| -------------------------------- | -------------------------------------- | -| `MistralAsyncClient` | `Mistral` | -| `async_client.chat` | `client.chat.complete_async` | -| `async_client.chat_stream` | `client.chat.stream_async` | -| `async_client.completions` | `client.fim.complete_async` | -| `async_client.completions_stream`| `client.fim.stream_async` | -| `async_client.embeddings` | `client.embeddings.create_async` | -| `async_client.list_models` | `client.models.list_async` | -| `async_client.delete_model` | `client.models.delete_async` | -| `async_client.files.create` | `client.files.upload_async` | -| `async_client.files.list` | `client.files.list_async` | -| `async_client.files.retrieve` | `client.files.retrieve_async` | -| `async_client.files.delete` | `client.files.delete_async` | -| `async_client.jobs.create` | `client.fine_tuning.jobs.create_async` | -| `async_client.jobs.list` | `client.fine_tuning.jobs.list_async` | -| `async_client.jobs.retrieve` | `client.fine_tuning.jobs.get_async` | -| `async_client.jobs.cancel` | `client.fine_tuning.jobs.cancel_async` | - -### Message Changes - -The `ChatMessage` class has been replaced with a more flexible system. You can now use the `SystemMessage`, `UserMessage`, `AssistantMessage`, and `ToolMessage` classes to create messages. - -The return object of the stream call methods have been modified to `chunk.data.choices[0].delta.content` from `chunk.choices[0].delta.content`. 
- -## Example Migrations - -### Example 1: Non-Streaming Chat - -**Old:** ```python -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage - -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" +# v1 +from mistralai import Mistral -client = MistralClient(api_key=api_key) - -messages = [ - ChatMessage(role="user", content="What is the best French cheese?") -] - -# No streaming -chat_response = client.chat( - model=model, - messages=messages, -) - -print(chat_response.choices[0].message.content) +# v2 +from mistralai.client import Mistral ``` -**New:** +### Models and Types ```python -import os - -from mistralai import Mistral, UserMessage - -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" - -client = Mistral(api_key=api_key) - -messages = [ - { - "role": "user", - "content": "What is the best French cheese?", - }, -] -# Or using the new message classes -# messages = [ -# UserMessage(content="What is the best French cheese?"), -# ] - -chat_response = client.chat.complete( - model=model, - messages=messages, -) +# v1 +from mistralai.models import UserMessage -print(chat_response.choices[0].message.content) +# v2 +from mistralai.client.models import UserMessage ``` -### Example 2: Streaming Chat - -**Old:** - -```python -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage - -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" - -client = MistralClient(api_key=api_key) - -messages = [ - ChatMessage(role="user", content="What is the best French cheese?") -] - -# With streaming -stream_response = client.chat_stream(model=model, messages=messages) +## Quick Reference -for chunk in stream_response: - print(chunk.choices[0].delta.content) -``` -**New:** -```python -import os - -from mistralai import Mistral, UserMessage - -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" - -client = 
Mistral(api_key=api_key) - -messages = [ - { - "role": "user", - "content": "What is the best French cheese?", - }, -] -# Or using the new message classes -# messages = [ -# UserMessage(content="What is the best French cheese?"), -# ] - -stream_response = client.chat.stream( - model=model, - messages=messages, -) - -for chunk in stream_response: - print(chunk.data.choices[0].delta.content) - -``` - -### Example 3: Async - -**Old:** -```python -from mistralai.async_client import MistralAsyncClient -from mistralai.models.chat_completion import ChatMessage - -api_key = os.environ["MISTRAL_API_KEY"] -model = "mistral-large-latest" - -client = MistralAsyncClient(api_key=api_key) - -messages = [ - ChatMessage(role="user", content="What is the best French cheese?") -] - -# With async -async_response = client.chat_stream(model=model, messages=messages) - -async for chunk in async_response: - print(chunk.choices[0].delta.content) -``` - -**New:** -```python -import asyncio -import os - -from mistralai import Mistral, UserMessage - - -async def main(): - client = Mistral( - api_key=os.getenv("MISTRAL_API_KEY", ""), - ) - - messages = [ - { - "role": "user", - "content": "What is the best French cheese?", - }, - ] - # Or using the new message classes - # messages = [ - # UserMessage( - # content="What is the best French cheese?", - # ), - # ] - async_response = await client.chat.stream_async( - messages=messages, - model="mistral-large-latest", - ) - - async for chunk in async_response: - print(chunk.data.choices[0].delta.content) - - -asyncio.run(main()) -``` +| v1 | v2 | +|----|-----| +| `from mistralai import` | `from mistralai.client import` | +| `from mistralai.models` | `from mistralai.client.models` | +| `from mistralai.types` | `from mistralai.client.types` | From cf268e36ba234ead1bef936a2c772d1e02d4889e Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 16:39:55 +0100 Subject: [PATCH 195/223] docs: update README for v2 migration reference --- README.md 
| 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e71b1a19..129e8ee0 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ # Mistral Python Client ## Migration warning - -This documentation is for Mistral AI SDK v1. You can find more details on how to migrate from v0 to v1 [here](MIGRATION.md) + +This documentation is for Mistral AI SDK v2. You can find more details on how to migrate from v1 to v2 [here](MIGRATION.md) ## API Key Setup From 52b8d3f190effc8601c7ec791d1eef77ec64108b Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 17:45:58 +0100 Subject: [PATCH 196/223] fix: restore custom SDK methods lost during regeneration Speakeasy's sdk-class-body regions were not copied when regenerating to the new mistralai.client namespace. Restored: - chat.py: parse, parse_async, parse_stream, parse_stream_async - conversations.py: run_async, run_stream_async - audio.py: realtime property Updated imports to use mistralai.client.* paths. 
--- src/mistralai/client/audio.py | 18 ++- src/mistralai/client/chat.py | 83 +++++++++- src/mistralai/client/conversations.py | 208 ++++++++++++++++++++++++++ 3 files changed, 307 insertions(+), 2 deletions(-) diff --git a/src/mistralai/client/audio.py b/src/mistralai/client/audio.py index 28ccda1b..e75d6dc8 100644 --- a/src/mistralai/client/audio.py +++ b/src/mistralai/client/audio.py @@ -3,13 +3,29 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration from mistralai.client.transcriptions import Transcriptions -from typing import Optional +from typing import Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from mistralai.extra.realtime import RealtimeTranscription class Audio(BaseSDK): transcriptions: Transcriptions r"""API for audio transcription.""" + # region sdk-class-body + @property + def realtime(self) -> "RealtimeTranscription": + """Returns a client for real-time audio transcription via WebSocket.""" + if not hasattr(self, "_realtime"): + from mistralai.extra.realtime import RealtimeTranscription # pylint: disable=import-outside-toplevel + + self._realtime = RealtimeTranscription(self.sdk_configuration) # pylint: disable=attribute-defined-outside-init + + return self._realtime + + # endregion sdk-class-body + def __init__( self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None ) -> None: diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py index 9c50bce8..056c652e 100644 --- a/src/mistralai/client/chat.py +++ b/src/mistralai/client/chat.py @@ -14,12 +14,93 @@ from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Type, Union + +from mistralai.extra.struct_chat import ( + ParsedChatCompletionResponse, + 
convert_to_parsed_chat_completion_response, +) +from mistralai.extra.utils.response_format import ( + CustomPydanticModel, + response_format_from_pydantic_model, +) class Chat(BaseSDK): r"""Chat Completion API.""" + # region sdk-class-body + # Custom .parse methods for the Structure Outputs Feature. + + def parse( + self, response_format: Type[CustomPydanticModel], **kwargs: Any + ) -> ParsedChatCompletionResponse[CustomPydanticModel]: + """ + Parse the response using the provided response format. + :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .complete method + :return: The parsed response + """ + # Convert the input Pydantic Model to a strict JSON ready to be passed to chat.complete + json_response_format = response_format_from_pydantic_model(response_format) + # Run the inference + response = self.complete(**kwargs, response_format=json_response_format) + # Parse response back to the input pydantic model + parsed_response = convert_to_parsed_chat_completion_response( + response, response_format + ) + return parsed_response + + async def parse_async( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> ParsedChatCompletionResponse[CustomPydanticModel]: + """ + Asynchronously parse the response using the provided response format. 
+ :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .complete method + :return: The parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = await self.complete_async( # pylint: disable=E1125 + **kwargs, response_format=json_response_format + ) + parsed_response = convert_to_parsed_chat_completion_response( + response, response_format + ) + return parsed_response + + def parse_stream( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> eventstreaming.EventStream[models.CompletionEvent]: + """ + Parse the response using the provided response format. + For now the response will be in JSON format not in the input Pydantic model. + :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .stream method + :return: The JSON parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = self.stream(**kwargs, response_format=json_response_format) + return response + + async def parse_stream_async( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + """ + Asynchronously parse the response using the provided response format. + For now the response will be in JSON format not in the input Pydantic model. 
+ :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .stream method + :return: The JSON parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = await self.stream_async( # pylint: disable=E1125 + **kwargs, response_format=json_response_format + ) + return response + + # endregion sdk-class-body + def complete( self, *, diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py index 9caf4221..12390b14 100644 --- a/src/mistralai/client/conversations.py +++ b/src/mistralai/client/conversations.py @@ -18,10 +18,218 @@ from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Dict, List, Mapping, Optional, Union +# region imports +import typing +from typing import AsyncGenerator +import logging +from collections import defaultdict + +from mistralai.client.models import ( + ResponseStartedEvent, + ConversationEventsData, + InputEntries, +) +from mistralai.extra.run.result import ( + RunResult, + RunResultEvents, + FunctionResultEvent, + reconstitue_entries, +) +from mistralai.extra.run.utils import run_requirements +from mistralai.extra.observability.otel import GenAISpanEnum, get_or_create_otel_tracer + +logger = logging.getLogger(__name__) +tracing_enabled, tracer = get_or_create_otel_tracer() + +if typing.TYPE_CHECKING: + from mistralai.extra.run.context import RunContext + +# endregion imports + class Conversations(BaseSDK): r"""(beta) Conversations API""" + # region sdk-class-body + # Custom run code allowing client side execution of code + + @run_requirements + async def run_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] 
+ ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> RunResult: + """Run a conversation with the given inputs and context. + + The execution of a run will only stop when no required local execution can be done.""" + from mistralai.client.beta import Beta + from mistralai.extra.run.context import _validate_run + from mistralai.extra.run.tools import get_function_calls + + with tracer.start_as_current_span(GenAISpanEnum.VALIDATE_RUN.value): + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + with tracer.start_as_current_span(GenAISpanEnum.CONVERSATION.value): + while True: + if run_ctx.conversation_id is None: + res = await self.start_async( + inputs=input_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, # type: ignore + ) + run_result.conversation_id = res.conversation_id + run_ctx.conversation_id = res.conversation_id + logger.info( + f"Started Run with conversation with id {res.conversation_id}" + ) + else: + res = await self.append_async( + conversation_id=run_ctx.conversation_id, + inputs=input_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + run_ctx.request_count += 1 + run_result.output_entries.extend(res.outputs) + fcalls = get_function_calls(res.outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + else: + fresults = await run_ctx.execute_function_calls(fcalls) + 
run_result.output_entries.extend(fresults) + input_entries = typing.cast(list[InputEntries], fresults) + return run_result + + @run_requirements + async def run_stream_async( + self, + run_ctx: "RunContext", + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], + instructions: OptionalNullable[str] = UNSET, + tools: OptionalNullable[ + Union[List[models.Tools], List[models.ToolsTypedDict]] + ] = UNSET, + completion_args: OptionalNullable[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = UNSET, + name: OptionalNullable[str] = UNSET, + description: OptionalNullable[str] = UNSET, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ) -> AsyncGenerator[Union[RunResultEvents, RunResult], None]: + """Similar to `run_async` but returns a generator which streams events. + + The last streamed object is the RunResult object which summarises what happened in the run.""" + from mistralai.client.beta import Beta + from mistralai.extra.run.context import _validate_run + from mistralai.extra.run.tools import get_function_calls + + req, run_result, input_entries = await _validate_run( + beta_client=Beta(self.sdk_configuration), + run_ctx=run_ctx, + inputs=inputs, + instructions=instructions, + tools=tools, + completion_args=completion_args, + ) + + async def run_generator() -> ( + AsyncGenerator[Union[RunResultEvents, RunResult], None] + ): + current_entries = input_entries + while True: + received_event_tracker: defaultdict[ + int, list[ConversationEventsData] + ] = defaultdict(list) + if run_ctx.conversation_id is None: + res = await self.start_stream_async( + inputs=current_entries, + http_headers=http_headers, + name=name, + description=description, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + **req, # type: ignore + ) + else: + res = await self.append_stream_async( + 
conversation_id=run_ctx.conversation_id, + inputs=current_entries, + retries=retries, + server_url=server_url, + timeout_ms=timeout_ms, + ) + async for event in res: + if ( + isinstance(event.data, ResponseStartedEvent) + and run_ctx.conversation_id is None + ): + run_result.conversation_id = event.data.conversation_id + run_ctx.conversation_id = event.data.conversation_id + logger.info( + f"Started Run with conversation with id {run_ctx.conversation_id}" + ) + if ( + output_index := getattr(event.data, "output_index", None) + ) is not None: + received_event_tracker[output_index].append(event.data) + yield typing.cast(RunResultEvents, event) + run_ctx.request_count += 1 + outputs = reconstitue_entries(received_event_tracker) + run_result.output_entries.extend(outputs) + fcalls = get_function_calls(outputs) + if not fcalls: + logger.debug("No more function calls to execute") + break + else: + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + for fresult in fresults: + yield RunResultEvents( + event="function.result", + data=FunctionResultEvent( + type="function.result", + result=fresult.result, + tool_call_id=fresult.tool_call_id, + ), + ) + current_entries = typing.cast(list[InputEntries], fresults) + yield run_result + + return run_generator() + + # endregion sdk-class-body + def start( self, *, From bc680d7415973962e09062f1414f467789c0afb8 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 18:02:13 +0100 Subject: [PATCH 197/223] chore: update speakeasy lock files --- .speakeasy/gen.lock | 18 +++++++++--------- .speakeasy/workflow.lock | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 7aae1acb..345ea2c8 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -11,9 +11,9 @@ management: installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - 
generation_id: edcb81a1-4bcb-439e-bfcb-f30eaac48c6a - pristine_commit_hash: b192b65dd75820612c5c672593ed322d420d2c73 - pristine_tree_hash: 869c5c810e502634a018e5792d4c2efe2686dbad + generation_id: b2306c28-6200-44c1-a856-ddd318359c15 + pristine_commit_hash: dc36861e5d8b9f4c91221be8f09dc13254755c9a + pristine_tree_hash: 640358903b623a1b0d7deabbb43f39e82676a1a1 features: python: additionalDependencies: 1.0.0 @@ -1882,8 +1882,8 @@ trackedFiles: pristine_git_object: c04abd21b5b7cb9b8ddfdb52ec67fffa7d21759a src/mistralai/client/audio.py: id: 7a8ed2e90d61 - last_write_checksum: sha1:9ecd271eedf02703b45e6bc4280df10ed2edbbc8 - pristine_git_object: 28ccda1b533b4cef31844bddae2289268b459a24 + last_write_checksum: sha1:941d0466d9ff5d07c30a6e41cf4434857518963a + pristine_git_object: 2834ade22ab137b7620bfd4318fba4bdd9ef087f src/mistralai/client/basesdk.py: id: 7518c67b81ea last_write_checksum: sha1:c10ba4619b8c70ff876304a93b432d4466cb7112 @@ -1898,16 +1898,16 @@ trackedFiles: pristine_git_object: b30003eae52be5e79838fe994cda8474068a43dc src/mistralai/client/chat.py: id: 7eba0f088d47 - last_write_checksum: sha1:46321214352946f2077a0f60c4c903c354a42da1 - pristine_git_object: 9c50bce81c264c70256b2ff8716e88216a78535f + last_write_checksum: sha1:53558e4f3e5ecc8d2cea51d2462aa3432d8c156e + pristine_git_object: 6fa210bb01b193e1bd034431923a3d4dc8c8a16c src/mistralai/client/classifiers.py: id: 26e773725732 last_write_checksum: sha1:b3bed5a404f8837cc12e516f3fb85f47fd37518a pristine_git_object: 537e2438afcb570a3e436ab4dd8b7d604b35b627 src/mistralai/client/conversations.py: id: 40692a878064 - last_write_checksum: sha1:fc75dc4099891c8cbfbcc72284bf8e7dbbb834a5 - pristine_git_object: 9caf42214daf262b15bac5b36467700ee17cd7d1 + last_write_checksum: sha1:fedcc53385d833f18fdd393591cb156bc5e5f3d1 + pristine_git_object: 285beddbd175fee210b697d4714c28196c1fa7a2 src/mistralai/client/documents.py: id: bcc17286c31c last_write_checksum: sha1:82287ef513f2f5ee1acb9ffe8323f2dad0fc86f4 diff --git 
a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 38b7899c..a0e535c2 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -39,7 +39,7 @@ targets: sourceRevisionDigest: sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13 sourceBlobDigest: sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:deaa27e908bb7bee4f2ad753a92beb5749805f3f160eb56c5988b336d31a531c + codeSamplesRevisionDigest: sha256:debd698577e8da014e900a57194128d867ad76fd0d2e2b361e9d0c298700fc67 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 From d4b4b2920b7b1a6566e413f44ebbdf3adbfa875d Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 18:02:17 +0100 Subject: [PATCH 198/223] fix: add region markers for speakeasy custom code preservation - chat.py: wrap custom imports in # region imports block - audio.py: wrap TYPE_CHECKING import in # region imports block - conversations.py: add pylint disable comments, fix else-after-break These markers ensure speakeasy regeneration preserves custom code. 
--- src/mistralai/client/audio.py | 30 +++++++++-------- src/mistralai/client/chat.py | 6 +++- src/mistralai/client/conversations.py | 48 +++++++++++++-------------- 3 files changed, 45 insertions(+), 39 deletions(-) diff --git a/src/mistralai/client/audio.py b/src/mistralai/client/audio.py index e75d6dc8..2834ade2 100644 --- a/src/mistralai/client/audio.py +++ b/src/mistralai/client/audio.py @@ -3,16 +3,32 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration from mistralai.client.transcriptions import Transcriptions -from typing import Optional, TYPE_CHECKING +from typing import Optional + +# region imports +from typing import TYPE_CHECKING if TYPE_CHECKING: from mistralai.extra.realtime import RealtimeTranscription +# endregion imports class Audio(BaseSDK): transcriptions: Transcriptions r"""API for audio transcription.""" + def __init__( + self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None + ) -> None: + BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) + self.sdk_configuration = sdk_config + self._init_sdks() + + def _init_sdks(self): + self.transcriptions = Transcriptions( + self.sdk_configuration, parent_ref=self.parent_ref + ) + # region sdk-class-body @property def realtime(self) -> "RealtimeTranscription": @@ -25,15 +41,3 @@ def realtime(self) -> "RealtimeTranscription": return self._realtime # endregion sdk-class-body - - def __init__( - self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None - ) -> None: - BaseSDK.__init__(self, sdk_config, parent_ref=parent_ref) - self.sdk_configuration = sdk_config - self._init_sdks() - - def _init_sdks(self): - self.transcriptions = Transcriptions( - self.sdk_configuration, parent_ref=self.parent_ref - ) diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py index 056c652e..6fa210bb 100644 --- a/src/mistralai/client/chat.py +++ b/src/mistralai/client/chat.py @@ -14,7 +14,10 @@ from mistralai.client.types import OptionalNullable, 
UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response -from typing import Any, Dict, List, Mapping, Optional, Type, Union +from typing import Any, Dict, List, Mapping, Optional, Union + +# region imports +from typing import Type from mistralai.extra.struct_chat import ( ParsedChatCompletionResponse, @@ -24,6 +27,7 @@ CustomPydanticModel, response_format_from_pydantic_model, ) +# endregion imports class Chat(BaseSDK): diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py index 12390b14..285beddb 100644 --- a/src/mistralai/client/conversations.py +++ b/src/mistralai/client/conversations.py @@ -75,9 +75,9 @@ async def run_async( """Run a conversation with the given inputs and context. The execution of a run will only stop when no required local execution can be done.""" - from mistralai.client.beta import Beta - from mistralai.extra.run.context import _validate_run - from mistralai.extra.run.tools import get_function_calls + from mistralai.client.beta import Beta # pylint: disable=import-outside-toplevel + from mistralai.extra.run.context import _validate_run # pylint: disable=import-outside-toplevel + from mistralai.extra.run.tools import get_function_calls # pylint: disable=import-outside-toplevel with tracer.start_as_current_span(GenAISpanEnum.VALIDATE_RUN.value): req, run_result, input_entries = await _validate_run( @@ -104,7 +104,7 @@ async def run_async( ) run_result.conversation_id = res.conversation_id run_ctx.conversation_id = res.conversation_id - logger.info( + logger.info( # pylint: disable=logging-fstring-interpolation f"Started Run with conversation with id {res.conversation_id}" ) else: @@ -121,10 +121,9 @@ async def run_async( if not fcalls: logger.debug("No more function calls to execute") break - else: - fresults = await run_ctx.execute_function_calls(fcalls) - run_result.output_entries.extend(fresults) - 
input_entries = typing.cast(list[InputEntries], fresults) + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + input_entries = typing.cast(list[InputEntries], fresults) return run_result @run_requirements @@ -149,9 +148,9 @@ async def run_stream_async( """Similar to `run_async` but returns a generator which streams events. The last streamed object is the RunResult object which summarises what happened in the run.""" - from mistralai.client.beta import Beta - from mistralai.extra.run.context import _validate_run - from mistralai.extra.run.tools import get_function_calls + from mistralai.client.beta import Beta # pylint: disable=import-outside-toplevel + from mistralai.extra.run.context import _validate_run # pylint: disable=import-outside-toplevel + from mistralai.extra.run.tools import get_function_calls # pylint: disable=import-outside-toplevel req, run_result, input_entries = await _validate_run( beta_client=Beta(self.sdk_configuration), @@ -196,7 +195,7 @@ async def run_generator() -> ( ): run_result.conversation_id = event.data.conversation_id run_ctx.conversation_id = event.data.conversation_id - logger.info( + logger.info( # pylint: disable=logging-fstring-interpolation f"Started Run with conversation with id {run_ctx.conversation_id}" ) if ( @@ -211,19 +210,18 @@ async def run_generator() -> ( if not fcalls: logger.debug("No more function calls to execute") break - else: - fresults = await run_ctx.execute_function_calls(fcalls) - run_result.output_entries.extend(fresults) - for fresult in fresults: - yield RunResultEvents( - event="function.result", - data=FunctionResultEvent( - type="function.result", - result=fresult.result, - tool_call_id=fresult.tool_call_id, - ), - ) - current_entries = typing.cast(list[InputEntries], fresults) + fresults = await run_ctx.execute_function_calls(fcalls) + run_result.output_entries.extend(fresults) + for fresult in fresults: + yield RunResultEvents( + 
event="function.result", + data=FunctionResultEvent( + type="function.result", + result=fresult.result, + tool_call_id=fresult.tool_call_id, + ), + ) + current_entries = typing.cast(list[InputEntries], fresults) yield run_result return run_generator() From a59414159754b0048d0f0c9193ce88ccf0548adf Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Mon, 9 Feb 2026 18:51:35 +0100 Subject: [PATCH 199/223] ci: update publish workflow for v1/v2 dual-branch support - Auto-publish from v1 branch on RELEASES.md changes - Require manual confirmation ("publish") for main branch deployments - Prevents accidental v2.0.0 release before it's ready This allows merging the v2 namespace migration to main safely while maintaining v1.x releases from the v1 branch. --- .github/workflows/sdk_publish_mistralai_sdk.yaml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml index 0a225d70..f12ea5c4 100644 --- a/.github/workflows/sdk_publish_mistralai_sdk.yaml +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -6,14 +6,23 @@ permissions: statuses: write "on": workflow_dispatch: + inputs: + confirm_publish: + description: 'Type "publish" to confirm deployment from main branch' + required: false + type: string push: branches: - - main + - v1 paths: - RELEASES.md - "*/RELEASES.md" jobs: publish: + # Auto-publish from v1 branch; require manual confirmation from main + if: | + github.ref == 'refs/heads/v1' || + (github.event_name == 'workflow_dispatch' && github.event.inputs.confirm_publish == 'publish') uses: speakeasy-api/sdk-generation-action/.github/workflows/sdk-publish.yaml@7951d9dce457425b900b2dd317253499d98c2587 # v15 secrets: github_access_token: ${{ secrets.GITHUB_TOKEN }} From 48e7d75227df7d0cd7066b1570a26160ba00d4f8 Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Tue, 10 Feb 2026 11:00:57 +0100 Subject: [PATCH 200/223] 
=?UTF-8?q?docs:=20expand=20MIGRATION.md=20with=20?= =?UTF-8?q?v0=E2=86=92v1=20and=20v1=E2=86=92v2=20guides?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add v1→v2 section explaining PEP 420 namespace change - Explain motivation (azure/gcp companion packages) - Include automated migration sed commands - Preserve v0→v1 method mapping tables and examples --- MIGRATION.md | 164 ++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 150 insertions(+), 14 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index 3333f6ba..4ab7f2ff 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -1,31 +1,167 @@ -# Migration Guide: v1 to v2 +# Migration Guide -## Import Changes +This guide covers migrating between major versions of the Mistral Python SDK. -### Main Client +--- + +## Migrating from v1.x to v2.x + +Version 2.0 updates the import paths from `mistralai` to `mistralai.client`. + +### Import Changes + +All imports move from `mistralai` to `mistralai.client`: ```python # v1 from mistralai import Mistral +from mistralai.models import UserMessage, AssistantMessage +from mistralai.types import BaseModel # v2 from mistralai.client import Mistral +from mistralai.client.models import UserMessage, AssistantMessage +from mistralai.client.types import BaseModel ``` -### Models and Types +### Quick Reference + +| v1 | v2 | +|---|---| +| `from mistralai import Mistral` | `from mistralai.client import Mistral` | +| `from mistralai.models import ...` | `from mistralai.client.models import ...` | +| `from mistralai.types import ...` | `from mistralai.client.types import ...` | +| `from mistralai.utils import ...` | `from mistralai.client.utils import ...` | + +### What Stays the Same + +- All method names and signatures remain identical +- The `Mistral` client API is unchanged +- All models (`UserMessage`, `AssistantMessage`, etc.) 
work the same way + +--- + +## Migrating from v0.x to v1.x + +Version 1.0 introduced significant changes to improve usability and consistency. + +### Major Changes + +1. **Unified Client Class**: `MistralClient` and `MistralAsyncClient` consolidated into a single `Mistral` class +2. **Method Structure**: Methods reorganized into resource-based groups (e.g., `client.chat.complete()`) +3. **Message Classes**: `ChatMessage` replaced with typed classes (`UserMessage`, `AssistantMessage`, etc.) +4. **Streaming Response**: Stream chunks now accessed via `chunk.data.choices[0].delta.content` +### Method Mapping + +#### Sync Methods + +| v0.x | v1.x | +|---|---| +| `MistralClient` | `Mistral` | +| `client.chat` | `client.chat.complete` | +| `client.chat_stream` | `client.chat.stream` | +| `client.completions` | `client.fim.complete` | +| `client.completions_stream` | `client.fim.stream` | +| `client.embeddings` | `client.embeddings.create` | +| `client.list_models` | `client.models.list` | +| `client.delete_model` | `client.models.delete` | +| `client.files.create` | `client.files.upload` | +| `client.jobs.create` | `client.fine_tuning.jobs.create` | +| `client.jobs.list` | `client.fine_tuning.jobs.list` | +| `client.jobs.retrieve` | `client.fine_tuning.jobs.get` | +| `client.jobs.cancel` | `client.fine_tuning.jobs.cancel` | + +#### Async Methods + +| v0.x | v1.x | +|---|---| +| `MistralAsyncClient` | `Mistral` | +| `async_client.chat` | `client.chat.complete_async` | +| `async_client.chat_stream` | `client.chat.stream_async` | +| `async_client.completions` | `client.fim.complete_async` | +| `async_client.completions_stream` | `client.fim.stream_async` | +| `async_client.embeddings` | `client.embeddings.create_async` | +| `async_client.list_models` | `client.models.list_async` | +| `async_client.files.create` | `client.files.upload_async` | +| `async_client.jobs.create` | `client.fine_tuning.jobs.create_async` | +| `async_client.jobs.list` | 
`client.fine_tuning.jobs.list_async` | +| `async_client.jobs.retrieve` | `client.fine_tuning.jobs.get_async` | +| `async_client.jobs.cancel` | `client.fine_tuning.jobs.cancel_async` | + +### Example: Non-Streaming Chat + +**v0.x:** ```python -# v1 -from mistralai.models import UserMessage +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage -# v2 -from mistralai.client.models import UserMessage +client = MistralClient(api_key=api_key) + +messages = [ChatMessage(role="user", content="What is the best French cheese?")] +response = client.chat(model="mistral-large-latest", messages=messages) + +print(response.choices[0].message.content) ``` -## Quick Reference +**v1.x:** +```python +from mistralai import Mistral, UserMessage -| v1 | v2 | -|----|-----| -| `from mistralai import` | `from mistralai.client import` | -| `from mistralai.models` | `from mistralai.client.models` | -| `from mistralai.types` | `from mistralai.client.types` | +client = Mistral(api_key=api_key) + +messages = [UserMessage(content="What is the best French cheese?")] +response = client.chat.complete(model="mistral-large-latest", messages=messages) + +print(response.choices[0].message.content) +``` + +### Example: Streaming Chat + +**v0.x:** +```python +from mistralai.client import MistralClient +from mistralai.models.chat_completion import ChatMessage + +client = MistralClient(api_key=api_key) +messages = [ChatMessage(role="user", content="What is the best French cheese?")] + +for chunk in client.chat_stream(model="mistral-large-latest", messages=messages): + print(chunk.choices[0].delta.content) +``` + +**v1.x:** +```python +from mistralai import Mistral, UserMessage + +client = Mistral(api_key=api_key) +messages = [UserMessage(content="What is the best French cheese?")] + +for chunk in client.chat.stream(model="mistral-large-latest", messages=messages): + print(chunk.data.choices[0].delta.content) # Note: chunk.data +``` + +### Example: Async 
Streaming + +**v0.x:** +```python +from mistralai.async_client import MistralAsyncClient +from mistralai.models.chat_completion import ChatMessage + +client = MistralAsyncClient(api_key=api_key) +messages = [ChatMessage(role="user", content="What is the best French cheese?")] + +async for chunk in client.chat_stream(model="mistral-large-latest", messages=messages): + print(chunk.choices[0].delta.content) +``` + +**v1.x:** +```python +from mistralai import Mistral, UserMessage + +client = Mistral(api_key=api_key) +messages = [UserMessage(content="What is the best French cheese?")] + +async for chunk in await client.chat.stream_async(model="mistral-large-latest", messages=messages): + print(chunk.data.choices[0].delta.content) +``` From 955b83acbc4ea9b464322990a1e82500a7afc40b Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Tue, 10 Feb 2026 14:20:46 +0100 Subject: [PATCH 201/223] ci: add warning to publish workflow about v2 alpha status --- .github/workflows/sdk_publish_mistralai_sdk.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sdk_publish_mistralai_sdk.yaml b/.github/workflows/sdk_publish_mistralai_sdk.yaml index f12ea5c4..44635571 100644 --- a/.github/workflows/sdk_publish_mistralai_sdk.yaml +++ b/.github/workflows/sdk_publish_mistralai_sdk.yaml @@ -8,7 +8,7 @@ permissions: workflow_dispatch: inputs: confirm_publish: - description: 'Type "publish" to confirm deployment from main branch' + description: 'WARNING: This will publish v2 SDK (mistralai.client namespace) which is still WIP/alpha. To publish v1 (mistralai namespace), use the v1 branch instead. Type "publish" to confirm.' 
required: false type: string push: From d4325cbdbf80d9d28d43ded3206b085f6c19eb4f Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Tue, 10 Feb 2026 14:39:52 +0100 Subject: [PATCH 202/223] fix: handle null outputs and add timeout in batch job example - Add null check for job.outputs to prevent crash when API returns no outputs - Add CANCELLED to terminal states - Add 5 minute timeout to prevent infinite polling --- .../async_batch_job_chat_completion_inline.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py index 8a1d8774..8b4cedd3 100644 --- a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py +++ b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py @@ -26,14 +26,23 @@ async def main(): print(f"Created job with ID: {job.id}") - while job.status not in ["SUCCESS", "FAILED"]: + max_wait = 60 # 1 minute timeout for CI + elapsed = 0 + while job.status not in ["SUCCESS", "FAILED", "CANCELLED"]: await asyncio.sleep(1) + elapsed += 1 + if elapsed >= max_wait: + print(f"Timeout after {max_wait}s, job still {job.status}") + return job = await client.batch.jobs.get_async(job_id=job.id) print(f"Job status: {job.status}") print(f"Job is done, status {job.status}") - for res in job.outputs: - print(res["response"]["body"]) + if job.outputs: + for res in job.outputs: + print(res["response"]["body"]) + else: + print(f"No outputs (succeeded: {job.succeeded_requests}, failed: {job.failed_requests})") if __name__ == "__main__": asyncio.run(main()) From 1f932e842bf3f93a7b80bc11bb91ea878b2aeaee Mon Sep 17 00:00:00 2001 From: Louis Sanna Date: Tue, 10 Feb 2026 15:06:10 +0100 Subject: [PATCH 203/223] fix: simplify async_conversation_run example to reduce CI flakiness The original example used code_interpreter with differential equations, which caused timeouts and flaky CI failures. 
Simplified to "2+2" math. Original complex example preserved as async_conversation_run_code_interpreter.py and added to CI skip list (too slow/flaky for CI). --- .../mistral/agents/async_conversation_run.py | 39 ++----------- ...async_conversation_run_code_interpreter.py | 57 +++++++++++++++++++ scripts/run_examples.sh | 1 + 3 files changed, 63 insertions(+), 34 deletions(-) create mode 100644 examples/mistral/agents/async_conversation_run_code_interpreter.py diff --git a/examples/mistral/agents/async_conversation_run.py b/examples/mistral/agents/async_conversation_run.py index 10c81d77..bb96ed78 100644 --- a/examples/mistral/agents/async_conversation_run.py +++ b/examples/mistral/agents/async_conversation_run.py @@ -9,48 +9,19 @@ MODEL = "mistral-medium-2505" -def math_question_generator(question_num: int): - """Random generator of mathematical question - - Args: - question_num (int): the number of the question that will be returned, should be between 1-100 - """ - return ( - "solve the following differential equation: `y'' + 3y' + 2y = 0`" - if question_num % 2 == 0 - else "solve the following differential equation: `y'' - 4y' + 4y = e^x`" - ) - - async def main(): api_key = os.environ["MISTRAL_API_KEY"] client = Mistral(api_key=api_key) - class Explanation(BaseModel): - explanation: str - output: str - - class MathDemonstration(BaseModel): - steps: list[Explanation] - final_answer: str + class MathResult(BaseModel): + answer: int - async with RunContext(model=MODEL, output_format=MathDemonstration) as run_ctx: - # register a new function that can be executed on the client side - run_ctx.register_func(math_question_generator) + async with RunContext(model=MODEL, output_format=MathResult) as run_ctx: run_result = await client.beta.conversations.run_async( run_ctx=run_ctx, - instructions="Use the code interpreter to help you when asked mathematical questions.", - inputs=[ - {"role": "user", "content": "hey"}, - {"role": "assistant", "content": "hello"}, - {"role": 
"user", "content": "Request a math question and answer it."}, - ], - tools=[{"type": "code_interpreter"}], + inputs=[{"role": "user", "content": "What is 2 + 2?"}], ) - print("All run entries:") - for entry in run_result.output_entries: - print(f"{entry}") - print(f"Final model: {run_result.output_as_model}") + print(f"Result: {run_result.output_as_model}") if __name__ == "__main__": diff --git a/examples/mistral/agents/async_conversation_run_code_interpreter.py b/examples/mistral/agents/async_conversation_run_code_interpreter.py new file mode 100644 index 00000000..10c81d77 --- /dev/null +++ b/examples/mistral/agents/async_conversation_run_code_interpreter.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python +import asyncio +import os + +from mistralai.client import Mistral +from mistralai.extra.run.context import RunContext +from mistralai.client.types import BaseModel + +MODEL = "mistral-medium-2505" + + +def math_question_generator(question_num: int): + """Random generator of mathematical question + + Args: + question_num (int): the number of the question that will be returned, should be between 1-100 + """ + return ( + "solve the following differential equation: `y'' + 3y' + 2y = 0`" + if question_num % 2 == 0 + else "solve the following differential equation: `y'' - 4y' + 4y = e^x`" + ) + + +async def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + async with RunContext(model=MODEL, output_format=MathDemonstration) as run_ctx: + # register a new function that can be executed on the client side + run_ctx.register_func(math_question_generator) + run_result = await client.beta.conversations.run_async( + run_ctx=run_ctx, + instructions="Use the code interpreter to help you when asked mathematical questions.", + inputs=[ + {"role": "user", "content": "hey"}, + {"role": "assistant", 
"content": "hello"}, + {"role": "user", "content": "Request a math question and answer it."}, + ], + tools=[{"type": "code_interpreter"}], + ) + print("All run entries:") + for entry in run_result.output_entries: + print(f"{entry}") + print(f"Final model: {run_result.output_as_model}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 5bc6fc48..40ff2c8f 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -33,6 +33,7 @@ exclude_files=( "examples/mistral/classifier/async_classifier.py" "examples/mistral/mcp_servers/sse_server.py" "examples/mistral/mcp_servers/stdio_server.py" + "examples/mistral/agents/async_conversation_run_code_interpreter.py" "examples/mistral/agents/async_conversation_run_stream.py" "examples/mistral/agents/async_conversation_run_mcp.py" "examples/mistral/agents/async_conversation_run_mcp_remote.py" From 6475b1caefd308c35e48d469a4bc9a314a81a10a Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Wed, 11 Feb 2026 10:10:34 +0100 Subject: [PATCH 204/223] chore: update speakeasy gen.yaml with recommended v2 configs (#345) * chore: update speakeasy gen.yaml with recommended v2 configs Update generation fixes: - Enable nameResolutionFeb2025, securityFeb2025, sharedErrorComponentsApr2025 - Add methodSignaturesApr2024 and sharedNestedComponentsJan2026 Update python section: - Enable preApplyUnionDiscriminators - Add forwardCompatibleEnumsByDefault: true - Add forwardCompatibleUnionsByDefault: tagged-only - Add flatAdditionalProperties: true Bump version to 2.0.0a2. * chore: regenerate SDK with new speakeasy configs Regenerated with speakeasy v1.685.0 using the updated gen.yaml config. 
Key changes from the new config flags: - Forward-compatible enums (accept unknown values) - Forward-compatible tagged unions (Unknown variant) - Updated type names from name resolution fixes - Flat additional properties * fix: update extra module and docs for new speakeasy types - Update type references: Tools -> ConversationRequestTool - Remove type= parameter from FunctionTool constructor (now a constant) - Use isinstance() check instead of .type attribute access - Document type name changes in MIGRATION.md * chore: bump version to 2.0.0a2 * fix: fix example types and enable mypy in CI - Rename azure examples from .py.py to .py - Fix message types in azure and mistral examples - Add type annotations where needed for mypy - Enable mypy for examples in lint_custom_code.sh --- .speakeasy/gen.lock | 778 ++++++----------- .speakeasy/gen.yaml | 15 +- ...-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock | 799 ++++++++++++++++++ MIGRATION.md | 14 +- README.md | 50 +- docs/models/agent.md | 2 +- docs/models/agentcreationrequest.md | 20 +- docs/models/agentcreationrequesttool.md | 41 + docs/models/agenthandoffdoneevent.md | 16 +- docs/models/agenthandoffdoneeventtype.md | 8 - docs/models/agenthandoffstartedevent.md | 16 +- docs/models/agenthandoffstartedeventtype.md | 8 - ...md => agentsapiv1agentsgetagentversion.md} | 2 +- docs/models/agentsapiv1agentsgetrequest.md | 8 +- ...> agentsapiv1conversationslistresponse.md} | 2 +- docs/models/agentscompletionrequest.md | 2 +- ...s.md => agentscompletionrequestmessage.md} | 2 +- docs/models/agentscompletionstreamrequest.md | 2 +- ...> agentscompletionstreamrequestmessage.md} | 2 +- .../agentscompletionstreamrequestmessages.md | 29 - docs/models/{tools.md => agenttool.md} | 2 +- docs/models/agentupdaterequest.md | 22 +- ...tiontools.md => agentupdaterequesttool.md} | 2 +- docs/models/audiochunk.md | 8 +- docs/models/audiochunktype.md | 8 - docs/models/basemodelcard.md | 2 +- docs/models/basemodelcardtype.md | 8 - 
docs/models/chatcompletionchoice.md | 10 +- ...md => chatcompletionchoicefinishreason.md} | 2 +- docs/models/chatcompletionrequest.md | 4 +- ...one.md => chatcompletionrequestmessage.md} | 2 +- .../{stop.md => chatcompletionrequeststop.md} | 2 +- docs/models/chatcompletionstreamrequest.md | 2 +- ... => chatcompletionstreamrequestmessage.md} | 2 +- docs/models/chatmoderationrequest.md | 8 +- docs/models/chatmoderationrequestinputs.md | 19 - ...ges.md => chatmoderationrequestinputs1.md} | 2 +- docs/models/chatmoderationrequestinputs2.md | 29 + docs/models/chatmoderationrequestinputs3.md | 19 + docs/models/classifierdetailedjobout.md | 4 +- ...=> classifierdetailedjoboutintegration.md} | 2 +- .../models/classifierdetailedjoboutjobtype.md | 8 - docs/models/classifierftmodelout.md | 36 +- docs/models/classifierftmodeloutmodeltype.md | 8 - docs/models/classifierjobout.md | 4 +- ...ions.md => classifierjoboutintegration.md} | 2 +- docs/models/classifierjoboutjobtype.md | 10 - docs/models/codeinterpretertool.md | 6 +- docs/models/codeinterpretertooltype.md | 8 - docs/models/completiondetailedjobout.md | 6 +- ...=> completiondetailedjoboutintegration.md} | 2 +- .../models/completiondetailedjoboutjobtype.md | 8 - ... 
=> completiondetailedjoboutrepository.md} | 2 +- docs/models/completionftmodelout.md | 2 +- docs/models/completionjobout.md | 8 +- ...ions.md => completionjoboutintegration.md} | 2 +- ...ories.md => completionjoboutrepository.md} | 2 +- .../{status.md => completionjoboutstatus.md} | 2 +- docs/models/conversationhistory.md | 2 +- docs/models/conversationrequest.md | 30 +- ....md => conversationrequestagentversion.md} | 2 +- ...=> conversationrequesthandoffexecution.md} | 2 +- ...esttools.md => conversationrequesttool.md} | 2 +- docs/models/conversationresponse.md | 2 +- docs/models/conversationstreamrequest.md | 2 +- ...ls.md => conversationstreamrequesttool.md} | 2 +- docs/models/conversationstreamrequesttools.md | 41 - docs/models/deltamessage.md | 10 +- .../{content.md => deltamessagecontent.md} | 2 +- docs/models/documentlibrarytool.md | 8 +- docs/models/documentlibrarytooltype.md | 8 - ...dv1documentupload.md => documentupload.md} | 2 +- docs/models/{entries.md => entry.md} | 2 +- docs/models/ftmodelcard.md | 2 +- docs/models/ftmodelcardtype.md | 8 - docs/models/functioncallevent.md | 18 +- docs/models/functioncalleventtype.md | 8 - docs/models/functiontool.md | 8 +- docs/models/functiontooltype.md | 8 - docs/models/githubrepositoryin.md | 16 +- docs/models/githubrepositoryintype.md | 8 - docs/models/githubrepositoryout.md | 16 +- docs/models/githubrepositoryouttype.md | 8 - docs/models/imagegenerationtool.md | 6 +- docs/models/imagegenerationtooltype.md | 8 - docs/models/imageurlchunk.md | 2 +- ...geurlchunkimageurl.md => imageurlunion.md} | 2 +- docs/models/{messages.md => inputsmessage.md} | 2 +- docs/models/instructrequest.md | 6 +- docs/models/instructrequestinputs.md | 6 +- .../{two.md => instructrequestmessage.md} | 2 +- docs/models/jobin.md | 4 +- ...binintegrations.md => jobinintegration.md} | 2 +- ...obinrepositories.md => jobinrepository.md} | 2 +- ...esfinetuningcreatefinetuningjobresponse.md | 4 +- ...outesfinetuninggetfinetuningjobsrequest.md | 24 
+- ...outesfinetuninggetfinetuningjobsstatus.md} | 2 +- docs/models/jobtype.md | 10 - .../librariesdocumentsuploadv1request.md | 8 +- docs/models/messageinputentry.md | 20 +- .../{object.md => messageinputentryobject.md} | 2 +- docs/models/messageoutputevent.md | 2 +- docs/models/messageoutputeventtype.md | 8 - docs/models/modelconversation.md | 2 +- ...agenttools.md => modelconversationtool.md} | 2 +- docs/models/modellist.md | 8 +- docs/models/{data.md => modellistdata.md} | 2 +- docs/models/modeltype.md | 8 - ...rtbodyparams.md => multipartbodyparams.md} | 2 +- docs/models/{outputs.md => output.md} | 2 +- .../realtimetranscriptionerrordetail.md | 8 +- ...ealtimetranscriptionerrordetailmessage.md} | 2 +- docs/models/{response1.md => response.md} | 2 +- docs/models/responsedoneevent.md | 10 +- docs/models/responsedoneeventtype.md | 8 - docs/models/responseerrorevent.md | 12 +- docs/models/responseerroreventtype.md | 8 - ...esponseretrievemodelv1modelsmodelidget.md} | 2 +- docs/models/responsestartedevent.md | 10 +- docs/models/responsestartedeventtype.md | 8 - ...nsget.md => responsev1conversationsget.md} | 2 +- docs/models/role.md | 8 - docs/models/systemmessage.md | 2 +- docs/models/toolexecutiondeltaevent.md | 16 +- docs/models/toolexecutiondeltaeventtype.md | 8 - docs/models/toolexecutiondoneevent.md | 16 +- docs/models/toolexecutiondoneeventtype.md | 8 - docs/models/toolexecutionentry.md | 2 +- .../{name.md => toolexecutionentryname.md} | 2 +- docs/models/toolexecutionstartedevent.md | 16 +- docs/models/toolexecutionstartedeventtype.md | 8 - docs/models/toolmessage.md | 2 +- docs/models/toolmessagerole.md | 8 - docs/models/transcriptionsegmentchunk.md | 18 +- ...pe.md => transcriptionsegmentchunktype.md} | 2 +- docs/models/transcriptionstreamdone.md | 18 +- docs/models/transcriptionstreamdonetype.md | 8 - docs/models/transcriptionstreamlanguage.md | 10 +- .../models/transcriptionstreamlanguagetype.md | 8 - .../models/transcriptionstreamsegmentdelta.md | 16 +- 
.../transcriptionstreamsegmentdeltatype.md | 8 - docs/models/transcriptionstreamtextdelta.md | 10 +- .../transcriptionstreamtextdeltatype.md | 8 - docs/models/usermessage.md | 2 +- docs/models/usermessagerole.md | 8 - docs/models/wandbintegration.md | 2 +- docs/models/wandbintegrationout.md | 14 +- docs/models/wandbintegrationouttype.md | 8 - docs/models/wandbintegrationtype.md | 8 - docs/models/websearchpremiumtool.md | 6 +- docs/models/websearchpremiumtooltype.md | 8 - docs/models/websearchtool.md | 6 +- docs/models/websearchtooltype.md | 8 - docs/sdks/agents/README.md | 4 +- .../sdks/{mistraljobs => batchjobs}/README.md | 0 .../{mistralagents => betaagents}/README.md | 58 +- docs/sdks/chat/README.md | 6 +- docs/sdks/classifiers/README.md | 10 +- docs/sdks/conversations/README.md | 38 +- docs/sdks/{jobs => finetuningjobs}/README.md | 30 +- docs/sdks/models/README.md | 2 +- examples/azure/az_chat_no_streaming.py | 15 + examples/azure/az_chat_no_streaming.py.py | 16 - examples/azure/chat_no_streaming.py | 15 + examples/azure/chat_no_streaming.py.py | 16 - .../mistral/chat/chatbot_with_streaming.py | 3 +- examples/mistral/chat/function_calling.py | 69 +- .../mistral/classifier/async_classifier.py | 9 +- pyproject.toml | 2 +- scripts/lint_custom_code.sh | 5 +- src/mistralai/client/_version.py | 4 +- src/mistralai/client/agents.py | 24 +- src/mistralai/client/batch.py | 6 +- .../client/{mistral_jobs.py => batch_jobs.py} | 2 +- src/mistralai/client/beta.py | 6 +- .../{mistral_agents.py => beta_agents.py} | 34 +- src/mistralai/client/chat.py | 40 +- src/mistralai/client/classifiers.py | 16 +- src/mistralai/client/conversations.py | 68 +- src/mistralai/client/documents.py | 16 +- src/mistralai/client/files.py | 16 +- src/mistralai/client/fine_tuning.py | 6 +- .../client/{jobs.py => fine_tuning_jobs.py} | 30 +- src/mistralai/client/models/__init__.py | 674 ++++++--------- src/mistralai/client/models/agent.py | 27 +- .../client/models/agentcreationrequest.py | 27 +- 
.../client/models/agenthandoffdoneevent.py | 18 +- .../client/models/agenthandoffstartedevent.py | 18 +- .../models/agents_api_v1_agents_getop.py | 12 +- .../agents_api_v1_conversations_getop.py | 9 +- .../agents_api_v1_conversations_listop.py | 8 +- .../client/models/agentscompletionrequest.py | 10 +- .../models/agentscompletionstreamrequest.py | 10 +- .../client/models/agentupdaterequest.py | 27 +- src/mistralai/client/models/audiochunk.py | 19 +- src/mistralai/client/models/basemodelcard.py | 8 +- src/mistralai/client/models/batchjobstatus.py | 22 +- .../client/models/builtinconnectors.py | 18 +- .../client/models/chatcompletionchoice.py | 6 +- .../client/models/chatcompletionrequest.py | 22 +- .../models/chatcompletionstreamrequest.py | 10 +- .../client/models/chatmoderationrequest.py | 30 +- .../client/models/classifierdetailedjobout.py | 56 +- .../client/models/classifierftmodelout.py | 16 +- .../client/models/classifierjobout.py | 57 +- .../client/models/codeinterpretertool.py | 20 +- .../client/models/completiondetailedjobout.py | 64 +- .../client/models/completionftmodelout.py | 16 +- .../client/models/completionjobout.py | 69 +- .../client/models/conversationevents.py | 25 +- .../client/models/conversationhistory.py | 12 +- .../client/models/conversationrequest.py | 45 +- .../client/models/conversationresponse.py | 12 +- .../models/conversationstreamrequest.py | 27 +- src/mistralai/client/models/deltamessage.py | 12 +- .../client/models/documentlibrarytool.py | 20 +- .../models/files_api_routes_upload_fileop.py | 4 +- .../client/models/ftclassifierlossfunction.py | 12 +- src/mistralai/client/models/ftmodelcard.py | 10 +- .../client/models/functioncallevent.py | 18 +- src/mistralai/client/models/functiontool.py | 17 +- .../client/models/githubrepositoryin.py | 17 +- .../client/models/githubrepositoryout.py | 17 +- .../client/models/imagegenerationtool.py | 20 +- src/mistralai/client/models/imageurlchunk.py | 10 +- src/mistralai/client/models/inputs.py | 10 
+- .../client/models/instructrequest.py | 10 +- src/mistralai/client/models/jobin.py | 16 +- ...es_fine_tuning_cancel_fine_tuning_jobop.py | 11 +- ...es_fine_tuning_create_fine_tuning_jobop.py | 19 +- ...outes_fine_tuning_get_fine_tuning_jobop.py | 11 +- ...utes_fine_tuning_get_fine_tuning_jobsop.py | 6 +- ...tes_fine_tuning_start_fine_tuning_jobop.py | 11 +- ...s_fine_tuning_update_fine_tuned_modelop.py | 15 +- src/mistralai/client/models/jobsout.py | 9 +- .../models/libraries_documents_upload_v1op.py | 8 +- .../client/models/messageinputentry.py | 16 +- .../client/models/messageoutputevent.py | 19 +- .../client/models/modelconversation.py | 27 +- src/mistralai/client/models/modellist.py | 18 +- src/mistralai/client/models/ocrtableobject.py | 13 +- .../realtimetranscriptionerrordetail.py | 12 +- .../client/models/responsedoneevent.py | 18 +- .../client/models/responseerrorevent.py | 18 +- .../client/models/responseformats.py | 14 +- .../client/models/responsestartedevent.py | 18 +- ...retrieve_model_v1_models_model_id_getop.py | 15 +- src/mistralai/client/models/ssetypes.py | 28 +- src/mistralai/client/models/systemmessage.py | 17 +- src/mistralai/client/models/toolchoiceenum.py | 16 +- .../client/models/toolexecutiondeltaevent.py | 18 +- .../client/models/toolexecutiondoneevent.py | 18 +- .../client/models/toolexecutionentry.py | 12 +- .../models/toolexecutionstartedevent.py | 18 +- src/mistralai/client/models/toolmessage.py | 19 +- .../models/transcriptionsegmentchunk.py | 6 +- .../client/models/transcriptionstreamdone.py | 19 +- .../models/transcriptionstreamevents.py | 13 +- .../models/transcriptionstreameventtypes.py | 16 +- .../models/transcriptionstreamlanguage.py | 19 +- .../models/transcriptionstreamsegmentdelta.py | 21 +- .../models/transcriptionstreamtextdelta.py | 19 +- src/mistralai/client/models/usermessage.py | 19 +- .../client/models/wandbintegration.py | 19 +- .../client/models/wandbintegrationout.py | 19 +- 
.../client/models/websearchpremiumtool.py | 20 +- src/mistralai/client/models/websearchtool.py | 17 +- src/mistralai/client/models_.py | 10 +- src/mistralai/extra/mcp/base.py | 1 - src/mistralai/extra/run/context.py | 20 +- src/mistralai/extra/run/tools.py | 1 - uv.lock | 2 +- 272 files changed, 2960 insertions(+), 2658 deletions(-) create mode 100644 .speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock create mode 100644 docs/models/agentcreationrequesttool.md delete mode 100644 docs/models/agenthandoffdoneeventtype.md delete mode 100644 docs/models/agenthandoffstartedeventtype.md rename docs/models/{queryparamagentversion.md => agentsapiv1agentsgetagentversion.md} (79%) rename docs/models/{responsebody.md => agentsapiv1conversationslistresponse.md} (84%) rename docs/models/{instructrequestinputsmessages.md => agentscompletionrequestmessage.md} (92%) rename docs/models/{chatcompletionstreamrequestmessages.md => agentscompletionstreamrequestmessage.md} (90%) delete mode 100644 docs/models/agentscompletionstreamrequestmessages.md rename docs/models/{tools.md => agenttool.md} (98%) rename docs/models/{modelconversationtools.md => agentupdaterequesttool.md} (96%) delete mode 100644 docs/models/audiochunktype.md delete mode 100644 docs/models/basemodelcardtype.md rename docs/models/{finishreason.md => chatcompletionchoicefinishreason.md} (88%) rename docs/models/{one.md => chatcompletionrequestmessage.md} (92%) rename docs/models/{stop.md => chatcompletionrequeststop.md} (90%) rename docs/models/{agentscompletionrequestmessages.md => chatcompletionstreamrequestmessage.md} (91%) delete mode 100644 docs/models/chatmoderationrequestinputs.md rename docs/models/{instructrequestmessages.md => chatmoderationrequestinputs1.md} (92%) create mode 100644 docs/models/chatmoderationrequestinputs2.md create mode 100644 docs/models/chatmoderationrequestinputs3.md rename docs/models/{completiondetailedjoboutintegrations.md => classifierdetailedjoboutintegration.md} 
(76%) delete mode 100644 docs/models/classifierdetailedjoboutjobtype.md delete mode 100644 docs/models/classifierftmodeloutmodeltype.md rename docs/models/{integrations.md => classifierjoboutintegration.md} (80%) delete mode 100644 docs/models/classifierjoboutjobtype.md delete mode 100644 docs/models/codeinterpretertooltype.md rename docs/models/{classifierdetailedjoboutintegrations.md => completiondetailedjoboutintegration.md} (76%) delete mode 100644 docs/models/completiondetailedjoboutjobtype.md rename docs/models/{completiondetailedjoboutrepositories.md => completiondetailedjoboutrepository.md} (76%) rename docs/models/{classifierjoboutintegrations.md => completionjoboutintegration.md} (80%) rename docs/models/{repositories.md => completionjoboutrepository.md} (81%) rename docs/models/{status.md => completionjoboutstatus.md} (96%) rename docs/models/{agentversion.md => conversationrequestagentversion.md} (80%) rename docs/models/{handoffexecution.md => conversationrequesthandoffexecution.md} (73%) rename docs/models/{agentupdaterequesttools.md => conversationrequesttool.md} (95%) rename docs/models/{agentcreationrequesttools.md => conversationstreamrequesttool.md} (95%) delete mode 100644 docs/models/conversationstreamrequesttools.md rename docs/models/{content.md => deltamessagecontent.md} (89%) delete mode 100644 docs/models/documentlibrarytooltype.md rename docs/models/{librariesdocumentsuploadv1documentupload.md => documentupload.md} (98%) rename docs/models/{entries.md => entry.md} (98%) delete mode 100644 docs/models/ftmodelcardtype.md delete mode 100644 docs/models/functioncalleventtype.md delete mode 100644 docs/models/functiontooltype.md delete mode 100644 docs/models/githubrepositoryintype.md delete mode 100644 docs/models/githubrepositoryouttype.md delete mode 100644 docs/models/imagegenerationtooltype.md rename docs/models/{imageurlchunkimageurl.md => imageurlunion.md} (86%) rename docs/models/{messages.md => inputsmessage.md} (96%) rename 
docs/models/{two.md => instructrequestmessage.md} (93%) rename docs/models/{jobinintegrations.md => jobinintegration.md} (85%) rename docs/models/{jobinrepositories.md => jobinrepository.md} (86%) rename docs/models/{queryparamstatus.md => jobsapiroutesfinetuninggetfinetuningjobsstatus.md} (94%) delete mode 100644 docs/models/jobtype.md rename docs/models/{object.md => messageinputentryobject.md} (75%) delete mode 100644 docs/models/messageoutputeventtype.md rename docs/models/{agenttools.md => modelconversationtool.md} (96%) rename docs/models/{data.md => modellistdata.md} (92%) delete mode 100644 docs/models/modeltype.md rename docs/models/{filesapiroutesuploadfilemultipartbodyparams.md => multipartbodyparams.md} (99%) rename docs/models/{outputs.md => output.md} (97%) rename docs/models/{message.md => realtimetranscriptionerrordetailmessage.md} (81%) rename docs/models/{response1.md => response.md} (94%) delete mode 100644 docs/models/responsedoneeventtype.md delete mode 100644 docs/models/responseerroreventtype.md rename docs/models/{retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md => responseretrievemodelv1modelsmodelidget.md} (75%) delete mode 100644 docs/models/responsestartedeventtype.md rename docs/models/{agentsapiv1conversationsgetresponsev1conversationsget.md => responsev1conversationsget.md} (81%) delete mode 100644 docs/models/role.md delete mode 100644 docs/models/toolexecutiondeltaeventtype.md delete mode 100644 docs/models/toolexecutiondoneeventtype.md rename docs/models/{name.md => toolexecutionentryname.md} (87%) delete mode 100644 docs/models/toolexecutionstartedeventtype.md delete mode 100644 docs/models/toolmessagerole.md rename docs/models/{type.md => transcriptionsegmentchunktype.md} (84%) delete mode 100644 docs/models/transcriptionstreamdonetype.md delete mode 100644 docs/models/transcriptionstreamlanguagetype.md delete mode 100644 docs/models/transcriptionstreamsegmentdeltatype.md delete mode 100644 
docs/models/transcriptionstreamtextdeltatype.md delete mode 100644 docs/models/usermessagerole.md delete mode 100644 docs/models/wandbintegrationouttype.md delete mode 100644 docs/models/wandbintegrationtype.md delete mode 100644 docs/models/websearchpremiumtooltype.md delete mode 100644 docs/models/websearchtooltype.md rename docs/sdks/{mistraljobs => batchjobs}/README.md (100%) rename docs/sdks/{mistralagents => betaagents}/README.md (78%) rename docs/sdks/{jobs => finetuningjobs}/README.md (83%) create mode 100644 examples/azure/az_chat_no_streaming.py delete mode 100644 examples/azure/az_chat_no_streaming.py.py create mode 100644 examples/azure/chat_no_streaming.py delete mode 100644 examples/azure/chat_no_streaming.py.py rename src/mistralai/client/{mistral_jobs.py => batch_jobs.py} (99%) rename src/mistralai/client/{mistral_agents.py => beta_agents.py} (99%) rename src/mistralai/client/{jobs.py => fine_tuning_jobs.py} (98%) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 345ea2c8..69828bd7 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -62,7 +62,7 @@ trackedFiles: pristine_git_object: 1810386448a440cfc5f7b8579695b228ae40460d docs/models/agent.md: id: ffdbb4c53c87 - last_write_checksum: sha1:ec6c799658040b3c75d6ae0572bb391c6aea3fd4 + last_write_checksum: sha1:26d2fb743d3fdd54a6ab1258a37f08d1726927ac pristine_git_object: ee054dd349848eff144d7064c319c3c8434bdc6c docs/models/agentaliasresponse.md: id: 5ac4721d8947 @@ -82,20 +82,14 @@ trackedFiles: pristine_git_object: ea7cc75c5197ed42f9fb508a969baa16effe1f98 docs/models/agentcreationrequest.md: id: 697a770fe5c0 - last_write_checksum: sha1:c8221a20a68675b444d668a58a649b25b54786e9 + last_write_checksum: sha1:d77c75f922c64df266b101a2fd23c7fe56b7894b pristine_git_object: afc27d3b688f9ca187606243c810fd19d12bb840 - docs/models/agentcreationrequesttools.md: - id: 932bf99a19a8 - last_write_checksum: sha1:49294bdd30b7413956bd8dc039ad7c9d15243282 - pristine_git_object: 
c2525850649b4dad76b44fd21cac822e12986818 + docs/models/agentcreationrequesttool.md: + last_write_checksum: sha1:310d4b107554a9c16143191fdc306a5438b63768 docs/models/agenthandoffdoneevent.md: id: dcf166a3c3b0 - last_write_checksum: sha1:281473cbc3929e2deb3e069e74551e7e26b4fdba + last_write_checksum: sha1:9e95c09f724827f5e9c202fd634bdfa2baef1b6e pristine_git_object: c0039f41825e3667cd8e91adae5bb78a2e3ac8ae - docs/models/agenthandoffdoneeventtype.md: - id: 4d412ea3af67 - last_write_checksum: sha1:720ebe2c6029611b8ecd4caa1b5a58d6417251c6 - pristine_git_object: c864ce4381eb30532feb010b39b991a2070f134b docs/models/agenthandoffentry.md: id: 39d54f489b84 last_write_checksum: sha1:7d949e750fd24dea20cabae340f9204d8f756008 @@ -110,12 +104,8 @@ trackedFiles: pristine_git_object: 527ebceb2ff1bbba1067f30438befd5e2c2e91d6 docs/models/agenthandoffstartedevent.md: id: b620102af460 - last_write_checksum: sha1:a635a7f57e197519d6c51349f6db44199f8e0d43 + last_write_checksum: sha1:33732e0465423348c2ace458506a597a3dadf9b2 pristine_git_object: 035cd02aaf338785d9f6410fde248591c5ffa5f7 - docs/models/agenthandoffstartedeventtype.md: - id: 09b09b971d58 - last_write_checksum: sha1:a3cf06d2c414b1609bdbbbd9e35c8d3f14af262a - pristine_git_object: 4ffaff15cd7b5d4b08080c4fb78e92c455c73f35 docs/models/agentobject.md: id: ed24a6d647a0 last_write_checksum: sha1:ff5dfde6cc19f09c83afb5b4f0f103096df6691d @@ -128,9 +118,11 @@ trackedFiles: id: 0faaaa59add9 last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2 pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c + docs/models/agentsapiv1agentsgetagentversion.md: + last_write_checksum: sha1:e4f4c6a64b1c2ec9465b7ad008df4d7859098e59 docs/models/agentsapiv1agentsgetrequest.md: id: 01740ae62cff - last_write_checksum: sha1:9c4f6d88f29c39238757547da605ecb7106e76c2 + last_write_checksum: sha1:bc86e90289ec09b40212083a82455b4fe71c7194 pristine_git_object: c71d4419afd3b51713e154b8021d4fe2b49d8af5 
docs/models/agentsapiv1agentsgetversionrequest.md: id: 88ed22b85cde @@ -172,10 +164,6 @@ trackedFiles: id: d6acce23f92c last_write_checksum: sha1:b5d5529b72c16293d3d9b5c45dcb2e3798405bcf pristine_git_object: 67d450c88778cb27d7d0ba06d49d9f419840b32e - docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md: - id: 97b0d4a71cbc - last_write_checksum: sha1:8d3df6d122eeb58043c81e30cfa701526cc572f0 - pristine_git_object: 4bc836f353f66b0f8b24f278cc78d41dbec72e36 docs/models/agentsapiv1conversationshistoryrequest.md: id: e3efc36ea8b5 last_write_checksum: sha1:4155100eaed6d3b7410b3f4476f000d1879576be @@ -184,6 +172,8 @@ trackedFiles: id: 406c3e92777a last_write_checksum: sha1:d5c5effcf2ca32900678d20b667bdaf8ca908194 pristine_git_object: 62c9011faf26b3a4268186f01caf98c186e7d5b4 + docs/models/agentsapiv1conversationslistresponse.md: + last_write_checksum: sha1:1144f41f8a97daacfb75c11fdf3575e553cf0859 docs/models/agentsapiv1conversationsmessagesrequest.md: id: 2c749c6620d4 last_write_checksum: sha1:781e526b030653dc189d94ca04cdc4742f9506d2 @@ -198,12 +188,10 @@ trackedFiles: pristine_git_object: 7548286af5d1db51fbfd29c893eb8afdc3c97c4d docs/models/agentscompletionrequest.md: id: 906b82c214dc - last_write_checksum: sha1:60a969d5e54cbbb8e9296380908f1d31544e80e2 + last_write_checksum: sha1:84ee0378e413830260a279a67fc3b1342e643328 pristine_git_object: 2a0c4144fb5919e5ce892db1210bde90820c127c - docs/models/agentscompletionrequestmessages.md: - id: 152837715a56 - last_write_checksum: sha1:338b094596f610c6eacaf0995c585f371f628f0d - pristine_git_object: d6a1e69106fc4b4804bfcc0f95e30782be40b363 + docs/models/agentscompletionrequestmessage.md: + last_write_checksum: sha1:ecf7b7cdf0d24a5e97b520366cf816b8731734bb docs/models/agentscompletionrequeststop.md: id: ad1e0e74b6b8 last_write_checksum: sha1:b2422d4dada80d54b2dd499a6659a3894318d2c9 @@ -214,12 +202,10 @@ trackedFiles: pristine_git_object: 63b9dca9fbb8d829f93d8327a77fbc385a846c76 
docs/models/agentscompletionstreamrequest.md: id: 21d09756447b - last_write_checksum: sha1:97372c5a10b06f826b9da6bde2b9c5f6984cc15b + last_write_checksum: sha1:0c88bc63255733480b65b61685dcc356fcc9ed66 pristine_git_object: b2ccd4e8fe2fc3f63d4b517f7ecfc21f3aef9d67 - docs/models/agentscompletionstreamrequestmessages.md: - id: d527345f99b1 - last_write_checksum: sha1:a5e00a940960bd6751586b92329aea797af50550 - pristine_git_object: 1bc736af55a3582a18959e445f10fc75f050476b + docs/models/agentscompletionstreamrequestmessage.md: + last_write_checksum: sha1:98744c9646969250242cbbfbdf428dbd7030e4bb docs/models/agentscompletionstreamrequeststop.md: id: 4925b6b8fbca last_write_checksum: sha1:c9d0d73ca46643ffdf02e6c6cd35de5c39460c20 @@ -228,22 +214,14 @@ trackedFiles: id: b1f76f7a4e1c last_write_checksum: sha1:843c4946d5cab61df2cba458af40835c4e8bcafe pristine_git_object: 4354523a7d0d21721a96e91938b89236169ccced - docs/models/agenttools.md: - id: 493997aabfdb - last_write_checksum: sha1:90e3537a61b4120892a3aafe545d6bed937bf46a - pristine_git_object: 15891f566b3430e1f199da332f4531dd29002bed + docs/models/agenttool.md: + last_write_checksum: sha1:9154d0ac6b0ab8970a10a8ad7716009d62e80ce7 docs/models/agentupdaterequest.md: id: 75a7f820b906 - last_write_checksum: sha1:d282d1cd39ecb3c447e651a9ea25010ecfa519f7 + last_write_checksum: sha1:306134659876c4e87324dfec879ab0b691a74f3a pristine_git_object: 641d1e406f0fba0fce9f10c16a15f883c7095c07 - docs/models/agentupdaterequesttools.md: - id: a39223b88fc9 - last_write_checksum: sha1:925ef5852c2031c9bf2608577e55edbc36708730 - pristine_git_object: 1752ee6861d23c6abaa6b748f4ff43e9545505ec - docs/models/agentversion.md: - id: b0aa02d6c085 - last_write_checksum: sha1:f6fcf351de43eed5345f88f5cb6a2bf928a594d9 - pristine_git_object: fd4b6a3ea4ade6c9f62594b377c8e791a50211e7 + docs/models/agentupdaterequesttool.md: + last_write_checksum: sha1:25d8a331a706bf8e6056b99f8ff1a46abff6ae72 docs/models/apiendpoint.md: id: be613fd9b947 last_write_checksum: 
sha1:4d984c11248f7da42c949164e69b53995d5942c4 @@ -278,12 +256,8 @@ trackedFiles: pristine_git_object: 147708d9238e40e1cdb222beee15fbe8c1603050 docs/models/audiochunk.md: id: 88315a758fd4 - last_write_checksum: sha1:deae67e30f57eb9ae100d8c3bc26f77e8fb28396 + last_write_checksum: sha1:d52e493765280fc0b1df61a0ce1086205965c712 pristine_git_object: c443e7ade726ba88dd7ce9a8341687ef38abe598 - docs/models/audiochunktype.md: - id: cfdd0b7a74b3 - last_write_checksum: sha1:aaafb6be2f880e23fc29958389c44fd60e85f5e4 - pristine_git_object: 46ebf3729db50fd915e56124adcf63a09d93dbf4 docs/models/audioencoding.md: id: 1e0dfee9c2a0 last_write_checksum: sha1:5d47cfaca916d7a47adbea71748595b3ab69a478 @@ -302,12 +276,8 @@ trackedFiles: pristine_git_object: 5d64964d1a635da912f2553c306fb8654ebfca2e docs/models/basemodelcard.md: id: 2f62bfbd650e - last_write_checksum: sha1:7ee94bd9ceb6af84024863aa8183540bee7ffcce + last_write_checksum: sha1:4b29e0d24060b6724e82aeee05befe1cddb316f4 pristine_git_object: 58ad5e25131804287b5f7c834afc3ad480d065a9 - docs/models/basemodelcardtype.md: - id: ac404098e2ff - last_write_checksum: sha1:b20b34e9a5f2f52d0563d8fbfa3d00042817ce87 - pristine_git_object: 4a40ce76799b5c224c5687287e8fc14857999d85 docs/models/batcherror.md: id: 8053e29a3f26 last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f @@ -350,12 +320,18 @@ trackedFiles: pristine_git_object: 910d62ae20fc67e9a3200397aeab95513bfed90f docs/models/chatcompletionchoice.md: id: 0d15c59ab501 - last_write_checksum: sha1:449b3e772891ec8d2ef77b6959a437514bb48d9c + last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 pristine_git_object: d77d286eb0b2d2b018b6ff5f9617225be4fa9fa5 + docs/models/chatcompletionchoicefinishreason.md: + last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 docs/models/chatcompletionrequest.md: id: adffe90369d0 - last_write_checksum: sha1:7dce1fcd0918e2c94ad90337fb7a89179a5b8402 + last_write_checksum: sha1:f6eec11c908ee6581e508fff98e785441c4b84ad 
pristine_git_object: 109fa7b13d19ccc85e4633e64b44613640c171fb + docs/models/chatcompletionrequestmessage.md: + last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 + docs/models/chatcompletionrequeststop.md: + last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 docs/models/chatcompletionrequesttoolchoice.md: id: b97041b2f15b last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 @@ -366,12 +342,10 @@ trackedFiles: pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b docs/models/chatcompletionstreamrequest.md: id: cf8f29558a68 - last_write_checksum: sha1:6f3ca8df1ce48dceb72547012a3e973e09a16d61 + last_write_checksum: sha1:7ed921e0366c1b00225c05e60937fb8d228f027b pristine_git_object: 7d5fb411bde92e39910018cc2ad8d4d67ea980a1 - docs/models/chatcompletionstreamrequestmessages.md: - id: b343649e1a58 - last_write_checksum: sha1:04ea9c0e1abcc1956a5990847027bbbbcc778620 - pristine_git_object: 479906112d167c909301c1835df549f4a6456f95 + docs/models/chatcompletionstreamrequestmessage.md: + last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 docs/models/chatcompletionstreamrequeststop.md: id: d0e89a4dca78 last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 @@ -382,12 +356,14 @@ trackedFiles: pristine_git_object: 43f3ca3809bf1a2a040e2ad7c19a2b22db0b73f8 docs/models/chatmoderationrequest.md: id: 22862d4d20ec - last_write_checksum: sha1:2fb708270756e1296a063b0d12252e7a5b2fb92a + last_write_checksum: sha1:9bbe510ee67515092bd953ad7f84ae118398af54 pristine_git_object: 69b6c1dc2c10abbbc2574f3782b2d85687661f11 - docs/models/chatmoderationrequestinputs.md: - id: 6d7386a07f09 - last_write_checksum: sha1:f95cffb7d88cfa238a483c949af2d386f875def2 - pristine_git_object: cf775d609e5d308ffb041deed7a70ae3f7fd70a7 + docs/models/chatmoderationrequestinputs1.md: + last_write_checksum: sha1:8d4c2dbd9207589aabf9c00cf60c61d2d3eef452 + docs/models/chatmoderationrequestinputs2.md: + last_write_checksum: 
sha1:e34eb6557e06e7783ed14d959c2a29959c26fd4c + docs/models/chatmoderationrequestinputs3.md: + last_write_checksum: sha1:14ce49ace5845bc467fe1559b12374bfd36bc9a7 docs/models/checkpointout.md: id: 909ce66e1f65 last_write_checksum: sha1:89e678d55b97353ad1c3b28d9f1ab101f6be0928 @@ -410,16 +386,10 @@ trackedFiles: pristine_git_object: f3b10727b023dd83a207d955b3d0f3cd4b7479a1 docs/models/classifierdetailedjobout.md: id: a2084ba5cc8c - last_write_checksum: sha1:75fec933eb83e28b81aa69561d7aaf0fb79b869b + last_write_checksum: sha1:63acd8a1921ac99143685722f8812b1f572d451f pristine_git_object: ccc88f89ed81e6e879a88b9729c4945704370fd9 - docs/models/classifierdetailedjoboutintegrations.md: - id: 3c607522e70d - last_write_checksum: sha1:e483390fb183bd1960373e4613a15ab31a52b7c7 - pristine_git_object: 5a09465ece564b1bf4dd323918a20f6747019cac - docs/models/classifierdetailedjoboutjobtype.md: - id: 176bd257be82 - last_write_checksum: sha1:ad0f41bac94d711d2b51b2ec4e09d0155db2b6eb - pristine_git_object: 0d1c6573b925e0ef836f5a607ac24f801e0d72eb + docs/models/classifierdetailedjoboutintegration.md: + last_write_checksum: sha1:6b2691766c1795d17b1572076a693eb377c5307f docs/models/classifierdetailedjoboutobject.md: id: 1ca54621f5bf last_write_checksum: sha1:5ae3d2847a66487d70bc2ff97a8c31bbbba191c7 @@ -430,28 +400,18 @@ trackedFiles: pristine_git_object: c3118aafa8614f20c9adf331033e7822b6391752 docs/models/classifierftmodelout.md: id: 268ac482c38b - last_write_checksum: sha1:77ff5ad1a9c142de2a43939be9cd3f57038a9bfc + last_write_checksum: sha1:dda3d6bf88fb6a3e860821aefb8a522d8a476b1d pristine_git_object: dd9e8bf9c0ee291b44cd4f06146dea3d3280c143 - docs/models/classifierftmodeloutmodeltype.md: - id: 40536012f45c - last_write_checksum: sha1:c6fde7ce8542ba6a56a91584aa0d6b1eb99fde6d - pristine_git_object: e1e7e465378c4c0112f08dc140052fad7955995e docs/models/classifierftmodeloutobject.md: id: 6aa25d9fe076 last_write_checksum: sha1:5a5fe345b3a2b3e65ce3171e8d6e9b9493ec7b06 pristine_git_object: 
9fe05bcf42325a390e5c984c7bdf346668944928 docs/models/classifierjobout.md: id: 2e3498af3f8c - last_write_checksum: sha1:a9706e8df1a0a569e5e42e7a1494737e391cb55a + last_write_checksum: sha1:311f6ca4b6b625768c4ddd63e642e14e6a58df23 pristine_git_object: aa1d3ca910535e283059903a2c39331673c1982b - docs/models/classifierjoboutintegrations.md: - id: 3c4aff0af3fd - last_write_checksum: sha1:b843cb1635940ff74737f92ec1ac5da893a239f2 - pristine_git_object: d938d0b991f71e46096a9b12320c6237265bd811 - docs/models/classifierjoboutjobtype.md: - id: 772280dfaefc - last_write_checksum: sha1:b809726c9edd5a47be7582eb028acbd58014b565 - pristine_git_object: 7f5236fa87ea9bb5fd93873a2d2f9a6a8c4f9456 + docs/models/classifierjoboutintegration.md: + last_write_checksum: sha1:72dfda442a88f977f3480c95127534a600362806 docs/models/classifierjoboutobject.md: id: 04543f046d40 last_write_checksum: sha1:96863c621ddf0425b818edcd5da32ddbd5fd1194 @@ -478,12 +438,8 @@ trackedFiles: pristine_git_object: 1287c973fae9762310597fbeceaef26865ace04f docs/models/codeinterpretertool.md: id: f009740c6e54 - last_write_checksum: sha1:bba7c0b8f0979b0c77a31c70621dccb03d6722a5 + last_write_checksum: sha1:bce278ce22703246613254ee2dac57f8b14e8060 pristine_git_object: d5ad789ed012accaa105ced4f8dfd8e9eb83d4a3 - docs/models/codeinterpretertooltype.md: - id: d6d0f83de515 - last_write_checksum: sha1:f41ae23451c22692410340d44bcec36a1f45910b - pristine_git_object: f704b65e2842e36be4d2b96c9334cda4a6b02cde docs/models/completionargs.md: id: 3b54534f9830 last_write_checksum: sha1:c0368b7c21524228939b2093ff1a4524eb57aeb7 @@ -498,24 +454,16 @@ trackedFiles: pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 docs/models/completiondetailedjobout.md: id: 634ca7241abd - last_write_checksum: sha1:b0af22a4e5eb409d6aa2a91c4ee3924d38923f5f + last_write_checksum: sha1:e5edf096998b6b8e2048f354bd694288dd609875 pristine_git_object: 84613080715078a73204d3984e7f97477ef548ae - docs/models/completiondetailedjoboutintegrations.md: - id: 
ecf47529e409 - last_write_checksum: sha1:5ff41070f932c911a724867a91a0a26c1d62032e - pristine_git_object: af6bbcc5f43176df2dea01a4a1a31f3c616ee3b9 - docs/models/completiondetailedjoboutjobtype.md: - id: cb794f29a3f2 - last_write_checksum: sha1:24533bc2a5bb42b560f02af4d93f008f9e5b7873 - pristine_git_object: fb24db0cc3d9495f01732bdb0e1c3df8a5865540 + docs/models/completiondetailedjoboutintegration.md: + last_write_checksum: sha1:3317db3f71962623a6144e3de0db20b4abfd5b9b docs/models/completiondetailedjoboutobject.md: id: 8e418065aa1c last_write_checksum: sha1:d429d772a6a4249809bbf0c26a6547e5f2de3f11 pristine_git_object: 1bec88e5f4c5f082c53157b8ee95b4b05cb787e3 - docs/models/completiondetailedjoboutrepositories.md: - id: bb83e77df490 - last_write_checksum: sha1:dc2d60c6be1d3385d584ce9629abaaaaa46cf0ef - pristine_git_object: 4f9727c36fac5515d0afbc801904abc3652a5b20 + docs/models/completiondetailedjoboutrepository.md: + last_write_checksum: sha1:b1910efc6cd1e50391cd33daef004441bac3d3cd docs/models/completiondetailedjoboutstatus.md: id: c606d38452e2 last_write_checksum: sha1:1e9a5736de32a44cf539f7eaf8214aad72ec4994 @@ -526,7 +474,7 @@ trackedFiles: pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d docs/models/completionftmodelout.md: id: 93fed66a5794 - last_write_checksum: sha1:c66aecd2e10f79c84c057eeae1986e975cb40220 + last_write_checksum: sha1:17c4ed9718d6556ddb103cff5a5823c3baa18f41 pristine_git_object: cd0858258521ced3990ff393fd00c11ef0abe094 docs/models/completionftmodeloutobject.md: id: c6e5667c5f03 @@ -534,12 +482,18 @@ trackedFiles: pristine_git_object: 6f9d858caa563f4a25ae752dd40ba632ecd0af75 docs/models/completionjobout.md: id: 77315b024171 - last_write_checksum: sha1:bae2f49bb9064e24f886487e44ce1688993fa949 + last_write_checksum: sha1:1070ddeaef67a65f27a365a57d343a83b4b40aca pristine_git_object: cb471746c4f23d2ec8451f4c45bf57e2f001072f + docs/models/completionjoboutintegration.md: + last_write_checksum: sha1:59711a3fa46d6a4bff787a61c81ecc34bdaaec2e 
docs/models/completionjoboutobject.md: id: 922a1e3a4e33 last_write_checksum: sha1:020211def2c4cd969398cf009b187ca19bd7a943 pristine_git_object: 712b107d79a8c60c4330da4f3af307545bf1a7ec + docs/models/completionjoboutrepository.md: + last_write_checksum: sha1:2cb5b23640eeaf87f45dc9f180247ed7a6307df7 + docs/models/completionjoboutstatus.md: + last_write_checksum: sha1:b8f33134c63b12dc474e7714b1ac19d768a3cbbd docs/models/completionresponsestreamchoice.md: id: d56824d615a6 last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 @@ -556,10 +510,6 @@ trackedFiles: id: 7223a57004ab last_write_checksum: sha1:8f77e5fe2ce149115b0bda372c57fafa931abd90 pristine_git_object: 9fcc714e5f000e6134f7f03f1dd4f56956323385 - docs/models/content.md: - id: bfd859c99f86 - last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8 - pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6 docs/models/contentchunk.md: id: d2d3a32080cd last_write_checksum: sha1:b253e4b802adb5b66d896bfc6245ac4d21a0c67c @@ -590,7 +540,7 @@ trackedFiles: pristine_git_object: 5452d7d5ce2aa59a6d89c7b7363290e91ed8a0a3 docs/models/conversationhistory.md: id: 7e97e8e6d6e9 - last_write_checksum: sha1:cc6b40d6e6ff923555e959be5ef50a00c73154a7 + last_write_checksum: sha1:06df76a87aca7c5acd5a28ca3306be09a8bb541b pristine_git_object: ebb1d5136cebf2bc9b77047fe83feecc68532d03 docs/models/conversationhistoryobject.md: id: 088f7df6b658 @@ -610,11 +560,17 @@ trackedFiles: pristine_git_object: db3a441bde0d086bccda4814ddfbf737539681a6 docs/models/conversationrequest.md: id: dd7f4d6807f2 - last_write_checksum: sha1:33dec32dbf20979ac04763e99a82e90ee474fef4 + last_write_checksum: sha1:e4da423f9eb7a8a5d0c21948b50e8df08a63552c pristine_git_object: 2b4ff8ef3398561d9b3e192a51ec22f64880389c + docs/models/conversationrequestagentversion.md: + last_write_checksum: sha1:fd2e9cd7ed2499b5843c592505ec5e0596a50b33 + docs/models/conversationrequesthandoffexecution.md: + last_write_checksum: 
sha1:f7df210a46acf24abb1312123aebe9e595a190e8 + docs/models/conversationrequesttool.md: + last_write_checksum: sha1:69d503d73f5bd044882d13cd0c7de188dd5f4831 docs/models/conversationresponse.md: id: 2eccf42d48af - last_write_checksum: sha1:69059d02d5354897d23c9d9654d38a85c7e0afc6 + last_write_checksum: sha1:17ebabdf1dd191eeac442046511c44120dfa97a1 pristine_git_object: 38cdadd0055d457fa371984eabcba7782e130839 docs/models/conversationresponseobject.md: id: 6c028b455297 @@ -646,7 +602,7 @@ trackedFiles: pristine_git_object: 97266b43444f5ed50eeedf574abd99cb201199fd docs/models/conversationstreamrequest.md: id: 833f266c4f96 - last_write_checksum: sha1:8d7400dcdb9525c2e45bdaa495df6ca7dcf7f992 + last_write_checksum: sha1:5cb58852d393eb6cc504b45d8b238fc2f3eecd2a pristine_git_object: 299346f8aaa8ccddcbf7fd083389b74346ef2d4f docs/models/conversationstreamrequestagentversion.md: id: e99ccc842929 @@ -656,18 +612,12 @@ trackedFiles: id: e6701e5f9f0c last_write_checksum: sha1:ef2ebe8f23f27144e7403f0a522326a7e4f25f50 pristine_git_object: c98e194c1d204c3a5d4234f0553712a7025d7f85 - docs/models/conversationstreamrequesttools.md: - id: 83ea0526da4e - last_write_checksum: sha1:c445fc14cbb882871a83990943569bdf09a662f9 - pristine_git_object: 700c844876754e85428898f6cabda8fb0dedf114 + docs/models/conversationstreamrequesttool.md: + last_write_checksum: sha1:f2882742a74dd2b4f74383efa444c7ab968249dc docs/models/conversationusageinfo.md: id: 57ef89d3ab83 last_write_checksum: sha1:d92408ad37d7261b0f83588e6216871074a50225 pristine_git_object: 57e260335959c605a0b9b4eaa8bf1f8272f73ae0 - docs/models/data.md: - id: 9a31987caf78 - last_write_checksum: sha1:da040f995f799c04214eff92982dd8d6c057ae93 - pristine_git_object: 95dc8d28aa4669513ae0f255c81aadaf3d793370 docs/models/deletefileout.md: id: c7b84242a45c last_write_checksum: sha1:f2b039ab88fc83ec5dd765cab8e2ed8cce7e417d @@ -682,20 +632,18 @@ trackedFiles: pristine_git_object: d9bc15fe393388f7d0c41abce97ead17e35e2ba4 docs/models/deltamessage.md: 
id: 6c5ed6b60968 - last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58 + last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9 + docs/models/deltamessagecontent.md: + last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e docs/models/document.md: id: cd1d2a444370 last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52 pristine_git_object: 509d43b733d68d462853d9eb52fc913c855dff40 docs/models/documentlibrarytool.md: id: 68083b0ef8f3 - last_write_checksum: sha1:5f21be0a248ff4dedc26908b9ee0039d7ac1421c + last_write_checksum: sha1:470b969fa4983c0e7ad3d513b4b7a4fa8d5f0f41 pristine_git_object: 82315f32b920d32741b2e53bc10e411f74a85602 - docs/models/documentlibrarytooltype.md: - id: 23c5ba5c4b3f - last_write_checksum: sha1:bcb58941aafaca2b8ad6e71425d5f16e881b4f97 - pristine_git_object: ebd420f69a4ace05daa7edd82b9315b2a4354b5f docs/models/documentout.md: id: a69fd1f47711 last_write_checksum: sha1:ed446078e7194a0e44e21ab1af958d6a83597edb @@ -708,6 +656,8 @@ trackedFiles: id: 185ab27259a7 last_write_checksum: sha1:e0faccd04229204968dbc4e8131ee72f81288182 pristine_git_object: 0993886d56868aba6844824f0e0fdf1bdb9d74f6 + docs/models/documentupload.md: + last_write_checksum: sha1:aea0f81009be09b153019abbc01b2918a1ecc1f9 docs/models/documenturlchunk.md: id: 48437d297408 last_write_checksum: sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54 @@ -744,10 +694,8 @@ trackedFiles: id: 130a2f7038b0 last_write_checksum: sha1:01c3c10e737bcd58be70b437f7ee74632972a983 pristine_git_object: 7c040b382d4c1b6bc63f582566d938be75a5f954 - docs/models/entries.md: - id: 93dc7a28346c - last_write_checksum: sha1:c6c61c922df17562e9ca5d8d2d325579db5c88bc - pristine_git_object: 8e5a20d052c47008b8a399b7fb740bece3b35386 + docs/models/entry.md: + last_write_checksum: sha1:4971db390327db09f88feff5d2b8a0b1e6c5b933 docs/models/eventout.md: id: 9960732c3718 last_write_checksum: 
sha1:dbc23814b2e54ded4aa014d63510b3a2a3259329 @@ -784,10 +732,6 @@ trackedFiles: id: 2783bfd9c4b9 last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab pristine_git_object: 961bae1f51a4ae9df21b28fd7a5ca91dc7b3888b - docs/models/filesapiroutesuploadfilemultipartbodyparams.md: - id: 558bf53c7b65 - last_write_checksum: sha1:de3f26e8bd89aae0e2c2078b9e1f7f47adccafbd - pristine_git_object: a5dd1174ab987e511d70a0f8fdaefbeaeda18c43 docs/models/fileschema.md: id: 9a05a660399d last_write_checksum: sha1:97987d64285ff3092635754c78ad7b68d863e197 @@ -820,10 +764,6 @@ trackedFiles: id: e16926b57814 last_write_checksum: sha1:52006811b756ff5af865ed6f74838d0903f0ee52 pristine_git_object: 34b24bd4db1ad3f9e77e2c6a45a41d2fbc5cf7fd - docs/models/finishreason.md: - id: 73315c2a39b3 - last_write_checksum: sha1:dc258e82af5babd6efabadb20cd6e2f9663dbb64 - pristine_git_object: 2af53f6e55b74455a696c17ab00ba626a1c3711f docs/models/format_.md: id: a17c22228eda last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 @@ -838,12 +778,8 @@ trackedFiles: pristine_git_object: 19690476c64ac7be53f974347c1618730f0013ce docs/models/ftmodelcard.md: id: 15ed6f94deea - last_write_checksum: sha1:2dccc70020274152bb8a76f0f7699694f8683652 + last_write_checksum: sha1:1c560ceaaacc1d109b2997c36de03192dfcda941 pristine_git_object: 35032775db8ae6f4c6fbac309edacd27ee7868af - docs/models/ftmodelcardtype.md: - id: e2ba85c02d1c - last_write_checksum: sha1:f6a718013be6a8cb340f58f1ff7b919217594622 - pristine_git_object: 0b38470b9222df6c51baef2e7e9e10c0156a2e05 docs/models/function.md: id: 416a80fba031 last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 @@ -870,12 +806,8 @@ trackedFiles: pristine_git_object: 7ea34c5206bdf205d74d8d49c87ddee5607582e9 docs/models/functioncallevent.md: id: cc9f2e603464 - last_write_checksum: sha1:c3a6a7ce8af38d7ba7a2ece48c352eed95edc578 + last_write_checksum: sha1:942d1bed0778ba4738993fcdbefe080934b641d5 pristine_git_object: 
c25679a5d89745c1e186cdeb72fda490b2f45af2 - docs/models/functioncalleventtype.md: - id: 1aab7a86c5d6 - last_write_checksum: sha1:61d480f424df9a74a615be673cae4dcaf7875d81 - pristine_git_object: 8cf3f03866d72ac710015eec57d6b9caa079022e docs/models/functionname.md: id: 4b3bd62c0f26 last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb @@ -894,32 +826,16 @@ trackedFiles: pristine_git_object: 35c94d8e553e1cb641bef28fec2d8b3576d142f6 docs/models/functiontool.md: id: 5fb499088cdf - last_write_checksum: sha1:f616c6de97a6e0d622b16b99f95c2c5a94661789 + last_write_checksum: sha1:a9a3b6530b1c48a8575402b48cde7b65efb33a7d pristine_git_object: 8c42459304100777cf85416a5c3a984bc0e7a7ca - docs/models/functiontooltype.md: - id: bc0bcbe69ad9 - last_write_checksum: sha1:c0fae17a8e5a9b7240ff16af7eef9fb4782fe983 - pristine_git_object: 9c095625b60f1e2e0fd09b08e3ba315545d6a036 docs/models/githubrepositoryin.md: id: b42209ef8423 - last_write_checksum: sha1:fece86cdee3ba3a5719244a953193ed2f7b982f7 + last_write_checksum: sha1:5ab33fc1b0b5513086b1cae07f416d502441db23 pristine_git_object: 1584152ba934756793d5228d5691c07d3256c7b8 - docs/models/githubrepositoryintype.md: - id: e2f2ca622221 - last_write_checksum: sha1:349dc9c6e4db5ec5394c8649c3b872db3545c182 - pristine_git_object: 63da967cb7a75ec328f9b9fbd1062e43f2cabc07 docs/models/githubrepositoryout.md: id: 0ca86e122722 - last_write_checksum: sha1:f6ffda992af75d3f95751106db1b0f0c82a2eca7 + last_write_checksum: sha1:0e3999cef8a745ae24ac36907b3431bc5103ea6f pristine_git_object: 03f0b2661e46b48489ede1208d9c38c4324b2b35 - docs/models/githubrepositoryouttype.md: - id: f3ab58fa1b0e - last_write_checksum: sha1:8f26cd692f499279b9c4182010d56c75374ed9ec - pristine_git_object: 46c3eefd1d67ea6968a3c7025e6dc27e8f0f1ac5 - docs/models/handoffexecution.md: - id: d0b2e094fa39 - last_write_checksum: sha1:1d8fafc8105b6c15e50620353c0457b629951804 - pristine_git_object: 61e7dade49090096a49d99b5c8291f629fd43c4e docs/models/httpvalidationerror.md: id: 
a211c095f2ac last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e @@ -930,28 +846,22 @@ trackedFiles: pristine_git_object: 46a6dd6baa1b1574bad5eadc1e83d4b72d56c0c8 docs/models/imagegenerationtool.md: id: d5deb6b06d28 - last_write_checksum: sha1:8596d0119712e68b1deafd18860ed6ed452a31fa + last_write_checksum: sha1:b3decee8fe7a824401f9afbd3544a69ccde4ef8e pristine_git_object: b8fc9cf40c8cb010231837ffe3d66cb3762dd666 - docs/models/imagegenerationtooltype.md: - id: fc670aabaff7 - last_write_checksum: sha1:234109f99f467905e6e7b74036e2c395090840e4 - pristine_git_object: 29681b58e1afe945faa76f9dd424deb01cdfb1bd docs/models/imageurl.md: id: e75dd23cec1d last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 docs/models/imageurlchunk.md: id: 4407097bfff3 - last_write_checksum: sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727 + last_write_checksum: sha1:73e14a0beccfc9465ee6d2990462e609903f5cd5 pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 - docs/models/imageurlchunkimageurl.md: - id: c7fae88454ce - last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325 - pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064 docs/models/imageurlchunktype.md: id: b9af2db9ff60 last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + docs/models/imageurlunion.md: + last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 docs/models/inputentries.md: id: a5c647d5ad90 last_write_checksum: sha1:4231bb97837bdcff4515ae1b00ff5e7712256e53 @@ -960,38 +870,26 @@ trackedFiles: id: 4b0a7fb87af8 last_write_checksum: sha1:19d8da9624030a47a3285276c5893a0fc7609435 pristine_git_object: 0f62a7ce8e965d0879507e98f808b9eb254282a6 + docs/models/inputsmessage.md: + last_write_checksum: sha1:92a95c1757e33603d1aa9ed6c9912d1c551d9974 docs/models/instructrequest.md: id: a0034d7349a2 - last_write_checksum: 
sha1:91c446be8428efd44163ed8366a37c376554211a + last_write_checksum: sha1:34a81411110cbb7a099c45e482f5d1702ae48fd3 pristine_git_object: 9500cb588b5d27d934b04cc5fa0be26a270f6d82 docs/models/instructrequestinputs.md: id: 2a677880e32a - last_write_checksum: sha1:1b989ef7ef4c84f59c83af11b3243d934c85e348 + last_write_checksum: sha1:64bcc6371d70446da60f167682504568d7f2618c pristine_git_object: 4caa028f85be2324966e61321c917cbd0c65de01 - docs/models/instructrequestinputsmessages.md: - id: c0cb1f866e69 - last_write_checksum: sha1:558f78fafbd44c5ea7030491a39d0c7ccd994d01 - pristine_git_object: 237e131f1b1161c8b90df11d49739f5bfe9ee829 - docs/models/instructrequestmessages.md: - id: 639538e7d70d - last_write_checksum: sha1:8c26b3b97f095e5c525b0e3c18d45aded9bd03a2 - pristine_git_object: 9c866a7db86b40e997cb3f06d68e67eb033f3360 - docs/models/integrations.md: - id: f9eb2b4df2f8 - last_write_checksum: sha1:e0b12cf5661d4e6332da28913c5394e5a85071bf - pristine_git_object: 35214d63ef2b902aa39bfdd2fd6dc5f319cc203b + docs/models/instructrequestmessage.md: + last_write_checksum: sha1:551b5d6dd3ba0b39cad32478213a9eb7549f0023 docs/models/jobin.md: id: 1b7b37214fa8 - last_write_checksum: sha1:6dadb7d78e2dc04966bd041ddb54428108098f76 + last_write_checksum: sha1:16436f5d3222b89d604cf326bde749d9e6f9da39 pristine_git_object: b96517705cea7b9efd266f146080ad1aed3cc8cb - docs/models/jobinintegrations.md: - id: 5f293420eced - last_write_checksum: sha1:288931c5427e1a435b1396e131e95a43cbcbc2b9 - pristine_git_object: 91c102426d05b4f88ca5a661f53f1acf316b5b88 - docs/models/jobinrepositories.md: - id: 5c94c2d28ce8 - last_write_checksum: sha1:e7fbe667fa5703dedd78672d936f1b02caf301b5 - pristine_git_object: b94477af4c51c7939fd6dcdb75cbc56459d4a30a + docs/models/jobinintegration.md: + last_write_checksum: sha1:c9887897357e01e6e228b48d6bf0c3fb4edd29f7 + docs/models/jobinrepository.md: + last_write_checksum: sha1:1773f59546b94688d0be16d3f5f014cd86f5b1d7 docs/models/jobmetadataout.md: id: 30eb634fe247 
last_write_checksum: sha1:46d54b6f6004a6e571afd5207db5170dfbce7081 @@ -1022,7 +920,7 @@ trackedFiles: pristine_git_object: 1b331662b17cd24c22e88b01bf00d042cb658516 docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md: id: 8aa8030f26d7 - last_write_checksum: sha1:ebc6ac03e99d69fed1bae6cb4e858e0aecf2dd88 + last_write_checksum: sha1:619bb7677fa549f5089fde98f3a00ab1d939f80d pristine_git_object: eeddc3cdfdd975cdb69fbfcd306e9445010eb82f docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md: id: a9b75762e534 @@ -1034,8 +932,10 @@ trackedFiles: pristine_git_object: e0d2e3610ce460d834c2d07d9a34b09f8257217b docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md: id: 52078f097503 - last_write_checksum: sha1:5d8fe21d292264209508ae484a7e88d33bff373f + last_write_checksum: sha1:fc134fdc7e229b8df373b77096c8299c214171a7 pristine_git_object: 3dca3cd85245e0956b557fc5d6ae6c5e265df38d + docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md: + last_write_checksum: sha1:bbc08ca53c2da180b96ed0347cf4954410c79311 docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md: id: b4e2b814d8c3 last_write_checksum: sha1:f13b5c8f2e74cc73b58a30d366032c764603f95e @@ -1068,10 +968,6 @@ trackedFiles: id: 1c99619e2435 last_write_checksum: sha1:cffbcfb8673e12feb8e22fd397bf68c8745c76bb pristine_git_object: f6c8a2c3079003a885ee9bdfc73cf7c7c7d8eded - docs/models/jobtype.md: - id: 86685dbc7863 - last_write_checksum: sha1:da927d34a69b0b2569314cc7a62733ee1ab85186 - pristine_git_object: 847c662259537ed54cc108e8de8d8eb93defbe58 docs/models/jsonschema.md: id: a6b15ed6fac8 last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f @@ -1124,13 +1020,9 @@ trackedFiles: id: b9147b1c0e38 last_write_checksum: sha1:45b2cc114886b300e3b996a8b71241ac5c7260a3 pristine_git_object: 2f18b014af4577a0ae862dfeea599d5f700005cb - docs/models/librariesdocumentsuploadv1documentupload.md: - id: c76458963b1c - last_write_checksum: sha1:6973cb619a8e50bb12e96cffdc6b57fcf7add000 - 
pristine_git_object: a0ba95da33a248fd639ca1af5f443fd043dae0ea docs/models/librariesdocumentsuploadv1request.md: id: 89a89d889c72 - last_write_checksum: sha1:4f67f0bc5b2accb6dcf31ce7be0e9447ab4da811 + last_write_checksum: sha1:32294a87d8a0b173b4d6f12b607a1bb3da765776 pristine_git_object: 7c91ca9b92839be8ab1efb4428cc8d7a78d57e1e docs/models/librariesgetv1request.md: id: f47ad71ec7ca @@ -1184,10 +1076,6 @@ trackedFiles: id: b071d5a509cc last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 - docs/models/message.md: - id: a9614076792b - last_write_checksum: sha1:9199637b21212e630336d0d513c6b799732dee54 - pristine_git_object: 752f04a8b5ec3bedb0b5c3e4fbf3e5c3fccc07cd docs/models/messageentries.md: id: 9af3a27b862b last_write_checksum: sha1:a3eb6e37b780644313738f84e6c5ac653b4686bc @@ -1198,12 +1086,14 @@ trackedFiles: pristine_git_object: 4fd18a0dcb4f6af4a9c3956116f8958dc2fa78d1 docs/models/messageinputentry.md: id: eb74af2b9341 - last_write_checksum: sha1:a65737ba7d9592ff91b42689c5c98fca8060d868 + last_write_checksum: sha1:07124339ecb87e31df5f0e2f887e23209dd269af pristine_git_object: d55eb8769c3963518fcbc910d2e1398b6f46fd87 docs/models/messageinputentrycontent.md: id: 7e12c6be6913 last_write_checksum: sha1:6be8be0ebea2b93712ff6273c776ed3c6bc40f9a pristine_git_object: 65e55d97606cf6f3119b7b297074587e88d3d01e + docs/models/messageinputentryobject.md: + last_write_checksum: sha1:7746753005fda37834a73e62bf459eacb740ba5b docs/models/messageinputentryrole.md: id: 2497d07a793d last_write_checksum: sha1:a41eb58f853f25489d8c00f7a9595f443dcca2e6 @@ -1238,7 +1128,7 @@ trackedFiles: pristine_git_object: cb4a7a1b15d44a465dbfbd7fe319b8dbc0b62406 docs/models/messageoutputevent.md: id: b690693fa806 - last_write_checksum: sha1:8a87ff6b624d133bcea36729fb1b1a1a88b3eaf0 + last_write_checksum: sha1:d6538a4b5d5721c09bc196f3e9523ed45dafbea7 pristine_git_object: 92c1c61587e34f6e143263e35c33acc9332870d6 
docs/models/messageoutputeventcontent.md: id: cecea075d823 @@ -1248,14 +1138,6 @@ trackedFiles: id: 87d07815e9be last_write_checksum: sha1:a6db79edc1bf2d7d0f4762653c8d7860cb86e300 pristine_git_object: e38c6472e577e0f1686e22dc61d589fdb2928434 - docs/models/messageoutputeventtype.md: - id: 13c082072934 - last_write_checksum: sha1:03c07b7a6046e138b9b7c02084727785f05a5a67 - pristine_git_object: 1f43fdcce5a8cfe4d781b4a6faa4a265975ae817 - docs/models/messages.md: - id: 2103cd675c2f - last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244 - pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a docs/models/metricout.md: id: 7c6ff0ad95f9 last_write_checksum: sha1:eef34dc522a351e23d7371c00a07662a0711ea73 @@ -1270,24 +1152,20 @@ trackedFiles: pristine_git_object: c7dd2710011451c2db15f53ebc659770e786c4ca docs/models/modelconversation.md: id: 497521ee9bd6 - last_write_checksum: sha1:bd11f51f1b6fedbf8a1e1973889d1961086c164f + last_write_checksum: sha1:440c9e7c306f20bd4f4b27ab0cf770d3bf8762e2 pristine_git_object: 1a03ef7d1dd9e1d6b51f0f9391c46feb5cd822a8 docs/models/modelconversationobject.md: id: 4c5699d157a9 last_write_checksum: sha1:8e2e82e1fa4cb97f8c7a8a129b3cc9cd651e4055 pristine_git_object: ead1fa26f5d9641a198a14b43a0f5689456e5821 - docs/models/modelconversationtools.md: - id: b3463ae729a7 - last_write_checksum: sha1:eb78650e337ab5354a0cdfbfcf975ed02495230b - pristine_git_object: 5cc97437c34263ad650c84c8702e158ee74ecfb1 + docs/models/modelconversationtool.md: + last_write_checksum: sha1:9b33f73330e5ae31de877a904954efe342e99c4f docs/models/modellist.md: id: ce07fd9ce413 - last_write_checksum: sha1:4f2956eeba39cc14f2289f24990e85b3588c132a + last_write_checksum: sha1:b4c22b5eff4478ffa5717bd5af92ca79f4a90b01 pristine_git_object: 760882c6c5b442b09bbc91f910f960138d6a00c8 - docs/models/modeltype.md: - id: 9f69805691d1 - last_write_checksum: sha1:f3a8bce458460e55124ce5dd6814e7cada8e0e89 - pristine_git_object: a31c3ca0aa78cae9619b313f1cda95b9c391ee12 + 
docs/models/modellistdata.md: + last_write_checksum: sha1:7394ba5645f990163c4d777ebbfc71f24c5d3a74 docs/models/moderationobject.md: id: 4e84364835f5 last_write_checksum: sha1:2831033dcc3d93d32b8813498f6eb3082e2d3c4e @@ -1296,14 +1174,8 @@ trackedFiles: id: e15cf12e553b last_write_checksum: sha1:18e8f4b4b97cb444824fcdce8f518c4e5a27c372 pristine_git_object: 75a5eec74071fdd0d330c9f3e10dac0873077f20 - docs/models/name.md: - id: 6ee802922293 - last_write_checksum: sha1:91a266ed489c046a4ec511d4c03eb6e413c2ff02 - pristine_git_object: 18b978a8cc2c38d65c37e7dd110315cedb221620 - docs/models/object.md: - id: 7ffe67d0b83f - last_write_checksum: sha1:dfb590560db658dc5062e7cedc1f3f29c0d012a0 - pristine_git_object: 0122c0db4541d95d57d2edb3f18b9e1921dc3099 + docs/models/multipartbodyparams.md: + last_write_checksum: sha1:34e68e3795c7987138abd152177fa07198d2f6f6 docs/models/ocrimageobject.md: id: b72f3c5853b2 last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 @@ -1332,18 +1204,12 @@ trackedFiles: id: 419abbb8353a last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c - docs/models/one.md: - id: 69a5df93c480 - last_write_checksum: sha1:cb6d46c2939a0e2314e29ff0307a2b0632caca65 - pristine_git_object: 3de496a6201d47ea52fc15bfe16a44bd6d3be900 + docs/models/output.md: + last_write_checksum: sha1:600058f0b0f589d8688e9589762c45a0dd18cc9b docs/models/outputcontentchunks.md: id: f7e175c8e002 last_write_checksum: sha1:5094466110028801726cc825e8809f524fe1ee24 pristine_git_object: c76bc31d4d8791b7bef4dc6cbff6671b38a7927d - docs/models/outputs.md: - id: 58b672ddb5b3 - last_write_checksum: sha1:7553d62771ac5a85f8f330978b400cdd420cf865 - pristine_git_object: 7756c6276cc141b69d8099e0bbcbd2bccc1b5112 docs/models/paginationinfo.md: id: 3d2b61cbbf88 last_write_checksum: sha1:1da38e172024fe703f3180ea3c6ec91fe3c51ed0 @@ -1356,22 +1222,16 @@ trackedFiles: id: 83c8c59c1802 last_write_checksum: 
sha1:046375bb3035cc033d4484099cd7f5a4f53ce88c pristine_git_object: 7b67583f4209778ac6f945631c0ee03ba1f4c663 - docs/models/queryparamagentversion.md: - id: 49d942f63049 - last_write_checksum: sha1:42557c6bf0afc1eabde48c4b6122f801608d8f05 - pristine_git_object: 3eb5ef1840299139bf969379cbfc3ed49127f176 - docs/models/queryparamstatus.md: - id: 15628120923d - last_write_checksum: sha1:36f1c9b6a6af6f27fbf0190417abf95b4a0bc1b9 - pristine_git_object: dcd2090861b16f72b0fb321714b4143bc14b7566 docs/models/realtimetranscriptionerror.md: id: 4bc5e819565b last_write_checksum: sha1:c93e4b19a0aa68723ea69973a9f22a581c7b2ff6 pristine_git_object: e01f2126b3084eade47a26ea092556f7f61142c9 docs/models/realtimetranscriptionerrordetail.md: id: ea137b1051f1 - last_write_checksum: sha1:43ae02b32b473d8ba1aaa3b336a40f706d6338d0 + last_write_checksum: sha1:7e1d18760939d6087cda5fba54553141f8a78d1e pristine_git_object: 96420ada2ac94fca24a36ddacae9c876e14ccb7a + docs/models/realtimetranscriptionerrordetailmessage.md: + last_write_checksum: sha1:f8c3a4984d647d64e8ea4e1e42654265ffe46b0f docs/models/realtimetranscriptionsession.md: id: aeb0a0f87d6f last_write_checksum: sha1:c3aa4050d9cc1b73df8496760f1c723d16183f3a @@ -1392,38 +1252,20 @@ trackedFiles: id: 0944b80ea9c8 last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 - docs/models/repositories.md: - id: 0531efe9bced - last_write_checksum: sha1:249bdb315eb1f0bd54601e5b8a45e58cb1ec7638 - pristine_git_object: 02274e3d58d55f4a18dfdf578fa53d2459e1345e docs/models/requestsource.md: id: 8857ab6025c4 last_write_checksum: sha1:4b7ecc7c5327c74e46e2b98bd6e3814935cdecdf pristine_git_object: c81c115992439350d56c91d2e3351a13df40676b - docs/models/response1.md: - id: 245c499462a9 - last_write_checksum: sha1:6d64b50b59875744eb3c1038d7cdcba9397fdbae - pristine_git_object: 2e73fdbb204c14cadc028d0891ede0ca4d4178d7 - docs/models/responsebody.md: - id: a2c4400c632e - last_write_checksum: 
sha1:a1705a40914ac8f96000953bd53ca01f66643fcd - pristine_git_object: 8a218517178eed859683f87f143c5397f96d10d9 + docs/models/response.md: + last_write_checksum: sha1:f4a3ec06ff53cd1cbdf892ff7152d39fa1746821 docs/models/responsedoneevent.md: id: 38c38c3c065b - last_write_checksum: sha1:9910c6c35ad7cb8e5ae0edabcdba8a8a498b3138 + last_write_checksum: sha1:4ac3a0fd91d5ebaccce7f4098ae416b56e08416f pristine_git_object: ec25bd6d364b0b4959b11a6d1595bdb57cba6564 - docs/models/responsedoneeventtype.md: - id: 03a896b6f98a - last_write_checksum: sha1:09ccbc7ed0143a884481a5943221be2e4a16c123 - pristine_git_object: 58f7f44d74553f649bf1b54385926a5b5d6033f5 docs/models/responseerrorevent.md: id: 3e868aa9958d - last_write_checksum: sha1:9ed1d04b3ed1f468f4dc9218890aa24e0c84fc03 + last_write_checksum: sha1:4711077bf182e4f3406dd12357da49d37d172b4c pristine_git_object: 2ea6a2e0ec412ae484f60fa1d09d02e776499bb9 - docs/models/responseerroreventtype.md: - id: 5595b8eec59e - last_write_checksum: sha1:442185b0615ec81923f4c97478e758b451c52439 - pristine_git_object: 3b3fc303fc7f75c609b18a785f59517b222b6881 docs/models/responseformat.md: id: 50a1e4140614 last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add @@ -1432,14 +1274,14 @@ trackedFiles: id: cf1f250b82db last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f + docs/models/responseretrievemodelv1modelsmodelidget.md: + last_write_checksum: sha1:6bae62cbb18559065a53f0acdacb1f72f513467e docs/models/responsestartedevent.md: id: 88e3b9f0aa8d - last_write_checksum: sha1:fa9db583e8223d2d8284866f7e6cf6d775751478 + last_write_checksum: sha1:156f38bbe8278f9c03117135938e7cbdae3038b9 pristine_git_object: 481bd5bba67a524dbadf9f1570a28ae20ec9f642 - docs/models/responsestartedeventtype.md: - id: 1d27fafe0f03 - last_write_checksum: sha1:c30ca125ec76af9a2191ebc125f5f8b9558b0ecb - pristine_git_object: 2d9273bd02bf371378575619443ec948beec8d66 + 
docs/models/responsev1conversationsget.md: + last_write_checksum: sha1:8e75db359f0d640a27498d20c2ea6d561c318d7e docs/models/retrievefileout.md: id: 8e82ae08d9b5 last_write_checksum: sha1:600d5ea4f75dab07fb1139112962affcf633a6c9 @@ -1448,14 +1290,6 @@ trackedFiles: id: ac567924689c last_write_checksum: sha1:7534c5ec5f1ae1e750c8f610f81f2106587e81a9 pristine_git_object: f1280f8862e9d3212a5cfccd9453884b4055710a - docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md: - id: c2a914584353 - last_write_checksum: sha1:bdd52e2c434fc6fd10e341d41de9dda1a28ddb4f - pristine_git_object: 3ac96521a8f58f1ed4caedbb4ab7fe3fe2b238c5 - docs/models/role.md: - id: b694540a5b1e - last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d - pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053 docs/models/sampletype.md: id: 0e09775cd9d3 last_write_checksum: sha1:33cef5c5b097ab7a9cd6232fe3f7bca65cd1187a @@ -1488,17 +1322,9 @@ trackedFiles: id: 6a902241137c last_write_checksum: sha1:567027284c7572c0fa24132cd119e956386ff9d0 pristine_git_object: ae06b5e870d31b10f17224c99af1628a7252bbc3 - docs/models/status.md: - id: 959cd204aadf - last_write_checksum: sha1:618f30fd5ba191bb918c953864bfac4a63192a40 - pristine_git_object: 5e22eb736c734121b4b057812cacb43b3e299b52 - docs/models/stop.md: - id: f231cc9f5041 - last_write_checksum: sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8 - pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846 docs/models/systemmessage.md: id: fdb7963e1cdf - last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f + last_write_checksum: sha1:561c3372391e093c890f477b3213c308ead50b81 pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 docs/models/systemmessagecontent.md: id: 94a56febaeda @@ -1554,32 +1380,26 @@ trackedFiles: pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 docs/models/toolexecutiondeltaevent.md: id: f2fc876ef7c6 - last_write_checksum: sha1:901756826684886179c21f47c063c55700c79ec4 
+ last_write_checksum: sha1:ae1462a9b5cb56002b41f477ce262cb64ccf2f4e pristine_git_object: 7bee6d831a92085a88c0772300bcad4ce8194edb docs/models/toolexecutiondeltaeventname.md: id: 93fd3a3b669d last_write_checksum: sha1:d5dcdb165c220209ee76d81938f2d9808c77d4fc pristine_git_object: 9c3edef8c0698d7293a71ee56410a0ed67fd1924 - docs/models/toolexecutiondeltaeventtype.md: - id: ae6e8a5bf0ce - last_write_checksum: sha1:dd405269077b6a4756fd086067c9bbe88f430924 - pristine_git_object: a4a2f8cc9927499c990bad0590e84b2a609add8d docs/models/toolexecutiondoneevent.md: id: b604a4ca5876 - last_write_checksum: sha1:267ff0e19884e08abf3818b890579c1a13a3fa98 + last_write_checksum: sha1:6b6975ded0b0495b6c56250d153186c7818b5958 pristine_git_object: 5898ea5eff103b99886789805d9113dfd8b01588 docs/models/toolexecutiondoneeventname.md: id: d19dc0060655 last_write_checksum: sha1:aa5677087e6933699135a53f664f5b86bbae5ac6 pristine_git_object: 6449079d7b467796355e3353f4245046cced17e8 - docs/models/toolexecutiondoneeventtype.md: - id: 7c5a318d924b - last_write_checksum: sha1:55a5041cdf8c7e05fcfd7260a72f7cd3f1b2baf8 - pristine_git_object: 872624c1f274259cdd22100995b5d99bf27eaeac docs/models/toolexecutionentry.md: id: 75a7560ab96e - last_write_checksum: sha1:66086952d92940830a53f5583f1751b09d902fcf + last_write_checksum: sha1:fdaa9abd5417486100ffc7059fcfdc8532935ed3 pristine_git_object: 3678116df64ad398fef00bab39dd35c3fd5ee1f5 + docs/models/toolexecutionentryname.md: + last_write_checksum: sha1:6c528cdfbb3f2f7dc41d11f57c86676f689b8845 docs/models/toolexecutionentryobject.md: id: af106f91001f last_write_checksum: sha1:6df075bee4e84edf9b57fcf62f27b22a4e7700f4 @@ -1590,16 +1410,12 @@ trackedFiles: pristine_git_object: a67629b8bdefe59d188969a2b78fa409ffeedb2a docs/models/toolexecutionstartedevent.md: id: 37657383654d - last_write_checksum: sha1:3051a74c1746c8341d50a22f34bd54f6347ee0c8 + last_write_checksum: sha1:47126a25c2a93583038ff877b85fc9ae1dcef9f3 pristine_git_object: 
de81312bda08970cded88d1b3df23ebc1481ebf2 docs/models/toolexecutionstartedeventname.md: id: be6b33417678 last_write_checksum: sha1:f8857baa02607b0a0da8d96d130f1cb765e3d364 pristine_git_object: 3308c483bab521f7fa987a62ebd0ad9cec562c3a - docs/models/toolexecutionstartedeventtype.md: - id: 9eff7a0d9ad5 - last_write_checksum: sha1:86fe6aec11baff4090efd11d10e8b31772598349 - pristine_git_object: 56695d1f804c28808cf92715140959b60eb9a9fd docs/models/toolfilechunk.md: id: 67347e2bef90 last_write_checksum: sha1:0a499d354a4758cd8cf06b0035bca105ed29a01b @@ -1614,16 +1430,12 @@ trackedFiles: pristine_git_object: 7e99acefff265f616b576a90a5f0484add92bffb docs/models/toolmessage.md: id: 0553747c37a1 - last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167 + last_write_checksum: sha1:f35fa287b94d2c1a9de46c2c479dadd5dca7144d pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5 docs/models/toolmessagecontent.md: id: f0522d2d3c93 last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 - docs/models/toolmessagerole.md: - id: f333d4d1ab56 - last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f - pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6 docs/models/toolreferencechunk.md: id: 10414b39b7b3 last_write_checksum: sha1:2e24f2331bb19de7d68d0e580b099c03f5207199 @@ -1636,10 +1448,6 @@ trackedFiles: id: 42a4cae4fd96 last_write_checksum: sha1:43620d9529a1ccb2fac975fbe2e6fcaa62b5baa5 pristine_git_object: bc57d277a39eef3c112c08ffc31a91f5c075c5a4 - docs/models/tools.md: - id: b78ed2931856 - last_write_checksum: sha1:ea4dcd2eafe87fc271c2f6f22f9b1cedc9f8316e - pristine_git_object: f308d732e3adfcc711590c3e1bee627c94032a6b docs/models/tooltypes.md: id: adb50fe63ea2 last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c @@ -1654,16 +1462,14 @@ trackedFiles: pristine_git_object: 1bc0189c5d1833c946a71c9773346e21b08d2404 docs/models/transcriptionsegmentchunk.md: id: 
f09db8b2273e - last_write_checksum: sha1:b89ee132a3c63e56806f3f395c98a9e7e5e9c7d0 + last_write_checksum: sha1:5387f2595d14f34b8af6182c34efac4874a98308 pristine_git_object: f620b96a75a0b9c6e015ae1f460dcccb80d113ee + docs/models/transcriptionsegmentchunktype.md: + last_write_checksum: sha1:63d511c2bd93bd477f1b7aae52954b28838316d9 docs/models/transcriptionstreamdone.md: id: 2253923d93cf - last_write_checksum: sha1:043ebcd284007f8c8536f2726ec5f525abffeb6b + last_write_checksum: sha1:2a1910d59be258af8dd733b8911e5a0431fab5a4 pristine_git_object: 9ecf7d9ca32410d92c93c62ead9674e097533ec3 - docs/models/transcriptionstreamdonetype.md: - id: 3f5aec641135 - last_write_checksum: sha1:b86f7b20dff031e7dbe02b4805058a025c39dcac - pristine_git_object: db092c4fa47d7401919a02c199198e4ae99a5de1 docs/models/transcriptionstreamevents.md: id: d0f4eedfa2b6 last_write_checksum: sha1:ec6b992049bd0337d57baab56603b1fa36a0a35b @@ -1678,36 +1484,16 @@ trackedFiles: pristine_git_object: e4eb25a6400dcc5a48b5eb5f65e96f7be91fa761 docs/models/transcriptionstreamlanguage.md: id: 5e9df200153c - last_write_checksum: sha1:82967c1b056bc1358adb21644bf78f0e37068e0f + last_write_checksum: sha1:d5626a53dde8d6736bab75f35cee4d6666a6b795 pristine_git_object: e16c8fdce3f04ae688ddc18650b359d2dd5d6f6f - docs/models/transcriptionstreamlanguagetype.md: - id: 81c8bd31eeb1 - last_write_checksum: sha1:6cf3efec178180266bccda24f27328edfbebbd93 - pristine_git_object: e93521e10d43299676f44c8297608cc94c6106e6 docs/models/transcriptionstreamsegmentdelta.md: id: f59c3fb696f2 - last_write_checksum: sha1:4d03e881a4ad9c3bed6075bb8e25d00af391652c + last_write_checksum: sha1:4a031b76315f66c3d414a7dd5f34ae1b5c239b2e pristine_git_object: 2ab32f9783f6645bba7603279c03db4465c70fff - docs/models/transcriptionstreamsegmentdeltatype.md: - id: 03ee222a3afd - last_write_checksum: sha1:d02b5f92cf2d8182aeaa8dd3428b988ab4fc0fad - pristine_git_object: 03ff3e8bb4f25770200ed9fb43dd246375934c58 docs/models/transcriptionstreamtextdelta.md: id: 
69a13554b554 - last_write_checksum: sha1:9f6c7bdc50484ff46b6715141cee9912f1f2f3ff + last_write_checksum: sha1:de31f5585d671f85e6a9b8f04938cf71000ae3f7 pristine_git_object: adddfe187546c0161260cf06953efb197bf25693 - docs/models/transcriptionstreamtextdeltatype.md: - id: ae14d97dc3fa - last_write_checksum: sha1:2abfea3b109518f7371ab78ade6fa514d6e3e968 - pristine_git_object: b7c9d675402cd122ee61deaa4ea7051c2503cf0e - docs/models/two.md: - id: 3720b8efc931 - last_write_checksum: sha1:8676158171bef1373b5e0b7c91a31c4dd6f9128a - pristine_git_object: 59dc2be2a2036cbdac26683e2afd83085387188f - docs/models/type.md: - id: 98c32f09b2c8 - last_write_checksum: sha1:9b07c46f7e1aacaab319e8dfdcfdfc94a2b7bf31 - pristine_git_object: d05ead75c8f6d38b4dbcc2cdad16f1ba4dd4f7e8 docs/models/unarchiveftmodelout.md: id: 4f2a771b328a last_write_checksum: sha1:b3be8add91bbe10704ff674891f2e6377b34b539 @@ -1730,16 +1516,12 @@ trackedFiles: pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 docs/models/usermessage.md: id: ed66d7a0f80b - last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed + last_write_checksum: sha1:627f88dbb89e226a7d92564658c23a0e8d71342a pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546 docs/models/usermessagecontent.md: id: 52c072c851e8 last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf - docs/models/usermessagerole.md: - id: 99ffa937c462 - last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111 - pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4 docs/models/utils/retryconfig.md: id: 4343ac43161c last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d @@ -1750,55 +1532,43 @@ trackedFiles: pristine_git_object: 7a1654a1a5cfb3ab92360b361e8e962bf2db4582 docs/models/wandbintegration.md: id: ba1f7fe1b1a3 - last_write_checksum: sha1:1702d58db559818304404a5dc8c70d71fb2be716 + last_write_checksum: 
sha1:ef35648cec304e58ccd804eafaebe9547d78ddcf pristine_git_object: 199d2eddc61069c80b628a12bff359ac2abc7338 docs/models/wandbintegrationout.md: id: c1a0f85273d8 - last_write_checksum: sha1:c2addbba8c15b7c115129d5249c4a6d7dc527d2d + last_write_checksum: sha1:ce7ffc6cc34931b4f6d2b051ff63e1ca39e13882 pristine_git_object: cec02ed87555128e6027e00f3385a61028694ac0 - docs/models/wandbintegrationouttype.md: - id: 647c7c2eab8a - last_write_checksum: sha1:78ad7847183b18319995b5e3de0262ba6fffecac - pristine_git_object: 5a7533c99671e0556c3c11f179312ec8268ce477 - docs/models/wandbintegrationtype.md: - id: 08c414c73826 - last_write_checksum: sha1:0990c604ec45f2f1fd1019e87705533b0c9be023 - pristine_git_object: 4fdffe22e370fd64429d83753c30a0079be0e7fd docs/models/websearchpremiumtool.md: id: 267988aa8c3f - last_write_checksum: sha1:cc040d754d40c644a2a8fd70302eb7ee864bfff3 + last_write_checksum: sha1:f9b761d727cbe0c60a2d0800b0a93929c5c3f5e7 pristine_git_object: 941fc2b8448d4caeae9318fdf08053a2b59a9bee - docs/models/websearchpremiumtooltype.md: - id: c70fa6b0ee9f - last_write_checksum: sha1:069ad330c3f5b3c6b8a375de4484f151698c439c - pristine_git_object: 348bfe854914114c84cd74997a63fe2badc0756d docs/models/websearchtool.md: id: fc4df52fb9b5 - last_write_checksum: sha1:53e128c3f0f6781227d99d46838579dc15ab26d2 + last_write_checksum: sha1:047fd9f950d5a86cf42a8f3ac40f754b395e39ec pristine_git_object: c8d708bdcdbfc387a09683bdd47ebabedd566cb0 - docs/models/websearchtooltype.md: - id: 6591e569c4f3 - last_write_checksum: sha1:f9b6672bc3fbb5bb70c4919cb7b98160a0ebe9ff - pristine_git_object: 57b6acbbd3b85aae5a9b7e2f754689637c01a912 docs/sdks/accesses/README.md: id: 2ea167c2eff2 last_write_checksum: sha1:22bd7a11d44295c2f433955604d3578292f26c99 pristine_git_object: 64a1e749aeb6f2c32497a72a649ecc2b7549c077 docs/sdks/agents/README.md: id: 5965d8232fd8 - last_write_checksum: sha1:34e01f46c1a32020fa3eeb40fe80c3c0e8de0983 + last_write_checksum: sha1:a655952f426d5459fa958fa5551507e4fb3f29a8 
pristine_git_object: 75efc492c4114417c22a796824ee971e9180104e + docs/sdks/batchjobs/README.md: + last_write_checksum: sha1:212bc82280a58f896172d173e5be516b926bc11c + docs/sdks/betaagents/README.md: + last_write_checksum: sha1:131f220aefaff8a3ca912df661199be7a88d50ca docs/sdks/chat/README.md: id: 393193527c2c - last_write_checksum: sha1:7bc2201f585bea247c0bb148ecdea220bcb384e1 + last_write_checksum: sha1:908e67969e8f17bbcbe3697de4233d9e1dd81a65 pristine_git_object: 89c4fffbb777427723307b13c124668601ff5839 docs/sdks/classifiers/README.md: id: 74eb09b8d620 - last_write_checksum: sha1:f424721545e683e230ee0c612765be2bdb9897cd + last_write_checksum: sha1:f9cc75dbb32ea9780a9d7340e524b7f16dc18070 pristine_git_object: 634ee419f3334ba50dd25f0e2340c32db1ec40b3 docs/sdks/conversations/README.md: id: e22a9d2c5424 - last_write_checksum: sha1:5ed03d60808cff2539e0e83df4714b3a274208a0 + last_write_checksum: sha1:f55def6eaab9fcbed0e86a4dee60e5c2656f0805 pristine_git_object: acd43cdb63edd23665e808aaccc6ab3a4dc3dc85 docs/sdks/documents/README.md: id: 9758e88a0a9d @@ -1816,25 +1586,15 @@ trackedFiles: id: 499b227bf6ca last_write_checksum: sha1:34ff7167b0597bf668ef75ede016cb8884372d1b pristine_git_object: 3c8c59c79db12c916577d6c064ddb16a511513fd - docs/sdks/jobs/README.md: - id: 7371cdc8b89a - last_write_checksum: sha1:5dcd708cfcbb00d0ab9d41311c363c6fdae101b0 - pristine_git_object: 9c44be7559e2b7127d43ff50777fd32c7cf8b6ee + docs/sdks/finetuningjobs/README.md: + last_write_checksum: sha1:58b5ecea679eab1691f0002c7d3323170d73357b docs/sdks/libraries/README.md: id: df9a982905a3 last_write_checksum: sha1:0c710c0395906333b85bedd516cfca7dcb3b9b42 pristine_git_object: bbdacf0538c6c055fef0c0109aac163e987a3dd5 - docs/sdks/mistralagents/README.md: - id: 20b3478ad16d - last_write_checksum: sha1:b2dcb1516dd05dc38e0e0305969de248994aade4 - pristine_git_object: fe0f6e35a445e17ccedc2031c4b4204f5cc4d650 - docs/sdks/mistraljobs/README.md: - id: 71aafa44d228 - last_write_checksum: 
sha1:212bc82280a58f896172d173e5be516b926bc11c - pristine_git_object: 8f2358de28e88ffd1e3750292488c486f7bb893b docs/sdks/models/README.md: id: b35bdf4bc7ed - last_write_checksum: sha1:ca13e994ae31ddf37628eba9cc68cf8f64b48404 + last_write_checksum: sha1:37ac4b52ddcdbe548d478aed5fd95091a38b4e42 pristine_git_object: 6fa28ca2e25c0b2f3fbf044b706d19f01193fc3c docs/sdks/ocr/README.md: id: 545e35d2613e @@ -1870,7 +1630,7 @@ trackedFiles: pristine_git_object: e7e1bb7f61527de6095357e4f2ab11e342a4af87 src/mistralai/client/_version.py: id: cc807b30de19 - last_write_checksum: sha1:e654adbd2f066332b48c68d97e995dcc8f7dde84 + last_write_checksum: sha1:c808e81ad8b454d646101b878105d109d74ba6ad pristine_git_object: 8c5d6e54860c69881bf976887910fc32d183c6e5 src/mistralai/client/accesses.py: id: 76fc53bfcf59 @@ -1878,7 +1638,7 @@ trackedFiles: pristine_git_object: 307c7156626e735c802c149ea3547648ea03da09 src/mistralai/client/agents.py: id: e946546e3eaa - last_write_checksum: sha1:4a2bc22e5a6d9aee56d04d2800084eb326ef9ba7 + last_write_checksum: sha1:0ff47f41f9224c1ef6c15b5793c04a7be64f074b pristine_git_object: c04abd21b5b7cb9b8ddfdb52ec67fffa7d21759a src/mistralai/client/audio.py: id: 7a8ed2e90d61 @@ -1890,27 +1650,31 @@ trackedFiles: pristine_git_object: bddc9012f28f7881b75a720a07a3ad60845e472e src/mistralai/client/batch.py: id: cffe114c7ac7 - last_write_checksum: sha1:b7236249d2a6235fc3834b2c3bba3feda838013e + last_write_checksum: sha1:ed3cc7aee50879eca660845e51bb34912505d56a pristine_git_object: d53a45fbcbeb7b1d8fb29c373101c9e2a586b877 + src/mistralai/client/batch_jobs.py: + last_write_checksum: sha1:0ac09a2fcbf9f059cea8197b0961cd78603e9c9c src/mistralai/client/beta.py: id: 981417f45147 - last_write_checksum: sha1:2cf61e620e0e0e969e951d100e42c8c9b8facd27 + last_write_checksum: sha1:538571fbb2b393c64b1e7f53d1e530d989717eb3 pristine_git_object: b30003eae52be5e79838fe994cda8474068a43dc + src/mistralai/client/beta_agents.py: + last_write_checksum: 
sha1:295438e65ce0453cbb97988fb58d01263d88b635 src/mistralai/client/chat.py: id: 7eba0f088d47 - last_write_checksum: sha1:53558e4f3e5ecc8d2cea51d2462aa3432d8c156e + last_write_checksum: sha1:00d1ec46a2c964b39dae5f02e4d8adf23e5dcc21 pristine_git_object: 6fa210bb01b193e1bd034431923a3d4dc8c8a16c src/mistralai/client/classifiers.py: id: 26e773725732 - last_write_checksum: sha1:b3bed5a404f8837cc12e516f3fb85f47fd37518a + last_write_checksum: sha1:3a65b39ad26b6d1c988d1e08b7b06e88da21bb76 pristine_git_object: 537e2438afcb570a3e436ab4dd8b7d604b35b627 src/mistralai/client/conversations.py: id: 40692a878064 - last_write_checksum: sha1:fedcc53385d833f18fdd393591cb156bc5e5f3d1 + last_write_checksum: sha1:d6b44a85ecf623d0257296d62b05f26742a2a2aa pristine_git_object: 285beddbd175fee210b697d4714c28196c1fa7a2 src/mistralai/client/documents.py: id: bcc17286c31c - last_write_checksum: sha1:82287ef513f2f5ee1acb9ffe8323f2dad0fc86f4 + last_write_checksum: sha1:eb3d1d86cbc2e7e72176ff60370a9ad1d616e730 pristine_git_object: 009a604f1c2fa367d14df7fb9f4078083c4be501 src/mistralai/client/embeddings.py: id: f9c17258207e @@ -1918,7 +1682,7 @@ trackedFiles: pristine_git_object: 359f2f621d1628536b89f66115726064db34e51b src/mistralai/client/files.py: id: f12df4b2ce43 - last_write_checksum: sha1:72c1fda19adff9042461f498d5859bae62d4603a + last_write_checksum: sha1:577d731e40683b309a4848d8534185e738e54d31 pristine_git_object: 97817eab1a4b0a0649a128b06a9f3ff4077dffa5 src/mistralai/client/fim.py: id: 217bea5d701d @@ -1926,35 +1690,25 @@ trackedFiles: pristine_git_object: 4a834fe93a9b9a8af30f681c9541a7cef0a513e1 src/mistralai/client/fine_tuning.py: id: 5d5079bbd54e - last_write_checksum: sha1:e8061f6bb9912d668249c3c20235e9778345d23b + last_write_checksum: sha1:e420e8df4b265b95696085585b1b213b9d05dee4 pristine_git_object: c57425fdf3225eaeccee47a17db198a3974995a3 + src/mistralai/client/fine_tuning_jobs.py: + last_write_checksum: sha1:4dc213f6b47379bd76c97c8fc62a4dc23acbb86e 
src/mistralai/client/httpclient.py: id: 3e46bde74327 last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 pristine_git_object: 89560b566073785535643e694c112bedbd3db13d - src/mistralai/client/jobs.py: - id: 22e6e695e52b - last_write_checksum: sha1:a040fec9c1a50ec603e2cd22284db526c177a55b - pristine_git_object: 848926eaca286f74b5cfd4b0f0f72a8e2222c52f src/mistralai/client/libraries.py: id: d43a5f78045f last_write_checksum: sha1:5264a24b973f49b4ea7252868f4a76baba9093b4 pristine_git_object: 03a547410e042c19329ea9a91eef1bf25ecdcbe1 - src/mistralai/client/mistral_agents.py: - id: bd22ff89d9bb - last_write_checksum: sha1:7b6d1ac9256c1f958bbc9cf18355b4407f0cffc4 - pristine_git_object: 2ac7a29e4d7ab72c5fa29d13e7a8e4648906ead0 - src/mistralai/client/mistral_jobs.py: - id: e925bb9b27ce - last_write_checksum: sha1:b1d8ecfe998d64637089eb4a5a4cfdf4735717d1 - pristine_git_object: eae4403326ecfdf432a1ca7feb260ffe8ec251cf src/mistralai/client/models/__init__.py: id: e0e8dad92725 - last_write_checksum: sha1:cb1fb02e33b85bf82db7d6fd15b2cc3b109c5060 + last_write_checksum: sha1:1b4b7b007a50570b4592f6121d6fa5556cecae4b pristine_git_object: 23e652220f29a882748661a8c0d21aa2830471bf src/mistralai/client/models/agent.py: id: 1336849c84fb - last_write_checksum: sha1:68609569847b9d638d948deba9563d5460c17b9f + last_write_checksum: sha1:39fca92a9cb4fea59a01b6ce883b1c17395978f8 pristine_git_object: 3bedb3a3a71c116f5ccb0294bc9f3ce6690e47b2 src/mistralai/client/models/agentaliasresponse.py: id: 3899a98a55dd @@ -1966,11 +1720,11 @@ trackedFiles: pristine_git_object: 5dfa8c3137c59be90c655ba8cf8afb8a3966c93a src/mistralai/client/models/agentcreationrequest.py: id: 35b7f4933b3e - last_write_checksum: sha1:60caa3dfa2425ac3ff4e64d81ac9d18df0774157 + last_write_checksum: sha1:99456f8e6d8848f2cebbd96040eefbce73c9c316 pristine_git_object: 61a5aff554f830ab9057ce9ceafc2ce78380290f src/mistralai/client/models/agenthandoffdoneevent.py: id: 82628bb5fcea - last_write_checksum: 
sha1:79de1153a3fce681ee547cc1d3bd0fd8fc5598d2 + last_write_checksum: sha1:151a49e8a7f110123fd0a41e723dfdb6055e9a8e pristine_git_object: c826aa5e1f2324cddb740b3ffc05095ff26c666d src/mistralai/client/models/agenthandoffentry.py: id: 5030bcaa3a07 @@ -1978,7 +1732,7 @@ trackedFiles: pristine_git_object: 0b0de13f8840e9ab221ea233040ca03241cba8b7 src/mistralai/client/models/agenthandoffstartedevent.py: id: 2f6093d9b222 - last_write_checksum: sha1:772bc7b396285560cdafd7d7fb4bc4ece79179ad + last_write_checksum: sha1:ba4e40a4791bad20a4ac7568e32e34f6f00cfe24 pristine_git_object: 4b8ff1e5e3639fb94b55c0a417e9478d5a4252b2 src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py: id: 23a832f8f175 @@ -1994,7 +1748,7 @@ trackedFiles: pristine_git_object: edcccda19d5c3e784a227c6356285ee48be3d7f2 src/mistralai/client/models/agents_api_v1_agents_getop.py: id: f5918c34f1c7 - last_write_checksum: sha1:412df95a1ac4b4f6a59e4391fd1226f2e26e4537 + last_write_checksum: sha1:efdd7bed8ae19047b48c16c73099d433725181ab pristine_git_object: d4817457a33d49ddaa09e8d41f3b03b69e8e491e src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py: id: a04815e6c798 @@ -2030,7 +1784,7 @@ trackedFiles: pristine_git_object: 81066f90302d79bc2083d1e31aa13656c27cc65f src/mistralai/client/models/agents_api_v1_conversations_getop.py: id: c530f2fc64d0 - last_write_checksum: sha1:28cab443af4d623a22e836ab876da20d84eb8a41 + last_write_checksum: sha1:89088ac683d6830ffd4f649c25ccfb60a4b094de pristine_git_object: c919f99e38148fb9b2d51816d0dd231ee828b11d src/mistralai/client/models/agents_api_v1_conversations_historyop.py: id: 2f5ca33768aa @@ -2038,7 +1792,7 @@ trackedFiles: pristine_git_object: ba1f8890c1083947e4d6882dff2b50b3987be738 src/mistralai/client/models/agents_api_v1_conversations_listop.py: id: 936e36181d36 - last_write_checksum: sha1:b338f793707c25ce9703266d8b7f6f560051b057 + last_write_checksum: sha1:e528bf06983dd0b22a0b0bc1d470b344e85db434 pristine_git_object: 
bb3c7127c4b43019405689dc2ae10f5933c763bc src/mistralai/client/models/agents_api_v1_conversations_messagesop.py: id: b5141764a708 @@ -2054,15 +1808,15 @@ trackedFiles: pristine_git_object: 8bce3ce519a69a6d1cb36383b22fb801768c4868 src/mistralai/client/models/agentscompletionrequest.py: id: 3960bc4c545f - last_write_checksum: sha1:7f2176c96916c85ac43278f3ac23fe5e3da35aca + last_write_checksum: sha1:d22d3513e2b391127df2202ca50b1fb9de605103 pristine_git_object: 22368e44adb1b3ecff58d2b92592710335a062b9 src/mistralai/client/models/agentscompletionstreamrequest.py: id: 1b73f90befc2 - last_write_checksum: sha1:8126924507b41754ec1d4a10613cf189f5ea0aea + last_write_checksum: sha1:02fd1cf62fc203635099ad60fb9b41e82a82e0f8 pristine_git_object: 37d46c79d8964d799679413e14122a5146799eb6 src/mistralai/client/models/agentupdaterequest.py: id: 2d5a3a437819 - last_write_checksum: sha1:97509eeb4cd25d31a0e1f3b4de1288580cb9a5cb + last_write_checksum: sha1:65fdf42d54199ad3b951089bdea26deca0134440 pristine_git_object: 261ac069ce4e2b630d39080edf47bf2ad510ffb4 src/mistralai/client/models/apiendpoint.py: id: 00b34ce0a24d @@ -2078,7 +1832,7 @@ trackedFiles: pristine_git_object: 3ba14ce78e01c92458477bb025b9e5ded074fd4d src/mistralai/client/models/audiochunk.py: id: ce5dce4dced2 - last_write_checksum: sha1:6d8ed87fd3f114b2b04aa15dd24d0dd5b1837215 + last_write_checksum: sha1:5b7ef3c96f0d8b240d1a7354379dbebd911604c3 pristine_git_object: 80d836f27ae65f30c6ca0e1d4d5d585bbf498cfd src/mistralai/client/models/audioencoding.py: id: b14e6a50f730 @@ -2098,7 +1852,7 @@ trackedFiles: pristine_git_object: 350643614e23002bc55e99e2d1807bedd80a0613 src/mistralai/client/models/basemodelcard.py: id: 556ebdc33276 - last_write_checksum: sha1:f524e61a160af83b20f7901afc585f61bfad6e05 + last_write_checksum: sha1:6ebd9dd362ad23d34eb35451af01897662854726 pristine_git_object: 8ce7f139b6018c4a7358a21534532cd3e741fa8a src/mistralai/client/models/batcherror.py: id: 1563e2a576ec @@ -2118,7 +1872,7 @@ trackedFiles: 
pristine_git_object: 2654dac04c126a933f6d045f43f16a30263750dc src/mistralai/client/models/batchjobstatus.py: id: 61e08cf5eea9 - last_write_checksum: sha1:9e042ccd0901fe4fc08fcc8abe5a3f3e1ffe9cbb + last_write_checksum: sha1:f90059b4aaead197100965c648114254e7dc4888 pristine_git_object: 4b28059ba71b394d91f32dba3ba538a73c9af7a5 src/mistralai/client/models/batchrequest.py: id: 6f36819eeb46 @@ -2126,7 +1880,7 @@ trackedFiles: pristine_git_object: 24f50a9af9a74f6bec7e8903a966d114966a36d3 src/mistralai/client/models/builtinconnectors.py: id: 2d276ce938dc - last_write_checksum: sha1:4e94744e3854d4cdc9d1272e4f1d9371f9829a5f + last_write_checksum: sha1:50d2b60942ca1d7c9868ce59bf01ed860c09f313 pristine_git_object: 6a3b2476d54096722eb3e7a271629d108028bd35 src/mistralai/client/models/chatclassificationrequest.py: id: afd9cdc71834 @@ -2134,11 +1888,11 @@ trackedFiles: pristine_git_object: 450810225bb43bbd1539768e291840a210489f0f src/mistralai/client/models/chatcompletionchoice.py: id: 7e6a512f6a04 - last_write_checksum: sha1:bc3fb866e2eb661b1619f118af459d18ba545d40 + last_write_checksum: sha1:dee3be3b6950e355b14cce5be6c34bd5d03ba325 pristine_git_object: 5d888cfd73b82097d647f2f5ecdbdf8beee2e098 src/mistralai/client/models/chatcompletionrequest.py: id: 9979805d8c38 - last_write_checksum: sha1:ccd9f3908c71d6fc3ad57f41301348918b977a6f + last_write_checksum: sha1:6442737fd5552e01ad78ab4cf8bc10e0d9c75d05 pristine_git_object: 30fce28d5e071797a7180753f2825d39cfeac362 src/mistralai/client/models/chatcompletionresponse.py: id: 669d996b8e82 @@ -2146,11 +1900,11 @@ trackedFiles: pristine_git_object: 60a1f561ff29c3bc28ee6aea69b60b9d47c51471 src/mistralai/client/models/chatcompletionstreamrequest.py: id: 18cb2b2415d4 - last_write_checksum: sha1:a067cc25d2e8c5feb146bdb0b69fb5186e77c416 + last_write_checksum: sha1:512f4c05b140757888db465e2bb30a0abcafb1d4 pristine_git_object: 21dad38bb83e9b334850645ffa24e1099b121f6c src/mistralai/client/models/chatmoderationrequest.py: id: 057aecb07275 - 
last_write_checksum: sha1:f93d1758dd8c0f123d8c52d162e3b4c8681bf121 + last_write_checksum: sha1:6c24f39ddd835278773bd72cb2676e8f1fd10e73 pristine_git_object: 631c914d1a4f4453024665eb0a8233ec7a070332 src/mistralai/client/models/checkpointout.py: id: 3866fe32cd7c @@ -2170,15 +1924,15 @@ trackedFiles: pristine_git_object: 89a137c374efc0f8b3ee49f3434f264705f69639 src/mistralai/client/models/classifierdetailedjobout.py: id: d8daeb39ef9f - last_write_checksum: sha1:d33e6a4672b33b6092caec50cc957d98e32058f7 + last_write_checksum: sha1:7e6df794c49d75785fac3bf01ea467a2dcbd224b pristine_git_object: 1de4534fcb12440a004e94bc0eced7483952581d src/mistralai/client/models/classifierftmodelout.py: id: 2903a7123b06 - last_write_checksum: sha1:4662ec585ade8347aeda4f020b7d31978bf8f9bb + last_write_checksum: sha1:78bfdfa3b9188c44fe4cd9cf18bce9e1d1a4cd48 pristine_git_object: a4572108674ea9c209b6224597878d5e824af686 src/mistralai/client/models/classifierjobout.py: id: e19e9c4416cc - last_write_checksum: sha1:0239761cb318518641281f584783bd2b42ec3340 + last_write_checksum: sha1:7384ea39ff4c341e8d84c3a4af664298b31c1440 pristine_git_object: ab1e261d573a30714042af3f20ed439ddbf1d819 src/mistralai/client/models/classifiertargetin.py: id: ed021de1c06c @@ -2198,7 +1952,7 @@ trackedFiles: pristine_git_object: e24c9ddecf60c38e146b8f94ad35be95b3ea2609 src/mistralai/client/models/codeinterpretertool.py: id: 950cd8f4ad49 - last_write_checksum: sha1:533ae809df90e14e4ef6e4e993e20e37f969f39f + last_write_checksum: sha1:9b720eaf4d7243e503e14350f457babbca9cf7af pristine_git_object: faf5b0b78f2d9981bb02eee0c28bba1fdba795b9 src/mistralai/client/models/completionargs.py: id: 3db008bcddca @@ -2214,7 +1968,7 @@ trackedFiles: pristine_git_object: 9790db6fe35e0043f3240c0f7e8172d36dee96f5 src/mistralai/client/models/completiondetailedjobout.py: id: 9bc38dcfbddf - last_write_checksum: sha1:df43d27716d99b6886a2b2a389e4c7b8c0b61630 + last_write_checksum: sha1:0b0f7114471e650b877de2e149b69e772d29905f 
pristine_git_object: 85c0c803cf809338900b7b8dcde774d731b67f8f src/mistralai/client/models/completionevent.py: id: c68817e7e190 @@ -2222,11 +1976,11 @@ trackedFiles: pristine_git_object: 52db911eeb62ec7906b396d6936e3c7a0908bb76 src/mistralai/client/models/completionftmodelout.py: id: 0f5277833b3e - last_write_checksum: sha1:d125468e84529042a19e29d1c34aef70318ddf54 + last_write_checksum: sha1:6ae50b3172f358796cfeb154c7e59f9cdde39e61 pristine_git_object: ccecbb6a59f2994051708e66bce7ece3598a786f src/mistralai/client/models/completionjobout.py: id: 712e6c524f9a - last_write_checksum: sha1:4ca927d2eb17e2f2fe588fd22f6aaa32a4025b07 + last_write_checksum: sha1:4f66641e3d765df1db88554b4399eded4625e08d pristine_git_object: ecd95bb9c93412b222659e6f369d3ff7e13c8bb2 src/mistralai/client/models/completionresponsestreamchoice.py: id: 5969a6bc07f3 @@ -2254,11 +2008,11 @@ trackedFiles: pristine_git_object: f51407bf2a363f705b0b61ed7be4ef6249525af5 src/mistralai/client/models/conversationevents.py: id: 8c8b08d853f6 - last_write_checksum: sha1:e0d920578ca14fa186b3efeee69ed03f7a2aa119 + last_write_checksum: sha1:4d7e8087fa9a074ed2747131c3753e723ba03e0b pristine_git_object: 308588a1f094631935e4229f5538c5092f435d2c src/mistralai/client/models/conversationhistory.py: id: 60a51ff1682b - last_write_checksum: sha1:ed60e311224c3ada9c3768335394a5b338342433 + last_write_checksum: sha1:637f7302571f51bcb5d65c51e6b6e377e8895b96 pristine_git_object: 40bd1e7220160f54b0ab938b3627c77fb4d4f9ef src/mistralai/client/models/conversationinputs.py: id: 711b769f2c40 @@ -2270,11 +2024,11 @@ trackedFiles: pristine_git_object: 1ea05369b95fdaa7d7ae75398669f88826e5bb26 src/mistralai/client/models/conversationrequest.py: id: 58e3ae67f149 - last_write_checksum: sha1:20339231abbf60fb160f2dc24941860304c702fd + last_write_checksum: sha1:0e3cdc7cb34cc8c7f646cc7c2869349747cfd47e pristine_git_object: e3211c4c7b20c162473e619fad6dc0c6cea6b571 src/mistralai/client/models/conversationresponse.py: id: ad7a8472c7bf - 
last_write_checksum: sha1:50fdea156c2f2ce3116d41034094c071a3e136fa + last_write_checksum: sha1:ae6b273f3b1d1aff149d269a19c99d495fdf263e pristine_git_object: 32d0f28f101f51a3ca79e4d57f4913b1c420b189 src/mistralai/client/models/conversationrestartrequest.py: id: 681d90d50514 @@ -2286,7 +2040,7 @@ trackedFiles: pristine_git_object: 689815ebcfe577a1698938c9ccbf100b5d7995f8 src/mistralai/client/models/conversationstreamrequest.py: id: 58d633507527 - last_write_checksum: sha1:9cb79120c78867e12825ac4d504aa55ee5827168 + last_write_checksum: sha1:d4cda0957f6d09ed991e3570b6e8ef81d3cf62af pristine_git_object: 219230a2a8dd7d42cc7f5613ca22cec5fa872750 src/mistralai/client/models/conversationusageinfo.py: id: 6685e3b50b50 @@ -2306,11 +2060,11 @@ trackedFiles: pristine_git_object: 5aa8b68fe3680d3b51127d6a6b6068b1303756e8 src/mistralai/client/models/deltamessage.py: id: 68f53d67a140 - last_write_checksum: sha1:52296fa6d7fc3788b64dcb47aadd0818bcb86e11 + last_write_checksum: sha1:db65faf32a4abc2396eb1f694d3245fcc4173e2f pristine_git_object: 0ae56da86f645e5a0db2a0aa4579342610243300 src/mistralai/client/models/documentlibrarytool.py: id: 3eb3c218f457 - last_write_checksum: sha1:af01ec63a1c5eb7c332b82b3ec1d3553891614c2 + last_write_checksum: sha1:3f3dafea3df855f1fccaa6ece64df55b40b2d4f7 pristine_git_object: 861a58d38125ca5af11772ebde39a7c57c39ad9c src/mistralai/client/models/documentout.py: id: 7a85b9dca506 @@ -2390,7 +2144,7 @@ trackedFiles: pristine_git_object: 4a9678e5aa7405cbe09f59ffbdb6c7927396f06a src/mistralai/client/models/files_api_routes_upload_fileop.py: id: f13b84de6fa7 - last_write_checksum: sha1:3dc679de7b41abb4b0710ade631e818621b6f3bc + last_write_checksum: sha1:2ca94437630dddc55c6dd624d715963b19b97a73 pristine_git_object: 723c6cc264613b3670ac999829e66131b8424849 src/mistralai/client/models/fileschema.py: id: 19cde41ca32a @@ -2418,7 +2172,7 @@ trackedFiles: pristine_git_object: f5b8b2ed45b56d25b387da44c398ae79f3a52c73 
src/mistralai/client/models/ftclassifierlossfunction.py: id: d21e2a36ab1f - last_write_checksum: sha1:69e08ab728e095b8e3846ed2dc142aa1e82a864a + last_write_checksum: sha1:9554b17b3139b54975aae989fb27e1c369bee4cd pristine_git_object: c4ef66e0fe69edace4912f2708f69a6e606c0654 src/mistralai/client/models/ftmodelcapabilitiesout.py: id: f70517be97d4 @@ -2426,7 +2180,7 @@ trackedFiles: pristine_git_object: be31aa3c14fb8fe9154ad8f54e9bf43f586951c7 src/mistralai/client/models/ftmodelcard.py: id: c4f15eed2ca2 - last_write_checksum: sha1:a6a71ce4a89688cb4780697e299a4274f7323e24 + last_write_checksum: sha1:ab559da7dd290e4d2be5c6a3398732de887b2a74 pristine_git_object: 36cb723df8bcde355e19a55105932298a8e2e33a src/mistralai/client/models/function.py: id: 32275a9d8fee @@ -2446,7 +2200,7 @@ trackedFiles: pristine_git_object: ac9e6227647b28bfd135c35bd32ca792d8dd414b src/mistralai/client/models/functioncallevent.py: id: 23b120b8f122 - last_write_checksum: sha1:535874a4593ce1f40f9683fa85159e4c4274f3ee + last_write_checksum: sha1:c0226ca734320b628223f5c5206477b224dff15e pristine_git_object: 4e040585285985cebc7e26ac402b6df8f4c063bb src/mistralai/client/models/functionname.py: id: 000acafdb0c0 @@ -2458,15 +2212,15 @@ trackedFiles: pristine_git_object: a843bf9bdd82b5cf3907e2172ed793a391c5cba2 src/mistralai/client/models/functiontool.py: id: 2e9ef5800117 - last_write_checksum: sha1:8ab806567a2ab6c2e04cb4ce394cbff2ae7aad50 + last_write_checksum: sha1:af5e38a4498149f46abd63eda97f9ccfb66a1fa3 pristine_git_object: 74b50d1bcd2bc0af658bf5293c8cc7f328644fa1 src/mistralai/client/models/githubrepositoryin.py: id: eef26fbd2876 - last_write_checksum: sha1:3b64fb4f34e748ef71fa92241ecdd1c73aa9485a + last_write_checksum: sha1:7736d0a475b47049c35aec59254c5d47b3ae609b pristine_git_object: e56fef9ba187792238991cc9373a7d2ccf0b8c0d src/mistralai/client/models/githubrepositoryout.py: id: d2434a167623 - last_write_checksum: sha1:d2be5c474d3a789491cad50b95e3f25933b0c66a + last_write_checksum: 
sha1:5d9625805bf6eb3c061ebdd73433ca2001e26cb1 pristine_git_object: e3aa9ebc52e8613b15e3ff92a03593e2169dc935 src/mistralai/client/models/httpvalidationerror.py: id: 4099f568a6f8 @@ -2474,7 +2228,7 @@ trackedFiles: pristine_git_object: 34d9b54307db818e51118bc448032e0476688a35 src/mistralai/client/models/imagegenerationtool.py: id: e1532275faa0 - last_write_checksum: sha1:7eaea320c1b602df2e761405644361820ca57d33 + last_write_checksum: sha1:e5d4c986062850ce3ba4f66a8347848332192c21 pristine_git_object: e09dba81314da940b2be64164e9b02d51e72f7b4 src/mistralai/client/models/imageurl.py: id: e4bbf5881fbf @@ -2482,7 +2236,7 @@ trackedFiles: pristine_git_object: 6e61d1ae2ec745774345c36e605748cf7733687b src/mistralai/client/models/imageurlchunk.py: id: 746fde62f637 - last_write_checksum: sha1:2311445f8c12347eab646f1b9ff7c4202642c907 + last_write_checksum: sha1:f6c19195337e3715fac3dc874abfc2333d661c8e pristine_git_object: f967a3c8ced6d5fb4b274454100134e41c5b7a5c src/mistralai/client/models/inputentries.py: id: 44727997dacb @@ -2490,15 +2244,15 @@ trackedFiles: pristine_git_object: 8ae29837a6c090fbe1998562684d3a372a9bdc31 src/mistralai/client/models/inputs.py: id: 84a8007518c7 - last_write_checksum: sha1:3ecd986b0f5a0de3a4c88f06758cfa51068253e9 + last_write_checksum: sha1:62cf4c19b48f68f57f30223d48d06e33d08ae096 pristine_git_object: fb0674760c1191f04e07f066e84ae9684a1431e3 src/mistralai/client/models/instructrequest.py: id: 6d3ad9f896c7 - last_write_checksum: sha1:5f8857f8fffe0b858cfc7bec268480003b562303 + last_write_checksum: sha1:5fabc65cccf9f17ffbd20cd176341b4d78b62a5c pristine_git_object: 1b2f269359700582687fdf4492ea3cef64da48bb src/mistralai/client/models/jobin.py: id: f4d176123ccc - last_write_checksum: sha1:c1ec4b9ea0930612aea1b1c5c5cd419379ab0687 + last_write_checksum: sha1:ae6b1d9bc202db7a49d29f85b75bffea605126c5 pristine_git_object: dc7684fcbecd558fc6e3e3f17c4000ec217285c1 src/mistralai/client/models/jobmetadataout.py: id: 805f41e3292a @@ -2522,23 +2276,23 @@ 
trackedFiles: pristine_git_object: 4536b738442ec9710ddf67f2faf7d30b094d8cd5 src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: id: d175c6e32ecb - last_write_checksum: sha1:07bfc80146492e3608a5c1683e4530de296c0938 + last_write_checksum: sha1:c61f02640ec384778e6f6b1f08dcb31dc5c1fb82 pristine_git_object: b36d3c3ef5abb30abc886876bb66384ea41bab9e src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: id: 81651291187a - last_write_checksum: sha1:eb265e749cc076b2d39c103df48ceeeda6da7f5a + last_write_checksum: sha1:80bc2d32588a115c4ac5571a3c1ffc8a24ab9d45 pristine_git_object: ece0d15a0654ec759904276ad5d95c5619ff016f src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: id: d910fd8fe2d6 - last_write_checksum: sha1:7ee82991b49a615517b3323abbfc0e5928419890 + last_write_checksum: sha1:4f57772cda3075251f36c52a264ebce1328cb486 pristine_git_object: aa5a26098e084885e8c2f63944e7549969899d3c src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: id: cf43028824bf - last_write_checksum: sha1:3fd6b5c7c9ae24d662abd5d3c7ea9699e295e5ff + last_write_checksum: sha1:e7bb3855dabfcaf7b92e6917e6da39246fc01282 pristine_git_object: 7e399b31354e4f09c43efbe9ffe3d938f6af0d8c src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: id: e7ff4a4a4edb - last_write_checksum: sha1:176fef64d07c58da36ca6672ce5440508787dc84 + last_write_checksum: sha1:21d90c0a3fa151bd855d63ed241f518812f26f82 pristine_git_object: ed5938b039be719169e62e033b7735bde7e72503 src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: id: 7cc1c80335a9 @@ -2546,11 +2300,11 @@ trackedFiles: pristine_git_object: e1be0ac00af889a38647b5f7e4f9d26ed09ee7c4 src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: id: 6d9dc624aafd - last_write_checksum: sha1:1a8054c02cd8fd3c48954812e153e97efa58aaef + last_write_checksum: 
sha1:ad615dd8d493fec4f818f19e5745ff52575181aa pristine_git_object: a2b70b37e349c7f5fc6c687fbad015eb218de952 src/mistralai/client/models/jobsout.py: id: 22e91e9631a9 - last_write_checksum: sha1:f2a5aa117953410f0743c2dd024e4a462a0be105 + last_write_checksum: sha1:1bb48570e040fa9ad4408b41fef8ce4ec0bf52be pristine_git_object: 9087704f0660e39f662efbd36f39713202598c43 src/mistralai/client/models/jsonschema.py: id: e1fc1d8a434a @@ -2602,7 +2356,7 @@ trackedFiles: pristine_git_object: f677b4ddc96b51ecd777240844800b2634ca4358 src/mistralai/client/models/libraries_documents_upload_v1op.py: id: "744466971862" - last_write_checksum: sha1:63b6f82a3ed8b0655d3b5dea1811699553d62cb0 + last_write_checksum: sha1:d6b085e01eac97f404a01e137413e159390c1382 pristine_git_object: e2d59d9f1556ca77c0666b2bba3213ef5386f82a src/mistralai/client/models/libraries_get_v1op.py: id: d493f39e7ebb @@ -2662,7 +2416,7 @@ trackedFiles: pristine_git_object: e90d8aa0317e553bfc0cceb4a356cf9994ecfb60 src/mistralai/client/models/messageinputentry.py: id: c0a4b5179095 - last_write_checksum: sha1:def6a5ce05756f76f7da6504bfc25eea166b21ab + last_write_checksum: sha1:e9898424d5129750738adb6a049232162824282d pristine_git_object: 12a31097a88e90645c67a30451a379427cd4fcd3 src/mistralai/client/models/messageoutputcontentchunks.py: id: 2ed248515035 @@ -2674,7 +2428,7 @@ trackedFiles: pristine_git_object: d52e4e3e722ef221f565a0bd40f505385974a0e1 src/mistralai/client/models/messageoutputevent.py: id: a2bbf63615c6 - last_write_checksum: sha1:19dda725e29108b2110903e7883ce442e4e90bd4 + last_write_checksum: sha1:fb98c35064fd9c65fa8c8c0cbc59293067ac793f pristine_git_object: 3db7f5a0908a72f75f6f7303af4ad426a4909d84 src/mistralai/client/models/metricout.py: id: 92d33621dda7 @@ -2694,11 +2448,11 @@ trackedFiles: pristine_git_object: a6db80e73189addcb1e1951a093297e0523f5fa4 src/mistralai/client/models/modelconversation.py: id: fea0a651f888 - last_write_checksum: sha1:35fec41b1dac4a83bdf229de5dd0436916b144c8 + last_write_checksum: 
sha1:beade63589bde3cae79f471a71e3d04d3f132f97 pristine_git_object: 574f053d4186288980754ead28bb6ce19b414064 src/mistralai/client/models/modellist.py: id: 00693c7eec60 - last_write_checksum: sha1:4b9cdd48439f0ebc1aa6637cc93f445fc3e8a424 + last_write_checksum: sha1:d6ff956092c0c930a6db02cbe017bc473403639c pristine_git_object: 6a5209fa6dac59539be338e9ac6ffbefd18057ee src/mistralai/client/models/moderationobject.py: id: 132faad0549a @@ -2734,7 +2488,7 @@ trackedFiles: pristine_git_object: 2813a1ca4c94d690f248a318a9e35d655d80600c src/mistralai/client/models/ocrtableobject.py: id: d74dd0d2ddac - last_write_checksum: sha1:6821e39003e2ca46dc31384c2635e59763fddb98 + last_write_checksum: sha1:d562f3207193c7d5ef5d7b6175eba8006b6c3a73 pristine_git_object: 0c9091de8975d8bd8e673aadbb69a619b96d77e8 src/mistralai/client/models/ocrusageinfo.py: id: 272b7e1785d5 @@ -2762,7 +2516,7 @@ trackedFiles: pristine_git_object: e6a889de576f9e36db551a44d4ed3cf0c032e599 src/mistralai/client/models/realtimetranscriptionerrordetail.py: id: 5bd25cdf9c7a - last_write_checksum: sha1:49ff15eb41e8964ba3b150e2fca70f6529dee58f + last_write_checksum: sha1:471824f03586b63688de43608d6c756b8a156e11 pristine_git_object: 27bb8d872792723b06238b3f0eebed815948fd63 src/mistralai/client/models/realtimetranscriptionsession.py: id: 02517fa5411a @@ -2786,11 +2540,11 @@ trackedFiles: pristine_git_object: 7b0a35c44050b6fca868479e261805a77f33e230 src/mistralai/client/models/responsedoneevent.py: id: cf8a686bf82c - last_write_checksum: sha1:1fa63522f52a48a8e328dc5b3fe2c6f5206b04cc + last_write_checksum: sha1:25972ca80ff7fd7a0d6dfe98718be52580dacc61 pristine_git_object: 5405625692cb22c60a7b5f5a6f1b58cee5676576 src/mistralai/client/models/responseerrorevent.py: id: b286d74e8724 - last_write_checksum: sha1:f570a02791afb3fe60e99cbb4993c2d1f8dc476d + last_write_checksum: sha1:a4767e8820ae840559fc55c8fcd346dea41a386e pristine_git_object: c9ef95a04c91c32e7a7973309e2174b7e776f099 src/mistralai/client/models/responseformat.py: 
id: 6ab8bc8d22c0 @@ -2798,11 +2552,11 @@ trackedFiles: pristine_git_object: 5899b0175cefd4159eb680a3715a72fa78577ba4 src/mistralai/client/models/responseformats.py: id: c4462a05fb08 - last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b + last_write_checksum: sha1:863c7ec4c567d8f0c4e6305b47896424726e71be pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 src/mistralai/client/models/responsestartedevent.py: id: 24f54ee8b0f2 - last_write_checksum: sha1:5f7a4fad7c13f89b6e3672e422d5ef902aa5bf03 + last_write_checksum: sha1:1bd2a884b9f66eb811fc83d8c3644913dfa80ab1 pristine_git_object: dc6a10f91e2bb0d13a582ed03e7db2089b75bcf7 src/mistralai/client/models/responsevalidationerror.py: id: c244a88981e0 @@ -2810,7 +2564,7 @@ trackedFiles: pristine_git_object: bab5d0b70e0bb2ea567a16a1a7c5db839651836f src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py: id: 6fefa90ca351 - last_write_checksum: sha1:c34e2f55663cafe353e628fbd978a6be7ca6a467 + last_write_checksum: sha1:f7308b269e12b2554a27de9d41312097d0d55d82 pristine_git_object: 7fdcd37d5879aaca158f459df830a5a4dc55bfa0 src/mistralai/client/models/retrievefileout.py: id: 8bb5859aa0d0 @@ -2850,11 +2604,11 @@ trackedFiles: pristine_git_object: 181b327ea73a9bcf9fb90f95633da71cee96e599 src/mistralai/client/models/ssetypes.py: id: 1733e4765106 - last_write_checksum: sha1:1901bf6feee92ac100113e0a98dc0abe6e769375 + last_write_checksum: sha1:8154966cda84ddd5225936ee47c87df1143ee1f1 pristine_git_object: 796f0327cbb1372c1b2a817a7db39f8f185a59be src/mistralai/client/models/systemmessage.py: id: 500ef6e85ba1 - last_write_checksum: sha1:0e8e34fa66e4bb8bf1128b3007ef72bf33690e1e + last_write_checksum: sha1:4ca4da49acae5fb508584b1776d368eba7d4a119 pristine_git_object: 9e01bc57bd17a5ecf6be5fee3383bbb9e03a8ab5 src/mistralai/client/models/systemmessagecontentchunks.py: id: 297e8905d5af @@ -2886,23 +2640,23 @@ trackedFiles: pristine_git_object: 2c7f6cbf6ebfbbdcce7d82b885b5e07b6b52d066 
src/mistralai/client/models/toolchoiceenum.py: id: c7798801f860 - last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 + last_write_checksum: sha1:d958ef93b303539226fdab0fd46c8ea21d24cea2 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 src/mistralai/client/models/toolexecutiondeltaevent.py: id: df8f17cf3e07 - last_write_checksum: sha1:32257ebf812efe05763df71e498018d53884a32d + last_write_checksum: sha1:96147badaad7eb961d224b29d9134dba8fc35f49 pristine_git_object: 0268e6a0d9b3c25afe1022e61a630e926a50f135 src/mistralai/client/models/toolexecutiondoneevent.py: id: 514fdee7d99f - last_write_checksum: sha1:e99be4db8d87bb3aa9383c062846d35923721292 + last_write_checksum: sha1:bc439993c647ba471b7f1581f72e094b99bd5c14 pristine_git_object: 854baee98a119caf237ca0f39e4ddd7a36577771 src/mistralai/client/models/toolexecutionentry.py: id: 76db69eebe41 - last_write_checksum: sha1:1577af968f800b28a3da2006c44016a901532591 + last_write_checksum: sha1:4fb31b58961ce5f43233d91fb6efb89c624fab44 pristine_git_object: 839709fb8ea63cc358de9f5e71180bf9e94cf5a5 src/mistralai/client/models/toolexecutionstartedevent.py: id: 40fadb8e49a1 - last_write_checksum: sha1:49922a41c52e7f25eab26c8a34ec481c319c62b4 + last_write_checksum: sha1:d71ec6e61c1a881be8e02853f1ba450c36ec16e3 pristine_git_object: 66438cfc33b171868f597ff3f80a82a40d1396f4 src/mistralai/client/models/toolfilechunk.py: id: 26c8aadf416a @@ -2910,7 +2664,7 @@ trackedFiles: pristine_git_object: 62b5ffeda19a7fa614ccc5e390450f2452dd119d src/mistralai/client/models/toolmessage.py: id: 15f1af161031 - last_write_checksum: sha1:47b4b3426ecde263ce4f2918ff98135952447b40 + last_write_checksum: sha1:58370491597186ddf08c8648f1e24abc9c852c26 pristine_git_object: eae2d2aef69dc4134f42714d69625e7b6c43e8c9 src/mistralai/client/models/toolreferencechunk.py: id: 822e9f3e70de @@ -2930,31 +2684,31 @@ trackedFiles: pristine_git_object: 24c0b92e424e91d40972c0826553a7d344a8f932 src/mistralai/client/models/transcriptionsegmentchunk.py: 
id: d1e6f3bdc74b - last_write_checksum: sha1:5f16b05debe943432b69d390844216a703adf71a + last_write_checksum: sha1:23714fcd3791d09a7cc9a1bddd2f2203861d1bce pristine_git_object: c89d84fcf3842da23e1f710309446b4c592ceeb3 src/mistralai/client/models/transcriptionstreamdone.py: id: 066a9158ed09 - last_write_checksum: sha1:1f9a29e826dcc91ed0c7f08b69aaa81987d810b7 + last_write_checksum: sha1:09bd7a12a1985d377883be53815f88195dcdce57 pristine_git_object: add17f562385c3befc2932b16448901154372ca6 src/mistralai/client/models/transcriptionstreamevents.py: id: b50b3d74f16f - last_write_checksum: sha1:38d2ff40e9d4f5d09fa24eef0925d306cf434bf0 + last_write_checksum: sha1:651ae56098858fe8103ebd280bbdf2f74550794c pristine_git_object: caaf943a4662ecccab96183f63c226eaefee2882 src/mistralai/client/models/transcriptionstreameventtypes.py: id: 6f71f6fbf4c5 - last_write_checksum: sha1:38b7334aebf400e1abb2b20b0f2890880f0fc2f7 + last_write_checksum: sha1:d7671637063c19222c20b8334abf92abe3d30517 pristine_git_object: 4a910f0abca2912746cac60fd5a16bd5464f2457 src/mistralai/client/models/transcriptionstreamlanguage.py: id: e94333e4bc27 - last_write_checksum: sha1:9427411056a6239956ed3963af53c452e6fc4705 + last_write_checksum: sha1:7da587e67d635164bb986a3151a43b9a71b28c4d pristine_git_object: b47024adfca2d8da6f1f01ce573bcc339cbbc63a src/mistralai/client/models/transcriptionstreamsegmentdelta.py: id: c0a882ce57e5 - last_write_checksum: sha1:3cc8664a90c67c412fc3c58e6841571c476697ea + last_write_checksum: sha1:91631a724a84abf4fd603ba7a7630b5e7d970944 pristine_git_object: 7cfffb63f31d10a247e066c8f422e4f6af2cf489 src/mistralai/client/models/transcriptionstreamtextdelta.py: id: 6086dc081147 - last_write_checksum: sha1:d68e4b6cefa3a1492b461fbe17cff5c5216b58f5 + last_write_checksum: sha1:1c065d9a2874c4b315fe3cd191f94ef3e8f1cc43 pristine_git_object: ce279cf67ffc4e225ce37490f4ffd0c0d64fe993 src/mistralai/client/models/unarchiveftmodelout.py: id: 9dbc3bfb71ed @@ -2974,7 +2728,7 @@ trackedFiles: 
pristine_git_object: f1186d97357320f4bfc9d3f2a626f58d2b1a38d0 src/mistralai/client/models/usermessage.py: id: cb583483acf4 - last_write_checksum: sha1:1c15371710f18d7ed8f612cc450f4873f83f1eb9 + last_write_checksum: sha1:1e33aea6971835069dc9c862351d507f48d4ff8d pristine_git_object: 8d92cea803368e996d68dc2f3a2dadd1d06a4675 src/mistralai/client/models/validationerror.py: id: 15df3c7368ab @@ -2982,23 +2736,23 @@ trackedFiles: pristine_git_object: 352409be88a1175073e5438d6da86fc9a54896fc src/mistralai/client/models/wandbintegration.py: id: 4823c1e80942 - last_write_checksum: sha1:a76661e93fd3b6d8a3d210ef610a40ff1da203f7 + last_write_checksum: sha1:b33912c4e08c07b0139cc3c31f93e899f797b5f2 pristine_git_object: 89489fb4527c6515a609bcb533ef59ab516c7a38 src/mistralai/client/models/wandbintegrationout.py: id: 6b103d74195c - last_write_checksum: sha1:e648c37d559f8cec36b3c8e06979d8ac053a2ad6 + last_write_checksum: sha1:f64af59d7fe3d068e185776b01d43b7fdab1f129 pristine_git_object: a7f9afeb6683a173115371a686af5f95e2d29056 src/mistralai/client/models/websearchpremiumtool.py: id: bfe88af887e3 - last_write_checksum: sha1:af6e2fae78c2f22b98d58ab55b365d1688dba8cb + last_write_checksum: sha1:689087bc6c49bbc8b286e5b0155a6e5f6a1dc47d pristine_git_object: 8d2d4b5dfea50a34ac744181790bf5db84809b1c src/mistralai/client/models/websearchtool.py: id: 26b0903423e5 - last_write_checksum: sha1:49295d52d59e914620dedf9d22fb2290896039cf + last_write_checksum: sha1:93015f750a125a8297f9455278ebe482794ba958 pristine_git_object: ba4cc09f84faebb438a631db6ac328fea2ced609 src/mistralai/client/models_.py: id: 1d277958a843 - last_write_checksum: sha1:8f76c2395cb534e94366033007df24bf56c43ac7 + last_write_checksum: sha1:987921077f5b5535c39a21216585fc1bf9aa8811 pristine_git_object: 5ef9da096e58023aaa582f31717b4ee7a4b720b0 src/mistralai/client/ocr.py: id: 2f804a12fc62 diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index b47a192d..20576b9d 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -8,11 
+8,13 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false + nameResolutionFeb2025: true parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false - sharedErrorComponentsApr2025: false + securityFeb2025: true + sharedErrorComponentsApr2025: true + methodSignaturesApr2024: true + sharedNestedComponentsJan2026: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -26,7 +28,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0a1 + version: 2.0.0a2 additionalDependencies: dev: pytest: ^8.2.2 @@ -47,9 +49,12 @@ python: envVarPrefix: MISTRAL fixFlags: responseRequiredSep2024: true + flatAdditionalProperties: true flattenGlobalSecurity: true flattenRequests: true flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + forwardCompatibleUnionsByDefault: tagged-only imports: option: openapi paths: @@ -68,7 +73,7 @@ python: outputModelSuffix: output packageManager: uv packageName: mistralai - preApplyUnionDiscriminators: false + preApplyUnionDiscriminators: true pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat diff --git a/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock b/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock new file mode 100644 index 00000000..d6937e41 --- /dev/null +++ b/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock @@ -0,0 +1,799 @@ +src/mistralai/client/_hooks/sdkhooks.py +docs/models/messageoutputeventcontent.md +docs/models/classificationresponse.md +docs/models/tooltypes.md +docs/models/toolexecutionstartedevent.md +docs/models/unarchiveftmodeloutobject.md +src/mistralai/client/models/conversationrequest.py +docs/models/agentconversationobject.md +src/mistralai/client/models/listlibraryout.py +docs/models/agentsapiv1agentsupdaterequest.md +src/mistralai/client/models/agentaliasresponse.py 
+docs/models/embeddingresponse.md +docs/models/agentsapiv1agentsgetversionrequest.md +src/mistralai/client/models/libraryin.py +docs/models/agentscompletionstreamrequest.md +docs/models/function.md +docs/models/agentsapiv1agentsgetagentversion.md +docs/models/imagegenerationtool.md +docs/models/classifiertargetin.md +src/mistralai/client/models/realtimetranscriptionsession.py +src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py +docs/models/agentsapiv1conversationsgetrequest.md +docs/models/messageoutputentry.md +docs/models/classificationrequestinputs.md +docs/models/chatcompletionrequestmessage.md +docs/models/thinking.md +src/mistralai/client/models/conversationinputs.py +docs/models/functionresultentry.md +docs/models/fimcompletionstreamrequeststop.md +docs/models/librariesupdatev1request.md +src/mistralai/client/models/agents_api_v1_conversations_appendop.py +docs/models/paginationinfo.md +docs/models/agenthandoffentry.md +docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md +docs/models/moderationresponse.md +docs/models/toolexecutionentryobject.md +docs/models/completionresponsestreamchoicefinishreason.md +docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md +docs/models/modelcapabilities.md +docs/models/responseformats.md +docs/models/agentupdaterequest.md +src/mistralai/client/models/transcriptionstreamsegmentdelta.py +docs/models/sharingin.md +docs/models/responseformat.md +docs/models/imageurl.md +src/mistralai/client/models/processingstatusout.py +docs/models/messageoutputevent.md +src/mistralai/client/models/conversationusageinfo.py +src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +src/mistralai/client/models/agents_api_v1_agents_get_versionop.py +src/mistralai/client/models/libraries_documents_get_v1op.py +docs/models/attributes.md +docs/models/agentscompletionrequeststop.md +src/mistralai/client/models/moderationresponse.py 
+src/mistralai/client/models/classifiertrainingparametersin.py +docs/models/audiochunk.md +src/mistralai/client/models/ocrrequest.py +src/mistralai/client/models/file.py +src/mistralai/client/models/ocrresponse.py +src/mistralai/client/models/classifiertargetin.py +docs/models/agentconversationagentversion.md +docs/models/classificationtargetresult.md +docs/models/tableformat.md +docs/models/classifiertrainingparameters.md +src/mistralai/client/models/shareenum.py +.vscode/settings.json +docs/models/messageoutputentrycontent.md +py.typed +docs/models/agentscompletionrequest.md +docs/models/completionjoboutrepository.md +src/mistralai/client/models/batchrequest.py +docs/models/entry.md +src/mistralai/client/models/modelcapabilities.py +docs/models/file.md +src/mistralai/client/models/mistralpromptmode.py +scripts/publish.sh +docs/models/agentscompletionstreamrequestmessage.md +docs/models/messageinputentrytype.md +src/mistralai/client/__init__.py +src/mistralai/client/_version.py +src/mistralai/client/models/ocrpageobject.py +docs/models/ocrimageobject.md +src/mistralai/client/basesdk.py +docs/models/ocrpagedimensions.md +src/mistralai/client/httpclient.py +docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md +src/mistralai/client/py.typed +src/mistralai/client/types/__init__.py +docs/models/agentsapiv1agentsupdateversionrequest.md +src/mistralai/client/types/basemodel.py +src/mistralai/client/utils/__init__.py +src/mistralai/client/utils/annotations.py +src/mistralai/client/utils/datetimes.py +src/mistralai/client/utils/enums.py +src/mistralai/client/models/inputs.py +src/mistralai/client/utils/eventstreaming.py +src/mistralai/client/utils/forms.py +src/mistralai/client/utils/headers.py +src/mistralai/client/models/legacyjobmetadataout.py +src/mistralai/client/utils/logger.py +src/mistralai/client/utils/metadata.py +src/mistralai/client/utils/queryparams.py +src/mistralai/client/utils/requestbodies.py 
+src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +docs/models/toolexecutionentryname.md +src/mistralai/client/utils/retries.py +src/mistralai/client/utils/security.py +src/mistralai/client/models/toolfilechunk.py +src/mistralai/client/utils/serializers.py +src/mistralai/client/models/transcriptionstreamdone.py +src/mistralai/client/utils/unmarshal_json_response.py +src/mistralai/client/utils/url.py +src/mistralai/client/utils/values.py +src/mistralai/client/models/responsevalidationerror.py +src/mistralai/client/models/retrievefileout.py +src/mistralai/client/models/mistralerror.py +docs/models/apiendpoint.md +src/mistralai/client/models/sdkerror.py +docs/models/jobsout.md +src/mistralai/client/models/no_response_error.py +docs/models/conversationrestartstreamrequesthandoffexecution.md +docs/models/functiontool.md +docs/models/agentsapiv1conversationsappendstreamrequest.md +docs/models/agenthandoffentryobject.md +docs/models/transcriptionstreameventtypes.md +docs/models/messageoutputeventrole.md +src/mistralai/client/models/modellist.py +docs/models/responseretrievemodelv1modelsmodelidget.md +docs/models/referencechunktype.md +docs/models/chatclassificationrequest.md +src/mistralai/client/models/responseformats.py +docs/models/librariesdocumentsdeletev1request.md +src/mistralai/client/models/conversationresponse.py +src/mistralai/client/models/completionargsstop.py +src/mistralai/client/models/contentchunk.py +docs/models/classifierdetailedjoboutstatus.md +docs/models/listlibraryout.md +docs/models/transcriptionstreamevents.md +src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +docs/models/chatcompletionrequeststop.md +src/mistralai/client/models/libraries_update_v1op.py +src/mistralai/client/models/websearchtool.py +src/mistralai/client/models/classifiertrainingparameters.py +docs/models/validationerror.md +src/mistralai/client/models/documentlibrarytool.py 
+src/mistralai/client/models/responsestartedevent.py +docs/models/document.md +src/mistralai/client/models/filesignedurl.py +src/mistralai/client/models/fimcompletionresponse.py +docs/models/agentscompletionstreamrequeststop.md +docs/models/agenthandoffentrytype.md +docs/models/conversationmessages.md +src/mistralai/client/models/responsedoneevent.py +docs/models/completionresponsestreamchoice.md +docs/models/fimcompletionresponse.md +src/mistralai/client/models/unarchiveftmodelout.py +src/mistralai/client/conversations.py +src/mistralai/client/models/toolexecutionstartedevent.py +src/mistralai/client/models/jsonschema.py +docs/models/completionftmodelout.md +src/mistralai/client/models/fimcompletionstreamrequest.py +docs/models/chatcompletionrequesttoolchoice.md +src/mistralai/client/models/tooltypes.py +src/mistralai/client/models/functionname.py +docs/models/functionresultentryobject.md +docs/models/classifierjobout.md +src/mistralai/client/models/listfilesout.py +src/mistralai/client/models/agents_api_v1_agents_listop.py +src/mistralai/client/models/imageurl.py +src/mistralai/client/models/chatcompletionchoice.py +src/mistralai/client/sdk.py +docs/models/conversationrequesttool.md +docs/models/chatcompletionrequest.md +docs/models/librariesdeletev1request.md +src/mistralai/client/models/chatcompletionresponse.py +docs/models/toolreferencechunktool.md +src/mistralai/client/_hooks/types.py +src/mistralai/client/models/agents_api_v1_conversations_deleteop.py +docs/models/systemmessagecontentchunks.md +src/mistralai/client/models/sharingin.py +docs/models/completionjoboutobject.md +docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md +src/mistralai/client/models/functionresultentry.py +docs/models/agentsapiv1conversationsdeleterequest.md +docs/models/githubrepositoryout.md +docs/models/retrievemodelv1modelsmodelidgetrequest.md +docs/models/conversationstreamrequest.md +docs/models/agentsapiv1conversationsmessagesrequest.md +docs/models/sharingout.md 
+docs/models/archiveftmodelout.md +docs/models/listdocumentout.md +docs/models/toolreferencechunk.md +docs/models/instructrequestinputs.md +src/mistralai/client/models/deltamessage.py +src/mistralai/client/models/tool.py +src/mistralai/client/beta_agents.py +src/mistralai/client/models/toolcall.py +docs/models/jobin.md +src/mistralai/client/models/libraries_documents_upload_v1op.py +src/mistralai/client/models/toolexecutiondoneevent.py +docs/models/conversationrequestagentversion.md +docs/models/listsharingout.md +docs/models/completiondetailedjoboutrepository.md +docs/models/completionftmodeloutobject.md +src/mistralai/client/models/agentcreationrequest.py +docs/models/functioncallentry.md +src/mistralai/client/models/agents_api_v1_conversations_getop.py +src/mistralai/client/models/filepurpose.py +src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py +src/mistralai/client/models/jobsout.py +docs/models/agentsapiv1conversationsappendrequest.md +docs/models/jobsapiroutesbatchgetbatchjobsrequest.md +src/mistralai/client/models/audiotranscriptionrequest.py +src/mistralai/client/models/agents_api_v1_agents_update_versionop.py +src/mistralai/client/models/prediction.py +docs/models/conversationinputs.md +docs/models/agenthandoffdoneevent.md +docs/models/finetuneablemodeltype.md +src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py +src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +docs/models/conversationrestartrequest.md +src/mistralai/client/models/ocrimageobject.py +docs/models/security.md +src/mistralai/client/models/libraryinupdate.py +docs/models/fimcompletionrequest.md +docs/models/ocrusageinfo.md +docs/models/completionjoboutintegration.md +src/mistralai/client/libraries.py +src/mistralai/client/models/wandbintegration.py +src/mistralai/client/models/ocrpagedimensions.py +src/mistralai/client/models/jobin.py +docs/models/conversationrestartstreamrequestagentversion.md 
+src/mistralai/client/models/libraries_documents_reprocess_v1op.py +docs/models/agentsapiv1agentsgetrequest.md +src/mistralai/client/models/paginationinfo.py +src/mistralai/client/models/jobmetadataout.py +docs/models/assistantmessage.md +src/mistralai/client/models/conversationappendstreamrequest.py +docs/models/librariesdocumentsgettextcontentv1request.md +docs/models/realtimetranscriptionerror.md +src/mistralai/client/models/completiondetailedjobout.py +src/mistralai/client/fine_tuning_jobs.py +src/mistralai/client/models/documentout.py +docs/models/librariesgetv1request.md +docs/models/referencechunk.md +src/mistralai/client/models/completiontrainingparameters.py +src/mistralai/client/agents.py +src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py +src/mistralai/client/models/toolchoice.py +docs/models/requestsource.md +docs/models/embeddingrequestinputs.md +src/mistralai/client/models/imagegenerationtool.py +src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py +docs/models/jobsoutobject.md +docs/models/librariesdocumentsreprocessv1request.md +src/mistralai/client/models/audiotranscriptionrequeststream.py +docs/models/tool.md +src/mistralai/client/models/uploadfileout.py +src/mistralai/client/models/timestampgranularity.py +src/mistralai/client/models/metricout.py +docs/models/jobmetadataout.md +src/mistralai/client/models/files_api_routes_upload_fileop.py +docs/models/chatmoderationrequestinputs1.md +src/mistralai/client/models/transcriptionstreameventtypes.py +src/mistralai/client/models/completionchunk.py +src/mistralai/client/models/conversationevents.py +docs/models/agent.md +src/mistralai/client/models/documenttextcontent.py +docs/models/embeddingresponsedata.md +docs/models/codeinterpretertool.md +src/mistralai/client/models/deletemodelout.py +docs/models/agenttool.md +src/mistralai/client/models/completionresponsestreamchoice.py +src/mistralai/client/models/audiochunk.py +docs/models/functioncallevent.md 
+docs/models/transcriptionstreamtextdelta.md +docs/models/completiontrainingparametersin.md +docs/models/conversationappendrequesthandoffexecution.md +docs/models/chatcompletionchoicefinishreason.md +src/mistralai/client/models/libraries_documents_get_status_v1op.py +docs/models/libraryinupdate.md +src/mistralai/client/models/modelconversation.py +docs/models/completiondetailedjobout.md +docs/models/realtimetranscriptionsessioncreated.md +docs/models/classifierjoboutobject.md +docs/models/filesapiroutesretrievefilerequest.md +src/mistralai/client/models/trainingfile.py +docs/models/multipartbodyparams.md +src/mistralai/client/models/libraries_delete_v1op.py +docs/models/sampletype.md +src/mistralai/client/models/functioncallevent.py +src/mistralai/client/models/imageurlchunk.py +src/mistralai/client/models/libraries_documents_delete_v1op.py +src/mistralai/client/models/agentconversation.py +src/mistralai/client/models/chatclassificationrequest.py +docs/models/ftmodelcapabilitiesout.md +docs/models/classifierftmodelout.md +docs/models/deletemodelv1modelsmodeliddeleterequest.md +docs/models/messageoutputentryrole.md +docs/models/eventout.md +docs/models/systemmessage.md +src/mistralai/client/models/sampletype.py +docs/models/conversationevents.md +docs/models/fileschema.md +src/mistralai/client/models/agentscompletionrequest.py +src/mistralai/client/models/chatmoderationrequest.py +src/mistralai/client/models/classifierftmodelout.py +docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md +docs/models/chatcompletionresponse.md +src/mistralai/client/models/toolmessage.py +src/mistralai/client/accesses.py +src/mistralai/client/models/source.py +docs/models/documenturlchunk.md +docs/models/updateftmodelin.md +src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +docs/models/toolreferencechunktype.md +src/mistralai/client/models/files_api_routes_get_signed_urlop.py +src/mistralai/client/models/responseerrorevent.py 
+docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md +docs/models/thinkchunk.md +docs/models/agentcreationrequesttool.md +docs/models/completiondetailedjoboutobject.md +src/mistralai/client/models/filechunk.py +docs/models/agentcreationrequest.md +docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md +docs/models/utils/retryconfig.md +docs/models/loc.md +docs/models/filesignedurl.md +src/mistralai/client/models/embeddingdtype.py +docs/models/chatcompletionstreamrequest.md +docs/models/audioformat.md +docs/models/transcriptionstreamsegmentdelta.md +docs/models/inputsmessage.md +docs/models/instructrequest.md +src/mistralai/client/models/batchjobout.py +docs/models/classifiertargetout.md +docs/models/filesapiroutesgetsignedurlrequest.md +docs/models/conversationappendrequest.md +docs/models/legacyjobmetadataoutobject.md +src/mistralai/client/models/messageoutputentry.py +docs/models/messageinputentryobject.md +src/mistralai/client/models/embeddingresponse.py +src/mistralai/client/models/documenturlchunk.py +docs/models/usermessage.md +src/mistralai/client/models/apiendpoint.py +src/mistralai/client/models/batchjobstatus.py +docs/models/jobsapiroutesbatchgetbatchjobrequest.md +docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md +docs/models/wandbintegration.md +docs/models/conversationmessagesobject.md +docs/models/utils/retryconfig.md +docs/models/fimcompletionstreamrequest.md +docs/models/batchrequest.md +docs/models/agentsapiv1conversationslistresponse.md +docs/models/conversationhistory.md +docs/sdks/agents/README.md +docs/models/transcriptionresponse.md +src/mistralai/client/models/files_api_routes_download_fileop.py +src/mistralai/client/models/embeddingrequest.py +src/mistralai/client/models/transcriptionresponse.py +src/mistralai/client/models/libraries_documents_list_v1op.py +src/mistralai/client/models/githubrepositoryin.py +docs/models/librariesdocumentsgetstatusv1request.md +docs/models/modelconversationtool.md 
+.gitattributes +docs/models/functioncallentryarguments.md +src/mistralai/client/models/ftclassifierlossfunction.py +src/mistralai/client/batch.py +docs/models/classificationrequest.md +src/mistralai/client/models/realtimetranscriptionerrordetail.py +docs/models/hyperparameters.md +docs/models/utils/retryconfig.md +docs/models/moderationobject.md +docs/models/classifierjoboutstatus.md +docs/models/agentupdaterequesttool.md +docs/models/chatcompletionstreamrequestmessage.md +docs/models/completiondetailedjoboutintegration.md +src/mistralai/client/models/transcriptionstreamtextdelta.py +src/mistralai/client/models/libraries_get_v1op.py +docs/models/agentscompletionrequesttoolchoice.md +src/mistralai/client/models/deletefileout.py +docs/models/completionevent.md +src/mistralai/client/chat.py +src/mistralai/client/models/completiontrainingparametersin.py +docs/models/librariesdocumentsupdatev1request.md +docs/models/instructrequestmessage.md +src/mistralai/client/models/documentupdatein.py +docs/models/toolfilechunk.md +src/mistralai/client/models/messageinputcontentchunks.py +src/mistralai/client/models/files_api_routes_delete_fileop.py +docs/models/utils/retryconfig.md +docs/models/assistantmessagerole.md +docs/sdks/transcriptions/README.md +docs/models/librariessharedeletev1request.md +src/mistralai/client/models/moderationobject.py +docs/models/unarchiveftmodelout.md +src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py +docs/models/messageoutputentrytype.md +docs/models/functioncall.md +docs/models/toolexecutiondeltaevent.md +src/mistralai/client/models/realtimetranscriptionerror.py +docs/models/agentsapiv1agentslistrequest.md +src/mistralai/client/models/websearchpremiumtool.py +src/mistralai/client/models/realtimetranscriptionsessionupdated.py +src/mistralai/client/models/libraries_documents_get_text_content_v1op.py +docs/models/agentscompletionstreamrequesttoolchoice.md +docs/models/textchunk.md +docs/models/toolcall.md 
+docs/models/assistantmessagecontent.md +src/mistralai/client/models/chatcompletionrequest.py +src/mistralai/client/models/usermessage.py +docs/models/outputcontentchunks.md +docs/models/librariesdocumentsuploadv1request.md +docs/models/entitytype.md +src/mistralai/client/models/basemodelcard.py +docs/models/toolexecutionentrytype.md +docs/models/shareenum.md +docs/models/imageurlunion.md +docs/models/conversationappendstreamrequest.md +docs/models/websearchpremiumtool.md +docs/models/utils/retryconfig.md +docs/models/fimcompletionrequeststop.md +src/mistralai/client/models/classificationtargetresult.py +src/mistralai/client/audio.py +docs/models/chatmoderationrequestinputs3.md +docs/models/response.md +src/mistralai/client/models/referencechunk.py +docs/models/jobinrepository.md +src/mistralai/client/models/files_api_routes_retrieve_fileop.py +src/mistralai/client/sdkconfiguration.py +src/mistralai/client/models/agents_api_v1_conversations_messagesop.py +src/mistralai/client/models/instructrequest.py +src/mistralai/client/models/classifiertargetout.py +docs/models/classifierdetailedjoboutobject.md +src/mistralai/client/models/inputentries.py +src/mistralai/client/models/toolchoiceenum.py +docs/models/chatcompletionstreamrequesttoolchoice.md +docs/models/agentconversation.md +docs/models/utils/retryconfig.md +src/mistralai/client/models/functioncall.py +docs/models/mistralpromptmode.md +docs/models/conversationresponseobject.md +src/mistralai/client/models/ocrtableobject.py +src/mistralai/client/models/toolexecutionentry.py +docs/models/classifierdetailedjobout.md +docs/models/conversationresponse.md +docs/models/agentsapiv1agentslistversionaliasesrequest.md +docs/models/conversationeventsdata.md +src/mistralai/client/models/ocrusageinfo.py +src/mistralai/client/models/ftmodelcard.py +src/mistralai/client/models/libraries_share_list_v1op.py +docs/models/modellistdata.md +docs/models/messageoutputcontentchunks.md +docs/models/modelconversation.md 
+docs/models/batchjobstatus.md +docs/models/encodingformat.md +docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md +docs/models/utils/retryconfig.md +docs/models/modellist.md +docs/models/textchunktype.md +docs/models/completionargs.md +docs/models/agenthandoffstartedevent.md +docs/models/basemodelcard.md +src/mistralai/client/models/classifierjobout.py +docs/models/batchjobout.md +docs/models/conversationstreamrequestagentversion.md +docs/models/filesapiroutesdownloadfilerequest.md +src/mistralai/client/models/fileschema.py +docs/models/completiontrainingparameters.md +docs/models/wandbintegrationout.md +docs/models/agentobject.md +src/mistralai/client/models/classifierdetailedjobout.py +src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +USAGE.md +docs/models/deltamessage.md +docs/models/messageinputentry.md +docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md +docs/models/filechunk.md +src/mistralai/client/models/agent.py +src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py +src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py +docs/models/classifierftmodeloutobject.md +src/mistralai/client/models/ftmodelcapabilitiesout.py +src/mistralai/client/models/listsharingout.py +src/mistralai/client/models/systemmessagecontentchunks.py +src/mistralai/client/models/agents_api_v1_agents_updateop.py +docs/models/retrievefileout.md +src/mistralai/client/models/agents_api_v1_conversations_historyop.py +src/mistralai/client/fim.py +docs/models/embeddingdtype.md +src/mistralai/client/models/conversationrestartstreamrequest.py +src/mistralai/client/models/completionargs.py +docs/models/toolexecutionstartedeventname.md +src/mistralai/client/models/transcriptionstreamlanguage.py +docs/models/librariessharelistv1request.md +src/mistralai/client/fine_tuning.py +docs/models/agentsapiv1conversationsrestartrequest.md +docs/models/conversationrestartstreamrequest.md 
+docs/models/transcriptionstreamlanguage.md +docs/models/toolexecutiondoneeventname.md +docs/models/classifierjoboutintegration.md +docs/models/classifiertrainingparametersin.md +src/mistralai/client/models/agentupdaterequest.py +docs/models/agentscompletionrequestmessage.md +docs/models/chatmoderationrequest.md +docs/models/chatcompletionchoice.md +docs/models/batchjoboutobject.md +docs/models/toolchoiceenum.md +docs/models/ocrrequest.md +src/mistralai/client/models/updateftmodelin.py +docs/models/classifierdetailedjoboutintegration.md +src/mistralai/client/models/agenthandoffdoneevent.py +src/mistralai/client/models/files_api_routes_list_filesop.py +src/mistralai/client/ocr.py +docs/models/embeddingrequest.md +src/mistralai/client/models/conversationstreamrequest.py +src/mistralai/client/models/thinkchunk.py +docs/models/toolchoice.md +docs/models/documentupload.md +docs/models/imageurlchunktype.md +docs/models/conversationrestartrequestagentversion.md +docs/models/transcriptionstreamdone.md +src/mistralai/client/models/libraryout.py +src/mistralai/client/models/conversationappendrequest.py +src/mistralai/client/models/audioformat.py +docs/models/conversationhistoryobject.md +docs/models/ftclassifierlossfunction.md +docs/models/websearchtool.md +docs/models/messageoutputentryobject.md +src/mistralai/client/models/batchjobsout.py +docs/models/agentsapiv1agentsdeleterequest.md +docs/models/libraryout.md +docs/models/batchjobsoutobject.md +docs/models/functionresultentrytype.md +src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py +docs/models/completionjoboutstatus.md +docs/models/documenttextcontent.md +docs/models/legacyjobmetadataout.md +docs/models/prediction.md +src/mistralai/client/models_.py +src/mistralai/client/models/sharingdelete.py +src/mistralai/client/models/usageinfo.py +docs/models/thinkchunktype.md +docs/models/agentsapiv1conversationshistoryrequest.md 
+src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +src/mistralai/client/models/responseformat.py +docs/models/builtinconnectors.md +docs/models/realtimetranscriptionsession.md +docs/models/documentlibrarytool.md +docs/models/toolfilechunktool.md +docs/models/messageinputcontentchunks.md +src/mistralai/client/models/checkpointout.py +src/mistralai/client/models/validationerror.py +docs/models/utils/retryconfig.md +docs/models/chatmoderationrequestinputs2.md +docs/models/format_.md +docs/sdks/batchjobs/README.md +docs/models/agentsapiv1conversationsrestartstreamrequest.md +docs/models/filesapiroutesdeletefilerequest.md +docs/models/toolexecutionentry.md +docs/models/metricout.md +src/mistralai/client/models/completionjobout.py +docs/models/deletefileout.md +docs/models/functioncallentrytype.md +docs/models/filepurpose.md +docs/models/transcriptionsegmentchunk.md +docs/models/usageinfo.md +docs/models/responsev1conversationsget.md +src/mistralai/client/models/ssetypes.py +src/mistralai/client/models/audioencoding.py +docs/models/librariesdocumentsgetsignedurlv1request.md +docs/models/timestampgranularity.md +docs/models/conversationrequesthandoffexecution.md +src/mistralai/client/transcriptions.py +src/mistralai/client/models/function.py +src/mistralai/client/models/toolexecutiondeltaevent.py +docs/models/conversationappendstreamrequesthandoffexecution.md +docs/models/realtimetranscriptionerrordetail.md +docs/models/toolexecutiondeltaeventname.md +src/mistralai/client/models/__init__.py +src/mistralai/client/models/codeinterpretertool.py +docs/models/utils/retryconfig.md +docs/models/completiondetailedjoboutstatus.md +docs/models/librariesdocumentsgetv1request.md +src/mistralai/client/models/messageoutputevent.py +src/mistralai/client/models/agentscompletionstreamrequest.py +src/mistralai/client/models/textchunk.py 
+docs/models/conversationstreamrequesttool.md +docs/models/systemmessagecontent.md +docs/models/agentsapiv1conversationslistrequest.md +docs/models/chatcompletionstreamrequeststop.md +docs/models/responseerrorevent.md +docs/models/usermessagecontent.md +docs/models/audioencoding.md +docs/models/messageinputentryrole.md +docs/models/inputentries.md +src/mistralai/client/models/agents_api_v1_conversations_restartop.py +src/mistralai/client/models/messageentries.py +docs/models/ocrpageobject.md +src/mistralai/client/models/completionevent.py +src/mistralai/client/models/batchjobin.py +src/mistralai/client/models/requestsource.py +src/mistralai/client/models/fimcompletionrequest.py +docs/models/utils/retryconfig.md +src/mistralai/client/models/sharingout.py +docs/models/messageentries.md +docs/models/jobsoutdata.md +src/mistralai/client/batch_jobs.py +src/mistralai/client/models/messageinputentry.py +docs/models/uploadfileout.md +src/mistralai/client/models/finetuneablemodeltype.py +docs/models/documentupdatein.md +docs/models/toolmessagecontent.md +docs/models/utils/retryconfig.md +docs/models/documentout.md +docs/models/functionname.md +src/mistralai/client/documents.py +src/mistralai/client/models/realtimetranscriptionsessioncreated.py +docs/models/conversationstreamrequesthandoffexecution.md +docs/models/ocrresponse.md +src/mistralai/client/models/libraries_share_create_v1op.py +docs/models/functioncallentryobject.md +docs/models/httpvalidationerror.md +src/mistralai/client/models/agents_api_v1_agents_getop.py +docs/models/responsedoneevent.md +docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md +docs/models/utils/retryconfig.md +src/mistralai/client/models/completionftmodelout.py +docs/models/utils/retryconfig.md +src/mistralai/client/files.py +docs/models/batchjobsout.md +docs/models/audiotranscriptionrequeststream.md +src/mistralai/client/models/functioncallentryarguments.py +docs/models/responsestartedevent.md 
+src/mistralai/client/models/agents_api_v1_agents_deleteop.py +docs/models/utils/retryconfig.md +docs/models/completionchunk.md +src/mistralai/client/models/agents_api_v1_conversations_listop.py +src/mistralai/client/models/archiveftmodelout.py +docs/models/agentaliasresponse.md +docs/models/realtimetranscriptionsessionupdated.md +docs/models/batcherror.md +docs/models/contentchunk.md +docs/models/source.md +docs/models/utils/retryconfig.md +docs/models/toolexecutiondoneevent.md +docs/models/realtimetranscriptionerrordetailmessage.md +docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md +docs/models/sharingdelete.md +docs/models/agentsapiv1agentscreateorupdatealiasrequest.md +docs/models/completionjobout.md +docs/models/conversationrequest.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/betaagents/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/conversations/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/libraries/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/accesses/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md 
+docs/sdks/documents/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/chat/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/classifiers/README.md +docs/models/utils/retryconfig.md +docs/sdks/embeddings/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/files/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/fim/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/finetuningjobs/README.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/models/utils/retryconfig.md +docs/sdks/models/README.md +docs/models/utils/retryconfig.md +docs/sdks/ocr/README.md +docs/models/processingstatusout.md +docs/models/completionargsstop.md +docs/models/ocrtableobject.md +src/mistralai/client/models/assistantmessage.py +src/mistralai/client/models/libraries_documents_update_v1op.py +src/mistralai/client/models/agenthandoffstartedevent.py +src/mistralai/client/models/eventout.py +src/mistralai/client/models/toolreferencechunk.py +docs/models/githubrepositoryin.md +src/mistralai/client/models/messageoutputcontentchunks.py +src/mistralai/client/models/agenthandoffentry.py +docs/models/jsonschema.md +docs/models/conversationrestartrequesthandoffexecution.md +docs/models/listfilesout.md +src/mistralai/client/models/transcriptionstreamevents.py +docs/models/ftmodelcard.md +docs/models/jobinintegration.md +src/mistralai/client/models/conversationrestartrequest.py 
+src/mistralai/client/models/encodingformat.py +docs/models/deltamessagecontent.md +src/mistralai/client/models/outputcontentchunks.py +docs/models/toolfilechunktype.md +src/mistralai/client/_hooks/__init__.py +src/mistralai/client/models/entitytype.py +docs/models/deletemodelout.md +src/mistralai/client/embeddings.py +docs/models/documenturlchunktype.md +docs/models/batchjobin.md +src/mistralai/client/models/wandbintegrationout.py +docs/models/transcriptionstreameventsdata.md +src/mistralai/client/models/classificationresponse.py +docs/models/trainingfile.md +src/mistralai/client/models/transcriptionsegmentchunk.py +docs/models/audiotranscriptionrequest.md +src/mistralai/client/models/githubrepositoryout.py +src/mistralai/client/models/functiontool.py +docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md +docs/models/conversationusageinfo.md +docs/models/ssetypes.md +src/mistralai/client/models/listdocumentout.py +docs/models/libraryin.md +src/mistralai/client/models/libraries_share_delete_v1op.py +src/mistralai/client/models/systemmessage.py +src/mistralai/client/models/chatcompletionstreamrequest.py +src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py +docs/models/filesapirouteslistfilesrequest.md +docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md +src/mistralai/client/models/security.py +docs/models/modelconversationobject.md +src/mistralai/client/models/conversationmessages.py +docs/models/output.md +src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py +src/mistralai/client/models/classificationrequest.py +docs/models/librariesdocumentslistv1request.md +docs/models/toolmessage.md +docs/models/agentsapiv1agentslistversionsrequest.md +src/mistralai/client/models/embeddingresponsedata.py +src/mistralai/client/models/conversationhistory.py +docs/models/librariessharecreatev1request.md +docs/models/messageinputentrycontent.md +src/mistralai/client/models/functioncallentry.py 
+src/mistralai/client/models/builtinconnectors.py +src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py +src/mistralai/client/models/httpvalidationerror.py +src/mistralai/client/classifiers.py +docs/models/transcriptionsegmentchunktype.md +docs/models/arguments.md +docs/models/checkpointout.md +src/mistralai/client/beta.py +docs/models/archiveftmodeloutobject.md +docs/models/jobsapiroutesbatchcancelbatchjobrequest.md +docs/models/imageurlchunk.md +src/mistralai/client/models/batcherror.py +docs/models/inputs.md diff --git a/MIGRATION.md b/MIGRATION.md index 4ab7f2ff..5fb16739 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -35,10 +35,22 @@ from mistralai.client.types import BaseModel ### What Stays the Same -- All method names and signatures remain identical - The `Mistral` client API is unchanged - All models (`UserMessage`, `AssistantMessage`, etc.) work the same way +### Type Name Changes + +Some type names have been updated for clarity and consistency: + +| Old Name | New Name | +|---|---| +| `Tools` | `ConversationRequestTool` | +| `ToolsTypedDict` | `ConversationRequestToolTypedDict` | +| `HandoffExecution` | `ConversationRequestHandoffExecution` | +| `AgentVersion` | `ConversationRequestAgentVersion` | + +Enums now accept unknown values for forward compatibility with API changes. 
+ --- ## Migrating from v0.x to v1.x diff --git a/README.md b/README.md index 129e8ee0..2f31ccf2 100644 --- a/README.md +++ b/README.md @@ -458,25 +458,25 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [complete](docs/sdks/transcriptions/README.md#complete) - Create Transcription * [stream](docs/sdks/transcriptions/README.md#stream) - Create Streaming Transcription (SSE) -### [Batch.Jobs](docs/sdks/mistraljobs/README.md) - -* [list](docs/sdks/mistraljobs/README.md#list) - Get Batch Jobs -* [create](docs/sdks/mistraljobs/README.md#create) - Create Batch Job -* [get](docs/sdks/mistraljobs/README.md#get) - Get Batch Job -* [cancel](docs/sdks/mistraljobs/README.md#cancel) - Cancel Batch Job - -### [Beta.Agents](docs/sdks/mistralagents/README.md) - -* [create](docs/sdks/mistralagents/README.md#create) - Create a agent that can be used within a conversation. -* [list](docs/sdks/mistralagents/README.md#list) - List agent entities. -* [get](docs/sdks/mistralagents/README.md#get) - Retrieve an agent entity. -* [update](docs/sdks/mistralagents/README.md#update) - Update an agent entity. -* [delete](docs/sdks/mistralagents/README.md#delete) - Delete an agent entity. -* [update_version](docs/sdks/mistralagents/README.md#update_version) - Update an agent version. -* [list_versions](docs/sdks/mistralagents/README.md#list_versions) - List all versions of an agent. -* [get_version](docs/sdks/mistralagents/README.md#get_version) - Retrieve a specific version of an agent. -* [create_version_alias](docs/sdks/mistralagents/README.md#create_version_alias) - Create or update an agent version alias. -* [list_version_aliases](docs/sdks/mistralagents/README.md#list_version_aliases) - List all aliases for an agent. 
+### [Batch.Jobs](docs/sdks/batchjobs/README.md) + +* [list](docs/sdks/batchjobs/README.md#list) - Get Batch Jobs +* [create](docs/sdks/batchjobs/README.md#create) - Create Batch Job +* [get](docs/sdks/batchjobs/README.md#get) - Get Batch Job +* [cancel](docs/sdks/batchjobs/README.md#cancel) - Cancel Batch Job + +### [Beta.Agents](docs/sdks/betaagents/README.md) + +* [create](docs/sdks/betaagents/README.md#create) - Create a agent that can be used within a conversation. +* [list](docs/sdks/betaagents/README.md#list) - List agent entities. +* [get](docs/sdks/betaagents/README.md#get) - Retrieve an agent entity. +* [update](docs/sdks/betaagents/README.md#update) - Update an agent entity. +* [delete](docs/sdks/betaagents/README.md#delete) - Delete an agent entity. +* [update_version](docs/sdks/betaagents/README.md#update_version) - Update an agent version. +* [list_versions](docs/sdks/betaagents/README.md#list_versions) - List all versions of an agent. +* [get_version](docs/sdks/betaagents/README.md#get_version) - Retrieve a specific version of an agent. +* [create_version_alias](docs/sdks/betaagents/README.md#create_version_alias) - Create or update an agent version alias. +* [list_version_aliases](docs/sdks/betaagents/README.md#list_version_aliases) - List all aliases for an agent. 
### [Beta.Conversations](docs/sdks/conversations/README.md) @@ -549,13 +549,13 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [complete](docs/sdks/fim/README.md#complete) - Fim Completion * [stream](docs/sdks/fim/README.md#stream) - Stream fim completion -### [FineTuning.Jobs](docs/sdks/jobs/README.md) +### [FineTuning.Jobs](docs/sdks/finetuningjobs/README.md) -* [list](docs/sdks/jobs/README.md#list) - Get Fine Tuning Jobs -* [create](docs/sdks/jobs/README.md#create) - Create Fine Tuning Job -* [get](docs/sdks/jobs/README.md#get) - Get Fine Tuning Job -* [cancel](docs/sdks/jobs/README.md#cancel) - Cancel Fine Tuning Job -* [start](docs/sdks/jobs/README.md#start) - Start Fine Tuning Job +* [list](docs/sdks/finetuningjobs/README.md#list) - Get Fine Tuning Jobs +* [create](docs/sdks/finetuningjobs/README.md#create) - Create Fine Tuning Job +* [get](docs/sdks/finetuningjobs/README.md#get) - Get Fine Tuning Job +* [cancel](docs/sdks/finetuningjobs/README.md#cancel) - Cancel Fine Tuning Job +* [start](docs/sdks/finetuningjobs/README.md#start) - Start Fine Tuning Job ### [Models](docs/sdks/models/README.md) diff --git a/docs/models/agent.md b/docs/models/agent.md index ee054dd3..bd143350 100644 --- a/docs/models/agent.md +++ b/docs/models/agent.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | ----------------------------------------------------------------------- | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentTools](../models/agenttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `tools` | List[[models.AgentTool](../models/agenttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `model` | *str* | :heavy_check_mark: | N/A | | `name` | *str* | :heavy_check_mark: | N/A | diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md index afc27d3b..6a24c00b 100644 --- a/docs/models/agentcreationrequest.md +++ b/docs/models/agentcreationrequest.md @@ -3,13 +3,13 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentCreationRequestTools](../models/agentcreationrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentCreationRequestTool](../models/agentcreationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentcreationrequesttool.md b/docs/models/agentcreationrequesttool.md new file mode 100644 index 00000000..b3bd7fa3 --- /dev/null +++ b/docs/models/agentcreationrequesttool.md @@ -0,0 +1,41 @@ +# AgentCreationRequestTool + + +## Supported Types + +### `models.CodeInterpreterTool` + +```python +value: models.CodeInterpreterTool = /* values here */ +``` + +### `models.DocumentLibraryTool` + +```python +value: models.DocumentLibraryTool = /* values here */ +``` + +### `models.FunctionTool` + +```python +value: models.FunctionTool = /* values here */ +``` + +### `models.ImageGenerationTool` + +```python +value: models.ImageGenerationTool = /* values here */ +``` + +### `models.WebSearchTool` + +```python +value: models.WebSearchTool = /* values here */ +``` + +### `models.WebSearchPremiumTool` + +```python +value: models.WebSearchPremiumTool = /* values here */ +``` + diff --git a/docs/models/agenthandoffdoneevent.md b/docs/models/agenthandoffdoneevent.md index c0039f41..6bfcc3d8 100644 --- a/docs/models/agenthandoffdoneevent.md +++ b/docs/models/agenthandoffdoneevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------ | -| `type` | [Optional[models.AgentHandoffDoneEventType]](../models/agenthandoffdoneeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["agent.handoff.done"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffdoneeventtype.md b/docs/models/agenthandoffdoneeventtype.md deleted file mode 100644 index c864ce43..00000000 --- a/docs/models/agenthandoffdoneeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentHandoffDoneEventType - - -## Values - -| Name | Value | -| -------------------- | -------------------- | -| `AGENT_HANDOFF_DONE` | agent.handoff.done | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedevent.md b/docs/models/agenthandoffstartedevent.md index 035cd02a..518b5a0c 100644 --- 
a/docs/models/agenthandoffstartedevent.md +++ b/docs/models/agenthandoffstartedevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | -| `type` | [Optional[models.AgentHandoffStartedEventType]](../models/agenthandoffstartedeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["agent.handoff.started"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffstartedeventtype.md 
b/docs/models/agenthandoffstartedeventtype.md deleted file mode 100644 index 4ffaff15..00000000 --- a/docs/models/agenthandoffstartedeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentHandoffStartedEventType - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `AGENT_HANDOFF_STARTED` | agent.handoff.started | \ No newline at end of file diff --git a/docs/models/queryparamagentversion.md b/docs/models/agentsapiv1agentsgetagentversion.md similarity index 79% rename from docs/models/queryparamagentversion.md rename to docs/models/agentsapiv1agentsgetagentversion.md index 3eb5ef18..7fb9f2d5 100644 --- a/docs/models/queryparamagentversion.md +++ b/docs/models/agentsapiv1agentsgetagentversion.md @@ -1,4 +1,4 @@ -# QueryParamAgentVersion +# AgentsAPIV1AgentsGetAgentVersion ## Supported Types diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md index c71d4419..ceffe009 100644 --- a/docs/models/agentsapiv1agentsgetrequest.md +++ b/docs/models/agentsapiv1agentsgetrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | [OptionalNullable[models.QueryParamAgentVersion]](../models/queryparamagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responsebody.md b/docs/models/agentsapiv1conversationslistresponse.md similarity index 84% rename from docs/models/responsebody.md rename to docs/models/agentsapiv1conversationslistresponse.md index 8a218517..b233ee20 100644 --- a/docs/models/responsebody.md +++ b/docs/models/agentsapiv1conversationslistresponse.md @@ -1,4 +1,4 @@ -# ResponseBody +# AgentsAPIV1ConversationsListResponse ## Supported Types diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index 2a0c4144..d87dc7da 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -10,7 +10,7 @@ | `stop` | [Optional[models.AgentsCompletionRequestStop]](../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.AgentsCompletionRequestMessages](../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.AgentsCompletionRequestMessage](../models/agentscompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | diff --git a/docs/models/instructrequestinputsmessages.md b/docs/models/agentscompletionrequestmessage.md similarity index 92% rename from docs/models/instructrequestinputsmessages.md rename to docs/models/agentscompletionrequestmessage.md index 237e131f..957703b5 100644 --- a/docs/models/instructrequestinputsmessages.md +++ b/docs/models/agentscompletionrequestmessage.md @@ -1,4 +1,4 @@ -# InstructRequestInputsMessages +# AgentsCompletionRequestMessage ## Supported Types diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index b2ccd4e8..dd1804a1 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -10,7 +10,7 @@ | `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.AgentsCompletionStreamRequestMessage](../models/agentscompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | diff --git a/docs/models/chatcompletionstreamrequestmessages.md b/docs/models/agentscompletionstreamrequestmessage.md similarity index 90% rename from docs/models/chatcompletionstreamrequestmessages.md rename to docs/models/agentscompletionstreamrequestmessage.md index 47990611..6ccf4244 100644 --- a/docs/models/chatcompletionstreamrequestmessages.md +++ b/docs/models/agentscompletionstreamrequestmessage.md @@ -1,4 +1,4 @@ -# ChatCompletionStreamRequestMessages +# AgentsCompletionStreamRequestMessage ## Supported Types diff --git a/docs/models/agentscompletionstreamrequestmessages.md b/docs/models/agentscompletionstreamrequestmessages.md deleted file mode 100644 index 1bc736af..00000000 --- a/docs/models/agentscompletionstreamrequestmessages.md +++ /dev/null @@ -1,29 +0,0 @@ -# AgentsCompletionStreamRequestMessages - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git a/docs/models/tools.md b/docs/models/agenttool.md similarity index 98% rename from docs/models/tools.md rename to docs/models/agenttool.md index f308d732..022f7e10 100644 --- a/docs/models/tools.md +++ b/docs/models/agenttool.md @@ -1,4 +1,4 @@ -# Tools +# AgentTool ## Supported Types diff --git a/docs/models/agentupdaterequest.md b/docs/models/agentupdaterequest.md index 641d1e40..b276e199 100644 --- a/docs/models/agentupdaterequest.md +++ b/docs/models/agentupdaterequest.md @@ -3,14 +3,14 @@ ## 
Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentUpdateRequestTools](../models/agentupdaterequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentUpdateRequestTool](../models/agentupdaterequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversationtools.md b/docs/models/agentupdaterequesttool.md similarity index 96% rename from docs/models/modelconversationtools.md rename to docs/models/agentupdaterequesttool.md index 5cc97437..ce553126 100644 --- a/docs/models/modelconversationtools.md +++ b/docs/models/agentupdaterequesttool.md @@ -1,4 +1,4 @@ -# ModelConversationTools +# AgentUpdateRequestTool ## Supported Types diff --git a/docs/models/audiochunk.md b/docs/models/audiochunk.md index c443e7ad..8a04af04 100644 --- a/docs/models/audiochunk.md +++ b/docs/models/audiochunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -| `input_audio` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.AudioChunkType]](../models/audiochunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------ | ------------------------ | ------------------------ | ------------------------ | +| `input_audio` | *str* | :heavy_check_mark: | N/A | +| `type` | *Literal["input_audio"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/docs/models/audiochunktype.md b/docs/models/audiochunktype.md deleted file mode 100644 index 46ebf372..00000000 --- a/docs/models/audiochunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# AudioChunkType - - -## Values - -| Name | Value | -| ------------- | ------------- | -| `INPUT_AUDIO` | input_audio | \ No newline at end of file diff --git a/docs/models/basemodelcard.md b/docs/models/basemodelcard.md index 58ad5e25..0f42504f 100644 --- a/docs/models/basemodelcard.md +++ b/docs/models/basemodelcard.md @@ -17,4 +17,4 @@ | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.BaseModelCardType]](../models/basemodelcardtype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | *Literal["base"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/basemodelcardtype.md b/docs/models/basemodelcardtype.md deleted file mode 100644 index 4a40ce76..00000000 --- a/docs/models/basemodelcardtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# BaseModelCardType - - -## Values - -| Name | Value | -| ------ | ------ | -| `BASE` | base | \ No newline at end of file diff --git a/docs/models/chatcompletionchoice.md b/docs/models/chatcompletionchoice.md index d77d286e..deaa0ea0 100644 --- a/docs/models/chatcompletionchoice.md +++ b/docs/models/chatcompletionchoice.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | Example | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `index` | 
*int* | :heavy_check_mark: | N/A | 0 | -| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | -| `finish_reason` | [models.FinishReason](../models/finishreason.md) | :heavy_check_mark: | N/A | stop | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | 0 | +| `message` | [models.AssistantMessage](../models/assistantmessage.md) | :heavy_check_mark: | N/A | | +| `finish_reason` | [models.ChatCompletionChoiceFinishReason](../models/chatcompletionchoicefinishreason.md) | :heavy_check_mark: | N/A | stop | \ No newline at end of file diff --git a/docs/models/finishreason.md b/docs/models/chatcompletionchoicefinishreason.md similarity index 88% rename from docs/models/finishreason.md rename to docs/models/chatcompletionchoicefinishreason.md index 2af53f6e..b2f15ecb 100644 --- a/docs/models/finishreason.md +++ b/docs/models/chatcompletionchoicefinishreason.md @@ -1,4 +1,4 @@ -# FinishReason +# ChatCompletionChoiceFinishReason ## Values diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index 109fa7b1..f3abeeff 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -10,10 +10,10 @@ | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/docs/models/one.md b/docs/models/chatcompletionrequestmessage.md similarity index 92% rename from docs/models/one.md rename to docs/models/chatcompletionrequestmessage.md index 3de496a6..91e9e062 100644 --- a/docs/models/one.md +++ b/docs/models/chatcompletionrequestmessage.md @@ -1,4 +1,4 @@ -# One +# ChatCompletionRequestMessage ## Supported Types diff --git a/docs/models/stop.md b/docs/models/chatcompletionrequeststop.md similarity index 90% rename from docs/models/stop.md rename to docs/models/chatcompletionrequeststop.md index ba40ca83..749296d4 100644 --- a/docs/models/stop.md +++ b/docs/models/chatcompletionrequeststop.md @@ -1,4 +1,4 @@ -# Stop +# ChatCompletionRequestStop Stop generation if this token is detected. Or if one of these tokens is detected when providing an array diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 7d5fb411..42792d39 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -13,7 +13,7 @@ | `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/docs/models/agentscompletionrequestmessages.md b/docs/models/chatcompletionstreamrequestmessage.md similarity index 91% rename from docs/models/agentscompletionrequestmessages.md rename to docs/models/chatcompletionstreamrequestmessage.md index d6a1e691..2e4e93ac 100644 --- a/docs/models/agentscompletionrequestmessages.md +++ b/docs/models/chatcompletionstreamrequestmessage.md @@ -1,4 +1,4 @@ -# AgentsCompletionRequestMessages +# ChatCompletionStreamRequestMessage ## Supported Types diff --git a/docs/models/chatmoderationrequest.md b/docs/models/chatmoderationrequest.md index 69b6c1dc..f252482d 100644 --- a/docs/models/chatmoderationrequest.md +++ b/docs/models/chatmoderationrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `inputs` | [models.ChatModerationRequestInputs](../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to 
classify | -| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `inputs` | [models.ChatModerationRequestInputs3](../models/chatmoderationrequestinputs3.md) | :heavy_check_mark: | Chat to classify | +| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/chatmoderationrequestinputs.md b/docs/models/chatmoderationrequestinputs.md deleted file mode 100644 index cf775d60..00000000 --- a/docs/models/chatmoderationrequestinputs.md +++ /dev/null @@ -1,19 +0,0 @@ -# ChatModerationRequestInputs - -Chat to classify - - -## Supported Types - -### `List[models.One]` - -```python -value: List[models.One] = /* values here */ -``` - -### `List[List[models.Two]]` - -```python -value: List[List[models.Two]] = /* values here */ -``` - diff --git a/docs/models/instructrequestmessages.md b/docs/models/chatmoderationrequestinputs1.md similarity index 92% rename from docs/models/instructrequestmessages.md rename to docs/models/chatmoderationrequestinputs1.md index 9c866a7d..e15b8a84 100644 --- a/docs/models/instructrequestmessages.md +++ b/docs/models/chatmoderationrequestinputs1.md @@ -1,4 +1,4 @@ -# InstructRequestMessages +# ChatModerationRequestInputs1 ## Supported Types diff --git a/docs/models/chatmoderationrequestinputs2.md b/docs/models/chatmoderationrequestinputs2.md new file mode 100644 index 00000000..f40a4ebe --- /dev/null +++ b/docs/models/chatmoderationrequestinputs2.md @@ -0,0 +1,29 @@ +# ChatModerationRequestInputs2 + + +## Supported Types + +### `models.AssistantMessage` + +```python +value: models.AssistantMessage = /* values 
here */ +``` + +### `models.SystemMessage` + +```python +value: models.SystemMessage = /* values here */ +``` + +### `models.ToolMessage` + +```python +value: models.ToolMessage = /* values here */ +``` + +### `models.UserMessage` + +```python +value: models.UserMessage = /* values here */ +``` + diff --git a/docs/models/chatmoderationrequestinputs3.md b/docs/models/chatmoderationrequestinputs3.md new file mode 100644 index 00000000..ff1c6ea3 --- /dev/null +++ b/docs/models/chatmoderationrequestinputs3.md @@ -0,0 +1,19 @@ +# ChatModerationRequestInputs3 + +Chat to classify + + +## Supported Types + +### `List[models.ChatModerationRequestInputs1]` + +```python +value: List[models.ChatModerationRequestInputs1] = /* values here */ +``` + +### `List[List[models.ChatModerationRequestInputs2]]` + +```python +value: List[List[models.ChatModerationRequestInputs2]] = /* values here */ +``` + diff --git a/docs/models/classifierdetailedjobout.md b/docs/models/classifierdetailedjobout.md index ccc88f89..15f70aeb 100644 --- a/docs/models/classifierdetailedjobout.md +++ b/docs/models/classifierdetailedjobout.md @@ -16,10 +16,10 @@ | `object` | [Optional[models.ClassifierDetailedJobOutObject]](../models/classifierdetailedjoboutobject.md) | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.ClassifierDetailedJobOutIntegrations](../models/classifierdetailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.ClassifierDetailedJobOutIntegration](../models/classifierdetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | 
[Optional[models.ClassifierDetailedJobOutJobType]](../models/classifierdetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | +| `job_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | | `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | | `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | | `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | diff --git a/docs/models/completiondetailedjoboutintegrations.md b/docs/models/classifierdetailedjoboutintegration.md similarity index 76% rename from docs/models/completiondetailedjoboutintegrations.md rename to docs/models/classifierdetailedjoboutintegration.md index af6bbcc5..9dfa6e8a 100644 --- a/docs/models/completiondetailedjoboutintegrations.md +++ b/docs/models/classifierdetailedjoboutintegration.md @@ -1,4 +1,4 @@ -# CompletionDetailedJobOutIntegrations +# ClassifierDetailedJobOutIntegration ## Supported Types diff --git a/docs/models/classifierdetailedjoboutjobtype.md b/docs/models/classifierdetailedjoboutjobtype.md deleted file mode 100644 index 0d1c6573..00000000 --- a/docs/models/classifierdetailedjoboutjobtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierDetailedJobOutJobType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md index dd9e8bf9..d7bcd3ca 100644 --- a/docs/models/classifierftmodelout.md +++ b/docs/models/classifierftmodelout.md @@ -3,21 +3,21 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | -| `model_type` | [Optional[models.ClassifierFTModelOutModelType]](../models/classifierftmodeloutmodeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | 
[Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | +| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierftmodeloutmodeltype.md b/docs/models/classifierftmodeloutmodeltype.md deleted file mode 100644 index e1e7e465..00000000 --- a/docs/models/classifierftmodeloutmodeltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierFTModelOutModelType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/classifierjobout.md b/docs/models/classifierjobout.md index aa1d3ca9..f8259cab 100644 --- a/docs/models/classifierjobout.md +++ b/docs/models/classifierjobout.md @@ -16,8 +16,8 @@ | `object` | [Optional[models.ClassifierJobOutObject]](../models/classifierjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. 
The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | -| `integrations` | List[[models.ClassifierJobOutIntegrations](../models/classifierjoboutintegrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `integrations` | List[[models.ClassifierJobOutIntegration](../models/classifierjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.ClassifierJobOutJobType]](../models/classifierjoboutjobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). | +| `job_type` | *Literal["classifier"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). 
| | `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/integrations.md b/docs/models/classifierjoboutintegration.md similarity index 80% rename from docs/models/integrations.md rename to docs/models/classifierjoboutintegration.md index 35214d63..33af8a70 100644 --- a/docs/models/integrations.md +++ b/docs/models/classifierjoboutintegration.md @@ -1,4 +1,4 @@ -# Integrations +# ClassifierJobOutIntegration ## Supported Types diff --git a/docs/models/classifierjoboutjobtype.md b/docs/models/classifierjoboutjobtype.md deleted file mode 100644 index 7f5236fa..00000000 --- a/docs/models/classifierjoboutjobtype.md +++ /dev/null @@ -1,10 +0,0 @@ -# ClassifierJobOutJobType - -The type of job (`FT` for fine-tuning). - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `CLASSIFIER` | classifier | \ No newline at end of file diff --git a/docs/models/codeinterpretertool.md b/docs/models/codeinterpretertool.md index d5ad789e..544cda93 100644 --- a/docs/models/codeinterpretertool.md +++ b/docs/models/codeinterpretertool.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.CodeInterpreterToolType]](../models/codeinterpretertooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | +| `type` | *Literal["code_interpreter"]* | :heavy_check_mark: | N/A | \ No newline at end of 
file diff --git a/docs/models/codeinterpretertooltype.md b/docs/models/codeinterpretertooltype.md deleted file mode 100644 index f704b65e..00000000 --- a/docs/models/codeinterpretertooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# CodeInterpreterToolType - - -## Values - -| Name | Value | -| ------------------ | ------------------ | -| `CODE_INTERPRETER` | code_interpreter | \ No newline at end of file diff --git a/docs/models/completiondetailedjobout.md b/docs/models/completiondetailedjobout.md index 84613080..725ebcde 100644 --- a/docs/models/completiondetailedjobout.md +++ b/docs/models/completiondetailedjobout.md @@ -16,11 +16,11 @@ | `object` | [Optional[models.CompletionDetailedJobOutObject]](../models/completiondetailedjoboutobject.md) | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.CompletionDetailedJobOutIntegrations](../models/completiondetailedjoboutintegrations.md)] | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.CompletionDetailedJobOutIntegration](../models/completiondetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.CompletionDetailedJobOutJobType]](../models/completiondetailedjoboutjobtype.md) | :heavy_minus_sign: | N/A | +| `job_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | | `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.CompletionDetailedJobOutRepositories](../models/completiondetailedjoboutrepositories.md)] | :heavy_minus_sign: | N/A | +| `repositories` | 
List[[models.CompletionDetailedJobOutRepository](../models/completiondetailedjoboutrepository.md)] | :heavy_minus_sign: | N/A | | `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | | `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifierdetailedjoboutintegrations.md b/docs/models/completiondetailedjoboutintegration.md similarity index 76% rename from docs/models/classifierdetailedjoboutintegrations.md rename to docs/models/completiondetailedjoboutintegration.md index 5a09465e..9e526053 100644 --- a/docs/models/classifierdetailedjoboutintegrations.md +++ b/docs/models/completiondetailedjoboutintegration.md @@ -1,4 +1,4 @@ -# ClassifierDetailedJobOutIntegrations +# CompletionDetailedJobOutIntegration ## Supported Types diff --git a/docs/models/completiondetailedjoboutjobtype.md b/docs/models/completiondetailedjoboutjobtype.md deleted file mode 100644 index fb24db0c..00000000 --- a/docs/models/completiondetailedjoboutjobtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# CompletionDetailedJobOutJobType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/completiondetailedjoboutrepositories.md b/docs/models/completiondetailedjoboutrepository.md similarity index 76% rename from docs/models/completiondetailedjoboutrepositories.md rename to docs/models/completiondetailedjoboutrepository.md index 4f9727c3..92a7b75c 100644 --- a/docs/models/completiondetailedjoboutrepositories.md +++ b/docs/models/completiondetailedjoboutrepository.md @@ -1,4 +1,4 @@ -# CompletionDetailedJobOutRepositories +# CompletionDetailedJobOutRepository ## Supported Types diff --git a/docs/models/completionftmodelout.md 
b/docs/models/completionftmodelout.md index cd085825..9ebfa83e 100644 --- a/docs/models/completionftmodelout.md +++ b/docs/models/completionftmodelout.md @@ -19,4 +19,4 @@ | `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | | `aliases` | List[*str*] | :heavy_minus_sign: | N/A | | `job` | *str* | :heavy_check_mark: | N/A | -| `model_type` | [Optional[models.ModelType]](../models/modeltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionjobout.md b/docs/models/completionjobout.md index cb471746..84be452f 100644 --- a/docs/models/completionjobout.md +++ b/docs/models/completionjobout.md @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | The ID of the job. | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | -| `status` | [models.Status](../models/status.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `status` | [models.CompletionJobOutStatus](../models/completionjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | @@ -16,9 +16,9 @@ | `object` | [Optional[models.CompletionJobOutObject]](../models/completionjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. 
| | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | -| `integrations` | List[[models.Integrations](../models/integrations.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `integrations` | List[[models.CompletionJobOutIntegration](../models/completionjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | | `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | -| `job_type` | [Optional[models.JobType]](../models/jobtype.md) | :heavy_minus_sign: | The type of job (`FT` for fine-tuning). | +| `job_type` | *Literal["completion"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). 
| | `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.Repositories](../models/repositories.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `repositories` | List[[models.CompletionJobOutRepository](../models/completionjoboutrepository.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/classifierjoboutintegrations.md b/docs/models/completionjoboutintegration.md similarity index 80% rename from docs/models/classifierjoboutintegrations.md rename to docs/models/completionjoboutintegration.md index d938d0b9..6474747b 100644 --- a/docs/models/classifierjoboutintegrations.md +++ b/docs/models/completionjoboutintegration.md @@ -1,4 +1,4 @@ -# ClassifierJobOutIntegrations +# CompletionJobOutIntegration ## Supported Types diff --git a/docs/models/repositories.md b/docs/models/completionjoboutrepository.md similarity index 81% rename from docs/models/repositories.md rename to docs/models/completionjoboutrepository.md index 02274e3d..52f65558 100644 --- a/docs/models/repositories.md +++ b/docs/models/completionjoboutrepository.md @@ -1,4 +1,4 @@ -# Repositories +# CompletionJobOutRepository ## Supported Types diff --git a/docs/models/status.md b/docs/models/completionjoboutstatus.md similarity index 96% rename from docs/models/status.md rename to docs/models/completionjoboutstatus.md index 5e22eb73..91754945 100644 --- a/docs/models/status.md +++ b/docs/models/completionjoboutstatus.md @@ -1,4 +1,4 @@ -# Status +# CompletionJobOutStatus The current status of the fine-tuning job. diff --git a/docs/models/conversationhistory.md b/docs/models/conversationhistory.md index ebb1d513..c8baad0b 100644 --- a/docs/models/conversationhistory.md +++ b/docs/models/conversationhistory.md @@ -9,4 +9,4 @@ Retrieve all entries in a conversation. 
| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | | `object` | [Optional[models.ConversationHistoryObject]](../models/conversationhistoryobject.md) | :heavy_minus_sign: | N/A | | `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `entries` | List[[models.Entries](../models/entries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| `entries` | List[[models.Entry](../models/entry.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationrequest.md b/docs/models/conversationrequest.md index 2b4ff8ef..bd7823a8 100644 --- a/docs/models/conversationrequest.md +++ b/docs/models/conversationrequest.md @@ -3,18 +3,18 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | -| `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../models/handoffexecution.md) | :heavy_minus_sign: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.Tools](../models/tools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| -| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | [OptionalNullable[models.AgentVersion]](../models/agentversion.md) | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationRequestHandoffExecution]](../models/conversationrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationRequestTool](../models/conversationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRequestAgentVersion]](../models/conversationrequestagentversion.md) | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentversion.md b/docs/models/conversationrequestagentversion.md similarity index 80% rename from docs/models/agentversion.md rename to docs/models/conversationrequestagentversion.md index fd4b6a3e..9f251821 100644 --- a/docs/models/agentversion.md +++ b/docs/models/conversationrequestagentversion.md @@ -1,4 +1,4 @@ -# AgentVersion +# ConversationRequestAgentVersion ## Supported Types diff --git a/docs/models/handoffexecution.md b/docs/models/conversationrequesthandoffexecution.md similarity index 73% rename from docs/models/handoffexecution.md rename to docs/models/conversationrequesthandoffexecution.md index 61e7dade..e7314f7e 100644 --- a/docs/models/handoffexecution.md +++ b/docs/models/conversationrequesthandoffexecution.md @@ -1,4 +1,4 @@ -# HandoffExecution +# ConversationRequestHandoffExecution ## Values diff --git a/docs/models/agentupdaterequesttools.md b/docs/models/conversationrequesttool.md similarity index 95% rename from docs/models/agentupdaterequesttools.md rename to docs/models/conversationrequesttool.md index 1752ee68..2e4e8d01 100644 --- a/docs/models/agentupdaterequesttools.md +++ b/docs/models/conversationrequesttool.md @@ -1,4 +1,4 @@ -# AgentUpdateRequestTools +# ConversationRequestTool ## Supported Types diff --git a/docs/models/conversationresponse.md 
b/docs/models/conversationresponse.md index 38cdadd0..e3182128 100644 --- a/docs/models/conversationresponse.md +++ b/docs/models/conversationresponse.md @@ -9,5 +9,5 @@ The response after appending new entries to the conversation. | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | | `object` | [Optional[models.ConversationResponseObject]](../models/conversationresponseobject.md) | :heavy_minus_sign: | N/A | | `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `outputs` | List[[models.Outputs](../models/outputs.md)] | :heavy_check_mark: | N/A | +| `outputs` | List[[models.Output](../models/output.md)] | :heavy_check_mark: | N/A | | `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationstreamrequest.md b/docs/models/conversationstreamrequest.md index 299346f8..8b74f9e7 100644 --- a/docs/models/conversationstreamrequest.md +++ b/docs/models/conversationstreamrequest.md @@ -10,7 +10,7 @@ | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.ConversationStreamRequestTools](../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `tools` | List[[models.ConversationStreamRequestTool](../models/conversationstreamrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/agentcreationrequesttools.md b/docs/models/conversationstreamrequesttool.md similarity index 95% rename from docs/models/agentcreationrequesttools.md rename to docs/models/conversationstreamrequesttool.md index c2525850..0f75f82b 100644 --- a/docs/models/agentcreationrequesttools.md +++ b/docs/models/conversationstreamrequesttool.md @@ -1,4 +1,4 @@ -# AgentCreationRequestTools +# ConversationStreamRequestTool ## Supported Types diff --git a/docs/models/conversationstreamrequesttools.md b/docs/models/conversationstreamrequesttools.md deleted file mode 100644 index 700c8448..00000000 --- a/docs/models/conversationstreamrequesttools.md +++ /dev/null @@ -1,41 +0,0 @@ -# ConversationStreamRequestTools - - -## Supported Types - -### `models.CodeInterpreterTool` - -```python -value: models.CodeInterpreterTool = /* values here */ -``` - -### `models.DocumentLibraryTool` - -```python -value: models.DocumentLibraryTool = /* values here */ -``` - -### `models.FunctionTool` - -```python -value: models.FunctionTool = /* values here */ -``` - -### `models.ImageGenerationTool` - -```python -value: models.ImageGenerationTool = /* values here */ -``` - -### `models.WebSearchTool` - -```python -value: models.WebSearchTool = /* values here */ -``` - -### `models.WebSearchPremiumTool` - -```python -value: models.WebSearchPremiumTool = /* values here */ -``` - diff --git a/docs/models/deltamessage.md b/docs/models/deltamessage.md index 61deabbf..e0ee575f 100644 --- a/docs/models/deltamessage.md +++ 
b/docs/models/deltamessage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/content.md b/docs/models/deltamessagecontent.md similarity index 89% rename from docs/models/content.md rename to docs/models/deltamessagecontent.md index a833dc2c..8142772d 100644 --- a/docs/models/content.md +++ b/docs/models/deltamessagecontent.md @@ -1,4 +1,4 @@ -# Content +# DeltaMessageContent ## Supported Types diff --git a/docs/models/documentlibrarytool.md b/docs/models/documentlibrarytool.md index 82315f32..1695bad4 100644 --- a/docs/models/documentlibrarytool.md +++ b/docs/models/documentlibrarytool.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| 
-------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.DocumentLibraryToolType]](../models/documentlibrarytooltype.md) | :heavy_minus_sign: | N/A | -| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which to search. | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | +| `type` | *Literal["document_library"]* | :heavy_check_mark: | N/A | +| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which to search. | \ No newline at end of file diff --git a/docs/models/documentlibrarytooltype.md b/docs/models/documentlibrarytooltype.md deleted file mode 100644 index ebd420f6..00000000 --- a/docs/models/documentlibrarytooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# DocumentLibraryToolType - - -## Values - -| Name | Value | -| ------------------ | ------------------ | -| `DOCUMENT_LIBRARY` | document_library | \ No newline at end of file diff --git a/docs/models/librariesdocumentsuploadv1documentupload.md b/docs/models/documentupload.md similarity index 98% rename from docs/models/librariesdocumentsuploadv1documentupload.md rename to docs/models/documentupload.md index a0ba95da..4e58a475 100644 --- a/docs/models/librariesdocumentsuploadv1documentupload.md +++ b/docs/models/documentupload.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsUploadV1DocumentUpload +# DocumentUpload ## Fields diff --git a/docs/models/entries.md b/docs/models/entry.md similarity index 98% rename from docs/models/entries.md rename to docs/models/entry.md index 8e5a20d0..d934b677 
100644 --- a/docs/models/entries.md +++ b/docs/models/entry.md @@ -1,4 +1,4 @@ -# Entries +# Entry ## Supported Types diff --git a/docs/models/ftmodelcard.md b/docs/models/ftmodelcard.md index 35032775..409f0526 100644 --- a/docs/models/ftmodelcard.md +++ b/docs/models/ftmodelcard.md @@ -19,7 +19,7 @@ Extra fields for fine-tuned models. | `deprecation` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `deprecation_replacement_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `default_model_temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.FTModelCardType]](../models/ftmodelcardtype.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["fine-tuned"]* | :heavy_check_mark: | N/A | | `job` | *str* | :heavy_check_mark: | N/A | | `root` | *str* | :heavy_check_mark: | N/A | | `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/ftmodelcardtype.md b/docs/models/ftmodelcardtype.md deleted file mode 100644 index 0b38470b..00000000 --- a/docs/models/ftmodelcardtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FTModelCardType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `FINE_TUNED` | fine-tuned | \ No newline at end of file diff --git a/docs/models/functioncallevent.md b/docs/models/functioncallevent.md index c25679a5..f4062060 100644 --- a/docs/models/functioncallevent.md +++ b/docs/models/functioncallevent.md @@ -3,12 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `type` | 
[Optional[models.FunctionCallEventType]](../models/functioncalleventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `tool_call_id` | *str* | :heavy_check_mark: | N/A | -| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["function.call.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functioncalleventtype.md b/docs/models/functioncalleventtype.md deleted file mode 100644 index 8cf3f038..00000000 --- a/docs/models/functioncalleventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionCallEventType - - -## Values - -| Name | Value | -| --------------------- | --------------------- | -| `FUNCTION_CALL_DELTA` | function.call.delta | \ No newline at end of file diff --git a/docs/models/functiontool.md b/docs/models/functiontool.md index 8c424593..0226b704 100644 --- a/docs/models/functiontool.md +++ b/docs/models/functiontool.md @@ -3,7 +3,7 @@ 
## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | -| `type` | [Optional[models.FunctionToolType]](../models/functiontooltype.md) | :heavy_minus_sign: | N/A | -| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `type` | *Literal["function"]* | :heavy_check_mark: | N/A | +| `function` | [models.Function](../models/function.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functiontooltype.md b/docs/models/functiontooltype.md deleted file mode 100644 index 9c095625..00000000 --- a/docs/models/functiontooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionToolType - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `FUNCTION` | function | \ No newline at end of file diff --git a/docs/models/githubrepositoryin.md b/docs/models/githubrepositoryin.md index 1584152b..241cf584 100644 --- a/docs/models/githubrepositoryin.md +++ b/docs/models/githubrepositoryin.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | [Optional[models.GithubRepositoryInType]](../models/githubrepositoryintype.md) | :heavy_minus_sign: | N/A | 
-| `name` | *str* | :heavy_check_mark: | N/A | -| `owner` | *str* | :heavy_check_mark: | N/A | -| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | -| `token` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `type` | *Literal["github"]* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `token` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryintype.md b/docs/models/githubrepositoryintype.md deleted file mode 100644 index 63da967c..00000000 --- a/docs/models/githubrepositoryintype.md +++ /dev/null @@ -1,8 +0,0 @@ -# GithubRepositoryInType - - -## Values - -| Name | Value | -| -------- | -------- | -| `GITHUB` | github | \ No newline at end of file diff --git a/docs/models/githubrepositoryout.md b/docs/models/githubrepositoryout.md index 03f0b266..fe38393a 100644 --- a/docs/models/githubrepositoryout.md +++ b/docs/models/githubrepositoryout.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.GithubRepositoryOutType]](../models/githubrepositoryouttype.md) | :heavy_minus_sign: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `owner` | *str* | :heavy_check_mark: | N/A | -| `ref` | 
*OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | -| `commit_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `type` | *Literal["github"]* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `owner` | *str* | :heavy_check_mark: | N/A | +| `ref` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `weight` | *Optional[float]* | :heavy_minus_sign: | N/A | +| `commit_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/githubrepositoryouttype.md b/docs/models/githubrepositoryouttype.md deleted file mode 100644 index 46c3eefd..00000000 --- a/docs/models/githubrepositoryouttype.md +++ /dev/null @@ -1,8 +0,0 @@ -# GithubRepositoryOutType - - -## Values - -| Name | Value | -| -------- | -------- | -| `GITHUB` | github | \ No newline at end of file diff --git a/docs/models/imagegenerationtool.md b/docs/models/imagegenerationtool.md index b8fc9cf4..0c8de72c 100644 --- a/docs/models/imagegenerationtool.md +++ b/docs/models/imagegenerationtool.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.ImageGenerationToolType]](../models/imagegenerationtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | +| `type` | 
*Literal["image_generation"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/imagegenerationtooltype.md b/docs/models/imagegenerationtooltype.md deleted file mode 100644 index 29681b58..00000000 --- a/docs/models/imagegenerationtooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ImageGenerationToolType - - -## Values - -| Name | Value | -| ------------------ | ------------------ | -| `IMAGE_GENERATION` | image_generation | \ No newline at end of file diff --git a/docs/models/imageurlchunk.md b/docs/models/imageurlchunk.md index f1b926ef..43078c78 100644 --- a/docs/models/imageurlchunk.md +++ b/docs/models/imageurlchunk.md @@ -7,5 +7,5 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | :heavy_check_mark: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | | `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/imageurlchunkimageurl.md b/docs/models/imageurlunion.md similarity index 86% rename from docs/models/imageurlchunkimageurl.md rename to docs/models/imageurlunion.md index 76738908..db97130f 100644 --- a/docs/models/imageurlchunkimageurl.md +++ b/docs/models/imageurlunion.md @@ -1,4 +1,4 @@ -# ImageURLChunkImageURL +# ImageURLUnion ## Supported Types diff --git a/docs/models/messages.md b/docs/models/inputsmessage.md similarity index 96% rename from docs/models/messages.md rename to docs/models/inputsmessage.md index 1d394500..e3543fb4 100644 --- a/docs/models/messages.md +++ b/docs/models/inputsmessage.md @@ -1,4 +1,4 
@@ -# Messages +# InputsMessage ## Supported Types diff --git a/docs/models/instructrequest.md b/docs/models/instructrequest.md index 9500cb58..5f0cdfff 100644 --- a/docs/models/instructrequest.md +++ b/docs/models/instructrequest.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `messages` | List[[models.InstructRequestMessages](../models/instructrequestmessages.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `messages` | List[[models.InstructRequestMessage](../models/instructrequestmessage.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/instructrequestinputs.md b/docs/models/instructrequestinputs.md index 4caa028f..931ae5e4 100644 --- a/docs/models/instructrequestinputs.md +++ b/docs/models/instructrequestinputs.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `messages` | 
List[[models.InstructRequestInputsMessages](../models/instructrequestinputsmessages.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `messages` | List[[models.InputsMessage](../models/inputsmessage.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/two.md b/docs/models/instructrequestmessage.md similarity index 93% rename from docs/models/two.md rename to docs/models/instructrequestmessage.md index 59dc2be2..57ed27ab 100644 --- a/docs/models/two.md +++ b/docs/models/instructrequestmessage.md @@ -1,4 +1,4 @@ -# Two +# InstructRequestMessage ## Supported Types diff --git a/docs/models/jobin.md b/docs/models/jobin.md index b9651770..33e6ccc6 100644 --- a/docs/models/jobin.md +++ b/docs/models/jobin.md @@ -9,10 +9,10 @@ | `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.JobInIntegrations](../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. 
| +| `integrations` | List[[models.JobInIntegration](../models/jobinintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | | `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | | `job_type` | [OptionalNullable[models.FineTuneableModelType]](../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | | `hyperparameters` | [models.Hyperparameters](../models/hyperparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.JobInRepositories](../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.JobInRepository](../models/jobinrepository.md)] | :heavy_minus_sign: | N/A | | `classifier_targets` | List[[models.ClassifierTargetIn](../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobinintegrations.md b/docs/models/jobinintegration.md similarity index 85% rename from docs/models/jobinintegrations.md rename to docs/models/jobinintegration.md index 91c10242..103820e7 100644 --- a/docs/models/jobinintegrations.md +++ b/docs/models/jobinintegration.md @@ -1,4 +1,4 @@ -# JobInIntegrations +# JobInIntegration ## Supported Types diff --git a/docs/models/jobinrepositories.md b/docs/models/jobinrepository.md similarity index 86% rename from docs/models/jobinrepositories.md rename to docs/models/jobinrepository.md index b94477af..e873ae63 100644 --- a/docs/models/jobinrepositories.md +++ b/docs/models/jobinrepository.md @@ -1,4 +1,4 @@ -# JobInRepositories +# JobInRepository ## Supported Types diff --git a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md index eeddc3cd..13191e90 100644 --- a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md +++ 
b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md @@ -5,10 +5,10 @@ OK ## Supported Types -### `models.Response1` +### `models.Response` ```python -value: models.Response1 = /* values here */ +value: models.Response = /* values here */ ``` ### `models.LegacyJobMetadataOut` diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md index 3dca3cd8..23c52c34 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md @@ -3,15 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. 
Other results are not displayed. | -| `status` | [OptionalNullable[models.QueryParamStatus]](../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. 
| +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file diff --git a/docs/models/queryparamstatus.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md similarity index 94% rename from docs/models/queryparamstatus.md rename to docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md index dcd20908..40d57686 100644 --- a/docs/models/queryparamstatus.md +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md @@ -1,4 +1,4 @@ -# QueryParamStatus +# JobsAPIRoutesFineTuningGetFineTuningJobsStatus The current job state to filter on. When set, the other results are not displayed. diff --git a/docs/models/jobtype.md b/docs/models/jobtype.md deleted file mode 100644 index 847c6622..00000000 --- a/docs/models/jobtype.md +++ /dev/null @@ -1,10 +0,0 @@ -# JobType - -The type of job (`FT` for fine-tuning). 
- - -## Values - -| Name | Value | -| ------------ | ------------ | -| `COMPLETION` | completion | \ No newline at end of file diff --git a/docs/models/librariesdocumentsuploadv1request.md b/docs/models/librariesdocumentsuploadv1request.md index 7c91ca9b..172a6183 100644 --- a/docs/models/librariesdocumentsuploadv1request.md +++ b/docs/models/librariesdocumentsuploadv1request.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `request_body` | [models.LibrariesDocumentsUploadV1DocumentUpload](../models/librariesdocumentsuploadv1documentupload.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `request_body` | [models.DocumentUpload](../models/documentupload.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md index d55eb876..52183a32 100644 --- a/docs/models/messageinputentry.md +++ b/docs/models/messageinputentry.md @@ -5,13 +5,13 @@ Representation of an input message inside the conversation. 
## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `object` | [Optional[models.Object]](../models/object.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | -| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `object` | [Optional[models.MessageInputEntryObject]](../models/messageinputentryobject.md) | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | +| `created_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/object.md b/docs/models/messageinputentryobject.md similarity index 75% rename from docs/models/object.md rename to docs/models/messageinputentryobject.md index 0122c0db..6bdd62e2 100644 --- a/docs/models/object.md +++ b/docs/models/messageinputentryobject.md @@ -1,4 +1,4 @@ -# Object +# MessageInputEntryObject ## Values diff --git a/docs/models/messageoutputevent.md b/docs/models/messageoutputevent.md index 92c1c615..b0fa1a2d 100644 --- a/docs/models/messageoutputevent.md +++ b/docs/models/messageoutputevent.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | [Optional[models.MessageOutputEventType]](../models/messageoutputeventtype.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["message.output.delta"]* | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | 
:heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | diff --git a/docs/models/messageoutputeventtype.md b/docs/models/messageoutputeventtype.md deleted file mode 100644 index 1f43fdcc..00000000 --- a/docs/models/messageoutputeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEventType - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `MESSAGE_OUTPUT_DELTA` | message.output.delta | \ No newline at end of file diff --git a/docs/models/modelconversation.md b/docs/models/modelconversation.md index 1a03ef7d..813e1f3a 100644 --- a/docs/models/modelconversation.md +++ b/docs/models/modelconversation.md @@ -6,7 +6,7 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.ModelConversationTools](../models/modelconversationtools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `tools` | List[[models.ModelConversationTool](../models/modelconversationtool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. 
| diff --git a/docs/models/agenttools.md b/docs/models/modelconversationtool.md similarity index 96% rename from docs/models/agenttools.md rename to docs/models/modelconversationtool.md index 15891f56..87235567 100644 --- a/docs/models/agenttools.md +++ b/docs/models/modelconversationtool.md @@ -1,4 +1,4 @@ -# AgentTools +# ModelConversationTool ## Supported Types diff --git a/docs/models/modellist.md b/docs/models/modellist.md index 760882c6..85b20be7 100644 --- a/docs/models/modellist.md +++ b/docs/models/modellist.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | -| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `data` | List[[models.Data](../models/data.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | +| `object` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `data` | List[[models.ModelListData](../models/modellistdata.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/data.md b/docs/models/modellistdata.md similarity index 92% rename from docs/models/data.md rename to docs/models/modellistdata.md index 95dc8d28..b44e84a0 100644 --- a/docs/models/data.md +++ b/docs/models/modellistdata.md @@ -1,4 +1,4 @@ -# Data +# ModelListData ## Supported Types diff --git a/docs/models/modeltype.md b/docs/models/modeltype.md deleted file mode 100644 index a31c3ca0..00000000 --- a/docs/models/modeltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ModelType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `COMPLETION` | completion | \ No 
newline at end of file diff --git a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md b/docs/models/multipartbodyparams.md similarity index 99% rename from docs/models/filesapiroutesuploadfilemultipartbodyparams.md rename to docs/models/multipartbodyparams.md index a5dd1174..f14b9573 100644 --- a/docs/models/filesapiroutesuploadfilemultipartbodyparams.md +++ b/docs/models/multipartbodyparams.md @@ -1,4 +1,4 @@ -# FilesAPIRoutesUploadFileMultiPartBodyParams +# MultiPartBodyParams ## Fields diff --git a/docs/models/outputs.md b/docs/models/output.md similarity index 97% rename from docs/models/outputs.md rename to docs/models/output.md index 7756c627..d0ee0db9 100644 --- a/docs/models/outputs.md +++ b/docs/models/output.md @@ -1,4 +1,4 @@ -# Outputs +# Output ## Supported Types diff --git a/docs/models/realtimetranscriptionerrordetail.md b/docs/models/realtimetranscriptionerrordetail.md index 96420ada..5b34755d 100644 --- a/docs/models/realtimetranscriptionerrordetail.md +++ b/docs/models/realtimetranscriptionerrordetail.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | -| `message` | [models.Message](../models/message.md) | :heavy_check_mark: | Human-readable error message. | -| `code` | *int* | :heavy_check_mark: | Internal error code for debugging. 
| \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | +| `message` | [models.RealtimeTranscriptionErrorDetailMessage](../models/realtimetranscriptionerrordetailmessage.md) | :heavy_check_mark: | Human-readable error message. | +| `code` | *int* | :heavy_check_mark: | Internal error code for debugging. | \ No newline at end of file diff --git a/docs/models/message.md b/docs/models/realtimetranscriptionerrordetailmessage.md similarity index 81% rename from docs/models/message.md rename to docs/models/realtimetranscriptionerrordetailmessage.md index 752f04a8..da3764ef 100644 --- a/docs/models/message.md +++ b/docs/models/realtimetranscriptionerrordetailmessage.md @@ -1,4 +1,4 @@ -# Message +# RealtimeTranscriptionErrorDetailMessage Human-readable error message. 
diff --git a/docs/models/response1.md b/docs/models/response.md similarity index 94% rename from docs/models/response1.md rename to docs/models/response.md index 2e73fdbb..3512b7a8 100644 --- a/docs/models/response1.md +++ b/docs/models/response.md @@ -1,4 +1,4 @@ -# Response1 +# Response ## Supported Types diff --git a/docs/models/responsedoneevent.md b/docs/models/responsedoneevent.md index ec25bd6d..63d4cc06 100644 --- a/docs/models/responsedoneevent.md +++ b/docs/models/responsedoneevent.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseDoneEventType]](../models/responsedoneeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.done"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at 
end of file diff --git a/docs/models/responsedoneeventtype.md b/docs/models/responsedoneeventtype.md deleted file mode 100644 index 58f7f44d..00000000 --- a/docs/models/responsedoneeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ResponseDoneEventType - - -## Values - -| Name | Value | -| ---------------------------- | ---------------------------- | -| `CONVERSATION_RESPONSE_DONE` | conversation.response.done | \ No newline at end of file diff --git a/docs/models/responseerrorevent.md b/docs/models/responseerrorevent.md index 2ea6a2e0..4309bdad 100644 --- a/docs/models/responseerrorevent.md +++ b/docs/models/responseerrorevent.md @@ -3,9 +3,9 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | [Optional[models.ResponseErrorEventType]](../models/responseerroreventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `message` | *str* | :heavy_check_mark: | N/A | -| `code` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.error"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | 
N/A | +| `message` | *str* | :heavy_check_mark: | N/A | +| `code` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responseerroreventtype.md b/docs/models/responseerroreventtype.md deleted file mode 100644 index 3b3fc303..00000000 --- a/docs/models/responseerroreventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ResponseErrorEventType - - -## Values - -| Name | Value | -| ----------------------------- | ----------------------------- | -| `CONVERSATION_RESPONSE_ERROR` | conversation.response.error | \ No newline at end of file diff --git a/docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md b/docs/models/responseretrievemodelv1modelsmodelidget.md similarity index 75% rename from docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md rename to docs/models/responseretrievemodelv1modelsmodelidget.md index 3ac96521..ffbc1473 100644 --- a/docs/models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md +++ b/docs/models/responseretrievemodelv1modelsmodelidget.md @@ -1,4 +1,4 @@ -# RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet +# ResponseRetrieveModelV1ModelsModelIDGet Successful Response diff --git a/docs/models/responsestartedevent.md b/docs/models/responsestartedevent.md index 481bd5bb..e2f421af 100644 --- a/docs/models/responsestartedevent.md +++ b/docs/models/responsestartedevent.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseStartedEventType]](../models/responsestartedeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` 
| [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Literal["conversation.response.started"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/responsestartedeventtype.md b/docs/models/responsestartedeventtype.md deleted file mode 100644 index 2d9273bd..00000000 --- a/docs/models/responsestartedeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ResponseStartedEventType - - -## Values - -| Name | Value | -| ------------------------------- | ------------------------------- | -| `CONVERSATION_RESPONSE_STARTED` | conversation.response.started | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md b/docs/models/responsev1conversationsget.md similarity index 81% rename from docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md rename to docs/models/responsev1conversationsget.md index 4bc836f3..844c5d61 100644 --- a/docs/models/agentsapiv1conversationsgetresponsev1conversationsget.md +++ b/docs/models/responsev1conversationsget.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsGetResponseV1ConversationsGet +# ResponseV1ConversationsGet Successful Response diff --git a/docs/models/role.md b/docs/models/role.md deleted file mode 100644 index 
affca78d..00000000 --- a/docs/models/role.md +++ /dev/null @@ -1,8 +0,0 @@ -# Role - - -## Values - -| Name | Value | -| -------- | -------- | -| `SYSTEM` | system | \ No newline at end of file diff --git a/docs/models/systemmessage.md b/docs/models/systemmessage.md index 0dba71c0..dfb0cd0b 100644 --- a/docs/models/systemmessage.md +++ b/docs/models/systemmessage.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | | `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaevent.md b/docs/models/toolexecutiondeltaevent.md index 7bee6d83..7066f348 100644 --- a/docs/models/toolexecutiondeltaevent.md +++ b/docs/models/toolexecutiondeltaevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ToolExecutionDeltaEventType]](../models/toolexecutiondeltaeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | 
:heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.ToolExecutionDeltaEventName](../models/toolexecutiondeltaeventname.md) | :heavy_check_mark: | N/A | -| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | +| `type` | *Literal["tool.execution.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionDeltaEventName](../models/toolexecutiondeltaeventname.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondeltaeventtype.md b/docs/models/toolexecutiondeltaeventtype.md deleted file mode 100644 index a4a2f8cc..00000000 --- a/docs/models/toolexecutiondeltaeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolExecutionDeltaEventType - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `TOOL_EXECUTION_DELTA` | tool.execution.delta | \ No newline at end of file diff --git a/docs/models/toolexecutiondoneevent.md b/docs/models/toolexecutiondoneevent.md index 5898ea5e..b2d81be3 100644 --- a/docs/models/toolexecutiondoneevent.md +++ b/docs/models/toolexecutiondoneevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ToolExecutionDoneEventType]](../models/toolexecutiondoneeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.ToolExecutionDoneEventName](../models/toolexecutiondoneeventname.md) | :heavy_check_mark: | N/A | -| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `type` | *Literal["tool.execution.done"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionDoneEventName](../models/toolexecutiondoneeventname.md) | :heavy_check_mark: | N/A | +| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutiondoneeventtype.md b/docs/models/toolexecutiondoneeventtype.md deleted file mode 100644 index 872624c1..00000000 --- a/docs/models/toolexecutiondoneeventtype.md +++ /dev/null @@ -1,8 
+0,0 @@ -# ToolExecutionDoneEventType - - -## Values - -| Name | Value | -| --------------------- | --------------------- | -| `TOOL_EXECUTION_DONE` | tool.execution.done | \ No newline at end of file diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md index 3678116d..adf88fb1 100644 --- a/docs/models/toolexecutionentry.md +++ b/docs/models/toolexecutionentry.md @@ -10,6 +10,6 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `name` | [models.Name](../models/name.md) | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionEntryName](../models/toolexecutionentryname.md) | :heavy_check_mark: | N/A | | `arguments` | *str* | :heavy_check_mark: | N/A | | `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/name.md b/docs/models/toolexecutionentryname.md similarity index 87% rename from docs/models/name.md rename to docs/models/toolexecutionentryname.md index 18b978a8..fb762a53 100644 --- a/docs/models/name.md +++ b/docs/models/toolexecutionentryname.md @@ -1,4 +1,4 @@ -# Name +# ToolExecutionEntryName ## Supported Types diff --git a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md index de81312b..c41c7258 100644 --- a/docs/models/toolexecutionstartedevent.md +++ b/docs/models/toolexecutionstartedevent.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ToolExecutionStartedEventType]](../models/toolexecutionstartedeventtype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | [models.ToolExecutionStartedEventName](../models/toolexecutionstartedeventname.md) | :heavy_check_mark: | N/A | -| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `type` | *Literal["tool.execution.started"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `name` | [models.ToolExecutionStartedEventName](../models/toolexecutionstartedeventname.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedeventtype.md b/docs/models/toolexecutionstartedeventtype.md deleted file mode 100644 index 56695d1f..00000000 --- a/docs/models/toolexecutionstartedeventtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# 
ToolExecutionStartedEventType - - -## Values - -| Name | Value | -| ------------------------ | ------------------------ | -| `TOOL_EXECUTION_STARTED` | tool.execution.started | \ No newline at end of file diff --git a/docs/models/toolmessage.md b/docs/models/toolmessage.md index a54f4933..fa00d666 100644 --- a/docs/models/toolmessage.md +++ b/docs/models/toolmessage.md @@ -8,4 +8,4 @@ | `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | | `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolmessagerole.md b/docs/models/toolmessagerole.md deleted file mode 100644 index c24e59c0..00000000 --- a/docs/models/toolmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `TOOL` | tool | \ No newline at end of file diff --git a/docs/models/transcriptionsegmentchunk.md b/docs/models/transcriptionsegmentchunk.md index f620b96a..00a599ee 100644 --- a/docs/models/transcriptionsegmentchunk.md +++ b/docs/models/transcriptionsegmentchunk.md @@ -3,12 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `start` | *float* | :heavy_check_mark: | N/A | -| `end` | *float* | :heavy_check_mark: | N/A | -| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | 
:heavy_minus_sign: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` | *float* | :heavy_check_mark: | N/A | +| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `type` | [Optional[models.TranscriptionSegmentChunkType]](../models/transcriptionsegmentchunktype.md) | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/type.md b/docs/models/transcriptionsegmentchunktype.md similarity index 84% rename from docs/models/type.md rename to docs/models/transcriptionsegmentchunktype.md index d05ead75..2968fa26 100644 --- a/docs/models/type.md +++ b/docs/models/transcriptionsegmentchunktype.md @@ -1,4 +1,4 @@ -# Type +# TranscriptionSegmentChunkType ## Values diff --git a/docs/models/transcriptionstreamdone.md b/docs/models/transcriptionstreamdone.md index 9ecf7d9c..bca69a2b 100644 --- a/docs/models/transcriptionstreamdone.md +++ b/docs/models/transcriptionstreamdone.md @@ -3,12 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `text` | *str* | :heavy_check_mark: | N/A | -| `segments` | List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A | -| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.TranscriptionStreamDoneType]](../models/transcriptionstreamdonetype.md) | :heavy_minus_sign: | N/A | -| `language` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `segments` | List[[models.TranscriptionSegmentChunk](../models/transcriptionsegmentchunk.md)] | :heavy_minus_sign: | N/A | +| `usage` | [models.UsageInfo](../models/usageinfo.md) | :heavy_check_mark: | N/A | +| `type` | *Literal["transcription.done"]* | :heavy_check_mark: | N/A | +| `language` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamdonetype.md b/docs/models/transcriptionstreamdonetype.md deleted file mode 100644 index db092c4f..00000000 --- a/docs/models/transcriptionstreamdonetype.md +++ /dev/null @@ -1,8 +0,0 @@ -# TranscriptionStreamDoneType - - -## Values - -| Name | Value | -| -------------------- | -------------------- | -| `TRANSCRIPTION_DONE` | transcription.done | \ 
No newline at end of file diff --git a/docs/models/transcriptionstreamlanguage.md b/docs/models/transcriptionstreamlanguage.md index e16c8fdc..63fcfbc6 100644 --- a/docs/models/transcriptionstreamlanguage.md +++ b/docs/models/transcriptionstreamlanguage.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------ | -| `type` | [Optional[models.TranscriptionStreamLanguageType]](../models/transcriptionstreamlanguagetype.md) | :heavy_minus_sign: | N/A | -| `audio_language` | *str* | :heavy_check_mark: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `type` | *Literal["transcription.language"]* | :heavy_check_mark: | N/A | +| `audio_language` | *str* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamlanguagetype.md b/docs/models/transcriptionstreamlanguagetype.md deleted file mode 100644 index e93521e1..00000000 --- a/docs/models/transcriptionstreamlanguagetype.md +++ /dev/null @@ -1,8 +0,0 @@ -# TranscriptionStreamLanguageType - - -## Values - -| Name | Value | -| ------------------------ | ------------------------ | -| `TRANSCRIPTION_LANGUAGE` | transcription.language | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdelta.md 
b/docs/models/transcriptionstreamsegmentdelta.md index 2ab32f97..e0143a39 100644 --- a/docs/models/transcriptionstreamsegmentdelta.md +++ b/docs/models/transcriptionstreamsegmentdelta.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------- | -| `text` | *str* | :heavy_check_mark: | N/A | -| `start` | *float* | :heavy_check_mark: | N/A | -| `end` | *float* | :heavy_check_mark: | N/A | -| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.TranscriptionStreamSegmentDeltaType]](../models/transcriptionstreamsegmentdeltatype.md) | :heavy_minus_sign: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------- | ---------------------------------- | ---------------------------------- | ---------------------------------- | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` | *float* | :heavy_check_mark: | N/A | +| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `type` | *Literal["transcription.segment"]* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdeltatype.md b/docs/models/transcriptionstreamsegmentdeltatype.md deleted file mode 100644 index 03ff3e8b..00000000 --- a/docs/models/transcriptionstreamsegmentdeltatype.md +++ /dev/null @@ -1,8 +0,0 @@ -# 
TranscriptionStreamSegmentDeltaType - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `TRANSCRIPTION_SEGMENT` | transcription.segment | \ No newline at end of file diff --git a/docs/models/transcriptionstreamtextdelta.md b/docs/models/transcriptionstreamtextdelta.md index adddfe18..a4062171 100644 --- a/docs/models/transcriptionstreamtextdelta.md +++ b/docs/models/transcriptionstreamtextdelta.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.TranscriptionStreamTextDeltaType]](../models/transcriptionstreamtextdeltatype.md) | :heavy_minus_sign: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------- | ------------------------------------- | ------------------------------------- | ------------------------------------- | +| `text` | *str* | :heavy_check_mark: | N/A | +| `type` | *Literal["transcription.text.delta"]* | :heavy_check_mark: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamtextdeltatype.md b/docs/models/transcriptionstreamtextdeltatype.md deleted file mode 100644 index b7c9d675..00000000 --- a/docs/models/transcriptionstreamtextdeltatype.md +++ /dev/null @@ -1,8 +0,0 @@ -# TranscriptionStreamTextDeltaType - - -## Values - -| Name | Value | -| -------------------------- | 
-------------------------- | -| `TRANSCRIPTION_TEXT_DELTA` | transcription.text.delta | \ No newline at end of file diff --git a/docs/models/usermessage.md b/docs/models/usermessage.md index 63b01310..78ed066e 100644 --- a/docs/models/usermessage.md +++ b/docs/models/usermessage.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | | `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/usermessagerole.md b/docs/models/usermessagerole.md deleted file mode 100644 index 171124e4..00000000 --- a/docs/models/usermessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# UserMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `USER` | user | \ No newline at end of file diff --git a/docs/models/wandbintegration.md b/docs/models/wandbintegration.md index 199d2edd..c73952d9 100644 --- a/docs/models/wandbintegration.md +++ b/docs/models/wandbintegration.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -| `type` | [Optional[models.WandbIntegrationType]](../models/wandbintegrationtype.md) | :heavy_minus_sign: | N/A | +| `type` | 
*Literal["wandb"]* | :heavy_check_mark: | N/A | | `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | | `api_key` | *str* | :heavy_check_mark: | The WandB API key to use for authentication. | diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationout.md index cec02ed8..a6f65667 100644 --- a/docs/models/wandbintegrationout.md +++ b/docs/models/wandbintegrationout.md @@ -3,10 +3,10 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `type` | [Optional[models.WandbIntegrationOutType]](../models/wandbintegrationouttype.md) | :heavy_minus_sign: | N/A | -| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. 
| -| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["wandb"]* | :heavy_check_mark: | N/A | +| `project` | *str* | :heavy_check_mark: | The name of the project that the new run will be created under. | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | A display name to set for the run. If not set, will use the job ID as the name. | +| `run_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationouttype.md b/docs/models/wandbintegrationouttype.md deleted file mode 100644 index 5a7533c9..00000000 --- a/docs/models/wandbintegrationouttype.md +++ /dev/null @@ -1,8 +0,0 @@ -# WandbIntegrationOutType - - -## Values - -| Name | Value | -| ------- | ------- | -| `WANDB` | wandb | \ No newline at end of file diff --git a/docs/models/wandbintegrationtype.md b/docs/models/wandbintegrationtype.md deleted file mode 100644 index 4fdffe22..00000000 --- a/docs/models/wandbintegrationtype.md +++ /dev/null @@ -1,8 +0,0 @@ -# WandbIntegrationType - - -## Values - -| Name | Value | -| ------- | ------- | -| `WANDB` | wandb | \ No newline at end of file diff --git a/docs/models/websearchpremiumtool.md b/docs/models/websearchpremiumtool.md index 941fc2b8..07b8b926 100644 --- a/docs/models/websearchpremiumtool.md +++ b/docs/models/websearchpremiumtool.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| 
---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `type` | [Optional[models.WebSearchPremiumToolType]](../models/websearchpremiumtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | +| `type` | *Literal["web_search_premium"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/websearchpremiumtooltype.md b/docs/models/websearchpremiumtooltype.md deleted file mode 100644 index 348bfe85..00000000 --- a/docs/models/websearchpremiumtooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# WebSearchPremiumToolType - - -## Values - -| Name | Value | -| -------------------- | -------------------- | -| `WEB_SEARCH_PREMIUM` | web_search_premium | \ No newline at end of file diff --git a/docs/models/websearchtool.md b/docs/models/websearchtool.md index c8d708bd..da5e7b7b 100644 --- a/docs/models/websearchtool.md +++ b/docs/models/websearchtool.md @@ -3,6 +3,6 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `type` | [Optional[models.WebSearchToolType]](../models/websearchtooltype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- 
| +| `type` | *Literal["web_search"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/websearchtooltype.md b/docs/models/websearchtooltype.md deleted file mode 100644 index 57b6acbb..00000000 --- a/docs/models/websearchtooltype.md +++ /dev/null @@ -1,8 +0,0 @@ -# WebSearchToolType - - -## Values - -| Name | Value | -| ------------ | ------------ | -| `WEB_SEARCH` | web_search | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index 75efc492..cd3ec4c6 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -43,7 +43,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.AgentsCompletionRequestMessages](../../models/agentscompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.AgentsCompletionRequestMessage](../../models/agentscompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | @@ -108,7 +108,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `messages` | List[[models.AgentsCompletionStreamRequestMessages](../../models/agentscompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.AgentsCompletionStreamRequestMessage](../../models/agentscompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `agent_id` | *str* | :heavy_check_mark: | The ID of the agent to use for this completion. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | diff --git a/docs/sdks/mistraljobs/README.md b/docs/sdks/batchjobs/README.md similarity index 100% rename from docs/sdks/mistraljobs/README.md rename to docs/sdks/batchjobs/README.md diff --git a/docs/sdks/mistralagents/README.md b/docs/sdks/betaagents/README.md similarity index 78% rename from docs/sdks/mistralagents/README.md rename to docs/sdks/betaagents/README.md index fe0f6e35..8d23b875 100644 --- a/docs/sdks/mistralagents/README.md +++ b/docs/sdks/betaagents/README.md @@ -46,17 +46,17 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentCreationRequestTools](../../models/agentcreationrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| -| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentCreationRequestTool](../../models/agentcreationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -141,11 +141,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | [OptionalNullable[models.QueryParamAgentVersion]](../../models/queryparamagentversion.md) | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -187,19 +187,19 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentUpdateRequestTools](../../models/agentupdaterequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.AgentUpdateRequestTool](../../models/agentupdaterequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 89c4fffb..6907c29d 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -44,12 +44,12 @@ with Mistral( | Parameter | Type | Required | Description | Example | | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | -| `messages` | List[[models.Messages](../../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionRequestMessage](../../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON. | | -| `stop` | [Optional[models.Stop]](../../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | @@ -112,7 +112,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions. | mistral-large-latest | -| `messages` | List[[models.ChatCompletionStreamRequestMessages](../../models/chatcompletionstreamrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value. | | | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 634ee419..41b52081 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -87,11 +87,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `inputs` | [models.ChatModerationRequestInputs](../../models/chatmoderationrequestinputs.md) | :heavy_check_mark: | Chat to classify | -| `model` | *str* | :heavy_check_mark: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | +| `inputs` | [models.ChatModerationRequestInputs3](../../models/chatmoderationrequestinputs3.md) | :heavy_check_mark: | Chat to classify | +| `model` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index acd43cdb..6aae03c5 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -47,22 +47,22 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | -| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | -| `handoff_execution` | [OptionalNullable[models.HandoffExecution]](../../models/handoffexecution.md) | :heavy_minus_sign: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.Tools](../../models/tools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| -| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_version` | [OptionalNullable[models.AgentVersion]](../../models/agentversion.md) | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | +| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | +| `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `handoff_execution` | [OptionalNullable[models.ConversationRequestHandoffExecution]](../../models/conversationrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `tools` | List[[models.ConversationRequestTool](../../models/conversationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_version` | [OptionalNullable[models.ConversationRequestAgentVersion]](../../models/conversationrequestagentversion.md) | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -109,7 +109,7 @@ with Mistral( ### Response -**[List[models.ResponseBody]](../../models/.md)** +**[List[models.AgentsAPIV1ConversationsListResponse]](../../models/.md)** ### Errors @@ -150,7 +150,7 @@ with Mistral( ### Response -**[models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet](../../models/agentsapiv1conversationsgetresponsev1conversationsget.md)** +**[models.ResponseV1ConversationsGet](../../models/responsev1conversationsget.md)** ### Errors @@ -425,7 +425,7 @@ with Mistral( | `store` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `handoff_execution` | [OptionalNullable[models.ConversationStreamRequestHandoffExecution]](../../models/conversationstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `tools` | List[[models.ConversationStreamRequestTools](../../models/conversationstreamrequesttools.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| +| `tools` | List[[models.ConversationStreamRequestTool](../../models/conversationstreamrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [OptionalNullable[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | diff --git a/docs/sdks/jobs/README.md b/docs/sdks/finetuningjobs/README.md similarity index 83% rename from docs/sdks/jobs/README.md rename to docs/sdks/finetuningjobs/README.md index 9c44be75..63897fd6 100644 --- a/docs/sdks/jobs/README.md +++ b/docs/sdks/finetuningjobs/README.md @@ -35,19 +35,19 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. 
| -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | -| `status` | [OptionalNullable[models.QueryParamStatus]](../../models/queryparamstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. 
| +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -93,11 +93,11 @@ with Mistral( | `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. 
If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.JobInIntegrations](../../models/jobinintegrations.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `integrations` | List[[models.JobInIntegration](../../models/jobinintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | | `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | | `job_type` | [OptionalNullable[models.FineTuneableModelType]](../../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | -| `repositories` | List[[models.JobInRepositories](../../models/jobinrepositories.md)] | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.JobInRepository](../../models/jobinrepository.md)] | :heavy_minus_sign: | N/A | | `classifier_targets` | List[[models.ClassifierTargetIn](../../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 6fa28ca2..129ea223 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -84,7 +84,7 @@ with Mistral( ### Response -**[models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet](../../models/retrievemodelv1modelsmodelidgetresponseretrievemodelv1modelsmodelidget.md)** +**[models.ResponseRetrieveModelV1ModelsModelIDGet](../../models/responseretrievemodelv1modelsmodelidget.md)** ### Errors diff --git a/examples/azure/az_chat_no_streaming.py b/examples/azure/az_chat_no_streaming.py new file mode 100644 index 00000000..4d5530dc --- /dev/null +++ b/examples/azure/az_chat_no_streaming.py @@ -0,0 +1,15 @@ +import os + +from mistralai_azure import MistralAzure +from mistralai_azure.models import ChatCompletionRequestMessages, UserMessage + +client = MistralAzure( + azure_api_key=os.environ["AZURE_API_KEY"], + azure_endpoint=os.environ["AZURE_ENDPOINT"], +) + +messages: list[ChatCompletionRequestMessages] = [ + UserMessage(content="What is the capital of France?"), +] +res = client.chat.complete(messages=messages) +print(res.choices[0].message.content) diff --git a/examples/azure/az_chat_no_streaming.py.py b/examples/azure/az_chat_no_streaming.py.py deleted file mode 100644 index 485b594e..00000000 --- a/examples/azure/az_chat_no_streaming.py.py +++ /dev/null @@ -1,16 +0,0 @@ -import os - -from mistralai_azure import MistralAzure - -client = MistralAzure( - azure_api_key=os.environ["AZURE_API_KEY"], - azure_endpoint=os.environ["AZURE_ENDPOINT"], -) - -res = client.chat.complete( - messages=[ - {"role": "user", "content": "What is the capital of France?"}, - ], - # you don't need model as it will always be "azureai" -) -print(res.choices[0].message.content) diff --git a/examples/azure/chat_no_streaming.py b/examples/azure/chat_no_streaming.py new file mode 100644 index 00000000..4d5530dc --- /dev/null +++ b/examples/azure/chat_no_streaming.py @@ -0,0 +1,15 @@ 
+import os + +from mistralai_azure import MistralAzure +from mistralai_azure.models import ChatCompletionRequestMessages, UserMessage + +client = MistralAzure( + azure_api_key=os.environ["AZURE_API_KEY"], + azure_endpoint=os.environ["AZURE_ENDPOINT"], +) + +messages: list[ChatCompletionRequestMessages] = [ + UserMessage(content="What is the capital of France?"), +] +res = client.chat.complete(messages=messages) +print(res.choices[0].message.content) diff --git a/examples/azure/chat_no_streaming.py.py b/examples/azure/chat_no_streaming.py.py deleted file mode 100644 index 485b594e..00000000 --- a/examples/azure/chat_no_streaming.py.py +++ /dev/null @@ -1,16 +0,0 @@ -import os - -from mistralai_azure import MistralAzure - -client = MistralAzure( - azure_api_key=os.environ["AZURE_API_KEY"], - azure_endpoint=os.environ["AZURE_ENDPOINT"], -) - -res = client.chat.complete( - messages=[ - {"role": "user", "content": "What is the capital of France?"}, - ], - # you don't need model as it will always be "azureai" -) -print(res.choices[0].message.content) diff --git a/examples/mistral/chat/chatbot_with_streaming.py b/examples/mistral/chat/chatbot_with_streaming.py index bbc3881f..eae79dcf 100755 --- a/examples/mistral/chat/chatbot_with_streaming.py +++ b/examples/mistral/chat/chatbot_with_streaming.py @@ -7,6 +7,7 @@ import os import readline import sys +from typing import Any from mistralai.client import Mistral from mistralai.client.models import AssistantMessage, SystemMessage, UserMessage @@ -21,7 +22,7 @@ DEFAULT_TEMPERATURE = 0.7 LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s" # A dictionary of all commands and their arguments, used for tab completion. 
-COMMAND_LIST = { +COMMAND_LIST: dict[str, Any] = { "/new": {}, "/help": {}, "/model": {model: {} for model in MODEL_LIST}, # Nested completions for models diff --git a/examples/mistral/chat/function_calling.py b/examples/mistral/chat/function_calling.py index f0eb9e70..68e9d91c 100644 --- a/examples/mistral/chat/function_calling.py +++ b/examples/mistral/chat/function_calling.py @@ -1,16 +1,20 @@ import functools import json import os -from typing import Dict, List +from typing import Any from mistralai.client import Mistral -from mistralai.client.models.assistantmessage import AssistantMessage -from mistralai.client.models.function import Function -from mistralai.client.models.toolmessage import ToolMessage -from mistralai.client.models.usermessage import UserMessage +from mistralai.client.models import ( + AssistantMessage, + ChatCompletionRequestMessage, + Function, + Tool, + ToolMessage, + UserMessage, +) # Assuming we have the following data -data = { +data: dict[str, list[Any]] = { "transaction_id": ["T1001", "T1002", "T1003", "T1004", "T1005"], "customer_id": ["C001", "C002", "C003", "C002", "C001"], "payment_amount": [125.50, 89.99, 120.00, 54.30, 210.20], @@ -25,20 +29,18 @@ } -def retrieve_payment_status(data: Dict[str, List], transaction_id: str) -> str: +def retrieve_payment_status(data: dict[str, list[Any]], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"status": data["payment_status"][i]}) - else: - return json.dumps({"status": "Error - transaction id not found"}) + return json.dumps({"status": "Error - transaction id not found"}) -def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: +def retrieve_payment_date(data: dict[str, list[Any]], transaction_id: str) -> str: for i, r in enumerate(data["transaction_id"]): if r == transaction_id: return json.dumps({"date": data["payment_date"][i]}) - else: - return json.dumps({"status": "Error - transaction id not 
found"}) + return json.dumps({"status": "Error - transaction id not found"}) names_to_functions = { @@ -46,10 +48,9 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: "retrieve_payment_date": functools.partial(retrieve_payment_date, data=data), } -tools = [ - { - "type": "function", - "function": Function( +tools: list[Tool] = [ + Tool( + function=Function( name="retrieve_payment_status", description="Get payment status of a transaction id", parameters={ @@ -63,10 +64,9 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: }, }, ), - }, - { - "type": "function", - "function": Function( + ), + Tool( + function=Function( name="retrieve_payment_date", description="Get payment date of a transaction id", parameters={ @@ -80,7 +80,7 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: }, }, ), - }, + ), ] api_key = os.environ["MISTRAL_API_KEY"] @@ -88,28 +88,27 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: client = Mistral(api_key=api_key) -messages = [UserMessage(content="What's the status of my transaction?")] +messages: list[ChatCompletionRequestMessage] = [ + UserMessage(content="What's the status of my transaction?") +] -response = client.chat.complete( - model=model, messages=messages, tools=tools, temperature=0 -) +response = client.chat.complete(model=model, messages=messages, tools=tools, temperature=0) print(response.choices[0].message.content) messages.append(AssistantMessage(content=response.choices[0].message.content)) messages.append(UserMessage(content="My transaction ID is T1001.")) -response = client.chat.complete( - model=model, messages=messages, tools=tools, temperature=0 -) +response = client.chat.complete(model=model, messages=messages, tools=tools, temperature=0) -tool_call = response.choices[0].message.tool_calls[0] +tool_calls = response.choices[0].message.tool_calls +if not tool_calls: + raise RuntimeError("Expected tool 
calls") +tool_call = tool_calls[0] function_name = tool_call.function.name -function_params = json.loads(tool_call.function.arguments) +function_params = json.loads(str(tool_call.function.arguments)) -print( - f"calling function_name: {function_name}, with function_params: {function_params}" -) +print(f"calling function_name: {function_name}, with function_params: {function_params}") function_result = names_to_functions[function_name](**function_params) @@ -128,8 +127,6 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: ) print(messages) -response = client.chat.complete( - model=model, messages=messages, tools=tools, temperature=0 -) +response = client.chat.complete(model=model, messages=messages, tools=tools, temperature=0) print(f"{response.choices[0].message.content}") diff --git a/examples/mistral/classifier/async_classifier.py b/examples/mistral/classifier/async_classifier.py index d5ee6cc1..45cc14fa 100644 --- a/examples/mistral/classifier/async_classifier.py +++ b/examples/mistral/classifier/async_classifier.py @@ -3,6 +3,7 @@ from pprint import pprint import asyncio from mistralai.client import Mistral, TrainingFile, ClassifierTrainingParametersIn +from mistralai.client.models import ClassifierJobOut import os @@ -26,7 +27,7 @@ async def upload_files(client: Mistral, file_names: list[str]) -> list[str]: return file_ids -async def train_classifier(client: Mistral,training_file_ids: list[str]) -> str: +async def train_classifier(client: Mistral, training_file_ids: list[str]) -> str | None: print("Creating job...") job = await client.fine_tuning.jobs.create_async( model="ministral-3b-latest", @@ -40,6 +41,9 @@ async def train_classifier(client: Mistral,training_file_ids: list[str]) -> str: ), auto_start=True, ) + if not isinstance(job, ClassifierJobOut): + print("Unexpected job type returned") + return None print(f"Job created ({job.id})") @@ -62,6 +66,9 @@ async def train_classifier(client: Mistral,training_file_ids: list[str]) 
-> str: print("Training failed") raise Exception(f"Job failed {detailed_job.status}") + if not detailed_job.fine_tuned_model: + print("No fine-tuned model returned") + return None print(f"Training succeed: {detailed_job.fine_tuned_model}") return detailed_job.fine_tuned_model diff --git a/pyproject.toml b/pyproject.toml index c9003a1e..7209c64c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "2.0.0a1" +version = "2.0.0a2" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh index 5bf9d675..57bab71a 100755 --- a/scripts/lint_custom_code.sh +++ b/scripts/lint_custom_code.sh @@ -11,8 +11,9 @@ else fi echo "Running mypy..." -# TODO: Uncomment once the examples are fixed -# uv run mypy examples/ || ERRORS=1 +echo "-> running on examples" +uv run mypy examples/ \ + --exclude 'audio/' || ERRORS=1 echo "-> running on extra" uv run mypy src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py index 8c5d6e54..5a7296a7 100644 --- a/src/mistralai/client/_version.py +++ b/src/mistralai/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "2.0.0a1" +__version__: str = "2.0.0a2" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 2.0.0a1 2.794.1 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 2.0.0a2 2.794.1 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/client/agents.py b/src/mistralai/client/agents.py index c04abd21..d0da9f07 100644 --- a/src/mistralai/client/agents.py +++ b/src/mistralai/client/agents.py @@ -24,9 +24,9 @@ def complete( self, *, messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessages], + 
List[models_agentscompletionrequest.AgentsCompletionRequestMessage], List[ - models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict + models_agentscompletionrequest.AgentsCompletionRequestMessageTypedDict ], ], agent_id: str, @@ -110,7 +110,7 @@ def complete( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionRequestMessages] + messages, List[models.AgentsCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -194,9 +194,9 @@ async def complete_async( self, *, messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessages], + List[models_agentscompletionrequest.AgentsCompletionRequestMessage], List[ - models_agentscompletionrequest.AgentsCompletionRequestMessagesTypedDict + models_agentscompletionrequest.AgentsCompletionRequestMessageTypedDict ], ], agent_id: str, @@ -280,7 +280,7 @@ async def complete_async( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionRequestMessages] + messages, List[models.AgentsCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -365,10 +365,10 @@ def stream( *, messages: Union[ List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessage ], List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessageTypedDict ], ], agent_id: str, @@ -454,7 +454,7 @@ def stream( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionStreamRequestMessages] + messages, List[models.AgentsCompletionStreamRequestMessage] ), response_format=utils.get_pydantic_model( response_format, 
Optional[models.ResponseFormat] @@ -547,10 +547,10 @@ async def stream_async( *, messages: Union[ List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessages + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessage ], List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessagesTypedDict + models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessageTypedDict ], ], agent_id: str, @@ -636,7 +636,7 @@ async def stream_async( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.AgentsCompletionStreamRequestMessages] + messages, List[models.AgentsCompletionStreamRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] diff --git a/src/mistralai/client/batch.py b/src/mistralai/client/batch.py index d53a45fb..586dc235 100644 --- a/src/mistralai/client/batch.py +++ b/src/mistralai/client/batch.py @@ -2,12 +2,12 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration -from mistralai.client.mistral_jobs import MistralJobs +from mistralai.client.batch_jobs import BatchJobs from typing import Optional class Batch(BaseSDK): - jobs: MistralJobs + jobs: BatchJobs def __init__( self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None @@ -17,4 +17,4 @@ def __init__( self._init_sdks() def _init_sdks(self): - self.jobs = MistralJobs(self.sdk_configuration, parent_ref=self.parent_ref) + self.jobs = BatchJobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/mistral_jobs.py b/src/mistralai/client/batch_jobs.py similarity index 99% rename from src/mistralai/client/mistral_jobs.py rename to src/mistralai/client/batch_jobs.py index eae44033..af8d97b2 100644 --- a/src/mistralai/client/mistral_jobs.py +++ b/src/mistralai/client/batch_jobs.py @@ -15,7 +15,7 @@ from typing import Any, Dict, List, Mapping, Optional, Union -class 
MistralJobs(BaseSDK): +class BatchJobs(BaseSDK): def list( self, *, diff --git a/src/mistralai/client/beta.py b/src/mistralai/client/beta.py index b30003ea..a1bd409e 100644 --- a/src/mistralai/client/beta.py +++ b/src/mistralai/client/beta.py @@ -2,16 +2,16 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration +from mistralai.client.beta_agents import BetaAgents from mistralai.client.conversations import Conversations from mistralai.client.libraries import Libraries -from mistralai.client.mistral_agents import MistralAgents from typing import Optional class Beta(BaseSDK): conversations: Conversations r"""(beta) Conversations API""" - agents: MistralAgents + agents: BetaAgents r"""(beta) Agents API""" libraries: Libraries r"""(beta) Libraries API to create and manage libraries - index your documents to enhance agent capabilities.""" @@ -27,5 +27,5 @@ def _init_sdks(self): self.conversations = Conversations( self.sdk_configuration, parent_ref=self.parent_ref ) - self.agents = MistralAgents(self.sdk_configuration, parent_ref=self.parent_ref) + self.agents = BetaAgents(self.sdk_configuration, parent_ref=self.parent_ref) self.libraries = Libraries(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/mistral_agents.py b/src/mistralai/client/beta_agents.py similarity index 99% rename from src/mistralai/client/mistral_agents.py rename to src/mistralai/client/beta_agents.py index 2ac7a29e..1420895e 100644 --- a/src/mistralai/client/mistral_agents.py +++ b/src/mistralai/client/beta_agents.py @@ -16,7 +16,7 @@ from typing import Any, Dict, List, Mapping, Optional, Union -class MistralAgents(BaseSDK): +class BetaAgents(BaseSDK): r"""(beta) Agents API""" def create( @@ -27,8 +27,8 @@ def create( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentcreationrequest.AgentCreationRequestTools], - List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], + 
List[models_agentcreationrequest.AgentCreationRequestTool], + List[models_agentcreationrequest.AgentCreationRequestToolTypedDict], ] ] = None, completion_args: Optional[ @@ -75,7 +75,7 @@ def create( request = models.AgentCreationRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTools]] + tools, Optional[List[models.AgentCreationRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -155,8 +155,8 @@ async def create_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentcreationrequest.AgentCreationRequestTools], - List[models_agentcreationrequest.AgentCreationRequestToolsTypedDict], + List[models_agentcreationrequest.AgentCreationRequestTool], + List[models_agentcreationrequest.AgentCreationRequestToolTypedDict], ] ] = None, completion_args: Optional[ @@ -203,7 +203,7 @@ async def create_async( request = models.AgentCreationRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTools]] + tools, Optional[List[models.AgentCreationRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -497,8 +497,8 @@ def get( agent_id: str, agent_version: OptionalNullable[ Union[ - models_agents_api_v1_agents_getop.QueryParamAgentVersion, - models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersion, + models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -595,8 +595,8 @@ async def get_async( agent_id: str, agent_version: OptionalNullable[ Union[ - models_agents_api_v1_agents_getop.QueryParamAgentVersion, - models_agents_api_v1_agents_getop.QueryParamAgentVersionTypedDict, + 
models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersion, + models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -694,8 +694,8 @@ def update( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentupdaterequest.AgentUpdateRequestTools], - List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], + List[models_agentupdaterequest.AgentUpdateRequestTool], + List[models_agentupdaterequest.AgentUpdateRequestToolTypedDict], ] ] = None, completion_args: Optional[ @@ -749,7 +749,7 @@ def update( agent_update_request=models.AgentUpdateRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTools]] + tools, Optional[List[models.AgentUpdateRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -834,8 +834,8 @@ async def update_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentupdaterequest.AgentUpdateRequestTools], - List[models_agentupdaterequest.AgentUpdateRequestToolsTypedDict], + List[models_agentupdaterequest.AgentUpdateRequestTool], + List[models_agentupdaterequest.AgentUpdateRequestToolTypedDict], ] ] = None, completion_args: Optional[ @@ -889,7 +889,7 @@ async def update_async( agent_update_request=models.AgentUpdateRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTools]] + tools, Optional[List[models.AgentUpdateRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py index 6fa210bb..523e3340 100644 --- a/src/mistralai/client/chat.py +++ b/src/mistralai/client/chat.py @@ -110,8 +110,8 @@ def complete( *, model: str, messages: Union[ - List[models_chatcompletionrequest.Messages], - 
List[models_chatcompletionrequest.MessagesTypedDict], + List[models_chatcompletionrequest.ChatCompletionRequestMessage], + List[models_chatcompletionrequest.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -119,8 +119,8 @@ def complete( stream: Optional[bool] = False, stop: Optional[ Union[ - models_chatcompletionrequest.Stop, - models_chatcompletionrequest.StopTypedDict, + models_chatcompletionrequest.ChatCompletionRequestStop, + models_chatcompletionrequest.ChatCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -201,7 +201,9 @@ def complete( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -285,8 +287,8 @@ async def complete_async( *, model: str, messages: Union[ - List[models_chatcompletionrequest.Messages], - List[models_chatcompletionrequest.MessagesTypedDict], + List[models_chatcompletionrequest.ChatCompletionRequestMessage], + List[models_chatcompletionrequest.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -294,8 +296,8 @@ async def complete_async( stream: Optional[bool] = False, stop: Optional[ Union[ - models_chatcompletionrequest.Stop, - models_chatcompletionrequest.StopTypedDict, + models_chatcompletionrequest.ChatCompletionRequestStop, + models_chatcompletionrequest.ChatCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -376,7 +378,9 @@ async def complete_async( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, 
List[models.ChatCompletionRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -460,11 +464,9 @@ def stream( *, model: str, messages: Union[ + List[models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessage], List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages - ], - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessageTypedDict ], ], temperature: OptionalNullable[float] = UNSET, @@ -558,7 +560,7 @@ def stream( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionStreamRequestMessages] + messages, List[models.ChatCompletionStreamRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -651,11 +653,9 @@ async def stream_async( *, model: str, messages: Union[ + List[models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessage], List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessages - ], - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessagesTypedDict + models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessageTypedDict ], ], temperature: OptionalNullable[float] = UNSET, @@ -749,7 +749,7 @@ async def stream_async( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionStreamRequestMessages] + messages, List[models.ChatCompletionStreamRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] diff --git a/src/mistralai/client/classifiers.py b/src/mistralai/client/classifiers.py index 537e2438..327653d1 100644 --- a/src/mistralai/client/classifiers.py +++ b/src/mistralai/client/classifiers.py @@ -221,8 +221,8 @@ def moderate_chat( self, *, inputs: Union[ - 
models_chatmoderationrequest.ChatModerationRequestInputs, - models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, + models_chatmoderationrequest.ChatModerationRequestInputs3, + models_chatmoderationrequest.ChatModerationRequestInputs3TypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -250,7 +250,9 @@ def moderate_chat( base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( - inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + inputs=utils.get_pydantic_model( + inputs, models.ChatModerationRequestInputs3 + ), model=model, ) @@ -318,8 +320,8 @@ async def moderate_chat_async( self, *, inputs: Union[ - models_chatmoderationrequest.ChatModerationRequestInputs, - models_chatmoderationrequest.ChatModerationRequestInputsTypedDict, + models_chatmoderationrequest.ChatModerationRequestInputs3, + models_chatmoderationrequest.ChatModerationRequestInputs3TypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -347,7 +349,9 @@ async def moderate_chat_async( base_url = self._get_url(base_url, url_variables) request = models.ChatModerationRequest( - inputs=utils.get_pydantic_model(inputs, models.ChatModerationRequestInputs), + inputs=utils.get_pydantic_model( + inputs, models.ChatModerationRequestInputs3 + ), model=model, ) diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py index 285beddb..aa037bd2 100644 --- a/src/mistralai/client/conversations.py +++ b/src/mistralai/client/conversations.py @@ -60,7 +60,7 @@ async def run_async( inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], instructions: OptionalNullable[str] = UNSET, tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] + Union[List[models.ConversationRequestTool], List[models.ConversationRequestToolTypedDict]] ] = UNSET, completion_args: OptionalNullable[ Union[models.CompletionArgs, 
models.CompletionArgsTypedDict] @@ -133,7 +133,7 @@ async def run_stream_async( inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], instructions: OptionalNullable[str] = UNSET, tools: OptionalNullable[ - Union[List[models.Tools], List[models.ToolsTypedDict]] + Union[List[models.ConversationRequestTool], List[models.ConversationRequestToolTypedDict]] ] = UNSET, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] @@ -238,13 +238,13 @@ def start( stream: Optional[bool] = False, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models_conversationrequest.HandoffExecution + models_conversationrequest.ConversationRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationrequest.Tools], - List[models_conversationrequest.ToolsTypedDict], + List[models_conversationrequest.ConversationRequestTool], + List[models_conversationrequest.ConversationRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ @@ -259,8 +259,8 @@ def start( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrequest.AgentVersion, - models_conversationrequest.AgentVersionTypedDict, + models_conversationrequest.ConversationRequestAgentVersion, + models_conversationrequest.ConversationRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -307,7 +307,9 @@ def start( store=store, handoff_execution=handoff_execution, instructions=instructions, - tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationRequestTool]] + ), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), @@ -389,13 +391,13 @@ async def start_async( stream: Optional[bool] = False, store: OptionalNullable[bool] = UNSET, handoff_execution: 
OptionalNullable[ - models_conversationrequest.HandoffExecution + models_conversationrequest.ConversationRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationrequest.Tools], - List[models_conversationrequest.ToolsTypedDict], + List[models_conversationrequest.ConversationRequestTool], + List[models_conversationrequest.ConversationRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ @@ -410,8 +412,8 @@ async def start_async( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrequest.AgentVersion, - models_conversationrequest.AgentVersionTypedDict, + models_conversationrequest.ConversationRequestAgentVersion, + models_conversationrequest.ConversationRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -458,7 +460,9 @@ async def start_async( store=store, handoff_execution=handoff_execution, instructions=instructions, - tools=utils.get_pydantic_model(tools, Optional[List[models.Tools]]), + tools=utils.get_pydantic_model( + tools, Optional[List[models.ConversationRequestTool]] + ), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] ), @@ -540,7 +544,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ResponseBody]: + ) -> List[models.AgentsAPIV1ConversationsListResponse]: r"""List all created conversations. Retrieve a list of conversation entities sorted by creation time. 
@@ -611,7 +615,9 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.ResponseBody], http_res) + return unmarshal_json_response( + List[models.AgentsAPIV1ConversationsListResponse], http_res + ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -636,7 +642,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ResponseBody]: + ) -> List[models.AgentsAPIV1ConversationsListResponse]: r"""List all created conversations. Retrieve a list of conversation entities sorted by creation time. @@ -707,7 +713,9 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(List[models.ResponseBody], http_res) + return unmarshal_json_response( + List[models.AgentsAPIV1ConversationsListResponse], http_res + ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -730,7 +738,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + ) -> models.ResponseV1ConversationsGet: r"""Retrieve a conversation information. Given a conversation_id retrieve a conversation entity with its attributes. 
@@ -797,9 +805,7 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res - ) + return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -822,7 +828,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet: + ) -> models.ResponseV1ConversationsGet: r"""Retrieve a conversation information. Given a conversation_id retrieve a conversation entity with its attributes. @@ -889,9 +895,7 @@ async def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.AgentsAPIV1ConversationsGetResponseV1ConversationsGet, http_res - ) + return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( models.HTTPValidationErrorData, http_res @@ -1993,9 +1997,9 @@ def start_stream( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTools], + List[models_conversationstreamrequest.ConversationStreamRequestTool], List[ - models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict + models_conversationstreamrequest.ConversationStreamRequestToolTypedDict ], ] ] = None, @@ -2060,7 +2064,7 @@ def start_stream( handoff_execution=handoff_execution, instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.ConversationStreamRequestTools]] + tools, Optional[List[models.ConversationStreamRequestTool]] ), 
completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] @@ -2155,9 +2159,9 @@ async def start_stream_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTools], + List[models_conversationstreamrequest.ConversationStreamRequestTool], List[ - models_conversationstreamrequest.ConversationStreamRequestToolsTypedDict + models_conversationstreamrequest.ConversationStreamRequestToolTypedDict ], ] ] = None, @@ -2222,7 +2226,7 @@ async def start_stream_async( handoff_execution=handoff_execution, instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.ConversationStreamRequestTools]] + tools, Optional[List[models.ConversationStreamRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, OptionalNullable[models.CompletionArgs] diff --git a/src/mistralai/client/documents.py b/src/mistralai/client/documents.py index 009a604f..3316e63b 100644 --- a/src/mistralai/client/documents.py +++ b/src/mistralai/client/documents.py @@ -273,7 +273,7 @@ def upload( request = models.LibrariesDocumentsUploadV1Request( library_id=library_id, - request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), ), ) @@ -292,11 +292,7 @@ def upload( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.request_body, - False, - False, - "multipart", - models.LibrariesDocumentsUploadV1DocumentUpload, + request.request_body, False, False, "multipart", models.DocumentUpload ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -383,7 +379,7 @@ async def upload_async( request = models.LibrariesDocumentsUploadV1Request( library_id=library_id, - request_body=models.LibrariesDocumentsUploadV1DocumentUpload( + request_body=models.DocumentUpload( 
file=utils.get_pydantic_model(file, models.File), ), ) @@ -402,11 +398,7 @@ async def upload_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.request_body, - False, - False, - "multipart", - models.LibrariesDocumentsUploadV1DocumentUpload, + request.request_body, False, False, "multipart", models.DocumentUpload ), allow_empty_value=None, timeout_ms=timeout_ms, diff --git a/src/mistralai/client/files.py b/src/mistralai/client/files.py index 97817eab..b384cda4 100644 --- a/src/mistralai/client/files.py +++ b/src/mistralai/client/files.py @@ -62,7 +62,7 @@ def upload( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + request = models.MultiPartBodyParams( purpose=purpose, file=utils.get_pydantic_model(file, models.File), ) @@ -81,11 +81,7 @@ def upload( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.FilesAPIRoutesUploadFileMultiPartBodyParams, + request, False, False, "multipart", models.MultiPartBodyParams ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -168,7 +164,7 @@ async def upload_async( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesUploadFileMultiPartBodyParams( + request = models.MultiPartBodyParams( purpose=purpose, file=utils.get_pydantic_model(file, models.File), ) @@ -187,11 +183,7 @@ async def upload_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, - False, - False, - "multipart", - models.FilesAPIRoutesUploadFileMultiPartBodyParams, + request, False, False, "multipart", models.MultiPartBodyParams ), allow_empty_value=None, timeout_ms=timeout_ms, diff --git a/src/mistralai/client/fine_tuning.py 
b/src/mistralai/client/fine_tuning.py index c57425fd..aeb832d4 100644 --- a/src/mistralai/client/fine_tuning.py +++ b/src/mistralai/client/fine_tuning.py @@ -2,12 +2,12 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration -from mistralai.client.jobs import Jobs +from mistralai.client.fine_tuning_jobs import FineTuningJobs from typing import Optional class FineTuning(BaseSDK): - jobs: Jobs + jobs: FineTuningJobs def __init__( self, sdk_config: SDKConfiguration, parent_ref: Optional[object] = None @@ -17,4 +17,4 @@ def __init__( self._init_sdks() def _init_sdks(self): - self.jobs = Jobs(self.sdk_configuration, parent_ref=self.parent_ref) + self.jobs = FineTuningJobs(self.sdk_configuration, parent_ref=self.parent_ref) diff --git a/src/mistralai/client/jobs.py b/src/mistralai/client/fine_tuning_jobs.py similarity index 98% rename from src/mistralai/client/jobs.py rename to src/mistralai/client/fine_tuning_jobs.py index 848926ea..fb75e8c7 100644 --- a/src/mistralai/client/jobs.py +++ b/src/mistralai/client/fine_tuning_jobs.py @@ -17,7 +17,7 @@ from typing import List, Mapping, Optional, Union -class Jobs(BaseSDK): +class FineTuningJobs(BaseSDK): def list( self, *, @@ -28,7 +28,7 @@ def list( created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[ - models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus + models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.JobsAPIRoutesFineTuningGetFineTuningJobsStatus ] = UNSET, wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, @@ -141,7 +141,7 @@ async def list_async( created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[ - models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.QueryParamStatus + models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.JobsAPIRoutesFineTuningGetFineTuningJobsStatus ] = UNSET, 
wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, @@ -261,8 +261,8 @@ def create( suffix: OptionalNullable[str] = UNSET, integrations: OptionalNullable[ Union[ - List[models_jobin.JobInIntegrations], - List[models_jobin.JobInIntegrationsTypedDict], + List[models_jobin.JobInIntegration], + List[models_jobin.JobInIntegrationTypedDict], ] ] = UNSET, auto_start: Optional[bool] = None, @@ -272,8 +272,8 @@ def create( ] = UNSET, repositories: OptionalNullable[ Union[ - List[models_jobin.JobInRepositories], - List[models_jobin.JobInRepositoriesTypedDict], + List[models_jobin.JobInRepository], + List[models_jobin.JobInRepositoryTypedDict], ] ] = UNSET, classifier_targets: OptionalNullable[ @@ -325,7 +325,7 @@ def create( validation_files=validation_files, suffix=suffix, integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegrations]] + integrations, OptionalNullable[List[models.JobInIntegration]] ), auto_start=auto_start, invalid_sample_skip_percentage=invalid_sample_skip_percentage, @@ -334,7 +334,7 @@ def create( hyperparameters, models.Hyperparameters ), repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepositories]] + repositories, OptionalNullable[List[models.JobInRepository]] ), classifier_targets=utils.get_pydantic_model( classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] @@ -414,8 +414,8 @@ async def create_async( suffix: OptionalNullable[str] = UNSET, integrations: OptionalNullable[ Union[ - List[models_jobin.JobInIntegrations], - List[models_jobin.JobInIntegrationsTypedDict], + List[models_jobin.JobInIntegration], + List[models_jobin.JobInIntegrationTypedDict], ] ] = UNSET, auto_start: Optional[bool] = None, @@ -425,8 +425,8 @@ async def create_async( ] = UNSET, repositories: OptionalNullable[ Union[ - List[models_jobin.JobInRepositories], - List[models_jobin.JobInRepositoriesTypedDict], + List[models_jobin.JobInRepository], + 
List[models_jobin.JobInRepositoryTypedDict], ] ] = UNSET, classifier_targets: OptionalNullable[ @@ -478,7 +478,7 @@ async def create_async( validation_files=validation_files, suffix=suffix, integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegrations]] + integrations, OptionalNullable[List[models.JobInIntegration]] ), auto_start=auto_start, invalid_sample_skip_percentage=invalid_sample_skip_percentage, @@ -487,7 +487,7 @@ async def create_async( hyperparameters, models.Hyperparameters ), repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepositories]] + repositories, OptionalNullable[List[models.JobInRepository]] ), classifier_targets=utils.get_pydantic_model( classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py index 23e65222..046037c5 100644 --- a/src/mistralai/client/models/__init__.py +++ b/src/mistralai/client/models/__init__.py @@ -7,13 +7,7 @@ import sys if TYPE_CHECKING: - from .agent import ( - Agent, - AgentObject, - AgentTools, - AgentToolsTypedDict, - AgentTypedDict, - ) + from .agent import Agent, AgentObject, AgentTool, AgentToolTypedDict, AgentTypedDict from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict from .agentconversation import ( AgentConversation, @@ -24,13 +18,12 @@ ) from .agentcreationrequest import ( AgentCreationRequest, - AgentCreationRequestTools, - AgentCreationRequestToolsTypedDict, + AgentCreationRequestTool, + AgentCreationRequestToolTypedDict, AgentCreationRequestTypedDict, ) from .agenthandoffdoneevent import ( AgentHandoffDoneEvent, - AgentHandoffDoneEventType, AgentHandoffDoneEventTypedDict, ) from .agenthandoffentry import ( @@ -41,7 +34,6 @@ ) from .agenthandoffstartedevent import ( AgentHandoffStartedEvent, - AgentHandoffStartedEventType, AgentHandoffStartedEventTypedDict, ) from 
.agents_api_v1_agents_create_or_update_aliasop import ( @@ -57,10 +49,10 @@ AgentsAPIV1AgentsGetVersionRequestTypedDict, ) from .agents_api_v1_agents_getop import ( + AgentsAPIV1AgentsGetAgentVersion, + AgentsAPIV1AgentsGetAgentVersionTypedDict, AgentsAPIV1AgentsGetRequest, AgentsAPIV1AgentsGetRequestTypedDict, - QueryParamAgentVersion, - QueryParamAgentVersionTypedDict, ) from .agents_api_v1_agents_list_version_aliasesop import ( AgentsAPIV1AgentsListVersionAliasesRequest, @@ -97,8 +89,8 @@ from .agents_api_v1_conversations_getop import ( AgentsAPIV1ConversationsGetRequest, AgentsAPIV1ConversationsGetRequestTypedDict, - AgentsAPIV1ConversationsGetResponseV1ConversationsGet, - AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict, + ResponseV1ConversationsGet, + ResponseV1ConversationsGetTypedDict, ) from .agents_api_v1_conversations_historyop import ( AgentsAPIV1ConversationsHistoryRequest, @@ -107,8 +99,8 @@ from .agents_api_v1_conversations_listop import ( AgentsAPIV1ConversationsListRequest, AgentsAPIV1ConversationsListRequestTypedDict, - ResponseBody, - ResponseBodyTypedDict, + AgentsAPIV1ConversationsListResponse, + AgentsAPIV1ConversationsListResponseTypedDict, ) from .agents_api_v1_conversations_messagesop import ( AgentsAPIV1ConversationsMessagesRequest, @@ -124,8 +116,8 @@ ) from .agentscompletionrequest import ( AgentsCompletionRequest, - AgentsCompletionRequestMessages, - AgentsCompletionRequestMessagesTypedDict, + AgentsCompletionRequestMessage, + AgentsCompletionRequestMessageTypedDict, AgentsCompletionRequestStop, AgentsCompletionRequestStopTypedDict, AgentsCompletionRequestToolChoice, @@ -134,8 +126,8 @@ ) from .agentscompletionstreamrequest import ( AgentsCompletionStreamRequest, - AgentsCompletionStreamRequestMessages, - AgentsCompletionStreamRequestMessagesTypedDict, + AgentsCompletionStreamRequestMessage, + AgentsCompletionStreamRequestMessageTypedDict, AgentsCompletionStreamRequestStop, AgentsCompletionStreamRequestStopTypedDict, 
AgentsCompletionStreamRequestToolChoice, @@ -144,8 +136,8 @@ ) from .agentupdaterequest import ( AgentUpdateRequest, - AgentUpdateRequestTools, - AgentUpdateRequestToolsTypedDict, + AgentUpdateRequestTool, + AgentUpdateRequestToolTypedDict, AgentUpdateRequestTypedDict, ) from .apiendpoint import APIEndpoint @@ -161,7 +153,7 @@ AssistantMessageRole, AssistantMessageTypedDict, ) - from .audiochunk import AudioChunk, AudioChunkType, AudioChunkTypedDict + from .audiochunk import AudioChunk, AudioChunkTypedDict from .audioencoding import AudioEncoding from .audioformat import AudioFormat, AudioFormatTypedDict from .audiotranscriptionrequest import ( @@ -172,7 +164,7 @@ AudioTranscriptionRequestStream, AudioTranscriptionRequestStreamTypedDict, ) - from .basemodelcard import BaseModelCard, BaseModelCardType, BaseModelCardTypedDict + from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .batcherror import BatchError, BatchErrorTypedDict from .batchjobin import BatchJobIn, BatchJobInTypedDict from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict @@ -186,18 +178,18 @@ ) from .chatcompletionchoice import ( ChatCompletionChoice, + ChatCompletionChoiceFinishReason, ChatCompletionChoiceTypedDict, - FinishReason, ) from .chatcompletionrequest import ( ChatCompletionRequest, + ChatCompletionRequestMessage, + ChatCompletionRequestMessageTypedDict, + ChatCompletionRequestStop, + ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, ChatCompletionRequestToolChoiceTypedDict, ChatCompletionRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, ) from .chatcompletionresponse import ( ChatCompletionResponse, @@ -205,8 +197,8 @@ ) from .chatcompletionstreamrequest import ( ChatCompletionStreamRequest, - ChatCompletionStreamRequestMessages, - ChatCompletionStreamRequestMessagesTypedDict, + ChatCompletionStreamRequestMessage, + ChatCompletionStreamRequestMessageTypedDict, ChatCompletionStreamRequestStop, 
ChatCompletionStreamRequestStopTypedDict, ChatCompletionStreamRequestToolChoice, @@ -215,13 +207,13 @@ ) from .chatmoderationrequest import ( ChatModerationRequest, - ChatModerationRequestInputs, - ChatModerationRequestInputsTypedDict, + ChatModerationRequestInputs1, + ChatModerationRequestInputs1TypedDict, + ChatModerationRequestInputs2, + ChatModerationRequestInputs2TypedDict, + ChatModerationRequestInputs3, + ChatModerationRequestInputs3TypedDict, ChatModerationRequestTypedDict, - One, - OneTypedDict, - Two, - TwoTypedDict, ) from .checkpointout import CheckpointOut, CheckpointOutTypedDict from .classificationrequest import ( @@ -240,24 +232,21 @@ ) from .classifierdetailedjobout import ( ClassifierDetailedJobOut, - ClassifierDetailedJobOutIntegrations, - ClassifierDetailedJobOutIntegrationsTypedDict, - ClassifierDetailedJobOutJobType, + ClassifierDetailedJobOutIntegration, + ClassifierDetailedJobOutIntegrationTypedDict, ClassifierDetailedJobOutObject, ClassifierDetailedJobOutStatus, ClassifierDetailedJobOutTypedDict, ) from .classifierftmodelout import ( ClassifierFTModelOut, - ClassifierFTModelOutModelType, ClassifierFTModelOutObject, ClassifierFTModelOutTypedDict, ) from .classifierjobout import ( ClassifierJobOut, - ClassifierJobOutIntegrations, - ClassifierJobOutIntegrationsTypedDict, - ClassifierJobOutJobType, + ClassifierJobOutIntegration, + ClassifierJobOutIntegrationTypedDict, ClassifierJobOutObject, ClassifierJobOutStatus, ClassifierJobOutTypedDict, @@ -272,22 +261,17 @@ ClassifierTrainingParametersIn, ClassifierTrainingParametersInTypedDict, ) - from .codeinterpretertool import ( - CodeInterpreterTool, - CodeInterpreterToolType, - CodeInterpreterToolTypedDict, - ) + from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict from .completionargs import CompletionArgs, CompletionArgsTypedDict from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict from .completionchunk import CompletionChunk, 
CompletionChunkTypedDict from .completiondetailedjobout import ( CompletionDetailedJobOut, - CompletionDetailedJobOutIntegrations, - CompletionDetailedJobOutIntegrationsTypedDict, - CompletionDetailedJobOutJobType, + CompletionDetailedJobOutIntegration, + CompletionDetailedJobOutIntegrationTypedDict, CompletionDetailedJobOutObject, - CompletionDetailedJobOutRepositories, - CompletionDetailedJobOutRepositoriesTypedDict, + CompletionDetailedJobOutRepository, + CompletionDetailedJobOutRepositoryTypedDict, CompletionDetailedJobOutStatus, CompletionDetailedJobOutTypedDict, ) @@ -296,18 +280,16 @@ CompletionFTModelOut, CompletionFTModelOutObject, CompletionFTModelOutTypedDict, - ModelType, ) from .completionjobout import ( CompletionJobOut, + CompletionJobOutIntegration, + CompletionJobOutIntegrationTypedDict, CompletionJobOutObject, + CompletionJobOutRepository, + CompletionJobOutRepositoryTypedDict, + CompletionJobOutStatus, CompletionJobOutTypedDict, - Integrations, - IntegrationsTypedDict, - JobType, - Repositories, - RepositoriesTypedDict, - Status, ) from .completionresponsestreamchoice import ( CompletionResponseStreamChoice, @@ -343,8 +325,8 @@ ConversationHistory, ConversationHistoryObject, ConversationHistoryTypedDict, - Entries, - EntriesTypedDict, + Entry, + EntryTypedDict, ) from .conversationinputs import ConversationInputs, ConversationInputsTypedDict from .conversationmessages import ( @@ -353,20 +335,20 @@ ConversationMessagesTypedDict, ) from .conversationrequest import ( - AgentVersion, - AgentVersionTypedDict, ConversationRequest, + ConversationRequestAgentVersion, + ConversationRequestAgentVersionTypedDict, + ConversationRequestHandoffExecution, + ConversationRequestTool, + ConversationRequestToolTypedDict, ConversationRequestTypedDict, - HandoffExecution, - Tools, - ToolsTypedDict, ) from .conversationresponse import ( ConversationResponse, ConversationResponseObject, ConversationResponseTypedDict, - Outputs, - OutputsTypedDict, + Output, + 
OutputTypedDict, ) from .conversationrestartrequest import ( ConversationRestartRequest, @@ -387,8 +369,8 @@ ConversationStreamRequestAgentVersion, ConversationStreamRequestAgentVersionTypedDict, ConversationStreamRequestHandoffExecution, - ConversationStreamRequestTools, - ConversationStreamRequestToolsTypedDict, + ConversationStreamRequestTool, + ConversationStreamRequestToolTypedDict, ConversationStreamRequestTypedDict, ) from .conversationusageinfo import ( @@ -402,16 +384,12 @@ from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict from .deltamessage import ( - Content, - ContentTypedDict, DeltaMessage, + DeltaMessageContent, + DeltaMessageContentTypedDict, DeltaMessageTypedDict, ) - from .documentlibrarytool import ( - DocumentLibraryTool, - DocumentLibraryToolType, - DocumentLibraryToolTypedDict, - ) + from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict from .documentout import DocumentOut, DocumentOutTypedDict from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict from .documentupdatein import ( @@ -464,8 +442,8 @@ FilesAPIRoutesRetrieveFileRequestTypedDict, ) from .files_api_routes_upload_fileop import ( - FilesAPIRoutesUploadFileMultiPartBodyParams, - FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict, + MultiPartBodyParams, + MultiPartBodyParamsTypedDict, ) from .fileschema import FileSchema, FileSchemaTypedDict from .filesignedurl import FileSignedURL, FileSignedURLTypedDict @@ -491,7 +469,7 @@ FTModelCapabilitiesOut, FTModelCapabilitiesOutTypedDict, ) - from .ftmodelcard import FTModelCard, FTModelCardType, FTModelCardTypedDict + from .ftmodelcard import FTModelCard, FTModelCardTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( Arguments, @@ -509,11 +487,7 @@ FunctionCallEntryArguments, FunctionCallEntryArgumentsTypedDict, ) - from .functioncallevent import ( - FunctionCallEvent, 
- FunctionCallEventType, - FunctionCallEventTypedDict, - ) + from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict from .functionname import FunctionName, FunctionNameTypedDict from .functionresultentry import ( FunctionResultEntry, @@ -521,54 +495,42 @@ FunctionResultEntryType, FunctionResultEntryTypedDict, ) - from .functiontool import FunctionTool, FunctionToolType, FunctionToolTypedDict - from .githubrepositoryin import ( - GithubRepositoryIn, - GithubRepositoryInType, - GithubRepositoryInTypedDict, - ) - from .githubrepositoryout import ( - GithubRepositoryOut, - GithubRepositoryOutType, - GithubRepositoryOutTypedDict, - ) + from .functiontool import FunctionTool, FunctionToolTypedDict + from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict + from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData - from .imagegenerationtool import ( - ImageGenerationTool, - ImageGenerationToolType, - ImageGenerationToolTypedDict, - ) + from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, ImageURLChunkType, ImageURLChunkTypedDict, + ImageURLUnion, + ImageURLUnionTypedDict, ) from .inputentries import InputEntries, InputEntriesTypedDict from .inputs import ( Inputs, + InputsMessage, + InputsMessageTypedDict, InputsTypedDict, InstructRequestInputs, - InstructRequestInputsMessages, - InstructRequestInputsMessagesTypedDict, InstructRequestInputsTypedDict, ) from .instructrequest import ( InstructRequest, - InstructRequestMessages, - InstructRequestMessagesTypedDict, + InstructRequestMessage, + InstructRequestMessageTypedDict, InstructRequestTypedDict, ) from .jobin import ( Hyperparameters, HyperparametersTypedDict, JobIn, - JobInIntegrations, - 
JobInIntegrationsTypedDict, - JobInRepositories, - JobInRepositoriesTypedDict, + JobInIntegration, + JobInIntegrationTypedDict, + JobInRepository, + JobInRepositoryTypedDict, JobInTypedDict, ) from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict @@ -597,8 +559,8 @@ from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( JobsAPIRoutesFineTuningCreateFineTuningJobResponse, JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, - Response1, - Response1TypedDict, + Response, + ResponseTypedDict, ) from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( JobsAPIRoutesFineTuningGetFineTuningJobRequest, @@ -609,7 +571,7 @@ from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( JobsAPIRoutesFineTuningGetFineTuningJobsRequest, JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, - QueryParamStatus, + JobsAPIRoutesFineTuningGetFineTuningJobsStatus, ) from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( JobsAPIRoutesFineTuningStartFineTuningJobRequest, @@ -681,8 +643,8 @@ LibrariesDocumentsUpdateV1RequestTypedDict, ) from .libraries_documents_upload_v1op import ( - LibrariesDocumentsUploadV1DocumentUpload, - LibrariesDocumentsUploadV1DocumentUploadTypedDict, + DocumentUpload, + DocumentUploadTypedDict, LibrariesDocumentsUploadV1Request, LibrariesDocumentsUploadV1RequestTypedDict, ) @@ -722,10 +684,10 @@ MessageInputEntry, MessageInputEntryContent, MessageInputEntryContentTypedDict, + MessageInputEntryObject, MessageInputEntryRole, MessageInputEntryType, MessageInputEntryTypedDict, - Object, ) from .messageoutputcontentchunks import ( MessageOutputContentChunks, @@ -745,7 +707,6 @@ MessageOutputEventContent, MessageOutputEventContentTypedDict, MessageOutputEventRole, - MessageOutputEventType, MessageOutputEventTypedDict, ) from .metricout import MetricOut, MetricOutTypedDict @@ -754,11 +715,16 @@ from .modelconversation import ( ModelConversation, ModelConversationObject, - ModelConversationTools, - 
ModelConversationToolsTypedDict, + ModelConversationTool, + ModelConversationToolTypedDict, ModelConversationTypedDict, ) - from .modellist import Data, DataTypedDict, ModelList, ModelListTypedDict + from .modellist import ( + ModelList, + ModelListData, + ModelListDataTypedDict, + ModelListTypedDict, + ) from .moderationobject import ModerationObject, ModerationObjectTypedDict from .moderationresponse import ModerationResponse, ModerationResponseTypedDict from .no_response_error import NoResponseError @@ -784,9 +750,9 @@ RealtimeTranscriptionErrorTypedDict, ) from .realtimetranscriptionerrordetail import ( - Message, - MessageTypedDict, RealtimeTranscriptionErrorDetail, + RealtimeTranscriptionErrorDetailMessage, + RealtimeTranscriptionErrorDetailMessageTypedDict, RealtimeTranscriptionErrorDetailTypedDict, ) from .realtimetranscriptionsession import ( @@ -807,29 +773,20 @@ ReferenceChunkTypedDict, ) from .requestsource import RequestSource - from .responsedoneevent import ( - ResponseDoneEvent, - ResponseDoneEventType, - ResponseDoneEventTypedDict, - ) - from .responseerrorevent import ( - ResponseErrorEvent, - ResponseErrorEventType, - ResponseErrorEventTypedDict, - ) + from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict + from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats from .responsestartedevent import ( ResponseStartedEvent, - ResponseStartedEventType, ResponseStartedEventTypedDict, ) from .responsevalidationerror import ResponseValidationError from .retrieve_model_v1_models_model_id_getop import ( + ResponseRetrieveModelV1ModelsModelIDGet, + ResponseRetrieveModelV1ModelsModelIDGetTypedDict, RetrieveModelV1ModelsModelIDGetRequest, RetrieveModelV1ModelsModelIDGetRequestTypedDict, - RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - 
RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict, ) from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict from .sampletype import SampleType @@ -842,7 +799,6 @@ from .source import Source from .ssetypes import SSETypes from .systemmessage import ( - Role, SystemMessage, SystemMessageContent, SystemMessageContentTypedDict, @@ -869,20 +825,18 @@ ToolExecutionDeltaEvent, ToolExecutionDeltaEventName, ToolExecutionDeltaEventNameTypedDict, - ToolExecutionDeltaEventType, ToolExecutionDeltaEventTypedDict, ) from .toolexecutiondoneevent import ( ToolExecutionDoneEvent, ToolExecutionDoneEventName, ToolExecutionDoneEventNameTypedDict, - ToolExecutionDoneEventType, ToolExecutionDoneEventTypedDict, ) from .toolexecutionentry import ( - Name, - NameTypedDict, ToolExecutionEntry, + ToolExecutionEntryName, + ToolExecutionEntryNameTypedDict, ToolExecutionEntryObject, ToolExecutionEntryType, ToolExecutionEntryTypedDict, @@ -891,7 +845,6 @@ ToolExecutionStartedEvent, ToolExecutionStartedEventName, ToolExecutionStartedEventNameTypedDict, - ToolExecutionStartedEventType, ToolExecutionStartedEventTypedDict, ) from .toolfilechunk import ( @@ -905,7 +858,6 @@ ToolMessage, ToolMessageContent, ToolMessageContentTypedDict, - ToolMessageRole, ToolMessageTypedDict, ) from .toolreferencechunk import ( @@ -923,12 +875,11 @@ ) from .transcriptionsegmentchunk import ( TranscriptionSegmentChunk, + TranscriptionSegmentChunkType, TranscriptionSegmentChunkTypedDict, - Type, ) from .transcriptionstreamdone import ( TranscriptionStreamDone, - TranscriptionStreamDoneType, TranscriptionStreamDoneTypedDict, ) from .transcriptionstreamevents import ( @@ -940,17 +891,14 @@ from .transcriptionstreameventtypes import TranscriptionStreamEventTypes from .transcriptionstreamlanguage import ( TranscriptionStreamLanguage, - TranscriptionStreamLanguageType, TranscriptionStreamLanguageTypedDict, ) from .transcriptionstreamsegmentdelta import ( 
TranscriptionStreamSegmentDelta, - TranscriptionStreamSegmentDeltaType, TranscriptionStreamSegmentDeltaTypedDict, ) from .transcriptionstreamtextdelta import ( TranscriptionStreamTextDelta, - TranscriptionStreamTextDeltaType, TranscriptionStreamTextDeltaTypedDict, ) from .unarchiveftmodelout import ( @@ -965,7 +913,6 @@ UserMessage, UserMessageContent, UserMessageContentTypedDict, - UserMessageRole, UserMessageTypedDict, ) from .validationerror import ( @@ -974,22 +921,13 @@ ValidationError, ValidationErrorTypedDict, ) - from .wandbintegration import ( - WandbIntegration, - WandbIntegrationType, - WandbIntegrationTypedDict, - ) - from .wandbintegrationout import ( - WandbIntegrationOut, - WandbIntegrationOutType, - WandbIntegrationOutTypedDict, - ) + from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict + from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict from .websearchpremiumtool import ( WebSearchPremiumTool, - WebSearchPremiumToolType, WebSearchPremiumToolTypedDict, ) - from .websearchtool import WebSearchTool, WebSearchToolType, WebSearchToolTypedDict + from .websearchtool import WebSearchTool, WebSearchToolTypedDict __all__ = [ "APIEndpoint", @@ -1002,33 +940,31 @@ "AgentConversationObject", "AgentConversationTypedDict", "AgentCreationRequest", - "AgentCreationRequestTools", - "AgentCreationRequestToolsTypedDict", + "AgentCreationRequestTool", + "AgentCreationRequestToolTypedDict", "AgentCreationRequestTypedDict", "AgentHandoffDoneEvent", - "AgentHandoffDoneEventType", "AgentHandoffDoneEventTypedDict", "AgentHandoffEntry", "AgentHandoffEntryObject", "AgentHandoffEntryType", "AgentHandoffEntryTypedDict", "AgentHandoffStartedEvent", - "AgentHandoffStartedEventType", "AgentHandoffStartedEventTypedDict", "AgentObject", - "AgentTools", - "AgentToolsTypedDict", + "AgentTool", + "AgentToolTypedDict", "AgentTypedDict", "AgentUpdateRequest", - "AgentUpdateRequestTools", - "AgentUpdateRequestToolsTypedDict", + 
"AgentUpdateRequestTool", + "AgentUpdateRequestToolTypedDict", "AgentUpdateRequestTypedDict", - "AgentVersion", - "AgentVersionTypedDict", "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", "AgentsAPIV1AgentsDeleteRequest", "AgentsAPIV1AgentsDeleteRequestTypedDict", + "AgentsAPIV1AgentsGetAgentVersion", + "AgentsAPIV1AgentsGetAgentVersionTypedDict", "AgentsAPIV1AgentsGetRequest", "AgentsAPIV1AgentsGetRequestTypedDict", "AgentsAPIV1AgentsGetVersionRequest", @@ -1051,12 +987,12 @@ "AgentsAPIV1ConversationsDeleteRequestTypedDict", "AgentsAPIV1ConversationsGetRequest", "AgentsAPIV1ConversationsGetRequestTypedDict", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", "AgentsAPIV1ConversationsHistoryRequest", "AgentsAPIV1ConversationsHistoryRequestTypedDict", "AgentsAPIV1ConversationsListRequest", "AgentsAPIV1ConversationsListRequestTypedDict", + "AgentsAPIV1ConversationsListResponse", + "AgentsAPIV1ConversationsListResponseTypedDict", "AgentsAPIV1ConversationsMessagesRequest", "AgentsAPIV1ConversationsMessagesRequestTypedDict", "AgentsAPIV1ConversationsRestartRequest", @@ -1064,16 +1000,16 @@ "AgentsAPIV1ConversationsRestartStreamRequest", "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", "AgentsCompletionRequest", - "AgentsCompletionRequestMessages", - "AgentsCompletionRequestMessagesTypedDict", + "AgentsCompletionRequestMessage", + "AgentsCompletionRequestMessageTypedDict", "AgentsCompletionRequestStop", "AgentsCompletionRequestStopTypedDict", "AgentsCompletionRequestToolChoice", "AgentsCompletionRequestToolChoiceTypedDict", "AgentsCompletionRequestTypedDict", "AgentsCompletionStreamRequest", - "AgentsCompletionStreamRequestMessages", - "AgentsCompletionStreamRequestMessagesTypedDict", + "AgentsCompletionStreamRequestMessage", + "AgentsCompletionStreamRequestMessageTypedDict", "AgentsCompletionStreamRequestStop", 
"AgentsCompletionStreamRequestStopTypedDict", "AgentsCompletionStreamRequestToolChoice", @@ -1092,7 +1028,6 @@ "Attributes", "AttributesTypedDict", "AudioChunk", - "AudioChunkType", "AudioChunkTypedDict", "AudioEncoding", "AudioFormat", @@ -1102,7 +1037,6 @@ "AudioTranscriptionRequestStreamTypedDict", "AudioTranscriptionRequestTypedDict", "BaseModelCard", - "BaseModelCardType", "BaseModelCardTypedDict", "BatchError", "BatchErrorTypedDict", @@ -1121,24 +1055,33 @@ "ChatClassificationRequest", "ChatClassificationRequestTypedDict", "ChatCompletionChoice", + "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", + "ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", + "ChatCompletionRequestStop", + "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", "ChatCompletionRequestToolChoiceTypedDict", "ChatCompletionRequestTypedDict", "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", - "ChatCompletionStreamRequestMessages", - "ChatCompletionStreamRequestMessagesTypedDict", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", "ChatCompletionStreamRequestStop", "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestToolChoiceTypedDict", "ChatCompletionStreamRequestTypedDict", "ChatModerationRequest", - "ChatModerationRequestInputs", - "ChatModerationRequestInputsTypedDict", + "ChatModerationRequestInputs1", + "ChatModerationRequestInputs1TypedDict", + "ChatModerationRequestInputs2", + "ChatModerationRequestInputs2TypedDict", + "ChatModerationRequestInputs3", + "ChatModerationRequestInputs3TypedDict", "ChatModerationRequestTypedDict", "CheckpointOut", "CheckpointOutTypedDict", @@ -1151,20 +1094,17 @@ "ClassificationTargetResult", "ClassificationTargetResultTypedDict", "ClassifierDetailedJobOut", - "ClassifierDetailedJobOutIntegrations", - 
"ClassifierDetailedJobOutIntegrationsTypedDict", - "ClassifierDetailedJobOutJobType", + "ClassifierDetailedJobOutIntegration", + "ClassifierDetailedJobOutIntegrationTypedDict", "ClassifierDetailedJobOutObject", "ClassifierDetailedJobOutStatus", "ClassifierDetailedJobOutTypedDict", "ClassifierFTModelOut", - "ClassifierFTModelOutModelType", "ClassifierFTModelOutObject", "ClassifierFTModelOutTypedDict", "ClassifierJobOut", - "ClassifierJobOutIntegrations", - "ClassifierJobOutIntegrationsTypedDict", - "ClassifierJobOutJobType", + "ClassifierJobOutIntegration", + "ClassifierJobOutIntegrationTypedDict", "ClassifierJobOutObject", "ClassifierJobOutStatus", "ClassifierJobOutTypedDict", @@ -1177,7 +1117,6 @@ "ClassifierTrainingParametersInTypedDict", "ClassifierTrainingParametersTypedDict", "CodeInterpreterTool", - "CodeInterpreterToolType", "CodeInterpreterToolTypedDict", "CompletionArgs", "CompletionArgsStop", @@ -1186,12 +1125,11 @@ "CompletionChunk", "CompletionChunkTypedDict", "CompletionDetailedJobOut", - "CompletionDetailedJobOutIntegrations", - "CompletionDetailedJobOutIntegrationsTypedDict", - "CompletionDetailedJobOutJobType", + "CompletionDetailedJobOutIntegration", + "CompletionDetailedJobOutIntegrationTypedDict", "CompletionDetailedJobOutObject", - "CompletionDetailedJobOutRepositories", - "CompletionDetailedJobOutRepositoriesTypedDict", + "CompletionDetailedJobOutRepository", + "CompletionDetailedJobOutRepositoryTypedDict", "CompletionDetailedJobOutStatus", "CompletionDetailedJobOutTypedDict", "CompletionEvent", @@ -1200,7 +1138,12 @@ "CompletionFTModelOutObject", "CompletionFTModelOutTypedDict", "CompletionJobOut", + "CompletionJobOutIntegration", + "CompletionJobOutIntegrationTypedDict", "CompletionJobOutObject", + "CompletionJobOutRepository", + "CompletionJobOutRepositoryTypedDict", + "CompletionJobOutStatus", "CompletionJobOutTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", @@ -1209,10 +1152,8 @@ 
"CompletionTrainingParametersIn", "CompletionTrainingParametersInTypedDict", "CompletionTrainingParametersTypedDict", - "Content", "ContentChunk", "ContentChunkTypedDict", - "ContentTypedDict", "ConversationAppendRequest", "ConversationAppendRequestHandoffExecution", "ConversationAppendRequestTypedDict", @@ -1232,6 +1173,11 @@ "ConversationMessagesObject", "ConversationMessagesTypedDict", "ConversationRequest", + "ConversationRequestAgentVersion", + "ConversationRequestAgentVersionTypedDict", + "ConversationRequestHandoffExecution", + "ConversationRequestTool", + "ConversationRequestToolTypedDict", "ConversationRequestTypedDict", "ConversationResponse", "ConversationResponseObject", @@ -1250,13 +1196,11 @@ "ConversationStreamRequestAgentVersion", "ConversationStreamRequestAgentVersionTypedDict", "ConversationStreamRequestHandoffExecution", - "ConversationStreamRequestTools", - "ConversationStreamRequestToolsTypedDict", + "ConversationStreamRequestTool", + "ConversationStreamRequestToolTypedDict", "ConversationStreamRequestTypedDict", "ConversationUsageInfo", "ConversationUsageInfoTypedDict", - "Data", - "DataTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", "DeleteModelOut", @@ -1264,10 +1208,11 @@ "DeleteModelV1ModelsModelIDDeleteRequest", "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", "DeltaMessageTypedDict", "Document", "DocumentLibraryTool", - "DocumentLibraryToolType", "DocumentLibraryToolTypedDict", "DocumentOut", "DocumentOutTypedDict", @@ -1279,6 +1224,8 @@ "DocumentURLChunkTypedDict", "DocumentUpdateIn", "DocumentUpdateInTypedDict", + "DocumentUpload", + "DocumentUploadTypedDict", "EmbeddingDtype", "EmbeddingRequest", "EmbeddingRequestInputs", @@ -1290,8 +1237,8 @@ "EmbeddingResponseTypedDict", "EncodingFormat", "EntityType", - "Entries", - "EntriesTypedDict", + "Entry", + "EntryTypedDict", "EventOut", "EventOutTypedDict", "FIMCompletionRequest", @@ -1308,7 +1255,6 @@ 
"FTModelCapabilitiesOut", "FTModelCapabilitiesOutTypedDict", "FTModelCard", - "FTModelCardType", "FTModelCardTypedDict", "File", "FileChunk", @@ -1329,10 +1275,7 @@ "FilesAPIRoutesListFilesRequestTypedDict", "FilesAPIRoutesRetrieveFileRequest", "FilesAPIRoutesRetrieveFileRequestTypedDict", - "FilesAPIRoutesUploadFileMultiPartBodyParams", - "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict", "FineTuneableModelType", - "FinishReason", "Format", "Function", "FunctionCall", @@ -1343,7 +1286,6 @@ "FunctionCallEntryType", "FunctionCallEntryTypedDict", "FunctionCallEvent", - "FunctionCallEventType", "FunctionCallEventTypedDict", "FunctionCallTypedDict", "FunctionName", @@ -1353,55 +1295,47 @@ "FunctionResultEntryType", "FunctionResultEntryTypedDict", "FunctionTool", - "FunctionToolType", "FunctionToolTypedDict", "FunctionTypedDict", "GithubRepositoryIn", - "GithubRepositoryInType", "GithubRepositoryInTypedDict", "GithubRepositoryOut", - "GithubRepositoryOutType", "GithubRepositoryOutTypedDict", "HTTPValidationError", "HTTPValidationErrorData", - "HandoffExecution", "Hyperparameters", "HyperparametersTypedDict", "ImageGenerationTool", - "ImageGenerationToolType", "ImageGenerationToolTypedDict", "ImageURL", "ImageURLChunk", - "ImageURLChunkImageURL", - "ImageURLChunkImageURLTypedDict", "ImageURLChunkType", "ImageURLChunkTypedDict", "ImageURLTypedDict", + "ImageURLUnion", + "ImageURLUnionTypedDict", "InputEntries", "InputEntriesTypedDict", "Inputs", + "InputsMessage", + "InputsMessageTypedDict", "InputsTypedDict", "InstructRequest", "InstructRequestInputs", - "InstructRequestInputsMessages", - "InstructRequestInputsMessagesTypedDict", "InstructRequestInputsTypedDict", - "InstructRequestMessages", - "InstructRequestMessagesTypedDict", + "InstructRequestMessage", + "InstructRequestMessageTypedDict", "InstructRequestTypedDict", - "Integrations", - "IntegrationsTypedDict", "JSONSchema", "JSONSchemaTypedDict", "JobIn", - "JobInIntegrations", - "JobInIntegrationsTypedDict", - 
"JobInRepositories", - "JobInRepositoriesTypedDict", + "JobInIntegration", + "JobInIntegrationTypedDict", + "JobInRepository", + "JobInRepositoryTypedDict", "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", - "JobType", "JobsAPIRoutesBatchCancelBatchJobRequest", "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", "JobsAPIRoutesBatchGetBatchJobRequest", @@ -1422,6 +1356,7 @@ "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsStatus", "JobsAPIRoutesFineTuningStartFineTuningJobRequest", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", "JobsAPIRoutesFineTuningStartFineTuningJobResponse", @@ -1460,8 +1395,6 @@ "LibrariesDocumentsReprocessV1RequestTypedDict", "LibrariesDocumentsUpdateV1Request", "LibrariesDocumentsUpdateV1RequestTypedDict", - "LibrariesDocumentsUploadV1DocumentUpload", - "LibrariesDocumentsUploadV1DocumentUploadTypedDict", "LibrariesDocumentsUploadV1Request", "LibrariesDocumentsUploadV1RequestTypedDict", "LibrariesGetV1Request", @@ -1490,7 +1423,6 @@ "ListSharingOutTypedDict", "Loc", "LocTypedDict", - "Message", "MessageEntries", "MessageEntriesTypedDict", "MessageInputContentChunks", @@ -1498,6 +1430,7 @@ "MessageInputEntry", "MessageInputEntryContent", "MessageInputEntryContentTypedDict", + "MessageInputEntryObject", "MessageInputEntryRole", "MessageInputEntryType", "MessageInputEntryTypedDict", @@ -1514,11 +1447,7 @@ "MessageOutputEventContent", "MessageOutputEventContentTypedDict", "MessageOutputEventRole", - "MessageOutputEventType", "MessageOutputEventTypedDict", - "MessageTypedDict", - "Messages", - "MessagesTypedDict", "MetricOut", "MetricOutTypedDict", "MistralError", @@ -1527,18 +1456,19 @@ "ModelCapabilitiesTypedDict", "ModelConversation", "ModelConversationObject", - "ModelConversationTools", - "ModelConversationToolsTypedDict", + "ModelConversationTool", + 
"ModelConversationToolTypedDict", "ModelConversationTypedDict", "ModelList", + "ModelListData", + "ModelListDataTypedDict", "ModelListTypedDict", - "ModelType", "ModerationObject", "ModerationObjectTypedDict", "ModerationResponse", "ModerationResponseTypedDict", - "Name", - "NameTypedDict", + "MultiPartBodyParams", + "MultiPartBodyParamsTypedDict", "NoResponseError", "OCRImageObject", "OCRImageObjectTypedDict", @@ -1554,24 +1484,20 @@ "OCRTableObjectTypedDict", "OCRUsageInfo", "OCRUsageInfoTypedDict", - "Object", - "One", - "OneTypedDict", + "Output", "OutputContentChunks", "OutputContentChunksTypedDict", - "Outputs", - "OutputsTypedDict", + "OutputTypedDict", "PaginationInfo", "PaginationInfoTypedDict", "Prediction", "PredictionTypedDict", "ProcessingStatusOut", "ProcessingStatusOutTypedDict", - "QueryParamAgentVersion", - "QueryParamAgentVersionTypedDict", - "QueryParamStatus", "RealtimeTranscriptionError", "RealtimeTranscriptionErrorDetail", + "RealtimeTranscriptionErrorDetailMessage", + "RealtimeTranscriptionErrorDetailMessageTypedDict", "RealtimeTranscriptionErrorDetailTypedDict", "RealtimeTranscriptionErrorTypedDict", "RealtimeTranscriptionSession", @@ -1583,33 +1509,27 @@ "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", - "Repositories", - "RepositoriesTypedDict", "RequestSource", - "Response1", - "Response1TypedDict", - "ResponseBody", - "ResponseBodyTypedDict", + "Response", "ResponseDoneEvent", - "ResponseDoneEventType", "ResponseDoneEventTypedDict", "ResponseErrorEvent", - "ResponseErrorEventType", "ResponseErrorEventTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", + "ResponseRetrieveModelV1ModelsModelIDGet", + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", "ResponseStartedEvent", - "ResponseStartedEventType", "ResponseStartedEventTypedDict", + "ResponseTypedDict", + "ResponseV1ConversationsGet", + "ResponseV1ConversationsGetTypedDict", "ResponseValidationError", "RetrieveFileOut", 
"RetrieveFileOutTypedDict", "RetrieveModelV1ModelsModelIDGetRequest", "RetrieveModelV1ModelsModelIDGetRequestTypedDict", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", - "Role", "SDKError", "SSETypes", "SampleType", @@ -1623,9 +1543,6 @@ "SharingOut", "SharingOutTypedDict", "Source", - "Status", - "Stop", - "StopTypedDict", "SystemMessage", "SystemMessageContent", "SystemMessageContentChunks", @@ -1651,21 +1568,20 @@ "ToolExecutionDeltaEvent", "ToolExecutionDeltaEventName", "ToolExecutionDeltaEventNameTypedDict", - "ToolExecutionDeltaEventType", "ToolExecutionDeltaEventTypedDict", "ToolExecutionDoneEvent", "ToolExecutionDoneEventName", "ToolExecutionDoneEventNameTypedDict", - "ToolExecutionDoneEventType", "ToolExecutionDoneEventTypedDict", "ToolExecutionEntry", + "ToolExecutionEntryName", + "ToolExecutionEntryNameTypedDict", "ToolExecutionEntryObject", "ToolExecutionEntryType", "ToolExecutionEntryTypedDict", "ToolExecutionStartedEvent", "ToolExecutionStartedEventName", "ToolExecutionStartedEventNameTypedDict", - "ToolExecutionStartedEventType", "ToolExecutionStartedEventTypedDict", "ToolFileChunk", "ToolFileChunkTool", @@ -1675,7 +1591,6 @@ "ToolMessage", "ToolMessageContent", "ToolMessageContentTypedDict", - "ToolMessageRole", "ToolMessageTypedDict", "ToolReferenceChunk", "ToolReferenceChunkTool", @@ -1684,16 +1599,14 @@ "ToolReferenceChunkTypedDict", "ToolTypedDict", "ToolTypes", - "Tools", - "ToolsTypedDict", "TrainingFile", "TrainingFileTypedDict", "TranscriptionResponse", "TranscriptionResponseTypedDict", "TranscriptionSegmentChunk", + "TranscriptionSegmentChunkType", "TranscriptionSegmentChunkTypedDict", "TranscriptionStreamDone", - "TranscriptionStreamDoneType", "TranscriptionStreamDoneTypedDict", "TranscriptionStreamEventTypes", "TranscriptionStreamEvents", @@ -1701,17 +1614,11 @@ "TranscriptionStreamEventsDataTypedDict", 
"TranscriptionStreamEventsTypedDict", "TranscriptionStreamLanguage", - "TranscriptionStreamLanguageType", "TranscriptionStreamLanguageTypedDict", "TranscriptionStreamSegmentDelta", - "TranscriptionStreamSegmentDeltaType", "TranscriptionStreamSegmentDeltaTypedDict", "TranscriptionStreamTextDelta", - "TranscriptionStreamTextDeltaType", "TranscriptionStreamTextDeltaTypedDict", - "Two", - "TwoTypedDict", - "Type", "UnarchiveFTModelOut", "UnarchiveFTModelOutObject", "UnarchiveFTModelOutTypedDict", @@ -1724,29 +1631,24 @@ "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", - "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", "WandbIntegrationOut", - "WandbIntegrationOutType", "WandbIntegrationOutTypedDict", - "WandbIntegrationType", "WandbIntegrationTypedDict", "WebSearchPremiumTool", - "WebSearchPremiumToolType", "WebSearchPremiumToolTypedDict", "WebSearchTool", - "WebSearchToolType", "WebSearchToolTypedDict", ] _dynamic_imports: dict[str, str] = { "Agent": ".agent", "AgentObject": ".agent", - "AgentTools": ".agent", - "AgentToolsTypedDict": ".agent", + "AgentTool": ".agent", + "AgentToolTypedDict": ".agent", "AgentTypedDict": ".agent", "AgentAliasResponse": ".agentaliasresponse", "AgentAliasResponseTypedDict": ".agentaliasresponse", @@ -1756,18 +1658,16 @@ "AgentConversationObject": ".agentconversation", "AgentConversationTypedDict": ".agentconversation", "AgentCreationRequest": ".agentcreationrequest", - "AgentCreationRequestTools": ".agentcreationrequest", - "AgentCreationRequestToolsTypedDict": ".agentcreationrequest", + "AgentCreationRequestTool": ".agentcreationrequest", + "AgentCreationRequestToolTypedDict": ".agentcreationrequest", "AgentCreationRequestTypedDict": ".agentcreationrequest", "AgentHandoffDoneEvent": ".agenthandoffdoneevent", - "AgentHandoffDoneEventType": ".agenthandoffdoneevent", "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", "AgentHandoffEntry": 
".agenthandoffentry", "AgentHandoffEntryObject": ".agenthandoffentry", "AgentHandoffEntryType": ".agenthandoffentry", "AgentHandoffEntryTypedDict": ".agenthandoffentry", "AgentHandoffStartedEvent": ".agenthandoffstartedevent", - "AgentHandoffStartedEventType": ".agenthandoffstartedevent", "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", @@ -1775,10 +1675,10 @@ "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetAgentVersion": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetAgentVersionTypedDict": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", - "QueryParamAgentVersion": ".agents_api_v1_agents_getop", - "QueryParamAgentVersionTypedDict": ".agents_api_v1_agents_getop", "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", @@ -1797,14 +1697,14 @@ "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", + 
"ResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", + "ResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", - "ResponseBody": ".agents_api_v1_conversations_listop", - "ResponseBodyTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponse": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponseTypedDict": ".agents_api_v1_conversations_listop", "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", @@ -1812,24 +1712,24 @@ "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", "AgentsCompletionRequest": ".agentscompletionrequest", - "AgentsCompletionRequestMessages": ".agentscompletionrequest", - "AgentsCompletionRequestMessagesTypedDict": ".agentscompletionrequest", + "AgentsCompletionRequestMessage": ".agentscompletionrequest", + "AgentsCompletionRequestMessageTypedDict": ".agentscompletionrequest", "AgentsCompletionRequestStop": ".agentscompletionrequest", "AgentsCompletionRequestStopTypedDict": ".agentscompletionrequest", "AgentsCompletionRequestToolChoice": ".agentscompletionrequest", "AgentsCompletionRequestToolChoiceTypedDict": ".agentscompletionrequest", "AgentsCompletionRequestTypedDict": ".agentscompletionrequest", "AgentsCompletionStreamRequest": ".agentscompletionstreamrequest", - 
"AgentsCompletionStreamRequestMessages": ".agentscompletionstreamrequest", - "AgentsCompletionStreamRequestMessagesTypedDict": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessage": ".agentscompletionstreamrequest", + "AgentsCompletionStreamRequestMessageTypedDict": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestStop": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestStopTypedDict": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", "AgentUpdateRequest": ".agentupdaterequest", - "AgentUpdateRequestTools": ".agentupdaterequest", - "AgentUpdateRequestToolsTypedDict": ".agentupdaterequest", + "AgentUpdateRequestTool": ".agentupdaterequest", + "AgentUpdateRequestToolTypedDict": ".agentupdaterequest", "AgentUpdateRequestTypedDict": ".agentupdaterequest", "APIEndpoint": ".apiendpoint", "ArchiveFTModelOut": ".archiveftmodelout", @@ -1841,7 +1741,6 @@ "AssistantMessageRole": ".assistantmessage", "AssistantMessageTypedDict": ".assistantmessage", "AudioChunk": ".audiochunk", - "AudioChunkType": ".audiochunk", "AudioChunkTypedDict": ".audiochunk", "AudioEncoding": ".audioencoding", "AudioFormat": ".audioformat", @@ -1851,7 +1750,6 @@ "AudioTranscriptionRequestStream": ".audiotranscriptionrequeststream", "AudioTranscriptionRequestStreamTypedDict": ".audiotranscriptionrequeststream", "BaseModelCard": ".basemodelcard", - "BaseModelCardType": ".basemodelcard", "BaseModelCardTypedDict": ".basemodelcard", "BatchError": ".batcherror", "BatchErrorTypedDict": ".batcherror", @@ -1870,34 +1768,34 @@ "ChatClassificationRequest": ".chatclassificationrequest", "ChatClassificationRequestTypedDict": ".chatclassificationrequest", "ChatCompletionChoice": ".chatcompletionchoice", + "ChatCompletionChoiceFinishReason": 
".chatcompletionchoice", "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", - "FinishReason": ".chatcompletionchoice", "ChatCompletionRequest": ".chatcompletionrequest", + "ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestStop": ".chatcompletionrequest", + "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", "ChatCompletionRequestToolChoice": ".chatcompletionrequest", "ChatCompletionRequestToolChoiceTypedDict": ".chatcompletionrequest", "ChatCompletionRequestTypedDict": ".chatcompletionrequest", - "Messages": ".chatcompletionrequest", - "MessagesTypedDict": ".chatcompletionrequest", - "Stop": ".chatcompletionrequest", - "StopTypedDict": ".chatcompletionrequest", "ChatCompletionResponse": ".chatcompletionresponse", "ChatCompletionResponseTypedDict": ".chatcompletionresponse", "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestMessages": ".chatcompletionstreamrequest", - "ChatCompletionStreamRequestMessagesTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", "ChatModerationRequest": ".chatmoderationrequest", - "ChatModerationRequestInputs": ".chatmoderationrequest", - "ChatModerationRequestInputsTypedDict": ".chatmoderationrequest", + "ChatModerationRequestInputs1": ".chatmoderationrequest", + "ChatModerationRequestInputs1TypedDict": ".chatmoderationrequest", + "ChatModerationRequestInputs2": 
".chatmoderationrequest", + "ChatModerationRequestInputs2TypedDict": ".chatmoderationrequest", + "ChatModerationRequestInputs3": ".chatmoderationrequest", + "ChatModerationRequestInputs3TypedDict": ".chatmoderationrequest", "ChatModerationRequestTypedDict": ".chatmoderationrequest", - "One": ".chatmoderationrequest", - "OneTypedDict": ".chatmoderationrequest", - "Two": ".chatmoderationrequest", - "TwoTypedDict": ".chatmoderationrequest", "CheckpointOut": ".checkpointout", "CheckpointOutTypedDict": ".checkpointout", "ClassificationRequest": ".classificationrequest", @@ -1909,20 +1807,17 @@ "ClassificationTargetResult": ".classificationtargetresult", "ClassificationTargetResultTypedDict": ".classificationtargetresult", "ClassifierDetailedJobOut": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegrations": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegrationsTypedDict": ".classifierdetailedjobout", - "ClassifierDetailedJobOutJobType": ".classifierdetailedjobout", + "ClassifierDetailedJobOutIntegration": ".classifierdetailedjobout", + "ClassifierDetailedJobOutIntegrationTypedDict": ".classifierdetailedjobout", "ClassifierDetailedJobOutObject": ".classifierdetailedjobout", "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", "ClassifierDetailedJobOutTypedDict": ".classifierdetailedjobout", "ClassifierFTModelOut": ".classifierftmodelout", - "ClassifierFTModelOutModelType": ".classifierftmodelout", "ClassifierFTModelOutObject": ".classifierftmodelout", "ClassifierFTModelOutTypedDict": ".classifierftmodelout", "ClassifierJobOut": ".classifierjobout", - "ClassifierJobOutIntegrations": ".classifierjobout", - "ClassifierJobOutIntegrationsTypedDict": ".classifierjobout", - "ClassifierJobOutJobType": ".classifierjobout", + "ClassifierJobOutIntegration": ".classifierjobout", + "ClassifierJobOutIntegrationTypedDict": ".classifierjobout", "ClassifierJobOutObject": ".classifierjobout", "ClassifierJobOutStatus": ".classifierjobout", 
"ClassifierJobOutTypedDict": ".classifierjobout", @@ -1935,7 +1830,6 @@ "ClassifierTrainingParametersIn": ".classifiertrainingparametersin", "ClassifierTrainingParametersInTypedDict": ".classifiertrainingparametersin", "CodeInterpreterTool": ".codeinterpretertool", - "CodeInterpreterToolType": ".codeinterpretertool", "CodeInterpreterToolTypedDict": ".codeinterpretertool", "CompletionArgs": ".completionargs", "CompletionArgsTypedDict": ".completionargs", @@ -1944,12 +1838,11 @@ "CompletionChunk": ".completionchunk", "CompletionChunkTypedDict": ".completionchunk", "CompletionDetailedJobOut": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrations": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrationsTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutJobType": ".completiondetailedjobout", + "CompletionDetailedJobOutIntegration": ".completiondetailedjobout", + "CompletionDetailedJobOutIntegrationTypedDict": ".completiondetailedjobout", "CompletionDetailedJobOutObject": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositories": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositoriesTypedDict": ".completiondetailedjobout", + "CompletionDetailedJobOutRepository": ".completiondetailedjobout", + "CompletionDetailedJobOutRepositoryTypedDict": ".completiondetailedjobout", "CompletionDetailedJobOutStatus": ".completiondetailedjobout", "CompletionDetailedJobOutTypedDict": ".completiondetailedjobout", "CompletionEvent": ".completionevent", @@ -1957,16 +1850,14 @@ "CompletionFTModelOut": ".completionftmodelout", "CompletionFTModelOutObject": ".completionftmodelout", "CompletionFTModelOutTypedDict": ".completionftmodelout", - "ModelType": ".completionftmodelout", "CompletionJobOut": ".completionjobout", + "CompletionJobOutIntegration": ".completionjobout", + "CompletionJobOutIntegrationTypedDict": ".completionjobout", "CompletionJobOutObject": ".completionjobout", + "CompletionJobOutRepository": 
".completionjobout", + "CompletionJobOutRepositoryTypedDict": ".completionjobout", + "CompletionJobOutStatus": ".completionjobout", "CompletionJobOutTypedDict": ".completionjobout", - "Integrations": ".completionjobout", - "IntegrationsTypedDict": ".completionjobout", - "JobType": ".completionjobout", - "Repositories": ".completionjobout", - "RepositoriesTypedDict": ".completionjobout", - "Status": ".completionjobout", "CompletionResponseStreamChoice": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", @@ -1989,25 +1880,25 @@ "ConversationHistory": ".conversationhistory", "ConversationHistoryObject": ".conversationhistory", "ConversationHistoryTypedDict": ".conversationhistory", - "Entries": ".conversationhistory", - "EntriesTypedDict": ".conversationhistory", + "Entry": ".conversationhistory", + "EntryTypedDict": ".conversationhistory", "ConversationInputs": ".conversationinputs", "ConversationInputsTypedDict": ".conversationinputs", "ConversationMessages": ".conversationmessages", "ConversationMessagesObject": ".conversationmessages", "ConversationMessagesTypedDict": ".conversationmessages", - "AgentVersion": ".conversationrequest", - "AgentVersionTypedDict": ".conversationrequest", "ConversationRequest": ".conversationrequest", + "ConversationRequestAgentVersion": ".conversationrequest", + "ConversationRequestAgentVersionTypedDict": ".conversationrequest", + "ConversationRequestHandoffExecution": ".conversationrequest", + "ConversationRequestTool": ".conversationrequest", + "ConversationRequestToolTypedDict": ".conversationrequest", "ConversationRequestTypedDict": ".conversationrequest", - "HandoffExecution": ".conversationrequest", - "Tools": ".conversationrequest", - "ToolsTypedDict": ".conversationrequest", "ConversationResponse": ".conversationresponse", "ConversationResponseObject": ".conversationresponse", 
"ConversationResponseTypedDict": ".conversationresponse", - "Outputs": ".conversationresponse", - "OutputsTypedDict": ".conversationresponse", + "Output": ".conversationresponse", + "OutputTypedDict": ".conversationresponse", "ConversationRestartRequest": ".conversationrestartrequest", "ConversationRestartRequestAgentVersion": ".conversationrestartrequest", "ConversationRestartRequestAgentVersionTypedDict": ".conversationrestartrequest", @@ -2022,8 +1913,8 @@ "ConversationStreamRequestAgentVersion": ".conversationstreamrequest", "ConversationStreamRequestAgentVersionTypedDict": ".conversationstreamrequest", "ConversationStreamRequestHandoffExecution": ".conversationstreamrequest", - "ConversationStreamRequestTools": ".conversationstreamrequest", - "ConversationStreamRequestToolsTypedDict": ".conversationstreamrequest", + "ConversationStreamRequestTool": ".conversationstreamrequest", + "ConversationStreamRequestToolTypedDict": ".conversationstreamrequest", "ConversationStreamRequestTypedDict": ".conversationstreamrequest", "ConversationUsageInfo": ".conversationusageinfo", "ConversationUsageInfoTypedDict": ".conversationusageinfo", @@ -2033,12 +1924,11 @@ "DeleteFileOutTypedDict": ".deletefileout", "DeleteModelOut": ".deletemodelout", "DeleteModelOutTypedDict": ".deletemodelout", - "Content": ".deltamessage", - "ContentTypedDict": ".deltamessage", "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": ".deltamessage", "DeltaMessageTypedDict": ".deltamessage", "DocumentLibraryTool": ".documentlibrarytool", - "DocumentLibraryToolType": ".documentlibrarytool", "DocumentLibraryToolTypedDict": ".documentlibrarytool", "DocumentOut": ".documentout", "DocumentOutTypedDict": ".documentout", @@ -2079,8 +1969,8 @@ "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", "FilesAPIRoutesRetrieveFileRequestTypedDict": 
".files_api_routes_retrieve_fileop", - "FilesAPIRoutesUploadFileMultiPartBodyParams": ".files_api_routes_upload_fileop", - "FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", + "MultiPartBodyParams": ".files_api_routes_upload_fileop", + "MultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", "FileSchema": ".fileschema", "FileSchemaTypedDict": ".fileschema", "FileSignedURL": ".filesignedurl", @@ -2100,7 +1990,6 @@ "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", "FTModelCard": ".ftmodelcard", - "FTModelCardType": ".ftmodelcard", "FTModelCardTypedDict": ".ftmodelcard", "Function": ".function", "FunctionTypedDict": ".function", @@ -2115,7 +2004,6 @@ "FunctionCallEntryArguments": ".functioncallentryarguments", "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", "FunctionCallEvent": ".functioncallevent", - "FunctionCallEventType": ".functioncallevent", "FunctionCallEventTypedDict": ".functioncallevent", "FunctionName": ".functionname", "FunctionNameTypedDict": ".functionname", @@ -2124,45 +2012,41 @@ "FunctionResultEntryType": ".functionresultentry", "FunctionResultEntryTypedDict": ".functionresultentry", "FunctionTool": ".functiontool", - "FunctionToolType": ".functiontool", "FunctionToolTypedDict": ".functiontool", "GithubRepositoryIn": ".githubrepositoryin", - "GithubRepositoryInType": ".githubrepositoryin", "GithubRepositoryInTypedDict": ".githubrepositoryin", "GithubRepositoryOut": ".githubrepositoryout", - "GithubRepositoryOutType": ".githubrepositoryout", "GithubRepositoryOutTypedDict": ".githubrepositoryout", "HTTPValidationError": ".httpvalidationerror", "HTTPValidationErrorData": ".httpvalidationerror", "ImageGenerationTool": ".imagegenerationtool", - "ImageGenerationToolType": ".imagegenerationtool", "ImageGenerationToolTypedDict": ".imagegenerationtool", "ImageURL": ".imageurl", "ImageURLTypedDict": ".imageurl", 
"ImageURLChunk": ".imageurlchunk", - "ImageURLChunkImageURL": ".imageurlchunk", - "ImageURLChunkImageURLTypedDict": ".imageurlchunk", "ImageURLChunkType": ".imageurlchunk", "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", "InputEntries": ".inputentries", "InputEntriesTypedDict": ".inputentries", "Inputs": ".inputs", + "InputsMessage": ".inputs", + "InputsMessageTypedDict": ".inputs", "InputsTypedDict": ".inputs", "InstructRequestInputs": ".inputs", - "InstructRequestInputsMessages": ".inputs", - "InstructRequestInputsMessagesTypedDict": ".inputs", "InstructRequestInputsTypedDict": ".inputs", "InstructRequest": ".instructrequest", - "InstructRequestMessages": ".instructrequest", - "InstructRequestMessagesTypedDict": ".instructrequest", + "InstructRequestMessage": ".instructrequest", + "InstructRequestMessageTypedDict": ".instructrequest", "InstructRequestTypedDict": ".instructrequest", "Hyperparameters": ".jobin", "HyperparametersTypedDict": ".jobin", "JobIn": ".jobin", - "JobInIntegrations": ".jobin", - "JobInIntegrationsTypedDict": ".jobin", - "JobInRepositories": ".jobin", - "JobInRepositoriesTypedDict": ".jobin", + "JobInIntegration": ".jobin", + "JobInIntegrationTypedDict": ".jobin", + "JobInRepository": ".jobin", + "JobInRepositoryTypedDict": ".jobin", "JobInTypedDict": ".jobin", "JobMetadataOut": ".jobmetadataout", "JobMetadataOutTypedDict": ".jobmetadataout", @@ -2180,15 +2064,15 @@ "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response1": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response1TypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + 
"Response": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "ResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "QueryParamStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", @@ -2229,8 +2113,8 @@ "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", - "LibrariesDocumentsUploadV1DocumentUpload": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1DocumentUploadTypedDict": ".libraries_documents_upload_v1op", + "DocumentUpload": ".libraries_documents_upload_v1op", + "DocumentUploadTypedDict": ".libraries_documents_upload_v1op", "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", 
"LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", "LibrariesGetV1Request": ".libraries_get_v1op", @@ -2264,10 +2148,10 @@ "MessageInputEntry": ".messageinputentry", "MessageInputEntryContent": ".messageinputentry", "MessageInputEntryContentTypedDict": ".messageinputentry", + "MessageInputEntryObject": ".messageinputentry", "MessageInputEntryRole": ".messageinputentry", "MessageInputEntryType": ".messageinputentry", "MessageInputEntryTypedDict": ".messageinputentry", - "Object": ".messageinputentry", "MessageOutputContentChunks": ".messageoutputcontentchunks", "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", "MessageOutputEntry": ".messageoutputentry", @@ -2281,7 +2165,6 @@ "MessageOutputEventContent": ".messageoutputevent", "MessageOutputEventContentTypedDict": ".messageoutputevent", "MessageOutputEventRole": ".messageoutputevent", - "MessageOutputEventType": ".messageoutputevent", "MessageOutputEventTypedDict": ".messageoutputevent", "MetricOut": ".metricout", "MetricOutTypedDict": ".metricout", @@ -2290,12 +2173,12 @@ "ModelCapabilitiesTypedDict": ".modelcapabilities", "ModelConversation": ".modelconversation", "ModelConversationObject": ".modelconversation", - "ModelConversationTools": ".modelconversation", - "ModelConversationToolsTypedDict": ".modelconversation", + "ModelConversationTool": ".modelconversation", + "ModelConversationToolTypedDict": ".modelconversation", "ModelConversationTypedDict": ".modelconversation", - "Data": ".modellist", - "DataTypedDict": ".modellist", "ModelList": ".modellist", + "ModelListData": ".modellist", + "ModelListDataTypedDict": ".modellist", "ModelListTypedDict": ".modellist", "ModerationObject": ".moderationobject", "ModerationObjectTypedDict": ".moderationobject", @@ -2330,9 +2213,9 @@ "ProcessingStatusOutTypedDict": ".processingstatusout", "RealtimeTranscriptionError": ".realtimetranscriptionerror", "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", - 
"Message": ".realtimetranscriptionerrordetail", - "MessageTypedDict": ".realtimetranscriptionerrordetail", "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailMessage": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionErrorDetailMessageTypedDict": ".realtimetranscriptionerrordetail", "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", "RealtimeTranscriptionSession": ".realtimetranscriptionsession", "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", @@ -2345,22 +2228,19 @@ "ReferenceChunkTypedDict": ".referencechunk", "RequestSource": ".requestsource", "ResponseDoneEvent": ".responsedoneevent", - "ResponseDoneEventType": ".responsedoneevent", "ResponseDoneEventTypedDict": ".responsedoneevent", "ResponseErrorEvent": ".responseerrorevent", - "ResponseErrorEventType": ".responseerrorevent", "ResponseErrorEventTypedDict": ".responseerrorevent", "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", "ResponseFormats": ".responseformats", "ResponseStartedEvent": ".responsestartedevent", - "ResponseStartedEventType": ".responsestartedevent", "ResponseStartedEventTypedDict": ".responsestartedevent", "ResponseValidationError": ".responsevalidationerror", + "ResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", "RetrieveFileOut": ".retrievefileout", "RetrieveFileOutTypedDict": 
".retrievefileout", "SampleType": ".sampletype", @@ -2376,7 +2256,6 @@ "SharingOutTypedDict": ".sharingout", "Source": ".source", "SSETypes": ".ssetypes", - "Role": ".systemmessage", "SystemMessage": ".systemmessage", "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": ".systemmessage", @@ -2402,23 +2281,20 @@ "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", - "ToolExecutionDeltaEventType": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventTypedDict": ".toolexecutiondeltaevent", "ToolExecutionDoneEvent": ".toolexecutiondoneevent", "ToolExecutionDoneEventName": ".toolexecutiondoneevent", "ToolExecutionDoneEventNameTypedDict": ".toolexecutiondoneevent", - "ToolExecutionDoneEventType": ".toolexecutiondoneevent", "ToolExecutionDoneEventTypedDict": ".toolexecutiondoneevent", - "Name": ".toolexecutionentry", - "NameTypedDict": ".toolexecutionentry", "ToolExecutionEntry": ".toolexecutionentry", + "ToolExecutionEntryName": ".toolexecutionentry", + "ToolExecutionEntryNameTypedDict": ".toolexecutionentry", "ToolExecutionEntryObject": ".toolexecutionentry", "ToolExecutionEntryType": ".toolexecutionentry", "ToolExecutionEntryTypedDict": ".toolexecutionentry", "ToolExecutionStartedEvent": ".toolexecutionstartedevent", "ToolExecutionStartedEventName": ".toolexecutionstartedevent", "ToolExecutionStartedEventNameTypedDict": ".toolexecutionstartedevent", - "ToolExecutionStartedEventType": ".toolexecutionstartedevent", "ToolExecutionStartedEventTypedDict": ".toolexecutionstartedevent", "ToolFileChunk": ".toolfilechunk", "ToolFileChunkTool": ".toolfilechunk", @@ -2428,7 +2304,6 @@ "ToolMessage": ".toolmessage", "ToolMessageContent": ".toolmessage", "ToolMessageContentTypedDict": ".toolmessage", - "ToolMessageRole": ".toolmessage", "ToolMessageTypedDict": ".toolmessage", "ToolReferenceChunk": ".toolreferencechunk", 
"ToolReferenceChunkTool": ".toolreferencechunk", @@ -2441,10 +2316,9 @@ "TranscriptionResponse": ".transcriptionresponse", "TranscriptionResponseTypedDict": ".transcriptionresponse", "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", + "TranscriptionSegmentChunkType": ".transcriptionsegmentchunk", "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", - "Type": ".transcriptionsegmentchunk", "TranscriptionStreamDone": ".transcriptionstreamdone", - "TranscriptionStreamDoneType": ".transcriptionstreamdone", "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", "TranscriptionStreamEvents": ".transcriptionstreamevents", "TranscriptionStreamEventsData": ".transcriptionstreamevents", @@ -2452,13 +2326,10 @@ "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", - "TranscriptionStreamLanguageType": ".transcriptionstreamlanguage", "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", "TranscriptionStreamSegmentDelta": ".transcriptionstreamsegmentdelta", - "TranscriptionStreamSegmentDeltaType": ".transcriptionstreamsegmentdelta", "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", - "TranscriptionStreamTextDeltaType": ".transcriptionstreamtextdelta", "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", "UnarchiveFTModelOut": ".unarchiveftmodelout", "UnarchiveFTModelOutObject": ".unarchiveftmodelout", @@ -2472,23 +2343,18 @@ "UserMessage": ".usermessage", "UserMessageContent": ".usermessage", "UserMessageContentTypedDict": ".usermessage", - "UserMessageRole": ".usermessage", "UserMessageTypedDict": ".usermessage", "Loc": ".validationerror", "LocTypedDict": ".validationerror", "ValidationError": ".validationerror", "ValidationErrorTypedDict": ".validationerror", 
"WandbIntegration": ".wandbintegration", - "WandbIntegrationType": ".wandbintegration", "WandbIntegrationTypedDict": ".wandbintegration", "WandbIntegrationOut": ".wandbintegrationout", - "WandbIntegrationOutType": ".wandbintegrationout", "WandbIntegrationOutTypedDict": ".wandbintegrationout", "WebSearchPremiumTool": ".websearchpremiumtool", - "WebSearchPremiumToolType": ".websearchpremiumtool", "WebSearchPremiumToolTypedDict": ".websearchpremiumtool", "WebSearchTool": ".websearchtool", - "WebSearchToolType": ".websearchtool", "WebSearchToolTypedDict": ".websearchtool", } diff --git a/src/mistralai/client/models/agent.py b/src/mistralai/client/models/agent.py index 3bedb3a3..b2fe3939 100644 --- a/src/mistralai/client/models/agent.py +++ b/src/mistralai/client/models/agent.py @@ -16,14 +16,13 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentToolsTypedDict = TypeAliasType( - "AgentToolsTypedDict", +AgentToolTypedDict = TypeAliasType( + "AgentToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -35,16 +34,16 @@ ) -AgentTools = Annotated[ +AgentTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] @@ -63,7 +62,7 @@ class 
AgentTypedDict(TypedDict): source: str instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentToolsTypedDict]] + tools: NotRequired[List[AgentToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -95,7 +94,7 @@ class Agent(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[AgentTools]] = None + tools: Optional[List[AgentTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: Optional[CompletionArgs] = None diff --git a/src/mistralai/client/models/agentcreationrequest.py b/src/mistralai/client/models/agentcreationrequest.py index 61a5aff5..561bef64 100644 --- a/src/mistralai/client/models/agentcreationrequest.py +++ b/src/mistralai/client/models/agentcreationrequest.py @@ -15,14 +15,13 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentCreationRequestToolsTypedDict = TypeAliasType( - "AgentCreationRequestToolsTypedDict", +AgentCreationRequestToolTypedDict = TypeAliasType( + "AgentCreationRequestToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -34,16 +33,16 @@ ) -AgentCreationRequestTools = Annotated[ +AgentCreationRequestTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - 
Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] @@ -52,7 +51,7 @@ class AgentCreationRequestTypedDict(TypedDict): name: str instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentCreationRequestToolsTypedDict]] + tools: NotRequired[List[AgentCreationRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -69,7 +68,7 @@ class AgentCreationRequest(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[AgentCreationRequestTools]] = None + tools: Optional[List[AgentCreationRequestTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: Optional[CompletionArgs] = None diff --git a/src/mistralai/client/models/agenthandoffdoneevent.py b/src/mistralai/client/models/agenthandoffdoneevent.py index c826aa5e..40bb446b 100644 --- a/src/mistralai/client/models/agenthandoffdoneevent.py +++ b/src/mistralai/client/models/agenthandoffdoneevent.py @@ -3,18 +3,18 @@ from __future__ import annotations from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffDoneEventType = Literal["agent.handoff.done",] +from typing_extensions import Annotated, NotRequired, TypedDict 
class AgentHandoffDoneEventTypedDict(TypedDict): id: str next_agent_id: str next_agent_name: str - type: NotRequired[AgentHandoffDoneEventType] + type: Literal["agent.handoff.done"] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -26,7 +26,13 @@ class AgentHandoffDoneEvent(BaseModel): next_agent_name: str - type: Optional[AgentHandoffDoneEventType] = "agent.handoff.done" + TYPE: Annotated[ + Annotated[ + Literal["agent.handoff.done"], + AfterValidator(validate_const("agent.handoff.done")), + ], + pydantic.Field(alias="type"), + ] = "agent.handoff.done" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/agenthandoffstartedevent.py b/src/mistralai/client/models/agenthandoffstartedevent.py index 4b8ff1e5..93f56db0 100644 --- a/src/mistralai/client/models/agenthandoffstartedevent.py +++ b/src/mistralai/client/models/agenthandoffstartedevent.py @@ -3,18 +3,18 @@ from __future__ import annotations from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffStartedEventType = Literal["agent.handoff.started",] +from typing_extensions import Annotated, NotRequired, TypedDict class AgentHandoffStartedEventTypedDict(TypedDict): id: str previous_agent_id: str previous_agent_name: str - type: NotRequired[AgentHandoffStartedEventType] + type: Literal["agent.handoff.started"] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -26,7 +26,13 @@ class AgentHandoffStartedEvent(BaseModel): previous_agent_name: str - type: Optional[AgentHandoffStartedEventType] = "agent.handoff.started" + TYPE: Annotated[ + Annotated[ + Literal["agent.handoff.started"], + AfterValidator(validate_const("agent.handoff.started")), + ], + pydantic.Field(alias="type"), + ] = "agent.handoff.started" 
created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/agents_api_v1_agents_getop.py b/src/mistralai/client/models/agents_api_v1_agents_getop.py index d4817457..57abff76 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_getop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_getop.py @@ -14,17 +14,19 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -QueryParamAgentVersionTypedDict = TypeAliasType( - "QueryParamAgentVersionTypedDict", Union[int, str] +AgentsAPIV1AgentsGetAgentVersionTypedDict = TypeAliasType( + "AgentsAPIV1AgentsGetAgentVersionTypedDict", Union[int, str] ) -QueryParamAgentVersion = TypeAliasType("QueryParamAgentVersion", Union[int, str]) +AgentsAPIV1AgentsGetAgentVersion = TypeAliasType( + "AgentsAPIV1AgentsGetAgentVersion", Union[int, str] +) class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): agent_id: str - agent_version: NotRequired[Nullable[QueryParamAgentVersionTypedDict]] + agent_version: NotRequired[Nullable[AgentsAPIV1AgentsGetAgentVersionTypedDict]] class AgentsAPIV1AgentsGetRequest(BaseModel): @@ -33,7 +35,7 @@ class AgentsAPIV1AgentsGetRequest(BaseModel): ] agent_version: Annotated[ - OptionalNullable[QueryParamAgentVersion], + OptionalNullable[AgentsAPIV1AgentsGetAgentVersion], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET diff --git a/src/mistralai/client/models/agents_api_v1_conversations_getop.py b/src/mistralai/client/models/agents_api_v1_conversations_getop.py index c919f99e..7308708e 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_getop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_getop.py @@ -21,15 +21,14 @@ class AgentsAPIV1ConversationsGetRequest(BaseModel): r"""ID of the conversation from which we are fetching metadata.""" -AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict = TypeAliasType( - "AgentsAPIV1ConversationsGetResponseV1ConversationsGetTypedDict", 
+ResponseV1ConversationsGetTypedDict = TypeAliasType( + "ResponseV1ConversationsGetTypedDict", Union[AgentConversationTypedDict, ModelConversationTypedDict], ) r"""Successful Response""" -AgentsAPIV1ConversationsGetResponseV1ConversationsGet = TypeAliasType( - "AgentsAPIV1ConversationsGetResponseV1ConversationsGet", - Union[AgentConversation, ModelConversation], +ResponseV1ConversationsGet = TypeAliasType( + "ResponseV1ConversationsGet", Union[AgentConversation, ModelConversation] ) r"""Successful Response""" diff --git a/src/mistralai/client/models/agents_api_v1_conversations_listop.py b/src/mistralai/client/models/agents_api_v1_conversations_listop.py index bb3c7127..aae9c74e 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_listop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_listop.py @@ -69,12 +69,12 @@ def serialize_model(self, handler): return m -ResponseBodyTypedDict = TypeAliasType( - "ResponseBodyTypedDict", +AgentsAPIV1ConversationsListResponseTypedDict = TypeAliasType( + "AgentsAPIV1ConversationsListResponseTypedDict", Union[AgentConversationTypedDict, ModelConversationTypedDict], ) -ResponseBody = TypeAliasType( - "ResponseBody", Union[AgentConversation, ModelConversation] +AgentsAPIV1ConversationsListResponse = TypeAliasType( + "AgentsAPIV1ConversationsListResponse", Union[AgentConversation, ModelConversation] ) diff --git a/src/mistralai/client/models/agentscompletionrequest.py b/src/mistralai/client/models/agentscompletionrequest.py index 22368e44..3b045ed6 100644 --- a/src/mistralai/client/models/agentscompletionrequest.py +++ b/src/mistralai/client/models/agentscompletionrequest.py @@ -36,8 +36,8 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -AgentsCompletionRequestMessagesTypedDict = TypeAliasType( - "AgentsCompletionRequestMessagesTypedDict", +AgentsCompletionRequestMessageTypedDict = TypeAliasType( + "AgentsCompletionRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -47,7 +47,7 @@ ) -AgentsCompletionRequestMessages = Annotated[ +AgentsCompletionRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -70,7 +70,7 @@ class AgentsCompletionRequestTypedDict(TypedDict): - messages: List[AgentsCompletionRequestMessagesTypedDict] + messages: List[AgentsCompletionRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" agent_id: str r"""The ID of the agent to use for this completion.""" @@ -101,7 +101,7 @@ class AgentsCompletionRequestTypedDict(TypedDict): class AgentsCompletionRequest(BaseModel): - messages: List[AgentsCompletionRequestMessages] + messages: List[AgentsCompletionRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" agent_id: str diff --git a/src/mistralai/client/models/agentscompletionstreamrequest.py b/src/mistralai/client/models/agentscompletionstreamrequest.py index 37d46c79..23920c4e 100644 --- a/src/mistralai/client/models/agentscompletionstreamrequest.py +++ b/src/mistralai/client/models/agentscompletionstreamrequest.py @@ -36,8 +36,8 @@ r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -AgentsCompletionStreamRequestMessagesTypedDict = TypeAliasType( - "AgentsCompletionStreamRequestMessagesTypedDict", +AgentsCompletionStreamRequestMessageTypedDict = TypeAliasType( + "AgentsCompletionStreamRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -47,7 +47,7 @@ ) -AgentsCompletionStreamRequestMessages = Annotated[ +AgentsCompletionStreamRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -70,7 +70,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): - messages: List[AgentsCompletionStreamRequestMessagesTypedDict] + messages: List[AgentsCompletionStreamRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" agent_id: str r"""The ID of the agent to use for this completion.""" @@ -100,7 +100,7 @@ class AgentsCompletionStreamRequestTypedDict(TypedDict): class AgentsCompletionStreamRequest(BaseModel): - messages: List[AgentsCompletionStreamRequestMessages] + messages: List[AgentsCompletionStreamRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" agent_id: str diff --git a/src/mistralai/client/models/agentupdaterequest.py b/src/mistralai/client/models/agentupdaterequest.py index 261ac069..be93157d 100644 --- a/src/mistralai/client/models/agentupdaterequest.py +++ b/src/mistralai/client/models/agentupdaterequest.py @@ -15,14 +15,13 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentUpdateRequestToolsTypedDict = TypeAliasType( - "AgentUpdateRequestToolsTypedDict", 
+AgentUpdateRequestToolTypedDict = TypeAliasType( + "AgentUpdateRequestToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -34,23 +33,23 @@ ) -AgentUpdateRequestTools = Annotated[ +AgentUpdateRequestTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] class AgentUpdateRequestTypedDict(TypedDict): instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentUpdateRequestToolsTypedDict]] + tools: NotRequired[List[AgentUpdateRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -66,7 +65,7 @@ class AgentUpdateRequest(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[AgentUpdateRequestTools]] = None + tools: Optional[List[AgentUpdateRequestTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: Optional[CompletionArgs] = None diff --git a/src/mistralai/client/models/audiochunk.py b/src/mistralai/client/models/audiochunk.py index 80d836f2..fae1193c 100644 --- a/src/mistralai/client/models/audiochunk.py +++ b/src/mistralai/client/models/audiochunk.py @@ -2,19 +2,24 @@ from __future__ import annotations from mistralai.client.types 
import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AudioChunkType = Literal["input_audio",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class AudioChunkTypedDict(TypedDict): input_audio: str - type: NotRequired[AudioChunkType] + type: Literal["input_audio"] class AudioChunk(BaseModel): input_audio: str - type: Optional[AudioChunkType] = "input_audio" + TYPE: Annotated[ + Annotated[ + Literal["input_audio"], AfterValidator(validate_const("input_audio")) + ], + pydantic.Field(alias="type"), + ] = "input_audio" diff --git a/src/mistralai/client/models/basemodelcard.py b/src/mistralai/client/models/basemodelcard.py index 8ce7f139..f16607d5 100644 --- a/src/mistralai/client/models/basemodelcard.py +++ b/src/mistralai/client/models/basemodelcard.py @@ -18,9 +18,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -BaseModelCardType = Literal["base",] - - class BaseModelCardTypedDict(TypedDict): id: str capabilities: ModelCapabilitiesTypedDict @@ -34,7 +31,7 @@ class BaseModelCardTypedDict(TypedDict): deprecation: NotRequired[Nullable[datetime]] deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: BaseModelCardType + type: Literal["base"] class BaseModelCard(BaseModel): @@ -63,7 +60,7 @@ class BaseModelCard(BaseModel): default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ - Annotated[Optional[BaseModelCardType], AfterValidator(validate_const("base"))], + Annotated[Literal["base"], AfterValidator(validate_const("base"))], pydantic.Field(alias="type"), ] = "base" @@ -80,7 +77,6 @@ def serialize_model(self, handler): "deprecation", "deprecation_replacement_model", "default_model_temperature", - "type", ] nullable_fields = [ "name", diff --git 
a/src/mistralai/client/models/batchjobstatus.py b/src/mistralai/client/models/batchjobstatus.py index 4b28059b..1ba3dd55 100644 --- a/src/mistralai/client/models/batchjobstatus.py +++ b/src/mistralai/client/models/batchjobstatus.py @@ -1,15 +1,19 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -BatchJobStatus = Literal[ - "QUEUED", - "RUNNING", - "SUCCESS", - "FAILED", - "TIMEOUT_EXCEEDED", - "CANCELLATION_REQUESTED", - "CANCELLED", +BatchJobStatus = Union[ + Literal[ + "QUEUED", + "RUNNING", + "SUCCESS", + "FAILED", + "TIMEOUT_EXCEEDED", + "CANCELLATION_REQUESTED", + "CANCELLED", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/builtinconnectors.py b/src/mistralai/client/models/builtinconnectors.py index 6a3b2476..4a98b45b 100644 --- a/src/mistralai/client/models/builtinconnectors.py +++ b/src/mistralai/client/models/builtinconnectors.py @@ -1,13 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -BuiltInConnectors = Literal[ - "web_search", - "web_search_premium", - "code_interpreter", - "image_generation", - "document_library", +BuiltInConnectors = Union[ + Literal[ + "web_search", + "web_search_premium", + "code_interpreter", + "image_generation", + "document_library", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/chatcompletionchoice.py b/src/mistralai/client/models/chatcompletionchoice.py index 5d888cfd..5752f7c1 100644 --- a/src/mistralai/client/models/chatcompletionchoice.py +++ b/src/mistralai/client/models/chatcompletionchoice.py @@ -7,7 +7,7 @@ from typing_extensions import TypedDict -FinishReason = Union[ +ChatCompletionChoiceFinishReason = Union[ Literal[ "stop", "length", @@ -22,7 +22,7 @@ class ChatCompletionChoiceTypedDict(TypedDict): index: int message: AssistantMessageTypedDict - finish_reason: FinishReason + finish_reason: ChatCompletionChoiceFinishReason class ChatCompletionChoice(BaseModel): @@ -30,4 +30,4 @@ class ChatCompletionChoice(BaseModel): message: AssistantMessage - finish_reason: FinishReason + finish_reason: ChatCompletionChoiceFinishReason diff --git a/src/mistralai/client/models/chatcompletionrequest.py b/src/mistralai/client/models/chatcompletionrequest.py index 30fce28d..62c375e0 100644 --- a/src/mistralai/client/models/chatcompletionrequest.py +++ b/src/mistralai/client/models/chatcompletionrequest.py @@ -24,16 +24,20 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) +ChatCompletionRequestStopTypedDict = TypeAliasType( + "ChatCompletionRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -Stop = TypeAliasType("Stop", Union[str, List[str]]) +ChatCompletionRequestStop = TypeAliasType( + "ChatCompletionRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = TypeAliasType( - "MessagesTypedDict", +ChatCompletionRequestMessageTypedDict = TypeAliasType( + "ChatCompletionRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -43,7 +47,7 @@ ) -Messages = Annotated[ +ChatCompletionRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -70,7 +74,7 @@ class ChatCompletionRequestTypedDict(TypedDict): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[MessagesTypedDict] + messages: List[ChatCompletionRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -80,7 +84,7 @@ class ChatCompletionRequestTypedDict(TypedDict): r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" stream: NotRequired[bool] r"""Whether to stream back partial progress. 
If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: NotRequired[StopTypedDict] + stop: NotRequired[ChatCompletionRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" @@ -111,7 +115,7 @@ class ChatCompletionRequest(BaseModel): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[Messages] + messages: List[ChatCompletionRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: OptionalNullable[float] = UNSET @@ -126,7 +130,7 @@ class ChatCompletionRequest(BaseModel): stream: Optional[bool] = False r"""Whether to stream back partial progress. If set, tokens will be sent as data-only server-side events as they become available, with the stream terminated by a data: [DONE] message. Otherwise, the server will hold the request open until the timeout or until completion, with the response containing the full result as JSON.""" - stop: Optional[Stop] = None + stop: Optional[ChatCompletionRequestStop] = None r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" random_seed: OptionalNullable[int] = UNSET diff --git a/src/mistralai/client/models/chatcompletionstreamrequest.py b/src/mistralai/client/models/chatcompletionstreamrequest.py index 21dad38b..4e5c281d 100644 --- a/src/mistralai/client/models/chatcompletionstreamrequest.py +++ b/src/mistralai/client/models/chatcompletionstreamrequest.py @@ -36,8 +36,8 @@ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -ChatCompletionStreamRequestMessagesTypedDict = TypeAliasType( - "ChatCompletionStreamRequestMessagesTypedDict", +ChatCompletionStreamRequestMessageTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -47,7 +47,7 @@ ) -ChatCompletionStreamRequestMessages = Annotated[ +ChatCompletionStreamRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -74,7 +74,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionStreamRequestMessagesTypedDict] + messages: List[ChatCompletionStreamRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. 
Call the `/models` endpoint to retrieve the appropriate value.""" @@ -114,7 +114,7 @@ class ChatCompletionStreamRequest(BaseModel): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionStreamRequestMessages] + messages: List[ChatCompletionStreamRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: OptionalNullable[float] = UNSET diff --git a/src/mistralai/client/models/chatmoderationrequest.py b/src/mistralai/client/models/chatmoderationrequest.py index 631c914d..4e2611c8 100644 --- a/src/mistralai/client/models/chatmoderationrequest.py +++ b/src/mistralai/client/models/chatmoderationrequest.py @@ -13,8 +13,8 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -TwoTypedDict = TypeAliasType( - "TwoTypedDict", +ChatModerationRequestInputs2TypedDict = TypeAliasType( + "ChatModerationRequestInputs2TypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -24,7 +24,7 @@ ) -Two = Annotated[ +ChatModerationRequestInputs2 = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -35,8 +35,8 @@ ] -OneTypedDict = TypeAliasType( - "OneTypedDict", +ChatModerationRequestInputs1TypedDict = TypeAliasType( + "ChatModerationRequestInputs1TypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -46,7 +46,7 @@ ) -One = Annotated[ +ChatModerationRequestInputs1 = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -57,27 +57,31 @@ ] -ChatModerationRequestInputsTypedDict = TypeAliasType( - "ChatModerationRequestInputsTypedDict", - Union[List[OneTypedDict], List[List[TwoTypedDict]]], +ChatModerationRequestInputs3TypedDict = TypeAliasType( + 
"ChatModerationRequestInputs3TypedDict", + Union[ + List[ChatModerationRequestInputs1TypedDict], + List[List[ChatModerationRequestInputs2TypedDict]], + ], ) r"""Chat to classify""" -ChatModerationRequestInputs = TypeAliasType( - "ChatModerationRequestInputs", Union[List[One], List[List[Two]]] +ChatModerationRequestInputs3 = TypeAliasType( + "ChatModerationRequestInputs3", + Union[List[ChatModerationRequestInputs1], List[List[ChatModerationRequestInputs2]]], ) r"""Chat to classify""" class ChatModerationRequestTypedDict(TypedDict): - inputs: ChatModerationRequestInputsTypedDict + inputs: ChatModerationRequestInputs3TypedDict r"""Chat to classify""" model: str class ChatModerationRequest(BaseModel): - inputs: Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")] + inputs: Annotated[ChatModerationRequestInputs3, pydantic.Field(alias="input")] r"""Chat to classify""" model: str diff --git a/src/mistralai/client/models/classifierdetailedjobout.py b/src/mistralai/client/models/classifierdetailedjobout.py index 1de4534f..ffe99270 100644 --- a/src/mistralai/client/models/classifierdetailedjobout.py +++ b/src/mistralai/client/models/classifierdetailedjobout.py @@ -16,36 +16,40 @@ OptionalNullable, UNSET, UNSET_SENTINEL, + UnrecognizedStr, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierDetailedJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassifierDetailedJobOutStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + 
"FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, ] ClassifierDetailedJobOutObject = Literal["job",] -ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict +ClassifierDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict -ClassifierDetailedJobOutIntegrations = WandbIntegrationOut - - -ClassifierDetailedJobOutJobType = Literal["classifier",] +ClassifierDetailedJobOutIntegration = WandbIntegrationOut class ClassifierDetailedJobOutTypedDict(TypedDict): @@ -64,11 +68,11 @@ class ClassifierDetailedJobOutTypedDict(TypedDict): fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] integrations: NotRequired[ - Nullable[List[ClassifierDetailedJobOutIntegrationsTypedDict]] + Nullable[List[ClassifierDetailedJobOutIntegrationTypedDict]] ] trained_tokens: NotRequired[Nullable[int]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[ClassifierDetailedJobOutJobType] + job_type: Literal["classifier"] events: NotRequired[List[EventOutTypedDict]] r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" checkpoints: NotRequired[List[CheckpointOutTypedDict]] @@ -102,13 +106,16 @@ class ClassifierDetailedJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET - integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegrations]] = UNSET + integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegration]] = UNSET trained_tokens: OptionalNullable[int] = UNSET metadata: OptionalNullable[JobMetadataOut] = UNSET - job_type: Optional[ClassifierDetailedJobOutJobType] = "classifier" + JOB_TYPE: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="job_type"), + ] = "classifier" events: Optional[List[EventOut]] = None r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" @@ -125,7 +132,6 @@ def serialize_model(self, handler): "integrations", "trained_tokens", "metadata", - "job_type", "events", "checkpoints", ] diff --git a/src/mistralai/client/models/classifierftmodelout.py b/src/mistralai/client/models/classifierftmodelout.py index a4572108..c6d34167 100644 --- a/src/mistralai/client/models/classifierftmodelout.py +++ b/src/mistralai/client/models/classifierftmodelout.py @@ -13,17 +13,17 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict ClassifierFTModelOutObject = Literal["model",] -ClassifierFTModelOutModelType = Literal["classifier",] - - class ClassifierFTModelOutTypedDict(TypedDict): id: str created: int @@ -40,7 +40,7 @@ class ClassifierFTModelOutTypedDict(TypedDict): description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] aliases: 
NotRequired[List[str]] - model_type: NotRequired[ClassifierFTModelOutModelType] + model_type: Literal["classifier"] class ClassifierFTModelOut(BaseModel): @@ -74,7 +74,10 @@ class ClassifierFTModelOut(BaseModel): aliases: Optional[List[str]] = None - model_type: Optional[ClassifierFTModelOutModelType] = "classifier" + MODEL_TYPE: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="model_type"), + ] = "classifier" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -84,7 +87,6 @@ def serialize_model(self, handler): "description", "max_context_length", "aliases", - "model_type", ] nullable_fields = ["name", "description"] null_default_fields = [] diff --git a/src/mistralai/client/models/classifierjobout.py b/src/mistralai/client/models/classifierjobout.py index ab1e261d..1390aea1 100644 --- a/src/mistralai/client/models/classifierjobout.py +++ b/src/mistralai/client/models/classifierjobout.py @@ -13,23 +13,30 @@ OptionalNullable, UNSET, UNSET_SENTINEL, + UnrecognizedStr, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ClassifierJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassifierJobOutStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, ] r"""The current status of the fine-tuning job.""" @@ -38,14 +45,10 @@ r"""The object type of the fine-tuning job.""" 
-ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict +ClassifierJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict -ClassifierJobOutIntegrations = WandbIntegrationOut - - -ClassifierJobOutJobType = Literal["classifier",] -r"""The type of job (`FT` for fine-tuning).""" +ClassifierJobOutIntegration = WandbIntegrationOut class ClassifierJobOutTypedDict(TypedDict): @@ -71,12 +74,12 @@ class ClassifierJobOutTypedDict(TypedDict): r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationsTypedDict]]] + integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationTypedDict]]] r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: NotRequired[Nullable[int]] r"""Total number of tokens trained.""" metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[ClassifierJobOutJobType] + job_type: Literal["classifier"] r"""The type of job (`FT` for fine-tuning).""" @@ -115,7 +118,7 @@ class ClassifierJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: OptionalNullable[List[ClassifierJobOutIntegrations]] = UNSET + integrations: OptionalNullable[List[ClassifierJobOutIntegration]] = UNSET r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: OptionalNullable[int] = UNSET @@ -123,7 +126,10 @@ class ClassifierJobOut(BaseModel): metadata: OptionalNullable[JobMetadataOut] = UNSET - job_type: Optional[ClassifierJobOutJobType] = "classifier" + JOB_TYPE: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="job_type"), + ] = "classifier" r"""The type of job (`FT` for fine-tuning).""" @model_serializer(mode="wrap") @@ -136,7 +142,6 @@ def serialize_model(self, handler): "integrations", "trained_tokens", "metadata", - "job_type", ] nullable_fields = [ "validation_files", diff --git a/src/mistralai/client/models/codeinterpretertool.py b/src/mistralai/client/models/codeinterpretertool.py index faf5b0b7..2f34cbda 100644 --- a/src/mistralai/client/models/codeinterpretertool.py +++ b/src/mistralai/client/models/codeinterpretertool.py @@ -2,16 +2,22 @@ from __future__ import annotations from mistralai.client.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CodeInterpreterToolType = Literal["code_interpreter",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class CodeInterpreterToolTypedDict(TypedDict): - type: NotRequired[CodeInterpreterToolType] + type: Literal["code_interpreter"] class CodeInterpreterTool(BaseModel): - type: Optional[CodeInterpreterToolType] = "code_interpreter" + TYPE: Annotated[ + Annotated[ + Literal["code_interpreter"], + AfterValidator(validate_const("code_interpreter")), + ], + 
pydantic.Field(alias="type"), + ] = "code_interpreter" diff --git a/src/mistralai/client/models/completiondetailedjobout.py b/src/mistralai/client/models/completiondetailedjobout.py index 85c0c803..ea444b8b 100644 --- a/src/mistralai/client/models/completiondetailedjobout.py +++ b/src/mistralai/client/models/completiondetailedjobout.py @@ -16,42 +16,46 @@ OptionalNullable, UNSET, UNSET_SENTINEL, + UnrecognizedStr, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -CompletionDetailedJobOutStatus = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +CompletionDetailedJobOutStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, ] CompletionDetailedJobOutObject = Literal["job",] -CompletionDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict +CompletionDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict -CompletionDetailedJobOutIntegrations = WandbIntegrationOut +CompletionDetailedJobOutIntegration = WandbIntegrationOut -CompletionDetailedJobOutJobType = Literal["completion",] +CompletionDetailedJobOutRepositoryTypedDict = GithubRepositoryOutTypedDict -CompletionDetailedJobOutRepositoriesTypedDict = GithubRepositoryOutTypedDict - - -CompletionDetailedJobOutRepositories = GithubRepositoryOut +CompletionDetailedJobOutRepository = GithubRepositoryOut class CompletionDetailedJobOutTypedDict(TypedDict): @@ -69,12 +73,12 @@ class 
CompletionDetailedJobOutTypedDict(TypedDict): fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] integrations: NotRequired[ - Nullable[List[CompletionDetailedJobOutIntegrationsTypedDict]] + Nullable[List[CompletionDetailedJobOutIntegrationTypedDict]] ] trained_tokens: NotRequired[Nullable[int]] metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[CompletionDetailedJobOutJobType] - repositories: NotRequired[List[CompletionDetailedJobOutRepositoriesTypedDict]] + job_type: Literal["completion"] + repositories: NotRequired[List[CompletionDetailedJobOutRepositoryTypedDict]] events: NotRequired[List[EventOutTypedDict]] r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" checkpoints: NotRequired[List[CheckpointOutTypedDict]] @@ -106,15 +110,18 @@ class CompletionDetailedJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET - integrations: OptionalNullable[List[CompletionDetailedJobOutIntegrations]] = UNSET + integrations: OptionalNullable[List[CompletionDetailedJobOutIntegration]] = UNSET trained_tokens: OptionalNullable[int] = UNSET metadata: OptionalNullable[JobMetadataOut] = UNSET - job_type: Optional[CompletionDetailedJobOutJobType] = "completion" + JOB_TYPE: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="job_type"), + ] = "completion" - repositories: Optional[List[CompletionDetailedJobOutRepositories]] = None + repositories: Optional[List[CompletionDetailedJobOutRepository]] = None events: Optional[List[EventOut]] = None r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" @@ -131,7 +138,6 @@ def serialize_model(self, handler): "integrations", "trained_tokens", "metadata", - "job_type", "repositories", "events", "checkpoints", diff --git a/src/mistralai/client/models/completionftmodelout.py b/src/mistralai/client/models/completionftmodelout.py index ccecbb6a..92f530af 100644 --- a/src/mistralai/client/models/completionftmodelout.py +++ b/src/mistralai/client/models/completionftmodelout.py @@ -12,17 +12,17 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict CompletionFTModelOutObject = Literal["model",] -ModelType = Literal["completion",] - - class CompletionFTModelOutTypedDict(TypedDict): id: str created: int @@ -38,7 +38,7 @@ class CompletionFTModelOutTypedDict(TypedDict): description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] aliases: NotRequired[List[str]] - model_type: NotRequired[ModelType] + model_type: Literal["completion"] class CompletionFTModelOut(BaseModel): @@ -70,7 +70,10 @@ class CompletionFTModelOut(BaseModel): aliases: Optional[List[str]] = None - model_type: Optional[ModelType] = "completion" + MODEL_TYPE: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="model_type"), + ] = "completion" @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -80,7 +83,6 @@ def serialize_model(self, handler): "description", "max_context_length", "aliases", - "model_type", ] nullable_fields = ["name", "description"] null_default_fields = [] diff --git a/src/mistralai/client/models/completionjobout.py b/src/mistralai/client/models/completionjobout.py index ecd95bb9..1628d8bb 
100644 --- a/src/mistralai/client/models/completionjobout.py +++ b/src/mistralai/client/models/completionjobout.py @@ -14,23 +14,30 @@ OptionalNullable, UNSET, UNSET_SENTINEL, + UnrecognizedStr, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Status = Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +CompletionJobOutStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, ] r"""The current status of the fine-tuning job.""" @@ -39,20 +46,16 @@ r"""The object type of the fine-tuning job.""" -IntegrationsTypedDict = WandbIntegrationOutTypedDict +CompletionJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict -Integrations = WandbIntegrationOut +CompletionJobOutIntegration = WandbIntegrationOut -JobType = Literal["completion",] -r"""The type of job (`FT` for fine-tuning).""" +CompletionJobOutRepositoryTypedDict = GithubRepositoryOutTypedDict -RepositoriesTypedDict = GithubRepositoryOutTypedDict - - -Repositories = GithubRepositoryOut +CompletionJobOutRepository = GithubRepositoryOut class CompletionJobOutTypedDict(TypedDict): @@ -61,7 +64,7 @@ class CompletionJobOutTypedDict(TypedDict): auto_start: bool model: str r"""The name of the model to fine-tune.""" - status: Status + status: CompletionJobOutStatus r"""The current status of the fine-tuning job.""" created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" @@ -78,14 
+81,14 @@ class CompletionJobOutTypedDict(TypedDict): r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[IntegrationsTypedDict]]] + integrations: NotRequired[Nullable[List[CompletionJobOutIntegrationTypedDict]]] r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: NotRequired[Nullable[int]] r"""Total number of tokens trained.""" metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: NotRequired[JobType] + job_type: Literal["completion"] r"""The type of job (`FT` for fine-tuning).""" - repositories: NotRequired[List[RepositoriesTypedDict]] + repositories: NotRequired[List[CompletionJobOutRepositoryTypedDict]] class CompletionJobOut(BaseModel): @@ -97,7 +100,7 @@ class CompletionJobOut(BaseModel): model: str r"""The name of the model to fine-tune.""" - status: Status + status: CompletionJobOutStatus r"""The current status of the fine-tuning job.""" created_at: int @@ -123,7 +126,7 @@ class CompletionJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: OptionalNullable[List[Integrations]] = UNSET + integrations: OptionalNullable[List[CompletionJobOutIntegration]] = UNSET r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: OptionalNullable[int] = UNSET @@ -131,10 +134,13 @@ class CompletionJobOut(BaseModel): metadata: OptionalNullable[JobMetadataOut] = UNSET - job_type: Optional[JobType] = "completion" + JOB_TYPE: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="job_type"), + ] = "completion" r"""The type of job (`FT` for fine-tuning).""" - repositories: Optional[List[Repositories]] = None + repositories: Optional[List[CompletionJobOutRepository]] = None @model_serializer(mode="wrap") def serialize_model(self, handler): @@ -146,7 +152,6 @@ def serialize_model(self, handler): "integrations", "trained_tokens", "metadata", - "job_type", "repositories", ] nullable_fields = [ diff --git a/src/mistralai/client/models/conversationevents.py b/src/mistralai/client/models/conversationevents.py index 308588a1..1c2b4592 100644 --- a/src/mistralai/client/models/conversationevents.py +++ b/src/mistralai/client/models/conversationevents.py @@ -25,8 +25,7 @@ ToolExecutionStartedEventTypedDict, ) from mistralai.client.types import BaseModel -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -50,18 +49,18 @@ ConversationEventsData = Annotated[ Union[ - Annotated[AgentHandoffDoneEvent, Tag("agent.handoff.done")], - Annotated[AgentHandoffStartedEvent, Tag("agent.handoff.started")], - Annotated[ResponseDoneEvent, Tag("conversation.response.done")], - Annotated[ResponseErrorEvent, Tag("conversation.response.error")], - Annotated[ResponseStartedEvent, 
Tag("conversation.response.started")], - Annotated[FunctionCallEvent, Tag("function.call.delta")], - Annotated[MessageOutputEvent, Tag("message.output.delta")], - Annotated[ToolExecutionDeltaEvent, Tag("tool.execution.delta")], - Annotated[ToolExecutionDoneEvent, Tag("tool.execution.done")], - Annotated[ToolExecutionStartedEvent, Tag("tool.execution.started")], + AgentHandoffDoneEvent, + AgentHandoffStartedEvent, + ResponseDoneEvent, + ResponseErrorEvent, + ResponseStartedEvent, + FunctionCallEvent, + MessageOutputEvent, + ToolExecutionDeltaEvent, + ToolExecutionDoneEvent, + ToolExecutionStartedEvent, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] diff --git a/src/mistralai/client/models/conversationhistory.py b/src/mistralai/client/models/conversationhistory.py index 40bd1e72..83e860f2 100644 --- a/src/mistralai/client/models/conversationhistory.py +++ b/src/mistralai/client/models/conversationhistory.py @@ -15,8 +15,8 @@ ConversationHistoryObject = Literal["conversation.history",] -EntriesTypedDict = TypeAliasType( - "EntriesTypedDict", +EntryTypedDict = TypeAliasType( + "EntryTypedDict", Union[ FunctionResultEntryTypedDict, MessageInputEntryTypedDict, @@ -28,8 +28,8 @@ ) -Entries = TypeAliasType( - "Entries", +Entry = TypeAliasType( + "Entry", Union[ FunctionResultEntry, MessageInputEntry, @@ -45,7 +45,7 @@ class ConversationHistoryTypedDict(TypedDict): r"""Retrieve all entries in a conversation.""" conversation_id: str - entries: List[EntriesTypedDict] + entries: List[EntryTypedDict] object: NotRequired[ConversationHistoryObject] @@ -54,6 +54,6 @@ class ConversationHistory(BaseModel): conversation_id: str - entries: List[Entries] + entries: List[Entry] object: Optional[ConversationHistoryObject] = "conversation.history" diff --git a/src/mistralai/client/models/conversationrequest.py b/src/mistralai/client/models/conversationrequest.py index e3211c4c..dd66c6ce 100644 --- 
a/src/mistralai/client/models/conversationrequest.py +++ b/src/mistralai/client/models/conversationrequest.py @@ -16,20 +16,19 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -HandoffExecution = Literal[ +ConversationRequestHandoffExecution = Literal[ "client", "server", ] -ToolsTypedDict = TypeAliasType( - "ToolsTypedDict", +ConversationRequestToolTypedDict = TypeAliasType( + "ConversationRequestToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -41,39 +40,43 @@ ) -Tools = Annotated[ +ConversationRequestTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] -AgentVersionTypedDict = TypeAliasType("AgentVersionTypedDict", Union[str, int]) +ConversationRequestAgentVersionTypedDict = TypeAliasType( + "ConversationRequestAgentVersionTypedDict", Union[str, int] +) -AgentVersion = TypeAliasType("AgentVersion", Union[str, int]) +ConversationRequestAgentVersion = TypeAliasType( + "ConversationRequestAgentVersion", Union[str, int] +) class ConversationRequestTypedDict(TypedDict): inputs: ConversationInputsTypedDict stream: NotRequired[bool] store: NotRequired[Nullable[bool]] - handoff_execution: NotRequired[Nullable[HandoffExecution]] + 
handoff_execution: NotRequired[Nullable[ConversationRequestHandoffExecution]] instructions: NotRequired[Nullable[str]] - tools: NotRequired[List[ToolsTypedDict]] + tools: NotRequired[List[ConversationRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] agent_id: NotRequired[Nullable[str]] - agent_version: NotRequired[Nullable[AgentVersionTypedDict]] + agent_version: NotRequired[Nullable[ConversationRequestAgentVersionTypedDict]] model: NotRequired[Nullable[str]] @@ -84,11 +87,11 @@ class ConversationRequest(BaseModel): store: OptionalNullable[bool] = UNSET - handoff_execution: OptionalNullable[HandoffExecution] = UNSET + handoff_execution: OptionalNullable[ConversationRequestHandoffExecution] = UNSET instructions: OptionalNullable[str] = UNSET - tools: Optional[List[Tools]] = None + tools: Optional[List[ConversationRequestTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: OptionalNullable[CompletionArgs] = UNSET @@ -101,7 +104,7 @@ class ConversationRequest(BaseModel): agent_id: OptionalNullable[str] = UNSET - agent_version: OptionalNullable[AgentVersion] = UNSET + agent_version: OptionalNullable[ConversationRequestAgentVersion] = UNSET model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/conversationresponse.py b/src/mistralai/client/models/conversationresponse.py index 32d0f28f..0a11fff8 100644 --- a/src/mistralai/client/models/conversationresponse.py +++ b/src/mistralai/client/models/conversationresponse.py @@ -14,8 +14,8 @@ ConversationResponseObject = Literal["conversation.response",] -OutputsTypedDict = TypeAliasType( - "OutputsTypedDict", +OutputTypedDict = TypeAliasType( + "OutputTypedDict", Union[ ToolExecutionEntryTypedDict, 
FunctionCallEntryTypedDict, @@ -25,8 +25,8 @@ ) -Outputs = TypeAliasType( - "Outputs", +Output = TypeAliasType( + "Output", Union[ToolExecutionEntry, FunctionCallEntry, MessageOutputEntry, AgentHandoffEntry], ) @@ -35,7 +35,7 @@ class ConversationResponseTypedDict(TypedDict): r"""The response after appending new entries to the conversation.""" conversation_id: str - outputs: List[OutputsTypedDict] + outputs: List[OutputTypedDict] usage: ConversationUsageInfoTypedDict object: NotRequired[ConversationResponseObject] @@ -45,7 +45,7 @@ class ConversationResponse(BaseModel): conversation_id: str - outputs: List[Outputs] + outputs: List[Output] usage: ConversationUsageInfo diff --git a/src/mistralai/client/models/conversationstreamrequest.py b/src/mistralai/client/models/conversationstreamrequest.py index 219230a2..9b8d0c44 100644 --- a/src/mistralai/client/models/conversationstreamrequest.py +++ b/src/mistralai/client/models/conversationstreamrequest.py @@ -16,8 +16,7 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -28,8 +27,8 @@ ] -ConversationStreamRequestToolsTypedDict = TypeAliasType( - "ConversationStreamRequestToolsTypedDict", +ConversationStreamRequestToolTypedDict = TypeAliasType( + "ConversationStreamRequestToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -41,16 +40,16 @@ ) -ConversationStreamRequestTools = Annotated[ +ConversationStreamRequestTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - 
Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] @@ -70,7 +69,7 @@ class ConversationStreamRequestTypedDict(TypedDict): store: NotRequired[Nullable[bool]] handoff_execution: NotRequired[Nullable[ConversationStreamRequestHandoffExecution]] instructions: NotRequired[Nullable[str]] - tools: NotRequired[List[ConversationStreamRequestToolsTypedDict]] + tools: NotRequired[List[ConversationStreamRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[Nullable[CompletionArgsTypedDict]] name: NotRequired[Nullable[str]] @@ -94,7 +93,7 @@ class ConversationStreamRequest(BaseModel): instructions: OptionalNullable[str] = UNSET - tools: Optional[List[ConversationStreamRequestTools]] = None + tools: Optional[List[ConversationStreamRequestTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: OptionalNullable[CompletionArgs] = UNSET diff --git a/src/mistralai/client/models/deltamessage.py b/src/mistralai/client/models/deltamessage.py index 0ae56da8..fc08d62a 100644 --- a/src/mistralai/client/models/deltamessage.py +++ b/src/mistralai/client/models/deltamessage.py @@ -15,24 +15,26 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ContentTypedDict = TypeAliasType( - "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] +DeltaMessageContentTypedDict = TypeAliasType( + "DeltaMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] ) -Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) +DeltaMessageContent = TypeAliasType( + "DeltaMessageContent", Union[str, List[ContentChunk]] +) class DeltaMessageTypedDict(TypedDict): role: NotRequired[Nullable[str]] - content: 
NotRequired[Nullable[ContentTypedDict]] + content: NotRequired[Nullable[DeltaMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: OptionalNullable[str] = UNSET - content: OptionalNullable[Content] = UNSET + content: OptionalNullable[DeltaMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET diff --git a/src/mistralai/client/models/documentlibrarytool.py b/src/mistralai/client/models/documentlibrarytool.py index 861a58d3..21eab39e 100644 --- a/src/mistralai/client/models/documentlibrarytool.py +++ b/src/mistralai/client/models/documentlibrarytool.py @@ -2,21 +2,27 @@ from __future__ import annotations from mistralai.client.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentLibraryToolType = Literal["document_library",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal +from typing_extensions import Annotated, TypedDict class DocumentLibraryToolTypedDict(TypedDict): library_ids: List[str] r"""Ids of the library in which to search.""" - type: NotRequired[DocumentLibraryToolType] + type: Literal["document_library"] class DocumentLibraryTool(BaseModel): library_ids: List[str] r"""Ids of the library in which to search.""" - type: Optional[DocumentLibraryToolType] = "document_library" + TYPE: Annotated[ + Annotated[ + Literal["document_library"], + AfterValidator(validate_const("document_library")), + ], + pydantic.Field(alias="type"), + ] = "document_library" diff --git a/src/mistralai/client/models/files_api_routes_upload_fileop.py b/src/mistralai/client/models/files_api_routes_upload_fileop.py index 723c6cc2..ab2f1524 100644 --- a/src/mistralai/client/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/client/models/files_api_routes_upload_fileop.py @@ -9,7 +9,7 @@ from 
typing_extensions import Annotated, NotRequired, TypedDict -class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): +class MultiPartBodyParamsTypedDict(TypedDict): file: FileTypedDict r"""The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: @@ -24,7 +24,7 @@ class FilesAPIRoutesUploadFileMultiPartBodyParamsTypedDict(TypedDict): purpose: NotRequired[FilePurpose] -class FilesAPIRoutesUploadFileMultiPartBodyParams(BaseModel): +class MultiPartBodyParams(BaseModel): file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] r"""The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: diff --git a/src/mistralai/client/models/ftclassifierlossfunction.py b/src/mistralai/client/models/ftclassifierlossfunction.py index c4ef66e0..e6781a5e 100644 --- a/src/mistralai/client/models/ftclassifierlossfunction.py +++ b/src/mistralai/client/models/ftclassifierlossfunction.py @@ -1,10 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -FTClassifierLossFunction = Literal[ - "single_class", - "multi_class", +FTClassifierLossFunction = Union[ + Literal[ + "single_class", + "multi_class", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/ftmodelcard.py b/src/mistralai/client/models/ftmodelcard.py index 36cb723d..06f088ec 100644 --- a/src/mistralai/client/models/ftmodelcard.py +++ b/src/mistralai/client/models/ftmodelcard.py @@ -18,9 +18,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -FTModelCardType = Literal["fine-tuned",] - - class FTModelCardTypedDict(TypedDict): r"""Extra fields for fine-tuned models.""" @@ -38,7 +35,7 @@ class FTModelCardTypedDict(TypedDict): deprecation: NotRequired[Nullable[datetime]] deprecation_replacement_model: NotRequired[Nullable[str]] default_model_temperature: NotRequired[Nullable[float]] - type: FTModelCardType + type: Literal["fine-tuned"] archived: NotRequired[bool] @@ -74,9 +71,7 @@ class FTModelCard(BaseModel): default_model_temperature: OptionalNullable[float] = UNSET TYPE: Annotated[ - Annotated[ - Optional[FTModelCardType], AfterValidator(validate_const("fine-tuned")) - ], + Annotated[Literal["fine-tuned"], AfterValidator(validate_const("fine-tuned"))], pydantic.Field(alias="type"), ] = "fine-tuned" @@ -95,7 +90,6 @@ def serialize_model(self, handler): "deprecation", "deprecation_replacement_model", "default_model_temperature", - "type", "archived", ] nullable_fields = [ diff --git a/src/mistralai/client/models/functioncallevent.py b/src/mistralai/client/models/functioncallevent.py index 4e040585..8146fa5c 100644 --- a/src/mistralai/client/models/functioncallevent.py +++ b/src/mistralai/client/models/functioncallevent.py @@ -3,11 +3,11 @@ from __future__ import annotations from datetime import datetime from mistralai.client.types import BaseModel +from 
mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionCallEventType = Literal["function.call.delta",] +from typing_extensions import Annotated, NotRequired, TypedDict class FunctionCallEventTypedDict(TypedDict): @@ -15,7 +15,7 @@ class FunctionCallEventTypedDict(TypedDict): name: str tool_call_id: str arguments: str - type: NotRequired[FunctionCallEventType] + type: Literal["function.call.delta"] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -29,7 +29,13 @@ class FunctionCallEvent(BaseModel): arguments: str - type: Optional[FunctionCallEventType] = "function.call.delta" + TYPE: Annotated[ + Annotated[ + Literal["function.call.delta"], + AfterValidator(validate_const("function.call.delta")), + ], + pydantic.Field(alias="type"), + ] = "function.call.delta" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/functiontool.py b/src/mistralai/client/models/functiontool.py index 74b50d1b..16abcbf3 100644 --- a/src/mistralai/client/models/functiontool.py +++ b/src/mistralai/client/models/functiontool.py @@ -3,19 +3,22 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from mistralai.client.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionToolType = Literal["function",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class FunctionToolTypedDict(TypedDict): function: FunctionTypedDict - type: NotRequired[FunctionToolType] + type: Literal["function"] class FunctionTool(BaseModel): function: Function - type: Optional[FunctionToolType] = "function" + TYPE: Annotated[ + 
Annotated[Literal["function"], AfterValidator(validate_const("function"))], + pydantic.Field(alias="type"), + ] = "function" diff --git a/src/mistralai/client/models/githubrepositoryin.py b/src/mistralai/client/models/githubrepositoryin.py index e56fef9b..4e4b4777 100644 --- a/src/mistralai/client/models/githubrepositoryin.py +++ b/src/mistralai/client/models/githubrepositoryin.py @@ -8,19 +8,19 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -GithubRepositoryInType = Literal["github",] +from typing_extensions import Annotated, NotRequired, TypedDict class GithubRepositoryInTypedDict(TypedDict): name: str owner: str token: str - type: NotRequired[GithubRepositoryInType] + type: Literal["github"] ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -32,7 +32,10 @@ class GithubRepositoryIn(BaseModel): token: str - type: Optional[GithubRepositoryInType] = "github" + TYPE: Annotated[ + Annotated[Literal["github"], AfterValidator(validate_const("github"))], + pydantic.Field(alias="type"), + ] = "github" ref: OptionalNullable[str] = UNSET @@ -40,7 +43,7 @@ class GithubRepositoryIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "ref", "weight"] + optional_fields = ["ref", "weight"] nullable_fields = ["ref"] null_default_fields = [] diff --git a/src/mistralai/client/models/githubrepositoryout.py b/src/mistralai/client/models/githubrepositoryout.py index e3aa9ebc..1f738708 100644 --- a/src/mistralai/client/models/githubrepositoryout.py +++ b/src/mistralai/client/models/githubrepositoryout.py @@ -8,19 +8,19 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from 
pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -GithubRepositoryOutType = Literal["github",] +from typing_extensions import Annotated, NotRequired, TypedDict class GithubRepositoryOutTypedDict(TypedDict): name: str owner: str commit_id: str - type: NotRequired[GithubRepositoryOutType] + type: Literal["github"] ref: NotRequired[Nullable[str]] weight: NotRequired[float] @@ -32,7 +32,10 @@ class GithubRepositoryOut(BaseModel): commit_id: str - type: Optional[GithubRepositoryOutType] = "github" + TYPE: Annotated[ + Annotated[Literal["github"], AfterValidator(validate_const("github"))], + pydantic.Field(alias="type"), + ] = "github" ref: OptionalNullable[str] = UNSET @@ -40,7 +43,7 @@ class GithubRepositoryOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "ref", "weight"] + optional_fields = ["ref", "weight"] nullable_fields = ["ref"] null_default_fields = [] diff --git a/src/mistralai/client/models/imagegenerationtool.py b/src/mistralai/client/models/imagegenerationtool.py index e09dba81..c5dbda3f 100644 --- a/src/mistralai/client/models/imagegenerationtool.py +++ b/src/mistralai/client/models/imagegenerationtool.py @@ -2,16 +2,22 @@ from __future__ import annotations from mistralai.client.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ImageGenerationToolType = Literal["image_generation",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class ImageGenerationToolTypedDict(TypedDict): - type: NotRequired[ImageGenerationToolType] + type: Literal["image_generation"] class ImageGenerationTool(BaseModel): - type: Optional[ImageGenerationToolType] = "image_generation" + TYPE: Annotated[ + 
Annotated[ + Literal["image_generation"], + AfterValidator(validate_const("image_generation")), + ], + pydantic.Field(alias="type"), + ] = "image_generation" diff --git a/src/mistralai/client/models/imageurlchunk.py b/src/mistralai/client/models/imageurlchunk.py index f967a3c8..9968ed74 100644 --- a/src/mistralai/client/models/imageurlchunk.py +++ b/src/mistralai/client/models/imageurlchunk.py @@ -7,12 +7,12 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ImageURLChunkImageURLTypedDict = TypeAliasType( - "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] +ImageURLUnionTypedDict = TypeAliasType( + "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str] ) -ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) +ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) ImageURLChunkType = Literal["image_url",] @@ -21,13 +21,13 @@ class ImageURLChunkTypedDict(TypedDict): r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - image_url: ImageURLChunkImageURLTypedDict + image_url: ImageURLUnionTypedDict type: NotRequired[ImageURLChunkType] class ImageURLChunk(BaseModel): r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - image_url: ImageURLChunkImageURL + image_url: ImageURLUnion type: Optional[ImageURLChunkType] = "image_url" diff --git a/src/mistralai/client/models/inputs.py b/src/mistralai/client/models/inputs.py index fb067476..2b8b2f5f 100644 --- a/src/mistralai/client/models/inputs.py +++ b/src/mistralai/client/models/inputs.py @@ -13,8 +13,8 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -InstructRequestInputsMessagesTypedDict = TypeAliasType( - "InstructRequestInputsMessagesTypedDict", +InputsMessageTypedDict = TypeAliasType( + "InputsMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -24,7 +24,7 @@ ) -InstructRequestInputsMessages = Annotated[ +InputsMessage = Annotated[ 
Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -36,11 +36,11 @@ class InstructRequestInputsTypedDict(TypedDict): - messages: List[InstructRequestInputsMessagesTypedDict] + messages: List[InputsMessageTypedDict] class InstructRequestInputs(BaseModel): - messages: List[InstructRequestInputsMessages] + messages: List[InputsMessage] InputsTypedDict = TypeAliasType( diff --git a/src/mistralai/client/models/instructrequest.py b/src/mistralai/client/models/instructrequest.py index 1b2f2693..73d482d8 100644 --- a/src/mistralai/client/models/instructrequest.py +++ b/src/mistralai/client/models/instructrequest.py @@ -12,8 +12,8 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -InstructRequestMessagesTypedDict = TypeAliasType( - "InstructRequestMessagesTypedDict", +InstructRequestMessageTypedDict = TypeAliasType( + "InstructRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -23,7 +23,7 @@ ) -InstructRequestMessages = Annotated[ +InstructRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -35,8 +35,8 @@ class InstructRequestTypedDict(TypedDict): - messages: List[InstructRequestMessagesTypedDict] + messages: List[InstructRequestMessageTypedDict] class InstructRequest(BaseModel): - messages: List[InstructRequestMessages] + messages: List[InstructRequestMessage] diff --git a/src/mistralai/client/models/jobin.py b/src/mistralai/client/models/jobin.py index dc7684fc..23a431c9 100644 --- a/src/mistralai/client/models/jobin.py +++ b/src/mistralai/client/models/jobin.py @@ -26,10 +26,10 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -JobInIntegrationsTypedDict = WandbIntegrationTypedDict +JobInIntegrationTypedDict = WandbIntegrationTypedDict -JobInIntegrations = WandbIntegration +JobInIntegration = WandbIntegration HyperparametersTypedDict = TypeAliasType( @@ -46,10 +46,10 @@ ) 
-JobInRepositoriesTypedDict = GithubRepositoryInTypedDict +JobInRepositoryTypedDict = GithubRepositoryInTypedDict -JobInRepositories = GithubRepositoryIn +JobInRepository = GithubRepositoryIn class JobInTypedDict(TypedDict): @@ -61,13 +61,13 @@ class JobInTypedDict(TypedDict): r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" suffix: NotRequired[Nullable[str]] r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - integrations: NotRequired[Nullable[List[JobInIntegrationsTypedDict]]] + integrations: NotRequired[Nullable[List[JobInIntegrationTypedDict]]] r"""A list of integrations to enable for your fine-tuning job.""" auto_start: NotRequired[bool] r"""This field will be required in a future release.""" invalid_sample_skip_percentage: NotRequired[float] job_type: NotRequired[Nullable[FineTuneableModelType]] - repositories: NotRequired[Nullable[List[JobInRepositoriesTypedDict]]] + repositories: NotRequired[Nullable[List[JobInRepositoryTypedDict]]] classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]] @@ -85,7 +85,7 @@ class JobIn(BaseModel): suffix: OptionalNullable[str] = UNSET r"""A string that will be added to your fine-tuning model name. 
For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - integrations: OptionalNullable[List[JobInIntegrations]] = UNSET + integrations: OptionalNullable[List[JobInIntegration]] = UNSET r"""A list of integrations to enable for your fine-tuning job.""" auto_start: Optional[bool] = None @@ -95,7 +95,7 @@ class JobIn(BaseModel): job_type: OptionalNullable[FineTuneableModelType] = UNSET - repositories: OptionalNullable[List[JobInRepositories]] = UNSET + repositories: OptionalNullable[List[JobInRepository]] = UNSET classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py index b36d3c3e..5d9c026b 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -10,8 +10,8 @@ CompletionDetailedJobOutTypedDict, ) from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -36,10 +36,7 @@ class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), + Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], + Field(discriminator="JOB_TYPE"), ] r"""OK""" diff --git 
a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py index ece0d15a..c54aaa5e 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -4,35 +4,30 @@ from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType -Response1TypedDict = TypeAliasType( - "Response1TypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] +ResponseTypedDict = TypeAliasType( + "ResponseTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] ) -Response1 = Annotated[ - Union[ - Annotated[ClassifierJobOut, Tag("classifier")], - Annotated[CompletionJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), +Response = Annotated[ + Union[ClassifierJobOut, CompletionJobOut], Field(discriminator="JOB_TYPE") ] JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - Union[LegacyJobMetadataOutTypedDict, Response1TypedDict], + Union[LegacyJobMetadataOutTypedDict, ResponseTypedDict], ) r"""OK""" JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - Union[LegacyJobMetadataOut, Response1], + Union[LegacyJobMetadataOut, Response], ) r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py 
b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py index aa5a2609..8837d262 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -10,8 +10,8 @@ CompletionDetailedJobOutTypedDict, ) from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -36,10 +36,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), + Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], + Field(discriminator="JOB_TYPE"), ] r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py index 7e399b31..8c19bacb 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -15,7 +15,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -QueryParamStatus = Literal[ +JobsAPIRoutesFineTuningGetFineTuningJobsStatus = Literal[ "QUEUED", "STARTED", "VALIDATING", @@ -42,7 +42,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): created_before: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] r"""When set, only return results for jobs created by the API caller. 
Other results are not displayed.""" - status: NotRequired[Nullable[QueryParamStatus]] + status: NotRequired[Nullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus]] r"""The current job state to filter on. When set, the other results are not displayed.""" wandb_project: NotRequired[Nullable[str]] r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" @@ -89,7 +89,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" status: Annotated[ - OptionalNullable[QueryParamStatus], + OptionalNullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET r"""The current job state to filter on. When set, the other results are not displayed.""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py index ed5938b0..91d581eb 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -10,8 +10,8 @@ CompletionDetailedJobOutTypedDict, ) from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -34,10 +34,7 @@ class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ - Union[ - Annotated[ClassifierDetailedJobOut, Tag("classifier")], - Annotated[CompletionDetailedJobOut, Tag("completion")], - ], - Discriminator(lambda m: 
get_discriminator(m, "job_type", "job_type")), + Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], + Field(discriminator="JOB_TYPE"), ] r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py index a2b70b37..760c22f4 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -5,13 +5,8 @@ from .completionftmodelout import CompletionFTModelOut, CompletionFTModelOutTypedDict from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict from mistralai.client.types import BaseModel -from mistralai.client.utils import ( - FieldMetadata, - PathParamMetadata, - RequestMetadata, - get_discriminator, -) -from pydantic import Discriminator, Tag +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -42,10 +37,6 @@ class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ - Union[ - Annotated[ClassifierFTModelOut, Tag("classifier")], - Annotated[CompletionFTModelOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "model_type", "model_type")), + Union[ClassifierFTModelOut, CompletionFTModelOut], Field(discriminator="MODEL_TYPE") ] r"""OK""" diff --git a/src/mistralai/client/models/jobsout.py b/src/mistralai/client/models/jobsout.py index 9087704f..7727d56c 100644 --- a/src/mistralai/client/models/jobsout.py +++ b/src/mistralai/client/models/jobsout.py @@ -4,8 +4,7 @@ from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict from mistralai.client.types 
import BaseModel -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -16,11 +15,7 @@ JobsOutData = Annotated[ - Union[ - Annotated[ClassifierJobOut, Tag("classifier")], - Annotated[CompletionJobOut, Tag("completion")], - ], - Discriminator(lambda m: get_discriminator(m, "job_type", "job_type")), + Union[ClassifierJobOut, CompletionJobOut], Field(discriminator="JOB_TYPE") ] diff --git a/src/mistralai/client/models/libraries_documents_upload_v1op.py b/src/mistralai/client/models/libraries_documents_upload_v1op.py index e2d59d9f..18a5b780 100644 --- a/src/mistralai/client/models/libraries_documents_upload_v1op.py +++ b/src/mistralai/client/models/libraries_documents_upload_v1op.py @@ -12,7 +12,7 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsUploadV1DocumentUploadTypedDict(TypedDict): +class DocumentUploadTypedDict(TypedDict): file: FileTypedDict r"""The File object (not file name) to be uploaded. To upload a file and specify a custom file name you should format your request as such: @@ -26,7 +26,7 @@ class LibrariesDocumentsUploadV1DocumentUploadTypedDict(TypedDict): """ -class LibrariesDocumentsUploadV1DocumentUpload(BaseModel): +class DocumentUpload(BaseModel): file: Annotated[File, FieldMetadata(multipart=MultipartFormMetadata(file=True))] r"""The File object (not file name) to be uploaded. 
To upload a file and specify a custom file name you should format your request as such: @@ -42,7 +42,7 @@ class LibrariesDocumentsUploadV1DocumentUpload(BaseModel): class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): library_id: str - request_body: LibrariesDocumentsUploadV1DocumentUploadTypedDict + request_body: DocumentUploadTypedDict class LibrariesDocumentsUploadV1Request(BaseModel): @@ -51,6 +51,6 @@ class LibrariesDocumentsUploadV1Request(BaseModel): ] request_body: Annotated[ - LibrariesDocumentsUploadV1DocumentUpload, + DocumentUpload, FieldMetadata(request=RequestMetadata(media_type="multipart/form-data")), ] diff --git a/src/mistralai/client/models/messageinputentry.py b/src/mistralai/client/models/messageinputentry.py index 12a31097..a72319cf 100644 --- a/src/mistralai/client/models/messageinputentry.py +++ b/src/mistralai/client/models/messageinputentry.py @@ -12,21 +12,25 @@ OptionalNullable, UNSET, UNSET_SENTINEL, + UnrecognizedStr, ) from pydantic import model_serializer from typing import List, Literal, Optional, Union from typing_extensions import NotRequired, TypeAliasType, TypedDict -Object = Literal["entry",] +MessageInputEntryObject = Literal["entry",] MessageInputEntryType = Literal["message.input",] -MessageInputEntryRole = Literal[ - "assistant", - "user", +MessageInputEntryRole = Union[ + Literal[ + "assistant", + "user", + ], + UnrecognizedStr, ] @@ -46,7 +50,7 @@ class MessageInputEntryTypedDict(TypedDict): role: MessageInputEntryRole content: MessageInputEntryContentTypedDict - object: NotRequired[Object] + object: NotRequired[MessageInputEntryObject] type: NotRequired[MessageInputEntryType] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] @@ -61,7 +65,7 @@ class MessageInputEntry(BaseModel): content: MessageInputEntryContent - object: Optional[Object] = "entry" + object: Optional[MessageInputEntryObject] = "entry" type: Optional[MessageInputEntryType] = "message.input" diff --git 
a/src/mistralai/client/models/messageoutputevent.py b/src/mistralai/client/models/messageoutputevent.py index 3db7f5a0..447e3867 100644 --- a/src/mistralai/client/models/messageoutputevent.py +++ b/src/mistralai/client/models/messageoutputevent.py @@ -10,12 +10,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -MessageOutputEventType = Literal["message.output.delta",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict MessageOutputEventRole = Literal["assistant",] @@ -34,7 +34,7 @@ class MessageOutputEventTypedDict(TypedDict): id: str content: MessageOutputEventContentTypedDict - type: NotRequired[MessageOutputEventType] + type: Literal["message.output.delta"] created_at: NotRequired[datetime] output_index: NotRequired[int] content_index: NotRequired[int] @@ -48,7 +48,13 @@ class MessageOutputEvent(BaseModel): content: MessageOutputEventContent - type: Optional[MessageOutputEventType] = "message.output.delta" + TYPE: Annotated[ + Annotated[ + Literal["message.output.delta"], + AfterValidator(validate_const("message.output.delta")), + ], + pydantic.Field(alias="type"), + ] = "message.output.delta" created_at: Optional[datetime] = None @@ -65,7 +71,6 @@ class MessageOutputEvent(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ - "type", "created_at", "output_index", "content_index", diff --git a/src/mistralai/client/models/modelconversation.py b/src/mistralai/client/models/modelconversation.py index 574f053d..d348072a 100644 --- a/src/mistralai/client/models/modelconversation.py +++ b/src/mistralai/client/models/modelconversation.py @@ -16,14 +16,13 @@ UNSET, UNSET_SENTINEL, ) -from mistralai.client.utils import get_discriminator 
-from pydantic import Discriminator, Tag, model_serializer +from pydantic import Field, model_serializer from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -ModelConversationToolsTypedDict = TypeAliasType( - "ModelConversationToolsTypedDict", +ModelConversationToolTypedDict = TypeAliasType( + "ModelConversationToolTypedDict", Union[ WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, @@ -35,16 +34,16 @@ ) -ModelConversationTools = Annotated[ +ModelConversationTool = Annotated[ Union[ - Annotated[CodeInterpreterTool, Tag("code_interpreter")], - Annotated[DocumentLibraryTool, Tag("document_library")], - Annotated[FunctionTool, Tag("function")], - Annotated[ImageGenerationTool, Tag("image_generation")], - Annotated[WebSearchTool, Tag("web_search")], - Annotated[WebSearchPremiumTool, Tag("web_search_premium")], + CodeInterpreterTool, + DocumentLibraryTool, + FunctionTool, + ImageGenerationTool, + WebSearchTool, + WebSearchPremiumTool, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] @@ -58,7 +57,7 @@ class ModelConversationTypedDict(TypedDict): model: str instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[ModelConversationToolsTypedDict]] + tools: NotRequired[List[ModelConversationToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -83,7 +82,7 @@ class ModelConversation(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[ModelConversationTools]] = None + tools: Optional[List[ModelConversationTool]] = None r"""List of tools which are available to the model during the conversation.""" 
completion_args: Optional[CompletionArgs] = None diff --git a/src/mistralai/client/models/modellist.py b/src/mistralai/client/models/modellist.py index 6a5209fa..b357ae84 100644 --- a/src/mistralai/client/models/modellist.py +++ b/src/mistralai/client/models/modellist.py @@ -4,31 +4,27 @@ from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .ftmodelcard import FTModelCard, FTModelCardTypedDict from mistralai.client.types import BaseModel -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -DataTypedDict = TypeAliasType( - "DataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] +ModelListDataTypedDict = TypeAliasType( + "ModelListDataTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict] ) -Data = Annotated[ - Union[ - Annotated[BaseModelCard, Tag("base")], Annotated[FTModelCard, Tag("fine-tuned")] - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), +ModelListData = Annotated[ + Union[BaseModelCard, FTModelCard], Field(discriminator="TYPE") ] class ModelListTypedDict(TypedDict): object: NotRequired[str] - data: NotRequired[List[DataTypedDict]] + data: NotRequired[List[ModelListDataTypedDict]] class ModelList(BaseModel): object: Optional[str] = "list" - data: Optional[List[Data]] = None + data: Optional[List[ModelListData]] = None diff --git a/src/mistralai/client/models/ocrtableobject.py b/src/mistralai/client/models/ocrtableobject.py index 0c9091de..f3b0bc45 100644 --- a/src/mistralai/client/models/ocrtableobject.py +++ b/src/mistralai/client/models/ocrtableobject.py @@ -1,15 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UnrecognizedStr import pydantic -from typing import Literal +from typing import Literal, Union from typing_extensions import Annotated, TypedDict -Format = Literal[ - "markdown", - "html", +Format = Union[ + Literal[ + "markdown", + "html", + ], + UnrecognizedStr, ] r"""Format of the table""" diff --git a/src/mistralai/client/models/realtimetranscriptionerrordetail.py b/src/mistralai/client/models/realtimetranscriptionerrordetail.py index 27bb8d87..e1f48379 100644 --- a/src/mistralai/client/models/realtimetranscriptionerrordetail.py +++ b/src/mistralai/client/models/realtimetranscriptionerrordetail.py @@ -6,23 +6,27 @@ from typing_extensions import TypeAliasType, TypedDict -MessageTypedDict = TypeAliasType("MessageTypedDict", Union[str, Dict[str, Any]]) +RealtimeTranscriptionErrorDetailMessageTypedDict = TypeAliasType( + "RealtimeTranscriptionErrorDetailMessageTypedDict", Union[str, Dict[str, Any]] +) r"""Human-readable error message.""" -Message = TypeAliasType("Message", Union[str, Dict[str, Any]]) +RealtimeTranscriptionErrorDetailMessage = TypeAliasType( + "RealtimeTranscriptionErrorDetailMessage", Union[str, Dict[str, Any]] +) r"""Human-readable error message.""" class RealtimeTranscriptionErrorDetailTypedDict(TypedDict): - message: MessageTypedDict + message: RealtimeTranscriptionErrorDetailMessageTypedDict r"""Human-readable error message.""" code: int r"""Internal error code for debugging.""" class RealtimeTranscriptionErrorDetail(BaseModel): - message: Message + message: RealtimeTranscriptionErrorDetailMessage r"""Human-readable error message.""" code: int diff --git a/src/mistralai/client/models/responsedoneevent.py b/src/mistralai/client/models/responsedoneevent.py index 54056256..283baa11 100644 --- a/src/mistralai/client/models/responsedoneevent.py +++ b/src/mistralai/client/models/responsedoneevent.py @@ -4,22 
+4,28 @@ from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseDoneEventType = Literal["conversation.response.done",] +from typing_extensions import Annotated, NotRequired, TypedDict class ResponseDoneEventTypedDict(TypedDict): usage: ConversationUsageInfoTypedDict - type: NotRequired[ResponseDoneEventType] + type: Literal["conversation.response.done"] created_at: NotRequired[datetime] class ResponseDoneEvent(BaseModel): usage: ConversationUsageInfo - type: Optional[ResponseDoneEventType] = "conversation.response.done" + TYPE: Annotated[ + Annotated[ + Literal["conversation.response.done"], + AfterValidator(validate_const("conversation.response.done")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.done" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/responseerrorevent.py b/src/mistralai/client/models/responseerrorevent.py index c9ef95a0..ee078963 100644 --- a/src/mistralai/client/models/responseerrorevent.py +++ b/src/mistralai/client/models/responseerrorevent.py @@ -3,17 +3,17 @@ from __future__ import annotations from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseErrorEventType = Literal["conversation.response.error",] +from typing_extensions import Annotated, NotRequired, TypedDict class ResponseErrorEventTypedDict(TypedDict): message: str code: int - type: NotRequired[ResponseErrorEventType] + type: 
Literal["conversation.response.error"] created_at: NotRequired[datetime] @@ -22,6 +22,12 @@ class ResponseErrorEvent(BaseModel): code: int - type: Optional[ResponseErrorEventType] = "conversation.response.error" + TYPE: Annotated[ + Annotated[ + Literal["conversation.response.error"], + AfterValidator(validate_const("conversation.response.error")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.error" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/responseformats.py b/src/mistralai/client/models/responseformats.py index cbf83ce7..b98cd098 100644 --- a/src/mistralai/client/models/responseformats.py +++ b/src/mistralai/client/models/responseformats.py @@ -1,11 +1,15 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -ResponseFormats = Literal[ - "text", - "json_object", - "json_schema", +ResponseFormats = Union[ + Literal[ + "text", + "json_object", + "json_schema", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/responsestartedevent.py b/src/mistralai/client/models/responsestartedevent.py index dc6a10f9..0841fd58 100644 --- a/src/mistralai/client/models/responsestartedevent.py +++ b/src/mistralai/client/models/responsestartedevent.py @@ -3,22 +3,28 @@ from __future__ import annotations from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ResponseStartedEventType = Literal["conversation.response.started",] +from typing_extensions import Annotated, NotRequired, TypedDict class ResponseStartedEventTypedDict(TypedDict): conversation_id: str - type: 
NotRequired[ResponseStartedEventType] + type: Literal["conversation.response.started"] created_at: NotRequired[datetime] class ResponseStartedEvent(BaseModel): conversation_id: str - type: Optional[ResponseStartedEventType] = "conversation.response.started" + TYPE: Annotated[ + Annotated[ + Literal["conversation.response.started"], + AfterValidator(validate_const("conversation.response.started")), + ], + pydantic.Field(alias="type"), + ] = "conversation.response.started" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py index 7fdcd37d..96e5b57f 100644 --- a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py +++ b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py @@ -4,8 +4,8 @@ from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .ftmodelcard import FTModelCard, FTModelCardTypedDict from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata, get_discriminator -from pydantic import Discriminator, Tag +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -22,17 +22,14 @@ class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): r"""The ID of the model to retrieve.""" -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( - "RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGetTypedDict", +ResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", Union[BaseModelCardTypedDict, FTModelCardTypedDict], ) r"""Successful Response""" -RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet = Annotated[ - Union[ - Annotated[BaseModelCard, Tag("base")], 
Annotated[FTModelCard, Tag("fine-tuned")] - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), +ResponseRetrieveModelV1ModelsModelIDGet = Annotated[ + Union[BaseModelCard, FTModelCard], Field(discriminator="TYPE") ] r"""Successful Response""" diff --git a/src/mistralai/client/models/ssetypes.py b/src/mistralai/client/models/ssetypes.py index 796f0327..ac2722f1 100644 --- a/src/mistralai/client/models/ssetypes.py +++ b/src/mistralai/client/models/ssetypes.py @@ -1,19 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -SSETypes = Literal[ - "conversation.response.started", - "conversation.response.done", - "conversation.response.error", - "message.output.delta", - "tool.execution.started", - "tool.execution.delta", - "tool.execution.done", - "agent.handoff.started", - "agent.handoff.done", - "function.call.delta", +SSETypes = Union[ + Literal[ + "conversation.response.started", + "conversation.response.done", + "conversation.response.error", + "message.output.delta", + "tool.execution.started", + "tool.execution.delta", + "tool.execution.done", + "agent.handoff.started", + "agent.handoff.done", + "function.call.delta", + ], + UnrecognizedStr, ] r"""Server side events sent when streaming a conversation response.""" diff --git a/src/mistralai/client/models/systemmessage.py b/src/mistralai/client/models/systemmessage.py index 9e01bc57..245e7b61 100644 --- a/src/mistralai/client/models/systemmessage.py +++ b/src/mistralai/client/models/systemmessage.py @@ -6,8 +6,11 @@ SystemMessageContentChunksTypedDict, ) from mistralai.client.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from mistralai.client.utils import validate_const +import pydantic 
+from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict SystemMessageContentTypedDict = TypeAliasType( @@ -21,15 +24,15 @@ ) -Role = Literal["system",] - - class SystemMessageTypedDict(TypedDict): content: SystemMessageContentTypedDict - role: NotRequired[Role] + role: Literal["system"] class SystemMessage(BaseModel): content: SystemMessageContent - role: Optional[Role] = "system" + ROLE: Annotated[ + Annotated[Literal["system"], AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" diff --git a/src/mistralai/client/models/toolchoiceenum.py b/src/mistralai/client/models/toolchoiceenum.py index 01f6f677..ba8195b8 100644 --- a/src/mistralai/client/models/toolchoiceenum.py +++ b/src/mistralai/client/models/toolchoiceenum.py @@ -1,12 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -ToolChoiceEnum = Literal[ - "auto", - "none", - "any", - "required", +ToolChoiceEnum = Union[ + Literal[ + "auto", + "none", + "any", + "required", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/toolexecutiondeltaevent.py b/src/mistralai/client/models/toolexecutiondeltaevent.py index 0268e6a0..aeda1472 100644 --- a/src/mistralai/client/models/toolexecutiondeltaevent.py +++ b/src/mistralai/client/models/toolexecutiondeltaevent.py @@ -4,11 +4,11 @@ from .builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - 
- -ToolExecutionDeltaEventType = Literal["tool.execution.delta",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolExecutionDeltaEventNameTypedDict = TypeAliasType( @@ -25,7 +25,7 @@ class ToolExecutionDeltaEventTypedDict(TypedDict): id: str name: ToolExecutionDeltaEventNameTypedDict arguments: str - type: NotRequired[ToolExecutionDeltaEventType] + type: Literal["tool.execution.delta"] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -37,7 +37,13 @@ class ToolExecutionDeltaEvent(BaseModel): arguments: str - type: Optional[ToolExecutionDeltaEventType] = "tool.execution.delta" + TYPE: Annotated[ + Annotated[ + Literal["tool.execution.delta"], + AfterValidator(validate_const("tool.execution.delta")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.delta" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/toolexecutiondoneevent.py b/src/mistralai/client/models/toolexecutiondoneevent.py index 854baee9..88aa5124 100644 --- a/src/mistralai/client/models/toolexecutiondoneevent.py +++ b/src/mistralai/client/models/toolexecutiondoneevent.py @@ -4,11 +4,11 @@ from .builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionDoneEventType = Literal["tool.execution.done",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolExecutionDoneEventNameTypedDict = TypeAliasType( @@ -24,7 +24,7 @@ class ToolExecutionDoneEventTypedDict(TypedDict): id: str name: ToolExecutionDoneEventNameTypedDict - type: NotRequired[ToolExecutionDoneEventType] + type: Literal["tool.execution.done"] created_at: NotRequired[datetime] output_index: 
NotRequired[int] info: NotRequired[Dict[str, Any]] @@ -35,7 +35,13 @@ class ToolExecutionDoneEvent(BaseModel): name: ToolExecutionDoneEventName - type: Optional[ToolExecutionDoneEventType] = "tool.execution.done" + TYPE: Annotated[ + Annotated[ + Literal["tool.execution.done"], + AfterValidator(validate_const("tool.execution.done")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.done" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/toolexecutionentry.py b/src/mistralai/client/models/toolexecutionentry.py index 839709fb..530c9029 100644 --- a/src/mistralai/client/models/toolexecutionentry.py +++ b/src/mistralai/client/models/toolexecutionentry.py @@ -21,14 +21,18 @@ ToolExecutionEntryType = Literal["tool.execution",] -NameTypedDict = TypeAliasType("NameTypedDict", Union[BuiltInConnectors, str]) +ToolExecutionEntryNameTypedDict = TypeAliasType( + "ToolExecutionEntryNameTypedDict", Union[BuiltInConnectors, str] +) -Name = TypeAliasType("Name", Union[BuiltInConnectors, str]) +ToolExecutionEntryName = TypeAliasType( + "ToolExecutionEntryName", Union[BuiltInConnectors, str] +) class ToolExecutionEntryTypedDict(TypedDict): - name: NameTypedDict + name: ToolExecutionEntryNameTypedDict arguments: str object: NotRequired[ToolExecutionEntryObject] type: NotRequired[ToolExecutionEntryType] @@ -39,7 +43,7 @@ class ToolExecutionEntryTypedDict(TypedDict): class ToolExecutionEntry(BaseModel): - name: Name + name: ToolExecutionEntryName arguments: str diff --git a/src/mistralai/client/models/toolexecutionstartedevent.py b/src/mistralai/client/models/toolexecutionstartedevent.py index 66438cfc..3d5f49c7 100644 --- a/src/mistralai/client/models/toolexecutionstartedevent.py +++ b/src/mistralai/client/models/toolexecutionstartedevent.py @@ -4,11 +4,11 @@ from .builtinconnectors import BuiltInConnectors from datetime import datetime from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic 
+from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionStartedEventType = Literal["tool.execution.started",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolExecutionStartedEventNameTypedDict = TypeAliasType( @@ -25,7 +25,7 @@ class ToolExecutionStartedEventTypedDict(TypedDict): id: str name: ToolExecutionStartedEventNameTypedDict arguments: str - type: NotRequired[ToolExecutionStartedEventType] + type: Literal["tool.execution.started"] created_at: NotRequired[datetime] output_index: NotRequired[int] @@ -37,7 +37,13 @@ class ToolExecutionStartedEvent(BaseModel): arguments: str - type: Optional[ToolExecutionStartedEventType] = "tool.execution.started" + TYPE: Annotated[ + Annotated[ + Literal["tool.execution.started"], + AfterValidator(validate_const("tool.execution.started")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution.started" created_at: Optional[datetime] = None diff --git a/src/mistralai/client/models/toolmessage.py b/src/mistralai/client/models/toolmessage.py index eae2d2ae..44fe63e7 100644 --- a/src/mistralai/client/models/toolmessage.py +++ b/src/mistralai/client/models/toolmessage.py @@ -9,9 +9,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolMessageContentTypedDict = TypeAliasType( @@ -22,14 +25,11 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool",] - - class ToolMessageTypedDict(TypedDict): content: 
Nullable[ToolMessageContentTypedDict] tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] - role: NotRequired[ToolMessageRole] + role: Literal["tool"] class ToolMessage(BaseModel): @@ -39,11 +39,14 @@ class ToolMessage(BaseModel): name: OptionalNullable[str] = UNSET - role: Optional[ToolMessageRole] = "tool" + ROLE: Annotated[ + Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], + pydantic.Field(alias="role"), + ] = "tool" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name", "role"] + optional_fields = ["tool_call_id", "name"] nullable_fields = ["content", "tool_call_id", "name"] null_default_fields = [] diff --git a/src/mistralai/client/models/transcriptionsegmentchunk.py b/src/mistralai/client/models/transcriptionsegmentchunk.py index c89d84fc..25e859e5 100644 --- a/src/mistralai/client/models/transcriptionsegmentchunk.py +++ b/src/mistralai/client/models/transcriptionsegmentchunk.py @@ -14,7 +14,7 @@ from typing_extensions import NotRequired, TypedDict -Type = Literal["transcription_segment",] +TranscriptionSegmentChunkType = Literal["transcription_segment",] class TranscriptionSegmentChunkTypedDict(TypedDict): @@ -23,7 +23,7 @@ class TranscriptionSegmentChunkTypedDict(TypedDict): end: float score: NotRequired[Nullable[float]] speaker_id: NotRequired[Nullable[str]] - type: NotRequired[Type] + type: NotRequired[TranscriptionSegmentChunkType] class TranscriptionSegmentChunk(BaseModel): @@ -42,7 +42,7 @@ class TranscriptionSegmentChunk(BaseModel): speaker_id: OptionalNullable[str] = UNSET - type: Optional[Type] = "transcription_segment" + type: Optional[TranscriptionSegmentChunkType] = "transcription_segment" @property def additional_properties(self): diff --git a/src/mistralai/client/models/transcriptionstreamdone.py b/src/mistralai/client/models/transcriptionstreamdone.py index add17f56..9ba2aeff 100644 --- a/src/mistralai/client/models/transcriptionstreamdone.py +++ 
b/src/mistralai/client/models/transcriptionstreamdone.py @@ -7,13 +7,12 @@ ) from .usageinfo import UsageInfo, UsageInfoTypedDict from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.client.utils import validate_const import pydantic from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamDoneType = Literal["transcription.done",] +from typing_extensions import Annotated, NotRequired, TypedDict class TranscriptionStreamDoneTypedDict(TypedDict): @@ -22,7 +21,7 @@ class TranscriptionStreamDoneTypedDict(TypedDict): usage: UsageInfoTypedDict language: Nullable[str] segments: NotRequired[List[TranscriptionSegmentChunkTypedDict]] - type: NotRequired[TranscriptionStreamDoneType] + type: Literal["transcription.done"] class TranscriptionStreamDone(BaseModel): @@ -41,7 +40,13 @@ class TranscriptionStreamDone(BaseModel): segments: Optional[List[TranscriptionSegmentChunk]] = None - type: Optional[TranscriptionStreamDoneType] = "transcription.done" + TYPE: Annotated[ + Annotated[ + Literal["transcription.done"], + AfterValidator(validate_const("transcription.done")), + ], + pydantic.Field(alias="type"), + ] = "transcription.done" @property def additional_properties(self): @@ -53,7 +58,7 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["segments", "type"] + optional_fields = ["segments"] nullable_fields = ["language"] null_default_fields = [] diff --git a/src/mistralai/client/models/transcriptionstreamevents.py b/src/mistralai/client/models/transcriptionstreamevents.py index caaf943a..63a08fb5 100644 --- a/src/mistralai/client/models/transcriptionstreamevents.py +++ b/src/mistralai/client/models/transcriptionstreamevents.py @@ -19,8 +19,7 @@ TranscriptionStreamTextDeltaTypedDict, ) 
from mistralai.client.types import BaseModel -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -38,12 +37,12 @@ TranscriptionStreamEventsData = Annotated[ Union[ - Annotated[TranscriptionStreamDone, Tag("transcription.done")], - Annotated[TranscriptionStreamLanguage, Tag("transcription.language")], - Annotated[TranscriptionStreamSegmentDelta, Tag("transcription.segment")], - Annotated[TranscriptionStreamTextDelta, Tag("transcription.text.delta")], + TranscriptionStreamDone, + TranscriptionStreamLanguage, + TranscriptionStreamSegmentDelta, + TranscriptionStreamTextDelta, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Field(discriminator="TYPE"), ] diff --git a/src/mistralai/client/models/transcriptionstreameventtypes.py b/src/mistralai/client/models/transcriptionstreameventtypes.py index 4a910f0a..cb6b2889 100644 --- a/src/mistralai/client/models/transcriptionstreameventtypes.py +++ b/src/mistralai/client/models/transcriptionstreameventtypes.py @@ -1,12 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from typing import Literal +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union -TranscriptionStreamEventTypes = Literal[ - "transcription.language", - "transcription.segment", - "transcription.text.delta", - "transcription.done", +TranscriptionStreamEventTypes = Union[ + Literal[ + "transcription.language", + "transcription.segment", + "transcription.text.delta", + "transcription.done", + ], + UnrecognizedStr, ] diff --git a/src/mistralai/client/models/transcriptionstreamlanguage.py b/src/mistralai/client/models/transcriptionstreamlanguage.py index b47024ad..244103be 100644 --- a/src/mistralai/client/models/transcriptionstreamlanguage.py +++ b/src/mistralai/client/models/transcriptionstreamlanguage.py @@ -2,18 +2,17 @@ from __future__ import annotations from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const import pydantic from pydantic import ConfigDict -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamLanguageType = Literal["transcription.language",] +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, TypedDict class TranscriptionStreamLanguageTypedDict(TypedDict): audio_language: str - type: NotRequired[TranscriptionStreamLanguageType] + type: Literal["transcription.language"] class TranscriptionStreamLanguage(BaseModel): @@ -24,7 +23,13 @@ class TranscriptionStreamLanguage(BaseModel): audio_language: str - type: Optional[TranscriptionStreamLanguageType] = "transcription.language" + TYPE: Annotated[ + Annotated[ + Literal["transcription.language"], + AfterValidator(validate_const("transcription.language")), + ], + pydantic.Field(alias="type"), + ] = "transcription.language" @property def additional_properties(self): diff --git 
a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py index 7cfffb63..ee014742 100644 --- a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py +++ b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py @@ -8,13 +8,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const import pydantic from pydantic import ConfigDict, model_serializer -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamSegmentDeltaType = Literal["transcription.segment",] +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, NotRequired, TypedDict class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): @@ -22,7 +21,7 @@ class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): start: float end: float speaker_id: NotRequired[Nullable[str]] - type: NotRequired[TranscriptionStreamSegmentDeltaType] + type: Literal["transcription.segment"] class TranscriptionStreamSegmentDelta(BaseModel): @@ -39,7 +38,13 @@ class TranscriptionStreamSegmentDelta(BaseModel): speaker_id: OptionalNullable[str] = UNSET - type: Optional[TranscriptionStreamSegmentDeltaType] = "transcription.segment" + TYPE: Annotated[ + Annotated[ + Literal["transcription.segment"], + AfterValidator(validate_const("transcription.segment")), + ], + pydantic.Field(alias="type"), + ] = "transcription.segment" @property def additional_properties(self): @@ -51,7 +56,7 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["speaker_id", "type"] + optional_fields = ["speaker_id"] nullable_fields = ["speaker_id"] null_default_fields = [] diff --git a/src/mistralai/client/models/transcriptionstreamtextdelta.py b/src/mistralai/client/models/transcriptionstreamtextdelta.py index ce279cf6..feb459ea 
100644 --- a/src/mistralai/client/models/transcriptionstreamtextdelta.py +++ b/src/mistralai/client/models/transcriptionstreamtextdelta.py @@ -2,18 +2,17 @@ from __future__ import annotations from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const import pydantic from pydantic import ConfigDict -from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionStreamTextDeltaType = Literal["transcription.text.delta",] +from pydantic.functional_validators import AfterValidator +from typing import Any, Dict, Literal +from typing_extensions import Annotated, TypedDict class TranscriptionStreamTextDeltaTypedDict(TypedDict): text: str - type: NotRequired[TranscriptionStreamTextDeltaType] + type: Literal["transcription.text.delta"] class TranscriptionStreamTextDelta(BaseModel): @@ -24,7 +23,13 @@ class TranscriptionStreamTextDelta(BaseModel): text: str - type: Optional[TranscriptionStreamTextDeltaType] = "transcription.text.delta" + TYPE: Annotated[ + Annotated[ + Literal["transcription.text.delta"], + AfterValidator(validate_const("transcription.text.delta")), + ], + pydantic.Field(alias="type"), + ] = "transcription.text.delta" @property def additional_properties(self): diff --git a/src/mistralai/client/models/usermessage.py b/src/mistralai/client/models/usermessage.py index 8d92cea8..fe64a8cc 100644 --- a/src/mistralai/client/models/usermessage.py +++ b/src/mistralai/client/models/usermessage.py @@ -3,9 +3,12 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict from mistralai.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import 
List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict UserMessageContentTypedDict = TypeAliasType( @@ -16,22 +19,22 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user",] - - class UserMessageTypedDict(TypedDict): content: Nullable[UserMessageContentTypedDict] - role: NotRequired[UserMessageRole] + role: Literal["user"] class UserMessage(BaseModel): content: Nullable[UserMessageContent] - role: Optional[UserMessageRole] = "user" + ROLE: Annotated[ + Annotated[Literal["user"], AfterValidator(validate_const("user"))], + pydantic.Field(alias="role"), + ] = "user" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role"] + optional_fields = [] nullable_fields = ["content"] null_default_fields = [] diff --git a/src/mistralai/client/models/wandbintegration.py b/src/mistralai/client/models/wandbintegration.py index 89489fb4..18e32ac3 100644 --- a/src/mistralai/client/models/wandbintegration.py +++ b/src/mistralai/client/models/wandbintegration.py @@ -8,12 +8,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WandbIntegrationType = Literal["wandb",] +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict class WandbIntegrationTypedDict(TypedDict): @@ -21,7 +21,7 @@ class WandbIntegrationTypedDict(TypedDict): r"""The name of the project that the new run will be created under.""" api_key: str r"""The WandB API key to use for authentication.""" - type: NotRequired[WandbIntegrationType] + type: Literal["wandb"] name: NotRequired[Nullable[str]] r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] @@ -34,7 +34,10 @@ class WandbIntegration(BaseModel): api_key: str r"""The WandB API key to use for authentication.""" - type: Optional[WandbIntegrationType] = "wandb" + TYPE: Annotated[ + Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))], + pydantic.Field(alias="type"), + ] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. If not set, will use the job ID as the name.""" @@ -43,7 +46,7 @@ class WandbIntegration(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "name", "run_name"] + optional_fields = ["name", "run_name"] nullable_fields = ["name", "run_name"] null_default_fields = [] diff --git a/src/mistralai/client/models/wandbintegrationout.py b/src/mistralai/client/models/wandbintegrationout.py index a7f9afeb..6409f4a4 100644 --- a/src/mistralai/client/models/wandbintegrationout.py +++ b/src/mistralai/client/models/wandbintegrationout.py @@ -8,18 +8,18 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WandbIntegrationOutType = Literal["wandb",] +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, NotRequired, TypedDict class WandbIntegrationOutTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" - type: NotRequired[WandbIntegrationOutType] + type: Literal["wandb"] name: NotRequired[Nullable[str]] r"""A display name to set for the run. 
If not set, will use the job ID as the name.""" run_name: NotRequired[Nullable[str]] @@ -30,7 +30,10 @@ class WandbIntegrationOut(BaseModel): project: str r"""The name of the project that the new run will be created under.""" - type: Optional[WandbIntegrationOutType] = "wandb" + TYPE: Annotated[ + Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))], + pydantic.Field(alias="type"), + ] = "wandb" name: OptionalNullable[str] = UNSET r"""A display name to set for the run. If not set, will use the job ID as the name.""" @@ -41,7 +44,7 @@ class WandbIntegrationOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "name", "run_name", "url"] + optional_fields = ["name", "run_name", "url"] nullable_fields = ["name", "run_name", "url"] null_default_fields = [] diff --git a/src/mistralai/client/models/websearchpremiumtool.py b/src/mistralai/client/models/websearchpremiumtool.py index 8d2d4b5d..c7825ec3 100644 --- a/src/mistralai/client/models/websearchpremiumtool.py +++ b/src/mistralai/client/models/websearchpremiumtool.py @@ -2,16 +2,22 @@ from __future__ import annotations from mistralai.client.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WebSearchPremiumToolType = Literal["web_search_premium",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class WebSearchPremiumToolTypedDict(TypedDict): - type: NotRequired[WebSearchPremiumToolType] + type: Literal["web_search_premium"] class WebSearchPremiumTool(BaseModel): - type: Optional[WebSearchPremiumToolType] = "web_search_premium" + TYPE: Annotated[ + Annotated[ + Literal["web_search_premium"], + AfterValidator(validate_const("web_search_premium")), + ], + pydantic.Field(alias="type"), + ] = "web_search_premium" diff --git 
a/src/mistralai/client/models/websearchtool.py b/src/mistralai/client/models/websearchtool.py index ba4cc09f..7a237d86 100644 --- a/src/mistralai/client/models/websearchtool.py +++ b/src/mistralai/client/models/websearchtool.py @@ -2,16 +2,19 @@ from __future__ import annotations from mistralai.client.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -WebSearchToolType = Literal["web_search",] +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict class WebSearchToolTypedDict(TypedDict): - type: NotRequired[WebSearchToolType] + type: Literal["web_search"] class WebSearchTool(BaseModel): - type: Optional[WebSearchToolType] = "web_search" + TYPE: Annotated[ + Annotated[Literal["web_search"], AfterValidator(validate_const("web_search"))], + pydantic.Field(alias="type"), + ] = "web_search" diff --git a/src/mistralai/client/models_.py b/src/mistralai/client/models_.py index 5ef9da09..00708197 100644 --- a/src/mistralai/client/models_.py +++ b/src/mistralai/client/models_.py @@ -174,7 +174,7 @@ def retrieve( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: + ) -> models.ResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model Retrieve information about a model. 
@@ -242,8 +242,7 @@ def retrieve( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - http_res, + models.ResponseRetrieveModelV1ModelsModelIDGet, http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( @@ -267,7 +266,7 @@ async def retrieve_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet: + ) -> models.ResponseRetrieveModelV1ModelsModelIDGet: r"""Retrieve Model Retrieve information about a model. @@ -335,8 +334,7 @@ async def retrieve_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - models.RetrieveModelV1ModelsModelIDGetResponseRetrieveModelV1ModelsModelIDGet, - http_res, + models.ResponseRetrieveModelV1ModelsModelIDGet, http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( diff --git a/src/mistralai/extra/mcp/base.py b/src/mistralai/extra/mcp/base.py index 1048c54f..115eff61 100644 --- a/src/mistralai/extra/mcp/base.py +++ b/src/mistralai/extra/mcp/base.py @@ -84,7 +84,6 @@ async def get_tools(self) -> list[FunctionTool]: for mcp_tool in mcp_tools.tools: tools.append( FunctionTool( - type="function", function=Function( name=mcp_tool.name, description=mcp_tool.description, diff --git a/src/mistralai/extra/run/context.py b/src/mistralai/extra/run/context.py index 8e570e41..01baa6a9 100644 --- a/src/mistralai/extra/run/context.py +++ b/src/mistralai/extra/run/context.py @@ -22,18 +22,19 @@ create_tool_call, ) from mistralai.client.models import ( + AgentTool, CompletionArgs, CompletionArgsTypedDict, ConversationInputs, ConversationInputsTypedDict, + 
ConversationRequestTool, + ConversationRequestToolTypedDict, FunctionCallEntry, FunctionResultEntry, FunctionTool, InputEntries, MessageInputEntry, ResponseFormat, - Tools, - ToolsTypedDict, ) from mistralai.client.types.basemodel import BaseModel, OptionalNullable, UNSET @@ -50,7 +51,7 @@ class AgentRequestKwargs(typing.TypedDict): class ModelRequestKwargs(typing.TypedDict): model: str instructions: OptionalNullable[str] - tools: OptionalNullable[list[Tools] | list[ToolsTypedDict]] + tools: OptionalNullable[list[ConversationRequestTool] | list[ConversationRequestToolTypedDict]] completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] @@ -186,10 +187,9 @@ async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs ) agent = await beta_client.agents.get_async(agent_id=self.agent_id) agent_tools = agent.tools or [] - updated_tools = [] - for i in range(len(agent_tools)): - tool = agent_tools[i] - if tool.type != "function": + updated_tools: list[AgentTool] = [] + for tool in agent_tools: + if not isinstance(tool, FunctionTool): updated_tools.append(tool) elif tool.function.name in self._callable_tools: # function already exists in the agent, don't add it again @@ -209,7 +209,7 @@ async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs async def prepare_model_request( self, - tools: OptionalNullable[list[Tools] | list[ToolsTypedDict]] = UNSET, + tools: OptionalNullable[list[ConversationRequestTool] | list[ConversationRequestToolTypedDict]] = UNSET, completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] = UNSET, instructions: OptionalNullable[str] = None, ) -> ModelRequestKwargs: @@ -225,7 +225,7 @@ async def prepare_model_request( request_tools = [] if isinstance(tools, list): for tool in tools: - request_tools.append(typing.cast(Tools, tool)) + request_tools.append(typing.cast(ConversationRequestTool, tool)) for tool in self.get_tools(): request_tools.append(tool) return 
ModelRequestKwargs( @@ -248,7 +248,7 @@ async def _validate_run( run_ctx: RunContext, inputs: ConversationInputs | ConversationInputsTypedDict, instructions: OptionalNullable[str] = UNSET, - tools: OptionalNullable[list[Tools] | list[ToolsTypedDict]] = UNSET, + tools: OptionalNullable[list[ConversationRequestTool] | list[ConversationRequestToolTypedDict]] = UNSET, completion_args: OptionalNullable[CompletionArgs | CompletionArgsTypedDict] = UNSET, ) -> tuple[ AgentRequestKwargs | ModelRequestKwargs, RunResult, list[InputEntries] diff --git a/src/mistralai/extra/run/tools.py b/src/mistralai/extra/run/tools.py index 94ef2852..18c1d3dd 100644 --- a/src/mistralai/extra/run/tools.py +++ b/src/mistralai/extra/run/tools.py @@ -168,7 +168,6 @@ def create_tool_call(func: Callable) -> FunctionTool: type_hints = get_type_hints(func, include_extras=True, localns=None, globalns=None) return FunctionTool( - type="function", function=Function( name=name, description=_get_function_description(docstring_sections), diff --git a/uv.lock b/uv.lock index 4b1890b2..caa731ed 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "2.0.0a1" +version = "2.0.0a2" source = { editable = "." 
} dependencies = [ { name = "eval-type-backport" }, From 346dc75a6a9db5542b3bd93ffce505b5223b560c Mon Sep 17 00:00:00 2001 From: Nelson PROIA <144663685+Nelson-PROIA@users.noreply.github.com> Date: Fri, 13 Feb 2026 10:46:17 +0100 Subject: [PATCH 205/223] PEP 420 namespace support, widen otel upper bound (#346) PEP 420 namespace support, widen otel upper bound - Widen opentelemetry-semantic-conventions upper bound from <0.60 to <0.61 (fixes #341) - Add "Additional packages" section to README for mistralai-* namespace packages Co-Authored-By: Claude Opus 4.6 --- README.md | 12 ++++++++++-- pyproject.toml | 2 +- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2f31ccf2..04cb586c 100644 --- a/README.md +++ b/README.md @@ -118,7 +118,7 @@ Once that is saved to a file, you can run it with `uv run script.py` where ### Agents extra dependencies -When using the agents related feature it is required to add the `agents` extra dependencies. This can be added when +When using the agents related feature it is required to add the `agents` extra dependencies. This can be added when installing the package: ```bash @@ -127,6 +127,14 @@ pip install "mistralai[agents]" > Note: These features require Python 3.10+ (the SDK minimum). +### Additional packages + +Additional `mistralai-*` packages (e.g. 
`mistralai-workflows`) can be installed separately and are available under the `mistralai` namespace: + +```bash +pip install mistralai-workflows +``` + ## SDK Example Usage @@ -410,7 +418,7 @@ gcloud auth application-default login Install the extras dependencies specific to Google Cloud: ```bash -pip install mistralai[gcp] +pip install "mistralai[gcp]" ``` **Step 2: Example Usage** diff --git a/pyproject.toml b/pyproject.toml index 7209c64c..5802feaa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ dependencies = [ "opentelemetry-sdk (>=1.33.1,<2.0.0)", "opentelemetry-api (>=1.33.1,<2.0.0)", "opentelemetry-exporter-otlp-proto-http (>=1.37.0,<2.0.0)", - "opentelemetry-semantic-conventions (>=0.59b0,<0.60)", + "opentelemetry-semantic-conventions (>=0.59b0,<0.61)", ] [project.optional-dependencies] From fc41a0759cab147c99238ccfe94eac8affb54e06 Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Tue, 17 Feb 2026 15:17:20 +0100 Subject: [PATCH 206/223] chore: switch to v2 specs for shorter SDK class names (#352) * chore: add Makefile for SDK generation commands - test-generate: Test SDK generation locally - update-speakeasy-version: Update Speakeasy CLI version Production generation is done via GitHub Actions. * chore: switch SDK generation to v2 specs for shorter class names Update Speakeasy workflow to pull from :v2 registry tags instead of :main. This enables shorter SDK class names via x-mistral-sdk-operation-id. * chore: regenerate SDK with v2 specs (shorter class names) Auto-generated by Speakeasy using v2 spec with short operation IDs. Class names now use concise names like ArchiveModelRequest instead of JobsAPIRoutesFineTuningArchiveFineTunedModelRequest. 
* chore: bump version to 2.0.0a3 * chore: regenerate SDK for v2.0.0a3 * docs: add shorter class names section to migration guide * fix(examples): use mistral-small-latest for fine-tuning examples * ci: skip fine-tuning job examples (CI API key lacks access) * refactor(examples): rename job examples to clarify fine-tuning vs batch --- .speakeasy/gen.lock | 2892 ++++++++++------- .speakeasy/gen.yaml | 5 +- ...-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock | 799 ----- .speakeasy/workflow.lock | 36 +- .speakeasy/workflow.yaml | 6 +- MIGRATION.md | 13 + Makefile | 24 + README.md | 18 +- docs/models/agent.md | 3 +- docs/models/agentcreationrequest.md | 3 +- docs/models/agentsapiv1agentsgetrequest.md | 9 - docs/models/agentupdaterequest.md | 3 +- ...equest.md => appendconversationrequest.md} | 2 +- ....md => appendconversationstreamrequest.md} | 2 +- docs/models/archiveftmodelout.md | 10 +- docs/models/archiveftmodeloutobject.md | 8 - ...modelrequest.md => archivemodelrequest.md} | 2 +- docs/models/batchjobout.md | 42 +- docs/models/batchjoboutobject.md | 8 - docs/models/batchjobsout.md | 10 +- docs/models/batchjobsoutobject.md | 8 - ...jobrequest.md => cancelbatchjobrequest.md} | 2 +- ...quest.md => cancelfinetuningjobrequest.md} | 2 +- ...onse.md => cancelfinetuningjobresponse.md} | 2 +- docs/models/classifierdetailedjobout.md | 4 +- docs/models/classifierdetailedjoboutobject.md | 8 - docs/models/classifierftmodelout.md | 36 +- docs/models/classifierftmodeloutobject.md | 8 - docs/models/classifierjobout.md | 4 +- docs/models/classifierjoboutobject.md | 10 - docs/models/completiondetailedjobout.md | 4 +- docs/models/completiondetailedjoboutobject.md | 8 - docs/models/completionftmodelout.md | 34 +- docs/models/completionftmodeloutobject.md | 8 - docs/models/completionjobout.md | 4 +- docs/models/completionjoboutobject.md | 10 - ...onse.md => createfinetuningjobresponse.md} | 2 +- ....md => createorupdateagentaliasrequest.md} | 2 +- docs/models/deleteagentaliasrequest.md | 9 
+ ...deleterequest.md => deleteagentrequest.md} | 2 +- ...equest.md => deleteconversationrequest.md} | 2 +- ...tv1request.md => deletedocumentrequest.md} | 2 +- ...etefilerequest.md => deletefilerequest.md} | 2 +- ...quest.md => deletelibraryaccessrequest.md} | 2 +- ...etv1request.md => deletelibraryrequest.md} | 2 +- ...deleterequest.md => deletemodelrequest.md} | 2 +- ...dfilerequest.md => downloadfilerequest.md} | 2 +- ...gentversion.md => getagentagentversion.md} | 2 +- docs/models/getagentrequest.md | 9 + ...onrequest.md => getagentversionrequest.md} | 2 +- ...tchjobrequest.md => getbatchjobrequest.md} | 2 +- ...st.md => getconversationhistoryrequest.md} | 2 +- ...t.md => getconversationmessagesrequest.md} | 2 +- ...terequest.md => getconversationrequest.md} | 2 +- ...tdocumentextractedtextsignedurlrequest.md} | 2 +- ...letev1request.md => getdocumentrequest.md} | 2 +- ...uest.md => getdocumentsignedurlrequest.md} | 2 +- ...request.md => getdocumentstatusrequest.md} | 2 +- docs/models/getdocumenttextcontentrequest.md | 9 + ...lrequest.md => getfilesignedurlrequest.md} | 2 +- ...brequest.md => getfinetuningjobrequest.md} | 2 +- ...esponse.md => getfinetuningjobresponse.md} | 2 +- ...eletev1request.md => getlibraryrequest.md} | 2 +- docs/models/jobin.md | 2 +- ...outesfinetuninggetfinetuningjobsrequest.md | 17 - docs/models/jobsout.md | 10 +- docs/models/jobsoutobject.md | 8 - docs/models/legacyjobmetadataout.md | 2 +- docs/models/legacyjobmetadataoutobject.md | 8 - ...mentsgetextractedtextsignedurlv1request.md | 9 - ...brariesdocumentsgettextcontentv1request.md | 9 - ...srequest.md => listagentaliasesrequest.md} | 2 +- ...ntslistrequest.md => listagentsrequest.md} | 5 +- ...request.md => listagentversionsrequest.md} | 2 +- ...jobsrequest.md => listbatchjobsrequest.md} | 5 +- ...request.md => listconversationsrequest.md} | 2 +- ...sponse.md => listconversationsresponse.md} | 2 +- ...stv1request.md => listdocumentsrequest.md} | 2 +- ...istfilesrequest.md => 
listfilesrequest.md} | 2 +- docs/models/listfinetuningjobsrequest.md | 17 + ...sstatus.md => listfinetuningjobsstatus.md} | 2 +- ...quest.md => listlibraryaccessesrequest.md} | 2 +- docs/models/orderby.md | 9 + docs/models/reprocessdocumentrequest.md | 9 + ...quest.md => restartconversationrequest.md} | 2 +- ...md => restartconversationstreamrequest.md} | 2 +- ...efilerequest.md => retrievefilerequest.md} | 2 +- ...dgetrequest.md => retrievemodelrequest.md} | 2 +- ...equest.md => startfinetuningjobrequest.md} | 2 +- ...ponse.md => startfinetuningjobresponse.md} | 2 +- docs/models/unarchiveftmodelout.md | 10 +- docs/models/unarchiveftmodeloutobject.md | 8 - ...delrequest.md => unarchivemodelrequest.md} | 2 +- ...updaterequest.md => updateagentrequest.md} | 2 +- ...equest.md => updateagentversionrequest.md} | 2 +- ...ev1request.md => updatedocumentrequest.md} | 2 +- ...tev1request.md => updatelibraryrequest.md} | 2 +- ...dmodelrequest.md => updatemodelrequest.md} | 2 +- ...odelresponse.md => updatemodelresponse.md} | 2 +- ... 
=> updateorcreatelibraryaccessrequest.md} | 2 +- ...dv1request.md => uploaddocumentrequest.md} | 2 +- docs/sdks/accesses/README.md | 12 +- docs/sdks/batchjobs/README.md | 17 +- docs/sdks/betaagents/README.md | 81 +- docs/sdks/conversations/README.md | 43 +- docs/sdks/documents/README.md | 40 +- docs/sdks/files/README.md | 20 +- docs/sdks/finetuningjobs/README.md | 54 +- docs/sdks/libraries/README.md | 16 +- docs/sdks/models/README.md | 14 +- .../{async_jobs.py => async_fine_tuning.py} | 2 +- ...jobs_chat.py => async_fine_tuning_chat.py} | 2 +- .../mistral/jobs/{jobs.py => fine_tuning.py} | 2 +- ...{dry_run_job.py => fine_tuning_dry_run.py} | 2 +- packages/mistralai_azure/.speakeasy/gen.lock | 28 +- .../docs/models/mistralpromptmode.md | 4 + .../mistralai_azure/docs/models/ocrrequest.md | 1 + .../models/mistralpromptmode.py | 4 + .../src/mistralai_azure/models/ocrrequest.py | 7 + .../src/mistralai_azure/ocr.py | 6 + packages/mistralai_gcp/.speakeasy/gen.lock | 16 +- .../docs/models/mistralpromptmode.md | 4 + .../mistralai_gcp/models/mistralpromptmode.py | 4 + pyproject.toml | 2 +- scripts/run_examples.sh | 5 +- src/mistralai/client/__init__.py | 1 + src/mistralai/client/_hooks/__init__.py | 1 + src/mistralai/client/_hooks/sdkhooks.py | 1 + src/mistralai/client/_hooks/types.py | 1 + src/mistralai/client/_version.py | 5 +- src/mistralai/client/accesses.py | 25 +- src/mistralai/client/agents.py | 1 + src/mistralai/client/audio.py | 1 + src/mistralai/client/basesdk.py | 1 + src/mistralai/client/batch.py | 1 + src/mistralai/client/batch_jobs.py | 36 +- src/mistralai/client/beta.py | 1 + src/mistralai/client/beta_agents.py | 295 +- src/mistralai/client/chat.py | 1 + src/mistralai/client/classifiers.py | 1 + src/mistralai/client/conversations.py | 99 +- src/mistralai/client/documents.py | 81 +- src/mistralai/client/embeddings.py | 1 + src/mistralai/client/files.py | 45 +- src/mistralai/client/fim.py | 1 + src/mistralai/client/fine_tuning.py | 1 + 
src/mistralai/client/fine_tuning_jobs.py | 95 +- src/mistralai/client/httpclient.py | 1 + src/mistralai/client/libraries.py | 33 +- src/mistralai/client/models/__init__.py | 951 +++--- src/mistralai/client/models/agent.py | 13 +- .../client/models/agentaliasresponse.py | 1 + .../client/models/agentconversation.py | 1 + .../client/models/agentcreationrequest.py | 13 +- .../client/models/agenthandoffdoneevent.py | 1 + .../client/models/agenthandoffentry.py | 1 + .../client/models/agenthandoffstartedevent.py | 1 + .../client/models/agentscompletionrequest.py | 1 + .../models/agentscompletionstreamrequest.py | 1 + .../client/models/agentupdaterequest.py | 6 + src/mistralai/client/models/apiendpoint.py | 1 + ...ns_appendop.py => appendconversationop.py} | 5 +- ...eamop.py => appendconversationstreamop.py} | 5 +- .../client/models/archiveftmodelout.py | 16 +- ...ine_tuned_modelop.py => archivemodelop.py} | 5 +- .../client/models/assistantmessage.py | 1 + src/mistralai/client/models/audiochunk.py | 1 + src/mistralai/client/models/audioencoding.py | 1 + src/mistralai/client/models/audioformat.py | 1 + .../models/audiotranscriptionrequest.py | 1 + .../models/audiotranscriptionrequeststream.py | 1 + src/mistralai/client/models/basemodelcard.py | 1 + src/mistralai/client/models/batcherror.py | 1 + src/mistralai/client/models/batchjobin.py | 1 + src/mistralai/client/models/batchjobout.py | 16 +- src/mistralai/client/models/batchjobsout.py | 16 +- src/mistralai/client/models/batchjobstatus.py | 1 + src/mistralai/client/models/batchrequest.py | 1 + .../client/models/builtinconnectors.py | 1 + ...cel_batch_jobop.py => cancelbatchjobop.py} | 5 +- ...ning_jobop.py => cancelfinetuningjobop.py} | 11 +- .../models/chatclassificationrequest.py | 1 + .../client/models/chatcompletionchoice.py | 1 + .../client/models/chatcompletionrequest.py | 1 + .../client/models/chatcompletionresponse.py | 1 + .../models/chatcompletionstreamrequest.py | 1 + .../client/models/chatmoderationrequest.py | 
1 + src/mistralai/client/models/checkpointout.py | 1 + .../client/models/classificationrequest.py | 1 + .../client/models/classificationresponse.py | 1 + .../models/classificationtargetresult.py | 1 + .../client/models/classifierdetailedjobout.py | 13 +- .../client/models/classifierftmodelout.py | 11 +- .../client/models/classifierjobout.py | 14 +- .../client/models/classifiertargetin.py | 1 + .../client/models/classifiertargetout.py | 1 + .../models/classifiertrainingparameters.py | 1 + .../models/classifiertrainingparametersin.py | 1 + .../client/models/codeinterpretertool.py | 1 + src/mistralai/client/models/completionargs.py | 1 + .../client/models/completionargsstop.py | 1 + .../client/models/completionchunk.py | 1 + .../client/models/completiondetailedjobout.py | 13 +- .../client/models/completionevent.py | 1 + .../client/models/completionftmodelout.py | 11 +- .../client/models/completionjobout.py | 14 +- .../models/completionresponsestreamchoice.py | 1 + .../models/completiontrainingparameters.py | 1 + .../models/completiontrainingparametersin.py | 1 + src/mistralai/client/models/contentchunk.py | 1 + .../models/conversationappendrequest.py | 1 + .../models/conversationappendstreamrequest.py | 1 + .../client/models/conversationevents.py | 1 + .../client/models/conversationhistory.py | 1 + .../client/models/conversationinputs.py | 1 + .../client/models/conversationmessages.py | 1 + .../client/models/conversationrequest.py | 1 + .../client/models/conversationresponse.py | 1 + .../models/conversationrestartrequest.py | 1 + .../conversationrestartstreamrequest.py | 1 + .../models/conversationstreamrequest.py | 1 + .../client/models/conversationusageinfo.py | 1 + ...ning_jobop.py => createfinetuningjobop.py} | 10 +- ...iasop.py => createorupdateagentaliasop.py} | 5 +- .../client/models/deleteagentaliasop.py | 22 + ...v1_agents_deleteop.py => deleteagentop.py} | 5 +- ...ns_deleteop.py => deleteconversationop.py} | 5 +- ..._reprocess_v1op.py => deletedocumentop.py} 
| 5 +- ...outes_delete_fileop.py => deletefileop.py} | 5 +- src/mistralai/client/models/deletefileout.py | 1 + ...elete_v1op.py => deletelibraryaccessop.py} | 5 +- ...braries_get_v1op.py => deletelibraryop.py} | 5 +- ..._model_id_deleteop.py => deletemodelop.py} | 5 +- src/mistralai/client/models/deletemodelout.py | 1 + src/mistralai/client/models/deltamessage.py | 1 + .../client/models/documentlibrarytool.py | 1 + src/mistralai/client/models/documentout.py | 1 + .../client/models/documenttextcontent.py | 1 + .../client/models/documentupdatein.py | 1 + .../client/models/documenturlchunk.py | 1 + ...s_download_fileop.py => downloadfileop.py} | 5 +- src/mistralai/client/models/embeddingdtype.py | 1 + .../client/models/embeddingrequest.py | 1 + .../client/models/embeddingresponse.py | 1 + .../client/models/embeddingresponsedata.py | 1 + src/mistralai/client/models/encodingformat.py | 1 + src/mistralai/client/models/entitytype.py | 1 + src/mistralai/client/models/eventout.py | 1 + src/mistralai/client/models/file.py | 1 + src/mistralai/client/models/filechunk.py | 1 + src/mistralai/client/models/filepurpose.py | 1 + src/mistralai/client/models/fileschema.py | 1 + src/mistralai/client/models/filesignedurl.py | 1 + .../client/models/fimcompletionrequest.py | 1 + .../client/models/fimcompletionresponse.py | 1 + .../models/fimcompletionstreamrequest.py | 1 + .../client/models/finetuneablemodeltype.py | 1 + .../client/models/ftclassifierlossfunction.py | 1 + .../client/models/ftmodelcapabilitiesout.py | 1 + src/mistralai/client/models/ftmodelcard.py | 1 + src/mistralai/client/models/function.py | 1 + src/mistralai/client/models/functioncall.py | 1 + .../client/models/functioncallentry.py | 1 + .../models/functioncallentryarguments.py | 1 + .../client/models/functioncallevent.py | 1 + src/mistralai/client/models/functionname.py | 1 + .../client/models/functionresultentry.py | 1 + src/mistralai/client/models/functiontool.py | 1 + ...s_api_v1_agents_getop.py => getagentop.py} 
| 17 +- ..._get_versionop.py => getagentversionop.py} | 5 +- ...ch_get_batch_jobop.py => getbatchjobop.py} | 5 +- ...storyop.py => getconversationhistoryop.py} | 5 +- ...agesop.py => getconversationmessagesop.py} | 5 +- ...rsations_getop.py => getconversationop.py} | 5 +- ...=> getdocumentextractedtextsignedurlop.py} | 5 +- ...uments_delete_v1op.py => getdocumentop.py} | 5 +- ...atus_v1op.py => getdocumentsignedurlop.py} | 5 +- ...ned_url_v1op.py => getdocumentstatusop.py} | 5 +- ...nt_v1op.py => getdocumenttextcontentop.py} | 5 +- ..._signed_urlop.py => getfilesignedurlop.py} | 5 +- ..._tuning_jobop.py => getfinetuningjobop.py} | 11 +- ...braries_delete_v1op.py => getlibraryop.py} | 5 +- .../client/models/githubrepositoryin.py | 1 + .../client/models/githubrepositoryout.py | 1 + .../client/models/httpvalidationerror.py | 1 + .../client/models/imagegenerationtool.py | 1 + src/mistralai/client/models/imageurl.py | 1 + src/mistralai/client/models/imageurlchunk.py | 1 + src/mistralai/client/models/inputentries.py | 1 + src/mistralai/client/models/inputs.py | 1 + .../client/models/instructrequest.py | 1 + src/mistralai/client/models/jobin.py | 3 +- src/mistralai/client/models/jobmetadataout.py | 1 + src/mistralai/client/models/jobsout.py | 14 +- src/mistralai/client/models/jsonschema.py | 1 + .../client/models/legacyjobmetadataout.py | 19 +- src/mistralai/client/models/libraryin.py | 1 + .../client/models/libraryinupdate.py | 1 + src/mistralai/client/models/libraryout.py | 1 + ...ion_aliasesop.py => listagentaliasesop.py} | 5 +- ...pi_v1_agents_listop.py => listagentsop.py} | 25 +- ...t_versionsop.py => listagentversionsop.py} | 5 +- ...get_batch_jobsop.py => listbatchjobsop.py} | 20 +- ...tions_listop.py => listconversationsop.py} | 13 +- .../client/models/listdocumentout.py | 1 + ...uments_list_v1op.py => listdocumentsop.py} | 5 +- ..._routes_list_filesop.py => listfilesop.py} | 5 +- src/mistralai/client/models/listfilesout.py | 1 + ...ning_jobsop.py => 
listfinetuningjobsop.py} | 11 +- ..._list_v1op.py => listlibraryaccessesop.py} | 5 +- src/mistralai/client/models/listlibraryout.py | 1 + src/mistralai/client/models/listsharingout.py | 1 + src/mistralai/client/models/messageentries.py | 1 + .../models/messageinputcontentchunks.py | 1 + .../client/models/messageinputentry.py | 1 + .../models/messageoutputcontentchunks.py | 1 + .../client/models/messageoutputentry.py | 1 + .../client/models/messageoutputevent.py | 1 + src/mistralai/client/models/metricout.py | 1 + src/mistralai/client/models/mistralerror.py | 1 + .../client/models/mistralpromptmode.py | 1 + .../client/models/modelcapabilities.py | 1 + .../client/models/modelconversation.py | 1 + src/mistralai/client/models/modellist.py | 1 + .../client/models/moderationobject.py | 1 + .../client/models/moderationresponse.py | 1 + .../client/models/no_response_error.py | 1 + src/mistralai/client/models/ocrimageobject.py | 1 + .../client/models/ocrpagedimensions.py | 1 + src/mistralai/client/models/ocrpageobject.py | 1 + src/mistralai/client/models/ocrrequest.py | 1 + src/mistralai/client/models/ocrresponse.py | 1 + src/mistralai/client/models/ocrtableobject.py | 1 + src/mistralai/client/models/ocrusageinfo.py | 1 + .../client/models/outputcontentchunks.py | 1 + src/mistralai/client/models/paginationinfo.py | 1 + src/mistralai/client/models/prediction.py | 1 + .../client/models/processingstatusout.py | 1 + .../models/realtimetranscriptionerror.py | 1 + .../realtimetranscriptionerrordetail.py | 1 + .../models/realtimetranscriptionsession.py | 1 + .../realtimetranscriptionsessioncreated.py | 1 + .../realtimetranscriptionsessionupdated.py | 1 + src/mistralai/client/models/referencechunk.py | 1 + ...nts_get_v1op.py => reprocessdocumentop.py} | 5 +- src/mistralai/client/models/requestsource.py | 1 + .../client/models/responsedoneevent.py | 1 + .../client/models/responseerrorevent.py | 1 + src/mistralai/client/models/responseformat.py | 1 + 
.../client/models/responseformats.py | 1 + .../client/models/responsestartedevent.py | 1 + .../client/models/responsevalidationerror.py | 1 + ..._restartop.py => restartconversationop.py} | 5 +- ...amop.py => restartconversationstreamop.py} | 5 +- ...s_retrieve_fileop.py => retrievefileop.py} | 5 +- .../client/models/retrievefileout.py | 1 + ...s_model_id_getop.py => retrievemodelop.py} | 5 +- src/mistralai/client/models/sampletype.py | 1 + src/mistralai/client/models/sdkerror.py | 1 + src/mistralai/client/models/security.py | 1 + src/mistralai/client/models/shareenum.py | 1 + src/mistralai/client/models/sharingdelete.py | 1 + src/mistralai/client/models/sharingin.py | 1 + src/mistralai/client/models/sharingout.py | 1 + src/mistralai/client/models/source.py | 1 + src/mistralai/client/models/ssetypes.py | 1 + ...uning_jobop.py => startfinetuningjobop.py} | 11 +- src/mistralai/client/models/systemmessage.py | 1 + .../models/systemmessagecontentchunks.py | 1 + src/mistralai/client/models/textchunk.py | 1 + src/mistralai/client/models/thinkchunk.py | 1 + .../client/models/timestampgranularity.py | 1 + src/mistralai/client/models/tool.py | 1 + src/mistralai/client/models/toolcall.py | 1 + src/mistralai/client/models/toolchoice.py | 1 + src/mistralai/client/models/toolchoiceenum.py | 1 + .../client/models/toolexecutiondeltaevent.py | 1 + .../client/models/toolexecutiondoneevent.py | 1 + .../client/models/toolexecutionentry.py | 1 + .../models/toolexecutionstartedevent.py | 1 + src/mistralai/client/models/toolfilechunk.py | 1 + src/mistralai/client/models/toolmessage.py | 1 + .../client/models/toolreferencechunk.py | 1 + src/mistralai/client/models/tooltypes.py | 1 + src/mistralai/client/models/trainingfile.py | 1 + .../client/models/transcriptionresponse.py | 1 + .../models/transcriptionsegmentchunk.py | 1 + .../client/models/transcriptionstreamdone.py | 1 + .../models/transcriptionstreamevents.py | 1 + .../models/transcriptionstreameventtypes.py | 1 + 
.../models/transcriptionstreamlanguage.py | 1 + .../models/transcriptionstreamsegmentdelta.py | 1 + .../models/transcriptionstreamtextdelta.py | 1 + .../client/models/unarchiveftmodelout.py | 16 +- ...e_tuned_modelop.py => unarchivemodelop.py} | 5 +- ...v1_agents_updateop.py => updateagentop.py} | 5 +- ...e_versionop.py => updateagentversionop.py} | 5 +- ...nts_update_v1op.py => updatedocumentop.py} | 5 +- .../client/models/updateftmodelin.py | 1 + ...ries_update_v1op.py => updatelibraryop.py} | 5 +- ...fine_tuned_modelop.py => updatemodelop.py} | 11 +- ...op.py => updateorcreatelibraryaccessop.py} | 5 +- ...nts_upload_v1op.py => uploaddocumentop.py} | 5 +- ...outes_upload_fileop.py => uploadfileop.py} | 1 + src/mistralai/client/models/uploadfileout.py | 1 + src/mistralai/client/models/usageinfo.py | 1 + src/mistralai/client/models/usermessage.py | 1 + .../client/models/validationerror.py | 1 + .../client/models/wandbintegration.py | 1 + .../client/models/wandbintegrationout.py | 1 + .../client/models/websearchpremiumtool.py | 1 + src/mistralai/client/models/websearchtool.py | 1 + src/mistralai/client/models_.py | 57 +- src/mistralai/client/ocr.py | 1 + src/mistralai/client/sdk.py | 6 +- src/mistralai/client/sdkconfiguration.py | 1 + src/mistralai/client/transcriptions.py | 1 + src/mistralai/client/types/__init__.py | 1 + src/mistralai/client/types/basemodel.py | 1 + src/mistralai/client/utils/__init__.py | 1 + src/mistralai/client/utils/annotations.py | 1 + src/mistralai/client/utils/datetimes.py | 1 + src/mistralai/client/utils/enums.py | 1 + src/mistralai/client/utils/eventstreaming.py | 1 + src/mistralai/client/utils/forms.py | 1 + src/mistralai/client/utils/headers.py | 1 + src/mistralai/client/utils/logger.py | 1 + src/mistralai/client/utils/metadata.py | 1 + src/mistralai/client/utils/queryparams.py | 1 + src/mistralai/client/utils/requestbodies.py | 1 + src/mistralai/client/utils/retries.py | 1 + src/mistralai/client/utils/security.py | 1 + 
src/mistralai/client/utils/serializers.py | 1 + .../client/utils/unmarshal_json_response.py | 1 + src/mistralai/client/utils/url.py | 1 + src/mistralai/client/utils/values.py | 1 + uv.lock | 2 +- 437 files changed, 3809 insertions(+), 3413 deletions(-) delete mode 100644 .speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock create mode 100644 Makefile delete mode 100644 docs/models/agentsapiv1agentsgetrequest.md rename docs/models/{agentsapiv1conversationsappendrequest.md => appendconversationrequest.md} (96%) rename docs/models/{agentsapiv1conversationsappendstreamrequest.md => appendconversationstreamrequest.md} (96%) delete mode 100644 docs/models/archiveftmodeloutobject.md rename docs/models/{jobsapiroutesfinetuningarchivefinetunedmodelrequest.md => archivemodelrequest.md} (93%) delete mode 100644 docs/models/batchjoboutobject.md delete mode 100644 docs/models/batchjobsoutobject.md rename docs/models/{jobsapiroutesbatchcancelbatchjobrequest.md => cancelbatchjobrequest.md} (86%) rename docs/models/{jobsapiroutesfinetuningcancelfinetuningjobrequest.md => cancelfinetuningjobrequest.md} (88%) rename docs/models/{jobsapiroutesfinetuninggetfinetuningjobresponse.md => cancelfinetuningjobresponse.md} (83%) delete mode 100644 docs/models/classifierdetailedjoboutobject.md delete mode 100644 docs/models/classifierftmodeloutobject.md delete mode 100644 docs/models/classifierjoboutobject.md delete mode 100644 docs/models/completiondetailedjoboutobject.md delete mode 100644 docs/models/completionftmodeloutobject.md delete mode 100644 docs/models/completionjoboutobject.md rename docs/models/{jobsapiroutesfinetuningcreatefinetuningjobresponse.md => createfinetuningjobresponse.md} (80%) rename docs/models/{agentsapiv1agentscreateorupdatealiasrequest.md => createorupdateagentaliasrequest.md} (90%) create mode 100644 docs/models/deleteagentaliasrequest.md rename docs/models/{agentsapiv1agentsdeleterequest.md => deleteagentrequest.md} (89%) rename 
docs/models/{agentsapiv1conversationsgetrequest.md => deleteconversationrequest.md} (95%) rename docs/models/{librariesdocumentsgetv1request.md => deletedocumentrequest.md} (91%) rename docs/models/{filesapiroutesdeletefilerequest.md => deletefilerequest.md} (88%) rename docs/models/{librariessharedeletev1request.md => deletelibraryaccessrequest.md} (96%) rename docs/models/{librariesgetv1request.md => deletelibraryrequest.md} (91%) rename docs/models/{deletemodelv1modelsmodeliddeleterequest.md => deletemodelrequest.md} (94%) rename docs/models/{filesapiroutesdownloadfilerequest.md => downloadfilerequest.md} (88%) rename docs/models/{agentsapiv1agentsgetagentversion.md => getagentagentversion.md} (79%) create mode 100644 docs/models/getagentrequest.md rename docs/models/{agentsapiv1agentsgetversionrequest.md => getagentversionrequest.md} (90%) rename docs/models/{jobsapiroutesbatchgetbatchjobrequest.md => getbatchjobrequest.md} (92%) rename docs/models/{agentsapiv1conversationshistoryrequest.md => getconversationhistoryrequest.md} (94%) rename docs/models/{agentsapiv1conversationsmessagesrequest.md => getconversationmessagesrequest.md} (94%) rename docs/models/{agentsapiv1conversationsdeleterequest.md => getconversationrequest.md} (95%) rename docs/models/{librariesdocumentsgetsignedurlv1request.md => getdocumentextractedtextsignedurlrequest.md} (89%) rename docs/models/{librariesdocumentsdeletev1request.md => getdocumentrequest.md} (90%) rename docs/models/{librariesdocumentsreprocessv1request.md => getdocumentsignedurlrequest.md} (90%) rename docs/models/{librariesdocumentsgetstatusv1request.md => getdocumentstatusrequest.md} (90%) create mode 100644 docs/models/getdocumenttextcontentrequest.md rename docs/models/{filesapiroutesgetsignedurlrequest.md => getfilesignedurlrequest.md} (96%) rename docs/models/{jobsapiroutesfinetuninggetfinetuningjobrequest.md => getfinetuningjobrequest.md} (89%) rename docs/models/{jobsapiroutesfinetuningstartfinetuningjobresponse.md 
=> getfinetuningjobresponse.md} (82%) rename docs/models/{librariesdeletev1request.md => getlibraryrequest.md} (90%) delete mode 100644 docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md delete mode 100644 docs/models/jobsoutobject.md delete mode 100644 docs/models/legacyjobmetadataoutobject.md delete mode 100644 docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md delete mode 100644 docs/models/librariesdocumentsgettextcontentv1request.md rename docs/models/{agentsapiv1agentslistversionaliasesrequest.md => listagentaliasesrequest.md} (85%) rename docs/models/{agentsapiv1agentslistrequest.md => listagentsrequest.md} (84%) rename docs/models/{agentsapiv1agentslistversionsrequest.md => listagentversionsrequest.md} (94%) rename docs/models/{jobsapiroutesbatchgetbatchjobsrequest.md => listbatchjobsrequest.md} (90%) rename docs/models/{agentsapiv1conversationslistrequest.md => listconversationsrequest.md} (92%) rename docs/models/{agentsapiv1conversationslistresponse.md => listconversationsresponse.md} (84%) rename docs/models/{librariesdocumentslistv1request.md => listdocumentsrequest.md} (96%) rename docs/models/{filesapirouteslistfilesrequest.md => listfilesrequest.md} (98%) create mode 100644 docs/models/listfinetuningjobsrequest.md rename docs/models/{jobsapiroutesfinetuninggetfinetuningjobsstatus.md => listfinetuningjobsstatus.md} (94%) rename docs/models/{librariessharelistv1request.md => listlibraryaccessesrequest.md} (90%) create mode 100644 docs/models/orderby.md create mode 100644 docs/models/reprocessdocumentrequest.md rename docs/models/{agentsapiv1conversationsrestartrequest.md => restartconversationrequest.md} (96%) rename docs/models/{agentsapiv1conversationsrestartstreamrequest.md => restartconversationstreamrequest.md} (96%) rename docs/models/{filesapiroutesretrievefilerequest.md => retrievefilerequest.md} (88%) rename docs/models/{retrievemodelv1modelsmodelidgetrequest.md => retrievemodelrequest.md} (94%) rename 
docs/models/{jobsapiroutesfinetuningstartfinetuningjobrequest.md => startfinetuningjobrequest.md} (84%) rename docs/models/{jobsapiroutesfinetuningcancelfinetuningjobresponse.md => startfinetuningjobresponse.md} (82%) delete mode 100644 docs/models/unarchiveftmodeloutobject.md rename docs/models/{jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md => unarchivemodelrequest.md} (92%) rename docs/models/{agentsapiv1agentsupdaterequest.md => updateagentrequest.md} (96%) rename docs/models/{agentsapiv1agentsupdateversionrequest.md => updateagentversionrequest.md} (89%) rename docs/models/{librariesdocumentsupdatev1request.md => updatedocumentrequest.md} (97%) rename docs/models/{librariesupdatev1request.md => updatelibraryrequest.md} (97%) rename docs/models/{jobsapiroutesfinetuningupdatefinetunedmodelrequest.md => updatemodelrequest.md} (95%) rename docs/models/{jobsapiroutesfinetuningupdatefinetunedmodelresponse.md => updatemodelresponse.md} (81%) rename docs/models/{librariessharecreatev1request.md => updateorcreatelibraryaccessrequest.md} (95%) rename docs/models/{librariesdocumentsuploadv1request.md => uploaddocumentrequest.md} (96%) rename examples/mistral/jobs/{async_jobs.py => async_fine_tuning.py} (97%) rename examples/mistral/jobs/{async_jobs_chat.py => async_fine_tuning_chat.py} (99%) rename examples/mistral/jobs/{jobs.py => fine_tuning.py} (97%) rename examples/mistral/jobs/{dry_run_job.py => fine_tuning_dry_run.py} (97%) rename src/mistralai/client/models/{agents_api_v1_conversations_appendop.py => appendconversationop.py} (87%) rename src/mistralai/client/models/{agents_api_v1_conversations_append_streamop.py => appendconversationstreamop.py} (87%) rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py => archivemodelop.py} (76%) rename src/mistralai/client/models/{jobs_api_routes_batch_cancel_batch_jobop.py => cancelbatchjobop.py} (76%) rename 
src/mistralai/client/models/{jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py => cancelfinetuningjobop.py} (73%) rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py => createfinetuningjobop.py} (71%) rename src/mistralai/client/models/{agents_api_v1_agents_create_or_update_aliasop.py => createorupdateagentaliasop.py} (83%) create mode 100644 src/mistralai/client/models/deleteagentaliasop.py rename src/mistralai/client/models/{agents_api_v1_agents_deleteop.py => deleteagentop.py} (78%) rename src/mistralai/client/models/{agents_api_v1_conversations_deleteop.py => deleteconversationop.py} (81%) rename src/mistralai/client/models/{libraries_documents_reprocess_v1op.py => deletedocumentop.py} (82%) rename src/mistralai/client/models/{files_api_routes_delete_fileop.py => deletefileop.py} (78%) rename src/mistralai/client/models/{libraries_share_delete_v1op.py => deletelibraryaccessop.py} (83%) rename src/mistralai/client/models/{libraries_get_v1op.py => deletelibraryop.py} (77%) rename src/mistralai/client/models/{delete_model_v1_models_model_id_deleteop.py => deletemodelop.py} (79%) rename src/mistralai/client/models/{files_api_routes_download_fileop.py => downloadfileop.py} (77%) rename src/mistralai/client/models/{agents_api_v1_agents_getop.py => getagentop.py} (77%) rename src/mistralai/client/models/{agents_api_v1_agents_get_versionop.py => getagentversionop.py} (81%) rename src/mistralai/client/models/{jobs_api_routes_batch_get_batch_jobop.py => getbatchjobop.py} (93%) rename src/mistralai/client/models/{agents_api_v1_conversations_historyop.py => getconversationhistoryop.py} (80%) rename src/mistralai/client/models/{agents_api_v1_conversations_messagesop.py => getconversationmessagesop.py} (80%) rename src/mistralai/client/models/{agents_api_v1_conversations_getop.py => getconversationop.py} (90%) rename src/mistralai/client/models/{libraries_documents_get_extracted_text_signed_url_v1op.py => 
getdocumentextractedtextsignedurlop.py} (77%) rename src/mistralai/client/models/{libraries_documents_delete_v1op.py => getdocumentop.py} (82%) rename src/mistralai/client/models/{libraries_documents_get_status_v1op.py => getdocumentsignedurlop.py} (80%) rename src/mistralai/client/models/{libraries_documents_get_signed_url_v1op.py => getdocumentstatusop.py} (81%) rename src/mistralai/client/models/{libraries_documents_get_text_content_v1op.py => getdocumenttextcontentop.py} (80%) rename src/mistralai/client/models/{files_api_routes_get_signed_urlop.py => getfilesignedurlop.py} (86%) rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py => getfinetuningjobop.py} (74%) rename src/mistralai/client/models/{libraries_delete_v1op.py => getlibraryop.py} (78%) rename src/mistralai/client/models/{agents_api_v1_agents_list_version_aliasesop.py => listagentaliasesop.py} (75%) rename src/mistralai/client/models/{agents_api_v1_agents_listop.py => listagentsop.py} (82%) rename src/mistralai/client/models/{agents_api_v1_agents_list_versionsop.py => listagentversionsop.py} (88%) rename src/mistralai/client/models/{jobs_api_routes_batch_get_batch_jobsop.py => listbatchjobsop.py} (87%) rename src/mistralai/client/models/{agents_api_v1_conversations_listop.py => listconversationsop.py} (85%) rename src/mistralai/client/models/{libraries_documents_list_v1op.py => listdocumentsop.py} (95%) rename src/mistralai/client/models/{files_api_routes_list_filesop.py => listfilesop.py} (96%) rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py => listfinetuningjobsop.py} (93%) rename src/mistralai/client/models/{libraries_share_list_v1op.py => listlibraryaccessesop.py} (76%) rename src/mistralai/client/models/{libraries_documents_get_v1op.py => reprocessdocumentop.py} (81%) rename src/mistralai/client/models/{agents_api_v1_conversations_restartop.py => restartconversationop.py} (87%) rename 
src/mistralai/client/models/{agents_api_v1_conversations_restart_streamop.py => restartconversationstreamop.py} (87%) rename src/mistralai/client/models/{files_api_routes_retrieve_fileop.py => retrievefileop.py} (77%) rename src/mistralai/client/models/{retrieve_model_v1_models_model_id_getop.py => retrievemodelop.py} (89%) rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py => startfinetuningjobop.py} (72%) rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py => unarchivemodelop.py} (76%) rename src/mistralai/client/models/{agents_api_v1_agents_updateop.py => updateagentop.py} (86%) rename src/mistralai/client/models/{agents_api_v1_agents_update_versionop.py => updateagentversionop.py} (81%) rename src/mistralai/client/models/{libraries_documents_update_v1op.py => updatedocumentop.py} (87%) rename src/mistralai/client/models/{libraries_update_v1op.py => updatelibraryop.py} (85%) rename src/mistralai/client/models/{jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py => updatemodelop.py} (77%) rename src/mistralai/client/models/{libraries_share_create_v1op.py => updateorcreatelibraryaccessop.py} (81%) rename src/mistralai/client/models/{libraries_documents_upload_v1op.py => uploaddocumentop.py} (92%) rename src/mistralai/client/models/{files_api_routes_upload_fileop.py => uploadfileop.py} (97%) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 69828bd7..6e86c59c 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,19 +1,19 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: e4b3b07fe28f4666261325e923d6c5d9 + docChecksum: 2d9e4f612e5caf84349ab02663eee66e docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 - releaseVersion: 2.0.0a1 - configChecksum: d5e0f55b62bca3e8aab33c7955415e61 + releaseVersion: 2.0.0a3 + configChecksum: 7fc1ba01c21def8447b979e71593af4a repoURL: 
https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: b2306c28-6200-44c1-a856-ddd318359c15 - pristine_commit_hash: dc36861e5d8b9f4c91221be8f09dc13254755c9a - pristine_tree_hash: 640358903b623a1b0d7deabbb43f39e82676a1a1 + generation_id: 3aa9018f-cb6c-4c1b-96d0-b832fd5f6513 + pristine_commit_hash: 5c4e3b65b7572c91338d50dc3ca91ea6a46eedf7 + pristine_tree_hash: aaea604044e12872107c3b550ea7be094fb66a99 features: python: additionalDependencies: 1.0.0 @@ -62,8 +62,8 @@ trackedFiles: pristine_git_object: 1810386448a440cfc5f7b8579695b228ae40460d docs/models/agent.md: id: ffdbb4c53c87 - last_write_checksum: sha1:26d2fb743d3fdd54a6ab1258a37f08d1726927ac - pristine_git_object: ee054dd349848eff144d7064c319c3c8434bdc6c + last_write_checksum: sha1:4538aaa78a09b7e33db405f84916b1eb82f94bca + pristine_git_object: e335d889cdb70f4d3c987827ff714db90418cb39 docs/models/agentaliasresponse.md: id: 5ac4721d8947 last_write_checksum: sha1:15dcc6820e89d2c6bb799e331463419ce29ec167 @@ -82,14 +82,16 @@ trackedFiles: pristine_git_object: ea7cc75c5197ed42f9fb508a969baa16effe1f98 docs/models/agentcreationrequest.md: id: 697a770fe5c0 - last_write_checksum: sha1:d77c75f922c64df266b101a2fd23c7fe56b7894b - pristine_git_object: afc27d3b688f9ca187606243c810fd19d12bb840 + last_write_checksum: sha1:b3f12ca0a6356e657de2941c8441fc951bcc96f4 + pristine_git_object: f0f0fdbc13f8f490ded4f8df3944250aece1311b docs/models/agentcreationrequesttool.md: + id: 392d970ffb74 last_write_checksum: sha1:310d4b107554a9c16143191fdc306a5438b63768 + pristine_git_object: b3bd7fa3cead0a0a1480b0e1b3f0afbfd177b600 docs/models/agenthandoffdoneevent.md: id: dcf166a3c3b0 last_write_checksum: sha1:9e95c09f724827f5e9c202fd634bdfa2baef1b6e - pristine_git_object: c0039f41825e3667cd8e91adae5bb78a2e3ac8ae + pristine_git_object: 
6bfcc3d83457edf05d0f13957d34ead0f260599b docs/models/agenthandoffentry.md: id: 39d54f489b84 last_write_checksum: sha1:7d949e750fd24dea20cabae340f9204d8f756008 @@ -105,93 +107,19 @@ trackedFiles: docs/models/agenthandoffstartedevent.md: id: b620102af460 last_write_checksum: sha1:33732e0465423348c2ace458506a597a3dadf9b2 - pristine_git_object: 035cd02aaf338785d9f6410fde248591c5ffa5f7 + pristine_git_object: 518b5a0c4521ec55a5a28ba3ef0ad1c1fce52792 docs/models/agentobject.md: id: ed24a6d647a0 last_write_checksum: sha1:ff5dfde6cc19f09c83afb5b4f0f103096df6691d pristine_git_object: 70e143b030d3041c7538ecdacb8f5f9f8d1b5c92 - docs/models/agentsapiv1agentscreateorupdatealiasrequest.md: - id: c09ec9946094 - last_write_checksum: sha1:0883217b4bad21f5d4f8162ca72005bf9105a93f - pristine_git_object: 79406434cc6ff3d1485089f35639d6284f66d6cb - docs/models/agentsapiv1agentsdeleterequest.md: - id: 0faaaa59add9 - last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2 - pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c - docs/models/agentsapiv1agentsgetagentversion.md: - last_write_checksum: sha1:e4f4c6a64b1c2ec9465b7ad008df4d7859098e59 - docs/models/agentsapiv1agentsgetrequest.md: - id: 01740ae62cff - last_write_checksum: sha1:bc86e90289ec09b40212083a82455b4fe71c7194 - pristine_git_object: c71d4419afd3b51713e154b8021d4fe2b49d8af5 - docs/models/agentsapiv1agentsgetversionrequest.md: - id: 88ed22b85cde - last_write_checksum: sha1:0ef23807c8efa2662144da66745045abdd2cb60a - pristine_git_object: 96a7358943a69e871a2bb7f0f30d6fe2bb8dff3d - docs/models/agentsapiv1agentslistrequest.md: - id: c2720c209527 - last_write_checksum: sha1:cb599d1583ee9374d44695f5ee7efe79dbb8a503 - pristine_git_object: 8cba13253d42a180b06eab8c10297ef362fb434d - docs/models/agentsapiv1agentslistversionaliasesrequest.md: - id: 69c8bce2c017 - last_write_checksum: sha1:4083fc80627b2cc04fd271df21393944730ef1ba - pristine_git_object: 3083bf92641404738948cd57306eac978b701551 - 
docs/models/agentsapiv1agentslistversionsrequest.md: - id: 0bc44ed8d6bb - last_write_checksum: sha1:315790552fc5b2b3a6c4f7be2eb33100133abe18 - pristine_git_object: 91831700bed92cb4f609f8c412dcb0ee98b544ca - docs/models/agentsapiv1agentsupdaterequest.md: - id: 7692812cd677 - last_write_checksum: sha1:8b17ce9d488b5eab892b66ca44d0e0a01b56aa11 - pristine_git_object: f60f8e5ba0cc6923935187ba221875d757c4693e - docs/models/agentsapiv1agentsupdateversionrequest.md: - id: a001251b1624 - last_write_checksum: sha1:0ee9e0fc55fd969f2b8f2c55dec93bf10e0e5b2f - pristine_git_object: e937acc9b1d3f50eee69495b1305f7aee1c960ac - docs/models/agentsapiv1conversationsappendrequest.md: - id: 70f76380e810 - last_write_checksum: sha1:d428dc114b60362d269b5ae50a57ea60b9edee1a - pristine_git_object: ac8a00ecab30305de8eb8c7c08cda1b1c04148c3 - docs/models/agentsapiv1conversationsappendstreamrequest.md: - id: f6ada9a592c5 - last_write_checksum: sha1:8a806ca2e5bad75d9d0cf50726dc0d5b8e7e3eab - pristine_git_object: dbc330f11aa3039c9cea2dd7d477d56d5c4969d0 - docs/models/agentsapiv1conversationsdeleterequest.md: - id: c2c9f084ed93 - last_write_checksum: sha1:9ecca93f8123cebdd1f9e74cf0f4a104b46402a8 - pristine_git_object: c6eed281331cb4d2cac4470de5e04935d22eca5a - docs/models/agentsapiv1conversationsgetrequest.md: - id: d6acce23f92c - last_write_checksum: sha1:b5d5529b72c16293d3d9b5c45dcb2e3798405bcf - pristine_git_object: 67d450c88778cb27d7d0ba06d49d9f419840b32e - docs/models/agentsapiv1conversationshistoryrequest.md: - id: e3efc36ea8b5 - last_write_checksum: sha1:4155100eaed6d3b7410b3f4476f000d1879576be - pristine_git_object: 7e5d39e9a11ac437a24b8c059db56527fa93f8b0 - docs/models/agentsapiv1conversationslistrequest.md: - id: 406c3e92777a - last_write_checksum: sha1:d5c5effcf2ca32900678d20b667bdaf8ca908194 - pristine_git_object: 62c9011faf26b3a4268186f01caf98c186e7d5b4 - docs/models/agentsapiv1conversationslistresponse.md: - last_write_checksum: sha1:1144f41f8a97daacfb75c11fdf3575e553cf0859 - 
docs/models/agentsapiv1conversationsmessagesrequest.md: - id: 2c749c6620d4 - last_write_checksum: sha1:781e526b030653dc189d94ca04cdc4742f9506d2 - pristine_git_object: a91ab0466d57379eacea9d475c72db9cb228a649 - docs/models/agentsapiv1conversationsrestartrequest.md: - id: 6955883f9a44 - last_write_checksum: sha1:99c1455c7fde9b82b6940e6e1ed4f363d7c38de9 - pristine_git_object: a18a41f5395adae3942573792c86ddf7c3812ff4 - docs/models/agentsapiv1conversationsrestartstreamrequest.md: - id: 0c39856fd70e - last_write_checksum: sha1:d03475c088c059077049270c69be01c67a17f178 - pristine_git_object: 7548286af5d1db51fbfd29c893eb8afdc3c97c4d docs/models/agentscompletionrequest.md: id: 906b82c214dc last_write_checksum: sha1:84ee0378e413830260a279a67fc3b1342e643328 - pristine_git_object: 2a0c4144fb5919e5ce892db1210bde90820c127c + pristine_git_object: d87dc7da67dd883f92a23d8df4f5648e97c4f12e docs/models/agentscompletionrequestmessage.md: + id: 5337f0644b40 last_write_checksum: sha1:ecf7b7cdf0d24a5e97b520366cf816b8731734bb + pristine_git_object: 957703b528d3da6f57576064d7cb9b2af63c362a docs/models/agentscompletionrequeststop.md: id: ad1e0e74b6b8 last_write_checksum: sha1:b2422d4dada80d54b2dd499a6659a3894318d2c9 @@ -203,9 +131,11 @@ trackedFiles: docs/models/agentscompletionstreamrequest.md: id: 21d09756447b last_write_checksum: sha1:0c88bc63255733480b65b61685dcc356fcc9ed66 - pristine_git_object: b2ccd4e8fe2fc3f63d4b517f7ecfc21f3aef9d67 + pristine_git_object: dd1804a1b3a2aadc3e3c3964262b0fc25195703f docs/models/agentscompletionstreamrequestmessage.md: + id: b309ade92081 last_write_checksum: sha1:98744c9646969250242cbbfbdf428dbd7030e4bb + pristine_git_object: 6ccf4244a709de7bedbf75042efb935129a6ca01 docs/models/agentscompletionstreamrequeststop.md: id: 4925b6b8fbca last_write_checksum: sha1:c9d0d73ca46643ffdf02e6c6cd35de5c39460c20 @@ -215,25 +145,37 @@ trackedFiles: last_write_checksum: sha1:843c4946d5cab61df2cba458af40835c4e8bcafe pristine_git_object: 
4354523a7d0d21721a96e91938b89236169ccced docs/models/agenttool.md: + id: 513b8b7bc0b7 last_write_checksum: sha1:9154d0ac6b0ab8970a10a8ad7716009d62e80ce7 + pristine_git_object: 022f7e10edb22cb1b1d741c13ac586bd136d03b5 docs/models/agentupdaterequest.md: id: 75a7f820b906 - last_write_checksum: sha1:306134659876c4e87324dfec879ab0b691a74f3a - pristine_git_object: 641d1e406f0fba0fce9f10c16a15f883c7095c07 + last_write_checksum: sha1:358e39130bc439f5801a2dcc73502a1f1c2c6685 + pristine_git_object: b1830d7be6cb8e33529246a3368deaf0909a3343 docs/models/agentupdaterequesttool.md: + id: 9c9aac9dda3d last_write_checksum: sha1:25d8a331a706bf8e6056b99f8ff1a46abff6ae72 + pristine_git_object: ce5531260e9b06db0b93d4bfcf95a12b627da522 docs/models/apiendpoint.md: id: be613fd9b947 last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4 pristine_git_object: 8d83a26f19241da5ce626ff9526575c50e5d27be + docs/models/appendconversationrequest.md: + id: 295b6d446690 + last_write_checksum: sha1:0c3d7091b19abf30fb0b78800cab292abd902c1d + pristine_git_object: 977d8e8b797c8ae36de4da90bc32bba47a6a0779 + docs/models/appendconversationstreamrequest.md: + id: aeea33736f95 + last_write_checksum: sha1:a0b5b036e46688e862c7f7671c86f965b5322742 + pristine_git_object: a23231c2c2f0017ba29c8863c3046aebe8f57ff1 docs/models/archiveftmodelout.md: id: 9e855deac0d1 - last_write_checksum: sha1:ab79a7762ca33eb1f16b3ed2e5aa5318ec398829 - pristine_git_object: 46a9e755555480d333f91adfe840cdf09313e6c2 - docs/models/archiveftmodeloutobject.md: - id: 9afeccafe5b6 - last_write_checksum: sha1:4bf1b38dc9b6f275affaf353b4bf28bc63ef817c - pristine_git_object: f6f46889da24995f8e5130def3140a9fd1aff57c + last_write_checksum: sha1:41866e666241ed42e5e7c6df5a64b887f1ff774b + pristine_git_object: 98fa7b19e4579198b433eccc76b2b4d990476b72 + docs/models/archivemodelrequest.md: + id: 3fde72a45ad9 + last_write_checksum: sha1:60eaa9be631215c63a2c01da7da809ec34f5b01a + pristine_git_object: 806d135e2bc6c0da2b20a4bb84107d3ab31962ad 
docs/models/arguments.md: id: 7ea5e33709a7 last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec @@ -257,7 +199,7 @@ trackedFiles: docs/models/audiochunk.md: id: 88315a758fd4 last_write_checksum: sha1:d52e493765280fc0b1df61a0ce1086205965c712 - pristine_git_object: c443e7ade726ba88dd7ce9a8341687ef38abe598 + pristine_git_object: 8a04af045f4ce33a2964f5f75664e82c3edf1bf3 docs/models/audioencoding.md: id: 1e0dfee9c2a0 last_write_checksum: sha1:5d47cfaca916d7a47adbea71748595b3ab69a478 @@ -277,7 +219,7 @@ trackedFiles: docs/models/basemodelcard.md: id: 2f62bfbd650e last_write_checksum: sha1:4b29e0d24060b6724e82aeee05befe1cddb316f4 - pristine_git_object: 58ad5e25131804287b5f7c834afc3ad480d065a9 + pristine_git_object: 0f42504fd6446c0baf4686bfbb8481658b6789cd docs/models/batcherror.md: id: 8053e29a3f26 last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f @@ -288,20 +230,12 @@ trackedFiles: pristine_git_object: 7dcf265dfe63cbbd13b7fa0e56fc62717f3ee050 docs/models/batchjobout.md: id: 49a98e5b2aba - last_write_checksum: sha1:82e0c730eeac4fc9ee787b213e4653cee1cca5aa - pristine_git_object: cb49649b87aeb3ec10068d96222e3d803c508324 - docs/models/batchjoboutobject.md: - id: 8964218f4f7e - last_write_checksum: sha1:8fffd069c91ea950d321cd41994df78df3eb2051 - pristine_git_object: 64ae89654c3d1a2743e67068f66fbd56f70c14b5 + last_write_checksum: sha1:b504fcf5a65567ec114fdc5b79cabe7554b36cac + pristine_git_object: 5f1011734b249a75cf9381d024f295fe31ff9f68 docs/models/batchjobsout.md: id: d8041dee5b90 - last_write_checksum: sha1:619fcebe753b14a34b7d3ba56f7b45c6c2690fad - pristine_git_object: a76cfdccf96ac2adf783417444be70c5b208582b - docs/models/batchjobsoutobject.md: - id: 885adfc869d5 - last_write_checksum: sha1:3fdc878e360b22d1074bd61f95d7461d478d78a2 - pristine_git_object: d4bf9f65ae546b160dd8ec5f3ecdc4228dc91bfa + last_write_checksum: sha1:5e4127548b50abbb6cee267ac53a8e05f55b97f9 + pristine_git_object: 7a9d6f688e87851ed7ffa516523e12cb3f967c68 
docs/models/batchjobstatus.md: id: 7e6f034d3c91 last_write_checksum: sha1:9e876b4b94255e1399bbb31feb51e08691bcb8fc @@ -314,6 +248,18 @@ trackedFiles: id: 9d14e972f08a last_write_checksum: sha1:1f32eb515e32c58685d0bdc15de09656194c508c pristine_git_object: f96f50444aaa23ca291db2fd0dc69db0d9d149d9 + docs/models/cancelbatchjobrequest.md: + id: db6860fe9ec3 + last_write_checksum: sha1:d2f55d5ffec21f6f70cc77c643c73113b0d1ed43 + pristine_git_object: f31f843bb864fc21ed620e4e069b8a97a091d99c + docs/models/cancelfinetuningjobrequest.md: + id: 10d341c56c9c + last_write_checksum: sha1:a484ad9d8eb791d60e5447b845b73871e9f1e6a3 + pristine_git_object: 6525788cd527eca4d89f95d4c829c1b3eda0f06e + docs/models/cancelfinetuningjobresponse.md: + id: 0c9ca281a898 + last_write_checksum: sha1:ac02c2a268a21430e74f8075671de0b97fd844e6 + pristine_git_object: c512342e575e9b6d57da08b20f50c86510d246d8 docs/models/chatclassificationrequest.md: id: 57b86771c870 last_write_checksum: sha1:2ee5fff26c780ade7ed89617358befa93a6dfd23 @@ -321,17 +267,23 @@ trackedFiles: docs/models/chatcompletionchoice.md: id: 0d15c59ab501 last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 - pristine_git_object: d77d286eb0b2d2b018b6ff5f9617225be4fa9fa5 + pristine_git_object: deaa0ea073e1b6c21bd466c10db31db2464066f1 docs/models/chatcompletionchoicefinishreason.md: + id: 225764da91d3 last_write_checksum: sha1:b894d3408cb801e072c3c302a5676ff939d59284 + pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b docs/models/chatcompletionrequest.md: id: adffe90369d0 last_write_checksum: sha1:f6eec11c908ee6581e508fff98e785441c4b84ad - pristine_git_object: 109fa7b13d19ccc85e4633e64b44613640c171fb + pristine_git_object: f3abeeff4346c181cfca40eb819a8c6ecf656026 docs/models/chatcompletionrequestmessage.md: + id: 3f5e170d418c last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 + pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9 docs/models/chatcompletionrequeststop.md: + id: fcaf5bbea451 
last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 + pristine_git_object: 749296d420c0671d2a1d6d22483b51f577a86485 docs/models/chatcompletionrequesttoolchoice.md: id: b97041b2f15b last_write_checksum: sha1:7ad7eb133f70e07d0d6a9def36aadd08b35cf861 @@ -343,9 +295,11 @@ trackedFiles: docs/models/chatcompletionstreamrequest.md: id: cf8f29558a68 last_write_checksum: sha1:7ed921e0366c1b00225c05e60937fb8d228f027b - pristine_git_object: 7d5fb411bde92e39910018cc2ad8d4d67ea980a1 + pristine_git_object: 42792d396462dead9d7a80a87f05a0888efe348b docs/models/chatcompletionstreamrequestmessage.md: + id: 053a98476cd2 last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 + pristine_git_object: 2e4e93acca8983a3ea27b391d4606518946e13fe docs/models/chatcompletionstreamrequeststop.md: id: d0e89a4dca78 last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 @@ -357,13 +311,19 @@ trackedFiles: docs/models/chatmoderationrequest.md: id: 22862d4d20ec last_write_checksum: sha1:9bbe510ee67515092bd953ad7f84ae118398af54 - pristine_git_object: 69b6c1dc2c10abbbc2574f3782b2d85687661f11 + pristine_git_object: f252482db0e404e21a61aafba0d09d9561610c11 docs/models/chatmoderationrequestinputs1.md: + id: 89311e3e440d last_write_checksum: sha1:8d4c2dbd9207589aabf9c00cf60c61d2d3eef452 + pristine_git_object: e15b8a844110fae68c02da040cd0122be5afc09a docs/models/chatmoderationrequestinputs2.md: + id: 4daa876da841 last_write_checksum: sha1:e34eb6557e06e7783ed14d959c2a29959c26fd4c + pristine_git_object: f40a4ebe0780c493e8bd7a322aec31893669a181 docs/models/chatmoderationrequestinputs3.md: + id: aec173bca43b last_write_checksum: sha1:14ce49ace5845bc467fe1559b12374bfd36bc9a7 + pristine_git_object: ff1c6ea32233d5c5e8d6292c62f9e8eacd3340c3 docs/models/checkpointout.md: id: 909ce66e1f65 last_write_checksum: sha1:89e678d55b97353ad1c3b28d9f1ab101f6be0928 @@ -386,36 +346,28 @@ trackedFiles: pristine_git_object: f3b10727b023dd83a207d955b3d0f3cd4b7479a1 
docs/models/classifierdetailedjobout.md: id: a2084ba5cc8c - last_write_checksum: sha1:63acd8a1921ac99143685722f8812b1f572d451f - pristine_git_object: ccc88f89ed81e6e879a88b9729c4945704370fd9 + last_write_checksum: sha1:ee206a5c68bd7aed201f8274d0710e8c570a35d2 + pristine_git_object: fb532449458fb445bb79d3fa0ed8e6faa538f00a docs/models/classifierdetailedjoboutintegration.md: + id: 7a775cbd4d9f last_write_checksum: sha1:6b2691766c1795d17b1572076a693eb377c5307f - docs/models/classifierdetailedjoboutobject.md: - id: 1ca54621f5bf - last_write_checksum: sha1:5ae3d2847a66487d70bc2ff97a8c31bbbba191c7 - pristine_git_object: 08cbcffc1c60c11c07d6e8c4724f46394f7d0854 + pristine_git_object: 9dfa6e8a179529bd12fb8935c264e3c57c62cb41 docs/models/classifierdetailedjoboutstatus.md: id: a98493f9d02d last_write_checksum: sha1:3441d9961e9093d314dd1bc88df1743cd12866d2 pristine_git_object: c3118aafa8614f20c9adf331033e7822b6391752 docs/models/classifierftmodelout.md: id: 268ac482c38b - last_write_checksum: sha1:dda3d6bf88fb6a3e860821aefb8a522d8a476b1d - pristine_git_object: dd9e8bf9c0ee291b44cd4f06146dea3d3280c143 - docs/models/classifierftmodeloutobject.md: - id: 6aa25d9fe076 - last_write_checksum: sha1:5a5fe345b3a2b3e65ce3171e8d6e9b9493ec7b06 - pristine_git_object: 9fe05bcf42325a390e5c984c7bdf346668944928 + last_write_checksum: sha1:46bdbe1176bbf43dd79a4ff8255129fd82bd97bc + pristine_git_object: 6e7afbbed075efe2e29f42b7bc3d758fe47460d4 docs/models/classifierjobout.md: id: 2e3498af3f8c - last_write_checksum: sha1:311f6ca4b6b625768c4ddd63e642e14e6a58df23 - pristine_git_object: aa1d3ca910535e283059903a2c39331673c1982b + last_write_checksum: sha1:70845cc24cd48987552ca337ea5522066e6de1b9 + pristine_git_object: ceecef5decdbd74a9741401ad0f1a9e8e215ae82 docs/models/classifierjoboutintegration.md: + id: 30a340fed57d last_write_checksum: sha1:72dfda442a88f977f3480c95127534a600362806 - docs/models/classifierjoboutobject.md: - id: 04543f046d40 - last_write_checksum: 
sha1:96863c621ddf0425b818edcd5da32ddbd5fd1194 - pristine_git_object: 1b42d547de7bdfb109c3ff750c6754e15ec4a8c1 + pristine_git_object: 33af8a708618c1e54c7f55e67c8848fe45217799 docs/models/classifierjoboutstatus.md: id: 2411c6bf3297 last_write_checksum: sha1:6ceef218b783505231a0ec653292460e6cb1a65b @@ -439,7 +391,7 @@ trackedFiles: docs/models/codeinterpretertool.md: id: f009740c6e54 last_write_checksum: sha1:bce278ce22703246613254ee2dac57f8b14e8060 - pristine_git_object: d5ad789ed012accaa105ced4f8dfd8e9eb83d4a3 + pristine_git_object: 544cda9358faf6ec525d06f78068817aee55b193 docs/models/completionargs.md: id: 3b54534f9830 last_write_checksum: sha1:c0368b7c21524228939b2093ff1a4524eb57aeb7 @@ -454,16 +406,16 @@ trackedFiles: pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 docs/models/completiondetailedjobout.md: id: 634ca7241abd - last_write_checksum: sha1:e5edf096998b6b8e2048f354bd694288dd609875 - pristine_git_object: 84613080715078a73204d3984e7f97477ef548ae + last_write_checksum: sha1:7899568eedfa04cccb5b07c2e0d1e821af8fb0a2 + pristine_git_object: bc7e5d1cb5c298d0d935a9e3472ad547b5b9714c docs/models/completiondetailedjoboutintegration.md: + id: f8d1f509f456 last_write_checksum: sha1:3317db3f71962623a6144e3de0db20b4abfd5b9b - docs/models/completiondetailedjoboutobject.md: - id: 8e418065aa1c - last_write_checksum: sha1:d429d772a6a4249809bbf0c26a6547e5f2de3f11 - pristine_git_object: 1bec88e5f4c5f082c53157b8ee95b4b05cb787e3 + pristine_git_object: 9e526053160cc393dc65242cff8f8143bc67e38c docs/models/completiondetailedjoboutrepository.md: + id: a8e7452065a7 last_write_checksum: sha1:b1910efc6cd1e50391cd33daef004441bac3d3cd + pristine_git_object: 92a7b75c51f27e73ca41d5ffee28921057959878 docs/models/completiondetailedjoboutstatus.md: id: c606d38452e2 last_write_checksum: sha1:1e9a5736de32a44cf539f7eaf8214aad72ec4994 @@ -474,26 +426,24 @@ trackedFiles: pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d docs/models/completionftmodelout.md: id: 
93fed66a5794 - last_write_checksum: sha1:17c4ed9718d6556ddb103cff5a5823c3baa18f41 - pristine_git_object: cd0858258521ced3990ff393fd00c11ef0abe094 - docs/models/completionftmodeloutobject.md: - id: c6e5667c5f03 - last_write_checksum: sha1:b4cbdc01a2b439d923ad542cf852797c24d234e8 - pristine_git_object: 6f9d858caa563f4a25ae752dd40ba632ecd0af75 + last_write_checksum: sha1:ee4bccae36229f23b1db8894585cc8e88ad71f6d + pristine_git_object: ccd4844fab92d000de1cc9ba59c884e31dc5db26 docs/models/completionjobout.md: id: 77315b024171 - last_write_checksum: sha1:1070ddeaef67a65f27a365a57d343a83b4b40aca - pristine_git_object: cb471746c4f23d2ec8451f4c45bf57e2f001072f + last_write_checksum: sha1:a08ca1dcedbb9b88b9909a4b03251e2fb0cd8319 + pristine_git_object: 5eb44eef73872b0f1c2709381fc0852e3b3e224b docs/models/completionjoboutintegration.md: + id: 25e651dd8d58 last_write_checksum: sha1:59711a3fa46d6a4bff787a61c81ecc34bdaaec2e - docs/models/completionjoboutobject.md: - id: 922a1e3a4e33 - last_write_checksum: sha1:020211def2c4cd969398cf009b187ca19bd7a943 - pristine_git_object: 712b107d79a8c60c4330da4f3af307545bf1a7ec + pristine_git_object: 6474747bf8d38485f13b1702e3245ef9e0f866a9 docs/models/completionjoboutrepository.md: + id: 2c94b3ecacf1 last_write_checksum: sha1:2cb5b23640eeaf87f45dc9f180247ed7a6307df7 + pristine_git_object: 52f65558f8b3663596642d8854df36d29858beae docs/models/completionjoboutstatus.md: + id: b77ebfd0e4f0 last_write_checksum: sha1:b8f33134c63b12dc474e7714b1ac19d768a3cbbd + pristine_git_object: 917549450a096397d9a7ca0b8f5856f7cd62db04 docs/models/completionresponsestreamchoice.md: id: d56824d615a6 last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 @@ -541,7 +491,7 @@ trackedFiles: docs/models/conversationhistory.md: id: 7e97e8e6d6e9 last_write_checksum: sha1:06df76a87aca7c5acd5a28ca3306be09a8bb541b - pristine_git_object: ebb1d5136cebf2bc9b77047fe83feecc68532d03 + pristine_git_object: c8baad0b597ddb9148413a651a659b06c20351ac 
docs/models/conversationhistoryobject.md: id: 088f7df6b658 last_write_checksum: sha1:bcce4ef55e6e556f3c10f65e860faaedc8eb0671 @@ -561,17 +511,23 @@ trackedFiles: docs/models/conversationrequest.md: id: dd7f4d6807f2 last_write_checksum: sha1:e4da423f9eb7a8a5d0c21948b50e8df08a63552c - pristine_git_object: 2b4ff8ef3398561d9b3e192a51ec22f64880389c + pristine_git_object: bd7823a88a07d4bc8fe1da82e51f843e70480ee1 docs/models/conversationrequestagentversion.md: + id: 68aad87b1459 last_write_checksum: sha1:fd2e9cd7ed2499b5843c592505ec5e0596a50b33 + pristine_git_object: 9f2518211256762d03dec12c4c4464d48f7ed52c docs/models/conversationrequesthandoffexecution.md: + id: 9733b1e121d1 last_write_checksum: sha1:f7df210a46acf24abb1312123aebe9e595a190e8 + pristine_git_object: e7314f7e0080ff3f1a80afdbb229c78df5b008bb docs/models/conversationrequesttool.md: + id: bd1bb6fcea8b last_write_checksum: sha1:69d503d73f5bd044882d13cd0c7de188dd5f4831 + pristine_git_object: 2e4e8d01b5482c4e0644be52e55bf6912aeff69e docs/models/conversationresponse.md: id: 2eccf42d48af last_write_checksum: sha1:17ebabdf1dd191eeac442046511c44120dfa97a1 - pristine_git_object: 38cdadd0055d457fa371984eabcba7782e130839 + pristine_git_object: e31821288dd18bf425e442787f67a69ea35ff6a6 docs/models/conversationresponseobject.md: id: 6c028b455297 last_write_checksum: sha1:76270a07b86b1a973b28106f2a11673d082a385b @@ -603,7 +559,7 @@ trackedFiles: docs/models/conversationstreamrequest.md: id: 833f266c4f96 last_write_checksum: sha1:5cb58852d393eb6cc504b45d8b238fc2f3eecd2a - pristine_git_object: 299346f8aaa8ccddcbf7fd083389b74346ef2d4f + pristine_git_object: 8b74f9e7cdea83a5622df2c3b79debe3c4427288 docs/models/conversationstreamrequestagentversion.md: id: e99ccc842929 last_write_checksum: sha1:0ba5fca217681cdc5e08e0d82db67884bed076a6 @@ -613,29 +569,69 @@ trackedFiles: last_write_checksum: sha1:ef2ebe8f23f27144e7403f0a522326a7e4f25f50 pristine_git_object: c98e194c1d204c3a5d4234f0553712a7025d7f85 
docs/models/conversationstreamrequesttool.md: + id: 71df6212ff44 last_write_checksum: sha1:f2882742a74dd2b4f74383efa444c7ab968249dc + pristine_git_object: 0f75f82b38f224340bed468ceecfe622066740ba docs/models/conversationusageinfo.md: id: 57ef89d3ab83 last_write_checksum: sha1:d92408ad37d7261b0f83588e6216871074a50225 pristine_git_object: 57e260335959c605a0b9b4eaa8bf1f8272f73ae0 + docs/models/createfinetuningjobresponse.md: + id: a9d31306296c + last_write_checksum: sha1:a15ccee66983fcc23321f966440d02fab4463178 + pristine_git_object: f82cd793b466b0028b586781d36c690c0e5f97cd + docs/models/createorupdateagentaliasrequest.md: + id: be33079aa124 + last_write_checksum: sha1:84cb72c549ee74c44dcf00b3f6a100060e322295 + pristine_git_object: af2591ebb584965f5110ed987993f3a72b513255 + docs/models/deleteagentaliasrequest.md: + id: c116b5c42b1b + last_write_checksum: sha1:51e1544cc867389120a2d1fbb4780c855690841e + pristine_git_object: 17812ec4a03b452a2d31950cc5a9e87a8f6d79f7 + docs/models/deleteagentrequest.md: + id: 6411b6df1c85 + last_write_checksum: sha1:1157d4717b75be91744bd7464c042e367faa4b71 + pristine_git_object: 0aaacae471dd81ddc5ce4808abdd2b5653503ff6 + docs/models/deleteconversationrequest.md: + id: 7247871c454c + last_write_checksum: sha1:a43ed3e32630fbb41921fa413ab2a26a914e425e + pristine_git_object: 39d9e5dfd52d9df1d1da7093761b65e0d12a0b40 + docs/models/deletedocumentrequest.md: + id: 898eebfc019e + last_write_checksum: sha1:f06a13be4484048cf15c21d46eb2d107057b39db + pristine_git_object: eb060099f1b078fd084551338b51ee6677e8d235 docs/models/deletefileout.md: id: c7b84242a45c last_write_checksum: sha1:f2b039ab88fc83ec5dd765cab8e2ed8cce7e417d pristine_git_object: 4709cc4958d008dc24430deb597f801b91c6957f + docs/models/deletefilerequest.md: + id: ca151d3da83a + last_write_checksum: sha1:ec50f13b099a6ef28d7965f7c8721ce1f505f7d2 + pristine_git_object: bceae901954471a8667a3a61e66da6361ef50190 + docs/models/deletelibraryaccessrequest.md: + id: ca39ae894c1f + 
last_write_checksum: sha1:41b7cd5c2e4616d3edefeb271dd7089fa04bd67d + pristine_git_object: c7034b98c30234a0a8cb368d84d9b287690027de + docs/models/deletelibraryrequest.md: + id: 4be1af37ab41 + last_write_checksum: sha1:2769939a702c26be619f6c455cd48365b64110cc + pristine_git_object: c229ad73b2a7c39dab0ccdfa29e1f0475f0cdc7b docs/models/deletemodelout.md: id: 5643e76768d5 last_write_checksum: sha1:1593c64f7673e59b7ef1f4ae9f5f6b556dd6a269 pristine_git_object: 5fd4df7a7013dcd4f6489ad29cdc664714d32efd - docs/models/deletemodelv1modelsmodeliddeleterequest.md: - id: c838cee0f093 - last_write_checksum: sha1:e5b6d18b4f8ab91630ae34a4f50f01e536e08d99 - pristine_git_object: d9bc15fe393388f7d0c41abce97ead17e35e2ba4 + docs/models/deletemodelrequest.md: + id: 22c414d48ee4 + last_write_checksum: sha1:a60f549577b3461cb7552ad2080a34ad389f8579 + pristine_git_object: d80103f1610668292589b6d7b861de814c17afda docs/models/deltamessage.md: id: 6c5ed6b60968 last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 - pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9 + pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952 docs/models/deltamessagecontent.md: + id: 7307bedc8733 last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e + pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 docs/models/document.md: id: cd1d2a444370 last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52 @@ -643,7 +639,7 @@ trackedFiles: docs/models/documentlibrarytool.md: id: 68083b0ef8f3 last_write_checksum: sha1:470b969fa4983c0e7ad3d513b4b7a4fa8d5f0f41 - pristine_git_object: 82315f32b920d32741b2e53bc10e411f74a85602 + pristine_git_object: 1695bad40cb0a1eb269e4ee12c6a81cbf0c7749a docs/models/documentout.md: id: a69fd1f47711 last_write_checksum: sha1:ed446078e7194a0e44e21ab1af958d6a83597edb @@ -657,7 +653,9 @@ trackedFiles: last_write_checksum: sha1:e0faccd04229204968dbc4e8131ee72f81288182 pristine_git_object: 0993886d56868aba6844824f0e0fdf1bdb9d74f6 
docs/models/documentupload.md: + id: 7ff809a25eb0 last_write_checksum: sha1:aea0f81009be09b153019abbc01b2918a1ecc1f9 + pristine_git_object: 4e58a475f1776431c9c27a0fcdd00dd96257801f docs/models/documenturlchunk.md: id: 48437d297408 last_write_checksum: sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54 @@ -666,6 +664,10 @@ trackedFiles: id: a3574c91f539 last_write_checksum: sha1:a0134fc0ea822d55b1204ee71140f2aa9d8dbe9c pristine_git_object: 32e1fa9e975a3633fb49057b38b0ea0206b2d8ef + docs/models/downloadfilerequest.md: + id: 5acd7aafd454 + last_write_checksum: sha1:5d7056818ddc5860e43699917496ded68b91ddfa + pristine_git_object: 3f4dc6ccc6d1c67396fe97197067c5421d8dc2d5 docs/models/embeddingdtype.md: id: 22786e732e28 last_write_checksum: sha1:dbd16968cdecf706c890769d8d1557298f41ef71 @@ -695,7 +697,9 @@ trackedFiles: last_write_checksum: sha1:01c3c10e737bcd58be70b437f7ee74632972a983 pristine_git_object: 7c040b382d4c1b6bc63f582566d938be75a5f954 docs/models/entry.md: + id: da9a99ab48ab last_write_checksum: sha1:4971db390327db09f88feff5d2b8a0b1e6c5b933 + pristine_git_object: d934b6774b25713afe923154d7709755426ec2cf docs/models/eventout.md: id: 9960732c3718 last_write_checksum: sha1:dbc23814b2e54ded4aa014d63510b3a2a3259329 @@ -712,26 +716,6 @@ trackedFiles: id: ed6216584490 last_write_checksum: sha1:02767595f85228f7bfcf359f8384b8263580d53a pristine_git_object: 14cab13ee191ae60e2c5e1e336d0a5abc13f778b - docs/models/filesapiroutesdeletefilerequest.md: - id: 7fdf9a97320b - last_write_checksum: sha1:411e38d0e08a499049796d1557f79d669fc65107 - pristine_git_object: 1b02c2dbb7b3ced86ddb49c2323d1d88732b480c - docs/models/filesapiroutesdownloadfilerequest.md: - id: b9c13bb26345 - last_write_checksum: sha1:1f41dad5ba9bd63881de04d24ef49a0650d30421 - pristine_git_object: 8b28cb0e5c60ac9676656624eb3c2c6fdc8a3e88 - docs/models/filesapiroutesgetsignedurlrequest.md: - id: 08f3772db370 - last_write_checksum: sha1:26aa0140444ccef7307ef6f236932032e4784e8f - pristine_git_object: 
dbe3c801003c7bb8616f0c5be2dac2ab1e7e9fb1 - docs/models/filesapirouteslistfilesrequest.md: - id: 04bdf7c654bd - last_write_checksum: sha1:0a99755150c2ded8e5d59a96527021d29326b980 - pristine_git_object: 57d11722f1dba2640df97c22be2a91317c240608 - docs/models/filesapiroutesretrievefilerequest.md: - id: 2783bfd9c4b9 - last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab - pristine_git_object: 961bae1f51a4ae9df21b28fd7a5ca91dc7b3888b docs/models/fileschema.md: id: 9a05a660399d last_write_checksum: sha1:97987d64285ff3092635754c78ad7b68d863e197 @@ -779,7 +763,7 @@ trackedFiles: docs/models/ftmodelcard.md: id: 15ed6f94deea last_write_checksum: sha1:1c560ceaaacc1d109b2997c36de03192dfcda941 - pristine_git_object: 35032775db8ae6f4c6fbac309edacd27ee7868af + pristine_git_object: 409f0526316a621b30dfbe45126c6b232e01fad4 docs/models/function.md: id: 416a80fba031 last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 @@ -807,7 +791,7 @@ trackedFiles: docs/models/functioncallevent.md: id: cc9f2e603464 last_write_checksum: sha1:942d1bed0778ba4738993fcdbefe080934b641d5 - pristine_git_object: c25679a5d89745c1e186cdeb72fda490b2f45af2 + pristine_git_object: f406206086afa37cbc59aa551ac17a4814dddf7e docs/models/functionname.md: id: 4b3bd62c0f26 last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb @@ -827,15 +811,79 @@ trackedFiles: docs/models/functiontool.md: id: 5fb499088cdf last_write_checksum: sha1:a9a3b6530b1c48a8575402b48cde7b65efb33a7d - pristine_git_object: 8c42459304100777cf85416a5c3a984bc0e7a7ca + pristine_git_object: 0226b7045c9d82186e1111bb2025e96a4de90bd6 + docs/models/getagentagentversion.md: + id: 825de6d2614f + last_write_checksum: sha1:d99f384ff5ee73e68fa7f8581d7622068b5b7498 + pristine_git_object: 6d7b3f1d15994c24a5b992d1908fe8126da0e3ea + docs/models/getagentrequest.md: + id: 743f3a4630be + last_write_checksum: sha1:4d17d6b7b15e39520414085fc977be881e4e0a85 + pristine_git_object: 3f729dff0f7fc773f83593222da0dd0618b3e8b3 + 
docs/models/getagentversionrequest.md: + id: 4bf5feb4494a + last_write_checksum: sha1:d26546c2fdd78e0f52e2a2c50736b412ce814f6e + pristine_git_object: c98fee9d141f556520e16189e90234063e6861eb + docs/models/getbatchjobrequest.md: + id: 0c3a5debd663 + last_write_checksum: sha1:c186bbc6b04e1ed2db32f68fb22cb7eff4c1a90c + pristine_git_object: f3c67eb4a898a21e8a78c3340171458dcbd21d58 + docs/models/getconversationhistoryrequest.md: + id: 27de0e44ed80 + last_write_checksum: sha1:d89318332c87b5fa3bba22a52e209bdd5702b3f0 + pristine_git_object: fc90282bd9308a7531c3c532234fd332a223f243 + docs/models/getconversationmessagesrequest.md: + id: 82bf9b5c275b + last_write_checksum: sha1:cdbb0371c7a35e84f7938d28719acd843ebc15ce + pristine_git_object: fd037fea6c09d97bfb74332838a2b2760de4dccb + docs/models/getconversationrequest.md: + id: ad6c903380f6 + last_write_checksum: sha1:ee93a91d5daa01fc937dd09589b268bb2e42868a + pristine_git_object: 8a66a8b032cb67503c0f6b95c98e0a40b13d16ec + docs/models/getdocumentextractedtextsignedurlrequest.md: + id: d47f32212cf5 + last_write_checksum: sha1:7d695630988d5ab3773aabfe17c3fa9177d7e9c9 + pristine_git_object: ff703802ddfe0e36768daf87f4c5626028642370 + docs/models/getdocumentrequest.md: + id: 4208f9b571b3 + last_write_checksum: sha1:45f6807e2f7cd4c30f95304172cb556896571b76 + pristine_git_object: 29f62127b09511f14a065b9b6f6068e63643ab7c + docs/models/getdocumentsignedurlrequest.md: + id: 734960a10101 + last_write_checksum: sha1:04debc445e51e7d0f922bfe7873d639a844c17b4 + pristine_git_object: 72a179c086e38650afd81165575c7926d9566f69 + docs/models/getdocumentstatusrequest.md: + id: d0a69468ea34 + last_write_checksum: sha1:a8d91948737e4fa392221ec18970d27af90c203e + pristine_git_object: 3557d7738be21206061ef5806b79118432b33f26 + docs/models/getdocumenttextcontentrequest.md: + id: 6baa6485417b + last_write_checksum: sha1:5b47d1d8d5675e4b9f477c8034ef64afc912cd06 + pristine_git_object: 8593340139f28b44dfed455849198f5d5a457643 + 
docs/models/getfilesignedurlrequest.md: + id: c7b1953174af + last_write_checksum: sha1:d558115d1611827f461cc6a9f373885271c7a51d + pristine_git_object: 0be3b2888b0680d5a5fac0057cedc279d112ddb8 + docs/models/getfinetuningjobrequest.md: + id: c18796fe85f3 + last_write_checksum: sha1:8166520e2d657098131fd77c81a86099ed4d3486 + pristine_git_object: f20cb2148330c7078c6e93f55aa99f1b09086eaf + docs/models/getfinetuningjobresponse.md: + id: 8f50d4a61ae1 + last_write_checksum: sha1:509e8d190b43b5a4a3e0ae7d97bf2b4262fcd1f8 + pristine_git_object: 1b0568dd8019879ec2e1d0ff039296f600415e21 + docs/models/getlibraryrequest.md: + id: 9c9a9e6c4f03 + last_write_checksum: sha1:822494a821ee3a51a477f305c140ed39cd6465fc + pristine_git_object: 2a3acf50a6300ea3bcbc3b8432fe28cbef82c620 docs/models/githubrepositoryin.md: id: b42209ef8423 last_write_checksum: sha1:5ab33fc1b0b5513086b1cae07f416d502441db23 - pristine_git_object: 1584152ba934756793d5228d5691c07d3256c7b8 + pristine_git_object: 241cf584d5e2425e46e065f47a18bea50fa624db docs/models/githubrepositoryout.md: id: 0ca86e122722 last_write_checksum: sha1:0e3999cef8a745ae24ac36907b3431bc5103ea6f - pristine_git_object: 03f0b2661e46b48489ede1208d9c38c4324b2b35 + pristine_git_object: fe38393a0cc2eb5c0b0c4690cb0c4e5e3ec41df8 docs/models/httpvalidationerror.md: id: a211c095f2ac last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e @@ -847,7 +895,7 @@ trackedFiles: docs/models/imagegenerationtool.md: id: d5deb6b06d28 last_write_checksum: sha1:b3decee8fe7a824401f9afbd3544a69ccde4ef8e - pristine_git_object: b8fc9cf40c8cb010231837ffe3d66cb3762dd666 + pristine_git_object: 0c8de72cdd7149217010ae5d02777d1c5dd9896c docs/models/imageurl.md: id: e75dd23cec1d last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 @@ -855,13 +903,15 @@ trackedFiles: docs/models/imageurlchunk.md: id: 4407097bfff3 last_write_checksum: sha1:73e14a0beccfc9465ee6d2990462e609903f5cd5 - pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 + 
pristine_git_object: 43078c7849fb3e808c2eaeaa5a3caeab2619d700 docs/models/imageurlchunktype.md: id: b9af2db9ff60 last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e docs/models/imageurlunion.md: + id: 9d3c691a9db0 last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 + pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 docs/models/inputentries.md: id: a5c647d5ad90 last_write_checksum: sha1:4231bb97837bdcff4515ae1b00ff5e7712256e53 @@ -871,179 +921,53 @@ trackedFiles: last_write_checksum: sha1:19d8da9624030a47a3285276c5893a0fc7609435 pristine_git_object: 0f62a7ce8e965d0879507e98f808b9eb254282a6 docs/models/inputsmessage.md: + id: 174dcada287d last_write_checksum: sha1:92a95c1757e33603d1aa9ed6c9912d1c551d9974 + pristine_git_object: e3543fb4f9fff679b25f7f803eb2e8dabd56368f docs/models/instructrequest.md: id: a0034d7349a2 last_write_checksum: sha1:34a81411110cbb7a099c45e482f5d1702ae48fd3 - pristine_git_object: 9500cb588b5d27d934b04cc5fa0be26a270f6d82 + pristine_git_object: 5f0cdfff135fb72d3b1a81999a30b720c044e3d4 docs/models/instructrequestinputs.md: id: 2a677880e32a last_write_checksum: sha1:64bcc6371d70446da60f167682504568d7f2618c - pristine_git_object: 4caa028f85be2324966e61321c917cbd0c65de01 + pristine_git_object: 931ae5e47df2d2850e3ef6740e2b89e1e0138297 docs/models/instructrequestmessage.md: + id: 380503708a09 last_write_checksum: sha1:551b5d6dd3ba0b39cad32478213a9eb7549f0023 + pristine_git_object: 57ed27ab3b1430514797dd0073bc87b31e5e3815 docs/models/jobin.md: id: 1b7b37214fa8 - last_write_checksum: sha1:16436f5d3222b89d604cf326bde749d9e6f9da39 - pristine_git_object: b96517705cea7b9efd266f146080ad1aed3cc8cb + last_write_checksum: sha1:0a241378cf3791c5c3fa733f30d45c07ef841448 + pristine_git_object: 62da90727898dd84f547c436c17fefa788e4f0d6 docs/models/jobinintegration.md: + id: 200c505fa67f last_write_checksum: sha1:c9887897357e01e6e228b48d6bf0c3fb4edd29f7 + 
pristine_git_object: 103820e7ec55769227610c385addbecfcd075cae docs/models/jobinrepository.md: + id: 9ab1d5469c10 last_write_checksum: sha1:1773f59546b94688d0be16d3f5f014cd86f5b1d7 + pristine_git_object: e873ae63f359d6ac4aca03b058a7c25fbbf2ba32 docs/models/jobmetadataout.md: id: 30eb634fe247 last_write_checksum: sha1:46d54b6f6004a6e571afd5207db5170dfbce7081 pristine_git_object: 6218a161b71abbb35eb4ca6e3ce664226983efc2 - docs/models/jobsapiroutesbatchcancelbatchjobrequest.md: - id: 798cb1ca1385 - last_write_checksum: sha1:67e8bda117608aee0e09a702a1ef8a4b03c40b68 - pristine_git_object: c19d0241784ff69bc68a11f405437400057d6f62 - docs/models/jobsapiroutesbatchgetbatchjobrequest.md: - id: e83a7ec84f8a - last_write_checksum: sha1:d661875832b4b9d5f545262216c9fcb9a77c8cd0 - pristine_git_object: 8c259bea9bef11f779fd609f1212565d574457e2 - docs/models/jobsapiroutesbatchgetbatchjobsrequest.md: - id: 5b9c44ad4d31 - last_write_checksum: sha1:8e28b08c86355b097836e55559fda85487000092 - pristine_git_object: b062b8731ca7c99af968be2e65cca6aa5f122b37 - docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md: - id: 8eb8c127091e - last_write_checksum: sha1:2b93a6bed5743461bb03c8337fb25dfc5a15522e - pristine_git_object: f9700df50b8f512c4139c1830aba18989d022b8e - docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md: - id: deff83b39b78 - last_write_checksum: sha1:dac8d8f2e95aed2db9b46711e6e80816881d5d14 - pristine_git_object: 883cbac685563d2e0959b63638f6b967ebdf1ee9 - docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md: - id: c45757ba1ed9 - last_write_checksum: sha1:52d4f945aff24c03627111d0e7c73cbbba60129f - pristine_git_object: 1b331662b17cd24c22e88b01bf00d042cb658516 - docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md: - id: 8aa8030f26d7 - last_write_checksum: sha1:619bb7677fa549f5089fde98f3a00ab1d939f80d - pristine_git_object: eeddc3cdfdd975cdb69fbfcd306e9445010eb82f - docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md: - 
id: a9b75762e534 - last_write_checksum: sha1:8f1395447928e089c88dce8c0ced1030ec5f0eba - pristine_git_object: fde19800303a901149bf39c5330ef8c4da87df62 - docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md: - id: c0b31f4fc621 - last_write_checksum: sha1:6f70f5cabb62e2df7c1e4086f7a8b100143cc2aa - pristine_git_object: e0d2e3610ce460d834c2d07d9a34b09f8257217b - docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md: - id: 52078f097503 - last_write_checksum: sha1:fc134fdc7e229b8df373b77096c8299c214171a7 - pristine_git_object: 3dca3cd85245e0956b557fc5d6ae6c5e265df38d - docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md: - last_write_checksum: sha1:bbc08ca53c2da180b96ed0347cf4954410c79311 - docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md: - id: b4e2b814d8c3 - last_write_checksum: sha1:f13b5c8f2e74cc73b58a30d366032c764603f95e - pristine_git_object: 4429fe480ab9486de98940a119ac63f40045313b - docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md: - id: cfd848845787 - last_write_checksum: sha1:b3a64f467ab1c16427ef77d3acb0749ab155e213 - pristine_git_object: 64f4cca608f8e505f9eeaac623955200dd5b9553 - docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md: - id: 75b5dd1bcbaa - last_write_checksum: sha1:dd30e7ff8748d26497458f3398c0547113dc058f - pristine_git_object: 95c1734daa7164bedeeb1fa58dd792939f25bc17 - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md: - id: 60bd2e28993a - last_write_checksum: sha1:7ff770c3d0148a4818957b279875bbe5b1ecfc62 - pristine_git_object: 6d93832e68739e465de7c61993b8bcfa1468bafc - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md: - id: c265a30fd4cf - last_write_checksum: sha1:e1a739e755b4e573f592743cd34116da97a67450 - pristine_git_object: 54f4c3981978e1ac4bdf42d5b746b73a62d13162 docs/models/jobsout.md: id: cbe31f43047d - last_write_checksum: sha1:73e1ce0ff11741c22dc00d768055ad603034147c - pristine_git_object: 977013f7a679dd89fb48c4a95b266a9ea5f3f7cf 
+ last_write_checksum: sha1:4bd9ffbd2e5a286090167c795b9c3970e3c7d0a5 + pristine_git_object: 69f8342ac6f02a6e60d05b6f5b3cd892964fd3d7 docs/models/jobsoutdata.md: id: 809574cac86a last_write_checksum: sha1:06455044d314c4edbd1ce4833d551c10918f0a3e pristine_git_object: 28cec31117416b79eb8688d84b47b157974574cc - docs/models/jobsoutobject.md: - id: 1c99619e2435 - last_write_checksum: sha1:cffbcfb8673e12feb8e22fd397bf68c8745c76bb - pristine_git_object: f6c8a2c3079003a885ee9bdfc73cf7c7c7d8eded docs/models/jsonschema.md: id: a6b15ed6fac8 last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 docs/models/legacyjobmetadataout.md: id: b3b8c262f61a - last_write_checksum: sha1:bc611bf233bd5b224b1367c6b800de6c3b589b38 - pristine_git_object: 53a45485b70017e729709359407d6c9f3e0fbe35 - docs/models/legacyjobmetadataoutobject.md: - id: 5bafaafb6137 - last_write_checksum: sha1:30e5942a6d0c9fde35d29cd9d87a4304b0e4fa26 - pristine_git_object: 9873ada894f79647c05e386521c6b4208d740524 - docs/models/librariesdeletev1request.md: - id: c0c3b2e1aabc - last_write_checksum: sha1:bef84f8851b06d2d914b605f11109de1850d0294 - pristine_git_object: 68d7e54369ce75422bf8b0ff16cada1c0ae2b05c - docs/models/librariesdocumentsdeletev1request.md: - id: 9d557bd7d1cc - last_write_checksum: sha1:1b580b657559356886915ee5579b90a03db19337 - pristine_git_object: efccdb1bbc36cf644ed2d1716cbd202e6d6bf6c5 - docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md: - id: 27ad38ce4cb1 - last_write_checksum: sha1:b35ad610330232b395b5f87cc15f6ae270de6816 - pristine_git_object: 14ca66f72693f1df05eb93e0cca45f440b62d282 - docs/models/librariesdocumentsgetsignedurlv1request.md: - id: 4498715b6cfb - last_write_checksum: sha1:31f78079e31e070d080c99555cd2d85318fc4610 - pristine_git_object: 7c08c180d59a8e8475fea89424b8b2021d51385f - docs/models/librariesdocumentsgetstatusv1request.md: - id: c2219d3a3738 - last_write_checksum: 
sha1:44e79df94cf2686e83d7a2e793140a6a7b3a1c05 - pristine_git_object: e6d41875966348fd9e770d06c8099e48f0e64b5d - docs/models/librariesdocumentsgettextcontentv1request.md: - id: 850dfa465952 - last_write_checksum: sha1:4a1212e111525f4265d2924ce52f9c13d2787d4d - pristine_git_object: 2f58a4460ccdad531391318c62191e76c1ec22ac - docs/models/librariesdocumentsgetv1request.md: - id: cdd0df2f7e9d - last_write_checksum: sha1:36e5ef39552159044ecd28d20ee0792ea5bcadef - pristine_git_object: 6febc058425bb38857c391ee4c40d600858e6058 - docs/models/librariesdocumentslistv1request.md: - id: 7b5756e50d64 - last_write_checksum: sha1:2605b7972a3d7b4f73ab8052be4bf740f44f6f6f - pristine_git_object: 44f6300115853053214639982516a60b3268e778 - docs/models/librariesdocumentsreprocessv1request.md: - id: 1b8bf57b3f0a - last_write_checksum: sha1:8528785c1b4ae18d6ec6f261d29d5daac0d420a3 - pristine_git_object: 196ba17b749ce9efc1c30189864e474896814f85 - docs/models/librariesdocumentsupdatev1request.md: - id: b9147b1c0e38 - last_write_checksum: sha1:45b2cc114886b300e3b996a8b71241ac5c7260a3 - pristine_git_object: 2f18b014af4577a0ae862dfeea599d5f700005cb - docs/models/librariesdocumentsuploadv1request.md: - id: 89a89d889c72 - last_write_checksum: sha1:32294a87d8a0b173b4d6f12b607a1bb3da765776 - pristine_git_object: 7c91ca9b92839be8ab1efb4428cc8d7a78d57e1e - docs/models/librariesgetv1request.md: - id: f47ad71ec7ca - last_write_checksum: sha1:3b2bf1e4f6069d0c954e1ebf95b575a32c4adeac - pristine_git_object: 6e1e04c39c15a85d96710f8d3a8ed11a22412816 - docs/models/librariessharecreatev1request.md: - id: 99e7bb8f7fed - last_write_checksum: sha1:e40d710ad1023768a0574b3283ef35544f6b0088 - pristine_git_object: 4c05241de4ee5a76df335ae9ea71004bd02b8669 - docs/models/librariessharedeletev1request.md: - id: bc8adba83f39 - last_write_checksum: sha1:79fc5a9a3cee5b060f29edd95f00e0fea32579cf - pristine_git_object: 850e22ab79863ba544f453138322c0eb5bf544cd - docs/models/librariessharelistv1request.md: - id: 86e6f08565e2 - 
last_write_checksum: sha1:6f2ffff66fa5fb141d930bca7bb56e978d62b4a5 - pristine_git_object: 98bf6d17ab013c1dd3f0ab18c37bbfc1a63f1b76 - docs/models/librariesupdatev1request.md: - id: f7e51b528406 - last_write_checksum: sha1:cec4aa232c78ca2bd862aee3d5fb3bcc2ad9dc05 - pristine_git_object: a68ef7a8f52ee4a606cb88d0a3f96de8c2fbccb8 + last_write_checksum: sha1:d8c4e7525e2dc2f4d29bfeb6cadc648fab1c62c7 + pristine_git_object: 8a712140fbf3c36f4bd9686e135b70d8688aa9c1 docs/models/libraryin.md: id: a08170e6397c last_write_checksum: sha1:2c996ecf1ae5d9e8df702a79741b72b3571eb6ef @@ -1056,14 +980,58 @@ trackedFiles: id: 2e8b6d91ded2 last_write_checksum: sha1:d71053b44725147265871be445217e3e1a0e5ede pristine_git_object: ebf46d57de6bad7022a3e8cb8eaf88728bbbe888 + docs/models/listagentaliasesrequest.md: + id: 495659b2d40a + last_write_checksum: sha1:637e7e0e8deadcf2e77cc9469727010f90f0ad79 + pristine_git_object: b3570cb80d484dadaf2a138c70bbb477746ba416 + docs/models/listagentsrequest.md: + id: aeb9bbc163f5 + last_write_checksum: sha1:86c5f5068061b79d2e582e4dd9a8b0ed4c84cbcf + pristine_git_object: 79aec3ea6e3506797fc96a7ca9d7393543270866 + docs/models/listagentversionsrequest.md: + id: 3270f6dd4107 + last_write_checksum: sha1:14ffb20c5c48cca371ed27f6a6a8b565cd4a5565 + pristine_git_object: ba8ddaa5cb4c94623b29a1f635f38a04cc0ff497 + docs/models/listbatchjobsrequest.md: + id: e2a0b1528191 + last_write_checksum: sha1:01a587ec7cc6e183d47e106eb809e7c1e9e79e39 + pristine_git_object: 19981b2425254058bd24b218d1f7881fc3635c89 + docs/models/listconversationsrequest.md: + id: 6c0961051703 + last_write_checksum: sha1:453eb480cd48330f857b4c80210b6753a750348d + pristine_git_object: d99b420834b17f3f5b7fac630af7a7b0d2db341d + docs/models/listconversationsresponse.md: + id: 65075f5cf00c + last_write_checksum: sha1:8478c55b156c09f2b714d2854030a04494b48f7c + pristine_git_object: 9d611c553b245657181c06d7f65acaa9d8128556 docs/models/listdocumentout.md: id: 4bec19e96c34 last_write_checksum: 
sha1:c0b3a6e3841f120c52b1d7718d7226a52fe1b6d6 pristine_git_object: f14157b8db55c1201d9f7151742e9ddf0d191c16 + docs/models/listdocumentsrequest.md: + id: 36c8a1116534 + last_write_checksum: sha1:390849ce3d93a64c505b7b2f7cae411766a5e44b + pristine_git_object: 369e8edbe471dd5167ad1baf74ee5b00eb7d5043 docs/models/listfilesout.md: id: 98d4c59cc07e last_write_checksum: sha1:e76df31628984095f1123005009ddc4b59b1c2bc pristine_git_object: bcb1f13aa17f41dadb6af37541e929364e2d6cec + docs/models/listfilesrequest.md: + id: 70edaf3759f0 + last_write_checksum: sha1:686edbd5134dfe60cfd98221ec78d296a8429d28 + pristine_git_object: 2d76a76b011603e3a7c4b4932ef4b26def1cb792 + docs/models/listfinetuningjobsrequest.md: + id: 41878563fe80 + last_write_checksum: sha1:103cd0d3c5334ea60a6c6e1c2585bf9bd493c78f + pristine_git_object: 3a04fc709c2a12cc4f414701efcaec4584b7d6df + docs/models/listfinetuningjobsstatus.md: + id: 1d6d54dc70ea + last_write_checksum: sha1:c4f69e2b2b5aac719281d264722f2cba6aa048a0 + pristine_git_object: 07db9ae5d87b7192ada4843d4fe0d3e8573794c6 + docs/models/listlibraryaccessesrequest.md: + id: 0b387463f914 + last_write_checksum: sha1:2912e1fc3ee179f01fde7a21501e2501debecc2c + pristine_git_object: d98bcda22bbb2540a525f2ce1516a637446b0a0f docs/models/listlibraryout.md: id: ea34f8548bd6 last_write_checksum: sha1:cec920357bc48bea286c05d16c480a9a9369b459 @@ -1087,13 +1055,15 @@ trackedFiles: docs/models/messageinputentry.md: id: eb74af2b9341 last_write_checksum: sha1:07124339ecb87e31df5f0e2f887e23209dd269af - pristine_git_object: d55eb8769c3963518fcbc910d2e1398b6f46fd87 + pristine_git_object: 52183a32330b3e0bf91a1bd5e541dfda12d3f1a0 docs/models/messageinputentrycontent.md: id: 7e12c6be6913 last_write_checksum: sha1:6be8be0ebea2b93712ff6273c776ed3c6bc40f9a pristine_git_object: 65e55d97606cf6f3119b7b297074587e88d3d01e docs/models/messageinputentryobject.md: + id: 9a1d0d31f357 last_write_checksum: sha1:7746753005fda37834a73e62bf459eacb740ba5b + pristine_git_object: 
6bdd62e27d7353dbb7d521ad02bde358496ab108 docs/models/messageinputentryrole.md: id: 2497d07a793d last_write_checksum: sha1:a41eb58f853f25489d8c00f7a9595f443dcca2e6 @@ -1129,7 +1099,7 @@ trackedFiles: docs/models/messageoutputevent.md: id: b690693fa806 last_write_checksum: sha1:d6538a4b5d5721c09bc196f3e9523ed45dafbea7 - pristine_git_object: 92c1c61587e34f6e143263e35c33acc9332870d6 + pristine_git_object: b0fa1a2d369c89ec75f43c6b31ff52b0d80d9b1c docs/models/messageoutputeventcontent.md: id: cecea075d823 last_write_checksum: sha1:16dac25382642cf2614e24cb8dcef6538be34914 @@ -1153,19 +1123,23 @@ trackedFiles: docs/models/modelconversation.md: id: 497521ee9bd6 last_write_checksum: sha1:440c9e7c306f20bd4f4b27ab0cf770d3bf8762e2 - pristine_git_object: 1a03ef7d1dd9e1d6b51f0f9391c46feb5cd822a8 + pristine_git_object: 813e1f3a79ad14eae55bbb1b96598d6260904d9d docs/models/modelconversationobject.md: id: 4c5699d157a9 last_write_checksum: sha1:8e2e82e1fa4cb97f8c7a8a129b3cc9cd651e4055 pristine_git_object: ead1fa26f5d9641a198a14b43a0f5689456e5821 docs/models/modelconversationtool.md: + id: 2dd28167bc36 last_write_checksum: sha1:9b33f73330e5ae31de877a904954efe342e99c4f + pristine_git_object: 8723556753d077969bc665a423c057ae4ceaa0d2 docs/models/modellist.md: id: ce07fd9ce413 last_write_checksum: sha1:b4c22b5eff4478ffa5717bd5af92ca79f4a90b01 - pristine_git_object: 760882c6c5b442b09bbc91f910f960138d6a00c8 + pristine_git_object: 85b20be7376f80cf169c25b3c7117079cd4c2828 docs/models/modellistdata.md: + id: e2eb639c646f last_write_checksum: sha1:7394ba5645f990163c4d777ebbfc71f24c5d3a74 + pristine_git_object: b44e84a00d0c54f8df78650d45de0a409c901048 docs/models/moderationobject.md: id: 4e84364835f5 last_write_checksum: sha1:2831033dcc3d93d32b8813498f6eb3082e2d3c4e @@ -1175,7 +1149,9 @@ trackedFiles: last_write_checksum: sha1:18e8f4b4b97cb444824fcdce8f518c4e5a27c372 pristine_git_object: 75a5eec74071fdd0d330c9f3e10dac0873077f20 docs/models/multipartbodyparams.md: + id: f5be2d861921 
last_write_checksum: sha1:34e68e3795c7987138abd152177fa07198d2f6f6 + pristine_git_object: f14b95737fde09a120b35e2f922568ca31825bd5 docs/models/ocrimageobject.md: id: b72f3c5853b2 last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 @@ -1204,8 +1180,14 @@ trackedFiles: id: 419abbb8353a last_write_checksum: sha1:6e717a3f3de3c464e8b3237f06867cdfecec339e pristine_git_object: d9d79125cb02bc2b09d8dc543a5e2d4a6c55571c + docs/models/orderby.md: + id: 9e749ed80f72 + last_write_checksum: sha1:6ec002e3e59f37002ccb14e347b790ca4daef773 + pristine_git_object: bba50df10855a8d6acdf4b061ec2ffeb0279fd7f docs/models/output.md: + id: 376633b966cd last_write_checksum: sha1:600058f0b0f589d8688e9589762c45a0dd18cc9b + pristine_git_object: d0ee0db93f56c40f6684fcfdb5873aba586bc876 docs/models/outputcontentchunks.md: id: f7e175c8e002 last_write_checksum: sha1:5094466110028801726cc825e8809f524fe1ee24 @@ -1229,9 +1211,11 @@ trackedFiles: docs/models/realtimetranscriptionerrordetail.md: id: ea137b1051f1 last_write_checksum: sha1:7e1d18760939d6087cda5fba54553141f8a78d1e - pristine_git_object: 96420ada2ac94fca24a36ddacae9c876e14ccb7a + pristine_git_object: 5b34755dc67359bb884d5c2387608686ee527470 docs/models/realtimetranscriptionerrordetailmessage.md: + id: d25137243bef last_write_checksum: sha1:f8c3a4984d647d64e8ea4e1e42654265ffe46b0f + pristine_git_object: da3764ef56337bdc773eaf8e9aa747cbd1b407e2 docs/models/realtimetranscriptionsession.md: id: aeb0a0f87d6f last_write_checksum: sha1:c3aa4050d9cc1b73df8496760f1c723d16183f3a @@ -1252,20 +1236,26 @@ trackedFiles: id: 0944b80ea9c8 last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 + docs/models/reprocessdocumentrequest.md: + id: 3c713aad474b + last_write_checksum: sha1:100b194196051470a2ae75cc2f707afec0c8d161 + pristine_git_object: cf3982a8cd76e4b2c8429acede0a12a044cbe2ca docs/models/requestsource.md: id: 8857ab6025c4 last_write_checksum: 
sha1:4b7ecc7c5327c74e46e2b98bd6e3814935cdecdf pristine_git_object: c81c115992439350d56c91d2e3351a13df40676b docs/models/response.md: + id: 583c991c7a30 last_write_checksum: sha1:f4a3ec06ff53cd1cbdf892ff7152d39fa1746821 + pristine_git_object: 3512b7a8f9fdfcaaed9a6db06ef4266629d9fa89 docs/models/responsedoneevent.md: id: 38c38c3c065b last_write_checksum: sha1:4ac3a0fd91d5ebaccce7f4098ae416b56e08416f - pristine_git_object: ec25bd6d364b0b4959b11a6d1595bdb57cba6564 + pristine_git_object: 63d4cc06493e1ca12cf0e8ef800acfc0bdc9a02d docs/models/responseerrorevent.md: id: 3e868aa9958d last_write_checksum: sha1:4711077bf182e4f3406dd12357da49d37d172b4c - pristine_git_object: 2ea6a2e0ec412ae484f60fa1d09d02e776499bb9 + pristine_git_object: 4309bdadc323918900cc4ca4fddb18788361d648 docs/models/responseformat.md: id: 50a1e4140614 last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add @@ -1275,21 +1265,37 @@ trackedFiles: last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f docs/models/responseretrievemodelv1modelsmodelidget.md: + id: 6143ec73bdd6 last_write_checksum: sha1:6bae62cbb18559065a53f0acdacb1f72f513467e + pristine_git_object: ffbc1473d39c8266bb6b05b37677c98ca1d10858 docs/models/responsestartedevent.md: id: 88e3b9f0aa8d last_write_checksum: sha1:156f38bbe8278f9c03117135938e7cbdae3038b9 - pristine_git_object: 481bd5bba67a524dbadf9f1570a28ae20ec9f642 + pristine_git_object: e2f421af866690b34c2d9fa4595a63e9172a65f5 docs/models/responsev1conversationsget.md: + id: 48d4a45780a9 last_write_checksum: sha1:8e75db359f0d640a27498d20c2ea6d561c318d7e + pristine_git_object: 844c5d610a9a351532d12b1a73f6c660059da76b + docs/models/restartconversationrequest.md: + id: b85b069aa827 + last_write_checksum: sha1:b7fb56a5561ab329f605d77795a610da8faaf561 + pristine_git_object: f24f14e67e749da884363038ca72891449cd99da + docs/models/restartconversationstreamrequest.md: + id: 65df276279f0 + last_write_checksum: 
sha1:907807c7e5969f82e70e743fddeb4c6f4278fc1a + pristine_git_object: daa661a9250701ad33241084d5033f73d75a9d6e docs/models/retrievefileout.md: id: 8e82ae08d9b5 last_write_checksum: sha1:600d5ea4f75dab07fb1139112962affcf633a6c9 pristine_git_object: 28f97dd25718833aaa42c361337e5e60488bcdc8 - docs/models/retrievemodelv1modelsmodelidgetrequest.md: - id: ac567924689c - last_write_checksum: sha1:7534c5ec5f1ae1e750c8f610f81f2106587e81a9 - pristine_git_object: f1280f8862e9d3212a5cfccd9453884b4055710a + docs/models/retrievefilerequest.md: + id: eac92ea7ca45 + last_write_checksum: sha1:c80772e3cfbe704385abe1b347d8e69d55bd9e00 + pristine_git_object: 454b9665b8134876488eb32c57a9dc45f4d972de + docs/models/retrievemodelrequest.md: + id: 392008b3324b + last_write_checksum: sha1:b9aafe10f0cd838a0b6959ec8dde5850ce59c55d + pristine_git_object: 787c3dd1000cba873c787fd5b9dcbe3c793f2b11 docs/models/sampletype.md: id: 0e09775cd9d3 last_write_checksum: sha1:33cef5c5b097ab7a9cd6232fe3f7bca65cd1187a @@ -1322,10 +1328,18 @@ trackedFiles: id: 6a902241137c last_write_checksum: sha1:567027284c7572c0fa24132cd119e956386ff9d0 pristine_git_object: ae06b5e870d31b10f17224c99af1628a7252bbc3 + docs/models/startfinetuningjobrequest.md: + id: 48fd313ae362 + last_write_checksum: sha1:f645c1e3e3244729eaa31aabb4b3ec0454fb114f + pristine_git_object: 9df5aee8f527fea4f0c9b02a28af77a65765be48 + docs/models/startfinetuningjobresponse.md: + id: 970045c710ff + last_write_checksum: sha1:78d230946abe19e928f286562ac589c7672c9854 + pristine_git_object: dce84c5a7711cd655a624b6ba0540504a6ff75d7 docs/models/systemmessage.md: id: fdb7963e1cdf last_write_checksum: sha1:561c3372391e093c890f477b3213c308ead50b81 - pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 + pristine_git_object: dfb0cd0bd17aecbc1fe4b8410e78440f65038fef docs/models/systemmessagecontent.md: id: 94a56febaeda last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb @@ -1381,7 +1395,7 @@ trackedFiles: 
docs/models/toolexecutiondeltaevent.md: id: f2fc876ef7c6 last_write_checksum: sha1:ae1462a9b5cb56002b41f477ce262cb64ccf2f4e - pristine_git_object: 7bee6d831a92085a88c0772300bcad4ce8194edb + pristine_git_object: 7066f3485407707500e5006335279bfa37db8705 docs/models/toolexecutiondeltaeventname.md: id: 93fd3a3b669d last_write_checksum: sha1:d5dcdb165c220209ee76d81938f2d9808c77d4fc @@ -1389,7 +1403,7 @@ trackedFiles: docs/models/toolexecutiondoneevent.md: id: b604a4ca5876 last_write_checksum: sha1:6b6975ded0b0495b6c56250d153186c7818b5958 - pristine_git_object: 5898ea5eff103b99886789805d9113dfd8b01588 + pristine_git_object: b2d81be3cfa3e1dd0d1a58ef5ad16556c5e953c7 docs/models/toolexecutiondoneeventname.md: id: d19dc0060655 last_write_checksum: sha1:aa5677087e6933699135a53f664f5b86bbae5ac6 @@ -1397,9 +1411,11 @@ trackedFiles: docs/models/toolexecutionentry.md: id: 75a7560ab96e last_write_checksum: sha1:fdaa9abd5417486100ffc7059fcfdc8532935ed3 - pristine_git_object: 3678116df64ad398fef00bab39dd35c3fd5ee1f5 + pristine_git_object: adf88fb1acec13bf8016eb42d6bdc5fd3bd279b5 docs/models/toolexecutionentryname.md: + id: 86d537762559 last_write_checksum: sha1:6c528cdfbb3f2f7dc41d11f57c86676f689b8845 + pristine_git_object: fb762a5382d8b0e93dc2eb277f18adf810057c55 docs/models/toolexecutionentryobject.md: id: af106f91001f last_write_checksum: sha1:6df075bee4e84edf9b57fcf62f27b22a4e7700f4 @@ -1411,7 +1427,7 @@ trackedFiles: docs/models/toolexecutionstartedevent.md: id: 37657383654d last_write_checksum: sha1:47126a25c2a93583038ff877b85fc9ae1dcef9f3 - pristine_git_object: de81312bda08970cded88d1b3df23ebc1481ebf2 + pristine_git_object: c41c7258779f15f1f0436ad890f4947d780bfa75 docs/models/toolexecutionstartedeventname.md: id: be6b33417678 last_write_checksum: sha1:f8857baa02607b0a0da8d96d130f1cb765e3d364 @@ -1431,7 +1447,7 @@ trackedFiles: docs/models/toolmessage.md: id: 0553747c37a1 last_write_checksum: sha1:f35fa287b94d2c1a9de46c2c479dadd5dca7144d - pristine_git_object: 
a54f49332c2873471759b477fb4c712fa4fb61f5 + pristine_git_object: fa00d666d6d2baea0aac10fcdeff449eb73c9d39 docs/models/toolmessagecontent.md: id: f0522d2d3c93 last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee @@ -1463,13 +1479,15 @@ trackedFiles: docs/models/transcriptionsegmentchunk.md: id: f09db8b2273e last_write_checksum: sha1:5387f2595d14f34b8af6182c34efac4874a98308 - pristine_git_object: f620b96a75a0b9c6e015ae1f460dcccb80d113ee + pristine_git_object: 00a599ee8442f45ce4f529da18ad3e9486b12f9f docs/models/transcriptionsegmentchunktype.md: + id: 01bda77a53f8 last_write_checksum: sha1:63d511c2bd93bd477f1b7aae52954b28838316d9 + pristine_git_object: 2968fa26a2dd390b66974e6db57317616fb3b832 docs/models/transcriptionstreamdone.md: id: 2253923d93cf last_write_checksum: sha1:2a1910d59be258af8dd733b8911e5a0431fab5a4 - pristine_git_object: 9ecf7d9ca32410d92c93c62ead9674e097533ec3 + pristine_git_object: bca69a2b02e069ce240342d76ac408aec67993a9 docs/models/transcriptionstreamevents.md: id: d0f4eedfa2b6 last_write_checksum: sha1:ec6b992049bd0337d57baab56603b1fa36a0a35b @@ -1485,27 +1503,59 @@ trackedFiles: docs/models/transcriptionstreamlanguage.md: id: 5e9df200153c last_write_checksum: sha1:d5626a53dde8d6736bab75f35cee4d6666a6b795 - pristine_git_object: e16c8fdce3f04ae688ddc18650b359d2dd5d6f6f + pristine_git_object: 63fcfbc63a65cdff4228601e8a46f9d003ec9210 docs/models/transcriptionstreamsegmentdelta.md: id: f59c3fb696f2 last_write_checksum: sha1:4a031b76315f66c3d414a7dd5f34ae1b5c239b2e - pristine_git_object: 2ab32f9783f6645bba7603279c03db4465c70fff + pristine_git_object: e0143a39fb12a4a3efce3e1b250730d20cf21c7d docs/models/transcriptionstreamtextdelta.md: id: 69a13554b554 last_write_checksum: sha1:de31f5585d671f85e6a9b8f04938cf71000ae3f7 - pristine_git_object: adddfe187546c0161260cf06953efb197bf25693 + pristine_git_object: a4062171d7630bcea967a89d8df6cffd4908285f docs/models/unarchiveftmodelout.md: id: 4f2a771b328a - last_write_checksum: 
sha1:b3be8add91bbe10704ff674891f2e6377b34b539 - pristine_git_object: 287c9a007e0b2113738a1884450133558d23540e - docs/models/unarchiveftmodeloutobject.md: - id: 5fa9545c3df0 - last_write_checksum: sha1:29c0a228082142925a0fd72fef5a578f06ac764d - pristine_git_object: 623dcec24e2c676c9d50d3a3547b1dd9ffd78038 + last_write_checksum: sha1:0b9ab5d6c7c1285712127cfac9e918525303a441 + pristine_git_object: 12c3d74534897129766397a44afee0f4dac91d9f + docs/models/unarchivemodelrequest.md: + id: e6922871c93a + last_write_checksum: sha1:591461141df5089e884a2db13bfaaef1def0748c + pristine_git_object: 033dad8a66969e2b920ec40391c38daa658c6f0e + docs/models/updateagentrequest.md: + id: 371bfedd9f89 + last_write_checksum: sha1:f9ebaa4650f77595fd554bb2711d4b869cba06cc + pristine_git_object: 358cb71d2ab7dfae85ac7768936910a976d2f644 + docs/models/updateagentversionrequest.md: + id: 706f66fb34eb + last_write_checksum: sha1:913a8105b77620d32147a00c1223ce5a117d2df2 + pristine_git_object: b83eb867a518d757b23d981c962f87a0e9c8a454 + docs/models/updatedocumentrequest.md: + id: ee4e094a6aa7 + last_write_checksum: sha1:4798ef091b5d045b0cda3d2a3cc40aef0fb3155c + pristine_git_object: fa5d117a4016208d81ad53f24daa4284b35152f8 docs/models/updateftmodelin.md: id: 1b98d220f114 last_write_checksum: sha1:d1c7a8f5b32228d8e93ad4455fccda51b802f08f pristine_git_object: 4e55b1a7d96e1ad5c1e65c6f54484b24cd05fcfc + docs/models/updatelibraryrequest.md: + id: 2eda82f12f31 + last_write_checksum: sha1:cc1ca5b6f9bd4ab61e3983991f5656ff5ea22e8d + pristine_git_object: e03883cca75f3ed17fa3432e0abc2c892ec3d74a + docs/models/updatemodelrequest.md: + id: 8eabdced3e0e + last_write_checksum: sha1:28765fe537adb34e5e2ef051cd1226bdcae8ea9f + pristine_git_object: 5799c63babcd9377c5024f584328c814c4401c04 + docs/models/updatemodelresponse.md: + id: 742d796d5be3 + last_write_checksum: sha1:2e09ab747fa3247486b25057e887baf0859c3a5b + pristine_git_object: 275ee77f111b926d681a446af9741001a1c88fa8 + 
docs/models/updateorcreatelibraryaccessrequest.md: + id: c95e6b3df38f + last_write_checksum: sha1:f957324978f18d9831dafe4d1a5d78f755f51ed6 + pristine_git_object: e04567b40d62e0d705096eedaba9fa84913f584d + docs/models/uploaddocumentrequest.md: + id: a211b5f814e4 + last_write_checksum: sha1:ce851cd52da0250c8d86f1346778edb0b5c97a50 + pristine_git_object: 92152b7f247ae4d7f8373e8b13ce947b7ca2cae7 docs/models/uploadfileout.md: id: c991d0bfc54c last_write_checksum: sha1:ce5af8ffadb8443a6d1ca5fbbc014de42da35b9d @@ -1517,7 +1567,7 @@ trackedFiles: docs/models/usermessage.md: id: ed66d7a0f80b last_write_checksum: sha1:627f88dbb89e226a7d92564658c23a0e8d71342a - pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546 + pristine_git_object: 78ed066eed9f0638edc6db697eaeaad6f32b4770 docs/models/usermessagecontent.md: id: 52c072c851e8 last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a @@ -1533,69 +1583,75 @@ trackedFiles: docs/models/wandbintegration.md: id: ba1f7fe1b1a3 last_write_checksum: sha1:ef35648cec304e58ccd804eafaebe9547d78ddcf - pristine_git_object: 199d2eddc61069c80b628a12bff359ac2abc7338 + pristine_git_object: c73952d9e79ea8e08bc1c17817e74e3650def956 docs/models/wandbintegrationout.md: id: c1a0f85273d8 last_write_checksum: sha1:ce7ffc6cc34931b4f6d2b051ff63e1ca39e13882 - pristine_git_object: cec02ed87555128e6027e00f3385a61028694ac0 + pristine_git_object: a6f65667a6bcfb18b78f8f766ab71de84ca13ca7 docs/models/websearchpremiumtool.md: id: 267988aa8c3f last_write_checksum: sha1:f9b761d727cbe0c60a2d0800b0a93929c5c3f5e7 - pristine_git_object: 941fc2b8448d4caeae9318fdf08053a2b59a9bee + pristine_git_object: 07b8b9265e01bd28b1c30fbc3f1283285e7d6edd docs/models/websearchtool.md: id: fc4df52fb9b5 last_write_checksum: sha1:047fd9f950d5a86cf42a8f3ac40f754b395e39ec - pristine_git_object: c8d708bdcdbfc387a09683bdd47ebabedd566cb0 + pristine_git_object: da5e7b7b600fa3fd0799e95e7a0f9507cd8456c3 docs/sdks/accesses/README.md: id: 2ea167c2eff2 - last_write_checksum: 
sha1:22bd7a11d44295c2f433955604d3578292f26c99 - pristine_git_object: 64a1e749aeb6f2c32497a72a649ecc2b7549c077 + last_write_checksum: sha1:200d509484a1a27fec893e15c39043a9deb140da + pristine_git_object: c1e3866d1a37e1596fa61538317eb68907cbaf57 docs/sdks/agents/README.md: id: 5965d8232fd8 last_write_checksum: sha1:a655952f426d5459fa958fa5551507e4fb3f29a8 - pristine_git_object: 75efc492c4114417c22a796824ee971e9180104e + pristine_git_object: cd3ec4c6c87f34c4d3634bf510534dff163d97de docs/sdks/batchjobs/README.md: - last_write_checksum: sha1:212bc82280a58f896172d173e5be516b926bc11c + id: a3b8043c6336 + last_write_checksum: sha1:eca07f3c47acbe42264d31fba982a49005a8c983 + pristine_git_object: 24316d78b1be51649d186db1479bbf74f00f87e6 docs/sdks/betaagents/README.md: - last_write_checksum: sha1:131f220aefaff8a3ca912df661199be7a88d50ca + id: 5df79b1612d8 + last_write_checksum: sha1:f2dbb543e7bd1db239ee801c55fa1f7f92ca6322 + pristine_git_object: 0ef655a348d7381aa0a7869a022b362d90497197 docs/sdks/chat/README.md: id: 393193527c2c last_write_checksum: sha1:908e67969e8f17bbcbe3697de4233d9e1dd81a65 - pristine_git_object: 89c4fffbb777427723307b13c124668601ff5839 + pristine_git_object: 6907c29d26b51fa7748b339cc73fd3d6d11a95a5 docs/sdks/classifiers/README.md: id: 74eb09b8d620 last_write_checksum: sha1:f9cc75dbb32ea9780a9d7340e524b7f16dc18070 - pristine_git_object: 634ee419f3334ba50dd25f0e2340c32db1ec40b3 + pristine_git_object: 41b520812ac8a6031c0ab32aa771e9903fa24a97 docs/sdks/conversations/README.md: id: e22a9d2c5424 - last_write_checksum: sha1:f55def6eaab9fcbed0e86a4dee60e5c2656f0805 - pristine_git_object: acd43cdb63edd23665e808aaccc6ab3a4dc3dc85 + last_write_checksum: sha1:55b150757576819887075feac484ba76ae8abd59 + pristine_git_object: c0089f12b040f3686a584f1569ed4e0ab56c52fb docs/sdks/documents/README.md: id: 9758e88a0a9d - last_write_checksum: sha1:d9bcb4bf6c2189c282844f81b456fb29654e384c - pristine_git_object: d90e7ee7aab234cb992a904088cbbf2e57dd0baa + last_write_checksum: 
sha1:55280d8863200affd25a98d7493a0110c14baad3 + pristine_git_object: 97831f86223c6dbbaec35a240725a8c72e229961 docs/sdks/embeddings/README.md: id: 15b5b04486c1 last_write_checksum: sha1:46e57c7808ce9c24dd54c3562379d2ff3e0526e8 pristine_git_object: 0be7ea6dcace678d12d7e7e4f8e88daf7570df5d docs/sdks/files/README.md: id: e576d7a117f0 - last_write_checksum: sha1:22298532be84a02d4fc8a524d6baa4fab0adcec4 - pristine_git_object: 44c39f8a3bd783b5c592e4f22c453bd76cef434a + last_write_checksum: sha1:92558cd6688432150cc433391e2b77a328fa3939 + pristine_git_object: ae29b7bf9383f534b2ca194ec5ff261ff17b5fb6 docs/sdks/fim/README.md: id: 499b227bf6ca last_write_checksum: sha1:34ff7167b0597bf668ef75ede016cb8884372d1b pristine_git_object: 3c8c59c79db12c916577d6c064ddb16a511513fd docs/sdks/finetuningjobs/README.md: - last_write_checksum: sha1:58b5ecea679eab1691f0002c7d3323170d73357b + id: 03d609f6ebdd + last_write_checksum: sha1:206624c621a25836333f4c439e0247beb24a7492 + pristine_git_object: fe18feeb640804d9308e6fefe9b5f2371d125f9b docs/sdks/libraries/README.md: id: df9a982905a3 - last_write_checksum: sha1:0c710c0395906333b85bedd516cfca7dcb3b9b42 - pristine_git_object: bbdacf0538c6c055fef0c0109aac163e987a3dd5 + last_write_checksum: sha1:1c623647aa7b834a844e343c9e3fe0763c8445a5 + pristine_git_object: 8835d0ec8cbabcb8ab47b39df982a775342c3986 docs/sdks/models/README.md: id: b35bdf4bc7ed - last_write_checksum: sha1:37ac4b52ddcdbe548d478aed5fd95091a38b4e42 - pristine_git_object: 6fa28ca2e25c0b2f3fbf044b706d19f01193fc3c + last_write_checksum: sha1:2410579fd554ad1e5734cc313d0a75eeb04a1d14 + pristine_git_object: 0cbf1bdde52d1a52c1329ecd1116718237be5152 docs/sdks/ocr/README.md: id: 545e35d2613e last_write_checksum: sha1:a8d22a86b79a0166ecec26a3e9379fa110d49b73 @@ -1614,1242 +1670,1252 @@ trackedFiles: pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 src/mistralai/client/__init__.py: id: f1b791f9d2a5 - last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b - 
pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c + last_write_checksum: sha1:fcca936cb62cc76d57372d5bd5735877b79b53a4 + pristine_git_object: 481fc91604c413966c8510d8341edaa3355fc276 src/mistralai/client/_hooks/__init__.py: id: cef9ff97efd7 - last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d - pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 + last_write_checksum: sha1:9a6f060871150610f890cc97676c3afe9050b523 + pristine_git_object: 66a04e3727ffcc2c427d854cdbb4f5f340af050f src/mistralai/client/_hooks/sdkhooks.py: id: ed1e485b2153 - last_write_checksum: sha1:5688b56bf910f5f176bcacc58f4ad440ac2fa169 - pristine_git_object: c9318db481df2293b37e9b964da417ee5de86911 + last_write_checksum: sha1:e592d5ab277827b988257b4df3e746508ca91b23 + pristine_git_object: ecf94240a5689c8b248add46509bc7a7982d8437 src/mistralai/client/_hooks/types.py: id: 85cfedfb7582 - last_write_checksum: sha1:ea20450ab595abb6ad744ecbd58927e8fa1ce520 - pristine_git_object: e7e1bb7f61527de6095357e4f2ab11e342a4af87 + last_write_checksum: sha1:40294e852f818a974034c33e510e0f8723fcaf31 + pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c src/mistralai/client/_version.py: id: cc807b30de19 - last_write_checksum: sha1:c808e81ad8b454d646101b878105d109d74ba6ad - pristine_git_object: 8c5d6e54860c69881bf976887910fc32d183c6e5 + last_write_checksum: sha1:dd6d1521f7ecfc56be58eafc1709873a04d27fb0 + pristine_git_object: 814d9ec74a37ae50f106ea07b3c174e65685521b src/mistralai/client/accesses.py: id: 76fc53bfcf59 - last_write_checksum: sha1:da6c930bfec52d4cc344408f0aaef2874705fa68 - pristine_git_object: 307c7156626e735c802c149ea3547648ea03da09 + last_write_checksum: sha1:16574ca54176ec30b236ab1a4694f57a6314db43 + pristine_git_object: cda484c8feade66829dad587f5f397aa89d4fb6f src/mistralai/client/agents.py: id: e946546e3eaa - last_write_checksum: sha1:0ff47f41f9224c1ef6c15b5793c04a7be64f074b - pristine_git_object: c04abd21b5b7cb9b8ddfdb52ec67fffa7d21759a + 
last_write_checksum: sha1:3b46ac68d37563a9eb988ad2978083e40cf4513d + pristine_git_object: 0942cb20173f0b2e3f828f5857e3aa221f65bc1b src/mistralai/client/audio.py: id: 7a8ed2e90d61 - last_write_checksum: sha1:941d0466d9ff5d07c30a6e41cf4434857518963a - pristine_git_object: 2834ade22ab137b7620bfd4318fba4bdd9ef087f + last_write_checksum: sha1:e202d775d24c0303053e0548af83fcb04e2748f4 + pristine_git_object: f68f063c08a099d07904456daa76d8e2d2ecdbe6 src/mistralai/client/basesdk.py: id: 7518c67b81ea - last_write_checksum: sha1:c10ba4619b8c70ff876304a93b432d4466cb7112 - pristine_git_object: bddc9012f28f7881b75a720a07a3ad60845e472e + last_write_checksum: sha1:795253524d0911d227b934978bdacb84619177a3 + pristine_git_object: 611b40597b42ac309871681b38a3b3c249cbe494 src/mistralai/client/batch.py: id: cffe114c7ac7 - last_write_checksum: sha1:ed3cc7aee50879eca660845e51bb34912505d56a - pristine_git_object: d53a45fbcbeb7b1d8fb29c373101c9e2a586b877 + last_write_checksum: sha1:b452983f67b33f26e1faa60fdbbb171cb1877224 + pristine_git_object: 7e36fd0d73ebeb873f74f4109896a6cf3bb7d2ba src/mistralai/client/batch_jobs.py: - last_write_checksum: sha1:0ac09a2fcbf9f059cea8197b0961cd78603e9c9c + id: 3423fec25840 + last_write_checksum: sha1:eb1baade19f5da3dd815ebfbabccca139eb7b25d + pristine_git_object: 752c76524a4fa19ed1654943218ca5182d563ca3 src/mistralai/client/beta.py: id: 981417f45147 - last_write_checksum: sha1:538571fbb2b393c64b1e7f53d1e530d989717eb3 - pristine_git_object: b30003eae52be5e79838fe994cda8474068a43dc + last_write_checksum: sha1:85f42fc6c2318eef94c90405b985120220c9c617 + pristine_git_object: 65b761d18f7274cc33162a83efa5b33211f78952 src/mistralai/client/beta_agents.py: - last_write_checksum: sha1:295438e65ce0453cbb97988fb58d01263d88b635 + id: b64ad29b7174 + last_write_checksum: sha1:227c2ef3812c06e4a813063bf9d2282ce0884ecd + pristine_git_object: 4e692f17579635d5f0cc03f86b8158b3344ae87f src/mistralai/client/chat.py: id: 7eba0f088d47 - last_write_checksum: 
sha1:00d1ec46a2c964b39dae5f02e4d8adf23e5dcc21 - pristine_git_object: 6fa210bb01b193e1bd034431923a3d4dc8c8a16c + last_write_checksum: sha1:6f052ac3117829b16906a4e1cbfa5b1f7ab104fd + pristine_git_object: 35698d32ac870f4b59c03f02700f20c04b14462d src/mistralai/client/classifiers.py: id: 26e773725732 - last_write_checksum: sha1:3a65b39ad26b6d1c988d1e08b7b06e88da21bb76 - pristine_git_object: 537e2438afcb570a3e436ab4dd8b7d604b35b627 + last_write_checksum: sha1:abd5033ee390fdeddfa4af918cc44f6210a2a6a0 + pristine_git_object: 3407c4b77db429535465f29754a2da8145d6a5fe src/mistralai/client/conversations.py: id: 40692a878064 - last_write_checksum: sha1:d6b44a85ecf623d0257296d62b05f26742a2a2aa - pristine_git_object: 285beddbd175fee210b697d4714c28196c1fa7a2 + last_write_checksum: sha1:6e81283d3d5db5dd554af68d69313951cf5f4578 + pristine_git_object: 646b91f3980bbe9be01078162d5b4ad9afb141b9 src/mistralai/client/documents.py: id: bcc17286c31c - last_write_checksum: sha1:eb3d1d86cbc2e7e72176ff60370a9ad1d616e730 - pristine_git_object: 009a604f1c2fa367d14df7fb9f4078083c4be501 + last_write_checksum: sha1:9ae89ef80a636b55ba4cdc3ad6c77c47c1824433 + pristine_git_object: c78f2944edaac77864ff6c4dd8d19d3aab3f0cb6 src/mistralai/client/embeddings.py: id: f9c17258207e - last_write_checksum: sha1:a3fa049388bf794ed764a1a8b6736f6c29136c83 - pristine_git_object: 359f2f621d1628536b89f66115726064db34e51b + last_write_checksum: sha1:7cd6d848ed8978637988d9b7e1a7dd92dac5eb3b + pristine_git_object: 4a056baa014217927412e9dd60479c28de899e2e src/mistralai/client/files.py: id: f12df4b2ce43 - last_write_checksum: sha1:577d731e40683b309a4848d8534185e738e54d31 - pristine_git_object: 97817eab1a4b0a0649a128b06a9f3ff4077dffa5 + last_write_checksum: sha1:aa647afa486bbed48083c0b1ec954bdc5cfd0280 + pristine_git_object: 57d389f1e245f5768fe9e8991f65229dd4bd608d src/mistralai/client/fim.py: id: 217bea5d701d - last_write_checksum: sha1:d62f3bee1322a41aefc0cc01aa8313e8b7e3ae1b - pristine_git_object: 
4a834fe93a9b9a8af30f681c9541a7cef0a513e1 + last_write_checksum: sha1:90cacb025a1a1fb81e619d59819c0a652f4a5efa + pristine_git_object: be3f7742b866ac58b7bbb65e3593e9865dee134f src/mistralai/client/fine_tuning.py: id: 5d5079bbd54e - last_write_checksum: sha1:e420e8df4b265b95696085585b1b213b9d05dee4 - pristine_git_object: c57425fdf3225eaeccee47a17db198a3974995a3 + last_write_checksum: sha1:fe1f774df4436cc9c2e54ed01a48db573eb813cd + pristine_git_object: df6bc5643a13294ddfbeecc6ae84d00cd7199bed src/mistralai/client/fine_tuning_jobs.py: - last_write_checksum: sha1:4dc213f6b47379bd76c97c8fc62a4dc23acbb86e + id: fa1ea246e0b2 + last_write_checksum: sha1:edfe25f99047d4cbd45222cd23823c782286a2c8 + pristine_git_object: 9a28ded152a4f4a5b625a97e087aebc5a287d71e src/mistralai/client/httpclient.py: id: 3e46bde74327 - last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 - pristine_git_object: 89560b566073785535643e694c112bedbd3db13d + last_write_checksum: sha1:0f4ecc805be1dc3d6e0ca090f0feb7d988f6eb9d + pristine_git_object: 544af7f87d6b7097935290bebd08e30e5f485672 src/mistralai/client/libraries.py: id: d43a5f78045f - last_write_checksum: sha1:5264a24b973f49b4ea7252868f4a76baba9093b4 - pristine_git_object: 03a547410e042c19329ea9a91eef1bf25ecdcbe1 + last_write_checksum: sha1:b3fd0348f4f56aab9873d09c45ed9575baf6e7c3 + pristine_git_object: 26ceabe19a340b7fd4dbb74aebab62bc45093ae5 src/mistralai/client/models/__init__.py: id: e0e8dad92725 - last_write_checksum: sha1:1b4b7b007a50570b4592f6121d6fa5556cecae4b - pristine_git_object: 23e652220f29a882748661a8c0d21aa2830471bf + last_write_checksum: sha1:d047eab2a2a8ee5af65ed19055a0a3e3092ad2c5 + pristine_git_object: 093ffcbdb0b57458cf856f585e6637d7d5955e8d src/mistralai/client/models/agent.py: id: 1336849c84fb - last_write_checksum: sha1:39fca92a9cb4fea59a01b6ce883b1c17395978f8 - pristine_git_object: 3bedb3a3a71c116f5ccb0294bc9f3ce6690e47b2 + last_write_checksum: sha1:d41a96558ddbd52b6c71d316c291847bb6131a01 + pristine_git_object: 
05ae24cde5149e30004b7cd4a2409c753682be56 src/mistralai/client/models/agentaliasresponse.py: id: 3899a98a55dd - last_write_checksum: sha1:6dfa55d4b61a543382fab8e3a6e6d824feb5cfc7 - pristine_git_object: 4bc8225c0217f741328d52ef7df38f7a9c77af21 + last_write_checksum: sha1:d7e12ea05431361ad0219f5c8dee11273cd60397 + pristine_git_object: 6972af2a4ae846e63d2c70b733ecd6c8370ee0cd src/mistralai/client/models/agentconversation.py: id: 1b7d73eddf51 - last_write_checksum: sha1:2624deece37e8819cb0f60bbacbbf1922aa2c99c - pristine_git_object: 5dfa8c3137c59be90c655ba8cf8afb8a3966c93a + last_write_checksum: sha1:bc2f1a3710efc9c87d6796ccce953c9ce9cf3826 + pristine_git_object: a850d54c64de0c84ad4ea2b11ea1a828eb2580c4 src/mistralai/client/models/agentcreationrequest.py: id: 35b7f4933b3e - last_write_checksum: sha1:99456f8e6d8848f2cebbd96040eefbce73c9c316 - pristine_git_object: 61a5aff554f830ab9057ce9ceafc2ce78380290f + last_write_checksum: sha1:d3f61940b4cccfc9c13860844f4115e60b095823 + pristine_git_object: 898d42a9c16ffe893792e14445e9ebfcbd046ba3 src/mistralai/client/models/agenthandoffdoneevent.py: id: 82628bb5fcea - last_write_checksum: sha1:151a49e8a7f110123fd0a41e723dfdb6055e9a8e - pristine_git_object: c826aa5e1f2324cddb740b3ffc05095ff26c666d + last_write_checksum: sha1:537e9f651de951057023d3712fa1820da17a21b4 + pristine_git_object: 40bf84970e1d245c3c7fbad64d73f648f8287438 src/mistralai/client/models/agenthandoffentry.py: id: 5030bcaa3a07 - last_write_checksum: sha1:86622620c14e2aacbdcc47b9772a3b9bb4127018 - pristine_git_object: 0b0de13f8840e9ab221ea233040ca03241cba8b7 + last_write_checksum: sha1:afe800c64c74aa79fceda4e4ce808f67573edbc7 + pristine_git_object: b18fe17c70d561b926bdac04124ebca8fc1cca0b src/mistralai/client/models/agenthandoffstartedevent.py: id: 2f6093d9b222 - last_write_checksum: sha1:ba4e40a4791bad20a4ac7568e32e34f6f00cfe24 - pristine_git_object: 4b8ff1e5e3639fb94b55c0a417e9478d5a4252b2 - 
src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py: - id: 23a832f8f175 - last_write_checksum: sha1:9ca9a0be2db68005fc0dec3f24d24fccf8d0c631 - pristine_git_object: 33da325cadf36ce8162bac11f1576872bcbbdbd6 - src/mistralai/client/models/agents_api_v1_agents_deleteop.py: - id: 95adb6768908 - last_write_checksum: sha1:9118fb084668440cec39ddd47b613fb4cd796c8d - pristine_git_object: 58fe902f0a51b50db869dfa760f1a3a4cba36342 - src/mistralai/client/models/agents_api_v1_agents_get_versionop.py: - id: ef9914284afb - last_write_checksum: sha1:d9b429cd8ea7d20050c0bc2077eec0084ed916b6 - pristine_git_object: edcccda19d5c3e784a227c6356285ee48be3d7f2 - src/mistralai/client/models/agents_api_v1_agents_getop.py: - id: f5918c34f1c7 - last_write_checksum: sha1:efdd7bed8ae19047b48c16c73099d433725181ab - pristine_git_object: d4817457a33d49ddaa09e8d41f3b03b69e8e491e - src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py: - id: a04815e6c798 - last_write_checksum: sha1:7bd6ba32e2aeeee4c34f02bab1d460eb384f9229 - pristine_git_object: b9770fffe5be41579f12d76f41a049e8b41b3ef8 - src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py: - id: 19e3310c3907 - last_write_checksum: sha1:62b3b94ad3ed412f74cfc75572a91b7f3cd6b39b - pristine_git_object: 813335f9e972c976f0e887d1f26be3c224b36b0c - src/mistralai/client/models/agents_api_v1_agents_listop.py: - id: 25a6460a6e19 - last_write_checksum: sha1:586ad2257e4a2c70bdb6d0044afe7d1b20f23d93 - pristine_git_object: 119f51236dda0769ab3dc41a9dbbb11b5d5e935d - src/mistralai/client/models/agents_api_v1_agents_update_versionop.py: - id: 63f61b8891bf - last_write_checksum: sha1:b214f6850347e4c98930ef6f019fdad52668c8c0 - pristine_git_object: 116f952b2ba2a7dca47975a339267c85122cd29a - src/mistralai/client/models/agents_api_v1_agents_updateop.py: - id: bb55993c932d - last_write_checksum: sha1:28cd6d0b729745b2e16d91a5e005d59a6d3be124 - pristine_git_object: 116acaa741f79123e682db0be2adbb98cf8283d8 - 
src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py: - id: ec00e0905f15 - last_write_checksum: sha1:67967a775c3a1ec139ccd6991465ea15327e3ba7 - pristine_git_object: 9f00ffd4b484f03dae6e670d019f61a4392afc85 - src/mistralai/client/models/agents_api_v1_conversations_appendop.py: - id: 39c6125e850c - last_write_checksum: sha1:93621c5ea8fbc5c038c92596b7d4c0aef0a01e2f - pristine_git_object: 13d07ba91207f82dcea8f58c238cc743cd6c3964 - src/mistralai/client/models/agents_api_v1_conversations_deleteop.py: - id: 0792e6abbdcb - last_write_checksum: sha1:dc60f272fed790bec27c654da0fb185aab27ff82 - pristine_git_object: 81066f90302d79bc2083d1e31aa13656c27cc65f - src/mistralai/client/models/agents_api_v1_conversations_getop.py: - id: c530f2fc64d0 - last_write_checksum: sha1:89088ac683d6830ffd4f649c25ccfb60a4b094de - pristine_git_object: c919f99e38148fb9b2d51816d0dd231ee828b11d - src/mistralai/client/models/agents_api_v1_conversations_historyop.py: - id: 2f5ca33768aa - last_write_checksum: sha1:9f33f183cd07b823b4727662ea305c74853049c5 - pristine_git_object: ba1f8890c1083947e4d6882dff2b50b3987be738 - src/mistralai/client/models/agents_api_v1_conversations_listop.py: - id: 936e36181d36 - last_write_checksum: sha1:e528bf06983dd0b22a0b0bc1d470b344e85db434 - pristine_git_object: bb3c7127c4b43019405689dc2ae10f5933c763bc - src/mistralai/client/models/agents_api_v1_conversations_messagesop.py: - id: b5141764a708 - last_write_checksum: sha1:0be49e2ad8a3edb079ce4b1f092654c7a6b7e309 - pristine_git_object: e05728f2c2c0a350bdaf72fe9dc488c923230ab7 - src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py: - id: c284a1711148 - last_write_checksum: sha1:ef22ebf2e217ab41ce0b69cf388122ee18ad7b05 - pristine_git_object: 9b489ab46486cc37349d64a4fc685f1355afb79a - src/mistralai/client/models/agents_api_v1_conversations_restartop.py: - id: 3ba234e5a8fc - last_write_checksum: sha1:e7e22098d8b31f5cc5cb0e8fafebe515842c2f88 - pristine_git_object: 
8bce3ce519a69a6d1cb36383b22fb801768c4868 + last_write_checksum: sha1:933f8be5eacd86881a42cfb83612f327caa77ee7 + pristine_git_object: e278aef39d3bc5e158a094c593391fa8ad77c320 src/mistralai/client/models/agentscompletionrequest.py: id: 3960bc4c545f - last_write_checksum: sha1:d22d3513e2b391127df2202ca50b1fb9de605103 - pristine_git_object: 22368e44adb1b3ecff58d2b92592710335a062b9 + last_write_checksum: sha1:ee1e60d894d3a9277c1a3970c422483ffa502e21 + pristine_git_object: f4a2d646927c8c0f250507f52c5e7515830759ad src/mistralai/client/models/agentscompletionstreamrequest.py: id: 1b73f90befc2 - last_write_checksum: sha1:02fd1cf62fc203635099ad60fb9b41e82a82e0f8 - pristine_git_object: 37d46c79d8964d799679413e14122a5146799eb6 + last_write_checksum: sha1:3bc4976eeda6d9b30bba72e7f7c417ca9ba885c5 + pristine_git_object: 732e2402190d40bc5360868d3048d57fff9e7b55 src/mistralai/client/models/agentupdaterequest.py: id: 2d5a3a437819 - last_write_checksum: sha1:65fdf42d54199ad3b951089bdea26deca0134440 - pristine_git_object: 261ac069ce4e2b630d39080edf47bf2ad510ffb4 + last_write_checksum: sha1:4a0ef549756904749a36b580cc2296a6a54d6647 + pristine_git_object: 96e209d41b638002f129ec4c13748082ccc3a8db src/mistralai/client/models/apiendpoint.py: id: 00b34ce0a24d - last_write_checksum: sha1:0a1a08e7faaa7be804de952248b4f715c942af9a - pristine_git_object: a6072d568e08ab1f5e010d5924794adfb2188920 + last_write_checksum: sha1:733e852bf75956acd2c72a23443627abfa090b7b + pristine_git_object: a6665c1076f05c28936510c24ee7d3498d7e7a24 + src/mistralai/client/models/appendconversationop.py: + id: 1c47dd1e7c7e + last_write_checksum: sha1:109ced509e3caa5e5c9610b3a18839d113be708a + pristine_git_object: 710b8e1ca3fbfbb747e48d7699588bc199a41274 + src/mistralai/client/models/appendconversationstreamop.py: + id: 1ab08b189e9d + last_write_checksum: sha1:edd2a91da29f83646538b57e4d29f006d31f9dec + pristine_git_object: 55efca0e32c5d54d100563500aee9b61952d07c2 src/mistralai/client/models/archiveftmodelout.py: id: 
bab499599d30 - last_write_checksum: sha1:352eb0aca8368d29ef1b68820540363e8fa69be4 - pristine_git_object: 6108c7e153abecfc85be93b6fa1f9f22480f6d9b + last_write_checksum: sha1:92f5b5a89ae5e52523d30069629e0ac8dc858d6b + pristine_git_object: 3107116c4a2c78c591999b220349325612a19b4e + src/mistralai/client/models/archivemodelop.py: + id: beefa1df3b7c + last_write_checksum: sha1:6f78b2f84f42267d4928a5a3ad1d3d3cae417cac + pristine_git_object: 30b4a9bd71f349cc4ab4b12df73770d327008527 src/mistralai/client/models/assistantmessage.py: id: 2b49546e0742 - last_write_checksum: sha1:235a0f8d14b3100f5c498a9784ddda1f824a77a9 - pristine_git_object: 3ba14ce78e01c92458477bb025b9e5ded074fd4d + last_write_checksum: sha1:a58ecb7bc381af02d83247f0518a3d34013b4575 + pristine_git_object: 5a4a2085e838196d3ab2b1c00bbeb7a78516dfb2 src/mistralai/client/models/audiochunk.py: id: ce5dce4dced2 - last_write_checksum: sha1:5b7ef3c96f0d8b240d1a7354379dbebd911604c3 - pristine_git_object: 80d836f27ae65f30c6ca0e1d4d5d585bbf498cfd + last_write_checksum: sha1:8218d4c7118f677f16a3a63c55348c40d3ab3156 + pristine_git_object: a51868279b9b4ce2d97990286512d69f8d7f2e82 src/mistralai/client/models/audioencoding.py: id: b14e6a50f730 - last_write_checksum: sha1:8c8d6c1da3958200bf774313c485189426439545 - pristine_git_object: 557f53ed7a90f05e5c457f8b217d3df07e113e0b + last_write_checksum: sha1:92ca06dce513cd39b2c7d9e5848cf426b40598ce + pristine_git_object: 67fec75d72845b8dda774c96767a0b233f269fe5 src/mistralai/client/models/audioformat.py: id: c8655712c218 - last_write_checksum: sha1:baef21b264f77117bbaa1336d7efefae916b9119 - pristine_git_object: 7ea10b3ad610aa1500fd25500ff942988ea0e1db + last_write_checksum: sha1:8ee7b564d106b601b6ad8a9321c27dfff421ce5e + pristine_git_object: fef87ae76b31128ebd5ced4278e274c249181c23 src/mistralai/client/models/audiotranscriptionrequest.py: id: e4148b4d23e7 - last_write_checksum: sha1:52c245a739864ca838d4c4ef4bdf74e7b0c60f2e - pristine_git_object: 
78a3797882841a6fd1251d72756f6b75f6d01006 + last_write_checksum: sha1:6d7b267bc241c1f72b5b7839d6e2ad76a4c1ecff + pristine_git_object: 8c47a83cada33d8dbd4a9ffdedb55d3f4f55dadf src/mistralai/client/models/audiotranscriptionrequeststream.py: id: 33a07317a3b3 - last_write_checksum: sha1:e468052c9ab8681ff0e1121e61aff406fc4427fc - pristine_git_object: 350643614e23002bc55e99e2d1807bedd80a0613 + last_write_checksum: sha1:66ae6146b9b75600df720054ec2c21e4e996b1fe + pristine_git_object: a080cee24c0d67c38fc6785c440418580e097700 src/mistralai/client/models/basemodelcard.py: id: 556ebdc33276 - last_write_checksum: sha1:6ebd9dd362ad23d34eb35451af01897662854726 - pristine_git_object: 8ce7f139b6018c4a7358a21534532cd3e741fa8a + last_write_checksum: sha1:94871ce94c92fbbcff9fa5d6a543c824b17ee13b + pristine_git_object: 17a3e5c93339082f408f9ab5f34b5a01e24c74e0 src/mistralai/client/models/batcherror.py: id: 1563e2a576ec - last_write_checksum: sha1:239f9c44477941c45a3e7fe863828299d36267d6 - pristine_git_object: a9c8362bfa08ab4727f08a6dd2b44a71040560f7 + last_write_checksum: sha1:9b59325428680d58151696c1738ad5466d67a78d + pristine_git_object: c1bf722a45c4326f24f7fd34ea536d59c48c67f2 src/mistralai/client/models/batchjobin.py: id: 72b25c2038d4 - last_write_checksum: sha1:0064f199b6f27b5101f6a9abf0532f61c522e2c8 - pristine_git_object: 39cf70b5bdf8db8adaa5c9d1dd8a227b2365879b + last_write_checksum: sha1:667020377b2ca85dfd9c5aed96c7d4ba2571093b + pristine_git_object: a0c3b9146130a5ebfdbd0ec6338092bacc01bf85 src/mistralai/client/models/batchjobout.py: id: cbf1d872a46e - last_write_checksum: sha1:44a92b4f427b77db29294a3b6d375f8622660ee1 - pristine_git_object: 008d43b4340cf8853fac751fb6f15525f765fe39 + last_write_checksum: sha1:9031bc5ff1986ddc283551f7f5d210c9de67cc56 + pristine_git_object: 99c2b95118364d660f0cefde16507a83e8c9cafe src/mistralai/client/models/batchjobsout.py: id: 20b2516e7efa - last_write_checksum: sha1:7d4223363e861137b9bce0dc78460c732a63c90b - pristine_git_object: 
2654dac04c126a933f6d045f43f16a30263750dc + last_write_checksum: sha1:426287f6ef9ed88e75f9e318582627d066f9e4f0 + pristine_git_object: f65fc040a964c68c82b5df7d3fb9e40222182322 src/mistralai/client/models/batchjobstatus.py: id: 61e08cf5eea9 - last_write_checksum: sha1:f90059b4aaead197100965c648114254e7dc4888 - pristine_git_object: 4b28059ba71b394d91f32dba3ba538a73c9af7a5 + last_write_checksum: sha1:78934183519948464385245cbc89efb68ac00bfb + pristine_git_object: bd77faa2fbed74b19a8d3884af6d43bc1b4806e0 src/mistralai/client/models/batchrequest.py: id: 6f36819eeb46 - last_write_checksum: sha1:0ce0e6982c96933e73a31c6ebfb29f78b6ebf13b - pristine_git_object: 24f50a9af9a74f6bec7e8903a966d114966a36d3 + last_write_checksum: sha1:115df324d1fec164bae60bf4b37acfa5149b3172 + pristine_git_object: 41c4523456398b302e0b7eb35824efc014f03aa6 src/mistralai/client/models/builtinconnectors.py: id: 2d276ce938dc - last_write_checksum: sha1:50d2b60942ca1d7c9868ce59bf01ed860c09f313 - pristine_git_object: 6a3b2476d54096722eb3e7a271629d108028bd35 + last_write_checksum: sha1:4ceb3182009b6535c07d652ccf46661b553b6272 + pristine_git_object: ecf60d3c1a83028d9cf755d4c9d5459f6b56e72a + src/mistralai/client/models/cancelbatchjobop.py: + id: cebac10b56a9 + last_write_checksum: sha1:2614180488e51c0e701fffdb058b39892c5bc1e5 + pristine_git_object: cd94ee86467247fe2bc7f7381fa05b57bedabef1 + src/mistralai/client/models/cancelfinetuningjobop.py: + id: c9a1b39f0d02 + last_write_checksum: sha1:139d3c443678aeeb8afedea8b2a783210e5ac28c + pristine_git_object: ddd445bb433df9a0f987693d97088d79e5e8c47f src/mistralai/client/models/chatclassificationrequest.py: id: afd9cdc71834 - last_write_checksum: sha1:84cc02714fe8ae408a526ab68c143b9b51ea5279 - pristine_git_object: 450810225bb43bbd1539768e291840a210489f0f + last_write_checksum: sha1:91f62e46c415a0168442695f61cb30756227ed1a + pristine_git_object: 8b6d07b906c688a3849b8a4576cc10e075a6868f src/mistralai/client/models/chatcompletionchoice.py: id: 7e6a512f6a04 - 
last_write_checksum: sha1:dee3be3b6950e355b14cce5be6c34bd5d03ba325 - pristine_git_object: 5d888cfd73b82097d647f2f5ecdbdf8beee2e098 + last_write_checksum: sha1:de0281a258140f081012b303e3c14e0b42acdf63 + pristine_git_object: 2c515f6e9a290ebab43bae41e07493e4b99afe8f src/mistralai/client/models/chatcompletionrequest.py: id: 9979805d8c38 - last_write_checksum: sha1:6442737fd5552e01ad78ab4cf8bc10e0d9c75d05 - pristine_git_object: 30fce28d5e071797a7180753f2825d39cfeac362 + last_write_checksum: sha1:95c0879e52d8b6c1ff389a5dfe1776129c764c00 + pristine_git_object: 4f7d071b5a0b84ef27397b4acaf4a798b6178eb8 src/mistralai/client/models/chatcompletionresponse.py: id: 669d996b8e82 - last_write_checksum: sha1:af8071e660b09437a32482cdb25fd07096edc080 - pristine_git_object: 60a1f561ff29c3bc28ee6aea69b60b9d47c51471 + last_write_checksum: sha1:97f164fea881127ac82303e637b6a270e200ac5b + pristine_git_object: 7092bbc18425091d111ec998b33edc009ff0931b src/mistralai/client/models/chatcompletionstreamrequest.py: id: 18cb2b2415d4 - last_write_checksum: sha1:512f4c05b140757888db465e2bb30a0abcafb1d4 - pristine_git_object: 21dad38bb83e9b334850645ffa24e1099b121f6c + last_write_checksum: sha1:12e794c89a954702c3d4dccddad9b365331bd996 + pristine_git_object: ec7d2ae131cf5fac7eb618bbe09340ac23d444ef src/mistralai/client/models/chatmoderationrequest.py: id: 057aecb07275 - last_write_checksum: sha1:6c24f39ddd835278773bd72cb2676e8f1fd10e73 - pristine_git_object: 631c914d1a4f4453024665eb0a8233ec7a070332 + last_write_checksum: sha1:e18a5ae518f5413b1bff45f85f823b60e00ef32a + pristine_git_object: a8d021e8deb2015470765340281789a7fba544aa src/mistralai/client/models/checkpointout.py: id: 3866fe32cd7c - last_write_checksum: sha1:c2b57fe880c75290b100904c26afaadd356fbe88 - pristine_git_object: 89189ed19dc521bc862da0aec1997bba0854def7 + last_write_checksum: sha1:5ed4988914acef48854337127c4ca51791de3ab9 + pristine_git_object: 3e8d90e920cd34ff611f5e875c0163e1a4087f6f 
src/mistralai/client/models/classificationrequest.py: id: 6942fe3de24a - last_write_checksum: sha1:3b99dba1f7383defed1254fba60433808184e8e7 - pristine_git_object: c724ff534f60022599f34db09b517f853ae7968d + last_write_checksum: sha1:c98f6751aeba813b968aaf69c3551972b94da4c8 + pristine_git_object: 903706c31176da4c2ab021b3bcaeb2217ca98f76 src/mistralai/client/models/classificationresponse.py: id: eaf279db1109 - last_write_checksum: sha1:0e09986f5db869df04601cec3793552d17e7ed04 - pristine_git_object: 4bc21a58f0fb5b5f29357f2729250030b7d961bc + last_write_checksum: sha1:64522aa2b0970e86a0133348411592f95163f374 + pristine_git_object: d2f09f430c4bca39ea9e5423b7d604ea4016fc70 src/mistralai/client/models/classificationtargetresult.py: id: 2445f12b2a57 - last_write_checksum: sha1:9325f4db4e098c3bf7e24cfc487788e272a5896f - pristine_git_object: 89a137c374efc0f8b3ee49f3434f264705f69639 + last_write_checksum: sha1:2b8b9aeadee3b8ffe21efd1e0c842f9094c4ecc7 + pristine_git_object: 6c7d6231d211977332100112900ea0f8cdf5d84c src/mistralai/client/models/classifierdetailedjobout.py: id: d8daeb39ef9f - last_write_checksum: sha1:7e6df794c49d75785fac3bf01ea467a2dcbd224b - pristine_git_object: 1de4534fcb12440a004e94bc0eced7483952581d + last_write_checksum: sha1:1b6dde6554e51d9100f2e50779eff56b3ca07603 + pristine_git_object: bc5c5381d61b6b4945b51dc9836bcc2e7aa66f9f src/mistralai/client/models/classifierftmodelout.py: id: 2903a7123b06 - last_write_checksum: sha1:78bfdfa3b9188c44fe4cd9cf18bce9e1d1a4cd48 - pristine_git_object: a4572108674ea9c209b6224597878d5e824af686 + last_write_checksum: sha1:5141a0c29da0739057c52b2345a386c79d6f8f85 + pristine_git_object: 182f4954c2b3f1408cb05eee76e2bf24005b023e src/mistralai/client/models/classifierjobout.py: id: e19e9c4416cc - last_write_checksum: sha1:7384ea39ff4c341e8d84c3a4af664298b31c1440 - pristine_git_object: ab1e261d573a30714042af3f20ed439ddbf1d819 + last_write_checksum: sha1:c5daf7e879911ea24fba847a1c12ab9774ebbe98 + pristine_git_object: 
03a5b11c46097733d609f3b075b58ef729f230a5 src/mistralai/client/models/classifiertargetin.py: id: ed021de1c06c - last_write_checksum: sha1:cd1c0b8425c752815825abaedab8f4e2589cbc8f - pristine_git_object: 231ee21e61f8df491057767eac1450c60e8c706a + last_write_checksum: sha1:8a1db343861e4f193a56d4030862c1f3a361d3e1 + pristine_git_object: b250109bd03976c93c571dbbacb1c631acd19717 src/mistralai/client/models/classifiertargetout.py: id: 5131f55abefe - last_write_checksum: sha1:4d9f66e3739f99ff1ea6f3468fe029d664541d58 - pristine_git_object: 957104a7bcc880d84ddefe39e58969b20f36d24c + last_write_checksum: sha1:304408da049ff4ad17f058267ffaa916ef907dc2 + pristine_git_object: 3d41a4d9c887488e7b08cc9d5d8dcb5b0fd26781 src/mistralai/client/models/classifiertrainingparameters.py: id: 4000b05e3b8d - last_write_checksum: sha1:a9d4eecd716bd078065531198f5a57b189caeb79 - pristine_git_object: 60f53c374ece9a5d336e8ab20c05c2d2c2d931f9 + last_write_checksum: sha1:4063f78ea65f138578bef4ce8908b04e556cc013 + pristine_git_object: f360eda504f0aa3f60ba6834aab59c1beb648151 src/mistralai/client/models/classifiertrainingparametersin.py: id: 4b33d5cf0345 - last_write_checksum: sha1:f50e68c14be4655d5cf80f6c98366d32bbd01869 - pristine_git_object: e24c9ddecf60c38e146b8f94ad35be95b3ea2609 + last_write_checksum: sha1:7764e6e6c5fc58e501c0891d036bbb22a8ddcb07 + pristine_git_object: 85360a7e7ba5212ef9052d3bd5f368ea4e2c4d98 src/mistralai/client/models/codeinterpretertool.py: id: 950cd8f4ad49 - last_write_checksum: sha1:9b720eaf4d7243e503e14350f457babbca9cf7af - pristine_git_object: faf5b0b78f2d9981bb02eee0c28bba1fdba795b9 + last_write_checksum: sha1:b014008db6ddce4b35aedec70783d74ce1b5cf83 + pristine_git_object: f69c7a5777af16df151589d2c5c8d81de4d28638 src/mistralai/client/models/completionargs.py: id: 3db008bcddca - last_write_checksum: sha1:4b4f444b06a286098ce4e5018ffef74b3abf5b91 - pristine_git_object: 010910f6f00a85b706a185ca5770fe70cc998905 + last_write_checksum: 
sha1:4c4ba2d39540bbb06fc1c49815fc6a7c8cf40ab2 + pristine_git_object: 918832acf3ea3d324c20e809fcdb1eae2ba3d7fd src/mistralai/client/models/completionargsstop.py: id: 5f339214501d - last_write_checksum: sha1:99912f7a10e92419308cf3c112c36f023de3fc11 - pristine_git_object: de7a09564540daaa6819f06195c347b3e01914f7 + last_write_checksum: sha1:744878976d33423327ea257defeff62073dad920 + pristine_git_object: 39c858e66380044e11d3c7fd705334d130f39dea src/mistralai/client/models/completionchunk.py: id: d786b44926f4 - last_write_checksum: sha1:e38d856ffefd3b72ff7034fa030ca0071caa0996 - pristine_git_object: 9790db6fe35e0043f3240c0f7e8172d36dee96f5 + last_write_checksum: sha1:04b634cffa4b0eb8ca177c91d62d333a061160df + pristine_git_object: 67f447d0c6cd97cb54ffcd0c620654629ac4e848 src/mistralai/client/models/completiondetailedjobout.py: id: 9bc38dcfbddf - last_write_checksum: sha1:0b0f7114471e650b877de2e149b69e772d29905f - pristine_git_object: 85c0c803cf809338900b7b8dcde774d731b67f8f + last_write_checksum: sha1:4771444753ff456829249d4e5fa5f71f2328fa78 + pristine_git_object: cd3a86ee28cdbf3a670d08f27642294321849ec0 src/mistralai/client/models/completionevent.py: id: c68817e7e190 - last_write_checksum: sha1:c29f7e8a5b357e15606a01ad23e21341292b9c5e - pristine_git_object: 52db911eeb62ec7906b396d6936e3c7a0908bb76 + last_write_checksum: sha1:dc43ac751e4e9d9006b548e4374a5ec44729eea4 + pristine_git_object: 3b90ab0c1ecac12f90e0ae3946a6b61410247e4f src/mistralai/client/models/completionftmodelout.py: id: 0f5277833b3e - last_write_checksum: sha1:6ae50b3172f358796cfeb154c7e59f9cdde39e61 - pristine_git_object: ccecbb6a59f2994051708e66bce7ece3598a786f + last_write_checksum: sha1:1c83e1d0a868eef32792844d787c5aaede0386b8 + pristine_git_object: 7ecbf54aabf022392e6d2ce2d0a354b9326eec79 src/mistralai/client/models/completionjobout.py: id: 712e6c524f9a - last_write_checksum: sha1:4f66641e3d765df1db88554b4399eded4625e08d - pristine_git_object: ecd95bb9c93412b222659e6f369d3ff7e13c8bb2 + 
last_write_checksum: sha1:2c8500593b8f9257a0a389f87792cd174fcd7209 + pristine_git_object: 42e5f6c65809aaaa02f0bf58fbf031f4c476208b src/mistralai/client/models/completionresponsestreamchoice.py: id: 5969a6bc07f3 - last_write_checksum: sha1:aa04c99a8bca998752b44fc3e2f2d5e24434a9bf - pristine_git_object: 1b8d6faccbe917aaf751b4efa676bf51c1dcd3ff + last_write_checksum: sha1:874d3553d4010a8b83484588dcbf9136bd8c6537 + pristine_git_object: 119a9690727ae296acf72dcfafdd224a61582599 src/mistralai/client/models/completiontrainingparameters.py: id: be202ea0d5a6 - last_write_checksum: sha1:fa4a0f44afeb3994c9273c5b4c9203eef810b957 - pristine_git_object: 36b285ab4f41209c71687a14c8650c0db52e165f + last_write_checksum: sha1:fd9a12417cd4f7bdc1e70ba05bbfef23b411ddd0 + pristine_git_object: 4b846b1b9bbcc4f2c13306169b715f08241e8f1c src/mistralai/client/models/completiontrainingparametersin.py: id: 0df22b873b5f - last_write_checksum: sha1:109503fabafd24174c671f2caa0566af2d46800e - pristine_git_object: d0315d9984575cb6c02bc6e38cedde3deef77b9a + last_write_checksum: sha1:a92e9df1d5be2a7f2d34b1dcde131e99e5ee351d + pristine_git_object: 20b74ad9fc0c50fe7d1d3dd97fcd3c296fbf7042 src/mistralai/client/models/contentchunk.py: id: c007f5ee0325 - last_write_checksum: sha1:a319b67206f4d0132544607482e685b46e2dce8c - pristine_git_object: 0a25423f9f9a95ced75d817ad7712747ce0915ae + last_write_checksum: sha1:5cedb52346bc34cb30950496d34ab87d591b6110 + pristine_git_object: eff4b8c670f47f53785690415751be05284f3d8b src/mistralai/client/models/conversationappendrequest.py: id: 81ce529e0865 - last_write_checksum: sha1:4f38d4aa2b792b113ef34ce54df3ac9b2efca5e1 - pristine_git_object: 867c0a414c1340033af7f6d03ea8cef2dcb8ff4a + last_write_checksum: sha1:83e883e4324d76d74521607390747ecdf7dffaa0 + pristine_git_object: 0f07475e4ca640ce50a6214fe59a91041a2e596a src/mistralai/client/models/conversationappendstreamrequest.py: id: 27ada745e6ad - last_write_checksum: sha1:41dcb9467d562bcc8feb885a56f73ac8d013c2d8 - 
pristine_git_object: f51407bf2a363f705b0b61ed7be4ef6249525af5 + last_write_checksum: sha1:12c3c63b763bd16398fcbec7d6fab41729ee81a6 + pristine_git_object: a0d46f727ff99d76a1bf26891df3b0ed80a88375 src/mistralai/client/models/conversationevents.py: id: 8c8b08d853f6 - last_write_checksum: sha1:4d7e8087fa9a074ed2747131c3753e723ba03e0b - pristine_git_object: 308588a1f094631935e4229f5538c5092f435d2c + last_write_checksum: sha1:6362a88ae26cb67f7abc3d2b0963f9a869c15371 + pristine_git_object: f24760381501f822593ef5903df0d32ca3cf9b47 src/mistralai/client/models/conversationhistory.py: id: 60a51ff1682b - last_write_checksum: sha1:637f7302571f51bcb5d65c51e6b6e377e8895b96 - pristine_git_object: 40bd1e7220160f54b0ab938b3627c77fb4d4f9ef + last_write_checksum: sha1:6fa8bdd370239df879da7b687c037405a8fbbe25 + pristine_git_object: 92d6cbf90c9c76945ee79752d5b4232aea10a79d src/mistralai/client/models/conversationinputs.py: id: 711b769f2c40 - last_write_checksum: sha1:3e8c4650808b8059c3a0e9b1db60136ba35942df - pristine_git_object: 4d30cd76d14358e12c3d30c22e3c95078ecde4bd + last_write_checksum: sha1:5fc688af61d6a49ede9c9709069f3db79f4dc615 + pristine_git_object: 7ce3ffc3772926a259d714b13bfc4ee4e518f8f7 src/mistralai/client/models/conversationmessages.py: id: 011c39501c26 - last_write_checksum: sha1:f71e85febab797d5c17b58ef8a1318545c974ed2 - pristine_git_object: 1ea05369b95fdaa7d7ae75398669f88826e5bb26 + last_write_checksum: sha1:408e26cb45dc1bdf88b1864d365e636307920df3 + pristine_git_object: 1aa294a497d2eb27a12dcbcce36c7956f6ee4f4e src/mistralai/client/models/conversationrequest.py: id: 58e3ae67f149 - last_write_checksum: sha1:0e3cdc7cb34cc8c7f646cc7c2869349747cfd47e - pristine_git_object: e3211c4c7b20c162473e619fad6dc0c6cea6b571 + last_write_checksum: sha1:f1b0b2b6a9c9b94ed5e3a77fb0b92e695f421a2e + pristine_git_object: 2005be82d8ebcf8c8fa74074abf25f072e795582 src/mistralai/client/models/conversationresponse.py: id: ad7a8472c7bf - last_write_checksum: 
sha1:ae6b273f3b1d1aff149d269a19c99d495fdf263e - pristine_git_object: 32d0f28f101f51a3ca79e4d57f4913b1c420b189 + last_write_checksum: sha1:8b625fe8808f239d6bc16ecf90ae1b7f42262c0c + pristine_git_object: 24598ef3fc24a61a0f15ab012aa211ba57cd0dcf src/mistralai/client/models/conversationrestartrequest.py: id: 681d90d50514 - last_write_checksum: sha1:76c5393b280e263a38119d98bdcac917afe36881 - pristine_git_object: aa2bf7b0dcdf5e343a47787c4acd00fe3f8bd405 + last_write_checksum: sha1:0ce81536464db32422165c35252770f3197fb38e + pristine_git_object: 35d3099361274440552e14934b6a1b19ebc8f195 src/mistralai/client/models/conversationrestartstreamrequest.py: id: 521c2b5bfb2b - last_write_checksum: sha1:5ba78bf9048b1e954c45242f1843eb310b306a94 - pristine_git_object: 689815ebcfe577a1698938c9ccbf100b5d7995f8 + last_write_checksum: sha1:b996f57271f0c521113913f48b31d54c17d73769 + pristine_git_object: 0ddfb130d662d954c3daabdf063172b8ea18a153 src/mistralai/client/models/conversationstreamrequest.py: id: 58d633507527 - last_write_checksum: sha1:d4cda0957f6d09ed991e3570b6e8ef81d3cf62af - pristine_git_object: 219230a2a8dd7d42cc7f5613ca22cec5fa872750 + last_write_checksum: sha1:fc4f2f1578fbeb959ddbe681dee2d11f0a4e6c5e + pristine_git_object: 379a8f2859b5f40cc744ad8f9bc6c39a198258b5 src/mistralai/client/models/conversationusageinfo.py: id: 6685e3b50b50 - last_write_checksum: sha1:7fa37776d7f7da6b3a7874c6f398d6f607c01b52 - pristine_git_object: 7a818c89a102fe88eebc8fec78a0e195e26cf85d - src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py: - id: 767aba526e43 - last_write_checksum: sha1:9a8f9917fc5de154e8a6fdb44a1dd7db55bb1de5 - pristine_git_object: 1cd36128a231a6d4be328fde53d1f048ff7c2ccd + last_write_checksum: sha1:60f91812b9b574b3fade418cc7c2191253f6abbf + pristine_git_object: 98db0f1617bd7484750652997dcd43d08ef7c5fc + src/mistralai/client/models/createfinetuningjobop.py: + id: fd3c305df250 + last_write_checksum: sha1:e29ada8f733de44bfeab2885d2221ade84b34619 + 
pristine_git_object: f55deef5d9f6134fddb02c458a0d812759cea358 + src/mistralai/client/models/createorupdateagentaliasop.py: + id: a79cf28bda01 + last_write_checksum: sha1:d4f2790b5970c9cf30b3fcee9d8bc6d4b8c33778 + pristine_git_object: cde1dd054c447a8617527585e783a95affba3277 + src/mistralai/client/models/deleteagentaliasop.py: + id: e4d0d7f75b24 + last_write_checksum: sha1:66e34ba7fb1a238d55c7ed380bd666c8975c01b4 + pristine_git_object: c52d099e9c1f28bf37ee009833b5fb8e351ed987 + src/mistralai/client/models/deleteagentop.py: + id: 089fb7f87aea + last_write_checksum: sha1:a196bcc758e36ffeb17fab25bb60451d3d66a4d8 + pristine_git_object: 8b14bca7bf5d67e16181b67ef6b7375c1b0a93fd + src/mistralai/client/models/deleteconversationop.py: + id: 86fefc353db0 + last_write_checksum: sha1:48f33b614ec087fdaf2b29d9c3eefd6e8d7d311f + pristine_git_object: 39607f40640c6dfa3ef20d913a90abee602b9b4a + src/mistralai/client/models/deletedocumentop.py: + id: 62522db1ccf2 + last_write_checksum: sha1:1a4e2e72a0d3cd24e184ce3cc5037f5ec7cdd9a5 + pristine_git_object: 400070a49bc046d8132bfc7dfe3e114faa719001 + src/mistralai/client/models/deletefileop.py: + id: 286b4e583638 + last_write_checksum: sha1:2561c1fe03ec3915dfa48fa354a86a56ba9b54c4 + pristine_git_object: 4feb7812f8acfa366e4b46fc914925df4f705528 src/mistralai/client/models/deletefileout.py: id: 5578701e7327 - last_write_checksum: sha1:76d209f8b3bba5e4bc984700fe3d8981c9d6142b - pristine_git_object: b25538bee35dedaae221ea064defb576339402c8 + last_write_checksum: sha1:a34520be2271c1e37fa8b3c1bdead843db7b1bb9 + pristine_git_object: c721f32cfe752c2c084efb72db3e5409795e387a + src/mistralai/client/models/deletelibraryaccessop.py: + id: df80945bcf19 + last_write_checksum: sha1:065aad372e0bbfd998fe3adc3389e3dbc9d5b674 + pristine_git_object: ca14c3ffc43be3aee14d6aa1f4805f0483d8b676 + src/mistralai/client/models/deletelibraryop.py: + id: cd0ce9bf8d51 + last_write_checksum: sha1:07840cbdb741bba291f1db1a1b54daca99e8f7ea + pristine_git_object: 
5eb6fc310aa62454e3f7ed0766212c807125fe8c + src/mistralai/client/models/deletemodelop.py: + id: 2c494d99a44d + last_write_checksum: sha1:97dce35d527e03612068896572824cc0f13269c1 + pristine_git_object: 55c4b2422336ef6e148eedbd4a6a60846d187e9b src/mistralai/client/models/deletemodelout.py: id: ef6a1671c739 - last_write_checksum: sha1:ef2f6774eaf33c1c78368cd92bc4108ecccd9a6c - pristine_git_object: 5aa8b68fe3680d3b51127d6a6b6068b1303756e8 + last_write_checksum: sha1:4606683ef6da0aae7e88bc50144eddc83908f9d7 + pristine_git_object: bf22ed177ee91dce98bfd9b04f02e683c79e4860 src/mistralai/client/models/deltamessage.py: id: 68f53d67a140 - last_write_checksum: sha1:db65faf32a4abc2396eb1f694d3245fcc4173e2f - pristine_git_object: 0ae56da86f645e5a0db2a0aa4579342610243300 + last_write_checksum: sha1:ff7fa85086bd56863f7f4a255b008cfaa11a959c + pristine_git_object: fbb8231a310e90afd50951dd0f572ce3e0f029e6 src/mistralai/client/models/documentlibrarytool.py: id: 3eb3c218f457 - last_write_checksum: sha1:3f3dafea3df855f1fccaa6ece64df55b40b2d4f7 - pristine_git_object: 861a58d38125ca5af11772ebde39a7c57c39ad9c + last_write_checksum: sha1:e5bfb61a4a03a3b28837c27195f1bcd8cc14c6b2 + pristine_git_object: ff0f739391404604c1cc592c23507946aa0b693f src/mistralai/client/models/documentout.py: id: 7a85b9dca506 - last_write_checksum: sha1:2de0e0f9be3a2362fbd7a49ff664b43e4c29a262 - pristine_git_object: 39d0aa2a5a77d3eb3349ae5e7b02271c2584fe56 + last_write_checksum: sha1:f041a4866c67d1f81f62282918d625216a760355 + pristine_git_object: 3b1a5713c84512947a07d153792b17fcf3262dcb src/mistralai/client/models/documenttextcontent.py: id: e730005e44cb - last_write_checksum: sha1:ad7e836b5f885d703fd5f09c09aba0628d77e05b - pristine_git_object: b1c1aa073dff4dcdc59d070058221b67ce9e36f9 + last_write_checksum: sha1:c86f4b15e8fda1cd5c173da01462342cd22b7286 + pristine_git_object: b6904cb4267347b62a457a01b91a391500326da9 src/mistralai/client/models/documentupdatein.py: id: d19c1b26a875 - last_write_checksum: 
sha1:bad1cee0906961f555784e03c23f345194959077 - pristine_git_object: 02022b89ef2b87349e0d1dc4cccc3d1908a2d1aa + last_write_checksum: sha1:bddd412de340d050cfbdd4206a9fbb3d1660a045 + pristine_git_object: 669554de5d33f6163c8d08fefee52c1869662eba src/mistralai/client/models/documenturlchunk.py: id: 4309807f6048 - last_write_checksum: sha1:1253bdbe1233481622b76e340413ffb1d8996f0e - pristine_git_object: 00eb55357f19ac4534446e0ee761bdbccfb471e2 + last_write_checksum: sha1:186a684da48bb5d237769ecb3dbf1479a5c5ee55 + pristine_git_object: 304cde2b687e71b0d2fb0aee9b20826473375b25 + src/mistralai/client/models/downloadfileop.py: + id: 4d051f08057d + last_write_checksum: sha1:b80c5332cfdb043bb56f686e4e1c4bf26495b04b + pristine_git_object: fcdc01d644bdce8d1fc7896b5f8244a7a5311dfa src/mistralai/client/models/embeddingdtype.py: id: 77f9526a78df - last_write_checksum: sha1:962f629fa4ee8a36e731d33f8f730d5741a9e772 - pristine_git_object: 26eee779e12ae8114a90d3f18f99f3dd50e46b9e + last_write_checksum: sha1:a4e2ce6d00e6d1db287a5d9f4254b0947227f337 + pristine_git_object: 732c4ebe3678563ebcdbafd519f93317261586fb src/mistralai/client/models/embeddingrequest.py: id: eadbe3f9040c - last_write_checksum: sha1:c4f85f5b768afb0e01c9a9519b58286804cfbd6b - pristine_git_object: 1dfe97c8fa2162719d2a68e7a0ef2f348efa1f88 + last_write_checksum: sha1:6071612944c4c603803cc7f2adc1e9784549c70f + pristine_git_object: f4537ffa9bdc0a9a73101e1b1524fed1a09c1a65 src/mistralai/client/models/embeddingresponse.py: id: f7d790e84b65 - last_write_checksum: sha1:285531abf3a45de3193ed3c8b07818faac97eb32 - pristine_git_object: 64a28ea9f1c57ed6e69e1d49c5c83f63fa38fd36 + last_write_checksum: sha1:9bb53a5a860c8e10d4d504648d84da73068c0a83 + pristine_git_object: 6ffd68941f32f396998df9dded14ff8365926608 src/mistralai/client/models/embeddingresponsedata.py: id: 6d6ead6f3803 - last_write_checksum: sha1:ed821591832ebfa03acd0ce0a3ca5a0521e6fa53 - pristine_git_object: ebd0bf7b29e0a1aee442337fd02ce562fb2c5a3d + last_write_checksum: 
sha1:3e2430e6bd9b3c77a564f4e56edec1274446a1f4 + pristine_git_object: a689b290d5a4b360e409413c96bb5e7288ce2e2e src/mistralai/client/models/encodingformat.py: id: b51ec296cc92 - last_write_checksum: sha1:f9a3568cd008edb02f475a860e5849d9a40d0246 - pristine_git_object: be6c1a14e4680e24e70b8bbda018759056b784ca + last_write_checksum: sha1:ea907f86b00323d99df37f7ff45d582aace798e7 + pristine_git_object: 4a39d0295f9069ae9f749cf21dab450eaf145d19 src/mistralai/client/models/entitytype.py: id: 62d6a6a13288 - last_write_checksum: sha1:baefd3e820f1682bbd75ab195d1a47ccb3d16a19 - pristine_git_object: 9c16f4a1c0e61f8ffaee790de181572891db3f89 + last_write_checksum: sha1:015e2db9e8e5a3e4ce58442ccedaf86c66239dde + pristine_git_object: 56d82cbed237f32a8b00cfee4042dfe3e7053bcb src/mistralai/client/models/eventout.py: id: da8ad645a9cb - last_write_checksum: sha1:326b575403d313c1739077ad6eb9047ded15a6f5 - pristine_git_object: 5e118d4599e935bcd6196a7cbc1baae8f4a82752 + last_write_checksum: sha1:67f7cc29102a971d33b6cbbcb06ffcfe595227a5 + pristine_git_object: a0247555bb816061cb22f882406c11c3a9011818 src/mistralai/client/models/file.py: id: f972c39edfcf - last_write_checksum: sha1:40ddf9b7e6d3e9a77899cd9d32a9ac921c531c87 - pristine_git_object: a8bbc6fab46a49e7171cabbef143a9bbb48e763c + last_write_checksum: sha1:8d0adce8f4dfc676f6da6465547a0d187d4326f1 + pristine_git_object: dbbc00b50e5578230daefa47648954ead8ed8eb9 src/mistralai/client/models/filechunk.py: id: ff3c2d33ab1e - last_write_checksum: sha1:9ae8d68bfcb6695cce828af08e1c9a9ce779f1f3 - pristine_git_object: d8b96f69285ea967397813ae53722ca38e8d6443 + last_write_checksum: sha1:9f970ef8366df8087f9332a4b1986540063a1949 + pristine_git_object: 43ef22f861e0a275c7348133d0c4d04551477646 src/mistralai/client/models/filepurpose.py: id: a11e7f9f2d45 - last_write_checksum: sha1:154a721dbd5e0c951757a596a96e5d880ecf4982 - pristine_git_object: eef1b08999956fd45fe23f2c03bb24546207b4e3 - src/mistralai/client/models/files_api_routes_delete_fileop.py: - id: 
2f385cc6138f - last_write_checksum: sha1:e7b7ad30a08b1033ecd5433da694f69a91029bfc - pristine_git_object: b71748669906990998cc79345f789ed50865e110 - src/mistralai/client/models/files_api_routes_download_fileop.py: - id: 8184ee3577c3 - last_write_checksum: sha1:7781932cc271d47a2965217184e1dd35a187de3f - pristine_git_object: fa9e491a95625dbedde33bc9ea344aaebf992902 - src/mistralai/client/models/files_api_routes_get_signed_urlop.py: - id: 0a1a18c6431e - last_write_checksum: sha1:797201cde755cf8e349b71dc2ff7ce56d1eabb73 - pristine_git_object: a05f826232396957a3f65cb1c38c2ae13944d43b - src/mistralai/client/models/files_api_routes_list_filesop.py: - id: b2e92f2a29b4 - last_write_checksum: sha1:711cc470b8dedefd2c2c7e2ae7dfa6c4601e0f30 - pristine_git_object: ace996318446667b2da3ca2d37bd2b25bcfbb7a7 - src/mistralai/client/models/files_api_routes_retrieve_fileop.py: - id: 5d5dbb8d5f7a - last_write_checksum: sha1:ea34337ee17bdb99ad89c0c6742fb80cb0b67c13 - pristine_git_object: 4a9678e5aa7405cbe09f59ffbdb6c7927396f06a - src/mistralai/client/models/files_api_routes_upload_fileop.py: - id: f13b84de6fa7 - last_write_checksum: sha1:2ca94437630dddc55c6dd624d715963b19b97a73 - pristine_git_object: 723c6cc264613b3670ac999829e66131b8424849 + last_write_checksum: sha1:8b167c02f9f33e32d5fd1c6de894693924f4d940 + pristine_git_object: 49a5568ff82ad4a85e15c8de911e8d6c98dcd396 src/mistralai/client/models/fileschema.py: id: 19cde41ca32a - last_write_checksum: sha1:29fe7d4321fc2b20ae5fa349f30492aeb155c329 - pristine_git_object: 9ecde454f0dac17997ef75e5cdb850cccc8020fe + last_write_checksum: sha1:245115d1f955324bce2eeb3220bdaa6906b28e92 + pristine_git_object: cbe9b0d17ad15ce02e9fd973fe49666885c6ff92 src/mistralai/client/models/filesignedurl.py: id: a1754c725163 - last_write_checksum: sha1:0987cc364694efd61c62ba15a57cfb74aa0d0cc8 - pristine_git_object: cbca9847568ab7871d05b6bb416f230d3c9cddfc + last_write_checksum: sha1:5d981b1743aa2d84818597b41a5f357b4256e9e0 + pristine_git_object: 
53dff812ffe5c5859794424d49f8bd7f735cf3b0 src/mistralai/client/models/fimcompletionrequest.py: id: cf3558adc3ab - last_write_checksum: sha1:a62845c9f60c8d4df4bfaa12e4edbb39dcc5dcb7 - pristine_git_object: c9eca0af3ccacfd815bfb8b11768e289b4828f4e + last_write_checksum: sha1:db51cde0b13bb373097f2c158b665ccb3c5789f4 + pristine_git_object: e2f6032784c996d18c100b8b2cde4bb4432af884 src/mistralai/client/models/fimcompletionresponse.py: id: b860d2ba771e - last_write_checksum: sha1:00b5b7146932f412f8230da7164e5157d267a817 - pristine_git_object: 8a2eda0ced48f382b79e5c6d7b64b0c5f0b16c15 + last_write_checksum: sha1:dffd5a7005999340f57eaa94e17b2c82ddc7fd90 + pristine_git_object: 1345a116b7855ab4b824cf0369c0a5281e44ea97 src/mistralai/client/models/fimcompletionstreamrequest.py: id: 1d1ee09f1913 - last_write_checksum: sha1:9260ae9a12c37b23d7dfa8ec6d3029d1d8a133ed - pristine_git_object: 2954380238dec5540e321012b8aa6609e404114c + last_write_checksum: sha1:df973050b942b844280bf98f0a3abc90bd144bbb + pristine_git_object: 480ed17ab006e7afa321a91c5ccebd6380f8f60c src/mistralai/client/models/finetuneablemodeltype.py: id: 05e097395df3 - last_write_checksum: sha1:8694f016e8a4758308225b92b57bee162accf9d7 - pristine_git_object: f5b8b2ed45b56d25b387da44c398ae79f3a52c73 + last_write_checksum: sha1:daf4cd1869da582981023dea1074268da071e16a + pristine_git_object: 7b924bd7abc596f0607a513eee30e98cbf7ab57a src/mistralai/client/models/ftclassifierlossfunction.py: id: d21e2a36ab1f - last_write_checksum: sha1:9554b17b3139b54975aae989fb27e1c369bee4cd - pristine_git_object: c4ef66e0fe69edace4912f2708f69a6e606c0654 + last_write_checksum: sha1:ca90e2f1cd0b9054293bea304be0867c93f7fac2 + pristine_git_object: ccb0f21b5a69f91119bec9db6e9f3d876e4c35af src/mistralai/client/models/ftmodelcapabilitiesout.py: id: f70517be97d4 - last_write_checksum: sha1:44260fefae93bc44a099ff64eeae7657c489005c - pristine_git_object: be31aa3c14fb8fe9154ad8f54e9bf43f586951c7 + last_write_checksum: 
sha1:2bc7700ad89b7aab37fa02fcb6d9282bc252315e + pristine_git_object: 42269b785d9d5ad2257179f2c093c62637fb5dd6 src/mistralai/client/models/ftmodelcard.py: id: c4f15eed2ca2 - last_write_checksum: sha1:ab559da7dd290e4d2be5c6a3398732de887b2a74 - pristine_git_object: 36cb723df8bcde355e19a55105932298a8e2e33a + last_write_checksum: sha1:7441e4155beaa97cea47b6295017f567dd6eee1a + pristine_git_object: 570e95e2276b144e008e9ccf6a108faa1fc835f5 src/mistralai/client/models/function.py: id: 32275a9d8fee - last_write_checksum: sha1:f98db69c2fb49bbd6cff36fb4a25e348db6cd660 - pristine_git_object: 6e2b52edbd8d7cb6f7654eb76b7ca920636349cf + last_write_checksum: sha1:356a2c6c9d2437e60036a9b3d1a3d154302363c8 + pristine_git_object: 3632c1afb40aebab0795f754814036e04c251469 src/mistralai/client/models/functioncall.py: id: 393fca552632 - last_write_checksum: sha1:ef22d048ddb5390f370fcf3405f4d46fa82ed574 - pristine_git_object: 6cb6f26e6c69bc134bcb45f53156e15e362b8a63 + last_write_checksum: sha1:6e96e9abaa9b7625a9a30e376c31b596ee9defcb + pristine_git_object: 527c3ad408e1e1ccfe6301a8860e7f751e1d312d src/mistralai/client/models/functioncallentry.py: id: cd058446c0aa - last_write_checksum: sha1:661372b1ff4505cf7039ece11f12bb1866688bed - pristine_git_object: fce4d387df89a9fa484b0c7cc57556ea13278469 + last_write_checksum: sha1:6ece3816c50bd04b908743ad62e2dc71d815842a + pristine_git_object: 6ada1d358641a23bc83b93f222eeff659a124b34 src/mistralai/client/models/functioncallentryarguments.py: id: 3df3767a7b93 - last_write_checksum: sha1:6beb9aca5bfc2719f357f47a5627c9edccef051f - pristine_git_object: ac9e6227647b28bfd135c35bd32ca792d8dd414b + last_write_checksum: sha1:9858feba8f7f01017f10477a77dec851a1d06e55 + pristine_git_object: afe81b24e131a8ef879ee7f140271aa762b8ed2f src/mistralai/client/models/functioncallevent.py: id: 23b120b8f122 - last_write_checksum: sha1:c0226ca734320b628223f5c5206477b224dff15e - pristine_git_object: 4e040585285985cebc7e26ac402b6df8f4c063bb + last_write_checksum: 
sha1:cb63fb3cfb4debfca7b207b49e592566619f84b1 + pristine_git_object: 5d871a0e0f15cc27afe3c861f387609aa9a8a17f src/mistralai/client/models/functionname.py: id: 000acafdb0c0 - last_write_checksum: sha1:03d7b26a37311602ae52a3f6467fe2c306c468c1 - pristine_git_object: 2a05c1de42a6ff5775af5509c106eaa7b391778e + last_write_checksum: sha1:4145b7b817b712b85dcbedb309416c7ba72d827e + pristine_git_object: 07d98a0e65ccbcba330fb39c7f23e26d3ffc833c src/mistralai/client/models/functionresultentry.py: id: 213df39bd5e6 - last_write_checksum: sha1:7e6d951cfd333f9677f4c651054f32658794cc48 - pristine_git_object: a843bf9bdd82b5cf3907e2172ed793a391c5cba2 + last_write_checksum: sha1:04a8fd7396777c412fa9c73c0bef148b2ab53cb2 + pristine_git_object: ca73cbb7481fe0e97b354e9abe5ef6034f10bd98 src/mistralai/client/models/functiontool.py: id: 2e9ef5800117 - last_write_checksum: sha1:af5e38a4498149f46abd63eda97f9ccfb66a1fa3 - pristine_git_object: 74b50d1bcd2bc0af658bf5293c8cc7f328644fa1 + last_write_checksum: sha1:5c4ea61a1bccd87e1aae06bfa728c29a4ec60c54 + pristine_git_object: 13b0449687f64848cb2f2fdf792f148f9e3cfed9 + src/mistralai/client/models/getagentop.py: + id: 5a28bb1e727e + last_write_checksum: sha1:50a681253a1075f1268a269cd67154efa35dff6a + pristine_git_object: 55d8fe6860fa4c868c4d6d5d5d2ce4571e9071b4 + src/mistralai/client/models/getagentversionop.py: + id: a0db5a6aab1f + last_write_checksum: sha1:d1dfc0927abcae22460838902d1f5ddc2a224856 + pristine_git_object: 77b8a2662939e03b261f713aa7d9676746a4df1e + src/mistralai/client/models/getbatchjobop.py: + id: 443103fe3b88 + last_write_checksum: sha1:3a7f9656f3d169c60f0d3f16b00c4136d193468e + pristine_git_object: 792c3e2121902734094a7224c8605109fc697f44 + src/mistralai/client/models/getconversationhistoryop.py: + id: c863a4cbeb34 + last_write_checksum: sha1:4e04b4550c7b48635eca1943bcfee64027f0e7ca + pristine_git_object: c1fbf3de4ee966fffa2400a9c109d952b26543da + src/mistralai/client/models/getconversationmessagesop.py: + id: bb8a90ba7c22 + 
last_write_checksum: sha1:1b7aad5c74338aeecb11de44d8378aaa75498e37 + pristine_git_object: 6666198edce05a99c55f1c35f26f6d3b548c9b0d + src/mistralai/client/models/getconversationop.py: + id: 1a622b8337ac + last_write_checksum: sha1:4665e81fae4f12fabc09629f32d28c1c2de2bcf2 + pristine_git_object: d204d1755b4dc23ba8397ad24fec30bd064eacce + src/mistralai/client/models/getdocumentextractedtextsignedurlop.py: + id: 69099395d631 + last_write_checksum: sha1:f6d5e8499a314e903301e419fb206c33644363ff + pristine_git_object: 9a71181d3abd625643e741c562fe73f25bf12932 + src/mistralai/client/models/getdocumentop.py: + id: de89ff93d373 + last_write_checksum: sha1:4d1f358dfe3b44ccd2a88aea6730fbaf4b5f1d93 + pristine_git_object: d7b07db791a3adb3992475f0cf49c3fe01007ad9 + src/mistralai/client/models/getdocumentsignedurlop.py: + id: b8d95511c6d1 + last_write_checksum: sha1:255a0b505d558db3149652822718c7bcecc706e8 + pristine_git_object: e5d56c54c1ffc3529a8d1cf013bcb3327392b269 + src/mistralai/client/models/getdocumentstatusop.py: + id: f1f40b8f003f + last_write_checksum: sha1:c442daff8adb3db0ac58b03e54b7c05c82b202a9 + pristine_git_object: 4206f593ca58650f9df17b377b67c374a1b0d883 + src/mistralai/client/models/getdocumenttextcontentop.py: + id: ba23717093ef + last_write_checksum: sha1:33f047af38e4be2b71f4d90a36614ea7ab096a28 + pristine_git_object: 8a7b4aae025bbcb5ade5d4d36f2bb5e34cbb315e + src/mistralai/client/models/getfilesignedurlop.py: + id: 1aa50b81c8cf + last_write_checksum: sha1:a8fb95f119d173dd1d7afed02597a297dbbc7a89 + pristine_git_object: 06ed79eea058d4ebffc5d0b87ae2d06a32f4755a + src/mistralai/client/models/getfinetuningjobop.py: + id: afe997f96d69 + last_write_checksum: sha1:25db6d0d336a78189b603bbce16b0e0de84a33f1 + pristine_git_object: 1fb732f48a1a4c2993185a6a272879a83c80dc06 + src/mistralai/client/models/getlibraryop.py: + id: c84a92e23a90 + last_write_checksum: sha1:d51c0cf40a6ed398b0cb7078fe897d047b55e251 + pristine_git_object: bc0b4a238b146c6e5853e0b9d3031a876f30bc17 
src/mistralai/client/models/githubrepositoryin.py: id: eef26fbd2876 - last_write_checksum: sha1:7736d0a475b47049c35aec59254c5d47b3ae609b - pristine_git_object: e56fef9ba187792238991cc9373a7d2ccf0b8c0d + last_write_checksum: sha1:cc98805951c3f80d9b8f0ba4037cf451551b0742 + pristine_git_object: e55389c380416f69ed7dc085cbbaaba056c4d1ba src/mistralai/client/models/githubrepositoryout.py: id: d2434a167623 - last_write_checksum: sha1:5d9625805bf6eb3c061ebdd73433ca2001e26cb1 - pristine_git_object: e3aa9ebc52e8613b15e3ff92a03593e2169dc935 + last_write_checksum: sha1:76d98ac7613e626599cb4c7a0b0366e9b20815ff + pristine_git_object: 514df01c217b40d8c050839ac40b938c68ef1bf6 src/mistralai/client/models/httpvalidationerror.py: id: 4099f568a6f8 - last_write_checksum: sha1:81432fd45c6faac14a6b48c6d7c85bbc908b175c - pristine_git_object: 34d9b54307db818e51118bc448032e0476688a35 + last_write_checksum: sha1:be2db0d4ec07da0ddb37878761545c3dde8fb8ec + pristine_git_object: e7f0a35bf208c32086c7b448273d1133d0f1027b src/mistralai/client/models/imagegenerationtool.py: id: e1532275faa0 - last_write_checksum: sha1:e5d4c986062850ce3ba4f66a8347848332192c21 - pristine_git_object: e09dba81314da940b2be64164e9b02d51e72f7b4 + last_write_checksum: sha1:85122792c3ba324086096345119fedf326f55c86 + pristine_git_object: 680c6ce2d08277e65e23ea3060e83c1fa4accb78 src/mistralai/client/models/imageurl.py: id: e4bbf5881fbf - last_write_checksum: sha1:d300e69742936f6e6583f580091827ada7da6c20 - pristine_git_object: 6e61d1ae2ec745774345c36e605748cf7733687b + last_write_checksum: sha1:9af5cff0b3a2c1c63e2bd1f998dcfeab273fd206 + pristine_git_object: 4ff13b1ccbc157f21013aacd7a062e89a26dcbf9 src/mistralai/client/models/imageurlchunk.py: id: 746fde62f637 - last_write_checksum: sha1:f6c19195337e3715fac3dc874abfc2333d661c8e - pristine_git_object: f967a3c8ced6d5fb4b274454100134e41c5b7a5c + last_write_checksum: sha1:57e48972720a3e317291250d6d94c44d295b69f5 + pristine_git_object: 993185cce833c59ad341b977cf9263654951fa03 
src/mistralai/client/models/inputentries.py: id: 44727997dacb - last_write_checksum: sha1:afc03830974af11516c0b997f1cd181218ee4fb0 - pristine_git_object: 8ae29837a6c090fbe1998562684d3a372a9bdc31 + last_write_checksum: sha1:44ef8e75dd43b82276a0f06ef5c6be9eed46b379 + pristine_git_object: dc9892956f0e2583c51bf49ef89adbd22b8646d5 src/mistralai/client/models/inputs.py: id: 84a8007518c7 - last_write_checksum: sha1:62cf4c19b48f68f57f30223d48d06e33d08ae096 - pristine_git_object: fb0674760c1191f04e07f066e84ae9684a1431e3 + last_write_checksum: sha1:871491fa3b24315bc1bddf371334381f75ab035d + pristine_git_object: cfcdeb3d5895ccb34512c2a0a2e799e763e09c09 src/mistralai/client/models/instructrequest.py: id: 6d3ad9f896c7 - last_write_checksum: sha1:5fabc65cccf9f17ffbd20cd176341b4d78b62a5c - pristine_git_object: 1b2f269359700582687fdf4492ea3cef64da48bb + last_write_checksum: sha1:b56a77442b50b50151adedaa5ec356dc96c56428 + pristine_git_object: e5f9cccf174d8e73c42e8ee4aa294b43e1ad6cf5 src/mistralai/client/models/jobin.py: id: f4d176123ccc - last_write_checksum: sha1:ae6b1d9bc202db7a49d29f85b75bffea605126c5 - pristine_git_object: dc7684fcbecd558fc6e3e3f17c4000ec217285c1 + last_write_checksum: sha1:478a9beaf1c5ada536f5c333a47aa2ac0900bd16 + pristine_git_object: b3cb8998b5b0ce00414e40643eb3e259b2c0aabf src/mistralai/client/models/jobmetadataout.py: id: 805f41e3292a - last_write_checksum: sha1:5f84c58dab92d76de8d74f2e02cdf7b2b4c9cc12 - pristine_git_object: f91e30c09232b5227972b3b02ba5efbde22ac387 - src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py: - id: b56cb6c17c95 - last_write_checksum: sha1:e5e2c422bb211bb4af3e8c1a4b48e491d0fdf5a4 - pristine_git_object: 21a04f7313b3594a204395ca080b76e2a4958c63 - src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py: - id: 36b5a6b3ceee - last_write_checksum: sha1:5ada7f2b7a666f985c856a6d9cab1969928c9488 - pristine_git_object: 32e34281cd188f4d6d23d100fe0d45002030c56b - 
src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py: - id: d8f0af99c94d - last_write_checksum: sha1:3026ea0231866e792dd3cf83eb2b2bac93eda61b - pristine_git_object: 3557e773860e94d85f7a528d000f03adfcc60c2f - src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py: - id: 34f89d2af0ec - last_write_checksum: sha1:2a7225666b02d42be0d3455a249a962948feadf9 - pristine_git_object: 4536b738442ec9710ddf67f2faf7d30b094d8cd5 - src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: - id: d175c6e32ecb - last_write_checksum: sha1:c61f02640ec384778e6f6b1f08dcb31dc5c1fb82 - pristine_git_object: b36d3c3ef5abb30abc886876bb66384ea41bab9e - src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: - id: 81651291187a - last_write_checksum: sha1:80bc2d32588a115c4ac5571a3c1ffc8a24ab9d45 - pristine_git_object: ece0d15a0654ec759904276ad5d95c5619ff016f - src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: - id: d910fd8fe2d6 - last_write_checksum: sha1:4f57772cda3075251f36c52a264ebce1328cb486 - pristine_git_object: aa5a26098e084885e8c2f63944e7549969899d3c - src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: - id: cf43028824bf - last_write_checksum: sha1:e7bb3855dabfcaf7b92e6917e6da39246fc01282 - pristine_git_object: 7e399b31354e4f09c43efbe9ffe3d938f6af0d8c - src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: - id: e7ff4a4a4edb - last_write_checksum: sha1:21d90c0a3fa151bd855d63ed241f518812f26f82 - pristine_git_object: ed5938b039be719169e62e033b7735bde7e72503 - src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: - id: 7cc1c80335a9 - last_write_checksum: sha1:4270cb52e5aef807ec2d8a9ab1ca1065b0cf8a10 - pristine_git_object: e1be0ac00af889a38647b5f7e4f9d26ed09ee7c4 - src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: - id: 
6d9dc624aafd - last_write_checksum: sha1:ad615dd8d493fec4f818f19e5745ff52575181aa - pristine_git_object: a2b70b37e349c7f5fc6c687fbad015eb218de952 + last_write_checksum: sha1:1333181d5a3dff43076095f61e1d57f37085abbe + pristine_git_object: 1d386539d8c638d96b8f468cfca3241dfc07a9f3 src/mistralai/client/models/jobsout.py: id: 22e91e9631a9 - last_write_checksum: sha1:1bb48570e040fa9ad4408b41fef8ce4ec0bf52be - pristine_git_object: 9087704f0660e39f662efbd36f39713202598c43 + last_write_checksum: sha1:e9434f43df7df8e991eb0387eabcf308cae3cb65 + pristine_git_object: a4127a5d835c0f0ead04980f05cb293e18970905 src/mistralai/client/models/jsonschema.py: id: e1fc1d8a434a - last_write_checksum: sha1:6289875b78fab12efa9e3a4aa4bebdb08a95d332 - pristine_git_object: db2fa55ba9001bd3715451c15e9661a87ff7501a + last_write_checksum: sha1:6711508e9c1bd20fc8b1bfdbd1181ca29144ef0d + pristine_git_object: 948c94ed8fe8102a9cdced68fde6be03489f5778 src/mistralai/client/models/legacyjobmetadataout.py: id: 4f44aa38c864 - last_write_checksum: sha1:b6aba9032bb250c5a23f2ff2a8521b7bddcd1a06 - pristine_git_object: 155ecea78cb94fc1a3ffaccc4af104a8a81c5d44 - src/mistralai/client/models/libraries_delete_v1op.py: - id: b2e8bbd19baa - last_write_checksum: sha1:566db1febc40c73476af31a27201a208b64bc32a - pristine_git_object: fa447de067518abb355b958954ff9a3ee9b2cf6d - src/mistralai/client/models/libraries_documents_delete_v1op.py: - id: 81eb34382a3d - last_write_checksum: sha1:c7bd801e5f75d1716101721cd3e711be978cb7c5 - pristine_git_object: bc5ec6e5443b32d47e570c4f43c43827928a3e39 - src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py: - id: a7417ebd6040 - last_write_checksum: sha1:a298e22d9a68de87288419717b03273c1a26de6e - pristine_git_object: 24ed897d305cfccdc2b9717e214da901479cc70e - src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py: - id: d4b7b47913ba - last_write_checksum: sha1:0855bb39a09514fb5709bd3674eb5fcc618299f1 - pristine_git_object: 
350c8e73992583b7890889c5ff252096a8df7fbd - src/mistralai/client/models/libraries_documents_get_status_v1op.py: - id: f314f73e909c - last_write_checksum: sha1:ca4679fbdc833b42e35b4c015ddf8434321d86eb - pristine_git_object: 92b077d3b5850985cac73ee880de7eab31a5b8fd - src/mistralai/client/models/libraries_documents_get_text_content_v1op.py: - id: 1ca4e0c41321 - last_write_checksum: sha1:8dbd91ab145d4c01e91502c9349477e1f98551d7 - pristine_git_object: 68f9725a1a390028e3118611bb0df1b4ab103943 - src/mistralai/client/models/libraries_documents_get_v1op.py: - id: 26ff35f0c69d - last_write_checksum: sha1:208b7ca22416295d27f51513e3fe58947e1549c7 - pristine_git_object: a67e687eaffebbee81654bbbb78ad00bcc28999c - src/mistralai/client/models/libraries_documents_list_v1op.py: - id: 756f26de3cbe - last_write_checksum: sha1:a742a58c137ecf1cfd7446d5f2f60211ff087751 - pristine_git_object: 5dec33858719e713c0fa07538aa0dfcab8d69dad - src/mistralai/client/models/libraries_documents_reprocess_v1op.py: - id: dbbeb02fc336 - last_write_checksum: sha1:516691f61c18e18b96738360a85acd34ba415ca0 - pristine_git_object: 8aee75522f7677e9f6fc49e2f8c5a75124db3dc7 - src/mistralai/client/models/libraries_documents_update_v1op.py: - id: 734ba6c19f5f - last_write_checksum: sha1:929f437a1c366b6cbecfc86b43436767712327f8 - pristine_git_object: f677b4ddc96b51ecd777240844800b2634ca4358 - src/mistralai/client/models/libraries_documents_upload_v1op.py: - id: "744466971862" - last_write_checksum: sha1:d6b085e01eac97f404a01e137413e159390c1382 - pristine_git_object: e2d59d9f1556ca77c0666b2bba3213ef5386f82a - src/mistralai/client/models/libraries_get_v1op.py: - id: d493f39e7ebb - last_write_checksum: sha1:d61166f6c399516d905c7376fabe56c102265747 - pristine_git_object: 83ae377d245e5c93a4a9118dd049a9096e9f3074 - src/mistralai/client/models/libraries_share_create_v1op.py: - id: feaacfd46dd3 - last_write_checksum: sha1:66ddb6685924e1702cfc40dbcb9a0d2e525cb57d - pristine_git_object: d0313bd01acd6e5403402d0d80a604a6c2812e19 
- src/mistralai/client/models/libraries_share_delete_v1op.py: - id: 7f3a679ca384 - last_write_checksum: sha1:3ac568a5e09a6c74bc6779cd9c0bc3df36b24785 - pristine_git_object: 620527d50c15f5b14307e7735b429fe194469ed5 - src/mistralai/client/models/libraries_share_list_v1op.py: - id: 8f0af379bf1c - last_write_checksum: sha1:3d764be7232233229dc79079101270ace179e65f - pristine_git_object: fd5d9d33ce4b757b369d191621a727f71b5d2e35 - src/mistralai/client/models/libraries_update_v1op.py: - id: 92c8d4132252 - last_write_checksum: sha1:482c5b78278a6e729ed980191c6c1b94dbd890e6 - pristine_git_object: c434ab7a8be94042e6add582520dba11dc9d8d01 + last_write_checksum: sha1:e93d512c8cb6e0812248a195ff869428209cd71f + pristine_git_object: 4453c15798f4fd4db2de64e0beaf7ad557d82fa1 src/mistralai/client/models/libraryin.py: id: 6147d5df71d9 - last_write_checksum: sha1:5b7fe7a4bde80032bd36fad27f5854ad4bb1832f - pristine_git_object: a7b36158a165ab5586cba26cc1f96ab6fe938501 + last_write_checksum: sha1:34c5c9582a488fe87da084e74316e0fd76aa28d1 + pristine_git_object: 1a71d410d997a6d3f197947f821117e0605517af src/mistralai/client/models/libraryinupdate.py: id: 300a6bb02e6e - last_write_checksum: sha1:95060dfcdafbfe2deb96f450b128cd5d6f4e0e5a - pristine_git_object: f0241ba17f95b2c30a102bf1d09ac094c6e757e5 + last_write_checksum: sha1:c9b1a0a00d31fa839df12353f1a3ee9d0b3ffb60 + pristine_git_object: 328b2de3cd4e304fd462882eca7226e460b7c4a7 src/mistralai/client/models/libraryout.py: id: 4e608c7aafc4 - last_write_checksum: sha1:4089ffe9adc8e561b9ec093330c276de653bff7f - pristine_git_object: d1953f16490d40876d05cdd615a3ae8cbcbfd9f6 + last_write_checksum: sha1:9841adb596398554dfcaeb35b7e5a0572c541cff + pristine_git_object: c7ab7b8d39b68b5998c4874f9942caa275cf65d9 + src/mistralai/client/models/listagentaliasesop.py: + id: ff038766a902 + last_write_checksum: sha1:eef4e471999d5df5195aea51cde027b55567aeef + pristine_git_object: 83c6d1769c10fe38402a36b6aff2a18da61f4504 + 
src/mistralai/client/models/listagentsop.py: + id: a573a873c404 + last_write_checksum: sha1:db3c9e6ddc146138ed971f9970d9a164c0f97456 + pristine_git_object: 863fc13af1429bd1a6c02a9a20d2b6cb0cad7b34 + src/mistralai/client/models/listagentversionsop.py: + id: ccc5fb48e78f + last_write_checksum: sha1:0f2306bcceba2a2d7bfeb0be33126514d9287f17 + pristine_git_object: 613d3d8516690e6cba15922dfe69bdf62c039b01 + src/mistralai/client/models/listbatchjobsop.py: + id: f49af453f5e6 + last_write_checksum: sha1:e48b0e7371ee8f637e4fd6bed140cdbb1d405a7d + pristine_git_object: 5322df816e391a5569afcfd14edaeb128467a176 + src/mistralai/client/models/listconversationsop.py: + id: d6007f6c1643 + last_write_checksum: sha1:ece12b550abe6e17eb79f7a05593a93ea055f3f6 + pristine_git_object: 1c9a347c0ad4801c3a1b941e6328061d23d7dcd5 src/mistralai/client/models/listdocumentout.py: id: b2c96075ce00 - last_write_checksum: sha1:13c5461b89970ae00cdce8b80045ed586fd113b7 - pristine_git_object: 24969a0f6dc3d2e0badd650a2694d1ffa0062988 + last_write_checksum: sha1:fc3eca772d1e32938ea1bd2f3e98cdea5f1003f3 + pristine_git_object: a636b3deff66fe4277a63c04fc7dd6c5e74e58e7 + src/mistralai/client/models/listdocumentsop.py: + id: 3e42bdc15383 + last_write_checksum: sha1:d9beade6d8bb8050a67e32c2a73926b140015e68 + pristine_git_object: 0f7c4584d793c7e692a4bbc6678e18549b0e0364 + src/mistralai/client/models/listfilesop.py: + id: e5bd46ac0145 + last_write_checksum: sha1:3e0bc8a7318ffd1c3fe15f335ea2bc1e18c714a1 + pristine_git_object: a9af5c70c98adce56653ff01772fe5900530a36e src/mistralai/client/models/listfilesout.py: id: ae5fa21b141c - last_write_checksum: sha1:2ef7f78253cde73c3baae6aebeda6568bcb96c0d - pristine_git_object: 1db17c406778ac201dfcc1fd348a3e1176f05977 + last_write_checksum: sha1:4bc8ef424beb41c75d9c6fa4e101d330a951a99f + pristine_git_object: 460822f71fe8b0fc6292b804dc2a9de29bff4ef5 + src/mistralai/client/models/listfinetuningjobsop.py: + id: b77fe203b929 + last_write_checksum: 
sha1:af98423b166930cd18a1d377ea688540f3364166 + pristine_git_object: 8712c3fa6ac24094532fdfc047561997ea34552f + src/mistralai/client/models/listlibraryaccessesop.py: + id: 581b332626b7 + last_write_checksum: sha1:0a6bd277a706d807d87d3f2a4f870cc6ba917928 + pristine_git_object: 2206310f301f6ea40f14a495f5f6c6b4e76dbbf7 src/mistralai/client/models/listlibraryout.py: id: cb78c529e763 - last_write_checksum: sha1:044d3d17138c3af1feba6b980f92f8db7bd64578 - pristine_git_object: 24aaa1a9874d0e2054f6a49efe0f70101cec2fb2 + last_write_checksum: sha1:3cd81fd6f6d2421c6b6d06077f0bf1d5b3c96cad + pristine_git_object: 39fa459f7cc7be17c751025287d7827c9d141aac src/mistralai/client/models/listsharingout.py: id: ee708a7ccdad - last_write_checksum: sha1:0644f080e93a533f40579b8c59e5039dea4ee02d - pristine_git_object: f139813f54e97810502d658ad924911de646ab09 + last_write_checksum: sha1:18e6501b00a566121dfd6a1ce7b0e23fef297e45 + pristine_git_object: 443ad0d6a275c1c8bae4adda3e67621b068c0412 src/mistralai/client/models/messageentries.py: id: e13f9009902b - last_write_checksum: sha1:7c2503f4a1be5e5dde5640dd7413fed06aee09b4 - pristine_git_object: 9b1706dee5fbb5cae18674708a1571b187bf0576 + last_write_checksum: sha1:43aebdc9eaecc8341298dc6b281d0d57edf4e9e6 + pristine_git_object: a95098e01843fe3b4087319881967dc42c6e4fef src/mistralai/client/models/messageinputcontentchunks.py: id: 01025c12866a - last_write_checksum: sha1:740e82b72d5472f0cc967e745c07393e2df8ae38 - pristine_git_object: e90d8aa0317e553bfc0cceb4a356cf9994ecfb60 + last_write_checksum: sha1:9eab6d7734dcd4bf9da5222c1927f5f40ef45db0 + pristine_git_object: 63cf14e7fcbc7c3969220b4f07109473b246bf49 src/mistralai/client/models/messageinputentry.py: id: c0a4b5179095 - last_write_checksum: sha1:e9898424d5129750738adb6a049232162824282d - pristine_git_object: 12a31097a88e90645c67a30451a379427cd4fcd3 + last_write_checksum: sha1:b1b8f5b78eb5f57f5cfa7163ed49101736bcefaa + pristine_git_object: 15046d25130cda6571f07a456c2b5a67d2a3bcc0 
src/mistralai/client/models/messageoutputcontentchunks.py: id: 2ed248515035 - last_write_checksum: sha1:f239151ae206f6e82ee3096d357ff33cf9a08138 - pristine_git_object: 136a7608e7e2a612d48271a7c257e2bb383584f3 + last_write_checksum: sha1:df4ef4d17ce48df271ff2b8cab297ae305aa08ec + pristine_git_object: def7a4d27cd3d1479864a1d6af19e89bd57bff70 src/mistralai/client/models/messageoutputentry.py: id: a07577d2268d - last_write_checksum: sha1:d0ca07d6bf6445a16761889bf04a5851abe21ea3 - pristine_git_object: d52e4e3e722ef221f565a0bd40f505385974a0e1 + last_write_checksum: sha1:0633b8c619883bedb1a6ad732c5487c7e7f817f9 + pristine_git_object: 8752fc36bfec39e0ab79d4593ae0cb43ea00641c src/mistralai/client/models/messageoutputevent.py: id: a2bbf63615c6 - last_write_checksum: sha1:fb98c35064fd9c65fa8c8c0cbc59293067ac793f - pristine_git_object: 3db7f5a0908a72f75f6f7303af4ad426a4909d84 + last_write_checksum: sha1:bbdb2c840a7a196edcb6ac6170e8273cc47a495e + pristine_git_object: 39c1013939ea238cb1c7ccbc05480a6840400061 src/mistralai/client/models/metricout.py: id: 92d33621dda7 - last_write_checksum: sha1:056f6e7e76182df649804034d722c5ad2e43294f - pristine_git_object: f8027a69235861ae8f04ccc185d61fa13cc8cc14 + last_write_checksum: sha1:6198ba9e2cd66fcf7f9fcc1cf89481edd432cf11 + pristine_git_object: 5705c71283ce7d4a01d60752657f39279c0f1f85 src/mistralai/client/models/mistralerror.py: id: 68ffd8394c2e - last_write_checksum: sha1:07fce1e971a25d95ffa8c8f3624d62cdf96e353a - pristine_git_object: 28cfd22dc3d567aa4ae55cc19ad89341fa9c96a1 + last_write_checksum: sha1:8b867eca5ca81aa6364f13c9d7e42f9b0d855724 + pristine_git_object: 862a6be8294db5b30bb06cb7b85d60c52ed8e8c9 src/mistralai/client/models/mistralpromptmode.py: id: 95abc4ec799a - last_write_checksum: sha1:ed0b87853d373d830b6572cbdf99d64f167b1d48 - pristine_git_object: 7008fc055bd1031096b7a486a17bf9a5b7841a4c + last_write_checksum: sha1:a1417b987bb34daeb73ca4e015c085814e6c8ad2 + pristine_git_object: 9b91323e7545d636308064085ca16fc554eac904 
src/mistralai/client/models/modelcapabilities.py: id: 64d8a422ea29 - last_write_checksum: sha1:3857f4b989eeed681dffe387d48d66f880537db6 - pristine_git_object: a6db80e73189addcb1e1951a093297e0523f5fa4 + last_write_checksum: sha1:5bc65733cf1c2f4ee8e1b422636fda754bdf8afe + pristine_git_object: c329efbcd9be212c7428c09f28f897834c9239d3 src/mistralai/client/models/modelconversation.py: id: fea0a651f888 - last_write_checksum: sha1:beade63589bde3cae79f471a71e3d04d3f132f97 - pristine_git_object: 574f053d4186288980754ead28bb6ce19b414064 + last_write_checksum: sha1:6186e845be2717da6116e20072835c050d3fdaa5 + pristine_git_object: c0bacb7fd9cd052ecb31a72c6bf593504034e069 src/mistralai/client/models/modellist.py: id: 00693c7eec60 - last_write_checksum: sha1:d6ff956092c0c930a6db02cbe017bc473403639c - pristine_git_object: 6a5209fa6dac59539be338e9ac6ffbefd18057ee + last_write_checksum: sha1:89695c6a680da571c7a77c4544607bd83b3a93d5 + pristine_git_object: c122122c38a3331337cc702340cf1d3e0c9ef99d src/mistralai/client/models/moderationobject.py: id: 132faad0549a - last_write_checksum: sha1:d108ea519d2f491ddbc2e99ab5b8cc02e6987cf8 - pristine_git_object: a6b44b96f00f47c168cd1b2339b7aa44e6ca139e + last_write_checksum: sha1:742d942d72b615432c066827b822290cf4d51d40 + pristine_git_object: 9aa4eb15d837ab2af97faa131a362d50a3a85482 src/mistralai/client/models/moderationresponse.py: id: 06bab279cb31 - last_write_checksum: sha1:d31313c2164ecbc5a5714435a52b6f0dda87b8fe - pristine_git_object: 288c8d82d87a9944ae6d7a417bb92e558c6dcc0f + last_write_checksum: sha1:b9158e575276c1e0a510c129347b9a98c5a70567 + pristine_git_object: a8a8ec3d8d8a58deb3c1f8358c6dce5a9734f89c src/mistralai/client/models/no_response_error.py: id: 2849e0a482e2 - last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f - pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + last_write_checksum: sha1:35b1651843a697024146d6377838b5b99c5c66d3 + pristine_git_object: 7705f1945567498ce606364490685a91b13cd8dd 
src/mistralai/client/models/ocrimageobject.py: id: 685faeb41a80 - last_write_checksum: sha1:93f3d24c4b7513fffef60d5590f3e5a4a0b6e1e4 - pristine_git_object: e97fa8df46c6e39775b3c938c7e1862a507090d2 + last_write_checksum: sha1:663f11a19e067d424263eee40d8127cdc56fb72e + pristine_git_object: e95b67e17e51653bf194ad1cff3a926f34cf97c2 src/mistralai/client/models/ocrpagedimensions.py: id: 02f763afbc9f - last_write_checksum: sha1:28e91a96916711bce831e7fa33a69f0e10298eed - pristine_git_object: f4fc11e0952f59b70c49e00d9f1890d9dd93a0df + last_write_checksum: sha1:f572ed8992ba1ba4d53b705c4e8c94c85ae1290e + pristine_git_object: 847205c6c74a621dd2ee6d9eb18d1acba8395c50 src/mistralai/client/models/ocrpageobject.py: id: 07a099f89487 - last_write_checksum: sha1:367035d07f306aa5ce73fc77635d061a75612a68 - pristine_git_object: f8b43601e7a3dd4fae554c763d3ed1ee6f2927a3 + last_write_checksum: sha1:10cbd1dddcb1f1f5d530048130908ad0ce715928 + pristine_git_object: 4f4ccf43011fa2563f79bb70ae2a813b84f04074 src/mistralai/client/models/ocrrequest.py: id: 36f204c64074 - last_write_checksum: sha1:d4b7a8bf70efe5828d04d773f4b82284a18656f1 - pristine_git_object: 03a6028c5cc298b3ed66ae5f31c310d573a954e5 + last_write_checksum: sha1:8e669292b846a5af4e3cee0b632524696e3067bc + pristine_git_object: 18b899dd5ecc162dc8e92622f56bed503fff80f7 src/mistralai/client/models/ocrresponse.py: id: 2fdfc881ca56 - last_write_checksum: sha1:fb848d5f5c1456028a1e04b9e4f5be3234fa073f - pristine_git_object: 2813a1ca4c94d690f248a318a9e35d655d80600c + last_write_checksum: sha1:4a28dbfcc076c149e4f08a830d4d7f770836eb15 + pristine_git_object: 0a36e97500b4f62adac2526d7dd7cb85c9bdb8b8 src/mistralai/client/models/ocrtableobject.py: id: d74dd0d2ddac - last_write_checksum: sha1:d562f3207193c7d5ef5d7b6175eba8006b6c3a73 - pristine_git_object: 0c9091de8975d8bd8e673aadbb69a619b96d77e8 + last_write_checksum: sha1:3116548673509f4e9f6a50d39f58ce3374823cc4 + pristine_git_object: e32ad894cd97546e635d12595051da103cde9fd8 
src/mistralai/client/models/ocrusageinfo.py: id: 272b7e1785d5 - last_write_checksum: sha1:b466bdd22ad5fa5f08c8aa51e3a6ff5e2fcbf749 - pristine_git_object: 62f07fd4fafa4c16a8cf80a9f52754904943272a + last_write_checksum: sha1:b8fb06d0dad22f958ac756e65d70f5ba410ad47a + pristine_git_object: a421d850450bb3f0b62853c931cd457434d2f174 src/mistralai/client/models/outputcontentchunks.py: id: 9ad9741f4975 - last_write_checksum: sha1:47d74d212ebcb68ff75a547b3221f2aee3e07bfe - pristine_git_object: ad0c087e0dcbe302dd9c73d1ea03e109e9a66ef7 + last_write_checksum: sha1:afb76f3af2952c2afab5397e348ddfd6dbb56c4f + pristine_git_object: 1a115fe8b4874a6bd86719d91332cd3db6d95b46 src/mistralai/client/models/paginationinfo.py: id: 48851e82d67e - last_write_checksum: sha1:b17cc84c592706882d5819b1a706c9a206de9198 - pristine_git_object: 0252f4482f50b34a35f52911b4b57b6899751b42 + last_write_checksum: sha1:166961e2c0f573ba0677ee803820bb944a8a5efb + pristine_git_object: 2b9dab6258249f7be87e1d4a73a2502e21fe1f0d src/mistralai/client/models/prediction.py: id: 1cc842a069a5 - last_write_checksum: sha1:d9bd04d22d58e7e1be0195aaed218a4f407db9c0 - pristine_git_object: f2c5d9c60c50c6e397d7df9ce71ccff957b0e058 + last_write_checksum: sha1:ca391fc2f9faf1657392ceda952c2ee422121952 + pristine_git_object: 52f4adf1eb46d7d5679f9705871cd73e08ae8830 src/mistralai/client/models/processingstatusout.py: id: 3df842c4140f - last_write_checksum: sha1:83fbbccf635fabf60452dfa8dcac696033c3d436 - pristine_git_object: 031f386fb4381b8e2ead1bd22f7f53e59e37f6bb + last_write_checksum: sha1:007a476e4101cac4d2a9eef94d289f0f486d763a + pristine_git_object: 3acadcc9792c286cd31031a80e108b74bc2c0c4e src/mistralai/client/models/realtimetranscriptionerror.py: id: 8c2267378f48 - last_write_checksum: sha1:671be287639964cc6ac7efbed41998f225845e2e - pristine_git_object: e6a889de576f9e36db551a44d4ed3cf0c032e599 + last_write_checksum: sha1:b9642dd42c4092bdebe0a4f8d35c68152f259c05 + pristine_git_object: f8f2d3da9598ce0cd90d148ba1a9be0c5d6237cc 
src/mistralai/client/models/realtimetranscriptionerrordetail.py: id: 5bd25cdf9c7a - last_write_checksum: sha1:471824f03586b63688de43608d6c756b8a156e11 - pristine_git_object: 27bb8d872792723b06238b3f0eebed815948fd63 + last_write_checksum: sha1:a226b10718b1fe4a661311cbd98ea3b1d1ac4163 + pristine_git_object: cec1f6eabd44ceab4e58694a0862c9c90ea2f264 src/mistralai/client/models/realtimetranscriptionsession.py: id: 02517fa5411a - last_write_checksum: sha1:a6db31662165d3df47a5da11efd1923121d1593e - pristine_git_object: 3a3306513c111125c71871024caa650176360c1b + last_write_checksum: sha1:0073b248604f667e89e34cf01184a788ca84d63f + pristine_git_object: d20d0d8c94aeec425a2c1dfb93b72ac6878cb8af src/mistralai/client/models/realtimetranscriptionsessioncreated.py: id: 4e3731f63a3c - last_write_checksum: sha1:5d2e0541b58a3c647ded25d6a0cf8590f64cf0db - pristine_git_object: cc6d5028f221e1794c723dedac5c73564ddb61f7 + last_write_checksum: sha1:d3fb5c5dc417a0ebb12a30770324674e055526ae + pristine_git_object: c4fa5774502699529e27870436ca65b9f88ccfe1 src/mistralai/client/models/realtimetranscriptionsessionupdated.py: id: 686dc4f2450f - last_write_checksum: sha1:2311bf0107f0f957c48ee1841cc95369269a6105 - pristine_git_object: 3da23595291cd49e42d30646288f4f39da6f8c00 + last_write_checksum: sha1:7e4de1020672efc3503cda5b916b41056bf1d22b + pristine_git_object: a61fb05e8e5ba3ffa20bbb98bf61c17045c1f75c src/mistralai/client/models/referencechunk.py: id: 921acd3a224a - last_write_checksum: sha1:abfc5818dbe9e40be5d71436f2ffd1a9b53bd4ab - pristine_git_object: 4c703b8165329a55343c20b5080670168327afc4 + last_write_checksum: sha1:0dcff62499afdb1db0fd4f46614f8680f94837f4 + pristine_git_object: 7634d8ae07c96a99e634dcf888077f1d8cc4dc67 + src/mistralai/client/models/reprocessdocumentop.py: + id: b2913a7aa5c9 + last_write_checksum: sha1:07174ee58ec12909f08a08a9a6d7427ee9b2d5d0 + pristine_git_object: 48a4b72bf285e2f2e4b2d0c352ebc463518ed712 src/mistralai/client/models/requestsource.py: id: 3f2774d9e609 - 
last_write_checksum: sha1:31aae791bf737ad123fe189227d113838204ed42 - pristine_git_object: 7b0a35c44050b6fca868479e261805a77f33e230 + last_write_checksum: sha1:1ce68530a46793968f1122d29df722f0a5c9d267 + pristine_git_object: fc4433cb4e657b06aa6a4c078094c2df342810e2 src/mistralai/client/models/responsedoneevent.py: id: cf8a686bf82c - last_write_checksum: sha1:25972ca80ff7fd7a0d6dfe98718be52580dacc61 - pristine_git_object: 5405625692cb22c60a7b5f5a6f1b58cee5676576 + last_write_checksum: sha1:376c2a65f1fcdfe20d7cf0bd6aa6d8870a4f32c1 + pristine_git_object: ed331ff12c8728290b8ad17e52d9384265233665 src/mistralai/client/models/responseerrorevent.py: id: b286d74e8724 - last_write_checksum: sha1:a4767e8820ae840559fc55c8fcd346dea41a386e - pristine_git_object: c9ef95a04c91c32e7a7973309e2174b7e776f099 + last_write_checksum: sha1:ecff834ec62bf46d2aa5d9753f3898ed86caad45 + pristine_git_object: 8f196a52b469458da5c9f072983870da8c4fc4ea src/mistralai/client/models/responseformat.py: id: 6ab8bc8d22c0 - last_write_checksum: sha1:ad0489488713a977dbf4eac739ce2734c8280350 - pristine_git_object: 5899b0175cefd4159eb680a3715a72fa78577ba4 + last_write_checksum: sha1:e0c29239b4cd698af50412a1cab85217ccbb1796 + pristine_git_object: 409b80d658e4c93f4ee25c218fe74d65fd84ad44 src/mistralai/client/models/responseformats.py: id: c4462a05fb08 - last_write_checksum: sha1:863c7ec4c567d8f0c4e6305b47896424726e71be - pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 + last_write_checksum: sha1:3cb82d44a4f9df5e9a3f51867be6eab1d439d87a + pristine_git_object: 21345778ad2d41a3746292e67fec628f9ec2a84d src/mistralai/client/models/responsestartedevent.py: id: 24f54ee8b0f2 - last_write_checksum: sha1:1bd2a884b9f66eb811fc83d8c3644913dfa80ab1 - pristine_git_object: dc6a10f91e2bb0d13a582ed03e7db2089b75bcf7 + last_write_checksum: sha1:8be1513409934d7ea1c524e468954f7eda0a8c62 + pristine_git_object: 256d2a6c864edf4f3ccd77b2db139c11fe4f6727 src/mistralai/client/models/responsevalidationerror.py: id: 
c244a88981e0 - last_write_checksum: sha1:2687c9ca7df0763384030719e5c1447d83f511b3 - pristine_git_object: bab5d0b70e0bb2ea567a16a1a7c5db839651836f - src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py: - id: 6fefa90ca351 - last_write_checksum: sha1:f7308b269e12b2554a27de9d41312097d0d55d82 - pristine_git_object: 7fdcd37d5879aaca158f459df830a5a4dc55bfa0 + last_write_checksum: sha1:74a39321dee69f3487d9b9e01ffb2e40715176f4 + pristine_git_object: 1ed0d55266a106364fe58aa1e476fafbfbbbfdfe + src/mistralai/client/models/restartconversationop.py: + id: 2f6f3e4bbfd8 + last_write_checksum: sha1:9500d3ebea040ff4a203f3f025ff1bff8a397235 + pristine_git_object: b09eaed5bc8ecdbb7f1952c97b2e665462c70f9e + src/mistralai/client/models/restartconversationstreamop.py: + id: 16dc9ee5bf22 + last_write_checksum: sha1:b16f54529f4fd7d1422c82ff1a6dd5a9a82ba6bd + pristine_git_object: 3b2025f536d1c54ed58064b4be33aaafb9297ac4 + src/mistralai/client/models/retrievefileop.py: + id: ee73efdf9180 + last_write_checksum: sha1:330ec0a78a7ba623f21cd378b53250045bea984c + pristine_git_object: edd50e571cf56c6c22acc1777f6c9af38787f07d src/mistralai/client/models/retrievefileout.py: id: 8bb5859aa0d0 - last_write_checksum: sha1:9d182b5b20c8edef9b98a42036b13afd98031fd5 - pristine_git_object: ffd0617a1c6465a5f8080eb65e382e7a9169eef4 + last_write_checksum: sha1:1077bdb8fcc5ba22b2deb7f5c95fefe7b1fb82f5 + pristine_git_object: 2abf2161cd61d84f04836740a526c0e3711c3f6d + src/mistralai/client/models/retrievemodelop.py: + id: d883baa79c9e + last_write_checksum: sha1:525c7e9cf8594433cbb21374422067a75e6b53a9 + pristine_git_object: b4334e9a5541a14f7916244761213b883d507a41 src/mistralai/client/models/sampletype.py: id: a9309422fed7 - last_write_checksum: sha1:1eb21a68c138e9a0d39b4dd14bcffc9e3ff0784f - pristine_git_object: e0727b028c790a62da67784965f825436dead4f8 + last_write_checksum: sha1:86a61340a647696f6c35a82d945509b1c85aa6f7 + pristine_git_object: dfec7cce1e22ab607b6a9e947fa940284426086d 
src/mistralai/client/models/sdkerror.py: id: 12f991dad510 - last_write_checksum: sha1:9ee3f2dfd9977ce77957d60116db7d04740a4eed - pristine_git_object: ceb03c4868f9c9111007d6c16411f5da1954f211 + last_write_checksum: sha1:c2c344c8b7e23b0c93eeafedd25d28582467c3a7 + pristine_git_object: 101e1e6a67c3672e899b39dbfe10d45550a4449a src/mistralai/client/models/security.py: id: c2ca0e2a36b7 - last_write_checksum: sha1:415802794c6a3f22c58e863be0f633727f681600 - pristine_git_object: 1b67229bee0b64f3a9e8fc3600a7b0c9c13c0a2d + last_write_checksum: sha1:cec2a544790c2178f92742ac88e546efeacedb40 + pristine_git_object: 4fa8b4b2651f1d13811faf2da6e481243ea84e5a src/mistralai/client/models/shareenum.py: id: a0e2a7a16bf8 - last_write_checksum: sha1:0beaa4472ed607142b485c9e208441f9050746b9 - pristine_git_object: ca1b96245e81327aa830f07c0588dccdc1ee518e + last_write_checksum: sha1:15a84d57ceeb74cfb37275f714954e42d8e9b3ba + pristine_git_object: 08ffeb7e46fbbc28b7c93ef2aa4a49aff7c0d35e src/mistralai/client/models/sharingdelete.py: id: f5ecce372e06 - last_write_checksum: sha1:c943bfc24aa0f2035a1b5261d29efb5f3518a555 - pristine_git_object: d659342f1330d73354d557a45bc1a16015a38d8b + last_write_checksum: sha1:c5e4e6df47ef2d5715a99533a1efd936f0e7e16e + pristine_git_object: 202732cf785074446cd24360dd9c540768e4134f src/mistralai/client/models/sharingin.py: id: e953dda09c02 - last_write_checksum: sha1:996c17a8db2c61daed285ee5cafd44481fbd1483 - pristine_git_object: 630f4c70552167237735797f6b64d3f1df5ea214 + last_write_checksum: sha1:f60bd60d37f0accadf50ea111055fd99aa190a5f + pristine_git_object: 8cc3e8968d9d5460f040ebdb66d8f460e86d2c96 src/mistralai/client/models/sharingout.py: id: 0b8804effb5c - last_write_checksum: sha1:b3356792affd50e062bb1f1a84d835bbcfeb50ab - pristine_git_object: 195701d111514fe9aebfedce05dbb4bafab67fed + last_write_checksum: sha1:362bda8a5bd70d12e2de33814d3bd36a61c6d7ae + pristine_git_object: 778071546c12c2636d2deec6042e6b686b6428c6 src/mistralai/client/models/source.py: id: 
fcee60a4ea0d - last_write_checksum: sha1:6f3ea355c62280e1fc6008da69ed0b987f53fd72 - pristine_git_object: 181b327ea73a9bcf9fb90f95633da71cee96e599 + last_write_checksum: sha1:4d4277d75f7ce001780a069898b38afa7c8addc0 + pristine_git_object: fcea403cdbad44299fb2178f07a63bb7e83dc033 src/mistralai/client/models/ssetypes.py: id: 1733e4765106 - last_write_checksum: sha1:8154966cda84ddd5225936ee47c87df1143ee1f1 - pristine_git_object: 796f0327cbb1372c1b2a817a7db39f8f185a59be + last_write_checksum: sha1:3c79fc7c43cd018fba4950ba013ed15899b82ebf + pristine_git_object: 0add960bc93f53df5ddda94892543a0857f32dd6 + src/mistralai/client/models/startfinetuningjobop.py: + id: "663886392468" + last_write_checksum: sha1:6a6a409dd481359e8d6593fa2ea817007f8a967d + pristine_git_object: 805a8721cc7d048f172e1096ead0e410c7d04928 src/mistralai/client/models/systemmessage.py: id: 500ef6e85ba1 - last_write_checksum: sha1:4ca4da49acae5fb508584b1776d368eba7d4a119 - pristine_git_object: 9e01bc57bd17a5ecf6be5fee3383bbb9e03a8ab5 + last_write_checksum: sha1:af68936119bf7c067aec5215e2654c56a5df1755 + pristine_git_object: 352eca76ad5051cc2c504c673a23e048642fe018 src/mistralai/client/models/systemmessagecontentchunks.py: id: 297e8905d5af - last_write_checksum: sha1:4581a28c592708bf51dbc75b28fe9f7bddde3c70 - pristine_git_object: 7a79737964b79e39b760ef833cce24e411f5aa90 + last_write_checksum: sha1:e5695ca0ebdb0f02f3a0c527015df154a0c52b7f + pristine_git_object: d480a219e935aaea91adc320de0003b562c0bbb5 src/mistralai/client/models/textchunk.py: id: 9c96fb86a9ab - last_write_checksum: sha1:8abd7cb3d8149458d95268eea8f18d5096e77fb0 - pristine_git_object: 4207ce7e46141aed94cf0f8726bb2433709101ca + last_write_checksum: sha1:4ad624afaf4d83d4e58f72bcbd15b9faecc703f3 + pristine_git_object: c0584234da572bb903894633b123b1dda29e7736 src/mistralai/client/models/thinkchunk.py: id: 294bfce193a4 - last_write_checksum: sha1:a6cd3efbf01dc0a72818675893594179addcfd12 - pristine_git_object: 
b1560806b88b733bf3b574c3e0d45e93df892548 + last_write_checksum: sha1:d9c779959ed82ae3de66e481536d80bcc2ed57a5 + pristine_git_object: a999f5d7b824325085ec980cfa07294919408538 src/mistralai/client/models/timestampgranularity.py: id: 68ddf8d702ea - last_write_checksum: sha1:68ea11a4e27f23b2fcc976d0a8eeb95f6f28ba85 - pristine_git_object: 5bda890f500228ba0c3dc234edf09906b88cb522 + last_write_checksum: sha1:64e7b198a75f026590e26758112651d31984076f + pristine_git_object: 8d3773752444db865c0e2629ad9eed66eb7f2bc6 src/mistralai/client/models/tool.py: id: 48b4f6f50fe9 - last_write_checksum: sha1:5f80f78858fb50e0688123f8dd1478eeb0e7c5af - pristine_git_object: 4b29f575a3604d83fd6b492c26327f36e6e5a681 + last_write_checksum: sha1:14e7b21a2857e2ca36830730a47d0eca476fb491 + pristine_git_object: a46d31f166618fd5b92b7e76ccb9190796af7cd2 src/mistralai/client/models/toolcall.py: id: fb34a1a3f3c2 - last_write_checksum: sha1:f4c5de640f5b942f180062388be187a910067a1b - pristine_git_object: 558b49bfaec7c306c093b97a4bbf722fe9f4b6b1 + last_write_checksum: sha1:15ed0a4611e8c310640ec4622af8019d2db93355 + pristine_git_object: 4a05bbd04a44446babda8419dcf4d4c93248fe41 src/mistralai/client/models/toolchoice.py: id: 14f7e4cc35b6 - last_write_checksum: sha1:f833d01b307437a83705b9b669b0d95eab4c01e0 - pristine_git_object: 2c7f6cbf6ebfbbdcce7d82b885b5e07b6b52d066 + last_write_checksum: sha1:358a6e88486b4d372c9041dd15c0206b119bbc32 + pristine_git_object: aa2016fb63290c63f9b8f3e18c552f6598f15c8f src/mistralai/client/models/toolchoiceenum.py: id: c7798801f860 - last_write_checksum: sha1:d958ef93b303539226fdab0fd46c8ea21d24cea2 - pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 + last_write_checksum: sha1:5388b2a6fad842f8e4ae79e6257b4d14c122a6ff + pristine_git_object: d66c3d07058eb87bcc3eec10de99a616b5f6638a src/mistralai/client/models/toolexecutiondeltaevent.py: id: df8f17cf3e07 - last_write_checksum: sha1:96147badaad7eb961d224b29d9134dba8fc35f49 - pristine_git_object: 
0268e6a0d9b3c25afe1022e61a630e926a50f135 + last_write_checksum: sha1:6ad6e219f3d7512c9fd382fb22471bfaa0fc9b09 + pristine_git_object: 384ec2407848f51434ca378ad7de965c584b163b src/mistralai/client/models/toolexecutiondoneevent.py: id: 514fdee7d99f - last_write_checksum: sha1:bc439993c647ba471b7f1581f72e094b99bd5c14 - pristine_git_object: 854baee98a119caf237ca0f39e4ddd7a36577771 + last_write_checksum: sha1:09ef4842c50419eda746f3361454c4df0c3c2466 + pristine_git_object: 56f28899b8b4161fcddfec0ed2610486fe6f8b06 src/mistralai/client/models/toolexecutionentry.py: id: 76db69eebe41 - last_write_checksum: sha1:4fb31b58961ce5f43233d91fb6efb89c624fab44 - pristine_git_object: 839709fb8ea63cc358de9f5e71180bf9e94cf5a5 + last_write_checksum: sha1:ff84f62c5264aa023f412956cf83604ecc4112a9 + pristine_git_object: 158cbf06a2acdd492ddb91ae8eaca4802da9f359 src/mistralai/client/models/toolexecutionstartedevent.py: id: 40fadb8e49a1 - last_write_checksum: sha1:d71ec6e61c1a881be8e02853f1ba450c36ec16e3 - pristine_git_object: 66438cfc33b171868f597ff3f80a82a40d1396f4 + last_write_checksum: sha1:5ba46ca1583e8245736a0ae81372025482a8504b + pristine_git_object: 1591866981ce1439fbce3736f028b15205d95810 src/mistralai/client/models/toolfilechunk.py: id: 26c8aadf416a - last_write_checksum: sha1:753db4dd27eea752066a04774094cba73aeb8ca0 - pristine_git_object: 62b5ffeda19a7fa614ccc5e390450f2452dd119d + last_write_checksum: sha1:1dd468876a2ff5ec8b15b6f4e6b8f812e640a29a + pristine_git_object: 6eebd562b1707b41b81e2fd0e267e4c8698551de src/mistralai/client/models/toolmessage.py: id: 15f1af161031 - last_write_checksum: sha1:58370491597186ddf08c8648f1e24abc9c852c26 - pristine_git_object: eae2d2aef69dc4134f42714d69625e7b6c43e8c9 + last_write_checksum: sha1:809936ebaeb4541f862aed6d26e1d1f5ff0ae58a + pristine_git_object: b3e8ffd9294bf6b0b46b26097abb87a5b96c9302 src/mistralai/client/models/toolreferencechunk.py: id: 822e9f3e70de - last_write_checksum: sha1:bf6b77aff4de13f4f374513e85785a1c6b17b87b - 
pristine_git_object: 882b1563a44cbc77256b6f44b1f41d602956d0b4 + last_write_checksum: sha1:f02c38c892580a6287156551e7964c601a239220 + pristine_git_object: 3c76c8c2dcc86d225c5218fa13cd43a693230fa8 src/mistralai/client/models/tooltypes.py: id: 86c3b54272fd - last_write_checksum: sha1:94cd31b4a170bde0983bc48e8c1148693c3d67e0 - pristine_git_object: abb26c258280a889d784e662b45ed486fc648817 + last_write_checksum: sha1:e90c15c1e645a5f207af0c7ac728cb0a521c6706 + pristine_git_object: e601c1967c42ef8d0c2eea98bc5c0ca722cde066 src/mistralai/client/models/trainingfile.py: id: 2edf9bce227d - last_write_checksum: sha1:12257eadce20511a4f3e3f3424e3bca112510f5f - pristine_git_object: 1d9763e0fd8e44f9b6e05254c5abb5a81fdf0b17 + last_write_checksum: sha1:668f05a3e3b883c2f54b1e541f1fb501605456b0 + pristine_git_object: 1f710ff81c046261ea497505d7216a1208c65d5b src/mistralai/client/models/transcriptionresponse.py: id: 60896dbc6345 - last_write_checksum: sha1:1f3066c34b7e76acc46ddb1e69869f3c62bfb841 - pristine_git_object: 24c0b92e424e91d40972c0826553a7d344a8f932 + last_write_checksum: sha1:3e5c20911697f5569029932fe0910da94feb2b06 + pristine_git_object: 786863ec331a4bdca18ac056f7447d11010d4320 src/mistralai/client/models/transcriptionsegmentchunk.py: id: d1e6f3bdc74b - last_write_checksum: sha1:23714fcd3791d09a7cc9a1bddd2f2203861d1bce - pristine_git_object: c89d84fcf3842da23e1f710309446b4c592ceeb3 + last_write_checksum: sha1:0107b6ee9160cd2a8309f7c8465502d7d0be90a8 + pristine_git_object: c78bec3068b95782bdc271c2e1ee645b115fed32 src/mistralai/client/models/transcriptionstreamdone.py: id: 066a9158ed09 - last_write_checksum: sha1:09bd7a12a1985d377883be53815f88195dcdce57 - pristine_git_object: add17f562385c3befc2932b16448901154372ca6 + last_write_checksum: sha1:3a6abc6f1a0ad78d73e32f3d40ef4bb425aee5b5 + pristine_git_object: b5740b3bb62b4db3846b7727b15e18502e39d862 src/mistralai/client/models/transcriptionstreamevents.py: id: b50b3d74f16f - last_write_checksum: 
sha1:651ae56098858fe8103ebd280bbdf2f74550794c - pristine_git_object: caaf943a4662ecccab96183f63c226eaefee2882 + last_write_checksum: sha1:f688a18317bd048ad89881c35cb80e39bb7cba47 + pristine_git_object: 17161a177721e44a40903cf15bf08ad0b56545de src/mistralai/client/models/transcriptionstreameventtypes.py: id: 6f71f6fbf4c5 - last_write_checksum: sha1:d7671637063c19222c20b8334abf92abe3d30517 - pristine_git_object: 4a910f0abca2912746cac60fd5a16bd5464f2457 + last_write_checksum: sha1:1d568460b1521f17dd5e551632ae4d7883a98dd3 + pristine_git_object: c74bbb7483cc3981ee3638c80c15924f3e1c20c4 src/mistralai/client/models/transcriptionstreamlanguage.py: id: e94333e4bc27 - last_write_checksum: sha1:7da587e67d635164bb986a3151a43b9a71b28c4d - pristine_git_object: b47024adfca2d8da6f1f01ce573bcc339cbbc63a + last_write_checksum: sha1:17c7b082ebf5764e21f124fe4c6a6ee5cea4fc51 + pristine_git_object: 67b3e9791efaf134580d82c2a12fab1cd33efbb1 src/mistralai/client/models/transcriptionstreamsegmentdelta.py: id: c0a882ce57e5 - last_write_checksum: sha1:91631a724a84abf4fd603ba7a7630b5e7d970944 - pristine_git_object: 7cfffb63f31d10a247e066c8f422e4f6af2cf489 + last_write_checksum: sha1:12cbfcf02d5cb4979a836e429690786153250bf0 + pristine_git_object: 8db5e73619eab98c3751689a7ec5bef45ef9ef6b src/mistralai/client/models/transcriptionstreamtextdelta.py: id: 6086dc081147 - last_write_checksum: sha1:1c065d9a2874c4b315fe3cd191f94ef3e8f1cc43 - pristine_git_object: ce279cf67ffc4e225ce37490f4ffd0c0d64fe993 + last_write_checksum: sha1:6b371b5d236e6e767f25160ab0e8a49bcaf356f8 + pristine_git_object: 49338a083332467e64f171637ca04365ca6bf25b src/mistralai/client/models/unarchiveftmodelout.py: id: 9dbc3bfb71ed - last_write_checksum: sha1:b2a1f9af7a5a7f5cbcda3256c46d02926e0cf2da - pristine_git_object: 511c390b4192cf85ec86150c7dad84543c68e031 + last_write_checksum: sha1:40a23dc39af81f06b23f21dad45c5c5f1178b2af + pristine_git_object: 0249a69e8552ed00a5e1f505fdc16025c46d2477 + 
src/mistralai/client/models/unarchivemodelop.py: + id: eb18584fd78c + last_write_checksum: sha1:5b81357950f301a82233b58a3e2a5b232fdbf546 + pristine_git_object: 1d68a06ae41559baffb6d54398b52dec630556c7 + src/mistralai/client/models/updateagentop.py: + id: ae3a6abea468 + last_write_checksum: sha1:3867948bd0ea37b597c4e5ef7a2e6881791a5fa5 + pristine_git_object: 28acc83d8df1373e897f9634dfbb84ee28897717 + src/mistralai/client/models/updateagentversionop.py: + id: 3821dca5b20a + last_write_checksum: sha1:4c41a450278858089c7cb23b8fcf1e4184fa1f1d + pristine_git_object: 114013bcdcfb7d7c9e935285f167a004b65fbd09 + src/mistralai/client/models/updatedocumentop.py: + id: eee9ef317180 + last_write_checksum: sha1:7d9fc6e18e6631cfe9cd1bc2fa5f7d6cd599ec19 + pristine_git_object: 073f22a9a28c18ad645212262bdc66528a1f6281 src/mistralai/client/models/updateftmodelin.py: id: 39e2d678e651 - last_write_checksum: sha1:dd8dda798b804c4927505ac1fcbd13787f32a25d - pristine_git_object: 0471a15458f3cff4939360d3891af0fdee9ec251 + last_write_checksum: sha1:4ea30ed8eaad36e1440614016f075f088c7e5781 + pristine_git_object: 4ac5a8a24026f6a975044de01a9918364aa64e04 + src/mistralai/client/models/updatelibraryop.py: + id: 4ba7acdb62c6 + last_write_checksum: sha1:3816c8eff226634b545843eed2d0c15fa1579308 + pristine_git_object: c5a1ad30e9bfc277cbbcdea0218a265ad10bcb96 + src/mistralai/client/models/updatemodelop.py: + id: ba149ecfe03e + last_write_checksum: sha1:2ce33ac60846a5ef70141dccbdb09950c3d1e348 + pristine_git_object: 023be97905929aa2925f20cd69b3591e6e3168d7 + src/mistralai/client/models/updateorcreatelibraryaccessop.py: + id: ec9b15418f5c + last_write_checksum: sha1:82fe6bbbb1402f03b7c0380c5fd84a8fef9bec67 + pristine_git_object: 1abe6eda3eb7d0aff8a7c146c848a63e299cedf1 + src/mistralai/client/models/uploaddocumentop.py: + id: 0018fe7ff48c + last_write_checksum: sha1:f31d565f419cbcc59af0655753cee6c643ad307a + pristine_git_object: 2c957947830ae0d467084cc6502d9d97ffdf6c81 + 
src/mistralai/client/models/uploadfileop.py: + id: d67619670938 + last_write_checksum: sha1:00664ba8af70ffc96871eee02890411475ca6c37 + pristine_git_object: 50848f0b663f60f9a303010f3c940919939949c9 src/mistralai/client/models/uploadfileout.py: id: 42466f2bebfb - last_write_checksum: sha1:db43df223f848a25a1526624cd3722ef3014e700 - pristine_git_object: 55e56504db280fdb4772bb061128742866555e82 + last_write_checksum: sha1:44d0e5d419fb82c56c33c0f9af8902b3cc06bf6d + pristine_git_object: be291efb523965c155fc922d896da2cf682378ab src/mistralai/client/models/usageinfo.py: id: 54adb9a3af16 - last_write_checksum: sha1:a5f57f73d176aa8f4a9ad91daefe8e6257398abc - pristine_git_object: f1186d97357320f4bfc9d3f2a626f58d2b1a38d0 + last_write_checksum: sha1:fcfdc921bbcc78436ef156dd7a2eff1123c4036f + pristine_git_object: e78f92e75f86fd593469f7267aad72e417178161 src/mistralai/client/models/usermessage.py: id: cb583483acf4 - last_write_checksum: sha1:1e33aea6971835069dc9c862351d507f48d4ff8d - pristine_git_object: 8d92cea803368e996d68dc2f3a2dadd1d06a4675 + last_write_checksum: sha1:215406ca76602e899620ef763e216d71f8cd9fcd + pristine_git_object: 25ccdf805e9fbc65da7b6d0051f13224cf0e04fa src/mistralai/client/models/validationerror.py: id: 15df3c7368ab - last_write_checksum: sha1:de86af94be29bd8bfd5fa2708eeb3dda3032423d - pristine_git_object: 352409be88a1175073e5438d6da86fc9a54896fc + last_write_checksum: sha1:63df5739d68f984470d4d1b8661a875201cc301d + pristine_git_object: 385714c8cb80a8afbca6d5142a2d378d0d165cf9 src/mistralai/client/models/wandbintegration.py: id: 4823c1e80942 - last_write_checksum: sha1:b33912c4e08c07b0139cc3c31f93e899f797b5f2 - pristine_git_object: 89489fb4527c6515a609bcb533ef59ab516c7a38 + last_write_checksum: sha1:6391a293368ba6fa98114ce510a7665b47d82721 + pristine_git_object: c5db4a6d409f1d84d356a471995119a070db627a src/mistralai/client/models/wandbintegrationout.py: id: 6b103d74195c - last_write_checksum: sha1:f64af59d7fe3d068e185776b01d43b7fdab1f129 - 
pristine_git_object: a7f9afeb6683a173115371a686af5f95e2d29056 + last_write_checksum: sha1:37caaf5224b216826c48912538959baa0a7d997a + pristine_git_object: d0a09bf48c3a24f5382a626d26897afe2d680f7e src/mistralai/client/models/websearchpremiumtool.py: id: bfe88af887e3 - last_write_checksum: sha1:689087bc6c49bbc8b286e5b0155a6e5f6a1dc47d - pristine_git_object: 8d2d4b5dfea50a34ac744181790bf5db84809b1c + last_write_checksum: sha1:9f9b4bfeea780cec16b9457ee800524c3eba7a4b + pristine_git_object: 9588ab1d7361d3ab1cba2f16e74695273cc03557 src/mistralai/client/models/websearchtool.py: id: 26b0903423e5 - last_write_checksum: sha1:93015f750a125a8297f9455278ebe482794ba958 - pristine_git_object: ba4cc09f84faebb438a631db6ac328fea2ced609 + last_write_checksum: sha1:9afaf3738be10d0a401b34e15db25612ee33465f + pristine_git_object: 27502909ea608f8e0b4a71484da94d26209e0c07 src/mistralai/client/models_.py: id: 1d277958a843 - last_write_checksum: sha1:987921077f5b5535c39a21216585fc1bf9aa8811 - pristine_git_object: 5ef9da096e58023aaa582f31717b4ee7a4b720b0 + last_write_checksum: sha1:f50e7b7194f97de4abf0afd70b5e1c52b805cef6 + pristine_git_object: 05b33ac72da14401b700c4abfb28ca33b5af702b src/mistralai/client/ocr.py: id: 2f804a12fc62 - last_write_checksum: sha1:877f0c2db0319ea6b5ccf3d92f35bf633df10eda - pristine_git_object: ce7e2126dda2bc2b12cefb96e955edd3c7d4b6ab + last_write_checksum: sha1:2cfde7a27733502b87690c1025adefe5b717da57 + pristine_git_object: 2aa382295a9f1561021a36f3a68a9fb505cfe536 src/mistralai/client/py.typed: id: d95cd1565e33 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 src/mistralai/client/sdk.py: id: 48edbcb38d7e - last_write_checksum: sha1:831d2d1fee16c8d970c946f80ec56ba965e4f0ca - pristine_git_object: 9957940005a1150762e9fc284993cefeb2e8831a + last_write_checksum: sha1:be11dc3f70c773dd5c6baba6b3fafd996c5baec2 + pristine_git_object: b1ab54935a3421008c78f4864bd6097c0a098040 
src/mistralai/client/sdkconfiguration.py: id: b7dd68a0235e - last_write_checksum: sha1:a24763668db44bf36ca35d1efa4873e2495dd716 - pristine_git_object: df50d16fa502e8b4c2a4567f3541fd48bfc1e324 + last_write_checksum: sha1:c6944f12c6fdc992d43db943b24c8c90854cde5e + pristine_git_object: 712e92e05c7fd3016431ec62ecb7b7789c8b7071 src/mistralai/client/transcriptions.py: id: 75b45780c978 - last_write_checksum: sha1:5c305412b646fa70232fd141e93378b3b4d4b3c4 - pristine_git_object: 455010243710d56d033861b1440cc1e30924d40c + last_write_checksum: sha1:b47a3765f2191715fc19bdbc4e56414abbe59f4b + pristine_git_object: f7ef5b0a0769467bd4bea61f7b0dca3b68c3788d src/mistralai/client/types/__init__.py: id: 000b943f821c - last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed - pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c + last_write_checksum: sha1:12a4ace69cbc63f1125eeddf901afed7cdf378b0 + pristine_git_object: cf83864312d8fed0bb9dd3ce39d373b040c36b2e src/mistralai/client/types/basemodel.py: id: 7ec465a1d3ff - last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 - pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee + last_write_checksum: sha1:b62a9d42d79a238399e04efbf5c02215c707efde + pristine_git_object: 4e889aa0ffbb4402e416a40fa6259334cb0a3c5c src/mistralai/client/utils/__init__.py: id: b69505f4b269 - last_write_checksum: sha1:c7c1ee47be7ac3774b042c8aee439143493ed3ce - pristine_git_object: f9c2edce8ecf2d2a4ab0ad36129ac70afd3d1f2f + last_write_checksum: sha1:adb457b85659a04945857a74407306dafbdce7cb + pristine_git_object: 7ed3a42095b5921adf0e154ae6eba560a1098233 src/mistralai/client/utils/annotations.py: id: 1ffdedfc66a2 - last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc - pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 + last_write_checksum: sha1:f86ba37de752e63076f25d53f9c54fce98d2a0bd + pristine_git_object: 4b60ab8e730e7093a064b6869c4a712b96e4aad8 src/mistralai/client/utils/datetimes.py: id: 
c40066d868c9 - last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 - pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + last_write_checksum: sha1:412ca432d6f5a75b692a967bc6fc52e4f4eff7d5 + pristine_git_object: a2c94fac73ecbfb8acd8ed4f75692318e4f863ec src/mistralai/client/utils/enums.py: id: a0735873b5ac - last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d - pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 + last_write_checksum: sha1:fe05b6a21360b0eff1fc246e9a3ee01758521262 + pristine_git_object: d897495f053459106144501c67f2215251d52a27 src/mistralai/client/utils/eventstreaming.py: id: 3263d7502030 - last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b - pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + last_write_checksum: sha1:0e15051d74262fbe051e1ba83fd1f2c0c0a016a0 + pristine_git_object: 3fe3c7e13509d6fab08fbb8504c6c5f674c2b679 src/mistralai/client/utils/forms.py: id: 58842e905fce - last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 - pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + last_write_checksum: sha1:c7929d974f46629b56e740456ddf03230b4048ab + pristine_git_object: 2b474b9a719e95c4bcae8572e5569e64f8d0b77f src/mistralai/client/utils/headers.py: id: 9066de2ead8b - last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 - pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a + last_write_checksum: sha1:bcd2f47b96bfaa54b3590c557a9267142d446be6 + pristine_git_object: 6491187230b5f11c7ff13396891ac69099a73a79 src/mistralai/client/utils/logger.py: id: 745023607a1f - last_write_checksum: sha1:3212454c3047548e8f9099366dc0e7c37e5918ac - pristine_git_object: 2ef27ee5bb8cd37d9aa66b076c449fd9c80e2627 + last_write_checksum: sha1:2582e0cb889b6293c12ce9671aba6281d46bad44 + pristine_git_object: 3edad8307ea0ef38e857596a3ec11023a4af287f src/mistralai/client/utils/metadata.py: id: d49d535ae52c - last_write_checksum: 
sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 - pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d + last_write_checksum: sha1:54d300a665d3d5eafcc778a795d79347479b8337 + pristine_git_object: d46ffa59952926b7b1a842b0db2475527eda87df src/mistralai/client/utils/queryparams.py: id: bb77d4664844 - last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 - pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 + last_write_checksum: sha1:d02ce5b2dcc26edb7c937d75b98b70c22a5af189 + pristine_git_object: 0b78c548233f32afa2aafe0040ebb120b51532e8 src/mistralai/client/utils/requestbodies.py: id: 946cfcd26ee4 - last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 - pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c + last_write_checksum: sha1:8cac30839193ee0bb02975b0e225eab97adf4fd1 + pristine_git_object: 3aae69c7cf618776daec8bd46f9116b06c25e837 src/mistralai/client/utils/retries.py: id: 5f1a5b90423c - last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 - pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + last_write_checksum: sha1:94a86f31092553d4640a54c446cfe9028b4fb6ef + pristine_git_object: 90c008b0e20c1a539d65ffb387fb61a724c3c111 src/mistralai/client/utils/security.py: id: 1acb7c006265 - last_write_checksum: sha1:d86d2fd73cbb4a77f72395c10fff1c700efcf42e - pristine_git_object: 3b8526bfdf5c9c871ed184a2ec785f7bc1ebe57e + last_write_checksum: sha1:e8543609e699dab330a4768786883c6ca38f07a6 + pristine_git_object: 4c73806d9c8e54a2a4cfe8f62d8c281177789f6f src/mistralai/client/utils/serializers.py: id: 53c57c7f29a8 - last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 - pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + last_write_checksum: sha1:8a3a15cf273034261111f2559cacbb579e17cb1b + pristine_git_object: fbc2772dc4284775be92de6a086c1eade9376417 src/mistralai/client/utils/unmarshal_json_response.py: id: b13585fc5626 - last_write_checksum: 
sha1:4df16054b0c28b043d248dd8f56992574156bcd0 - pristine_git_object: 6d43d6e44056d64e272f60a466c47391a60c792d + last_write_checksum: sha1:c0c44d0a656477daa225724e88a7cf5c954a1df6 + pristine_git_object: 65190e5c1d70a31f51656e1644bb701b9f132bcd src/mistralai/client/utils/url.py: id: 3c6496c17510 - last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 - pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 + last_write_checksum: sha1:c64be472d29cf229f2b91102808dcb741371c227 + pristine_git_object: 27a6a3a05287ff8a4e24e379ae5d20280c2caf30 src/mistralai/client/utils/values.py: id: bb6ade7a7f82 - last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 - pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 + last_write_checksum: sha1:da9ce43ad241db386efd9b2f53d81eb051dd7544 + pristine_git_object: 2469a9f310a37a7170b54853715274f13d38901c examples: list_models_v1_models_get: speakeasy-default-list-models-v1-models-get: @@ -3650,6 +3716,618 @@ examples: application/json: [{"alias": "", "version": 318290, "created_at": "2025-10-02T20:25:32.322Z", "updated_at": "2026-11-19T02:58:37.894Z"}] "422": application/json: {} + ListModels: + userExample: + responses: + "200": + application/json: {"object": "list"} + RetrieveModel: + speakeasy-default-retrieve-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", "capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "base"} + "422": + application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "owned_by": "mistralai", 
"capabilities": {"completion_chat": false, "function_calling": false, "completion_fim": false, "fine_tuning": false, "vision": false, "ocr": false, "classification": false, "moderation": false, "audio": false, "audio_transcription": false}, "max_context_length": 32768, "type": "fine-tuned", "job": "Principal Implementation Assistant", "root": "", "archived": false} + DeleteModel: + speakeasy-default-delete-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + "422": + application/json: {} + userExample: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "ft:open-mistral-7b:587a6b29:20240514:7e773925", "object": "model", "deleted": true} + UpdateModel: + speakeasy-default-update-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "", "object": "model", "created": 76471, "owned_by": "", "workspace_id": "", "root": "", "root_version": "", "archived": true, "capabilities": {"completion_chat": true, "completion_fim": false, "function_calling": false, "fine_tuning": false, "classification": false}, "max_context_length": 32768, "job": "9765ed11-3bc9-49ff-a19d-06665406d404", "model_type": "completion"} + ArchiveModel: + speakeasy-default-archive-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "archived": true} + UnarchiveModel: + speakeasy-default-unarchive-model: + parameters: + path: + model_id: "ft:open-mistral-7b:587a6b29:20240514:7e773925" + responses: + "200": + application/json: {"id": "", "object": "model", "archived": false} + StartConversation: + speakeasy-default-start-conversation: 
+ requestBody: + application/json: {"inputs": "", "stream": false, "completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + ListConversations: + speakeasy-default-list-conversations: + parameters: + query: + page: 0 + page_size: 100 + responses: + "200": + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "object": "conversation", "id": "", "created_at": "2026-05-02T18:35:22.595Z", "updated_at": "2024-04-15T10:58:56.705Z", "model": "Silverado"}] + "422": + application/json: {} + GetConversation: + speakeasy-default-get-conversation: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation", "id": "", "created_at": "2026-10-30T16:36:24.274Z", "updated_at": "2026-03-08T22:30:16.213Z", "agent_id": ""} + "422": + application/json: {} + DeleteConversation: + speakeasy-default-delete-conversation: + parameters: + path: + conversation_id: "" + responses: + "422": + application/json: {} + AppendConversation: + speakeasy-default-append-conversation: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": [], "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": {"key": "", "key1": ""}}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + GetConversationHistory: + speakeasy-default-get-conversation-history: + parameters: + path: 
+ conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.history", "conversation_id": "", "entries": []} + "422": + application/json: {} + GetConversationMessages: + speakeasy-default-get-conversation-messages: + parameters: + path: + conversation_id: "" + responses: + "200": + application/json: {"object": "conversation.messages", "conversation_id": "", "messages": []} + "422": + application/json: {} + RestartConversation: + speakeasy-default-restart-conversation: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + responses: + "200": + application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [{"object": "entry", "type": "function.call", "tool_call_id": "", "name": "", "arguments": ""}], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} + "422": + application/json: {} + StartConversationStream: + speakeasy-default-start-conversation-stream: + requestBody: + application/json: {"inputs": "", "stream": true, "completion_args": {"response_format": {"type": "text"}}} + responses: + "422": + application/json: {} + AppendConversationStream: + speakeasy-default-append-conversation-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + responses: + "422": + application/json: {} + RestartConversationStream: + speakeasy-default-restart-conversation-stream: + parameters: + path: + conversation_id: "" + requestBody: + application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + responses: + "422": + application/json: {} + 
CreateAgent: + speakeasy-default-create-agent: + requestBody: + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Mustang", "name": ""} + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Taurus", "name": "", "object": "agent", "id": "", "version": 388058, "versions": [980917, 959600], "created_at": "2024-07-23T17:25:11.997Z", "updated_at": "2025-07-14T09:13:03.268Z", "deployment_chat": false, "source": ""} + "422": + application/json: {} + ListAgents: + speakeasy-default-list-agents: + parameters: + query: + page: 0 + page_size: 20 + responses: + "200": + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "model": "Challenger", "name": "", "object": "agent", "id": "", "version": 679172, "versions": [491437], "created_at": "2026-05-11T12:36:32.958Z", "updated_at": "2026-08-23T04:04:31.448Z", "deployment_chat": false, "source": ""}] + "422": + application/json: {} + GetAgent: + speakeasy-default-get-agent: + parameters: + path: + agent_id: "" + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "F-150", "name": "", "object": "agent", "id": "", "version": 928666, "versions": [246402], "created_at": "2024-02-28T12:05:26.160Z", "updated_at": "2024-05-16T04:31:56.940Z", "deployment_chat": false, "source": ""} + "422": + application/json: {} + UpdateAgent: + speakeasy-default-update-agent: + parameters: + path: + agent_id: "" + requestBody: + application/json: {"completion_args": {"response_format": {"type": "text"}}} + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "1", "name": "", "object": "agent", "id": "", "version": 388537, "versions": [955918, 365950, 823288], "created_at": "2026-11-04T08:06:14.896Z", "updated_at": "2025-05-23T04:44:27.181Z", "deployment_chat": true, "source": ""} + "422": + application/json: {} + DeleteAgent: 
+ speakeasy-default-delete-agent: + parameters: + path: + agent_id: "" + responses: + "422": + application/json: {} + UpdateAgentVersion: + speakeasy-default-update-agent-version: + parameters: + path: + agent_id: "" + query: + version: 958693 + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "XTS", "name": "", "object": "agent", "id": "", "version": 203502, "versions": [449666], "created_at": "2024-09-21T15:29:30.503Z", "updated_at": "2026-10-29T17:49:52.509Z", "deployment_chat": true, "source": ""} + "422": + application/json: {} + ListAgentVersions: + speakeasy-default-list-agent-versions: + parameters: + path: + agent_id: "" + query: + page: 0 + page_size: 20 + responses: + "200": + application/json: [{"completion_args": {"response_format": {"type": "text"}}, "model": "Volt", "name": "", "object": "agent", "id": "", "version": 45747, "versions": [584697, 811109], "created_at": "2024-12-19T10:33:53.873Z", "updated_at": "2025-10-05T12:31:56.977Z", "deployment_chat": false, "source": ""}] + "422": + application/json: {} + GetAgentVersion: + speakeasy-default-get-agent-version: + parameters: + path: + agent_id: "" + version: "" + responses: + "200": + application/json: {"completion_args": {"response_format": {"type": "text"}}, "model": "Camaro", "name": "", "object": "agent", "id": "", "version": 663020, "versions": [210212], "created_at": "2026-11-16T03:32:55.781Z", "updated_at": "2026-09-28T23:51:49.611Z", "deployment_chat": true, "source": ""} + "422": + application/json: {} + CreateOrUpdateAgentAlias: + speakeasy-default-create-or-update-agent-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + version: 154719 + responses: + "200": + application/json: {"alias": "", "version": 991981, "created_at": "2025-09-02T11:06:53.872Z", "updated_at": "2024-12-17T11:05:04.936Z"} + "422": + application/json: {} + ListAgentAliases: + speakeasy-default-list-agent-aliases: + parameters: + path: + 
agent_id: "" + responses: + "200": + application/json: [{"alias": "", "version": 345116, "created_at": "2025-03-19T21:46:52.564Z", "updated_at": "2026-07-18T22:23:53.218Z"}] + "422": + application/json: {} + DeleteAgentAlias: + speakeasy-default-delete-agent-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + responses: + "422": + application/json: {} + ListLibraries: + speakeasy-default-list-libraries: + responses: + "200": + application/json: {"data": []} + CreateLibrary: + speakeasy-default-create-library: + requestBody: + application/json: {"name": ""} + responses: + "201": + application/json: {"id": "7a160c5d-b74e-44df-8beb-aca6894fa845", "name": "", "created_at": "2026-05-19T08:13:56.459Z", "updated_at": "2026-06-02T23:02:36.047Z", "owner_id": null, "owner_type": "", "total_size": 236146, "nb_documents": 584286, "chunk_size": 369781} + "422": + application/json: {} + GetLibrary: + speakeasy-default-get-library: + parameters: + path: + library_id: "44e385d6-783e-4b21-8fae-5181e6817bc4" + responses: + "200": + application/json: {"id": "785b8f2b-04c4-4e51-aeee-10ba7210996d", "name": "", "created_at": "2026-06-02T21:55:32.209Z", "updated_at": "2026-06-28T12:11:02.341Z", "owner_id": "489e38bd-4195-4ab1-a06d-f1253bcc0e7a", "owner_type": "", "total_size": 733226, "nb_documents": 896348, "chunk_size": 594373} + "422": + application/json: {} + DeleteLibrary: + speakeasy-default-delete-library: + parameters: + path: + library_id: "441ba08a-3d1f-4700-8d6f-f32eeed49dff" + responses: + "200": + application/json: {"id": "a03c22a9-d4f2-4735-806c-b8497fe2a882", "name": "", "created_at": "2024-03-20T22:16:14.073Z", "updated_at": "2025-08-10T22:18:39.851Z", "owner_id": null, "owner_type": "", "total_size": 735078, "nb_documents": 443485, "chunk_size": 738927} + "422": + application/json: {} + UpdateLibrary: + speakeasy-default-update-library: + parameters: + path: + library_id: "27049553-3425-49ce-b965-fcb3a7ab03a3" + requestBody: + application/json: {} + 
responses: + "200": + application/json: {"id": "0c44cb97-9c48-4e8b-9837-239b80130faf", "name": "", "created_at": "2025-02-22T01:07:38.404Z", "updated_at": "2024-01-02T09:35:39.994Z", "owner_id": "9ea3bb36-40f8-41f9-ba61-d6f71a725ff2", "owner_type": "", "total_size": 234996, "nb_documents": 664396, "chunk_size": 337104} + "422": + application/json: {} + ListDocuments: + speakeasy-default-list-documents: + parameters: + path: + library_id: "05e1bda5-99b1-4baf-bb03-905d8e094f74" + query: + page_size: 100 + page: 0 + sort_by: "created_at" + sort_order: "desc" + responses: + "200": + application/json: {"pagination": {"total_items": 985775, "total_pages": 196446, "current_page": 86746, "page_size": 671573, "has_more": false}, "data": [{"id": "9b168ce6-0e63-4d0a-b784-71cab0b43775", "library_id": "01d6c3ae-df9c-448d-8e84-873b6588d655", "hash": "", "mime_type": "", "extension": "shtml", "size": null, "name": "", "created_at": "2024-06-29T16:51:59.433Z", "processing_status": "", "uploaded_by_id": "ce40c587-9bb9-48d4-8bd3-5ce14f8f07c8", "uploaded_by_type": "", "tokens_processing_total": 288046}]} + "422": + application/json: {} + UploadDocument: + speakeasy-default-upload-document: + parameters: + path: + library_id: "f973c54e-979a-4464-9d36-8cc31beb21fe" + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "a13f4191-9721-413d-ac5c-b8edadbfb34e", "library_id": "a6ea3cdd-242f-4132-baf8-9a2589d78cb2", "hash": "", "mime_type": "", "extension": "mp4v", "size": 731796, "name": "", "created_at": "2024-04-30T08:38:55.667Z", "processing_status": "", "uploaded_by_id": "fd1426b3-90f8-4b54-97de-c4f108cb2a63", "uploaded_by_type": "", "tokens_processing_total": 603440} + "422": + application/json: {} + GetDocument: + speakeasy-default-get-document: + parameters: + path: + library_id: "f9902d0a-1ea4-4953-be48-52df6edd302a" + document_id: "c3e12fd9-e840-46f2-8d4a-79985ed36d24" + responses: + "200": + application/json: {"id": 
"52c93ba5-b31c-4717-a099-f3415e6d4eea", "library_id": "912f1e36-456c-4551-bd6d-535931a66817", "hash": "", "mime_type": "", "extension": "wav", "size": null, "name": "", "created_at": "2026-09-30T22:43:59.455Z", "processing_status": "", "uploaded_by_id": "8578215b-d0b4-4ee2-857d-dcb0686d45f1", "uploaded_by_type": "", "tokens_processing_total": 833979} + "422": + application/json: {} + UpdateDocument: + speakeasy-default-update-document: + parameters: + path: + library_id: "3b900c67-d2b6-4637-93f2-3eff2c85f8dd" + document_id: "66f935fd-37ec-441f-bca5-b1129befcbca" + requestBody: + application/json: {} + responses: + "200": + application/json: {"id": "6a5ec2ab-bd54-4cc8-a761-e51374243293", "library_id": "f8b3b9a7-bb4b-4b47-b3b2-bb6db5e92901", "hash": "", "mime_type": "", "extension": "gif", "size": null, "name": "", "created_at": "2025-11-09T02:41:11.680Z", "processing_status": "", "uploaded_by_id": "0f707dfd-bd39-42ad-9748-c0b305a13eb6", "uploaded_by_type": "", "tokens_processing_total": 170388} + "422": + application/json: {} + DeleteDocument: + speakeasy-default-delete-document: + parameters: + path: + library_id: "c728d742-7845-462b-84ad-2aacbaf1c7cf" + document_id: "ed3f5797-846a-4abe-8e30-39b2fd2323e0" + responses: + "422": + application/json: {} + GetDocumentTextContent: + speakeasy-default-get-document-text-content: + parameters: + path: + library_id: "12689dc1-50df-4a0d-8202-2757f7a8c141" + document_id: "9d4057e9-d112-437c-911e-6ee031389739" + responses: + "200": + application/json: {"text": ""} + "422": + application/json: {} + GetDocumentStatus: + speakeasy-default-get-document-status: + parameters: + path: + library_id: "41bb33c4-7e53-453d-bf21-398bb2862772" + document_id: "416b95cf-19c8-45af-84be-26aaa3ab3666" + responses: + "200": + application/json: {"document_id": "b5b43c40-8e91-41d9-933c-096ee588639a", "processing_status": ""} + "422": + application/json: {} + GetDocumentSignedUrl: + speakeasy-default-get-document-signed-url: + parameters: + path: + 
library_id: "2dbbe172-1374-41be-b03d-a088c733612e" + document_id: "b5d88764-47f1-4485-9df1-658775428344" + responses: + "200": + application/json: "" + "422": + application/json: {} + GetDocumentExtractedTextSignedUrl: + speakeasy-default-get-document-extracted-text-signed-url: + parameters: + path: + library_id: "46d040ce-ae2e-4891-a54c-cdab6a8f62d8" + document_id: "3eddbfe2-3fd7-47f5-984b-b378e6950e37" + responses: + "200": + application/json: "" + "422": + application/json: {} + ReprocessDocument: + speakeasy-default-reprocess-document: + parameters: + path: + library_id: "76d357e4-d891-40c6-9d1e-6d6ce5056ee0" + document_id: "09798d2b-8f46-46c6-9765-8054a82a4bb2" + responses: + "422": + application/json: {} + ListLibraryAccesses: + speakeasy-default-list-library-accesses: + parameters: + path: + library_id: "9eb628ef-f118-47eb-b3cc-9750c4ca5fb6" + responses: + "200": + application/json: {"data": [{"library_id": "98821ea0-f6e2-444d-b922-e649cd549a2a", "org_id": "a33230f8-b93d-4f45-80ce-b45e8dd8b5fe", "role": "", "share_with_type": "", "share_with_uuid": "0e1f6eb2-b59e-4e38-b916-382b3383c228"}]} + "422": + application/json: {} + UpdateOrCreateLibraryAccess: + speakeasy-default-update-or-create-library-access: + parameters: + path: + library_id: "88bb030c-1cb5-4231-ba13-742c56554876" + requestBody: + application/json: {"level": "Viewer", "share_with_uuid": "6a736283-c1fa-49b0-9b6d-ea9309c0a766", "share_with_type": "Workspace"} + responses: + "200": + application/json: {"library_id": "b783a30a-ca47-4c15-8095-dee3502846e5", "org_id": "6721ec8e-e0c0-4e8e-be83-3c01f2f884a5", "role": "", "share_with_type": "", "share_with_uuid": null} + "422": + application/json: {} + DeleteLibraryAccess: + speakeasy-default-delete-library-access: + parameters: + path: + library_id: "fc7ab1cf-e33c-4791-a6e0-95ff1f921c43" + requestBody: + application/json: {"share_with_uuid": "5818ddff-3568-40f1-a9e4-39d6cb9f5c94", "share_with_type": "Org"} + responses: + "200": + application/json: 
{"library_id": "6eeb1c0b-8c49-4745-8e3a-eef5bace0782", "org_id": "36550d6e-a514-4601-bd5b-7a0978aab0c7", "role": "", "share_with_type": "", "share_with_uuid": "023a9d84-8615-44a6-acd3-59b113a45c43"} + "422": + application/json: {} + UploadFile: + userExample: + requestBody: + multipart/form-data: {"file": "x-file: example.file"} + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341"} + ListFiles: + userExample: + parameters: + query: + page: 0 + page_size: 100 + include_total: true + responses: + "200": + application/json: {"data": [{"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}, {"id": "", "object": "file", "bytes": 13000, "created_at": 1759491994, "filename": "", "purpose": "batch", "sample_type": "batch_result", "num_lines": 2, "mimetype": "application/jsonl", "source": "mistral", "signature": null}], "object": "list", "total": 2} + RetrieveFile: + userExample: + parameters: + path: + file_id: "654a62d9-b7ee-49ac-835e-af4153e3c9ec" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "bytes": 13000, "created_at": 1759500189, "filename": "example.file.jsonl", "purpose": "fine-tune", "sample_type": "instruct", "num_lines": 2, "mimetype": "application/jsonl", "source": "upload", "signature": "d4821d2de1917341", "deleted": false} + DeleteFile: + userExample: + parameters: + path: + file_id: "789c27a4-69de-47c6-b67f-cf6e56ce9f41" + responses: + "200": + application/json: {"id": "e85980c9-409e-4a46-9304-36588f6292b0", "object": "file", "deleted": true} + 
DownloadFile: + speakeasy-default-download-file: + parameters: + path: + file_id: "e2ba278e-eac9-4050-ae8e-ec433e124efb" + responses: + "200": + application/octet-stream: "x-file: example.file" + GetFileSignedUrl: + userExample: + parameters: + path: + file_id: "7a0c108d-9e6b-4c47-990d-a20cba50b283" + query: + expiry: 24 + responses: + "200": + application/json: {"url": "https://mistralaifilesapiprodswe.blob.core.windows.net/fine-tune/.../.../e85980c9409e4a46930436588f6292b0.jsonl?se=2025-10-04T14%3A16%3A17Z&sp=r&sv=2025-01-05&sr=b&sig=..."} + ListFineTuningJobs: + speakeasy-default-list-fine-tuning-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + responses: + "200": + application/json: {"object": "list", "total": 677316} + CreateFineTuningJob: + speakeasy-default-create-fine-tuning-job: + requestBody: + application/json: {"model": "Countach", "invalid_sample_skip_percentage": 0, "hyperparameters": {"learning_rate": 0.0001}} + responses: + "200": + application/json: {"id": "18371b47-e157-4d80-8d09-2687df8868e8", "auto_start": false, "model": "Fiesta", "status": "FAILED", "created_at": 475667, "modified_at": 452225, "training_files": [], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}} + GetFineTuningJob: + speakeasy-default-get-fine-tuning-job: + parameters: + path: + job_id: "2855f873-414e-4cf5-a46e-e589e39ee809" + responses: + "200": + application/json: {"id": "b9f4ad32-1400-4751-8e0d-16c09b4b26e6", "auto_start": true, "model": "LeBaron", "status": "QUEUED", "created_at": 458966, "modified_at": 377090, "training_files": ["52d812c3-b5fe-4866-878e-39a5910f91df"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": [], "weight": 6490.15, "loss_function": "single_class"}]} + CancelFineTuningJob: + speakeasy-default-cancel-fine-tuning-job: + parameters: + path: + job_id: 
"ee7d6f03-fcbb-43ca-8f17-0388c0832eb9" + responses: + "200": + application/json: {"id": "24b50383-3de5-4711-a14f-b71bbeccc6c5", "auto_start": true, "model": "Countach", "status": "CANCELLED", "created_at": 148194, "modified_at": 80833, "training_files": ["13ba2c85-5db5-4c14-94e4-2fcf030cecae", "85892e4f-5c84-4f38-bfb8-01072484489c", "723f89f0-65c0-43fa-9a9f-296acfe91134"], "object": "job", "job_type": "classifier", "hyperparameters": {"learning_rate": 0.0001}, "classifier_targets": [{"name": "", "labels": ["", ""], "weight": 1717.09, "loss_function": "single_class"}]} + StartFineTuningJob: + speakeasy-default-start-fine-tuning-job: + parameters: + path: + job_id: "da371429-0ec2-4cea-b9c7-73ce3a1dd76f" + responses: + "200": + application/json: {"id": "2628c0c5-a98f-4d0b-a22a-fba0b0b23112", "auto_start": false, "model": "Model 3", "status": "QUEUED", "created_at": 139851, "modified_at": 571341, "training_files": ["856f394d-d216-41ab-8fa1-a42fba9e7734"], "object": "job", "job_type": "completion", "hyperparameters": {"learning_rate": 0.0001}} + ListBatchJobs: + speakeasy-default-list-batch-jobs: + parameters: + query: + page: 0 + page_size: 100 + created_by_me: false + order_by: "-created" + responses: + "200": + application/json: {"object": "list", "total": 853018} + CreateBatchJob: + speakeasy-default-create-batch-job: + requestBody: + application/json: {"endpoint": "/v1/classifications", "model": "mistral-small-latest", "timeout_hours": 24} + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["936962bc-f885-485f-914e-fe90c1d312f9", "02e65e71-6f9f-4c39-9a54-bba0acb1e912", "4c5d848d-d86e-43cb-a795-1eaba0c96608"], "endpoint": "", "errors": [], "status": "SUCCESS", "created_at": 346291, "total_requests": 784915, "completed_requests": 663597, "succeeded_requests": 195848, "failed_requests": 688098} + GetBatchJob: + speakeasy-default-get-batch-job: + parameters: + path: + job_id: "358c80a1-79bd-43f0-8f0e-8186713aa3ba" + responses: + 
"200": + application/json: {"id": "", "object": "batch", "input_files": ["782a7fa0-6ea1-4be9-bce9-9ff61f81530d"], "endpoint": "", "errors": [{"message": "", "count": 1}], "status": "SUCCESS", "created_at": 878725, "total_requests": 913781, "completed_requests": 964506, "succeeded_requests": 119373, "failed_requests": 490093} + CancelBatchJob: + speakeasy-default-cancel-batch-job: + parameters: + path: + job_id: "393537d7-8b33-4931-a289-7f61f8757eda" + responses: + "200": + application/json: {"id": "", "object": "batch", "input_files": ["7309e534-200e-43a4-83c5-dc4c2a14c745"], "endpoint": "", "errors": [], "status": "FAILED", "created_at": 157212, "total_requests": 188914, "completed_requests": 685483, "succeeded_requests": 127060, "failed_requests": 428561} examplesVersion: 1.0.2 generatedTests: {} generatedFiles: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 20576b9d..23b915b5 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -22,13 +22,14 @@ generation: schemas: allOfMergeStrategy: shallowMerge requestBodyFieldName: "" - persistentEdits: {} + persistentEdits: + enabled: "true" tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0a2 + version: 2.0.0a3 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock b/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock deleted file mode 100644 index d6937e41..00000000 --- a/.speakeasy/generated-files-2d045ec7-2ebb-4f4d-ad25-40953b132161.lock +++ /dev/null @@ -1,799 +0,0 @@ -src/mistralai/client/_hooks/sdkhooks.py -docs/models/messageoutputeventcontent.md -docs/models/classificationresponse.md -docs/models/tooltypes.md -docs/models/toolexecutionstartedevent.md -docs/models/unarchiveftmodeloutobject.md -src/mistralai/client/models/conversationrequest.py -docs/models/agentconversationobject.md -src/mistralai/client/models/listlibraryout.py 
-docs/models/agentsapiv1agentsupdaterequest.md -src/mistralai/client/models/agentaliasresponse.py -docs/models/embeddingresponse.md -docs/models/agentsapiv1agentsgetversionrequest.md -src/mistralai/client/models/libraryin.py -docs/models/agentscompletionstreamrequest.md -docs/models/function.md -docs/models/agentsapiv1agentsgetagentversion.md -docs/models/imagegenerationtool.md -docs/models/classifiertargetin.md -src/mistralai/client/models/realtimetranscriptionsession.py -src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py -docs/models/agentsapiv1conversationsgetrequest.md -docs/models/messageoutputentry.md -docs/models/classificationrequestinputs.md -docs/models/chatcompletionrequestmessage.md -docs/models/thinking.md -src/mistralai/client/models/conversationinputs.py -docs/models/functionresultentry.md -docs/models/fimcompletionstreamrequeststop.md -docs/models/librariesupdatev1request.md -src/mistralai/client/models/agents_api_v1_conversations_appendop.py -docs/models/paginationinfo.md -docs/models/agenthandoffentry.md -docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md -docs/models/moderationresponse.md -docs/models/toolexecutionentryobject.md -docs/models/completionresponsestreamchoicefinishreason.md -docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md -docs/models/modelcapabilities.md -docs/models/responseformats.md -docs/models/agentupdaterequest.md -src/mistralai/client/models/transcriptionstreamsegmentdelta.py -docs/models/sharingin.md -docs/models/responseformat.md -docs/models/imageurl.md -src/mistralai/client/models/processingstatusout.py -docs/models/messageoutputevent.md -src/mistralai/client/models/conversationusageinfo.py -src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py -src/mistralai/client/models/agents_api_v1_agents_get_versionop.py -src/mistralai/client/models/libraries_documents_get_v1op.py -docs/models/attributes.md -docs/models/agentscompletionrequeststop.md 
-src/mistralai/client/models/moderationresponse.py -src/mistralai/client/models/classifiertrainingparametersin.py -docs/models/audiochunk.md -src/mistralai/client/models/ocrrequest.py -src/mistralai/client/models/file.py -src/mistralai/client/models/ocrresponse.py -src/mistralai/client/models/classifiertargetin.py -docs/models/agentconversationagentversion.md -docs/models/classificationtargetresult.md -docs/models/tableformat.md -docs/models/classifiertrainingparameters.md -src/mistralai/client/models/shareenum.py -.vscode/settings.json -docs/models/messageoutputentrycontent.md -py.typed -docs/models/agentscompletionrequest.md -docs/models/completionjoboutrepository.md -src/mistralai/client/models/batchrequest.py -docs/models/entry.md -src/mistralai/client/models/modelcapabilities.py -docs/models/file.md -src/mistralai/client/models/mistralpromptmode.py -scripts/publish.sh -docs/models/agentscompletionstreamrequestmessage.md -docs/models/messageinputentrytype.md -src/mistralai/client/__init__.py -src/mistralai/client/_version.py -src/mistralai/client/models/ocrpageobject.py -docs/models/ocrimageobject.md -src/mistralai/client/basesdk.py -docs/models/ocrpagedimensions.md -src/mistralai/client/httpclient.py -docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md -src/mistralai/client/py.typed -src/mistralai/client/types/__init__.py -docs/models/agentsapiv1agentsupdateversionrequest.md -src/mistralai/client/types/basemodel.py -src/mistralai/client/utils/__init__.py -src/mistralai/client/utils/annotations.py -src/mistralai/client/utils/datetimes.py -src/mistralai/client/utils/enums.py -src/mistralai/client/models/inputs.py -src/mistralai/client/utils/eventstreaming.py -src/mistralai/client/utils/forms.py -src/mistralai/client/utils/headers.py -src/mistralai/client/models/legacyjobmetadataout.py -src/mistralai/client/utils/logger.py -src/mistralai/client/utils/metadata.py -src/mistralai/client/utils/queryparams.py -src/mistralai/client/utils/requestbodies.py 
-src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py -docs/models/toolexecutionentryname.md -src/mistralai/client/utils/retries.py -src/mistralai/client/utils/security.py -src/mistralai/client/models/toolfilechunk.py -src/mistralai/client/utils/serializers.py -src/mistralai/client/models/transcriptionstreamdone.py -src/mistralai/client/utils/unmarshal_json_response.py -src/mistralai/client/utils/url.py -src/mistralai/client/utils/values.py -src/mistralai/client/models/responsevalidationerror.py -src/mistralai/client/models/retrievefileout.py -src/mistralai/client/models/mistralerror.py -docs/models/apiendpoint.md -src/mistralai/client/models/sdkerror.py -docs/models/jobsout.md -src/mistralai/client/models/no_response_error.py -docs/models/conversationrestartstreamrequesthandoffexecution.md -docs/models/functiontool.md -docs/models/agentsapiv1conversationsappendstreamrequest.md -docs/models/agenthandoffentryobject.md -docs/models/transcriptionstreameventtypes.md -docs/models/messageoutputeventrole.md -src/mistralai/client/models/modellist.py -docs/models/responseretrievemodelv1modelsmodelidget.md -docs/models/referencechunktype.md -docs/models/chatclassificationrequest.md -src/mistralai/client/models/responseformats.py -docs/models/librariesdocumentsdeletev1request.md -src/mistralai/client/models/conversationresponse.py -src/mistralai/client/models/completionargsstop.py -src/mistralai/client/models/contentchunk.py -docs/models/classifierdetailedjoboutstatus.md -docs/models/listlibraryout.md -docs/models/transcriptionstreamevents.md -src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py -docs/models/chatcompletionrequeststop.md -src/mistralai/client/models/libraries_update_v1op.py -src/mistralai/client/models/websearchtool.py -src/mistralai/client/models/classifiertrainingparameters.py -docs/models/validationerror.md -src/mistralai/client/models/documentlibrarytool.py 
-src/mistralai/client/models/responsestartedevent.py -docs/models/document.md -src/mistralai/client/models/filesignedurl.py -src/mistralai/client/models/fimcompletionresponse.py -docs/models/agentscompletionstreamrequeststop.md -docs/models/agenthandoffentrytype.md -docs/models/conversationmessages.md -src/mistralai/client/models/responsedoneevent.py -docs/models/completionresponsestreamchoice.md -docs/models/fimcompletionresponse.md -src/mistralai/client/models/unarchiveftmodelout.py -src/mistralai/client/conversations.py -src/mistralai/client/models/toolexecutionstartedevent.py -src/mistralai/client/models/jsonschema.py -docs/models/completionftmodelout.md -src/mistralai/client/models/fimcompletionstreamrequest.py -docs/models/chatcompletionrequesttoolchoice.md -src/mistralai/client/models/tooltypes.py -src/mistralai/client/models/functionname.py -docs/models/functionresultentryobject.md -docs/models/classifierjobout.md -src/mistralai/client/models/listfilesout.py -src/mistralai/client/models/agents_api_v1_agents_listop.py -src/mistralai/client/models/imageurl.py -src/mistralai/client/models/chatcompletionchoice.py -src/mistralai/client/sdk.py -docs/models/conversationrequesttool.md -docs/models/chatcompletionrequest.md -docs/models/librariesdeletev1request.md -src/mistralai/client/models/chatcompletionresponse.py -docs/models/toolreferencechunktool.md -src/mistralai/client/_hooks/types.py -src/mistralai/client/models/agents_api_v1_conversations_deleteop.py -docs/models/systemmessagecontentchunks.md -src/mistralai/client/models/sharingin.py -docs/models/completionjoboutobject.md -docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md -src/mistralai/client/models/functionresultentry.py -docs/models/agentsapiv1conversationsdeleterequest.md -docs/models/githubrepositoryout.md -docs/models/retrievemodelv1modelsmodelidgetrequest.md -docs/models/conversationstreamrequest.md -docs/models/agentsapiv1conversationsmessagesrequest.md -docs/models/sharingout.md 
-docs/models/archiveftmodelout.md -docs/models/listdocumentout.md -docs/models/toolreferencechunk.md -docs/models/instructrequestinputs.md -src/mistralai/client/models/deltamessage.py -src/mistralai/client/models/tool.py -src/mistralai/client/beta_agents.py -src/mistralai/client/models/toolcall.py -docs/models/jobin.md -src/mistralai/client/models/libraries_documents_upload_v1op.py -src/mistralai/client/models/toolexecutiondoneevent.py -docs/models/conversationrequestagentversion.md -docs/models/listsharingout.md -docs/models/completiondetailedjoboutrepository.md -docs/models/completionftmodeloutobject.md -src/mistralai/client/models/agentcreationrequest.py -docs/models/functioncallentry.md -src/mistralai/client/models/agents_api_v1_conversations_getop.py -src/mistralai/client/models/filepurpose.py -src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py -src/mistralai/client/models/jobsout.py -docs/models/agentsapiv1conversationsappendrequest.md -docs/models/jobsapiroutesbatchgetbatchjobsrequest.md -src/mistralai/client/models/audiotranscriptionrequest.py -src/mistralai/client/models/agents_api_v1_agents_update_versionop.py -src/mistralai/client/models/prediction.py -docs/models/conversationinputs.md -docs/models/agenthandoffdoneevent.md -docs/models/finetuneablemodeltype.md -src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py -src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py -docs/models/conversationrestartrequest.md -src/mistralai/client/models/ocrimageobject.py -docs/models/security.md -src/mistralai/client/models/libraryinupdate.py -docs/models/fimcompletionrequest.md -docs/models/ocrusageinfo.md -docs/models/completionjoboutintegration.md -src/mistralai/client/libraries.py -src/mistralai/client/models/wandbintegration.py -src/mistralai/client/models/ocrpagedimensions.py -src/mistralai/client/models/jobin.py -docs/models/conversationrestartstreamrequestagentversion.md 
-src/mistralai/client/models/libraries_documents_reprocess_v1op.py -docs/models/agentsapiv1agentsgetrequest.md -src/mistralai/client/models/paginationinfo.py -src/mistralai/client/models/jobmetadataout.py -docs/models/assistantmessage.md -src/mistralai/client/models/conversationappendstreamrequest.py -docs/models/librariesdocumentsgettextcontentv1request.md -docs/models/realtimetranscriptionerror.md -src/mistralai/client/models/completiondetailedjobout.py -src/mistralai/client/fine_tuning_jobs.py -src/mistralai/client/models/documentout.py -docs/models/librariesgetv1request.md -docs/models/referencechunk.md -src/mistralai/client/models/completiontrainingparameters.py -src/mistralai/client/agents.py -src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py -src/mistralai/client/models/toolchoice.py -docs/models/requestsource.md -docs/models/embeddingrequestinputs.md -src/mistralai/client/models/imagegenerationtool.py -src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py -docs/models/jobsoutobject.md -docs/models/librariesdocumentsreprocessv1request.md -src/mistralai/client/models/audiotranscriptionrequeststream.py -docs/models/tool.md -src/mistralai/client/models/uploadfileout.py -src/mistralai/client/models/timestampgranularity.py -src/mistralai/client/models/metricout.py -docs/models/jobmetadataout.md -src/mistralai/client/models/files_api_routes_upload_fileop.py -docs/models/chatmoderationrequestinputs1.md -src/mistralai/client/models/transcriptionstreameventtypes.py -src/mistralai/client/models/completionchunk.py -src/mistralai/client/models/conversationevents.py -docs/models/agent.md -src/mistralai/client/models/documenttextcontent.py -docs/models/embeddingresponsedata.md -docs/models/codeinterpretertool.md -src/mistralai/client/models/deletemodelout.py -docs/models/agenttool.md -src/mistralai/client/models/completionresponsestreamchoice.py -src/mistralai/client/models/audiochunk.py -docs/models/functioncallevent.md 
-docs/models/transcriptionstreamtextdelta.md -docs/models/completiontrainingparametersin.md -docs/models/conversationappendrequesthandoffexecution.md -docs/models/chatcompletionchoicefinishreason.md -src/mistralai/client/models/libraries_documents_get_status_v1op.py -docs/models/libraryinupdate.md -src/mistralai/client/models/modelconversation.py -docs/models/completiondetailedjobout.md -docs/models/realtimetranscriptionsessioncreated.md -docs/models/classifierjoboutobject.md -docs/models/filesapiroutesretrievefilerequest.md -src/mistralai/client/models/trainingfile.py -docs/models/multipartbodyparams.md -src/mistralai/client/models/libraries_delete_v1op.py -docs/models/sampletype.md -src/mistralai/client/models/functioncallevent.py -src/mistralai/client/models/imageurlchunk.py -src/mistralai/client/models/libraries_documents_delete_v1op.py -src/mistralai/client/models/agentconversation.py -src/mistralai/client/models/chatclassificationrequest.py -docs/models/ftmodelcapabilitiesout.md -docs/models/classifierftmodelout.md -docs/models/deletemodelv1modelsmodeliddeleterequest.md -docs/models/messageoutputentryrole.md -docs/models/eventout.md -docs/models/systemmessage.md -src/mistralai/client/models/sampletype.py -docs/models/conversationevents.md -docs/models/fileschema.md -src/mistralai/client/models/agentscompletionrequest.py -src/mistralai/client/models/chatmoderationrequest.py -src/mistralai/client/models/classifierftmodelout.py -docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md -docs/models/chatcompletionresponse.md -src/mistralai/client/models/toolmessage.py -src/mistralai/client/accesses.py -src/mistralai/client/models/source.py -docs/models/documenturlchunk.md -docs/models/updateftmodelin.md -src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py -docs/models/toolreferencechunktype.md -src/mistralai/client/models/files_api_routes_get_signed_urlop.py -src/mistralai/client/models/responseerrorevent.py 
-docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md -docs/models/thinkchunk.md -docs/models/agentcreationrequesttool.md -docs/models/completiondetailedjoboutobject.md -src/mistralai/client/models/filechunk.py -docs/models/agentcreationrequest.md -docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md -docs/models/utils/retryconfig.md -docs/models/loc.md -docs/models/filesignedurl.md -src/mistralai/client/models/embeddingdtype.py -docs/models/chatcompletionstreamrequest.md -docs/models/audioformat.md -docs/models/transcriptionstreamsegmentdelta.md -docs/models/inputsmessage.md -docs/models/instructrequest.md -src/mistralai/client/models/batchjobout.py -docs/models/classifiertargetout.md -docs/models/filesapiroutesgetsignedurlrequest.md -docs/models/conversationappendrequest.md -docs/models/legacyjobmetadataoutobject.md -src/mistralai/client/models/messageoutputentry.py -docs/models/messageinputentryobject.md -src/mistralai/client/models/embeddingresponse.py -src/mistralai/client/models/documenturlchunk.py -docs/models/usermessage.md -src/mistralai/client/models/apiendpoint.py -src/mistralai/client/models/batchjobstatus.py -docs/models/jobsapiroutesbatchgetbatchjobrequest.md -docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md -docs/models/wandbintegration.md -docs/models/conversationmessagesobject.md -docs/models/utils/retryconfig.md -docs/models/fimcompletionstreamrequest.md -docs/models/batchrequest.md -docs/models/agentsapiv1conversationslistresponse.md -docs/models/conversationhistory.md -docs/sdks/agents/README.md -docs/models/transcriptionresponse.md -src/mistralai/client/models/files_api_routes_download_fileop.py -src/mistralai/client/models/embeddingrequest.py -src/mistralai/client/models/transcriptionresponse.py -src/mistralai/client/models/libraries_documents_list_v1op.py -src/mistralai/client/models/githubrepositoryin.py -docs/models/librariesdocumentsgetstatusv1request.md -docs/models/modelconversationtool.md 
-.gitattributes -docs/models/functioncallentryarguments.md -src/mistralai/client/models/ftclassifierlossfunction.py -src/mistralai/client/batch.py -docs/models/classificationrequest.md -src/mistralai/client/models/realtimetranscriptionerrordetail.py -docs/models/hyperparameters.md -docs/models/utils/retryconfig.md -docs/models/moderationobject.md -docs/models/classifierjoboutstatus.md -docs/models/agentupdaterequesttool.md -docs/models/chatcompletionstreamrequestmessage.md -docs/models/completiondetailedjoboutintegration.md -src/mistralai/client/models/transcriptionstreamtextdelta.py -src/mistralai/client/models/libraries_get_v1op.py -docs/models/agentscompletionrequesttoolchoice.md -src/mistralai/client/models/deletefileout.py -docs/models/completionevent.md -src/mistralai/client/chat.py -src/mistralai/client/models/completiontrainingparametersin.py -docs/models/librariesdocumentsupdatev1request.md -docs/models/instructrequestmessage.md -src/mistralai/client/models/documentupdatein.py -docs/models/toolfilechunk.md -src/mistralai/client/models/messageinputcontentchunks.py -src/mistralai/client/models/files_api_routes_delete_fileop.py -docs/models/utils/retryconfig.md -docs/models/assistantmessagerole.md -docs/sdks/transcriptions/README.md -docs/models/librariessharedeletev1request.md -src/mistralai/client/models/moderationobject.py -docs/models/unarchiveftmodelout.md -src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py -docs/models/messageoutputentrytype.md -docs/models/functioncall.md -docs/models/toolexecutiondeltaevent.md -src/mistralai/client/models/realtimetranscriptionerror.py -docs/models/agentsapiv1agentslistrequest.md -src/mistralai/client/models/websearchpremiumtool.py -src/mistralai/client/models/realtimetranscriptionsessionupdated.py -src/mistralai/client/models/libraries_documents_get_text_content_v1op.py -docs/models/agentscompletionstreamrequesttoolchoice.md -docs/models/textchunk.md -docs/models/toolcall.md 
-docs/models/assistantmessagecontent.md -src/mistralai/client/models/chatcompletionrequest.py -src/mistralai/client/models/usermessage.py -docs/models/outputcontentchunks.md -docs/models/librariesdocumentsuploadv1request.md -docs/models/entitytype.md -src/mistralai/client/models/basemodelcard.py -docs/models/toolexecutionentrytype.md -docs/models/shareenum.md -docs/models/imageurlunion.md -docs/models/conversationappendstreamrequest.md -docs/models/websearchpremiumtool.md -docs/models/utils/retryconfig.md -docs/models/fimcompletionrequeststop.md -src/mistralai/client/models/classificationtargetresult.py -src/mistralai/client/audio.py -docs/models/chatmoderationrequestinputs3.md -docs/models/response.md -src/mistralai/client/models/referencechunk.py -docs/models/jobinrepository.md -src/mistralai/client/models/files_api_routes_retrieve_fileop.py -src/mistralai/client/sdkconfiguration.py -src/mistralai/client/models/agents_api_v1_conversations_messagesop.py -src/mistralai/client/models/instructrequest.py -src/mistralai/client/models/classifiertargetout.py -docs/models/classifierdetailedjoboutobject.md -src/mistralai/client/models/inputentries.py -src/mistralai/client/models/toolchoiceenum.py -docs/models/chatcompletionstreamrequesttoolchoice.md -docs/models/agentconversation.md -docs/models/utils/retryconfig.md -src/mistralai/client/models/functioncall.py -docs/models/mistralpromptmode.md -docs/models/conversationresponseobject.md -src/mistralai/client/models/ocrtableobject.py -src/mistralai/client/models/toolexecutionentry.py -docs/models/classifierdetailedjobout.md -docs/models/conversationresponse.md -docs/models/agentsapiv1agentslistversionaliasesrequest.md -docs/models/conversationeventsdata.md -src/mistralai/client/models/ocrusageinfo.py -src/mistralai/client/models/ftmodelcard.py -src/mistralai/client/models/libraries_share_list_v1op.py -docs/models/modellistdata.md -docs/models/messageoutputcontentchunks.md -docs/models/modelconversation.md 
-docs/models/batchjobstatus.md -docs/models/encodingformat.md -docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md -docs/models/utils/retryconfig.md -docs/models/modellist.md -docs/models/textchunktype.md -docs/models/completionargs.md -docs/models/agenthandoffstartedevent.md -docs/models/basemodelcard.md -src/mistralai/client/models/classifierjobout.py -docs/models/batchjobout.md -docs/models/conversationstreamrequestagentversion.md -docs/models/filesapiroutesdownloadfilerequest.md -src/mistralai/client/models/fileschema.py -docs/models/completiontrainingparameters.md -docs/models/wandbintegrationout.md -docs/models/agentobject.md -src/mistralai/client/models/classifierdetailedjobout.py -src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py -USAGE.md -docs/models/deltamessage.md -docs/models/messageinputentry.md -docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md -docs/models/filechunk.md -src/mistralai/client/models/agent.py -src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py -src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py -docs/models/classifierftmodeloutobject.md -src/mistralai/client/models/ftmodelcapabilitiesout.py -src/mistralai/client/models/listsharingout.py -src/mistralai/client/models/systemmessagecontentchunks.py -src/mistralai/client/models/agents_api_v1_agents_updateop.py -docs/models/retrievefileout.md -src/mistralai/client/models/agents_api_v1_conversations_historyop.py -src/mistralai/client/fim.py -docs/models/embeddingdtype.md -src/mistralai/client/models/conversationrestartstreamrequest.py -src/mistralai/client/models/completionargs.py -docs/models/toolexecutionstartedeventname.md -src/mistralai/client/models/transcriptionstreamlanguage.py -docs/models/librariessharelistv1request.md -src/mistralai/client/fine_tuning.py -docs/models/agentsapiv1conversationsrestartrequest.md -docs/models/conversationrestartstreamrequest.md 
-docs/models/transcriptionstreamlanguage.md -docs/models/toolexecutiondoneeventname.md -docs/models/classifierjoboutintegration.md -docs/models/classifiertrainingparametersin.md -src/mistralai/client/models/agentupdaterequest.py -docs/models/agentscompletionrequestmessage.md -docs/models/chatmoderationrequest.md -docs/models/chatcompletionchoice.md -docs/models/batchjoboutobject.md -docs/models/toolchoiceenum.md -docs/models/ocrrequest.md -src/mistralai/client/models/updateftmodelin.py -docs/models/classifierdetailedjoboutintegration.md -src/mistralai/client/models/agenthandoffdoneevent.py -src/mistralai/client/models/files_api_routes_list_filesop.py -src/mistralai/client/ocr.py -docs/models/embeddingrequest.md -src/mistralai/client/models/conversationstreamrequest.py -src/mistralai/client/models/thinkchunk.py -docs/models/toolchoice.md -docs/models/documentupload.md -docs/models/imageurlchunktype.md -docs/models/conversationrestartrequestagentversion.md -docs/models/transcriptionstreamdone.md -src/mistralai/client/models/libraryout.py -src/mistralai/client/models/conversationappendrequest.py -src/mistralai/client/models/audioformat.py -docs/models/conversationhistoryobject.md -docs/models/ftclassifierlossfunction.md -docs/models/websearchtool.md -docs/models/messageoutputentryobject.md -src/mistralai/client/models/batchjobsout.py -docs/models/agentsapiv1agentsdeleterequest.md -docs/models/libraryout.md -docs/models/batchjobsoutobject.md -docs/models/functionresultentrytype.md -src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py -docs/models/completionjoboutstatus.md -docs/models/documenttextcontent.md -docs/models/legacyjobmetadataout.md -docs/models/prediction.md -src/mistralai/client/models_.py -src/mistralai/client/models/sharingdelete.py -src/mistralai/client/models/usageinfo.py -docs/models/thinkchunktype.md -docs/models/agentsapiv1conversationshistoryrequest.md 
-src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py -src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py -src/mistralai/client/models/responseformat.py -docs/models/builtinconnectors.md -docs/models/realtimetranscriptionsession.md -docs/models/documentlibrarytool.md -docs/models/toolfilechunktool.md -docs/models/messageinputcontentchunks.md -src/mistralai/client/models/checkpointout.py -src/mistralai/client/models/validationerror.py -docs/models/utils/retryconfig.md -docs/models/chatmoderationrequestinputs2.md -docs/models/format_.md -docs/sdks/batchjobs/README.md -docs/models/agentsapiv1conversationsrestartstreamrequest.md -docs/models/filesapiroutesdeletefilerequest.md -docs/models/toolexecutionentry.md -docs/models/metricout.md -src/mistralai/client/models/completionjobout.py -docs/models/deletefileout.md -docs/models/functioncallentrytype.md -docs/models/filepurpose.md -docs/models/transcriptionsegmentchunk.md -docs/models/usageinfo.md -docs/models/responsev1conversationsget.md -src/mistralai/client/models/ssetypes.py -src/mistralai/client/models/audioencoding.py -docs/models/librariesdocumentsgetsignedurlv1request.md -docs/models/timestampgranularity.md -docs/models/conversationrequesthandoffexecution.md -src/mistralai/client/transcriptions.py -src/mistralai/client/models/function.py -src/mistralai/client/models/toolexecutiondeltaevent.py -docs/models/conversationappendstreamrequesthandoffexecution.md -docs/models/realtimetranscriptionerrordetail.md -docs/models/toolexecutiondeltaeventname.md -src/mistralai/client/models/__init__.py -src/mistralai/client/models/codeinterpretertool.py -docs/models/utils/retryconfig.md -docs/models/completiondetailedjoboutstatus.md -docs/models/librariesdocumentsgetv1request.md -src/mistralai/client/models/messageoutputevent.py -src/mistralai/client/models/agentscompletionstreamrequest.py -src/mistralai/client/models/textchunk.py 
-docs/models/conversationstreamrequesttool.md -docs/models/systemmessagecontent.md -docs/models/agentsapiv1conversationslistrequest.md -docs/models/chatcompletionstreamrequeststop.md -docs/models/responseerrorevent.md -docs/models/usermessagecontent.md -docs/models/audioencoding.md -docs/models/messageinputentryrole.md -docs/models/inputentries.md -src/mistralai/client/models/agents_api_v1_conversations_restartop.py -src/mistralai/client/models/messageentries.py -docs/models/ocrpageobject.md -src/mistralai/client/models/completionevent.py -src/mistralai/client/models/batchjobin.py -src/mistralai/client/models/requestsource.py -src/mistralai/client/models/fimcompletionrequest.py -docs/models/utils/retryconfig.md -src/mistralai/client/models/sharingout.py -docs/models/messageentries.md -docs/models/jobsoutdata.md -src/mistralai/client/batch_jobs.py -src/mistralai/client/models/messageinputentry.py -docs/models/uploadfileout.md -src/mistralai/client/models/finetuneablemodeltype.py -docs/models/documentupdatein.md -docs/models/toolmessagecontent.md -docs/models/utils/retryconfig.md -docs/models/documentout.md -docs/models/functionname.md -src/mistralai/client/documents.py -src/mistralai/client/models/realtimetranscriptionsessioncreated.py -docs/models/conversationstreamrequesthandoffexecution.md -docs/models/ocrresponse.md -src/mistralai/client/models/libraries_share_create_v1op.py -docs/models/functioncallentryobject.md -docs/models/httpvalidationerror.md -src/mistralai/client/models/agents_api_v1_agents_getop.py -docs/models/responsedoneevent.md -docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md -docs/models/utils/retryconfig.md -src/mistralai/client/models/completionftmodelout.py -docs/models/utils/retryconfig.md -src/mistralai/client/files.py -docs/models/batchjobsout.md -docs/models/audiotranscriptionrequeststream.md -src/mistralai/client/models/functioncallentryarguments.py -docs/models/responsestartedevent.md 
-src/mistralai/client/models/agents_api_v1_agents_deleteop.py -docs/models/utils/retryconfig.md -docs/models/completionchunk.md -src/mistralai/client/models/agents_api_v1_conversations_listop.py -src/mistralai/client/models/archiveftmodelout.py -docs/models/agentaliasresponse.md -docs/models/realtimetranscriptionsessionupdated.md -docs/models/batcherror.md -docs/models/contentchunk.md -docs/models/source.md -docs/models/utils/retryconfig.md -docs/models/toolexecutiondoneevent.md -docs/models/realtimetranscriptionerrordetailmessage.md -docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md -docs/models/sharingdelete.md -docs/models/agentsapiv1agentscreateorupdatealiasrequest.md -docs/models/completionjobout.md -docs/models/conversationrequest.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/betaagents/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/conversations/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/libraries/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/accesses/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md 
-docs/sdks/documents/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/chat/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/classifiers/README.md -docs/models/utils/retryconfig.md -docs/sdks/embeddings/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/files/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/fim/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/finetuningjobs/README.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/models/utils/retryconfig.md -docs/sdks/models/README.md -docs/models/utils/retryconfig.md -docs/sdks/ocr/README.md -docs/models/processingstatusout.md -docs/models/completionargsstop.md -docs/models/ocrtableobject.md -src/mistralai/client/models/assistantmessage.py -src/mistralai/client/models/libraries_documents_update_v1op.py -src/mistralai/client/models/agenthandoffstartedevent.py -src/mistralai/client/models/eventout.py -src/mistralai/client/models/toolreferencechunk.py -docs/models/githubrepositoryin.md -src/mistralai/client/models/messageoutputcontentchunks.py -src/mistralai/client/models/agenthandoffentry.py -docs/models/jsonschema.md -docs/models/conversationrestartrequesthandoffexecution.md -docs/models/listfilesout.md -src/mistralai/client/models/transcriptionstreamevents.py -docs/models/ftmodelcard.md -docs/models/jobinintegration.md -src/mistralai/client/models/conversationrestartrequest.py 
-src/mistralai/client/models/encodingformat.py -docs/models/deltamessagecontent.md -src/mistralai/client/models/outputcontentchunks.py -docs/models/toolfilechunktype.md -src/mistralai/client/_hooks/__init__.py -src/mistralai/client/models/entitytype.py -docs/models/deletemodelout.md -src/mistralai/client/embeddings.py -docs/models/documenturlchunktype.md -docs/models/batchjobin.md -src/mistralai/client/models/wandbintegrationout.py -docs/models/transcriptionstreameventsdata.md -src/mistralai/client/models/classificationresponse.py -docs/models/trainingfile.md -src/mistralai/client/models/transcriptionsegmentchunk.py -docs/models/audiotranscriptionrequest.md -src/mistralai/client/models/githubrepositoryout.py -src/mistralai/client/models/functiontool.py -docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md -docs/models/conversationusageinfo.md -docs/models/ssetypes.md -src/mistralai/client/models/listdocumentout.py -docs/models/libraryin.md -src/mistralai/client/models/libraries_share_delete_v1op.py -src/mistralai/client/models/systemmessage.py -src/mistralai/client/models/chatcompletionstreamrequest.py -src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py -docs/models/filesapirouteslistfilesrequest.md -docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md -src/mistralai/client/models/security.py -docs/models/modelconversationobject.md -src/mistralai/client/models/conversationmessages.py -docs/models/output.md -src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py -src/mistralai/client/models/classificationrequest.py -docs/models/librariesdocumentslistv1request.md -docs/models/toolmessage.md -docs/models/agentsapiv1agentslistversionsrequest.md -src/mistralai/client/models/embeddingresponsedata.py -src/mistralai/client/models/conversationhistory.py -docs/models/librariessharecreatev1request.md -docs/models/messageinputentrycontent.md -src/mistralai/client/models/functioncallentry.py 
-src/mistralai/client/models/builtinconnectors.py -src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py -src/mistralai/client/models/httpvalidationerror.py -src/mistralai/client/classifiers.py -docs/models/transcriptionsegmentchunktype.md -docs/models/arguments.md -docs/models/checkpointout.md -src/mistralai/client/beta.py -docs/models/archiveftmodeloutobject.md -docs/models/jobsapiroutesbatchcancelbatchjobrequest.md -docs/models/imageurlchunk.md -src/mistralai/client/models/batcherror.py -docs/models/inputs.md diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index a0e535c2..a9e18489 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -2,57 +2,57 @@ speakeasyVersion: 1.685.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:544a7fd4d099e72a9a12681b326d44201f1b163e4df2f5fd643d831167255d84 - sourceBlobDigest: sha256:41c72401329a30983907c32a60063da8ccd82137cf79d7f452089b5b83bb9d92 + sourceRevisionDigest: sha256:d303e640ad565cc8a9801519b20dc7eab226efdfdab951c11256962d9e479f74 + sourceBlobDigest: sha256:6e4c789de61b2c9c604bf581e0abbadae90e360491d95ec4247678f4f70cee87 tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:e3dd3079347edf744151936aaee4ec0ce3eeeb8f46b5c7f31f8e224221e879d4 - sourceBlobDigest: sha256:7a525230930debff23fec4e92e3ad2a57889ea46de86cc96d519615709ae8a16 + sourceRevisionDigest: sha256:351c4d392b8b2220c337a207e98ed5665ed27fd85de854871a70c4bc2b9c0784 + sourceBlobDigest: sha256:d79b21f70efb93b0cd261d2044939a288beaf8707a7caae86aca5c4d5de3821b tags: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13 - sourceBlobDigest: sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56 + sourceRevisionDigest: sha256:74d0de7750f6a1878b68c9da683eba7a447d7c367131d0cb8f5c3b1e05829624 + 
sourceBlobDigest: sha256:41e8354c48993fc29be68959d835ea4f8e0cc1d4b4fbd527afcd970bc02c62a2 tags: - latest targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:544a7fd4d099e72a9a12681b326d44201f1b163e4df2f5fd643d831167255d84 - sourceBlobDigest: sha256:41c72401329a30983907c32a60063da8ccd82137cf79d7f452089b5b83bb9d92 + sourceRevisionDigest: sha256:d303e640ad565cc8a9801519b20dc7eab226efdfdab951c11256962d9e479f74 + sourceBlobDigest: sha256:6e4c789de61b2c9c604bf581e0abbadae90e360491d95ec4247678f4f70cee87 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:57821a9bf6cfe7001dfcbcaa2f17b233b98c2f79e2d7588540c41750f10b9c05 + codeSamplesRevisionDigest: sha256:0109302b87fa17b0103ef1e372fae76356811b3c552103e659bd5373d537d759 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:e3dd3079347edf744151936aaee4ec0ce3eeeb8f46b5c7f31f8e224221e879d4 - sourceBlobDigest: sha256:7a525230930debff23fec4e92e3ad2a57889ea46de86cc96d519615709ae8a16 + sourceRevisionDigest: sha256:351c4d392b8b2220c337a207e98ed5665ed27fd85de854871a70c4bc2b9c0784 + sourceBlobDigest: sha256:d79b21f70efb93b0cd261d2044939a288beaf8707a7caae86aca5c4d5de3821b codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:404d5964361b3ced085b11e4b8408c36a4a92efe12a97f7497919efdf7594f6f + codeSamplesRevisionDigest: sha256:09bb7cbf291076170d228116db05d1c9606af541b301b6564609c4d76633258a mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:4e49849eba5334a3fe4a3d081baa9afdecd8f41dfc4c2a5115bc19ead4d92d13 - sourceBlobDigest: sha256:3ab3c61ac6a4e9fab37d924d516838ca27dd7e57a1b5e9059d4db2ef29efec56 + sourceRevisionDigest: sha256:74d0de7750f6a1878b68c9da683eba7a447d7c367131d0cb8f5c3b1e05829624 + sourceBlobDigest: 
sha256:41e8354c48993fc29be68959d835ea4f8e0cc1d4b4fbd527afcd970bc02c62a2 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:debd698577e8da014e900a57194128d867ad76fd0d2e2b361e9d0c298700fc67 + codeSamplesRevisionDigest: sha256:f37fb6188ad25957bef4cadaa03f454a4f9ab0c045db633a46d9cc89af145ba2 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:v2 mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:v2 mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:v2 targets: mistralai-azure-sdk: target: python diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index 8557ab42..b45d6b3b 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -3,13 +3,13 @@ speakeasyVersion: 1.685.0 sources: mistral-azure-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-azure:v2 mistral-google-cloud-source: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi-google-cloud:v2 mistral-openapi: inputs: - - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:main + - location: registry.speakeasyapi.dev/mistral-dev/mistral-dev/mistral-openapi:v2 targets: mistralai-azure-sdk: target: python diff --git a/MIGRATION.md 
b/MIGRATION.md index 5fb16739..9f39cdb5 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -49,6 +49,19 @@ Some type names have been updated for clarity and consistency: | `HandoffExecution` | `ConversationRequestHandoffExecution` | | `AgentVersion` | `ConversationRequestAgentVersion` | +### Shorter Request/Response Class Names + +Internal request and response wrapper classes now use concise names: + +| Old Name | New Name | +|---|---| +| `JobsAPIRoutesFineTuningArchiveFineTunedModelRequest` | `ArchiveModelRequest` | +| `JobsAPIRoutesFineTuningCreateFineTuningJobResponse` | `CreateFineTuningJobResponse` | +| `FilesAPIRoutesUploadFileRequest` | `UploadFileRequest` | +| `AgentsAPIV1ConversationsAppendRequest` | `AppendConversationRequest` | + +This affects all operation-specific request/response types. Core models like `UserMessage`, `ChatCompletionRequest`, etc. are unchanged. + Enums now accept unknown values for forward compatibility with API changes. --- diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..a169d78f --- /dev/null +++ b/Makefile @@ -0,0 +1,24 @@ +.PHONY: help test-generate update-speakeasy-version + +help: + @echo "Available targets:" + @echo " make test-generate Test SDK generation locally" + @echo " make update-speakeasy-version VERSION=x.y.z Update Speakeasy CLI version" + @echo "" + @echo "Note: Production SDK generation is done via GitHub Actions:" + @echo " .github/workflows/sdk_generation_mistralai_sdk.yaml" + +# Test SDK generation locally. +# For production, use GitHub Actions: .github/workflows/sdk_generation_mistralai_sdk.yaml +# This uses the Speakeasy CLI version defined in .speakeasy/workflow.yaml +test-generate: + speakeasy run --skip-versioning + +# Update the Speakeasy CLI version (the code generator tool). +# This modifies speakeasyVersion in .speakeasy/workflow.yaml and regenerates the SDK. 
+# Usage: make update-speakeasy-version VERSION=1.685.0 +update-speakeasy-version: +ifndef VERSION + $(error VERSION is required. This is the Speakeasy CLI version (e.g., 1.685.0)) +endif + uv run inv update-speakeasy --version "$(VERSION)" --targets "all" diff --git a/README.md b/README.md index 04cb586c..53de43f5 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,9 @@ $ source ~/.zshenv ## Summary -Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. +Mistral AI API: Dora OpenAPI schema + +Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. @@ -485,6 +487,7 @@ The documentation for the GCP SDK is available [here](packages/mistralai_gcp/REA * [get_version](docs/sdks/betaagents/README.md#get_version) - Retrieve a specific version of an agent. * [create_version_alias](docs/sdks/betaagents/README.md#create_version_alias) - Create or update an agent version alias. * [list_version_aliases](docs/sdks/betaagents/README.md#list_version_aliases) - List all aliases for an agent. +* [delete_version_alias](docs/sdks/betaagents/README.md#delete_version_alias) - Delete an agent version alias. 
### [Beta.Conversations](docs/sdks/conversations/README.md) @@ -602,14 +605,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start_stream(inputs=[ - { - "object": "entry", - "type": "function.result", - "tool_call_id": "", - "result": "", - }, - ], stream=True, completion_args={ + res = mistral.beta.conversations.start_stream(inputs="", stream=True, completion_args={ "response_format": { "type": "text", }, @@ -646,7 +642,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={ + res = mistral.beta.libraries.documents.upload(library_id="f973c54e-979a-4464-9d36-8cc31beb21fe", file={ "file_name": "example.file", "content": open("example.file", "rb"), }) @@ -762,7 +758,7 @@ with Mistral( **Inherit from [`MistralError`](./src/mistralai/client/models/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/client/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 52 of 74 methods.* +* [`HTTPValidationError`](./src/mistralai/client/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 53 of 75 methods.* * [`ResponseValidationError`](./src/mistralai/client/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. 
diff --git a/docs/models/agent.md b/docs/models/agent.md index bd143350..e335d889 100644 --- a/docs/models/agent.md +++ b/docs/models/agent.md @@ -20,4 +20,5 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `deployment_chat` | *bool* | :heavy_check_mark: | N/A | -| `source` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| `source` | *str* | :heavy_check_mark: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md index 6a24c00b..f0f0fdbc 100644 --- a/docs/models/agentcreationrequest.md +++ b/docs/models/agentcreationrequest.md @@ -12,4 +12,5 @@ | `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md deleted file mode 100644 index ceffe009..00000000 --- a/docs/models/agentsapiv1agentsgetrequest.md +++ /dev/null @@ -1,9 +0,0 @@ -# AgentsAPIV1AgentsGetRequest - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentupdaterequest.md b/docs/models/agentupdaterequest.md index b276e199..b1830d7b 100644 --- a/docs/models/agentupdaterequest.md +++ b/docs/models/agentupdaterequest.md @@ -13,4 +13,5 @@ | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | | `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsappendrequest.md b/docs/models/appendconversationrequest.md similarity index 96% rename from docs/models/agentsapiv1conversationsappendrequest.md rename to docs/models/appendconversationrequest.md index ac8a00ec..977d8e8b 100644 --- a/docs/models/agentsapiv1conversationsappendrequest.md +++ b/docs/models/appendconversationrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsAppendRequest +# AppendConversationRequest ## Fields diff --git a/docs/models/agentsapiv1conversationsappendstreamrequest.md b/docs/models/appendconversationstreamrequest.md similarity index 96% rename from docs/models/agentsapiv1conversationsappendstreamrequest.md rename to docs/models/appendconversationstreamrequest.md index dbc330f1..a23231c2 100644 --- a/docs/models/agentsapiv1conversationsappendstreamrequest.md +++ 
b/docs/models/appendconversationstreamrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsAppendStreamRequest +# AppendConversationStreamRequest ## Fields diff --git a/docs/models/archiveftmodelout.md b/docs/models/archiveftmodelout.md index 46a9e755..98fa7b19 100644 --- a/docs/models/archiveftmodelout.md +++ b/docs/models/archiveftmodelout.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ArchiveFTModelOutObject]](../models/archiveftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/archiveftmodeloutobject.md b/docs/models/archiveftmodeloutobject.md deleted file mode 100644 index f6f46889..00000000 --- a/docs/models/archiveftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ArchiveFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md b/docs/models/archivemodelrequest.md similarity index 93% rename from docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md rename to 
docs/models/archivemodelrequest.md index f9700df5..806d135e 100644 --- a/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md +++ b/docs/models/archivemodelrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningArchiveFineTunedModelRequest +# ArchiveModelRequest ## Fields diff --git a/docs/models/batchjobout.md b/docs/models/batchjobout.md index cb49649b..5f101173 100644 --- a/docs/models/batchjobout.md +++ b/docs/models/batchjobout.md @@ -3,24 +3,24 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.BatchJobOutObject]](../models/batchjoboutobject.md) | :heavy_minus_sign: | N/A | -| `input_files` | List[*str*] | :heavy_check_mark: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `endpoint` | *str* | :heavy_check_mark: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | -| `outputs` | List[Dict[str, *Any*]] | :heavy_minus_sign: | N/A | -| `status` | [models.BatchJobStatus](../models/batchjobstatus.md) | :heavy_check_mark: | N/A | -| `created_at` | *int* | :heavy_check_mark: | N/A | -| `total_requests` | *int* | :heavy_check_mark: | N/A | -| `completed_requests` | *int* | :heavy_check_mark: | N/A | -| `succeeded_requests` | *int* | :heavy_check_mark: | N/A | -| `failed_requests` | *int* | :heavy_check_mark: | N/A | -| `started_at` | 
*OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `completed_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["batch"]]* | :heavy_minus_sign: | N/A | +| `input_files` | List[*str*] | :heavy_check_mark: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `endpoint` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `output_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `error_file` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `errors` | List[[models.BatchError](../models/batcherror.md)] | :heavy_check_mark: | N/A | +| `outputs` | List[Dict[str, *Any*]] | :heavy_minus_sign: | N/A | +| `status` | [models.BatchJobStatus](../models/batchjobstatus.md) | :heavy_check_mark: | N/A | +| `created_at` | *int* | :heavy_check_mark: | N/A | +| `total_requests` | *int* | :heavy_check_mark: | N/A | +| `completed_requests` | *int* | :heavy_check_mark: | N/A | +| `succeeded_requests` | *int* | :heavy_check_mark: | N/A | +| `failed_requests` | *int* | :heavy_check_mark: | N/A | +| `started_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `completed_at` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/batchjoboutobject.md b/docs/models/batchjoboutobject.md deleted file mode 100644 index 64ae8965..00000000 --- a/docs/models/batchjoboutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# BatchJobOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `BATCH` | 
batch | \ No newline at end of file diff --git a/docs/models/batchjobsout.md b/docs/models/batchjobsout.md index a76cfdcc..7a9d6f68 100644 --- a/docs/models/batchjobsout.md +++ b/docs/models/batchjobsout.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `data` | List[[models.BatchJobOut](../models/batchjobout.md)] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.BatchJobsOutObject]](../models/batchjobsoutobject.md) | :heavy_minus_sign: | N/A | -| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `data` | List[[models.BatchJobOut](../models/batchjobout.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | +| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/batchjobsoutobject.md b/docs/models/batchjobsoutobject.md deleted file mode 100644 index d4bf9f65..00000000 --- a/docs/models/batchjobsoutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# BatchJobsOutObject - - -## Values - -| Name | Value | -| ------ | ------ | -| `LIST` | list | \ No newline at end of file diff --git a/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md b/docs/models/cancelbatchjobrequest.md similarity index 86% rename from docs/models/jobsapiroutesbatchcancelbatchjobrequest.md rename to docs/models/cancelbatchjobrequest.md index c19d0241..f31f843b 100644 --- 
a/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md +++ b/docs/models/cancelbatchjobrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesBatchCancelBatchJobRequest +# CancelBatchJobRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md b/docs/models/cancelfinetuningjobrequest.md similarity index 88% rename from docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md rename to docs/models/cancelfinetuningjobrequest.md index 883cbac6..6525788c 100644 --- a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md +++ b/docs/models/cancelfinetuningjobrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningCancelFineTuningJobRequest +# CancelFineTuningJobRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md b/docs/models/cancelfinetuningjobresponse.md similarity index 83% rename from docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md rename to docs/models/cancelfinetuningjobresponse.md index e0d2e361..c512342e 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md +++ b/docs/models/cancelfinetuningjobresponse.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningGetFineTuningJobResponse +# CancelFineTuningJobResponse OK diff --git a/docs/models/classifierdetailedjobout.md b/docs/models/classifierdetailedjobout.md index 15f70aeb..fb532449 100644 --- a/docs/models/classifierdetailedjobout.md +++ b/docs/models/classifierdetailedjobout.md @@ -7,13 +7,13 @@ | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | N/A | | `status` | [models.ClassifierDetailedJobOutStatus](../models/classifierdetailedjoboutstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.ClassifierDetailedJobOutObject]](../models/classifierdetailedjoboutobject.md) | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `integrations` | List[[models.ClassifierDetailedJobOutIntegration](../models/classifierdetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | diff --git a/docs/models/classifierdetailedjoboutobject.md b/docs/models/classifierdetailedjoboutobject.md deleted file mode 100644 index 08cbcffc..00000000 --- a/docs/models/classifierdetailedjoboutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierDetailedJobOutObject - - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md index d7bcd3ca..6e7afbbe 100644 --- a/docs/models/classifierftmodelout.md +++ b/docs/models/classifierftmodelout.md @@ -3,21 +3,21 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.ClassifierFTModelOutObject]](../models/classifierftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | -| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | 
N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | +| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierftmodeloutobject.md b/docs/models/classifierftmodeloutobject.md deleted file mode 100644 index 9fe05bcf..00000000 --- a/docs/models/classifierftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ClassifierFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/classifierjobout.md b/docs/models/classifierjobout.md index f8259cab..ceecef5d 100644 --- a/docs/models/classifierjobout.md +++ b/docs/models/classifierjobout.md @@ -7,13 +7,13 @@ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | The ID of the job. | | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | N/A | | `status` | [models.ClassifierJobOutStatus](../models/classifierjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | -| `object` | [Optional[models.ClassifierJobOutObject]](../models/classifierjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. 
When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | | `integrations` | List[[models.ClassifierJobOutIntegration](../models/classifierjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | diff --git a/docs/models/classifierjoboutobject.md b/docs/models/classifierjoboutobject.md deleted file mode 100644 index 1b42d547..00000000 --- a/docs/models/classifierjoboutobject.md +++ /dev/null @@ -1,10 +0,0 @@ -# ClassifierJobOutObject - -The object type of the fine-tuning job. - - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/completiondetailedjobout.md b/docs/models/completiondetailedjobout.md index 725ebcde..bc7e5d1c 100644 --- a/docs/models/completiondetailedjobout.md +++ b/docs/models/completiondetailedjobout.md @@ -7,13 +7,13 @@ | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | N/A | | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| +| `model` | *str* | :heavy_check_mark: | N/A | | `status` | [models.CompletionDetailedJobOutStatus](../models/completiondetailedjoboutstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.CompletionDetailedJobOutObject]](../models/completiondetailedjoboutobject.md) | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `integrations` | List[[models.CompletionDetailedJobOutIntegration](../models/completiondetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | diff --git a/docs/models/completiondetailedjoboutobject.md b/docs/models/completiondetailedjoboutobject.md deleted file mode 100644 index 1bec88e5..00000000 --- a/docs/models/completiondetailedjoboutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# CompletionDetailedJobOutObject - - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/completionftmodelout.md b/docs/models/completionftmodelout.md index 9ebfa83e..ccd4844f 100644 --- a/docs/models/completionftmodelout.md +++ b/docs/models/completionftmodelout.md @@ -3,20 +3,20 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | 
[Optional[models.CompletionFTModelOutObject]](../models/completionftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | +| `max_context_length` | 
*Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionftmodeloutobject.md b/docs/models/completionftmodeloutobject.md deleted file mode 100644 index 6f9d858c..00000000 --- a/docs/models/completionftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# CompletionFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No newline at end of file diff --git a/docs/models/completionjobout.md b/docs/models/completionjobout.md index 84be452f..5eb44eef 100644 --- a/docs/models/completionjobout.md +++ b/docs/models/completionjobout.md @@ -7,13 +7,13 @@ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `id` | *str* | :heavy_check_mark: | The ID of the job. | | `auto_start` | *bool* | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. 
| +| `model` | *str* | :heavy_check_mark: | N/A | | `status` | [models.CompletionJobOutStatus](../models/completionjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. | -| `object` | [Optional[models.CompletionJobOutObject]](../models/completionjoboutobject.md) | :heavy_minus_sign: | The object type of the fine-tuning job. | +| `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | | `integrations` | List[[models.CompletionJobOutIntegration](../models/completionjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | diff --git a/docs/models/completionjoboutobject.md b/docs/models/completionjoboutobject.md deleted file mode 100644 index 712b107d..00000000 --- a/docs/models/completionjoboutobject.md +++ /dev/null @@ -1,10 +0,0 @@ -# CompletionJobOutObject - -The object type of the fine-tuning job. 
- - -## Values - -| Name | Value | -| ----- | ----- | -| `JOB` | job | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/createfinetuningjobresponse.md similarity index 80% rename from docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md rename to docs/models/createfinetuningjobresponse.md index 13191e90..f82cd793 100644 --- a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md +++ b/docs/models/createfinetuningjobresponse.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningCreateFineTuningJobResponse +# CreateFineTuningJobResponse OK diff --git a/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md b/docs/models/createorupdateagentaliasrequest.md similarity index 90% rename from docs/models/agentsapiv1agentscreateorupdatealiasrequest.md rename to docs/models/createorupdateagentaliasrequest.md index 79406434..af2591eb 100644 --- a/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md +++ b/docs/models/createorupdateagentaliasrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsCreateOrUpdateAliasRequest +# CreateOrUpdateAgentAliasRequest ## Fields diff --git a/docs/models/deleteagentaliasrequest.md b/docs/models/deleteagentaliasrequest.md new file mode 100644 index 00000000..17812ec4 --- /dev/null +++ b/docs/models/deleteagentaliasrequest.md @@ -0,0 +1,9 @@ +# DeleteAgentAliasRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsdeleterequest.md b/docs/models/deleteagentrequest.md similarity index 89% rename from docs/models/agentsapiv1agentsdeleterequest.md rename to docs/models/deleteagentrequest.md index 2799f418..0aaacae4 100644 --- a/docs/models/agentsapiv1agentsdeleterequest.md +++ 
b/docs/models/deleteagentrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsDeleteRequest +# DeleteAgentRequest ## Fields diff --git a/docs/models/agentsapiv1conversationsgetrequest.md b/docs/models/deleteconversationrequest.md similarity index 95% rename from docs/models/agentsapiv1conversationsgetrequest.md rename to docs/models/deleteconversationrequest.md index 67d450c8..39d9e5df 100644 --- a/docs/models/agentsapiv1conversationsgetrequest.md +++ b/docs/models/deleteconversationrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsGetRequest +# DeleteConversationRequest ## Fields diff --git a/docs/models/librariesdocumentsgetv1request.md b/docs/models/deletedocumentrequest.md similarity index 91% rename from docs/models/librariesdocumentsgetv1request.md rename to docs/models/deletedocumentrequest.md index 6febc058..eb060099 100644 --- a/docs/models/librariesdocumentsgetv1request.md +++ b/docs/models/deletedocumentrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsGetV1Request +# DeleteDocumentRequest ## Fields diff --git a/docs/models/filesapiroutesdeletefilerequest.md b/docs/models/deletefilerequest.md similarity index 88% rename from docs/models/filesapiroutesdeletefilerequest.md rename to docs/models/deletefilerequest.md index 1b02c2db..bceae901 100644 --- a/docs/models/filesapiroutesdeletefilerequest.md +++ b/docs/models/deletefilerequest.md @@ -1,4 +1,4 @@ -# FilesAPIRoutesDeleteFileRequest +# DeleteFileRequest ## Fields diff --git a/docs/models/librariessharedeletev1request.md b/docs/models/deletelibraryaccessrequest.md similarity index 96% rename from docs/models/librariessharedeletev1request.md rename to docs/models/deletelibraryaccessrequest.md index 850e22ab..c7034b98 100644 --- a/docs/models/librariessharedeletev1request.md +++ b/docs/models/deletelibraryaccessrequest.md @@ -1,4 +1,4 @@ -# LibrariesShareDeleteV1Request +# DeleteLibraryAccessRequest ## Fields diff --git a/docs/models/librariesgetv1request.md b/docs/models/deletelibraryrequest.md similarity 
index 91% rename from docs/models/librariesgetv1request.md rename to docs/models/deletelibraryrequest.md index 6e1e04c3..c229ad73 100644 --- a/docs/models/librariesgetv1request.md +++ b/docs/models/deletelibraryrequest.md @@ -1,4 +1,4 @@ -# LibrariesGetV1Request +# DeleteLibraryRequest ## Fields diff --git a/docs/models/deletemodelv1modelsmodeliddeleterequest.md b/docs/models/deletemodelrequest.md similarity index 94% rename from docs/models/deletemodelv1modelsmodeliddeleterequest.md rename to docs/models/deletemodelrequest.md index d9bc15fe..d80103f1 100644 --- a/docs/models/deletemodelv1modelsmodeliddeleterequest.md +++ b/docs/models/deletemodelrequest.md @@ -1,4 +1,4 @@ -# DeleteModelV1ModelsModelIDDeleteRequest +# DeleteModelRequest ## Fields diff --git a/docs/models/filesapiroutesdownloadfilerequest.md b/docs/models/downloadfilerequest.md similarity index 88% rename from docs/models/filesapiroutesdownloadfilerequest.md rename to docs/models/downloadfilerequest.md index 8b28cb0e..3f4dc6cc 100644 --- a/docs/models/filesapiroutesdownloadfilerequest.md +++ b/docs/models/downloadfilerequest.md @@ -1,4 +1,4 @@ -# FilesAPIRoutesDownloadFileRequest +# DownloadFileRequest ## Fields diff --git a/docs/models/agentsapiv1agentsgetagentversion.md b/docs/models/getagentagentversion.md similarity index 79% rename from docs/models/agentsapiv1agentsgetagentversion.md rename to docs/models/getagentagentversion.md index 7fb9f2d5..6d7b3f1d 100644 --- a/docs/models/agentsapiv1agentsgetagentversion.md +++ b/docs/models/getagentagentversion.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsGetAgentVersion +# GetAgentAgentVersion ## Supported Types diff --git a/docs/models/getagentrequest.md b/docs/models/getagentrequest.md new file mode 100644 index 00000000..3f729dff --- /dev/null +++ b/docs/models/getagentrequest.md @@ -0,0 +1,9 @@ +# GetAgentRequest + + +## Fields + +| Field | Type | Required | Description | +| 
---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.GetAgentAgentVersion]](../models/getagentagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentsgetversionrequest.md b/docs/models/getagentversionrequest.md similarity index 90% rename from docs/models/agentsapiv1agentsgetversionrequest.md rename to docs/models/getagentversionrequest.md index 96a73589..c98fee9d 100644 --- a/docs/models/agentsapiv1agentsgetversionrequest.md +++ b/docs/models/getagentversionrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsGetVersionRequest +# GetAgentVersionRequest ## Fields diff --git a/docs/models/jobsapiroutesbatchgetbatchjobrequest.md b/docs/models/getbatchjobrequest.md similarity index 92% rename from docs/models/jobsapiroutesbatchgetbatchjobrequest.md rename to docs/models/getbatchjobrequest.md index 8c259bea..f3c67eb4 100644 --- a/docs/models/jobsapiroutesbatchgetbatchjobrequest.md +++ b/docs/models/getbatchjobrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesBatchGetBatchJobRequest +# GetBatchJobRequest ## Fields diff --git a/docs/models/agentsapiv1conversationshistoryrequest.md b/docs/models/getconversationhistoryrequest.md similarity index 94% rename from docs/models/agentsapiv1conversationshistoryrequest.md rename to docs/models/getconversationhistoryrequest.md index 7e5d39e9..fc90282b 100644 --- a/docs/models/agentsapiv1conversationshistoryrequest.md +++ b/docs/models/getconversationhistoryrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsHistoryRequest +# GetConversationHistoryRequest ## Fields diff --git 
a/docs/models/agentsapiv1conversationsmessagesrequest.md b/docs/models/getconversationmessagesrequest.md similarity index 94% rename from docs/models/agentsapiv1conversationsmessagesrequest.md rename to docs/models/getconversationmessagesrequest.md index a91ab046..fd037fea 100644 --- a/docs/models/agentsapiv1conversationsmessagesrequest.md +++ b/docs/models/getconversationmessagesrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsMessagesRequest +# GetConversationMessagesRequest ## Fields diff --git a/docs/models/agentsapiv1conversationsdeleterequest.md b/docs/models/getconversationrequest.md similarity index 95% rename from docs/models/agentsapiv1conversationsdeleterequest.md rename to docs/models/getconversationrequest.md index c6eed281..8a66a8b0 100644 --- a/docs/models/agentsapiv1conversationsdeleterequest.md +++ b/docs/models/getconversationrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsDeleteRequest +# GetConversationRequest ## Fields diff --git a/docs/models/librariesdocumentsgetsignedurlv1request.md b/docs/models/getdocumentextractedtextsignedurlrequest.md similarity index 89% rename from docs/models/librariesdocumentsgetsignedurlv1request.md rename to docs/models/getdocumentextractedtextsignedurlrequest.md index 7c08c180..ff703802 100644 --- a/docs/models/librariesdocumentsgetsignedurlv1request.md +++ b/docs/models/getdocumentextractedtextsignedurlrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsGetSignedURLV1Request +# GetDocumentExtractedTextSignedURLRequest ## Fields diff --git a/docs/models/librariesdocumentsdeletev1request.md b/docs/models/getdocumentrequest.md similarity index 90% rename from docs/models/librariesdocumentsdeletev1request.md rename to docs/models/getdocumentrequest.md index efccdb1b..29f62127 100644 --- a/docs/models/librariesdocumentsdeletev1request.md +++ b/docs/models/getdocumentrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsDeleteV1Request +# GetDocumentRequest ## Fields diff --git 
a/docs/models/librariesdocumentsreprocessv1request.md b/docs/models/getdocumentsignedurlrequest.md similarity index 90% rename from docs/models/librariesdocumentsreprocessv1request.md rename to docs/models/getdocumentsignedurlrequest.md index 196ba17b..72a179c0 100644 --- a/docs/models/librariesdocumentsreprocessv1request.md +++ b/docs/models/getdocumentsignedurlrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsReprocessV1Request +# GetDocumentSignedURLRequest ## Fields diff --git a/docs/models/librariesdocumentsgetstatusv1request.md b/docs/models/getdocumentstatusrequest.md similarity index 90% rename from docs/models/librariesdocumentsgetstatusv1request.md rename to docs/models/getdocumentstatusrequest.md index e6d41875..3557d773 100644 --- a/docs/models/librariesdocumentsgetstatusv1request.md +++ b/docs/models/getdocumentstatusrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsGetStatusV1Request +# GetDocumentStatusRequest ## Fields diff --git a/docs/models/getdocumenttextcontentrequest.md b/docs/models/getdocumenttextcontentrequest.md new file mode 100644 index 00000000..85933401 --- /dev/null +++ b/docs/models/getdocumenttextcontentrequest.md @@ -0,0 +1,9 @@ +# GetDocumentTextContentRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/filesapiroutesgetsignedurlrequest.md b/docs/models/getfilesignedurlrequest.md similarity index 96% rename from docs/models/filesapiroutesgetsignedurlrequest.md rename to docs/models/getfilesignedurlrequest.md index dbe3c801..0be3b288 100644 --- a/docs/models/filesapiroutesgetsignedurlrequest.md +++ b/docs/models/getfilesignedurlrequest.md @@ -1,4 +1,4 @@ -# FilesAPIRoutesGetSignedURLRequest +# GetFileSignedURLRequest ## Fields diff --git 
a/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md b/docs/models/getfinetuningjobrequest.md similarity index 89% rename from docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md rename to docs/models/getfinetuningjobrequest.md index fde19800..f20cb214 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md +++ b/docs/models/getfinetuningjobrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningGetFineTuningJobRequest +# GetFineTuningJobRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md b/docs/models/getfinetuningjobresponse.md similarity index 82% rename from docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md rename to docs/models/getfinetuningjobresponse.md index 64f4cca6..1b0568dd 100644 --- a/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md +++ b/docs/models/getfinetuningjobresponse.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningStartFineTuningJobResponse +# GetFineTuningJobResponse OK diff --git a/docs/models/librariesdeletev1request.md b/docs/models/getlibraryrequest.md similarity index 90% rename from docs/models/librariesdeletev1request.md rename to docs/models/getlibraryrequest.md index 68d7e543..2a3acf50 100644 --- a/docs/models/librariesdeletev1request.md +++ b/docs/models/getlibraryrequest.md @@ -1,4 +1,4 @@ -# LibrariesDeleteV1Request +# GetLibraryRequest ## Fields diff --git a/docs/models/jobin.md b/docs/models/jobin.md index 33e6ccc6..62da9072 100644 --- a/docs/models/jobin.md +++ b/docs/models/jobin.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | N/A | | `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. 
For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md deleted file mode 100644 index 23c52c34..00000000 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md +++ /dev/null @@ -1,17 +0,0 @@ -# JobsAPIRoutesFineTuningGetFineTuningJobsRequest - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. 
Other results are not displayed. | -| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file diff --git a/docs/models/jobsout.md b/docs/models/jobsout.md index 977013f7..69f8342a 100644 --- a/docs/models/jobsout.md +++ b/docs/models/jobsout.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `data` | List[[models.JobsOutData](../models/jobsoutdata.md)] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.JobsOutObject]](../models/jobsoutobject.md) | :heavy_minus_sign: | N/A | -| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | +| `data` | List[[models.JobsOutData](../models/jobsoutdata.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | +| `total` | *int* | 
:heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsoutobject.md b/docs/models/jobsoutobject.md deleted file mode 100644 index f6c8a2c3..00000000 --- a/docs/models/jobsoutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# JobsOutObject - - -## Values - -| Name | Value | -| ------ | ------ | -| `LIST` | list | \ No newline at end of file diff --git a/docs/models/legacyjobmetadataout.md b/docs/models/legacyjobmetadataout.md index 53a45485..8a712140 100644 --- a/docs/models/legacyjobmetadataout.md +++ b/docs/models/legacyjobmetadataout.md @@ -16,4 +16,4 @@ | `details` | *str* | :heavy_check_mark: | N/A | | | `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | The number of complete passes through the entire training dataset. | 4.2922 | | `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. 
| 10 | -| `object` | [Optional[models.LegacyJobMetadataOutObject]](../models/legacyjobmetadataoutobject.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| `object` | *Optional[Literal["job.metadata"]]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/legacyjobmetadataoutobject.md b/docs/models/legacyjobmetadataoutobject.md deleted file mode 100644 index 9873ada8..00000000 --- a/docs/models/legacyjobmetadataoutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# LegacyJobMetadataOutObject - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `JOB_METADATA` | job.metadata | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md b/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md deleted file mode 100644 index 14ca66f7..00000000 --- a/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md +++ /dev/null @@ -1,9 +0,0 @@ -# LibrariesDocumentsGetExtractedTextSignedURLV1Request - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgettextcontentv1request.md b/docs/models/librariesdocumentsgettextcontentv1request.md deleted file mode 100644 index 2f58a446..00000000 --- a/docs/models/librariesdocumentsgettextcontentv1request.md +++ /dev/null @@ -1,9 +0,0 @@ -# LibrariesDocumentsGetTextContentV1Request - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistversionaliasesrequest.md 
b/docs/models/listagentaliasesrequest.md similarity index 85% rename from docs/models/agentsapiv1agentslistversionaliasesrequest.md rename to docs/models/listagentaliasesrequest.md index 3083bf92..b3570cb8 100644 --- a/docs/models/agentsapiv1agentslistversionaliasesrequest.md +++ b/docs/models/listagentaliasesrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsListVersionAliasesRequest +# ListAgentAliasesRequest ## Fields diff --git a/docs/models/agentsapiv1agentslistrequest.md b/docs/models/listagentsrequest.md similarity index 84% rename from docs/models/agentsapiv1agentslistrequest.md rename to docs/models/listagentsrequest.md index 8cba1325..79aec3ea 100644 --- a/docs/models/agentsapiv1agentslistrequest.md +++ b/docs/models/listagentsrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsListRequest +# ListAgentsRequest ## Fields @@ -9,6 +9,7 @@ | `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of agents per page | | `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `sources` | List[[models.RequestSource](../models/requestsource.md)] | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Filter by agent name | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | Search agents by name or ID | | `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1agentslistversionsrequest.md b/docs/models/listagentversionsrequest.md similarity index 94% rename from docs/models/agentsapiv1agentslistversionsrequest.md rename to docs/models/listagentversionsrequest.md index 91831700..ba8ddaa5 100644 --- a/docs/models/agentsapiv1agentslistversionsrequest.md +++ b/docs/models/listagentversionsrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsListVersionsRequest +# ListAgentVersionsRequest ## Fields diff --git 
a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md b/docs/models/listbatchjobsrequest.md similarity index 90% rename from docs/models/jobsapiroutesbatchgetbatchjobsrequest.md rename to docs/models/listbatchjobsrequest.md index b062b873..19981b24 100644 --- a/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md +++ b/docs/models/listbatchjobsrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesBatchGetBatchJobsRequest +# ListBatchJobsRequest ## Fields @@ -12,4 +12,5 @@ | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | -| `status` | List[[models.BatchJobStatus](../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `status` | List[[models.BatchJobStatus](../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | +| `order_by` | [Optional[models.OrderBy]](../models/orderby.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationslistrequest.md b/docs/models/listconversationsrequest.md similarity index 92% rename from docs/models/agentsapiv1conversationslistrequest.md rename to docs/models/listconversationsrequest.md index 62c9011f..d99b4208 100644 --- a/docs/models/agentsapiv1conversationslistrequest.md +++ b/docs/models/listconversationsrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsListRequest +# ListConversationsRequest ## Fields diff --git a/docs/models/agentsapiv1conversationslistresponse.md b/docs/models/listconversationsresponse.md similarity index 84% rename from docs/models/agentsapiv1conversationslistresponse.md rename to docs/models/listconversationsresponse.md index b233ee20..9d611c55 100644 --- a/docs/models/agentsapiv1conversationslistresponse.md +++ b/docs/models/listconversationsresponse.md @@ -1,4 +1,4 @@ -# 
AgentsAPIV1ConversationsListResponse +# ListConversationsResponse ## Supported Types diff --git a/docs/models/librariesdocumentslistv1request.md b/docs/models/listdocumentsrequest.md similarity index 96% rename from docs/models/librariesdocumentslistv1request.md rename to docs/models/listdocumentsrequest.md index 44f63001..369e8edb 100644 --- a/docs/models/librariesdocumentslistv1request.md +++ b/docs/models/listdocumentsrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsListV1Request +# ListDocumentsRequest ## Fields diff --git a/docs/models/filesapirouteslistfilesrequest.md b/docs/models/listfilesrequest.md similarity index 98% rename from docs/models/filesapirouteslistfilesrequest.md rename to docs/models/listfilesrequest.md index 57d11722..2d76a76b 100644 --- a/docs/models/filesapirouteslistfilesrequest.md +++ b/docs/models/listfilesrequest.md @@ -1,4 +1,4 @@ -# FilesAPIRoutesListFilesRequest +# ListFilesRequest ## Fields diff --git a/docs/models/listfinetuningjobsrequest.md b/docs/models/listfinetuningjobsrequest.md new file mode 100644 index 00000000..3a04fc70 --- /dev/null +++ b/docs/models/listfinetuningjobsrequest.md @@ -0,0 +1,17 @@ +# ListFineTuningJobsRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. 
When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.ListFineTuningJobsStatus]](../models/listfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md b/docs/models/listfinetuningjobsstatus.md similarity index 94% rename from docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md rename to docs/models/listfinetuningjobsstatus.md index 40d57686..07db9ae5 100644 --- a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md +++ b/docs/models/listfinetuningjobsstatus.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningGetFineTuningJobsStatus +# ListFineTuningJobsStatus The current job state to filter on. When set, the other results are not displayed. 
diff --git a/docs/models/librariessharelistv1request.md b/docs/models/listlibraryaccessesrequest.md similarity index 90% rename from docs/models/librariessharelistv1request.md rename to docs/models/listlibraryaccessesrequest.md index 98bf6d17..d98bcda2 100644 --- a/docs/models/librariessharelistv1request.md +++ b/docs/models/listlibraryaccessesrequest.md @@ -1,4 +1,4 @@ -# LibrariesShareListV1Request +# ListLibraryAccessesRequest ## Fields diff --git a/docs/models/orderby.md b/docs/models/orderby.md new file mode 100644 index 00000000..bba50df1 --- /dev/null +++ b/docs/models/orderby.md @@ -0,0 +1,9 @@ +# OrderBy + + +## Values + +| Name | Value | +| --------------- | --------------- | +| `CREATED` | created | +| `MINUS_CREATED` | -created | \ No newline at end of file diff --git a/docs/models/reprocessdocumentrequest.md b/docs/models/reprocessdocumentrequest.md new file mode 100644 index 00000000..cf3982a8 --- /dev/null +++ b/docs/models/reprocessdocumentrequest.md @@ -0,0 +1,9 @@ +# ReprocessDocumentRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agentsapiv1conversationsrestartrequest.md b/docs/models/restartconversationrequest.md similarity index 96% rename from docs/models/agentsapiv1conversationsrestartrequest.md rename to docs/models/restartconversationrequest.md index a18a41f5..f24f14e6 100644 --- a/docs/models/agentsapiv1conversationsrestartrequest.md +++ b/docs/models/restartconversationrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsRestartRequest +# RestartConversationRequest ## Fields diff --git a/docs/models/agentsapiv1conversationsrestartstreamrequest.md b/docs/models/restartconversationstreamrequest.md similarity index 96% rename from 
docs/models/agentsapiv1conversationsrestartstreamrequest.md rename to docs/models/restartconversationstreamrequest.md index 7548286a..daa661a9 100644 --- a/docs/models/agentsapiv1conversationsrestartstreamrequest.md +++ b/docs/models/restartconversationstreamrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1ConversationsRestartStreamRequest +# RestartConversationStreamRequest ## Fields diff --git a/docs/models/filesapiroutesretrievefilerequest.md b/docs/models/retrievefilerequest.md similarity index 88% rename from docs/models/filesapiroutesretrievefilerequest.md rename to docs/models/retrievefilerequest.md index 961bae1f..454b9665 100644 --- a/docs/models/filesapiroutesretrievefilerequest.md +++ b/docs/models/retrievefilerequest.md @@ -1,4 +1,4 @@ -# FilesAPIRoutesRetrieveFileRequest +# RetrieveFileRequest ## Fields diff --git a/docs/models/retrievemodelv1modelsmodelidgetrequest.md b/docs/models/retrievemodelrequest.md similarity index 94% rename from docs/models/retrievemodelv1modelsmodelidgetrequest.md rename to docs/models/retrievemodelrequest.md index f1280f88..787c3dd1 100644 --- a/docs/models/retrievemodelv1modelsmodelidgetrequest.md +++ b/docs/models/retrievemodelrequest.md @@ -1,4 +1,4 @@ -# RetrieveModelV1ModelsModelIDGetRequest +# RetrieveModelRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md b/docs/models/startfinetuningjobrequest.md similarity index 84% rename from docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md rename to docs/models/startfinetuningjobrequest.md index 4429fe48..9df5aee8 100644 --- a/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md +++ b/docs/models/startfinetuningjobrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningStartFineTuningJobRequest +# StartFineTuningJobRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md b/docs/models/startfinetuningjobresponse.md similarity index 82% rename from 
docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md rename to docs/models/startfinetuningjobresponse.md index 1b331662..dce84c5a 100644 --- a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md +++ b/docs/models/startfinetuningjobresponse.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningCancelFineTuningJobResponse +# StartFineTuningJobResponse OK diff --git a/docs/models/unarchiveftmodelout.md b/docs/models/unarchiveftmodelout.md index 287c9a00..12c3d745 100644 --- a/docs/models/unarchiveftmodelout.md +++ b/docs/models/unarchiveftmodelout.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | [Optional[models.UnarchiveFTModelOutObject]](../models/unarchiveftmodeloutobject.md) | :heavy_minus_sign: | N/A | -| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------- | ---------------------------- | ---------------------------- | ---------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `archived` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/unarchiveftmodeloutobject.md b/docs/models/unarchiveftmodeloutobject.md deleted file mode 100644 index 623dcec2..00000000 --- a/docs/models/unarchiveftmodeloutobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# UnarchiveFTModelOutObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `MODEL` | model | \ No newline at end of file diff 
--git a/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md b/docs/models/unarchivemodelrequest.md similarity index 92% rename from docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md rename to docs/models/unarchivemodelrequest.md index 95c1734d..033dad8a 100644 --- a/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md +++ b/docs/models/unarchivemodelrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest +# UnarchiveModelRequest ## Fields diff --git a/docs/models/agentsapiv1agentsupdaterequest.md b/docs/models/updateagentrequest.md similarity index 96% rename from docs/models/agentsapiv1agentsupdaterequest.md rename to docs/models/updateagentrequest.md index f60f8e5b..358cb71d 100644 --- a/docs/models/agentsapiv1agentsupdaterequest.md +++ b/docs/models/updateagentrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsUpdateRequest +# UpdateAgentRequest ## Fields diff --git a/docs/models/agentsapiv1agentsupdateversionrequest.md b/docs/models/updateagentversionrequest.md similarity index 89% rename from docs/models/agentsapiv1agentsupdateversionrequest.md rename to docs/models/updateagentversionrequest.md index e937acc9..b83eb867 100644 --- a/docs/models/agentsapiv1agentsupdateversionrequest.md +++ b/docs/models/updateagentversionrequest.md @@ -1,4 +1,4 @@ -# AgentsAPIV1AgentsUpdateVersionRequest +# UpdateAgentVersionRequest ## Fields diff --git a/docs/models/librariesdocumentsupdatev1request.md b/docs/models/updatedocumentrequest.md similarity index 97% rename from docs/models/librariesdocumentsupdatev1request.md rename to docs/models/updatedocumentrequest.md index 2f18b014..fa5d117a 100644 --- a/docs/models/librariesdocumentsupdatev1request.md +++ b/docs/models/updatedocumentrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsUpdateV1Request +# UpdateDocumentRequest ## Fields diff --git a/docs/models/librariesupdatev1request.md b/docs/models/updatelibraryrequest.md similarity index 97% rename from 
docs/models/librariesupdatev1request.md rename to docs/models/updatelibraryrequest.md index a68ef7a8..e03883cc 100644 --- a/docs/models/librariesupdatev1request.md +++ b/docs/models/updatelibraryrequest.md @@ -1,4 +1,4 @@ -# LibrariesUpdateV1Request +# UpdateLibraryRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md b/docs/models/updatemodelrequest.md similarity index 95% rename from docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md rename to docs/models/updatemodelrequest.md index 6d93832e..5799c63b 100644 --- a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md +++ b/docs/models/updatemodelrequest.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningUpdateFineTunedModelRequest +# UpdateModelRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md b/docs/models/updatemodelresponse.md similarity index 81% rename from docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md rename to docs/models/updatemodelresponse.md index 54f4c398..275ee77f 100644 --- a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md +++ b/docs/models/updatemodelresponse.md @@ -1,4 +1,4 @@ -# JobsAPIRoutesFineTuningUpdateFineTunedModelResponse +# UpdateModelResponse OK diff --git a/docs/models/librariessharecreatev1request.md b/docs/models/updateorcreatelibraryaccessrequest.md similarity index 95% rename from docs/models/librariessharecreatev1request.md rename to docs/models/updateorcreatelibraryaccessrequest.md index 4c05241d..e04567b4 100644 --- a/docs/models/librariessharecreatev1request.md +++ b/docs/models/updateorcreatelibraryaccessrequest.md @@ -1,4 +1,4 @@ -# LibrariesShareCreateV1Request +# UpdateOrCreateLibraryAccessRequest ## Fields diff --git a/docs/models/librariesdocumentsuploadv1request.md b/docs/models/uploaddocumentrequest.md similarity index 96% rename from docs/models/librariesdocumentsuploadv1request.md rename to 
docs/models/uploaddocumentrequest.md index 172a6183..92152b7f 100644 --- a/docs/models/librariesdocumentsuploadv1request.md +++ b/docs/models/uploaddocumentrequest.md @@ -1,4 +1,4 @@ -# LibrariesDocumentsUploadV1Request +# UploadDocumentRequest ## Fields diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index 64a1e749..c1e3866d 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -16,7 +16,7 @@ Given a library, list all of the Entity that have access and to what level. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -26,7 +26,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.list(library_id="d2169833-d8e2-416e-a372-76518d3d99c2") + res = mistral.beta.libraries.accesses.list(library_id="9eb628ef-f118-47eb-b3cc-9750c4ca5fb6") # Handle response print(res) @@ -57,7 +57,7 @@ Given a library id, you can create or update the access level of an entity. You ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -67,7 +67,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", level="Viewer", share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") + res = mistral.beta.libraries.accesses.update_or_create(library_id="88bb030c-1cb5-4231-ba13-742c56554876", level="Viewer", share_with_uuid="6a736283-c1fa-49b0-9b6d-ea9309c0a766", share_with_type="Workspace") # Handle response print(res) @@ -102,7 +102,7 @@ Given a library id, you can delete the access level of an entity. 
An owner canno ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -112,7 +112,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") + res = mistral.beta.libraries.accesses.delete(library_id="fc7ab1cf-e33c-4791-a6e0-95ff1f921c43", share_with_uuid="5818ddff-3568-40f1-a9e4-39d6cb9f5c94", share_with_type="Org") # Handle response print(res) diff --git a/docs/sdks/batchjobs/README.md b/docs/sdks/batchjobs/README.md index 8f2358de..24316d78 100644 --- a/docs/sdks/batchjobs/README.md +++ b/docs/sdks/batchjobs/README.md @@ -15,7 +15,7 @@ Get a list of batch jobs for your organization and user. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False) + res = mistral.batch.jobs.list(page=0, page_size=100, created_by_me=False, order_by="-created") # Handle response print(res) @@ -44,6 +44,7 @@ with Mistral( | `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `status` | List[[models.BatchJobStatus](../../models/batchjobstatus.md)] | :heavy_minus_sign: | N/A | +| `order_by` | [Optional[models.OrderBy]](../../models/orderby.md) | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -62,7 +63,7 @@ Create a new batch job, it will be queued for processing. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -72,7 +73,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.create(endpoint="/v1/moderations", model="mistral-small-latest", timeout_hours=24) + res = mistral.batch.jobs.create(endpoint="/v1/classifications", model="mistral-small-latest", timeout_hours=24) # Handle response print(res) @@ -111,7 +112,7 @@ Args: ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -121,7 +122,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.get(job_id="4017dc9f-b629-42f4-9700-8c681b9e7f0f") + res = mistral.batch.jobs.get(job_id="358c80a1-79bd-43f0-8f0e-8186713aa3ba") # Handle response print(res) @@ -152,7 +153,7 @@ Request the cancellation of a batch job. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -162,7 +163,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.cancel(job_id="4fb29d1c-535b-4f0a-a1cb-2167f86da569") + res = mistral.batch.jobs.cancel(job_id="393537d7-8b33-4931-a289-7f61f8757eda") # Handle response print(res) diff --git a/docs/sdks/betaagents/README.md b/docs/sdks/betaagents/README.md index 8d23b875..0ef655a3 100644 --- a/docs/sdks/betaagents/README.md +++ b/docs/sdks/betaagents/README.md @@ -16,6 +16,7 @@ * [get_version](#get_version) - Retrieve a specific version of an agent. * [create_version_alias](#create_version_alias) - Create or update an agent version alias. * [list_version_aliases](#list_version_aliases) - List all aliases for an agent. +* [delete_version_alias](#delete_version_alias) - Delete an agent version alias. ## create @@ -23,7 +24,7 @@ Create a new agent giving it instructions, tools, description. 
The agent is then ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -33,7 +34,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.create(model="LeBaron", name="", completion_args={ + res = mistral.beta.agents.create(model="Mustang", name="", completion_args={ "response_format": { "type": "text", }, @@ -56,6 +57,7 @@ with Mistral( | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -75,7 +77,7 @@ Retrieve a list of agent entities sorted by creation time. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -100,7 +102,8 @@ with Mistral( | `page_size` | *Optional[int]* | :heavy_minus_sign: | Number of agents per page | | `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `sources` | List[[models.RequestSource](../../models/requestsource.md)] | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Filter by agent name | +| `search` | *OptionalNullable[str]* | :heavy_minus_sign: | Search agents by name or ID | | `id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | @@ -122,7 +125,7 @@ Given an agent, retrieve an agent entity with its attributes. 
The agent_version ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -141,11 +144,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.GetAgentAgentVersion]](../../models/getagentagentversion.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -164,7 +167,7 @@ Update an agent attributes and create a new version. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -199,6 +202,7 @@ with Mistral( | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | | `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -218,7 +222,7 @@ Delete an agent entity. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -254,7 +258,7 @@ Switch the version of an agent. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -264,7 +268,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.update_version(agent_id="", version=157995) + res = mistral.beta.agents.update_version(agent_id="", version=958693) # Handle response print(res) @@ -296,7 +300,7 @@ Retrieve all versions for a specific agent with full agent context. Supports pag ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -339,7 +343,7 @@ Get a specific agent version by version number. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -349,7 +353,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.get_version(agent_id="", version="788393") + res = mistral.beta.agents.get_version(agent_id="", version="") # Handle response print(res) @@ -381,7 +385,7 @@ Create a new alias or update an existing alias to point to a specific version. 
A ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -391,7 +395,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=595141) + res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=154719) # Handle response print(res) @@ -424,7 +428,7 @@ Retrieve all version aliases for a specific agent. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -454,6 +458,43 @@ with Mistral( ### Errors +| Error Type | Status Code | Content Type | +| -------------------------- | -------------------------- | -------------------------- | +| models.HTTPValidationError | 422 | application/json | +| models.SDKError | 4XX, 5XX | \*/\* | + +## delete_version_alias + +Delete an existing alias for an agent. + +### Example Usage + + +```python +from mistralai.client import Mistral +import os + + +with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), +) as mistral: + + mistral.beta.agents.delete_version_alias(agent_id="", alias="") + + # Use the SDK ... + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `alias` | *str* | :heavy_check_mark: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| + +### Errors + | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | | models.HTTPValidationError | 422 | application/json | diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index 6aae03c5..c0089f12 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -24,7 +24,7 @@ Create a new conversation, using a base model or an agent and append entries. Co ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -81,7 +81,7 @@ Retrieve a list of conversation entities sorted by creation time. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -109,7 +109,7 @@ with Mistral( ### Response -**[List[models.AgentsAPIV1ConversationsListResponse]](../../models/.md)** +**[List[models.ListConversationsResponse]](../../models/.md)** ### Errors @@ -124,7 +124,7 @@ Given a conversation_id retrieve a conversation entity with its attributes. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -165,7 +165,7 @@ Delete a conversation given a conversation_id. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -201,7 +201,7 @@ Run completion on the history of the conversation and the user entries. Return t ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -251,7 +251,7 @@ Given a conversation_id retrieve all the entries belonging to that conversation. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -292,7 +292,7 @@ Given a conversation_id retrieve all the messages belonging to that conversation ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -333,7 +333,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -386,7 +386,7 @@ Create a new conversation, using a base model or an agent and append entries. Co ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -396,14 +396,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start_stream(inputs=[ - { - "object": "entry", - "type": "function.result", - "tool_call_id": "", - "result": "", - }, - ], stream=True, completion_args={ + res = mistral.beta.conversations.start_stream(inputs="", stream=True, completion_args={ "response_format": { "type": "text", }, @@ -452,7 +445,7 @@ Run completion on the history of the conversation and the user entries. 
Return t ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -504,7 +497,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -514,15 +507,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart_stream(conversation_id="", inputs=[ - { - "object": "entry", - "type": "message.input", - "role": "assistant", - "content": "", - "prefix": False, - }, - ], from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ + res = mistral.beta.conversations.restart_stream(conversation_id="", inputs="", from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ "response_format": { "type": "text", }, diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index d90e7ee7..97831f86 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -23,7 +23,7 @@ Given a library, lists the document that have been uploaded to that library. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -33,7 +33,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.list(library_id="5c3ca4cd-62bc-4c71-ad8a-1531ae80d078", page_size=100, page=0, sort_by="created_at", sort_order="desc") + res = mistral.beta.libraries.documents.list(library_id="05e1bda5-99b1-4baf-bb03-905d8e094f74", page_size=100, page=0, sort_by="created_at", sort_order="desc") # Handle response print(res) @@ -70,7 +70,7 @@ Given a library, upload a new document to that library. 
It is queued for process ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -80,7 +80,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={ + res = mistral.beta.libraries.documents.upload(library_id="f973c54e-979a-4464-9d36-8cc31beb21fe", file={ "file_name": "example.file", "content": open("example.file", "rb"), }) @@ -115,7 +115,7 @@ Given a library and a document in this library, you can retrieve the metadata of ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -125,7 +125,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.get(library_id="03d908c8-90a1-44fd-bf3a-8490fb7c9a03", document_id="90973aec-0508-4375-8b00-91d732414745") + res = mistral.beta.libraries.documents.get(library_id="f9902d0a-1ea4-4953-be48-52df6edd302a", document_id="c3e12fd9-e840-46f2-8d4a-79985ed36d24") # Handle response print(res) @@ -157,7 +157,7 @@ Given a library and a document in that library, update the name of that document ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -167,7 +167,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.update(library_id="3ddd8d93-dca5-4a6d-980d-173226c35742", document_id="2a25e44c-b160-40ca-b5c2-b65fb2fcae34") + res = mistral.beta.libraries.documents.update(library_id="3b900c67-d2b6-4637-93f2-3eff2c85f8dd", document_id="66f935fd-37ec-441f-bca5-b1129befcbca") # Handle response print(res) @@ -201,7 +201,7 @@ Given a library and a document in that library, delete that document. 
The docume ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -211,7 +211,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - mistral.beta.libraries.documents.delete(library_id="005daae9-d42e-407d-82d7-2261c6a1496c", document_id="edc236b0-baff-49a9-884b-4ca36a258da4") + mistral.beta.libraries.documents.delete(library_id="c728d742-7845-462b-84ad-2aacbaf1c7cf", document_id="ed3f5797-846a-4abe-8e30-39b2fd2323e0") # Use the SDK ... @@ -238,7 +238,7 @@ Given a library and a document in that library, you can retrieve the text conten ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -248,7 +248,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.text_content(library_id="1d177215-3b6b-45ba-9fa9-baf773223bec", document_id="60214c91-2aba-4692-a4e6-a53365de8caf") + res = mistral.beta.libraries.documents.text_content(library_id="12689dc1-50df-4a0d-8202-2757f7a8c141", document_id="9d4057e9-d112-437c-911e-6ee031389739") # Handle response print(res) @@ -280,7 +280,7 @@ Given a library and a document in that library, retrieve the processing status o ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -290,7 +290,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.status(library_id="e6906f70-368f-4155-80da-c1718f01bc43", document_id="2c904915-d831-4e9d-a345-8ce405bcef66") + res = mistral.beta.libraries.documents.status(library_id="41bb33c4-7e53-453d-bf21-398bb2862772", document_id="416b95cf-19c8-45af-84be-26aaa3ab3666") # Handle response print(res) @@ -322,7 +322,7 @@ Given a library and a document in that library, retrieve the signed URL of a spe ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -332,7 +332,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = 
mistral.beta.libraries.documents.get_signed_url(library_id="23cf6904-a602-4ee8-9f5b-8efc557c336d", document_id="48598486-df71-4994-acbb-1133c72efa8c") + res = mistral.beta.libraries.documents.get_signed_url(library_id="2dbbe172-1374-41be-b03d-a088c733612e", document_id="b5d88764-47f1-4485-9df1-658775428344") # Handle response print(res) @@ -364,7 +364,7 @@ Given a library and a document in that library, retrieve the signed URL of text ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -374,7 +374,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.extracted_text_signed_url(library_id="a6f15de3-1e82-4f95-af82-851499042ef8", document_id="9749d4f9-24e5-4ca2-99a3-a406863f805d") + res = mistral.beta.libraries.documents.extracted_text_signed_url(library_id="46d040ce-ae2e-4891-a54c-cdab6a8f62d8", document_id="3eddbfe2-3fd7-47f5-984b-b378e6950e37") # Handle response print(res) @@ -406,7 +406,7 @@ Given a library and a document in that library, reprocess that document, it will ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -416,7 +416,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - mistral.beta.libraries.documents.reprocess(library_id="51b29371-de8f-4ba4-932b-a0bafb3a7f64", document_id="3052422c-49ca-45ac-a918-cadb35d61fd8") + mistral.beta.libraries.documents.reprocess(library_id="76d357e4-d891-40c6-9d1e-6d6ce5056ee0", document_id="09798d2b-8f46-46c6-9765-8054a82a4bb2") # Use the SDK ... diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index 44c39f8a..ae29b7bf 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -23,7 +23,7 @@ Please contact us if you need to increase these storage limits. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -67,7 +67,7 @@ Returns a list of files that belong to the user's organization. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -114,7 +114,7 @@ Returns information about a specific file. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -124,7 +124,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.retrieve(file_id="f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6") + res = mistral.files.retrieve(file_id="654a62d9-b7ee-49ac-835e-af4153e3c9ec") # Handle response print(res) @@ -154,7 +154,7 @@ Delete a file. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -164,7 +164,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.delete(file_id="3b6d45eb-e30b-416f-8019-f47e2e93d930") + res = mistral.files.delete(file_id="789c27a4-69de-47c6-b67f-cf6e56ce9f41") # Handle response print(res) @@ -194,7 +194,7 @@ Download a file ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -204,7 +204,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.download(file_id="f8919994-a4a1-46b2-8b5b-06335a4300ce") + res = mistral.files.download(file_id="e2ba278e-eac9-4050-ae8e-ec433e124efb") # Handle response print(res) @@ -234,7 +234,7 @@ Get Signed Url ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -244,7 +244,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.get_signed_url(file_id="06a020ab-355c-49a6-b19d-304b7c01699f", expiry=24) + res = mistral.files.get_signed_url(file_id="7a0c108d-9e6b-4c47-990d-a20cba50b283", expiry=24) # Handle response print(res) diff --git a/docs/sdks/finetuningjobs/README.md b/docs/sdks/finetuningjobs/README.md index 63897fd6..fe18feeb 100644 --- a/docs/sdks/finetuningjobs/README.md +++ b/docs/sdks/finetuningjobs/README.md @@ -16,7 +16,7 @@ Get a list of fine-tuning jobs for your organization and user. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -35,19 +35,19 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | -| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. 
| -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. 
Other results are not displayed. | +| `status` | [OptionalNullable[models.ListFineTuningJobsStatus]](../../models/listfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -65,7 +65,7 @@ Create a new fine-tuning job, it will be queued for processing. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -75,7 +75,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Camaro", hyperparameters={ + res = mistral.fine_tuning.jobs.create(model="Countach", hyperparameters={ "learning_rate": 0.0001, }, invalid_sample_skip_percentage=0) @@ -88,7 +88,7 @@ with Mistral( | Parameter | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | The name of the model to fine-tune. | +| `model` | *str* | :heavy_check_mark: | N/A | | `hyperparameters` | [models.Hyperparameters](../../models/hyperparameters.md) | :heavy_check_mark: | N/A | | `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. 
| @@ -103,7 +103,7 @@ with Mistral( ### Response -**[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse](../../models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md)** +**[models.CreateFineTuningJobResponse](../../models/createfinetuningjobresponse.md)** ### Errors @@ -117,7 +117,7 @@ Get a fine-tuned job details by its UUID. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -127,7 +127,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.get(job_id="c167a961-ffca-4bcf-93ac-6169468dd389") + res = mistral.fine_tuning.jobs.get(job_id="2855f873-414e-4cf5-a46e-e589e39ee809") # Handle response print(res) @@ -143,7 +143,7 @@ with Mistral( ### Response -**[models.JobsAPIRoutesFineTuningGetFineTuningJobResponse](../../models/jobsapiroutesfinetuninggetfinetuningjobresponse.md)** +**[models.GetFineTuningJobResponse](../../models/getfinetuningjobresponse.md)** ### Errors @@ -157,7 +157,7 @@ Request the cancellation of a fine tuning job. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -167,7 +167,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.cancel(job_id="6188a2f6-7513-4e0f-89cc-3f8088523a49") + res = mistral.fine_tuning.jobs.cancel(job_id="ee7d6f03-fcbb-43ca-8f17-0388c0832eb9") # Handle response print(res) @@ -183,7 +183,7 @@ with Mistral( ### Response -**[models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse](../../models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md)** +**[models.CancelFineTuningJobResponse](../../models/cancelfinetuningjobresponse.md)** ### Errors @@ -197,7 +197,7 @@ Request the start of a validated fine tuning job. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -207,7 +207,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.start(job_id="56553e4d-0679-471e-b9ac-59a77d671103") + res = mistral.fine_tuning.jobs.start(job_id="da371429-0ec2-4cea-b9c7-73ce3a1dd76f") # Handle response print(res) @@ -223,7 +223,7 @@ with Mistral( ### Response -**[models.JobsAPIRoutesFineTuningStartFineTuningJobResponse](../../models/jobsapiroutesfinetuningstartfinetuningjobresponse.md)** +**[models.StartFineTuningJobResponse](../../models/startfinetuningjobresponse.md)** ### Errors diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index bbdacf05..8835d0ec 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -18,7 +18,7 @@ List all libraries that you have created or have been shared with you. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -57,7 +57,7 @@ Create a new Library, you will be marked as the owner and only you will have the ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -100,7 +100,7 @@ Given a library id, details information about that Library. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -110,7 +110,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.get(library_id="d0d23a1e-bfe5-45e7-b7bb-22a4ea78d47f") + res = mistral.beta.libraries.get(library_id="44e385d6-783e-4b21-8fae-5181e6817bc4") # Handle response print(res) @@ -141,7 +141,7 @@ Given a library id, deletes it together with all documents that have been upload ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -151,7 +151,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.delete(library_id="6cad0b6e-fd2e-4d11-a48b-21d30fb7c17a") + res = mistral.beta.libraries.delete(library_id="441ba08a-3d1f-4700-8d6f-f32eeed49dff") # Handle response print(res) @@ -182,7 +182,7 @@ Given a library id, you can update the name and description. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -192,7 +192,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.update(library_id="e01880c3-d0b5-4a29-8b1b-abdb8ce917e4") + res = mistral.beta.libraries.update(library_id="27049553-3425-49ce-b965-fcb3a7ab03a3") # Handle response print(res) diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 129ea223..0cbf1bdd 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -19,7 +19,7 @@ List all models available to the user. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -58,7 +58,7 @@ Retrieve information about a model. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -99,7 +99,7 @@ Delete a fine-tuned model. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -140,7 +140,7 @@ Update a model name or description. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -168,7 +168,7 @@ with Mistral( ### Response -**[models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse](../../models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md)** +**[models.UpdateModelResponse](../../models/updatemodelresponse.md)** ### Errors @@ -182,7 +182,7 @@ Archive a fine-tuned model. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -222,7 +222,7 @@ Un-archive a fine-tuned model. ### Example Usage - + ```python from mistralai.client import Mistral import os diff --git a/examples/mistral/jobs/async_jobs.py b/examples/mistral/jobs/async_fine_tuning.py similarity index 97% rename from examples/mistral/jobs/async_jobs.py rename to examples/mistral/jobs/async_fine_tuning.py index 12f9035e..080dbe03 100644 --- a/examples/mistral/jobs/async_jobs.py +++ b/examples/mistral/jobs/async_fine_tuning.py @@ -24,7 +24,7 @@ async def main(): # Create a new job created_job = await client.fine_tuning.jobs.create_async( - model="open-mistral-7b", + model="mistral-small-latest", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], hyperparameters=CompletionTrainingParametersIn( diff --git a/examples/mistral/jobs/async_jobs_chat.py b/examples/mistral/jobs/async_fine_tuning_chat.py similarity index 99% rename from examples/mistral/jobs/async_jobs_chat.py rename to examples/mistral/jobs/async_fine_tuning_chat.py index f14fb833..f170fed4 100644 --- a/examples/mistral/jobs/async_jobs_chat.py +++ b/examples/mistral/jobs/async_fine_tuning_chat.py @@ -82,7 +82,7 @@ async def main(): ) # Create a new job created_job = await client.fine_tuning.jobs.create_async( - model="open-mistral-7b", + model="mistral-small-latest", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], hyperparameters=CompletionTrainingParametersIn( diff --git a/examples/mistral/jobs/jobs.py 
b/examples/mistral/jobs/fine_tuning.py similarity index 97% rename from examples/mistral/jobs/jobs.py rename to examples/mistral/jobs/fine_tuning.py index be3a821f..2d155cc2 100644 --- a/examples/mistral/jobs/jobs.py +++ b/examples/mistral/jobs/fine_tuning.py @@ -22,7 +22,7 @@ def main(): # Create a new job created_job = client.fine_tuning.jobs.create( - model="open-mistral-7b", + model="mistral-small-latest", training_files=[{"file_id": training_file.id, "weight": 1}], validation_files=[validation_file.id], hyperparameters=CompletionTrainingParametersIn( diff --git a/examples/mistral/jobs/dry_run_job.py b/examples/mistral/jobs/fine_tuning_dry_run.py similarity index 97% rename from examples/mistral/jobs/dry_run_job.py rename to examples/mistral/jobs/fine_tuning_dry_run.py index d4280836..d0c6f733 100644 --- a/examples/mistral/jobs/dry_run_job.py +++ b/examples/mistral/jobs/fine_tuning_dry_run.py @@ -20,7 +20,7 @@ async def main(): # Create a new job dry_run_job = await client.fine_tuning.jobs.create_async( - model="open-mistral-7b", + model="mistral-small-latest", training_files=[{"file_id": training_file.id, "weight": 1}], hyperparameters=CompletionTrainingParametersIn( training_steps=1, diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/mistralai_azure/.speakeasy/gen.lock index 45ed9b17..a7cdba10 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/mistralai_azure/.speakeasy/gen.lock @@ -1,7 +1,7 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: eb0d097e3bdb7c0784f34ca2af2ce554 + docChecksum: 2bebd9aadeecb18391d46d1dadc340ef docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 @@ -12,9 +12,9 @@ management: installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_azure published: true persistentEdits: - generation_id: 0d580549-db09-4078-890b-62de0e5fe937 - pristine_commit_hash: 
b561cb140a25a721f54d0aad3c9a03d419c8fc19 - pristine_tree_hash: d122bdae045ddf46c910e2f5da53d78da18ef009 + generation_id: ecb4f74f-ba8a-4f28-941d-36b3258200bd + pristine_commit_hash: 785c0560d42a9c4cff938392bb6d52d98a2f3529 + pristine_tree_hash: 50ed42d2e4b3d4ecd639935cd1511220354a41d7 features: python: additionalDependencies: 1.0.0 @@ -197,8 +197,8 @@ trackedFiles: pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a docs/models/mistralpromptmode.md: id: d17d5db4d3b6 - last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 - pristine_git_object: 7416e2037c507d19ac02aed914da1208a2fed0a1 + last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 docs/models/ocrimageobject.md: id: b72f3c5853b2 last_write_checksum: sha1:90c5158dec6a7b31c858677b6a8efa1e3cabd504 @@ -213,8 +213,8 @@ trackedFiles: pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e docs/models/ocrrequest.md: id: 6862a3fc2d0f - last_write_checksum: sha1:f32fcc5916f9eedf7adfaa60beda30a9ec42f32e - pristine_git_object: 76e4da925937fd4bdd42307f116a74d4dbf2bea3 + last_write_checksum: sha1:9311e2c87f8f4512c35a717d3b063f2861f878d4 + pristine_git_object: 87929e53f8a74823b82ecce56d15f22228134fa6 docs/models/ocrresponse.md: id: 30042328fb78 last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 @@ -485,8 +485,8 @@ trackedFiles: pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 src/mistralai_azure/models/mistralpromptmode.py: id: f62a521bcdae - last_write_checksum: sha1:fcb16c10986bd6946f79b9e330a4be9f26f7e724 - pristine_git_object: 22fb643896688b68af238f6ac75cf41a00b0511b + last_write_checksum: sha1:82190bc14d2e51440723176cb8108791485c1180 + pristine_git_object: 77230b7e5e61cc662fdc52c72e8b817a15e183c3 src/mistralai_azure/models/no_response_error.py: id: 54523e14f29b last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f @@ -505,8 +505,8 @@ trackedFiles: pristine_git_object: 
e95718001e07bb89ba2fc9094f88b894572148bb src/mistralai_azure/models/ocrrequest.py: id: 4e574d5fb9be - last_write_checksum: sha1:6ca937598dd92c6c6ab7b8d59363595a3e8760e9 - pristine_git_object: 565a0a30a7f9fae374c14fb5fcb0f19385cf05e4 + last_write_checksum: sha1:1b03dc8b392069f6b142228e74179c8341b09ffa + pristine_git_object: e9c23afcdd7440660f17c7819406d7e603eabbec src/mistralai_azure/models/ocrresponse.py: id: 326a4d9fab25 last_write_checksum: sha1:cf597498a5841a56bbd1aeb8478bd57a01d93cb1 @@ -601,8 +601,8 @@ trackedFiles: pristine_git_object: 4caff4a6b74aeb322bf42cd2070b7bd576ca834a src/mistralai_azure/ocr.py: id: 77e2e0f594ad - last_write_checksum: sha1:a455095c62c2dfad071d70682c2f57e7d64934db - pristine_git_object: da823f816dda9d462a795e9b946d5634ff6d48e2 + last_write_checksum: sha1:7daae9b0c14093d6d0bc0258b0bce008cb845a1e + pristine_git_object: 31e27f6eaa6dcc2b8450656d4a59dd4a7a50a29a src/mistralai_azure/py.typed: id: 98df238e554c last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 diff --git a/packages/mistralai_azure/docs/models/mistralpromptmode.md b/packages/mistralai_azure/docs/models/mistralpromptmode.md index 7416e203..c3409d03 100644 --- a/packages/mistralai_azure/docs/models/mistralpromptmode.md +++ b/packages/mistralai_azure/docs/models/mistralpromptmode.md @@ -1,5 +1,9 @@ # MistralPromptMode +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
+ ## Values diff --git a/packages/mistralai_azure/docs/models/ocrrequest.md b/packages/mistralai_azure/docs/models/ocrrequest.md index 76e4da92..87929e53 100644 --- a/packages/mistralai_azure/docs/models/ocrrequest.md +++ b/packages/mistralai_azure/docs/models/ocrrequest.md @@ -14,6 +14,7 @@ | `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | | `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | | `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | | `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | | `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | | `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py index 22fb6438..77230b7e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py @@ -6,3 +6,7 @@ MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. +""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py b/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py index 565a0a30..e9c23afc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py +++ b/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py @@ -51,6 +51,8 @@ class OCRRequestTypedDict(TypedDict): r"""Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field""" document_annotation_format: NotRequired[Nullable[ResponseFormatTypedDict]] r"""Structured output class for extracting useful information from the entire document. 
Only json_schema is valid for this field""" + document_annotation_prompt: NotRequired[Nullable[str]] + r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" table_format: NotRequired[Nullable[TableFormat]] extract_header: NotRequired[bool] extract_footer: NotRequired[bool] @@ -82,6 +84,9 @@ class OCRRequest(BaseModel): document_annotation_format: OptionalNullable[ResponseFormat] = UNSET r"""Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field""" + document_annotation_prompt: OptionalNullable[str] = UNSET + r"""Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided.""" + table_format: OptionalNullable[TableFormat] = UNSET extract_header: Optional[bool] = None @@ -98,6 +103,7 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "document_annotation_prompt", "table_format", "extract_header", "extract_footer", @@ -110,6 +116,7 @@ def serialize_model(self, handler): "image_min_size", "bbox_annotation_format", "document_annotation_format", + "document_annotation_prompt", "table_format", ] null_default_fields = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/ocr.py b/packages/mistralai_azure/src/mistralai_azure/ocr.py index da823f81..31e27f6e 100644 --- a/packages/mistralai_azure/src/mistralai_azure/ocr.py +++ b/packages/mistralai_azure/src/mistralai_azure/ocr.py @@ -25,6 +25,7 @@ def process( document_annotation_format: OptionalNullable[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, table_format: OptionalNullable[models.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, @@ -44,6 +45,7 @@ def process( :param 
image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. :param table_format: :param extract_header: :param extract_footer: @@ -76,6 +78,7 @@ def process( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + document_annotation_prompt=document_annotation_prompt, table_format=table_format, extract_header=extract_header, extract_footer=extract_footer, @@ -155,6 +158,7 @@ async def process_async( document_annotation_format: OptionalNullable[ Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, + document_annotation_prompt: OptionalNullable[str] = UNSET, table_format: OptionalNullable[models.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, @@ -174,6 +178,7 @@ async def process_async( :param image_min_size: Minimum height and width of image to extract :param bbox_annotation_format: Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field :param document_annotation_format: Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field + :param document_annotation_prompt: Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. 
:param table_format: :param extract_header: :param extract_footer: @@ -206,6 +211,7 @@ async def process_async( document_annotation_format=utils.get_pydantic_model( document_annotation_format, OptionalNullable[models.ResponseFormat] ), + document_annotation_prompt=document_annotation_prompt, table_format=table_format, extract_header=extract_header, extract_footer=extract_footer, diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/mistralai_gcp/.speakeasy/gen.lock index 0bf3209f..31eb1bc7 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/mistralai_gcp/.speakeasy/gen.lock @@ -1,7 +1,7 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: a7d9a161ca71328c62514af87c72bd88 + docChecksum: d91fd326da9118e6c9dddea48eaf47a7 docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 @@ -12,9 +12,9 @@ management: installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_gcp published: true persistentEdits: - generation_id: 749d4ba0-3c79-459a-a407-b84537a057da - pristine_commit_hash: ae909165077818f36014ef4d28edaa3572c8cc64 - pristine_tree_hash: f04041c3f961a8702dfa1eaa1185b1b605875f82 + generation_id: e1cf1217-2a08-4cb8-b92c-542b4f885caa + pristine_commit_hash: 57fe0df69b76fe4754f039d49f7c40770fb3097d + pristine_tree_hash: c4c1037865fb86650ada485b300f96784045922f features: python: additionalDependencies: 1.0.0 @@ -197,8 +197,8 @@ trackedFiles: pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a docs/models/mistralpromptmode.md: id: d17d5db4d3b6 - last_write_checksum: sha1:5ccd31d3804f70b6abb0e5a00bda57b9102225e3 - pristine_git_object: 7416e2037c507d19ac02aed914da1208a2fed0a1 + last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 + pristine_git_object: c3409d03b9646e21a3793372d06dcae6fef95463 docs/models/prediction.md: id: 3c70b2262201 last_write_checksum: 
sha1:ca8a77219e6113f2358a5363e935288d90df0725 @@ -461,8 +461,8 @@ trackedFiles: pristine_git_object: fec729a590b2ea981e01f4af99d8b36ba52b4664 src/mistralai_gcp/models/mistralpromptmode.py: id: d2ba58ed5184 - last_write_checksum: sha1:8518548e80dcd8798ee72c2557c473327ba9289b - pristine_git_object: 1440f6ea9d18139ce5f10eb38d951b0995f74a20 + last_write_checksum: sha1:6fb8323de88682846a2a09e68550f3508a29f1f5 + pristine_git_object: a5cc534f8c53bc87b8451aac1b2a79e695530e71 src/mistralai_gcp/models/no_response_error.py: id: 7a773ba0687f last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f diff --git a/packages/mistralai_gcp/docs/models/mistralpromptmode.md b/packages/mistralai_gcp/docs/models/mistralpromptmode.md index 7416e203..c3409d03 100644 --- a/packages/mistralai_gcp/docs/models/mistralpromptmode.md +++ b/packages/mistralai_gcp/docs/models/mistralpromptmode.md @@ -1,5 +1,9 @@ # MistralPromptMode +Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. + ## Values diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py index 1440f6ea..a5cc534f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py +++ b/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py @@ -6,3 +6,7 @@ MistralPromptMode = Union[Literal["reasoning",], UnrecognizedStr] +r"""Available options to the prompt_mode argument on the chat completion endpoint. +Values represent high-level intent. Assignment to actual SPs is handled internally. +System prompt may include knowledge cutoff date, model capabilities, tone to use, safety guidelines, etc. 
+""" diff --git a/pyproject.toml b/pyproject.toml index 5802feaa..f8006e7d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "2.0.0a2" +version = "2.0.0a3" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 40ff2c8f..22fc94e5 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -29,7 +29,10 @@ done exclude_files=( "examples/mistral/chat/chatbot_with_streaming.py" "examples/mistral/agents/async_conversation_run_mcp_remote_auth.py" - "examples/mistral/jobs/async_jobs_chat.py" + "examples/mistral/jobs/async_fine_tuning_chat.py" + "examples/mistral/jobs/async_fine_tuning.py" + "examples/mistral/jobs/fine_tuning.py" + "examples/mistral/jobs/fine_tuning_dry_run.py" "examples/mistral/classifier/async_classifier.py" "examples/mistral/mcp_servers/sse_server.py" "examples/mistral/mcp_servers/stdio_server.py" diff --git a/src/mistralai/client/__init__.py b/src/mistralai/client/__init__.py index dd02e42e..481fc916 100644 --- a/src/mistralai/client/__init__.py +++ b/src/mistralai/client/__init__.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f1b791f9d2a5 from ._version import ( __title__, diff --git a/src/mistralai/client/_hooks/__init__.py b/src/mistralai/client/_hooks/__init__.py index 2ee66cdd..66a04e37 100644 --- a/src/mistralai/client/_hooks/__init__.py +++ b/src/mistralai/client/_hooks/__init__.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cef9ff97efd7 from .sdkhooks import * from .types import * diff --git a/src/mistralai/client/_hooks/sdkhooks.py b/src/mistralai/client/_hooks/sdkhooks.py index c9318db4..ecf94240 100644 --- a/src/mistralai/client/_hooks/sdkhooks.py +++ b/src/mistralai/client/_hooks/sdkhooks.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ed1e485b2153 import httpx from .types import ( diff --git a/src/mistralai/client/_hooks/types.py b/src/mistralai/client/_hooks/types.py index e7e1bb7f..036d44b8 100644 --- a/src/mistralai/client/_hooks/types.py +++ b/src/mistralai/client/_hooks/types.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 85cfedfb7582 from abc import ABC, abstractmethod import httpx diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py index 5a7296a7..814d9ec7 100644 --- a/src/mistralai/client/_version.py +++ b/src/mistralai/client/_version.py @@ -1,12 +1,13 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cc807b30de19 import importlib.metadata __title__: str = "mistralai" -__version__: str = "2.0.0a2" +__version__: str = "2.0.0a3" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 2.0.0a2 2.794.1 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 2.0.0a3 2.794.1 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/client/accesses.py b/src/mistralai/client/accesses.py index 307c7156..cda484c8 100644 --- a/src/mistralai/client/accesses.py +++ b/src/mistralai/client/accesses.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 76fc53bfcf59 from .basesdk import BaseSDK from mistralai.client import models, utils @@ -45,7 +46,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesShareListV1Request( + request = models.ListLibraryAccessesRequest( library_id=library_id, ) @@ -78,7 +79,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_list_v1", + operation_id="ListLibraryAccesses", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -135,7 +136,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesShareListV1Request( + request = models.ListLibraryAccessesRequest( library_id=library_id, ) @@ -168,7 +169,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_list_v1", + operation_id="ListLibraryAccesses", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -233,7 +234,7 @@ def update_or_create( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesShareCreateV1Request( + request = models.UpdateOrCreateLibraryAccessRequest( library_id=library_id, sharing_in=models.SharingIn( org_id=org_id, @@ -275,7 +276,7 @@ def update_or_create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_create_v1", + operation_id="UpdateOrCreateLibraryAccess", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -340,7 +341,7 @@ async def update_or_create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesShareCreateV1Request( + request = models.UpdateOrCreateLibraryAccessRequest( library_id=library_id, sharing_in=models.SharingIn( 
org_id=org_id, @@ -382,7 +383,7 @@ async def update_or_create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_create_v1", + operation_id="UpdateOrCreateLibraryAccess", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -445,7 +446,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesShareDeleteV1Request( + request = models.DeleteLibraryAccessRequest( library_id=library_id, sharing_delete=models.SharingDelete( org_id=org_id, @@ -486,7 +487,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_delete_v1", + operation_id="DeleteLibraryAccess", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -549,7 +550,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesShareDeleteV1Request( + request = models.DeleteLibraryAccessRequest( library_id=library_id, sharing_delete=models.SharingDelete( org_id=org_id, @@ -590,7 +591,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_share_delete_v1", + operation_id="DeleteLibraryAccess", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/agents.py b/src/mistralai/client/agents.py index d0da9f07..0942cb20 100644 --- a/src/mistralai/client/agents.py +++ b/src/mistralai/client/agents.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e946546e3eaa from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/audio.py b/src/mistralai/client/audio.py index 2834ade2..f68f063c 100644 --- a/src/mistralai/client/audio.py +++ b/src/mistralai/client/audio.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7a8ed2e90d61 from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration diff --git a/src/mistralai/client/basesdk.py b/src/mistralai/client/basesdk.py index bddc9012..611b4059 100644 --- a/src/mistralai/client/basesdk.py +++ b/src/mistralai/client/basesdk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7518c67b81ea from .sdkconfiguration import SDKConfiguration import httpx diff --git a/src/mistralai/client/batch.py b/src/mistralai/client/batch.py index 586dc235..7e36fd0d 100644 --- a/src/mistralai/client/batch.py +++ b/src/mistralai/client/batch.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cffe114c7ac7 from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration diff --git a/src/mistralai/client/batch_jobs.py b/src/mistralai/client/batch_jobs.py index af8d97b2..752c7652 100644 --- a/src/mistralai/client/batch_jobs.py +++ b/src/mistralai/client/batch_jobs.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3423fec25840 from .basesdk import BaseSDK from datetime import datetime @@ -8,6 +9,7 @@ apiendpoint as models_apiendpoint, batchjobstatus as models_batchjobstatus, batchrequest as models_batchrequest, + listbatchjobsop as models_listbatchjobsop, ) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env @@ -27,6 +29,7 @@ def list( created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, + order_by: Optional[models_listbatchjobsop.OrderBy] = "-created", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -44,6 +47,7 @@ def list( :param created_after: :param created_by_me: :param status: + :param order_by: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -59,7 +63,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + request = models.ListBatchJobsRequest( page=page, page_size=page_size, model=model, @@ -68,6 +72,7 @@ def list( created_after=created_after, created_by_me=created_by_me, status=status, + order_by=order_by, ) req = self._build_request( @@ -99,7 +104,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_jobs", + operation_id="ListBatchJobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -132,6 +137,7 @@ async def list_async( created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: 
OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, + order_by: Optional[models_listbatchjobsop.OrderBy] = "-created", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -149,6 +155,7 @@ async def list_async( :param created_after: :param created_by_me: :param status: + :param order_by: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -164,7 +171,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchGetBatchJobsRequest( + request = models.ListBatchJobsRequest( page=page, page_size=page_size, model=model, @@ -173,6 +180,7 @@ async def list_async( created_after=created_after, created_by_me=created_by_me, status=status, + order_by=order_by, ) req = self._build_request_async( @@ -204,7 +212,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_jobs", + operation_id="ListBatchJobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -316,7 +324,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_create_batch_job", + operation_id="CreateBatchJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -428,7 +436,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_create_batch_job", + operation_id="CreateBatchJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -484,7 +492,7 @@ 
def get( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchGetBatchJobRequest( + request = models.GetBatchJobRequest( job_id=job_id, inline=inline, ) @@ -518,7 +526,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_job", + operation_id="GetBatchJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -574,7 +582,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchGetBatchJobRequest( + request = models.GetBatchJobRequest( job_id=job_id, inline=inline, ) @@ -608,7 +616,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_get_batch_job", + operation_id="GetBatchJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -659,7 +667,7 @@ def cancel( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + request = models.CancelBatchJobRequest( job_id=job_id, ) @@ -692,7 +700,7 @@ def cancel( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_cancel_batch_job", + operation_id="CancelBatchJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -743,7 +751,7 @@ async def cancel_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesBatchCancelBatchJobRequest( + request = models.CancelBatchJobRequest( job_id=job_id, ) @@ -776,7 +784,7 @@ async def cancel_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_batch_cancel_batch_job", + operation_id="CancelBatchJob", oauth2_scopes=None, 
security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/beta.py b/src/mistralai/client/beta.py index a1bd409e..65b761d1 100644 --- a/src/mistralai/client/beta.py +++ b/src/mistralai/client/beta.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 981417f45147 from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration diff --git a/src/mistralai/client/beta_agents.py b/src/mistralai/client/beta_agents.py index 1420895e..4e692f17 100644 --- a/src/mistralai/client/beta_agents.py +++ b/src/mistralai/client/beta_agents.py @@ -1,13 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b64ad29b7174 from .basesdk import BaseSDK from mistralai.client import models, utils from mistralai.client._hooks import HookContext from mistralai.client.models import ( agentcreationrequest as models_agentcreationrequest, - agents_api_v1_agents_getop as models_agents_api_v1_agents_getop, agentupdaterequest as models_agentupdaterequest, completionargs as models_completionargs, + getagentop as models_getagentop, requestsource as models_requestsource, ) from mistralai.client.types import OptionalNullable, UNSET @@ -40,6 +41,7 @@ def create( description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -57,6 +59,7 @@ def create( :param description: :param handoffs: :param metadata: + :param version_message: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the 
default request timeout configuration for this method in milliseconds @@ -85,6 +88,7 @@ def create( description=description, handoffs=handoffs, metadata=metadata, + version_message=version_message, ) req = self._build_request( @@ -119,7 +123,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_create", + operation_id="CreateAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -168,6 +172,7 @@ async def create_async( description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -185,6 +190,7 @@ async def create_async( :param description: :param handoffs: :param metadata: + :param version_message: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -213,6 +219,7 @@ async def create_async( description=description, handoffs=handoffs, metadata=metadata, + version_message=version_message, ) req = self._build_request_async( @@ -247,7 +254,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_create", + operation_id="CreateAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -283,6 +290,7 @@ def list( deployment_chat: OptionalNullable[bool] = UNSET, sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, name: OptionalNullable[str] = UNSET, + search: OptionalNullable[str] = UNSET, id: OptionalNullable[str] = UNSET, 
metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -298,7 +306,8 @@ def list( :param page_size: Number of agents per page :param deployment_chat: :param sources: - :param name: + :param name: Filter by agent name + :param search: Search agents by name or ID :param id: :param metadata: :param retries: Override the default retry configuration for this method @@ -316,12 +325,13 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsListRequest( + request = models.ListAgentsRequest( page=page, page_size=page_size, deployment_chat=deployment_chat, sources=sources, name=name, + search=search, id=id, metadata=metadata, ) @@ -355,7 +365,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list", + operation_id="ListAgents", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -391,6 +401,7 @@ async def list_async( deployment_chat: OptionalNullable[bool] = UNSET, sources: OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, name: OptionalNullable[str] = UNSET, + search: OptionalNullable[str] = UNSET, id: OptionalNullable[str] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -406,7 +417,8 @@ async def list_async( :param page_size: Number of agents per page :param deployment_chat: :param sources: - :param name: + :param name: Filter by agent name + :param search: Search agents by name or ID :param id: :param metadata: :param retries: Override the default retry configuration for this method @@ -424,12 +436,13 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsListRequest( + request = models.ListAgentsRequest( page=page, page_size=page_size, deployment_chat=deployment_chat, sources=sources, 
name=name, + search=search, id=id, metadata=metadata, ) @@ -463,7 +476,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list", + operation_id="ListAgents", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -497,8 +510,8 @@ def get( agent_id: str, agent_version: OptionalNullable[ Union[ - models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersion, - models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersionTypedDict, + models_getagentop.GetAgentAgentVersion, + models_getagentop.GetAgentAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -527,7 +540,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsGetRequest( + request = models.GetAgentRequest( agent_id=agent_id, agent_version=agent_version, ) @@ -561,7 +574,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_get", + operation_id="GetAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -595,8 +608,8 @@ async def get_async( agent_id: str, agent_version: OptionalNullable[ Union[ - models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersion, - models_agents_api_v1_agents_getop.AgentsAPIV1AgentsGetAgentVersionTypedDict, + models_getagentop.GetAgentAgentVersion, + models_getagentop.GetAgentAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -625,7 +638,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsGetRequest( + request = models.GetAgentRequest( agent_id=agent_id, agent_version=agent_version, ) @@ -659,7 +672,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, 
base_url=base_url or "", - operation_id="agents_api_v1_agents_get", + operation_id="GetAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -710,6 +723,7 @@ def update( handoffs: OptionalNullable[List[str]] = UNSET, deployment_chat: OptionalNullable[bool] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -729,6 +743,7 @@ def update( :param handoffs: :param deployment_chat: :param metadata: + :param version_message: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -744,7 +759,7 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsUpdateRequest( + request = models.UpdateAgentRequest( agent_id=agent_id, agent_update_request=models.AgentUpdateRequest( instructions=instructions, @@ -760,6 +775,7 @@ def update( handoffs=handoffs, deployment_chat=deployment_chat, metadata=metadata, + version_message=version_message, ), ) @@ -799,7 +815,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_update", + operation_id="UpdateAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -850,6 +866,7 @@ async def update_async( handoffs: OptionalNullable[List[str]] = UNSET, deployment_chat: OptionalNullable[bool] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, + version_message: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -869,6 
+886,7 @@ async def update_async( :param handoffs: :param deployment_chat: :param metadata: + :param version_message: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -884,7 +902,7 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsUpdateRequest( + request = models.UpdateAgentRequest( agent_id=agent_id, agent_update_request=models.AgentUpdateRequest( instructions=instructions, @@ -900,6 +918,7 @@ async def update_async( handoffs=handoffs, deployment_chat=deployment_chat, metadata=metadata, + version_message=version_message, ), ) @@ -939,7 +958,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_update", + operation_id="UpdateAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -994,7 +1013,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsDeleteRequest( + request = models.DeleteAgentRequest( agent_id=agent_id, ) @@ -1027,7 +1046,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_delete", + operation_id="DeleteAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1082,7 +1101,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsDeleteRequest( + request = models.DeleteAgentRequest( agent_id=agent_id, ) @@ -1115,7 +1134,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_delete", + 
operation_id="DeleteAgent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1174,7 +1193,7 @@ def update_version( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsUpdateVersionRequest( + request = models.UpdateAgentVersionRequest( agent_id=agent_id, version=version, ) @@ -1208,7 +1227,7 @@ def update_version( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_update_version", + operation_id="UpdateAgentVersion", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1267,7 +1286,7 @@ async def update_version_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsUpdateVersionRequest( + request = models.UpdateAgentVersionRequest( agent_id=agent_id, version=version, ) @@ -1301,7 +1320,7 @@ async def update_version_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_update_version", + operation_id="UpdateAgentVersion", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1362,7 +1381,7 @@ def list_versions( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsListVersionsRequest( + request = models.ListAgentVersionsRequest( agent_id=agent_id, page=page, page_size=page_size, @@ -1397,7 +1416,7 @@ def list_versions( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list_versions", + operation_id="ListAgentVersions", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1458,7 +1477,7 @@ async def list_versions_async( else: base_url = self._get_url(base_url, url_variables) - request = 
models.AgentsAPIV1AgentsListVersionsRequest( + request = models.ListAgentVersionsRequest( agent_id=agent_id, page=page, page_size=page_size, @@ -1493,7 +1512,7 @@ async def list_versions_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list_versions", + operation_id="ListAgentVersions", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1552,7 +1571,7 @@ def get_version( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsGetVersionRequest( + request = models.GetAgentVersionRequest( agent_id=agent_id, version=version, ) @@ -1586,7 +1605,7 @@ def get_version( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_get_version", + operation_id="GetAgentVersion", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1645,7 +1664,7 @@ async def get_version_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsGetVersionRequest( + request = models.GetAgentVersionRequest( agent_id=agent_id, version=version, ) @@ -1679,7 +1698,7 @@ async def get_version_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_get_version", + operation_id="GetAgentVersion", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1740,7 +1759,7 @@ def create_version_alias( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + request = models.CreateOrUpdateAgentAliasRequest( agent_id=agent_id, alias=alias, version=version, @@ -1775,7 +1794,7 @@ def create_version_alias( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - 
operation_id="agents_api_v1_agents_create_or_update_alias", + operation_id="CreateOrUpdateAgentAlias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1836,7 +1855,7 @@ async def create_version_alias_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( + request = models.CreateOrUpdateAgentAliasRequest( agent_id=agent_id, alias=alias, version=version, @@ -1871,7 +1890,7 @@ async def create_version_alias_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_create_or_update_alias", + operation_id="CreateOrUpdateAgentAlias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1928,7 +1947,7 @@ def list_version_aliases( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + request = models.ListAgentAliasesRequest( agent_id=agent_id, ) @@ -1961,7 +1980,7 @@ def list_version_aliases( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list_version_aliases", + operation_id="ListAgentAliases", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2018,7 +2037,7 @@ async def list_version_aliases_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1AgentsListVersionAliasesRequest( + request = models.ListAgentAliasesRequest( agent_id=agent_id, ) @@ -2051,7 +2070,7 @@ async def list_version_aliases_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_agents_list_version_aliases", + operation_id="ListAgentAliases", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2078,3 
+2097,189 @@ async def list_version_aliases_async( raise models.SDKError("API error occurred", http_res, http_res_text) raise models.SDKError("Unexpected response received", http_res) + + def delete_version_alias( + self, + *, + agent_id: str, + alias: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent version alias. + + Delete an existing alias for an agent. + + :param agent_id: + :param alias: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. + """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteAgentAliasRequest( + agent_id=agent_id, + alias=alias, + ) + + req = self._build_request( + method="DELETE", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = self.do_request( + hook_ctx=HookContext( + 
config=self.sdk_configuration, + base_url=base_url or "", + operation_id="DeleteAgentAlias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = utils.stream_to_text(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) + + async def delete_version_alias_async( + self, + *, + agent_id: str, + alias: str, + retries: OptionalNullable[utils.RetryConfig] = UNSET, + server_url: Optional[str] = None, + timeout_ms: Optional[int] = None, + http_headers: Optional[Mapping[str, str]] = None, + ): + r"""Delete an agent version alias. + + Delete an existing alias for an agent. + + :param agent_id: + :param alias: + :param retries: Override the default retry configuration for this method + :param server_url: Override the default server URL for this method + :param timeout_ms: Override the default request timeout configuration for this method in milliseconds + :param http_headers: Additional headers to set or replace on requests. 
+ """ + base_url = None + url_variables = None + if timeout_ms is None: + timeout_ms = self.sdk_configuration.timeout_ms + + if server_url is not None: + base_url = server_url + else: + base_url = self._get_url(base_url, url_variables) + + request = models.DeleteAgentAliasRequest( + agent_id=agent_id, + alias=alias, + ) + + req = self._build_request_async( + method="DELETE", + path="/v1/agents/{agent_id}/aliases", + base_url=base_url, + url_variables=url_variables, + request=request, + request_body_required=False, + request_has_path_params=True, + request_has_query_params=True, + user_agent_header="user-agent", + accept_header_value="application/json", + http_headers=http_headers, + security=self.sdk_configuration.security, + allow_empty_value=None, + timeout_ms=timeout_ms, + ) + + if retries == UNSET: + if self.sdk_configuration.retry_config is not UNSET: + retries = self.sdk_configuration.retry_config + + retry_config = None + if isinstance(retries, utils.RetryConfig): + retry_config = (retries, ["429", "500", "502", "503", "504"]) + + http_res = await self.do_request_async( + hook_ctx=HookContext( + config=self.sdk_configuration, + base_url=base_url or "", + operation_id="DeleteAgentAlias", + oauth2_scopes=None, + security_source=get_security_from_env( + self.sdk_configuration.security, models.Security + ), + ), + request=req, + error_status_codes=["422", "4XX", "5XX"], + retry_config=retry_config, + ) + + response_data: Any = None + if utils.match_response(http_res, "204", "*"): + return + if utils.match_response(http_res, "422", "application/json"): + response_data = unmarshal_json_response( + models.HTTPValidationErrorData, http_res + ) + raise models.HTTPValidationError(response_data, http_res) + if utils.match_response(http_res, "4XX", "*"): + http_res_text = await utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + if utils.match_response(http_res, "5XX", "*"): + http_res_text = await 
utils.stream_to_text_async(http_res) + raise models.SDKError("API error occurred", http_res, http_res_text) + + raise models.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py index 523e3340..35698d32 100644 --- a/src/mistralai/client/chat.py +++ b/src/mistralai/client/chat.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7eba0f088d47 from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/classifiers.py b/src/mistralai/client/classifiers.py index 327653d1..3407c4b7 100644 --- a/src/mistralai/client/classifiers.py +++ b/src/mistralai/client/classifiers.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 26e773725732 from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py index aa037bd2..646b91f3 100644 --- a/src/mistralai/client/conversations.py +++ b/src/mistralai/client/conversations.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 40692a878064 from .basesdk import BaseSDK from mistralai.client import models, utils @@ -60,7 +61,10 @@ async def run_async( inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], instructions: OptionalNullable[str] = UNSET, tools: OptionalNullable[ - Union[List[models.ConversationRequestTool], List[models.ConversationRequestToolTypedDict]] + Union[ + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] ] = UNSET, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] @@ -133,7 +137,10 @@ async def run_stream_async( inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], instructions: OptionalNullable[str] = UNSET, tools: OptionalNullable[ - Union[List[models.ConversationRequestTool], List[models.ConversationRequestToolTypedDict]] + Union[ + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], + ] ] = UNSET, completion_args: OptionalNullable[ Union[models.CompletionArgs, models.CompletionArgsTypedDict] @@ -353,7 +360,7 @@ def start( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_start", + operation_id="StartConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -506,7 +513,7 @@ async def start_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_start", + operation_id="StartConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -544,7 +551,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.AgentsAPIV1ConversationsListResponse]: + ) -> List[models.ListConversationsResponse]: r"""List all 
created conversations. Retrieve a list of conversation entities sorted by creation time. @@ -567,7 +574,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsListRequest( + request = models.ListConversationsRequest( page=page, page_size=page_size, metadata=metadata, @@ -602,7 +609,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_list", + operation_id="ListConversations", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -616,7 +623,7 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - List[models.AgentsAPIV1ConversationsListResponse], http_res + List[models.ListConversationsResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( @@ -642,7 +649,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.AgentsAPIV1ConversationsListResponse]: + ) -> List[models.ListConversationsResponse]: r"""List all created conversations. Retrieve a list of conversation entities sorted by creation time. 
@@ -665,7 +672,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsListRequest( + request = models.ListConversationsRequest( page=page, page_size=page_size, metadata=metadata, @@ -700,7 +707,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_list", + operation_id="ListConversations", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -714,7 +721,7 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - List[models.AgentsAPIV1ConversationsListResponse], http_res + List[models.ListConversationsResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( @@ -759,7 +766,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsGetRequest( + request = models.GetConversationRequest( conversation_id=conversation_id, ) @@ -792,7 +799,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_get", + operation_id="GetConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -849,7 +856,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsGetRequest( + request = models.GetConversationRequest( conversation_id=conversation_id, ) @@ -882,7 +889,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_get", + operation_id="GetConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, 
models.Security @@ -939,7 +946,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsDeleteRequest( + request = models.DeleteConversationRequest( conversation_id=conversation_id, ) @@ -972,7 +979,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_delete", + operation_id="DeleteConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1029,7 +1036,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsDeleteRequest( + request = models.DeleteConversationRequest( conversation_id=conversation_id, ) @@ -1062,7 +1069,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_delete", + operation_id="DeleteConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1139,7 +1146,7 @@ def append( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsAppendRequest( + request = models.AppendConversationRequest( conversation_id=conversation_id, conversation_append_request=models.ConversationAppendRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -1188,7 +1195,7 @@ def append( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_append", + operation_id="AppendConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1265,7 +1272,7 @@ async def append_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsAppendRequest( + request = models.AppendConversationRequest( 
conversation_id=conversation_id, conversation_append_request=models.ConversationAppendRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -1314,7 +1321,7 @@ async def append_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_append", + operation_id="AppendConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1371,7 +1378,7 @@ def get_history( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsHistoryRequest( + request = models.GetConversationHistoryRequest( conversation_id=conversation_id, ) @@ -1404,7 +1411,7 @@ def get_history( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_history", + operation_id="GetConversationHistory", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1461,7 +1468,7 @@ async def get_history_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsHistoryRequest( + request = models.GetConversationHistoryRequest( conversation_id=conversation_id, ) @@ -1494,7 +1501,7 @@ async def get_history_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_history", + operation_id="GetConversationHistory", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1551,7 +1558,7 @@ def get_messages( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsMessagesRequest( + request = models.GetConversationMessagesRequest( conversation_id=conversation_id, ) @@ -1584,7 +1591,7 @@ def get_messages( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - 
operation_id="agents_api_v1_conversations_messages", + operation_id="GetConversationMessages", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1641,7 +1648,7 @@ async def get_messages_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsMessagesRequest( + request = models.GetConversationMessagesRequest( conversation_id=conversation_id, ) @@ -1674,7 +1681,7 @@ async def get_messages_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_messages", + operation_id="GetConversationMessages", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1762,7 +1769,7 @@ def restart( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsRestartRequest( + request = models.RestartConversationRequest( conversation_id=conversation_id, conversation_restart_request=models.ConversationRestartRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -1814,7 +1821,7 @@ def restart( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart", + operation_id="RestartConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1902,7 +1909,7 @@ async def restart_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsRestartRequest( + request = models.RestartConversationRequest( conversation_id=conversation_id, conversation_restart_request=models.ConversationRestartRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -1954,7 +1961,7 @@ async def restart_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - 
operation_id="agents_api_v1_conversations_restart", + operation_id="RestartConversation", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2109,7 +2116,7 @@ def start_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_start_stream", + operation_id="StartConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2271,7 +2278,7 @@ async def start_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_start_stream", + operation_id="StartConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2355,7 +2362,7 @@ def append_stream( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsAppendStreamRequest( + request = models.AppendConversationStreamRequest( conversation_id=conversation_id, conversation_append_stream_request=models.ConversationAppendStreamRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -2404,7 +2411,7 @@ def append_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_append_stream", + operation_id="AppendConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2488,7 +2495,7 @@ async def append_stream_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsAppendStreamRequest( + request = models.AppendConversationStreamRequest( conversation_id=conversation_id, conversation_append_stream_request=models.ConversationAppendStreamRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -2537,7 
+2544,7 @@ async def append_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_append_stream", + operation_id="AppendConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2632,7 +2639,7 @@ def restart_stream( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsRestartStreamRequest( + request = models.RestartConversationStreamRequest( conversation_id=conversation_id, conversation_restart_stream_request=models.ConversationRestartStreamRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -2684,7 +2691,7 @@ def restart_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart_stream", + operation_id="RestartConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2779,7 +2786,7 @@ async def restart_stream_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentsAPIV1ConversationsRestartStreamRequest( + request = models.RestartConversationStreamRequest( conversation_id=conversation_id, conversation_restart_stream_request=models.ConversationRestartStreamRequest( inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), @@ -2831,7 +2838,7 @@ async def restart_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="agents_api_v1_conversations_restart_stream", + operation_id="RestartConversationStream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/documents.py b/src/mistralai/client/documents.py index 3316e63b..c78f2944 100644 --- a/src/mistralai/client/documents.py +++ 
b/src/mistralai/client/documents.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: bcc17286c31c from .basesdk import BaseSDK from mistralai.client import models, utils @@ -57,7 +58,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsListV1Request( + request = models.ListDocumentsRequest( library_id=library_id, search=search, page_size=page_size, @@ -96,7 +97,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_list_v1", + operation_id="ListDocuments", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -165,7 +166,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsListV1Request( + request = models.ListDocumentsRequest( library_id=library_id, search=search, page_size=page_size, @@ -204,7 +205,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_list_v1", + operation_id="ListDocuments", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -271,7 +272,7 @@ def upload( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsUploadV1Request( + request = models.UploadDocumentRequest( library_id=library_id, request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), @@ -310,7 +311,7 @@ def upload( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_upload_v1", + operation_id="UploadDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -377,7 +378,7 @@ async def upload_async( else: 
base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsUploadV1Request( + request = models.UploadDocumentRequest( library_id=library_id, request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), @@ -416,7 +417,7 @@ async def upload_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_upload_v1", + operation_id="UploadDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -475,7 +476,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetV1Request( + request = models.GetDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -509,7 +510,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_v1", + operation_id="GetDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -568,7 +569,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetV1Request( + request = models.GetDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -602,7 +603,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_v1", + operation_id="GetDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -670,7 +671,7 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsUpdateV1Request( + request = models.UpdateDocumentRequest( library_id=library_id, document_id=document_id, document_update_in=models.DocumentUpdateIn( @@ -715,7 +716,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url 
or "", - operation_id="libraries_documents_update_v1", + operation_id="UpdateDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -783,7 +784,7 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsUpdateV1Request( + request = models.UpdateDocumentRequest( library_id=library_id, document_id=document_id, document_update_in=models.DocumentUpdateIn( @@ -828,7 +829,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_update_v1", + operation_id="UpdateDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -887,7 +888,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsDeleteV1Request( + request = models.DeleteDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -921,7 +922,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_delete_v1", + operation_id="DeleteDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -980,7 +981,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsDeleteV1Request( + request = models.DeleteDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -1014,7 +1015,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_delete_v1", + operation_id="DeleteDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1073,7 +1074,7 @@ def text_content( else: base_url = self._get_url(base_url, url_variables) - request = 
models.LibrariesDocumentsGetTextContentV1Request( + request = models.GetDocumentTextContentRequest( library_id=library_id, document_id=document_id, ) @@ -1107,7 +1108,7 @@ def text_content( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_text_content_v1", + operation_id="GetDocumentTextContent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1166,7 +1167,7 @@ async def text_content_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetTextContentV1Request( + request = models.GetDocumentTextContentRequest( library_id=library_id, document_id=document_id, ) @@ -1200,7 +1201,7 @@ async def text_content_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_text_content_v1", + operation_id="GetDocumentTextContent", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1259,7 +1260,7 @@ def status( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetStatusV1Request( + request = models.GetDocumentStatusRequest( library_id=library_id, document_id=document_id, ) @@ -1293,7 +1294,7 @@ def status( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_status_v1", + operation_id="GetDocumentStatus", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1352,7 +1353,7 @@ async def status_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetStatusV1Request( + request = models.GetDocumentStatusRequest( library_id=library_id, document_id=document_id, ) @@ -1386,7 +1387,7 @@ async def status_async( hook_ctx=HookContext( config=self.sdk_configuration, 
base_url=base_url or "", - operation_id="libraries_documents_get_status_v1", + operation_id="GetDocumentStatus", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1445,7 +1446,7 @@ def get_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetSignedURLV1Request( + request = models.GetDocumentSignedURLRequest( library_id=library_id, document_id=document_id, ) @@ -1479,7 +1480,7 @@ def get_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_signed_url_v1", + operation_id="GetDocumentSignedUrl", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1538,7 +1539,7 @@ async def get_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetSignedURLV1Request( + request = models.GetDocumentSignedURLRequest( library_id=library_id, document_id=document_id, ) @@ -1572,7 +1573,7 @@ async def get_signed_url_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_signed_url_v1", + operation_id="GetDocumentSignedUrl", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1631,7 +1632,7 @@ def extracted_text_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + request = models.GetDocumentExtractedTextSignedURLRequest( library_id=library_id, document_id=document_id, ) @@ -1665,7 +1666,7 @@ def extracted_text_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_extracted_text_signed_url_v1", + operation_id="GetDocumentExtractedTextSignedUrl", oauth2_scopes=None, 
security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1724,7 +1725,7 @@ async def extracted_text_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( + request = models.GetDocumentExtractedTextSignedURLRequest( library_id=library_id, document_id=document_id, ) @@ -1758,7 +1759,7 @@ async def extracted_text_signed_url_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_get_extracted_text_signed_url_v1", + operation_id="GetDocumentExtractedTextSignedUrl", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1817,7 +1818,7 @@ def reprocess( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsReprocessV1Request( + request = models.ReprocessDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -1851,7 +1852,7 @@ def reprocess( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_reprocess_v1", + operation_id="ReprocessDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1910,7 +1911,7 @@ async def reprocess_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDocumentsReprocessV1Request( + request = models.ReprocessDocumentRequest( library_id=library_id, document_id=document_id, ) @@ -1944,7 +1945,7 @@ async def reprocess_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_documents_reprocess_v1", + operation_id="ReprocessDocument", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/embeddings.py b/src/mistralai/client/embeddings.py index 
359f2f62..4a056baa 100644 --- a/src/mistralai/client/embeddings.py +++ b/src/mistralai/client/embeddings.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f9c17258207e from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/files.py b/src/mistralai/client/files.py index b384cda4..57d389f1 100644 --- a/src/mistralai/client/files.py +++ b/src/mistralai/client/files.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f12df4b2ce43 from .basesdk import BaseSDK import httpx @@ -99,7 +100,7 @@ def upload( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_upload_file", + operation_id="UploadFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -201,7 +202,7 @@ async def upload_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_upload_file", + operation_id="UploadFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -266,7 +267,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesListFilesRequest( + request = models.ListFilesRequest( page=page, page_size=page_size, include_total=include_total, @@ -306,7 +307,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_list_files", + operation_id="ListFiles", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -371,7 +372,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesListFilesRequest( + 
request = models.ListFilesRequest( page=page, page_size=page_size, include_total=include_total, @@ -411,7 +412,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_list_files", + operation_id="ListFiles", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -462,7 +463,7 @@ def retrieve( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesRetrieveFileRequest( + request = models.RetrieveFileRequest( file_id=file_id, ) @@ -495,7 +496,7 @@ def retrieve( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_retrieve_file", + operation_id="RetrieveFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -546,7 +547,7 @@ async def retrieve_async( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesRetrieveFileRequest( + request = models.RetrieveFileRequest( file_id=file_id, ) @@ -579,7 +580,7 @@ async def retrieve_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_retrieve_file", + operation_id="RetrieveFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -630,7 +631,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesDeleteFileRequest( + request = models.DeleteFileRequest( file_id=file_id, ) @@ -663,7 +664,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_delete_file", + operation_id="DeleteFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -714,7 +715,7 @@ async def delete_async( else: base_url = 
self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesDeleteFileRequest( + request = models.DeleteFileRequest( file_id=file_id, ) @@ -747,7 +748,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_delete_file", + operation_id="DeleteFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -798,7 +799,7 @@ def download( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesDownloadFileRequest( + request = models.DownloadFileRequest( file_id=file_id, ) @@ -831,7 +832,7 @@ def download( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_download_file", + operation_id="DownloadFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -884,7 +885,7 @@ async def download_async( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesDownloadFileRequest( + request = models.DownloadFileRequest( file_id=file_id, ) @@ -917,7 +918,7 @@ async def download_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_download_file", + operation_id="DownloadFile", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -970,7 +971,7 @@ def get_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesGetSignedURLRequest( + request = models.GetFileSignedURLRequest( file_id=file_id, expiry=expiry, ) @@ -1004,7 +1005,7 @@ def get_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_get_signed_url", + operation_id="GetFileSignedUrl", oauth2_scopes=None, security_source=get_security_from_env( 
self.sdk_configuration.security, models.Security @@ -1055,7 +1056,7 @@ async def get_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.FilesAPIRoutesGetSignedURLRequest( + request = models.GetFileSignedURLRequest( file_id=file_id, expiry=expiry, ) @@ -1089,7 +1090,7 @@ async def get_signed_url_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="files_api_routes_get_signed_url", + operation_id="GetFileSignedUrl", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/fim.py b/src/mistralai/client/fim.py index 4a834fe9..be3f7742 100644 --- a/src/mistralai/client/fim.py +++ b/src/mistralai/client/fim.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 217bea5d701d from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/fine_tuning.py b/src/mistralai/client/fine_tuning.py index aeb832d4..df6bc564 100644 --- a/src/mistralai/client/fine_tuning.py +++ b/src/mistralai/client/fine_tuning.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5d5079bbd54e from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration diff --git a/src/mistralai/client/fine_tuning_jobs.py b/src/mistralai/client/fine_tuning_jobs.py index fb75e8c7..9a28ded1 100644 --- a/src/mistralai/client/fine_tuning_jobs.py +++ b/src/mistralai/client/fine_tuning_jobs.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fa1ea246e0b2 from .basesdk import BaseSDK from datetime import datetime @@ -8,7 +9,7 @@ classifiertargetin as models_classifiertargetin, finetuneablemodeltype as models_finetuneablemodeltype, jobin as models_jobin, - jobs_api_routes_fine_tuning_get_fine_tuning_jobsop as models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop, + listfinetuningjobsop as models_listfinetuningjobsop, trainingfile as models_trainingfile, ) from mistralai.client.types import OptionalNullable, UNSET @@ -28,7 +29,7 @@ def list( created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[ - models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.JobsAPIRoutesFineTuningGetFineTuningJobsStatus + models_listfinetuningjobsop.ListFineTuningJobsStatus ] = UNSET, wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, @@ -67,7 +68,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( + request = models.ListFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -109,7 +110,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + operation_id="ListFineTuningJobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -141,7 +142,7 @@ async def list_async( created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[ - models_jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.JobsAPIRoutesFineTuningGetFineTuningJobsStatus + models_listfinetuningjobsop.ListFineTuningJobsStatus ] = UNSET, wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, @@ -180,7 +181,7 @@ async def list_async( else: base_url = self._get_url(base_url, 
url_variables) - request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( + request = models.ListFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -222,7 +223,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", + operation_id="ListFineTuningJobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -286,12 +287,12 @@ def create( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: + ) -> models.CreateFineTuningJobResponse: r"""Create Fine Tuning Job Create a new fine-tuning job, it will be queued for processing. - :param model: The name of the model to fine-tune. + :param model: :param hyperparameters: :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. 
@@ -373,7 +374,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + operation_id="CreateFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -385,9 +386,7 @@ def create( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.CreateFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -439,12 +438,12 @@ async def create_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: + ) -> models.CreateFineTuningJobResponse: r"""Create Fine Tuning Job Create a new fine-tuning job, it will be queued for processing. - :param model: The name of the model to fine-tune. + :param model: :param hyperparameters: :param training_files: :param validation_files: A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. 
@@ -526,7 +525,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", + operation_id="CreateFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -538,9 +537,7 @@ async def create_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.CreateFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -558,7 +555,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + ) -> models.GetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. 
@@ -579,7 +576,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + request = models.GetFineTuningJobRequest( job_id=job_id, ) @@ -612,7 +609,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + operation_id="GetFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -624,9 +621,7 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.GetFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -644,7 +639,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: + ) -> models.GetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. 
@@ -665,7 +660,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( + request = models.GetFineTuningJobRequest( job_id=job_id, ) @@ -698,7 +693,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", + operation_id="GetFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -710,9 +705,7 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.GetFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -730,7 +723,7 @@ def cancel( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + ) -> models.CancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. 
@@ -751,7 +744,7 @@ def cancel( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + request = models.CancelFineTuningJobRequest( job_id=job_id, ) @@ -784,7 +777,7 @@ def cancel( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + operation_id="CancelFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -796,9 +789,7 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.CancelFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -816,7 +807,7 @@ async def cancel_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: + ) -> models.CancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. 
@@ -837,7 +828,7 @@ async def cancel_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( + request = models.CancelFineTuningJobRequest( job_id=job_id, ) @@ -870,7 +861,7 @@ async def cancel_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", + operation_id="CancelFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -882,9 +873,7 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.CancelFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -902,7 +891,7 @@ def start( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + ) -> models.StartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. 
@@ -923,7 +912,7 @@ def start( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + request = models.StartFineTuningJobRequest( job_id=job_id, ) @@ -956,7 +945,7 @@ def start( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + operation_id="StartFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -968,9 +957,7 @@ def start( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.StartFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -988,7 +975,7 @@ async def start_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: + ) -> models.StartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. 
@@ -1009,7 +996,7 @@ async def start_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( + request = models.StartFineTuningJobRequest( job_id=job_id, ) @@ -1042,7 +1029,7 @@ async def start_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", + operation_id="StartFineTuningJob", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1054,9 +1041,7 @@ async def start_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res - ) + return unmarshal_json_response(models.StartFineTuningJobResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) diff --git a/src/mistralai/client/httpclient.py b/src/mistralai/client/httpclient.py index 89560b56..544af7f8 100644 --- a/src/mistralai/client/httpclient.py +++ b/src/mistralai/client/httpclient.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3e46bde74327 # pyright: reportReturnType = false import asyncio diff --git a/src/mistralai/client/libraries.py b/src/mistralai/client/libraries.py index 03a54741..26ceabe1 100644 --- a/src/mistralai/client/libraries.py +++ b/src/mistralai/client/libraries.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d43a5f78045f from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration @@ -86,7 +87,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_list_v1", + operation_id="ListLibraries", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -163,7 +164,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_list_v1", + operation_id="ListLibraries", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -256,7 +257,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_create_v1", + operation_id="CreateLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -355,7 +356,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_create_v1", + operation_id="CreateLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -412,7 +413,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesGetV1Request( + request = models.GetLibraryRequest( library_id=library_id, ) @@ -445,7 +446,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_get_v1", + operation_id="GetLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -502,7 +503,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesGetV1Request( + request = models.GetLibraryRequest( library_id=library_id, ) @@ -535,7 +536,7 @@ async def 
get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_get_v1", + operation_id="GetLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -592,7 +593,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDeleteV1Request( + request = models.DeleteLibraryRequest( library_id=library_id, ) @@ -625,7 +626,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_delete_v1", + operation_id="DeleteLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -682,7 +683,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesDeleteV1Request( + request = models.DeleteLibraryRequest( library_id=library_id, ) @@ -715,7 +716,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_delete_v1", + operation_id="DeleteLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -776,7 +777,7 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesUpdateV1Request( + request = models.UpdateLibraryRequest( library_id=library_id, library_in_update=models.LibraryInUpdate( name=name, @@ -816,7 +817,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_update_v1", + operation_id="UpdateLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -877,7 +878,7 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibrariesUpdateV1Request( + request = models.UpdateLibraryRequest( 
library_id=library_id, library_in_update=models.LibraryInUpdate( name=name, @@ -917,7 +918,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="libraries_update_v1", + operation_id="UpdateLibrary", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py index 046037c5..093ffcbd 100644 --- a/src/mistralai/client/models/__init__.py +++ b/src/mistralai/client/models/__init__.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e0e8dad92725 from .mistralerror import MistralError from typing import TYPE_CHECKING @@ -36,84 +37,6 @@ AgentHandoffStartedEvent, AgentHandoffStartedEventTypedDict, ) - from .agents_api_v1_agents_create_or_update_aliasop import ( - AgentsAPIV1AgentsCreateOrUpdateAliasRequest, - AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, - ) - from .agents_api_v1_agents_deleteop import ( - AgentsAPIV1AgentsDeleteRequest, - AgentsAPIV1AgentsDeleteRequestTypedDict, - ) - from .agents_api_v1_agents_get_versionop import ( - AgentsAPIV1AgentsGetVersionRequest, - AgentsAPIV1AgentsGetVersionRequestTypedDict, - ) - from .agents_api_v1_agents_getop import ( - AgentsAPIV1AgentsGetAgentVersion, - AgentsAPIV1AgentsGetAgentVersionTypedDict, - AgentsAPIV1AgentsGetRequest, - AgentsAPIV1AgentsGetRequestTypedDict, - ) - from .agents_api_v1_agents_list_version_aliasesop import ( - AgentsAPIV1AgentsListVersionAliasesRequest, - AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, - ) - from .agents_api_v1_agents_list_versionsop import ( - AgentsAPIV1AgentsListVersionsRequest, - AgentsAPIV1AgentsListVersionsRequestTypedDict, - ) - from .agents_api_v1_agents_listop import ( - AgentsAPIV1AgentsListRequest, - AgentsAPIV1AgentsListRequestTypedDict, - ) - from 
.agents_api_v1_agents_update_versionop import ( - AgentsAPIV1AgentsUpdateVersionRequest, - AgentsAPIV1AgentsUpdateVersionRequestTypedDict, - ) - from .agents_api_v1_agents_updateop import ( - AgentsAPIV1AgentsUpdateRequest, - AgentsAPIV1AgentsUpdateRequestTypedDict, - ) - from .agents_api_v1_conversations_append_streamop import ( - AgentsAPIV1ConversationsAppendStreamRequest, - AgentsAPIV1ConversationsAppendStreamRequestTypedDict, - ) - from .agents_api_v1_conversations_appendop import ( - AgentsAPIV1ConversationsAppendRequest, - AgentsAPIV1ConversationsAppendRequestTypedDict, - ) - from .agents_api_v1_conversations_deleteop import ( - AgentsAPIV1ConversationsDeleteRequest, - AgentsAPIV1ConversationsDeleteRequestTypedDict, - ) - from .agents_api_v1_conversations_getop import ( - AgentsAPIV1ConversationsGetRequest, - AgentsAPIV1ConversationsGetRequestTypedDict, - ResponseV1ConversationsGet, - ResponseV1ConversationsGetTypedDict, - ) - from .agents_api_v1_conversations_historyop import ( - AgentsAPIV1ConversationsHistoryRequest, - AgentsAPIV1ConversationsHistoryRequestTypedDict, - ) - from .agents_api_v1_conversations_listop import ( - AgentsAPIV1ConversationsListRequest, - AgentsAPIV1ConversationsListRequestTypedDict, - AgentsAPIV1ConversationsListResponse, - AgentsAPIV1ConversationsListResponseTypedDict, - ) - from .agents_api_v1_conversations_messagesop import ( - AgentsAPIV1ConversationsMessagesRequest, - AgentsAPIV1ConversationsMessagesRequestTypedDict, - ) - from .agents_api_v1_conversations_restart_streamop import ( - AgentsAPIV1ConversationsRestartStreamRequest, - AgentsAPIV1ConversationsRestartStreamRequestTypedDict, - ) - from .agents_api_v1_conversations_restartop import ( - AgentsAPIV1ConversationsRestartRequest, - AgentsAPIV1ConversationsRestartRequestTypedDict, - ) from .agentscompletionrequest import ( AgentsCompletionRequest, AgentsCompletionRequestMessage, @@ -141,11 +64,16 @@ AgentUpdateRequestTypedDict, ) from .apiendpoint import APIEndpoint - from 
.archiveftmodelout import ( - ArchiveFTModelOut, - ArchiveFTModelOutObject, - ArchiveFTModelOutTypedDict, + from .appendconversationop import ( + AppendConversationRequest, + AppendConversationRequestTypedDict, ) + from .appendconversationstreamop import ( + AppendConversationStreamRequest, + AppendConversationStreamRequestTypedDict, + ) + from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutTypedDict + from .archivemodelop import ArchiveModelRequest, ArchiveModelRequestTypedDict from .assistantmessage import ( AssistantMessage, AssistantMessageContent, @@ -167,11 +95,18 @@ from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .batcherror import BatchError, BatchErrorTypedDict from .batchjobin import BatchJobIn, BatchJobInTypedDict - from .batchjobout import BatchJobOut, BatchJobOutObject, BatchJobOutTypedDict - from .batchjobsout import BatchJobsOut, BatchJobsOutObject, BatchJobsOutTypedDict + from .batchjobout import BatchJobOut, BatchJobOutTypedDict + from .batchjobsout import BatchJobsOut, BatchJobsOutTypedDict from .batchjobstatus import BatchJobStatus from .batchrequest import BatchRequest, BatchRequestTypedDict from .builtinconnectors import BuiltInConnectors + from .cancelbatchjobop import CancelBatchJobRequest, CancelBatchJobRequestTypedDict + from .cancelfinetuningjobop import ( + CancelFineTuningJobRequest, + CancelFineTuningJobRequestTypedDict, + CancelFineTuningJobResponse, + CancelFineTuningJobResponseTypedDict, + ) from .chatclassificationrequest import ( ChatClassificationRequest, ChatClassificationRequestTypedDict, @@ -234,20 +169,17 @@ ClassifierDetailedJobOut, ClassifierDetailedJobOutIntegration, ClassifierDetailedJobOutIntegrationTypedDict, - ClassifierDetailedJobOutObject, ClassifierDetailedJobOutStatus, ClassifierDetailedJobOutTypedDict, ) from .classifierftmodelout import ( ClassifierFTModelOut, - ClassifierFTModelOutObject, ClassifierFTModelOutTypedDict, ) from .classifierjobout import ( ClassifierJobOut, 
ClassifierJobOutIntegration, ClassifierJobOutIntegrationTypedDict, - ClassifierJobOutObject, ClassifierJobOutStatus, ClassifierJobOutTypedDict, ) @@ -269,7 +201,6 @@ CompletionDetailedJobOut, CompletionDetailedJobOutIntegration, CompletionDetailedJobOutIntegrationTypedDict, - CompletionDetailedJobOutObject, CompletionDetailedJobOutRepository, CompletionDetailedJobOutRepositoryTypedDict, CompletionDetailedJobOutStatus, @@ -278,14 +209,12 @@ from .completionevent import CompletionEvent, CompletionEventTypedDict from .completionftmodelout import ( CompletionFTModelOut, - CompletionFTModelOutObject, CompletionFTModelOutTypedDict, ) from .completionjobout import ( CompletionJobOut, CompletionJobOutIntegration, CompletionJobOutIntegrationTypedDict, - CompletionJobOutObject, CompletionJobOutRepository, CompletionJobOutRepositoryTypedDict, CompletionJobOutStatus, @@ -377,11 +306,34 @@ ConversationUsageInfo, ConversationUsageInfoTypedDict, ) - from .delete_model_v1_models_model_id_deleteop import ( - DeleteModelV1ModelsModelIDDeleteRequest, - DeleteModelV1ModelsModelIDDeleteRequestTypedDict, + from .createfinetuningjobop import ( + CreateFineTuningJobResponse, + CreateFineTuningJobResponseTypedDict, + Response, + ResponseTypedDict, + ) + from .createorupdateagentaliasop import ( + CreateOrUpdateAgentAliasRequest, + CreateOrUpdateAgentAliasRequestTypedDict, ) + from .deleteagentaliasop import ( + DeleteAgentAliasRequest, + DeleteAgentAliasRequestTypedDict, + ) + from .deleteagentop import DeleteAgentRequest, DeleteAgentRequestTypedDict + from .deleteconversationop import ( + DeleteConversationRequest, + DeleteConversationRequestTypedDict, + ) + from .deletedocumentop import DeleteDocumentRequest, DeleteDocumentRequestTypedDict + from .deletefileop import DeleteFileRequest, DeleteFileRequestTypedDict from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict + from .deletelibraryaccessop import ( + DeleteLibraryAccessRequest, + DeleteLibraryAccessRequestTypedDict, + ) + 
from .deletelibraryop import DeleteLibraryRequest, DeleteLibraryRequestTypedDict + from .deletemodelop import DeleteModelRequest, DeleteModelRequestTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict from .deltamessage import ( DeltaMessage, @@ -403,6 +355,7 @@ DocumentURLChunkType, DocumentURLChunkTypedDict, ) + from .downloadfileop import DownloadFileRequest, DownloadFileRequestTypedDict from .embeddingdtype import EmbeddingDtype from .embeddingrequest import ( EmbeddingRequest, @@ -421,30 +374,6 @@ from .file import File, FileTypedDict from .filechunk import FileChunk, FileChunkTypedDict from .filepurpose import FilePurpose - from .files_api_routes_delete_fileop import ( - FilesAPIRoutesDeleteFileRequest, - FilesAPIRoutesDeleteFileRequestTypedDict, - ) - from .files_api_routes_download_fileop import ( - FilesAPIRoutesDownloadFileRequest, - FilesAPIRoutesDownloadFileRequestTypedDict, - ) - from .files_api_routes_get_signed_urlop import ( - FilesAPIRoutesGetSignedURLRequest, - FilesAPIRoutesGetSignedURLRequestTypedDict, - ) - from .files_api_routes_list_filesop import ( - FilesAPIRoutesListFilesRequest, - FilesAPIRoutesListFilesRequestTypedDict, - ) - from .files_api_routes_retrieve_fileop import ( - FilesAPIRoutesRetrieveFileRequest, - FilesAPIRoutesRetrieveFileRequestTypedDict, - ) - from .files_api_routes_upload_fileop import ( - MultiPartBodyParams, - MultiPartBodyParamsTypedDict, - ) from .fileschema import FileSchema, FileSchemaTypedDict from .filesignedurl import FileSignedURL, FileSignedURLTypedDict from .fimcompletionrequest import ( @@ -496,6 +425,59 @@ FunctionResultEntryTypedDict, ) from .functiontool import FunctionTool, FunctionToolTypedDict + from .getagentop import ( + GetAgentAgentVersion, + GetAgentAgentVersionTypedDict, + GetAgentRequest, + GetAgentRequestTypedDict, + ) + from .getagentversionop import ( + GetAgentVersionRequest, + GetAgentVersionRequestTypedDict, + ) + from .getbatchjobop import GetBatchJobRequest, 
GetBatchJobRequestTypedDict + from .getconversationhistoryop import ( + GetConversationHistoryRequest, + GetConversationHistoryRequestTypedDict, + ) + from .getconversationmessagesop import ( + GetConversationMessagesRequest, + GetConversationMessagesRequestTypedDict, + ) + from .getconversationop import ( + GetConversationRequest, + GetConversationRequestTypedDict, + ResponseV1ConversationsGet, + ResponseV1ConversationsGetTypedDict, + ) + from .getdocumentextractedtextsignedurlop import ( + GetDocumentExtractedTextSignedURLRequest, + GetDocumentExtractedTextSignedURLRequestTypedDict, + ) + from .getdocumentop import GetDocumentRequest, GetDocumentRequestTypedDict + from .getdocumentsignedurlop import ( + GetDocumentSignedURLRequest, + GetDocumentSignedURLRequestTypedDict, + ) + from .getdocumentstatusop import ( + GetDocumentStatusRequest, + GetDocumentStatusRequestTypedDict, + ) + from .getdocumenttextcontentop import ( + GetDocumentTextContentRequest, + GetDocumentTextContentRequestTypedDict, + ) + from .getfilesignedurlop import ( + GetFileSignedURLRequest, + GetFileSignedURLRequestTypedDict, + ) + from .getfinetuningjobop import ( + GetFineTuningJobRequest, + GetFineTuningJobRequestTypedDict, + GetFineTuningJobResponse, + GetFineTuningJobResponseTypedDict, + ) + from .getlibraryop import GetLibraryRequest, GetLibraryRequestTypedDict from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData @@ -534,145 +516,48 @@ JobInTypedDict, ) from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict - from .jobs_api_routes_batch_cancel_batch_jobop import ( - JobsAPIRoutesBatchCancelBatchJobRequest, - JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, - ) - from .jobs_api_routes_batch_get_batch_jobop import ( - JobsAPIRoutesBatchGetBatchJobRequest, - 
JobsAPIRoutesBatchGetBatchJobRequestTypedDict, - ) - from .jobs_api_routes_batch_get_batch_jobsop import ( - JobsAPIRoutesBatchGetBatchJobsRequest, - JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, - ) - from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, - JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, - ) - from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningCancelFineTuningJobRequest, - JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningCancelFineTuningJobResponse, - JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningCreateFineTuningJobResponse, - JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, - Response, - ResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningGetFineTuningJobRequest, - JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningGetFineTuningJobResponse, - JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( - JobsAPIRoutesFineTuningGetFineTuningJobsRequest, - JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, - JobsAPIRoutesFineTuningGetFineTuningJobsStatus, - ) - from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( - JobsAPIRoutesFineTuningStartFineTuningJobRequest, - JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, - JobsAPIRoutesFineTuningStartFineTuningJobResponse, - JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, - ) - from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, - JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, - ) - from 
.jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( - JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, - JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, - JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, - JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, - ) - from .jobsout import ( - JobsOut, - JobsOutData, - JobsOutDataTypedDict, - JobsOutObject, - JobsOutTypedDict, - ) + from .jobsout import JobsOut, JobsOutData, JobsOutDataTypedDict, JobsOutTypedDict from .jsonschema import JSONSchema, JSONSchemaTypedDict from .legacyjobmetadataout import ( LegacyJobMetadataOut, - LegacyJobMetadataOutObject, LegacyJobMetadataOutTypedDict, ) - from .libraries_delete_v1op import ( - LibrariesDeleteV1Request, - LibrariesDeleteV1RequestTypedDict, - ) - from .libraries_documents_delete_v1op import ( - LibrariesDocumentsDeleteV1Request, - LibrariesDocumentsDeleteV1RequestTypedDict, - ) - from .libraries_documents_get_extracted_text_signed_url_v1op import ( - LibrariesDocumentsGetExtractedTextSignedURLV1Request, - LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict, - ) - from .libraries_documents_get_signed_url_v1op import ( - LibrariesDocumentsGetSignedURLV1Request, - LibrariesDocumentsGetSignedURLV1RequestTypedDict, - ) - from .libraries_documents_get_status_v1op import ( - LibrariesDocumentsGetStatusV1Request, - LibrariesDocumentsGetStatusV1RequestTypedDict, - ) - from .libraries_documents_get_text_content_v1op import ( - LibrariesDocumentsGetTextContentV1Request, - LibrariesDocumentsGetTextContentV1RequestTypedDict, - ) - from .libraries_documents_get_v1op import ( - LibrariesDocumentsGetV1Request, - LibrariesDocumentsGetV1RequestTypedDict, - ) - from .libraries_documents_list_v1op import ( - LibrariesDocumentsListV1Request, - LibrariesDocumentsListV1RequestTypedDict, - ) - from .libraries_documents_reprocess_v1op import ( - LibrariesDocumentsReprocessV1Request, - LibrariesDocumentsReprocessV1RequestTypedDict, - ) - from 
.libraries_documents_update_v1op import ( - LibrariesDocumentsUpdateV1Request, - LibrariesDocumentsUpdateV1RequestTypedDict, - ) - from .libraries_documents_upload_v1op import ( - DocumentUpload, - DocumentUploadTypedDict, - LibrariesDocumentsUploadV1Request, - LibrariesDocumentsUploadV1RequestTypedDict, - ) - from .libraries_get_v1op import ( - LibrariesGetV1Request, - LibrariesGetV1RequestTypedDict, - ) - from .libraries_share_create_v1op import ( - LibrariesShareCreateV1Request, - LibrariesShareCreateV1RequestTypedDict, - ) - from .libraries_share_delete_v1op import ( - LibrariesShareDeleteV1Request, - LibrariesShareDeleteV1RequestTypedDict, - ) - from .libraries_share_list_v1op import ( - LibrariesShareListV1Request, - LibrariesShareListV1RequestTypedDict, - ) - from .libraries_update_v1op import ( - LibrariesUpdateV1Request, - LibrariesUpdateV1RequestTypedDict, - ) from .libraryin import LibraryIn, LibraryInTypedDict from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict from .libraryout import LibraryOut, LibraryOutTypedDict + from .listagentaliasesop import ( + ListAgentAliasesRequest, + ListAgentAliasesRequestTypedDict, + ) + from .listagentsop import ListAgentsRequest, ListAgentsRequestTypedDict + from .listagentversionsop import ( + ListAgentVersionsRequest, + ListAgentVersionsRequestTypedDict, + ) + from .listbatchjobsop import ( + ListBatchJobsRequest, + ListBatchJobsRequestTypedDict, + OrderBy, + ) + from .listconversationsop import ( + ListConversationsRequest, + ListConversationsRequestTypedDict, + ListConversationsResponse, + ListConversationsResponseTypedDict, + ) from .listdocumentout import ListDocumentOut, ListDocumentOutTypedDict + from .listdocumentsop import ListDocumentsRequest, ListDocumentsRequestTypedDict + from .listfilesop import ListFilesRequest, ListFilesRequestTypedDict from .listfilesout import ListFilesOut, ListFilesOutTypedDict + from .listfinetuningjobsop import ( + ListFineTuningJobsRequest, + 
ListFineTuningJobsRequestTypedDict, + ListFineTuningJobsStatus, + ) + from .listlibraryaccessesop import ( + ListLibraryAccessesRequest, + ListLibraryAccessesRequestTypedDict, + ) from .listlibraryout import ListLibraryOut, ListLibraryOutTypedDict from .listsharingout import ListSharingOut, ListSharingOutTypedDict from .messageentries import MessageEntries, MessageEntriesTypedDict @@ -772,6 +657,10 @@ ReferenceChunkType, ReferenceChunkTypedDict, ) + from .reprocessdocumentop import ( + ReprocessDocumentRequest, + ReprocessDocumentRequestTypedDict, + ) from .requestsource import RequestSource from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict @@ -782,13 +671,22 @@ ResponseStartedEventTypedDict, ) from .responsevalidationerror import ResponseValidationError - from .retrieve_model_v1_models_model_id_getop import ( + from .restartconversationop import ( + RestartConversationRequest, + RestartConversationRequestTypedDict, + ) + from .restartconversationstreamop import ( + RestartConversationStreamRequest, + RestartConversationStreamRequestTypedDict, + ) + from .retrievefileop import RetrieveFileRequest, RetrieveFileRequestTypedDict + from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict + from .retrievemodelop import ( ResponseRetrieveModelV1ModelsModelIDGet, ResponseRetrieveModelV1ModelsModelIDGetTypedDict, - RetrieveModelV1ModelsModelIDGetRequest, - RetrieveModelV1ModelsModelIDGetRequestTypedDict, + RetrieveModelRequest, + RetrieveModelRequestTypedDict, ) - from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict from .sampletype import SampleType from .sdkerror import SDKError from .security import Security, SecurityTypedDict @@ -798,6 +696,12 @@ from .sharingout import SharingOut, SharingOutTypedDict from .source import Source from .ssetypes import SSETypes + from .startfinetuningjobop import ( + StartFineTuningJobRequest, + 
StartFineTuningJobRequestTypedDict, + StartFineTuningJobResponse, + StartFineTuningJobResponseTypedDict, + ) from .systemmessage import ( SystemMessage, SystemMessageContent, @@ -901,12 +805,33 @@ TranscriptionStreamTextDelta, TranscriptionStreamTextDeltaTypedDict, ) - from .unarchiveftmodelout import ( - UnarchiveFTModelOut, - UnarchiveFTModelOutObject, - UnarchiveFTModelOutTypedDict, + from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutTypedDict + from .unarchivemodelop import UnarchiveModelRequest, UnarchiveModelRequestTypedDict + from .updateagentop import UpdateAgentRequest, UpdateAgentRequestTypedDict + from .updateagentversionop import ( + UpdateAgentVersionRequest, + UpdateAgentVersionRequestTypedDict, ) + from .updatedocumentop import UpdateDocumentRequest, UpdateDocumentRequestTypedDict from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict + from .updatelibraryop import UpdateLibraryRequest, UpdateLibraryRequestTypedDict + from .updatemodelop import ( + UpdateModelRequest, + UpdateModelRequestTypedDict, + UpdateModelResponse, + UpdateModelResponseTypedDict, + ) + from .updateorcreatelibraryaccessop import ( + UpdateOrCreateLibraryAccessRequest, + UpdateOrCreateLibraryAccessRequestTypedDict, + ) + from .uploaddocumentop import ( + DocumentUpload, + DocumentUploadTypedDict, + UploadDocumentRequest, + UploadDocumentRequestTypedDict, + ) + from .uploadfileop import MultiPartBodyParams, MultiPartBodyParamsTypedDict from .uploadfileout import UploadFileOut, UploadFileOutTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from .usermessage import ( @@ -959,46 +884,6 @@ "AgentUpdateRequestTool", "AgentUpdateRequestToolTypedDict", "AgentUpdateRequestTypedDict", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", - "AgentsAPIV1AgentsDeleteRequest", - "AgentsAPIV1AgentsDeleteRequestTypedDict", - "AgentsAPIV1AgentsGetAgentVersion", - 
"AgentsAPIV1AgentsGetAgentVersionTypedDict", - "AgentsAPIV1AgentsGetRequest", - "AgentsAPIV1AgentsGetRequestTypedDict", - "AgentsAPIV1AgentsGetVersionRequest", - "AgentsAPIV1AgentsGetVersionRequestTypedDict", - "AgentsAPIV1AgentsListRequest", - "AgentsAPIV1AgentsListRequestTypedDict", - "AgentsAPIV1AgentsListVersionAliasesRequest", - "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict", - "AgentsAPIV1AgentsListVersionsRequest", - "AgentsAPIV1AgentsListVersionsRequestTypedDict", - "AgentsAPIV1AgentsUpdateRequest", - "AgentsAPIV1AgentsUpdateRequestTypedDict", - "AgentsAPIV1AgentsUpdateVersionRequest", - "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", - "AgentsAPIV1ConversationsAppendRequest", - "AgentsAPIV1ConversationsAppendRequestTypedDict", - "AgentsAPIV1ConversationsAppendStreamRequest", - "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", - "AgentsAPIV1ConversationsDeleteRequest", - "AgentsAPIV1ConversationsDeleteRequestTypedDict", - "AgentsAPIV1ConversationsGetRequest", - "AgentsAPIV1ConversationsGetRequestTypedDict", - "AgentsAPIV1ConversationsHistoryRequest", - "AgentsAPIV1ConversationsHistoryRequestTypedDict", - "AgentsAPIV1ConversationsListRequest", - "AgentsAPIV1ConversationsListRequestTypedDict", - "AgentsAPIV1ConversationsListResponse", - "AgentsAPIV1ConversationsListResponseTypedDict", - "AgentsAPIV1ConversationsMessagesRequest", - "AgentsAPIV1ConversationsMessagesRequestTypedDict", - "AgentsAPIV1ConversationsRestartRequest", - "AgentsAPIV1ConversationsRestartRequestTypedDict", - "AgentsAPIV1ConversationsRestartStreamRequest", - "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", "AgentsCompletionRequest", "AgentsCompletionRequestMessage", "AgentsCompletionRequestMessageTypedDict", @@ -1015,9 +900,14 @@ "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestToolChoiceTypedDict", "AgentsCompletionStreamRequestTypedDict", + "AppendConversationRequest", + "AppendConversationRequestTypedDict", + 
"AppendConversationStreamRequest", + "AppendConversationStreamRequestTypedDict", "ArchiveFTModelOut", - "ArchiveFTModelOutObject", "ArchiveFTModelOutTypedDict", + "ArchiveModelRequest", + "ArchiveModelRequestTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", @@ -1043,15 +933,19 @@ "BatchJobIn", "BatchJobInTypedDict", "BatchJobOut", - "BatchJobOutObject", "BatchJobOutTypedDict", "BatchJobStatus", "BatchJobsOut", - "BatchJobsOutObject", "BatchJobsOutTypedDict", "BatchRequest", "BatchRequestTypedDict", "BuiltInConnectors", + "CancelBatchJobRequest", + "CancelBatchJobRequestTypedDict", + "CancelFineTuningJobRequest", + "CancelFineTuningJobRequestTypedDict", + "CancelFineTuningJobResponse", + "CancelFineTuningJobResponseTypedDict", "ChatClassificationRequest", "ChatClassificationRequestTypedDict", "ChatCompletionChoice", @@ -1096,16 +990,13 @@ "ClassifierDetailedJobOut", "ClassifierDetailedJobOutIntegration", "ClassifierDetailedJobOutIntegrationTypedDict", - "ClassifierDetailedJobOutObject", "ClassifierDetailedJobOutStatus", "ClassifierDetailedJobOutTypedDict", "ClassifierFTModelOut", - "ClassifierFTModelOutObject", "ClassifierFTModelOutTypedDict", "ClassifierJobOut", "ClassifierJobOutIntegration", "ClassifierJobOutIntegrationTypedDict", - "ClassifierJobOutObject", "ClassifierJobOutStatus", "ClassifierJobOutTypedDict", "ClassifierTargetIn", @@ -1127,7 +1018,6 @@ "CompletionDetailedJobOut", "CompletionDetailedJobOutIntegration", "CompletionDetailedJobOutIntegrationTypedDict", - "CompletionDetailedJobOutObject", "CompletionDetailedJobOutRepository", "CompletionDetailedJobOutRepositoryTypedDict", "CompletionDetailedJobOutStatus", @@ -1135,12 +1025,10 @@ "CompletionEvent", "CompletionEventTypedDict", "CompletionFTModelOut", - "CompletionFTModelOutObject", "CompletionFTModelOutTypedDict", "CompletionJobOut", "CompletionJobOutIntegration", "CompletionJobOutIntegrationTypedDict", - "CompletionJobOutObject", "CompletionJobOutRepository", 
"CompletionJobOutRepositoryTypedDict", "CompletionJobOutStatus", @@ -1201,12 +1089,30 @@ "ConversationStreamRequestTypedDict", "ConversationUsageInfo", "ConversationUsageInfoTypedDict", + "CreateFineTuningJobResponse", + "CreateFineTuningJobResponseTypedDict", + "CreateOrUpdateAgentAliasRequest", + "CreateOrUpdateAgentAliasRequestTypedDict", + "DeleteAgentAliasRequest", + "DeleteAgentAliasRequestTypedDict", + "DeleteAgentRequest", + "DeleteAgentRequestTypedDict", + "DeleteConversationRequest", + "DeleteConversationRequestTypedDict", + "DeleteDocumentRequest", + "DeleteDocumentRequestTypedDict", "DeleteFileOut", "DeleteFileOutTypedDict", + "DeleteFileRequest", + "DeleteFileRequestTypedDict", + "DeleteLibraryAccessRequest", + "DeleteLibraryAccessRequestTypedDict", + "DeleteLibraryRequest", + "DeleteLibraryRequestTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", - "DeleteModelV1ModelsModelIDDeleteRequest", - "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", + "DeleteModelRequest", + "DeleteModelRequestTypedDict", "DeltaMessage", "DeltaMessageContent", "DeltaMessageContentTypedDict", @@ -1226,6 +1132,8 @@ "DocumentUpdateInTypedDict", "DocumentUpload", "DocumentUploadTypedDict", + "DownloadFileRequest", + "DownloadFileRequestTypedDict", "EmbeddingDtype", "EmbeddingRequest", "EmbeddingRequestInputs", @@ -1265,16 +1173,6 @@ "FileSignedURL", "FileSignedURLTypedDict", "FileTypedDict", - "FilesAPIRoutesDeleteFileRequest", - "FilesAPIRoutesDeleteFileRequestTypedDict", - "FilesAPIRoutesDownloadFileRequest", - "FilesAPIRoutesDownloadFileRequestTypedDict", - "FilesAPIRoutesGetSignedURLRequest", - "FilesAPIRoutesGetSignedURLRequestTypedDict", - "FilesAPIRoutesListFilesRequest", - "FilesAPIRoutesListFilesRequestTypedDict", - "FilesAPIRoutesRetrieveFileRequest", - "FilesAPIRoutesRetrieveFileRequestTypedDict", "FineTuneableModelType", "Format", "Function", @@ -1297,6 +1195,38 @@ "FunctionTool", "FunctionToolTypedDict", "FunctionTypedDict", + "GetAgentAgentVersion", + 
"GetAgentAgentVersionTypedDict", + "GetAgentRequest", + "GetAgentRequestTypedDict", + "GetAgentVersionRequest", + "GetAgentVersionRequestTypedDict", + "GetBatchJobRequest", + "GetBatchJobRequestTypedDict", + "GetConversationHistoryRequest", + "GetConversationHistoryRequestTypedDict", + "GetConversationMessagesRequest", + "GetConversationMessagesRequestTypedDict", + "GetConversationRequest", + "GetConversationRequestTypedDict", + "GetDocumentExtractedTextSignedURLRequest", + "GetDocumentExtractedTextSignedURLRequestTypedDict", + "GetDocumentRequest", + "GetDocumentRequestTypedDict", + "GetDocumentSignedURLRequest", + "GetDocumentSignedURLRequestTypedDict", + "GetDocumentStatusRequest", + "GetDocumentStatusRequestTypedDict", + "GetDocumentTextContentRequest", + "GetDocumentTextContentRequestTypedDict", + "GetFileSignedURLRequest", + "GetFileSignedURLRequestTypedDict", + "GetFineTuningJobRequest", + "GetFineTuningJobRequestTypedDict", + "GetFineTuningJobResponse", + "GetFineTuningJobResponseTypedDict", + "GetLibraryRequest", + "GetLibraryRequestTypedDict", "GithubRepositoryIn", "GithubRepositoryInTypedDict", "GithubRepositoryOut", @@ -1336,87 +1266,43 @@ "JobInTypedDict", "JobMetadataOut", "JobMetadataOutTypedDict", - "JobsAPIRoutesBatchCancelBatchJobRequest", - "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", - "JobsAPIRoutesBatchGetBatchJobRequest", - "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", - "JobsAPIRoutesBatchGetBatchJobsRequest", - "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - 
"JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobRequest", - "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobResponse", - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", - "JobsAPIRoutesFineTuningGetFineTuningJobsStatus", - "JobsAPIRoutesFineTuningStartFineTuningJobRequest", - "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", - "JobsAPIRoutesFineTuningStartFineTuningJobResponse", - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", "JobsOut", "JobsOutData", "JobsOutDataTypedDict", - "JobsOutObject", "JobsOutTypedDict", "LegacyJobMetadataOut", - "LegacyJobMetadataOutObject", "LegacyJobMetadataOutTypedDict", - "LibrariesDeleteV1Request", - "LibrariesDeleteV1RequestTypedDict", - "LibrariesDocumentsDeleteV1Request", - "LibrariesDocumentsDeleteV1RequestTypedDict", - "LibrariesDocumentsGetExtractedTextSignedURLV1Request", - "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", - "LibrariesDocumentsGetSignedURLV1Request", - "LibrariesDocumentsGetSignedURLV1RequestTypedDict", - "LibrariesDocumentsGetStatusV1Request", - "LibrariesDocumentsGetStatusV1RequestTypedDict", - "LibrariesDocumentsGetTextContentV1Request", - "LibrariesDocumentsGetTextContentV1RequestTypedDict", - "LibrariesDocumentsGetV1Request", - "LibrariesDocumentsGetV1RequestTypedDict", - "LibrariesDocumentsListV1Request", - "LibrariesDocumentsListV1RequestTypedDict", - 
"LibrariesDocumentsReprocessV1Request", - "LibrariesDocumentsReprocessV1RequestTypedDict", - "LibrariesDocumentsUpdateV1Request", - "LibrariesDocumentsUpdateV1RequestTypedDict", - "LibrariesDocumentsUploadV1Request", - "LibrariesDocumentsUploadV1RequestTypedDict", - "LibrariesGetV1Request", - "LibrariesGetV1RequestTypedDict", - "LibrariesShareCreateV1Request", - "LibrariesShareCreateV1RequestTypedDict", - "LibrariesShareDeleteV1Request", - "LibrariesShareDeleteV1RequestTypedDict", - "LibrariesShareListV1Request", - "LibrariesShareListV1RequestTypedDict", - "LibrariesUpdateV1Request", - "LibrariesUpdateV1RequestTypedDict", "LibraryIn", "LibraryInTypedDict", "LibraryInUpdate", "LibraryInUpdateTypedDict", "LibraryOut", "LibraryOutTypedDict", + "ListAgentAliasesRequest", + "ListAgentAliasesRequestTypedDict", + "ListAgentVersionsRequest", + "ListAgentVersionsRequestTypedDict", + "ListAgentsRequest", + "ListAgentsRequestTypedDict", + "ListBatchJobsRequest", + "ListBatchJobsRequestTypedDict", + "ListConversationsRequest", + "ListConversationsRequestTypedDict", + "ListConversationsResponse", + "ListConversationsResponseTypedDict", "ListDocumentOut", "ListDocumentOutTypedDict", + "ListDocumentsRequest", + "ListDocumentsRequestTypedDict", "ListFilesOut", "ListFilesOutTypedDict", + "ListFilesRequest", + "ListFilesRequestTypedDict", + "ListFineTuningJobsRequest", + "ListFineTuningJobsRequestTypedDict", + "ListFineTuningJobsStatus", + "ListLibraryAccessesRequest", + "ListLibraryAccessesRequestTypedDict", "ListLibraryOut", "ListLibraryOutTypedDict", "ListSharingOut", @@ -1484,6 +1370,7 @@ "OCRTableObjectTypedDict", "OCRUsageInfo", "OCRUsageInfoTypedDict", + "OrderBy", "Output", "OutputContentChunks", "OutputContentChunksTypedDict", @@ -1509,6 +1396,8 @@ "ReferenceChunk", "ReferenceChunkType", "ReferenceChunkTypedDict", + "ReprocessDocumentRequest", + "ReprocessDocumentRequestTypedDict", "RequestSource", "Response", "ResponseDoneEvent", @@ -1526,10 +1415,16 @@ 
"ResponseV1ConversationsGet", "ResponseV1ConversationsGetTypedDict", "ResponseValidationError", + "RestartConversationRequest", + "RestartConversationRequestTypedDict", + "RestartConversationStreamRequest", + "RestartConversationStreamRequestTypedDict", "RetrieveFileOut", "RetrieveFileOutTypedDict", - "RetrieveModelV1ModelsModelIDGetRequest", - "RetrieveModelV1ModelsModelIDGetRequestTypedDict", + "RetrieveFileRequest", + "RetrieveFileRequestTypedDict", + "RetrieveModelRequest", + "RetrieveModelRequestTypedDict", "SDKError", "SSETypes", "SampleType", @@ -1543,6 +1438,10 @@ "SharingOut", "SharingOutTypedDict", "Source", + "StartFineTuningJobRequest", + "StartFineTuningJobRequestTypedDict", + "StartFineTuningJobResponse", + "StartFineTuningJobResponseTypedDict", "SystemMessage", "SystemMessageContent", "SystemMessageContentChunks", @@ -1620,10 +1519,27 @@ "TranscriptionStreamTextDelta", "TranscriptionStreamTextDeltaTypedDict", "UnarchiveFTModelOut", - "UnarchiveFTModelOutObject", "UnarchiveFTModelOutTypedDict", + "UnarchiveModelRequest", + "UnarchiveModelRequestTypedDict", + "UpdateAgentRequest", + "UpdateAgentRequestTypedDict", + "UpdateAgentVersionRequest", + "UpdateAgentVersionRequestTypedDict", + "UpdateDocumentRequest", + "UpdateDocumentRequestTypedDict", "UpdateFTModelIn", "UpdateFTModelInTypedDict", + "UpdateLibraryRequest", + "UpdateLibraryRequestTypedDict", + "UpdateModelRequest", + "UpdateModelRequestTypedDict", + "UpdateModelResponse", + "UpdateModelResponseTypedDict", + "UpdateOrCreateLibraryAccessRequest", + "UpdateOrCreateLibraryAccessRequestTypedDict", + "UploadDocumentRequest", + "UploadDocumentRequestTypedDict", "UploadFileOut", "UploadFileOutTypedDict", "UsageInfo", @@ -1669,48 +1585,6 @@ "AgentHandoffEntryTypedDict": ".agenthandoffentry", "AgentHandoffStartedEvent": ".agenthandoffstartedevent", "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": 
".agents_api_v1_agents_create_or_update_aliasop", - "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", - "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", - "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", - "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", - "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", - "AgentsAPIV1AgentsGetAgentVersion": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsGetAgentVersionTypedDict": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", - "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", - "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", - "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", - "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", - "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", - "AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", - "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", - "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", - "AgentsAPIV1AgentsUpdateRequest": ".agents_api_v1_agents_updateop", - "AgentsAPIV1AgentsUpdateRequestTypedDict": ".agents_api_v1_agents_updateop", - "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", - "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", - "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", - "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", - 
"AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", - "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", - "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", - "ResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", - "ResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", - "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", - "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", - "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsListResponse": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsListResponseTypedDict": ".agents_api_v1_conversations_listop", - "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", - "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", - "AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", - "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", - "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", - "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", "AgentsCompletionRequest": ".agentscompletionrequest", "AgentsCompletionRequestMessage": ".agentscompletionrequest", "AgentsCompletionRequestMessageTypedDict": ".agentscompletionrequest", @@ -1732,9 +1606,14 @@ "AgentUpdateRequestToolTypedDict": ".agentupdaterequest", "AgentUpdateRequestTypedDict": ".agentupdaterequest", "APIEndpoint": ".apiendpoint", + "AppendConversationRequest": ".appendconversationop", + 
"AppendConversationRequestTypedDict": ".appendconversationop", + "AppendConversationStreamRequest": ".appendconversationstreamop", + "AppendConversationStreamRequestTypedDict": ".appendconversationstreamop", "ArchiveFTModelOut": ".archiveftmodelout", - "ArchiveFTModelOutObject": ".archiveftmodelout", "ArchiveFTModelOutTypedDict": ".archiveftmodelout", + "ArchiveModelRequest": ".archivemodelop", + "ArchiveModelRequestTypedDict": ".archivemodelop", "AssistantMessage": ".assistantmessage", "AssistantMessageContent": ".assistantmessage", "AssistantMessageContentTypedDict": ".assistantmessage", @@ -1756,15 +1635,19 @@ "BatchJobIn": ".batchjobin", "BatchJobInTypedDict": ".batchjobin", "BatchJobOut": ".batchjobout", - "BatchJobOutObject": ".batchjobout", "BatchJobOutTypedDict": ".batchjobout", "BatchJobsOut": ".batchjobsout", - "BatchJobsOutObject": ".batchjobsout", "BatchJobsOutTypedDict": ".batchjobsout", "BatchJobStatus": ".batchjobstatus", "BatchRequest": ".batchrequest", "BatchRequestTypedDict": ".batchrequest", "BuiltInConnectors": ".builtinconnectors", + "CancelBatchJobRequest": ".cancelbatchjobop", + "CancelBatchJobRequestTypedDict": ".cancelbatchjobop", + "CancelFineTuningJobRequest": ".cancelfinetuningjobop", + "CancelFineTuningJobRequestTypedDict": ".cancelfinetuningjobop", + "CancelFineTuningJobResponse": ".cancelfinetuningjobop", + "CancelFineTuningJobResponseTypedDict": ".cancelfinetuningjobop", "ChatClassificationRequest": ".chatclassificationrequest", "ChatClassificationRequestTypedDict": ".chatclassificationrequest", "ChatCompletionChoice": ".chatcompletionchoice", @@ -1809,16 +1692,13 @@ "ClassifierDetailedJobOut": ".classifierdetailedjobout", "ClassifierDetailedJobOutIntegration": ".classifierdetailedjobout", "ClassifierDetailedJobOutIntegrationTypedDict": ".classifierdetailedjobout", - "ClassifierDetailedJobOutObject": ".classifierdetailedjobout", "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", "ClassifierDetailedJobOutTypedDict": 
".classifierdetailedjobout", "ClassifierFTModelOut": ".classifierftmodelout", - "ClassifierFTModelOutObject": ".classifierftmodelout", "ClassifierFTModelOutTypedDict": ".classifierftmodelout", "ClassifierJobOut": ".classifierjobout", "ClassifierJobOutIntegration": ".classifierjobout", "ClassifierJobOutIntegrationTypedDict": ".classifierjobout", - "ClassifierJobOutObject": ".classifierjobout", "ClassifierJobOutStatus": ".classifierjobout", "ClassifierJobOutTypedDict": ".classifierjobout", "ClassifierTargetIn": ".classifiertargetin", @@ -1840,7 +1720,6 @@ "CompletionDetailedJobOut": ".completiondetailedjobout", "CompletionDetailedJobOutIntegration": ".completiondetailedjobout", "CompletionDetailedJobOutIntegrationTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutObject": ".completiondetailedjobout", "CompletionDetailedJobOutRepository": ".completiondetailedjobout", "CompletionDetailedJobOutRepositoryTypedDict": ".completiondetailedjobout", "CompletionDetailedJobOutStatus": ".completiondetailedjobout", @@ -1848,12 +1727,10 @@ "CompletionEvent": ".completionevent", "CompletionEventTypedDict": ".completionevent", "CompletionFTModelOut": ".completionftmodelout", - "CompletionFTModelOutObject": ".completionftmodelout", "CompletionFTModelOutTypedDict": ".completionftmodelout", "CompletionJobOut": ".completionjobout", "CompletionJobOutIntegration": ".completionjobout", "CompletionJobOutIntegrationTypedDict": ".completionjobout", - "CompletionJobOutObject": ".completionjobout", "CompletionJobOutRepository": ".completionjobout", "CompletionJobOutRepositoryTypedDict": ".completionjobout", "CompletionJobOutStatus": ".completionjobout", @@ -1918,10 +1795,30 @@ "ConversationStreamRequestTypedDict": ".conversationstreamrequest", "ConversationUsageInfo": ".conversationusageinfo", "ConversationUsageInfoTypedDict": ".conversationusageinfo", - "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", - 
"DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", + "CreateFineTuningJobResponse": ".createfinetuningjobop", + "CreateFineTuningJobResponseTypedDict": ".createfinetuningjobop", + "Response": ".createfinetuningjobop", + "ResponseTypedDict": ".createfinetuningjobop", + "CreateOrUpdateAgentAliasRequest": ".createorupdateagentaliasop", + "CreateOrUpdateAgentAliasRequestTypedDict": ".createorupdateagentaliasop", + "DeleteAgentAliasRequest": ".deleteagentaliasop", + "DeleteAgentAliasRequestTypedDict": ".deleteagentaliasop", + "DeleteAgentRequest": ".deleteagentop", + "DeleteAgentRequestTypedDict": ".deleteagentop", + "DeleteConversationRequest": ".deleteconversationop", + "DeleteConversationRequestTypedDict": ".deleteconversationop", + "DeleteDocumentRequest": ".deletedocumentop", + "DeleteDocumentRequestTypedDict": ".deletedocumentop", + "DeleteFileRequest": ".deletefileop", + "DeleteFileRequestTypedDict": ".deletefileop", "DeleteFileOut": ".deletefileout", "DeleteFileOutTypedDict": ".deletefileout", + "DeleteLibraryAccessRequest": ".deletelibraryaccessop", + "DeleteLibraryAccessRequestTypedDict": ".deletelibraryaccessop", + "DeleteLibraryRequest": ".deletelibraryop", + "DeleteLibraryRequestTypedDict": ".deletelibraryop", + "DeleteModelRequest": ".deletemodelop", + "DeleteModelRequestTypedDict": ".deletemodelop", "DeleteModelOut": ".deletemodelout", "DeleteModelOutTypedDict": ".deletemodelout", "DeltaMessage": ".deltamessage", @@ -1941,6 +1838,8 @@ "DocumentURLChunk": ".documenturlchunk", "DocumentURLChunkType": ".documenturlchunk", "DocumentURLChunkTypedDict": ".documenturlchunk", + "DownloadFileRequest": ".downloadfileop", + "DownloadFileRequestTypedDict": ".downloadfileop", "EmbeddingDtype": ".embeddingdtype", "EmbeddingRequest": ".embeddingrequest", "EmbeddingRequestInputs": ".embeddingrequest", @@ -1959,18 +1858,6 @@ "FileChunk": ".filechunk", "FileChunkTypedDict": ".filechunk", "FilePurpose": ".filepurpose", - 
"FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", - "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", - "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", - "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", - "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", - "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", - "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", - "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", - "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", - "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", - "MultiPartBodyParams": ".files_api_routes_upload_fileop", - "MultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", "FileSchema": ".fileschema", "FileSchemaTypedDict": ".fileschema", "FileSignedURL": ".filesignedurl", @@ -2013,6 +1900,40 @@ "FunctionResultEntryTypedDict": ".functionresultentry", "FunctionTool": ".functiontool", "FunctionToolTypedDict": ".functiontool", + "GetAgentAgentVersion": ".getagentop", + "GetAgentAgentVersionTypedDict": ".getagentop", + "GetAgentRequest": ".getagentop", + "GetAgentRequestTypedDict": ".getagentop", + "GetAgentVersionRequest": ".getagentversionop", + "GetAgentVersionRequestTypedDict": ".getagentversionop", + "GetBatchJobRequest": ".getbatchjobop", + "GetBatchJobRequestTypedDict": ".getbatchjobop", + "GetConversationHistoryRequest": ".getconversationhistoryop", + "GetConversationHistoryRequestTypedDict": ".getconversationhistoryop", + "GetConversationMessagesRequest": ".getconversationmessagesop", + "GetConversationMessagesRequestTypedDict": ".getconversationmessagesop", + "GetConversationRequest": ".getconversationop", + "GetConversationRequestTypedDict": ".getconversationop", + "ResponseV1ConversationsGet": ".getconversationop", + 
"ResponseV1ConversationsGetTypedDict": ".getconversationop", + "GetDocumentExtractedTextSignedURLRequest": ".getdocumentextractedtextsignedurlop", + "GetDocumentExtractedTextSignedURLRequestTypedDict": ".getdocumentextractedtextsignedurlop", + "GetDocumentRequest": ".getdocumentop", + "GetDocumentRequestTypedDict": ".getdocumentop", + "GetDocumentSignedURLRequest": ".getdocumentsignedurlop", + "GetDocumentSignedURLRequestTypedDict": ".getdocumentsignedurlop", + "GetDocumentStatusRequest": ".getdocumentstatusop", + "GetDocumentStatusRequestTypedDict": ".getdocumentstatusop", + "GetDocumentTextContentRequest": ".getdocumenttextcontentop", + "GetDocumentTextContentRequestTypedDict": ".getdocumenttextcontentop", + "GetFileSignedURLRequest": ".getfilesignedurlop", + "GetFileSignedURLRequestTypedDict": ".getfilesignedurlop", + "GetFineTuningJobRequest": ".getfinetuningjobop", + "GetFineTuningJobRequestTypedDict": ".getfinetuningjobop", + "GetFineTuningJobResponse": ".getfinetuningjobop", + "GetFineTuningJobResponseTypedDict": ".getfinetuningjobop", + "GetLibraryRequest": ".getlibraryop", + "GetLibraryRequestTypedDict": ".getlibraryop", "GithubRepositoryIn": ".githubrepositoryin", "GithubRepositoryInTypedDict": ".githubrepositoryin", "GithubRepositoryOut": ".githubrepositoryout", @@ -2050,93 +1971,46 @@ "JobInTypedDict": ".jobin", "JobMetadataOut": ".jobmetadataout", "JobMetadataOutTypedDict": ".jobmetadataout", - "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", - "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", - "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", - "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", - 
"JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "Response": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "ResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningGetFineTuningJobsStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", - "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - 
"JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", "JobsOut": ".jobsout", "JobsOutData": ".jobsout", "JobsOutDataTypedDict": ".jobsout", - "JobsOutObject": ".jobsout", "JobsOutTypedDict": ".jobsout", "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", "LegacyJobMetadataOut": ".legacyjobmetadataout", - "LegacyJobMetadataOutObject": ".legacyjobmetadataout", "LegacyJobMetadataOutTypedDict": ".legacyjobmetadataout", - "LibrariesDeleteV1Request": ".libraries_delete_v1op", - "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", - "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", - "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", - "LibrariesDocumentsGetExtractedTextSignedURLV1Request": ".libraries_documents_get_extracted_text_signed_url_v1op", - "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", - 
"LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", - "LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", - "LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", - "LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", - "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", - "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", - "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", - "LibrariesDocumentsGetV1RequestTypedDict": ".libraries_documents_get_v1op", - "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", - "LibrariesDocumentsListV1RequestTypedDict": ".libraries_documents_list_v1op", - "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", - "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", - "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", - "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", - "DocumentUpload": ".libraries_documents_upload_v1op", - "DocumentUploadTypedDict": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", - "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", - "LibrariesGetV1Request": ".libraries_get_v1op", - "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", - "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", - "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", - "LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op", - "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", - "LibrariesShareListV1Request": ".libraries_share_list_v1op", - "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", - 
"LibrariesUpdateV1Request": ".libraries_update_v1op", - "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", "LibraryIn": ".libraryin", "LibraryInTypedDict": ".libraryin", "LibraryInUpdate": ".libraryinupdate", "LibraryInUpdateTypedDict": ".libraryinupdate", "LibraryOut": ".libraryout", "LibraryOutTypedDict": ".libraryout", + "ListAgentAliasesRequest": ".listagentaliasesop", + "ListAgentAliasesRequestTypedDict": ".listagentaliasesop", + "ListAgentsRequest": ".listagentsop", + "ListAgentsRequestTypedDict": ".listagentsop", + "ListAgentVersionsRequest": ".listagentversionsop", + "ListAgentVersionsRequestTypedDict": ".listagentversionsop", + "ListBatchJobsRequest": ".listbatchjobsop", + "ListBatchJobsRequestTypedDict": ".listbatchjobsop", + "OrderBy": ".listbatchjobsop", + "ListConversationsRequest": ".listconversationsop", + "ListConversationsRequestTypedDict": ".listconversationsop", + "ListConversationsResponse": ".listconversationsop", + "ListConversationsResponseTypedDict": ".listconversationsop", "ListDocumentOut": ".listdocumentout", "ListDocumentOutTypedDict": ".listdocumentout", + "ListDocumentsRequest": ".listdocumentsop", + "ListDocumentsRequestTypedDict": ".listdocumentsop", + "ListFilesRequest": ".listfilesop", + "ListFilesRequestTypedDict": ".listfilesop", "ListFilesOut": ".listfilesout", "ListFilesOutTypedDict": ".listfilesout", + "ListFineTuningJobsRequest": ".listfinetuningjobsop", + "ListFineTuningJobsRequestTypedDict": ".listfinetuningjobsop", + "ListFineTuningJobsStatus": ".listfinetuningjobsop", + "ListLibraryAccessesRequest": ".listlibraryaccessesop", + "ListLibraryAccessesRequestTypedDict": ".listlibraryaccessesop", "ListLibraryOut": ".listlibraryout", "ListLibraryOutTypedDict": ".listlibraryout", "ListSharingOut": ".listsharingout", @@ -2226,6 +2100,8 @@ "ReferenceChunk": ".referencechunk", "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", + "ReprocessDocumentRequest": ".reprocessdocumentop", + 
"ReprocessDocumentRequestTypedDict": ".reprocessdocumentop", "RequestSource": ".requestsource", "ResponseDoneEvent": ".responsedoneevent", "ResponseDoneEventTypedDict": ".responsedoneevent", @@ -2237,12 +2113,18 @@ "ResponseStartedEvent": ".responsestartedevent", "ResponseStartedEventTypedDict": ".responsestartedevent", "ResponseValidationError": ".responsevalidationerror", - "ResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", - "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", - "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RestartConversationRequest": ".restartconversationop", + "RestartConversationRequestTypedDict": ".restartconversationop", + "RestartConversationStreamRequest": ".restartconversationstreamop", + "RestartConversationStreamRequestTypedDict": ".restartconversationstreamop", + "RetrieveFileRequest": ".retrievefileop", + "RetrieveFileRequestTypedDict": ".retrievefileop", "RetrieveFileOut": ".retrievefileout", "RetrieveFileOutTypedDict": ".retrievefileout", + "ResponseRetrieveModelV1ModelsModelIDGet": ".retrievemodelop", + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrievemodelop", + "RetrieveModelRequest": ".retrievemodelop", + "RetrieveModelRequestTypedDict": ".retrievemodelop", "SampleType": ".sampletype", "SDKError": ".sdkerror", "Security": ".security", @@ -2256,6 +2138,10 @@ "SharingOutTypedDict": ".sharingout", "Source": ".source", "SSETypes": ".ssetypes", + "StartFineTuningJobRequest": ".startfinetuningjobop", + "StartFineTuningJobRequestTypedDict": ".startfinetuningjobop", + "StartFineTuningJobResponse": ".startfinetuningjobop", + "StartFineTuningJobResponseTypedDict": ".startfinetuningjobop", "SystemMessage": ".systemmessage", "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": 
".systemmessage", @@ -2332,10 +2218,31 @@ "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", "UnarchiveFTModelOut": ".unarchiveftmodelout", - "UnarchiveFTModelOutObject": ".unarchiveftmodelout", "UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", + "UnarchiveModelRequest": ".unarchivemodelop", + "UnarchiveModelRequestTypedDict": ".unarchivemodelop", + "UpdateAgentRequest": ".updateagentop", + "UpdateAgentRequestTypedDict": ".updateagentop", + "UpdateAgentVersionRequest": ".updateagentversionop", + "UpdateAgentVersionRequestTypedDict": ".updateagentversionop", + "UpdateDocumentRequest": ".updatedocumentop", + "UpdateDocumentRequestTypedDict": ".updatedocumentop", "UpdateFTModelIn": ".updateftmodelin", "UpdateFTModelInTypedDict": ".updateftmodelin", + "UpdateLibraryRequest": ".updatelibraryop", + "UpdateLibraryRequestTypedDict": ".updatelibraryop", + "UpdateModelRequest": ".updatemodelop", + "UpdateModelRequestTypedDict": ".updatemodelop", + "UpdateModelResponse": ".updatemodelop", + "UpdateModelResponseTypedDict": ".updatemodelop", + "UpdateOrCreateLibraryAccessRequest": ".updateorcreatelibraryaccessop", + "UpdateOrCreateLibraryAccessRequestTypedDict": ".updateorcreatelibraryaccessop", + "DocumentUpload": ".uploaddocumentop", + "DocumentUploadTypedDict": ".uploaddocumentop", + "UploadDocumentRequest": ".uploaddocumentop", + "UploadDocumentRequestTypedDict": ".uploaddocumentop", + "MultiPartBodyParams": ".uploadfileop", + "MultiPartBodyParamsTypedDict": ".uploadfileop", "UploadFileOut": ".uploadfileout", "UploadFileOutTypedDict": ".uploadfileout", "UsageInfo": ".usageinfo", diff --git a/src/mistralai/client/models/agent.py b/src/mistralai/client/models/agent.py index b2fe3939..05ae24cd 100644 --- a/src/mistralai/client/models/agent.py +++ b/src/mistralai/client/models/agent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1336849c84fb from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict @@ -70,6 +71,7 @@ class AgentTypedDict(TypedDict): handoffs: NotRequired[Nullable[List[str]]] metadata: NotRequired[Nullable[Dict[str, Any]]] object: NotRequired[AgentObject] + version_message: NotRequired[Nullable[str]] class Agent(BaseModel): @@ -108,6 +110,8 @@ class Agent(BaseModel): object: Optional[AgentObject] = "agent" + version_message: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -118,8 +122,15 @@ def serialize_model(self, handler): "handoffs", "metadata", "object", + "version_message", + ] + nullable_fields = [ + "instructions", + "description", + "handoffs", + "metadata", + "version_message", ] - nullable_fields = ["instructions", "description", "handoffs", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/client/models/agentaliasresponse.py b/src/mistralai/client/models/agentaliasresponse.py index 4bc8225c..6972af2a 100644 --- a/src/mistralai/client/models/agentaliasresponse.py +++ b/src/mistralai/client/models/agentaliasresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3899a98a55dd from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agentconversation.py b/src/mistralai/client/models/agentconversation.py index 5dfa8c31..a850d54c 100644 --- a/src/mistralai/client/models/agentconversation.py +++ b/src/mistralai/client/models/agentconversation.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1b7d73eddf51 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agentcreationrequest.py b/src/mistralai/client/models/agentcreationrequest.py index 561bef64..898d42a9 100644 --- a/src/mistralai/client/models/agentcreationrequest.py +++ b/src/mistralai/client/models/agentcreationrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 35b7f4933b3e from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict @@ -58,6 +59,7 @@ class AgentCreationRequestTypedDict(TypedDict): description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] metadata: NotRequired[Nullable[Dict[str, Any]]] + version_message: NotRequired[Nullable[str]] class AgentCreationRequest(BaseModel): @@ -80,6 +82,8 @@ class AgentCreationRequest(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET + version_message: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -89,8 +93,15 @@ def serialize_model(self, handler): "description", "handoffs", "metadata", + "version_message", + ] + nullable_fields = [ + "instructions", + "description", + "handoffs", + "metadata", + "version_message", ] - nullable_fields = ["instructions", "description", "handoffs", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/client/models/agenthandoffdoneevent.py b/src/mistralai/client/models/agenthandoffdoneevent.py index 40bb446b..40bf8497 100644 --- a/src/mistralai/client/models/agenthandoffdoneevent.py +++ b/src/mistralai/client/models/agenthandoffdoneevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 82628bb5fcea from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agenthandoffentry.py b/src/mistralai/client/models/agenthandoffentry.py index 0b0de13f..b18fe17c 100644 --- a/src/mistralai/client/models/agenthandoffentry.py +++ b/src/mistralai/client/models/agenthandoffentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5030bcaa3a07 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agenthandoffstartedevent.py b/src/mistralai/client/models/agenthandoffstartedevent.py index 93f56db0..e278aef3 100644 --- a/src/mistralai/client/models/agenthandoffstartedevent.py +++ b/src/mistralai/client/models/agenthandoffstartedevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2f6093d9b222 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agentscompletionrequest.py b/src/mistralai/client/models/agentscompletionrequest.py index 3b045ed6..f4a2d646 100644 --- a/src/mistralai/client/models/agentscompletionrequest.py +++ b/src/mistralai/client/models/agentscompletionrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3960bc4c545f from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/agentscompletionstreamrequest.py b/src/mistralai/client/models/agentscompletionstreamrequest.py index 23920c4e..732e2402 100644 --- a/src/mistralai/client/models/agentscompletionstreamrequest.py +++ b/src/mistralai/client/models/agentscompletionstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1b73f90befc2 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/agentupdaterequest.py b/src/mistralai/client/models/agentupdaterequest.py index be93157d..96e209d4 100644 --- a/src/mistralai/client/models/agentupdaterequest.py +++ b/src/mistralai/client/models/agentupdaterequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2d5a3a437819 from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict @@ -59,6 +60,7 @@ class AgentUpdateRequestTypedDict(TypedDict): handoffs: NotRequired[Nullable[List[str]]] deployment_chat: NotRequired[Nullable[bool]] metadata: NotRequired[Nullable[Dict[str, Any]]] + version_message: NotRequired[Nullable[str]] class AgentUpdateRequest(BaseModel): @@ -83,6 +85,8 @@ class AgentUpdateRequest(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET + version_message: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -95,6 +99,7 @@ def serialize_model(self, handler): "handoffs", "deployment_chat", "metadata", + "version_message", ] nullable_fields = [ "instructions", @@ -104,6 +109,7 @@ def serialize_model(self, handler): "handoffs", "deployment_chat", "metadata", + "version_message", ] null_default_fields = [] diff --git a/src/mistralai/client/models/apiendpoint.py b/src/mistralai/client/models/apiendpoint.py index a6072d56..a6665c10 100644 --- a/src/mistralai/client/models/apiendpoint.py +++ b/src/mistralai/client/models/apiendpoint.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 00b34ce0a24d from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/agents_api_v1_conversations_appendop.py b/src/mistralai/client/models/appendconversationop.py similarity index 87% rename from src/mistralai/client/models/agents_api_v1_conversations_appendop.py rename to src/mistralai/client/models/appendconversationop.py index 13d07ba9..710b8e1c 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_appendop.py +++ b/src/mistralai/client/models/appendconversationop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1c47dd1e7c7e from __future__ import annotations from .conversationappendrequest import ( @@ -10,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsAppendRequestTypedDict(TypedDict): +class AppendConversationRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation to which we append entries.""" conversation_append_request: ConversationAppendRequestTypedDict -class AgentsAPIV1ConversationsAppendRequest(BaseModel): +class AppendConversationRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py b/src/mistralai/client/models/appendconversationstreamop.py similarity index 87% rename from src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py rename to src/mistralai/client/models/appendconversationstreamop.py index 9f00ffd4..55efca0e 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py +++ b/src/mistralai/client/models/appendconversationstreamop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1ab08b189e9d from __future__ import annotations from .conversationappendstreamrequest import ( @@ -10,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsAppendStreamRequestTypedDict(TypedDict): +class AppendConversationStreamRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation to which we append entries.""" conversation_append_stream_request: ConversationAppendStreamRequestTypedDict -class AgentsAPIV1ConversationsAppendStreamRequest(BaseModel): +class AppendConversationStreamRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/archiveftmodelout.py b/src/mistralai/client/models/archiveftmodelout.py index 6108c7e1..3107116c 100644 --- a/src/mistralai/client/models/archiveftmodelout.py +++ b/src/mistralai/client/models/archiveftmodelout.py @@ -1,23 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: bab499599d30 from __future__ import annotations from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ArchiveFTModelOutObject = Literal["model",] +from typing_extensions import Annotated, NotRequired, TypedDict class ArchiveFTModelOutTypedDict(TypedDict): id: str - object: NotRequired[ArchiveFTModelOutObject] + object: Literal["model"] archived: NotRequired[bool] class ArchiveFTModelOut(BaseModel): id: str - object: Optional[ArchiveFTModelOutObject] = "model" + OBJECT: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" archived: Optional[bool] = True diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py b/src/mistralai/client/models/archivemodelop.py similarity index 76% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py rename to src/mistralai/client/models/archivemodelop.py index 4536b738..30b4a9bd 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py +++ b/src/mistralai/client/models/archivemodelop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: beefa1df3b7c from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): +class ArchiveModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to archive.""" -class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): +class ArchiveModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/assistantmessage.py b/src/mistralai/client/models/assistantmessage.py index 3ba14ce7..5a4a2085 100644 --- a/src/mistralai/client/models/assistantmessage.py +++ b/src/mistralai/client/models/assistantmessage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2b49546e0742 from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict diff --git a/src/mistralai/client/models/audiochunk.py b/src/mistralai/client/models/audiochunk.py index fae1193c..a5186827 100644 --- a/src/mistralai/client/models/audiochunk.py +++ b/src/mistralai/client/models/audiochunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ce5dce4dced2 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/audioencoding.py b/src/mistralai/client/models/audioencoding.py index 557f53ed..67fec75d 100644 --- a/src/mistralai/client/models/audioencoding.py +++ b/src/mistralai/client/models/audioencoding.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b14e6a50f730 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/audioformat.py b/src/mistralai/client/models/audioformat.py index 7ea10b3a..fef87ae7 100644 --- a/src/mistralai/client/models/audioformat.py +++ b/src/mistralai/client/models/audioformat.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c8655712c218 from __future__ import annotations from .audioencoding import AudioEncoding diff --git a/src/mistralai/client/models/audiotranscriptionrequest.py b/src/mistralai/client/models/audiotranscriptionrequest.py index 78a37978..8c47a83c 100644 --- a/src/mistralai/client/models/audiotranscriptionrequest.py +++ b/src/mistralai/client/models/audiotranscriptionrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e4148b4d23e7 from __future__ import annotations from .file import File, FileTypedDict diff --git a/src/mistralai/client/models/audiotranscriptionrequeststream.py b/src/mistralai/client/models/audiotranscriptionrequeststream.py index 35064361..a080cee2 100644 --- a/src/mistralai/client/models/audiotranscriptionrequeststream.py +++ b/src/mistralai/client/models/audiotranscriptionrequeststream.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 33a07317a3b3 from __future__ import annotations from .file import File, FileTypedDict diff --git a/src/mistralai/client/models/basemodelcard.py b/src/mistralai/client/models/basemodelcard.py index f16607d5..17a3e5c9 100644 --- a/src/mistralai/client/models/basemodelcard.py +++ b/src/mistralai/client/models/basemodelcard.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 556ebdc33276 from __future__ import annotations from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict diff --git a/src/mistralai/client/models/batcherror.py b/src/mistralai/client/models/batcherror.py index a9c8362b..c1bf722a 100644 --- a/src/mistralai/client/models/batcherror.py +++ b/src/mistralai/client/models/batcherror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1563e2a576ec from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/batchjobin.py b/src/mistralai/client/models/batchjobin.py index 39cf70b5..a0c3b914 100644 --- a/src/mistralai/client/models/batchjobin.py +++ b/src/mistralai/client/models/batchjobin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 72b25c2038d4 from __future__ import annotations from .apiendpoint import APIEndpoint diff --git a/src/mistralai/client/models/batchjobout.py b/src/mistralai/client/models/batchjobout.py index 008d43b4..99c2b951 100644 --- a/src/mistralai/client/models/batchjobout.py +++ b/src/mistralai/client/models/batchjobout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cbf1d872a46e from __future__ import annotations from .batcherror import BatchError, BatchErrorTypedDict @@ -10,12 +11,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -BatchJobOutObject = Literal["batch",] +from typing_extensions import Annotated, NotRequired, TypedDict class BatchJobOutTypedDict(TypedDict): @@ -29,7 +30,7 @@ class BatchJobOutTypedDict(TypedDict): completed_requests: int succeeded_requests: int failed_requests: int - object: NotRequired[BatchJobOutObject] + object: Literal["batch"] metadata: NotRequired[Nullable[Dict[str, Any]]] model: NotRequired[Nullable[str]] agent_id: NotRequired[Nullable[str]] @@ -61,7 +62,10 @@ class BatchJobOut(BaseModel): failed_requests: int - object: Optional[BatchJobOutObject] = "batch" + OBJECT: Annotated[ + Annotated[Optional[Literal["batch"]], AfterValidator(validate_const("batch"))], + pydantic.Field(alias="object"), + ] = "batch" metadata: OptionalNullable[Dict[str, Any]] = UNSET diff --git a/src/mistralai/client/models/batchjobsout.py b/src/mistralai/client/models/batchjobsout.py index 2654dac0..f65fc040 100644 --- a/src/mistralai/client/models/batchjobsout.py +++ b/src/mistralai/client/models/batchjobsout.py @@ -1,19 +1,20 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 20b2516e7efa from __future__ import annotations from .batchjobout import BatchJobOut, BatchJobOutTypedDict from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -BatchJobsOutObject = Literal["list",] +from typing_extensions import Annotated, NotRequired, TypedDict class BatchJobsOutTypedDict(TypedDict): total: int data: NotRequired[List[BatchJobOutTypedDict]] - object: NotRequired[BatchJobsOutObject] + object: Literal["list"] class BatchJobsOut(BaseModel): @@ -21,4 +22,7 @@ class BatchJobsOut(BaseModel): data: Optional[List[BatchJobOut]] = None - object: Optional[BatchJobsOutObject] = "list" + OBJECT: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" diff --git a/src/mistralai/client/models/batchjobstatus.py b/src/mistralai/client/models/batchjobstatus.py index 1ba3dd55..bd77faa2 100644 --- a/src/mistralai/client/models/batchjobstatus.py +++ b/src/mistralai/client/models/batchjobstatus.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 61e08cf5eea9 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/batchrequest.py b/src/mistralai/client/models/batchrequest.py index 24f50a9a..41c45234 100644 --- a/src/mistralai/client/models/batchrequest.py +++ b/src/mistralai/client/models/batchrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6f36819eeb46 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/builtinconnectors.py b/src/mistralai/client/models/builtinconnectors.py index 4a98b45b..ecf60d3c 100644 --- a/src/mistralai/client/models/builtinconnectors.py +++ b/src/mistralai/client/models/builtinconnectors.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2d276ce938dc from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py b/src/mistralai/client/models/cancelbatchjobop.py similarity index 76% rename from src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py rename to src/mistralai/client/models/cancelbatchjobop.py index 21a04f73..cd94ee86 100644 --- a/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py +++ b/src/mistralai/client/models/cancelbatchjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cebac10b56a9 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): +class CancelBatchJobRequestTypedDict(TypedDict): job_id: str -class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): +class CancelBatchJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/client/models/cancelfinetuningjobop.py similarity index 73% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py rename to src/mistralai/client/models/cancelfinetuningjobop.py index 5d9c026b..ddd445bb 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +++ b/src/mistralai/client/models/cancelfinetuningjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c9a1b39f0d02 from __future__ import annotations from .classifierdetailedjobout import ( @@ -16,26 +17,26 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): +class CancelFineTuningJobRequestTypedDict(TypedDict): job_id: str r"""The ID of the job to cancel.""" -class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): +class CancelFineTuningJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] r"""The ID of the job to cancel.""" -JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", +CancelFineTuningJobResponseTypedDict = TypeAliasType( + "CancelFineTuningJobResponseTypedDict", Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], ) r"""OK""" -JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ +CancelFineTuningJobResponse = Annotated[ Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], Field(discriminator="JOB_TYPE"), ] diff --git a/src/mistralai/client/models/chatclassificationrequest.py b/src/mistralai/client/models/chatclassificationrequest.py index 45081022..8b6d07b9 100644 --- a/src/mistralai/client/models/chatclassificationrequest.py +++ b/src/mistralai/client/models/chatclassificationrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: afd9cdc71834 from __future__ import annotations from .inputs import Inputs, InputsTypedDict diff --git a/src/mistralai/client/models/chatcompletionchoice.py b/src/mistralai/client/models/chatcompletionchoice.py index 5752f7c1..2c515f6e 100644 --- a/src/mistralai/client/models/chatcompletionchoice.py +++ b/src/mistralai/client/models/chatcompletionchoice.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7e6a512f6a04 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/chatcompletionrequest.py b/src/mistralai/client/models/chatcompletionrequest.py index 62c375e0..4f7d071b 100644 --- a/src/mistralai/client/models/chatcompletionrequest.py +++ b/src/mistralai/client/models/chatcompletionrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9979805d8c38 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/chatcompletionresponse.py b/src/mistralai/client/models/chatcompletionresponse.py index 60a1f561..7092bbc1 100644 --- a/src/mistralai/client/models/chatcompletionresponse.py +++ b/src/mistralai/client/models/chatcompletionresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 669d996b8e82 from __future__ import annotations from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict diff --git a/src/mistralai/client/models/chatcompletionstreamrequest.py b/src/mistralai/client/models/chatcompletionstreamrequest.py index 4e5c281d..ec7d2ae1 100644 --- a/src/mistralai/client/models/chatcompletionstreamrequest.py +++ b/src/mistralai/client/models/chatcompletionstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 18cb2b2415d4 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/chatmoderationrequest.py b/src/mistralai/client/models/chatmoderationrequest.py index 4e2611c8..a8d021e8 100644 --- a/src/mistralai/client/models/chatmoderationrequest.py +++ b/src/mistralai/client/models/chatmoderationrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 057aecb07275 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/checkpointout.py b/src/mistralai/client/models/checkpointout.py index 89189ed1..3e8d90e9 100644 --- a/src/mistralai/client/models/checkpointout.py +++ b/src/mistralai/client/models/checkpointout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3866fe32cd7c from __future__ import annotations from .metricout import MetricOut, MetricOutTypedDict diff --git a/src/mistralai/client/models/classificationrequest.py b/src/mistralai/client/models/classificationrequest.py index c724ff53..903706c3 100644 --- a/src/mistralai/client/models/classificationrequest.py +++ b/src/mistralai/client/models/classificationrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6942fe3de24a from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/classificationresponse.py b/src/mistralai/client/models/classificationresponse.py index 4bc21a58..d2f09f43 100644 --- a/src/mistralai/client/models/classificationresponse.py +++ b/src/mistralai/client/models/classificationresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: eaf279db1109 from __future__ import annotations from .classificationtargetresult import ( diff --git a/src/mistralai/client/models/classificationtargetresult.py b/src/mistralai/client/models/classificationtargetresult.py index 89a137c3..6c7d6231 100644 --- a/src/mistralai/client/models/classificationtargetresult.py +++ b/src/mistralai/client/models/classificationtargetresult.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2445f12b2a57 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/classifierdetailedjobout.py b/src/mistralai/client/models/classifierdetailedjobout.py index ffe99270..bc5c5381 100644 --- a/src/mistralai/client/models/classifierdetailedjobout.py +++ b/src/mistralai/client/models/classifierdetailedjobout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d8daeb39ef9f from __future__ import annotations from .checkpointout import CheckpointOut, CheckpointOutTypedDict @@ -43,9 +44,6 @@ ] -ClassifierDetailedJobOutObject = Literal["job",] - - ClassifierDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict @@ -56,7 +54,6 @@ class ClassifierDetailedJobOutTypedDict(TypedDict): id: str auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: ClassifierDetailedJobOutStatus created_at: int modified_at: int @@ -64,7 +61,7 @@ class ClassifierDetailedJobOutTypedDict(TypedDict): hyperparameters: ClassifierTrainingParametersTypedDict classifier_targets: List[ClassifierTargetOutTypedDict] validation_files: NotRequired[Nullable[List[str]]] - object: NotRequired[ClassifierDetailedJobOutObject] + object: Literal["job"] fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] integrations: NotRequired[ @@ -84,7 +81,6 @@ class ClassifierDetailedJobOut(BaseModel): auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: ClassifierDetailedJobOutStatus @@ -100,7 +96,10 @@ class ClassifierDetailedJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET - object: Optional[ClassifierDetailedJobOutObject] = "job" + OBJECT: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" fine_tuned_model: OptionalNullable[str] = 
UNSET diff --git a/src/mistralai/client/models/classifierftmodelout.py b/src/mistralai/client/models/classifierftmodelout.py index c6d34167..182f4954 100644 --- a/src/mistralai/client/models/classifierftmodelout.py +++ b/src/mistralai/client/models/classifierftmodelout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2903a7123b06 from __future__ import annotations from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict @@ -21,9 +22,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -ClassifierFTModelOutObject = Literal["model",] - - class ClassifierFTModelOutTypedDict(TypedDict): id: str created: int @@ -35,7 +33,7 @@ class ClassifierFTModelOutTypedDict(TypedDict): capabilities: FTModelCapabilitiesOutTypedDict job: str classifier_targets: List[ClassifierTargetOutTypedDict] - object: NotRequired[ClassifierFTModelOutObject] + object: Literal["model"] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] @@ -64,7 +62,10 @@ class ClassifierFTModelOut(BaseModel): classifier_targets: List[ClassifierTargetOut] - object: Optional[ClassifierFTModelOutObject] = "model" + OBJECT: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" name: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/classifierjobout.py b/src/mistralai/client/models/classifierjobout.py index 1390aea1..03a5b11c 100644 --- a/src/mistralai/client/models/classifierjobout.py +++ b/src/mistralai/client/models/classifierjobout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e19e9c4416cc from __future__ import annotations from .classifiertrainingparameters import ( @@ -41,10 +42,6 @@ r"""The current status of the fine-tuning job.""" -ClassifierJobOutObject = Literal["job",] -r"""The object type of the fine-tuning job.""" - - ClassifierJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict @@ -56,7 +53,6 @@ class ClassifierJobOutTypedDict(TypedDict): r"""The ID of the job.""" auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: ClassifierJobOutStatus r"""The current status of the fine-tuning job.""" created_at: int @@ -68,7 +64,7 @@ class ClassifierJobOutTypedDict(TypedDict): hyperparameters: ClassifierTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain validation data.""" - object: NotRequired[ClassifierJobOutObject] + object: Literal["job"] r"""The object type of the fine-tuning job.""" fine_tuned_model: NotRequired[Nullable[str]] r"""The name of the fine-tuned model that is being created. 
The value will be `null` if the fine-tuning job is still running.""" @@ -90,7 +86,6 @@ class ClassifierJobOut(BaseModel): auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: ClassifierJobOutStatus r"""The current status of the fine-tuning job.""" @@ -109,7 +104,10 @@ class ClassifierJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - object: Optional[ClassifierJobOutObject] = "job" + OBJECT: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" r"""The object type of the fine-tuning job.""" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/classifiertargetin.py b/src/mistralai/client/models/classifiertargetin.py index 231ee21e..b250109b 100644 --- a/src/mistralai/client/models/classifiertargetin.py +++ b/src/mistralai/client/models/classifiertargetin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ed021de1c06c from __future__ import annotations from .ftclassifierlossfunction import FTClassifierLossFunction diff --git a/src/mistralai/client/models/classifiertargetout.py b/src/mistralai/client/models/classifiertargetout.py index 957104a7..3d41a4d9 100644 --- a/src/mistralai/client/models/classifiertargetout.py +++ b/src/mistralai/client/models/classifiertargetout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5131f55abefe from __future__ import annotations from .ftclassifierlossfunction import FTClassifierLossFunction diff --git a/src/mistralai/client/models/classifiertrainingparameters.py b/src/mistralai/client/models/classifiertrainingparameters.py index 60f53c37..f360eda5 100644 --- a/src/mistralai/client/models/classifiertrainingparameters.py +++ b/src/mistralai/client/models/classifiertrainingparameters.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4000b05e3b8d from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/classifiertrainingparametersin.py b/src/mistralai/client/models/classifiertrainingparametersin.py index e24c9dde..85360a7e 100644 --- a/src/mistralai/client/models/classifiertrainingparametersin.py +++ b/src/mistralai/client/models/classifiertrainingparametersin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4b33d5cf0345 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/codeinterpretertool.py b/src/mistralai/client/models/codeinterpretertool.py index 2f34cbda..f69c7a57 100644 --- a/src/mistralai/client/models/codeinterpretertool.py +++ b/src/mistralai/client/models/codeinterpretertool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 950cd8f4ad49 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/completionargs.py b/src/mistralai/client/models/completionargs.py index 010910f6..918832ac 100644 --- a/src/mistralai/client/models/completionargs.py +++ b/src/mistralai/client/models/completionargs.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3db008bcddca from __future__ import annotations from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict diff --git a/src/mistralai/client/models/completionargsstop.py b/src/mistralai/client/models/completionargsstop.py index de7a0956..39c858e6 100644 --- a/src/mistralai/client/models/completionargsstop.py +++ b/src/mistralai/client/models/completionargsstop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5f339214501d from __future__ import annotations from typing import List, Union diff --git a/src/mistralai/client/models/completionchunk.py b/src/mistralai/client/models/completionchunk.py index 9790db6f..67f447d0 100644 --- a/src/mistralai/client/models/completionchunk.py +++ b/src/mistralai/client/models/completionchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d786b44926f4 from __future__ import annotations from .completionresponsestreamchoice import ( diff --git a/src/mistralai/client/models/completiondetailedjobout.py b/src/mistralai/client/models/completiondetailedjobout.py index ea444b8b..cd3a86ee 100644 --- a/src/mistralai/client/models/completiondetailedjobout.py +++ b/src/mistralai/client/models/completiondetailedjobout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9bc38dcfbddf from __future__ import annotations from .checkpointout import CheckpointOut, CheckpointOutTypedDict @@ -43,9 +44,6 @@ ] -CompletionDetailedJobOutObject = Literal["job",] - - CompletionDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict @@ -62,14 +60,13 @@ class CompletionDetailedJobOutTypedDict(TypedDict): id: str auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: CompletionDetailedJobOutStatus created_at: int modified_at: int training_files: List[str] hyperparameters: CompletionTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] - object: NotRequired[CompletionDetailedJobOutObject] + object: Literal["job"] fine_tuned_model: NotRequired[Nullable[str]] suffix: NotRequired[Nullable[str]] integrations: NotRequired[ @@ -90,7 +87,6 @@ class CompletionDetailedJobOut(BaseModel): auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: CompletionDetailedJobOutStatus @@ -104,7 +100,10 @@ class CompletionDetailedJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET - object: Optional[CompletionDetailedJobOutObject] = "job" + OBJECT: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/completionevent.py 
b/src/mistralai/client/models/completionevent.py index 52db911e..3b90ab0c 100644 --- a/src/mistralai/client/models/completionevent.py +++ b/src/mistralai/client/models/completionevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c68817e7e190 from __future__ import annotations from .completionchunk import CompletionChunk, CompletionChunkTypedDict diff --git a/src/mistralai/client/models/completionftmodelout.py b/src/mistralai/client/models/completionftmodelout.py index 92f530af..7ecbf54a 100644 --- a/src/mistralai/client/models/completionftmodelout.py +++ b/src/mistralai/client/models/completionftmodelout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0f5277833b3e from __future__ import annotations from .ftmodelcapabilitiesout import ( @@ -20,9 +21,6 @@ from typing_extensions import Annotated, NotRequired, TypedDict -CompletionFTModelOutObject = Literal["model",] - - class CompletionFTModelOutTypedDict(TypedDict): id: str created: int @@ -33,7 +31,7 @@ class CompletionFTModelOutTypedDict(TypedDict): archived: bool capabilities: FTModelCapabilitiesOutTypedDict job: str - object: NotRequired[CompletionFTModelOutObject] + object: Literal["model"] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] max_context_length: NotRequired[int] @@ -60,7 +58,10 @@ class CompletionFTModelOut(BaseModel): job: str - object: Optional[CompletionFTModelOutObject] = "model" + OBJECT: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" name: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/completionjobout.py b/src/mistralai/client/models/completionjobout.py index 1628d8bb..42e5f6c6 100644 --- a/src/mistralai/client/models/completionjobout.py +++ 
b/src/mistralai/client/models/completionjobout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 712e6c524f9a from __future__ import annotations from .completiontrainingparameters import ( @@ -42,10 +43,6 @@ r"""The current status of the fine-tuning job.""" -CompletionJobOutObject = Literal["job",] -r"""The object type of the fine-tuning job.""" - - CompletionJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict @@ -63,7 +60,6 @@ class CompletionJobOutTypedDict(TypedDict): r"""The ID of the job.""" auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: CompletionJobOutStatus r"""The current status of the fine-tuning job.""" created_at: int @@ -75,7 +71,7 @@ class CompletionJobOutTypedDict(TypedDict): hyperparameters: CompletionTrainingParametersTypedDict validation_files: NotRequired[Nullable[List[str]]] r"""A list containing the IDs of uploaded files that contain validation data.""" - object: NotRequired[CompletionJobOutObject] + object: Literal["job"] r"""The object type of the fine-tuning job.""" fine_tuned_model: NotRequired[Nullable[str]] r"""The name of the fine-tuned model that is being created. 
The value will be `null` if the fine-tuning job is still running.""" @@ -98,7 +94,6 @@ class CompletionJobOut(BaseModel): auto_start: bool model: str - r"""The name of the model to fine-tune.""" status: CompletionJobOutStatus r"""The current status of the fine-tuning job.""" @@ -117,7 +112,10 @@ class CompletionJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - object: Optional[CompletionJobOutObject] = "job" + OBJECT: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" r"""The object type of the fine-tuning job.""" fine_tuned_model: OptionalNullable[str] = UNSET diff --git a/src/mistralai/client/models/completionresponsestreamchoice.py b/src/mistralai/client/models/completionresponsestreamchoice.py index 1b8d6fac..119a9690 100644 --- a/src/mistralai/client/models/completionresponsestreamchoice.py +++ b/src/mistralai/client/models/completionresponsestreamchoice.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5969a6bc07f3 from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict diff --git a/src/mistralai/client/models/completiontrainingparameters.py b/src/mistralai/client/models/completiontrainingparameters.py index 36b285ab..4b846b1b 100644 --- a/src/mistralai/client/models/completiontrainingparameters.py +++ b/src/mistralai/client/models/completiontrainingparameters.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: be202ea0d5a6 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/completiontrainingparametersin.py b/src/mistralai/client/models/completiontrainingparametersin.py index d0315d99..20b74ad9 100644 --- a/src/mistralai/client/models/completiontrainingparametersin.py +++ b/src/mistralai/client/models/completiontrainingparametersin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0df22b873b5f from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/contentchunk.py b/src/mistralai/client/models/contentchunk.py index 0a25423f..eff4b8c6 100644 --- a/src/mistralai/client/models/contentchunk.py +++ b/src/mistralai/client/models/contentchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c007f5ee0325 from __future__ import annotations from .audiochunk import AudioChunk, AudioChunkTypedDict diff --git a/src/mistralai/client/models/conversationappendrequest.py b/src/mistralai/client/models/conversationappendrequest.py index 867c0a41..0f07475e 100644 --- a/src/mistralai/client/models/conversationappendrequest.py +++ b/src/mistralai/client/models/conversationappendrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 81ce529e0865 from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict diff --git a/src/mistralai/client/models/conversationappendstreamrequest.py b/src/mistralai/client/models/conversationappendstreamrequest.py index f51407bf..a0d46f72 100644 --- a/src/mistralai/client/models/conversationappendstreamrequest.py +++ b/src/mistralai/client/models/conversationappendstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 27ada745e6ad from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict diff --git a/src/mistralai/client/models/conversationevents.py b/src/mistralai/client/models/conversationevents.py index 1c2b4592..f2476038 100644 --- a/src/mistralai/client/models/conversationevents.py +++ b/src/mistralai/client/models/conversationevents.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8c8b08d853f6 from __future__ import annotations from .agenthandoffdoneevent import AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict diff --git a/src/mistralai/client/models/conversationhistory.py b/src/mistralai/client/models/conversationhistory.py index 83e860f2..92d6cbf9 100644 --- a/src/mistralai/client/models/conversationhistory.py +++ b/src/mistralai/client/models/conversationhistory.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 60a51ff1682b from __future__ import annotations from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict diff --git a/src/mistralai/client/models/conversationinputs.py b/src/mistralai/client/models/conversationinputs.py index 4d30cd76..7ce3ffc3 100644 --- a/src/mistralai/client/models/conversationinputs.py +++ b/src/mistralai/client/models/conversationinputs.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 711b769f2c40 from __future__ import annotations from .inputentries import InputEntries, InputEntriesTypedDict diff --git a/src/mistralai/client/models/conversationmessages.py b/src/mistralai/client/models/conversationmessages.py index 1ea05369..1aa294a4 100644 --- a/src/mistralai/client/models/conversationmessages.py +++ b/src/mistralai/client/models/conversationmessages.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 011c39501c26 from __future__ import annotations from .messageentries import MessageEntries, MessageEntriesTypedDict diff --git a/src/mistralai/client/models/conversationrequest.py b/src/mistralai/client/models/conversationrequest.py index dd66c6ce..2005be82 100644 --- a/src/mistralai/client/models/conversationrequest.py +++ b/src/mistralai/client/models/conversationrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 58e3ae67f149 from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict diff --git a/src/mistralai/client/models/conversationresponse.py b/src/mistralai/client/models/conversationresponse.py index 0a11fff8..24598ef3 100644 --- a/src/mistralai/client/models/conversationresponse.py +++ b/src/mistralai/client/models/conversationresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ad7a8472c7bf from __future__ import annotations from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict diff --git a/src/mistralai/client/models/conversationrestartrequest.py b/src/mistralai/client/models/conversationrestartrequest.py index aa2bf7b0..35d30993 100644 --- a/src/mistralai/client/models/conversationrestartrequest.py +++ b/src/mistralai/client/models/conversationrestartrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 681d90d50514 from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict diff --git a/src/mistralai/client/models/conversationrestartstreamrequest.py b/src/mistralai/client/models/conversationrestartstreamrequest.py index 689815eb..0ddfb130 100644 --- a/src/mistralai/client/models/conversationrestartstreamrequest.py +++ b/src/mistralai/client/models/conversationrestartstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 521c2b5bfb2b from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict diff --git a/src/mistralai/client/models/conversationstreamrequest.py b/src/mistralai/client/models/conversationstreamrequest.py index 9b8d0c44..379a8f28 100644 --- a/src/mistralai/client/models/conversationstreamrequest.py +++ b/src/mistralai/client/models/conversationstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 58d633507527 from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict diff --git a/src/mistralai/client/models/conversationusageinfo.py b/src/mistralai/client/models/conversationusageinfo.py index 7a818c89..98db0f16 100644 --- a/src/mistralai/client/models/conversationusageinfo.py +++ b/src/mistralai/client/models/conversationusageinfo.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6685e3b50b50 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/client/models/createfinetuningjobop.py similarity index 71% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py rename to src/mistralai/client/models/createfinetuningjobop.py index c54aaa5e..f55deef5 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +++ b/src/mistralai/client/models/createfinetuningjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fd3c305df250 from __future__ import annotations from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict @@ -19,15 +20,14 @@ ] -JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", +CreateFineTuningJobResponseTypedDict = TypeAliasType( + "CreateFineTuningJobResponseTypedDict", Union[LegacyJobMetadataOutTypedDict, ResponseTypedDict], ) r"""OK""" -JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( - "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", - Union[LegacyJobMetadataOut, Response], +CreateFineTuningJobResponse = TypeAliasType( + "CreateFineTuningJobResponse", Union[LegacyJobMetadataOut, Response] ) r"""OK""" diff --git a/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py b/src/mistralai/client/models/createorupdateagentaliasop.py similarity index 83% rename from src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py rename to src/mistralai/client/models/createorupdateagentaliasop.py index 33da325c..cde1dd05 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py +++ b/src/mistralai/client/models/createorupdateagentaliasop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a79cf28bda01 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,13 +7,13 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict): +class CreateOrUpdateAgentAliasRequestTypedDict(TypedDict): agent_id: str alias: str version: int -class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel): +class CreateOrUpdateAgentAliasRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deleteagentaliasop.py b/src/mistralai/client/models/deleteagentaliasop.py new file mode 100644 index 00000000..c52d099e --- /dev/null +++ b/src/mistralai/client/models/deleteagentaliasop.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e4d0d7f75b24 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from typing_extensions import Annotated, TypedDict + + +class DeleteAgentAliasRequestTypedDict(TypedDict): + agent_id: str + alias: str + + +class DeleteAgentAliasRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + alias: Annotated[ + str, FieldMetadata(query=QueryParamMetadata(style="form", explode=True)) + ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_deleteop.py b/src/mistralai/client/models/deleteagentop.py similarity index 78% rename from src/mistralai/client/models/agents_api_v1_agents_deleteop.py rename to src/mistralai/client/models/deleteagentop.py index 58fe902f..8b14bca7 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_deleteop.py +++ b/src/mistralai/client/models/deleteagentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 089fb7f87aea from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict): +class DeleteAgentRequestTypedDict(TypedDict): agent_id: str -class AgentsAPIV1AgentsDeleteRequest(BaseModel): +class DeleteAgentRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py b/src/mistralai/client/models/deleteconversationop.py similarity index 81% rename from src/mistralai/client/models/agents_api_v1_conversations_deleteop.py rename to src/mistralai/client/models/deleteconversationop.py index 81066f90..39607f40 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py +++ b/src/mistralai/client/models/deleteconversationop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 86fefc353db0 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict): +class DeleteConversationRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching metadata.""" -class AgentsAPIV1ConversationsDeleteRequest(BaseModel): +class DeleteConversationRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_reprocess_v1op.py b/src/mistralai/client/models/deletedocumentop.py similarity index 82% rename from src/mistralai/client/models/libraries_documents_reprocess_v1op.py rename to src/mistralai/client/models/deletedocumentop.py index 8aee7552..400070a4 100644 --- a/src/mistralai/client/models/libraries_documents_reprocess_v1op.py +++ b/src/mistralai/client/models/deletedocumentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 62522db1ccf2 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsReprocessV1RequestTypedDict(TypedDict): +class DeleteDocumentRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsReprocessV1Request(BaseModel): +class DeleteDocumentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/files_api_routes_delete_fileop.py b/src/mistralai/client/models/deletefileop.py similarity index 78% rename from src/mistralai/client/models/files_api_routes_delete_fileop.py rename to src/mistralai/client/models/deletefileop.py index b7174866..4feb7812 100644 --- a/src/mistralai/client/models/files_api_routes_delete_fileop.py +++ b/src/mistralai/client/models/deletefileop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 286b4e583638 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): +class DeleteFileRequestTypedDict(TypedDict): file_id: str -class FilesAPIRoutesDeleteFileRequest(BaseModel): +class DeleteFileRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deletefileout.py b/src/mistralai/client/models/deletefileout.py index b25538be..c721f32c 100644 --- a/src/mistralai/client/models/deletefileout.py +++ b/src/mistralai/client/models/deletefileout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5578701e7327 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/libraries_share_delete_v1op.py b/src/mistralai/client/models/deletelibraryaccessop.py similarity index 83% rename from src/mistralai/client/models/libraries_share_delete_v1op.py rename to src/mistralai/client/models/deletelibraryaccessop.py index 620527d5..ca14c3ff 100644 --- a/src/mistralai/client/models/libraries_share_delete_v1op.py +++ b/src/mistralai/client/models/deletelibraryaccessop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: df80945bcf19 from __future__ import annotations from .sharingdelete import SharingDelete, SharingDeleteTypedDict @@ -7,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesShareDeleteV1RequestTypedDict(TypedDict): +class DeleteLibraryAccessRequestTypedDict(TypedDict): library_id: str sharing_delete: SharingDeleteTypedDict -class LibrariesShareDeleteV1Request(BaseModel): +class DeleteLibraryAccessRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_get_v1op.py b/src/mistralai/client/models/deletelibraryop.py similarity index 77% rename from src/mistralai/client/models/libraries_get_v1op.py rename to src/mistralai/client/models/deletelibraryop.py index 83ae377d..5eb6fc31 100644 --- a/src/mistralai/client/models/libraries_get_v1op.py +++ b/src/mistralai/client/models/deletelibraryop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cd0ce9bf8d51 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class LibrariesGetV1RequestTypedDict(TypedDict): +class DeleteLibraryRequestTypedDict(TypedDict): library_id: str -class LibrariesGetV1Request(BaseModel): +class DeleteLibraryRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py b/src/mistralai/client/models/deletemodelop.py similarity index 79% rename from src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py rename to src/mistralai/client/models/deletemodelop.py index 1cd36128..55c4b242 100644 --- a/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py +++ b/src/mistralai/client/models/deletemodelop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2c494d99a44d from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): +class DeleteModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to delete.""" -class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): +class DeleteModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deletemodelout.py b/src/mistralai/client/models/deletemodelout.py index 5aa8b68f..bf22ed17 100644 --- a/src/mistralai/client/models/deletemodelout.py +++ b/src/mistralai/client/models/deletemodelout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ef6a1671c739 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/deltamessage.py b/src/mistralai/client/models/deltamessage.py index fc08d62a..fbb8231a 100644 --- a/src/mistralai/client/models/deltamessage.py +++ b/src/mistralai/client/models/deltamessage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 68f53d67a140 from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict diff --git a/src/mistralai/client/models/documentlibrarytool.py b/src/mistralai/client/models/documentlibrarytool.py index 21eab39e..ff0f7393 100644 --- a/src/mistralai/client/models/documentlibrarytool.py +++ b/src/mistralai/client/models/documentlibrarytool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3eb3c218f457 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/documentout.py b/src/mistralai/client/models/documentout.py index 39d0aa2a..3b1a5713 100644 --- a/src/mistralai/client/models/documentout.py +++ b/src/mistralai/client/models/documentout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7a85b9dca506 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/documenttextcontent.py b/src/mistralai/client/models/documenttextcontent.py index b1c1aa07..b6904cb4 100644 --- a/src/mistralai/client/models/documenttextcontent.py +++ b/src/mistralai/client/models/documenttextcontent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e730005e44cb from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/documentupdatein.py b/src/mistralai/client/models/documentupdatein.py index 02022b89..669554de 100644 --- a/src/mistralai/client/models/documentupdatein.py +++ b/src/mistralai/client/models/documentupdatein.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d19c1b26a875 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/documenturlchunk.py b/src/mistralai/client/models/documenturlchunk.py index 00eb5535..304cde2b 100644 --- a/src/mistralai/client/models/documenturlchunk.py +++ b/src/mistralai/client/models/documenturlchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4309807f6048 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/files_api_routes_download_fileop.py b/src/mistralai/client/models/downloadfileop.py similarity index 77% rename from src/mistralai/client/models/files_api_routes_download_fileop.py rename to src/mistralai/client/models/downloadfileop.py index fa9e491a..fcdc01d6 100644 --- a/src/mistralai/client/models/files_api_routes_download_fileop.py +++ b/src/mistralai/client/models/downloadfileop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4d051f08057d from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): +class DownloadFileRequestTypedDict(TypedDict): file_id: str -class FilesAPIRoutesDownloadFileRequest(BaseModel): +class DownloadFileRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/embeddingdtype.py b/src/mistralai/client/models/embeddingdtype.py index 26eee779..732c4ebe 100644 --- a/src/mistralai/client/models/embeddingdtype.py +++ b/src/mistralai/client/models/embeddingdtype.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 77f9526a78df from __future__ import annotations from typing import Literal diff --git a/src/mistralai/client/models/embeddingrequest.py b/src/mistralai/client/models/embeddingrequest.py index 1dfe97c8..f4537ffa 100644 --- a/src/mistralai/client/models/embeddingrequest.py +++ b/src/mistralai/client/models/embeddingrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: eadbe3f9040c from __future__ import annotations from .embeddingdtype import EmbeddingDtype diff --git a/src/mistralai/client/models/embeddingresponse.py b/src/mistralai/client/models/embeddingresponse.py index 64a28ea9..6ffd6894 100644 --- a/src/mistralai/client/models/embeddingresponse.py +++ b/src/mistralai/client/models/embeddingresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f7d790e84b65 from __future__ import annotations from .embeddingresponsedata import EmbeddingResponseData, EmbeddingResponseDataTypedDict diff --git a/src/mistralai/client/models/embeddingresponsedata.py b/src/mistralai/client/models/embeddingresponsedata.py index ebd0bf7b..a689b290 100644 --- a/src/mistralai/client/models/embeddingresponsedata.py +++ b/src/mistralai/client/models/embeddingresponsedata.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6d6ead6f3803 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/encodingformat.py b/src/mistralai/client/models/encodingformat.py index be6c1a14..4a39d029 100644 --- a/src/mistralai/client/models/encodingformat.py +++ b/src/mistralai/client/models/encodingformat.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b51ec296cc92 from __future__ import annotations from typing import Literal diff --git a/src/mistralai/client/models/entitytype.py b/src/mistralai/client/models/entitytype.py index 9c16f4a1..56d82cbe 100644 --- a/src/mistralai/client/models/entitytype.py +++ b/src/mistralai/client/models/entitytype.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 62d6a6a13288 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/eventout.py b/src/mistralai/client/models/eventout.py index 5e118d45..a0247555 100644 --- a/src/mistralai/client/models/eventout.py +++ b/src/mistralai/client/models/eventout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: da8ad645a9cb from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/file.py b/src/mistralai/client/models/file.py index a8bbc6fa..dbbc00b5 100644 --- a/src/mistralai/client/models/file.py +++ b/src/mistralai/client/models/file.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f972c39edfcf from __future__ import annotations import io diff --git a/src/mistralai/client/models/filechunk.py b/src/mistralai/client/models/filechunk.py index d8b96f69..43ef22f8 100644 --- a/src/mistralai/client/models/filechunk.py +++ b/src/mistralai/client/models/filechunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ff3c2d33ab1e from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/filepurpose.py b/src/mistralai/client/models/filepurpose.py index eef1b089..49a5568f 100644 --- a/src/mistralai/client/models/filepurpose.py +++ b/src/mistralai/client/models/filepurpose.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a11e7f9f2d45 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/fileschema.py b/src/mistralai/client/models/fileschema.py index 9ecde454..cbe9b0d1 100644 --- a/src/mistralai/client/models/fileschema.py +++ b/src/mistralai/client/models/fileschema.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 19cde41ca32a from __future__ import annotations from .filepurpose import FilePurpose diff --git a/src/mistralai/client/models/filesignedurl.py b/src/mistralai/client/models/filesignedurl.py index cbca9847..53dff812 100644 --- a/src/mistralai/client/models/filesignedurl.py +++ b/src/mistralai/client/models/filesignedurl.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a1754c725163 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/fimcompletionrequest.py b/src/mistralai/client/models/fimcompletionrequest.py index c9eca0af..e2f60327 100644 --- a/src/mistralai/client/models/fimcompletionrequest.py +++ b/src/mistralai/client/models/fimcompletionrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cf3558adc3ab from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/fimcompletionresponse.py b/src/mistralai/client/models/fimcompletionresponse.py index 8a2eda0c..1345a116 100644 --- a/src/mistralai/client/models/fimcompletionresponse.py +++ b/src/mistralai/client/models/fimcompletionresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b860d2ba771e from __future__ import annotations from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict diff --git a/src/mistralai/client/models/fimcompletionstreamrequest.py b/src/mistralai/client/models/fimcompletionstreamrequest.py index 29543802..480ed17a 100644 --- a/src/mistralai/client/models/fimcompletionstreamrequest.py +++ b/src/mistralai/client/models/fimcompletionstreamrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1d1ee09f1913 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/finetuneablemodeltype.py b/src/mistralai/client/models/finetuneablemodeltype.py index f5b8b2ed..7b924bd7 100644 --- a/src/mistralai/client/models/finetuneablemodeltype.py +++ b/src/mistralai/client/models/finetuneablemodeltype.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 05e097395df3 from __future__ import annotations from typing import Literal diff --git a/src/mistralai/client/models/ftclassifierlossfunction.py b/src/mistralai/client/models/ftclassifierlossfunction.py index e6781a5e..ccb0f21b 100644 --- a/src/mistralai/client/models/ftclassifierlossfunction.py +++ b/src/mistralai/client/models/ftclassifierlossfunction.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d21e2a36ab1f from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/ftmodelcapabilitiesout.py b/src/mistralai/client/models/ftmodelcapabilitiesout.py index be31aa3c..42269b78 100644 --- a/src/mistralai/client/models/ftmodelcapabilitiesout.py +++ b/src/mistralai/client/models/ftmodelcapabilitiesout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f70517be97d4 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/ftmodelcard.py b/src/mistralai/client/models/ftmodelcard.py index 06f088ec..570e95e2 100644 --- a/src/mistralai/client/models/ftmodelcard.py +++ b/src/mistralai/client/models/ftmodelcard.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c4f15eed2ca2 from __future__ import annotations from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict diff --git a/src/mistralai/client/models/function.py b/src/mistralai/client/models/function.py index 6e2b52ed..3632c1af 100644 --- a/src/mistralai/client/models/function.py +++ b/src/mistralai/client/models/function.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 32275a9d8fee from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/functioncall.py b/src/mistralai/client/models/functioncall.py index 6cb6f26e..527c3ad4 100644 --- a/src/mistralai/client/models/functioncall.py +++ b/src/mistralai/client/models/functioncall.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 393fca552632 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/functioncallentry.py b/src/mistralai/client/models/functioncallentry.py index fce4d387..6ada1d35 100644 --- a/src/mistralai/client/models/functioncallentry.py +++ b/src/mistralai/client/models/functioncallentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cd058446c0aa from __future__ import annotations from .functioncallentryarguments import ( diff --git a/src/mistralai/client/models/functioncallentryarguments.py b/src/mistralai/client/models/functioncallentryarguments.py index ac9e6227..afe81b24 100644 --- a/src/mistralai/client/models/functioncallentryarguments.py +++ b/src/mistralai/client/models/functioncallentryarguments.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3df3767a7b93 from __future__ import annotations from typing import Any, Dict, Union diff --git a/src/mistralai/client/models/functioncallevent.py b/src/mistralai/client/models/functioncallevent.py index 8146fa5c..5d871a0e 100644 --- a/src/mistralai/client/models/functioncallevent.py +++ b/src/mistralai/client/models/functioncallevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 23b120b8f122 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/functionname.py b/src/mistralai/client/models/functionname.py index 2a05c1de..07d98a0e 100644 --- a/src/mistralai/client/models/functionname.py +++ b/src/mistralai/client/models/functionname.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 000acafdb0c0 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/functionresultentry.py b/src/mistralai/client/models/functionresultentry.py index a843bf9b..ca73cbb7 100644 --- a/src/mistralai/client/models/functionresultentry.py +++ b/src/mistralai/client/models/functionresultentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 213df39bd5e6 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/functiontool.py b/src/mistralai/client/models/functiontool.py index 16abcbf3..13b04496 100644 --- a/src/mistralai/client/models/functiontool.py +++ b/src/mistralai/client/models/functiontool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2e9ef5800117 from __future__ import annotations from .function import Function, FunctionTypedDict diff --git a/src/mistralai/client/models/agents_api_v1_agents_getop.py b/src/mistralai/client/models/getagentop.py similarity index 77% rename from src/mistralai/client/models/agents_api_v1_agents_getop.py rename to src/mistralai/client/models/getagentop.py index 57abff76..55d8fe68 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_getop.py +++ b/src/mistralai/client/models/getagentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 5a28bb1e727e from __future__ import annotations from mistralai.client.types import ( @@ -14,28 +15,26 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentsAPIV1AgentsGetAgentVersionTypedDict = TypeAliasType( - "AgentsAPIV1AgentsGetAgentVersionTypedDict", Union[int, str] +GetAgentAgentVersionTypedDict = TypeAliasType( + "GetAgentAgentVersionTypedDict", Union[int, str] ) -AgentsAPIV1AgentsGetAgentVersion = TypeAliasType( - "AgentsAPIV1AgentsGetAgentVersion", Union[int, str] -) +GetAgentAgentVersion = TypeAliasType("GetAgentAgentVersion", Union[int, str]) -class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): +class GetAgentRequestTypedDict(TypedDict): agent_id: str - agent_version: NotRequired[Nullable[AgentsAPIV1AgentsGetAgentVersionTypedDict]] + agent_version: NotRequired[Nullable[GetAgentAgentVersionTypedDict]] -class AgentsAPIV1AgentsGetRequest(BaseModel): +class GetAgentRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] agent_version: Annotated[ - OptionalNullable[AgentsAPIV1AgentsGetAgentVersion], + OptionalNullable[GetAgentAgentVersion], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET diff --git a/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py b/src/mistralai/client/models/getagentversionop.py similarity index 81% rename from src/mistralai/client/models/agents_api_v1_agents_get_versionop.py rename to src/mistralai/client/models/getagentversionop.py index edcccda1..77b8a266 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py +++ b/src/mistralai/client/models/getagentversionop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a0db5a6aab1f from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict): +class GetAgentVersionRequestTypedDict(TypedDict): agent_id: str version: str -class AgentsAPIV1AgentsGetVersionRequest(BaseModel): +class GetAgentVersionRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py b/src/mistralai/client/models/getbatchjobop.py similarity index 93% rename from src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py rename to src/mistralai/client/models/getbatchjobop.py index 32e34281..792c3e21 100644 --- a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py +++ b/src/mistralai/client/models/getbatchjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 443103fe3b88 from __future__ import annotations from mistralai.client.types import ( @@ -13,12 +14,12 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): +class GetBatchJobRequestTypedDict(TypedDict): job_id: str inline: NotRequired[Nullable[bool]] -class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): +class GetBatchJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_historyop.py b/src/mistralai/client/models/getconversationhistoryop.py similarity index 80% rename from src/mistralai/client/models/agents_api_v1_conversations_historyop.py rename to src/mistralai/client/models/getconversationhistoryop.py index ba1f8890..c1fbf3de 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_historyop.py +++ b/src/mistralai/client/models/getconversationhistoryop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c863a4cbeb34 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): +class GetConversationHistoryRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching entries.""" -class AgentsAPIV1ConversationsHistoryRequest(BaseModel): +class GetConversationHistoryRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py b/src/mistralai/client/models/getconversationmessagesop.py similarity index 80% rename from src/mistralai/client/models/agents_api_v1_conversations_messagesop.py rename to src/mistralai/client/models/getconversationmessagesop.py index e05728f2..6666198e 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py +++ b/src/mistralai/client/models/getconversationmessagesop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: bb8a90ba7c22 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): +class GetConversationMessagesRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching messages.""" -class AgentsAPIV1ConversationsMessagesRequest(BaseModel): +class GetConversationMessagesRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_getop.py b/src/mistralai/client/models/getconversationop.py similarity index 90% rename from src/mistralai/client/models/agents_api_v1_conversations_getop.py rename to src/mistralai/client/models/getconversationop.py index 7308708e..d204d175 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_getop.py +++ b/src/mistralai/client/models/getconversationop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1a622b8337ac from __future__ import annotations from .agentconversation import AgentConversation, AgentConversationTypedDict @@ -9,12 +10,12 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): +class GetConversationRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching metadata.""" -class AgentsAPIV1ConversationsGetRequest(BaseModel): +class GetConversationRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py b/src/mistralai/client/models/getdocumentextractedtextsignedurlop.py similarity index 77% rename from src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py rename to src/mistralai/client/models/getdocumentextractedtextsignedurlop.py index 24ed897d..9a71181d 100644 --- a/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py +++ b/src/mistralai/client/models/getdocumentextractedtextsignedurlop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 69099395d631 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict(TypedDict): +class GetDocumentExtractedTextSignedURLRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsGetExtractedTextSignedURLV1Request(BaseModel): +class GetDocumentExtractedTextSignedURLRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_delete_v1op.py b/src/mistralai/client/models/getdocumentop.py similarity index 82% rename from src/mistralai/client/models/libraries_documents_delete_v1op.py rename to src/mistralai/client/models/getdocumentop.py index bc5ec6e5..d7b07db7 100644 --- a/src/mistralai/client/models/libraries_documents_delete_v1op.py +++ b/src/mistralai/client/models/getdocumentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: de89ff93d373 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsDeleteV1RequestTypedDict(TypedDict): +class GetDocumentRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsDeleteV1Request(BaseModel): +class GetDocumentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_get_status_v1op.py b/src/mistralai/client/models/getdocumentsignedurlop.py similarity index 80% rename from src/mistralai/client/models/libraries_documents_get_status_v1op.py rename to src/mistralai/client/models/getdocumentsignedurlop.py index 92b077d3..e5d56c54 100644 --- a/src/mistralai/client/models/libraries_documents_get_status_v1op.py +++ b/src/mistralai/client/models/getdocumentsignedurlop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b8d95511c6d1 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsGetStatusV1RequestTypedDict(TypedDict): +class GetDocumentSignedURLRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsGetStatusV1Request(BaseModel): +class GetDocumentSignedURLRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py b/src/mistralai/client/models/getdocumentstatusop.py similarity index 81% rename from src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py rename to src/mistralai/client/models/getdocumentstatusop.py index 350c8e73..4206f593 100644 --- a/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py +++ b/src/mistralai/client/models/getdocumentstatusop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f1f40b8f003f from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsGetSignedURLV1RequestTypedDict(TypedDict): +class GetDocumentStatusRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsGetSignedURLV1Request(BaseModel): +class GetDocumentStatusRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py b/src/mistralai/client/models/getdocumenttextcontentop.py similarity index 80% rename from src/mistralai/client/models/libraries_documents_get_text_content_v1op.py rename to src/mistralai/client/models/getdocumenttextcontentop.py index 68f9725a..8a7b4aae 100644 --- a/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py +++ b/src/mistralai/client/models/getdocumenttextcontentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ba23717093ef from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsGetTextContentV1RequestTypedDict(TypedDict): +class GetDocumentTextContentRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsGetTextContentV1Request(BaseModel): +class GetDocumentTextContentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/files_api_routes_get_signed_urlop.py b/src/mistralai/client/models/getfilesignedurlop.py similarity index 86% rename from src/mistralai/client/models/files_api_routes_get_signed_urlop.py rename to src/mistralai/client/models/getfilesignedurlop.py index a05f8262..06ed79ee 100644 --- a/src/mistralai/client/models/files_api_routes_get_signed_urlop.py +++ b/src/mistralai/client/models/getfilesignedurlop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1aa50b81c8cf from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,13 +8,13 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict): +class GetFileSignedURLRequestTypedDict(TypedDict): file_id: str expiry: NotRequired[int] r"""Number of hours before the url becomes invalid. 
Defaults to 24h""" -class FilesAPIRoutesGetSignedURLRequest(BaseModel): +class GetFileSignedURLRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py b/src/mistralai/client/models/getfinetuningjobop.py similarity index 74% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py rename to src/mistralai/client/models/getfinetuningjobop.py index 8837d262..1fb732f4 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +++ b/src/mistralai/client/models/getfinetuningjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: afe997f96d69 from __future__ import annotations from .classifierdetailedjobout import ( @@ -16,26 +17,26 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): +class GetFineTuningJobRequestTypedDict(TypedDict): job_id: str r"""The ID of the job to analyse.""" -class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): +class GetFineTuningJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] r"""The ID of the job to analyse.""" -JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", +GetFineTuningJobResponseTypedDict = TypeAliasType( + "GetFineTuningJobResponseTypedDict", Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], ) r"""OK""" -JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ +GetFineTuningJobResponse = Annotated[ Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], Field(discriminator="JOB_TYPE"), ] diff --git 
a/src/mistralai/client/models/libraries_delete_v1op.py b/src/mistralai/client/models/getlibraryop.py similarity index 78% rename from src/mistralai/client/models/libraries_delete_v1op.py rename to src/mistralai/client/models/getlibraryop.py index fa447de0..bc0b4a23 100644 --- a/src/mistralai/client/models/libraries_delete_v1op.py +++ b/src/mistralai/client/models/getlibraryop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c84a92e23a90 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDeleteV1RequestTypedDict(TypedDict): +class GetLibraryRequestTypedDict(TypedDict): library_id: str -class LibrariesDeleteV1Request(BaseModel): +class GetLibraryRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/githubrepositoryin.py b/src/mistralai/client/models/githubrepositoryin.py index 4e4b4777..e55389c3 100644 --- a/src/mistralai/client/models/githubrepositoryin.py +++ b/src/mistralai/client/models/githubrepositoryin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: eef26fbd2876 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/githubrepositoryout.py b/src/mistralai/client/models/githubrepositoryout.py index 1f738708..514df01c 100644 --- a/src/mistralai/client/models/githubrepositoryout.py +++ b/src/mistralai/client/models/githubrepositoryout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d2434a167623 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/httpvalidationerror.py b/src/mistralai/client/models/httpvalidationerror.py index 34d9b543..e7f0a35b 100644 --- a/src/mistralai/client/models/httpvalidationerror.py +++ b/src/mistralai/client/models/httpvalidationerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4099f568a6f8 from __future__ import annotations from .validationerror import ValidationError diff --git a/src/mistralai/client/models/imagegenerationtool.py b/src/mistralai/client/models/imagegenerationtool.py index c5dbda3f..680c6ce2 100644 --- a/src/mistralai/client/models/imagegenerationtool.py +++ b/src/mistralai/client/models/imagegenerationtool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e1532275faa0 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/imageurl.py b/src/mistralai/client/models/imageurl.py index 6e61d1ae..4ff13b1c 100644 --- a/src/mistralai/client/models/imageurl.py +++ b/src/mistralai/client/models/imageurl.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e4bbf5881fbf from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/imageurlchunk.py b/src/mistralai/client/models/imageurlchunk.py index 9968ed74..993185cc 100644 --- a/src/mistralai/client/models/imageurlchunk.py +++ b/src/mistralai/client/models/imageurlchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 746fde62f637 from __future__ import annotations from .imageurl import ImageURL, ImageURLTypedDict diff --git a/src/mistralai/client/models/inputentries.py b/src/mistralai/client/models/inputentries.py index 8ae29837..dc989295 100644 --- a/src/mistralai/client/models/inputentries.py +++ b/src/mistralai/client/models/inputentries.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 44727997dacb from __future__ import annotations from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict diff --git a/src/mistralai/client/models/inputs.py b/src/mistralai/client/models/inputs.py index 2b8b2f5f..cfcdeb3d 100644 --- a/src/mistralai/client/models/inputs.py +++ b/src/mistralai/client/models/inputs.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 84a8007518c7 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/instructrequest.py b/src/mistralai/client/models/instructrequest.py index 73d482d8..e5f9cccf 100644 --- a/src/mistralai/client/models/instructrequest.py +++ b/src/mistralai/client/models/instructrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6d3ad9f896c7 from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict diff --git a/src/mistralai/client/models/jobin.py b/src/mistralai/client/models/jobin.py index 23a431c9..b3cb8998 100644 --- a/src/mistralai/client/models/jobin.py +++ b/src/mistralai/client/models/jobin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f4d176123ccc from __future__ import annotations from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict @@ -54,7 +55,6 @@ class JobInTypedDict(TypedDict): model: str - r"""The name of the model to fine-tune.""" hyperparameters: HyperparametersTypedDict training_files: NotRequired[List[TrainingFileTypedDict]] validation_files: NotRequired[Nullable[List[str]]] @@ -73,7 +73,6 @@ class JobInTypedDict(TypedDict): class JobIn(BaseModel): model: str - r"""The name of the model to fine-tune.""" hyperparameters: Hyperparameters diff --git a/src/mistralai/client/models/jobmetadataout.py b/src/mistralai/client/models/jobmetadataout.py index f91e30c0..1d386539 100644 --- a/src/mistralai/client/models/jobmetadataout.py +++ b/src/mistralai/client/models/jobmetadataout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 805f41e3292a from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/jobsout.py b/src/mistralai/client/models/jobsout.py index 7727d56c..a4127a5d 100644 --- a/src/mistralai/client/models/jobsout.py +++ b/src/mistralai/client/models/jobsout.py @@ -1,10 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 22e91e9631a9 from __future__ import annotations from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic from pydantic import Field +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -19,13 +23,10 @@ ] -JobsOutObject = Literal["list",] - - class JobsOutTypedDict(TypedDict): total: int data: NotRequired[List[JobsOutDataTypedDict]] - object: NotRequired[JobsOutObject] + object: Literal["list"] class JobsOut(BaseModel): @@ -33,4 +34,7 @@ class JobsOut(BaseModel): data: Optional[List[JobsOutData]] = None - object: Optional[JobsOutObject] = "list" + OBJECT: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" diff --git a/src/mistralai/client/models/jsonschema.py b/src/mistralai/client/models/jsonschema.py index db2fa55b..948c94ed 100644 --- a/src/mistralai/client/models/jsonschema.py +++ b/src/mistralai/client/models/jsonschema.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e1fc1d8a434a from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/legacyjobmetadataout.py b/src/mistralai/client/models/legacyjobmetadataout.py index 155ecea7..4453c157 100644 --- a/src/mistralai/client/models/legacyjobmetadataout.py +++ b/src/mistralai/client/models/legacyjobmetadataout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4f44aa38c864 from __future__ import annotations from mistralai.client.types import ( @@ -8,12 +9,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -LegacyJobMetadataOutObject = Literal["job.metadata",] +from typing_extensions import Annotated, NotRequired, TypedDict class LegacyJobMetadataOutTypedDict(TypedDict): @@ -36,7 +37,7 @@ class LegacyJobMetadataOutTypedDict(TypedDict): r"""The number of complete passes through the entire training dataset.""" training_steps: NotRequired[Nullable[int]] r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - object: NotRequired[LegacyJobMetadataOutObject] + object: Literal["job.metadata"] class LegacyJobMetadataOut(BaseModel): @@ -70,7 +71,13 @@ class LegacyJobMetadataOut(BaseModel): training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - object: Optional[LegacyJobMetadataOutObject] = "job.metadata" + OBJECT: Annotated[ + Annotated[ + Optional[Literal["job.metadata"]], + AfterValidator(validate_const("job.metadata")), + ], + pydantic.Field(alias="object"), + ] = "job.metadata" @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/src/mistralai/client/models/libraryin.py b/src/mistralai/client/models/libraryin.py index a7b36158..1a71d410 100644 --- a/src/mistralai/client/models/libraryin.py +++ b/src/mistralai/client/models/libraryin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6147d5df71d9 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/libraryinupdate.py b/src/mistralai/client/models/libraryinupdate.py index f0241ba1..328b2de3 100644 --- a/src/mistralai/client/models/libraryinupdate.py +++ b/src/mistralai/client/models/libraryinupdate.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 300a6bb02e6e from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/libraryout.py b/src/mistralai/client/models/libraryout.py index d1953f16..c7ab7b8d 100644 --- a/src/mistralai/client/models/libraryout.py +++ b/src/mistralai/client/models/libraryout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4e608c7aafc4 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py b/src/mistralai/client/models/listagentaliasesop.py similarity index 75% rename from src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py rename to src/mistralai/client/models/listagentaliasesop.py index b9770fff..83c6d176 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py +++ b/src/mistralai/client/models/listagentaliasesop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ff038766a902 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict): +class ListAgentAliasesRequestTypedDict(TypedDict): agent_id: str -class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel): +class ListAgentAliasesRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_listop.py b/src/mistralai/client/models/listagentsop.py similarity index 82% rename from src/mistralai/client/models/agents_api_v1_agents_listop.py rename to src/mistralai/client/models/listagentsop.py index 119f5123..863fc13a 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_listop.py +++ b/src/mistralai/client/models/listagentsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a573a873c404 from __future__ import annotations from .requestsource import RequestSource @@ -15,7 +16,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): +class ListAgentsRequestTypedDict(TypedDict): page: NotRequired[int] r"""Page number (0-indexed)""" page_size: NotRequired[int] @@ -23,11 +24,14 @@ class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): deployment_chat: NotRequired[Nullable[bool]] sources: NotRequired[Nullable[List[RequestSource]]] name: NotRequired[Nullable[str]] + r"""Filter by agent name""" + search: NotRequired[Nullable[str]] + r"""Search agents by name or ID""" id: NotRequired[Nullable[str]] metadata: NotRequired[Nullable[Dict[str, Any]]] -class AgentsAPIV1AgentsListRequest(BaseModel): +class ListAgentsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -54,6 +58,13 @@ class AgentsAPIV1AgentsListRequest(BaseModel): OptionalNullable[str], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET + r"""Filter by agent name""" + + search: Annotated[ + OptionalNullable[str], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + r"""Search agents by name or ID""" id: Annotated[ OptionalNullable[str], @@ -73,10 +84,18 @@ def serialize_model(self, handler): "deployment_chat", "sources", "name", + "search", + "id", + "metadata", + ] + nullable_fields = [ + "deployment_chat", + "sources", + "name", + "search", "id", "metadata", ] - nullable_fields = ["deployment_chat", "sources", "name", "id", "metadata"] null_default_fields = [] serialized = handler(self) diff --git a/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py b/src/mistralai/client/models/listagentversionsop.py similarity index 88% rename from src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py rename to 
src/mistralai/client/models/listagentversionsop.py index 813335f9..613d3d85 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py +++ b/src/mistralai/client/models/listagentversionsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ccc5fb48e78f from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,7 +8,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict): +class ListAgentVersionsRequestTypedDict(TypedDict): agent_id: str page: NotRequired[int] r"""Page number (0-indexed)""" @@ -15,7 +16,7 @@ class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict): r"""Number of versions per page""" -class AgentsAPIV1AgentsListVersionsRequest(BaseModel): +class ListAgentVersionsRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py b/src/mistralai/client/models/listbatchjobsop.py similarity index 87% rename from src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py rename to src/mistralai/client/models/listbatchjobsop.py index 3557e773..5322df81 100644 --- a/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py +++ b/src/mistralai/client/models/listbatchjobsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: f49af453f5e6 from __future__ import annotations from .batchjobstatus import BatchJobStatus @@ -12,11 +13,17 @@ ) from mistralai.client.utils import FieldMetadata, QueryParamMetadata from pydantic import model_serializer -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict -class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): +OrderBy = Literal[ + "created", + "-created", +] + + +class ListBatchJobsRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] model: NotRequired[Nullable[str]] @@ -25,9 +32,10 @@ class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): created_after: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] status: NotRequired[Nullable[List[BatchJobStatus]]] + order_by: NotRequired[OrderBy] -class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): +class ListBatchJobsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -68,6 +76,11 @@ class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET + order_by: Annotated[ + Optional[OrderBy], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = "-created" + @model_serializer(mode="wrap") def serialize_model(self, handler): optional_fields = [ @@ -79,6 +92,7 @@ def serialize_model(self, handler): "created_after", "created_by_me", "status", + "order_by", ] nullable_fields = ["model", "agent_id", "metadata", "created_after", "status"] null_default_fields = [] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_listop.py b/src/mistralai/client/models/listconversationsop.py similarity index 85% rename from src/mistralai/client/models/agents_api_v1_conversations_listop.py rename to 
src/mistralai/client/models/listconversationsop.py index aae9c74e..1c9a347c 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_listop.py +++ b/src/mistralai/client/models/listconversationsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d6007f6c1643 from __future__ import annotations from .agentconversation import AgentConversation, AgentConversationTypedDict @@ -16,13 +17,13 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): +class ListConversationsRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] metadata: NotRequired[Nullable[Dict[str, Any]]] -class AgentsAPIV1ConversationsListRequest(BaseModel): +class ListConversationsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -69,12 +70,12 @@ def serialize_model(self, handler): return m -AgentsAPIV1ConversationsListResponseTypedDict = TypeAliasType( - "AgentsAPIV1ConversationsListResponseTypedDict", +ListConversationsResponseTypedDict = TypeAliasType( + "ListConversationsResponseTypedDict", Union[AgentConversationTypedDict, ModelConversationTypedDict], ) -AgentsAPIV1ConversationsListResponse = TypeAliasType( - "AgentsAPIV1ConversationsListResponse", Union[AgentConversation, ModelConversation] +ListConversationsResponse = TypeAliasType( + "ListConversationsResponse", Union[AgentConversation, ModelConversation] ) diff --git a/src/mistralai/client/models/listdocumentout.py b/src/mistralai/client/models/listdocumentout.py index 24969a0f..a636b3de 100644 --- a/src/mistralai/client/models/listdocumentout.py +++ b/src/mistralai/client/models/listdocumentout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b2c96075ce00 from __future__ import annotations from .documentout import DocumentOut, DocumentOutTypedDict diff --git a/src/mistralai/client/models/libraries_documents_list_v1op.py b/src/mistralai/client/models/listdocumentsop.py similarity index 95% rename from src/mistralai/client/models/libraries_documents_list_v1op.py rename to src/mistralai/client/models/listdocumentsop.py index 5dec3385..0f7c4584 100644 --- a/src/mistralai/client/models/libraries_documents_list_v1op.py +++ b/src/mistralai/client/models/listdocumentsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3e42bdc15383 from __future__ import annotations from mistralai.client.types import ( @@ -14,7 +15,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class LibrariesDocumentsListV1RequestTypedDict(TypedDict): +class ListDocumentsRequestTypedDict(TypedDict): library_id: str search: NotRequired[Nullable[str]] page_size: NotRequired[int] @@ -24,7 +25,7 @@ class LibrariesDocumentsListV1RequestTypedDict(TypedDict): sort_order: NotRequired[str] -class LibrariesDocumentsListV1Request(BaseModel): +class ListDocumentsRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/files_api_routes_list_filesop.py b/src/mistralai/client/models/listfilesop.py similarity index 96% rename from src/mistralai/client/models/files_api_routes_list_filesop.py rename to src/mistralai/client/models/listfilesop.py index ace99631..a9af5c70 100644 --- a/src/mistralai/client/models/files_api_routes_list_filesop.py +++ b/src/mistralai/client/models/listfilesop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: e5bd46ac0145 from __future__ import annotations from .filepurpose import FilePurpose @@ -17,7 +18,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): +class ListFilesRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] include_total: NotRequired[bool] @@ -28,7 +29,7 @@ class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): mimetypes: NotRequired[Nullable[List[str]]] -class FilesAPIRoutesListFilesRequest(BaseModel): +class ListFilesRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), diff --git a/src/mistralai/client/models/listfilesout.py b/src/mistralai/client/models/listfilesout.py index 1db17c40..460822f7 100644 --- a/src/mistralai/client/models/listfilesout.py +++ b/src/mistralai/client/models/listfilesout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ae5fa21b141c from __future__ import annotations from .fileschema import FileSchema, FileSchemaTypedDict diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py b/src/mistralai/client/models/listfinetuningjobsop.py similarity index 93% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py rename to src/mistralai/client/models/listfinetuningjobsop.py index 8c19bacb..8712c3fa 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +++ b/src/mistralai/client/models/listfinetuningjobsop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b77fe203b929 from __future__ import annotations from datetime import datetime @@ -15,7 +16,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -JobsAPIRoutesFineTuningGetFineTuningJobsStatus = Literal[ +ListFineTuningJobsStatus = Literal[ "QUEUED", "STARTED", "VALIDATING", @@ -30,7 +31,7 @@ r"""The current job state to filter on. When set, the other results are not displayed.""" -class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): +class ListFineTuningJobsRequestTypedDict(TypedDict): page: NotRequired[int] r"""The page number of the results to be returned.""" page_size: NotRequired[int] @@ -42,7 +43,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): created_before: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - status: NotRequired[Nullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus]] + status: NotRequired[Nullable[ListFineTuningJobsStatus]] r"""The current job state to filter on. When set, the other results are not displayed.""" wandb_project: NotRequired[Nullable[str]] r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" @@ -52,7 +53,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): r"""The model suffix to filter on. When set, the other results are not displayed.""" -class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): +class ListFineTuningJobsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -89,7 +90,7 @@ class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): r"""When set, only return results for jobs created by the API caller. 
Other results are not displayed.""" status: Annotated[ - OptionalNullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus], + OptionalNullable[ListFineTuningJobsStatus], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET r"""The current job state to filter on. When set, the other results are not displayed.""" diff --git a/src/mistralai/client/models/libraries_share_list_v1op.py b/src/mistralai/client/models/listlibraryaccessesop.py similarity index 76% rename from src/mistralai/client/models/libraries_share_list_v1op.py rename to src/mistralai/client/models/listlibraryaccessesop.py index fd5d9d33..2206310f 100644 --- a/src/mistralai/client/models/libraries_share_list_v1op.py +++ b/src/mistralai/client/models/listlibraryaccessesop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 581b332626b7 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class LibrariesShareListV1RequestTypedDict(TypedDict): +class ListLibraryAccessesRequestTypedDict(TypedDict): library_id: str -class LibrariesShareListV1Request(BaseModel): +class ListLibraryAccessesRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listlibraryout.py b/src/mistralai/client/models/listlibraryout.py index 24aaa1a9..39fa459f 100644 --- a/src/mistralai/client/models/listlibraryout.py +++ b/src/mistralai/client/models/listlibraryout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: cb78c529e763 from __future__ import annotations from .libraryout import LibraryOut, LibraryOutTypedDict diff --git a/src/mistralai/client/models/listsharingout.py b/src/mistralai/client/models/listsharingout.py index f139813f..443ad0d6 100644 --- a/src/mistralai/client/models/listsharingout.py +++ b/src/mistralai/client/models/listsharingout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ee708a7ccdad from __future__ import annotations from .sharingout import SharingOut, SharingOutTypedDict diff --git a/src/mistralai/client/models/messageentries.py b/src/mistralai/client/models/messageentries.py index 9b1706de..a95098e0 100644 --- a/src/mistralai/client/models/messageentries.py +++ b/src/mistralai/client/models/messageentries.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e13f9009902b from __future__ import annotations from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict diff --git a/src/mistralai/client/models/messageinputcontentchunks.py b/src/mistralai/client/models/messageinputcontentchunks.py index e90d8aa0..63cf14e7 100644 --- a/src/mistralai/client/models/messageinputcontentchunks.py +++ b/src/mistralai/client/models/messageinputcontentchunks.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 01025c12866a from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict diff --git a/src/mistralai/client/models/messageinputentry.py b/src/mistralai/client/models/messageinputentry.py index a72319cf..15046d25 100644 --- a/src/mistralai/client/models/messageinputentry.py +++ b/src/mistralai/client/models/messageinputentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c0a4b5179095 from __future__ import annotations from .messageinputcontentchunks import ( diff --git a/src/mistralai/client/models/messageoutputcontentchunks.py b/src/mistralai/client/models/messageoutputcontentchunks.py index 136a7608..def7a4d2 100644 --- a/src/mistralai/client/models/messageoutputcontentchunks.py +++ b/src/mistralai/client/models/messageoutputcontentchunks.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2ed248515035 from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict diff --git a/src/mistralai/client/models/messageoutputentry.py b/src/mistralai/client/models/messageoutputentry.py index d52e4e3e..8752fc36 100644 --- a/src/mistralai/client/models/messageoutputentry.py +++ b/src/mistralai/client/models/messageoutputentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a07577d2268d from __future__ import annotations from .messageoutputcontentchunks import ( diff --git a/src/mistralai/client/models/messageoutputevent.py b/src/mistralai/client/models/messageoutputevent.py index 447e3867..39c10139 100644 --- a/src/mistralai/client/models/messageoutputevent.py +++ b/src/mistralai/client/models/messageoutputevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a2bbf63615c6 from __future__ import annotations from .outputcontentchunks import OutputContentChunks, OutputContentChunksTypedDict diff --git a/src/mistralai/client/models/metricout.py b/src/mistralai/client/models/metricout.py index f8027a69..5705c712 100644 --- a/src/mistralai/client/models/metricout.py +++ b/src/mistralai/client/models/metricout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 92d33621dda7 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/mistralerror.py b/src/mistralai/client/models/mistralerror.py index 28cfd22d..862a6be8 100644 --- a/src/mistralai/client/models/mistralerror.py +++ b/src/mistralai/client/models/mistralerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 68ffd8394c2e import httpx from typing import Optional diff --git a/src/mistralai/client/models/mistralpromptmode.py b/src/mistralai/client/models/mistralpromptmode.py index 7008fc05..9b91323e 100644 --- a/src/mistralai/client/models/mistralpromptmode.py +++ b/src/mistralai/client/models/mistralpromptmode.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 95abc4ec799a from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/modelcapabilities.py b/src/mistralai/client/models/modelcapabilities.py index a6db80e7..c329efbc 100644 --- a/src/mistralai/client/models/modelcapabilities.py +++ b/src/mistralai/client/models/modelcapabilities.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 64d8a422ea29 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/modelconversation.py b/src/mistralai/client/models/modelconversation.py index d348072a..c0bacb7f 100644 --- a/src/mistralai/client/models/modelconversation.py +++ b/src/mistralai/client/models/modelconversation.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fea0a651f888 from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict diff --git a/src/mistralai/client/models/modellist.py b/src/mistralai/client/models/modellist.py index b357ae84..c122122c 100644 --- a/src/mistralai/client/models/modellist.py +++ b/src/mistralai/client/models/modellist.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 00693c7eec60 from __future__ import annotations from .basemodelcard import BaseModelCard, BaseModelCardTypedDict diff --git a/src/mistralai/client/models/moderationobject.py b/src/mistralai/client/models/moderationobject.py index a6b44b96..9aa4eb15 100644 --- a/src/mistralai/client/models/moderationobject.py +++ b/src/mistralai/client/models/moderationobject.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 132faad0549a from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/moderationresponse.py b/src/mistralai/client/models/moderationresponse.py index 288c8d82..a8a8ec3d 100644 --- a/src/mistralai/client/models/moderationresponse.py +++ b/src/mistralai/client/models/moderationresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 06bab279cb31 from __future__ import annotations from .moderationobject import ModerationObject, ModerationObjectTypedDict diff --git a/src/mistralai/client/models/no_response_error.py b/src/mistralai/client/models/no_response_error.py index 1deab64b..7705f194 100644 --- a/src/mistralai/client/models/no_response_error.py +++ b/src/mistralai/client/models/no_response_error.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2849e0a482e2 from dataclasses import dataclass diff --git a/src/mistralai/client/models/ocrimageobject.py b/src/mistralai/client/models/ocrimageobject.py index e97fa8df..e95b67e1 100644 --- a/src/mistralai/client/models/ocrimageobject.py +++ b/src/mistralai/client/models/ocrimageobject.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 685faeb41a80 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/ocrpagedimensions.py b/src/mistralai/client/models/ocrpagedimensions.py index f4fc11e0..847205c6 100644 --- a/src/mistralai/client/models/ocrpagedimensions.py +++ b/src/mistralai/client/models/ocrpagedimensions.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 02f763afbc9f from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/ocrpageobject.py b/src/mistralai/client/models/ocrpageobject.py index f8b43601..4f4ccf43 100644 --- a/src/mistralai/client/models/ocrpageobject.py +++ b/src/mistralai/client/models/ocrpageobject.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 07a099f89487 from __future__ import annotations from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict diff --git a/src/mistralai/client/models/ocrrequest.py b/src/mistralai/client/models/ocrrequest.py index 03a6028c..18b899dd 100644 --- a/src/mistralai/client/models/ocrrequest.py +++ b/src/mistralai/client/models/ocrrequest.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 36f204c64074 from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict diff --git a/src/mistralai/client/models/ocrresponse.py b/src/mistralai/client/models/ocrresponse.py index 2813a1ca..0a36e975 100644 --- a/src/mistralai/client/models/ocrresponse.py +++ b/src/mistralai/client/models/ocrresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2fdfc881ca56 from __future__ import annotations from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict diff --git a/src/mistralai/client/models/ocrtableobject.py b/src/mistralai/client/models/ocrtableobject.py index f3b0bc45..e32ad894 100644 --- a/src/mistralai/client/models/ocrtableobject.py +++ b/src/mistralai/client/models/ocrtableobject.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d74dd0d2ddac from __future__ import annotations from mistralai.client.types import BaseModel, UnrecognizedStr diff --git a/src/mistralai/client/models/ocrusageinfo.py b/src/mistralai/client/models/ocrusageinfo.py index 62f07fd4..a421d850 100644 --- a/src/mistralai/client/models/ocrusageinfo.py +++ b/src/mistralai/client/models/ocrusageinfo.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 272b7e1785d5 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/outputcontentchunks.py b/src/mistralai/client/models/outputcontentchunks.py index ad0c087e..1a115fe8 100644 --- a/src/mistralai/client/models/outputcontentchunks.py +++ b/src/mistralai/client/models/outputcontentchunks.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 9ad9741f4975 from __future__ import annotations from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict diff --git a/src/mistralai/client/models/paginationinfo.py b/src/mistralai/client/models/paginationinfo.py index 0252f448..2b9dab62 100644 --- a/src/mistralai/client/models/paginationinfo.py +++ b/src/mistralai/client/models/paginationinfo.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 48851e82d67e from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/prediction.py b/src/mistralai/client/models/prediction.py index f2c5d9c6..52f4adf1 100644 --- a/src/mistralai/client/models/prediction.py +++ b/src/mistralai/client/models/prediction.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1cc842a069a5 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/processingstatusout.py b/src/mistralai/client/models/processingstatusout.py index 031f386f..3acadcc9 100644 --- a/src/mistralai/client/models/processingstatusout.py +++ b/src/mistralai/client/models/processingstatusout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3df842c4140f from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/realtimetranscriptionerror.py b/src/mistralai/client/models/realtimetranscriptionerror.py index e6a889de..f8f2d3da 100644 --- a/src/mistralai/client/models/realtimetranscriptionerror.py +++ b/src/mistralai/client/models/realtimetranscriptionerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8c2267378f48 from __future__ import annotations from .realtimetranscriptionerrordetail import ( diff --git a/src/mistralai/client/models/realtimetranscriptionerrordetail.py b/src/mistralai/client/models/realtimetranscriptionerrordetail.py index e1f48379..cec1f6ea 100644 --- a/src/mistralai/client/models/realtimetranscriptionerrordetail.py +++ b/src/mistralai/client/models/realtimetranscriptionerrordetail.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5bd25cdf9c7a from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/realtimetranscriptionsession.py b/src/mistralai/client/models/realtimetranscriptionsession.py index 3a330651..d20d0d8c 100644 --- a/src/mistralai/client/models/realtimetranscriptionsession.py +++ b/src/mistralai/client/models/realtimetranscriptionsession.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 02517fa5411a from __future__ import annotations from .audioformat import AudioFormat, AudioFormatTypedDict diff --git a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py index cc6d5028..c4fa5774 100644 --- a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py +++ b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4e3731f63a3c from __future__ import annotations from .realtimetranscriptionsession import ( diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py index 3da23595..a61fb05e 100644 --- a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 686dc4f2450f from __future__ import annotations from .realtimetranscriptionsession import ( diff --git a/src/mistralai/client/models/referencechunk.py b/src/mistralai/client/models/referencechunk.py index 4c703b81..7634d8ae 100644 --- a/src/mistralai/client/models/referencechunk.py +++ b/src/mistralai/client/models/referencechunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 921acd3a224a from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/libraries_documents_get_v1op.py b/src/mistralai/client/models/reprocessdocumentop.py similarity index 81% rename from src/mistralai/client/models/libraries_documents_get_v1op.py rename to src/mistralai/client/models/reprocessdocumentop.py index a67e687e..48a4b72b 100644 --- a/src/mistralai/client/models/libraries_documents_get_v1op.py +++ b/src/mistralai/client/models/reprocessdocumentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b2913a7aa5c9 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsGetV1RequestTypedDict(TypedDict): +class ReprocessDocumentRequestTypedDict(TypedDict): library_id: str document_id: str -class LibrariesDocumentsGetV1Request(BaseModel): +class ReprocessDocumentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/requestsource.py b/src/mistralai/client/models/requestsource.py index 7b0a35c4..fc4433cb 100644 --- a/src/mistralai/client/models/requestsource.py +++ b/src/mistralai/client/models/requestsource.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3f2774d9e609 from __future__ import annotations from typing import Literal diff --git a/src/mistralai/client/models/responsedoneevent.py b/src/mistralai/client/models/responsedoneevent.py index 283baa11..ed331ff1 100644 --- a/src/mistralai/client/models/responsedoneevent.py +++ b/src/mistralai/client/models/responsedoneevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cf8a686bf82c from __future__ import annotations from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict diff --git a/src/mistralai/client/models/responseerrorevent.py b/src/mistralai/client/models/responseerrorevent.py index ee078963..8f196a52 100644 --- a/src/mistralai/client/models/responseerrorevent.py +++ b/src/mistralai/client/models/responseerrorevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b286d74e8724 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/responseformat.py b/src/mistralai/client/models/responseformat.py index 5899b017..409b80d6 100644 --- a/src/mistralai/client/models/responseformat.py +++ b/src/mistralai/client/models/responseformat.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6ab8bc8d22c0 from __future__ import annotations from .jsonschema import JSONSchema, JSONSchemaTypedDict diff --git a/src/mistralai/client/models/responseformats.py b/src/mistralai/client/models/responseformats.py index b98cd098..21345778 100644 --- a/src/mistralai/client/models/responseformats.py +++ b/src/mistralai/client/models/responseformats.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c4462a05fb08 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/responsestartedevent.py b/src/mistralai/client/models/responsestartedevent.py index 0841fd58..256d2a6c 100644 --- a/src/mistralai/client/models/responsestartedevent.py +++ b/src/mistralai/client/models/responsestartedevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 24f54ee8b0f2 from __future__ import annotations from datetime import datetime diff --git a/src/mistralai/client/models/responsevalidationerror.py b/src/mistralai/client/models/responsevalidationerror.py index bab5d0b7..1ed0d552 100644 --- a/src/mistralai/client/models/responsevalidationerror.py +++ b/src/mistralai/client/models/responsevalidationerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c244a88981e0 import httpx from typing import Optional diff --git a/src/mistralai/client/models/agents_api_v1_conversations_restartop.py b/src/mistralai/client/models/restartconversationop.py similarity index 87% rename from src/mistralai/client/models/agents_api_v1_conversations_restartop.py rename to src/mistralai/client/models/restartconversationop.py index 8bce3ce5..b09eaed5 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_restartop.py +++ b/src/mistralai/client/models/restartconversationop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2f6f3e4bbfd8 from __future__ import annotations from .conversationrestartrequest import ( @@ -10,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): +class RestartConversationRequestTypedDict(TypedDict): conversation_id: str r"""ID of the original conversation which is being restarted.""" conversation_restart_request: ConversationRestartRequestTypedDict -class AgentsAPIV1ConversationsRestartRequest(BaseModel): +class RestartConversationRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py b/src/mistralai/client/models/restartconversationstreamop.py similarity index 87% rename from src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py rename to src/mistralai/client/models/restartconversationstreamop.py index 9b489ab4..3b2025f5 100644 --- a/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py +++ b/src/mistralai/client/models/restartconversationstreamop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 16dc9ee5bf22 from __future__ import annotations from .conversationrestartstreamrequest import ( @@ -10,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): +class RestartConversationStreamRequestTypedDict(TypedDict): conversation_id: str r"""ID of the original conversation which is being restarted.""" conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict -class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): +class RestartConversationStreamRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/files_api_routes_retrieve_fileop.py b/src/mistralai/client/models/retrievefileop.py similarity index 77% rename from src/mistralai/client/models/files_api_routes_retrieve_fileop.py rename to src/mistralai/client/models/retrievefileop.py index 4a9678e5..edd50e57 100644 --- a/src/mistralai/client/models/files_api_routes_retrieve_fileop.py +++ b/src/mistralai/client/models/retrievefileop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ee73efdf9180 from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): +class RetrieveFileRequestTypedDict(TypedDict): file_id: str -class FilesAPIRoutesRetrieveFileRequest(BaseModel): +class RetrieveFileRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/retrievefileout.py b/src/mistralai/client/models/retrievefileout.py index ffd0617a..2abf2161 100644 --- a/src/mistralai/client/models/retrievefileout.py +++ b/src/mistralai/client/models/retrievefileout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8bb5859aa0d0 from __future__ import annotations from .filepurpose import FilePurpose diff --git a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/client/models/retrievemodelop.py similarity index 89% rename from src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py rename to src/mistralai/client/models/retrievemodelop.py index 96e5b57f..b4334e9a 100644 --- a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py +++ b/src/mistralai/client/models/retrievemodelop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d883baa79c9e from __future__ import annotations from .basemodelcard import BaseModelCard, BaseModelCardTypedDict @@ -10,12 +11,12 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): +class RetrieveModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to retrieve.""" -class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): +class RetrieveModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/sampletype.py b/src/mistralai/client/models/sampletype.py index e0727b02..dfec7cce 100644 --- a/src/mistralai/client/models/sampletype.py +++ b/src/mistralai/client/models/sampletype.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a9309422fed7 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/sdkerror.py b/src/mistralai/client/models/sdkerror.py index ceb03c48..101e1e6a 100644 --- a/src/mistralai/client/models/sdkerror.py +++ b/src/mistralai/client/models/sdkerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 12f991dad510 import httpx from typing import Optional diff --git a/src/mistralai/client/models/security.py b/src/mistralai/client/models/security.py index 1b67229b..4fa8b4b2 100644 --- a/src/mistralai/client/models/security.py +++ b/src/mistralai/client/models/security.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: c2ca0e2a36b7 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/shareenum.py b/src/mistralai/client/models/shareenum.py index ca1b9624..08ffeb7e 100644 --- a/src/mistralai/client/models/shareenum.py +++ b/src/mistralai/client/models/shareenum.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a0e2a7a16bf8 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/sharingdelete.py b/src/mistralai/client/models/sharingdelete.py index d659342f..202732cf 100644 --- a/src/mistralai/client/models/sharingdelete.py +++ b/src/mistralai/client/models/sharingdelete.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f5ecce372e06 from __future__ import annotations from .entitytype import EntityType diff --git a/src/mistralai/client/models/sharingin.py b/src/mistralai/client/models/sharingin.py index 630f4c70..8cc3e896 100644 --- a/src/mistralai/client/models/sharingin.py +++ b/src/mistralai/client/models/sharingin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e953dda09c02 from __future__ import annotations from .entitytype import EntityType diff --git a/src/mistralai/client/models/sharingout.py b/src/mistralai/client/models/sharingout.py index 195701d1..77807154 100644 --- a/src/mistralai/client/models/sharingout.py +++ b/src/mistralai/client/models/sharingout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0b8804effb5c from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/source.py b/src/mistralai/client/models/source.py index 181b327e..fcea403c 100644 --- a/src/mistralai/client/models/source.py +++ b/src/mistralai/client/models/source.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fcee60a4ea0d from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/ssetypes.py b/src/mistralai/client/models/ssetypes.py index ac2722f1..0add960b 100644 --- a/src/mistralai/client/models/ssetypes.py +++ b/src/mistralai/client/models/ssetypes.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1733e4765106 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/client/models/startfinetuningjobop.py similarity index 72% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py rename to src/mistralai/client/models/startfinetuningjobop.py index 91d581eb..805a8721 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +++ b/src/mistralai/client/models/startfinetuningjobop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 663886392468 from __future__ import annotations from .classifierdetailedjobout import ( @@ -16,24 +17,24 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): +class StartFineTuningJobRequestTypedDict(TypedDict): job_id: str -class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): +class StartFineTuningJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] -JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", +StartFineTuningJobResponseTypedDict = TypeAliasType( + "StartFineTuningJobResponseTypedDict", Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], ) r"""OK""" -JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ +StartFineTuningJobResponse = Annotated[ Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], Field(discriminator="JOB_TYPE"), ] diff --git a/src/mistralai/client/models/systemmessage.py b/src/mistralai/client/models/systemmessage.py index 245e7b61..352eca76 100644 --- a/src/mistralai/client/models/systemmessage.py +++ b/src/mistralai/client/models/systemmessage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 500ef6e85ba1 from __future__ import annotations from .systemmessagecontentchunks import ( diff --git a/src/mistralai/client/models/systemmessagecontentchunks.py b/src/mistralai/client/models/systemmessagecontentchunks.py index 7a797379..d480a219 100644 --- a/src/mistralai/client/models/systemmessagecontentchunks.py +++ b/src/mistralai/client/models/systemmessagecontentchunks.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 297e8905d5af from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict diff --git a/src/mistralai/client/models/textchunk.py b/src/mistralai/client/models/textchunk.py index 4207ce7e..c0584234 100644 --- a/src/mistralai/client/models/textchunk.py +++ b/src/mistralai/client/models/textchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9c96fb86a9ab from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/thinkchunk.py b/src/mistralai/client/models/thinkchunk.py index b1560806..a999f5d7 100644 --- a/src/mistralai/client/models/thinkchunk.py +++ b/src/mistralai/client/models/thinkchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 294bfce193a4 from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict diff --git a/src/mistralai/client/models/timestampgranularity.py b/src/mistralai/client/models/timestampgranularity.py index 5bda890f..8d377375 100644 --- a/src/mistralai/client/models/timestampgranularity.py +++ b/src/mistralai/client/models/timestampgranularity.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 68ddf8d702ea from __future__ import annotations from typing import Literal diff --git a/src/mistralai/client/models/tool.py b/src/mistralai/client/models/tool.py index 4b29f575..a46d31f1 100644 --- a/src/mistralai/client/models/tool.py +++ b/src/mistralai/client/models/tool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 48b4f6f50fe9 from __future__ import annotations from .function import Function, FunctionTypedDict diff --git a/src/mistralai/client/models/toolcall.py b/src/mistralai/client/models/toolcall.py index 558b49bf..4a05bbd0 100644 --- a/src/mistralai/client/models/toolcall.py +++ b/src/mistralai/client/models/toolcall.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: fb34a1a3f3c2 from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict diff --git a/src/mistralai/client/models/toolchoice.py b/src/mistralai/client/models/toolchoice.py index 2c7f6cbf..aa2016fb 100644 --- a/src/mistralai/client/models/toolchoice.py +++ b/src/mistralai/client/models/toolchoice.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 14f7e4cc35b6 from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict diff --git a/src/mistralai/client/models/toolchoiceenum.py b/src/mistralai/client/models/toolchoiceenum.py index ba8195b8..d66c3d07 100644 --- a/src/mistralai/client/models/toolchoiceenum.py +++ b/src/mistralai/client/models/toolchoiceenum.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c7798801f860 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/toolexecutiondeltaevent.py b/src/mistralai/client/models/toolexecutiondeltaevent.py index aeda1472..384ec240 100644 --- a/src/mistralai/client/models/toolexecutiondeltaevent.py +++ b/src/mistralai/client/models/toolexecutiondeltaevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: df8f17cf3e07 from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/toolexecutiondoneevent.py b/src/mistralai/client/models/toolexecutiondoneevent.py index 88aa5124..56f28899 100644 --- a/src/mistralai/client/models/toolexecutiondoneevent.py +++ b/src/mistralai/client/models/toolexecutiondoneevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 514fdee7d99f from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/toolexecutionentry.py b/src/mistralai/client/models/toolexecutionentry.py index 530c9029..158cbf06 100644 --- a/src/mistralai/client/models/toolexecutionentry.py +++ b/src/mistralai/client/models/toolexecutionentry.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 76db69eebe41 from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/toolexecutionstartedevent.py b/src/mistralai/client/models/toolexecutionstartedevent.py index 3d5f49c7..15918669 100644 --- a/src/mistralai/client/models/toolexecutionstartedevent.py +++ b/src/mistralai/client/models/toolexecutionstartedevent.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 40fadb8e49a1 from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/toolfilechunk.py b/src/mistralai/client/models/toolfilechunk.py index 62b5ffed..6eebd562 100644 --- a/src/mistralai/client/models/toolfilechunk.py +++ b/src/mistralai/client/models/toolfilechunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 26c8aadf416a from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/toolmessage.py b/src/mistralai/client/models/toolmessage.py index 44fe63e7..b3e8ffd9 100644 --- a/src/mistralai/client/models/toolmessage.py +++ b/src/mistralai/client/models/toolmessage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 15f1af161031 from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict diff --git a/src/mistralai/client/models/toolreferencechunk.py b/src/mistralai/client/models/toolreferencechunk.py index 882b1563..3c76c8c2 100644 --- a/src/mistralai/client/models/toolreferencechunk.py +++ b/src/mistralai/client/models/toolreferencechunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 822e9f3e70de from __future__ import annotations from .builtinconnectors import BuiltInConnectors diff --git a/src/mistralai/client/models/tooltypes.py b/src/mistralai/client/models/tooltypes.py index abb26c25..e601c196 100644 --- a/src/mistralai/client/models/tooltypes.py +++ b/src/mistralai/client/models/tooltypes.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 86c3b54272fd from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/trainingfile.py b/src/mistralai/client/models/trainingfile.py index 1d9763e0..1f710ff8 100644 --- a/src/mistralai/client/models/trainingfile.py +++ b/src/mistralai/client/models/trainingfile.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 2edf9bce227d from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/transcriptionresponse.py b/src/mistralai/client/models/transcriptionresponse.py index 24c0b92e..786863ec 100644 --- a/src/mistralai/client/models/transcriptionresponse.py +++ b/src/mistralai/client/models/transcriptionresponse.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 60896dbc6345 from __future__ import annotations from .transcriptionsegmentchunk import ( diff --git a/src/mistralai/client/models/transcriptionsegmentchunk.py b/src/mistralai/client/models/transcriptionsegmentchunk.py index 25e859e5..c78bec30 100644 --- a/src/mistralai/client/models/transcriptionsegmentchunk.py +++ b/src/mistralai/client/models/transcriptionsegmentchunk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d1e6f3bdc74b from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/transcriptionstreamdone.py b/src/mistralai/client/models/transcriptionstreamdone.py index 9ba2aeff..b5740b3b 100644 --- a/src/mistralai/client/models/transcriptionstreamdone.py +++ b/src/mistralai/client/models/transcriptionstreamdone.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 066a9158ed09 from __future__ import annotations from .transcriptionsegmentchunk import ( diff --git a/src/mistralai/client/models/transcriptionstreamevents.py b/src/mistralai/client/models/transcriptionstreamevents.py index 63a08fb5..17161a17 100644 --- a/src/mistralai/client/models/transcriptionstreamevents.py +++ b/src/mistralai/client/models/transcriptionstreamevents.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b50b3d74f16f from __future__ import annotations from .transcriptionstreamdone import ( diff --git a/src/mistralai/client/models/transcriptionstreameventtypes.py b/src/mistralai/client/models/transcriptionstreameventtypes.py index cb6b2889..c74bbb74 100644 --- a/src/mistralai/client/models/transcriptionstreameventtypes.py +++ b/src/mistralai/client/models/transcriptionstreameventtypes.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6f71f6fbf4c5 from __future__ import annotations from mistralai.client.types import UnrecognizedStr diff --git a/src/mistralai/client/models/transcriptionstreamlanguage.py b/src/mistralai/client/models/transcriptionstreamlanguage.py index 244103be..67b3e979 100644 --- a/src/mistralai/client/models/transcriptionstreamlanguage.py +++ b/src/mistralai/client/models/transcriptionstreamlanguage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e94333e4bc27 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py index ee014742..8db5e736 100644 --- a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py +++ b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c0a882ce57e5 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/transcriptionstreamtextdelta.py b/src/mistralai/client/models/transcriptionstreamtextdelta.py index feb459ea..49338a08 100644 --- a/src/mistralai/client/models/transcriptionstreamtextdelta.py +++ b/src/mistralai/client/models/transcriptionstreamtextdelta.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6086dc081147 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/unarchiveftmodelout.py b/src/mistralai/client/models/unarchiveftmodelout.py index 511c390b..0249a69e 100644 --- a/src/mistralai/client/models/unarchiveftmodelout.py +++ b/src/mistralai/client/models/unarchiveftmodelout.py @@ -1,23 +1,27 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9dbc3bfb71ed from __future__ import annotations from mistralai.client.types import BaseModel +from mistralai.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -UnarchiveFTModelOutObject = Literal["model",] +from typing_extensions import Annotated, NotRequired, TypedDict class UnarchiveFTModelOutTypedDict(TypedDict): id: str - object: NotRequired[UnarchiveFTModelOutObject] + object: Literal["model"] archived: NotRequired[bool] class UnarchiveFTModelOut(BaseModel): id: str - object: Optional[UnarchiveFTModelOutObject] = "model" + OBJECT: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" archived: Optional[bool] = False diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py b/src/mistralai/client/models/unarchivemodelop.py similarity index 76% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py rename to src/mistralai/client/models/unarchivemodelop.py index e1be0ac0..1d68a06a 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py +++ b/src/mistralai/client/models/unarchivemodelop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: eb18584fd78c from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): +class UnarchiveModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to unarchive.""" -class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): +class UnarchiveModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_updateop.py b/src/mistralai/client/models/updateagentop.py similarity index 86% rename from src/mistralai/client/models/agents_api_v1_agents_updateop.py rename to src/mistralai/client/models/updateagentop.py index 116acaa7..28acc83d 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_updateop.py +++ b/src/mistralai/client/models/updateagentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ae3a6abea468 from __future__ import annotations from .agentupdaterequest import AgentUpdateRequest, AgentUpdateRequestTypedDict @@ -7,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): +class UpdateAgentRequestTypedDict(TypedDict): agent_id: str agent_update_request: AgentUpdateRequestTypedDict -class AgentsAPIV1AgentsUpdateRequest(BaseModel): +class UpdateAgentRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py b/src/mistralai/client/models/updateagentversionop.py similarity index 81% rename from src/mistralai/client/models/agents_api_v1_agents_update_versionop.py rename to src/mistralai/client/models/updateagentversionop.py index 116f952b..114013bc 100644 --- a/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py +++ b/src/mistralai/client/models/updateagentversionop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3821dca5b20a from __future__ import annotations from mistralai.client.types import BaseModel @@ -6,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class AgentsAPIV1AgentsUpdateVersionRequestTypedDict(TypedDict): +class UpdateAgentVersionRequestTypedDict(TypedDict): agent_id: str version: int -class AgentsAPIV1AgentsUpdateVersionRequest(BaseModel): +class UpdateAgentVersionRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_update_v1op.py b/src/mistralai/client/models/updatedocumentop.py similarity index 87% rename from src/mistralai/client/models/libraries_documents_update_v1op.py rename to src/mistralai/client/models/updatedocumentop.py index f677b4dd..073f22a9 100644 --- a/src/mistralai/client/models/libraries_documents_update_v1op.py +++ b/src/mistralai/client/models/updatedocumentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: eee9ef317180 from __future__ import annotations from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict @@ -7,13 +8,13 @@ from typing_extensions import Annotated, TypedDict -class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): +class UpdateDocumentRequestTypedDict(TypedDict): library_id: str document_id: str document_update_in: DocumentUpdateInTypedDict -class LibrariesDocumentsUpdateV1Request(BaseModel): +class UpdateDocumentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/updateftmodelin.py b/src/mistralai/client/models/updateftmodelin.py index 0471a154..4ac5a8a2 100644 --- a/src/mistralai/client/models/updateftmodelin.py +++ b/src/mistralai/client/models/updateftmodelin.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 39e2d678e651 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/libraries_update_v1op.py b/src/mistralai/client/models/updatelibraryop.py similarity index 85% rename from src/mistralai/client/models/libraries_update_v1op.py rename to src/mistralai/client/models/updatelibraryop.py index c434ab7a..c5a1ad30 100644 --- a/src/mistralai/client/models/libraries_update_v1op.py +++ b/src/mistralai/client/models/updatelibraryop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4ba7acdb62c6 from __future__ import annotations from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict @@ -7,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesUpdateV1RequestTypedDict(TypedDict): +class UpdateLibraryRequestTypedDict(TypedDict): library_id: str library_in_update: LibraryInUpdateTypedDict -class LibrariesUpdateV1Request(BaseModel): +class UpdateLibraryRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/client/models/updatemodelop.py similarity index 77% rename from src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py rename to src/mistralai/client/models/updatemodelop.py index 760c22f4..023be979 100644 --- a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +++ b/src/mistralai/client/models/updatemodelop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ba149ecfe03e from __future__ import annotations from .classifierftmodelout import ClassifierFTModelOut, ClassifierFTModelOutTypedDict @@ -11,13 +12,13 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): +class UpdateModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to update.""" update_ft_model_in: UpdateFTModelInTypedDict -class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): +class UpdateModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] @@ -29,14 +30,14 @@ class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): ] -JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( - "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", +UpdateModelResponseTypedDict = TypeAliasType( + "UpdateModelResponseTypedDict", Union[CompletionFTModelOutTypedDict, ClassifierFTModelOutTypedDict], ) r"""OK""" -JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ +UpdateModelResponse = Annotated[ Union[ClassifierFTModelOut, CompletionFTModelOut], Field(discriminator="MODEL_TYPE") ] r"""OK""" diff --git a/src/mistralai/client/models/libraries_share_create_v1op.py b/src/mistralai/client/models/updateorcreatelibraryaccessop.py similarity index 81% rename from src/mistralai/client/models/libraries_share_create_v1op.py rename to src/mistralai/client/models/updateorcreatelibraryaccessop.py index d0313bd0..1abe6eda 100644 --- a/src/mistralai/client/models/libraries_share_create_v1op.py +++ b/src/mistralai/client/models/updateorcreatelibraryaccessop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: ec9b15418f5c from __future__ import annotations from .sharingin import SharingIn, SharingInTypedDict @@ -7,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class LibrariesShareCreateV1RequestTypedDict(TypedDict): +class UpdateOrCreateLibraryAccessRequestTypedDict(TypedDict): library_id: str sharing_in: SharingInTypedDict -class LibrariesShareCreateV1Request(BaseModel): +class UpdateOrCreateLibraryAccessRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_upload_v1op.py b/src/mistralai/client/models/uploaddocumentop.py similarity index 92% rename from src/mistralai/client/models/libraries_documents_upload_v1op.py rename to src/mistralai/client/models/uploaddocumentop.py index 18a5b780..2c957947 100644 --- a/src/mistralai/client/models/libraries_documents_upload_v1op.py +++ b/src/mistralai/client/models/uploaddocumentop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0018fe7ff48c from __future__ import annotations from .file import File, FileTypedDict @@ -40,12 +41,12 @@ class DocumentUpload(BaseModel): """ -class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): +class UploadDocumentRequestTypedDict(TypedDict): library_id: str request_body: DocumentUploadTypedDict -class LibrariesDocumentsUploadV1Request(BaseModel): +class UploadDocumentRequest(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/files_api_routes_upload_fileop.py b/src/mistralai/client/models/uploadfileop.py similarity index 97% rename from src/mistralai/client/models/files_api_routes_upload_fileop.py rename to src/mistralai/client/models/uploadfileop.py index ab2f1524..50848f0b 100644 --- a/src/mistralai/client/models/files_api_routes_upload_fileop.py +++ b/src/mistralai/client/models/uploadfileop.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d67619670938 from __future__ import annotations from .file import File, FileTypedDict diff --git a/src/mistralai/client/models/uploadfileout.py b/src/mistralai/client/models/uploadfileout.py index 55e56504..be291efb 100644 --- a/src/mistralai/client/models/uploadfileout.py +++ b/src/mistralai/client/models/uploadfileout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 42466f2bebfb from __future__ import annotations from .filepurpose import FilePurpose diff --git a/src/mistralai/client/models/usageinfo.py b/src/mistralai/client/models/usageinfo.py index f1186d97..e78f92e7 100644 --- a/src/mistralai/client/models/usageinfo.py +++ b/src/mistralai/client/models/usageinfo.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 54adb9a3af16 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/usermessage.py b/src/mistralai/client/models/usermessage.py index fe64a8cc..25ccdf80 100644 --- a/src/mistralai/client/models/usermessage.py +++ b/src/mistralai/client/models/usermessage.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: cb583483acf4 from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict diff --git a/src/mistralai/client/models/validationerror.py b/src/mistralai/client/models/validationerror.py index 352409be..385714c8 100644 --- a/src/mistralai/client/models/validationerror.py +++ b/src/mistralai/client/models/validationerror.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 15df3c7368ab from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/wandbintegration.py b/src/mistralai/client/models/wandbintegration.py index 18e32ac3..c5db4a6d 100644 --- a/src/mistralai/client/models/wandbintegration.py +++ b/src/mistralai/client/models/wandbintegration.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 4823c1e80942 from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/wandbintegrationout.py b/src/mistralai/client/models/wandbintegrationout.py index 6409f4a4..d0a09bf4 100644 --- a/src/mistralai/client/models/wandbintegrationout.py +++ b/src/mistralai/client/models/wandbintegrationout.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 6b103d74195c from __future__ import annotations from mistralai.client.types import ( diff --git a/src/mistralai/client/models/websearchpremiumtool.py b/src/mistralai/client/models/websearchpremiumtool.py index c7825ec3..9588ab1d 100644 --- a/src/mistralai/client/models/websearchpremiumtool.py +++ b/src/mistralai/client/models/websearchpremiumtool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: bfe88af887e3 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models/websearchtool.py b/src/mistralai/client/models/websearchtool.py index 7a237d86..27502909 100644 --- a/src/mistralai/client/models/websearchtool.py +++ b/src/mistralai/client/models/websearchtool.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 26b0903423e5 from __future__ import annotations from mistralai.client.types import BaseModel diff --git a/src/mistralai/client/models_.py b/src/mistralai/client/models_.py index 00708197..05b33ac7 100644 --- a/src/mistralai/client/models_.py +++ b/src/mistralai/client/models_.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1d277958a843 from .basesdk import BaseSDK from mistralai.client import models, utils @@ -67,7 +68,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="list_models_v1_models_get", + operation_id="ListModels", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -144,7 +145,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="list_models_v1_models_get", + operation_id="ListModels", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -195,7 +196,7 @@ def retrieve( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveModelV1ModelsModelIDGetRequest( + request = models.RetrieveModelRequest( model_id=model_id, ) @@ -228,7 +229,7 @@ def retrieve( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="retrieve_model_v1_models__model_id__get", + operation_id="RetrieveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -287,7 +288,7 @@ async def retrieve_async( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveModelV1ModelsModelIDGetRequest( + request = models.RetrieveModelRequest( model_id=model_id, ) @@ -320,7 +321,7 @@ async def retrieve_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="retrieve_model_v1_models__model_id__get", + operation_id="RetrieveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -379,7 +380,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteModelV1ModelsModelIDDeleteRequest( + request = models.DeleteModelRequest( model_id=model_id, ) @@ -412,7 +413,7 
@@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="delete_model_v1_models__model_id__delete", + operation_id="DeleteModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -469,7 +470,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteModelV1ModelsModelIDDeleteRequest( + request = models.DeleteModelRequest( model_id=model_id, ) @@ -502,7 +503,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="delete_model_v1_models__model_id__delete", + operation_id="DeleteModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -540,7 +541,7 @@ def update( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: + ) -> models.UpdateModelResponse: r"""Update Fine Tuned Model Update a model name or description. 
@@ -563,7 +564,7 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + request = models.UpdateModelRequest( model_id=model_id, update_ft_model_in=models.UpdateFTModelIn( name=name, @@ -603,7 +604,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + operation_id="UpdateModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -615,9 +616,7 @@ def update( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res - ) + return unmarshal_json_response(models.UpdateModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -637,7 +636,7 @@ async def update_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: + ) -> models.UpdateModelResponse: r"""Update Fine Tuned Model Update a model name or description. 
@@ -660,7 +659,7 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( + request = models.UpdateModelRequest( model_id=model_id, update_ft_model_in=models.UpdateFTModelIn( name=name, @@ -700,7 +699,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", + operation_id="UpdateModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -712,9 +711,7 @@ async def update_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response( - models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res - ) + return unmarshal_json_response(models.UpdateModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) raise models.SDKError("API error occurred", http_res, http_res_text) @@ -753,7 +750,7 @@ def archive( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + request = models.ArchiveModelRequest( model_id=model_id, ) @@ -786,7 +783,7 @@ def archive( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + operation_id="ArchiveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -837,7 +834,7 @@ async def archive_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( + request = models.ArchiveModelRequest( model_id=model_id, ) @@ -870,7 +867,7 @@ async def archive_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - 
operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", + operation_id="ArchiveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -921,7 +918,7 @@ def unarchive( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + request = models.UnarchiveModelRequest( model_id=model_id, ) @@ -954,7 +951,7 @@ def unarchive( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + operation_id="UnarchiveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1005,7 +1002,7 @@ async def unarchive_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( + request = models.UnarchiveModelRequest( model_id=model_id, ) @@ -1038,7 +1035,7 @@ async def unarchive_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", + operation_id="UnarchiveModel", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security diff --git a/src/mistralai/client/ocr.py b/src/mistralai/client/ocr.py index ce7e2126..2aa38229 100644 --- a/src/mistralai/client/ocr.py +++ b/src/mistralai/client/ocr.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2f804a12fc62 from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/sdk.py b/src/mistralai/client/sdk.py index 99579400..b1ab5493 100644 --- a/src/mistralai/client/sdk.py +++ b/src/mistralai/client/sdk.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 48edbcb38d7e from .basesdk import BaseSDK from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients @@ -30,7 +31,10 @@ class Mistral(BaseSDK): - r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + r"""Mistral AI API: Dora OpenAPI schema + + Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + """ models: "Models" r"""Model Management API""" diff --git a/src/mistralai/client/sdkconfiguration.py b/src/mistralai/client/sdkconfiguration.py index df50d16f..712e92e0 100644 --- a/src/mistralai/client/sdkconfiguration.py +++ b/src/mistralai/client/sdkconfiguration.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b7dd68a0235e from ._version import ( __gen_version__, diff --git a/src/mistralai/client/transcriptions.py b/src/mistralai/client/transcriptions.py index 45501024..f7ef5b0a 100644 --- a/src/mistralai/client/transcriptions.py +++ b/src/mistralai/client/transcriptions.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 75b45780c978 from .basesdk import BaseSDK from mistralai.client import models, utils diff --git a/src/mistralai/client/types/__init__.py b/src/mistralai/client/types/__init__.py index fc76fe0c..cf838643 100644 --- a/src/mistralai/client/types/__init__.py +++ b/src/mistralai/client/types/__init__.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 000b943f821c from .basemodel import ( BaseModel, diff --git a/src/mistralai/client/types/basemodel.py b/src/mistralai/client/types/basemodel.py index a9a640a1..4e889aa0 100644 --- a/src/mistralai/client/types/basemodel.py +++ b/src/mistralai/client/types/basemodel.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7ec465a1d3ff from pydantic import ConfigDict, model_serializer from pydantic import BaseModel as PydanticBaseModel diff --git a/src/mistralai/client/utils/__init__.py b/src/mistralai/client/utils/__init__.py index f9c2edce..7ed3a420 100644 --- a/src/mistralai/client/utils/__init__.py +++ b/src/mistralai/client/utils/__init__.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b69505f4b269 from typing import TYPE_CHECKING from importlib import import_module diff --git a/src/mistralai/client/utils/annotations.py b/src/mistralai/client/utils/annotations.py index 12e0aa4f..4b60ab8e 100644 --- a/src/mistralai/client/utils/annotations.py +++ b/src/mistralai/client/utils/annotations.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 1ffdedfc66a2 from enum import Enum from typing import Any, Optional diff --git a/src/mistralai/client/utils/datetimes.py b/src/mistralai/client/utils/datetimes.py index a6c52cd6..a2c94fac 100644 --- a/src/mistralai/client/utils/datetimes.py +++ b/src/mistralai/client/utils/datetimes.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c40066d868c9 from datetime import datetime import sys diff --git a/src/mistralai/client/utils/enums.py b/src/mistralai/client/utils/enums.py index 3324e1bc..d897495f 100644 --- a/src/mistralai/client/utils/enums.py +++ b/src/mistralai/client/utils/enums.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: a0735873b5ac import enum import sys diff --git a/src/mistralai/client/utils/eventstreaming.py b/src/mistralai/client/utils/eventstreaming.py index 0969899b..3fe3c7e1 100644 --- a/src/mistralai/client/utils/eventstreaming.py +++ b/src/mistralai/client/utils/eventstreaming.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 3263d7502030 import re import json diff --git a/src/mistralai/client/utils/forms.py b/src/mistralai/client/utils/forms.py index f961e76b..2b474b9a 100644 --- a/src/mistralai/client/utils/forms.py +++ b/src/mistralai/client/utils/forms.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 58842e905fce from typing import ( Any, diff --git a/src/mistralai/client/utils/headers.py b/src/mistralai/client/utils/headers.py index 37864cbb..64911872 100644 --- a/src/mistralai/client/utils/headers.py +++ b/src/mistralai/client/utils/headers.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 9066de2ead8b from typing import ( Any, diff --git a/src/mistralai/client/utils/logger.py b/src/mistralai/client/utils/logger.py index 2ef27ee5..3edad830 100644 --- a/src/mistralai/client/utils/logger.py +++ b/src/mistralai/client/utils/logger.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 745023607a1f import httpx import logging diff --git a/src/mistralai/client/utils/metadata.py b/src/mistralai/client/utils/metadata.py index 173b3e5c..d46ffa59 100644 --- a/src/mistralai/client/utils/metadata.py +++ b/src/mistralai/client/utils/metadata.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d49d535ae52c from typing import Optional, Type, TypeVar, Union from dataclasses import dataclass diff --git a/src/mistralai/client/utils/queryparams.py b/src/mistralai/client/utils/queryparams.py index c04e0db8..0b78c548 100644 --- a/src/mistralai/client/utils/queryparams.py +++ b/src/mistralai/client/utils/queryparams.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: bb77d4664844 from typing import ( Any, diff --git a/src/mistralai/client/utils/requestbodies.py b/src/mistralai/client/utils/requestbodies.py index 1de32b6d..3aae69c7 100644 --- a/src/mistralai/client/utils/requestbodies.py +++ b/src/mistralai/client/utils/requestbodies.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 946cfcd26ee4 import io from dataclasses import dataclass diff --git a/src/mistralai/client/utils/retries.py b/src/mistralai/client/utils/retries.py index 88a91b10..90c008b0 100644 --- a/src/mistralai/client/utils/retries.py +++ b/src/mistralai/client/utils/retries.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 5f1a5b90423c import asyncio import random diff --git a/src/mistralai/client/utils/security.py b/src/mistralai/client/utils/security.py index 3b8526bf..4c73806d 100644 --- a/src/mistralai/client/utils/security.py +++ b/src/mistralai/client/utils/security.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 1acb7c006265 import base64 diff --git a/src/mistralai/client/utils/serializers.py b/src/mistralai/client/utils/serializers.py index 14321eb4..fbc2772d 100644 --- a/src/mistralai/client/utils/serializers.py +++ b/src/mistralai/client/utils/serializers.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 53c57c7f29a8 from decimal import Decimal import functools diff --git a/src/mistralai/client/utils/unmarshal_json_response.py b/src/mistralai/client/utils/unmarshal_json_response.py index 6d43d6e4..65190e5c 100644 --- a/src/mistralai/client/utils/unmarshal_json_response.py +++ b/src/mistralai/client/utils/unmarshal_json_response.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: b13585fc5626 from typing import Any, Optional, Type, TypeVar, overload diff --git a/src/mistralai/client/utils/url.py b/src/mistralai/client/utils/url.py index c78ccbae..27a6a3a0 100644 --- a/src/mistralai/client/utils/url.py +++ b/src/mistralai/client/utils/url.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 3c6496c17510 from decimal import Decimal from typing import ( diff --git a/src/mistralai/client/utils/values.py b/src/mistralai/client/utils/values.py index dae01a44..2469a9f3 100644 --- a/src/mistralai/client/utils/values.py +++ b/src/mistralai/client/utils/values.py @@ -1,4 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: bb6ade7a7f82 from datetime import datetime from enum import Enum diff --git a/uv.lock b/uv.lock index caa731ed..1e667c77 100644 --- a/uv.lock +++ b/uv.lock @@ -625,7 +625,7 @@ requires-dist = [ { name = "opentelemetry-api", specifier = ">=1.33.1,<2.0.0" }, { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.37.0,<2.0.0" }, { name = "opentelemetry-sdk", specifier = ">=1.33.1,<2.0.0" }, - { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0,<0.60" }, + { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0,<0.61" }, { name = "pydantic", specifier = ">=2.10.3" }, { name = "python-dateutil", specifier = ">=2.8.2" }, { name = "pyyaml", specifier = ">=6.0.2,<7.0.0" }, From 8490078bb9387caadf2d6384ee6f4724d7c1f14a Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Mon, 23 Feb 2026 10:14:27 +0100 Subject: [PATCH 207/223] docs: add callout directing v1 users to v1 branch (#368) Users installing from PyPI get v1 but main branch docs are for v2, causing confusion with import paths. Add prominent notice at top of README directing them to the v1 branch for correct documentation. Fixes #366 --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 53de43f5..5c8a1b51 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,10 @@ # Mistral Python Client +> [!IMPORTANT] +> **Looking for v1 documentation?** If you installed `mistralai` from PyPI (e.g., `pip install mistralai`), you are using **v1** of the SDK. The documentation on this branch (`main`) is for **v2**, which is not yet released on PyPI. +> +> **➡️ [Go to the v1 branch for v1 documentation](https://github.com/mistralai/client-python/tree/v1)** + ## Migration warning This documentation is for Mistral AI SDK v2. 
You can find more details on how to migrate from v1 to v2 [here](MIGRATION.md) From 969c992f1decf803bcc6e61007ab37c4d596ca35 Mon Sep 17 00:00:00 2001 From: Nelson PROIA <144663685+Nelson-PROIA@users.noreply.github.com> Date: Tue, 24 Feb 2026 16:56:30 +0100 Subject: [PATCH 208/223] feat!: align Azure/GCP SDKs with namespace vision (API-1988) (#351) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Migrate Azure/GCP provider SDKs to PEP 420 namespace sub-packages: - mistralai_azure → mistralai.azure.client.MistralAzure - mistralai_gcp → mistralai.gcp.client.MistralGCP Azure: auto-inject api_version query param, warn on custom client conflict GCP: auto-detect credentials via google.auth.default(), refresh tokens only when expired, dynamically construct Vertex AI paths from model name Added 348 parity tests, async FIM integration tests, lint coverage for provider packages. --- .genignore | 2 +- .pre-commit-config.yaml | 6 +- .speakeasy/workflow.lock | 26 +- .speakeasy/workflow.yaml | 4 +- MIGRATION.md | 20 + README.md | 98 +-- examples/azure/.env.example | 4 + examples/azure/az_chat_no_streaming.py | 15 - examples/azure/chat_no_streaming.py | 19 +- examples/gcp/.env.example | 3 + examples/gcp/async_chat_no_streaming.py | 36 +- examples/gcp/gcp_async_chat_no_streaming.py | 24 - packages/azure/.genignore | 6 + .../{mistralai_azure => azure}/.gitattributes | 0 .../{mistralai_azure => azure}/.gitignore | 0 .../.speakeasy/gen.lock | 711 +++++++++--------- .../.speakeasy/gen.yaml | 21 +- .../CONTRIBUTING.md | 0 packages/{mistralai_azure => azure}/README.md | 282 ++++--- .../{mistralai_azure => azure}/RELEASES.md | 0 packages/azure/USAGE.md | 70 ++ .../docs/models/arguments.md | 0 .../docs/models/assistantmessage.md | 4 +- .../docs/models/assistantmessagecontent.md | 0 .../docs/models/chatcompletionchoice.md | 0 .../chatcompletionchoicefinishreason.md | 0 .../docs/models/chatcompletionrequest.md | 2 +- 
.../models/chatcompletionrequestmessage.md} | 2 +- .../docs/models/chatcompletionrequeststop.md | 0 .../models/chatcompletionrequesttoolchoice.md | 0 .../docs/models/chatcompletionresponse.md | 0 .../models/chatcompletionstreamrequest.md | 4 +- .../chatcompletionstreamrequestmessage.md} | 2 +- .../chatcompletionstreamrequeststop.md} | 2 +- .../chatcompletionstreamrequesttoolchoice.md | 0 .../docs/models/completionchunk.md | 0 .../docs/models/completionevent.md | 0 .../models/completionresponsestreamchoice.md | 10 + ...letionresponsestreamchoicefinishreason.md} | 2 +- .../docs/models/contentchunk.md | 0 packages/azure/docs/models/deltamessage.md | 10 + .../docs/models/deltamessagecontent.md} | 2 +- .../docs/models/document.md | 0 .../azure/docs/models/documenturlchunk.md | 10 + .../docs/models/filechunk.md | 0 .../docs/models/format_.md | 0 .../docs/models/function.md | 0 .../docs/models/functioncall.md | 0 .../docs/models/functionname.md | 0 .../docs/models/httpvalidationerror.md | 0 packages/azure/docs/models/imagedetail.md | 10 + packages/azure/docs/models/imageurl.md | 9 + packages/azure/docs/models/imageurlchunk.md | 11 + .../docs/models/imageurlunion.md} | 2 +- .../docs/models/jsonschema.md | 0 .../docs/models/loc.md | 0 .../docs/models/mistralpromptmode.md | 0 .../docs/models/ocrimageobject.md | 0 .../docs/models/ocrpagedimensions.md | 0 .../docs/models/ocrpageobject.md | 0 .../docs/models/ocrrequest.md | 0 .../docs/models/ocrresponse.md | 0 .../docs/models/ocrtableobject.md | 0 .../docs/models/ocrusageinfo.md | 0 .../docs/models/prediction.md | 0 packages/azure/docs/models/referencechunk.md | 9 + .../docs/models/responseformat.md | 0 .../docs/models/responseformats.md | 0 .../docs/models/security.md | 0 .../docs/models/systemmessage.md | 4 +- .../docs/models/systemmessagecontent.md | 0 .../docs/models/systemmessagecontentchunks.md | 0 .../docs/models/tableformat.md | 0 packages/azure/docs/models/textchunk.md | 9 + .../docs/models/thinkchunk.md | 4 +- 
.../docs/models/thinking.md | 0 .../docs/models/tool.md | 0 .../docs/models/toolcall.md | 0 .../docs/models/toolchoice.md | 0 .../docs/models/toolchoiceenum.md | 0 .../docs/models/toolmessage.md | 4 +- .../docs/models/toolmessagecontent.md | 0 .../docs/models/tooltypes.md | 0 .../docs/models/usageinfo.md | 0 .../docs/models/usermessage.md | 4 +- .../docs/models/usermessagecontent.md | 0 .../docs/models/utils/retryconfig.md | 0 .../docs/models/validationerror.md | 0 .../docs/sdks/chat/README.md | 42 +- .../docs/sdks/mistralazure/README.md | 0 packages/{mistralai_azure => azure}/py.typed | 0 packages/{mistralai_azure => azure}/pylintrc | 0 .../{mistralai_azure => azure}/pyproject.toml | 15 +- .../scripts/prepare_readme.py | 2 +- .../scripts/publish.sh | 0 .../src/mistralai/azure/client}/__init__.py | 0 .../azure/client}/_hooks/__init__.py | 0 .../azure/client}/_hooks/registration.py | 9 +- .../azure/client}/_hooks/sdkhooks.py | 2 +- .../mistralai/azure/client}/_hooks/types.py | 4 +- .../src/mistralai/azure/client}/_version.py | 6 +- .../src/mistralai/azure/client}/basesdk.py | 10 +- .../src/mistralai/azure/client}/chat.py | 54 +- .../src/mistralai/azure/client}/httpclient.py | 0 .../azure/client}/models/__init__.py | 114 +-- .../azure/client}/models/assistantmessage.py | 23 +- .../client}/models/chatcompletionchoice.py | 2 +- .../client}/models/chatcompletionrequest.py | 14 +- .../client}/models/chatcompletionresponse.py | 2 +- .../models/chatcompletionstreamrequest.py | 26 +- .../azure/client}/models/completionchunk.py | 2 +- .../azure/client}/models/completionevent.py | 2 +- .../models/completionresponsestreamchoice.py | 13 +- .../azure/client}/models/contentchunk.py | 2 +- .../azure/client}/models/deltamessage.py | 14 +- .../azure/client}/models/documenturlchunk.py | 24 +- .../azure/client}/models/filechunk.py | 4 +- .../azure/client}/models/function.py | 2 +- .../azure/client}/models/functioncall.py | 2 +- .../azure/client}/models/functionname.py | 2 +- 
.../client}/models/httpvalidationerror.py | 4 +- .../azure/client/models/imagedetail.py | 15 + .../azure/client}/models/imageurl.py | 7 +- .../azure/client/models/imageurlchunk.py | 38 + .../azure/client}/models/jsonschema.py | 2 +- .../azure/client}/models/mistralazureerror.py | 0 .../azure/client}/models/mistralpromptmode.py | 2 +- .../azure/client}/models/no_response_error.py | 0 .../azure/client}/models/ocrimageobject.py | 2 +- .../azure/client}/models/ocrpagedimensions.py | 2 +- .../azure/client}/models/ocrpageobject.py | 2 +- .../azure/client}/models/ocrrequest.py | 2 +- .../azure/client}/models/ocrresponse.py | 2 +- .../azure/client}/models/ocrtableobject.py | 13 +- .../azure/client}/models/ocrusageinfo.py | 2 +- .../azure/client}/models/prediction.py | 4 +- .../azure/client/models/referencechunk.py | 25 + .../azure/client}/models/responseformat.py | 2 +- .../azure/client}/models/responseformats.py | 0 .../client}/models/responsevalidationerror.py | 2 +- .../azure/client}/models/sdkerror.py | 2 +- .../azure/client}/models/security.py | 4 +- .../azure/client}/models/systemmessage.py | 19 +- .../models/systemmessagecontentchunks.py | 6 +- .../azure/client/models/textchunk.py | 23 + .../azure/client}/models/thinkchunk.py | 19 +- .../mistralai/azure/client}/models/tool.py | 2 +- .../azure/client}/models/toolcall.py | 2 +- .../azure/client}/models/toolchoice.py | 2 +- .../azure/client}/models/toolchoiceenum.py | 0 .../azure/client}/models/toolmessage.py | 23 +- .../azure/client}/models/tooltypes.py | 2 +- .../azure/client}/models/usageinfo.py | 2 +- .../azure/client}/models/usermessage.py | 21 +- .../azure/client}/models/validationerror.py | 2 +- .../src/mistralai/azure/client}/ocr.py | 8 +- .../src/mistralai/azure/client}/py.typed | 0 .../src/mistralai/azure/client}/sdk.py | 108 ++- .../azure/client}/sdkconfiguration.py | 4 +- .../mistralai/azure/client}/types/__init__.py | 0 .../azure/client}/types/basemodel.py | 0 .../mistralai/azure/client}/utils/__init__.py 
| 0 .../azure/client}/utils/annotations.py | 0 .../azure/client}/utils/datetimes.py | 0 .../mistralai/azure/client}/utils/enums.py | 0 .../azure/client}/utils/eventstreaming.py | 0 .../mistralai/azure/client}/utils/forms.py | 0 .../mistralai/azure/client}/utils/headers.py | 0 .../mistralai/azure/client}/utils/logger.py | 0 .../mistralai/azure/client}/utils/metadata.py | 0 .../azure/client}/utils/queryparams.py | 0 .../azure/client}/utils/requestbodies.py | 0 .../mistralai/azure/client}/utils/retries.py | 0 .../mistralai/azure/client}/utils/security.py | 0 .../azure/client}/utils/serializers.py | 0 .../client}/utils/unmarshal_json_response.py | 2 +- .../src/mistralai/azure/client}/utils/url.py | 0 .../mistralai/azure/client}/utils/values.py | 0 packages/{mistralai_azure => azure}/uv.lock | 2 +- packages/gcp/.genignore | 6 + .../{mistralai_gcp => gcp}/.gitattributes | 0 packages/{mistralai_gcp => gcp}/.gitignore | 0 .../.speakeasy/gen.lock | 659 ++++++++-------- .../.speakeasy/gen.yaml | 24 +- .../{mistralai_gcp => gcp}/CONTRIBUTING.md | 0 packages/{mistralai_gcp => gcp}/README.md | 185 ++--- packages/{mistralai_gcp => gcp}/RELEASES.md | 0 packages/gcp/USAGE.md | 61 ++ .../docs/models/arguments.md | 0 .../docs/models/assistantmessage.md | 4 +- .../docs/models/assistantmessagecontent.md | 0 .../docs/models/chatcompletionchoice.md | 0 .../chatcompletionchoicefinishreason.md | 0 .../docs/models/chatcompletionrequest.md | 2 +- .../models/chatcompletionrequestmessage.md} | 2 +- .../docs/models/chatcompletionrequeststop.md | 0 .../models/chatcompletionrequesttoolchoice.md | 0 .../docs/models/chatcompletionresponse.md | 0 .../models/chatcompletionstreamrequest.md | 4 +- .../chatcompletionstreamrequestmessage.md} | 2 +- .../chatcompletionstreamrequeststop.md} | 2 +- .../chatcompletionstreamrequesttoolchoice.md | 0 .../docs/models/completionchunk.md | 0 .../docs/models/completionevent.md | 0 .../models/completionresponsestreamchoice.md | 10 + 
...letionresponsestreamchoicefinishreason.md} | 2 +- .../docs/models/contentchunk.md | 0 packages/gcp/docs/models/deltamessage.md | 10 + .../docs/models/deltamessagecontent.md} | 2 +- .../docs/models/fimcompletionrequest.md | 0 .../docs/models/fimcompletionrequeststop.md | 0 .../docs/models/fimcompletionresponse.md | 0 .../docs/models/fimcompletionstreamrequest.md | 0 .../models/fimcompletionstreamrequeststop.md | 0 .../docs/models/function.md | 0 .../docs/models/functioncall.md | 0 .../docs/models/functionname.md | 0 .../docs/models/httpvalidationerror.md | 0 packages/gcp/docs/models/imagedetail.md | 10 + packages/gcp/docs/models/imageurl.md | 9 + packages/gcp/docs/models/imageurlchunk.md | 11 + .../docs/models/imageurlunion.md} | 2 +- .../docs/models/jsonschema.md | 0 .../{mistralai_gcp => gcp}/docs/models/loc.md | 0 .../docs/models/mistralpromptmode.md | 0 .../docs/models/prediction.md | 0 packages/gcp/docs/models/referencechunk.md | 9 + .../docs/models/responseformat.md | 0 .../docs/models/responseformats.md | 0 .../docs/models/security.md | 0 .../docs/models/systemmessage.md | 4 +- .../docs/models/systemmessagecontent.md | 0 .../docs/models/systemmessagecontentchunks.md | 0 packages/gcp/docs/models/textchunk.md | 9 + .../docs/models/thinkchunk.md | 4 +- .../docs/models/thinking.md | 0 .../docs/models/tool.md | 0 .../docs/models/toolcall.md | 0 .../docs/models/toolchoice.md | 0 .../docs/models/toolchoiceenum.md | 0 .../docs/models/toolmessage.md | 4 +- .../docs/models/toolmessagecontent.md | 0 .../docs/models/tooltypes.md | 0 .../docs/models/usageinfo.md | 0 .../docs/models/usermessage.md | 4 +- .../docs/models/usermessagecontent.md | 0 .../docs/models/utils/retryconfig.md | 0 .../docs/models/validationerror.md | 0 .../docs/sdks/chat/README.md | 32 +- .../docs/sdks/fim/README.md | 28 +- .../docs/sdks/mistralgcp/README.md | 0 packages/{mistralai_gcp => gcp}/py.typed | 0 packages/{mistralai_gcp => gcp}/pylintrc | 0 .../{mistralai_gcp => gcp}/pyproject.toml | 19 
+- .../scripts/prepare_readme.py | 2 +- .../{mistralai_gcp => gcp}/scripts/publish.sh | 0 .../src/mistralai/gcp/client}/__init__.py | 0 .../mistralai/gcp/client}/_hooks/__init__.py | 0 .../gcp/client/_hooks/registration.py | 67 ++ .../mistralai/gcp/client}/_hooks/sdkhooks.py | 2 +- .../src/mistralai/gcp/client}/_hooks/types.py | 4 +- .../src/mistralai/gcp/client}/_version.py | 4 +- .../src/mistralai/gcp/client}/basesdk.py | 10 +- .../src/mistralai/gcp/client}/chat.py | 54 +- .../src/mistralai/gcp/client}/fim.py | 10 +- .../src/mistralai/gcp/client}/httpclient.py | 0 .../mistralai/gcp/client}/models/__init__.py | 110 +-- .../gcp/client}/models/assistantmessage.py | 23 +- .../client}/models/chatcompletionchoice.py | 2 +- .../client}/models/chatcompletionrequest.py | 14 +- .../client}/models/chatcompletionresponse.py | 2 +- .../models/chatcompletionstreamrequest.py | 26 +- .../gcp/client}/models/completionchunk.py | 2 +- .../gcp/client}/models/completionevent.py | 2 +- .../models/completionresponsestreamchoice.py | 13 +- .../gcp/client}/models/contentchunk.py | 2 +- .../gcp/client}/models/deltamessage.py | 14 +- .../client}/models/fimcompletionrequest.py | 2 +- .../client}/models/fimcompletionresponse.py | 2 +- .../models/fimcompletionstreamrequest.py | 2 +- .../mistralai/gcp/client}/models/function.py | 2 +- .../gcp/client}/models/functioncall.py | 2 +- .../gcp/client}/models/functionname.py | 2 +- .../gcp/client}/models/httpvalidationerror.py | 6 +- .../gcp/client/models/imagedetail.py | 15 + .../mistralai/gcp/client}/models/imageurl.py | 7 +- .../gcp/client/models/imageurlchunk.py | 36 + .../gcp/client}/models/jsonschema.py | 2 +- .../gcp/client}/models/mistralgcperror.py | 2 +- .../gcp/client}/models/mistralpromptmode.py | 2 +- .../gcp/client}/models/no_response_error.py | 0 .../gcp/client}/models/prediction.py | 4 +- .../gcp/client/models/referencechunk.py | 25 + .../gcp/client}/models/responseformat.py | 2 +- .../gcp/client}/models/responseformats.py | 0 
.../client}/models/responsevalidationerror.py | 4 +- .../mistralai/gcp/client}/models/sdkerror.py | 4 +- .../mistralai/gcp/client}/models/security.py | 4 +- .../gcp/client}/models/systemmessage.py | 19 +- .../models/systemmessagecontentchunks.py | 6 +- .../mistralai/gcp/client/models/textchunk.py | 23 + .../gcp/client}/models/thinkchunk.py | 19 +- .../src/mistralai/gcp/client}/models/tool.py | 2 +- .../mistralai/gcp/client}/models/toolcall.py | 2 +- .../gcp/client}/models/toolchoice.py | 2 +- .../gcp/client}/models/toolchoiceenum.py | 0 .../gcp/client}/models/toolmessage.py | 23 +- .../mistralai/gcp/client}/models/tooltypes.py | 2 +- .../mistralai/gcp/client}/models/usageinfo.py | 2 +- .../gcp/client}/models/usermessage.py | 21 +- .../gcp/client}/models/validationerror.py | 2 +- .../src/mistralai/gcp/client}/py.typed | 0 packages/gcp/src/mistralai/gcp/client/sdk.py | 243 ++++++ .../mistralai/gcp/client}/sdkconfiguration.py | 4 +- .../mistralai/gcp/client}/types/__init__.py | 0 .../mistralai/gcp/client}/types/basemodel.py | 0 .../mistralai/gcp/client}/utils/__init__.py | 0 .../gcp/client}/utils/annotations.py | 0 .../mistralai/gcp/client}/utils/datetimes.py | 0 .../src/mistralai/gcp/client}/utils/enums.py | 0 .../gcp/client}/utils/eventstreaming.py | 0 .../src/mistralai/gcp/client}/utils/forms.py | 0 .../mistralai/gcp/client}/utils/headers.py | 0 .../src/mistralai/gcp/client}/utils/logger.py | 0 .../mistralai/gcp/client}/utils/metadata.py | 0 .../gcp/client}/utils/queryparams.py | 0 .../gcp/client}/utils/requestbodies.py | 0 .../mistralai/gcp/client}/utils/retries.py | 0 .../mistralai/gcp/client}/utils/security.py | 0 .../gcp/client}/utils/serializers.py | 0 .../client}/utils/unmarshal_json_response.py | 2 +- .../src/mistralai/gcp/client}/utils/url.py | 0 .../src/mistralai/gcp/client}/utils/values.py | 0 packages/{mistralai_gcp => gcp}/uv.lock | 62 +- packages/mistralai_azure/.genignore | 5 - .../mistralai_azure/.vscode/settings.json | 6 - 
packages/mistralai_azure/USAGE.md | 55 -- .../docs/models/assistantmessagerole.md | 8 - .../models/completionresponsestreamchoice.md | 10 - .../docs/models/deltamessage.md | 10 - .../docs/models/documenturlchunk.md | 10 - .../docs/models/documenturlchunktype.md | 8 - .../mistralai_azure/docs/models/imageurl.md | 9 - .../docs/models/imageurlchunk.md | 11 - .../docs/models/imageurlchunktype.md | 8 - .../docs/models/referencechunk.md | 9 - .../docs/models/referencechunktype.md | 8 - packages/mistralai_azure/docs/models/role.md | 8 - .../mistralai_azure/docs/models/textchunk.md | 9 - .../docs/models/thinkchunktype.md | 8 - .../docs/models/toolmessagerole.md | 8 - packages/mistralai_azure/docs/models/type.md | 8 - .../docs/models/usermessagerole.md | 8 - .../_hooks/custom_user_agent.py | 22 - .../mistralai_azure/_hooks/registration.py | 15 - .../mistralai_azure/models/imageurlchunk.py | 33 - .../mistralai_azure/models/referencechunk.py | 20 - .../src/mistralai_azure/models/textchunk.py | 20 - packages/mistralai_gcp/.genignore | 5 - packages/mistralai_gcp/.vscode/settings.json | 6 - packages/mistralai_gcp/USAGE.md | 51 -- .../docs/models/assistantmessagerole.md | 8 - .../models/completionresponsestreamchoice.md | 10 - .../mistralai_gcp/docs/models/deltamessage.md | 10 - .../mistralai_gcp/docs/models/imageurl.md | 9 - .../docs/models/imageurlchunk.md | 11 - .../docs/models/imageurlchunktype.md | 8 - .../docs/models/referencechunk.md | 9 - .../docs/models/referencechunktype.md | 8 - packages/mistralai_gcp/docs/models/role.md | 8 - .../mistralai_gcp/docs/models/textchunk.md | 9 - .../docs/models/thinkchunktype.md | 8 - .../docs/models/toolmessagerole.md | 8 - packages/mistralai_gcp/docs/models/type.md | 8 - .../docs/models/usermessagerole.md | 8 - .../mistralai_gcp/_hooks/custom_user_agent.py | 22 - .../src/mistralai_gcp/models/imageurlchunk.py | 33 - .../mistralai_gcp/models/referencechunk.py | 20 - .../src/mistralai_gcp/models/textchunk.py | 20 - 
.../mistralai_gcp/src/mistralai_gcp/sdk.py | 233 ------ pylintrc | 2 +- pyproject.toml | 30 +- scripts/lint_custom_code.sh | 42 +- scripts/run_examples.sh | 29 +- tasks.py | 4 +- tests/test_azure_integration.py | 433 +++++++++++ tests/test_azure_v2_parity.py | 269 +++++++ tests/test_gcp_integration.py | 512 +++++++++++++ tests/test_gcp_v2_parity.py | 330 ++++++++ uv.lock | 4 +- 390 files changed, 4238 insertions(+), 2521 deletions(-) create mode 100644 examples/azure/.env.example delete mode 100644 examples/azure/az_chat_no_streaming.py create mode 100644 examples/gcp/.env.example delete mode 100755 examples/gcp/gcp_async_chat_no_streaming.py create mode 100644 packages/azure/.genignore rename packages/{mistralai_azure => azure}/.gitattributes (100%) rename packages/{mistralai_azure => azure}/.gitignore (100%) rename packages/{mistralai_azure => azure}/.speakeasy/gen.lock (69%) rename packages/{mistralai_azure => azure}/.speakeasy/gen.yaml (78%) rename packages/{mistralai_azure => azure}/CONTRIBUTING.md (100%) rename packages/{mistralai_azure => azure}/README.md (57%) rename packages/{mistralai_azure => azure}/RELEASES.md (100%) create mode 100644 packages/azure/USAGE.md rename packages/{mistralai_azure => azure}/docs/models/arguments.md (100%) rename packages/{mistralai_azure => azure}/docs/models/assistantmessage.md (95%) rename packages/{mistralai_azure => azure}/docs/models/assistantmessagecontent.md (100%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionchoice.md (100%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionchoicefinishreason.md (100%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionrequest.md (99%) rename packages/{mistralai_azure/docs/models/messages.md => azure/docs/models/chatcompletionrequestmessage.md} (92%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionrequeststop.md (100%) rename packages/{mistralai_azure => 
azure}/docs/models/chatcompletionrequesttoolchoice.md (100%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionresponse.md (100%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionstreamrequest.md (99%) rename packages/{mistralai_gcp/docs/models/chatcompletionrequestmessages.md => azure/docs/models/chatcompletionstreamrequestmessage.md} (91%) rename packages/{mistralai_gcp/docs/models/stop.md => azure/docs/models/chatcompletionstreamrequeststop.md} (88%) rename packages/{mistralai_azure => azure}/docs/models/chatcompletionstreamrequesttoolchoice.md (100%) rename packages/{mistralai_azure => azure}/docs/models/completionchunk.md (100%) rename packages/{mistralai_azure => azure}/docs/models/completionevent.md (100%) create mode 100644 packages/azure/docs/models/completionresponsestreamchoice.md rename packages/{mistralai_gcp/docs/models/finishreason.md => azure/docs/models/completionresponsestreamchoicefinishreason.md} (81%) rename packages/{mistralai_azure => azure}/docs/models/contentchunk.md (100%) create mode 100644 packages/azure/docs/models/deltamessage.md rename packages/{mistralai_gcp/docs/models/content.md => azure/docs/models/deltamessagecontent.md} (89%) rename packages/{mistralai_azure => azure}/docs/models/document.md (100%) create mode 100644 packages/azure/docs/models/documenturlchunk.md rename packages/{mistralai_azure => azure}/docs/models/filechunk.md (100%) rename packages/{mistralai_azure => azure}/docs/models/format_.md (100%) rename packages/{mistralai_azure => azure}/docs/models/function.md (100%) rename packages/{mistralai_azure => azure}/docs/models/functioncall.md (100%) rename packages/{mistralai_azure => azure}/docs/models/functionname.md (100%) rename packages/{mistralai_azure => azure}/docs/models/httpvalidationerror.md (100%) create mode 100644 packages/azure/docs/models/imagedetail.md create mode 100644 packages/azure/docs/models/imageurl.md create mode 100644 
packages/azure/docs/models/imageurlchunk.md rename packages/{mistralai_gcp/docs/models/imageurlchunkimageurl.md => azure/docs/models/imageurlunion.md} (86%) rename packages/{mistralai_azure => azure}/docs/models/jsonschema.md (100%) rename packages/{mistralai_azure => azure}/docs/models/loc.md (100%) rename packages/{mistralai_azure => azure}/docs/models/mistralpromptmode.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrimageobject.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrpagedimensions.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrpageobject.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrrequest.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrresponse.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrtableobject.md (100%) rename packages/{mistralai_azure => azure}/docs/models/ocrusageinfo.md (100%) rename packages/{mistralai_azure => azure}/docs/models/prediction.md (100%) create mode 100644 packages/azure/docs/models/referencechunk.md rename packages/{mistralai_azure => azure}/docs/models/responseformat.md (100%) rename packages/{mistralai_azure => azure}/docs/models/responseformats.md (100%) rename packages/{mistralai_azure => azure}/docs/models/security.md (100%) rename packages/{mistralai_gcp => azure}/docs/models/systemmessage.md (88%) rename packages/{mistralai_azure => azure}/docs/models/systemmessagecontent.md (100%) rename packages/{mistralai_azure => azure}/docs/models/systemmessagecontentchunks.md (100%) rename packages/{mistralai_azure => azure}/docs/models/tableformat.md (100%) create mode 100644 packages/azure/docs/models/textchunk.md rename packages/{mistralai_azure => azure}/docs/models/thinkchunk.md (91%) rename packages/{mistralai_azure => azure}/docs/models/thinking.md (100%) rename packages/{mistralai_azure => azure}/docs/models/tool.md (100%) rename packages/{mistralai_azure => azure}/docs/models/toolcall.md (100%) 
rename packages/{mistralai_azure => azure}/docs/models/toolchoice.md (100%) rename packages/{mistralai_azure => azure}/docs/models/toolchoiceenum.md (100%) rename packages/{mistralai_gcp => azure}/docs/models/toolmessage.md (92%) rename packages/{mistralai_azure => azure}/docs/models/toolmessagecontent.md (100%) rename packages/{mistralai_azure => azure}/docs/models/tooltypes.md (100%) rename packages/{mistralai_azure => azure}/docs/models/usageinfo.md (100%) rename packages/{mistralai_gcp => azure}/docs/models/usermessage.md (89%) rename packages/{mistralai_azure => azure}/docs/models/usermessagecontent.md (100%) rename packages/{mistralai_azure => azure}/docs/models/utils/retryconfig.md (100%) rename packages/{mistralai_azure => azure}/docs/models/validationerror.md (100%) rename packages/{mistralai_azure => azure}/docs/sdks/chat/README.md (95%) rename packages/{mistralai_azure => azure}/docs/sdks/mistralazure/README.md (100%) rename packages/{mistralai_azure => azure}/py.typed (100%) rename packages/{mistralai_azure => azure}/pylintrc (100%) rename packages/{mistralai_azure => azure}/pyproject.toml (79%) rename packages/{mistralai_gcp => azure}/scripts/prepare_readme.py (96%) rename packages/{mistralai_azure => azure}/scripts/publish.sh (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/__init__.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/_hooks/__init__.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/_hooks/registration.py (70%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/_hooks/sdkhooks.py (97%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/_hooks/types.py (95%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/_version.py (69%) rename packages/{mistralai_azure/src/mistralai_azure => 
azure/src/mistralai/azure/client}/basesdk.py (98%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/chat.py (96%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/httpclient.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/__init__.py (85%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/assistantmessage.py (80%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/chatcompletionchoice.py (91%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/chatcompletionrequest.py (97%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/chatcompletionresponse.py (92%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/chatcompletionstreamrequest.py (94%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/completionchunk.py (94%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/completionevent.py (87%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/completionresponsestreamchoice.py (82%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/contentchunk.py (93%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/deltamessage.py (81%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/documenturlchunk.py (72%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/filechunk.py (83%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/function.py (90%) rename 
packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/functioncall.py (91%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/functionname.py (89%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/httpvalidationerror.py (87%) create mode 100644 packages/azure/src/mistralai/azure/client/models/imagedetail.py rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/imageurl.py (87%) create mode 100644 packages/azure/src/mistralai/azure/client/models/imageurlchunk.py rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/jsonschema.py (97%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/mistralazureerror.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/mistralpromptmode.py (89%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/no_response_error.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrimageobject.py (98%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrpagedimensions.py (91%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrpageobject.py (98%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrrequest.py (99%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrresponse.py (97%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrtableobject.py (78%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/ocrusageinfo.py (97%) rename 
packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/prediction.py (89%) create mode 100644 packages/azure/src/mistralai/azure/client/models/referencechunk.py rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/responseformat.py (98%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/responseformats.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/responsevalidationerror.py (92%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/sdkerror.py (95%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/security.py (80%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/systemmessage.py (57%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/systemmessagecontentchunks.py (66%) create mode 100644 packages/azure/src/mistralai/azure/client/models/textchunk.py rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/thinkchunk.py (65%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/tool.py (89%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/toolcall.py (92%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/toolchoice.py (93%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/toolchoiceenum.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/toolmessage.py (77%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/models/tooltypes.py (77%) rename packages/{mistralai_azure/src/mistralai_azure => 
azure/src/mistralai/azure/client}/models/usageinfo.py (98%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/usermessage.py (73%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/models/validationerror.py (90%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/ocr.py (97%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/py.typed (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/sdk.py (59%) rename packages/{mistralai_gcp/src/mistralai_gcp => azure/src/mistralai/azure/client}/sdkconfiguration.py (93%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/types/__init__.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/types/basemodel.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/__init__.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/annotations.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/datetimes.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/enums.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/eventstreaming.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/forms.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/headers.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/logger.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/metadata.py (100%) rename 
packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/queryparams.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/requestbodies.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/retries.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/security.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/serializers.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/unmarshal_json_response.py (95%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/url.py (100%) rename packages/{mistralai_azure/src/mistralai_azure => azure/src/mistralai/azure/client}/utils/values.py (100%) rename packages/{mistralai_azure => azure}/uv.lock (99%) create mode 100644 packages/gcp/.genignore rename packages/{mistralai_gcp => gcp}/.gitattributes (100%) rename packages/{mistralai_gcp => gcp}/.gitignore (100%) rename packages/{mistralai_gcp => gcp}/.speakeasy/gen.lock (55%) rename packages/{mistralai_gcp => gcp}/.speakeasy/gen.yaml (78%) rename packages/{mistralai_gcp => gcp}/CONTRIBUTING.md (100%) rename packages/{mistralai_gcp => gcp}/README.md (70%) rename packages/{mistralai_gcp => gcp}/RELEASES.md (100%) create mode 100644 packages/gcp/USAGE.md rename packages/{mistralai_gcp => gcp}/docs/models/arguments.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/assistantmessage.md (95%) rename packages/{mistralai_gcp => gcp}/docs/models/assistantmessagecontent.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionchoice.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionchoicefinishreason.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionrequest.md (99%) rename 
packages/{mistralai_gcp/docs/models/messages.md => gcp/docs/models/chatcompletionrequestmessage.md} (92%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionrequeststop.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionrequesttoolchoice.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionresponse.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionstreamrequest.md (99%) rename packages/{mistralai_azure/docs/models/chatcompletionrequestmessages.md => gcp/docs/models/chatcompletionstreamrequestmessage.md} (91%) rename packages/{mistralai_azure/docs/models/stop.md => gcp/docs/models/chatcompletionstreamrequeststop.md} (88%) rename packages/{mistralai_gcp => gcp}/docs/models/chatcompletionstreamrequesttoolchoice.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/completionchunk.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/completionevent.md (100%) create mode 100644 packages/gcp/docs/models/completionresponsestreamchoice.md rename packages/{mistralai_azure/docs/models/finishreason.md => gcp/docs/models/completionresponsestreamchoicefinishreason.md} (81%) rename packages/{mistralai_gcp => gcp}/docs/models/contentchunk.md (100%) create mode 100644 packages/gcp/docs/models/deltamessage.md rename packages/{mistralai_azure/docs/models/content.md => gcp/docs/models/deltamessagecontent.md} (89%) rename packages/{mistralai_gcp => gcp}/docs/models/fimcompletionrequest.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/fimcompletionrequeststop.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/fimcompletionresponse.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/fimcompletionstreamrequest.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/fimcompletionstreamrequeststop.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/function.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/functioncall.md (100%) rename 
packages/{mistralai_gcp => gcp}/docs/models/functionname.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/httpvalidationerror.md (100%) create mode 100644 packages/gcp/docs/models/imagedetail.md create mode 100644 packages/gcp/docs/models/imageurl.md create mode 100644 packages/gcp/docs/models/imageurlchunk.md rename packages/{mistralai_azure/docs/models/imageurlchunkimageurl.md => gcp/docs/models/imageurlunion.md} (86%) rename packages/{mistralai_gcp => gcp}/docs/models/jsonschema.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/loc.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/mistralpromptmode.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/prediction.md (100%) create mode 100644 packages/gcp/docs/models/referencechunk.md rename packages/{mistralai_gcp => gcp}/docs/models/responseformat.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/responseformats.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/security.md (100%) rename packages/{mistralai_azure => gcp}/docs/models/systemmessage.md (88%) rename packages/{mistralai_gcp => gcp}/docs/models/systemmessagecontent.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/systemmessagecontentchunks.md (100%) create mode 100644 packages/gcp/docs/models/textchunk.md rename packages/{mistralai_gcp => gcp}/docs/models/thinkchunk.md (91%) rename packages/{mistralai_gcp => gcp}/docs/models/thinking.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/tool.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/toolcall.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/toolchoice.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/toolchoiceenum.md (100%) rename packages/{mistralai_azure => gcp}/docs/models/toolmessage.md (92%) rename packages/{mistralai_gcp => gcp}/docs/models/toolmessagecontent.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/tooltypes.md (100%) rename packages/{mistralai_gcp => 
gcp}/docs/models/usageinfo.md (100%) rename packages/{mistralai_azure => gcp}/docs/models/usermessage.md (89%) rename packages/{mistralai_gcp => gcp}/docs/models/usermessagecontent.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/utils/retryconfig.md (100%) rename packages/{mistralai_gcp => gcp}/docs/models/validationerror.md (100%) rename packages/{mistralai_gcp => gcp}/docs/sdks/chat/README.md (96%) rename packages/{mistralai_gcp => gcp}/docs/sdks/fim/README.md (94%) rename packages/{mistralai_gcp => gcp}/docs/sdks/mistralgcp/README.md (100%) rename packages/{mistralai_gcp => gcp}/py.typed (100%) rename packages/{mistralai_gcp => gcp}/pylintrc (100%) rename packages/{mistralai_gcp => gcp}/pyproject.toml (78%) rename packages/{mistralai_azure => gcp}/scripts/prepare_readme.py (96%) rename packages/{mistralai_gcp => gcp}/scripts/publish.sh (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/__init__.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/_hooks/__init__.py (100%) create mode 100644 packages/gcp/src/mistralai/gcp/client/_hooks/registration.py rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/_hooks/sdkhooks.py (97%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/_hooks/types.py (96%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/_version.py (76%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/basesdk.py (98%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/chat.py (96%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/fim.py (98%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/httpclient.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/__init__.py (85%) rename 
packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/assistantmessage.py (81%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/chatcompletionchoice.py (91%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/chatcompletionrequest.py (97%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/chatcompletionresponse.py (93%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/chatcompletionstreamrequest.py (94%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/completionchunk.py (94%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/completionevent.py (88%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/completionresponsestreamchoice.py (82%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/contentchunk.py (93%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/deltamessage.py (81%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/fimcompletionrequest.py (99%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/fimcompletionresponse.py (93%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/fimcompletionstreamrequest.py (99%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/function.py (91%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/functioncall.py (91%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/functionname.py (89%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/httpvalidationerror.py 
(82%) create mode 100644 packages/gcp/src/mistralai/gcp/client/models/imagedetail.py rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/imageurl.py (88%) create mode 100644 packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/jsonschema.py (97%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/mistralgcperror.py (96%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/mistralpromptmode.py (89%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/no_response_error.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/prediction.py (89%) create mode 100644 packages/gcp/src/mistralai/gcp/client/models/referencechunk.py rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/responseformat.py (98%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/responseformats.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/responsevalidationerror.py (86%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/sdkerror.py (93%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/security.py (81%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/systemmessage.py (57%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/systemmessagecontentchunks.py (66%) create mode 100644 packages/gcp/src/mistralai/gcp/client/models/textchunk.py rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/thinkchunk.py (65%) rename packages/{mistralai_gcp/src/mistralai_gcp => 
gcp/src/mistralai/gcp/client}/models/tool.py (90%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/toolcall.py (92%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/toolchoice.py (94%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/toolchoiceenum.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/toolmessage.py (77%) rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/models/tooltypes.py (78%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/usageinfo.py (98%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/usermessage.py (73%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/models/validationerror.py (90%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/py.typed (100%) create mode 100644 packages/gcp/src/mistralai/gcp/client/sdk.py rename packages/{mistralai_azure/src/mistralai_azure => gcp/src/mistralai/gcp/client}/sdkconfiguration.py (94%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/types/__init__.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/types/basemodel.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/__init__.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/annotations.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/datetimes.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/enums.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/eventstreaming.py (100%) rename 
packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/forms.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/headers.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/logger.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/metadata.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/queryparams.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/requestbodies.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/retries.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/security.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/serializers.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/unmarshal_json_response.py (95%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/url.py (100%) rename packages/{mistralai_gcp/src/mistralai_gcp => gcp/src/mistralai/gcp/client}/utils/values.py (100%) rename packages/{mistralai_gcp => gcp}/uv.lock (92%) delete mode 100644 packages/mistralai_azure/.genignore delete mode 100644 packages/mistralai_azure/.vscode/settings.json delete mode 100644 packages/mistralai_azure/USAGE.md delete mode 100644 packages/mistralai_azure/docs/models/assistantmessagerole.md delete mode 100644 packages/mistralai_azure/docs/models/completionresponsestreamchoice.md delete mode 100644 packages/mistralai_azure/docs/models/deltamessage.md delete mode 100644 packages/mistralai_azure/docs/models/documenturlchunk.md delete mode 100644 packages/mistralai_azure/docs/models/documenturlchunktype.md delete mode 100644 packages/mistralai_azure/docs/models/imageurl.md delete mode 100644 
packages/mistralai_azure/docs/models/imageurlchunk.md delete mode 100644 packages/mistralai_azure/docs/models/imageurlchunktype.md delete mode 100644 packages/mistralai_azure/docs/models/referencechunk.md delete mode 100644 packages/mistralai_azure/docs/models/referencechunktype.md delete mode 100644 packages/mistralai_azure/docs/models/role.md delete mode 100644 packages/mistralai_azure/docs/models/textchunk.md delete mode 100644 packages/mistralai_azure/docs/models/thinkchunktype.md delete mode 100644 packages/mistralai_azure/docs/models/toolmessagerole.md delete mode 100644 packages/mistralai_azure/docs/models/type.md delete mode 100644 packages/mistralai_azure/docs/models/usermessagerole.md delete mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py delete mode 100644 packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py delete mode 100644 packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py delete mode 100644 packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py delete mode 100644 packages/mistralai_azure/src/mistralai_azure/models/textchunk.py delete mode 100644 packages/mistralai_gcp/.genignore delete mode 100644 packages/mistralai_gcp/.vscode/settings.json delete mode 100644 packages/mistralai_gcp/USAGE.md delete mode 100644 packages/mistralai_gcp/docs/models/assistantmessagerole.md delete mode 100644 packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md delete mode 100644 packages/mistralai_gcp/docs/models/deltamessage.md delete mode 100644 packages/mistralai_gcp/docs/models/imageurl.md delete mode 100644 packages/mistralai_gcp/docs/models/imageurlchunk.md delete mode 100644 packages/mistralai_gcp/docs/models/imageurlchunktype.md delete mode 100644 packages/mistralai_gcp/docs/models/referencechunk.md delete mode 100644 packages/mistralai_gcp/docs/models/referencechunktype.md delete mode 100644 packages/mistralai_gcp/docs/models/role.md delete mode 100644 
packages/mistralai_gcp/docs/models/textchunk.md delete mode 100644 packages/mistralai_gcp/docs/models/thinkchunktype.md delete mode 100644 packages/mistralai_gcp/docs/models/toolmessagerole.md delete mode 100644 packages/mistralai_gcp/docs/models/type.md delete mode 100644 packages/mistralai_gcp/docs/models/usermessagerole.md delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py delete mode 100644 packages/mistralai_gcp/src/mistralai_gcp/sdk.py create mode 100644 tests/test_azure_integration.py create mode 100644 tests/test_azure_v2_parity.py create mode 100644 tests/test_gcp_integration.py create mode 100644 tests/test_gcp_v2_parity.py diff --git a/.genignore b/.genignore index b80cf0f6..6bd11b26 100644 --- a/.genignore +++ b/.genignore @@ -1,6 +1,6 @@ pyproject.toml examples/* /utils/* -src/mistral/extra/* +src/mistralai/extra/* pylintrc scripts/prepare_readme.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 39e850eb..9be71784 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,17 +4,17 @@ repos: hooks: - id: ruff args: [--fix] - files: ^(example/|src/mistralai/).*\.py$ + files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ - repo: https://github.com/RobertCraigie/pyright-python rev: v1.1.401 hooks: - id: pyright - files: ^(example/|src/mistralai/).*\.py$ + files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.15.0 hooks: - id: mypy - files: 
^(example/|src/mistralai/).*\.py$ + files: ^(examples/|src/mistralai/|packages/(azure|gcp)/src/mistralai/).*\.py$ exclude: ^src/mistralai/(__init__|sdkhooks|types)\.py$ diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index a9e18489..4aa0af42 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -2,14 +2,14 @@ speakeasyVersion: 1.685.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:d303e640ad565cc8a9801519b20dc7eab226efdfdab951c11256962d9e479f74 - sourceBlobDigest: sha256:6e4c789de61b2c9c604bf581e0abbadae90e360491d95ec4247678f4f70cee87 + sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 + sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e tags: - latest mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:351c4d392b8b2220c337a207e98ed5665ed27fd85de854871a70c4bc2b9c0784 - sourceBlobDigest: sha256:d79b21f70efb93b0cd261d2044939a288beaf8707a7caae86aca5c4d5de3821b + sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 + sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 tags: - latest mistral-openapi: @@ -22,24 +22,24 @@ targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:d303e640ad565cc8a9801519b20dc7eab226efdfdab951c11256962d9e479f74 - sourceBlobDigest: sha256:6e4c789de61b2c9c604bf581e0abbadae90e360491d95ec4247678f4f70cee87 + sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 + sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:0109302b87fa17b0103ef1e372fae76356811b3c552103e659bd5373d537d759 + codeSamplesRevisionDigest: 
sha256:a34c3049c604d0bb67101d042e959f14098964fe784f98975a9201c84dbf44d0 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:351c4d392b8b2220c337a207e98ed5665ed27fd85de854871a70c4bc2b9c0784 - sourceBlobDigest: sha256:d79b21f70efb93b0cd261d2044939a288beaf8707a7caae86aca5c4d5de3821b + sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 + sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:09bb7cbf291076170d228116db05d1c9606af541b301b6564609c4d76633258a + codeSamplesRevisionDigest: sha256:fa36e5999e79c32e8b2c1317cc0d6ed179912ced15194f02b5f80da22e45ae5f mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi sourceRevisionDigest: sha256:74d0de7750f6a1878b68c9da683eba7a447d7c367131d0cb8f5c3b1e05829624 sourceBlobDigest: sha256:41e8354c48993fc29be68959d835ea4f8e0cc1d4b4fbd527afcd970bc02c62a2 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:f37fb6188ad25957bef4cadaa03f454a4f9ab0c045db633a46d9cc89af145ba2 + codeSamplesRevisionDigest: sha256:99fcae1bc81801e3825648a44f5ffa62a8f124e3186e5570be40414de164e7f2 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.685.0 @@ -57,7 +57,7 @@ workflow: mistralai-azure-sdk: target: python source: mistral-azure-source - output: ./packages/mistralai_azure + output: ./packages/azure publish: pypi: token: $pypi_token @@ -68,7 +68,7 @@ workflow: mistralai-gcp-sdk: target: python source: mistral-google-cloud-source - output: ./packages/mistralai_gcp + output: ./packages/gcp publish: pypi: token: $pypi_token diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index b45d6b3b..ba109c09 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -14,7 +14,7 @@ targets: mistralai-azure-sdk: target: python source: 
mistral-azure-source - output: ./packages/mistralai_azure + output: ./packages/azure publish: pypi: token: $pypi_token @@ -25,7 +25,7 @@ targets: mistralai-gcp-sdk: target: python source: mistral-google-cloud-source - output: ./packages/mistralai_gcp + output: ./packages/gcp publish: pypi: token: $pypi_token diff --git a/MIGRATION.md b/MIGRATION.md index 9f39cdb5..906173fe 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -33,6 +33,26 @@ from mistralai.client.types import BaseModel | `from mistralai.types import ...` | `from mistralai.client.types import ...` | | `from mistralai.utils import ...` | `from mistralai.client.utils import ...` | +### Azure & GCP Import Changes + +Azure and GCP SDKs now live under the `mistralai` namespace as separate distributions: + +| v1 | v2 | +|---|---| +| `from mistralai_azure import MistralAzure` | `from mistralai.azure.client import MistralAzure` | +| `from mistralai_azure.models import ...` | `from mistralai.azure.client.models import ...` | +| `from mistralai_gcp import MistralGoogleCloud` | `from mistralai.gcp.client import MistralGCP` | +| `from mistralai_gcp.models import ...` | `from mistralai.gcp.client.models import ...` | + +#### Installation Changes + +| v1 | v2 | +|---|---| +| `pip install mistralai` | `pip install mistralai` (includes Azure and GCP) | +| `pip install mistralai[gcp]` (for GCP auth) | `pip install "mistralai[gcp]"` (for GCP auth dependencies) | + +Azure and GCP are now standalone distributions that can be installed independently of the core SDK. The `mistralai[azure]` and `mistralai[gcp]` extras are syntactic sugar that pull in the respective distributions. 
+ ### What Stays the Same - The `Mistral` client API is unchanged diff --git a/README.md b/README.md index 5c8a1b51..a774a9e1 100644 --- a/README.md +++ b/README.md @@ -104,7 +104,7 @@ It's also possible to write a standalone Python script without needing to set up ```python #!/usr/bin/env -S uv run --script # /// script -# requires-python = ">=3.9" +# requires-python = ">=3.10" # dependencies = [ # "mistralai", # ] @@ -374,38 +374,41 @@ You can run the examples in the `examples/` directory using `uv run`. **Prerequisites** -Before you begin, ensure you have `AZUREAI_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +Before you begin, ensure you have `AZURE_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). +**Step 1: Install** + +```bash +pip install mistralai +``` + +**Step 2: Example Usage** + Here's a basic example to get you started. You can also run [the example in the `examples` directory](/examples/azure). 
```python -import asyncio import os +from mistralai.azure.client import MistralAzure -from mistralai_azure import MistralAzure - +# The SDK automatically injects api-version as a query parameter client = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version="2024-05-01-preview", # Optional, this is the default ) -async def main() -> None: - res = await client.chat.complete_async( - max_tokens= 100, - temperature= 0.5, - messages= [ - { - "content": "Hello there!", - "role": "user" - } - ] - ) - print(res) - -asyncio.run(main()) +res = client.chat.complete( + model=os.environ["AZURE_MODEL"], + messages=[ + { + "role": "user", + "content": "Hello there!", + } + ], +) +print(res.choices[0].message.content) ``` -The documentation for the Azure SDK is available [here](packages/mistralai_azure/README.md). ### Google Cloud @@ -422,40 +425,43 @@ gcloud auth application-default login **Step 1: Install** -Install the extras dependencies specific to Google Cloud: - ```bash +pip install mistralai +# For GCP authentication support (required): pip install "mistralai[gcp]" ``` **Step 2: Example Usage** -Here's a basic example to get you started. - -```python -import asyncio -from mistralai_gcp import MistralGoogleCloud +Here's a basic example to get you started. You can also run [the example in the `examples` directory](/examples/gcp). 
-client = MistralGoogleCloud() +The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` +```python +import os +from mistralai.gcp.client import MistralGCP -async def main() -> None: - res = await client.chat.complete_async( - model= "mistral-small-2402", - messages= [ - { - "content": "Hello there!", - "role": "user" - } - ] - ) - print(res) +# The SDK auto-detects credentials and builds the Vertex AI URL +client = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region="us-central1", # Default: europe-west4 +) -asyncio.run(main()) +res = client.chat.complete( + model="mistral-small-2503", + messages=[ + { + "role": "user", + "content": "Hello there!", + } + ], +) +print(res.choices[0].message.content) ``` -The documentation for the GCP SDK is available [here](packages/mistralai_gcp/README.md). - ## Available Resources and Operations @@ -674,8 +680,8 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.models.list(, - RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) + res = mistral.models.list( + retries=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) # Handle response print(res) diff --git a/examples/azure/.env.example b/examples/azure/.env.example new file mode 100644 index 00000000..7467bf2e --- /dev/null +++ b/examples/azure/.env.example @@ -0,0 +1,4 @@ +AZURE_API_KEY=your-azure-api-key +AZURE_ENDPOINT=https://your-endpoint.services.ai.azure.com/models +AZURE_MODEL=your-deployment-name +AZURE_API_VERSION=2024-05-01-preview diff --git a/examples/azure/az_chat_no_streaming.py b/examples/azure/az_chat_no_streaming.py deleted file mode 100644 index 4d5530dc..00000000 --- a/examples/azure/az_chat_no_streaming.py +++ /dev/null @@ -1,15 +0,0 @@ -import os - -from mistralai_azure import 
MistralAzure -from mistralai_azure.models import ChatCompletionRequestMessages, UserMessage - -client = MistralAzure( - azure_api_key=os.environ["AZURE_API_KEY"], - azure_endpoint=os.environ["AZURE_ENDPOINT"], -) - -messages: list[ChatCompletionRequestMessages] = [ - UserMessage(content="What is the capital of France?"), -] -res = client.chat.complete(messages=messages) -print(res.choices[0].message.content) diff --git a/examples/azure/chat_no_streaming.py b/examples/azure/chat_no_streaming.py index 4d5530dc..952b171d 100644 --- a/examples/azure/chat_no_streaming.py +++ b/examples/azure/chat_no_streaming.py @@ -1,15 +1,22 @@ import os -from mistralai_azure import MistralAzure -from mistralai_azure.models import ChatCompletionRequestMessages, UserMessage +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.models import ChatCompletionRequestMessage, UserMessage +AZURE_API_KEY = os.environ.get("AZURE_API_KEY", "") +AZURE_ENDPOINT = os.environ.get("AZURE_ENDPOINT", "") +AZURE_MODEL = os.environ.get("AZURE_MODEL", "mistral-small-2503") +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter client = MistralAzure( - azure_api_key=os.environ["AZURE_API_KEY"], - azure_endpoint=os.environ["AZURE_ENDPOINT"], + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) -messages: list[ChatCompletionRequestMessages] = [ +messages: list[ChatCompletionRequestMessage] = [ UserMessage(content="What is the capital of France?"), ] -res = client.chat.complete(messages=messages) +res = client.chat.complete(model=AZURE_MODEL, messages=messages) print(res.choices[0].message.content) diff --git a/examples/gcp/.env.example b/examples/gcp/.env.example new file mode 100644 index 00000000..6721bd37 --- /dev/null +++ b/examples/gcp/.env.example @@ -0,0 +1,3 @@ +GCP_PROJECT_ID=your-gcp-project-id +GCP_REGION=us-central1 +GCP_MODEL=mistral-small-2503 
diff --git a/examples/gcp/async_chat_no_streaming.py b/examples/gcp/async_chat_no_streaming.py index 178f151c..61a2d076 100755 --- a/examples/gcp/async_chat_no_streaming.py +++ b/examples/gcp/async_chat_no_streaming.py @@ -1,19 +1,43 @@ #!/usr/bin/env python +""" +Example: Async chat completion with GCP Vertex AI. + +The SDK automatically: +- Detects credentials via google.auth.default() +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from project_id and region + +Prerequisites: + gcloud auth application-default login + +Usage: + GCP_PROJECT_ID=your-project GCP_REGION=us-central1 GCP_MODEL=mistral-small-2503 python async_chat_no_streaming.py +""" import asyncio import os -from mistralai_gcp import MistralGoogleCloud -from mistralai_gcp.models.usermessage import UserMessage +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.models import UserMessage +# Configuration from environment variables +GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID") # Optional: auto-detected from credentials +GCP_REGION = os.environ.get("GCP_REGION", "us-central1") +GCP_MODEL = os.environ.get("GCP_MODEL", "mistral-small-2503") -async def main(): - model = "mistral-large-2407" - client = MistralGoogleCloud(project_id=os.environ["GCP_PROJECT_ID"]) +async def main(): + # The SDK automatically handles: + # - Credential detection via google.auth.default() + # - Token refresh when expired + # - Vertex AI URL construction + client = MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) chat_response = await client.chat.complete_async( - model=model, + model=GCP_MODEL, messages=[UserMessage(content="What is the best French cheese?")], ) diff --git a/examples/gcp/gcp_async_chat_no_streaming.py b/examples/gcp/gcp_async_chat_no_streaming.py deleted file mode 100755 index 178f151c..00000000 --- a/examples/gcp/gcp_async_chat_no_streaming.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python - -import asyncio -import os - -from mistralai_gcp 
import MistralGoogleCloud -from mistralai_gcp.models.usermessage import UserMessage - - -async def main(): - model = "mistral-large-2407" - - client = MistralGoogleCloud(project_id=os.environ["GCP_PROJECT_ID"]) - - chat_response = await client.chat.complete_async( - model=model, - messages=[UserMessage(content="What is the best French cheese?")], - ) - - print(chat_response.choices[0].message.content) - - -if __name__ == "__main__": - asyncio.run(main()) diff --git a/packages/azure/.genignore b/packages/azure/.genignore new file mode 100644 index 00000000..6bdf6621 --- /dev/null +++ b/packages/azure/.genignore @@ -0,0 +1,6 @@ +pyproject.toml +src/mistralai/azure/client/sdk.py +src/mistralai/azure/client/_hooks/registration.py +README.md +USAGE.md +docs/sdks/**/README.md diff --git a/packages/mistralai_azure/.gitattributes b/packages/azure/.gitattributes similarity index 100% rename from packages/mistralai_azure/.gitattributes rename to packages/azure/.gitattributes diff --git a/packages/mistralai_azure/.gitignore b/packages/azure/.gitignore similarity index 100% rename from packages/mistralai_azure/.gitignore rename to packages/azure/.gitignore diff --git a/packages/mistralai_azure/.speakeasy/gen.lock b/packages/azure/.speakeasy/gen.lock similarity index 69% rename from packages/mistralai_azure/.speakeasy/gen.lock rename to packages/azure/.speakeasy/gen.lock index a7cdba10..5cf1d8e1 100644 --- a/packages/mistralai_azure/.speakeasy/gen.lock +++ b/packages/azure/.speakeasy/gen.lock @@ -1,24 +1,25 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 2bebd9aadeecb18391d46d1dadc340ef + docChecksum: 571037b8485712afcef86703debb7f15 docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 - releaseVersion: 1.8.1 - configChecksum: 0448ba634aa36625c6ac276e17e3b3b5 + releaseVersion: 2.0.0a4 + configChecksum: 549cf1eae199d39bf97052462fd8e640 repoURL: 
https://github.com/mistralai/client-python.git - repoSubDirectory: packages/mistralai_azure - installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_azure + repoSubDirectory: packages/azure + installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/azure published: true persistentEdits: - generation_id: ecb4f74f-ba8a-4f28-941d-36b3258200bd - pristine_commit_hash: 785c0560d42a9c4cff938392bb6d52d98a2f3529 - pristine_tree_hash: 50ed42d2e4b3d4ecd639935cd1511220354a41d7 + generation_id: b0dbfbbb-4028-4834-9980-a1d2dba52a8d + pristine_commit_hash: 6cab3cf0757d3c7dd58ee1eabec66dd63a8c9a03 + pristine_tree_hash: abf5c6e4b603142b1a6aac936d7c3be574611256 features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 + configurableModuleName: 0.2.0 constsAndDefaults: 1.0.5 core: 5.23.18 defaultEnabledRetries: 0.2.0 @@ -57,16 +58,12 @@ trackedFiles: pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 docs/models/assistantmessage.md: id: 7e0218023943 - last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38 - pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50 + last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d docs/models/assistantmessagecontent.md: id: 9f1795bbe642 last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d - docs/models/assistantmessagerole.md: - id: bb5d2a4bc72f - last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91 - pristine_git_object: 658229e77eb6419391cf7941568164541c528387 docs/models/chatcompletionchoice.md: id: 0d15c59ab501 last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 @@ -77,12 +74,12 @@ trackedFiles: pristine_git_object: 
b2f15ecbe88328de95b4961ddb3940fd8a6ee64b docs/models/chatcompletionrequest.md: id: adffe90369d0 - last_write_checksum: sha1:de5476eb16a5ff75942b1ece68dbe547110dbbb8 - pristine_git_object: 104a1f96e60e1d4b86305dab2829be084b00b153 - docs/models/chatcompletionrequestmessages.md: - id: ec996b350e12 - last_write_checksum: sha1:2ecec8d12cdb48426f4eb62732066fc79fcd4ec3 - pristine_git_object: bc7708a67f06d74e8a5bf1facb2b23fb1e08053c + last_write_checksum: sha1:a404d37c6605a5524f1f48b418bacf46e86a9a68 + pristine_git_object: 3b0f7270840e257475f4b0f15f27e0c0152818d2 + docs/models/chatcompletionrequestmessage.md: + id: 3f5e170d418c + last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 + pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9 docs/models/chatcompletionrequeststop.md: id: fcaf5bbea451 last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 @@ -97,8 +94,16 @@ trackedFiles: pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b docs/models/chatcompletionstreamrequest.md: id: cf8f29558a68 - last_write_checksum: sha1:f6bc4a0f064fc3420ae9b29c7e6fc9100ae85e4d - pristine_git_object: 85f237b4fc59ffc487377f150952284cc2102d85 + last_write_checksum: sha1:daca00885f0d0f9863d8420bbee514723084813d + pristine_git_object: f78156a647ec63ca60ff423acbdee2b2404e4e60 + docs/models/chatcompletionstreamrequestmessage.md: + id: 053a98476cd2 + last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 + pristine_git_object: 2e4e93acca8983a3ea27b391d4606518946e13fe + docs/models/chatcompletionstreamrequeststop.md: + id: d0e89a4dca78 + last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 + pristine_git_object: a48460a92ac47fec1de2188ba46b238229736d32 docs/models/chatcompletionstreamrequesttoolchoice.md: id: 210d5e5b1413 last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 @@ -113,40 +118,36 @@ trackedFiles: pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d docs/models/completionresponsestreamchoice.md: 
id: d56824d615a6 - last_write_checksum: sha1:dcf4b125b533192cb5aea1a68551866954712dc5 - pristine_git_object: c807dacd98eb3561ee45f40db71a92cb72b0f6de - docs/models/content.md: - id: bfd859c99f86 - last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8 - pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6 + last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 + pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875 + docs/models/completionresponsestreamchoicefinishreason.md: + id: 5f1fbfc90b8e + last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9 + pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0 docs/models/contentchunk.md: id: d2d3a32080cd last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f docs/models/deltamessage.md: id: 6c5ed6b60968 - last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58 - pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9 + last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 + pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952 + docs/models/deltamessagecontent.md: + id: 7307bedc8733 + last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e + pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 docs/models/document.md: id: cd1d2a444370 last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52 pristine_git_object: 509d43b733d68d462853d9eb52fc913c855dff40 docs/models/documenturlchunk.md: id: 48437d297408 - last_write_checksum: sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54 - pristine_git_object: 6c9a5b4d9e6769be242b27ef0208f6af704689c0 - docs/models/documenturlchunktype.md: - id: a3574c91f539 - last_write_checksum: sha1:a0134fc0ea822d55b1204ee71140f2aa9d8dbe9c - pristine_git_object: 32e1fa9e975a3633fb49057b38b0ea0206b2d8ef + last_write_checksum: sha1:5f9294355929d66834c52c67990ba36a7f81387d + pristine_git_object: 
9dbfbe5074de81b9fcf6f5bae8a0423fb2c82f71 docs/models/filechunk.md: id: edc076728e9d last_write_checksum: sha1:07ab5db503211adba2fa099e66d12ac3c4bbf680 pristine_git_object: 18217114060ac4e4b45fefabace4628684f27e5c - docs/models/finishreason.md: - id: 73315c2a39b3 - last_write_checksum: sha1:5b58c7fa9219f728b9731287e21abe1be9f11e4a - pristine_git_object: 45a5aedb7241cf080df3eb976a4413064d314009 docs/models/format_.md: id: a17c22228eda last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 @@ -167,22 +168,22 @@ trackedFiles: id: a211c095f2ac last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 docs/models/imageurl.md: id: e75dd23cec1d - last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 - pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + last_write_checksum: sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 docs/models/imageurlchunk.md: id: 4407097bfff3 - last_write_checksum: sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727 - pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 - docs/models/imageurlchunkimageurl.md: - id: c7fae88454ce - last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325 - pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064 - docs/models/imageurlchunktype.md: - id: b9af2db9ff60 - last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 - pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + last_write_checksum: sha1:da7a792f7b649f311062338dfbf3d25ff55fe6c5 + pristine_git_object: db0c53d22e29fa25222edb86b264e5135879a029 + docs/models/imageurlunion.md: + id: 9d3c691a9db0 + last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 + 
pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 docs/models/jsonschema.md: id: a6b15ed6fac8 last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f @@ -191,10 +192,6 @@ trackedFiles: id: b071d5a509cc last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 - docs/models/messages.md: - id: 2103cd675c2f - last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244 - pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a docs/models/mistralpromptmode.md: id: d17d5db4d3b6 last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 @@ -233,12 +230,8 @@ trackedFiles: pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 docs/models/referencechunk.md: id: 07895f9debfd - last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 - pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc - docs/models/referencechunktype.md: - id: 0944b80ea9c8 - last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 - pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab docs/models/responseformat.md: id: 50a1e4140614 last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add @@ -247,22 +240,14 @@ trackedFiles: id: cf1f250b82db last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f - docs/models/role.md: - id: b694540a5b1e - last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d - pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053 docs/models/security.md: id: 452e4d4eb67a last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 - docs/models/stop.md: - id: f231cc9f5041 - last_write_checksum: 
sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8 - pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846 docs/models/systemmessage.md: id: fdb7963e1cdf - last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f - pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 docs/models/systemmessagecontent.md: id: 94a56febaeda last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb @@ -277,16 +262,12 @@ trackedFiles: pristine_git_object: 54f029b814fdcfa2e93e2b8b0594ef9e4eab792a docs/models/textchunk.md: id: 6cd12e0ef110 - last_write_checksum: sha1:6d41d1991d122805734ed0d90ee01592aa5ae6ff - pristine_git_object: 6daab3c381bd8c13d2935bf62578648a8470fc76 + last_write_checksum: sha1:aa448d4937c0c1cd562621f0a9080aa0dc6e4bd1 + pristine_git_object: b266619dcb57222ec343f373c43b2b5cef5b8b93 docs/models/thinkchunk.md: id: bca24d7153f6 - last_write_checksum: sha1:feb95a931bb9cdbfe28ab351618687e513cf830b - pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab - docs/models/thinkchunktype.md: - id: 0fbeed985341 - last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 - pristine_git_object: baf6f755252d027295be082b53ecf80555039414 + last_write_checksum: sha1:2b8ff7737fa7255673ca31da7cb2e6803fce9e02 + pristine_git_object: b07f598ebc5f0e9c041186c081dc98bc21104bdb docs/models/thinking.md: id: 07234f8dd364 last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 @@ -309,40 +290,28 @@ trackedFiles: pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 docs/models/toolmessage.md: id: 0553747c37a1 - last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167 - pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 docs/models/toolmessagecontent.md: 
id: f0522d2d3c93 last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 - docs/models/toolmessagerole.md: - id: f333d4d1ab56 - last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f - pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6 docs/models/tooltypes.md: id: adb50fe63ea2 last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 - docs/models/type.md: - id: 98c32f09b2c8 - last_write_checksum: sha1:8aa9ca999e9648ddc2240bf80780684e3e858ddf - pristine_git_object: eb0581e7174b6951d69c485a64af5244cb8687fa docs/models/usageinfo.md: id: ec6fe65028a9 last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 docs/models/usermessage.md: id: ed66d7a0f80b - last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed - pristine_git_object: 63b0131091cd211b3b1477c1d63b5666a26db546 + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 docs/models/usermessagecontent.md: id: 52c072c851e8 last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf - docs/models/usermessagerole.md: - id: 99ffa937c462 - last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111 - pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4 docs/models/utils/retryconfig.md: id: 4343ac43161c last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d @@ -361,344 +330,348 @@ trackedFiles: pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 scripts/prepare_readme.py: id: e0c5957a6035 - last_write_checksum: sha1:2291075229aebf4e036800b5b9299b37fcb8707c - pristine_git_object: ff1121fda23730f356d2df2ad17c8e991b9fc605 + last_write_checksum: sha1:26b29aad3c23a98912fd881698c976aac55749fe + 
pristine_git_object: 2b2577ea83873f64aa9f91d9d762bc6e1f250977 scripts/publish.sh: id: fe273b08f514 last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 - src/mistralai_azure/__init__.py: - id: 3cd9e92c2f72 + src/mistralai/azure/client/__init__.py: + id: 5624bda9196d last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c - src/mistralai_azure/_hooks/__init__.py: - id: 66932eacf398 + src/mistralai/azure/client/_hooks/__init__.py: + id: 850c237217cb last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 - src/mistralai_azure/_hooks/sdkhooks.py: - id: 1184c9201c62 - last_write_checksum: sha1:c98774db1664db2bc6d80e8a5f4f5133260f201a - pristine_git_object: 37ff4e9f0ebd42a58ada6300098a5b1b85a54b69 - src/mistralai_azure/_hooks/types.py: - id: a32fe1943bce - last_write_checksum: sha1:78fc31840a38e668a73871885c779929196a8bec - pristine_git_object: 0c22d7ebccdd64097033454b7c698d10ee59987d - src/mistralai_azure/_version.py: - id: 7711a0bb1da3 - last_write_checksum: sha1:9a446d67d6a86cdf9d9e3447c1c09a4f719b2c9b - pristine_git_object: 79277f9a358b4c851363e11e1e8f534779e9f271 - src/mistralai_azure/basesdk.py: - id: 7d825dbc7d6e - last_write_checksum: sha1:4070786599952b3c603d1384d87d7b92bb13b974 - pristine_git_object: 89f7dc493d7f50d5f2d3f468c0a8392a6ec5e28b - src/mistralai_azure/chat.py: - id: ebf1c99bea88 - last_write_checksum: sha1:2d78fa9e8b3e300e18b6fb3bc116e824261efb55 - pristine_git_object: 10bb247fb89f0f9ef110300224c95f2a7653ad2f - src/mistralai_azure/httpclient.py: - id: 808a3f534ffa + src/mistralai/azure/client/_hooks/sdkhooks.py: + id: e9923767446c + last_write_checksum: sha1:ae162d6e73be0eb767c353c815d76b034395d50f + pristine_git_object: 2080681b7f2c52fcb80dcb95eff48654763e6258 + src/mistralai/azure/client/_hooks/types.py: + id: 07c892e06527 + 
last_write_checksum: sha1:fde2e0f6da6930232b67682009de520724b23398 + pristine_git_object: 3e4e39555d60adebe84e596c8323ee5b80676fc9 + src/mistralai/azure/client/_version.py: + id: a77160e60e5d + last_write_checksum: sha1:e26eb828e9a240042acc754f38dcf2e581e045aa + pristine_git_object: 4448d2a0fd803f43820378359c921d09eba6f43e + src/mistralai/azure/client/basesdk.py: + id: 5a585a95ec21 + last_write_checksum: sha1:d7a4a959d7d3ca3cd22d8daf144c3b4d5c0d1210 + pristine_git_object: b0391ac078b4e2a5d9107ed014c1ca939a553c23 + src/mistralai/azure/client/chat.py: + id: c18454e628d7 + last_write_checksum: sha1:cc1ff54b85ce494428ebf22ec01bd1199cd9e2b6 + pristine_git_object: 3348bf47eafb3fcfb2de0e7d512073e947b69554 + src/mistralai/azure/client/httpclient.py: + id: 60c81037fbd0 last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 pristine_git_object: 89560b566073785535643e694c112bedbd3db13d - src/mistralai_azure/models/__init__.py: - id: e5fcf3933d2c - last_write_checksum: sha1:43f0ebb24f64a382fd18202da65a321d9925dbee - pristine_git_object: 9baa3ff1865cd8aec9e9b93d6e3c315e8c7870c5 - src/mistralai_azure/models/assistantmessage.py: - id: 15f117b45380 - last_write_checksum: sha1:3c2872d06ad465dbbbedcf8d397d1f12961e1e2e - pristine_git_object: 7790eb10a034d892c3c1e793c412c75ff8820e40 - src/mistralai_azure/models/chatcompletionchoice.py: - id: 93cfc6cec0d2 - last_write_checksum: sha1:f5dfcf407d8abd5ce8eb23f058c589861c71a0df - pristine_git_object: 7c6eb933faf09cc5c6102575d371ac280f2e242d - src/mistralai_azure/models/chatcompletionrequest.py: - id: d046a16b5e58 - last_write_checksum: sha1:bc1e0b5c8b11bfef5a9b135436c2f3f555a11fa3 - pristine_git_object: a7b095f34c572e1805650e44f847946280ccb3fe - src/mistralai_azure/models/chatcompletionresponse.py: - id: fc342e80f579 - last_write_checksum: sha1:a93593ec734420bc122f0b0b8c49d630795f1d42 - pristine_git_object: 7a66f3221a154b1a0f47c4f808ece8e580280548 - src/mistralai_azure/models/chatcompletionstreamrequest.py: - id: 1052b055a791 - 
last_write_checksum: sha1:18f71c5eeda25d23f2c82ddcdb710a20b44b806c - pristine_git_object: 96cd631b0ed74e5c82c6e2492011001021b019f8 - src/mistralai_azure/models/completionchunk.py: - id: e04bc380589d - last_write_checksum: sha1:490c3236276ae8fdecb883744e263aecbe4c608c - pristine_git_object: d6cc2a86a1fda1ebce1f3c5a169ab1118705e3f0 - src/mistralai_azure/models/completionevent.py: - id: e75909f919b1 - last_write_checksum: sha1:9f5423ad56747fb7cc95a6f01e0826510571d4c1 - pristine_git_object: 5a2039c2492bab82184b4f2469806f8b977a7246 - src/mistralai_azure/models/completionresponsestreamchoice.py: - id: 24fe265a60d8 - last_write_checksum: sha1:df52342c3458cca6396d538c5d9a42f07131796d - pristine_git_object: 0e890aacf79f8f220f585d914c6fbe8863232036 - src/mistralai_azure/models/contentchunk.py: - id: 9e6b90acdf54 - last_write_checksum: sha1:e93c57ef87654a06d8849030f65db3d279f8f7ad - pristine_git_object: e6a3e24a8857ea1661874197eec967f0ac99e31d - src/mistralai_azure/models/deltamessage.py: - id: 593eaaeda97b - last_write_checksum: sha1:9c2f6e52c81d2f5bf71f520861158dc5eae6eab7 - pristine_git_object: 7fa3c3f216153ebc0a2d31e590793698e95a8be8 - src/mistralai_azure/models/documenturlchunk.py: - id: bff69bfa8014 - last_write_checksum: sha1:5c515c4c85b78d8f4cf147faab9cf01c3501e0b9 - pristine_git_object: ea8d5625a6d1579dd60f2e4a067f455c82334986 - src/mistralai_azure/models/filechunk.py: - id: 0de687fe41c1 - last_write_checksum: sha1:56a1765b46702d24ee9c00ab3a06ccdbffdd63f9 - pristine_git_object: 2c3edc078b5e781b4d7163ab01e02a3347c81e2f - src/mistralai_azure/models/function.py: - id: 16111a6101f2 - last_write_checksum: sha1:456d34df457592f1975b0d1e158207d4446a6c41 - pristine_git_object: a4642f92a0cf614b458591c220a83ae1c422ce25 - src/mistralai_azure/models/functioncall.py: - id: e383b31a7f16 - last_write_checksum: sha1:cec288f925fa58842bb7d9e688f6122a01973d4b - pristine_git_object: dd93c4629c3bd81dd6fb305474ce0cd5443e1bdb - src/mistralai_azure/models/functionname.py: - id: ebc3e07e4b6f 
- last_write_checksum: sha1:743cec4c3f586d67d1ab2816d8d76170f46a3ca1 - pristine_git_object: b55c82af3f29efe38698bc776a8532c647dccc36 - src/mistralai_azure/models/httpvalidationerror.py: - id: da4825943f94 - last_write_checksum: sha1:dce58ead8f7f901514250e1ae5965ba039b1da14 - pristine_git_object: 56607d9437ce39097deac134d4f622ea523cbda7 - src/mistralai_azure/models/imageurl.py: - id: 80cc0df94e9d - last_write_checksum: sha1:a1a416ae5bf9c559219cff5f008a90f251a52477 - pristine_git_object: a5a66360b017cbdc342775241aa4aa2322534c6a - src/mistralai_azure/models/imageurlchunk.py: - id: c5c6dd2f1782 - last_write_checksum: sha1:11634325be12aa567b42227f2117e9b8c854a51c - pristine_git_object: a40e451c60caca688a9379dcb20d545e9e6b76e2 - src/mistralai_azure/models/jsonschema.py: - id: 8c635811dd6b - last_write_checksum: sha1:a99a6de224e51eb6cf85fa6de8cf37266ab5fe6d - pristine_git_object: 0f7563fc17bf172d527d09507294b4ef5646c22c - src/mistralai_azure/models/mistralazureerror.py: - id: a919897c4ea9 + src/mistralai/azure/client/models/__init__.py: + id: "335011330e21" + last_write_checksum: sha1:9afe0f0fb324a2b3c60ec98ce78b1ff6f908db39 + pristine_git_object: 51db6a383ddbab2d946b00c41934359a7eb50448 + src/mistralai/azure/client/models/assistantmessage.py: + id: 353ed9110f97 + last_write_checksum: sha1:e444c76e27b9b745b9238894bdf2b6a40bba6e6e + pristine_git_object: f5793f9455485c576293b44fb548be8bae9c7a65 + src/mistralai/azure/client/models/chatcompletionchoice.py: + id: 6942c7db5891 + last_write_checksum: sha1:817bfda6120a98248322c308629e404081e01279 + pristine_git_object: 67b5ba694217f4f3b95589d7f84af6a9bea9802d + src/mistralai/azure/client/models/chatcompletionrequest.py: + id: 0c711c870184 + last_write_checksum: sha1:fae2a92375aa3e58c258e4497acead859cd3b6dc + pristine_git_object: 921790959880ddf9b9ffce15d881e01f8adefa86 + src/mistralai/azure/client/models/chatcompletionresponse.py: + id: bdfacf065e9e + last_write_checksum: sha1:c72fb624e7475a551d37e0b291b64bcf772c402a + 
pristine_git_object: d41f9c6fab670cf7c961f50b1302f9a88cf48162 + src/mistralai/azure/client/models/chatcompletionstreamrequest.py: + id: da00a7feb4ef + last_write_checksum: sha1:c8c84c818b3b22bfec1e7f5737bbb281088dd3ba + pristine_git_object: be21eed2ecbe8354eb9a4bfa48122b28dada4aaf + src/mistralai/azure/client/models/completionchunk.py: + id: 28d620f25510 + last_write_checksum: sha1:413545e0521539346bff6e77fdec0c9e383bde17 + pristine_git_object: b94284b2d9c29c25a2f8eaa02828e2a205f4407e + src/mistralai/azure/client/models/completionevent.py: + id: a6f00a747933 + last_write_checksum: sha1:3d04bfbdaf11c52af5613ed0fd70c8dbc59f6d49 + pristine_git_object: c4b272871d9b3ea8443f469d29b0825706c25c00 + src/mistralai/azure/client/models/completionresponsestreamchoice.py: + id: 3ba5d7ba8a13 + last_write_checksum: sha1:f917300daf4febec7661f2c73bae675600ee0bdd + pristine_git_object: 2a4d053feb84cf2a9675d76ae08c83945b26644c + src/mistralai/azure/client/models/contentchunk.py: + id: 1f65e4f8f731 + last_write_checksum: sha1:79efbc90c1ae36b74492666125fb3e5ecaa5c27a + pristine_git_object: 0f09f76703efd95fcd96377b8ec6870d58dbf829 + src/mistralai/azure/client/models/deltamessage.py: + id: b7dab1d158de + last_write_checksum: sha1:553fdff5a3aec6909417be3cb390d99421af1693 + pristine_git_object: 2c01feae56c44d256f1e579c15f08e167dcc6481 + src/mistralai/azure/client/models/documenturlchunk.py: + id: e56fec6e977f + last_write_checksum: sha1:a43cee08f935933bf715b2f1a82b4c746b591f35 + pristine_git_object: 345bafc2bfe3cc056d746cf8151cf53b68771414 + src/mistralai/azure/client/models/filechunk.py: + id: 150d9f180110 + last_write_checksum: sha1:df1e010006338f6dd37009f2547ab8f0b90b917a + pristine_git_object: 829f03d84c25dd859d514ffa26e570f235e4e75b + src/mistralai/azure/client/models/function.py: + id: 6d1e2011a14b + last_write_checksum: sha1:62df160db82853d79907cccff4d0904f6bb9f142 + pristine_git_object: f4edce0fb8563f485d9a63a42439a9b2593a7f40 + src/mistralai/azure/client/models/functioncall.py: + 
id: ced560a1bd57 + last_write_checksum: sha1:490cb3a0305994de063e06fa4c77defa911271f3 + pristine_git_object: d476792ccbb5aa2002deb870f1c81cc1500f59d4 + src/mistralai/azure/client/models/functionname.py: + id: 6f09474ebc85 + last_write_checksum: sha1:651ceed24416ce8192f70db03cc5cd0db685899f + pristine_git_object: 839e0d557a902da6c819210962e38e1df9bda90f + src/mistralai/azure/client/models/httpvalidationerror.py: + id: ca155413681b + last_write_checksum: sha1:9dea33d9c74bbdf842ee9d157e4aaa05c36ae34a + pristine_git_object: 40bccddc4d0c0e761d70af713387561101e20b60 + src/mistralai/azure/client/models/imagedetail.py: + id: de211988043d + last_write_checksum: sha1:812f2ec4fc0d8d13db643ed49192384d5a841aa4 + pristine_git_object: 2d074cee614e1c49b69ee4073c3aaaa7a5a2c9e2 + src/mistralai/azure/client/models/imageurl.py: + id: c8882341c798 + last_write_checksum: sha1:443ee3739b3801928b4f3d4256531078fc4045e8 + pristine_git_object: b3c705e3f261ebd59f40e46785577694d80f98bf + src/mistralai/azure/client/models/imageurlchunk.py: + id: b6f0abb574d7 + last_write_checksum: sha1:4651f12f779bc86874c8516f06e39b882e414c92 + pristine_git_object: ee6de50f2add830c19d0b8b030a7c7a2ab65cb11 + src/mistralai/azure/client/models/jsonschema.py: + id: bfd486f4bb18 + last_write_checksum: sha1:ffe7190393086a4301aaffa6854cb3d80b0db92f + pristine_git_object: 5aaa490af350ac1c436dafb3d3c73d56402cac11 + src/mistralai/azure/client/models/mistralazureerror.py: + id: 31ed29254e67 last_write_checksum: sha1:25f4411c7411faad753d46118edf74828b1c9f7c pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 - src/mistralai_azure/models/mistralpromptmode.py: - id: f62a521bcdae - last_write_checksum: sha1:82190bc14d2e51440723176cb8108791485c1180 - pristine_git_object: 77230b7e5e61cc662fdc52c72e8b817a15e183c3 - src/mistralai_azure/models/no_response_error.py: - id: 54523e14f29b + src/mistralai/azure/client/models/mistralpromptmode.py: + id: d0028b1e4129 + last_write_checksum: 
sha1:46fe1ab8ac2d5867877368a59a4aa5be2fabadeb + pristine_git_object: 26e7adbdc4a981c92d51b72542c966b0ba0fb8f8 + src/mistralai/azure/client/models/no_response_error.py: + id: a956d6cd06f0 last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 - src/mistralai_azure/models/ocrimageobject.py: - id: 6c349909fb0c - last_write_checksum: sha1:0fed6abf8172f6ee40e703ef86ee9d902c6e5d7e - pristine_git_object: 9d0dd01dbb5be095e234aa3ec9469fface68c3d2 - src/mistralai_azure/models/ocrpagedimensions.py: - id: f33f598001b2 - last_write_checksum: sha1:5281879ef3d737a17a539cefda9f222302ead7da - pristine_git_object: efb62a58f22ad62c730b3af93bff151586105957 - src/mistralai_azure/models/ocrpageobject.py: - id: 99f20768c4d6 - last_write_checksum: sha1:c7479b83b0eb619e6b0f82344e81bc691f0b3a46 - pristine_git_object: e95718001e07bb89ba2fc9094f88b894572148bb - src/mistralai_azure/models/ocrrequest.py: - id: 4e574d5fb9be - last_write_checksum: sha1:1b03dc8b392069f6b142228e74179c8341b09ffa - pristine_git_object: e9c23afcdd7440660f17c7819406d7e603eabbec - src/mistralai_azure/models/ocrresponse.py: - id: 326a4d9fab25 - last_write_checksum: sha1:cf597498a5841a56bbd1aeb8478bd57a01d93cb1 - pristine_git_object: 3e43fa8eb7b80fafbd9344ad5a98c0ead98c54cb - src/mistralai_azure/models/ocrtableobject.py: - id: 3ba1292c343a - last_write_checksum: sha1:2d1d05902a9ed6bccdb41ccac6782f015450cf2e - pristine_git_object: 189f059eaa8a32cc32a5320ea9fe33d779e8ef1c - src/mistralai_azure/models/ocrusageinfo.py: - id: 0de4eae62e4b - last_write_checksum: sha1:85e5a850bd2f847e4a02b0731b0327ca0a02f643 - pristine_git_object: 1f5c9f1bc2cf2d728dec06b0930602852474a29e - src/mistralai_azure/models/prediction.py: - id: 9e8a0a7a3ca7 - last_write_checksum: sha1:e78af600f109a7489a5bcce80b48adf29cc0c4c3 - pristine_git_object: b23a935c00cd7ce4e7b7bd6fe8f2da87f8aaca92 - src/mistralai_azure/models/referencechunk.py: - id: 420a12dfec3b - last_write_checksum: 
sha1:f49da7a4541f55b283e9391e6397a9e4286570bd - pristine_git_object: 32d2ca68e67be3f03e14f74fd7e7692fa05b70f5 - src/mistralai_azure/models/responseformat.py: - id: aa7acbc1bda7 - last_write_checksum: sha1:70e7960bb4ec5db5f133c4cc8f6e813e39f8c671 - pristine_git_object: c989f3a4467c21416ea59b33fbc734a1477a6eb3 - src/mistralai_azure/models/responseformats.py: - id: 780a7aa0e87e + src/mistralai/azure/client/models/ocrimageobject.py: + id: 9c9f987d94bb + last_write_checksum: sha1:b86f5187d1c425ddf27ed4815657a7c41d71855c + pristine_git_object: 38e9d3e48df5cee8cdd0cd1d7b6df62182814104 + src/mistralai/azure/client/models/ocrpagedimensions.py: + id: 7669a25f32b3 + last_write_checksum: sha1:60642db6bb61f0e96204fb78d3aa0bd80dd0a7e5 + pristine_git_object: 12858da92de99aa6da9d6e148df3ba7ee37496c7 + src/mistralai/azure/client/models/ocrpageobject.py: + id: eea193b05126 + last_write_checksum: sha1:baada584537b75e2e184738424068e61afe263c7 + pristine_git_object: 5fb821c19fd3cca2c2e149bd058a7ca49d2d002b + src/mistralai/azure/client/models/ocrrequest.py: + id: 365a5b4776a2 + last_write_checksum: sha1:9d3a9bccd341219934470688d3818557231b9b62 + pristine_git_object: fece2713166fc943194b7b38ec9b82db295bba0a + src/mistralai/azure/client/models/ocrresponse.py: + id: b8cde8c16a4c + last_write_checksum: sha1:e6f08c68f0388919ca7bcbc4f0cb134525053fcd + pristine_git_object: 787289fa995ba6cbf4b2ef3d3c41edb31f656674 + src/mistralai/azure/client/models/ocrtableobject.py: + id: c2cd51b8789e + last_write_checksum: sha1:11052d42f0d91916f038437923ea656bf882032c + pristine_git_object: 3e3c25830a3216f4ef325f5b1056a0c1a267b090 + src/mistralai/azure/client/models/ocrusageinfo.py: + id: 5e9118cac468 + last_write_checksum: sha1:6b27c09b5ec447c6ede22aa75190a1e06353349c + pristine_git_object: e2ceba35eb3f6e148389a7fd466dea5c051480a4 + src/mistralai/azure/client/models/prediction.py: + id: bd6abfa93083 + last_write_checksum: sha1:87eb3c43fa31b245c13c4708602b300956aa9efb + pristine_git_object: 
6b8d6480b9ba1cb6683bdc93c24fb762ccfba146 + src/mistralai/azure/client/models/referencechunk.py: + id: c9612f854670 + last_write_checksum: sha1:b96507bcc82939fa4057532ef7e6a440baabd973 + pristine_git_object: e0bcb06be4d4c8d947ee267a9728aeae3a2c52fe + src/mistralai/azure/client/models/responseformat.py: + id: c124e7c316aa + last_write_checksum: sha1:f8c9e581053d1d885196c210a219a3e7aa086610 + pristine_git_object: 39fb03a25efdbc0a92ea91c72038ddd86ee056be + src/mistralai/azure/client/models/responseformats.py: + id: fef416cefcd4 last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 - src/mistralai_azure/models/responsevalidationerror.py: - id: 1952c765e2ec - last_write_checksum: sha1:d516c0c88210dd28b65747daa2fa1b63f432fe89 - pristine_git_object: a33954ccead3a8df87bdcc30a090efbb0ebecb94 - src/mistralai_azure/models/sdkerror.py: - id: bd8616367442 - last_write_checksum: sha1:41c259fac1bd50d33f1a2fd64d1ed17fd8d0d075 - pristine_git_object: 216d7f8fca986ac29162a1a7cba8c18b7f73d012 - src/mistralai_azure/models/security.py: - id: 7b3bcb55164e - last_write_checksum: sha1:9cacce270a27809ded4ee91aecac4a681154f5f0 - pristine_git_object: c1ae83138b09eab742f755a0f11428cf8c0fd60d - src/mistralai_azure/models/systemmessage.py: - id: 2e15bb043753 - last_write_checksum: sha1:8ec96bfc0533414a698d92387021cac116eadade - pristine_git_object: f99bf4ffb112b068159a3b95bc99ec7ce91b3f7d - src/mistralai_azure/models/systemmessagecontentchunks.py: - id: b6d9a4838359 - last_write_checksum: sha1:1e3f4688317d10f207dd42ef39cf2ac8f6042e54 - pristine_git_object: 4615a16cf39496dffc4982c6f0552d8bf353e280 - src/mistralai_azure/models/textchunk.py: - id: c169e3f0ffc9 - last_write_checksum: sha1:6cb623bafd4005e527dca9b908bb9f4b371342da - pristine_git_object: 5845456e5ca3089bcb551112408a0de84c597a91 - src/mistralai_azure/models/thinkchunk.py: - id: b1b9aeee4dcf - last_write_checksum: sha1:d15b39ef3e12195183664c32854233b9410d565b - 
pristine_git_object: f53a9f1ad2e6f124a36c9fb9be65bc09dbfbff4b - src/mistralai_azure/models/tool.py: - id: 99c8106f5428 - last_write_checksum: sha1:6142383805723bbc2b22f1bfcc660288378d1e42 - pristine_git_object: c91deec28488062a0220af41492fdfb34330e7a4 - src/mistralai_azure/models/toolcall.py: - id: 3643db1054cd - last_write_checksum: sha1:9b095f1efe1ea554cfacbc4a8e0c59b1c57d7f32 - pristine_git_object: 44fe8ec86b8f31ad8ee9591ae49036e8caa9ac41 - src/mistralai_azure/models/toolchoice.py: - id: 669768b7cbda - last_write_checksum: sha1:1217d8186e64d16f4c369079c62e3ac466726c60 - pristine_git_object: 93b4b7fe72f05a2ece9fed08a83139f4510b2574 - src/mistralai_azure/models/toolchoiceenum.py: - id: 5f7df8457771 + src/mistralai/azure/client/models/responsevalidationerror.py: + id: afdb9463b434 + last_write_checksum: sha1:26f01befeb347a63928012e7eb36c95a8a392145 + pristine_git_object: cbdffcbba45a988805cdd52d111e77b0ca777dbf + src/mistralai/azure/client/models/sdkerror.py: + id: 4601c7297af7 + last_write_checksum: sha1:b54041f9751e1f2a38dd02a6f8eadb3907fa3df0 + pristine_git_object: a1e9aacaa2fcc839dcb2638788dd7c94298adee7 + src/mistralai/azure/client/models/security.py: + id: 4a2e4760ec08 + last_write_checksum: sha1:0cd2ae54cecd88cfd8d43e92c0d3da7efa48942c + pristine_git_object: 9b83ba98336090bed89fbeda40b4a07b212a1106 + src/mistralai/azure/client/models/systemmessage.py: + id: 8fa0dee9e4e1 + last_write_checksum: sha1:2b52c44b92a098b559ec8b7a80449532169cd317 + pristine_git_object: 38c280c809148e190e329619858718d132da6bc0 + src/mistralai/azure/client/models/systemmessagecontentchunks.py: + id: 5918e770869d + last_write_checksum: sha1:55529f2f29ba3087fbf117dbbe64e1dda92b2958 + pristine_git_object: 225f38b712f5f3c7abfd526cc8c0386687814f36 + src/mistralai/azure/client/models/textchunk.py: + id: 9c81c76a6325 + last_write_checksum: sha1:d1c9eaffeb80299f023351dc8d07eb53e49133f2 + pristine_git_object: e513c1434cc7a4766bb9ef039ad8eed2bf0c12ca + 
src/mistralai/azure/client/models/thinkchunk.py: + id: df6bbd55b3eb + last_write_checksum: sha1:ec9af4cb7faa6ba8ed033b37db1d1d5a1406ac3f + pristine_git_object: e769399fe6ba90ddb2503f8fadb4b6cebc7d6f85 + src/mistralai/azure/client/models/tool.py: + id: 4075ef72c086 + last_write_checksum: sha1:0c041eaa008ee1851e05bf90e57602c0338f362f + pristine_git_object: 169305bc4c538e88b1e0cf1120aa10e424118880 + src/mistralai/azure/client/models/toolcall.py: + id: c65e6f79e539 + last_write_checksum: sha1:dd2290e019322e9df73b119e054a1d738eb5f3ba + pristine_git_object: a589b1b38ef4caaba2753f8335228bc16cd68961 + src/mistralai/azure/client/models/toolchoice.py: + id: c25062b5de34 + last_write_checksum: sha1:db82f8d3f811461226cffbeacf2699103a5e0689 + pristine_git_object: 1f623222084f12eaa63f2cea656dc7da10b12a3a + src/mistralai/azure/client/models/toolchoiceenum.py: + id: cc06ba3a8d21 last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 - src/mistralai_azure/models/toolmessage.py: - id: 1d9845bf98b3 - last_write_checksum: sha1:52bd15280bcae27ec7ba6a1c64b15648de5b0868 - pristine_git_object: 4bc5c9a9b509fdb89a4cf5ce81231189bf46bab4 - src/mistralai_azure/models/tooltypes.py: - id: 34c499f03e21 - last_write_checksum: sha1:f060bd3aebf7d42c1066c543c47cfa020e61eb27 - pristine_git_object: 638890c589ee642fd0a43e00337505e53ea3ec3a - src/mistralai_azure/models/usageinfo.py: - id: 59a5033672bf - last_write_checksum: sha1:7d0e7a483331077309b78e035cab9d65e87d3f65 - pristine_git_object: bbe5cdfaae260df81e93da11d05a1ba55ecbe329 - src/mistralai_azure/models/usermessage.py: - id: c54119314021 - last_write_checksum: sha1:b45f38755a96b07100baf5149631f366009e701f - pristine_git_object: 85fedb4bd1bcf64f69e4ead5310cf3fb354a6e3c - src/mistralai_azure/models/validationerror.py: - id: 83cd7bfd6d92 - last_write_checksum: sha1:250ed57498dabd11c0e2b6d255969e0285bb4214 - pristine_git_object: 4caff4a6b74aeb322bf42cd2070b7bd576ca834a - 
src/mistralai_azure/ocr.py: - id: 77e2e0f594ad - last_write_checksum: sha1:7daae9b0c14093d6d0bc0258b0bce008cb845a1e - pristine_git_object: 31e27f6eaa6dcc2b8450656d4a59dd4a7a50a29a - src/mistralai_azure/py.typed: - id: 98df238e554c + src/mistralai/azure/client/models/toolmessage.py: + id: 84ac736fa955 + last_write_checksum: sha1:11841bba4b66179321a35ea1a4d4d3571fa997b7 + pristine_git_object: a73fd6bf8355043f1b40caf7e8b9ded90c1fda0f + src/mistralai/azure/client/models/tooltypes.py: + id: fa881b046d34 + last_write_checksum: sha1:cd28ddc02fff9a5abbb59c82fe9e0dcbdb9b6d2a + pristine_git_object: 1cce7446f2772b998208ea1c78c7969e3881d5d0 + src/mistralai/azure/client/models/usageinfo.py: + id: 3edc9c81b329 + last_write_checksum: sha1:0b2117285b13d846a25c6c59436c4628b9d84a03 + pristine_git_object: 19a6b09fb63a3732719c45f8dfca92cfc2c57353 + src/mistralai/azure/client/models/usermessage.py: + id: 3796508adc07 + last_write_checksum: sha1:f4baa9d8b8f99f715873cea83191baf055c3296a + pristine_git_object: 96439c64a979ac3edf8900d39154d706846a3a95 + src/mistralai/azure/client/models/validationerror.py: + id: f2b84813e2ea + last_write_checksum: sha1:f0f9706a5af2ac4f6b234e768fdd492bbdd8a18c + pristine_git_object: 817ecf7a56470369ccacd0f5e0bb739656a5f92c + src/mistralai/azure/client/ocr.py: + id: 5817c10c9297 + last_write_checksum: sha1:24fec22877024154ea417e31ea443b4795c443ba + pristine_git_object: 098e764b6580e35ad0e81242ca601ce821656ee9 + src/mistralai/azure/client/py.typed: + id: e88369f116d2 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 - src/mistralai_azure/sdkconfiguration.py: - id: 476a4f9e2f3e - last_write_checksum: sha1:6b117889b46a546be6e949c1bf843834ceff7417 - pristine_git_object: 51289cf05559ba32dd17e45fab78df4a8697063f - src/mistralai_azure/types/__init__.py: - id: d761bb7a67a5 + src/mistralai/azure/client/sdkconfiguration.py: + id: 602f74633eed + last_write_checksum: 
sha1:163fe779949725d81181f39b70d6922fc2cb8099 + pristine_git_object: 919225f9bf2e4315f879f0da6c7f8b3e6157bd58 + src/mistralai/azure/client/types/__init__.py: + id: f79033f78412 last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c - src/mistralai_azure/types/basemodel.py: - id: 68c97875efb7 + src/mistralai/azure/client/types/basemodel.py: + id: fd244927c80c last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee - src/mistralai_azure/utils/__init__.py: - id: 3c68abef839b + src/mistralai/azure/client/utils/__init__.py: + id: 26f1a707325b last_write_checksum: sha1:887f56a717845fab7445cc368d2a17d850c3565a pristine_git_object: 05f26ade57efb8c54a774fbcb939fb1a7dc655ce - src/mistralai_azure/utils/annotations.py: - id: 476ee839718f + src/mistralai/azure/client/utils/annotations.py: + id: bb1f6c189fdb last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 - src/mistralai_azure/utils/datetimes.py: - id: e9faf3b28c48 + src/mistralai/azure/client/utils/datetimes.py: + id: 2b7db09ee0ab last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 - src/mistralai_azure/utils/enums.py: - id: 4d10693bf655 + src/mistralai/azure/client/utils/enums.py: + id: ffbdb1917a68 last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 - src/mistralai_azure/utils/eventstreaming.py: - id: 5f5e90529fd7 + src/mistralai/azure/client/utils/eventstreaming.py: + id: bdc37b70360c last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc - src/mistralai_azure/utils/forms.py: - id: 91c3fe9ba311 + src/mistralai/azure/client/utils/forms.py: + id: 51696122c557 last_write_checksum: 
sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 - src/mistralai_azure/utils/headers.py: - id: d37ef2f03e41 + src/mistralai/azure/client/utils/headers.py: + id: e42840c8cb13 last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a - src/mistralai_azure/utils/logger.py: - id: 9122a46617cc + src/mistralai/azure/client/utils/logger.py: + id: 9db88755a137 last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 - src/mistralai_azure/utils/metadata.py: - id: 2d93fa8523eb + src/mistralai/azure/client/utils/metadata.py: + id: 44f85bd3b2e2 last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d - src/mistralai_azure/utils/queryparams.py: - id: dfd31ba97c2b + src/mistralai/azure/client/utils/queryparams.py: + id: ec1c03114156 last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 - src/mistralai_azure/utils/requestbodies.py: - id: c91db641d5b9 + src/mistralai/azure/client/utils/requestbodies.py: + id: 1030c47d624d last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c - src/mistralai_azure/utils/retries.py: - id: 6f0cd9f6169d + src/mistralai/azure/client/utils/retries.py: + id: d50ed6e400b2 last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 - src/mistralai_azure/utils/security.py: - id: "270040388028" + src/mistralai/azure/client/utils/security.py: + id: 1d35741ce5f1 last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 pristine_git_object: 295a3f40031dbb40073ad227fd4a355660f97ab2 - src/mistralai_azure/utils/serializers.py: - id: 595ddab03803 + 
src/mistralai/azure/client/utils/serializers.py: + id: a1f26d73c3ad last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 - src/mistralai_azure/utils/unmarshal_json_response.py: - id: bde89a892417 - last_write_checksum: sha1:d2ce9e3478b38e54e4bb3a43610ee0bab00c2e27 - pristine_git_object: f5813119b559442ee85c0b310765db3866bfa09d - src/mistralai_azure/utils/url.py: - id: 080c62716b06 + src/mistralai/azure/client/utils/unmarshal_json_response.py: + id: 947f4fc4db62 + last_write_checksum: sha1:99bd357d24d2236e3974630d9bd18bae22610cbc + pristine_git_object: 5317ac87097ccb35628202cf7fc5cb21e186855f + src/mistralai/azure/client/utils/url.py: + id: 4976c88d0e3b last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 - src/mistralai_azure/utils/values.py: - id: 640889083cda + src/mistralai/azure/client/utils/values.py: + id: 3974a1553447 last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "azureai", "stream": true, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "azureai", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "422": application/json: {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "azureai", "stream": false, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "azureai", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} @@ -715,7 +688,7 @@ examples: application/json: {} userExample: requestBody: - application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + application/json: {"model": "CX-9", "document": {"type": "document_url", "document_url": "https://upset-labourer.net/"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} responses: "200": application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. 
ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). 
Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | 
(0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) 
| (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) 
| (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 
15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} diff --git a/packages/mistralai_azure/.speakeasy/gen.yaml b/packages/azure/.speakeasy/gen.yaml similarity index 78% rename from packages/mistralai_azure/.speakeasy/gen.yaml rename to packages/azure/.speakeasy/gen.yaml index e2be4d84..729cdfcf 100644 --- a/packages/mistralai_azure/.speakeasy/gen.yaml +++ b/packages/azure/.speakeasy/gen.yaml @@ -8,11 +8,13 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false + nameResolutionFeb2025: true parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false - sharedErrorComponentsApr2025: false + securityFeb2025: true + sharedErrorComponentsApr2025: true + methodSignaturesApr2024: true + sharedNestedComponentsJan2026: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -26,7 +28,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.8.1 + version: 2.0.0a4 additionalDependencies: 
dev: pytest: ^8.2.2 @@ -45,10 +47,13 @@ python: enableCustomCodeRegions: false enumFormat: union fixFlags: - responseRequiredSep2024: false + responseRequiredSep2024: true + flatAdditionalProperties: true flattenGlobalSecurity: true flattenRequests: true flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + forwardCompatibleUnionsByDefault: tagged-only imports: option: openapi paths: @@ -62,12 +67,12 @@ python: license: "" maxMethodParams: 15 methodArguments: infer-optional-args - moduleName: "" + moduleName: mistralai.azure.client multipartArrayFormat: legacy outputModelSuffix: output packageManager: uv - packageName: mistralai_azure - preApplyUnionDiscriminators: false + packageName: mistralai-azure + preApplyUnionDiscriminators: true pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat diff --git a/packages/mistralai_azure/CONTRIBUTING.md b/packages/azure/CONTRIBUTING.md similarity index 100% rename from packages/mistralai_azure/CONTRIBUTING.md rename to packages/azure/CONTRIBUTING.md diff --git a/packages/mistralai_azure/README.md b/packages/azure/README.md similarity index 57% rename from packages/mistralai_azure/README.md rename to packages/azure/README.md index f869b90a..6eff040f 100644 --- a/packages/mistralai_azure/README.md +++ b/packages/azure/README.md @@ -14,7 +14,7 @@ uv add mistralai **Prerequisites** -Before you begin, ensure you have `AZUREAI_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. +Before you begin, ensure you have `AZURE_ENDPOINT` and an `AZURE_API_KEY`. To obtain these, you will need to deploy Mistral on Azure AI. See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.ai/deployment/cloud/azure/). @@ -24,58 +24,73 @@ See [instructions for deploying Mistral on Azure AI here](https://docs.mistral.a This example shows how to create chat completions. 
+The SDK automatically injects the `api-version` query parameter. + ```python # Synchronous Example -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) - res = s.chat.complete( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], - model="azureai" + ], + model=AZURE_MODEL, ) if res is not None: # handle response - pass + print(res.choices[0].message.content) ```
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio -from mistralai_azure import MistralAzure import os +from mistralai.azure.client import MistralAzure + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") async def main(): + # The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) res = await s.chat.complete_async( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], - model="azureai" + ], + model=AZURE_MODEL, ) if res is not None: # handle response - pass + print(res.choices[0].message.content) asyncio.run(main()) ``` @@ -87,7 +102,7 @@ asyncio.run(main()) ### [chat](docs/sdks/chat/README.md) * [stream](docs/sdks/chat/README.md#stream) - Stream chat completion -* [create](docs/sdks/chat/README.md#create) - Chat Completion +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion @@ -100,23 +115,29 @@ terminate when the server no longer has any events to send and closes the underlying connection. 
```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) - res = s.chat.stream( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], - model="azureai" + ], + model=AZURE_MODEL, ) if res is not None: @@ -137,23 +158,36 @@ Some of the endpoints in this SDK support retries. If you use the SDK without an To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: ```python -from mistralai_azure import MistralAzure -from mistralazure.utils import BackoffStrategy, RetryConfig +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.utils import BackoffStrategy, RetryConfig import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? 
Answer in one short sentence.", - "role": "user", - }, -], model="azureai", - RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, + retries=RetryConfig( + "backoff", + BackoffStrategy(1, 50, 1.1, 100), + False + ), +) if res is not None: for event in res: @@ -164,23 +198,32 @@ if res is not None: If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: ```python -from mistralai_azure import MistralAzure -from mistralazure.utils import BackoffStrategy, RetryConfig +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.utils import BackoffStrategy, RetryConfig import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ["AZURE_API_VERSION"] + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") ) - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, +) if res is not None: for event in res: @@ -193,7 +236,7 @@ if res is not None: ## Error Handling -Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. 
If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. | Error Object | Status Code | Content Type | | -------------------------- | ----------- | ---------------- | @@ -203,22 +246,33 @@ Handling errors in this SDK should largely match your expectations. All operati ### Example ```python -from mistralai_azure import MistralAzure, models +from mistralai.azure.client import MistralAzure +from mistralai.azure.client import models import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) res = None try: - res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") + res = s.chat.complete( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=AZURE_MODEL, + ) except models.HTTPValidationError as e: # handle exception @@ -237,62 +291,28 @@ if res is not None: ## Server Selection -### Select Server by Name - -You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. 
This table lists the names associated with the available servers: - -| Name | Server | Variables | -| ------ | ------------------------ | --------- | -| `prod` | `https://api.mistral.ai` | None | - -#### Example - -```python -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - server="prod", - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - ### Override Server URL Per-Client -The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example: +For Azure, you must provide your Azure AI Foundry endpoint via `server_url`. The SDK automatically injects the `api-version` query parameter: ```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os s = MistralAzure( - server_url="https://api.mistral.ai", - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"), ) - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", + }, + ], + model=os.environ["AZURE_MODEL"], +) if res is not None: for event in res: @@ -311,17 +331,24 @@ This allows you to wrap the client with your own custom logic, such as adding cu For example, you could specify a header for every request that this sdk makes as follows: ```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import httpx +import os http_client = httpx.Client(headers={"x-custom-header": "someValue"}) -s = MistralAzure(client=http_client) +s = MistralAzure( + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"), + client=http_client, +) ``` or you could wrap the client with your own custom logic: ```python -from mistralai_azure import MistralAzure -from mistralai_azure.httpclient import AsyncHttpClient +from typing import Any, Optional, Union +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.httpclient import AsyncHttpClient import httpx class CustomClient(AsyncHttpClient): @@ -379,7 +406,11 @@ class CustomClient(AsyncHttpClient): extensions=extensions, ) -s = MistralAzure(async_client=CustomClient(httpx.AsyncClient())) +s = MistralAzure( + api_key="", + server_url="", + async_client=CustomClient(httpx.AsyncClient()), +) ``` @@ -394,23 +425,26 @@ This SDK supports the following security scheme globally: | --------- | ---- | ----------- | | `api_key` | http | HTTP Bearer | -To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: +To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. You must also provide `server_url` pointing to your Azure AI Foundry endpoint. 
The SDK automatically injects the `api-version` query parameter: ```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=os.environ["AZURE_API_KEY"], + server_url=os.environ["AZURE_ENDPOINT"], + api_version=os.environ.get("AZURE_API_VERSION", "2024-05-01-preview"), ) - -res = s.chat.stream(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") +res = s.chat.stream( + messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], + model=os.environ["AZURE_MODEL"], +) if res is not None: for event in res: @@ -426,5 +460,5 @@ if res is not None: ## Contributions -While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. -We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. 
diff --git a/packages/mistralai_azure/RELEASES.md b/packages/azure/RELEASES.md similarity index 100% rename from packages/mistralai_azure/RELEASES.md rename to packages/azure/RELEASES.md diff --git a/packages/azure/USAGE.md b/packages/azure/USAGE.md new file mode 100644 index 00000000..a4bc5147 --- /dev/null +++ b/packages/azure/USAGE.md @@ -0,0 +1,70 @@ + +### Create Chat Completions + +This example shows how to create chat completions. + +The SDK automatically injects the `api-version` query parameter. + +```python +# Synchronous Example +from mistralai.azure.client import MistralAzure +import os + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter +s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model=AZURE_MODEL) + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.azure.client import MistralAzure + +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +async def main(): + # The SDK automatically injects api-version as a query parameter + s = MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) + res = await s.chat.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], model=AZURE_MODEL) + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + diff --git a/packages/mistralai_azure/docs/models/arguments.md b/packages/azure/docs/models/arguments.md similarity index 100% rename from packages/mistralai_azure/docs/models/arguments.md rename to packages/azure/docs/models/arguments.md diff --git a/packages/mistralai_azure/docs/models/assistantmessage.md b/packages/azure/docs/models/assistantmessage.md similarity index 95% rename from packages/mistralai_azure/docs/models/assistantmessage.md rename to packages/azure/docs/models/assistantmessage.md index 3d0bd90b..9ef63837 100644 --- a/packages/mistralai_azure/docs/models/assistantmessage.md +++ b/packages/azure/docs/models/assistantmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | | `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | | `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/assistantmessagecontent.md b/packages/azure/docs/models/assistantmessagecontent.md similarity index 100% rename from packages/mistralai_azure/docs/models/assistantmessagecontent.md rename to packages/azure/docs/models/assistantmessagecontent.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoice.md b/packages/azure/docs/models/chatcompletionchoice.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionchoice.md rename to packages/azure/docs/models/chatcompletionchoice.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md b/packages/azure/docs/models/chatcompletionchoicefinishreason.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionchoicefinishreason.md rename to packages/azure/docs/models/chatcompletionchoicefinishreason.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequest.md b/packages/azure/docs/models/chatcompletionrequest.md similarity index 99% rename from packages/mistralai_azure/docs/models/chatcompletionrequest.md rename to packages/azure/docs/models/chatcompletionrequest.md index 104a1f96..3b0f7270 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequest.md +++ b/packages/azure/docs/models/chatcompletionrequest.md @@ -13,7 +13,7 @@ | `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/packages/mistralai_azure/docs/models/messages.md b/packages/azure/docs/models/chatcompletionrequestmessage.md similarity index 92% rename from packages/mistralai_azure/docs/models/messages.md rename to packages/azure/docs/models/chatcompletionrequestmessage.md index 1d394500..91e9e062 100644 --- a/packages/mistralai_azure/docs/models/messages.md +++ b/packages/azure/docs/models/chatcompletionrequestmessage.md @@ -1,4 +1,4 @@ -# Messages +# ChatCompletionRequestMessage ## Supported Types diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequeststop.md b/packages/azure/docs/models/chatcompletionrequeststop.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionrequeststop.md rename to packages/azure/docs/models/chatcompletionrequeststop.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md b/packages/azure/docs/models/chatcompletionrequesttoolchoice.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionrequesttoolchoice.md rename to packages/azure/docs/models/chatcompletionrequesttoolchoice.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionresponse.md 
b/packages/azure/docs/models/chatcompletionresponse.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionresponse.md rename to packages/azure/docs/models/chatcompletionresponse.md diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md b/packages/azure/docs/models/chatcompletionstreamrequest.md similarity index 99% rename from packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md rename to packages/azure/docs/models/chatcompletionstreamrequest.md index 85f237b4..f78156a6 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/azure/docs/models/chatcompletionstreamrequest.md @@ -10,10 +10,10 @@ | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md b/packages/azure/docs/models/chatcompletionstreamrequestmessage.md similarity index 91% rename from packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md rename to packages/azure/docs/models/chatcompletionstreamrequestmessage.md index bc7708a6..2e4e93ac 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequestmessages.md +++ b/packages/azure/docs/models/chatcompletionstreamrequestmessage.md @@ -1,4 +1,4 @@ -# ChatCompletionRequestMessages +# ChatCompletionStreamRequestMessage ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/stop.md b/packages/azure/docs/models/chatcompletionstreamrequeststop.md similarity index 88% rename from packages/mistralai_gcp/docs/models/stop.md rename to packages/azure/docs/models/chatcompletionstreamrequeststop.md index ba40ca83..a48460a9 100644 --- a/packages/mistralai_gcp/docs/models/stop.md +++ b/packages/azure/docs/models/chatcompletionstreamrequeststop.md @@ -1,4 +1,4 @@ -# Stop +# ChatCompletionStreamRequestStop Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array diff --git a/packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/azure/docs/models/chatcompletionstreamrequesttoolchoice.md similarity index 100% rename from packages/mistralai_azure/docs/models/chatcompletionstreamrequesttoolchoice.md rename to packages/azure/docs/models/chatcompletionstreamrequesttoolchoice.md diff --git a/packages/mistralai_azure/docs/models/completionchunk.md b/packages/azure/docs/models/completionchunk.md similarity index 100% rename from packages/mistralai_azure/docs/models/completionchunk.md rename to packages/azure/docs/models/completionchunk.md diff --git a/packages/mistralai_azure/docs/models/completionevent.md b/packages/azure/docs/models/completionevent.md similarity index 100% rename from packages/mistralai_azure/docs/models/completionevent.md rename to packages/azure/docs/models/completionevent.md diff --git a/packages/azure/docs/models/completionresponsestreamchoice.md b/packages/azure/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..1532c25b --- /dev/null +++ b/packages/azure/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | 
[Nullable[models.CompletionResponseStreamChoiceFinishReason]](../models/completionresponsestreamchoicefinishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/finishreason.md b/packages/azure/docs/models/completionresponsestreamchoicefinishreason.md similarity index 81% rename from packages/mistralai_gcp/docs/models/finishreason.md rename to packages/azure/docs/models/completionresponsestreamchoicefinishreason.md index 45a5aedb..0fece473 100644 --- a/packages/mistralai_gcp/docs/models/finishreason.md +++ b/packages/azure/docs/models/completionresponsestreamchoicefinishreason.md @@ -1,4 +1,4 @@ -# FinishReason +# CompletionResponseStreamChoiceFinishReason ## Values diff --git a/packages/mistralai_azure/docs/models/contentchunk.md b/packages/azure/docs/models/contentchunk.md similarity index 100% rename from packages/mistralai_azure/docs/models/contentchunk.md rename to packages/azure/docs/models/contentchunk.md diff --git a/packages/azure/docs/models/deltamessage.md b/packages/azure/docs/models/deltamessage.md new file mode 100644 index 00000000..e0ee575f --- /dev/null +++ b/packages/azure/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/content.md 
b/packages/azure/docs/models/deltamessagecontent.md similarity index 89% rename from packages/mistralai_gcp/docs/models/content.md rename to packages/azure/docs/models/deltamessagecontent.md index a833dc2c..8142772d 100644 --- a/packages/mistralai_gcp/docs/models/content.md +++ b/packages/azure/docs/models/deltamessagecontent.md @@ -1,4 +1,4 @@ -# Content +# DeltaMessageContent ## Supported Types diff --git a/packages/mistralai_azure/docs/models/document.md b/packages/azure/docs/models/document.md similarity index 100% rename from packages/mistralai_azure/docs/models/document.md rename to packages/azure/docs/models/document.md diff --git a/packages/azure/docs/models/documenturlchunk.md b/packages/azure/docs/models/documenturlchunk.md new file mode 100644 index 00000000..9dbfbe50 --- /dev/null +++ b/packages/azure/docs/models/documenturlchunk.md @@ -0,0 +1,10 @@ +# DocumentURLChunk + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/filechunk.md b/packages/azure/docs/models/filechunk.md similarity index 100% rename from packages/mistralai_azure/docs/models/filechunk.md rename to packages/azure/docs/models/filechunk.md diff --git a/packages/mistralai_azure/docs/models/format_.md b/packages/azure/docs/models/format_.md similarity index 100% rename from packages/mistralai_azure/docs/models/format_.md rename to packages/azure/docs/models/format_.md diff --git a/packages/mistralai_azure/docs/models/function.md b/packages/azure/docs/models/function.md similarity index 100% rename from 
packages/mistralai_azure/docs/models/function.md rename to packages/azure/docs/models/function.md diff --git a/packages/mistralai_azure/docs/models/functioncall.md b/packages/azure/docs/models/functioncall.md similarity index 100% rename from packages/mistralai_azure/docs/models/functioncall.md rename to packages/azure/docs/models/functioncall.md diff --git a/packages/mistralai_azure/docs/models/functionname.md b/packages/azure/docs/models/functionname.md similarity index 100% rename from packages/mistralai_azure/docs/models/functionname.md rename to packages/azure/docs/models/functionname.md diff --git a/packages/mistralai_azure/docs/models/httpvalidationerror.md b/packages/azure/docs/models/httpvalidationerror.md similarity index 100% rename from packages/mistralai_azure/docs/models/httpvalidationerror.md rename to packages/azure/docs/models/httpvalidationerror.md diff --git a/packages/azure/docs/models/imagedetail.md b/packages/azure/docs/models/imagedetail.md new file mode 100644 index 00000000..1e5ba3fd --- /dev/null +++ b/packages/azure/docs/models/imagedetail.md @@ -0,0 +1,10 @@ +# ImageDetail + + +## Values + +| Name | Value | +| ------ | ------ | +| `LOW` | low | +| `AUTO` | auto | +| `HIGH` | high | \ No newline at end of file diff --git a/packages/azure/docs/models/imageurl.md b/packages/azure/docs/models/imageurl.md new file mode 100644 index 00000000..6358e0ac --- /dev/null +++ b/packages/azure/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | [OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file 
diff --git a/packages/azure/docs/models/imageurlchunk.md b/packages/azure/docs/models/imageurlchunk.md new file mode 100644 index 00000000..db0c53d2 --- /dev/null +++ b/packages/azure/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Optional[Literal["image_url"]]* | :heavy_minus_sign: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md b/packages/azure/docs/models/imageurlunion.md similarity index 86% rename from packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md rename to packages/azure/docs/models/imageurlunion.md index 76738908..db97130f 100644 --- a/packages/mistralai_gcp/docs/models/imageurlchunkimageurl.md +++ b/packages/azure/docs/models/imageurlunion.md @@ -1,4 +1,4 @@ -# ImageURLChunkImageURL +# ImageURLUnion ## Supported Types diff --git a/packages/mistralai_azure/docs/models/jsonschema.md b/packages/azure/docs/models/jsonschema.md similarity index 100% rename from packages/mistralai_azure/docs/models/jsonschema.md rename to packages/azure/docs/models/jsonschema.md diff --git a/packages/mistralai_azure/docs/models/loc.md b/packages/azure/docs/models/loc.md similarity index 100% rename from packages/mistralai_azure/docs/models/loc.md rename to packages/azure/docs/models/loc.md diff --git a/packages/mistralai_azure/docs/models/mistralpromptmode.md b/packages/azure/docs/models/mistralpromptmode.md similarity index 100% rename from packages/mistralai_azure/docs/models/mistralpromptmode.md rename to 
packages/azure/docs/models/mistralpromptmode.md diff --git a/packages/mistralai_azure/docs/models/ocrimageobject.md b/packages/azure/docs/models/ocrimageobject.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrimageobject.md rename to packages/azure/docs/models/ocrimageobject.md diff --git a/packages/mistralai_azure/docs/models/ocrpagedimensions.md b/packages/azure/docs/models/ocrpagedimensions.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrpagedimensions.md rename to packages/azure/docs/models/ocrpagedimensions.md diff --git a/packages/mistralai_azure/docs/models/ocrpageobject.md b/packages/azure/docs/models/ocrpageobject.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrpageobject.md rename to packages/azure/docs/models/ocrpageobject.md diff --git a/packages/mistralai_azure/docs/models/ocrrequest.md b/packages/azure/docs/models/ocrrequest.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrrequest.md rename to packages/azure/docs/models/ocrrequest.md diff --git a/packages/mistralai_azure/docs/models/ocrresponse.md b/packages/azure/docs/models/ocrresponse.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrresponse.md rename to packages/azure/docs/models/ocrresponse.md diff --git a/packages/mistralai_azure/docs/models/ocrtableobject.md b/packages/azure/docs/models/ocrtableobject.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrtableobject.md rename to packages/azure/docs/models/ocrtableobject.md diff --git a/packages/mistralai_azure/docs/models/ocrusageinfo.md b/packages/azure/docs/models/ocrusageinfo.md similarity index 100% rename from packages/mistralai_azure/docs/models/ocrusageinfo.md rename to packages/azure/docs/models/ocrusageinfo.md diff --git a/packages/mistralai_azure/docs/models/prediction.md b/packages/azure/docs/models/prediction.md similarity index 100% rename from 
packages/mistralai_azure/docs/models/prediction.md rename to packages/azure/docs/models/prediction.md diff --git a/packages/azure/docs/models/referencechunk.md b/packages/azure/docs/models/referencechunk.md new file mode 100644 index 00000000..d847e248 --- /dev/null +++ b/packages/azure/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/responseformat.md b/packages/azure/docs/models/responseformat.md similarity index 100% rename from packages/mistralai_azure/docs/models/responseformat.md rename to packages/azure/docs/models/responseformat.md diff --git a/packages/mistralai_azure/docs/models/responseformats.md b/packages/azure/docs/models/responseformats.md similarity index 100% rename from packages/mistralai_azure/docs/models/responseformats.md rename to packages/azure/docs/models/responseformats.md diff --git a/packages/mistralai_azure/docs/models/security.md b/packages/azure/docs/models/security.md similarity index 100% rename from packages/mistralai_azure/docs/models/security.md rename to packages/azure/docs/models/security.md diff --git a/packages/mistralai_gcp/docs/models/systemmessage.md b/packages/azure/docs/models/systemmessage.md similarity index 88% rename from packages/mistralai_gcp/docs/models/systemmessage.md rename to packages/azure/docs/models/systemmessage.md index 0dba71c0..10bda10f 100644 --- a/packages/mistralai_gcp/docs/models/systemmessage.md +++ b/packages/azure/docs/models/systemmessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------- | 
---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/systemmessagecontent.md b/packages/azure/docs/models/systemmessagecontent.md similarity index 100% rename from packages/mistralai_azure/docs/models/systemmessagecontent.md rename to packages/azure/docs/models/systemmessagecontent.md diff --git a/packages/mistralai_azure/docs/models/systemmessagecontentchunks.md b/packages/azure/docs/models/systemmessagecontentchunks.md similarity index 100% rename from packages/mistralai_azure/docs/models/systemmessagecontentchunks.md rename to packages/azure/docs/models/systemmessagecontentchunks.md diff --git a/packages/mistralai_azure/docs/models/tableformat.md b/packages/azure/docs/models/tableformat.md similarity index 100% rename from packages/mistralai_azure/docs/models/tableformat.md rename to packages/azure/docs/models/tableformat.md diff --git a/packages/azure/docs/models/textchunk.md b/packages/azure/docs/models/textchunk.md new file mode 100644 index 00000000..b266619d --- /dev/null +++ b/packages/azure/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `type` | *Literal["text"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/packages/mistralai_azure/docs/models/thinkchunk.md b/packages/azure/docs/models/thinkchunk.md similarity index 91% rename from packages/mistralai_azure/docs/models/thinkchunk.md rename to packages/azure/docs/models/thinkchunk.md index 66b2e0cd..b07f598e 100644 --- a/packages/mistralai_azure/docs/models/thinkchunk.md +++ b/packages/azure/docs/models/thinkchunk.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | | `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | -| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. | -| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. 
| \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/thinking.md b/packages/azure/docs/models/thinking.md similarity index 100% rename from packages/mistralai_azure/docs/models/thinking.md rename to packages/azure/docs/models/thinking.md diff --git a/packages/mistralai_azure/docs/models/tool.md b/packages/azure/docs/models/tool.md similarity index 100% rename from packages/mistralai_azure/docs/models/tool.md rename to packages/azure/docs/models/tool.md diff --git a/packages/mistralai_azure/docs/models/toolcall.md b/packages/azure/docs/models/toolcall.md similarity index 100% rename from packages/mistralai_azure/docs/models/toolcall.md rename to packages/azure/docs/models/toolcall.md diff --git a/packages/mistralai_azure/docs/models/toolchoice.md b/packages/azure/docs/models/toolchoice.md similarity index 100% rename from packages/mistralai_azure/docs/models/toolchoice.md rename to packages/azure/docs/models/toolchoice.md diff --git a/packages/mistralai_azure/docs/models/toolchoiceenum.md b/packages/azure/docs/models/toolchoiceenum.md similarity index 100% rename from packages/mistralai_azure/docs/models/toolchoiceenum.md rename to packages/azure/docs/models/toolchoiceenum.md diff --git a/packages/mistralai_gcp/docs/models/toolmessage.md b/packages/azure/docs/models/toolmessage.md similarity index 92% rename from packages/mistralai_gcp/docs/models/toolmessage.md rename to packages/azure/docs/models/toolmessage.md index a54f4933..7201481e 100644 --- a/packages/mistralai_gcp/docs/models/toolmessage.md +++ b/packages/azure/docs/models/toolmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["tool"]* | 
:heavy_check_mark: | N/A | | `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | | `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessagecontent.md b/packages/azure/docs/models/toolmessagecontent.md similarity index 100% rename from packages/mistralai_azure/docs/models/toolmessagecontent.md rename to packages/azure/docs/models/toolmessagecontent.md diff --git a/packages/mistralai_azure/docs/models/tooltypes.md b/packages/azure/docs/models/tooltypes.md similarity index 100% rename from packages/mistralai_azure/docs/models/tooltypes.md rename to packages/azure/docs/models/tooltypes.md diff --git a/packages/mistralai_azure/docs/models/usageinfo.md b/packages/azure/docs/models/usageinfo.md similarity index 100% rename from packages/mistralai_azure/docs/models/usageinfo.md rename to packages/azure/docs/models/usageinfo.md diff --git a/packages/mistralai_gcp/docs/models/usermessage.md b/packages/azure/docs/models/usermessage.md similarity index 89% rename from packages/mistralai_gcp/docs/models/usermessage.md rename to packages/azure/docs/models/usermessage.md index 63b01310..e7a932ed 100644 --- a/packages/mistralai_gcp/docs/models/usermessage.md +++ b/packages/azure/docs/models/usermessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| 
`content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessagecontent.md b/packages/azure/docs/models/usermessagecontent.md similarity index 100% rename from packages/mistralai_azure/docs/models/usermessagecontent.md rename to packages/azure/docs/models/usermessagecontent.md diff --git a/packages/mistralai_azure/docs/models/utils/retryconfig.md b/packages/azure/docs/models/utils/retryconfig.md similarity index 100% rename from packages/mistralai_azure/docs/models/utils/retryconfig.md rename to packages/azure/docs/models/utils/retryconfig.md diff --git a/packages/mistralai_azure/docs/models/validationerror.md b/packages/azure/docs/models/validationerror.md similarity index 100% rename from packages/mistralai_azure/docs/models/validationerror.md rename to packages/azure/docs/models/validationerror.md diff --git a/packages/mistralai_azure/docs/sdks/chat/README.md b/packages/azure/docs/sdks/chat/README.md similarity index 95% rename from packages/mistralai_azure/docs/sdks/chat/README.md rename to packages/azure/docs/sdks/chat/README.md index 26d20bb4..560ffa83 100644 --- a/packages/mistralai_azure/docs/sdks/chat/README.md +++ b/packages/azure/docs/sdks/chat/README.md @@ -8,7 +8,7 @@ Chat Completion API. 
### Available Operations * [stream](#stream) - Stream chat completion -* [create](#create) - Chat Completion +* [complete](#complete) - Chat Completion ## stream @@ -17,21 +17,27 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage ```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) - res = s.chat.stream(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, -], model="azureai") +], model=AZURE_MODEL) if res is not None: for event in res: @@ -69,32 +75,38 @@ if res is not None: | --------------- | ----------- | ------------ | | models.SDKError | 4xx-5xx | */* | -## create +## complete Chat Completion ### Example Usage ```python -from mistralai_azure import MistralAzure +from mistralai.azure.client import MistralAzure import os +AZURE_API_KEY = os.environ["AZURE_API_KEY"] +AZURE_ENDPOINT = os.environ["AZURE_ENDPOINT"] +AZURE_MODEL = os.environ["AZURE_MODEL"] +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +# The SDK automatically injects api-version as a query parameter s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, ) - res = s.chat.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, -], model="azureai") +], model=AZURE_MODEL) if res is not None: # handle response - pass + print(res.choices[0].message.content) ``` diff --git a/packages/mistralai_azure/docs/sdks/mistralazure/README.md b/packages/azure/docs/sdks/mistralazure/README.md similarity index 100% rename from packages/mistralai_azure/docs/sdks/mistralazure/README.md rename to packages/azure/docs/sdks/mistralazure/README.md diff --git a/packages/mistralai_azure/py.typed b/packages/azure/py.typed similarity index 100% rename from packages/mistralai_azure/py.typed rename to packages/azure/py.typed diff --git a/packages/mistralai_azure/pylintrc b/packages/azure/pylintrc similarity index 100% rename from packages/mistralai_azure/pylintrc rename to packages/azure/pylintrc diff --git a/packages/mistralai_azure/pyproject.toml b/packages/azure/pyproject.toml similarity index 79% rename from packages/mistralai_azure/pyproject.toml rename to packages/azure/pyproject.toml index d129a290..3b9aa829 100644 --- a/packages/mistralai_azure/pyproject.toml +++ b/packages/azure/pyproject.toml @@ -1,6 +1,6 @@ [project] -name = "mistralai_azure" -version = "1.8.0" +name = "mistralai-azure" +version = "2.0.0a4" description = "Python Client SDK for the Mistral AI API in Azure." 
authors = [{ name = "Mistral" }] requires-python = ">=3.10" @@ -21,20 +21,20 @@ dev = [ ] [tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai_azure/py.typed"] +"*" = ["py.typed", "src/mistralai/azure/client/py.typed"] [tool.hatch.build.targets.sdist] -include = ["src/mistralai_azure"] +include = ["src/mistralai"] [tool.hatch.build.targets.sdist.force-include] "py.typed" = "py.typed" -"src/mistralai_azure/py.typed" = "src/mistralai_azure/py.typed" +"src/mistralai/azure/client/py.typed" = "src/mistralai/azure/client/py.typed" [tool.hatch.build.targets.wheel] -include = ["src/mistralai_azure"] +include = ["src/mistralai"] [tool.hatch.build.targets.wheel.sources] -"src/mistralai_azure" = "mistralai_azure" +"src" = "" [virtualenvs] in-project = true @@ -48,6 +48,7 @@ pythonpath = ["src"] [tool.mypy] disable_error_code = "misc" +namespace_packages = true explicit_package_bases = true mypy_path = "src" diff --git a/packages/mistralai_gcp/scripts/prepare_readme.py b/packages/azure/scripts/prepare_readme.py similarity index 96% rename from packages/mistralai_gcp/scripts/prepare_readme.py rename to packages/azure/scripts/prepare_readme.py index 6c4b9932..2b2577ea 100644 --- a/packages/mistralai_gcp/scripts/prepare_readme.py +++ b/packages/azure/scripts/prepare_readme.py @@ -10,7 +10,7 @@ GITHUB_URL = ( GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL ) - REPO_SUBDIR = "packages/mistralai_gcp" + REPO_SUBDIR = "packages/azure" # Ensure the subdirectory has a trailing slash if not REPO_SUBDIR.endswith("/"): REPO_SUBDIR += "/" diff --git a/packages/mistralai_azure/scripts/publish.sh b/packages/azure/scripts/publish.sh similarity index 100% rename from packages/mistralai_azure/scripts/publish.sh rename to packages/azure/scripts/publish.sh diff --git a/packages/mistralai_azure/src/mistralai_azure/__init__.py b/packages/azure/src/mistralai/azure/client/__init__.py similarity index 100% rename from 
packages/mistralai_azure/src/mistralai_azure/__init__.py rename to packages/azure/src/mistralai/azure/client/__init__.py diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py b/packages/azure/src/mistralai/azure/client/_hooks/__init__.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/_hooks/__init__.py rename to packages/azure/src/mistralai/azure/client/_hooks/__init__.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py b/packages/azure/src/mistralai/azure/client/_hooks/registration.py similarity index 70% rename from packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py rename to packages/azure/src/mistralai/azure/client/_hooks/registration.py index 304edfa2..d5a49cc3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/registration.py +++ b/packages/azure/src/mistralai/azure/client/_hooks/registration.py @@ -1,15 +1,12 @@ -from .custom_user_agent import CustomUserAgentHook from .types import Hooks + # This file is only ever generated once on the first generation and then is free to be modified. # Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them # in this file or in separate files in the hooks folder. 
-def init_hooks(hooks: Hooks): - # pylint: disable=unused-argument +def init_hooks(_hooks: Hooks) -> None: """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook with an instance of a hook that implements that specific Hook interface - Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance - """ - hooks.register_before_request_hook(CustomUserAgentHook()) + Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance""" diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py b/packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py rename to packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py index 37ff4e9f..2080681b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/sdkhooks.py +++ b/packages/azure/src/mistralai/azure/client/_hooks/sdkhooks.py @@ -13,7 +13,7 @@ ) from .registration import init_hooks from typing import List, Optional, Tuple -from mistralai_azure.httpclient import HttpClient +from mistralai.azure.client.httpclient import HttpClient class SDKHooks(Hooks): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py b/packages/azure/src/mistralai/azure/client/_hooks/types.py similarity index 95% rename from packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py rename to packages/azure/src/mistralai/azure/client/_hooks/types.py index f8088f4c..3e4e3955 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/types.py +++ b/packages/azure/src/mistralai/azure/client/_hooks/types.py @@ -2,8 +2,8 @@ from abc import ABC, abstractmethod import httpx -from mistralai_gcp.httpclient import HttpClient -from mistralai_gcp.sdkconfiguration import SDKConfiguration +from mistralai.azure.client.httpclient import HttpClient +from mistralai.azure.client.sdkconfiguration import SDKConfiguration from typing 
import Any, Callable, List, Optional, Tuple, Union diff --git a/packages/mistralai_azure/src/mistralai_azure/_version.py b/packages/azure/src/mistralai/azure/client/_version.py similarity index 69% rename from packages/mistralai_azure/src/mistralai_azure/_version.py rename to packages/azure/src/mistralai/azure/client/_version.py index 79277f9a..4448d2a0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_version.py +++ b/packages/azure/src/mistralai/azure/client/_version.py @@ -2,11 +2,11 @@ import importlib.metadata -__title__: str = "mistralai_azure" -__version__: str = "1.8.1" +__title__: str = "mistralai-azure" +__version__: str = "2.0.0a4" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 1.8.1 2.794.1 1.0.0 mistralai_azure" +__user_agent__: str = "speakeasy-sdk/python 2.0.0a4 2.794.1 1.0.0 mistralai-azure" try: if __package__ is not None: diff --git a/packages/mistralai_azure/src/mistralai_azure/basesdk.py b/packages/azure/src/mistralai/azure/client/basesdk.py similarity index 98% rename from packages/mistralai_azure/src/mistralai_azure/basesdk.py rename to packages/azure/src/mistralai/azure/client/basesdk.py index 89f7dc49..b0391ac0 100644 --- a/packages/mistralai_azure/src/mistralai_azure/basesdk.py +++ b/packages/azure/src/mistralai/azure/client/basesdk.py @@ -2,13 +2,17 @@ from .sdkconfiguration import SDKConfiguration import httpx -from mistralai_azure import models, utils -from mistralai_azure._hooks import ( +from mistralai.azure.client import models, utils +from mistralai.azure.client._hooks import ( AfterErrorContext, AfterSuccessContext, BeforeRequestContext, ) -from mistralai_azure.utils import RetryConfig, SerializedRequestBody, get_body_content +from mistralai.azure.client.utils import ( + RetryConfig, + SerializedRequestBody, + get_body_content, +) from typing import Callable, List, Mapping, Optional, Tuple from urllib.parse import parse_qs, urlparse diff --git 
a/packages/mistralai_azure/src/mistralai_azure/chat.py b/packages/azure/src/mistralai/azure/client/chat.py similarity index 96% rename from packages/mistralai_azure/src/mistralai_azure/chat.py rename to packages/azure/src/mistralai/azure/client/chat.py index 10bb247f..3348bf47 100644 --- a/packages/mistralai_azure/src/mistralai_azure/chat.py +++ b/packages/azure/src/mistralai/azure/client/chat.py @@ -1,11 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai_azure import models, utils -from mistralai_azure._hooks import HookContext -from mistralai_azure.types import OptionalNullable, UNSET -from mistralai_azure.utils import eventstreaming -from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response +from mistralai.azure.client import models, utils +from mistralai.azure.client._hooks import HookContext +from mistralai.azure.client.types import OptionalNullable, UNSET +from mistralai.azure.client.utils import eventstreaming +from mistralai.azure.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Dict, List, Mapping, Optional, Union @@ -15,13 +15,21 @@ class Chat(BaseSDK): def stream( self, *, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: 
Optional[ @@ -97,7 +105,9 @@ def stream( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -185,13 +195,21 @@ def stream( async def stream_async( self, *, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ @@ -267,7 +285,9 @@ async def stream_async( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -356,8 +376,8 @@ def complete( self, *, messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, @@ -444,7 +464,7 @@ def complete( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, 
List[models.ChatCompletionRequestMessages] + messages, List[models.ChatCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -526,8 +546,8 @@ async def complete_async( self, *, messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], model: Optional[str] = "azureai", temperature: OptionalNullable[float] = UNSET, @@ -614,7 +634,7 @@ async def complete_async( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionRequestMessages] + messages, List[models.ChatCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] diff --git a/packages/mistralai_azure/src/mistralai_azure/httpclient.py b/packages/azure/src/mistralai/azure/client/httpclient.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/httpclient.py rename to packages/azure/src/mistralai/azure/client/httpclient.py diff --git a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py b/packages/azure/src/mistralai/azure/client/models/__init__.py similarity index 85% rename from packages/mistralai_azure/src/mistralai_azure/models/__init__.py rename to packages/azure/src/mistralai/azure/client/models/__init__.py index 9baa3ff1..51db6a38 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/__init__.py +++ b/packages/azure/src/mistralai/azure/client/models/__init__.py @@ -11,7 +11,6 @@ AssistantMessage, AssistantMessageContent, AssistantMessageContentTypedDict, - AssistantMessageRole, AssistantMessageTypedDict, ) from .chatcompletionchoice import ( @@ -21,8 +20,8 @@ ) from .chatcompletionrequest import ( ChatCompletionRequest, - ChatCompletionRequestMessages, - ChatCompletionRequestMessagesTypedDict, + 
ChatCompletionRequestMessage, + ChatCompletionRequestMessageTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, @@ -35,33 +34,29 @@ ) from .chatcompletionstreamrequest import ( ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessage, + ChatCompletionStreamRequestMessageTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, ChatCompletionStreamRequestToolChoice, ChatCompletionStreamRequestToolChoiceTypedDict, ChatCompletionStreamRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, ) from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completionevent import CompletionEvent, CompletionEventTypedDict from .completionresponsestreamchoice import ( CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict, - FinishReason, ) from .contentchunk import ContentChunk, ContentChunkTypedDict from .deltamessage import ( - Content, - ContentTypedDict, DeltaMessage, + DeltaMessageContent, + DeltaMessageContentTypedDict, DeltaMessageTypedDict, ) - from .documenturlchunk import ( - DocumentURLChunk, - DocumentURLChunkType, - DocumentURLChunkTypedDict, - ) + from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .filechunk import FileChunk, FileChunkTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( @@ -72,13 +67,13 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imagedetail import ImageDetail from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, ImageURLChunkTypedDict, + ImageURLUnion, + ImageURLUnionTypedDict, ) from .jsonschema import JSONSchema, JSONSchemaTypedDict from .mistralpromptmode import 
MistralPromptMode @@ -97,18 +92,13 @@ from .ocrtableobject import Format, OCRTableObject, OCRTableObjectTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict from .prediction import Prediction, PredictionTypedDict - from .referencechunk import ( - ReferenceChunk, - ReferenceChunkType, - ReferenceChunkTypedDict, - ) + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats from .responsevalidationerror import ResponseValidationError from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( - Role, SystemMessage, SystemMessageContent, SystemMessageContentTypedDict, @@ -118,14 +108,8 @@ SystemMessageContentChunks, SystemMessageContentChunksTypedDict, ) - from .textchunk import TextChunk, TextChunkTypedDict, Type - from .thinkchunk import ( - ThinkChunk, - ThinkChunkType, - ThinkChunkTypedDict, - Thinking, - ThinkingTypedDict, - ) + from .textchunk import TextChunk, TextChunkTypedDict + from .thinkchunk import ThinkChunk, ThinkChunkTypedDict, Thinking, ThinkingTypedDict from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict @@ -134,7 +118,6 @@ ToolMessage, ToolMessageContent, ToolMessageContentTypedDict, - ToolMessageRole, ToolMessageTypedDict, ) from .tooltypes import ToolTypes @@ -143,7 +126,6 @@ UserMessage, UserMessageContent, UserMessageContentTypedDict, - UserMessageRole, UserMessageTypedDict, ) from .validationerror import ( @@ -159,14 +141,13 @@ "AssistantMessage", "AssistantMessageContent", "AssistantMessageContentTypedDict", - "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", - "ChatCompletionRequestMessages", - "ChatCompletionRequestMessagesTypedDict", + 
"ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", @@ -175,6 +156,10 @@ "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestToolChoiceTypedDict", "ChatCompletionStreamRequestTypedDict", @@ -183,21 +168,20 @@ "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", - "Content", "ContentChunk", "ContentChunkTypedDict", - "ContentTypedDict", "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", "DeltaMessageTypedDict", "Document", "DocumentTypedDict", "DocumentURLChunk", - "DocumentURLChunkType", "DocumentURLChunkTypedDict", "FileChunk", "FileChunkTypedDict", - "FinishReason", "Format", "Function", "FunctionCall", @@ -207,19 +191,17 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "ImageDetail", "ImageURL", "ImageURLChunk", - "ImageURLChunkImageURL", - "ImageURLChunkImageURLTypedDict", - "ImageURLChunkType", "ImageURLChunkTypedDict", "ImageURLTypedDict", + "ImageURLUnion", + "ImageURLUnionTypedDict", "JSONSchema", "JSONSchemaTypedDict", "Loc", "LocTypedDict", - "Messages", - "MessagesTypedDict", "MistralAzureError", "MistralPromptMode", "NoResponseError", @@ -240,18 +222,14 @@ "Prediction", "PredictionTypedDict", "ReferenceChunk", - "ReferenceChunkType", "ReferenceChunkTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "ResponseValidationError", - "Role", "SDKError", "Security", "SecurityTypedDict", - "Stop", - "StopTypedDict", "SystemMessage", "SystemMessageContent", 
"SystemMessageContentChunks", @@ -262,7 +240,6 @@ "TextChunk", "TextChunkTypedDict", "ThinkChunk", - "ThinkChunkType", "ThinkChunkTypedDict", "Thinking", "ThinkingTypedDict", @@ -275,17 +252,14 @@ "ToolMessage", "ToolMessageContent", "ToolMessageContentTypedDict", - "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "ToolTypes", - "Type", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", - "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", @@ -295,14 +269,13 @@ "AssistantMessage": ".assistantmessage", "AssistantMessageContent": ".assistantmessage", "AssistantMessageContentTypedDict": ".assistantmessage", - "AssistantMessageRole": ".assistantmessage", "AssistantMessageTypedDict": ".assistantmessage", "ChatCompletionChoice": ".chatcompletionchoice", "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", "ChatCompletionRequest": ".chatcompletionrequest", - "ChatCompletionRequestMessages": ".chatcompletionrequest", - "ChatCompletionRequestMessagesTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", "ChatCompletionRequestStop": ".chatcompletionrequest", "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", "ChatCompletionRequestToolChoice": ".chatcompletionrequest", @@ -311,28 +284,27 @@ "ChatCompletionResponse": ".chatcompletionresponse", "ChatCompletionResponseTypedDict": ".chatcompletionresponse", "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", 
"ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", - "Messages": ".chatcompletionstreamrequest", - "MessagesTypedDict": ".chatcompletionstreamrequest", - "Stop": ".chatcompletionstreamrequest", - "StopTypedDict": ".chatcompletionstreamrequest", "CompletionChunk": ".completionchunk", "CompletionChunkTypedDict": ".completionchunk", "CompletionEvent": ".completionevent", "CompletionEventTypedDict": ".completionevent", "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", - "FinishReason": ".completionresponsestreamchoice", "ContentChunk": ".contentchunk", "ContentChunkTypedDict": ".contentchunk", - "Content": ".deltamessage", - "ContentTypedDict": ".deltamessage", "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": ".deltamessage", "DeltaMessageTypedDict": ".deltamessage", "DocumentURLChunk": ".documenturlchunk", - "DocumentURLChunkType": ".documenturlchunk", "DocumentURLChunkTypedDict": ".documenturlchunk", "FileChunk": ".filechunk", "FileChunkTypedDict": ".filechunk", @@ -346,13 +318,13 @@ "FunctionNameTypedDict": ".functionname", "HTTPValidationError": ".httpvalidationerror", "HTTPValidationErrorData": ".httpvalidationerror", + "ImageDetail": ".imagedetail", "ImageURL": ".imageurl", "ImageURLTypedDict": ".imageurl", "ImageURLChunk": ".imageurlchunk", - "ImageURLChunkImageURL": ".imageurlchunk", - "ImageURLChunkImageURLTypedDict": ".imageurlchunk", - "ImageURLChunkType": ".imageurlchunk", "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": 
".jsonschema", "MistralPromptMode": ".mistralpromptmode", @@ -378,7 +350,6 @@ "Prediction": ".prediction", "PredictionTypedDict": ".prediction", "ReferenceChunk": ".referencechunk", - "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", @@ -387,7 +358,6 @@ "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", - "Role": ".systemmessage", "SystemMessage": ".systemmessage", "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": ".systemmessage", @@ -396,9 +366,7 @@ "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", "TextChunk": ".textchunk", "TextChunkTypedDict": ".textchunk", - "Type": ".textchunk", "ThinkChunk": ".thinkchunk", - "ThinkChunkType": ".thinkchunk", "ThinkChunkTypedDict": ".thinkchunk", "Thinking": ".thinkchunk", "ThinkingTypedDict": ".thinkchunk", @@ -412,7 +380,6 @@ "ToolMessage": ".toolmessage", "ToolMessageContent": ".toolmessage", "ToolMessageContentTypedDict": ".toolmessage", - "ToolMessageRole": ".toolmessage", "ToolMessageTypedDict": ".toolmessage", "ToolTypes": ".tooltypes", "UsageInfo": ".usageinfo", @@ -420,7 +387,6 @@ "UserMessage": ".usermessage", "UserMessageContent": ".usermessage", "UserMessageContentTypedDict": ".usermessage", - "UserMessageRole": ".usermessage", "UserMessageTypedDict": ".usermessage", "Loc": ".validationerror", "LocTypedDict": ".validationerror", diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py similarity index 80% rename from packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py rename to packages/azure/src/mistralai/azure/client/models/assistantmessage.py index 17d740b6..f5793f94 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/assistantmessage.py +++ 
b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py @@ -3,16 +3,19 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_gcp.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) +from mistralai.azure.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict AssistantMessageContentTypedDict = TypeAliasType( @@ -25,18 +28,22 @@ ) -AssistantMessageRole = Literal["assistant",] - - class AssistantMessageTypedDict(TypedDict): + role: Literal["assistant"] content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): + ROLE: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @@ -44,11 +51,9 @@ class AssistantMessage(BaseModel): prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: Optional[AssistantMessageRole] = "assistant" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["content", "tool_calls", "prefix", "role"] + optional_fields = ["role", "content", "tool_calls", "prefix"] nullable_fields = ["content", "tool_calls"] null_default_fields = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionchoice.py similarity index 91% rename from packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py rename to packages/azure/src/mistralai/azure/client/models/chatcompletionchoice.py index 7c6eb933..67b5ba69 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionchoice.py +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionchoice.py @@ -2,7 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai_azure.types import BaseModel, UnrecognizedStr +from mistralai.azure.client.types import BaseModel, UnrecognizedStr from typing import Literal, Union from typing_extensions import TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py rename to packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py index a7b095f3..92179095 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionrequest.py +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py @@ -11,14 +11,14 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage 
import UserMessage, UserMessageTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) -from mistralai_azure.utils import get_discriminator +from mistralai.azure.client.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -36,8 +36,8 @@ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestMessagesTypedDict = TypeAliasType( - "ChatCompletionRequestMessagesTypedDict", +ChatCompletionRequestMessageTypedDict = TypeAliasType( + "ChatCompletionRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -47,7 +47,7 @@ ) -ChatCompletionRequestMessages = Annotated[ +ChatCompletionRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -72,7 +72,7 @@ class ChatCompletionRequestTypedDict(TypedDict): - messages: List[ChatCompletionRequestMessagesTypedDict] + messages: List[ChatCompletionRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: NotRequired[str] r"""The ID of the model to use for this request.""" @@ -112,7 +112,7 @@ class ChatCompletionRequestTypedDict(TypedDict): class ChatCompletionRequest(BaseModel): - messages: List[ChatCompletionRequestMessages] + messages: List[ChatCompletionRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: Optional[str] = "azureai" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionresponse.py similarity index 92% rename from 
packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py rename to packages/azure/src/mistralai/azure/client/models/chatcompletionresponse.py index 7a66f322..d41f9c6f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionresponse.py +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionresponse.py @@ -3,7 +3,7 @@ from __future__ import annotations from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import List from typing_extensions import TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py similarity index 94% rename from packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py rename to packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py index 96cd631b..be21eed2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/chatcompletionstreamrequest.py +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py @@ -11,29 +11,33 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) -from mistralai_azure.utils import get_discriminator +from mistralai.azure.client.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -StopTypedDict = TypeAliasType("StopTypedDict", Union[str, 
List[str]]) +ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -Stop = TypeAliasType("Stop", Union[str, List[str]]) +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = TypeAliasType( - "MessagesTypedDict", +ChatCompletionStreamRequestMessageTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -43,7 +47,7 @@ ) -Messages = Annotated[ +ChatCompletionStreamRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -68,7 +72,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): - messages: List[MessagesTypedDict] + messages: List[ChatCompletionStreamRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: NotRequired[str] r"""The ID of the model to use for this request.""" @@ -79,7 +83,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" stream: NotRequired[bool] - stop: NotRequired[StopTypedDict] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. 
If set, different calls will generate deterministic results.""" @@ -107,7 +111,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): class ChatCompletionStreamRequest(BaseModel): - messages: List[Messages] + messages: List[ChatCompletionStreamRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" model: Optional[str] = "azureai" @@ -124,7 +128,7 @@ class ChatCompletionStreamRequest(BaseModel): stream: Optional[bool] = True - stop: Optional[Stop] = None + stop: Optional[ChatCompletionStreamRequestStop] = None r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: OptionalNullable[int] = UNSET diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py b/packages/azure/src/mistralai/azure/client/models/completionchunk.py similarity index 94% rename from packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py rename to packages/azure/src/mistralai/azure/client/models/completionchunk.py index ca002f52..b94284b2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/completionchunk.py @@ -6,7 +6,7 @@ CompletionResponseStreamChoiceTypedDict, ) from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_gcp.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import List, Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py b/packages/azure/src/mistralai/azure/client/models/completionevent.py similarity index 87% rename from packages/mistralai_azure/src/mistralai_azure/models/completionevent.py rename to packages/azure/src/mistralai/azure/client/models/completionevent.py index 5a2039c2..c4b27287 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionevent.py +++ 
b/packages/azure/src/mistralai/azure/client/models/completionevent.py @@ -2,7 +2,7 @@ from __future__ import annotations from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing_extensions import TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py b/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py similarity index 82% rename from packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py rename to packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py index ec9df528..2a4d053f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionresponsestreamchoice.py +++ b/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py @@ -2,13 +2,18 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from mistralai.azure.client.types import ( + BaseModel, + Nullable, + UNSET_SENTINEL, + UnrecognizedStr, +) from pydantic import model_serializer from typing import Literal, Union from typing_extensions import TypedDict -FinishReason = Union[ +CompletionResponseStreamChoiceFinishReason = Union[ Literal[ "stop", "length", @@ -22,7 +27,7 @@ class CompletionResponseStreamChoiceTypedDict(TypedDict): index: int delta: DeltaMessageTypedDict - finish_reason: Nullable[FinishReason] + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] class CompletionResponseStreamChoice(BaseModel): @@ -30,7 +35,7 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Nullable[FinishReason] + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] @model_serializer(mode="wrap") def serialize_model(self, handler): diff 
--git a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py b/packages/azure/src/mistralai/azure/client/models/contentchunk.py similarity index 93% rename from packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py rename to packages/azure/src/mistralai/azure/client/models/contentchunk.py index e6a3e24a..0f09f767 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/contentchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/contentchunk.py @@ -4,7 +4,7 @@ from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai_azure.utils import get_discriminator +from mistralai.azure.client.utils import get_discriminator from pydantic import Discriminator, Tag from typing import Union from typing_extensions import Annotated, TypeAliasType diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py b/packages/azure/src/mistralai/azure/client/models/deltamessage.py similarity index 81% rename from packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py rename to packages/azure/src/mistralai/azure/client/models/deltamessage.py index 1801ac76..2c01feae 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/deltamessage.py +++ b/packages/azure/src/mistralai/azure/client/models/deltamessage.py @@ -3,7 +3,7 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_gcp.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, @@ -15,24 +15,26 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ContentTypedDict = TypeAliasType( - "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] +DeltaMessageContentTypedDict = TypeAliasType( + "DeltaMessageContentTypedDict", Union[str, 
List[ContentChunkTypedDict]] ) -Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) +DeltaMessageContent = TypeAliasType( + "DeltaMessageContent", Union[str, List[ContentChunk]] +) class DeltaMessageTypedDict(TypedDict): role: NotRequired[Nullable[str]] - content: NotRequired[Nullable[ContentTypedDict]] + content: NotRequired[Nullable[DeltaMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: OptionalNullable[str] = UNSET - content: OptionalNullable[Content] = UNSET + content: OptionalNullable[DeltaMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET diff --git a/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py similarity index 72% rename from packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py rename to packages/azure/src/mistralai/azure/client/models/documenturlchunk.py index ea8d5625..345bafc2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/documenturlchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py @@ -1,39 +1,45 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) +from mistralai.azure.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentURLChunkType = Literal["document_url",] +from typing_extensions import Annotated, NotRequired, TypedDict class DocumentURLChunkTypedDict(TypedDict): document_url: str + type: Literal["document_url"] document_name: NotRequired[Nullable[str]] r"""The filename of the document""" - type: NotRequired[DocumentURLChunkType] class DocumentURLChunk(BaseModel): document_url: str + TYPE: Annotated[ + Annotated[ + Optional[Literal["document_url"]], + AfterValidator(validate_const("document_url")), + ], + pydantic.Field(alias="type"), + ] = "document_url" + document_name: OptionalNullable[str] = UNSET r"""The filename of the document""" - type: Optional[DocumentURLChunkType] = "document_url" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["document_name", "type"] + optional_fields = ["type", "document_name"] nullable_fields = ["document_name"] null_default_fields = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/filechunk.py b/packages/azure/src/mistralai/azure/client/models/filechunk.py similarity index 83% rename from packages/mistralai_azure/src/mistralai_azure/models/filechunk.py rename to packages/azure/src/mistralai/azure/client/models/filechunk.py index 2c3edc07..829f03d8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/filechunk.py +++ b/packages/azure/src/mistralai/azure/client/models/filechunk.py @@ -1,8 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_const +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const import pydantic from pydantic.functional_validators import AfterValidator from typing import Literal, Optional diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py b/packages/azure/src/mistralai/azure/client/models/function.py similarity index 90% rename from packages/mistralai_gcp/src/mistralai_gcp/models/function.py rename to packages/azure/src/mistralai/azure/client/models/function.py index 7ad1ae64..f4edce0f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/function.py +++ b/packages/azure/src/mistralai/azure/client/models/function.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import Any, Dict, Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py b/packages/azure/src/mistralai/azure/client/models/functioncall.py similarity index 91% rename from packages/mistralai_azure/src/mistralai_azure/models/functioncall.py rename to packages/azure/src/mistralai/azure/client/models/functioncall.py index dd93c462..d476792c 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functioncall.py +++ b/packages/azure/src/mistralai/azure/client/models/functioncall.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import Any, Dict, Union from typing_extensions import TypeAliasType, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py b/packages/azure/src/mistralai/azure/client/models/functionname.py similarity index 89% rename from packages/mistralai_azure/src/mistralai_azure/models/functionname.py rename to packages/azure/src/mistralai/azure/client/models/functionname.py index b55c82af..839e0d55 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/functionname.py +++ b/packages/azure/src/mistralai/azure/client/models/functionname.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing_extensions import TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py b/packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py similarity index 87% rename from packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py rename to packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py index 56607d94..40bccddc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/httpvalidationerror.py +++ b/packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py @@ -4,8 +4,8 @@ from .validationerror import ValidationError from dataclasses import dataclass, field import httpx -from mistralai_azure.models import MistralAzureError -from mistralai_azure.types import BaseModel +from mistralai.azure.client.models import MistralAzureError +from mistralai.azure.client.types import BaseModel from typing import List, Optional diff --git 
a/packages/azure/src/mistralai/azure/client/models/imagedetail.py b/packages/azure/src/mistralai/azure/client/models/imagedetail.py new file mode 100644 index 00000000..2d074cee --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/imagedetail.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import UnrecognizedStr +from typing import Literal, Union + + +ImageDetail = Union[ + Literal[ + "low", + "auto", + "high", + ], + UnrecognizedStr, +] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py b/packages/azure/src/mistralai/azure/client/models/imageurl.py similarity index 87% rename from packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py rename to packages/azure/src/mistralai/azure/client/models/imageurl.py index 20d4ba77..b3c705e3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurl.py +++ b/packages/azure/src/mistralai/azure/client/models/imageurl.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import ( +from .imagedetail import ImageDetail +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, @@ -14,13 +15,13 @@ class ImageURLTypedDict(TypedDict): url: str - detail: NotRequired[Nullable[str]] + detail: NotRequired[Nullable[ImageDetail]] class ImageURL(BaseModel): url: str - detail: OptionalNullable[str] = UNSET + detail: OptionalNullable[ImageDetail] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py b/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py new file mode 100644 index 00000000..ee6de50f --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py @@ -0,0 +1,38 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ImageURLUnionTypedDict = TypeAliasType( + "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnionTypedDict + type: Literal["image_url"] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnion + + TYPE: Annotated[ + Annotated[ + Optional[Literal["image_url"]], AfterValidator(validate_const("image_url")) + ], + 
pydantic.Field(alias="type"), + ] = "image_url" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py b/packages/azure/src/mistralai/azure/client/models/jsonschema.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py rename to packages/azure/src/mistralai/azure/client/models/jsonschema.py index 0f7563fc..5aaa490a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/jsonschema.py +++ b/packages/azure/src/mistralai/azure/client/models/jsonschema.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py b/packages/azure/src/mistralai/azure/client/models/mistralazureerror.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/models/mistralazureerror.py rename to packages/azure/src/mistralai/azure/client/models/mistralazureerror.py diff --git a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py b/packages/azure/src/mistralai/azure/client/models/mistralpromptmode.py similarity index 89% rename from packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py rename to packages/azure/src/mistralai/azure/client/models/mistralpromptmode.py index 77230b7e..26e7adbd 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/mistralpromptmode.py +++ b/packages/azure/src/mistralai/azure/client/models/mistralpromptmode.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import UnrecognizedStr +from mistralai.azure.client.types import UnrecognizedStr from typing import Literal, Union diff --git a/packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py b/packages/azure/src/mistralai/azure/client/models/no_response_error.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/models/no_response_error.py rename to packages/azure/src/mistralai/azure/client/models/no_response_error.py diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrimageobject.py b/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py similarity index 98% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrimageobject.py rename to packages/azure/src/mistralai/azure/client/models/ocrimageobject.py index 9d0dd01d..38e9d3e4 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrimageobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrpagedimensions.py b/packages/azure/src/mistralai/azure/client/models/ocrpagedimensions.py similarity index 91% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrpagedimensions.py rename to packages/azure/src/mistralai/azure/client/models/ocrpagedimensions.py index efb62a58..12858da9 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrpagedimensions.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrpagedimensions.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing_extensions import TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py b/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py similarity index 98% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py rename to packages/azure/src/mistralai/azure/client/models/ocrpageobject.py index e9571800..5fb821c1 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrpageobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py @@ -4,7 +4,7 @@ from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrtableobject import OCRTableObject, OCRTableObjectTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py b/packages/azure/src/mistralai/azure/client/models/ocrrequest.py similarity index 99% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py rename to packages/azure/src/mistralai/azure/client/models/ocrrequest.py index e9c23afc..fece2713 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrrequest.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrrequest.py @@ -5,7 +5,7 @@ from .filechunk import FileChunk, FileChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrresponse.py 
b/packages/azure/src/mistralai/azure/client/models/ocrresponse.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrresponse.py rename to packages/azure/src/mistralai/azure/client/models/ocrresponse.py index 3e43fa8e..787289fa 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrresponse.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrresponse.py @@ -3,7 +3,7 @@ from __future__ import annotations from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict from .ocrusageinfo import OCRUsageInfo, OCRUsageInfoTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrtableobject.py b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py similarity index 78% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrtableobject.py rename to packages/azure/src/mistralai/azure/client/models/ocrtableobject.py index 189f059e..3e3c2583 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrtableobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py @@ -1,15 +1,18 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel, UnrecognizedStr import pydantic -from typing import Literal +from typing import Literal, Union from typing_extensions import Annotated, TypedDict -Format = Literal[ - "markdown", - "html", +Format = Union[ + Literal[ + "markdown", + "html", + ], + UnrecognizedStr, ] r"""Format of the table""" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/ocrusageinfo.py b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/models/ocrusageinfo.py rename to packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py index 1f5c9f1b..e2ceba35 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/ocrusageinfo.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py b/packages/azure/src/mistralai/azure/client/models/prediction.py similarity index 89% rename from packages/mistralai_azure/src/mistralai_azure/models/prediction.py rename to packages/azure/src/mistralai/azure/client/models/prediction.py index b23a935c..6b8d6480 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/prediction.py +++ b/packages/azure/src/mistralai/azure/client/models/prediction.py @@ -1,8 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import validate_const +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const import pydantic from pydantic.functional_validators import AfterValidator from typing import Literal, Optional diff --git a/packages/azure/src/mistralai/azure/client/models/referencechunk.py b/packages/azure/src/mistralai/azure/client/models/referencechunk.py new file mode 100644 index 00000000..e0bcb06b --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/referencechunk.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: Literal["reference"] + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + TYPE: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py b/packages/azure/src/mistralai/azure/client/models/responseformat.py similarity index 98% rename from packages/mistralai_azure/src/mistralai_azure/models/responseformat.py rename to packages/azure/src/mistralai/azure/client/models/responseformat.py index c989f3a4..39fb03a2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responseformat.py +++ b/packages/azure/src/mistralai/azure/client/models/responseformat.py @@ -3,7 +3,7 @@ from __future__ import annotations 
from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responseformats.py b/packages/azure/src/mistralai/azure/client/models/responseformats.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/models/responseformats.py rename to packages/azure/src/mistralai/azure/client/models/responseformats.py diff --git a/packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py b/packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py similarity index 92% rename from packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py rename to packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py index a33954cc..cbdffcbb 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/responsevalidationerror.py +++ b/packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses import dataclass -from mistralai_azure.models import MistralAzureError +from mistralai.azure.client.models import MistralAzureError @dataclass(unsafe_hash=True) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py b/packages/azure/src/mistralai/azure/client/models/sdkerror.py similarity index 95% rename from packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py rename to packages/azure/src/mistralai/azure/client/models/sdkerror.py index 216d7f8f..a1e9aaca 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/sdkerror.py +++ b/packages/azure/src/mistralai/azure/client/models/sdkerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses import dataclass -from mistralai_azure.models import MistralAzureError +from mistralai.azure.client.models import 
MistralAzureError MAX_MESSAGE_LEN = 10_000 diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py b/packages/azure/src/mistralai/azure/client/models/security.py similarity index 80% rename from packages/mistralai_gcp/src/mistralai_gcp/models/security.py rename to packages/azure/src/mistralai/azure/client/models/security.py index 38574942..9b83ba98 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/security.py +++ b/packages/azure/src/mistralai/azure/client/models/security.py @@ -1,8 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import FieldMetadata, SecurityMetadata +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import FieldMetadata, SecurityMetadata from typing_extensions import Annotated, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py b/packages/azure/src/mistralai/azure/client/models/systemmessage.py similarity index 57% rename from packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py rename to packages/azure/src/mistralai/azure/client/models/systemmessage.py index d74bdf32..38c280c8 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessage.py +++ b/packages/azure/src/mistralai/azure/client/models/systemmessage.py @@ -5,9 +5,12 @@ SystemMessageContentChunks, SystemMessageContentChunksTypedDict, ) -from mistralai_gcp.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict 
SystemMessageContentTypedDict = TypeAliasType( @@ -21,15 +24,15 @@ ) -Role = Literal["system",] - - class SystemMessageTypedDict(TypedDict): content: SystemMessageContentTypedDict - role: NotRequired[Role] + role: Literal["system"] class SystemMessage(BaseModel): content: SystemMessageContent - role: Optional[Role] = "system" + ROLE: Annotated[ + Annotated[Literal["system"], AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py similarity index 66% rename from packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py rename to packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py index e0b5bbc3..225f38b7 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/systemmessagecontentchunks.py +++ b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py @@ -3,8 +3,7 @@ from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from mistralai_gcp.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType @@ -16,6 +15,5 @@ SystemMessageContentChunks = Annotated[ - Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[TextChunk, ThinkChunk], Field(discriminator="TYPE") ] diff --git a/packages/azure/src/mistralai/azure/client/models/textchunk.py b/packages/azure/src/mistralai/azure/client/models/textchunk.py new file mode 100644 index 00000000..e513c143 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/models/textchunk.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class TextChunkTypedDict(TypedDict): + text: str + type: Literal["text"] + + +class TextChunk(BaseModel): + text: str + + TYPE: Annotated[ + Annotated[Literal["text"], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py similarity index 65% rename from packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py rename to packages/azure/src/mistralai/azure/client/models/thinkchunk.py index b88c0cb5..e769399f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/thinkchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py @@ -3,9 +3,12 @@ from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai_gcp.types import BaseModel +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ThinkingTypedDict = TypeAliasType( @@ -16,20 +19,20 @@ Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) -ThinkChunkType = Literal["thinking",] - - class ThinkChunkTypedDict(TypedDict): thinking: List[ThinkingTypedDict] + type: Literal["thinking"] closed: 
NotRequired[bool] r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - type: NotRequired[ThinkChunkType] class ThinkChunk(BaseModel): thinking: List[Thinking] + TYPE: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" + closed: Optional[bool] = None r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - - type: Optional[ThinkChunkType] = "thinking" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tool.py b/packages/azure/src/mistralai/azure/client/models/tool.py similarity index 89% rename from packages/mistralai_azure/src/mistralai_azure/models/tool.py rename to packages/azure/src/mistralai/azure/client/models/tool.py index c91deec2..169305bc 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/tool.py +++ b/packages/azure/src/mistralai/azure/client/models/tool.py @@ -3,7 +3,7 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py b/packages/azure/src/mistralai/azure/client/models/toolcall.py similarity index 92% rename from packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py rename to packages/azure/src/mistralai/azure/client/models/toolcall.py index 23ef157a..a589b1b3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolcall.py +++ b/packages/azure/src/mistralai/azure/client/models/toolcall.py @@ -3,7 +3,7 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai_gcp.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import 
Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py b/packages/azure/src/mistralai/azure/client/models/toolchoice.py similarity index 93% rename from packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py rename to packages/azure/src/mistralai/azure/client/models/toolchoice.py index 93b4b7fe..1f623222 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolchoice.py +++ b/packages/azure/src/mistralai/azure/client/models/toolchoice.py @@ -3,7 +3,7 @@ from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py b/packages/azure/src/mistralai/azure/client/models/toolchoiceenum.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/models/toolchoiceenum.py rename to packages/azure/src/mistralai/azure/client/models/toolchoiceenum.py diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py b/packages/azure/src/mistralai/azure/client/models/toolmessage.py similarity index 77% rename from packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py rename to packages/azure/src/mistralai/azure/client/models/toolmessage.py index 4bc5c9a9..a73fd6bf 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolmessage.py +++ b/packages/azure/src/mistralai/azure/client/models/toolmessage.py @@ -2,16 +2,19 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) +from mistralai.azure.client.utils 
import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolMessageContentTypedDict = TypeAliasType( @@ -22,28 +25,28 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool",] - - class ToolMessageTypedDict(TypedDict): content: Nullable[ToolMessageContentTypedDict] + role: Literal["tool"] tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] - role: NotRequired[ToolMessageRole] class ToolMessage(BaseModel): content: Nullable[ToolMessageContent] + ROLE: Annotated[ + Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], + pydantic.Field(alias="role"), + ] = "tool" + tool_call_id: OptionalNullable[str] = UNSET name: OptionalNullable[str] = UNSET - role: Optional[ToolMessageRole] = "tool" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name", "role"] + optional_fields = ["tool_call_id", "name"] nullable_fields = ["content", "tool_call_id", "name"] null_default_fields = [] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py b/packages/azure/src/mistralai/azure/client/models/tooltypes.py similarity index 77% rename from packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py rename to packages/azure/src/mistralai/azure/client/models/tooltypes.py index 8b812ae0..1cce7446 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tooltypes.py +++ b/packages/azure/src/mistralai/azure/client/models/tooltypes.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import UnrecognizedStr +from mistralai.azure.client.types import UnrecognizedStr from typing import Literal, Union diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py b/packages/azure/src/mistralai/azure/client/models/usageinfo.py similarity index 98% rename from packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py rename to packages/azure/src/mistralai/azure/client/models/usageinfo.py index bbe5cdfa..19a6b09f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usageinfo.py +++ b/packages/azure/src/mistralai/azure/client/models/usageinfo.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from mistralai.azure.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py b/packages/azure/src/mistralai/azure/client/models/usermessage.py similarity index 73% rename from packages/mistralai_azure/src/mistralai_azure/models/usermessage.py rename to packages/azure/src/mistralai/azure/client/models/usermessage.py index 85fedb4b..96439c64 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/usermessage.py +++ b/packages/azure/src/mistralai/azure/client/models/usermessage.py @@ -2,10 +2,13 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.azure.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.azure.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import 
AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict UserMessageContentTypedDict = TypeAliasType( @@ -16,22 +19,22 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user",] - - class UserMessageTypedDict(TypedDict): content: Nullable[UserMessageContentTypedDict] - role: NotRequired[UserMessageRole] + role: Literal["user"] class UserMessage(BaseModel): content: Nullable[UserMessageContent] - role: Optional[UserMessageRole] = "user" + ROLE: Annotated[ + Annotated[Literal["user"], AfterValidator(validate_const("user"))], + pydantic.Field(alias="role"), + ] = "user" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role"] + optional_fields = [] nullable_fields = ["content"] null_default_fields = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py b/packages/azure/src/mistralai/azure/client/models/validationerror.py similarity index 90% rename from packages/mistralai_azure/src/mistralai_azure/models/validationerror.py rename to packages/azure/src/mistralai/azure/client/models/validationerror.py index 4caff4a6..817ecf7a 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/validationerror.py +++ b/packages/azure/src/mistralai/azure/client/models/validationerror.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.azure.client.types import BaseModel from typing import List, Union from typing_extensions import TypeAliasType, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/ocr.py b/packages/azure/src/mistralai/azure/client/ocr.py similarity index 97% rename from packages/mistralai_azure/src/mistralai_azure/ocr.py rename to packages/azure/src/mistralai/azure/client/ocr.py index 31e27f6e..098e764b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/ocr.py +++ b/packages/azure/src/mistralai/azure/client/ocr.py @@ -1,10 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai_azure import models, utils -from mistralai_azure._hooks import HookContext -from mistralai_azure.types import Nullable, OptionalNullable, UNSET -from mistralai_azure.utils.unmarshal_json_response import unmarshal_json_response +from mistralai.azure.client import models, utils +from mistralai.azure.client._hooks import HookContext +from mistralai.azure.client.types import Nullable, OptionalNullable, UNSET +from mistralai.azure.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, List, Mapping, Optional, Union diff --git a/packages/mistralai_azure/src/mistralai_azure/py.typed b/packages/azure/src/mistralai/azure/client/py.typed similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/py.typed rename to packages/azure/src/mistralai/azure/client/py.typed diff --git a/packages/mistralai_azure/src/mistralai_azure/sdk.py b/packages/azure/src/mistralai/azure/client/sdk.py similarity index 59% rename from packages/mistralai_azure/src/mistralai_azure/sdk.py rename to packages/azure/src/mistralai/azure/client/sdk.py index 04bc7743..985cb9a8 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdk.py +++ 
b/packages/azure/src/mistralai/azure/client/sdk.py @@ -7,61 +7,74 @@ from .utils.retries import RetryConfig import httpx import importlib -from mistralai_azure import models, utils -from mistralai_azure._hooks import SDKHooks -from mistralai_azure.types import OptionalNullable, UNSET -from typing import Any, Callable, Dict, Optional, TYPE_CHECKING, Union, cast +import logging +from mistralai.azure.client import models, utils +from mistralai.azure.client._hooks import SDKHooks +from mistralai.azure.client.types import OptionalNullable, UNSET +import sys +from typing import Callable, Dict, Optional, TYPE_CHECKING, Union, cast +import warnings import weakref +logger = logging.getLogger(__name__) + if TYPE_CHECKING: - from mistralai_azure.chat import Chat - from mistralai_azure.ocr import Ocr + from mistralai.azure.client.chat import Chat + from mistralai.azure.client.ocr import Ocr class MistralAzure(BaseSDK): - r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" + r"""Mistral AI API: Dora OpenAPI schema + + Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. 
+ """ chat: "Chat" r"""Chat Completion API.""" ocr: "Ocr" _sub_sdk_map = { - "chat": ("mistralai_azure.chat", "Chat"), - "ocr": ("mistralai_azure.ocr", "Ocr"), + "chat": ("mistralai.azure.client.chat", "Chat"), + "ocr": ("mistralai.azure.client.ocr", "Ocr"), } def __init__( self, - azure_api_key: Union[str, Callable[[], str]], - azure_endpoint: str, + api_key: Union[str, Callable[[], str]], + server: Optional[str] = None, + server_url: Optional[str] = None, url_params: Optional[Dict[str, str]] = None, client: Optional[HttpClient] = None, async_client: Optional[AsyncHttpClient] = None, retry_config: OptionalNullable[RetryConfig] = UNSET, timeout_ms: Optional[int] = None, debug_logger: Optional[Logger] = None, + api_version: str = "2024-05-01-preview", ) -> None: r"""Instantiates the SDK configuring it with the provided parameters. - :param azure_api_key: The azure_api_key required for authentication - :param azure_endpoint: The Azure AI endpoint URL to use for all methods + :param api_key: The api_key required for authentication + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods :param url_params: Parameters to optionally template the server URL with :param client: The HTTP client to use for all synchronous methods :param async_client: The Async HTTP client to use for all asynchronous methods :param retry_config: The retry configuration to use for all supported methods :param timeout_ms: Optional request timeout applied to each operation in milliseconds + :param api_version: Azure API version to use (injected as query param) """ - - # if azure_endpoint doesn't end with `/v1` add it - if not azure_endpoint.endswith("/"): - azure_endpoint += "/" - if not azure_endpoint.endswith("v1/"): - azure_endpoint += "v1/" - server_url = azure_endpoint - client_supplied = True if client is None: - client = httpx.Client() + client = httpx.Client( + follow_redirects=True, + params={"api-version": api_version}, + ) 
client_supplied = False + elif api_version != "2024-05-01-preview": + warnings.warn( + "api_version is ignored when a custom client is provided. " + "Set the api-version query parameter on your httpx.Client directly.", + stacklevel=2, + ) assert issubclass( type(client), HttpClient @@ -69,8 +82,17 @@ def __init__( async_client_supplied = True if async_client is None: - async_client = httpx.AsyncClient() + async_client = httpx.AsyncClient( + follow_redirects=True, + params={"api-version": api_version}, + ) async_client_supplied = False + elif api_version != "2024-05-01-preview": + warnings.warn( + "api_version is ignored when a custom async_client is provided. " + "Set the api-version query parameter on your httpx.AsyncClient directly.", + stacklevel=2, + ) if debug_logger is None: debug_logger = get_default_logger() @@ -79,11 +101,15 @@ def __init__( type(async_client), AsyncHttpClient ), "The provided async_client must implement the AsyncHttpClient protocol." - security: Any = None - if callable(azure_api_key): - security = lambda: models.Security(api_key=azure_api_key()) # pylint: disable=unnecessary-lambda-assignment + security: Union[models.Security, Callable[[], models.Security]] + if callable(api_key): + + def get_security() -> models.Security: + return models.Security(api_key=api_key()) + + security = get_security else: - security = models.Security(api_key=azure_api_key) + security = models.Security(api_key=api_key) if server_url is not None: if url_params is not None: @@ -98,16 +124,15 @@ def __init__( async_client_supplied=async_client_supplied, security=security, server_url=server_url, - server=None, + server=server, retry_config=retry_config, timeout_ms=timeout_ms, debug_logger=debug_logger, ), + parent_ref=self, ) hooks = SDKHooks() - - # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks current_server_url, *_ = self.sdk_configuration.get_server_details() @@ -127,13 +152,28 @@ def __init__( 
self.sdk_configuration.async_client_supplied, ) + def dynamic_import(self, modname, retries=3): + last_exc: Optional[Exception] = None + for attempt in range(retries): + try: + return importlib.import_module(modname) + except (KeyError, ImportError, ModuleNotFoundError) as e: + last_exc = e + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise ImportError( + f"Failed to import module '{modname}' after {retries} attempts" + ) from last_exc + def __getattr__(self, name: str): if name in self._sub_sdk_map: module_path, class_name = self._sub_sdk_map[name] try: - module = importlib.import_module(module_path) + module = self.dynamic_import(module_path) klass = getattr(module, class_name) - instance = klass(self.sdk_configuration) + instance = klass(self.sdk_configuration, parent_ref=self) setattr(self, name, instance) return instance except ImportError as e: @@ -160,7 +200,7 @@ def __enter__(self): async def __aenter__(self): return self - def __exit__(self, exc_type, exc_val, exc_tb): + def __exit__(self, _exc_type, _exc_val, _exc_tb): if ( self.sdk_configuration.client is not None and not self.sdk_configuration.client_supplied @@ -168,7 +208,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): self.sdk_configuration.client.close() self.sdk_configuration.client = None - async def __aexit__(self, exc_type, exc_val, exc_tb): + async def __aexit__(self, _exc_type, _exc_val, _exc_tb): if ( self.sdk_configuration.async_client is not None and not self.sdk_configuration.async_client_supplied diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py b/packages/azure/src/mistralai/azure/client/sdkconfiguration.py similarity index 93% rename from packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py rename to packages/azure/src/mistralai/azure/client/sdkconfiguration.py index cf85c47e..919225f9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdkconfiguration.py +++ 
b/packages/azure/src/mistralai/azure/client/sdkconfiguration.py @@ -9,8 +9,8 @@ from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass -from mistralai_gcp import models -from mistralai_gcp.types import OptionalNullable, UNSET +from mistralai.azure.client import models +from mistralai.azure.client.types import OptionalNullable, UNSET from pydantic import Field from typing import Callable, Dict, Optional, Tuple, Union diff --git a/packages/mistralai_azure/src/mistralai_azure/types/__init__.py b/packages/azure/src/mistralai/azure/client/types/__init__.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/types/__init__.py rename to packages/azure/src/mistralai/azure/client/types/__init__.py diff --git a/packages/mistralai_azure/src/mistralai_azure/types/basemodel.py b/packages/azure/src/mistralai/azure/client/types/basemodel.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/types/basemodel.py rename to packages/azure/src/mistralai/azure/client/types/basemodel.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/__init__.py b/packages/azure/src/mistralai/azure/client/utils/__init__.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/__init__.py rename to packages/azure/src/mistralai/azure/client/utils/__init__.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/annotations.py b/packages/azure/src/mistralai/azure/client/utils/annotations.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/annotations.py rename to packages/azure/src/mistralai/azure/client/utils/annotations.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/datetimes.py b/packages/azure/src/mistralai/azure/client/utils/datetimes.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/datetimes.py rename to 
packages/azure/src/mistralai/azure/client/utils/datetimes.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/enums.py b/packages/azure/src/mistralai/azure/client/utils/enums.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/enums.py rename to packages/azure/src/mistralai/azure/client/utils/enums.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py b/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/eventstreaming.py rename to packages/azure/src/mistralai/azure/client/utils/eventstreaming.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/forms.py b/packages/azure/src/mistralai/azure/client/utils/forms.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/forms.py rename to packages/azure/src/mistralai/azure/client/utils/forms.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/headers.py b/packages/azure/src/mistralai/azure/client/utils/headers.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/headers.py rename to packages/azure/src/mistralai/azure/client/utils/headers.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/logger.py b/packages/azure/src/mistralai/azure/client/utils/logger.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/logger.py rename to packages/azure/src/mistralai/azure/client/utils/logger.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/metadata.py b/packages/azure/src/mistralai/azure/client/utils/metadata.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/metadata.py rename to packages/azure/src/mistralai/azure/client/utils/metadata.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py 
b/packages/azure/src/mistralai/azure/client/utils/queryparams.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/queryparams.py rename to packages/azure/src/mistralai/azure/client/utils/queryparams.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py b/packages/azure/src/mistralai/azure/client/utils/requestbodies.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/requestbodies.py rename to packages/azure/src/mistralai/azure/client/utils/requestbodies.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/retries.py b/packages/azure/src/mistralai/azure/client/utils/retries.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/retries.py rename to packages/azure/src/mistralai/azure/client/utils/retries.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/security.py b/packages/azure/src/mistralai/azure/client/utils/security.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/security.py rename to packages/azure/src/mistralai/azure/client/utils/security.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/serializers.py b/packages/azure/src/mistralai/azure/client/utils/serializers.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/serializers.py rename to packages/azure/src/mistralai/azure/client/utils/serializers.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py similarity index 95% rename from packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py rename to packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py index f5813119..5317ac87 100644 --- a/packages/mistralai_azure/src/mistralai_azure/utils/unmarshal_json_response.py +++ 
b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py @@ -5,7 +5,7 @@ import httpx from .serializers import unmarshal_json -from mistralai_azure import models +from mistralai.azure.client import models T = TypeVar("T") diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/url.py b/packages/azure/src/mistralai/azure/client/utils/url.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/url.py rename to packages/azure/src/mistralai/azure/client/utils/url.py diff --git a/packages/mistralai_azure/src/mistralai_azure/utils/values.py b/packages/azure/src/mistralai/azure/client/utils/values.py similarity index 100% rename from packages/mistralai_azure/src/mistralai_azure/utils/values.py rename to packages/azure/src/mistralai/azure/client/utils/values.py diff --git a/packages/mistralai_azure/uv.lock b/packages/azure/uv.lock similarity index 99% rename from packages/mistralai_azure/uv.lock rename to packages/azure/uv.lock index a227d093..cedb1ce8 100644 --- a/packages/mistralai_azure/uv.lock +++ b/packages/azure/uv.lock @@ -154,7 +154,7 @@ wheels = [ [[package]] name = "mistralai-azure" -version = "1.8.0" +version = "2.0.0a4" source = { editable = "." 
} dependencies = [ { name = "httpcore" }, diff --git a/packages/gcp/.genignore b/packages/gcp/.genignore new file mode 100644 index 00000000..9a119b75 --- /dev/null +++ b/packages/gcp/.genignore @@ -0,0 +1,6 @@ +pyproject.toml +src/mistralai/gcp/client/sdk.py +src/mistralai/gcp/client/_hooks/registration.py +README.md +USAGE.md +docs/sdks/**/README.md diff --git a/packages/mistralai_gcp/.gitattributes b/packages/gcp/.gitattributes similarity index 100% rename from packages/mistralai_gcp/.gitattributes rename to packages/gcp/.gitattributes diff --git a/packages/mistralai_gcp/.gitignore b/packages/gcp/.gitignore similarity index 100% rename from packages/mistralai_gcp/.gitignore rename to packages/gcp/.gitignore diff --git a/packages/mistralai_gcp/.speakeasy/gen.lock b/packages/gcp/.speakeasy/gen.lock similarity index 55% rename from packages/mistralai_gcp/.speakeasy/gen.lock rename to packages/gcp/.speakeasy/gen.lock index 31eb1bc7..8ce6c5ea 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.lock +++ b/packages/gcp/.speakeasy/gen.lock @@ -1,24 +1,25 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: d91fd326da9118e6c9dddea48eaf47a7 + docChecksum: bc4a0ba9c38418d84a6a8a76b503977b docVersion: 1.0.0 speakeasyVersion: 1.685.0 generationVersion: 2.794.1 - releaseVersion: 1.8.0 - configChecksum: 42a1e5752a774fcdb0a5949bd6535933 + releaseVersion: 2.0.0a4 + configChecksum: 95fb33ae488fa72fb4ba17c6b93551a9 repoURL: https://github.com/mistralai/client-python.git - repoSubDirectory: packages/mistralai_gcp - installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/mistralai_gcp + repoSubDirectory: packages/gcp + installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/gcp published: true persistentEdits: - generation_id: 
e1cf1217-2a08-4cb8-b92c-542b4f885caa - pristine_commit_hash: 57fe0df69b76fe4754f039d49f7c40770fb3097d - pristine_tree_hash: c4c1037865fb86650ada485b300f96784045922f + generation_id: 5f09b925-b801-4bf0-bda9-6f9a3212c588 + pristine_commit_hash: 20c7ce96f6a097f98d3367b89a7bea09ba0ded7c + pristine_tree_hash: c30d519719cc0cd17d7bf53ae2c13b1d8b125c5e features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 + configurableModuleName: 0.2.0 constsAndDefaults: 1.0.5 core: 5.23.18 defaultEnabledRetries: 0.2.0 @@ -57,16 +58,12 @@ trackedFiles: pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 docs/models/assistantmessage.md: id: 7e0218023943 - last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38 - pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50 + last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d docs/models/assistantmessagecontent.md: id: 9f1795bbe642 last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d - docs/models/assistantmessagerole.md: - id: bb5d2a4bc72f - last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91 - pristine_git_object: 658229e77eb6419391cf7941568164541c528387 docs/models/chatcompletionchoice.md: id: 0d15c59ab501 last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 @@ -77,12 +74,12 @@ trackedFiles: pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b docs/models/chatcompletionrequest.md: id: adffe90369d0 - last_write_checksum: sha1:1ac7d6b5a8aba9c922cf5fe45f94aee55228f9db - pristine_git_object: 6886f9dcd43e8d61f4ec6692235f281cb03a5c86 - docs/models/chatcompletionrequestmessages.md: - id: ec996b350e12 - last_write_checksum: sha1:2ecec8d12cdb48426f4eb62732066fc79fcd4ec3 - pristine_git_object: bc7708a67f06d74e8a5bf1facb2b23fb1e08053c + last_write_checksum: sha1:2bf5152388f18436be4fe1c541b8d423dcae175c + 
pristine_git_object: 61a25d86e7dc292621f7f6c0f8909137a16b9112 + docs/models/chatcompletionrequestmessage.md: + id: 3f5e170d418c + last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 + pristine_git_object: 91e9e062d0ef0cb69235c4ae4516548733ce28a9 docs/models/chatcompletionrequeststop.md: id: fcaf5bbea451 last_write_checksum: sha1:71a25f84f0d88c7acf72e801ced6159546201851 @@ -97,8 +94,16 @@ trackedFiles: pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b docs/models/chatcompletionstreamrequest.md: id: cf8f29558a68 - last_write_checksum: sha1:33778fdf71aa9b934ae48d51664daaa0dd817e04 - pristine_git_object: ff1940dd8a92d7892d895c3fc0e0a4b321e55534 + last_write_checksum: sha1:f30b2a7353e7406eb30af841a1a211ea5cb30cb0 + pristine_git_object: 3e790e7dc7143b0ae287ad2df14ae7e7a4085e3f + docs/models/chatcompletionstreamrequestmessage.md: + id: 053a98476cd2 + last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 + pristine_git_object: 2e4e93acca8983a3ea27b391d4606518946e13fe + docs/models/chatcompletionstreamrequeststop.md: + id: d0e89a4dca78 + last_write_checksum: sha1:a889e9580fa94bda7c848682d6ba501b7f5c0f41 + pristine_git_object: a48460a92ac47fec1de2188ba46b238229736d32 docs/models/chatcompletionstreamrequesttoolchoice.md: id: 210d5e5b1413 last_write_checksum: sha1:0543164caf3f4fb2bef3061dbd1a5e6b34b17ae9 @@ -113,20 +118,24 @@ trackedFiles: pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d docs/models/completionresponsestreamchoice.md: id: d56824d615a6 - last_write_checksum: sha1:dcf4b125b533192cb5aea1a68551866954712dc5 - pristine_git_object: c807dacd98eb3561ee45f40db71a92cb72b0f6de - docs/models/content.md: - id: bfd859c99f86 - last_write_checksum: sha1:6673dbd19871a701955a322348a4f7e51c38ffc8 - pristine_git_object: a833dc2c6043e36b85131c9243b4cc02b9fcc4c6 + last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 + pristine_git_object: 1532c25b8fc065d486f52d4610a7f757e5340875 + 
docs/models/completionresponsestreamchoicefinishreason.md: + id: 5f1fbfc90b8e + last_write_checksum: sha1:20824b4a223cbd3658b32440973a7d47dcd108b9 + pristine_git_object: 0fece473297227c75db4e7ded63417a2f117cac0 docs/models/contentchunk.md: id: d2d3a32080cd last_write_checksum: sha1:5839a26cdc412b78caad7fb59df97bdcea57be6d pristine_git_object: 22023e8b19692df969693b7a14f8cf6e0143859f docs/models/deltamessage.md: id: 6c5ed6b60968 - last_write_checksum: sha1:c213149256c620715d744c89685d5b6cbdea6f58 - pristine_git_object: 61deabbf7e37388fdd4c1789089d120cc0b937b9 + last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 + pristine_git_object: e0ee575f3fce7c312114ce8c5390efc5c4854952 + docs/models/deltamessagecontent.md: + id: 7307bedc8733 + last_write_checksum: sha1:a1211b8cb576ad1358e68983680ee326c3920a5e + pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 docs/models/fimcompletionrequest.md: id: b44677ecc293 last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 @@ -147,10 +156,6 @@ trackedFiles: id: c97a11b764e9 last_write_checksum: sha1:958d5087050fdeb128745884ebcf565b4fdc3886 pristine_git_object: 5a9e2ff020d4939f7fd42c0673ea7bdd16cca99d - docs/models/finishreason.md: - id: 73315c2a39b3 - last_write_checksum: sha1:5b58c7fa9219f728b9731287e21abe1be9f11e4a - pristine_git_object: 45a5aedb7241cf080df3eb976a4413064d314009 docs/models/function.md: id: 416a80fba031 last_write_checksum: sha1:a9485076d430a7753558461ce87bf42d09e34511 @@ -167,22 +172,22 @@ trackedFiles: id: a211c095f2ac last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 docs/models/imageurl.md: id: e75dd23cec1d - last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 - pristine_git_object: 
7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + last_write_checksum: sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 docs/models/imageurlchunk.md: id: 4407097bfff3 - last_write_checksum: sha1:7a478fd638234ece78770c7fc5e8d0adaf1c3727 - pristine_git_object: f1b926ef8e82443aa1446b1c64c2f02e33d7c789 - docs/models/imageurlchunkimageurl.md: - id: c7fae88454ce - last_write_checksum: sha1:5eff71b7a8be7baacb9ba8ca0be0a0f7a391a325 - pristine_git_object: 767389082d25f06e617fec2ef0134dd9fb2d4064 - docs/models/imageurlchunktype.md: - id: b9af2db9ff60 - last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 - pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + last_write_checksum: sha1:30b72826963e22cadf76ac0b7604288dbc4fb943 + pristine_git_object: a84dac32b99390e3fd0559714ca43795742192c6 + docs/models/imageurlunion.md: + id: 9d3c691a9db0 + last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 + pristine_git_object: db97130f26199dcb354ecb7469d09530b035daa2 docs/models/jsonschema.md: id: a6b15ed6fac8 last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f @@ -191,10 +196,6 @@ trackedFiles: id: b071d5a509cc last_write_checksum: sha1:09a04749333ab50ae806c3ac6adcaa90d54df0f1 pristine_git_object: d6094ac2c6e0326c039dad2f6b89158694ef6aa7 - docs/models/messages.md: - id: 2103cd675c2f - last_write_checksum: sha1:f6940c9c67b98c49ae2bc2764f6c14178321f244 - pristine_git_object: 1d394500e8ffdd140457575568fc2ce465a1cc3a docs/models/mistralpromptmode.md: id: d17d5db4d3b6 last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 @@ -205,12 +206,8 @@ trackedFiles: pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 docs/models/referencechunk.md: id: 07895f9debfd - last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 - pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc - docs/models/referencechunktype.md: - id: 0944b80ea9c8 - last_write_checksum: 
sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 - pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab docs/models/responseformat.md: id: 50a1e4140614 last_write_checksum: sha1:e877b2e81470ef5eec5675dfb91a47e74d5d3add @@ -219,22 +216,14 @@ trackedFiles: id: cf1f250b82db last_write_checksum: sha1:105e1f9181913104b554051838cbdd0f728aa2c4 pristine_git_object: 2f5f1e5511b048323fee18a0ffdd506fe2b3d56f - docs/models/role.md: - id: b694540a5b1e - last_write_checksum: sha1:260a50c56a8bd03cc535edf98ebec06437f87f8d - pristine_git_object: affca78d5574cc42d8e6169f21968e5a8765e053 docs/models/security.md: id: 452e4d4eb67a last_write_checksum: sha1:ce2871b49c1632d50e22d0b1ebe4999021d52313 pristine_git_object: c698674c513f5b20c04f629e50154e67977275f7 - docs/models/stop.md: - id: f231cc9f5041 - last_write_checksum: sha1:86903cac5f57ad9b8ac07ecba6c454d40a53bdc8 - pristine_git_object: ba40ca83136d6d6cb4f1ef9e5ca3104a704e4846 docs/models/systemmessage.md: id: fdb7963e1cdf - last_write_checksum: sha1:97e726dff19a39b468767d5c01fc6256277ee71f - pristine_git_object: 0dba71c00f40c85e74b2c1967e077ffff9660f13 + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 docs/models/systemmessagecontent.md: id: 94a56febaeda last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb @@ -245,16 +234,12 @@ trackedFiles: pristine_git_object: 40030c170746d9953d25b979ab7e6f522018e230 docs/models/textchunk.md: id: 6cd12e0ef110 - last_write_checksum: sha1:6d41d1991d122805734ed0d90ee01592aa5ae6ff - pristine_git_object: 6daab3c381bd8c13d2935bf62578648a8470fc76 + last_write_checksum: sha1:aa448d4937c0c1cd562621f0a9080aa0dc6e4bd1 + pristine_git_object: b266619dcb57222ec343f373c43b2b5cef5b8b93 docs/models/thinkchunk.md: id: bca24d7153f6 - last_write_checksum: 
sha1:feb95a931bb9cdbfe28ab351618687e513cf830b - pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab - docs/models/thinkchunktype.md: - id: 0fbeed985341 - last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 - pristine_git_object: baf6f755252d027295be082b53ecf80555039414 + last_write_checksum: sha1:2b8ff7737fa7255673ca31da7cb2e6803fce9e02 + pristine_git_object: b07f598ebc5f0e9c041186c081dc98bc21104bdb docs/models/thinking.md: id: 07234f8dd364 last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 @@ -277,40 +262,28 @@ trackedFiles: pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 docs/models/toolmessage.md: id: 0553747c37a1 - last_write_checksum: sha1:3ac87031fdd4ba8b0996e95be8e7ef1a7ff41167 - pristine_git_object: a54f49332c2873471759b477fb4c712fa4fb61f5 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 docs/models/toolmessagecontent.md: id: f0522d2d3c93 last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 - docs/models/toolmessagerole.md: - id: f333d4d1ab56 - last_write_checksum: sha1:7e1c004bad24e928da0c286a9f053516b172d24f - pristine_git_object: c24e59c0c79ea886d266e38c673edd51531b9be6 docs/models/tooltypes.md: id: adb50fe63ea2 last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c pristine_git_object: 84e49253c9b9bd1bd314e2a126106404cbb52f16 - docs/models/type.md: - id: 98c32f09b2c8 - last_write_checksum: sha1:8aa9ca999e9648ddc2240bf80780684e3e858ddf - pristine_git_object: eb0581e7174b6951d69c485a64af5244cb8687fa docs/models/usageinfo.md: id: ec6fe65028a9 last_write_checksum: sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 docs/models/usermessage.md: id: ed66d7a0f80b - last_write_checksum: sha1:8291f7703e49ed669775dc953ea8cab6715dc7ed - pristine_git_object: 
63b0131091cd211b3b1477c1d63b5666a26db546 + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 docs/models/usermessagecontent.md: id: 52c072c851e8 last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a pristine_git_object: 8350f9e8f8996c136093e38760990f62fd01f8cf - docs/models/usermessagerole.md: - id: 99ffa937c462 - last_write_checksum: sha1:52014480516828b43827aa966b7319d9074f1111 - pristine_git_object: 171124e45988e784c56a6b92a0057ba00efc0db4 docs/models/utils/retryconfig.md: id: 4343ac43161c last_write_checksum: sha1:562c0f21e308ad10c27f85f75704c15592c6929d @@ -329,320 +302,324 @@ trackedFiles: pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 scripts/prepare_readme.py: id: e0c5957a6035 - last_write_checksum: sha1:81c7dbabc0e726a4a150e6ef1bcba578d3f1153d - pristine_git_object: 6c4b993238c1a60d4df4bb7de0a0b0a82e385dbf + last_write_checksum: sha1:eb988bc0e00ed4bb14e9a3572845af14f06c9b42 + pristine_git_object: ae27b555c05c3c9f35d84e8bbe6a7c9f80cf94b2 scripts/publish.sh: id: fe273b08f514 last_write_checksum: sha1:b290b25b36dca3d5eb1a2e66a2e1bcf2e7326cf3 pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 - src/mistralai_gcp/__init__.py: - id: b6565f49e73b + src/mistralai/gcp/client/__init__.py: + id: 4f63decd432e last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c - src/mistralai_gcp/_hooks/__init__.py: - id: 663f3129700b + src/mistralai/gcp/client/_hooks/__init__.py: + id: adcb191838d1 last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d pristine_git_object: 2ee66cdd592fe41731c24ddd407c8ca31c50aec1 - src/mistralai_gcp/_hooks/sdkhooks.py: - id: 46ab7e644296 - last_write_checksum: sha1:a2c018871bea47706a76b03d9a17dab717c339c0 - pristine_git_object: b81c2a2739d316cfed54daec48df6375155eb802 - src/mistralai_gcp/_hooks/types.py: - id: 1f77198243ec - last_write_checksum: 
sha1:fbf5f1fb258b75133c6b12ae688c03c18b2debd5 - pristine_git_object: f8088f4c26d3ae27202c716c80c391d4daed4156 - src/mistralai_gcp/_version.py: - id: 4e2b8c406f49 - last_write_checksum: sha1:18c7db41065d76e733dc84c997f2a7808892a7c7 - pristine_git_object: a170f0ab6d229979b9077477809b10f2774a0144 - src/mistralai_gcp/basesdk.py: - id: b84fa6111b69 - last_write_checksum: sha1:41381dd799bd5e1f8a15bb65a0672dc6cc4796c4 - pristine_git_object: 7a93de23ad83096b2392e05b11f29030b5961456 - src/mistralai_gcp/chat.py: - id: 1cc7d54332ba - last_write_checksum: sha1:b4b4db3cfeac555718e2d74e897c6ba22b07a672 - pristine_git_object: 3dd6040fc7a565ffc4854bcc25e1e24a6683896d - src/mistralai_gcp/fim.py: - id: 1e5bec08157c - last_write_checksum: sha1:2c27170c5932893d4e8bec8ce45b2dc6e0957cd6 - pristine_git_object: 36d9fd60baaad606d9d57a30afdd9566b06b4caa - src/mistralai_gcp/httpclient.py: - id: 7de4ac861042 + src/mistralai/gcp/client/_hooks/sdkhooks.py: + id: 7e23394c3f65 + last_write_checksum: sha1:4a03a16da35168f25ed0cccfdb0d4c4d86bbe242 + pristine_git_object: 2af4deeda8055f4c57c0c7f00a7b79033435cf34 + src/mistralai/gcp/client/_hooks/types.py: + id: 4f37fd18bfd9 + last_write_checksum: sha1:2b295cc28d5fa2c79495510c8b97a1ea60f993e0 + pristine_git_object: ea95bed210db9180824efddfb1b3e47f5bf96489 + src/mistralai/gcp/client/_version.py: + id: f87319e32c7b + last_write_checksum: sha1:8c07e6351bf2df8239b3c02db75ee469dba53394 + pristine_git_object: ba48dac120cadd3f586b38659dc04e50838daa11 + src/mistralai/gcp/client/basesdk.py: + id: 4d594572857b + last_write_checksum: sha1:45ed4b6078e01d52d1dcf4bdc5494b700f1a6cde + pristine_git_object: 6f9f5fd9a2cadc8893d6693c1d40a8114c0fdc2a + src/mistralai/gcp/client/chat.py: + id: 4c41f05f786e + last_write_checksum: sha1:a4d5609f51dee25dfc34f83e1eda2888aa01dda6 + pristine_git_object: 78541248204cbd5b92b6d6d362924fcdada8a948 + src/mistralai/gcp/client/fim.py: + id: 13d2d208e0ef + last_write_checksum: sha1:e6226c1720effd314afa7b9a21e5ec2347e5a74f + 
pristine_git_object: e2acacd58c28fa7ea718240b01a3714f7fc0b8f6 + src/mistralai/gcp/client/httpclient.py: + id: a53dd7be6a4c last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 pristine_git_object: 89560b566073785535643e694c112bedbd3db13d - src/mistralai_gcp/models/__init__.py: - id: 9a7b2a1f0dba - last_write_checksum: sha1:54654df1aecc8d4f634ebd4dbcb0fed16da80309 - pristine_git_object: fe85b133a3a7652cfcfd3b44074be3729c8a9b7b - src/mistralai_gcp/models/assistantmessage.py: - id: 0779dd85c548 - last_write_checksum: sha1:ccf5d6a93bf007d47f0415320afb047278e10172 - pristine_git_object: 17d740b6eeb433b2865a652d1dd760227ad38191 - src/mistralai_gcp/models/chatcompletionchoice.py: - id: b5843c853153 - last_write_checksum: sha1:d389ddcfb64980b6c56a42d53bce7c63e26cc569 - pristine_git_object: fe3ee952a207f772ec49972cbd30f83654c84ad9 - src/mistralai_gcp/models/chatcompletionrequest.py: - id: 42d6cdf4646f - last_write_checksum: sha1:9685d594f13e8500e9c7fbab1e0d4042fccfc23d - pristine_git_object: 80345f9d956f64396f48850641842b2a3a6f8bee - src/mistralai_gcp/models/chatcompletionresponse.py: - id: 14720f23411e - last_write_checksum: sha1:46f14c3e00d21e9f01756f111d353768ad939494 - pristine_git_object: a7953eb156cc8185d70f92df8a75a2ebb77840b9 - src/mistralai_gcp/models/chatcompletionstreamrequest.py: - id: 2e17680adc7e - last_write_checksum: sha1:37c2daaad5c557234b5f067152280440f4c96933 - pristine_git_object: e857d51522dc9964cde865d7f681bd856a3cbdaf - src/mistralai_gcp/models/completionchunk.py: - id: 7fa670acf4b8 - last_write_checksum: sha1:0d0fdb8efda7f0b6a8ff376b7da94cac8060e4e2 - pristine_git_object: ca002f52239f69b96dd967b5e91cb4ed544e51d0 - src/mistralai_gcp/models/completionevent.py: - id: c25e6676e263 - last_write_checksum: sha1:528f13beedc9befc6fb71d4f9f2a2d4ff5e91817 - pristine_git_object: 33278c119c62205b8d9b09297066dc61c2a86cd1 - src/mistralai_gcp/models/completionresponsestreamchoice.py: - id: 46946832a23e - last_write_checksum: 
sha1:bc42569eb80dc034a1bde9170c35e6bc4ff52bb8 - pristine_git_object: ec9df52818fabf6bef33094bc7d25398066df3af - src/mistralai_gcp/models/contentchunk.py: - id: 96dd7160dff7 - last_write_checksum: sha1:484722b90615ca7af20993c570de79fe990a50f2 - pristine_git_object: da5671e348d363927af77188da6af07240398826 - src/mistralai_gcp/models/deltamessage.py: - id: db6c3c4d3384 - last_write_checksum: sha1:e596610fa0dd100203cd7e515750782bfbdb0445 - pristine_git_object: 1801ac76522df2efc362712d46262aeba95abc87 - src/mistralai_gcp/models/fimcompletionrequest.py: - id: ed8593c435af - last_write_checksum: sha1:6561263425e385568189ffc61e6b00034136adc3 - pristine_git_object: bcc97c90d4d327c83666423317dae2dc90db3b82 - src/mistralai_gcp/models/fimcompletionresponse.py: - id: 5f85a7cdb5fd - last_write_checksum: sha1:3ac2057157c7d1cb1bfc81fca2915ba72546f968 - pristine_git_object: e1940b0a2290fc3f9afcbd9e945397b1b90660ec - src/mistralai_gcp/models/fimcompletionstreamrequest.py: - id: f17c4f8fa580 - last_write_checksum: sha1:ebbe89e576d498070fde6b195d5afa2dc8bd5eac - pristine_git_object: 34d2ba65682b971f675f427cdf3aa6539071ce3a - src/mistralai_gcp/models/function.py: - id: 4612d6f83b9a - last_write_checksum: sha1:7692ea8102475e4d82d83722a8aea1efde668774 - pristine_git_object: 7ad1ae645f99ab13c022c92e7733ff4b15d39cac - src/mistralai_gcp/models/functioncall.py: - id: a3ca765a9368 - last_write_checksum: sha1:e044de5b26b15d46dce8ad8bd0d13bdf3d24ef7d - pristine_git_object: 99554c8862922184a05074bf01f71fbe20ac8fea - src/mistralai_gcp/models/functionname.py: - id: f97eb2c1bae3 - last_write_checksum: sha1:6343e5b4f724db6088c2055b058a9ebdd9bda995 - pristine_git_object: 00ec22f5ca6ff2d68d5cce2a020846a672ab0a1b - src/mistralai_gcp/models/httpvalidationerror.py: - id: f1ac6b7c81f3 - last_write_checksum: sha1:8e98e27a5440e2e1dbe330d1c889d43919d90b51 - pristine_git_object: 79609351e675148ef074988bb6ea8a11b81087dc - src/mistralai_gcp/models/imageurl.py: - id: 1668e9d55730 - last_write_checksum: 
sha1:2b8eaac00c956beb87434f8d5a21dff12611c788 - pristine_git_object: 20d4ba7719a6c04d2c7864459a68cca808e1a3f2 - src/mistralai_gcp/models/imageurlchunk.py: - id: ebc4dfed0347 - last_write_checksum: sha1:5c625584449139a410138c9986323d1f86b52735 - pristine_git_object: ddb53f21a13aeed7884e213e92752de1870d9fb5 - src/mistralai_gcp/models/jsonschema.py: - id: 4c32e4fa593e - last_write_checksum: sha1:3c972f731f2bd92262ea04a65771c093254d3a5f - pristine_git_object: 26914b2f8562da07e2d54d68a5806bedd32ec16a - src/mistralai_gcp/models/mistralgcperror.py: - id: 690cf29f596b - last_write_checksum: sha1:0ec55c68e3daccf2aba3c52f0a7c77ad5102f4c9 - pristine_git_object: fec729a590b2ea981e01f4af99d8b36ba52b4664 - src/mistralai_gcp/models/mistralpromptmode.py: - id: d2ba58ed5184 - last_write_checksum: sha1:6fb8323de88682846a2a09e68550f3508a29f1f5 - pristine_git_object: a5cc534f8c53bc87b8451aac1b2a79e695530e71 - src/mistralai_gcp/models/no_response_error.py: - id: 7a773ba0687f + src/mistralai/gcp/client/models/__init__.py: + id: d9e976d01972 + last_write_checksum: sha1:f0554ff6b81286615330ffea947e619bc508bf19 + pristine_git_object: fb446c259f4ca1cc97ec64aac197f52b8224a096 + src/mistralai/gcp/client/models/assistantmessage.py: + id: d39c4bdd289e + last_write_checksum: sha1:08fa98315561d5bb2c094bf57e7d66639b86e3ee + pristine_git_object: 7061775b3dbd9be0b978ff2a2cb07e52c01fc80a + src/mistralai/gcp/client/models/chatcompletionchoice.py: + id: 8e65b56f3e6d + last_write_checksum: sha1:e6d1382e9f880b866130d900fd866997aaf80e45 + pristine_git_object: ae5a2fbf38afbd86233dcaa8aa1c8441f5ed9eba + src/mistralai/gcp/client/models/chatcompletionrequest.py: + id: 4694a31c0003 + last_write_checksum: sha1:edb744ec2baca1f9ba6574662fffb36fb7d3faab + pristine_git_object: 1bc039221910bf88396c96affe735c8ac822920b + src/mistralai/gcp/client/models/chatcompletionresponse.py: + id: dd9e4796fca9 + last_write_checksum: sha1:76d7257583389ff5021e320a8f9a45a6deb07c7c + pristine_git_object: 
317c4d84e378c14294d58c5aefd8c55ffe28754a + src/mistralai/gcp/client/models/chatcompletionstreamrequest.py: + id: 7294862af8ea + last_write_checksum: sha1:75d5bfcc204339b152dc78e33ac449c3aa9b5432 + pristine_git_object: 0a5a0021a4862e7b92a5c31679bf42bfa704d15b + src/mistralai/gcp/client/models/completionchunk.py: + id: 6b9ed8c30877 + last_write_checksum: sha1:4afc07c1824d81640f52a5c8bf89fde8893269b9 + pristine_git_object: 9e54cb6dfaccf7f815b40be585e11585cb5fef78 + src/mistralai/gcp/client/models/completionevent.py: + id: 3f55c4b8fc75 + last_write_checksum: sha1:66665d921fd27df6ef0efce996a5446e49b989d8 + pristine_git_object: bb1550093ce9adcb9bcd0548b69796e82f4f260b + src/mistralai/gcp/client/models/completionresponsestreamchoice.py: + id: ad9b98ca7e1c + last_write_checksum: sha1:04d195584fe4ea16544685e9989e5ae35205179a + pristine_git_object: 6f306721fbe47780c778833b80e97ab5d25d8367 + src/mistralai/gcp/client/models/contentchunk.py: + id: 8714d3bf2698 + last_write_checksum: sha1:347f43b4d7dcab18e09e6c3323f745a25ecfb04c + pristine_git_object: 1cd9e502ab7d4860daa79f907beafa71da086ab3 + src/mistralai/gcp/client/models/deltamessage.py: + id: 404fc85f1a4c + last_write_checksum: sha1:3375624531d12279d225fb07a68e0396483b962f + pristine_git_object: 96923518438137cb729a69149b5b99be49836ad7 + src/mistralai/gcp/client/models/fimcompletionrequest.py: + id: 5b79e2595d31 + last_write_checksum: sha1:cc4fa68c60a6a500a9887e47dd2e9220327c6226 + pristine_git_object: f37bbcc3cab020224531da898dd99cc175d49cd9 + src/mistralai/gcp/client/models/fimcompletionresponse.py: + id: 402f602d29b8 + last_write_checksum: sha1:cfe26848c7b14d6e374b7944d7ad44df822990b0 + pristine_git_object: 5b80da3f03e4e99dfca971a53af1cf6472c889bb + src/mistralai/gcp/client/models/fimcompletionstreamrequest.py: + id: 31190cf25070 + last_write_checksum: sha1:720f0a039a62cb508d513475a0e4bad45a9aa03c + pristine_git_object: 8e6102612998bde70d830bb0b8ee3a5e2a4dd01e + src/mistralai/gcp/client/models/function.py: + id: 
2285a899b32e + last_write_checksum: sha1:a69ad9c8cd723e78a3949deefe43bcbf57426916 + pristine_git_object: 28577eff06d052aeb58c2795dd0a92ae4f2e7552 + src/mistralai/gcp/client/models/functioncall.py: + id: 17bb51f08e5f + last_write_checksum: sha1:b5fe2f061ea5f47057ee50011babc80de27e0ee6 + pristine_git_object: 0f1b24251ce728b3c2a0fb9e9ca94f90a9c3b7be + src/mistralai/gcp/client/models/functionname.py: + id: 313a6001145f + last_write_checksum: sha1:fe1eefaed314efa788bd15beb63bf6b81abb307e + pristine_git_object: 585b9e39762e49356823e211ad86f701bca389b8 + src/mistralai/gcp/client/models/httpvalidationerror.py: + id: bdb67f678798 + last_write_checksum: sha1:58b6b7a2b2f8e4f66fc14c38540a26cfd2541a1e + pristine_git_object: 57df72607adc980b061d092f77140c6dbd36ecec + src/mistralai/gcp/client/models/imagedetail.py: + id: a28b2f3e2cb5 + last_write_checksum: sha1:a4874529961952019eaa86a2fa0989626f537a4c + pristine_git_object: 68ed76080716eb1424b13f182479f57e51a4fabf + src/mistralai/gcp/client/models/imageurl.py: + id: 4e330f3eae74 + last_write_checksum: sha1:3c5d70c0698b1b4b9c99087241227bab3dc0cdbf + pristine_git_object: d4f298f12d8095590cded5714091596b505c59b1 + src/mistralai/gcp/client/models/imageurlchunk.py: + id: e68a4a393e9b + last_write_checksum: sha1:2eb2c8a205e5f8b320e2f597075cad9e5e27475b + pristine_git_object: fc5284c102c17a33c1ba6029c87515d509cd014b + src/mistralai/gcp/client/models/jsonschema.py: + id: 39c6e7d412a0 + last_write_checksum: sha1:29ba87457959588ff7d8188ae2382fb88740151d + pristine_git_object: 443c429dd1461d7a6817335626cd585577c5bffe + src/mistralai/gcp/client/models/mistralgcperror.py: + id: 278d296220ff + last_write_checksum: sha1:7267c829a842a94c5b84ac248a1610ce45f3db4e + pristine_git_object: 9de91bf2a4abf8b0d0922eb6062fe2ab817a8aee + src/mistralai/gcp/client/models/mistralpromptmode.py: + id: 8be4a4a683e4 + last_write_checksum: sha1:c958567e95490abf3941fde69be69733e8afb90e + pristine_git_object: c765e4f1a0b86735255771231377f13d62f3d7a6 + 
src/mistralai/gcp/client/models/no_response_error.py: + id: 2a7fa173594b last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 - src/mistralai_gcp/models/prediction.py: - id: cd3b43190e22 - last_write_checksum: sha1:a0411a8e3b1ecb42b91405dd9ee2a2ee5f3fad59 - pristine_git_object: 36c87ab046ed9f1a28a371fbdc5c7d584d71b6d3 - src/mistralai_gcp/models/referencechunk.py: - id: ee00a52fb6dd - last_write_checksum: sha1:d0c05b6b1e7d085833d4a9ef85f1e0088c86d3a5 - pristine_git_object: 904e8b8250570371e2b59895196986a45e6d3562 - src/mistralai_gcp/models/responseformat.py: - id: ad17dac36a51 - last_write_checksum: sha1:296d4b52f934c48490b71d85e1e9d0e207cee21a - pristine_git_object: 9fe5116ca46d713f5f23c92ec1de8a73c5124408 - src/mistralai_gcp/models/responseformats.py: - id: deb9c36c5ec5 + src/mistralai/gcp/client/models/prediction.py: + id: 7a5463285bc8 + last_write_checksum: sha1:1d1e81082d1c2bfd613f0bc00f7173995ad67c0c + pristine_git_object: f53579edc665dd7fc1cc2497b0cd05b69e541cd8 + src/mistralai/gcp/client/models/referencechunk.py: + id: 523e477f8725 + last_write_checksum: sha1:d29c5fc1d8b6850fdeb3abc7f83185de92571b23 + pristine_git_object: 274ea7f7b142714d96040428fe7b87eeb48432cb + src/mistralai/gcp/client/models/responseformat.py: + id: 06774bb65b42 + last_write_checksum: sha1:a52a60dc45c0b0939b99754d6c0c603ef2f737d3 + pristine_git_object: 34ae6b039a6c83c603fc6d47f6b2f233ec6c817a + src/mistralai/gcp/client/models/responseformats.py: + id: 18112ad0f6db last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 - src/mistralai_gcp/models/responsevalidationerror.py: - id: 78e210042d35 - last_write_checksum: sha1:b8ba70238453017393e721c7d61b5f1e268d7c17 - pristine_git_object: ebd4f214747d451dc2733d6ea838c67bb0c84797 - src/mistralai_gcp/models/sdkerror.py: - id: beed68eccaa1 - last_write_checksum: 
sha1:a058f2519ec22f72457e800600be469f13ff9915 - pristine_git_object: 7f53bbcd548d15f4fdd529bd3caea5249eb5e8e6 - src/mistralai_gcp/models/security.py: - id: 32f877bd1399 - last_write_checksum: sha1:7bad1150440143f9f6faefe33911edf6c2afdec1 - pristine_git_object: 3857494264c0444d330c54570483710a5ed321f0 - src/mistralai_gcp/models/systemmessage.py: - id: 13826cd6cb74 - last_write_checksum: sha1:876e84816c4e27ad77d6313777ba841ea3086cf9 - pristine_git_object: d74bdf3255bac53335eea08a6010cf1cc19380dd - src/mistralai_gcp/models/systemmessagecontentchunks.py: - id: 8233735d37db - last_write_checksum: sha1:38fedfdb83824054a1734bcc7d39e7e040bf4792 - pristine_git_object: e0b5bbc30828cbf572e603efc86ee2695102ea31 - src/mistralai_gcp/models/textchunk.py: - id: a330626b2006 - last_write_checksum: sha1:b801cf9b1913a70841c8fbdc9d433f0380ea82d8 - pristine_git_object: c4a8cf28cd2281cfda40cefa70ce1bd64d3e750d - src/mistralai_gcp/models/thinkchunk.py: - id: c38f6a213cc1 - last_write_checksum: sha1:a072f3bf01c2dc90ef6cc1b188b2e00e15923e07 - pristine_git_object: b88c0cb54c6926b3c896b3c192c5f3c51c676a51 - src/mistralai_gcp/models/tool.py: - id: 86b94d6a3bcb - last_write_checksum: sha1:14a081eb8639d834a7c4f209a79e7d1270202205 - pristine_git_object: 800de633962a0ccddab52596eae542318bb491b0 - src/mistralai_gcp/models/toolcall.py: - id: 3047e78c2ac3 - last_write_checksum: sha1:d219f8d7de19f501b799caf0e232bdce95e2c891 - pristine_git_object: 23ef157aa1d24498805a489a8cebf3c0e257d919 - src/mistralai_gcp/models/toolchoice.py: - id: 1f3d5233426e - last_write_checksum: sha1:936b1ac7b44bc1bf357e6a66cc42ed0127ad015e - pristine_git_object: 4a1483305f606afcc704e8d51ae363468354849e - src/mistralai_gcp/models/toolchoiceenum.py: - id: b4431b9cf3fd + src/mistralai/gcp/client/models/responsevalidationerror.py: + id: b90c1c09ac00 + last_write_checksum: sha1:e4321c1141ba7b1f6a8c217124e02ea0c70d9ad1 + pristine_git_object: 0e86ea6cb79fd4598d527dfef403ba66d435d3bb + src/mistralai/gcp/client/models/sdkerror.py: + 
id: a7cf4fa8974b + last_write_checksum: sha1:a3b60234deceb7fbcb57926c265e02e9fefc0835 + pristine_git_object: 00bc1d99353e7e2415d92c3e906c2c09712e5a64 + src/mistralai/gcp/client/models/security.py: + id: 7e13bda8273b + last_write_checksum: sha1:7086e929823d4eefe80cc279b605adfc8bbb08aa + pristine_git_object: 10a469b54d5e03873fb7d7d98627f2376c93d484 + src/mistralai/gcp/client/models/systemmessage.py: + id: 6537664d2d1b + last_write_checksum: sha1:e7f8dc73154c6985fcdbb77259df9bbc4745f976 + pristine_git_object: a7d695a7791eb5e97cd8f74e81c475c78e4b1a67 + src/mistralai/gcp/client/models/systemmessagecontentchunks.py: + id: e120a6469c89 + last_write_checksum: sha1:55529f2f29ba3087fbf117dbbe64e1dda92b2958 + pristine_git_object: 225f38b712f5f3c7abfd526cc8c0386687814f36 + src/mistralai/gcp/client/models/textchunk.py: + id: a134f120d4dc + last_write_checksum: sha1:9f46381e01f235560017ea80fbc85210eb625a99 + pristine_git_object: 77576c9fd87f0861bf6a3496aeae7e8bb8dc986a + src/mistralai/gcp/client/models/thinkchunk.py: + id: 59a1d1ef2020 + last_write_checksum: sha1:9fcccb19d87bc41f771cae710eeb8f28c229070d + pristine_git_object: b65fffb21d5cb060acaa648a70e337a43595cd32 + src/mistralai/gcp/client/models/tool.py: + id: 4b27d45e56ad + last_write_checksum: sha1:6d139575b740ea1f9f68a73b7bc2c95c30a10345 + pristine_git_object: d09c68542f2cb1f3bae0ffc7a7b163ad08a8e973 + src/mistralai/gcp/client/models/toolcall.py: + id: e6c25869a579 + last_write_checksum: sha1:5acf0eca8b1f4c459c6d8cadbbbd90605201ddc0 + pristine_git_object: a1edf3370426957980ff212367d56909ea8fa548 + src/mistralai/gcp/client/models/toolchoice.py: + id: cb13a9f64c92 + last_write_checksum: sha1:3ad6b48b24b39609e86229193ad18d84b1b3c818 + pristine_git_object: de3828dac8bc23e32b9f9434adccc770b5ce1212 + src/mistralai/gcp/client/models/toolchoiceenum.py: + id: d62e9c92d93c last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 - 
src/mistralai_gcp/models/toolmessage.py: - id: e21a2326eb31 - last_write_checksum: sha1:c332f72e272fff7970f52e2b15223a2898ce9b15 - pristine_git_object: d6aa2621b83bde261fab7bd15f58273861f88738 - src/mistralai_gcp/models/tooltypes.py: - id: b4c1716d51b3 - last_write_checksum: sha1:0f8fe0c437736eb584cce298a5e72c4e25f7c42b - pristine_git_object: 8b812ae0cfee81a1cd8ab0180e65f57d19a0dcbd - src/mistralai_gcp/models/usageinfo.py: - id: 574d1999c265 - last_write_checksum: sha1:a0a88fe5b3cae9317781b99cb3cc1916a9ba17cc - pristine_git_object: 59f36158761c3a86900256a6ed73845c455417c7 - src/mistralai_gcp/models/usermessage.py: - id: cf3691ffafa6 - last_write_checksum: sha1:d0ed86a67403d65ed6ac7a31aa5f73e19ecfa670 - pristine_git_object: 0168b45235bc891888c095565af832535dd26139 - src/mistralai_gcp/models/validationerror.py: - id: f1a6468621bd - last_write_checksum: sha1:a4cc5969f12e00be3506edc90ec21a01d5415eff - pristine_git_object: 033d4b63d1c321ae2c49e8684b34817adddca4c2 - src/mistralai_gcp/py.typed: - id: 7f25f97fed44 + src/mistralai/gcp/client/models/toolmessage.py: + id: b3774786c2e9 + last_write_checksum: sha1:ef21eb555f41ec70010dbcea1a155af988936061 + pristine_git_object: 65b1d9d62d37361a06b3fd3ee1790eb3a976a94f + src/mistralai/gcp/client/models/tooltypes.py: + id: 5926c64f5229 + last_write_checksum: sha1:ffd576511eed9f823c3d67df9fc5574d8d53c54b + pristine_git_object: fd1aa13d7b8c5d9bdb0922e04b8bd653ff843f60 + src/mistralai/gcp/client/models/usageinfo.py: + id: 3aab1af66cff + last_write_checksum: sha1:47c6311bc1db47849a72c8e1bcc64dac9cec637e + pristine_git_object: 9b7207b10ea9d46d8216c104c45be1a52fb093d9 + src/mistralai/gcp/client/models/usermessage.py: + id: 9cfa7260463e + last_write_checksum: sha1:580acf868a3d180eef34b2af9c2d20f78e4fb693 + pristine_git_object: c083e16d4aa536beec9f9e1151ebbe8c1797798c + src/mistralai/gcp/client/models/validationerror.py: + id: 6b4f4910ea9c + last_write_checksum: sha1:2792fd656f55519902f37670fb9fb3b43b4aa016 + pristine_git_object: 
2d330e9acb579cc4928fa27fdd72288ce8832b8b + src/mistralai/gcp/client/py.typed: + id: 98b8ab80ab0d last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 - src/mistralai_gcp/sdkconfiguration.py: - id: 84fd7d3e219a - last_write_checksum: sha1:df51450c87f807c849e2aefb0a154aa4426fd8e3 - pristine_git_object: cf85c47e5e33956a64ddea53d85cdb7cc4bb687e - src/mistralai_gcp/types/__init__.py: - id: 15a92fdbd0a1 + src/mistralai/gcp/client/sdkconfiguration.py: + id: 57be0f79ea1e + last_write_checksum: sha1:0c5905e7c6092f57c15ee4318a85c0985bcc1ccf + pristine_git_object: d56a634f688f6697ba84962381084dc2d0836ac9 + src/mistralai/gcp/client/types/__init__.py: + id: f7ef15ac2ba1 last_write_checksum: sha1:140ebdd01a46f92ffc710c52c958c4eba3cf68ed pristine_git_object: fc76fe0c5505e29859b5d2bb707d48fd27661b8c - src/mistralai_gcp/types/basemodel.py: - id: 0dd6dc277359 + src/mistralai/gcp/client/types/basemodel.py: + id: 24babf758c19 last_write_checksum: sha1:10d84aedeb9d35edfdadf2c3020caa1d24d8b584 pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee - src/mistralai_gcp/utils/__init__.py: - id: bb44726e5fa4 + src/mistralai/gcp/client/utils/__init__.py: + id: a30c8ff6dcff last_write_checksum: sha1:887f56a717845fab7445cc368d2a17d850c3565a pristine_git_object: 05f26ade57efb8c54a774fbcb939fb1a7dc655ce - src/mistralai_gcp/utils/annotations.py: - id: aeecca0c40a3 + src/mistralai/gcp/client/utils/annotations.py: + id: 9b2cd4ffc6e9 last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc pristine_git_object: 12e0aa4f1151bb52474cc02e88397329b90703f6 - src/mistralai_gcp/utils/datetimes.py: - id: e3e3bb6cb264 + src/mistralai/gcp/client/utils/datetimes.py: + id: dd1f0f91ea9d last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 - src/mistralai_gcp/utils/enums.py: - id: 9f020fc8d361 + src/mistralai/gcp/client/utils/enums.py: + id: 
2341407d5443 last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 - src/mistralai_gcp/utils/eventstreaming.py: - id: d570df9074cf + src/mistralai/gcp/client/utils/eventstreaming.py: + id: bb66f0c3e0dc last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc - src/mistralai_gcp/utils/forms.py: - id: fe642748c385 + src/mistralai/gcp/client/utils/forms.py: + id: ebf34781d6bd last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 - src/mistralai_gcp/utils/headers.py: - id: 0cb933d098ed + src/mistralai/gcp/client/utils/headers.py: + id: 4c369582903e last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 pristine_git_object: 37864cbbbc40d1a47112bbfdd3ba79568fc8818a - src/mistralai_gcp/utils/logger.py: - id: 2992f9bda9c7 + src/mistralai/gcp/client/utils/logger.py: + id: 082d86b60820 last_write_checksum: sha1:f3fdb154a3f09b8cc43d74c7e9c02f899f8086e4 pristine_git_object: b661aff65d38b77d035149699aea09b2785d2fc6 - src/mistralai_gcp/utils/metadata.py: - id: af274ae68c93 + src/mistralai/gcp/client/utils/metadata.py: + id: ff0e832b8b9c last_write_checksum: sha1:c6a560bd0c63ab158582f34dadb69433ea73b3d4 pristine_git_object: 173b3e5ce658675c2f504222a56b3daaaa68107d - src/mistralai_gcp/utils/queryparams.py: - id: b20aa8da5982 + src/mistralai/gcp/client/utils/queryparams.py: + id: 133b8408e73e last_write_checksum: sha1:b94c3f314fd3da0d1d215afc2731f48748e2aa59 pristine_git_object: c04e0db82b68eca041f2cb2614d748fbac80fd41 - src/mistralai_gcp/utils/requestbodies.py: - id: 1a2ddaa8f5a2 + src/mistralai/gcp/client/utils/requestbodies.py: + id: 1be13a660954 last_write_checksum: sha1:41e2d2d2d3ecc394c8122ca4d4b85e1c3e03f054 pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c - src/mistralai_gcp/utils/retries.py: - id: 8caeba1fe4ab + 
src/mistralai/gcp/client/utils/retries.py: + id: 542ebd75b79b last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 - src/mistralai_gcp/utils/security.py: - id: fa4f52aaad5d + src/mistralai/gcp/client/utils/security.py: + id: 5273152365f4 last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 pristine_git_object: 295a3f40031dbb40073ad227fd4a355660f97ab2 - src/mistralai_gcp/utils/serializers.py: - id: 920ccb5c87f2 + src/mistralai/gcp/client/utils/serializers.py: + id: a7836e553d41 last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 - src/mistralai_gcp/utils/unmarshal_json_response.py: - id: 65d5fa644cf8 - last_write_checksum: sha1:877dd4bb58700039a481fdf7d7216d2d9a0b3e92 - pristine_git_object: c168a293f7018fc3b83cac0d8f723475e5f05631 - src/mistralai_gcp/utils/url.py: - id: 116eb5a78ca7 + src/mistralai/gcp/client/utils/unmarshal_json_response.py: + id: d972d22cf934 + last_write_checksum: sha1:a68b9e491188e6c1956a749530eac3c7dc8004e7 + pristine_git_object: 83e8275e59adf51fb01a0579ae26627ee29fee49 + src/mistralai/gcp/client/utils/url.py: + id: 0d311bbcb8f8 last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 pristine_git_object: c78ccbae426ce6d385709d97ce0b1c2813ea2418 - src/mistralai_gcp/utils/values.py: - id: 9cc9ee47c951 + src/mistralai/gcp/client/utils/values.py: + id: 328207e9ae81 last_write_checksum: sha1:acaa178a7c41ddd000f58cc691e4632d925b2553 pristine_git_object: dae01a44384ac3bc13ae07453a053bf6c898ebe3 examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "422": application/json: {} chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} @@ -661,7 +638,7 @@ examples: application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). 
As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"role": "assistant", "content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} examplesVersion: 1.0.2 generatedTests: {} generatedFiles: diff --git a/packages/mistralai_gcp/.speakeasy/gen.yaml b/packages/gcp/.speakeasy/gen.yaml similarity index 78% rename from packages/mistralai_gcp/.speakeasy/gen.yaml rename to packages/gcp/.speakeasy/gen.yaml index 2aacaa62..93cc5a42 100644 --- a/packages/mistralai_gcp/.speakeasy/gen.yaml +++ b/packages/gcp/.speakeasy/gen.yaml @@ -8,11 +8,13 @@ generation: useClassNamesForArrayFields: true fixes: nameResolutionDec2023: true - nameResolutionFeb2025: false + nameResolutionFeb2025: true parameterOrderingFeb2024: true requestResponseComponentNamesFeb2024: true - securityFeb2025: false - sharedErrorComponentsApr2025: false + securityFeb2025: true + sharedErrorComponentsApr2025: true + methodSignaturesApr2024: true + sharedNestedComponentsJan2026: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -26,21 +28,18 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 1.8.0 + version: 2.0.0a4 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 - main: - google-auth: ^2.31.0 - requests: ^2.32.3 allowedRedefinedBuiltins: - id - 
object asyncMode: both authors: - Mistral - baseErrorName: MistralGcpError + baseErrorName: MistralGCPError clientServerStatusCodesAsErrors: true constFieldCasing: upper defaultErrorName: SDKError @@ -48,10 +47,13 @@ python: enableCustomCodeRegions: false enumFormat: union fixFlags: - responseRequiredSep2024: false + responseRequiredSep2024: true + flatAdditionalProperties: true flattenGlobalSecurity: true flattenRequests: true flatteningOrder: parameters-first + forwardCompatibleEnumsByDefault: true + forwardCompatibleUnionsByDefault: tagged-only imports: option: openapi paths: @@ -65,12 +67,12 @@ python: license: "" maxMethodParams: 15 methodArguments: infer-optional-args - moduleName: "" + moduleName: mistralai.gcp.client multipartArrayFormat: legacy outputModelSuffix: output packageManager: uv packageName: mistralai-gcp - preApplyUnionDiscriminators: false + preApplyUnionDiscriminators: true pytestFilterWarnings: [] pytestTimeout: 0 responseFormat: flat diff --git a/packages/mistralai_gcp/CONTRIBUTING.md b/packages/gcp/CONTRIBUTING.md similarity index 100% rename from packages/mistralai_gcp/CONTRIBUTING.md rename to packages/gcp/CONTRIBUTING.md diff --git a/packages/mistralai_gcp/README.md b/packages/gcp/README.md similarity index 70% rename from packages/mistralai_gcp/README.md rename to packages/gcp/README.md index a4233244..5b66766b 100644 --- a/packages/mistralai_gcp/README.md +++ b/packages/gcp/README.md @@ -26,47 +26,58 @@ pip install mistralai[gcp] This example shows how to create chat completions. 
+The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` + ```python # Synchronous Example -from mistralai_gcp import MistralGCP import os -) +from mistralai.gcp.client import MistralGCP +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, -], model="mistral-small-latest") +], model="mistral-small-2503") if res is not None: # handle response - pass + print(res.choices[0].message.content) ```
-The same SDK client can also be used to make asychronous requests by importing asyncio. +The same SDK client can also be used to make asynchronous requests by importing asyncio. ```python # Asynchronous Example import asyncio -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP async def main(): + # The SDK auto-detects credentials and builds the Vertex AI URL s = MistralGCP( - api_key=os.getenv("API_KEY", ""), + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), ) res = await s.chat.complete_async(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], model="mistral-small-latest") + ], model="mistral-small-2503") if res is not None: # handle response - pass + print(res.choices[0].message.content) asyncio.run(main()) ``` @@ -78,12 +89,12 @@ asyncio.run(main()) ### [chat](docs/sdks/chat/README.md) * [stream](docs/sdks/chat/README.md#stream) - Stream chat completion -* [create](docs/sdks/chat/README.md#create) - Chat Completion +* [complete](docs/sdks/chat/README.md#complete) - Chat Completion ### [fim](docs/sdks/fim/README.md) * [stream](docs/sdks/fim/README.md#stream) - Stream fim completion -* [create](docs/sdks/fim/README.md#create) - Fim Completion +* [complete](docs/sdks/fim/README.md#complete) - Fim Completion @@ -96,18 +107,21 @@ terminate when the server no longer has any events to send and closes the underlying connection. 
```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.stream(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, -], model="mistral-small-latest") +], model="mistral-small-2503") if res is not None: for event in res: @@ -127,21 +141,24 @@ Some of the endpoints in this SDK support retries. If you use the SDK without an To change the default retry strategy for a single API call, simply provide a `RetryConfig` object to the call: ```python -from mistralai_gcp import MistralGCP -from mistralgcp.utils import BackoffStrategy, RetryConfig import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.utils import BackoffStrategy, RetryConfig -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.stream( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, - ], - model="mistral-small-latest", + ], + model="mistral-small-2503", retries=RetryConfig( "backoff", BackoffStrategy(1, 50, 1.1, 100), @@ -158,23 +175,25 @@ if res is not None: If you'd like to override the default retry strategy for all operations that support retries, you can use the `retry_config` optional parameter when initializing the SDK: ```python -from mistralai_gcp import MistralGCP -from mistralgcp.utils import BackoffStrategy, RetryConfig import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.utils import BackoffStrategy, RetryConfig +# The SDK auto-detects credentials and builds the Vertex AI URL s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region=os.environ.get("GCP_REGION", "us-central1"), retry_config=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False), ) - res = s.chat.stream( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], - model="mistral-small-latest" + model="mistral-small-2503", ) if res is not None: @@ -188,7 +207,7 @@ if res is not None: ## Error Handling -Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. +Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type. | Error Object | Status Code | Content Type | | -------------------------- | ----------- | ---------------- | @@ -198,21 +217,26 @@ Handling errors in this SDK should largely match your expectations. 
All operati ### Example ```python -from mistralai_gcp import MistralGCP, models import os +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client import models -s = MistralGCP() +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region=os.environ.get("GCP_REGION", "us-central1"), +) res = None try: res = s.chat.complete( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], - model="mistral-small-latest" + model="mistral-small-2503", ) except models.HTTPValidationError as e: @@ -232,61 +256,27 @@ if res is not None: ## Server Selection -### Select Server by Name - -You can override the default server globally by passing a server name to the `server: str` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the names associated with the available servers: - -| Name | Server | Variables | -| ------ | ------------------------ | --------- | -| `prod` | `https://api.mistral.ai` | None | - -#### Example - -```python -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP(server="prod") - - -res = s.chat.stream( - messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], - model="mistral-small-latest" -) - -if res is not None: - for event in res: - # handle event - print(event) - -``` - - ### Override Server URL Per-Client -The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. 
For example: +The SDK automatically constructs the Vertex AI endpoint from `project_id` and `region`: ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP +# The SDK auto-detects credentials and builds the Vertex AI URL s = MistralGCP( - server_url="https://api.mistral.ai", + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), ) - res = s.chat.stream( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], - model="mistral-small-latest" + ], + model="mistral-small-2503", ) if res is not None: @@ -306,17 +296,23 @@ This allows you to wrap the client with your own custom logic, such as adding cu For example, you could specify a header for every request that this sdk makes as follows: ```python -from mistralai_gcp import MistralGCP +import os +from mistralai.gcp.client import MistralGCP import httpx http_client = httpx.Client(headers={"x-custom-header": "someValue"}) -s = MistralGCP(client=http_client) +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), + region="us-central1", + client=http_client, +) ``` or you could wrap the client with your own custom logic: ```python -from mistralai_gcp import MistralGCP -from mistralai_gcp.httpclient import AsyncHttpClient +from typing import Any, Optional, Union +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.httpclient import AsyncHttpClient import httpx class CustomClient(AsyncHttpClient): @@ -374,7 +370,11 @@ class CustomClient(AsyncHttpClient): extensions=extensions, ) -s = MistralGCP(async_client=CustomClient(httpx.AsyncClient())) +s = MistralGCP( + project_id="", + region="us-central1", + async_client=CustomClient(httpx.AsyncClient()), +) ``` @@ -389,22 +389,25 @@ This SDK supports the 
following security scheme globally: | --------- | ---- | ----------- | | `api_key` | http | HTTP Bearer | -To authenticate with the API the `api_key` parameter must be set when initializing the SDK client instance. For example: +The SDK automatically handles GCP authentication via `google.auth.default()`. Tokens are auto-refreshed when they expire. For example: ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.stream( messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, - ], - model="mistral-small-latest" + ], + model="mistral-small-2503", ) if res is not None: @@ -421,5 +424,5 @@ if res is not None: ## Contributions -While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. -We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. +While we value open-source contributions to this SDK, this library is generated programmatically. Any manual changes added to internal files will be overwritten on the next generation. +We look forward to hearing your feedback. Feel free to open a PR or an issue with a proof of concept and we'll do our best to include it in a future release. 
diff --git a/packages/mistralai_gcp/RELEASES.md b/packages/gcp/RELEASES.md similarity index 100% rename from packages/mistralai_gcp/RELEASES.md rename to packages/gcp/RELEASES.md diff --git a/packages/gcp/USAGE.md b/packages/gcp/USAGE.md new file mode 100644 index 00000000..3156349d --- /dev/null +++ b/packages/gcp/USAGE.md @@ -0,0 +1,61 @@ + +### Create Chat Completions + +This example shows how to create chat completions. + +The SDK automatically: +- Detects credentials via `google.auth.default()` +- Auto-refreshes tokens when they expire +- Builds the Vertex AI URL from `project_id` and `region` + +```python +# Synchronous Example +import os +from mistralai.gcp.client import MistralGCP + +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) + +res = s.chat.complete(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, +], model="mistral-small-2503") + +if res is not None: + # handle response + print(res.choices[0].message.content) +``` + +
+ +The same SDK client can also be used to make asynchronous requests by importing asyncio. +```python +# Asynchronous Example +import asyncio +import os +from mistralai.gcp.client import MistralGCP + +async def main(): + # The SDK auto-detects credentials and builds the Vertex AI URL + s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected + region=os.environ.get("GCP_REGION", "us-central1"), + ) + res = await s.chat.complete_async(messages=[ + { + "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", + }, + ], model="mistral-small-2503") + if res is not None: + # handle response + print(res.choices[0].message.content) + +asyncio.run(main()) +``` + diff --git a/packages/mistralai_gcp/docs/models/arguments.md b/packages/gcp/docs/models/arguments.md similarity index 100% rename from packages/mistralai_gcp/docs/models/arguments.md rename to packages/gcp/docs/models/arguments.md diff --git a/packages/mistralai_gcp/docs/models/assistantmessage.md b/packages/gcp/docs/models/assistantmessage.md similarity index 95% rename from packages/mistralai_gcp/docs/models/assistantmessage.md rename to packages/gcp/docs/models/assistantmessage.md index 3d0bd90b..9ef63837 100644 --- a/packages/mistralai_gcp/docs/models/assistantmessage.md +++ b/packages/gcp/docs/models/assistantmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | | `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | | `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. 
| \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/assistantmessagecontent.md b/packages/gcp/docs/models/assistantmessagecontent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/assistantmessagecontent.md rename to packages/gcp/docs/models/assistantmessagecontent.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoice.md b/packages/gcp/docs/models/chatcompletionchoice.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionchoice.md rename to packages/gcp/docs/models/chatcompletionchoice.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md b/packages/gcp/docs/models/chatcompletionchoicefinishreason.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionchoicefinishreason.md rename to packages/gcp/docs/models/chatcompletionchoicefinishreason.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md b/packages/gcp/docs/models/chatcompletionrequest.md similarity index 99% rename from packages/mistralai_gcp/docs/models/chatcompletionrequest.md rename to packages/gcp/docs/models/chatcompletionrequest.md index 6886f9dc..61a25d86 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionrequest.md +++ b/packages/gcp/docs/models/chatcompletionrequest.md @@ -13,7 +13,7 @@ | `stop` | [Optional[models.ChatCompletionRequestStop]](../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.ChatCompletionRequestMessages](../models/chatcompletionrequestmessages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/packages/mistralai_gcp/docs/models/messages.md b/packages/gcp/docs/models/chatcompletionrequestmessage.md similarity index 92% rename from packages/mistralai_gcp/docs/models/messages.md rename to packages/gcp/docs/models/chatcompletionrequestmessage.md index 1d394500..91e9e062 100644 --- a/packages/mistralai_gcp/docs/models/messages.md +++ b/packages/gcp/docs/models/chatcompletionrequestmessage.md @@ -1,4 +1,4 @@ -# Messages +# ChatCompletionRequestMessage ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md b/packages/gcp/docs/models/chatcompletionrequeststop.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionrequeststop.md rename to packages/gcp/docs/models/chatcompletionrequeststop.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md b/packages/gcp/docs/models/chatcompletionrequesttoolchoice.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionrequesttoolchoice.md rename to packages/gcp/docs/models/chatcompletionrequesttoolchoice.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionresponse.md b/packages/gcp/docs/models/chatcompletionresponse.md 
similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionresponse.md rename to packages/gcp/docs/models/chatcompletionresponse.md diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md b/packages/gcp/docs/models/chatcompletionstreamrequest.md similarity index 99% rename from packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md rename to packages/gcp/docs/models/chatcompletionstreamrequest.md index ff1940dd..3e790e7d 100644 --- a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/gcp/docs/models/chatcompletionstreamrequest.md @@ -10,10 +10,10 @@ | `top_p` | *Optional[float]* | :heavy_minus_sign: | Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | | | `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. | | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `stop` | [Optional[models.Stop]](../models/stop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | +| `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `messages` | List[[models.Messages](../models/messages.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | +| `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | | `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | diff --git a/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md b/packages/gcp/docs/models/chatcompletionstreamrequestmessage.md similarity index 91% rename from packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md rename to packages/gcp/docs/models/chatcompletionstreamrequestmessage.md index bc7708a6..2e4e93ac 100644 --- a/packages/mistralai_azure/docs/models/chatcompletionrequestmessages.md +++ b/packages/gcp/docs/models/chatcompletionstreamrequestmessage.md @@ -1,4 +1,4 @@ -# ChatCompletionRequestMessages +# ChatCompletionStreamRequestMessage ## Supported Types diff --git a/packages/mistralai_azure/docs/models/stop.md b/packages/gcp/docs/models/chatcompletionstreamrequeststop.md similarity index 88% rename from packages/mistralai_azure/docs/models/stop.md rename to packages/gcp/docs/models/chatcompletionstreamrequeststop.md index ba40ca83..a48460a9 100644 --- a/packages/mistralai_azure/docs/models/stop.md +++ b/packages/gcp/docs/models/chatcompletionstreamrequeststop.md @@ -1,4 +1,4 @@ -# Stop +# ChatCompletionStreamRequestStop Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array diff --git a/packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md b/packages/gcp/docs/models/chatcompletionstreamrequesttoolchoice.md similarity index 100% rename from packages/mistralai_gcp/docs/models/chatcompletionstreamrequesttoolchoice.md rename to packages/gcp/docs/models/chatcompletionstreamrequesttoolchoice.md diff --git a/packages/mistralai_gcp/docs/models/completionchunk.md b/packages/gcp/docs/models/completionchunk.md similarity index 100% rename from packages/mistralai_gcp/docs/models/completionchunk.md rename to packages/gcp/docs/models/completionchunk.md diff --git a/packages/mistralai_gcp/docs/models/completionevent.md b/packages/gcp/docs/models/completionevent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/completionevent.md rename to packages/gcp/docs/models/completionevent.md diff --git a/packages/gcp/docs/models/completionresponsestreamchoice.md b/packages/gcp/docs/models/completionresponsestreamchoice.md new file mode 100644 index 00000000..1532c25b --- /dev/null +++ b/packages/gcp/docs/models/completionresponsestreamchoice.md @@ -0,0 +1,10 @@ +# CompletionResponseStreamChoice + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | +| `index` | *int* | :heavy_check_mark: | N/A | +| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | +| `finish_reason` | 
[Nullable[models.CompletionResponseStreamChoiceFinishReason]](../models/completionresponsestreamchoicefinishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/finishreason.md b/packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md similarity index 81% rename from packages/mistralai_azure/docs/models/finishreason.md rename to packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md index 45a5aedb..0fece473 100644 --- a/packages/mistralai_azure/docs/models/finishreason.md +++ b/packages/gcp/docs/models/completionresponsestreamchoicefinishreason.md @@ -1,4 +1,4 @@ -# FinishReason +# CompletionResponseStreamChoiceFinishReason ## Values diff --git a/packages/mistralai_gcp/docs/models/contentchunk.md b/packages/gcp/docs/models/contentchunk.md similarity index 100% rename from packages/mistralai_gcp/docs/models/contentchunk.md rename to packages/gcp/docs/models/contentchunk.md diff --git a/packages/gcp/docs/models/deltamessage.md b/packages/gcp/docs/models/deltamessage.md new file mode 100644 index 00000000..e0ee575f --- /dev/null +++ b/packages/gcp/docs/models/deltamessage.md @@ -0,0 +1,10 @@ +# DeltaMessage + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | +| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `content` | [OptionalNullable[models.DeltaMessageContent]](../models/deltamessagecontent.md) | :heavy_minus_sign: | N/A | +| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/content.md 
b/packages/gcp/docs/models/deltamessagecontent.md similarity index 89% rename from packages/mistralai_azure/docs/models/content.md rename to packages/gcp/docs/models/deltamessagecontent.md index a833dc2c..8142772d 100644 --- a/packages/mistralai_azure/docs/models/content.md +++ b/packages/gcp/docs/models/deltamessagecontent.md @@ -1,4 +1,4 @@ -# Content +# DeltaMessageContent ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequest.md b/packages/gcp/docs/models/fimcompletionrequest.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionrequest.md rename to packages/gcp/docs/models/fimcompletionrequest.md diff --git a/packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md b/packages/gcp/docs/models/fimcompletionrequeststop.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionrequeststop.md rename to packages/gcp/docs/models/fimcompletionrequeststop.md diff --git a/packages/mistralai_gcp/docs/models/fimcompletionresponse.md b/packages/gcp/docs/models/fimcompletionresponse.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionresponse.md rename to packages/gcp/docs/models/fimcompletionresponse.md diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md b/packages/gcp/docs/models/fimcompletionstreamrequest.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionstreamrequest.md rename to packages/gcp/docs/models/fimcompletionstreamrequest.md diff --git a/packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md b/packages/gcp/docs/models/fimcompletionstreamrequeststop.md similarity index 100% rename from packages/mistralai_gcp/docs/models/fimcompletionstreamrequeststop.md rename to packages/gcp/docs/models/fimcompletionstreamrequeststop.md diff --git a/packages/mistralai_gcp/docs/models/function.md b/packages/gcp/docs/models/function.md similarity index 100% rename from 
packages/mistralai_gcp/docs/models/function.md rename to packages/gcp/docs/models/function.md diff --git a/packages/mistralai_gcp/docs/models/functioncall.md b/packages/gcp/docs/models/functioncall.md similarity index 100% rename from packages/mistralai_gcp/docs/models/functioncall.md rename to packages/gcp/docs/models/functioncall.md diff --git a/packages/mistralai_gcp/docs/models/functionname.md b/packages/gcp/docs/models/functionname.md similarity index 100% rename from packages/mistralai_gcp/docs/models/functionname.md rename to packages/gcp/docs/models/functionname.md diff --git a/packages/mistralai_gcp/docs/models/httpvalidationerror.md b/packages/gcp/docs/models/httpvalidationerror.md similarity index 100% rename from packages/mistralai_gcp/docs/models/httpvalidationerror.md rename to packages/gcp/docs/models/httpvalidationerror.md diff --git a/packages/gcp/docs/models/imagedetail.md b/packages/gcp/docs/models/imagedetail.md new file mode 100644 index 00000000..1e5ba3fd --- /dev/null +++ b/packages/gcp/docs/models/imagedetail.md @@ -0,0 +1,10 @@ +# ImageDetail + + +## Values + +| Name | Value | +| ------ | ------ | +| `LOW` | low | +| `AUTO` | auto | +| `HIGH` | high | \ No newline at end of file diff --git a/packages/gcp/docs/models/imageurl.md b/packages/gcp/docs/models/imageurl.md new file mode 100644 index 00000000..6358e0ac --- /dev/null +++ b/packages/gcp/docs/models/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | [OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git 
a/packages/gcp/docs/models/imageurlchunk.md b/packages/gcp/docs/models/imageurlchunk.md new file mode 100644 index 00000000..a84dac32 --- /dev/null +++ b/packages/gcp/docs/models/imageurlchunk.md @@ -0,0 +1,11 @@ +# ImageURLChunk + +{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Literal["image_url"]* | :heavy_check_mark: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurlchunkimageurl.md b/packages/gcp/docs/models/imageurlunion.md similarity index 86% rename from packages/mistralai_azure/docs/models/imageurlchunkimageurl.md rename to packages/gcp/docs/models/imageurlunion.md index 76738908..db97130f 100644 --- a/packages/mistralai_azure/docs/models/imageurlchunkimageurl.md +++ b/packages/gcp/docs/models/imageurlunion.md @@ -1,4 +1,4 @@ -# ImageURLChunkImageURL +# ImageURLUnion ## Supported Types diff --git a/packages/mistralai_gcp/docs/models/jsonschema.md b/packages/gcp/docs/models/jsonschema.md similarity index 100% rename from packages/mistralai_gcp/docs/models/jsonschema.md rename to packages/gcp/docs/models/jsonschema.md diff --git a/packages/mistralai_gcp/docs/models/loc.md b/packages/gcp/docs/models/loc.md similarity index 100% rename from packages/mistralai_gcp/docs/models/loc.md rename to packages/gcp/docs/models/loc.md diff --git a/packages/mistralai_gcp/docs/models/mistralpromptmode.md b/packages/gcp/docs/models/mistralpromptmode.md similarity index 100% rename from packages/mistralai_gcp/docs/models/mistralpromptmode.md rename to packages/gcp/docs/models/mistralpromptmode.md diff --git 
a/packages/mistralai_gcp/docs/models/prediction.md b/packages/gcp/docs/models/prediction.md similarity index 100% rename from packages/mistralai_gcp/docs/models/prediction.md rename to packages/gcp/docs/models/prediction.md diff --git a/packages/gcp/docs/models/referencechunk.md b/packages/gcp/docs/models/referencechunk.md new file mode 100644 index 00000000..d847e248 --- /dev/null +++ b/packages/gcp/docs/models/referencechunk.md @@ -0,0 +1,9 @@ +# ReferenceChunk + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/responseformat.md b/packages/gcp/docs/models/responseformat.md similarity index 100% rename from packages/mistralai_gcp/docs/models/responseformat.md rename to packages/gcp/docs/models/responseformat.md diff --git a/packages/mistralai_gcp/docs/models/responseformats.md b/packages/gcp/docs/models/responseformats.md similarity index 100% rename from packages/mistralai_gcp/docs/models/responseformats.md rename to packages/gcp/docs/models/responseformats.md diff --git a/packages/mistralai_gcp/docs/models/security.md b/packages/gcp/docs/models/security.md similarity index 100% rename from packages/mistralai_gcp/docs/models/security.md rename to packages/gcp/docs/models/security.md diff --git a/packages/mistralai_azure/docs/models/systemmessage.md b/packages/gcp/docs/models/systemmessage.md similarity index 88% rename from packages/mistralai_azure/docs/models/systemmessage.md rename to packages/gcp/docs/models/systemmessage.md index 0dba71c0..10bda10f 100644 --- a/packages/mistralai_azure/docs/models/systemmessage.md +++ b/packages/gcp/docs/models/systemmessage.md @@ -5,5 +5,5 @@ | Field | Type | Required 
| Description | | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.Role]](../models/role.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontent.md b/packages/gcp/docs/models/systemmessagecontent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/systemmessagecontent.md rename to packages/gcp/docs/models/systemmessagecontent.md diff --git a/packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md b/packages/gcp/docs/models/systemmessagecontentchunks.md similarity index 100% rename from packages/mistralai_gcp/docs/models/systemmessagecontentchunks.md rename to packages/gcp/docs/models/systemmessagecontentchunks.md diff --git a/packages/gcp/docs/models/textchunk.md b/packages/gcp/docs/models/textchunk.md new file mode 100644 index 00000000..b266619d --- /dev/null +++ b/packages/gcp/docs/models/textchunk.md @@ -0,0 +1,9 @@ +# TextChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `type` | *Literal["text"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/thinkchunk.md b/packages/gcp/docs/models/thinkchunk.md similarity index 91% rename from packages/mistralai_gcp/docs/models/thinkchunk.md rename to 
packages/gcp/docs/models/thinkchunk.md index 66b2e0cd..b07f598e 100644 --- a/packages/mistralai_gcp/docs/models/thinkchunk.md +++ b/packages/gcp/docs/models/thinkchunk.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | +| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | | `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | -| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. | -| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. 
| \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/thinking.md b/packages/gcp/docs/models/thinking.md similarity index 100% rename from packages/mistralai_gcp/docs/models/thinking.md rename to packages/gcp/docs/models/thinking.md diff --git a/packages/mistralai_gcp/docs/models/tool.md b/packages/gcp/docs/models/tool.md similarity index 100% rename from packages/mistralai_gcp/docs/models/tool.md rename to packages/gcp/docs/models/tool.md diff --git a/packages/mistralai_gcp/docs/models/toolcall.md b/packages/gcp/docs/models/toolcall.md similarity index 100% rename from packages/mistralai_gcp/docs/models/toolcall.md rename to packages/gcp/docs/models/toolcall.md diff --git a/packages/mistralai_gcp/docs/models/toolchoice.md b/packages/gcp/docs/models/toolchoice.md similarity index 100% rename from packages/mistralai_gcp/docs/models/toolchoice.md rename to packages/gcp/docs/models/toolchoice.md diff --git a/packages/mistralai_gcp/docs/models/toolchoiceenum.md b/packages/gcp/docs/models/toolchoiceenum.md similarity index 100% rename from packages/mistralai_gcp/docs/models/toolchoiceenum.md rename to packages/gcp/docs/models/toolchoiceenum.md diff --git a/packages/mistralai_azure/docs/models/toolmessage.md b/packages/gcp/docs/models/toolmessage.md similarity index 92% rename from packages/mistralai_azure/docs/models/toolmessage.md rename to packages/gcp/docs/models/toolmessage.md index a54f4933..7201481e 100644 --- a/packages/mistralai_azure/docs/models/toolmessage.md +++ b/packages/gcp/docs/models/toolmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | | `content` | 
[Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | | `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.ToolMessageRole]](../models/toolmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessagecontent.md b/packages/gcp/docs/models/toolmessagecontent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/toolmessagecontent.md rename to packages/gcp/docs/models/toolmessagecontent.md diff --git a/packages/mistralai_gcp/docs/models/tooltypes.md b/packages/gcp/docs/models/tooltypes.md similarity index 100% rename from packages/mistralai_gcp/docs/models/tooltypes.md rename to packages/gcp/docs/models/tooltypes.md diff --git a/packages/mistralai_gcp/docs/models/usageinfo.md b/packages/gcp/docs/models/usageinfo.md similarity index 100% rename from packages/mistralai_gcp/docs/models/usageinfo.md rename to packages/gcp/docs/models/usageinfo.md diff --git a/packages/mistralai_azure/docs/models/usermessage.md b/packages/gcp/docs/models/usermessage.md similarity index 89% rename from packages/mistralai_azure/docs/models/usermessage.md rename to packages/gcp/docs/models/usermessage.md index 63b01310..e7a932ed 100644 --- a/packages/mistralai_azure/docs/models/usermessage.md +++ b/packages/gcp/docs/models/usermessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `content` | 
[Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | [Optional[models.UserMessageRole]](../models/usermessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessagecontent.md b/packages/gcp/docs/models/usermessagecontent.md similarity index 100% rename from packages/mistralai_gcp/docs/models/usermessagecontent.md rename to packages/gcp/docs/models/usermessagecontent.md diff --git a/packages/mistralai_gcp/docs/models/utils/retryconfig.md b/packages/gcp/docs/models/utils/retryconfig.md similarity index 100% rename from packages/mistralai_gcp/docs/models/utils/retryconfig.md rename to packages/gcp/docs/models/utils/retryconfig.md diff --git a/packages/mistralai_gcp/docs/models/validationerror.md b/packages/gcp/docs/models/validationerror.md similarity index 100% rename from packages/mistralai_gcp/docs/models/validationerror.md rename to packages/gcp/docs/models/validationerror.md diff --git a/packages/mistralai_gcp/docs/sdks/chat/README.md b/packages/gcp/docs/sdks/chat/README.md similarity index 96% rename from packages/mistralai_gcp/docs/sdks/chat/README.md rename to packages/gcp/docs/sdks/chat/README.md index 6f5f1977..a1fdfd9a 100644 --- a/packages/mistralai_gcp/docs/sdks/chat/README.md +++ b/packages/gcp/docs/sdks/chat/README.md @@ -8,7 +8,7 @@ Chat Completion API. 
### Available Operations * [stream](#stream) - Stream chat completion -* [create](#create) - Chat Completion +* [complete](#complete) - Chat Completion ## stream @@ -17,18 +17,21 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.stream(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, -], model="mistral-small-latest") +], model="mistral-small-2503") if res is not None: for event in res: @@ -65,29 +68,32 @@ if res is not None: | --------------- | ----------- | ------------ | | models.SDKError | 4xx-5xx | */* | -## create +## complete Chat Completion ### Example Usage ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) res = s.chat.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, -], model="mistral-small-latest") +], model="mistral-small-2503") if res is not None: # handle response - pass + print(res.choices[0].message.content) ``` diff --git a/packages/mistralai_gcp/docs/sdks/fim/README.md b/packages/gcp/docs/sdks/fim/README.md similarity index 94% rename from packages/mistralai_gcp/docs/sdks/fim/README.md rename to packages/gcp/docs/sdks/fim/README.md index b997fabf..61a28883 100644 --- a/packages/mistralai_gcp/docs/sdks/fim/README.md +++ b/packages/gcp/docs/sdks/fim/README.md @@ -8,7 +8,7 @@ Fill-in-the-middle API. ### Available Operations * [stream](#stream) - Stream fim completion -* [create](#create) - Fim Completion +* [complete](#complete) - Fim Completion ## stream @@ -17,13 +17,16 @@ Mistral AI provides the ability to stream responses back to a client in order to ### Example Usage ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) - -res = s.fim.stream(prompt="def", model="codestral-2405", suffix="return a+b") +res = s.fim.stream(prompt="def", model="codestral-2", suffix="return a+b") if res is not None: for event in res: @@ -58,24 +61,27 @@ if res is not None: | --------------- | ----------- | ------------ | | models.SDKError | 4xx-5xx | */* | -## create +## complete FIM completion. 
### Example Usage ```python -from mistralai_gcp import MistralGCP import os +from mistralai.gcp.client import MistralGCP -s = MistralGCP() - +# The SDK auto-detects credentials and builds the Vertex AI URL +s = MistralGCP( + project_id=os.environ.get("GCP_PROJECT_ID"), # Optional: auto-detected from credentials + region=os.environ.get("GCP_REGION", "us-central1"), +) -res = s.fim.complete(prompt="def", model="codestral-2405", suffix="return a+b") +res = s.fim.complete(prompt="def", model="codestral-2", suffix="return a+b") if res is not None: # handle response - pass + print(res.choices[0].message.content) ``` diff --git a/packages/mistralai_gcp/docs/sdks/mistralgcp/README.md b/packages/gcp/docs/sdks/mistralgcp/README.md similarity index 100% rename from packages/mistralai_gcp/docs/sdks/mistralgcp/README.md rename to packages/gcp/docs/sdks/mistralgcp/README.md diff --git a/packages/mistralai_gcp/py.typed b/packages/gcp/py.typed similarity index 100% rename from packages/mistralai_gcp/py.typed rename to packages/gcp/py.typed diff --git a/packages/mistralai_gcp/pylintrc b/packages/gcp/pylintrc similarity index 100% rename from packages/mistralai_gcp/pylintrc rename to packages/gcp/pylintrc diff --git a/packages/mistralai_gcp/pyproject.toml b/packages/gcp/pyproject.toml similarity index 78% rename from packages/mistralai_gcp/pyproject.toml rename to packages/gcp/pyproject.toml index df3e43ae..98619ecd 100644 --- a/packages/mistralai_gcp/pyproject.toml +++ b/packages/gcp/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai-gcp" -version = "1.8.0" +version = "2.0.0a4" description = "Python Client SDK for the Mistral AI API in GCP." 
authors = [{ name = "Mistral" }] requires-python = ">=3.10" @@ -9,7 +9,7 @@ dependencies = [ "eval-type-backport >=0.2.0", "google-auth (>=2.31.0,<3.0.0)", "httpx >=0.28.1", - "pydantic >=2.10.3", + "pydantic >=2.11.2", "python-dateutil >=2.8.2", "requests (>=2.32.3,<3.0.0)", "typing-inspection >=0.4.0", @@ -17,7 +17,7 @@ dependencies = [ [dependency-groups] dev = [ - "mypy==1.14.1", + "mypy==1.15.0", "pylint==3.2.3", "pyright>=1.1.401,<2", "pytest>=8.2.2,<9", @@ -26,20 +26,20 @@ dev = [ ] [tool.setuptools.package-data] -"*" = ["py.typed", "src/mistralai_gcp/py.typed"] +"*" = ["py.typed", "src/mistralai/gcp/client/py.typed"] [tool.hatch.build.targets.sdist] -include = ["src/mistralai_gcp"] +include = ["src/mistralai"] [tool.hatch.build.targets.sdist.force-include] "py.typed" = "py.typed" -"src/mistralai_gcp/py.typed" = "src/mistralai_gcp/py.typed" +"src/mistralai/gcp/client/py.typed" = "src/mistralai/gcp/client/py.typed" [tool.hatch.build.targets.wheel] -include = ["src/mistralai_gcp"] +include = ["src/mistralai"] [tool.hatch.build.targets.wheel.sources] -"src/mistralai_gcp" = "mistralai_gcp" +"src" = "" [virtualenvs] in-project = true @@ -53,6 +53,9 @@ pythonpath = ["src"] [tool.mypy] disable_error_code = "misc" +namespace_packages = true +explicit_package_bases = true +mypy_path = "src" [[tool.mypy.overrides]] module = "typing_inspect" diff --git a/packages/mistralai_azure/scripts/prepare_readme.py b/packages/gcp/scripts/prepare_readme.py similarity index 96% rename from packages/mistralai_azure/scripts/prepare_readme.py rename to packages/gcp/scripts/prepare_readme.py index ff1121fd..ae27b555 100644 --- a/packages/mistralai_azure/scripts/prepare_readme.py +++ b/packages/gcp/scripts/prepare_readme.py @@ -10,7 +10,7 @@ GITHUB_URL = ( GITHUB_URL[: -len(".git")] if GITHUB_URL.endswith(".git") else GITHUB_URL ) - REPO_SUBDIR = "packages/mistralai_azure" + REPO_SUBDIR = "packages/gcp" # Ensure the subdirectory has a trailing slash if not REPO_SUBDIR.endswith("/"): 
REPO_SUBDIR += "/" diff --git a/packages/mistralai_gcp/scripts/publish.sh b/packages/gcp/scripts/publish.sh similarity index 100% rename from packages/mistralai_gcp/scripts/publish.sh rename to packages/gcp/scripts/publish.sh diff --git a/packages/mistralai_gcp/src/mistralai_gcp/__init__.py b/packages/gcp/src/mistralai/gcp/client/__init__.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/__init__.py rename to packages/gcp/src/mistralai/gcp/client/__init__.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py b/packages/gcp/src/mistralai/gcp/client/_hooks/__init__.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/_hooks/__init__.py rename to packages/gcp/src/mistralai/gcp/client/_hooks/__init__.py diff --git a/packages/gcp/src/mistralai/gcp/client/_hooks/registration.py b/packages/gcp/src/mistralai/gcp/client/_hooks/registration.py new file mode 100644 index 00000000..23d3283d --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/_hooks/registration.py @@ -0,0 +1,67 @@ +import json +import logging +from .types import BeforeRequestHook, BeforeRequestContext, Hooks +import httpx + +logger = logging.getLogger(__name__) + + +# This file is only ever generated once on the first generation and then is free to be modified. +# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them +# in this file or in separate files in the hooks folder. + + +class GCPVertexAIPathHook(BeforeRequestHook): + """Build full Vertex AI URL path from project_id, region, and model. + + Extracts model from request body and builds the Vertex AI URL dynamically. 
+ """ + + def __init__(self, project_id: str, region: str): + self.project_id = project_id + self.region = region + + def before_request( + self, hook_ctx: BeforeRequestContext, request: httpx.Request + ) -> httpx.Request: + if not request.content: + return request + + try: + body = json.loads(request.content.decode("utf-8")) + except (UnicodeDecodeError, json.JSONDecodeError): + # Non-JSON body (e.g. multipart upload) — pass through unmodified + return request + + model = body.get("model") + if not model: + logger.warning( + "GCPVertexAIPathHook: request body has no 'model' field; " + "Vertex AI path will not be constructed. " + "Operation: %s", + hook_ctx.operation_id, + ) + return request + + is_streaming = "stream" in hook_ctx.operation_id.lower() + specifier = "streamRawPredict" if is_streaming else "rawPredict" + + path = ( + f"/v1/projects/{self.project_id}/locations/{self.region}/" + f"publishers/mistralai/models/{model}:{specifier}" + ) + + return httpx.Request( + method=request.method, + url=request.url.copy_with(path=path), + headers=request.headers, + content=request.content, + ) + + +def init_hooks(_hooks: Hooks) -> None: + """Initialize hooks. Called by SDKHooks.__init__. + + Note: GCPVertexAIPathHook requires project_id and region, so it is + registered separately in MistralGCP.__init__ after those values are known. 
+ """ diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py b/packages/gcp/src/mistralai/gcp/client/_hooks/sdkhooks.py similarity index 97% rename from packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py rename to packages/gcp/src/mistralai/gcp/client/_hooks/sdkhooks.py index b81c2a27..2af4deed 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/sdkhooks.py +++ b/packages/gcp/src/mistralai/gcp/client/_hooks/sdkhooks.py @@ -13,7 +13,7 @@ ) from .registration import init_hooks from typing import List, Optional, Tuple -from mistralai_gcp.httpclient import HttpClient +from mistralai.gcp.client.httpclient import HttpClient class SDKHooks(Hooks): diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py b/packages/gcp/src/mistralai/gcp/client/_hooks/types.py similarity index 96% rename from packages/mistralai_azure/src/mistralai_azure/_hooks/types.py rename to packages/gcp/src/mistralai/gcp/client/_hooks/types.py index 0c22d7eb..ea95bed2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/types.py +++ b/packages/gcp/src/mistralai/gcp/client/_hooks/types.py @@ -2,8 +2,8 @@ from abc import ABC, abstractmethod import httpx -from mistralai_azure.httpclient import HttpClient -from mistralai_azure.sdkconfiguration import SDKConfiguration +from mistralai.gcp.client.httpclient import HttpClient +from mistralai.gcp.client.sdkconfiguration import SDKConfiguration from typing import Any, Callable, List, Optional, Tuple, Union diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_version.py b/packages/gcp/src/mistralai/gcp/client/_version.py similarity index 76% rename from packages/mistralai_gcp/src/mistralai_gcp/_version.py rename to packages/gcp/src/mistralai/gcp/client/_version.py index a170f0ab..ba48dac1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/_version.py +++ b/packages/gcp/src/mistralai/gcp/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: 
str = "1.8.0" +__version__: str = "2.0.0a4" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 1.8.0 2.794.1 1.0.0 mistralai-gcp" +__user_agent__: str = "speakeasy-sdk/python 2.0.0a4 2.794.1 1.0.0 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py b/packages/gcp/src/mistralai/gcp/client/basesdk.py similarity index 98% rename from packages/mistralai_gcp/src/mistralai_gcp/basesdk.py rename to packages/gcp/src/mistralai/gcp/client/basesdk.py index 7a93de23..6f9f5fd9 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/basesdk.py +++ b/packages/gcp/src/mistralai/gcp/client/basesdk.py @@ -2,13 +2,17 @@ from .sdkconfiguration import SDKConfiguration import httpx -from mistralai_gcp import models, utils -from mistralai_gcp._hooks import ( +from mistralai.gcp.client import models, utils +from mistralai.gcp.client._hooks import ( AfterErrorContext, AfterSuccessContext, BeforeRequestContext, ) -from mistralai_gcp.utils import RetryConfig, SerializedRequestBody, get_body_content +from mistralai.gcp.client.utils import ( + RetryConfig, + SerializedRequestBody, + get_body_content, +) from typing import Callable, List, Mapping, Optional, Tuple from urllib.parse import parse_qs, urlparse diff --git a/packages/mistralai_gcp/src/mistralai_gcp/chat.py b/packages/gcp/src/mistralai/gcp/client/chat.py similarity index 96% rename from packages/mistralai_gcp/src/mistralai_gcp/chat.py rename to packages/gcp/src/mistralai/gcp/client/chat.py index 3dd6040f..78541248 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/chat.py +++ b/packages/gcp/src/mistralai/gcp/client/chat.py @@ -1,11 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai_gcp import models, utils -from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import OptionalNullable, UNSET -from mistralai_gcp.utils import eventstreaming -from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response +from mistralai.gcp.client import models, utils +from mistralai.gcp.client._hooks import HookContext +from mistralai.gcp.client.types import OptionalNullable, UNSET +from mistralai.gcp.client.utils import eventstreaming +from mistralai.gcp.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Dict, List, Mapping, Optional, Union @@ -16,12 +16,20 @@ def stream( self, *, model: str, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], + ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ @@ -95,7 +103,9 @@ def stream( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -183,12 +193,20 @@ async def stream_async( self, *, model: str, - messages: Union[List[models.Messages], List[models.MessagesTypedDict]], + messages: Union[ + List[models.ChatCompletionStreamRequestMessage], + 
List[models.ChatCompletionStreamRequestMessageTypedDict], + ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, - stop: Optional[Union[models.Stop, models.StopTypedDict]] = None, + stop: Optional[ + Union[ + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, + ] + ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ @@ -262,7 +280,9 @@ async def stream_async( stop=stop, random_seed=random_seed, metadata=metadata, - messages=utils.get_pydantic_model(messages, List[models.Messages]), + messages=utils.get_pydantic_model( + messages, List[models.ChatCompletionStreamRequestMessage] + ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] ), @@ -351,8 +371,8 @@ def complete( *, model: str, messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -436,7 +456,7 @@ def complete( random_seed=random_seed, metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionRequestMessages] + messages, List[models.ChatCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] @@ -518,8 +538,8 @@ async def complete_async( *, model: str, messages: Union[ - List[models.ChatCompletionRequestMessages], - List[models.ChatCompletionRequestMessagesTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -603,7 +623,7 @@ async def complete_async( random_seed=random_seed, 
metadata=metadata, messages=utils.get_pydantic_model( - messages, List[models.ChatCompletionRequestMessages] + messages, List[models.ChatCompletionRequestMessage] ), response_format=utils.get_pydantic_model( response_format, Optional[models.ResponseFormat] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/fim.py b/packages/gcp/src/mistralai/gcp/client/fim.py similarity index 98% rename from packages/mistralai_gcp/src/mistralai_gcp/fim.py rename to packages/gcp/src/mistralai/gcp/client/fim.py index 36d9fd60..e2acacd5 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/fim.py +++ b/packages/gcp/src/mistralai/gcp/client/fim.py @@ -1,11 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai_gcp import models, utils -from mistralai_gcp._hooks import HookContext -from mistralai_gcp.types import OptionalNullable, UNSET -from mistralai_gcp.utils import eventstreaming -from mistralai_gcp.utils.unmarshal_json_response import unmarshal_json_response +from mistralai.gcp.client import models, utils +from mistralai.gcp.client._hooks import HookContext +from mistralai.gcp.client.types import OptionalNullable, UNSET +from mistralai.gcp.client.utils import eventstreaming +from mistralai.gcp.client.utils.unmarshal_json_response import unmarshal_json_response from typing import Any, Dict, Mapping, Optional, Union diff --git a/packages/mistralai_gcp/src/mistralai_gcp/httpclient.py b/packages/gcp/src/mistralai/gcp/client/httpclient.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/httpclient.py rename to packages/gcp/src/mistralai/gcp/client/httpclient.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py b/packages/gcp/src/mistralai/gcp/client/models/__init__.py similarity index 85% rename from packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py rename to 
packages/gcp/src/mistralai/gcp/client/models/__init__.py index fe85b133..fb446c25 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/__init__.py +++ b/packages/gcp/src/mistralai/gcp/client/models/__init__.py @@ -1,6 +1,6 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from .mistralgcperror import MistralGcpError +from .mistralgcperror import MistralGCPError from typing import TYPE_CHECKING from importlib import import_module import builtins @@ -11,7 +11,6 @@ AssistantMessage, AssistantMessageContent, AssistantMessageContentTypedDict, - AssistantMessageRole, AssistantMessageTypedDict, ) from .chatcompletionchoice import ( @@ -21,8 +20,8 @@ ) from .chatcompletionrequest import ( ChatCompletionRequest, - ChatCompletionRequestMessages, - ChatCompletionRequestMessagesTypedDict, + ChatCompletionRequestMessage, + ChatCompletionRequestMessageTypedDict, ChatCompletionRequestStop, ChatCompletionRequestStopTypedDict, ChatCompletionRequestToolChoice, @@ -35,26 +34,26 @@ ) from .chatcompletionstreamrequest import ( ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessage, + ChatCompletionStreamRequestMessageTypedDict, + ChatCompletionStreamRequestStop, + ChatCompletionStreamRequestStopTypedDict, ChatCompletionStreamRequestToolChoice, ChatCompletionStreamRequestToolChoiceTypedDict, ChatCompletionStreamRequestTypedDict, - Messages, - MessagesTypedDict, - Stop, - StopTypedDict, ) from .completionchunk import CompletionChunk, CompletionChunkTypedDict from .completionevent import CompletionEvent, CompletionEventTypedDict from .completionresponsestreamchoice import ( CompletionResponseStreamChoice, + CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict, - FinishReason, ) from .contentchunk import ContentChunk, ContentChunkTypedDict from .deltamessage import ( - Content, - ContentTypedDict, DeltaMessage, + DeltaMessageContent, + DeltaMessageContentTypedDict, 
DeltaMessageTypedDict, ) from .fimcompletionrequest import ( @@ -82,30 +81,25 @@ ) from .functionname import FunctionName, FunctionNameTypedDict from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imagedetail import ImageDetail from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( ImageURLChunk, - ImageURLChunkImageURL, - ImageURLChunkImageURLTypedDict, - ImageURLChunkType, ImageURLChunkTypedDict, + ImageURLUnion, + ImageURLUnionTypedDict, ) from .jsonschema import JSONSchema, JSONSchemaTypedDict from .mistralpromptmode import MistralPromptMode from .no_response_error import NoResponseError from .prediction import Prediction, PredictionTypedDict - from .referencechunk import ( - ReferenceChunk, - ReferenceChunkType, - ReferenceChunkTypedDict, - ) + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats from .responsevalidationerror import ResponseValidationError from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( - Role, SystemMessage, SystemMessageContent, SystemMessageContentTypedDict, @@ -115,14 +109,8 @@ SystemMessageContentChunks, SystemMessageContentChunksTypedDict, ) - from .textchunk import TextChunk, TextChunkTypedDict, Type - from .thinkchunk import ( - ThinkChunk, - ThinkChunkType, - ThinkChunkTypedDict, - Thinking, - ThinkingTypedDict, - ) + from .textchunk import TextChunk, TextChunkTypedDict + from .thinkchunk import ThinkChunk, ThinkChunkTypedDict, Thinking, ThinkingTypedDict from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict from .toolchoice import ToolChoice, ToolChoiceTypedDict @@ -131,7 +119,6 @@ ToolMessage, ToolMessageContent, ToolMessageContentTypedDict, - ToolMessageRole, ToolMessageTypedDict, ) from .tooltypes import ToolTypes @@ -140,7 +127,6 @@ UserMessage, 
UserMessageContent, UserMessageContentTypedDict, - UserMessageRole, UserMessageTypedDict, ) from .validationerror import ( @@ -156,14 +142,13 @@ "AssistantMessage", "AssistantMessageContent", "AssistantMessageContentTypedDict", - "AssistantMessageRole", "AssistantMessageTypedDict", "ChatCompletionChoice", "ChatCompletionChoiceFinishReason", "ChatCompletionChoiceTypedDict", "ChatCompletionRequest", - "ChatCompletionRequestMessages", - "ChatCompletionRequestMessagesTypedDict", + "ChatCompletionRequestMessage", + "ChatCompletionRequestMessageTypedDict", "ChatCompletionRequestStop", "ChatCompletionRequestStopTypedDict", "ChatCompletionRequestToolChoice", @@ -172,6 +157,10 @@ "ChatCompletionResponse", "ChatCompletionResponseTypedDict", "ChatCompletionStreamRequest", + "ChatCompletionStreamRequestMessage", + "ChatCompletionStreamRequestMessageTypedDict", + "ChatCompletionStreamRequestStop", + "ChatCompletionStreamRequestStopTypedDict", "ChatCompletionStreamRequestToolChoice", "ChatCompletionStreamRequestToolChoiceTypedDict", "ChatCompletionStreamRequestTypedDict", @@ -180,12 +169,13 @@ "CompletionEvent", "CompletionEventTypedDict", "CompletionResponseStreamChoice", + "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", - "Content", "ContentChunk", "ContentChunkTypedDict", - "ContentTypedDict", "DeltaMessage", + "DeltaMessageContent", + "DeltaMessageContentTypedDict", "DeltaMessageTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", @@ -197,7 +187,6 @@ "FIMCompletionStreamRequestStop", "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", - "FinishReason", "Function", "FunctionCall", "FunctionCallTypedDict", @@ -206,37 +195,31 @@ "FunctionTypedDict", "HTTPValidationError", "HTTPValidationErrorData", + "ImageDetail", "ImageURL", "ImageURLChunk", - "ImageURLChunkImageURL", - "ImageURLChunkImageURLTypedDict", - "ImageURLChunkType", "ImageURLChunkTypedDict", "ImageURLTypedDict", + "ImageURLUnion", + 
"ImageURLUnionTypedDict", "JSONSchema", "JSONSchemaTypedDict", "Loc", "LocTypedDict", - "Messages", - "MessagesTypedDict", - "MistralGcpError", + "MistralGCPError", "MistralPromptMode", "NoResponseError", "Prediction", "PredictionTypedDict", "ReferenceChunk", - "ReferenceChunkType", "ReferenceChunkTypedDict", "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", "ResponseValidationError", - "Role", "SDKError", "Security", "SecurityTypedDict", - "Stop", - "StopTypedDict", "SystemMessage", "SystemMessageContent", "SystemMessageContentChunks", @@ -246,7 +229,6 @@ "TextChunk", "TextChunkTypedDict", "ThinkChunk", - "ThinkChunkType", "ThinkChunkTypedDict", "Thinking", "ThinkingTypedDict", @@ -259,17 +241,14 @@ "ToolMessage", "ToolMessageContent", "ToolMessageContentTypedDict", - "ToolMessageRole", "ToolMessageTypedDict", "ToolTypedDict", "ToolTypes", - "Type", "UsageInfo", "UsageInfoTypedDict", "UserMessage", "UserMessageContent", "UserMessageContentTypedDict", - "UserMessageRole", "UserMessageTypedDict", "ValidationError", "ValidationErrorTypedDict", @@ -279,14 +258,13 @@ "AssistantMessage": ".assistantmessage", "AssistantMessageContent": ".assistantmessage", "AssistantMessageContentTypedDict": ".assistantmessage", - "AssistantMessageRole": ".assistantmessage", "AssistantMessageTypedDict": ".assistantmessage", "ChatCompletionChoice": ".chatcompletionchoice", "ChatCompletionChoiceFinishReason": ".chatcompletionchoice", "ChatCompletionChoiceTypedDict": ".chatcompletionchoice", "ChatCompletionRequest": ".chatcompletionrequest", - "ChatCompletionRequestMessages": ".chatcompletionrequest", - "ChatCompletionRequestMessagesTypedDict": ".chatcompletionrequest", + "ChatCompletionRequestMessage": ".chatcompletionrequest", + "ChatCompletionRequestMessageTypedDict": ".chatcompletionrequest", "ChatCompletionRequestStop": ".chatcompletionrequest", "ChatCompletionRequestStopTypedDict": ".chatcompletionrequest", "ChatCompletionRequestToolChoice": ".chatcompletionrequest", @@ 
-295,25 +273,25 @@ "ChatCompletionResponse": ".chatcompletionresponse", "ChatCompletionResponseTypedDict": ".chatcompletionresponse", "ChatCompletionStreamRequest": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessage": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestMessageTypedDict": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStop": ".chatcompletionstreamrequest", + "ChatCompletionStreamRequestStopTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestToolChoice": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestToolChoiceTypedDict": ".chatcompletionstreamrequest", "ChatCompletionStreamRequestTypedDict": ".chatcompletionstreamrequest", - "Messages": ".chatcompletionstreamrequest", - "MessagesTypedDict": ".chatcompletionstreamrequest", - "Stop": ".chatcompletionstreamrequest", - "StopTypedDict": ".chatcompletionstreamrequest", "CompletionChunk": ".completionchunk", "CompletionChunkTypedDict": ".completionchunk", "CompletionEvent": ".completionevent", "CompletionEventTypedDict": ".completionevent", "CompletionResponseStreamChoice": ".completionresponsestreamchoice", + "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", - "FinishReason": ".completionresponsestreamchoice", "ContentChunk": ".contentchunk", "ContentChunkTypedDict": ".contentchunk", - "Content": ".deltamessage", - "ContentTypedDict": ".deltamessage", "DeltaMessage": ".deltamessage", + "DeltaMessageContent": ".deltamessage", + "DeltaMessageContentTypedDict": ".deltamessage", "DeltaMessageTypedDict": ".deltamessage", "FIMCompletionRequest": ".fimcompletionrequest", "FIMCompletionRequestStop": ".fimcompletionrequest", @@ -335,13 +313,13 @@ "FunctionNameTypedDict": ".functionname", "HTTPValidationError": ".httpvalidationerror", "HTTPValidationErrorData": ".httpvalidationerror", + "ImageDetail": ".imagedetail", "ImageURL": 
".imageurl", "ImageURLTypedDict": ".imageurl", "ImageURLChunk": ".imageurlchunk", - "ImageURLChunkImageURL": ".imageurlchunk", - "ImageURLChunkImageURLTypedDict": ".imageurlchunk", - "ImageURLChunkType": ".imageurlchunk", "ImageURLChunkTypedDict": ".imageurlchunk", + "ImageURLUnion": ".imageurlchunk", + "ImageURLUnionTypedDict": ".imageurlchunk", "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", "MistralPromptMode": ".mistralpromptmode", @@ -349,7 +327,6 @@ "Prediction": ".prediction", "PredictionTypedDict": ".prediction", "ReferenceChunk": ".referencechunk", - "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", @@ -358,7 +335,6 @@ "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", - "Role": ".systemmessage", "SystemMessage": ".systemmessage", "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": ".systemmessage", @@ -367,9 +343,7 @@ "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", "TextChunk": ".textchunk", "TextChunkTypedDict": ".textchunk", - "Type": ".textchunk", "ThinkChunk": ".thinkchunk", - "ThinkChunkType": ".thinkchunk", "ThinkChunkTypedDict": ".thinkchunk", "Thinking": ".thinkchunk", "ThinkingTypedDict": ".thinkchunk", @@ -383,7 +357,6 @@ "ToolMessage": ".toolmessage", "ToolMessageContent": ".toolmessage", "ToolMessageContentTypedDict": ".toolmessage", - "ToolMessageRole": ".toolmessage", "ToolMessageTypedDict": ".toolmessage", "ToolTypes": ".tooltypes", "UsageInfo": ".usageinfo", @@ -391,7 +364,6 @@ "UserMessage": ".usermessage", "UserMessageContent": ".usermessage", "UserMessageContentTypedDict": ".usermessage", - "UserMessageRole": ".usermessage", "UserMessageTypedDict": ".usermessage", "Loc": ".validationerror", "LocTypedDict": ".validationerror", diff --git a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py 
b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py similarity index 81% rename from packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py rename to packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py index 7790eb10..7061775b 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/assistantmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py @@ -3,16 +3,19 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_azure.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) +from mistralai.gcp.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict AssistantMessageContentTypedDict = TypeAliasType( @@ -25,18 +28,22 @@ ) -AssistantMessageRole = Literal["assistant",] - - class AssistantMessageTypedDict(TypedDict): + role: Literal["assistant"] content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. 
The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): + ROLE: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @@ -44,11 +51,9 @@ class AssistantMessage(BaseModel): prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: Optional[AssistantMessageRole] = "assistant" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["content", "tool_calls", "prefix", "role"] + optional_fields = ["role", "content", "tool_calls", "prefix"] nullable_fields = ["content", "tool_calls"] null_default_fields = [] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py similarity index 91% rename from packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py rename to packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py index fe3ee952..ae5a2fbf 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionchoice.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionchoice.py @@ -2,7 +2,7 @@ from __future__ import annotations from .assistantmessage import AssistantMessage, AssistantMessageTypedDict -from mistralai_gcp.types import BaseModel, UnrecognizedStr +from mistralai.gcp.client.types import BaseModel, UnrecognizedStr from typing import Literal, Union from typing_extensions import TypedDict diff --git 
a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py similarity index 97% rename from packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py rename to packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py index 80345f9d..1bc03922 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py @@ -11,14 +11,14 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator +from mistralai.gcp.client.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -36,8 +36,8 @@ r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -ChatCompletionRequestMessagesTypedDict = TypeAliasType( - "ChatCompletionRequestMessagesTypedDict", +ChatCompletionRequestMessageTypedDict = TypeAliasType( + "ChatCompletionRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -47,7 +47,7 @@ ) -ChatCompletionRequestMessages = Annotated[ +ChatCompletionRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -74,7 +74,7 @@ class ChatCompletionRequestTypedDict(TypedDict): model: str r"""ID of the model to use. 
You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionRequestMessagesTypedDict] + messages: List[ChatCompletionRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -113,7 +113,7 @@ class ChatCompletionRequest(BaseModel): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[ChatCompletionRequestMessages] + messages: List[ChatCompletionRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: OptionalNullable[float] = UNSET diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py similarity index 93% rename from packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py rename to packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py index a7953eb1..317c4d84 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionresponse.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionresponse.py @@ -3,7 +3,7 @@ from __future__ import annotations from 
.chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import List from typing_extensions import TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py similarity index 94% rename from packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py rename to packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py index e857d515..0a5a0021 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/chatcompletionstreamrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py @@ -11,29 +11,33 @@ from .toolchoiceenum import ToolChoiceEnum from .toolmessage import ToolMessage, ToolMessageTypedDict from .usermessage import UserMessage, UserMessageTypedDict -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) -from mistralai_gcp.utils import get_discriminator +from mistralai.gcp.client.utils import get_discriminator from pydantic import Discriminator, Tag, model_serializer from typing import Any, Dict, List, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -StopTypedDict = TypeAliasType("StopTypedDict", Union[str, List[str]]) +ChatCompletionStreamRequestStopTypedDict = TypeAliasType( + "ChatCompletionStreamRequestStopTypedDict", Union[str, List[str]] +) r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" -Stop = TypeAliasType("Stop", Union[str, List[str]]) +ChatCompletionStreamRequestStop = TypeAliasType( + "ChatCompletionStreamRequestStop", Union[str, List[str]] +) r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" -MessagesTypedDict = TypeAliasType( - "MessagesTypedDict", +ChatCompletionStreamRequestMessageTypedDict = TypeAliasType( + "ChatCompletionStreamRequestMessageTypedDict", Union[ SystemMessageTypedDict, UserMessageTypedDict, @@ -43,7 +47,7 @@ ) -Messages = Annotated[ +ChatCompletionStreamRequestMessage = Annotated[ Union[ Annotated[AssistantMessage, Tag("assistant")], Annotated[SystemMessage, Tag("system")], @@ -70,7 +74,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[MessagesTypedDict] + messages: List[ChatCompletionStreamRequestMessageTypedDict] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: NotRequired[Nullable[float]] r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value.""" @@ -79,7 +83,7 @@ class ChatCompletionStreamRequestTypedDict(TypedDict): max_tokens: NotRequired[Nullable[int]] r"""The maximum number of tokens to generate in the completion. 
The token count of your prompt plus `max_tokens` cannot exceed the model's context length.""" stream: NotRequired[bool] - stop: NotRequired[StopTypedDict] + stop: NotRequired[ChatCompletionStreamRequestStopTypedDict] r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array""" random_seed: NotRequired[Nullable[int]] r"""The seed to use for random sampling. If set, different calls will generate deterministic results.""" @@ -108,7 +112,7 @@ class ChatCompletionStreamRequest(BaseModel): model: str r"""ID of the model to use. You can use the [List Available Models](/api/#tag/models/operation/list_models_v1_models_get) API to see all of your available models, or see our [Model overview](/models) for model descriptions.""" - messages: List[Messages] + messages: List[ChatCompletionStreamRequestMessage] r"""The prompt(s) to generate completions for, encoded as a list of dict with role and content.""" temperature: OptionalNullable[float] = UNSET @@ -122,7 +126,7 @@ class ChatCompletionStreamRequest(BaseModel): stream: Optional[bool] = True - stop: Optional[Stop] = None + stop: Optional[ChatCompletionStreamRequestStop] = None r"""Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array""" random_seed: OptionalNullable[int] = UNSET diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py similarity index 94% rename from packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py rename to packages/gcp/src/mistralai/gcp/client/models/completionchunk.py index d6cc2a86..9e54cb6d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py @@ -6,7 +6,7 @@ CompletionResponseStreamChoiceTypedDict, ) from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_azure.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import List, Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py b/packages/gcp/src/mistralai/gcp/client/models/completionevent.py similarity index 88% rename from packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py rename to packages/gcp/src/mistralai/gcp/client/models/completionevent.py index 33278c11..bb155009 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/completionevent.py +++ b/packages/gcp/src/mistralai/gcp/client/models/completionevent.py @@ -2,7 +2,7 @@ from __future__ import annotations from .completionchunk import CompletionChunk, CompletionChunkTypedDict -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing_extensions import TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py similarity index 82% rename from packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py rename to 
packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py index 0e890aac..6f306721 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/completionresponsestreamchoice.py +++ b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py @@ -2,13 +2,18 @@ from __future__ import annotations from .deltamessage import DeltaMessage, DeltaMessageTypedDict -from mistralai_azure.types import BaseModel, Nullable, UNSET_SENTINEL, UnrecognizedStr +from mistralai.gcp.client.types import ( + BaseModel, + Nullable, + UNSET_SENTINEL, + UnrecognizedStr, +) from pydantic import model_serializer from typing import Literal, Union from typing_extensions import TypedDict -FinishReason = Union[ +CompletionResponseStreamChoiceFinishReason = Union[ Literal[ "stop", "length", @@ -22,7 +27,7 @@ class CompletionResponseStreamChoiceTypedDict(TypedDict): index: int delta: DeltaMessageTypedDict - finish_reason: Nullable[FinishReason] + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] class CompletionResponseStreamChoice(BaseModel): @@ -30,7 +35,7 @@ class CompletionResponseStreamChoice(BaseModel): delta: DeltaMessage - finish_reason: Nullable[FinishReason] + finish_reason: Nullable[CompletionResponseStreamChoiceFinishReason] @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py similarity index 93% rename from packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py rename to packages/gcp/src/mistralai/gcp/client/models/contentchunk.py index da5671e3..1cd9e502 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/contentchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py @@ -4,7 +4,7 @@ from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk 
import TextChunk, TextChunkTypedDict -from mistralai_gcp.utils import get_discriminator +from mistralai.gcp.client.utils import get_discriminator from pydantic import Discriminator, Tag from typing import Union from typing_extensions import Annotated, TypeAliasType diff --git a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py similarity index 81% rename from packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py rename to packages/gcp/src/mistralai/gcp/client/models/deltamessage.py index 7fa3c3f2..96923518 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/deltamessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py @@ -3,7 +3,7 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict from .toolcall import ToolCall, ToolCallTypedDict -from mistralai_azure.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, @@ -15,24 +15,26 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -ContentTypedDict = TypeAliasType( - "ContentTypedDict", Union[str, List[ContentChunkTypedDict]] +DeltaMessageContentTypedDict = TypeAliasType( + "DeltaMessageContentTypedDict", Union[str, List[ContentChunkTypedDict]] ) -Content = TypeAliasType("Content", Union[str, List[ContentChunk]]) +DeltaMessageContent = TypeAliasType( + "DeltaMessageContent", Union[str, List[ContentChunk]] +) class DeltaMessageTypedDict(TypedDict): role: NotRequired[Nullable[str]] - content: NotRequired[Nullable[ContentTypedDict]] + content: NotRequired[Nullable[DeltaMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] class DeltaMessage(BaseModel): role: OptionalNullable[str] = UNSET - content: OptionalNullable[Content] = UNSET + content: OptionalNullable[DeltaMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET diff --git 
a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py similarity index 99% rename from packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py rename to packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py index bcc97c90..f37bbcc3 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py similarity index 93% rename from packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py rename to packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py index e1940b0a..5b80da3f 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionresponse.py +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionresponse.py @@ -3,7 +3,7 @@ from __future__ import annotations from .chatcompletionchoice import ChatCompletionChoice, ChatCompletionChoiceTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import List from typing_extensions import TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py similarity index 99% rename from packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py rename to 
packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py index 34d2ba65..8e610261 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/fimcompletionstreamrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_azure/src/mistralai_azure/models/function.py b/packages/gcp/src/mistralai/gcp/client/models/function.py similarity index 91% rename from packages/mistralai_azure/src/mistralai_azure/models/function.py rename to packages/gcp/src/mistralai/gcp/client/models/function.py index a4642f92..28577eff 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/function.py +++ b/packages/gcp/src/mistralai/gcp/client/models/function.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import Any, Dict, Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py b/packages/gcp/src/mistralai/gcp/client/models/functioncall.py similarity index 91% rename from packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py rename to packages/gcp/src/mistralai/gcp/client/models/functioncall.py index 99554c88..0f1b2425 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functioncall.py +++ b/packages/gcp/src/mistralai/gcp/client/models/functioncall.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import Any, Dict, Union from typing_extensions import TypeAliasType, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py b/packages/gcp/src/mistralai/gcp/client/models/functionname.py similarity index 89% rename from packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py rename to packages/gcp/src/mistralai/gcp/client/models/functionname.py index 00ec22f5..585b9e39 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/functionname.py +++ b/packages/gcp/src/mistralai/gcp/client/models/functionname.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing_extensions import TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py b/packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py similarity index 82% rename from packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py rename to packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py index 79609351..57df7260 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/httpvalidationerror.py +++ b/packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py @@ -4,8 +4,8 @@ from .validationerror import ValidationError from dataclasses import dataclass, field import httpx -from mistralai_gcp.models import MistralGcpError -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.models import MistralGCPError +from mistralai.gcp.client.types import BaseModel from typing import List, Optional @@ -14,7 +14,7 @@ class HTTPValidationErrorData(BaseModel): @dataclass(unsafe_hash=True) -class 
HTTPValidationError(MistralGcpError): +class HTTPValidationError(MistralGCPError): data: HTTPValidationErrorData = field(hash=False) def __init__( diff --git a/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py b/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py new file mode 100644 index 00000000..68ed7608 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/imagedetail.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import UnrecognizedStr +from typing import Literal, Union + + +ImageDetail = Union[ + Literal[ + "low", + "auto", + "high", + ], + UnrecognizedStr, +] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py similarity index 88% rename from packages/mistralai_azure/src/mistralai_azure/models/imageurl.py rename to packages/gcp/src/mistralai/gcp/client/models/imageurl.py index a5a66360..d4f298f1 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/imageurl.py +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import ( +from .imagedetail import ImageDetail +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, @@ -14,13 +15,13 @@ class ImageURLTypedDict(TypedDict): url: str - detail: NotRequired[Nullable[str]] + detail: NotRequired[Nullable[ImageDetail]] class ImageURL(BaseModel): url: str - detail: OptionalNullable[str] = UNSET + detail: OptionalNullable[ImageDetail] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): diff --git a/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py new file mode 100644 index 00000000..fc5284c1 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py @@ -0,0 +1,36 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from __future__ import annotations +from .imageurl import ImageURL, ImageURLTypedDict +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +ImageURLUnionTypedDict = TypeAliasType( + "ImageURLUnionTypedDict", Union[ImageURLTypedDict, str] +) + + +ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) + + +class ImageURLChunkTypedDict(TypedDict): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnionTypedDict + type: Literal["image_url"] + + +class ImageURLChunk(BaseModel): + r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" + + image_url: ImageURLUnion + + TYPE: Annotated[ + Annotated[Literal["image_url"], AfterValidator(validate_const("image_url"))], + pydantic.Field(alias="type"), + ] = 
"image_url" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py similarity index 97% rename from packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py rename to packages/gcp/src/mistralai/gcp/client/models/jsonschema.py index 26914b2f..443c429d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/jsonschema.py +++ b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py b/packages/gcp/src/mistralai/gcp/client/models/mistralgcperror.py similarity index 96% rename from packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py rename to packages/gcp/src/mistralai/gcp/client/models/mistralgcperror.py index fec729a5..9de91bf2 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralgcperror.py +++ b/packages/gcp/src/mistralai/gcp/client/models/mistralgcperror.py @@ -6,7 +6,7 @@ @dataclass(unsafe_hash=True) -class MistralGcpError(Exception): +class MistralGCPError(Exception): """The base class for all HTTP error responses.""" message: str diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py b/packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py similarity index 89% rename from packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py rename to packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py index a5cc534f..c765e4f1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/mistralpromptmode.py +++ b/packages/gcp/src/mistralai/gcp/client/models/mistralpromptmode.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import UnrecognizedStr +from mistralai.gcp.client.types import UnrecognizedStr from typing import Literal, Union diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py b/packages/gcp/src/mistralai/gcp/client/models/no_response_error.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/models/no_response_error.py rename to packages/gcp/src/mistralai/gcp/client/models/no_response_error.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py b/packages/gcp/src/mistralai/gcp/client/models/prediction.py similarity index 89% rename from packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py rename to packages/gcp/src/mistralai/gcp/client/models/prediction.py index 36c87ab0..f53579ed 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/prediction.py +++ b/packages/gcp/src/mistralai/gcp/client/models/prediction.py @@ -1,8 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel -from mistralai_gcp.utils import validate_const +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const import pydantic from pydantic.functional_validators import AfterValidator from typing import Literal, Optional diff --git a/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py new file mode 100644 index 00000000..274ea7f7 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class ReferenceChunkTypedDict(TypedDict): + reference_ids: List[int] + type: Literal["reference"] + + +class ReferenceChunk(BaseModel): + reference_ids: List[int] + + TYPE: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py similarity index 98% rename from packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py rename to packages/gcp/src/mistralai/gcp/client/models/responseformat.py index 9fe5116c..34ae6b03 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformat.py +++ b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py @@ -3,7 +3,7 @@ from __future__ import annotations from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py b/packages/gcp/src/mistralai/gcp/client/models/responseformats.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/models/responseformats.py rename to packages/gcp/src/mistralai/gcp/client/models/responseformats.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py b/packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py similarity index 86% rename from 
packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py rename to packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py index ebd4f214..0e86ea6c 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/responsevalidationerror.py +++ b/packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py @@ -4,11 +4,11 @@ from typing import Optional from dataclasses import dataclass -from mistralai_gcp.models import MistralGcpError +from mistralai.gcp.client.models import MistralGCPError @dataclass(unsafe_hash=True) -class ResponseValidationError(MistralGcpError): +class ResponseValidationError(MistralGCPError): """Error raised when there is a type mismatch between the response data and the expected Pydantic model.""" def __init__( diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py b/packages/gcp/src/mistralai/gcp/client/models/sdkerror.py similarity index 93% rename from packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py rename to packages/gcp/src/mistralai/gcp/client/models/sdkerror.py index 7f53bbcd..00bc1d99 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/sdkerror.py +++ b/packages/gcp/src/mistralai/gcp/client/models/sdkerror.py @@ -4,13 +4,13 @@ from typing import Optional from dataclasses import dataclass -from mistralai_gcp.models import MistralGcpError +from mistralai.gcp.client.models import MistralGCPError MAX_MESSAGE_LEN = 10_000 @dataclass(unsafe_hash=True) -class SDKError(MistralGcpError): +class SDKError(MistralGCPError): """The fallback error class if no more specific error class is matched.""" def __init__( diff --git a/packages/mistralai_azure/src/mistralai_azure/models/security.py b/packages/gcp/src/mistralai/gcp/client/models/security.py similarity index 81% rename from packages/mistralai_azure/src/mistralai_azure/models/security.py rename to packages/gcp/src/mistralai/gcp/client/models/security.py index c1ae8313..10a469b5 100644 --- 
a/packages/mistralai_azure/src/mistralai_azure/models/security.py +++ b/packages/gcp/src/mistralai/gcp/client/models/security.py @@ -1,8 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import BaseModel -from mistralai_azure.utils import FieldMetadata, SecurityMetadata +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import FieldMetadata, SecurityMetadata from typing_extensions import Annotated, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py similarity index 57% rename from packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py rename to packages/gcp/src/mistralai/gcp/client/models/systemmessage.py index f99bf4ff..a7d695a7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py @@ -5,9 +5,12 @@ SystemMessageContentChunks, SystemMessageContentChunksTypedDict, ) -from mistralai_azure.types import BaseModel -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict SystemMessageContentTypedDict = TypeAliasType( @@ -21,15 +24,15 @@ ) -Role = Literal["system",] - - class SystemMessageTypedDict(TypedDict): content: SystemMessageContentTypedDict - role: NotRequired[Role] + role: Literal["system"] class SystemMessage(BaseModel): content: SystemMessageContent - role: Optional[Role] = "system" + ROLE: Annotated[ + Annotated[Literal["system"], 
AfterValidator(validate_const("system"))], + pydantic.Field(alias="role"), + ] = "system" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/systemmessagecontentchunks.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py similarity index 66% rename from packages/mistralai_azure/src/mistralai_azure/models/systemmessagecontentchunks.py rename to packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py index 4615a16c..225f38b7 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/systemmessagecontentchunks.py +++ b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py @@ -3,8 +3,7 @@ from __future__ import annotations from .textchunk import TextChunk, TextChunkTypedDict from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from mistralai_azure.utils import get_discriminator -from pydantic import Discriminator, Tag +from pydantic import Field from typing import Union from typing_extensions import Annotated, TypeAliasType @@ -16,6 +15,5 @@ SystemMessageContentChunks = Annotated[ - Union[Annotated[TextChunk, Tag("text")], Annotated[ThinkChunk, Tag("thinking")]], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[TextChunk, ThinkChunk], Field(discriminator="TYPE") ] diff --git a/packages/gcp/src/mistralai/gcp/client/models/textchunk.py b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py new file mode 100644 index 00000000..77576c9f --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator +from typing import Literal +from typing_extensions import Annotated, TypedDict + + +class TextChunkTypedDict(TypedDict): + text: str + type: Literal["text"] + + +class TextChunk(BaseModel): + text: str + + TYPE: Annotated[ + Annotated[Literal["text"], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py similarity index 65% rename from packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py rename to packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py index f53a9f1a..b65fffb2 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/thinkchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py @@ -3,9 +3,12 @@ from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai_azure.types import BaseModel +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils import validate_const +import pydantic +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ThinkingTypedDict = TypeAliasType( @@ -16,20 +19,20 @@ Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) -ThinkChunkType = Literal["thinking",] - - class ThinkChunkTypedDict(TypedDict): thinking: List[ThinkingTypedDict] + type: Literal["thinking"] closed: NotRequired[bool] r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" - type: NotRequired[ThinkChunkType] class ThinkChunk(BaseModel): thinking: List[Thinking] + TYPE: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" + closed: Optional[bool] = None r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - - type: Optional[ThinkChunkType] = "thinking" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py b/packages/gcp/src/mistralai/gcp/client/models/tool.py similarity index 90% rename from packages/mistralai_gcp/src/mistralai_gcp/models/tool.py rename to packages/gcp/src/mistralai/gcp/client/models/tool.py index 800de633..d09c6854 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/tool.py +++ b/packages/gcp/src/mistralai/gcp/client/models/tool.py @@ -3,7 +3,7 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py similarity index 92% rename from packages/mistralai_azure/src/mistralai_azure/models/toolcall.py rename to packages/gcp/src/mistralai/gcp/client/models/toolcall.py index 44fe8ec8..a1edf337 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/toolcall.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py @@ -3,7 +3,7 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai_azure.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict diff --git 
a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py similarity index 94% rename from packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py rename to packages/gcp/src/mistralai/gcp/client/models/toolchoice.py index 4a148330..de3828da 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoice.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py @@ -3,7 +3,7 @@ from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import Optional from typing_extensions import NotRequired, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py b/packages/gcp/src/mistralai/gcp/client/models/toolchoiceenum.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/models/toolchoiceenum.py rename to packages/gcp/src/mistralai/gcp/client/models/toolchoiceenum.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py similarity index 77% rename from packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py rename to packages/gcp/src/mistralai/gcp/client/models/toolmessage.py index d6aa2621..65b1d9d6 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/toolmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py @@ -2,16 +2,19 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL, ) +from mistralai.gcp.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from 
typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolMessageContentTypedDict = TypeAliasType( @@ -22,28 +25,28 @@ ToolMessageContent = TypeAliasType("ToolMessageContent", Union[str, List[ContentChunk]]) -ToolMessageRole = Literal["tool",] - - class ToolMessageTypedDict(TypedDict): content: Nullable[ToolMessageContentTypedDict] + role: Literal["tool"] tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] - role: NotRequired[ToolMessageRole] class ToolMessage(BaseModel): content: Nullable[ToolMessageContent] + ROLE: Annotated[ + Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], + pydantic.Field(alias="role"), + ] = "tool" + tool_call_id: OptionalNullable[str] = UNSET name: OptionalNullable[str] = UNSET - role: Optional[ToolMessageRole] = "tool" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name", "role"] + optional_fields = ["tool_call_id", "name"] nullable_fields = ["content", "tool_call_id", "name"] null_default_fields = [] diff --git a/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py b/packages/gcp/src/mistralai/gcp/client/models/tooltypes.py similarity index 78% rename from packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py rename to packages/gcp/src/mistralai/gcp/client/models/tooltypes.py index 638890c5..fd1aa13d 100644 --- a/packages/mistralai_azure/src/mistralai_azure/models/tooltypes.py +++ b/packages/gcp/src/mistralai/gcp/client/models/tooltypes.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_azure.types import UnrecognizedStr +from mistralai.gcp.client.types import UnrecognizedStr from typing import Literal, Union diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py similarity index 98% rename from packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py rename to packages/gcp/src/mistralai/gcp/client/models/usageinfo.py index 59f36158..9b7207b1 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usageinfo.py +++ b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import ( +from mistralai.gcp.client.types import ( BaseModel, Nullable, OptionalNullable, diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py similarity index 73% rename from packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py rename to packages/gcp/src/mistralai/gcp/client/models/usermessage.py index 0168b452..c083e16d 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/usermessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py @@ -2,10 +2,13 @@ from __future__ import annotations from .contentchunk import ContentChunk, ContentChunkTypedDict -from mistralai_gcp.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.gcp.client.types import BaseModel, Nullable, UNSET_SENTINEL +from mistralai.gcp.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Union +from 
typing_extensions import Annotated, TypeAliasType, TypedDict UserMessageContentTypedDict = TypeAliasType( @@ -16,22 +19,22 @@ UserMessageContent = TypeAliasType("UserMessageContent", Union[str, List[ContentChunk]]) -UserMessageRole = Literal["user",] - - class UserMessageTypedDict(TypedDict): content: Nullable[UserMessageContentTypedDict] - role: NotRequired[UserMessageRole] + role: Literal["user"] class UserMessage(BaseModel): content: Nullable[UserMessageContent] - role: Optional[UserMessageRole] = "user" + ROLE: Annotated[ + Annotated[Literal["user"], AfterValidator(validate_const("user"))], + pydantic.Field(alias="role"), + ] = "user" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role"] + optional_fields = [] nullable_fields = ["content"] null_default_fields = [] diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py b/packages/gcp/src/mistralai/gcp/client/models/validationerror.py similarity index 90% rename from packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py rename to packages/gcp/src/mistralai/gcp/client/models/validationerror.py index 033d4b63..2d330e9a 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/validationerror.py +++ b/packages/gcp/src/mistralai/gcp/client/models/validationerror.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai_gcp.types import BaseModel +from mistralai.gcp.client.types import BaseModel from typing import List, Union from typing_extensions import TypeAliasType, TypedDict diff --git a/packages/mistralai_gcp/src/mistralai_gcp/py.typed b/packages/gcp/src/mistralai/gcp/client/py.typed similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/py.typed rename to packages/gcp/src/mistralai/gcp/client/py.typed diff --git a/packages/gcp/src/mistralai/gcp/client/sdk.py b/packages/gcp/src/mistralai/gcp/client/sdk.py new file mode 100644 index 00000000..e6e83839 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/sdk.py @@ -0,0 +1,243 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from .basesdk import BaseSDK +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients +from .sdkconfiguration import SDKConfiguration +from .utils.logger import Logger, get_default_logger +from .utils.retries import RetryConfig +import google.auth +import google.auth.credentials +import google.auth.transport.requests +import httpx +import importlib +from mistralai.gcp.client import models, utils +from mistralai.gcp.client._hooks import SDKHooks +from mistralai.gcp.client._hooks.registration import GCPVertexAIPathHook +from mistralai.gcp.client.types import OptionalNullable, UNSET +import sys +from typing import Callable, Dict, Optional, TYPE_CHECKING, cast +import weakref + +if TYPE_CHECKING: + from mistralai.gcp.client.chat import Chat + from mistralai.gcp.client.fim import Fim + + +class MistralGCP(BaseSDK): + r"""Mistral AI API: Dora OpenAPI schema + + Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. + """ + + chat: "Chat" + r"""Chat Completion API.""" + fim: "Fim" + r"""Fill-in-the-middle API.""" + _sub_sdk_map = { + "chat": ("mistralai.gcp.client.chat", "Chat"), + "fim": ("mistralai.gcp.client.fim", "Fim"), + } + + def __init__( + self, + project_id: Optional[str] = None, + region: str = "europe-west4", + access_token: Optional[str] = None, + server: Optional[str] = None, + server_url: Optional[str] = None, + url_params: Optional[Dict[str, str]] = None, + client: Optional[HttpClient] = None, + async_client: Optional[AsyncHttpClient] = None, + retry_config: OptionalNullable[RetryConfig] = UNSET, + timeout_ms: Optional[int] = None, + debug_logger: Optional[Logger] = None, + ) -> None: + r"""Instantiates the SDK configuring it with the provided parameters. 
+ + :param project_id: GCP project ID (auto-detected from credentials if not provided) + :param region: GCP region for Vertex AI (default: europe-west4) + :param access_token: Fixed access token for testing (skips google.auth) + :param server: The server by name to use for all methods + :param server_url: The server URL to use for all methods + :param url_params: Parameters to optionally template the server URL with + :param client: The HTTP client to use for all synchronous methods + :param async_client: The Async HTTP client to use for all asynchronous methods + :param retry_config: The retry configuration to use for all supported methods + :param timeout_ms: Optional request timeout applied to each operation in milliseconds + """ + credentials: Optional[google.auth.credentials.Credentials] = None + if access_token is None: + creds, detected_project_id = google.auth.default( + scopes=["https://www.googleapis.com/auth/cloud-platform"], + ) + if creds is None: + raise ValueError("Failed to obtain GCP credentials") + # Cast to Credentials base class which has refresh() and token + creds = cast(google.auth.credentials.Credentials, creds) + creds.refresh(google.auth.transport.requests.Request()) + credentials = creds + project_id = project_id or detected_project_id + + if project_id is None: + raise ValueError( + "project_id must be provided or available from default credentials" + ) + + self._credentials = credentials + self._project_id = project_id + self._region = region + self._fixed_access_token = access_token + + def get_auth_token() -> str: + if self._fixed_access_token: + return self._fixed_access_token + creds = self._credentials + if creds is None: + raise ValueError("No credentials available") + # Only refresh when the token is expired or missing. 
+ # This avoids a blocking HTTP round-trip on every request and + # minimises event-loop blocking when called from async paths + # (the Speakeasy-generated basesdk always calls security + # callables synchronously). + if not creds.valid: + creds.refresh(google.auth.transport.requests.Request()) + token = creds.token + if token is None: + raise ValueError("Failed to obtain access token") + return token + + if server_url is None: + server_url = f"https://{region}-aiplatform.googleapis.com" + + client_supplied = True + if client is None: + client = httpx.Client(follow_redirects=True) + client_supplied = False + + assert issubclass( + type(client), HttpClient + ), "The provided client must implement the HttpClient protocol." + + async_client_supplied = True + if async_client is None: + async_client = httpx.AsyncClient(follow_redirects=True) + async_client_supplied = False + + if debug_logger is None: + debug_logger = get_default_logger() + + assert issubclass( + type(async_client), AsyncHttpClient + ), "The provided async_client must implement the AsyncHttpClient protocol." 
+ + def get_security() -> models.Security: + return models.Security(api_key=get_auth_token()) + + security: Callable[[], models.Security] = get_security + + if url_params is not None: + server_url = utils.template_url(server_url, url_params) + + BaseSDK.__init__( + self, + SDKConfiguration( + client=client, + client_supplied=client_supplied, + async_client=async_client, + async_client_supplied=async_client_supplied, + security=security, + server_url=server_url, + server=server, + retry_config=retry_config, + timeout_ms=timeout_ms, + debug_logger=debug_logger, + ), + parent_ref=self, + ) + + hooks = SDKHooks() + self.sdk_configuration.__dict__["_hooks"] = hooks + + # Register hook that builds Vertex AI URL path + hooks.register_before_request_hook(GCPVertexAIPathHook(project_id, region)) + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init( + current_server_url, client + ) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.client_supplied, + self.sdk_configuration.async_client, + self.sdk_configuration.async_client_supplied, + ) + + def dynamic_import(self, modname, retries=3): + last_exc: Optional[Exception] = None + for attempt in range(retries): + try: + return importlib.import_module(modname) + except (KeyError, ImportError, ModuleNotFoundError) as e: + last_exc = e + # Clear any half-initialized module and retry + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise ImportError( + f"Failed to import module '{modname}' after {retries} attempts" + ) from last_exc + + def __getattr__(self, name: str): + if name in self._sub_sdk_map: + module_path, class_name = self._sub_sdk_map[name] + try: + module = self.dynamic_import(module_path) + klass = getattr(module, class_name) + 
instance = klass(self.sdk_configuration, parent_ref=self) + setattr(self, name, instance) + return instance + except ImportError as e: + raise AttributeError( + f"Failed to import module {module_path} for attribute {name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to find class {class_name} in module {module_path} for attribute {name}: {e}" + ) from e + + raise AttributeError( + f"'{type(self).__name__}' object has no attribute '{name}'" + ) + + def __dir__(self): + default_attrs = list(super().__dir__()) + lazy_attrs = list(self._sub_sdk_map.keys()) + return sorted(list(set(default_attrs + lazy_attrs))) + + def __enter__(self): + return self + + async def __aenter__(self): + return self + + def __exit__(self, _exc_type, _exc_val, _exc_tb): + if ( + self.sdk_configuration.client is not None + and not self.sdk_configuration.client_supplied + ): + self.sdk_configuration.client.close() + self.sdk_configuration.client = None + + async def __aexit__(self, _exc_type, _exc_val, _exc_tb): + if ( + self.sdk_configuration.async_client is not None + and not self.sdk_configuration.async_client_supplied + ): + await self.sdk_configuration.async_client.aclose() + self.sdk_configuration.async_client = None diff --git a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py b/packages/gcp/src/mistralai/gcp/client/sdkconfiguration.py similarity index 94% rename from packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py rename to packages/gcp/src/mistralai/gcp/client/sdkconfiguration.py index 51289cf0..d56a634f 100644 --- a/packages/mistralai_azure/src/mistralai_azure/sdkconfiguration.py +++ b/packages/gcp/src/mistralai/gcp/client/sdkconfiguration.py @@ -9,8 +9,8 @@ from .httpclient import AsyncHttpClient, HttpClient from .utils import Logger, RetryConfig, remove_suffix from dataclasses import dataclass -from mistralai_azure import models -from mistralai_azure.types import OptionalNullable, UNSET +from mistralai.gcp.client 
import models +from mistralai.gcp.client.types import OptionalNullable, UNSET from pydantic import Field from typing import Callable, Dict, Optional, Tuple, Union diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py b/packages/gcp/src/mistralai/gcp/client/types/__init__.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/types/__init__.py rename to packages/gcp/src/mistralai/gcp/client/types/__init__.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py b/packages/gcp/src/mistralai/gcp/client/types/basemodel.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/types/basemodel.py rename to packages/gcp/src/mistralai/gcp/client/types/basemodel.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py b/packages/gcp/src/mistralai/gcp/client/utils/__init__.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/__init__.py rename to packages/gcp/src/mistralai/gcp/client/utils/__init__.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py b/packages/gcp/src/mistralai/gcp/client/utils/annotations.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/annotations.py rename to packages/gcp/src/mistralai/gcp/client/utils/annotations.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py b/packages/gcp/src/mistralai/gcp/client/utils/datetimes.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/datetimes.py rename to packages/gcp/src/mistralai/gcp/client/utils/datetimes.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py b/packages/gcp/src/mistralai/gcp/client/utils/enums.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/enums.py rename to packages/gcp/src/mistralai/gcp/client/utils/enums.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py 
b/packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/eventstreaming.py rename to packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py b/packages/gcp/src/mistralai/gcp/client/utils/forms.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/forms.py rename to packages/gcp/src/mistralai/gcp/client/utils/forms.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py b/packages/gcp/src/mistralai/gcp/client/utils/headers.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/headers.py rename to packages/gcp/src/mistralai/gcp/client/utils/headers.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py b/packages/gcp/src/mistralai/gcp/client/utils/logger.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/logger.py rename to packages/gcp/src/mistralai/gcp/client/utils/logger.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py b/packages/gcp/src/mistralai/gcp/client/utils/metadata.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/metadata.py rename to packages/gcp/src/mistralai/gcp/client/utils/metadata.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py b/packages/gcp/src/mistralai/gcp/client/utils/queryparams.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/queryparams.py rename to packages/gcp/src/mistralai/gcp/client/utils/queryparams.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py b/packages/gcp/src/mistralai/gcp/client/utils/requestbodies.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/requestbodies.py rename to packages/gcp/src/mistralai/gcp/client/utils/requestbodies.py diff --git 
a/packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py b/packages/gcp/src/mistralai/gcp/client/utils/retries.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/retries.py rename to packages/gcp/src/mistralai/gcp/client/utils/retries.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/security.py b/packages/gcp/src/mistralai/gcp/client/utils/security.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/security.py rename to packages/gcp/src/mistralai/gcp/client/utils/security.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py b/packages/gcp/src/mistralai/gcp/client/utils/serializers.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/serializers.py rename to packages/gcp/src/mistralai/gcp/client/utils/serializers.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py similarity index 95% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py rename to packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py index c168a293..83e8275e 100644 --- a/packages/mistralai_gcp/src/mistralai_gcp/utils/unmarshal_json_response.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py @@ -5,7 +5,7 @@ import httpx from .serializers import unmarshal_json -from mistralai_gcp import models +from mistralai.gcp.client import models T = TypeVar("T") diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/url.py b/packages/gcp/src/mistralai/gcp/client/utils/url.py similarity index 100% rename from packages/mistralai_gcp/src/mistralai_gcp/utils/url.py rename to packages/gcp/src/mistralai/gcp/client/utils/url.py diff --git a/packages/mistralai_gcp/src/mistralai_gcp/utils/values.py b/packages/gcp/src/mistralai/gcp/client/utils/values.py similarity index 100% rename from 
packages/mistralai_gcp/src/mistralai_gcp/utils/values.py rename to packages/gcp/src/mistralai/gcp/client/utils/values.py diff --git a/packages/mistralai_gcp/uv.lock b/packages/gcp/uv.lock similarity index 92% rename from packages/mistralai_gcp/uv.lock rename to packages/gcp/uv.lock index 4fbca724..a49757c9 100644 --- a/packages/mistralai_gcp/uv.lock +++ b/packages/gcp/uv.lock @@ -277,7 +277,7 @@ wheels = [ [[package]] name = "mistralai-gcp" -version = "1.8.0" +version = "2.0.0a4" source = { editable = "." } dependencies = [ { name = "eval-type-backport" }, @@ -304,7 +304,7 @@ requires-dist = [ { name = "eval-type-backport", specifier = ">=0.2.0" }, { name = "google-auth", specifier = ">=2.31.0,<3.0.0" }, { name = "httpx", specifier = ">=0.28.1" }, - { name = "pydantic", specifier = ">=2.10.3" }, + { name = "pydantic", specifier = ">=2.11.2" }, { name = "python-dateutil", specifier = ">=2.8.2" }, { name = "requests", specifier = ">=2.32.3,<3.0.0" }, { name = "typing-inspection", specifier = ">=0.4.0" }, @@ -312,7 +312,7 @@ requires-dist = [ [package.metadata.requires-dev] dev = [ - { name = "mypy", specifier = "==1.14.1" }, + { name = "mypy", specifier = "==1.15.0" }, { name = "pylint", specifier = "==3.2.3" }, { name = "pyright", specifier = ">=1.1.401,<2" }, { name = "pytest", specifier = ">=8.2.2,<9" }, @@ -322,40 +322,40 @@ dev = [ [[package]] name = "mypy" -version = "1.14.1" +version = "1.15.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b9/eb/2c92d8ea1e684440f54fa49ac5d9a5f19967b7b472a281f419e69a8d228e/mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6", size = 3216051, upload-time = "2024-12-30T16:39:07.335Z" } -wheels = [ 
- { url = "https://files.pythonhosted.org/packages/9b/7a/87ae2adb31d68402da6da1e5f30c07ea6063e9f09b5e7cfc9dfa44075e74/mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb", size = 11211002, upload-time = "2024-12-30T16:37:22.435Z" }, - { url = "https://files.pythonhosted.org/packages/e1/23/eada4c38608b444618a132be0d199b280049ded278b24cbb9d3fc59658e4/mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0", size = 10358400, upload-time = "2024-12-30T16:37:53.526Z" }, - { url = "https://files.pythonhosted.org/packages/43/c9/d6785c6f66241c62fd2992b05057f404237deaad1566545e9f144ced07f5/mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d", size = 12095172, upload-time = "2024-12-30T16:37:50.332Z" }, - { url = "https://files.pythonhosted.org/packages/c3/62/daa7e787770c83c52ce2aaf1a111eae5893de9e004743f51bfcad9e487ec/mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b", size = 12828732, upload-time = "2024-12-30T16:37:29.96Z" }, - { url = "https://files.pythonhosted.org/packages/1b/a2/5fb18318a3637f29f16f4e41340b795da14f4751ef4f51c99ff39ab62e52/mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427", size = 13012197, upload-time = "2024-12-30T16:38:05.037Z" }, - { url = 
"https://files.pythonhosted.org/packages/28/99/e153ce39105d164b5f02c06c35c7ba958aaff50a2babba7d080988b03fe7/mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f", size = 9780836, upload-time = "2024-12-30T16:37:19.726Z" }, - { url = "https://files.pythonhosted.org/packages/da/11/a9422850fd506edbcdc7f6090682ecceaf1f87b9dd847f9df79942da8506/mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c", size = 11120432, upload-time = "2024-12-30T16:37:11.533Z" }, - { url = "https://files.pythonhosted.org/packages/b6/9e/47e450fd39078d9c02d620545b2cb37993a8a8bdf7db3652ace2f80521ca/mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1", size = 10279515, upload-time = "2024-12-30T16:37:40.724Z" }, - { url = "https://files.pythonhosted.org/packages/01/b5/6c8d33bd0f851a7692a8bfe4ee75eb82b6983a3cf39e5e32a5d2a723f0c1/mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8", size = 12025791, upload-time = "2024-12-30T16:36:58.73Z" }, - { url = "https://files.pythonhosted.org/packages/f0/4c/e10e2c46ea37cab5c471d0ddaaa9a434dc1d28650078ac1b56c2d7b9b2e4/mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f", size = 12749203, upload-time = "2024-12-30T16:37:03.741Z" }, - { url = 
"https://files.pythonhosted.org/packages/88/55/beacb0c69beab2153a0f57671ec07861d27d735a0faff135a494cd4f5020/mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1", size = 12885900, upload-time = "2024-12-30T16:37:57.948Z" }, - { url = "https://files.pythonhosted.org/packages/a2/75/8c93ff7f315c4d086a2dfcde02f713004357d70a163eddb6c56a6a5eff40/mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae", size = 9777869, upload-time = "2024-12-30T16:37:33.428Z" }, - { url = "https://files.pythonhosted.org/packages/43/1b/b38c079609bb4627905b74fc6a49849835acf68547ac33d8ceb707de5f52/mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14", size = 11266668, upload-time = "2024-12-30T16:38:02.211Z" }, - { url = "https://files.pythonhosted.org/packages/6b/75/2ed0d2964c1ffc9971c729f7a544e9cd34b2cdabbe2d11afd148d7838aa2/mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9", size = 10254060, upload-time = "2024-12-30T16:37:46.131Z" }, - { url = "https://files.pythonhosted.org/packages/a1/5f/7b8051552d4da3c51bbe8fcafffd76a6823779101a2b198d80886cd8f08e/mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11", size = 11933167, upload-time = "2024-12-30T16:37:43.534Z" }, - { url = 
"https://files.pythonhosted.org/packages/04/90/f53971d3ac39d8b68bbaab9a4c6c58c8caa4d5fd3d587d16f5927eeeabe1/mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e", size = 12864341, upload-time = "2024-12-30T16:37:36.249Z" }, - { url = "https://files.pythonhosted.org/packages/03/d2/8bc0aeaaf2e88c977db41583559319f1821c069e943ada2701e86d0430b7/mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89", size = 12972991, upload-time = "2024-12-30T16:37:06.743Z" }, - { url = "https://files.pythonhosted.org/packages/6f/17/07815114b903b49b0f2cf7499f1c130e5aa459411596668267535fe9243c/mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b", size = 9879016, upload-time = "2024-12-30T16:37:15.02Z" }, - { url = "https://files.pythonhosted.org/packages/9e/15/bb6a686901f59222275ab228453de741185f9d54fecbaacec041679496c6/mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255", size = 11252097, upload-time = "2024-12-30T16:37:25.144Z" }, - { url = "https://files.pythonhosted.org/packages/f8/b3/8b0f74dfd072c802b7fa368829defdf3ee1566ba74c32a2cb2403f68024c/mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34", size = 10239728, upload-time = "2024-12-30T16:38:08.634Z" }, - { url = 
"https://files.pythonhosted.org/packages/c5/9b/4fd95ab20c52bb5b8c03cc49169be5905d931de17edfe4d9d2986800b52e/mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a", size = 11924965, upload-time = "2024-12-30T16:38:12.132Z" }, - { url = "https://files.pythonhosted.org/packages/56/9d/4a236b9c57f5d8f08ed346914b3f091a62dd7e19336b2b2a0d85485f82ff/mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9", size = 12867660, upload-time = "2024-12-30T16:38:17.342Z" }, - { url = "https://files.pythonhosted.org/packages/40/88/a61a5497e2f68d9027de2bb139c7bb9abaeb1be1584649fa9d807f80a338/mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd", size = 12969198, upload-time = "2024-12-30T16:38:32.839Z" }, - { url = "https://files.pythonhosted.org/packages/54/da/3d6fc5d92d324701b0c23fb413c853892bfe0e1dbe06c9138037d459756b/mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107", size = 9885276, upload-time = "2024-12-30T16:38:20.828Z" }, - { url = "https://files.pythonhosted.org/packages/a0/b5/32dd67b69a16d088e533962e5044e51004176a9952419de0370cdaead0f8/mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1", size = 2752905, upload-time = "2024-12-30T16:38:42.021Z" }, +sdist = { url = 
"https://files.pythonhosted.org/packages/ce/43/d5e49a86afa64bd3839ea0d5b9c7103487007d728e1293f52525d6d5486a/mypy-1.15.0.tar.gz", hash = "sha256:404534629d51d3efea5c800ee7c42b72a6554d6c400e6a79eafe15d11341fd43", size = 3239717, upload-time = "2025-02-05T03:50:34.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/68/f8/65a7ce8d0e09b6329ad0c8d40330d100ea343bd4dd04c4f8ae26462d0a17/mypy-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:979e4e1a006511dacf628e36fadfecbcc0160a8af6ca7dad2f5025529e082c13", size = 10738433, upload-time = "2025-02-05T03:49:29.145Z" }, + { url = "https://files.pythonhosted.org/packages/b4/95/9c0ecb8eacfe048583706249439ff52105b3f552ea9c4024166c03224270/mypy-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4bb0e1bd29f7d34efcccd71cf733580191e9a264a2202b0239da95984c5b559", size = 9861472, upload-time = "2025-02-05T03:49:16.986Z" }, + { url = "https://files.pythonhosted.org/packages/84/09/9ec95e982e282e20c0d5407bc65031dfd0f0f8ecc66b69538296e06fcbee/mypy-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be68172e9fd9ad8fb876c6389f16d1c1b5f100ffa779f77b1fb2176fcc9ab95b", size = 11611424, upload-time = "2025-02-05T03:49:46.908Z" }, + { url = "https://files.pythonhosted.org/packages/78/13/f7d14e55865036a1e6a0a69580c240f43bc1f37407fe9235c0d4ef25ffb0/mypy-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7be1e46525adfa0d97681432ee9fcd61a3964c2446795714699a998d193f1a3", size = 12365450, upload-time = "2025-02-05T03:50:05.89Z" }, + { url = 
"https://files.pythonhosted.org/packages/48/e1/301a73852d40c241e915ac6d7bcd7fedd47d519246db2d7b86b9d7e7a0cb/mypy-1.15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2e2c2e6d3593f6451b18588848e66260ff62ccca522dd231cd4dd59b0160668b", size = 12551765, upload-time = "2025-02-05T03:49:33.56Z" }, + { url = "https://files.pythonhosted.org/packages/77/ba/c37bc323ae5fe7f3f15a28e06ab012cd0b7552886118943e90b15af31195/mypy-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:6983aae8b2f653e098edb77f893f7b6aca69f6cffb19b2cc7443f23cce5f4828", size = 9274701, upload-time = "2025-02-05T03:49:38.981Z" }, + { url = "https://files.pythonhosted.org/packages/03/bc/f6339726c627bd7ca1ce0fa56c9ae2d0144604a319e0e339bdadafbbb599/mypy-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2922d42e16d6de288022e5ca321cd0618b238cfc5570e0263e5ba0a77dbef56f", size = 10662338, upload-time = "2025-02-05T03:50:17.287Z" }, + { url = "https://files.pythonhosted.org/packages/e2/90/8dcf506ca1a09b0d17555cc00cd69aee402c203911410136cd716559efe7/mypy-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ee2d57e01a7c35de00f4634ba1bbf015185b219e4dc5909e281016df43f5ee5", size = 9787540, upload-time = "2025-02-05T03:49:51.21Z" }, + { url = "https://files.pythonhosted.org/packages/05/05/a10f9479681e5da09ef2f9426f650d7b550d4bafbef683b69aad1ba87457/mypy-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:973500e0774b85d9689715feeffcc980193086551110fd678ebe1f4342fb7c5e", size = 11538051, upload-time = "2025-02-05T03:50:20.885Z" }, + { url = 
"https://files.pythonhosted.org/packages/e9/9a/1f7d18b30edd57441a6411fcbc0c6869448d1a4bacbaee60656ac0fc29c8/mypy-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5a95fb17c13e29d2d5195869262f8125dfdb5c134dc8d9a9d0aecf7525b10c2c", size = 12286751, upload-time = "2025-02-05T03:49:42.408Z" }, + { url = "https://files.pythonhosted.org/packages/72/af/19ff499b6f1dafcaf56f9881f7a965ac2f474f69f6f618b5175b044299f5/mypy-1.15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1905f494bfd7d85a23a88c5d97840888a7bd516545fc5aaedff0267e0bb54e2f", size = 12421783, upload-time = "2025-02-05T03:49:07.707Z" }, + { url = "https://files.pythonhosted.org/packages/96/39/11b57431a1f686c1aed54bf794870efe0f6aeca11aca281a0bd87a5ad42c/mypy-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:c9817fa23833ff189db061e6d2eff49b2f3b6ed9856b4a0a73046e41932d744f", size = 9265618, upload-time = "2025-02-05T03:49:54.581Z" }, + { url = "https://files.pythonhosted.org/packages/98/3a/03c74331c5eb8bd025734e04c9840532226775c47a2c39b56a0c8d4f128d/mypy-1.15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:aea39e0583d05124836ea645f412e88a5c7d0fd77a6d694b60d9b6b2d9f184fd", size = 10793981, upload-time = "2025-02-05T03:50:28.25Z" }, + { url = "https://files.pythonhosted.org/packages/f0/1a/41759b18f2cfd568848a37c89030aeb03534411eef981df621d8fad08a1d/mypy-1.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f2147ab812b75e5b5499b01ade1f4a81489a147c01585cda36019102538615f", size = 9749175, upload-time = "2025-02-05T03:50:13.411Z" }, + { url = 
"https://files.pythonhosted.org/packages/12/7e/873481abf1ef112c582db832740f4c11b2bfa510e829d6da29b0ab8c3f9c/mypy-1.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce436f4c6d218a070048ed6a44c0bbb10cd2cc5e272b29e7845f6a2f57ee4464", size = 11455675, upload-time = "2025-02-05T03:50:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/b3/d0/92ae4cde706923a2d3f2d6c39629134063ff64b9dedca9c1388363da072d/mypy-1.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8023ff13985661b50a5928fc7a5ca15f3d1affb41e5f0a9952cb68ef090b31ee", size = 12410020, upload-time = "2025-02-05T03:48:48.705Z" }, + { url = "https://files.pythonhosted.org/packages/46/8b/df49974b337cce35f828ba6fda228152d6db45fed4c86ba56ffe442434fd/mypy-1.15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1124a18bc11a6a62887e3e137f37f53fbae476dc36c185d549d4f837a2a6a14e", size = 12498582, upload-time = "2025-02-05T03:49:03.628Z" }, + { url = "https://files.pythonhosted.org/packages/13/50/da5203fcf6c53044a0b699939f31075c45ae8a4cadf538a9069b165c1050/mypy-1.15.0-cp312-cp312-win_amd64.whl", hash = "sha256:171a9ca9a40cd1843abeca0e405bc1940cd9b305eaeea2dda769ba096932bb22", size = 9366614, upload-time = "2025-02-05T03:50:00.313Z" }, + { url = "https://files.pythonhosted.org/packages/6a/9b/fd2e05d6ffff24d912f150b87db9e364fa8282045c875654ce7e32fffa66/mypy-1.15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93faf3fdb04768d44bf28693293f3904bbb555d076b781ad2530214ee53e3445", size = 10788592, upload-time = "2025-02-05T03:48:55.789Z" }, + { url = 
"https://files.pythonhosted.org/packages/74/37/b246d711c28a03ead1fd906bbc7106659aed7c089d55fe40dd58db812628/mypy-1.15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:811aeccadfb730024c5d3e326b2fbe9249bb7413553f15499a4050f7c30e801d", size = 9753611, upload-time = "2025-02-05T03:48:44.581Z" }, + { url = "https://files.pythonhosted.org/packages/a6/ac/395808a92e10cfdac8003c3de9a2ab6dc7cde6c0d2a4df3df1b815ffd067/mypy-1.15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:98b7b9b9aedb65fe628c62a6dc57f6d5088ef2dfca37903a7d9ee374d03acca5", size = 11438443, upload-time = "2025-02-05T03:49:25.514Z" }, + { url = "https://files.pythonhosted.org/packages/d2/8b/801aa06445d2de3895f59e476f38f3f8d610ef5d6908245f07d002676cbf/mypy-1.15.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c43a7682e24b4f576d93072216bf56eeff70d9140241f9edec0c104d0c515036", size = 12402541, upload-time = "2025-02-05T03:49:57.623Z" }, + { url = "https://files.pythonhosted.org/packages/c7/67/5a4268782eb77344cc613a4cf23540928e41f018a9a1ec4c6882baf20ab8/mypy-1.15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:baefc32840a9f00babd83251560e0ae1573e2f9d1b067719479bfb0e987c6357", size = 12494348, upload-time = "2025-02-05T03:48:52.361Z" }, + { url = "https://files.pythonhosted.org/packages/83/3e/57bb447f7bbbfaabf1712d96f9df142624a386d98fb026a761532526057e/mypy-1.15.0-cp313-cp313-win_amd64.whl", hash = "sha256:b9378e2c00146c44793c98b8d5a61039a048e31f429fb0eb546d93f4b000bedf", size = 9373648, upload-time = "2025-02-05T03:49:11.395Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/4e/a7d65c7322c510de2c409ff3828b03354a7c43f5a8ed458a7a131b41c7b9/mypy-1.15.0-py3-none-any.whl", hash = "sha256:5469affef548bd1895d86d3bf10ce2b44e33d86923c29e4d675b3e323437ea3e", size = 2221777, upload-time = "2025-02-05T03:50:08.348Z" }, ] [[package]] diff --git a/packages/mistralai_azure/.genignore b/packages/mistralai_azure/.genignore deleted file mode 100644 index ba7f2350..00000000 --- a/packages/mistralai_azure/.genignore +++ /dev/null @@ -1,5 +0,0 @@ -pyproject.toml -src/mistralai_azure/sdk.py -README.md -USAGE.md -docs/sdks/**/README.md \ No newline at end of file diff --git a/packages/mistralai_azure/.vscode/settings.json b/packages/mistralai_azure/.vscode/settings.json deleted file mode 100644 index 8d79f0ab..00000000 --- a/packages/mistralai_azure/.vscode/settings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "python.testing.pytestArgs": ["tests", "-vv"], - "python.testing.unittestEnabled": false, - "python.testing.pytestEnabled": true, - "pylint.args": ["--rcfile=pylintrc"] -} diff --git a/packages/mistralai_azure/USAGE.md b/packages/mistralai_azure/USAGE.md deleted file mode 100644 index 0ccf3d70..00000000 --- a/packages/mistralai_azure/USAGE.md +++ /dev/null @@ -1,55 +0,0 @@ - -### Create Chat Completions - -This example shows how to create chat completions. - -```python -# Synchronous Example -from mistralai_azure import MistralAzure -import os - -s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") -) - - -res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="azureai") - -if res is not None: - # handle response - pass -``` - -
- -The same SDK client can also be used to make asychronous requests by importing asyncio. -```python -# Asynchronous Example -import asyncio -from mistralai_azure import MistralAzure -import os - -async def main(): - s = MistralAzure( - azure_api_key=os.getenv("AZURE_API_KEY", ""), - azure_endpoint=os.getenv("AZURE_ENDPOINT", "") - ) - res = await s.chat.complete_async(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], model="azureai") - if res is not None: - # handle response - pass - -asyncio.run(main()) -``` - \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/assistantmessagerole.md b/packages/mistralai_azure/docs/models/assistantmessagerole.md deleted file mode 100644 index 658229e7..00000000 --- a/packages/mistralai_azure/docs/models/assistantmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# AssistantMessageRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md b/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md deleted file mode 100644 index c807dacd..00000000 --- a/packages/mistralai_azure/docs/models/completionresponsestreamchoice.md +++ /dev/null @@ -1,10 +0,0 @@ -# CompletionResponseStreamChoice - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | -| `index` | *int* | :heavy_check_mark: | N/A | -| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | -| `finish_reason` | [Nullable[models.FinishReason]](../models/finishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/packages/mistralai_azure/docs/models/deltamessage.md b/packages/mistralai_azure/docs/models/deltamessage.md deleted file mode 100644 index 61deabbf..00000000 --- a/packages/mistralai_azure/docs/models/deltamessage.md +++ /dev/null @@ -1,10 +0,0 @@ -# DeltaMessage - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/documenturlchunk.md b/packages/mistralai_azure/docs/models/documenturlchunk.md deleted file mode 100644 index 6c9a5b4d..00000000 --- a/packages/mistralai_azure/docs/models/documenturlchunk.md +++ /dev/null @@ -1,10 +0,0 @@ -# DocumentURLChunk - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `document_url` | *str* | :heavy_check_mark: | N/A | -| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | -| `type` | [Optional[models.DocumentURLChunkType]](../models/documenturlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/documenturlchunktype.md b/packages/mistralai_azure/docs/models/documenturlchunktype.md deleted file mode 100644 index 
32e1fa9e..00000000 --- a/packages/mistralai_azure/docs/models/documenturlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# DocumentURLChunkType - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `DOCUMENT_URL` | document_url | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurl.md b/packages/mistralai_azure/docs/models/imageurl.md deleted file mode 100644 index 7c2bcbc3..00000000 --- a/packages/mistralai_azure/docs/models/imageurl.md +++ /dev/null @@ -1,9 +0,0 @@ -# ImageURL - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `url` | *str* | :heavy_check_mark: | N/A | -| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurlchunk.md b/packages/mistralai_azure/docs/models/imageurlchunk.md deleted file mode 100644 index f1b926ef..00000000 --- a/packages/mistralai_azure/docs/models/imageurlchunk.md +++ /dev/null @@ -1,11 +0,0 @@ -# ImageURLChunk - -{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/imageurlchunktype.md b/packages/mistralai_azure/docs/models/imageurlchunktype.md deleted file mode 100644 index 2064a0b4..00000000 --- 
a/packages/mistralai_azure/docs/models/imageurlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ImageURLChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `IMAGE_URL` | image_url | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/referencechunk.md b/packages/mistralai_azure/docs/models/referencechunk.md deleted file mode 100644 index a132ca2f..00000000 --- a/packages/mistralai_azure/docs/models/referencechunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# ReferenceChunk - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/referencechunktype.md b/packages/mistralai_azure/docs/models/referencechunktype.md deleted file mode 100644 index 1e0e2fe6..00000000 --- a/packages/mistralai_azure/docs/models/referencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ReferenceChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `REFERENCE` | reference | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/role.md b/packages/mistralai_azure/docs/models/role.md deleted file mode 100644 index affca78d..00000000 --- a/packages/mistralai_azure/docs/models/role.md +++ /dev/null @@ -1,8 +0,0 @@ -# Role - - -## Values - -| Name | Value | -| -------- | -------- | -| `SYSTEM` | system | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/textchunk.md b/packages/mistralai_azure/docs/models/textchunk.md deleted file mode 100644 index 
6daab3c3..00000000 --- a/packages/mistralai_azure/docs/models/textchunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# TextChunk - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/thinkchunktype.md b/packages/mistralai_azure/docs/models/thinkchunktype.md deleted file mode 100644 index baf6f755..00000000 --- a/packages/mistralai_azure/docs/models/thinkchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ThinkChunkType - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `THINKING` | thinking | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/toolmessagerole.md b/packages/mistralai_azure/docs/models/toolmessagerole.md deleted file mode 100644 index c24e59c0..00000000 --- a/packages/mistralai_azure/docs/models/toolmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `TOOL` | tool | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/type.md b/packages/mistralai_azure/docs/models/type.md deleted file mode 100644 index eb0581e7..00000000 --- a/packages/mistralai_azure/docs/models/type.md +++ /dev/null @@ -1,8 +0,0 @@ -# Type - - -## Values - -| Name | Value | -| ------ | ------ | -| `TEXT` | text | \ No newline at end of file diff --git a/packages/mistralai_azure/docs/models/usermessagerole.md b/packages/mistralai_azure/docs/models/usermessagerole.md deleted file mode 100644 index 171124e4..00000000 --- a/packages/mistralai_azure/docs/models/usermessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# UserMessageRole - - -## Values - -| Name | Value | -| ------ | 
------ | -| `USER` | user | \ No newline at end of file diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py deleted file mode 100644 index 77df6aef..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/custom_user_agent.py +++ /dev/null @@ -1,22 +0,0 @@ -# THIS FILE IS THE EXACT COPY OF THE ORIGINAL FILE FROM src/mistralai/_hooks/custom_user_agent.py -from typing import Union - -import httpx - -from .types import BeforeRequestContext, BeforeRequestHook - -PREFIX = "mistral-client-python/" - -class CustomUserAgentHook(BeforeRequestHook): - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - current = request.headers["user-agent"] - if current.startswith(PREFIX): - return request - - request.headers["user-agent"] = ( - PREFIX + current.split(" ")[1] - ) - - return request diff --git a/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py b/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py deleted file mode 100644 index 304edfa2..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/_hooks/registration.py +++ /dev/null @@ -1,15 +0,0 @@ -from .custom_user_agent import CustomUserAgentHook -from .types import Hooks - -# This file is only ever generated once on the first generation and then is free to be modified. -# Any hooks you wish to add should be registered in the init_hooks function. Feel free to define them -# in this file or in separate files in the hooks folder. 
- - -def init_hooks(hooks: Hooks): - # pylint: disable=unused-argument - """Add hooks by calling hooks.register{sdk_init/before_request/after_success/after_error}Hook - with an instance of a hook that implements that specific Hook interface - Hooks are registered per SDK instance, and are valid for the lifetime of the SDK instance - """ - hooks.register_before_request_hook(CustomUserAgentHook()) diff --git a/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py deleted file mode 100644 index a40e451c..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/imageurlchunk.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from .imageurl import ImageURL, ImageURLTypedDict -from mistralai_azure.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ImageURLChunkImageURLTypedDict = TypeAliasType( - "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] -) - - -ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) - - -ImageURLChunkType = Literal["image_url",] - - -class ImageURLChunkTypedDict(TypedDict): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURLTypedDict - type: NotRequired[ImageURLChunkType] - - -class ImageURLChunk(BaseModel): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURL - - type: Optional[ImageURLChunkType] = "image_url" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py b/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py deleted file mode 100644 index 32d2ca68..00000000 --- 
a/packages/mistralai_azure/src/mistralai_azure/models/referencechunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ReferenceChunkType = Literal["reference",] - - -class ReferenceChunkTypedDict(TypedDict): - reference_ids: List[int] - type: NotRequired[ReferenceChunkType] - - -class ReferenceChunk(BaseModel): - reference_ids: List[int] - - type: Optional[ReferenceChunkType] = "reference" diff --git a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py b/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py deleted file mode 100644 index 5845456e..00000000 --- a/packages/mistralai_azure/src/mistralai_azure/models/textchunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_azure.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Type = Literal["text",] - - -class TextChunkTypedDict(TypedDict): - text: str - type: NotRequired[Type] - - -class TextChunk(BaseModel): - text: str - - type: Optional[Type] = "text" diff --git a/packages/mistralai_gcp/.genignore b/packages/mistralai_gcp/.genignore deleted file mode 100644 index 76043176..00000000 --- a/packages/mistralai_gcp/.genignore +++ /dev/null @@ -1,5 +0,0 @@ -pyproject.toml -src/mistralai_gcp/sdk.py -README.md -USAGE.md -docs/sdks/**/README.md \ No newline at end of file diff --git a/packages/mistralai_gcp/.vscode/settings.json b/packages/mistralai_gcp/.vscode/settings.json deleted file mode 100644 index 8d79f0ab..00000000 --- a/packages/mistralai_gcp/.vscode/settings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "python.testing.pytestArgs": ["tests", "-vv"], - "python.testing.unittestEnabled": false, - "python.testing.pytestEnabled": true, - "pylint.args": ["--rcfile=pylintrc"] -} diff --git a/packages/mistralai_gcp/USAGE.md b/packages/mistralai_gcp/USAGE.md deleted file mode 100644 index 30fa08aa..00000000 --- a/packages/mistralai_gcp/USAGE.md +++ /dev/null @@ -1,51 +0,0 @@ - -### Create Chat Completions - -This example shows how to create chat completions. - -```python -# Synchronous Example -from mistralai_gcp import MistralGCP -import os - -s = MistralGCP() - - -res = s.chat.complete(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, -], model="mistral-small-latest") - -if res is not None: - # handle response - pass -``` - -
- -The same SDK client can also be used to make asychronous requests by importing asyncio. -```python -# Asynchronous Example -import asyncio -from mistralai_gcp import MistralGCP -import os - -async def main(): - s = MistralGCP( - api_key=os.getenv("API_KEY", ""), - ) - res = await s.chat.complete_async(messages=[ - { - "content": "Who is the best French painter? Answer in one short sentence.", - "role": "user", - }, - ], model="mistral-small-latest") - if res is not None: - # handle response - pass - -asyncio.run(main()) -``` - \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/assistantmessagerole.md b/packages/mistralai_gcp/docs/models/assistantmessagerole.md deleted file mode 100644 index 658229e7..00000000 --- a/packages/mistralai_gcp/docs/models/assistantmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# AssistantMessageRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md b/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md deleted file mode 100644 index c807dacd..00000000 --- a/packages/mistralai_gcp/docs/models/completionresponsestreamchoice.md +++ /dev/null @@ -1,10 +0,0 @@ -# CompletionResponseStreamChoice - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | -| `index` | *int* | :heavy_check_mark: | N/A | -| `delta` | [models.DeltaMessage](../models/deltamessage.md) | :heavy_check_mark: | N/A | -| `finish_reason` | [Nullable[models.FinishReason]](../models/finishreason.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/deltamessage.md 
b/packages/mistralai_gcp/docs/models/deltamessage.md deleted file mode 100644 index 61deabbf..00000000 --- a/packages/mistralai_gcp/docs/models/deltamessage.md +++ /dev/null @@ -1,10 +0,0 @@ -# DeltaMessage - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `role` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `content` | [OptionalNullable[models.Content]](../models/content.md) | :heavy_minus_sign: | N/A | -| `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurl.md b/packages/mistralai_gcp/docs/models/imageurl.md deleted file mode 100644 index 7c2bcbc3..00000000 --- a/packages/mistralai_gcp/docs/models/imageurl.md +++ /dev/null @@ -1,9 +0,0 @@ -# ImageURL - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `url` | *str* | :heavy_check_mark: | N/A | -| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurlchunk.md b/packages/mistralai_gcp/docs/models/imageurlchunk.md deleted file mode 100644 index f1b926ef..00000000 --- a/packages/mistralai_gcp/docs/models/imageurlchunk.md +++ /dev/null @@ -1,11 +0,0 @@ -# ImageURLChunk - -{"type":"image_url","image_url":{"url":"data:image/png;base64,iVBORw0 - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | 
-------------------------------------------------------------------- | -| `image_url` | [models.ImageURLChunkImageURL](../models/imageurlchunkimageurl.md) | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/imageurlchunktype.md b/packages/mistralai_gcp/docs/models/imageurlchunktype.md deleted file mode 100644 index 2064a0b4..00000000 --- a/packages/mistralai_gcp/docs/models/imageurlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ImageURLChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `IMAGE_URL` | image_url | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/referencechunk.md b/packages/mistralai_gcp/docs/models/referencechunk.md deleted file mode 100644 index a132ca2f..00000000 --- a/packages/mistralai_gcp/docs/models/referencechunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# ReferenceChunk - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/referencechunktype.md b/packages/mistralai_gcp/docs/models/referencechunktype.md deleted file mode 100644 index 1e0e2fe6..00000000 --- a/packages/mistralai_gcp/docs/models/referencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ReferenceChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `REFERENCE` | reference | \ No newline at end of file diff --git 
a/packages/mistralai_gcp/docs/models/role.md b/packages/mistralai_gcp/docs/models/role.md deleted file mode 100644 index affca78d..00000000 --- a/packages/mistralai_gcp/docs/models/role.md +++ /dev/null @@ -1,8 +0,0 @@ -# Role - - -## Values - -| Name | Value | -| -------- | -------- | -| `SYSTEM` | system | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/textchunk.md b/packages/mistralai_gcp/docs/models/textchunk.md deleted file mode 100644 index 6daab3c3..00000000 --- a/packages/mistralai_gcp/docs/models/textchunk.md +++ /dev/null @@ -1,9 +0,0 @@ -# TextChunk - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | ------------------------------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.Type]](../models/type.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/thinkchunktype.md b/packages/mistralai_gcp/docs/models/thinkchunktype.md deleted file mode 100644 index baf6f755..00000000 --- a/packages/mistralai_gcp/docs/models/thinkchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ThinkChunkType - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `THINKING` | thinking | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/toolmessagerole.md b/packages/mistralai_gcp/docs/models/toolmessagerole.md deleted file mode 100644 index c24e59c0..00000000 --- a/packages/mistralai_gcp/docs/models/toolmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `TOOL` | tool | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/type.md b/packages/mistralai_gcp/docs/models/type.md deleted file mode 100644 index eb0581e7..00000000 --- a/packages/mistralai_gcp/docs/models/type.md +++ /dev/null @@ 
-1,8 +0,0 @@ -# Type - - -## Values - -| Name | Value | -| ------ | ------ | -| `TEXT` | text | \ No newline at end of file diff --git a/packages/mistralai_gcp/docs/models/usermessagerole.md b/packages/mistralai_gcp/docs/models/usermessagerole.md deleted file mode 100644 index 171124e4..00000000 --- a/packages/mistralai_gcp/docs/models/usermessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# UserMessageRole - - -## Values - -| Name | Value | -| ------ | ------ | -| `USER` | user | \ No newline at end of file diff --git a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py b/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py deleted file mode 100644 index 77df6aef..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/_hooks/custom_user_agent.py +++ /dev/null @@ -1,22 +0,0 @@ -# THIS FILE IS THE EXACT COPY OF THE ORIGINAL FILE FROM src/mistralai/_hooks/custom_user_agent.py -from typing import Union - -import httpx - -from .types import BeforeRequestContext, BeforeRequestHook - -PREFIX = "mistral-client-python/" - -class CustomUserAgentHook(BeforeRequestHook): - def before_request( - self, hook_ctx: BeforeRequestContext, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - current = request.headers["user-agent"] - if current.startswith(PREFIX): - return request - - request.headers["user-agent"] = ( - PREFIX + current.split(" ")[1] - ) - - return request diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py deleted file mode 100644 index ddb53f21..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/imageurlchunk.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from .imageurl import ImageURL, ImageURLTypedDict -from mistralai_gcp.types import BaseModel -from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ImageURLChunkImageURLTypedDict = TypeAliasType( - "ImageURLChunkImageURLTypedDict", Union[ImageURLTypedDict, str] -) - - -ImageURLChunkImageURL = TypeAliasType("ImageURLChunkImageURL", Union[ImageURL, str]) - - -ImageURLChunkType = Literal["image_url",] - - -class ImageURLChunkTypedDict(TypedDict): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURLTypedDict - type: NotRequired[ImageURLChunkType] - - -class ImageURLChunk(BaseModel): - r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" - - image_url: ImageURLChunkImageURL - - type: Optional[ImageURLChunkType] = "image_url" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py deleted file mode 100644 index 904e8b82..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/referencechunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ReferenceChunkType = Literal["reference",] - - -class ReferenceChunkTypedDict(TypedDict): - reference_ids: List[int] - type: NotRequired[ReferenceChunkType] - - -class ReferenceChunk(BaseModel): - reference_ids: List[int] - - type: Optional[ReferenceChunkType] = "reference" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py b/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py deleted file mode 100644 index c4a8cf28..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/models/textchunk.py +++ /dev/null @@ -1,20 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" - -from __future__ import annotations -from mistralai_gcp.types import BaseModel -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -Type = Literal["text",] - - -class TextChunkTypedDict(TypedDict): - text: str - type: NotRequired[Type] - - -class TextChunk(BaseModel): - text: str - - type: Optional[Type] = "text" diff --git a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py b/packages/mistralai_gcp/src/mistralai_gcp/sdk.py deleted file mode 100644 index de48fbbb..00000000 --- a/packages/mistralai_gcp/src/mistralai_gcp/sdk.py +++ /dev/null @@ -1,233 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" - -import json -import weakref -from typing import Any, Optional, Union, cast - -import google.auth -import google.auth.credentials -import google.auth.transport -import google.auth.transport.requests -import httpx - -from mistralai_gcp import models -from mistralai_gcp._hooks import BeforeRequestHook, SDKHooks -from mistralai_gcp.chat import Chat -from mistralai_gcp.fim import Fim -from mistralai_gcp.types import UNSET, OptionalNullable - -from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients -from .sdkconfiguration import SDKConfiguration -from .utils.logger import Logger, get_default_logger -from .utils.retries import RetryConfig - -LEGACY_MODEL_ID_FORMAT = { - "codestral-2405": "codestral@2405", - "mistral-large-2407": "mistral-large@2407", - "mistral-nemo-2407": "mistral-nemo@2407", -} - - -def get_model_info(model: str) -> tuple[str, str]: - # if the model requiers the legacy fomat, use it, else do nothing. - if model in LEGACY_MODEL_ID_FORMAT: - return "-".join(model.split("-")[:-1]), LEGACY_MODEL_ID_FORMAT[model] - return model, model - - -class MistralGoogleCloud(BaseSDK): - r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" - - chat: Chat - r"""Chat Completion API.""" - fim: Fim - r"""Fill-in-the-middle API.""" - - def __init__( - self, - region: str = "europe-west4", - project_id: Optional[str] = None, - access_token: Optional[str] = None, - client: Optional[HttpClient] = None, - async_client: Optional[AsyncHttpClient] = None, - retry_config: OptionalNullable[RetryConfig] = UNSET, - timeout_ms: Optional[int] = None, - debug_logger: Optional[Logger] = None, - ) -> None: - r"""Instantiates the SDK configuring it with the provided parameters. - - :param api_key: The api_key required for authentication - :param server: The server by name to use for all methods - :param server_url: The server URL to use for all methods - :param url_params: Parameters to optionally template the server URL with - :param client: The HTTP client to use for all synchronous methods - :param async_client: The Async HTTP client to use for all asynchronous methods - :param retry_config: The retry configuration to use for all supported methods - :param timeout_ms: Optional request timeout applied to each operation in milliseconds - """ - - credentials = None - if not access_token: - credentials, loaded_project_id = google.auth.default( - scopes=["https://www.googleapis.com/auth/cloud-platform"], - ) - - # default will already raise a google.auth.exceptions.DefaultCredentialsError if no credentials are found - assert isinstance( - credentials, google.auth.credentials.Credentials - ), "credentials must be an instance of google.auth.credentials.Credentials" - - credentials.refresh(google.auth.transport.requests.Request()) - project_id = project_id or loaded_project_id - - if project_id is None: - raise ValueError("project_id must be 
provided") - - def auth_token() -> str: - if access_token: - return access_token - - assert credentials is not None, "credentials must be initialized" - credentials.refresh(google.auth.transport.requests.Request()) - token = credentials.token - if not token: - raise Exception("Failed to get token from credentials") - return token - - client_supplied = True - if client is None: - client = httpx.Client() - client_supplied = False - - assert issubclass( - type(client), HttpClient - ), "The provided client must implement the HttpClient protocol." - - async_client_supplied = True - if async_client is None: - async_client = httpx.AsyncClient() - async_client_supplied = False - - if debug_logger is None: - debug_logger = get_default_logger() - - assert issubclass( - type(async_client), AsyncHttpClient - ), "The provided async_client must implement the AsyncHttpClient protocol." - - security: Any = None - if callable(auth_token): - security = lambda: models.Security( # pylint: disable=unnecessary-lambda-assignment - api_key=auth_token() - ) - else: - security = models.Security(api_key=auth_token) - - BaseSDK.__init__( - self, - SDKConfiguration( - client=client, - client_supplied=client_supplied, - async_client=async_client, - async_client_supplied=async_client_supplied, - security=security, - server_url=f"https://{region}-aiplatform.googleapis.com", - server=None, - retry_config=retry_config, - timeout_ms=timeout_ms, - debug_logger=debug_logger, - ), - ) - - hooks = SDKHooks() - hook = GoogleCloudBeforeRequestHook(region, project_id) - hooks.register_before_request_hook(hook) - current_server_url, *_ = self.sdk_configuration.get_server_details() - server_url, self.sdk_configuration.client = hooks.sdk_init( - current_server_url, client - ) - if current_server_url != server_url: - self.sdk_configuration.server_url = server_url - - # pylint: disable=protected-access - self.sdk_configuration.__dict__["_hooks"] = hooks - - weakref.finalize( - self, - close_clients, - 
cast(ClientOwner, self.sdk_configuration), - self.sdk_configuration.client, - self.sdk_configuration.client_supplied, - self.sdk_configuration.async_client, - self.sdk_configuration.async_client_supplied, - ) - - self._init_sdks() - - def _init_sdks(self): - self.chat = Chat(self.sdk_configuration) - self.fim = Fim(self.sdk_configuration) - - def __enter__(self): - return self - - async def __aenter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.client is not None - and not self.sdk_configuration.client_supplied - ): - self.sdk_configuration.client.close() - self.sdk_configuration.client = None - - async def __aexit__(self, exc_type, exc_val, exc_tb): - if ( - self.sdk_configuration.async_client is not None - and not self.sdk_configuration.async_client_supplied - ): - await self.sdk_configuration.async_client.aclose() - self.sdk_configuration.async_client = None - - -class GoogleCloudBeforeRequestHook(BeforeRequestHook): - def __init__(self, region: str, project_id: str): - self.region = region - self.project_id = project_id - - def before_request( - self, hook_ctx, request: httpx.Request - ) -> Union[httpx.Request, Exception]: - # The goal of this function is to template in the region, project and model into the URL path - # We do this here so that the API remains more user-friendly - model_id = None - new_content = None - if request.content: - parsed = json.loads(request.content.decode("utf-8")) - model_raw = parsed.get("model") - model_name, model_id = get_model_info(model_raw) - parsed["model"] = model_name - new_content = json.dumps(parsed).encode("utf-8") - - if model_id == "": - raise ValueError("model must be provided") - - stream = "streamRawPredict" in request.url.path - specifier = "streamRawPredict" if stream else "rawPredict" - url = f"/v1/projects/{self.project_id}/locations/{self.region}/publishers/mistralai/models/{model_id}:{specifier}" - - headers = dict(request.headers) - # Delete 
content-length header as it will need to be recalculated - headers.pop("content-length", None) - - next_request = httpx.Request( - method=request.method, - url=request.url.copy_with(path=url), - headers=headers, - content=new_content, - stream=None, - ) - - return next_request diff --git a/pylintrc b/pylintrc index d4e4ba5e..2dc62b0e 100644 --- a/pylintrc +++ b/pylintrc @@ -103,7 +103,7 @@ source-roots=src # When enabled, pylint would attempt to guess common misconfiguration and emit # user-friendly hints instead of false-positive error messages. -suggestion-mode=yes +# Note: suggestion-mode was removed in pylint 3.0 # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. diff --git a/pyproject.toml b/pyproject.toml index f8006e7d..c1762f0a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "2.0.0a3" +version = "2.0.0a4" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" }] requires-python = ">=3.10" @@ -8,7 +8,7 @@ readme = "README.md" dependencies = [ "eval-type-backport >=0.2.0", "httpx >=0.28.1", - "pydantic >=2.10.3", + "pydantic >=2.11.2", "python-dateutil >=2.8.2", "typing-inspection >=0.4.0", "pyyaml (>=6.0.2,<7.0.0)", @@ -65,11 +65,18 @@ default-groups = [ [tool.setuptools.package-data] "*" = ["py.typed", "src/mistralai/client/py.typed"] +[tool.hatch.build] +dev-mode-dirs = [ + "src", + "packages/azure/src", + "packages/gcp/src", +] + [tool.hatch.build.targets.sdist] include = [ "src/mistralai", - "packages/mistralai_azure/src/mistralai_azure", - "packages/mistralai_gcp/src/mistralai_gcp", + "packages/azure/src/mistralai", + "packages/gcp/src/mistralai", ] [tool.hatch.build.targets.sdist.force-include] @@ -79,27 +86,27 @@ include = [ [tool.hatch.build.targets.wheel] include = [ "src/mistralai", - "packages/mistralai_azure/src/mistralai_azure", - "packages/mistralai_gcp/src/mistralai_gcp", + "packages/azure/src/mistralai", + "packages/gcp/src/mistralai", ] [tool.hatch.build.targets.wheel.sources] "src/mistralai" = "mistralai" -"packages/mistralai_azure/src/mistralai_azure" = "mistralai_azure" -"packages/mistralai_gcp/src/mistralai_gcp" = "mistralai_gcp" +"packages/azure/src/mistralai/azure" = "mistralai/azure" +"packages/gcp/src/mistralai/gcp" = "mistralai/gcp" [build-system] requires = ["hatchling"] build-backend = "hatchling.build" [tool.pytest.ini_options] -pythonpath = ["src"] +pythonpath = ["src", "packages/azure/src", "packages/gcp/src"] [tool.mypy] disable_error_code = "misc" namespace_packages = true explicit_package_bases = true -mypy_path = "src" +mypy_path = "src:packages/azure/src:packages/gcp/src" [[tool.mypy.overrides]] module = "typing_inspect" @@ -112,7 +119,8 @@ module = [ "authlib.*", "websockets.*", "mcp.*", - "griffe.*" + "griffe.*", + "google.*" ] ignore_missing_imports = true diff --git a/scripts/lint_custom_code.sh b/scripts/lint_custom_code.sh index 57bab71a..4baa3d88 
100755 --- a/scripts/lint_custom_code.sh +++ b/scripts/lint_custom_code.sh @@ -7,7 +7,19 @@ if [ -f src/mistralai/__init__.py ]; then echo "ERROR: PEP 420 violation - src/mistralai/__init__.py must not exist" ERRORS=1 else - echo "-> PEP 420 namespace OK" + echo "-> PEP 420 namespace OK (core)" +fi +if [ -f packages/azure/src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - packages/azure/src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK (azure)" +fi +if [ -f packages/gcp/src/mistralai/__init__.py ]; then + echo "ERROR: PEP 420 violation - packages/gcp/src/mistralai/__init__.py must not exist" + ERRORS=1 +else + echo "-> PEP 420 namespace OK (gcp)" fi echo "Running mypy..." @@ -19,6 +31,16 @@ uv run mypy src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" uv run mypy src/mistralai/client/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure hooks" +uv run mypy packages/azure/src/mistralai/azure/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure sdk" +uv run mypy packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv run mypy packages/gcp/src/mistralai/gcp/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on gcp sdk" +uv run mypy packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 echo "-> running on scripts" uv run mypy scripts/ || ERRORS=1 @@ -29,6 +51,14 @@ echo "-> running on extra" uv run pyright src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" uv run pyright src/mistralai/client/_hooks/ || ERRORS=1 +echo "-> running on azure hooks" +uv run pyright packages/azure/src/mistralai/azure/client/_hooks/ || ERRORS=1 +echo "-> running on azure sdk" +uv run pyright packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv 
run pyright packages/gcp/src/mistralai/gcp/client/_hooks/ || ERRORS=1 +echo "-> running on gcp sdk" +uv run pyright packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 echo "-> running on scripts" uv run pyright scripts/ || ERRORS=1 @@ -40,6 +70,16 @@ uv run ruff check src/mistralai/extra/ || ERRORS=1 echo "-> running on hooks" uv run ruff check src/mistralai/client/_hooks/ \ --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure hooks" +uv run ruff check packages/azure/src/mistralai/azure/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on azure sdk" +uv run ruff check packages/azure/src/mistralai/azure/client/sdk.py || ERRORS=1 +echo "-> running on gcp hooks" +uv run ruff check packages/gcp/src/mistralai/gcp/client/_hooks/ \ + --exclude __init__.py --exclude sdkhooks.py --exclude types.py || ERRORS=1 +echo "-> running on gcp sdk" +uv run ruff check packages/gcp/src/mistralai/gcp/client/sdk.py || ERRORS=1 echo "-> running on scripts" uv run ruff check scripts/ || ERRORS=1 diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 22fc94e5..998b8dbe 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -1,13 +1,18 @@ #!/bin/bash -# Default retry count +# Defaults RETRY_COUNT=3 +NO_EXTRA_DEP=false # Parse command line arguments while [[ $# -gt 0 ]]; do case $1 in + --no-extra-dep) + NO_EXTRA_DEP=true + shift + ;; --retry-count) - RETRY_COUNT="$1" + RETRY_COUNT="$2" shift 2 ;; --help) @@ -25,14 +30,15 @@ while [[ $# -gt 0 ]]; do esac done -# List of files to exclude +# List of files to always exclude exclude_files=( "examples/mistral/chat/chatbot_with_streaming.py" "examples/mistral/agents/async_conversation_run_mcp_remote_auth.py" - "examples/mistral/jobs/async_fine_tuning_chat.py" "examples/mistral/jobs/async_fine_tuning.py" + "examples/mistral/jobs/async_fine_tuning_chat.py" "examples/mistral/jobs/fine_tuning.py" 
"examples/mistral/jobs/fine_tuning_dry_run.py" + "examples/mistral/jobs/async_jobs_ocr_batch_annotation.py" "examples/mistral/classifier/async_classifier.py" "examples/mistral/mcp_servers/sse_server.py" "examples/mistral/mcp_servers/stdio_server.py" @@ -44,6 +50,21 @@ exclude_files=( "examples/mistral/audio/async_realtime_transcription_stream.py" ) +# Files that require extra dependencies (agents, mcp, audio, etc.) +extra_dep_files=( + "examples/mistral/agents/" + "examples/mistral/mcp_servers/" + "examples/mistral/audio/" +) + +if [ "$NO_EXTRA_DEP" = true ]; then + for pattern in "${extra_dep_files[@]}"; do + for f in ${pattern}*.py; do + [ -f "$f" ] && exclude_files+=("$f") + done + done +fi + failed=0 echo "Skipping scripts" diff --git a/tasks.py b/tasks.py index 0d5483e1..8b1bc3f0 100644 --- a/tasks.py +++ b/tasks.py @@ -19,8 +19,6 @@ def update_speakeasy( workflow_lock_path: str = WORKFLOW_LOCK_PATH, verbose: bool = False, ): - if not re.match(r'^\d+\.\d+\.\d+$', version): - raise ValueError(f"Invalid version format: {version}. Expected format: X.Y.Z (e.g., 1.2.3)") """ Update the speakeasy version and pin the openapi specs to the current revision. @@ -30,6 +28,8 @@ def update_speakeasy( inv update-speakeasy --version "1.580.2" --targets "mistralai-sdk" --workflow-path ".speakeasy/workflow.yaml" --workflow-lock-path ".speakeasy/workflow.lock.yaml" inv update-speakeasy --version "1.580.2" --targets "mistralai-sdk" --workflow-path ".speakeasy/workflow.yaml" --workflow-lock-path ".speakeasy/workflow.lock.yaml" --verbose """ + if not re.match(r'^\d+\.\d+\.\d+$', version): + raise ValueError(f"Invalid version format: {version}. Expected format: X.Y.Z (e.g., 1.2.3)") for target in targets: try: SpeakeasyTargets(target) diff --git a/tests/test_azure_integration.py b/tests/test_azure_integration.py new file mode 100644 index 00000000..ac4e38a1 --- /dev/null +++ b/tests/test_azure_integration.py @@ -0,0 +1,433 @@ +""" +Integration tests for Azure SDK. 
+ +These tests require credentials and make real API calls. +Skip if AZURE_API_KEY env var is not set. + +Prerequisites: + 1. Azure API key (stored in Bitwarden at "[MaaS] - Azure Foundry API key") + 2. Tailscale connected via gw-0 exit node + +Usage: + AZURE_API_KEY=xxx pytest tests/test_azure_integration.py -v + +Environment variables: + AZURE_API_KEY: API key (required) + AZURE_ENDPOINT: Base URL (default: https://maas-qa-aifoundry.services.ai.azure.com/models) + AZURE_MODEL: Model name (default: maas-qa-ministral-3b) + AZURE_API_VERSION: API version (default: 2024-05-01-preview) + +Note: AZURE_ENDPOINT should be the base URL without path suffixes. +The SDK appends /chat/completions to this URL. The api_version parameter +is automatically injected as a query parameter by the SDK. + +Available models: + Chat: maas-qa-ministral-3b, maas-qa-mistral-large-3, maas-qa-mistral-medium-2505 + OCR: maas-qa-mistral-document-ai-2505, maas-qa-mistral-document-ai-2512 + (OCR uses a separate endpoint, not tested here) +""" +import json +import os + +import pytest + +# Configuration from env vars +AZURE_API_KEY = os.environ.get("AZURE_API_KEY") +AZURE_ENDPOINT = os.environ.get( + "AZURE_ENDPOINT", + "https://maas-qa-aifoundry.services.ai.azure.com/models", +) +AZURE_MODEL = os.environ.get("AZURE_MODEL", "maas-qa-ministral-3b") +AZURE_API_VERSION = os.environ.get("AZURE_API_VERSION", "2024-05-01-preview") + +SKIP_REASON = "AZURE_API_KEY env var required" + +pytestmark = pytest.mark.skipif( + not AZURE_API_KEY, + reason=SKIP_REASON +) + +# Shared tool definition for tool-call tests +WEATHER_TOOL = { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the weather in a city", + "parameters": { + "type": "object", + "properties": {"city": {"type": "string"}}, + "required": ["city"], + }, + }, +} + + +@pytest.fixture +def azure_client(): + """Create an Azure client 
with api_version parameter.""" + from mistralai.azure.client import MistralAzure + assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set" + return MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) + + +class TestAzureChatComplete: + """Test synchronous chat completion.""" + + def test_basic_completion(self, azure_client): + """Test basic chat completion returns a response.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_system_message(self, azure_client): + """Test chat completion with system + user message.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "system", "content": "You are a pirate. 
Respond in pirate speak."}, + {"role": "user", "content": "Say hello."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_max_tokens(self, azure_client): + """Test chat completion respects max_tokens.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + assert res is not None + assert res.choices[0].finish_reason in ("length", "stop") + + def test_completion_with_temperature(self, azure_client): + """Test chat completion accepts temperature parameter.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'test'."} + ], + temperature=0.0, + ) + assert res is not None + assert res.choices[0].message.content is not None + + def test_completion_with_stop_sequence(self, azure_client): + """Test chat completion stops at stop sequence.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Write three sentences about the sky."} + ], + stop=["."], + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + # The model should stop at or before the first period + assert content.count(".") <= 1 + + def test_completion_with_random_seed(self, azure_client): + """Test chat completion with random_seed returns valid responses.""" + res1 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + res2 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + # Both should return valid responses (not asserting equality due to model non-determinism) + assert res1.choices[0].message.content is not None + assert res2.choices[0].message.content is not None + 
+ def test_multi_turn_conversation(self, azure_client): + """Test multi-turn conversation with user/assistant round-trip.""" + res1 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."} + ], + ) + assert res1.choices[0].message.content is not None + + res2 = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."}, + {"role": "assistant", "content": res1.choices[0].message.content}, + {"role": "user", "content": "What is my name?"}, + ], + ) + assert res2.choices[0].message.content is not None + assert "Alice" in res2.choices[0].message.content + + def test_tool_call(self, azure_client): + """Test that the model returns a tool call when given tools.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + tool_call = choice.message.tool_calls[0] + assert tool_call.function.name == "get_weather" + args = json.loads(tool_call.function.arguments) + assert "city" in args + + def test_json_response_format(self, azure_client): + """Test JSON response format returns valid JSON.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Return a JSON object with a key 'greeting' and value 'hello'."} + ], + response_format={"type": "json_object"}, + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + parsed = json.loads(content) + assert isinstance(parsed, dict) + + def test_completion_with_n(self, azure_client): + """Test completion with n=2 returns multiple choices.""" + res = azure_client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say a random word."} + ], 
+ n=2, + ) + assert res is not None + assert len(res.choices) == 2 + for choice in res.choices: + assert choice.message.content is not None + + +class TestAzureChatStream: + """Test streaming chat completion.""" + + def test_basic_stream(self, azure_client): + """Test streaming returns chunks with content.""" + stream = azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + def test_stream_with_max_tokens(self, azure_client): + """Test streaming respects max_tokens truncation.""" + stream = azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Find finish_reason in any chunk + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] in ("length", "stop") + + def test_stream_finish_reason(self, azure_client): + """Test that the last chunk has a finish_reason.""" + stream = azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hi'."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # The final chunk(s) should contain a finish_reason + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] == "stop" + + def test_stream_tool_call(self, azure_client): + """Test tool call via streaming, collecting tool_call delta chunks.""" + stream = 
azure_client.chat.stream( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Collect tool call information from delta chunks + tool_call_found = False + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.tool_calls: + tool_call_found = True + break + + assert tool_call_found, "Expected tool_call delta chunks in stream" + + +class TestAzureChatCompleteAsync: + """Test async chat completion.""" + + @pytest.mark.asyncio + async def test_basic_completion_async(self, azure_client): + """Test async chat completion returns a response.""" + res = await azure_client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_completion_with_system_message_async(self, azure_client): + """Test async chat completion with system + user message.""" + res = await azure_client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Say 'hello'."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_tool_call_async(self, azure_client): + """Test async tool call returns tool_calls.""" + res = await azure_client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + assert choice.message.tool_calls[0].function.name == "get_weather" + + 
+class TestAzureChatStreamAsync: + """Test async streaming chat completion.""" + + @pytest.mark.asyncio + async def test_basic_stream_async(self, azure_client): + """Test async streaming returns chunks with content.""" + stream = await azure_client.chat.stream_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + + content = "" + async for chunk in stream: + if chunk.data.choices and chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + +class TestAzureContextManager: + """Test context manager support.""" + + def test_sync_context_manager(self): + """Test that MistralAzure works as a sync context manager.""" + from mistralai.azure.client import MistralAzure + assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set" + with MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) as client: + res = client.chat.complete( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_async_context_manager(self): + """Test that MistralAzure works as an async context manager.""" + from mistralai.azure.client import MistralAzure + assert AZURE_API_KEY is not None, "AZURE_API_KEY must be set" + async with MistralAzure( + api_key=AZURE_API_KEY, + server_url=AZURE_ENDPOINT, + api_version=AZURE_API_VERSION, + ) as client: + res = await client.chat.complete_async( + model=AZURE_MODEL, + messages=[ + {"role": "user", "content": "Say 'async context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None diff --git a/tests/test_azure_v2_parity.py b/tests/test_azure_v2_parity.py new file mode 100644 index 00000000..8cd89bf4 --- /dev/null +++ b/tests/test_azure_v2_parity.py @@ -0,0 +1,269 @@ +""" +Parity tests for the Azure v2 
SDK. + +Verifies that the regenerated mistralai.azure package exposes +the same public API surface as the v1 mistralai_azure package. +Uses introspection only — no API calls or credentials required. +""" +import inspect + +import pytest + +from mistralai.azure.client import MistralAzure +from mistralai.azure.client.chat import Chat +from mistralai.azure.client.ocr import Ocr +from mistralai.azure.client.types import UNSET + +AZURE_METHODS: dict[str, set[str]] = { + "chat": {"complete", "stream"}, + "ocr": {"process"}, +} + +TESTED_METHODS: set[str] = set() + +_EMPTY = inspect.Parameter.empty + + +def mark_tested(resource: str, method: str) -> None: + TESTED_METHODS.add(f"{resource}.{method}") + + +# --------------------------------------------------------------------------- +# Expected parameter specs: (name, expected_default) +# Use _EMPTY for required params, UNSET for OptionalNullable, None for Optional +# --------------------------------------------------------------------------- + +CONSTRUCTOR_PARAMS = [ + ("api_key", _EMPTY), + ("server", None), + ("server_url", None), + ("url_params", None), + ("client", None), + ("async_client", None), + ("retry_config", UNSET), + ("timeout_ms", None), + ("debug_logger", None), + ("api_version", "2024-05-01-preview"), +] + +CHAT_COMPLETE_PARAMS = [ + ("messages", _EMPTY), + ("model", "azureai"), + ("temperature", UNSET), + ("top_p", None), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("response_format", None), + ("tools", UNSET), + ("tool_choice", None), + ("presence_penalty", None), + ("frequency_penalty", None), + ("n", UNSET), + ("prediction", None), + ("parallel_tool_calls", None), + ("prompt_mode", UNSET), + ("safe_prompt", None), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + +CHAT_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in CHAT_COMPLETE_PARAMS +] + 
+OCR_PROCESS_PARAMS = [ + ("model", _EMPTY), + ("document", _EMPTY), + ("id", None), + ("pages", UNSET), + ("include_image_base64", UNSET), + ("image_limit", UNSET), + ("image_min_size", UNSET), + ("bbox_annotation_format", UNSET), + ("document_annotation_format", UNSET), + ("document_annotation_prompt", UNSET), + ("table_format", UNSET), + ("extract_header", None), + ("extract_footer", None), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestAzureSDKStructure: + def test_sdk_has_chat(self): + assert "chat" in MistralAzure.__annotations__ + + def test_sdk_has_ocr(self): + assert "ocr" in MistralAzure.__annotations__ + + @pytest.mark.parametrize("param_name,expected_default", CONSTRUCTOR_PARAMS) + def test_constructor_param(self, param_name, expected_default): + sig = inspect.signature(MistralAzure.__init__) + assert param_name in sig.parameters, f"Missing constructor param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Constructor param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + @pytest.mark.parametrize("method", ["__enter__", "__exit__", "__aenter__", "__aexit__"]) + def test_context_manager_support(self, method): + assert hasattr(MistralAzure, method), f"MistralAzure missing {method}" + + +class TestAzureChat: + def test_has_complete(self): + assert hasattr(Chat, "complete") + mark_tested("chat", "complete") + + def test_has_complete_async(self): + assert hasattr(Chat, "complete_async") + mark_tested("chat", "complete_async") + + def test_has_stream(self): + assert hasattr(Chat, "stream") + mark_tested("chat", "stream") + + def test_has_stream_async(self): + assert hasattr(Chat, "stream_async") + mark_tested("chat", "stream_async") + + # -- complete 
params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete) + assert param_name in sig.parameters, f"Chat.complete missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream) + assert param_name in sig.parameters, f"Chat.stream missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- complete_async matches complete -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete_async) + assert param_name in sig.parameters, f"Chat.complete_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream_async matches stream -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream_async) + assert param_name in sig.parameters, f"Chat.stream_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def 
test_complete_async_matches_complete(self): + sync_params = set(inspect.signature(Chat.complete).parameters) - {"self"} + async_params = set(inspect.signature(Chat.complete_async).parameters) - {"self"} + assert sync_params == async_params + + def test_stream_async_matches_stream(self): + sync_params = set(inspect.signature(Chat.stream).parameters) - {"self"} + async_params = set(inspect.signature(Chat.stream_async).parameters) - {"self"} + assert sync_params == async_params + + # -- key defaults -- + def test_complete_model_defaults_azureai(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["model"].default == "azureai" + + def test_stream_model_defaults_azureai(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["model"].default == "azureai" + + def test_complete_stream_defaults_false(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["stream"].default is False + + def test_stream_stream_defaults_true(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["stream"].default is True + + +class TestAzureOcr: + def test_has_process(self): + assert hasattr(Ocr, "process") + mark_tested("ocr", "process") + + def test_has_process_async(self): + assert hasattr(Ocr, "process_async") + mark_tested("ocr", "process_async") + + # -- process params -- + @pytest.mark.parametrize("param_name,expected_default", OCR_PROCESS_PARAMS) + def test_process_has_param(self, param_name, expected_default): + sig = inspect.signature(Ocr.process) + assert param_name in sig.parameters, f"Ocr.process missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Ocr.process param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- process_async matches process -- + @pytest.mark.parametrize("param_name,expected_default", OCR_PROCESS_PARAMS) + def test_process_async_has_param(self, param_name, expected_default): + sig = 
inspect.signature(Ocr.process_async) + assert param_name in sig.parameters, f"Ocr.process_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Ocr.process_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_process_async_matches_process(self): + sync_params = set(inspect.signature(Ocr.process).parameters) - {"self"} + async_params = set(inspect.signature(Ocr.process_async).parameters) - {"self"} + assert sync_params == async_params + + +class TestAzureCoverage: + def test_all_methods_tested(self): + expected = set() + for resource, methods in AZURE_METHODS.items(): + for method in methods: + expected.add(f"{resource}.{method}") + expected.add(f"{resource}.{method}_async") + untested = expected - TESTED_METHODS + assert not untested, f"Untested methods: {untested}" + + def test_no_unexpected_public_methods_on_chat(self): + public = {m for m in dir(Chat) if not m.startswith("_") and callable(getattr(Chat, m, None))} + known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Chat methods: {unexpected}" + + def test_no_unexpected_public_methods_on_ocr(self): + public = {m for m in dir(Ocr) if not m.startswith("_") and callable(getattr(Ocr, m, None))} + known = {"process", "process_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Ocr methods: {unexpected}" diff --git a/tests/test_gcp_integration.py b/tests/test_gcp_integration.py new file mode 100644 index 00000000..fe24b8b0 --- /dev/null +++ b/tests/test_gcp_integration.py @@ -0,0 +1,512 @@ +""" +Integration tests for GCP SDK. + +These tests require GCP credentials and make real API calls. +Skip if GCP_PROJECT_ID env var is not set. + +Prerequisites: + 1. 
Authenticate with GCP: gcloud auth application-default login + 2. Have "Vertex AI User" role on the project (e.g. model-garden-420509) + +The SDK automatically: + - Detects credentials via google.auth.default() + - Auto-refreshes tokens when they expire + - Builds the Vertex AI URL from project_id and region + +Available models: + - Chat: mistral-small-2503, mistral-large-2501, ... + - FIM: codestral-2 + See: https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/mistral + +Usage: + GCP_PROJECT_ID=model-garden-420509 pytest tests/test_gcp_integration.py -v + +Environment variables: + GCP_PROJECT_ID: GCP project ID (required, or auto-detected from credentials) + GCP_REGION: Vertex AI region (default: us-central1) + GCP_MODEL: Model name for chat (default: mistral-small-2503) + GCP_FIM_MODEL: Model name for FIM (default: codestral-2) + +""" +import json +import os + +import pytest + +# Configuration from env vars +GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID") +GCP_REGION = os.environ.get("GCP_REGION", "us-central1") +GCP_MODEL = os.environ.get("GCP_MODEL", "mistral-small-2503") +GCP_FIM_MODEL = os.environ.get("GCP_FIM_MODEL", "codestral-2") + +SKIP_REASON = "GCP_PROJECT_ID env var required" + +pytestmark = pytest.mark.skipif( + not GCP_PROJECT_ID, + reason=SKIP_REASON +) + +# Shared tool definition for tool-call tests +WEATHER_TOOL = { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the weather in a city", + "parameters": { + "type": "object", + "properties": {"city": {"type": "string"}}, + "required": ["city"], + }, + }, +} + + +@pytest.fixture +def gcp_client(): + """Create a GCP client for chat tests. 
+ + The SDK automatically: + - Detects credentials via google.auth.default() + - Auto-refreshes tokens when they expire + - Builds the Vertex AI URL from project_id and region + """ + from mistralai.gcp.client import MistralGCP + return MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) + + +class TestGCPChatComplete: + """Test synchronous chat completion.""" + + def test_basic_completion(self, gcp_client): + """Test basic chat completion returns a response.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_system_message(self, gcp_client): + """Test chat completion with system + user message.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "system", "content": "You are a pirate. 
Respond in pirate speak."}, + {"role": "user", "content": "Say hello."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + assert len(res.choices[0].message.content) > 0 + + def test_completion_with_max_tokens(self, gcp_client): + """Test chat completion respects max_tokens.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + assert res is not None + assert res.choices[0].finish_reason in ("length", "stop") + + def test_completion_with_temperature(self, gcp_client): + """Test chat completion accepts temperature parameter.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'test'."} + ], + temperature=0.0, + ) + assert res is not None + assert res.choices[0].message.content is not None + + def test_completion_with_stop_sequence(self, gcp_client): + """Test chat completion stops at stop sequence.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Write three sentences about the sky."} + ], + stop=["."], + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + # The model should stop at or before the first period + assert content.count(".") <= 1 + + def test_completion_with_random_seed(self, gcp_client): + """Test chat completion with random_seed returns valid responses.""" + res1 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + res2 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'deterministic'."} + ], + random_seed=42, + ) + # Both should return valid responses (not asserting equality due to model non-determinism) + assert res1.choices[0].message.content is not None + assert res2.choices[0].message.content is not None + + def 
test_multi_turn_conversation(self, gcp_client): + """Test multi-turn conversation with user/assistant round-trip.""" + res1 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."} + ], + ) + assert res1.choices[0].message.content is not None + + res2 = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "My name is Alice."}, + {"role": "assistant", "content": res1.choices[0].message.content}, + {"role": "user", "content": "What is my name?"}, + ], + ) + assert res2.choices[0].message.content is not None + assert "Alice" in res2.choices[0].message.content + + def test_tool_call(self, gcp_client): + """Test that the model returns a tool call when given tools.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + tool_call = choice.message.tool_calls[0] + assert tool_call.function.name == "get_weather" + args = json.loads(tool_call.function.arguments) + assert "city" in args + + def test_json_response_format(self, gcp_client): + """Test JSON response format returns valid JSON.""" + res = gcp_client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Return a JSON object with a key 'greeting' and value 'hello'."} + ], + response_format={"type": "json_object"}, + ) + assert res is not None + content = res.choices[0].message.content + assert content is not None + parsed = json.loads(content) + assert isinstance(parsed, dict) + + +class TestGCPChatStream: + """Test streaming chat completion.""" + + def test_basic_stream(self, gcp_client): + """Test streaming returns chunks with content.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": 
"Say 'hello' and nothing else."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + def test_stream_with_max_tokens(self, gcp_client): + """Test streaming respects max_tokens truncation.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Count from 1 to 100."} + ], + max_tokens=10, + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Find finish_reason in any chunk + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] in ("length", "stop") + + def test_stream_finish_reason(self, gcp_client): + """Test that the last chunk has a finish_reason.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hi'."} + ], + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # The final chunk(s) should contain a finish_reason + finish_reasons = [ + chunk.data.choices[0].finish_reason + for chunk in chunks + if chunk.data.choices and chunk.data.choices[0].finish_reason is not None + ] + assert len(finish_reasons) > 0 + assert finish_reasons[-1] == "stop" + + def test_stream_tool_call(self, gcp_client): + """Test tool call via streaming, collecting tool_call delta chunks.""" + stream = gcp_client.chat.stream( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + + chunks = list(stream) + assert len(chunks) > 0 + + # Collect tool call information from delta chunks + tool_call_found = False + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.tool_calls: + tool_call_found = True + break + + 
assert tool_call_found, "Expected tool_call delta chunks in stream" + + +class TestGCPChatCompleteAsync: + """Test async chat completion.""" + + @pytest.mark.asyncio + async def test_basic_completion_async(self, gcp_client): + """Test async chat completion returns a response.""" + res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_completion_with_system_message_async(self, gcp_client): + """Test async chat completion with system + user message.""" + res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Say 'hello'."}, + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_tool_call_async(self, gcp_client): + """Test async tool call returns tool_calls.""" + res = await gcp_client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "What is the weather in Paris?"} + ], + tools=[WEATHER_TOOL], + tool_choice="any", + ) + assert res is not None + choice = res.choices[0] + assert choice.message.tool_calls is not None + assert len(choice.message.tool_calls) > 0 + assert choice.message.tool_calls[0].function.name == "get_weather" + + +class TestGCPChatStreamAsync: + """Test async streaming chat completion.""" + + @pytest.mark.asyncio + async def test_basic_stream_async(self, gcp_client): + """Test async streaming returns chunks with content.""" + stream = await gcp_client.chat.stream_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'hello' and nothing else."} + ], + ) + + content = "" + async for chunk in stream: + if chunk.data.choices and 
chunk.data.choices[0].delta.content: + content += chunk.data.choices[0].delta.content + + assert len(content) > 0 + + +class TestGCPContextManager: + """Test context manager support.""" + + def test_sync_context_manager(self): + """Test that MistralGCP works as a sync context manager.""" + from mistralai.gcp.client import MistralGCP + with MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) as client: + res = client.chat.complete( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_async_context_manager(self): + """Test that MistralGCP works as an async context manager.""" + from mistralai.gcp.client import MistralGCP + async with MistralGCP( + project_id=GCP_PROJECT_ID, + region=GCP_REGION, + ) as client: + res = await client.chat.complete_async( + model=GCP_MODEL, + messages=[ + {"role": "user", "content": "Say 'async context'."} + ], + ) + assert res is not None + assert res.choices[0].message.content is not None + + +class TestGCPFIM: + """Test FIM (Fill-in-the-middle) completion.""" + + def _make_fim_client(self): + """Create a GCP client configured for FIM model.""" + from mistralai.gcp.client import MistralGCP + return MistralGCP(project_id=GCP_PROJECT_ID, region=GCP_REGION) + + def test_fim_complete(self): + """Test FIM completion returns a response.""" + client = self._make_fim_client() + res = client.fim.complete( + model=GCP_FIM_MODEL, + prompt="def fib():", + suffix=" return result", + timeout_ms=10000, + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + def test_fim_stream(self): + """Test FIM streaming returns chunks.""" + client = self._make_fim_client() + stream = client.fim.stream( + model=GCP_FIM_MODEL, + prompt="def hello():", + suffix=" return greeting", + timeout_ms=10000, + ) + 
chunks = list(stream) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + delta_content = chunk.data.choices[0].delta.content + if isinstance(delta_content, str): + content += delta_content + assert len(content) > 0 + + def test_fim_with_max_tokens(self): + """Test FIM completion with max_tokens.""" + client = self._make_fim_client() + res = client.fim.complete( + model=GCP_FIM_MODEL, + prompt="def add(a, b):", + suffix=" return result", + max_tokens=10, + timeout_ms=10000, + ) + assert res is not None + assert res.choices[0].finish_reason in ("length", "stop") + + @pytest.mark.asyncio + async def test_fim_complete_async(self): + """Test async FIM completion returns a response.""" + client = self._make_fim_client() + res = await client.fim.complete_async( + model=GCP_FIM_MODEL, + prompt="def fib():", + suffix=" return result", + timeout_ms=10000, + ) + assert res is not None + assert res.choices is not None + assert len(res.choices) > 0 + assert res.choices[0].message.content is not None + + @pytest.mark.asyncio + async def test_fim_stream_async(self): + """Test async FIM streaming returns chunks.""" + client = self._make_fim_client() + stream = await client.fim.stream_async( + model=GCP_FIM_MODEL, + prompt="def hello():", + suffix=" return greeting", + timeout_ms=10000, + ) + chunks = [] + async for chunk in stream: + chunks.append(chunk) + assert len(chunks) > 0 + + content = "" + for chunk in chunks: + if chunk.data.choices and chunk.data.choices[0].delta.content: + delta_content = chunk.data.choices[0].delta.content + if isinstance(delta_content, str): + content += delta_content + assert len(content) > 0 diff --git a/tests/test_gcp_v2_parity.py b/tests/test_gcp_v2_parity.py new file mode 100644 index 00000000..0d6471e4 --- /dev/null +++ b/tests/test_gcp_v2_parity.py @@ -0,0 +1,330 @@ +""" +Parity tests for the GCP v2 SDK. 
+ +Verifies that the regenerated mistralai.gcp package exposes +the same public API surface as the v1 mistralai_gcp package. +Uses introspection only — no API calls or credentials required. +""" +import inspect + +import pytest + +from mistralai.gcp.client import MistralGCP +from mistralai.gcp.client.chat import Chat +from mistralai.gcp.client.fim import Fim +from mistralai.gcp.client.types import UNSET + +GCP_METHODS: dict[str, set[str]] = { + "chat": {"complete", "stream"}, + "fim": {"complete", "stream"}, +} + +TESTED_METHODS: set[str] = set() + +_EMPTY = inspect.Parameter.empty + + +def mark_tested(resource: str, method: str) -> None: + TESTED_METHODS.add(f"{resource}.{method}") + + +# --------------------------------------------------------------------------- +# Expected parameter specs: (name, expected_default) +# Use _EMPTY for required params, UNSET for OptionalNullable, None for Optional +# --------------------------------------------------------------------------- + +CONSTRUCTOR_PARAMS = [ + ("project_id", None), + ("region", "europe-west4"), + ("access_token", None), + ("server", None), + ("server_url", None), + ("url_params", None), + ("client", None), + ("async_client", None), + ("retry_config", UNSET), + ("timeout_ms", None), + ("debug_logger", None), +] + +CHAT_COMPLETE_PARAMS = [ + ("model", _EMPTY), + ("messages", _EMPTY), + ("temperature", UNSET), + ("top_p", None), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("response_format", None), + ("tools", UNSET), + ("tool_choice", None), + ("presence_penalty", None), + ("frequency_penalty", None), + ("n", UNSET), + ("prediction", None), + ("parallel_tool_calls", None), + ("prompt_mode", UNSET), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + +CHAT_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in CHAT_COMPLETE_PARAMS +] + +FIM_COMPLETE_PARAMS = 
[ + ("model", _EMPTY), + ("prompt", _EMPTY), + ("temperature", UNSET), + ("top_p", 1), + ("max_tokens", UNSET), + ("stream", False), + ("stop", None), + ("random_seed", UNSET), + ("metadata", UNSET), + ("suffix", UNSET), + ("min_tokens", UNSET), + ("retries", UNSET), + ("server_url", None), + ("timeout_ms", None), + ("http_headers", None), +] + +FIM_STREAM_PARAMS = [ + (name, True if name == "stream" else default) + for name, default in FIM_COMPLETE_PARAMS +] + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestGCPSDKStructure: + def test_sdk_has_chat(self): + assert "chat" in MistralGCP.__annotations__ + + def test_sdk_has_fim(self): + assert "fim" in MistralGCP.__annotations__ + + @pytest.mark.parametrize("param_name,expected_default", CONSTRUCTOR_PARAMS) + def test_constructor_param(self, param_name, expected_default): + sig = inspect.signature(MistralGCP.__init__) + assert param_name in sig.parameters, f"Missing constructor param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Constructor param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + @pytest.mark.parametrize("method", ["__enter__", "__exit__", "__aenter__", "__aexit__"]) + def test_context_manager_support(self, method): + assert hasattr(MistralGCP, method), f"MistralGCP missing {method}" + + +class TestGCPChat: + def test_has_complete(self): + assert hasattr(Chat, "complete") + mark_tested("chat", "complete") + + def test_has_complete_async(self): + assert hasattr(Chat, "complete_async") + mark_tested("chat", "complete_async") + + def test_has_stream(self): + assert hasattr(Chat, "stream") + mark_tested("chat", "stream") + + def test_has_stream_async(self): + assert hasattr(Chat, "stream_async") + mark_tested("chat", "stream_async") + + # -- complete params -- + 
@pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete) + assert param_name in sig.parameters, f"Chat.complete missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream params -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream) + assert param_name in sig.parameters, f"Chat.stream missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- complete_async matches complete -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_COMPLETE_PARAMS) + def test_complete_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.complete_async) + assert param_name in sig.parameters, f"Chat.complete_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream_async matches stream -- + @pytest.mark.parametrize("param_name,expected_default", CHAT_STREAM_PARAMS) + def test_stream_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Chat.stream_async) + assert param_name in sig.parameters, f"Chat.stream_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Chat.stream_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_complete_async_matches_complete(self): 
+ sync_params = set(inspect.signature(Chat.complete).parameters) - {"self"} + async_params = set(inspect.signature(Chat.complete_async).parameters) - {"self"} + assert sync_params == async_params + + def test_stream_async_matches_stream(self): + sync_params = set(inspect.signature(Chat.stream).parameters) - {"self"} + async_params = set(inspect.signature(Chat.stream_async).parameters) - {"self"} + assert sync_params == async_params + + # -- key defaults -- + def test_complete_model_required(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["model"].default is _EMPTY + + def test_stream_model_required(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["model"].default is _EMPTY + + def test_complete_stream_defaults_false(self): + sig = inspect.signature(Chat.complete) + assert sig.parameters["stream"].default is False + + def test_stream_stream_defaults_true(self): + sig = inspect.signature(Chat.stream) + assert sig.parameters["stream"].default is True + + +class TestGCPFim: + def test_has_complete(self): + assert hasattr(Fim, "complete") + mark_tested("fim", "complete") + + def test_has_complete_async(self): + assert hasattr(Fim, "complete_async") + mark_tested("fim", "complete_async") + + def test_has_stream(self): + assert hasattr(Fim, "stream") + mark_tested("fim", "stream") + + def test_has_stream_async(self): + assert hasattr(Fim, "stream_async") + mark_tested("fim", "stream_async") + + # -- complete params -- + @pytest.mark.parametrize("param_name,expected_default", FIM_COMPLETE_PARAMS) + def test_complete_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.complete) + assert param_name in sig.parameters, f"Fim.complete missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.complete param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream params -- + 
@pytest.mark.parametrize("param_name,expected_default", FIM_STREAM_PARAMS) + def test_stream_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.stream) + assert param_name in sig.parameters, f"Fim.stream missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.stream param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- complete_async matches complete -- + @pytest.mark.parametrize("param_name,expected_default", FIM_COMPLETE_PARAMS) + def test_complete_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.complete_async) + assert param_name in sig.parameters, f"Fim.complete_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.complete_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- stream_async matches stream -- + @pytest.mark.parametrize("param_name,expected_default", FIM_STREAM_PARAMS) + def test_stream_async_has_param(self, param_name, expected_default): + sig = inspect.signature(Fim.stream_async) + assert param_name in sig.parameters, f"Fim.stream_async missing param: {param_name}" + actual = sig.parameters[param_name].default + assert actual == expected_default, ( + f"Fim.stream_async param {param_name}: expected {expected_default!r}, got {actual!r}" + ) + + # -- sync/async parity -- + def test_complete_async_matches_complete(self): + sync_params = set(inspect.signature(Fim.complete).parameters) - {"self"} + async_params = set(inspect.signature(Fim.complete_async).parameters) - {"self"} + assert sync_params == async_params + + def test_stream_async_matches_stream(self): + sync_params = set(inspect.signature(Fim.stream).parameters) - {"self"} + async_params = set(inspect.signature(Fim.stream_async).parameters) - {"self"} + assert sync_params == async_params + + # -- key defaults -- + def 
test_complete_model_required(self): + sig = inspect.signature(Fim.complete) + assert sig.parameters["model"].default is _EMPTY + + def test_stream_model_required(self): + sig = inspect.signature(Fim.stream) + assert sig.parameters["model"].default is _EMPTY + + def test_complete_stream_defaults_false(self): + sig = inspect.signature(Fim.complete) + assert sig.parameters["stream"].default is False + + def test_stream_stream_defaults_true(self): + sig = inspect.signature(Fim.stream) + assert sig.parameters["stream"].default is True + + def test_complete_top_p_defaults_to_1(self): + sig = inspect.signature(Fim.complete) + assert sig.parameters["top_p"].default == 1 + + def test_stream_top_p_defaults_to_1(self): + sig = inspect.signature(Fim.stream) + assert sig.parameters["top_p"].default == 1 + + +class TestGCPCoverage: + def test_all_methods_tested(self): + expected = set() + for resource, methods in GCP_METHODS.items(): + for method in methods: + expected.add(f"{resource}.{method}") + expected.add(f"{resource}.{method}_async") + untested = expected - TESTED_METHODS + assert not untested, f"Untested methods: {untested}" + + def test_no_unexpected_public_methods_on_chat(self): + public = {m for m in dir(Chat) if not m.startswith("_") and callable(getattr(Chat, m, None))} + known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Chat methods: {unexpected}" + + def test_no_unexpected_public_methods_on_fim(self): + public = {m for m in dir(Fim) if not m.startswith("_") and callable(getattr(Fim, m, None))} + known = {"complete", "complete_async", "stream", "stream_async", "do_request", "do_request_async"} + unexpected = public - known + assert not unexpected, f"Unexpected Fim methods: {unexpected}" diff --git a/uv.lock b/uv.lock index 1e667c77..8c689c4a 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = 
"2.0.0a2" +version = "2.0.0a4" source = { editable = "." } dependencies = [ { name = "eval-type-backport" }, @@ -626,7 +626,7 @@ requires-dist = [ { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.37.0,<2.0.0" }, { name = "opentelemetry-sdk", specifier = ">=1.33.1,<2.0.0" }, { name = "opentelemetry-semantic-conventions", specifier = ">=0.59b0,<0.61" }, - { name = "pydantic", specifier = ">=2.10.3" }, + { name = "pydantic", specifier = ">=2.11.2" }, { name = "python-dateutil", specifier = ">=2.8.2" }, { name = "pyyaml", specifier = ">=6.0.2,<7.0.0" }, { name = "requests", marker = "extra == 'gcp'", specifier = ">=2.32.3" }, From 72242df801a66c287a408bf47baf03bf993a49f3 Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Wed, 25 Feb 2026 18:27:20 +0100 Subject: [PATCH 209/223] feat: update Speakeasy gen.yaml configuration (#370) * chore: update gen.yaml with recommended v2 Speakeasy configs - Add input and dir to allowedRedefinedBuiltins - Add asyncPaginationSep2025 and conflictResistantModelImportsFeb2026 fixFlags - Set imports.paths.errors to "errors" - Increase maxMethodParams from 15 to 999 - Change multipartArrayFormat from legacy to standard * chore: sync pylintrc allowed-redefined-builtins with gen.yaml Add input and dir to match allowedRedefinedBuiltins in gen.yaml config. 
* chore: regenerate spec * fix: update code for renamed model classes - Filter UnknownAgentTool in agent update (context.py) - Update examples for renamed classes: - BatchRequest, UserMessage: import from models module - ClassifierTrainingParametersIn -> ClassifierTrainingParameters - ClassifierJobOut -> ClassifierFineTuningJob - Add type narrowing for ClassifierFineTuningJobDetails --- .speakeasy/gen.lock | 2879 ++++++++--------- .speakeasy/gen.yaml | 20 +- .speakeasy/workflow.lock | 18 +- .speakeasy/workflow.yaml | 2 +- Makefile | 12 +- README.md | 46 +- USAGE.md | 8 +- .../{models => errors}/httpvalidationerror.md | 0 docs/models/agent.md | 2 +- docs/models/agentconversation.md | 2 +- docs/models/agentconversationobject.md | 8 - docs/models/agentcreationrequest.md | 16 - docs/models/agenthandoffentry.md | 22 +- docs/models/agenthandoffentryobject.md | 8 - docs/models/agenthandoffentrytype.md | 8 - docs/models/agentobject.md | 8 - ...sapiv1agentscreateorupdatealiasrequest.md} | 2 +- ...=> agentsapiv1agentsdeletealiasrequest.md} | 2 +- ...t.md => agentsapiv1agentsdeleterequest.md} | 2 +- ...md => agentsapiv1agentsgetagentversion.md} | 2 +- docs/models/agentsapiv1agentsgetrequest.md | 9 + ... => agentsapiv1agentsgetversionrequest.md} | 2 +- ...est.md => agentsapiv1agentslistrequest.md} | 2 +- ...tsapiv1agentslistversionaliasesrequest.md} | 2 +- ...> agentsapiv1agentslistversionsrequest.md} | 2 +- docs/models/agentsapiv1agentsupdaterequest.md | 9 + ... agentsapiv1agentsupdateversionrequest.md} | 2 +- ... agentsapiv1conversationsappendrequest.md} | 2 +- ...sapiv1conversationsappendstreamrequest.md} | 2 +- ... agentsapiv1conversationsdeleterequest.md} | 2 +- ... 
=> agentsapiv1conversationsgetrequest.md} | 2 +- ...agentsapiv1conversationshistoryrequest.md} | 2 +- ...=> agentsapiv1conversationslistrequest.md} | 2 +- ...> agentsapiv1conversationslistresponse.md} | 2 +- ...gentsapiv1conversationsmessagesrequest.md} | 2 +- ...agentsapiv1conversationsrestartrequest.md} | 2 +- ...apiv1conversationsrestartstreamrequest.md} | 2 +- docs/models/agentscompletionrequest.md | 2 +- docs/models/agentscompletionstreamrequest.md | 2 +- ...eftmodelout.md => archivemodelresponse.md} | 2 +- docs/models/assistantmessage.md | 4 +- docs/models/assistantmessagerole.md | 8 - docs/models/audiochunk.md | 4 +- docs/models/audiotranscriptionrequest.md | 2 +- docs/models/{batchjobout.md => batchjob.md} | 2 +- docs/models/batchjobsout.md | 10 - docs/models/cancelfinetuningjobresponse.md | 19 - docs/models/chatclassificationrequest.md | 2 +- docs/models/chatcompletionrequest.md | 2 +- docs/models/chatcompletionstreamrequest.md | 2 +- .../{checkpointout.md => checkpoint.md} | 4 +- .../classifierdetailedjoboutintegration.md | 11 - docs/models/classifierfinetunedmodel.md | 23 + ...erjobout.md => classifierfinetuningjob.md} | 8 +- ...t.md => classifierfinetuningjobdetails.md} | 14 +- ...assifierfinetuningjobdetailsintegration.md | 11 + ...> classifierfinetuningjobdetailsstatus.md} | 2 +- .../classifierfinetuningjobintegration.md | 11 + ...us.md => classifierfinetuningjobstatus.md} | 2 +- docs/models/classifierftmodelout.md | 23 - docs/models/classifierjoboutintegration.md | 11 - ...ssifiertargetin.md => classifiertarget.md} | 2 +- ...targetout.md => classifiertargetresult.md} | 2 +- docs/models/classifiertrainingparametersin.md | 15 - docs/models/codeinterpretertool.md | 7 +- docs/models/completionargs.md | 24 +- .../completiondetailedjoboutintegration.md | 11 - .../completiondetailedjoboutrepository.md | 11 - docs/models/completionfinetunedmodel.md | 22 + ...onjobout.md => completionfinetuningjob.md} | 10 +- ...t.md => completionfinetuningjobdetails.md} | 14 
+- ...mpletionfinetuningjobdetailsintegration.md | 11 + ...ompletionfinetuningjobdetailsrepository.md | 11 + ...> completionfinetuningjobdetailsstatus.md} | 2 +- .../completionfinetuningjobintegration.md | 11 + .../completionfinetuningjobrepository.md | 11 + ...us.md => completionfinetuningjobstatus.md} | 2 +- docs/models/completionftmodelout.md | 22 - docs/models/completionjoboutintegration.md | 11 - docs/models/completionjoboutrepository.md | 11 - docs/models/completiontrainingparametersin.md | 16 - docs/models/confirmation.md | 9 + docs/models/conversationappendrequest.md | 5 +- .../models/conversationappendstreamrequest.md | 5 +- docs/models/conversationhistory.md | 10 +- docs/models/conversationhistoryobject.md | 8 - docs/models/conversationmessages.md | 10 +- docs/models/conversationmessagesobject.md | 8 - docs/models/conversationresponse.md | 12 +- docs/models/conversationresponseobject.md | 8 - docs/models/conversationrestartrequest.md | 2 +- .../conversationrestartstreamrequest.md | 2 +- docs/models/conversationthinkchunk.md | 10 + docs/models/conversationthinkchunkthinking.md | 17 + ...updaterequest.md => createagentrequest.md} | 9 +- ...questtool.md => createagentrequesttool.md} | 2 +- ...batchjobin.md => createbatchjobrequest.md} | 4 +- ...uploadfileout.md => createfileresponse.md} | 2 +- ...jobin.md => createfinetuningjobrequest.md} | 8 +- ... 
createfinetuningjobrequestintegration.md} | 2 +- ...> createfinetuningjobrequestrepository.md} | 2 +- docs/models/createfinetuningjobresponse.md | 19 - .../{libraryin.md => createlibraryrequest.md} | 2 +- ...deletefileout.md => deletefileresponse.md} | 2 +- ...eletemodelv1modelsmodeliddeleterequest.md} | 2 +- docs/models/document.md | 45 +- docs/models/documentlibrarytool.md | 9 +- docs/models/documentout.md | 26 - docs/models/documentunion.md | 25 + docs/models/documentupdatein.md | 9 - docs/models/documenturlchunk.md | 10 +- docs/models/documenturlchunktype.md | 8 - docs/models/{eventout.md => event.md} | 2 +- ....md => filesapiroutesdeletefilerequest.md} | 2 +- ...d => filesapiroutesdownloadfilerequest.md} | 2 +- ...d => filesapiroutesgetsignedurlrequest.md} | 2 +- ...t.md => filesapirouteslistfilesrequest.md} | 2 +- ...d => filesapiroutesretrievefilerequest.md} | 2 +- ...esout.md => finetunedmodelcapabilities.md} | 2 +- docs/models/functioncallentry.md | 23 +- .../functioncallentryconfirmationstatus.md | 10 + docs/models/functioncallentryobject.md | 8 - docs/models/functioncallentrytype.md | 8 - docs/models/functioncallevent.md | 21 +- .../functioncalleventconfirmationstatus.md | 10 + docs/models/functionresultentry.md | 18 +- docs/models/functionresultentryobject.md | 8 - docs/models/functionresultentrytype.md | 8 - docs/models/getdocumenttextcontentrequest.md | 9 - ...{retrievefileout.md => getfileresponse.md} | 2 +- docs/models/getfinetuningjobresponse.md | 19 - ...lesignedurl.md => getsignedurlresponse.md} | 2 +- ...ubrepositoryout.md => githubrepository.md} | 2 +- docs/models/hyperparameters.md | 8 +- docs/models/imagedetail.md | 10 + docs/models/imagegenerationtool.md | 7 +- docs/models/imageurl.md | 8 +- docs/models/imageurlchunk.md | 8 +- docs/models/imageurlchunktype.md | 8 - docs/models/inputs.md | 4 +- docs/models/inputsmessage.md | 29 - docs/models/instructrequestinputs.md | 8 - .../{jobmetadataout.md => jobmetadata.md} | 2 +- 
...obsapiroutesbatchcancelbatchjobrequest.md} | 2 +- ...> jobsapiroutesbatchgetbatchjobrequest.md} | 2 +- ... jobsapiroutesbatchgetbatchjobsrequest.md} | 2 +- ...finetuningarchivefinetunedmodelrequest.md} | 2 +- ...esfinetuningcancelfinetuningjobrequest.md} | 2 +- ...esfinetuningcancelfinetuningjobresponse.md | 19 + ...esfinetuningcreatefinetuningjobresponse.md | 19 + ...outesfinetuninggetfinetuningjobrequest.md} | 2 +- ...outesfinetuninggetfinetuningjobresponse.md | 19 + ...outesfinetuninggetfinetuningjobsrequest.md | 17 + ...outesfinetuninggetfinetuningjobsstatus.md} | 2 +- ...tesfinetuningstartfinetuningjobrequest.md} | 2 +- ...tesfinetuningstartfinetuningjobresponse.md | 19 + ...netuningunarchivefinetunedmodelrequest.md} | 2 +- ...esfinetuningupdatefinetunedmodelrequest.md | 9 + ...sfinetuningupdatefinetunedmodelresponse.md | 19 + docs/models/jobsout.md | 10 - docs/models/jobsoutdata.md | 17 - ...jobmetadataout.md => legacyjobmetadata.md} | 2 +- ...request.md => librariesdeletev1request.md} | 2 +- ...d => librariesdocumentsdeletev1request.md} | 2 +- ...mentsgetextractedtextsignedurlv1request.md | 9 + ...librariesdocumentsgetsignedurlv1request.md | 9 + ...> librariesdocumentsgetstatusv1request.md} | 2 +- ...rariesdocumentsgettextcontentv1request.md} | 2 +- ...t.md => librariesdocumentsgetv1request.md} | 2 +- ....md => librariesdocumentslistv1request.md} | 2 +- ...> librariesdocumentsreprocessv1request.md} | 2 +- .../librariesdocumentsupdatev1request.md | 10 + ...d => librariesdocumentsuploadv1request.md} | 2 +- ...aryrequest.md => librariesgetv1request.md} | 2 +- ...st.md => librariessharecreatev1request.md} | 2 +- ...st.md => librariessharedeletev1request.md} | 2 +- ...uest.md => librariessharelistv1request.md} | 2 +- docs/models/librariesupdatev1request.md | 9 + docs/models/{libraryout.md => library.md} | 2 +- docs/models/libraryinupdate.md | 9 - docs/models/listbatchjobsresponse.md | 10 + ...ocumentout.md => listdocumentsresponse.md} | 4 +- 
.../{listfilesout.md => listfilesresponse.md} | 2 +- docs/models/listfinetuningjobsrequest.md | 17 - docs/models/listfinetuningjobsresponse.md | 10 + docs/models/listfinetuningjobsresponsedata.md | 17 + docs/models/listlibrariesresponse.md | 8 + docs/models/listlibraryout.md | 8 - docs/models/messageinputcontentchunks.md | 4 +- docs/models/messageinputentry.md | 20 +- docs/models/messageinputentryobject.md | 8 - docs/models/messageinputentrytype.md | 8 - docs/models/messageoutputcontentchunks.md | 4 +- docs/models/messageoutputentry.md | 22 +- docs/models/messageoutputentryobject.md | 8 - docs/models/messageoutputentryrole.md | 8 - docs/models/messageoutputentrytype.md | 8 - docs/models/messageoutputevent.md | 22 +- docs/models/messageoutputeventrole.md | 8 - docs/models/{metricout.md => metric.md} | 2 +- docs/models/modelconversation.md | 26 +- docs/models/modelconversationobject.md | 8 - docs/models/ocrrequest.md | 30 +- docs/models/outputcontentchunks.md | 4 +- .../realtimetranscriptioninputaudioappend.md | 9 + .../realtimetranscriptioninputaudioend.md | 8 + .../realtimetranscriptioninputaudioflush.md | 8 + docs/models/realtimetranscriptionsession.md | 3 +- ...altimetranscriptionsessionupdatemessage.md | 9 + ...ltimetranscriptionsessionupdatepayload.md} | 6 +- docs/models/referencechunk.md | 8 +- docs/models/referencechunktype.md | 8 - docs/models/reprocessdocumentrequest.md | 9 - docs/models/response.md | 8 +- ...retrievemodelv1modelsmodelidgetrequest.md} | 2 +- .../{messageinputentryrole.md => role.md} | 2 +- docs/models/startfinetuningjobresponse.md | 19 - docs/models/systemmessage.md | 4 +- docs/models/textchunk.md | 8 +- docs/models/textchunktype.md | 8 - docs/models/thinkchunk.md | 6 +- .../{thinking.md => thinkchunkthinking.md} | 2 +- docs/models/thinkchunktype.md | 8 - docs/models/toolcallconfirmation.md | 9 + ...pdateftmodelin.md => toolconfiguration.md} | 7 +- docs/models/toolexecutionentry.md | 22 +- docs/models/toolexecutionentryobject.md | 8 - 
docs/models/toolexecutionentrytype.md | 8 - docs/models/toolexecutionstartedevent.md | 2 + docs/models/toolfilechunk.md | 14 +- docs/models/toolfilechunktype.md | 8 - docs/models/toolmessage.md | 4 +- docs/models/toolreferencechunk.md | 16 +- docs/models/toolreferencechunktype.md | 8 - docs/models/transcriptionsegmentchunk.md | 18 +- docs/models/transcriptionsegmentchunktype.md | 8 - .../models/transcriptionstreamsegmentdelta.md | 2 +- docs/models/transcriptionstreamtextdelta.md | 2 +- ...tmodelout.md => unarchivemodelresponse.md} | 2 +- docs/models/updateagentrequest.md | 16 +- ...questtool.md => updateagentrequesttool.md} | 2 +- docs/models/updatedocumentrequest.md | 9 +- docs/models/updatelibraryrequest.md | 8 +- docs/models/updatemodelrequest.md | 8 +- docs/models/updatemodelresponse.md | 19 - docs/models/usermessage.md | 4 +- ...rationout.md => wandbintegrationresult.md} | 2 +- docs/models/websearchpremiumtool.md | 7 +- docs/models/websearchtool.md | 7 +- docs/sdks/accesses/README.md | 24 +- docs/sdks/agents/README.md | 18 +- docs/sdks/batchjobs/README.md | 32 +- docs/sdks/betaagents/README.md | 110 +- docs/sdks/chat/README.md | 16 +- docs/sdks/classifiers/README.md | 33 +- docs/sdks/conversations/README.md | 95 +- docs/sdks/documents/README.md | 92 +- docs/sdks/embeddings/README.md | 6 +- docs/sdks/files/README.md | 42 +- docs/sdks/fim/README.md | 12 +- docs/sdks/finetuningjobs/README.md | 70 +- docs/sdks/libraries/README.md | 44 +- docs/sdks/models/README.md | 34 +- docs/sdks/ocr/README.md | 44 +- docs/sdks/transcriptions/README.md | 10 +- examples/mistral/audio/chat_streaming.py | 3 +- examples/mistral/audio/transcription_async.py | 3 +- .../audio/transcription_diarize_async.py | 3 +- .../audio/transcription_stream_async.py | 3 +- .../mistral/classifier/async_classifier.py | 10 +- .../async_batch_job_chat_completion_inline.py | 3 +- packages/azure/.speakeasy/gen.lock | 288 +- packages/azure/.speakeasy/gen.yaml | 20 +- .../{models => 
errors}/httpvalidationerror.md | 0 .../docs/models/chatcompletionrequest.md | 2 +- .../models/chatcompletionstreamrequest.md | 2 +- packages/azure/docs/models/ocrrequest.md | 30 +- packages/azure/pylintrc | 7 +- .../src/mistralai/azure/client/__init__.py | 1 - .../src/mistralai/azure/client/_version.py | 6 +- .../src/mistralai/azure/client/basesdk.py | 30 +- .../azure/src/mistralai/azure/client/chat.py | 42 +- .../mistralai/azure/client/errors/__init__.py | 39 + .../{models => errors}/httpvalidationerror.py | 6 +- .../{models => errors}/mistralazureerror.py | 0 .../{models => errors}/no_response_error.py | 0 .../responsevalidationerror.py | 2 +- .../client/{models => errors}/sdkerror.py | 2 +- .../mistralai/azure/client/models/__init__.py | 65 +- .../azure/client/models/assistantmessage.py | 39 +- .../client/models/chatcompletionrequest.py | 91 +- .../models/chatcompletionstreamrequest.py | 91 +- .../azure/client/models/completionchunk.py | 19 +- .../models/completionresponsestreamchoice.py | 18 +- .../azure/client/models/contentchunk.py | 42 +- .../azure/client/models/deltamessage.py | 33 +- .../azure/client/models/documenturlchunk.py | 39 +- .../azure/client/models/filechunk.py | 27 +- .../mistralai/azure/client/models/function.py | 19 +- .../mistralai/azure/client/models/imageurl.py | 33 +- .../azure/client/models/imageurlchunk.py | 27 +- .../azure/client/models/jsonschema.py | 37 +- .../azure/client/models/ocrimageobject.py | 49 +- .../azure/client/models/ocrpageobject.py | 33 +- .../azure/client/models/ocrrequest.py | 81 +- .../azure/client/models/ocrresponse.py | 33 +- .../azure/client/models/ocrtableobject.py | 6 + .../azure/client/models/ocrusageinfo.py | 33 +- .../azure/client/models/prediction.py | 27 +- .../azure/client/models/referencechunk.py | 27 +- .../azure/client/models/responseformat.py | 33 +- .../azure/client/models/systemmessage.py | 8 +- .../models/systemmessagecontentchunks.py | 2 +- .../azure/client/models/textchunk.py | 8 +- 
.../azure/client/models/thinkchunk.py | 27 +- .../src/mistralai/azure/client/models/tool.py | 19 +- .../mistralai/azure/client/models/toolcall.py | 19 +- .../azure/client/models/toolchoice.py | 19 +- .../azure/client/models/toolmessage.py | 39 +- .../azure/client/models/usageinfo.py | 45 +- .../azure/client/models/usermessage.py | 26 +- .../azure/src/mistralai/azure/client/ocr.py | 22 +- .../mistralai/azure/client/utils/__init__.py | 56 +- .../azure/client/utils/dynamic_imports.py | 54 + .../azure/client/utils/eventstreaming.py | 126 +- .../src/mistralai/azure/client/utils/forms.py | 4 +- .../mistralai/azure/client/utils/retries.py | 14 +- .../mistralai/azure/client/utils/security.py | 2 + .../mistralai/azure/client/utils/unions.py | 32 + .../client/utils/unmarshal_json_response.py | 4 +- packages/gcp/.speakeasy/gen.lock | 260 +- packages/gcp/.speakeasy/gen.yaml | 20 +- .../{models => errors}/httpvalidationerror.md | 0 .../gcp/docs/models/chatcompletionrequest.md | 2 +- .../models/chatcompletionstreamrequest.md | 2 +- packages/gcp/pylintrc | 7 +- .../gcp/src/mistralai/gcp/client/__init__.py | 1 - .../gcp/src/mistralai/gcp/client/_version.py | 6 +- .../gcp/src/mistralai/gcp/client/basesdk.py | 30 +- packages/gcp/src/mistralai/gcp/client/chat.py | 42 +- .../mistralai/gcp/client/errors/__init__.py | 39 + .../{models => errors}/httpvalidationerror.py | 6 +- .../{models => errors}/mistralgcperror.py | 0 .../{models => errors}/no_response_error.py | 0 .../responsevalidationerror.py | 2 +- .../gcp/client/{models => errors}/sdkerror.py | 2 +- packages/gcp/src/mistralai/gcp/client/fim.py | 42 +- .../mistralai/gcp/client/models/__init__.py | 65 +- .../gcp/client/models/assistantmessage.py | 39 +- .../client/models/chatcompletionrequest.py | 87 +- .../models/chatcompletionstreamrequest.py | 87 +- .../gcp/client/models/completionchunk.py | 19 +- .../models/completionresponsestreamchoice.py | 18 +- .../gcp/client/models/contentchunk.py | 42 +- 
.../gcp/client/models/deltamessage.py | 33 +- .../gcp/client/models/fimcompletionrequest.py | 71 +- .../models/fimcompletionstreamrequest.py | 71 +- .../mistralai/gcp/client/models/function.py | 19 +- .../mistralai/gcp/client/models/imageurl.py | 33 +- .../gcp/client/models/imageurlchunk.py | 8 +- .../mistralai/gcp/client/models/jsonschema.py | 37 +- .../mistralai/gcp/client/models/prediction.py | 27 +- .../gcp/client/models/referencechunk.py | 27 +- .../gcp/client/models/responseformat.py | 33 +- .../gcp/client/models/systemmessage.py | 8 +- .../models/systemmessagecontentchunks.py | 2 +- .../mistralai/gcp/client/models/textchunk.py | 8 +- .../mistralai/gcp/client/models/thinkchunk.py | 27 +- .../src/mistralai/gcp/client/models/tool.py | 19 +- .../mistralai/gcp/client/models/toolcall.py | 19 +- .../mistralai/gcp/client/models/toolchoice.py | 19 +- .../gcp/client/models/toolmessage.py | 39 +- .../mistralai/gcp/client/models/usageinfo.py | 45 +- .../gcp/client/models/usermessage.py | 26 +- .../mistralai/gcp/client/utils/__init__.py | 56 +- .../gcp/client/utils/dynamic_imports.py | 54 + .../gcp/client/utils/eventstreaming.py | 126 +- .../src/mistralai/gcp/client/utils/forms.py | 4 +- .../src/mistralai/gcp/client/utils/retries.py | 14 +- .../mistralai/gcp/client/utils/security.py | 2 + .../src/mistralai/gcp/client/utils/unions.py | 32 + .../client/utils/unmarshal_json_response.py | 4 +- pylintrc | 2 +- src/mistralai/client/__init__.py | 1 - src/mistralai/client/_version.py | 6 +- src/mistralai/client/accesses.py | 102 +- src/mistralai/client/agents.py | 162 +- src/mistralai/client/basesdk.py | 36 +- src/mistralai/client/batch_jobs.py | 146 +- src/mistralai/client/beta_agents.py | 389 ++- src/mistralai/client/chat.py | 154 +- src/mistralai/client/classifiers.py | 123 +- src/mistralai/client/conversations.py | 608 ++-- src/mistralai/client/documents.py | 344 +- src/mistralai/client/embeddings.py | 41 +- src/mistralai/client/errors/__init__.py | 40 + .../{models => 
errors}/httpvalidationerror.py | 8 +- .../client/{models => errors}/mistralerror.py | 2 +- .../{models => errors}/no_response_error.py | 2 +- .../responsevalidationerror.py | 4 +- .../client/{models => errors}/sdkerror.py | 4 +- src/mistralai/client/files.py | 184 +- src/mistralai/client/fim.py | 62 +- src/mistralai/client/fine_tuning_jobs.py | 241 +- src/mistralai/client/libraries.py | 190 +- src/mistralai/client/models/__init__.py | 1903 +++++------ src/mistralai/client/models/agent.py | 115 +- .../client/models/agentconversation.py | 57 +- .../client/models/agenthandoffdoneevent.py | 27 +- .../client/models/agenthandoffentry.py | 65 +- .../client/models/agenthandoffstartedevent.py | 27 +- ...api_v1_agents_create_or_update_aliasop.py} | 6 +- ...=> agents_api_v1_agents_delete_aliasop.py} | 6 +- ...op.py => agents_api_v1_agents_deleteop.py} | 6 +- ... => agents_api_v1_agents_get_versionop.py} | 6 +- .../models/agents_api_v1_agents_getop.py | 66 + ...s_api_v1_agents_list_version_aliasesop.py} | 6 +- ...> agents_api_v1_agents_list_versionsop.py} | 25 +- ...tsop.py => agents_api_v1_agents_listop.py} | 68 +- ... agents_api_v1_agents_update_versionop.py} | 6 +- ...op.py => agents_api_v1_agents_updateop.py} | 14 +- ...s_api_v1_conversations_append_streamop.py} | 6 +- ...> agents_api_v1_conversations_appendop.py} | 6 +- ...> agents_api_v1_conversations_deleteop.py} | 6 +- ...y => agents_api_v1_conversations_getop.py} | 6 +- ... agents_api_v1_conversations_historyop.py} | 6 +- ... => agents_api_v1_conversations_listop.py} | 47 +- ...agents_api_v1_conversations_messagesop.py} | 6 +- ..._api_v1_conversations_restart_streamop.py} | 6 +- ... 
agents_api_v1_conversations_restartop.py} | 6 +- .../client/models/agentscompletionrequest.py | 74 +- .../models/agentscompletionstreamrequest.py | 74 +- .../client/models/archiveftmodelout.py | 27 - .../client/models/archivemodelresponse.py | 50 + .../client/models/assistantmessage.py | 56 +- src/mistralai/client/models/audiochunk.py | 8 +- .../models/audiotranscriptionrequest.py | 61 +- .../models/audiotranscriptionrequeststream.py | 61 +- src/mistralai/client/models/basemodelcard.py | 77 +- src/mistralai/client/models/batcherror.py | 19 +- .../models/{batchjobout.py => batchjob.py} | 87 +- src/mistralai/client/models/batchjobsout.py | 28 - src/mistralai/client/models/batchrequest.py | 33 +- .../client/models/cancelfinetuningjobop.py | 43 - .../models/chatclassificationrequest.py | 7 +- .../client/models/chatcompletionrequest.py | 89 +- .../models/chatcompletionstreamrequest.py | 89 +- .../client/models/chatmoderationrequest.py | 6 + .../{checkpointout.py => checkpoint.py} | 12 +- .../client/models/classificationrequest.py | 37 +- .../client/models/classifierdetailedjobout.py | 169 - ...odelout.py => classifierfinetunedmodel.py} | 74 +- ...erjobout.py => classifierfinetuningjob.py} | 127 +- .../models/classifierfinetuningjobdetails.py | 197 ++ ...ssifiertargetin.py => classifiertarget.py} | 39 +- ...targetout.py => classifiertargetresult.py} | 6 +- .../models/classifiertrainingparameters.py | 57 +- .../models/classifiertrainingparametersin.py | 92 - .../client/models/codeinterpretertool.py | 48 +- src/mistralai/client/models/completionargs.py | 79 +- .../client/models/completionchunk.py | 19 +- .../client/models/completiondetailedjobout.py | 176 - ...odelout.py => completionfinetunedmodel.py} | 65 +- ...onjobout.py => completionfinetuningjob.py} | 154 +- .../models/completionfinetuningjobdetails.py | 216 ++ .../models/completionresponsestreamchoice.py | 18 +- .../models/completiontrainingparameters.py | 67 +- .../models/completiontrainingparametersin.py | 97 - 
src/mistralai/client/models/contentchunk.py | 55 +- .../models/conversationappendrequest.py | 53 +- .../models/conversationappendstreamrequest.py | 53 +- .../client/models/conversationevents.py | 46 +- .../client/models/conversationhistory.py | 51 +- .../client/models/conversationmessages.py | 43 +- .../client/models/conversationrequest.py | 89 +- .../client/models/conversationresponse.py | 49 +- .../models/conversationrestartrequest.py | 56 +- .../conversationrestartstreamrequest.py | 56 +- .../models/conversationstreamrequest.py | 89 +- .../client/models/conversationthinkchunk.py | 65 + .../client/models/conversationusageinfo.py | 47 +- ...eationrequest.py => createagentrequest.py} | 79 +- ...batchjobin.py => createbatchjobrequest.py} | 57 +- ...uploadfileout.py => createfileresponse.py} | 43 +- .../client/models/createfinetuningjobop.py | 33 - ...jobin.py => createfinetuningjobrequest.py} | 122 +- .../{libraryin.py => createlibraryrequest.py} | 39 +- ...lete_model_v1_models_model_id_deleteop.py} | 6 +- ...deletefileout.py => deletefileresponse.py} | 6 +- src/mistralai/client/models/deletemodelout.py | 19 +- src/mistralai/client/models/deltamessage.py | 33 +- .../models/{documentout.py => document.py} | 91 +- .../client/models/documentlibrarytool.py | 48 +- .../client/models/documenturlchunk.py | 57 +- .../client/models/embeddingrequest.py | 44 +- .../client/models/embeddingresponsedata.py | 19 +- .../client/models/{eventout.py => event.py} | 39 +- src/mistralai/client/models/file.py | 19 +- src/mistralai/client/models/filechunk.py | 27 +- ...p.py => files_api_routes_delete_fileop.py} | 6 +- ...py => files_api_routes_download_fileop.py} | 6 +- ...y => files_api_routes_get_signed_urlop.py} | 25 +- ...op.py => files_api_routes_list_filesop.py} | 61 +- ...py => files_api_routes_retrieve_fileop.py} | 6 +- ...p.py => files_api_routes_upload_fileop.py} | 21 +- src/mistralai/client/models/fileschema.py | 37 +- .../client/models/fimcompletionrequest.py | 71 +- 
.../models/fimcompletionstreamrequest.py | 71 +- .../models/finetunedmodelcapabilities.py | 52 + .../client/models/ftmodelcapabilitiesout.py | 27 - src/mistralai/client/models/ftmodelcard.py | 79 +- src/mistralai/client/models/function.py | 19 +- .../client/models/functioncallentry.py | 96 +- .../client/models/functioncallevent.py | 66 +- .../client/models/functionresultentry.py | 65 +- src/mistralai/client/models/functiontool.py | 8 +- src/mistralai/client/models/getagentop.py | 69 - .../client/models/getdocumenttextcontentop.py | 22 - ...{retrievefileout.py => getfileresponse.py} | 43 +- .../client/models/getfinetuningjobop.py | 43 - ...lesignedurl.py => getsignedurlresponse.py} | 6 +- ...ubrepositoryout.py => githubrepository.py} | 45 +- .../client/models/githubrepositoryin.py | 39 +- src/mistralai/client/models/imagedetail.py | 16 + .../client/models/imagegenerationtool.py | 48 +- src/mistralai/client/models/imageurl.py | 38 +- src/mistralai/client/models/imageurlchunk.py | 42 +- src/mistralai/client/models/inputentries.py | 8 +- src/mistralai/client/models/inputs.py | 44 +- .../{jobmetadataout.py => jobmetadata.py} | 75 +- ...bs_api_routes_batch_cancel_batch_jobop.py} | 6 +- ... 
jobs_api_routes_batch_get_batch_jobop.py} | 39 +- ...jobs_api_routes_batch_get_batch_jobsop.py} | 63 +- ...fine_tuning_archive_fine_tuned_modelop.py} | 6 +- ...es_fine_tuning_cancel_fine_tuning_jobop.py | 78 + ...es_fine_tuning_create_fine_tuning_jobop.py | 70 + ...outes_fine_tuning_get_fine_tuning_jobop.py | 76 + ...tes_fine_tuning_get_fine_tuning_jobsop.py} | 87 +- ...tes_fine_tuning_start_fine_tuning_jobop.py | 74 + ...ne_tuning_unarchive_fine_tuned_modelop.py} | 6 +- ...s_fine_tuning_update_fine_tuned_modelop.py | 83 + src/mistralai/client/models/jobsout.py | 40 - src/mistralai/client/models/jsonschema.py | 37 +- ...jobmetadataout.py => legacyjobmetadata.py} | 93 +- ...elibraryop.py => libraries_delete_v1op.py} | 6 +- ....py => libraries_documents_delete_v1op.py} | 6 +- ...ents_get_extracted_text_signed_url_v1op.py | 22 + ...libraries_documents_get_signed_url_v1op.py | 22 + ...=> libraries_documents_get_status_v1op.py} | 6 +- ...raries_documents_get_text_content_v1op.py} | 6 +- ...sop.py => libraries_documents_get_v1op.py} | 6 +- ...op.py => libraries_documents_list_v1op.py} | 55 +- ... 
=> libraries_documents_reprocess_v1op.py} | 6 +- ....py => libraries_documents_update_v1op.py} | 14 +- ....py => libraries_documents_upload_v1op.py} | 6 +- ...{getlibraryop.py => libraries_get_v1op.py} | 6 +- ...ssop.py => libraries_share_create_v1op.py} | 6 +- ...ssop.py => libraries_share_delete_v1op.py} | 6 +- ...ssesop.py => libraries_share_list_v1op.py} | 6 +- ...elibraryop.py => libraries_update_v1op.py} | 14 +- .../models/{libraryout.py => library.py} | 79 +- .../client/models/libraryinupdate.py | 54 - .../client/models/listbatchjobsresponse.py | 51 + ...ocumentout.py => listdocumentsresponse.py} | 12 +- .../{listfilesout.py => listfilesresponse.py} | 39 +- .../models/listfinetuningjobsresponse.py | 100 + .../client/models/listlibrariesresponse.py | 16 + src/mistralai/client/models/listlibraryout.py | 16 - .../models/messageinputcontentchunks.py | 15 +- .../client/models/messageinputentry.py | 80 +- .../models/messageoutputcontentchunks.py | 9 +- .../client/models/messageoutputentry.py | 103 +- .../client/models/messageoutputevent.py | 60 +- .../client/models/{metricout.py => metric.py} | 39 +- .../client/models/modelcapabilities.py | 32 +- .../client/models/modelconversation.py | 108 +- src/mistralai/client/models/modellist.py | 52 +- .../client/models/moderationobject.py | 19 +- src/mistralai/client/models/ocrimageobject.py | 49 +- src/mistralai/client/models/ocrpageobject.py | 33 +- src/mistralai/client/models/ocrrequest.py | 93 +- src/mistralai/client/models/ocrresponse.py | 31 +- src/mistralai/client/models/ocrtableobject.py | 6 + src/mistralai/client/models/ocrusageinfo.py | 33 +- .../client/models/outputcontentchunks.py | 9 +- src/mistralai/client/models/prediction.py | 27 +- .../models/realtimetranscriptionerror.py | 27 +- .../realtimetranscriptioninputaudioappend.py | 52 + .../realtimetranscriptioninputaudioend.py | 47 + .../realtimetranscriptioninputaudioflush.py | 47 + .../models/realtimetranscriptionsession.py | 39 +- 
.../realtimetranscriptionsessioncreated.py | 27 +- .../realtimetranscriptionsessionupdated.py | 27 +- ...altimetranscriptionsessionupdatemessage.py | 54 + ...altimetranscriptionsessionupdatepayload.py | 54 + src/mistralai/client/models/referencechunk.py | 42 +- .../client/models/reprocessdocumentop.py | 22 - .../client/models/responsedoneevent.py | 27 +- .../client/models/responseerrorevent.py | 27 +- src/mistralai/client/models/responseformat.py | 33 +- .../client/models/responsestartedevent.py | 27 +- ...retrieve_model_v1_models_model_id_getop.py | 64 + .../client/models/retrievemodelop.py | 36 - src/mistralai/client/models/security.py | 19 +- src/mistralai/client/models/sharingdelete.py | 33 +- src/mistralai/client/models/sharingin.py | 33 +- src/mistralai/client/models/sharingout.py | 33 +- .../client/models/startfinetuningjobop.py | 41 - src/mistralai/client/models/systemmessage.py | 8 +- src/mistralai/client/models/textchunk.py | 40 +- src/mistralai/client/models/thinkchunk.py | 52 +- src/mistralai/client/models/tool.py | 19 +- src/mistralai/client/models/toolcall.py | 19 +- .../client/models/toolcallconfirmation.py | 24 + src/mistralai/client/models/toolchoice.py | 19 +- .../client/models/toolconfiguration.py | 53 + .../client/models/toolexecutiondeltaevent.py | 27 +- .../client/models/toolexecutiondoneevent.py | 27 +- .../client/models/toolexecutionentry.py | 82 +- .../models/toolexecutionstartedevent.py | 48 +- src/mistralai/client/models/toolfilechunk.py | 54 +- src/mistralai/client/models/toolmessage.py | 49 +- .../client/models/toolreferencechunk.py | 55 +- src/mistralai/client/models/trainingfile.py | 19 +- .../client/models/transcriptionresponse.py | 33 +- .../models/transcriptionsegmentchunk.py | 58 +- .../client/models/transcriptionstreamdone.py | 41 +- .../models/transcriptionstreamevents.py | 36 +- .../models/transcriptionstreamlanguage.py | 8 +- .../models/transcriptionstreamsegmentdelta.py | 47 +- .../models/transcriptionstreamtextdelta.py | 8 
+- .../client/models/unarchiveftmodelout.py | 27 - .../client/models/unarchivemodelresponse.py | 50 + ...updaterequest.py => updateagentrequest.py} | 97 +- ...ntupdatein.py => updatedocumentrequest.py} | 39 +- .../client/models/updateftmodelin.py | 54 - .../client/models/updatelibraryrequest.py | 49 + src/mistralai/client/models/updatemodelop.py | 43 - .../client/models/updatemodelrequest.py | 49 + src/mistralai/client/models/usageinfo.py | 45 +- src/mistralai/client/models/usermessage.py | 26 +- .../client/models/wandbintegration.py | 39 +- ...rationout.py => wandbintegrationresult.py} | 45 +- .../client/models/websearchpremiumtool.py | 48 +- src/mistralai/client/models/websearchtool.py | 48 +- src/mistralai/client/models_.py | 178 +- src/mistralai/client/ocr.py | 62 +- src/mistralai/client/sdk.py | 11 +- src/mistralai/client/transcriptions.py | 54 +- src/mistralai/client/utils/__init__.py | 56 +- src/mistralai/client/utils/dynamic_imports.py | 55 + src/mistralai/client/utils/eventstreaming.py | 126 +- src/mistralai/client/utils/forms.py | 4 +- src/mistralai/client/utils/retries.py | 14 +- src/mistralai/client/utils/security.py | 2 + src/mistralai/client/utils/unions.py | 33 + .../client/utils/unmarshal_json_response.py | 4 +- src/mistralai/extra/run/context.py | 8 +- 658 files changed, 13796 insertions(+), 11219 deletions(-) rename docs/{models => errors}/httpvalidationerror.md (100%) delete mode 100644 docs/models/agentconversationobject.md delete mode 100644 docs/models/agentcreationrequest.md delete mode 100644 docs/models/agenthandoffentryobject.md delete mode 100644 docs/models/agenthandoffentrytype.md delete mode 100644 docs/models/agentobject.md rename docs/models/{createorupdateagentaliasrequest.md => agentsapiv1agentscreateorupdatealiasrequest.md} (90%) rename docs/models/{deleteagentaliasrequest.md => agentsapiv1agentsdeletealiasrequest.md} (90%) rename docs/models/{deleteagentrequest.md => agentsapiv1agentsdeleterequest.md} (89%) rename 
docs/models/{getagentagentversion.md => agentsapiv1agentsgetagentversion.md} (79%) create mode 100644 docs/models/agentsapiv1agentsgetrequest.md rename docs/models/{getagentversionrequest.md => agentsapiv1agentsgetversionrequest.md} (90%) rename docs/models/{listagentsrequest.md => agentsapiv1agentslistrequest.md} (98%) rename docs/models/{listagentaliasesrequest.md => agentsapiv1agentslistversionaliasesrequest.md} (85%) rename docs/models/{listagentversionsrequest.md => agentsapiv1agentslistversionsrequest.md} (94%) create mode 100644 docs/models/agentsapiv1agentsupdaterequest.md rename docs/models/{updateagentversionrequest.md => agentsapiv1agentsupdateversionrequest.md} (89%) rename docs/models/{appendconversationrequest.md => agentsapiv1conversationsappendrequest.md} (96%) rename docs/models/{appendconversationstreamrequest.md => agentsapiv1conversationsappendstreamrequest.md} (96%) rename docs/models/{getconversationrequest.md => agentsapiv1conversationsdeleterequest.md} (95%) rename docs/models/{deleteconversationrequest.md => agentsapiv1conversationsgetrequest.md} (95%) rename docs/models/{getconversationhistoryrequest.md => agentsapiv1conversationshistoryrequest.md} (94%) rename docs/models/{listconversationsrequest.md => agentsapiv1conversationslistrequest.md} (92%) rename docs/models/{listconversationsresponse.md => agentsapiv1conversationslistresponse.md} (84%) rename docs/models/{getconversationmessagesrequest.md => agentsapiv1conversationsmessagesrequest.md} (94%) rename docs/models/{restartconversationrequest.md => agentsapiv1conversationsrestartrequest.md} (96%) rename docs/models/{restartconversationstreamrequest.md => agentsapiv1conversationsrestartstreamrequest.md} (96%) rename docs/models/{unarchiveftmodelout.md => archivemodelresponse.md} (96%) delete mode 100644 docs/models/assistantmessagerole.md rename docs/models/{batchjobout.md => batchjob.md} (99%) delete mode 100644 docs/models/batchjobsout.md delete mode 100644 
docs/models/cancelfinetuningjobresponse.md rename docs/models/{checkpointout.md => checkpoint.md} (96%) delete mode 100644 docs/models/classifierdetailedjoboutintegration.md create mode 100644 docs/models/classifierfinetunedmodel.md rename docs/models/{classifierjobout.md => classifierfinetuningjob.md} (97%) rename docs/models/{classifierdetailedjobout.md => classifierfinetuningjobdetails.md} (94%) create mode 100644 docs/models/classifierfinetuningjobdetailsintegration.md rename docs/models/{classifierdetailedjoboutstatus.md => classifierfinetuningjobdetailsstatus.md} (94%) create mode 100644 docs/models/classifierfinetuningjobintegration.md rename docs/models/{completionjoboutstatus.md => classifierfinetuningjobstatus.md} (95%) delete mode 100644 docs/models/classifierftmodelout.md delete mode 100644 docs/models/classifierjoboutintegration.md rename docs/models/{classifiertargetin.md => classifiertarget.md} (99%) rename docs/models/{classifiertargetout.md => classifiertargetresult.md} (98%) delete mode 100644 docs/models/classifiertrainingparametersin.md delete mode 100644 docs/models/completiondetailedjoboutintegration.md delete mode 100644 docs/models/completiondetailedjoboutrepository.md create mode 100644 docs/models/completionfinetunedmodel.md rename docs/models/{completionjobout.md => completionfinetuningjob.md} (97%) rename docs/models/{completiondetailedjobout.md => completionfinetuningjobdetails.md} (94%) create mode 100644 docs/models/completionfinetuningjobdetailsintegration.md create mode 100644 docs/models/completionfinetuningjobdetailsrepository.md rename docs/models/{completiondetailedjoboutstatus.md => completionfinetuningjobdetailsstatus.md} (94%) create mode 100644 docs/models/completionfinetuningjobintegration.md create mode 100644 docs/models/completionfinetuningjobrepository.md rename docs/models/{classifierjoboutstatus.md => completionfinetuningjobstatus.md} (95%) delete mode 100644 docs/models/completionftmodelout.md delete mode 100644 
docs/models/completionjoboutintegration.md delete mode 100644 docs/models/completionjoboutrepository.md delete mode 100644 docs/models/completiontrainingparametersin.md create mode 100644 docs/models/confirmation.md delete mode 100644 docs/models/conversationhistoryobject.md delete mode 100644 docs/models/conversationmessagesobject.md delete mode 100644 docs/models/conversationresponseobject.md create mode 100644 docs/models/conversationthinkchunk.md create mode 100644 docs/models/conversationthinkchunkthinking.md rename docs/models/{agentupdaterequest.md => createagentrequest.md} (80%) rename docs/models/{agentupdaterequesttool.md => createagentrequesttool.md} (96%) rename docs/models/{batchjobin.md => createbatchjobrequest.md} (99%) rename docs/models/{uploadfileout.md => createfileresponse.md} (99%) rename docs/models/{jobin.md => createfinetuningjobrequest.md} (97%) rename docs/models/{jobinintegration.md => createfinetuningjobrequestintegration.md} (74%) rename docs/models/{jobinrepository.md => createfinetuningjobrequestrepository.md} (75%) delete mode 100644 docs/models/createfinetuningjobresponse.md rename docs/models/{libraryin.md => createlibraryrequest.md} (95%) rename docs/models/{deletefileout.md => deletefileresponse.md} (97%) rename docs/models/{deletemodelrequest.md => deletemodelv1modelsmodeliddeleterequest.md} (94%) delete mode 100644 docs/models/documentout.md create mode 100644 docs/models/documentunion.md delete mode 100644 docs/models/documentupdatein.md delete mode 100644 docs/models/documenturlchunktype.md rename docs/models/{eventout.md => event.md} (98%) rename docs/models/{downloadfilerequest.md => filesapiroutesdeletefilerequest.md} (88%) rename docs/models/{retrievefilerequest.md => filesapiroutesdownloadfilerequest.md} (88%) rename docs/models/{getfilesignedurlrequest.md => filesapiroutesgetsignedurlrequest.md} (96%) rename docs/models/{listfilesrequest.md => filesapirouteslistfilesrequest.md} (98%) rename 
docs/models/{deletefilerequest.md => filesapiroutesretrievefilerequest.md} (88%) rename docs/models/{ftmodelcapabilitiesout.md => finetunedmodelcapabilities.md} (95%) create mode 100644 docs/models/functioncallentryconfirmationstatus.md delete mode 100644 docs/models/functioncallentryobject.md delete mode 100644 docs/models/functioncallentrytype.md create mode 100644 docs/models/functioncalleventconfirmationstatus.md delete mode 100644 docs/models/functionresultentryobject.md delete mode 100644 docs/models/functionresultentrytype.md delete mode 100644 docs/models/getdocumenttextcontentrequest.md rename docs/models/{retrievefileout.md => getfileresponse.md} (99%) delete mode 100644 docs/models/getfinetuningjobresponse.md rename docs/models/{filesignedurl.md => getsignedurlresponse.md} (92%) rename docs/models/{githubrepositoryout.md => githubrepository.md} (97%) create mode 100644 docs/models/imagedetail.md delete mode 100644 docs/models/imageurlchunktype.md delete mode 100644 docs/models/inputsmessage.md delete mode 100644 docs/models/instructrequestinputs.md rename docs/models/{jobmetadataout.md => jobmetadata.md} (98%) rename docs/models/{cancelbatchjobrequest.md => jobsapiroutesbatchcancelbatchjobrequest.md} (86%) rename docs/models/{getbatchjobrequest.md => jobsapiroutesbatchgetbatchjobrequest.md} (92%) rename docs/models/{listbatchjobsrequest.md => jobsapiroutesbatchgetbatchjobsrequest.md} (98%) rename docs/models/{archivemodelrequest.md => jobsapiroutesfinetuningarchivefinetunedmodelrequest.md} (93%) rename docs/models/{cancelfinetuningjobrequest.md => jobsapiroutesfinetuningcancelfinetuningjobrequest.md} (88%) create mode 100644 docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md create mode 100644 docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md rename docs/models/{getfinetuningjobrequest.md => jobsapiroutesfinetuninggetfinetuningjobrequest.md} (89%) create mode 100644 
docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md create mode 100644 docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md rename docs/models/{listfinetuningjobsstatus.md => jobsapiroutesfinetuninggetfinetuningjobsstatus.md} (94%) rename docs/models/{startfinetuningjobrequest.md => jobsapiroutesfinetuningstartfinetuningjobrequest.md} (84%) create mode 100644 docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md rename docs/models/{unarchivemodelrequest.md => jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md} (92%) create mode 100644 docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md create mode 100644 docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md delete mode 100644 docs/models/jobsout.md delete mode 100644 docs/models/jobsoutdata.md rename docs/models/{legacyjobmetadataout.md => legacyjobmetadata.md} (99%) rename docs/models/{getlibraryrequest.md => librariesdeletev1request.md} (90%) rename docs/models/{getdocumentstatusrequest.md => librariesdocumentsdeletev1request.md} (90%) create mode 100644 docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md create mode 100644 docs/models/librariesdocumentsgetsignedurlv1request.md rename docs/models/{getdocumentrequest.md => librariesdocumentsgetstatusv1request.md} (90%) rename docs/models/{getdocumentextractedtextsignedurlrequest.md => librariesdocumentsgettextcontentv1request.md} (89%) rename docs/models/{getdocumentsignedurlrequest.md => librariesdocumentsgetv1request.md} (91%) rename docs/models/{listdocumentsrequest.md => librariesdocumentslistv1request.md} (96%) rename docs/models/{deletedocumentrequest.md => librariesdocumentsreprocessv1request.md} (90%) create mode 100644 docs/models/librariesdocumentsupdatev1request.md rename docs/models/{uploaddocumentrequest.md => librariesdocumentsuploadv1request.md} (96%) rename docs/models/{deletelibraryrequest.md => librariesgetv1request.md} (91%) rename 
docs/models/{updateorcreatelibraryaccessrequest.md => librariessharecreatev1request.md} (95%) rename docs/models/{deletelibraryaccessrequest.md => librariessharedeletev1request.md} (96%) rename docs/models/{listlibraryaccessesrequest.md => librariessharelistv1request.md} (90%) create mode 100644 docs/models/librariesupdatev1request.md rename docs/models/{libraryout.md => library.md} (99%) delete mode 100644 docs/models/libraryinupdate.md create mode 100644 docs/models/listbatchjobsresponse.md rename docs/models/{listdocumentout.md => listdocumentsresponse.md} (90%) rename docs/models/{listfilesout.md => listfilesresponse.md} (98%) delete mode 100644 docs/models/listfinetuningjobsrequest.md create mode 100644 docs/models/listfinetuningjobsresponse.md create mode 100644 docs/models/listfinetuningjobsresponsedata.md create mode 100644 docs/models/listlibrariesresponse.md delete mode 100644 docs/models/listlibraryout.md delete mode 100644 docs/models/messageinputentryobject.md delete mode 100644 docs/models/messageinputentrytype.md delete mode 100644 docs/models/messageoutputentryobject.md delete mode 100644 docs/models/messageoutputentryrole.md delete mode 100644 docs/models/messageoutputentrytype.md delete mode 100644 docs/models/messageoutputeventrole.md rename docs/models/{metricout.md => metric.md} (98%) delete mode 100644 docs/models/modelconversationobject.md create mode 100644 docs/models/realtimetranscriptioninputaudioappend.md create mode 100644 docs/models/realtimetranscriptioninputaudioend.md create mode 100644 docs/models/realtimetranscriptioninputaudioflush.md create mode 100644 docs/models/realtimetranscriptionsessionupdatemessage.md rename docs/models/{getagentrequest.md => realtimetranscriptionsessionupdatepayload.md} (57%) delete mode 100644 docs/models/referencechunktype.md delete mode 100644 docs/models/reprocessdocumentrequest.md rename docs/models/{retrievemodelrequest.md => retrievemodelv1modelsmodelidgetrequest.md} (94%) rename 
docs/models/{messageinputentryrole.md => role.md} (84%) delete mode 100644 docs/models/startfinetuningjobresponse.md delete mode 100644 docs/models/textchunktype.md rename docs/models/{thinking.md => thinkchunkthinking.md} (90%) delete mode 100644 docs/models/thinkchunktype.md create mode 100644 docs/models/toolcallconfirmation.md rename docs/models/{updateftmodelin.md => toolconfiguration.md} (54%) delete mode 100644 docs/models/toolexecutionentryobject.md delete mode 100644 docs/models/toolexecutionentrytype.md delete mode 100644 docs/models/toolfilechunktype.md delete mode 100644 docs/models/toolreferencechunktype.md delete mode 100644 docs/models/transcriptionsegmentchunktype.md rename docs/models/{archiveftmodelout.md => unarchivemodelresponse.md} (96%) rename docs/models/{agentcreationrequesttool.md => updateagentrequesttool.md} (95%) delete mode 100644 docs/models/updatemodelresponse.md rename docs/models/{wandbintegrationout.md => wandbintegrationresult.md} (98%) rename packages/azure/docs/{models => errors}/httpvalidationerror.md (100%) create mode 100644 packages/azure/src/mistralai/azure/client/errors/__init__.py rename packages/azure/src/mistralai/azure/client/{models => errors}/httpvalidationerror.py (76%) rename packages/azure/src/mistralai/azure/client/{models => errors}/mistralazureerror.py (100%) rename packages/azure/src/mistralai/azure/client/{models => errors}/no_response_error.py (100%) rename packages/azure/src/mistralai/azure/client/{models => errors}/responsevalidationerror.py (92%) rename packages/azure/src/mistralai/azure/client/{models => errors}/sdkerror.py (95%) create mode 100644 packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py create mode 100644 packages/azure/src/mistralai/azure/client/utils/unions.py rename packages/gcp/docs/{models => errors}/httpvalidationerror.md (100%) create mode 100644 packages/gcp/src/mistralai/gcp/client/errors/__init__.py rename packages/gcp/src/mistralai/gcp/client/{models => 
errors}/httpvalidationerror.py (77%) rename packages/gcp/src/mistralai/gcp/client/{models => errors}/mistralgcperror.py (100%) rename packages/gcp/src/mistralai/gcp/client/{models => errors}/no_response_error.py (100%) rename packages/gcp/src/mistralai/gcp/client/{models => errors}/responsevalidationerror.py (92%) rename packages/gcp/src/mistralai/gcp/client/{models => errors}/sdkerror.py (95%) create mode 100644 packages/gcp/src/mistralai/gcp/client/utils/dynamic_imports.py create mode 100644 packages/gcp/src/mistralai/gcp/client/utils/unions.py create mode 100644 src/mistralai/client/errors/__init__.py rename src/mistralai/client/{models => errors}/httpvalidationerror.py (75%) rename src/mistralai/client/{models => errors}/mistralerror.py (96%) rename src/mistralai/client/{models => errors}/no_response_error.py (93%) rename src/mistralai/client/{models => errors}/responsevalidationerror.py (90%) rename src/mistralai/client/{models => errors}/sdkerror.py (94%) rename src/mistralai/client/models/{createorupdateagentaliasop.py => agents_api_v1_agents_create_or_update_aliasop.py} (80%) rename src/mistralai/client/models/{deleteagentaliasop.py => agents_api_v1_agents_delete_aliasop.py} (78%) rename src/mistralai/client/models/{listagentaliasesop.py => agents_api_v1_agents_deleteop.py} (74%) rename src/mistralai/client/models/{getagentversionop.py => agents_api_v1_agents_get_versionop.py} (78%) create mode 100644 src/mistralai/client/models/agents_api_v1_agents_getop.py rename src/mistralai/client/models/{deleteagentop.py => agents_api_v1_agents_list_version_aliasesop.py} (71%) rename src/mistralai/client/models/{listagentversionsop.py => agents_api_v1_agents_list_versionsop.py} (56%) rename src/mistralai/client/models/{listagentsop.py => agents_api_v1_agents_listop.py} (70%) rename src/mistralai/client/models/{updateagentversionop.py => agents_api_v1_agents_update_versionop.py} (78%) rename src/mistralai/client/models/{updateagentop.py => 
agents_api_v1_agents_updateop.py} (62%) rename src/mistralai/client/models/{appendconversationstreamop.py => agents_api_v1_conversations_append_streamop.py} (85%) rename src/mistralai/client/models/{appendconversationop.py => agents_api_v1_conversations_appendop.py} (85%) rename src/mistralai/client/models/{deleteconversationop.py => agents_api_v1_conversations_deleteop.py} (78%) rename src/mistralai/client/models/{getconversationop.py => agents_api_v1_conversations_getop.py} (88%) rename src/mistralai/client/models/{getconversationhistoryop.py => agents_api_v1_conversations_historyop.py} (78%) rename src/mistralai/client/models/{listconversationsop.py => agents_api_v1_conversations_listop.py} (59%) rename src/mistralai/client/models/{getconversationmessagesop.py => agents_api_v1_conversations_messagesop.py} (78%) rename src/mistralai/client/models/{restartconversationstreamop.py => agents_api_v1_conversations_restart_streamop.py} (85%) rename src/mistralai/client/models/{restartconversationop.py => agents_api_v1_conversations_restartop.py} (85%) delete mode 100644 src/mistralai/client/models/archiveftmodelout.py create mode 100644 src/mistralai/client/models/archivemodelresponse.py rename src/mistralai/client/models/{batchjobout.py => batchjob.py} (64%) delete mode 100644 src/mistralai/client/models/batchjobsout.py delete mode 100644 src/mistralai/client/models/cancelfinetuningjobop.py rename src/mistralai/client/models/{checkpointout.py => checkpoint.py} (81%) delete mode 100644 src/mistralai/client/models/classifierdetailedjobout.py rename src/mistralai/client/models/{classifierftmodelout.py => classifierfinetunedmodel.py} (56%) rename src/mistralai/client/models/{classifierjobout.py => classifierfinetuningjob.py} (63%) create mode 100644 src/mistralai/client/models/classifierfinetuningjobdetails.py rename src/mistralai/client/models/{classifiertargetin.py => classifiertarget.py} (55%) rename src/mistralai/client/models/{classifiertargetout.py => 
classifiertargetresult.py} (79%) delete mode 100644 src/mistralai/client/models/classifiertrainingparametersin.py delete mode 100644 src/mistralai/client/models/completiondetailedjobout.py rename src/mistralai/client/models/{completionftmodelout.py => completionfinetunedmodel.py} (60%) rename src/mistralai/client/models/{completionjobout.py => completionfinetuningjob.py} (56%) create mode 100644 src/mistralai/client/models/completionfinetuningjobdetails.py delete mode 100644 src/mistralai/client/models/completiontrainingparametersin.py create mode 100644 src/mistralai/client/models/conversationthinkchunk.py rename src/mistralai/client/models/{agentcreationrequest.py => createagentrequest.py} (66%) rename src/mistralai/client/models/{batchjobin.py => createbatchjobrequest.py} (76%) rename src/mistralai/client/models/{uploadfileout.py => createfileresponse.py} (69%) delete mode 100644 src/mistralai/client/models/createfinetuningjobop.py rename src/mistralai/client/models/{jobin.py => createfinetuningjobrequest.py} (56%) rename src/mistralai/client/models/{libraryin.py => createlibraryrequest.py} (50%) rename src/mistralai/client/models/{deletemodelop.py => delete_model_v1_models_model_id_deleteop.py} (76%) rename src/mistralai/client/models/{deletefileout.py => deletefileresponse.py} (82%) rename src/mistralai/client/models/{documentout.py => document.py} (60%) rename src/mistralai/client/models/{eventout.py => event.py} (56%) rename src/mistralai/client/models/{downloadfileop.py => files_api_routes_delete_fileop.py} (74%) rename src/mistralai/client/models/{deletefileop.py => files_api_routes_download_fileop.py} (73%) rename src/mistralai/client/models/{getfilesignedurlop.py => files_api_routes_get_signed_urlop.py} (51%) rename src/mistralai/client/models/{listfilesop.py => files_api_routes_list_filesop.py} (70%) rename src/mistralai/client/models/{retrievefileop.py => files_api_routes_retrieve_fileop.py} (73%) rename src/mistralai/client/models/{uploadfileop.py => 
files_api_routes_upload_fileop.py} (70%) create mode 100644 src/mistralai/client/models/finetunedmodelcapabilities.py delete mode 100644 src/mistralai/client/models/ftmodelcapabilitiesout.py delete mode 100644 src/mistralai/client/models/getagentop.py delete mode 100644 src/mistralai/client/models/getdocumenttextcontentop.py rename src/mistralai/client/models/{retrievefileout.py => getfileresponse.py} (69%) delete mode 100644 src/mistralai/client/models/getfinetuningjobop.py rename src/mistralai/client/models/{filesignedurl.py => getsignedurlresponse.py} (65%) rename src/mistralai/client/models/{githubrepositoryout.py => githubrepository.py} (59%) create mode 100644 src/mistralai/client/models/imagedetail.py rename src/mistralai/client/models/{jobmetadataout.py => jobmetadata.py} (52%) rename src/mistralai/client/models/{cancelbatchjobop.py => jobs_api_routes_batch_cancel_batch_jobop.py} (72%) rename src/mistralai/client/models/{getbatchjobop.py => jobs_api_routes_batch_get_batch_jobop.py} (56%) rename src/mistralai/client/models/{listbatchjobsop.py => jobs_api_routes_batch_get_batch_jobsop.py} (71%) rename src/mistralai/client/models/{archivemodelop.py => jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py} (73%) create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py rename src/mistralai/client/models/{listfinetuningjobsop.py => jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py} (75%) create mode 100644 src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py rename src/mistralai/client/models/{unarchivemodelop.py => jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py} (73%) create mode 100644 
src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py delete mode 100644 src/mistralai/client/models/jobsout.py rename src/mistralai/client/models/{legacyjobmetadataout.py => legacyjobmetadata.py} (70%) rename src/mistralai/client/models/{deletelibraryop.py => libraries_delete_v1op.py} (76%) rename src/mistralai/client/models/{deletedocumentop.py => libraries_documents_delete_v1op.py} (79%) create mode 100644 src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py create mode 100644 src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py rename src/mistralai/client/models/{getdocumentop.py => libraries_documents_get_status_v1op.py} (78%) rename src/mistralai/client/models/{getdocumentextractedtextsignedurlop.py => libraries_documents_get_text_content_v1op.py} (77%) rename src/mistralai/client/models/{getdocumentstatusop.py => libraries_documents_get_v1op.py} (80%) rename src/mistralai/client/models/{listdocumentsop.py => libraries_documents_list_v1op.py} (67%) rename src/mistralai/client/models/{getdocumentsignedurlop.py => libraries_documents_reprocess_v1op.py} (78%) rename src/mistralai/client/models/{updatedocumentop.py => libraries_documents_update_v1op.py} (64%) rename src/mistralai/client/models/{uploaddocumentop.py => libraries_documents_upload_v1op.py} (91%) rename src/mistralai/client/models/{getlibraryop.py => libraries_get_v1op.py} (77%) rename src/mistralai/client/models/{updateorcreatelibraryaccessop.py => libraries_share_create_v1op.py} (81%) rename src/mistralai/client/models/{deletelibraryaccessop.py => libraries_share_delete_v1op.py} (83%) rename src/mistralai/client/models/{listlibraryaccessesop.py => libraries_share_list_v1op.py} (75%) rename src/mistralai/client/models/{updatelibraryop.py => libraries_update_v1op.py} (60%) rename src/mistralai/client/models/{libraryout.py => library.py} (58%) delete mode 100644 src/mistralai/client/models/libraryinupdate.py create mode 
100644 src/mistralai/client/models/listbatchjobsresponse.py rename src/mistralai/client/models/{listdocumentout.py => listdocumentsresponse.py} (60%) rename src/mistralai/client/models/{listfilesout.py => listfilesresponse.py} (53%) create mode 100644 src/mistralai/client/models/listfinetuningjobsresponse.py create mode 100644 src/mistralai/client/models/listlibrariesresponse.py delete mode 100644 src/mistralai/client/models/listlibraryout.py rename src/mistralai/client/models/{metricout.py => metric.py} (60%) create mode 100644 src/mistralai/client/models/realtimetranscriptioninputaudioappend.py create mode 100644 src/mistralai/client/models/realtimetranscriptioninputaudioend.py create mode 100644 src/mistralai/client/models/realtimetranscriptioninputaudioflush.py create mode 100644 src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py create mode 100644 src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py delete mode 100644 src/mistralai/client/models/reprocessdocumentop.py create mode 100644 src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py delete mode 100644 src/mistralai/client/models/retrievemodelop.py delete mode 100644 src/mistralai/client/models/startfinetuningjobop.py create mode 100644 src/mistralai/client/models/toolcallconfirmation.py create mode 100644 src/mistralai/client/models/toolconfiguration.py delete mode 100644 src/mistralai/client/models/unarchiveftmodelout.py create mode 100644 src/mistralai/client/models/unarchivemodelresponse.py rename src/mistralai/client/models/{agentupdaterequest.py => updateagentrequest.py} (66%) rename src/mistralai/client/models/{documentupdatein.py => updatedocumentrequest.py} (60%) delete mode 100644 src/mistralai/client/models/updateftmodelin.py create mode 100644 src/mistralai/client/models/updatelibraryrequest.py delete mode 100644 src/mistralai/client/models/updatemodelop.py create mode 100644 src/mistralai/client/models/updatemodelrequest.py rename 
src/mistralai/client/models/{wandbintegrationout.py => wandbintegrationresult.py} (65%) create mode 100644 src/mistralai/client/utils/dynamic_imports.py create mode 100644 src/mistralai/client/utils/unions.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 6e86c59c..678c20f2 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,26 +1,26 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: 2d9e4f612e5caf84349ab02663eee66e + docChecksum: b66b034aac7aa9b38c4fb47a3b3d843e docVersion: 1.0.0 - speakeasyVersion: 1.685.0 - generationVersion: 2.794.1 - releaseVersion: 2.0.0a3 - configChecksum: 7fc1ba01c21def8447b979e71593af4a + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 2.0.0-a3.1 + configChecksum: 134292298710eaf25a0f90f7097e648f repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 3aa9018f-cb6c-4c1b-96d0-b832fd5f6513 - pristine_commit_hash: 5c4e3b65b7572c91338d50dc3ca91ea6a46eedf7 - pristine_tree_hash: aaea604044e12872107c3b550ea7be094fb66a99 + generation_id: 21ec746f-e476-468a-bb8e-c942c0997501 + pristine_commit_hash: 99ae95385eb06175841ba19bef78319a5921c585 + pristine_tree_hash: 5b06b6f5add0cd16af8139d524a42368532441c6 features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 configurableModuleName: 0.2.0 - constsAndDefaults: 1.0.5 - core: 5.23.18 + constsAndDefaults: 1.0.7 + core: 6.0.12 customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 @@ -29,23 +29,23 @@ features: examples: 3.0.2 flatRequests: 1.0.1 flattening: 3.1.1 - globalSecurity: 3.0.4 + globalSecurity: 3.0.5 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.2.0 includes: 3.0.0 methodArguments: 1.0.2 multipartFileContentType: 1.0.0 - 
nameOverrides: 3.0.1 - nullables: 1.0.1 - openEnums: 1.0.2 - responseFormat: 1.0.1 - retries: 3.0.3 - sdkHooks: 1.2.0 - serverEvents: 1.0.11 + nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.1.1 + unions: 3.1.4 uploadStreams: 1.0.0 trackedFiles: .gitattributes: @@ -58,64 +58,128 @@ trackedFiles: pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae USAGE.md: id: 3aed33ce6e6f - last_write_checksum: sha1:b1cf4cc87111df10c55731b3f5abad22890387a2 - pristine_git_object: 1810386448a440cfc5f7b8579695b228ae40460d + last_write_checksum: sha1:50cc0351d6145a805d1d5ae8be4dfce58178e648 + pristine_git_object: f71bbabc223b8cef8d923816fce8d572f3901884 + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/agent.md: id: ffdbb4c53c87 - last_write_checksum: sha1:4538aaa78a09b7e33db405f84916b1eb82f94bca - pristine_git_object: e335d889cdb70f4d3c987827ff714db90418cb39 + last_write_checksum: sha1:c87b05a17785cd83fdfc58cb2d55b6d77d3bc23e + pristine_git_object: 4de5a901d120b85ba5940490a2ec3fd4f1a91136 docs/models/agentaliasresponse.md: id: 5ac4721d8947 last_write_checksum: sha1:15dcc6820e89d2c6bb799e331463419ce29ec167 pristine_git_object: aa531ec5d1464f95e3938f148c1e88efc30fa6a6 docs/models/agentconversation.md: id: 3590c1a566fa - last_write_checksum: sha1:264d78815c3999bac377ab3f8c08a264178baf43 - pristine_git_object: a2d617316f1965acfabf7d2fe74334de16213829 + last_write_checksum: sha1:43e7c1ed2b43aca2794d89f2e6d6aa5f1478cc3e + pristine_git_object: 451f6fb8f700dddd54c69593c316bf562b5cbc93 docs/models/agentconversationagentversion.md: id: 468e0d1614bb last_write_checksum: sha1:6e60bf4a18d791d694e90c89bdb8cc38e43c324b pristine_git_object: 668a8dc0f0c51a231a73aed51b2db13de243a038 - 
docs/models/agentconversationobject.md: - id: cfd35d9dd4f2 - last_write_checksum: sha1:112552d4a241967cf0a7dcb981428e7e0715dc34 - pristine_git_object: ea7cc75c5197ed42f9fb508a969baa16effe1f98 - docs/models/agentcreationrequest.md: - id: 697a770fe5c0 - last_write_checksum: sha1:b3f12ca0a6356e657de2941c8441fc951bcc96f4 - pristine_git_object: f0f0fdbc13f8f490ded4f8df3944250aece1311b - docs/models/agentcreationrequesttool.md: - id: 392d970ffb74 - last_write_checksum: sha1:310d4b107554a9c16143191fdc306a5438b63768 - pristine_git_object: b3bd7fa3cead0a0a1480b0e1b3f0afbfd177b600 docs/models/agenthandoffdoneevent.md: id: dcf166a3c3b0 last_write_checksum: sha1:9e95c09f724827f5e9c202fd634bdfa2baef1b6e pristine_git_object: 6bfcc3d83457edf05d0f13957d34ead0f260599b docs/models/agenthandoffentry.md: id: 39d54f489b84 - last_write_checksum: sha1:7d949e750fd24dea20cabae340f9204d8f756008 - pristine_git_object: 8831b0ebad1c4e857f4f4353d1815753bb13125f - docs/models/agenthandoffentryobject.md: - id: ac62dd5f1002 - last_write_checksum: sha1:9d25ec388406e6faa765cf163e1e6dcb590ca0e9 - pristine_git_object: 4bb876fb3c60a42cf530c932b7c60278e6036f03 - docs/models/agenthandoffentrytype.md: - id: 07506fd159e0 - last_write_checksum: sha1:27ce9bdf225fbad46230e339a5c6d96213f1df62 - pristine_git_object: 527ebceb2ff1bbba1067f30438befd5e2c2e91d6 + last_write_checksum: sha1:a93a604ced2303eb6f93cfe0f1360224d3298b37 + pristine_git_object: 2b689ec720c02b7289ec462d7acca64a82b23570 docs/models/agenthandoffstartedevent.md: id: b620102af460 last_write_checksum: sha1:33732e0465423348c2ace458506a597a3dadf9b2 pristine_git_object: 518b5a0c4521ec55a5a28ba3ef0ad1c1fce52792 - docs/models/agentobject.md: - id: ed24a6d647a0 - last_write_checksum: sha1:ff5dfde6cc19f09c83afb5b4f0f103096df6691d - pristine_git_object: 70e143b030d3041c7538ecdacb8f5f9f8d1b5c92 + docs/models/agentsapiv1agentscreateorupdatealiasrequest.md: + id: c09ec9946094 + last_write_checksum: sha1:0883217b4bad21f5d4f8162ca72005bf9105a93f + 
pristine_git_object: 79406434cc6ff3d1485089f35639d6284f66d6cb + docs/models/agentsapiv1agentsdeletealiasrequest.md: + id: 429307ab315d + last_write_checksum: sha1:8e0a8388bb51c234aa1eb5566cb68389ebe57574 + pristine_git_object: 8e95c0c31e8ac92b374c153d622d7806b9e59a8d + docs/models/agentsapiv1agentsdeleterequest.md: + id: 0faaaa59add9 + last_write_checksum: sha1:2a34269e682bb910b83814b4d730ba2ce07f8cb2 + pristine_git_object: 2799f41817ab0f7a22b49b4ff895c8308525953c + docs/models/agentsapiv1agentsgetagentversion.md: + id: 3316961b40c4 + last_write_checksum: sha1:e4f4c6a64b1c2ec9465b7ad008df4d7859098e59 + pristine_git_object: 7fb9f2d578c4901ca1b41aaada6acc3a5ee94fa1 + docs/models/agentsapiv1agentsgetrequest.md: + id: 01740ae62cff + last_write_checksum: sha1:bc86e90289ec09b40212083a82455b4fe71c7194 + pristine_git_object: ceffe0096ffd6db97a6018d34870c29cec4fb0d3 + docs/models/agentsapiv1agentsgetversionrequest.md: + id: 88ed22b85cde + last_write_checksum: sha1:0ef23807c8efa2662144da66745045abdd2cb60a + pristine_git_object: 96a7358943a69e871a2bb7f0f30d6fe2bb8dff3d + docs/models/agentsapiv1agentslistrequest.md: + id: c2720c209527 + last_write_checksum: sha1:99502da34d868f1563ad1e3ea256f3becdbefa11 + pristine_git_object: 4785a54c561f5f9e1eb7ffd3317c5faa9b8b56dd + docs/models/agentsapiv1agentslistversionaliasesrequest.md: + id: 69c8bce2c017 + last_write_checksum: sha1:4083fc80627b2cc04fd271df21393944730ef1ba + pristine_git_object: 3083bf92641404738948cd57306eac978b701551 + docs/models/agentsapiv1agentslistversionsrequest.md: + id: 0bc44ed8d6bb + last_write_checksum: sha1:315790552fc5b2b3a6c4f7be2eb33100133abe18 + pristine_git_object: 91831700bed92cb4f609f8c412dcb0ee98b544ca + docs/models/agentsapiv1agentsupdaterequest.md: + id: 7692812cd677 + last_write_checksum: sha1:aaccaa13eeb0d775b0c6a0b23c328d3f3c2c2dbf + pristine_git_object: 7ef60becfcdde09c8ce0366361306c5661d67e24 + docs/models/agentsapiv1agentsupdateversionrequest.md: + id: a001251b1624 + last_write_checksum: 
sha1:0ee9e0fc55fd969f2b8f2c55dec93bf10e0e5b2f + pristine_git_object: e937acc9b1d3f50eee69495b1305f7aee1c960ac + docs/models/agentsapiv1conversationsappendrequest.md: + id: 70f76380e810 + last_write_checksum: sha1:d428dc114b60362d269b5ae50a57ea60b9edee1a + pristine_git_object: ac8a00ecab30305de8eb8c7c08cda1b1c04148c3 + docs/models/agentsapiv1conversationsappendstreamrequest.md: + id: f6ada9a592c5 + last_write_checksum: sha1:8a806ca2e5bad75d9d0cf50726dc0d5b8e7e3eab + pristine_git_object: dbc330f11aa3039c9cea2dd7d477d56d5c4969d0 + docs/models/agentsapiv1conversationsdeleterequest.md: + id: c2c9f084ed93 + last_write_checksum: sha1:9ecca93f8123cebdd1f9e74cf0f4a104b46402a8 + pristine_git_object: c6eed281331cb4d2cac4470de5e04935d22eca5a + docs/models/agentsapiv1conversationsgetrequest.md: + id: d6acce23f92c + last_write_checksum: sha1:b5d5529b72c16293d3d9b5c45dcb2e3798405bcf + pristine_git_object: 67d450c88778cb27d7d0ba06d49d9f419840b32e + docs/models/agentsapiv1conversationshistoryrequest.md: + id: e3efc36ea8b5 + last_write_checksum: sha1:4155100eaed6d3b7410b3f4476f000d1879576be + pristine_git_object: 7e5d39e9a11ac437a24b8c059db56527fa93f8b0 + docs/models/agentsapiv1conversationslistrequest.md: + id: 406c3e92777a + last_write_checksum: sha1:d5c5effcf2ca32900678d20b667bdaf8ca908194 + pristine_git_object: 62c9011faf26b3a4268186f01caf98c186e7d5b4 + docs/models/agentsapiv1conversationslistresponse.md: + id: 394c37d2203f + last_write_checksum: sha1:1144f41f8a97daacfb75c11fdf3575e553cf0859 + pristine_git_object: b233ee203ff5da0c65d6e9f87b2925d6802d2c0a + docs/models/agentsapiv1conversationsmessagesrequest.md: + id: 2c749c6620d4 + last_write_checksum: sha1:781e526b030653dc189d94ca04cdc4742f9506d2 + pristine_git_object: a91ab0466d57379eacea9d475c72db9cb228a649 + docs/models/agentsapiv1conversationsrestartrequest.md: + id: 6955883f9a44 + last_write_checksum: sha1:99c1455c7fde9b82b6940e6e1ed4f363d7c38de9 + pristine_git_object: a18a41f5395adae3942573792c86ddf7c3812ff4 + 
docs/models/agentsapiv1conversationsrestartstreamrequest.md: + id: 0c39856fd70e + last_write_checksum: sha1:d03475c088c059077049270c69be01c67a17f178 + pristine_git_object: 7548286af5d1db51fbfd29c893eb8afdc3c97c4d docs/models/agentscompletionrequest.md: id: 906b82c214dc - last_write_checksum: sha1:84ee0378e413830260a279a67fc3b1342e643328 - pristine_git_object: d87dc7da67dd883f92a23d8df4f5648e97c4f12e + last_write_checksum: sha1:b5685a779b633823ccfe99d9740078e0aab50bde + pristine_git_object: 33435732b94c81c7bccff5cf1868b2f382223200 docs/models/agentscompletionrequestmessage.md: id: 5337f0644b40 last_write_checksum: sha1:ecf7b7cdf0d24a5e97b520366cf816b8731734bb @@ -130,8 +194,8 @@ trackedFiles: pristine_git_object: 63b9dca9fbb8d829f93d8327a77fbc385a846c76 docs/models/agentscompletionstreamrequest.md: id: 21d09756447b - last_write_checksum: sha1:0c88bc63255733480b65b61685dcc356fcc9ed66 - pristine_git_object: dd1804a1b3a2aadc3e3c3964262b0fc25195703f + last_write_checksum: sha1:9d506ac8f620f4cef54b4b7a1891fb17b8eaefa5 + pristine_git_object: 407be8e0c1264a31cc0d80c1059f3bd62c2eaceb docs/models/agentscompletionstreamrequestmessage.md: id: b309ade92081 last_write_checksum: sha1:98744c9646969250242cbbfbdf428dbd7030e4bb @@ -148,58 +212,34 @@ trackedFiles: id: 513b8b7bc0b7 last_write_checksum: sha1:9154d0ac6b0ab8970a10a8ad7716009d62e80ce7 pristine_git_object: 022f7e10edb22cb1b1d741c13ac586bd136d03b5 - docs/models/agentupdaterequest.md: - id: 75a7f820b906 - last_write_checksum: sha1:358e39130bc439f5801a2dcc73502a1f1c2c6685 - pristine_git_object: b1830d7be6cb8e33529246a3368deaf0909a3343 - docs/models/agentupdaterequesttool.md: - id: 9c9aac9dda3d - last_write_checksum: sha1:25d8a331a706bf8e6056b99f8ff1a46abff6ae72 - pristine_git_object: ce5531260e9b06db0b93d4bfcf95a12b627da522 docs/models/apiendpoint.md: id: be613fd9b947 last_write_checksum: sha1:4d984c11248f7da42c949164e69b53995d5942c4 pristine_git_object: 8d83a26f19241da5ce626ff9526575c50e5d27be - 
docs/models/appendconversationrequest.md: - id: 295b6d446690 - last_write_checksum: sha1:0c3d7091b19abf30fb0b78800cab292abd902c1d - pristine_git_object: 977d8e8b797c8ae36de4da90bc32bba47a6a0779 - docs/models/appendconversationstreamrequest.md: - id: aeea33736f95 - last_write_checksum: sha1:a0b5b036e46688e862c7f7671c86f965b5322742 - pristine_git_object: a23231c2c2f0017ba29c8863c3046aebe8f57ff1 - docs/models/archiveftmodelout.md: - id: 9e855deac0d1 - last_write_checksum: sha1:41866e666241ed42e5e7c6df5a64b887f1ff774b - pristine_git_object: 98fa7b19e4579198b433eccc76b2b4d990476b72 - docs/models/archivemodelrequest.md: - id: 3fde72a45ad9 - last_write_checksum: sha1:60eaa9be631215c63a2c01da7da809ec34f5b01a - pristine_git_object: 806d135e2bc6c0da2b20a4bb84107d3ab31962ad + docs/models/archivemodelresponse.md: + id: 133f4af8058f + last_write_checksum: sha1:95fa73ebd765cbd244c847218df6d31e18dc5e85 + pristine_git_object: 276656d1d00ca174e78aa9102f7f576575daa818 docs/models/arguments.md: id: 7ea5e33709a7 last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec pristine_git_object: 2e54e27e0ca97bee87918b2ae38cc6c335669a79 docs/models/assistantmessage.md: id: 7e0218023943 - last_write_checksum: sha1:e75d407349842b2de46ee3ca6250f9f51121cf38 - pristine_git_object: 3d0bd90b4433c1a919f917f4bcf2518927cdcd50 + last_write_checksum: sha1:47d5cd1a1bef9e398c12c207f5b3d8486d94f359 + pristine_git_object: 9ef638379aee1198742743800e778409c47a9b9d docs/models/assistantmessagecontent.md: id: 9f1795bbe642 last_write_checksum: sha1:1ce4066623a8d62d969e5ed3a088d73a9ba26643 pristine_git_object: 047b7cf95f4db203bf2c501680b73ca0562a122d - docs/models/assistantmessagerole.md: - id: bb5d2a4bc72f - last_write_checksum: sha1:82f2c4f469426bd476c1003a91394afb89cb7c91 - pristine_git_object: 658229e77eb6419391cf7941568164541c528387 docs/models/attributes.md: id: ececf40457de last_write_checksum: sha1:9f23adf16a682cc43346d157f7e971c596b416ef pristine_git_object: 
147708d9238e40e1cdb222beee15fbe8c1603050 docs/models/audiochunk.md: id: 88315a758fd4 - last_write_checksum: sha1:d52e493765280fc0b1df61a0ce1086205965c712 - pristine_git_object: 8a04af045f4ce33a2964f5f75664e82c3edf1bf3 + last_write_checksum: sha1:b47b295122cea28d66212d75a1f0eccd70a248cc + pristine_git_object: 1ba8b0f578fa94b4f8dddf559798e033a1704e7b docs/models/audioencoding.md: id: 1e0dfee9c2a0 last_write_checksum: sha1:5d47cfaca916d7a47adbea71748595b3ab69a478 @@ -210,8 +250,8 @@ trackedFiles: pristine_git_object: d174ab9959cadde659f76db94ed87c743e0f6783 docs/models/audiotranscriptionrequest.md: id: ebf59641bc84 - last_write_checksum: sha1:c55c97a06726812323a031897beffbb160021c05 - pristine_git_object: d7f5bd51b1289f0eb481d86a71bb483ee50bbc40 + last_write_checksum: sha1:a478d0656a0f69d4c426e548e2236b99730e2084 + pristine_git_object: 80bd53015ddee1bcecc7aeecc75152a19afc22c1 docs/models/audiotranscriptionrequeststream.md: id: 79b5f721b753 last_write_checksum: sha1:df6825c05b5a02dcf904ebaa40fb97e9186248cc @@ -224,18 +264,10 @@ trackedFiles: id: 8053e29a3f26 last_write_checksum: sha1:23a12dc2e95f92a7a3691bd65a1b05012c669f0f pristine_git_object: 95016cdc4c6225d23edc4436e11e4a7feacf1fe6 - docs/models/batchjobin.md: - id: 10f37fc761f1 - last_write_checksum: sha1:0acea471920959b7c85a015e557216c783de4e88 - pristine_git_object: 7dcf265dfe63cbbd13b7fa0e56fc62717f3ee050 - docs/models/batchjobout.md: - id: 49a98e5b2aba - last_write_checksum: sha1:b504fcf5a65567ec114fdc5b79cabe7554b36cac - pristine_git_object: 5f1011734b249a75cf9381d024f295fe31ff9f68 - docs/models/batchjobsout.md: - id: d8041dee5b90 - last_write_checksum: sha1:5e4127548b50abbb6cee267ac53a8e05f55b97f9 - pristine_git_object: 7a9d6f688e87851ed7ffa516523e12cb3f967c68 + docs/models/batchjob.md: + id: de2a00d0f739 + last_write_checksum: sha1:1160822c4032e1745dfaf37abcac02e78cbc4fb4 + pristine_git_object: 162e2cff3a1132f2b89e57dcf1bf8b4c403b6453 docs/models/batchjobstatus.md: id: 7e6f034d3c91 last_write_checksum: 
sha1:9e876b4b94255e1399bbb31feb51e08691bcb8fc @@ -248,22 +280,10 @@ trackedFiles: id: 9d14e972f08a last_write_checksum: sha1:1f32eb515e32c58685d0bdc15de09656194c508c pristine_git_object: f96f50444aaa23ca291db2fd0dc69db0d9d149d9 - docs/models/cancelbatchjobrequest.md: - id: db6860fe9ec3 - last_write_checksum: sha1:d2f55d5ffec21f6f70cc77c643c73113b0d1ed43 - pristine_git_object: f31f843bb864fc21ed620e4e069b8a97a091d99c - docs/models/cancelfinetuningjobrequest.md: - id: 10d341c56c9c - last_write_checksum: sha1:a484ad9d8eb791d60e5447b845b73871e9f1e6a3 - pristine_git_object: 6525788cd527eca4d89f95d4c829c1b3eda0f06e - docs/models/cancelfinetuningjobresponse.md: - id: 0c9ca281a898 - last_write_checksum: sha1:ac02c2a268a21430e74f8075671de0b97fd844e6 - pristine_git_object: c512342e575e9b6d57da08b20f50c86510d246d8 docs/models/chatclassificationrequest.md: id: 57b86771c870 - last_write_checksum: sha1:2ee5fff26c780ade7ed89617358befa93a6dfd23 - pristine_git_object: 910d62ae20fc67e9a3200397aeab95513bfed90f + last_write_checksum: sha1:bfd2fb8e2c83578ca0cea5209ea3f18c3bcd2ae5 + pristine_git_object: ba9c95eab2c1e4f080e39e8804a5de222e052ee6 docs/models/chatcompletionchoice.md: id: 0d15c59ab501 last_write_checksum: sha1:a6274a39a4239e054816d08517bf8507cb5c4564 @@ -274,8 +294,8 @@ trackedFiles: pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b docs/models/chatcompletionrequest.md: id: adffe90369d0 - last_write_checksum: sha1:f6eec11c908ee6581e508fff98e785441c4b84ad - pristine_git_object: f3abeeff4346c181cfca40eb819a8c6ecf656026 + last_write_checksum: sha1:4980b698006c641b1c84495c5b601cc8662b05f6 + pristine_git_object: 921161faf38b2f4d4648d6d744c08a96ed38f0a6 docs/models/chatcompletionrequestmessage.md: id: 3f5e170d418c last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 @@ -294,8 +314,8 @@ trackedFiles: pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b docs/models/chatcompletionstreamrequest.md: id: cf8f29558a68 - last_write_checksum: 
sha1:7ed921e0366c1b00225c05e60937fb8d228f027b - pristine_git_object: 42792d396462dead9d7a80a87f05a0888efe348b + last_write_checksum: sha1:c54d4a32d0d65533b79c381174690e9b735b2800 + pristine_git_object: 8761f000d4249de86265bc63da785cd807c2e7a5 docs/models/chatcompletionstreamrequestmessage.md: id: 053a98476cd2 last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 @@ -324,10 +344,10 @@ trackedFiles: id: aec173bca43b last_write_checksum: sha1:14ce49ace5845bc467fe1559b12374bfd36bc9a7 pristine_git_object: ff1c6ea32233d5c5e8d6292c62f9e8eacd3340c3 - docs/models/checkpointout.md: - id: 909ce66e1f65 - last_write_checksum: sha1:89e678d55b97353ad1c3b28d9f1ab101f6be0928 - pristine_git_object: 053592d2c57c43220bec3df27cc1486554178955 + docs/models/checkpoint.md: + id: 9c97119961cf + last_write_checksum: sha1:0e7732d9c30f67d59fe4d9ad1d165ad0cd80c790 + pristine_git_object: f7f35530c0d57aca02b2503e968a9a262bb1a10d docs/models/classificationrequest.md: id: 6f79e905a3fa last_write_checksum: sha1:3e083210e1cfdd3539e714928688648673767ae8 @@ -344,58 +364,54 @@ trackedFiles: id: 97a5eab5eb54 last_write_checksum: sha1:41269d1372be3523f46cb57bd19292af4971f7c0 pristine_git_object: f3b10727b023dd83a207d955b3d0f3cd4b7479a1 - docs/models/classifierdetailedjobout.md: - id: a2084ba5cc8c - last_write_checksum: sha1:ee206a5c68bd7aed201f8274d0710e8c570a35d2 - pristine_git_object: fb532449458fb445bb79d3fa0ed8e6faa538f00a - docs/models/classifierdetailedjoboutintegration.md: - id: 7a775cbd4d9f - last_write_checksum: sha1:6b2691766c1795d17b1572076a693eb377c5307f - pristine_git_object: 9dfa6e8a179529bd12fb8935c264e3c57c62cb41 - docs/models/classifierdetailedjoboutstatus.md: - id: a98493f9d02d - last_write_checksum: sha1:3441d9961e9093d314dd1bc88df1743cd12866d2 - pristine_git_object: c3118aafa8614f20c9adf331033e7822b6391752 - docs/models/classifierftmodelout.md: - id: 268ac482c38b - last_write_checksum: sha1:46bdbe1176bbf43dd79a4ff8255129fd82bd97bc - pristine_git_object: 
6e7afbbed075efe2e29f42b7bc3d758fe47460d4 - docs/models/classifierjobout.md: - id: 2e3498af3f8c - last_write_checksum: sha1:70845cc24cd48987552ca337ea5522066e6de1b9 - pristine_git_object: ceecef5decdbd74a9741401ad0f1a9e8e215ae82 - docs/models/classifierjoboutintegration.md: - id: 30a340fed57d - last_write_checksum: sha1:72dfda442a88f977f3480c95127534a600362806 - pristine_git_object: 33af8a708618c1e54c7f55e67c8848fe45217799 - docs/models/classifierjoboutstatus.md: - id: 2411c6bf3297 - last_write_checksum: sha1:6ceef218b783505231a0ec653292460e6cb1a65b - pristine_git_object: 4520f1648323227863f78f7f86b2b4567bb7ace7 - docs/models/classifiertargetin.md: - id: 90d2da204677 - last_write_checksum: sha1:18fca3deee476b3dd23d55a9a40ced96cdc21f83 - pristine_git_object: 78cab67b4ced9fd0139a1dc4e6b687de870f9c62 - docs/models/classifiertargetout.md: - id: 1ce5c0513022 - last_write_checksum: sha1:2b8ed8a25b6ea6f2717cb4edcfa3f6a1ff3e69e4 - pristine_git_object: 57535ae5cb7d30177d1800d3597fe2f6ec3ad024 + docs/models/classifierfinetunedmodel.md: + id: b67a370e0ef1 + last_write_checksum: sha1:5fe3c26e337083716dd823e861924a03c55ce293 + pristine_git_object: ad05f93147d6904ee62602480c24644ec5e4cf63 + docs/models/classifierfinetuningjob.md: + id: 5bf35c25183f + last_write_checksum: sha1:afedddfe38e217189b5ec12ded74606c3b1e4c59 + pristine_git_object: 369756ba16a4c64f03cb6bb5da9bc0abd2a8eac6 + docs/models/classifierfinetuningjobdetails.md: + id: c91d53e010d5 + last_write_checksum: sha1:59a4c11a0d52b02ffc48e011a40fb4ebb1604825 + pristine_git_object: c5efdf1c817b978506a4862991a0f8eab8b219fb + docs/models/classifierfinetuningjobdetailsintegration.md: + id: e6c161ac2a44 + last_write_checksum: sha1:6450686e7f92ac8c1c02fcea82d5855ca6738b46 + pristine_git_object: 438a35d9eb0e4250a9e6bcbb7dafeb26d74e018a + docs/models/classifierfinetuningjobdetailsstatus.md: + id: 87737e85b845 + last_write_checksum: sha1:2ff02df3efee0f9b5867045d43fc71025fb37129 + pristine_git_object: 
058c65832188f7148d96ab320114d984d618efa1 + docs/models/classifierfinetuningjobintegration.md: + id: 91de20176a8c + last_write_checksum: sha1:e49a7c082300eb4d3106e96b21ebc6860060b8c3 + pristine_git_object: 820aee4c6fcf899341d869d796b1a61d4d4eab42 + docs/models/classifierfinetuningjobstatus.md: + id: e3c4e672dc88 + last_write_checksum: sha1:1bfd306ab633d3ea73272e56796c1f63843fce22 + pristine_git_object: ca829885de056c5ccafec0fe3a901743e56deb0c + docs/models/classifiertarget.md: + id: 4c5c0b3e0bc7 + last_write_checksum: sha1:ad16823def0acb267543c4189df32406a27685aa + pristine_git_object: f8c99e2e7e6653d0e809506861ec4c25571cb5c9 + docs/models/classifiertargetresult.md: + id: c78d27aec276 + last_write_checksum: sha1:17c37c10385019953d6085fff6681808f950693f + pristine_git_object: ccadc623493bfa946dc2cccf894364b1e6b8b452 docs/models/classifiertrainingparameters.md: id: 9370e1ccd3d5 last_write_checksum: sha1:03f7c32717792966afdec50cb9dc1c85bb99dd84 pristine_git_object: 3b6f3be6942bbcf56261f773864a518d16923880 - docs/models/classifiertrainingparametersin.md: - id: 8bcca130af93 - last_write_checksum: sha1:7e9d61d3377031c740ea98d6c3dc65be99dc059b - pristine_git_object: 1287c973fae9762310597fbeceaef26865ace04f docs/models/codeinterpretertool.md: id: f009740c6e54 - last_write_checksum: sha1:bce278ce22703246613254ee2dac57f8b14e8060 - pristine_git_object: 544cda9358faf6ec525d06f78068817aee55b193 + last_write_checksum: sha1:a2114d61a98a48b4365a77c0c75c06ca834367ad + pristine_git_object: 6302fc627d7c49442b6c9aec19c70fdceaf7c519 docs/models/completionargs.md: id: 3b54534f9830 - last_write_checksum: sha1:c0368b7c21524228939b2093ff1a4524eb57aeb7 - pristine_git_object: 60d091374a80418892df9700dc0c21e7dad28775 + last_write_checksum: sha1:7432daccf23d8963a65fa4f2b103ea0396fbfbeb + pristine_git_object: 148f760859636e8c32259604698785663491a445 docs/models/completionargsstop.md: id: 40b0f0c81dc8 last_write_checksum: sha1:2a576618c62d4818af0048ed3a79080149a88642 @@ -404,46 +420,46 @@ 
trackedFiles: id: 60cb30423c60 last_write_checksum: sha1:61b976fe2e71236cf7941ee1635decc31bd304b2 pristine_git_object: 7f8ab5e631e2c6d1d9830325e591a7e434b83a35 - docs/models/completiondetailedjobout.md: - id: 634ca7241abd - last_write_checksum: sha1:7899568eedfa04cccb5b07c2e0d1e821af8fb0a2 - pristine_git_object: bc7e5d1cb5c298d0d935a9e3472ad547b5b9714c - docs/models/completiondetailedjoboutintegration.md: - id: f8d1f509f456 - last_write_checksum: sha1:3317db3f71962623a6144e3de0db20b4abfd5b9b - pristine_git_object: 9e526053160cc393dc65242cff8f8143bc67e38c - docs/models/completiondetailedjoboutrepository.md: - id: a8e7452065a7 - last_write_checksum: sha1:b1910efc6cd1e50391cd33daef004441bac3d3cd - pristine_git_object: 92a7b75c51f27e73ca41d5ffee28921057959878 - docs/models/completiondetailedjoboutstatus.md: - id: c606d38452e2 - last_write_checksum: sha1:1e9a5736de32a44cf539f7eaf8214aad72ec4994 - pristine_git_object: b80525bad8f6292892d8aee864a549c8ec52171c docs/models/completionevent.md: id: e57cd17cb9dc last_write_checksum: sha1:4f59c67af0b11c77b80d2b9c7aca36484d2be219 pristine_git_object: 7a66e8fee2bb0f1c58166177653893bb05b98f1d - docs/models/completionftmodelout.md: - id: 93fed66a5794 - last_write_checksum: sha1:ee4bccae36229f23b1db8894585cc8e88ad71f6d - pristine_git_object: ccd4844fab92d000de1cc9ba59c884e31dc5db26 - docs/models/completionjobout.md: - id: 77315b024171 - last_write_checksum: sha1:a08ca1dcedbb9b88b9909a4b03251e2fb0cd8319 - pristine_git_object: 5eb44eef73872b0f1c2709381fc0852e3b3e224b - docs/models/completionjoboutintegration.md: - id: 25e651dd8d58 - last_write_checksum: sha1:59711a3fa46d6a4bff787a61c81ecc34bdaaec2e - pristine_git_object: 6474747bf8d38485f13b1702e3245ef9e0f866a9 - docs/models/completionjoboutrepository.md: - id: 2c94b3ecacf1 - last_write_checksum: sha1:2cb5b23640eeaf87f45dc9f180247ed7a6307df7 - pristine_git_object: 52f65558f8b3663596642d8854df36d29858beae - docs/models/completionjoboutstatus.md: - id: b77ebfd0e4f0 - 
last_write_checksum: sha1:b8f33134c63b12dc474e7714b1ac19d768a3cbbd - pristine_git_object: 917549450a096397d9a7ca0b8f5856f7cd62db04 + docs/models/completionfinetunedmodel.md: + id: 23a7705a9c89 + last_write_checksum: sha1:50d173b7505a97435c9d7ccb4fa99af04a51c6a2 + pristine_git_object: 0055db021f1c039c84cf7cfecd654683d2f9996f + docs/models/completionfinetuningjob.md: + id: 13c69dd18690 + last_write_checksum: sha1:b77e82f00f851034999986ff67aea5b0b558fbd2 + pristine_git_object: 83c0ae7e551e1f70df8dad4dce75ad20fe2b7ae7 + docs/models/completionfinetuningjobdetails.md: + id: b285f80afd59 + last_write_checksum: sha1:6ced5483d8249d7e8f502ec3f53f45d76e348003 + pristine_git_object: 3c54e874bcd968a9d5d9c8b3285632ba71364763 + docs/models/completionfinetuningjobdetailsintegration.md: + id: 27662795c95f + last_write_checksum: sha1:655f03341ad1b590ec451288607cec61024bfefc + pristine_git_object: 38f6a34963db4a653ec7aa7f0c85b68e837ebafc + docs/models/completionfinetuningjobdetailsrepository.md: + id: 023920eecc9e + last_write_checksum: sha1:2b8ba6ff115fda4cc6ed74825fb09b9500d915f6 + pristine_git_object: c6bd67cde1d1628aa3efc4a53fa8487a009aa129 + docs/models/completionfinetuningjobdetailsstatus.md: + id: b1b717a4e256 + last_write_checksum: sha1:97c8699f0979978ea4320da3388e18da6219cb87 + pristine_git_object: 94d795a9ba4ec743f274d4ab5666e8897d174c61 + docs/models/completionfinetuningjobintegration.md: + id: 392ffc2cdef2 + last_write_checksum: sha1:53540da44e0edbad5d4085f81ded159dbc582a6c + pristine_git_object: dbe57417d78f1de798c6eaea7e56984e3b002cb9 + docs/models/completionfinetuningjobrepository.md: + id: deb47b72e8e4 + last_write_checksum: sha1:c0fd43a01c2f763c7945311741ee3c2b9c7520f6 + pristine_git_object: 54225e27204b703a6b33d2d66492e272559c3b3c + docs/models/completionfinetuningjobstatus.md: + id: 2ac420312815 + last_write_checksum: sha1:90f498cb04e89e8f4a424762c07231fd9030b326 + pristine_git_object: db151a1bd871a2bf231424a78c8c450b2a488099 
docs/models/completionresponsestreamchoice.md: id: d56824d615a6 last_write_checksum: sha1:0296a490df009dbfd04893fdebcc88dd6102a872 @@ -456,26 +472,26 @@ trackedFiles: id: b716b0195d39 last_write_checksum: sha1:1d8d7c469f933ea741ec15c8b9ef8b986e0ca95e pristine_git_object: 4746a95df18c78331f572425a16b2b3dcbc2df4c - docs/models/completiontrainingparametersin.md: - id: 7223a57004ab - last_write_checksum: sha1:8f77e5fe2ce149115b0bda372c57fafa931abd90 - pristine_git_object: 9fcc714e5f000e6134f7f03f1dd4f56956323385 + docs/models/confirmation.md: + id: 19b9e48a3c2e + last_write_checksum: sha1:eb6494cb19f23c6df62afb009cc88ce38d24af86 + pristine_git_object: fd6e6aaa58cabba0cdec1b76ac50fb6e46f91b07 docs/models/contentchunk.md: id: d2d3a32080cd last_write_checksum: sha1:b253e4b802adb5b66d896bfc6245ac4d21a0c67c pristine_git_object: cb7e51d3a6e05f197fceff4a4999594f3e340dac docs/models/conversationappendrequest.md: id: 722746e5065c - last_write_checksum: sha1:1677ab5b06748a7650464c0d7596e66e6759ede2 - pristine_git_object: 1cdb584b62423072f9a7cdc61f045b0d161525df + last_write_checksum: sha1:c8a4a49f0a1fe5cdd2ef6264ef9c600cfc8f7beb + pristine_git_object: 78a96508e4e1c6f83de4556d0bfa3b10c875da37 docs/models/conversationappendrequesthandoffexecution.md: id: e3f56d558037 last_write_checksum: sha1:dc71c8db746bb08f6630e995cf6af9fda747e954 pristine_git_object: 7418b36a55fab959639aec456a946600eb908efb docs/models/conversationappendstreamrequest.md: id: e9f8131435e8 - last_write_checksum: sha1:559d90bbf6d64f46221edaa6482837f0ee3b0626 - pristine_git_object: a8516ea7fc7db1d6bc0abb8f99b967a1715ceb4b + last_write_checksum: sha1:3afe7eaafbf61abcd9341ee8fbca5c6d0c2db0ab + pristine_git_object: daea9c522a8a0693edce11b1bbeca1f2cba0781e docs/models/conversationappendstreamrequesthandoffexecution.md: id: 5739ea777905 last_write_checksum: sha1:c85584b63c0c5d859ee5d46d6ae167a8ee44e279 @@ -490,24 +506,16 @@ trackedFiles: pristine_git_object: 5452d7d5ce2aa59a6d89c7b7363290e91ed8a0a3 
docs/models/conversationhistory.md: id: 7e97e8e6d6e9 - last_write_checksum: sha1:06df76a87aca7c5acd5a28ca3306be09a8bb541b - pristine_git_object: c8baad0b597ddb9148413a651a659b06c20351ac - docs/models/conversationhistoryobject.md: - id: 088f7df6b658 - last_write_checksum: sha1:bcce4ef55e6e556f3c10f65e860faaedc8eb0671 - pristine_git_object: a14e7f9c7a392f0d98e79cff9cc3ea54f30146fa + last_write_checksum: sha1:719a7c0722f3ad2e9f428dd31abf7bd0bad197d2 + pristine_git_object: daefe3363fb57d9a7d2737d3ea3d6e6f61021d49 docs/models/conversationinputs.md: id: 23e3160b457d last_write_checksum: sha1:0c6abaa34575ee0eb22f12606de3eab7f4b7fbaf pristine_git_object: 86db40ea1390e84c10a31155b3cde9066eac23b0 docs/models/conversationmessages.md: id: 46684ffdf874 - last_write_checksum: sha1:01ccdc4b509d5f46ff185f686d332587e25fc5b7 - pristine_git_object: c3f00979b748ad83246a3824bb9be462895eafd6 - docs/models/conversationmessagesobject.md: - id: b1833c3c20e4 - last_write_checksum: sha1:bb91a6e2c89066299660375e5e18381d0df5a7ff - pristine_git_object: db3a441bde0d086bccda4814ddfbf737539681a6 + last_write_checksum: sha1:5b10a9f3f19591a2675979c21dd8383d5249d728 + pristine_git_object: 8fa51571697ee375bfbc708de854bc0b1129eec7 docs/models/conversationrequest.md: id: dd7f4d6807f2 last_write_checksum: sha1:e4da423f9eb7a8a5d0c21948b50e8df08a63552c @@ -526,16 +534,12 @@ trackedFiles: pristine_git_object: 2e4e8d01b5482c4e0644be52e55bf6912aeff69e docs/models/conversationresponse.md: id: 2eccf42d48af - last_write_checksum: sha1:17ebabdf1dd191eeac442046511c44120dfa97a1 - pristine_git_object: e31821288dd18bf425e442787f67a69ea35ff6a6 - docs/models/conversationresponseobject.md: - id: 6c028b455297 - last_write_checksum: sha1:76270a07b86b1a973b28106f2a11673d082a385b - pristine_git_object: bea66e5277feca4358dd6447959ca945eff2171a + last_write_checksum: sha1:8a86a4d0df6d13b121d5e41a8ee45555b69bf927 + pristine_git_object: 2732f785cdd706274ec5ff383f25fc201e6d0f78 docs/models/conversationrestartrequest.md: id: 
558e9daa00bd - last_write_checksum: sha1:0e33f56f69313b9111b3394ecca693871d48acfa - pristine_git_object: d98653127fd522e35323b310d2342ccc08927962 + last_write_checksum: sha1:434e6c94b5d6c37b9026d536308cd1d3ff56e8d6 + pristine_git_object: ad3ff3624f533e4d4f751264d9bc6dd1849b3b69 docs/models/conversationrestartrequestagentversion.md: id: e6ea289c6b23 last_write_checksum: sha1:a5abf95a81b7e080bd3cadf65c2db38ca458573f @@ -546,8 +550,8 @@ trackedFiles: pristine_git_object: 5790624b82ce47ea99e5c25c825fbc25145bfb8e docs/models/conversationrestartstreamrequest.md: id: 01b92ab1b56d - last_write_checksum: sha1:aa3d30800417e04f741324d60529f3190ea9cd16 - pristine_git_object: a5f8cbe73ed1ce28c82d76f0e9f933bda64f733c + last_write_checksum: sha1:e9755598b5be197a938f1f74aa77ac24ccac8457 + pristine_git_object: 865a1e8f666d7f6878c40eb70fe5ab1c63da3066 docs/models/conversationrestartstreamrequestagentversion.md: id: 395265f34ff6 last_write_checksum: sha1:ebf4e89a478ab40e1f8cd3f9a000e179426bda47 @@ -572,58 +576,62 @@ trackedFiles: id: 71df6212ff44 last_write_checksum: sha1:f2882742a74dd2b4f74383efa444c7ab968249dc pristine_git_object: 0f75f82b38f224340bed468ceecfe622066740ba + docs/models/conversationthinkchunk.md: + id: b9a8324da8f1 + last_write_checksum: sha1:80aed188198434ceca134e7aa7351ddba82c92c9 + pristine_git_object: 1fb16bd99f2b6277f87cd40d5c1eca389819d725 + docs/models/conversationthinkchunkthinking.md: + id: 477db2d543bd + last_write_checksum: sha1:d9f8c37fe933a3e52e2adb3ffe283d79c187cd36 + pristine_git_object: 84b800188b248166aac0043994fa27d4d79aad9d docs/models/conversationusageinfo.md: id: 57ef89d3ab83 last_write_checksum: sha1:d92408ad37d7261b0f83588e6216871074a50225 pristine_git_object: 57e260335959c605a0b9b4eaa8bf1f8272f73ae0 - docs/models/createfinetuningjobresponse.md: - id: a9d31306296c - last_write_checksum: sha1:a15ccee66983fcc23321f966440d02fab4463178 - pristine_git_object: f82cd793b466b0028b586781d36c690c0e5f97cd - docs/models/createorupdateagentaliasrequest.md: 
- id: be33079aa124 - last_write_checksum: sha1:84cb72c549ee74c44dcf00b3f6a100060e322295 - pristine_git_object: af2591ebb584965f5110ed987993f3a72b513255 - docs/models/deleteagentaliasrequest.md: - id: c116b5c42b1b - last_write_checksum: sha1:51e1544cc867389120a2d1fbb4780c855690841e - pristine_git_object: 17812ec4a03b452a2d31950cc5a9e87a8f6d79f7 - docs/models/deleteagentrequest.md: - id: 6411b6df1c85 - last_write_checksum: sha1:1157d4717b75be91744bd7464c042e367faa4b71 - pristine_git_object: 0aaacae471dd81ddc5ce4808abdd2b5653503ff6 - docs/models/deleteconversationrequest.md: - id: 7247871c454c - last_write_checksum: sha1:a43ed3e32630fbb41921fa413ab2a26a914e425e - pristine_git_object: 39d9e5dfd52d9df1d1da7093761b65e0d12a0b40 - docs/models/deletedocumentrequest.md: - id: 898eebfc019e - last_write_checksum: sha1:f06a13be4484048cf15c21d46eb2d107057b39db - pristine_git_object: eb060099f1b078fd084551338b51ee6677e8d235 - docs/models/deletefileout.md: - id: c7b84242a45c - last_write_checksum: sha1:f2b039ab88fc83ec5dd765cab8e2ed8cce7e417d - pristine_git_object: 4709cc4958d008dc24430deb597f801b91c6957f - docs/models/deletefilerequest.md: - id: ca151d3da83a - last_write_checksum: sha1:ec50f13b099a6ef28d7965f7c8721ce1f505f7d2 - pristine_git_object: bceae901954471a8667a3a61e66da6361ef50190 - docs/models/deletelibraryaccessrequest.md: - id: ca39ae894c1f - last_write_checksum: sha1:41b7cd5c2e4616d3edefeb271dd7089fa04bd67d - pristine_git_object: c7034b98c30234a0a8cb368d84d9b287690027de - docs/models/deletelibraryrequest.md: - id: 4be1af37ab41 - last_write_checksum: sha1:2769939a702c26be619f6c455cd48365b64110cc - pristine_git_object: c229ad73b2a7c39dab0ccdfa29e1f0475f0cdc7b + docs/models/createagentrequest.md: + id: 9484bab389c1 + last_write_checksum: sha1:b3228a622081b6f4b2a8bdaa60ca16049517d819 + pristine_git_object: cca3a079c532d3426f65a15bb0affdd34fd1d3ac + docs/models/createagentrequesttool.md: + id: 72e5f99878c5 + last_write_checksum: 
sha1:a90ad01c15da321f0c8ec700ba359a5371c5dcbb + pristine_git_object: c6ed3e98566eb684932fae9d2648a85c84443493 + docs/models/createbatchjobrequest.md: + id: e79afe8f495c + last_write_checksum: sha1:6cedce49f3108b9d5bc80e6d11712c594f2d9e50 + pristine_git_object: d094e2d518b31ada68c282241af3aa1483e98ff6 + docs/models/createfileresponse.md: + id: ea1396cebae8 + last_write_checksum: sha1:7b26d0a466004aca5cefaeb29f84dafc405c51ff + pristine_git_object: 8152922b0d4ce199e269df955e5a25d4acf71e28 + docs/models/createfinetuningjobrequest.md: + id: 36824ba035ff + last_write_checksum: sha1:78f019530e9f5deace91c454c91ec6c4d0d23a20 + pristine_git_object: a93e323d5dd474c6d287e1203e85b79d11d762f0 + docs/models/createfinetuningjobrequestintegration.md: + id: e41b5575b494 + last_write_checksum: sha1:06dab95269f4a571a4c62a7f956fbf0250a0e8b3 + pristine_git_object: 0054a4a683a88fe67f92c1659bcb8c792ca8d286 + docs/models/createfinetuningjobrequestrepository.md: + id: e113eb1929b5 + last_write_checksum: sha1:6bd504d3ecb219f3245a83d306c1792133b96769 + pristine_git_object: 32be1b6dc3fcf7f6ee1a1d71abee4c81493655c2 + docs/models/createlibraryrequest.md: + id: 8935b2ed9d13 + last_write_checksum: sha1:c00abfe1abb0f0323e434b084dafa0d451eb3e51 + pristine_git_object: 71562806dbec6444dcdd0a19852a31ca00b6229a + docs/models/deletefileresponse.md: + id: ab3aa44589a0 + last_write_checksum: sha1:47ebc2474e4725e9ecb0f0d5940c604d9a82a4df + pristine_git_object: 188e2504606b051674352339c6aa999116a43b61 docs/models/deletemodelout.md: id: 5643e76768d5 last_write_checksum: sha1:1593c64f7673e59b7ef1f4ae9f5f6b556dd6a269 pristine_git_object: 5fd4df7a7013dcd4f6489ad29cdc664714d32efd - docs/models/deletemodelrequest.md: - id: 22c414d48ee4 - last_write_checksum: sha1:a60f549577b3461cb7552ad2080a34ad389f8579 - pristine_git_object: d80103f1610668292589b6d7b861de814c17afda + docs/models/deletemodelv1modelsmodeliddeleterequest.md: + id: c838cee0f093 + last_write_checksum: sha1:e5b6d18b4f8ab91630ae34a4f50f01e536e08d99 + 
pristine_git_object: d9bc15fe393388f7d0c41abce97ead17e35e2ba4 docs/models/deltamessage.md: id: 6c5ed6b60968 last_write_checksum: sha1:00052476b9b2474dbc149f18dd18c71c86d0fc74 @@ -634,40 +642,28 @@ trackedFiles: pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 docs/models/document.md: id: cd1d2a444370 - last_write_checksum: sha1:d00a2ac808a0ae83a7b97da87e647ecc8dca9c52 - pristine_git_object: 509d43b733d68d462853d9eb52fc913c855dff40 + last_write_checksum: sha1:77076e66dea6f4582e73ecc5a55ef750f026448a + pristine_git_object: 284babb98fbb0279bef2626fa18eada0035572c5 docs/models/documentlibrarytool.md: id: 68083b0ef8f3 - last_write_checksum: sha1:470b969fa4983c0e7ad3d513b4b7a4fa8d5f0f41 - pristine_git_object: 1695bad40cb0a1eb269e4ee12c6a81cbf0c7749a - docs/models/documentout.md: - id: a69fd1f47711 - last_write_checksum: sha1:ed446078e7194a0e44e21ab1af958d6a83597edb - pristine_git_object: 28df11eb1aef1fdaf3c1103b5d61549fb32ea85d + last_write_checksum: sha1:76b9f47c399915a338abe929cb10c1b37282eadf + pristine_git_object: 95c3fa52ee3ff29e72bc0240a98c0afaa0cd5f62 docs/models/documenttextcontent.md: id: 29587399f346 last_write_checksum: sha1:93382da0228027a02501abbcf681f247814d3d68 pristine_git_object: 989f49e9bcb29f4127cb11df683c76993f14eba8 - docs/models/documentupdatein.md: - id: 185ab27259a7 - last_write_checksum: sha1:e0faccd04229204968dbc4e8131ee72f81288182 - pristine_git_object: 0993886d56868aba6844824f0e0fdf1bdb9d74f6 + docs/models/documentunion.md: + id: c65f9e42375c + last_write_checksum: sha1:249043e03067f79b27dc6eac410fb937920e8cdb + pristine_git_object: e573bd4632493ca648ad61307c70148366625d4b docs/models/documentupload.md: id: 7ff809a25eb0 last_write_checksum: sha1:aea0f81009be09b153019abbc01b2918a1ecc1f9 pristine_git_object: 4e58a475f1776431c9c27a0fcdd00dd96257801f docs/models/documenturlchunk.md: id: 48437d297408 - last_write_checksum: sha1:38c3e2ad5353a4632bd827f00419c5d8eb2def54 - pristine_git_object: 6c9a5b4d9e6769be242b27ef0208f6af704689c0 - 
docs/models/documenturlchunktype.md: - id: a3574c91f539 - last_write_checksum: sha1:a0134fc0ea822d55b1204ee71140f2aa9d8dbe9c - pristine_git_object: 32e1fa9e975a3633fb49057b38b0ea0206b2d8ef - docs/models/downloadfilerequest.md: - id: 5acd7aafd454 - last_write_checksum: sha1:5d7056818ddc5860e43699917496ded68b91ddfa - pristine_git_object: 3f4dc6ccc6d1c67396fe97197067c5421d8dc2d5 + last_write_checksum: sha1:5f9294355929d66834c52c67990ba36a7f81387d + pristine_git_object: 9dbfbe5074de81b9fcf6f5bae8a0423fb2c82f71 docs/models/embeddingdtype.md: id: 22786e732e28 last_write_checksum: sha1:dbd16968cdecf706c890769d8d1557298f41ef71 @@ -700,10 +696,10 @@ trackedFiles: id: da9a99ab48ab last_write_checksum: sha1:4971db390327db09f88feff5d2b8a0b1e6c5b933 pristine_git_object: d934b6774b25713afe923154d7709755426ec2cf - docs/models/eventout.md: - id: 9960732c3718 - last_write_checksum: sha1:dbc23814b2e54ded4aa014d63510b3a2a3259329 - pristine_git_object: d9202353be984d51b9c05fb0f490053ce6ccfe4a + docs/models/event.md: + id: 311c22a8574a + last_write_checksum: sha1:627793d6aed5e378e3f2eeb4087808eb50e948d5 + pristine_git_object: 3eebffca874b8614a5be3d75be3cb7b0e52c2339 docs/models/file.md: id: 4ad31355bd1c last_write_checksum: sha1:ade4d3c908c664a07a3c333cc24bc1bfb43ab88b @@ -716,14 +712,30 @@ trackedFiles: id: ed6216584490 last_write_checksum: sha1:02767595f85228f7bfcf359f8384b8263580d53a pristine_git_object: 14cab13ee191ae60e2c5e1e336d0a5abc13f778b + docs/models/filesapiroutesdeletefilerequest.md: + id: 7fdf9a97320b + last_write_checksum: sha1:411e38d0e08a499049796d1557f79d669fc65107 + pristine_git_object: 1b02c2dbb7b3ced86ddb49c2323d1d88732b480c + docs/models/filesapiroutesdownloadfilerequest.md: + id: b9c13bb26345 + last_write_checksum: sha1:1f41dad5ba9bd63881de04d24ef49a0650d30421 + pristine_git_object: 8b28cb0e5c60ac9676656624eb3c2c6fdc8a3e88 + docs/models/filesapiroutesgetsignedurlrequest.md: + id: 08f3772db370 + last_write_checksum: sha1:26aa0140444ccef7307ef6f236932032e4784e8f + 
pristine_git_object: dbe3c801003c7bb8616f0c5be2dac2ab1e7e9fb1 + docs/models/filesapirouteslistfilesrequest.md: + id: 04bdf7c654bd + last_write_checksum: sha1:0a99755150c2ded8e5d59a96527021d29326b980 + pristine_git_object: 57d11722f1dba2640df97c22be2a91317c240608 + docs/models/filesapiroutesretrievefilerequest.md: + id: 2783bfd9c4b9 + last_write_checksum: sha1:a1249ef0aedb3056e613078488832c96b91f8cab + pristine_git_object: 961bae1f51a4ae9df21b28fd7a5ca91dc7b3888b docs/models/fileschema.md: id: 9a05a660399d last_write_checksum: sha1:97987d64285ff3092635754c78ad7b68d863e197 pristine_git_object: 4f3e72dba17a964155007755ad9d69f0304b2adb - docs/models/filesignedurl.md: - id: c0a57176d62e - last_write_checksum: sha1:2c64ef5abc75e617496f0a28d3e1cebfe269a6b9 - pristine_git_object: 52ce3f4f0c44df0ef3ed1918f92ad63f76ffc144 docs/models/fimcompletionrequest.md: id: b44677ecc293 last_write_checksum: sha1:24bcb54d39b3fabd487549a27b4c0a65dd5ffe50 @@ -748,6 +760,10 @@ trackedFiles: id: e16926b57814 last_write_checksum: sha1:52006811b756ff5af865ed6f74838d0903f0ee52 pristine_git_object: 34b24bd4db1ad3f9e77e2c6a45a41d2fbc5cf7fd + docs/models/finetunedmodelcapabilities.md: + id: 3a6a0498ccf7 + last_write_checksum: sha1:82fc7d3f4e0b591b757f202699bb645bc61c69ff + pristine_git_object: d3203a2adccb7eb89c58395952c3e5a123a5b31b docs/models/format_.md: id: a17c22228eda last_write_checksum: sha1:dad6de59fec6378d50356007602e2a0254d8d2e4 @@ -756,10 +772,6 @@ trackedFiles: id: b546cfde5aa6 last_write_checksum: sha1:752d9d238a90a3ef55205576fa38cee56ea1539e pristine_git_object: 919cdd384315c99d4b590bc562298403733344ce - docs/models/ftmodelcapabilitiesout.md: - id: f7be0dd1d889 - last_write_checksum: sha1:670412a0c0268f646dd444537bd79ce9440170c8 - pristine_git_object: 19690476c64ac7be53f974347c1618730f0013ce docs/models/ftmodelcard.md: id: 15ed6f94deea last_write_checksum: sha1:1c560ceaaacc1d109b2997c36de03192dfcda941 @@ -774,140 +786,72 @@ trackedFiles: pristine_git_object: 
7ccd90dca4868db9b6e178712f95d375210013c8 docs/models/functioncallentry.md: id: 016986b7d6b0 - last_write_checksum: sha1:bd3e67aea9eb4f70064e67e00385966d44f73f24 - pristine_git_object: fd3aa5c575019d08db258842262e8814e57dc6d5 + last_write_checksum: sha1:373eb3a2d72596fcbb8933b28426896d5ac6b6f4 + pristine_git_object: 2843db9d36d8b82a15ebfce0833c8b0832609b4a docs/models/functioncallentryarguments.md: id: c4c609e52680 last_write_checksum: sha1:ae88aa697e33d60f351a30052aa3d6e2a8a3e188 pristine_git_object: f1f6e39e724673556a57059a4dbda24f31a4d4b9 - docs/models/functioncallentryobject.md: - id: ea634770754e - last_write_checksum: sha1:d6bc885e9689397d4801b76c1a3c8751a75cf212 - pristine_git_object: 3cf2e427bfb6f2bc7acea1e0c6aafe965187f63f - docs/models/functioncallentrytype.md: - id: b99da15c307b - last_write_checksum: sha1:04665a6718ad5990b3beda7316d55120fbe471b0 - pristine_git_object: 7ea34c5206bdf205d74d8d49c87ddee5607582e9 + docs/models/functioncallentryconfirmationstatus.md: + id: 18f36160d744 + last_write_checksum: sha1:cc3ea4e03d26a1b22f94d42a87bd5ae63535d266 + pristine_git_object: 8948beb6d9ac647ada655960284dfc7f6d1f5ca1 docs/models/functioncallevent.md: id: cc9f2e603464 - last_write_checksum: sha1:942d1bed0778ba4738993fcdbefe080934b641d5 - pristine_git_object: f406206086afa37cbc59aa551ac17a4814dddf7e + last_write_checksum: sha1:58c6ee00af0c63614fd7506345977f9f2d8838ec + pristine_git_object: 0e3a36d6045a69e96c40836cdb586424225775af + docs/models/functioncalleventconfirmationstatus.md: + id: a33cc7957407 + last_write_checksum: sha1:36ac2d3442d83cbb1256e86f413134296bf8e90f + pristine_git_object: 4a3c8774d4eec4e1f5fea23a1827082e09f91669 docs/models/functionname.md: id: 4b3bd62c0f26 last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 docs/models/functionresultentry.md: id: 24d4cb18998c - last_write_checksum: sha1:528cae03e09e43bdf13e1a3fef64fd9ed334319b - pristine_git_object: 
6df54d3d15e6d4a03e9af47335829f01a2226108 - docs/models/functionresultentryobject.md: - id: 025dc546525c - last_write_checksum: sha1:01a0085fb99253582383dd3b12a14d19c803c33c - pristine_git_object: fe52e0a5a848ea09dfb4913dd8d2e9f988f29de7 - docs/models/functionresultentrytype.md: - id: 69651967bdee - last_write_checksum: sha1:41489b0f727a00d86b313b8aefec85b4c30c7602 - pristine_git_object: 35c94d8e553e1cb641bef28fec2d8b3576d142f6 + last_write_checksum: sha1:1758992e30517b505b8d0622a54545dc9ae19163 + pristine_git_object: 6a77abfd27e3e46de950646d7f89777dca11300e docs/models/functiontool.md: id: 5fb499088cdf last_write_checksum: sha1:a9a3b6530b1c48a8575402b48cde7b65efb33a7d pristine_git_object: 0226b7045c9d82186e1111bb2025e96a4de90bd6 - docs/models/getagentagentversion.md: - id: 825de6d2614f - last_write_checksum: sha1:d99f384ff5ee73e68fa7f8581d7622068b5b7498 - pristine_git_object: 6d7b3f1d15994c24a5b992d1908fe8126da0e3ea - docs/models/getagentrequest.md: - id: 743f3a4630be - last_write_checksum: sha1:4d17d6b7b15e39520414085fc977be881e4e0a85 - pristine_git_object: 3f729dff0f7fc773f83593222da0dd0618b3e8b3 - docs/models/getagentversionrequest.md: - id: 4bf5feb4494a - last_write_checksum: sha1:d26546c2fdd78e0f52e2a2c50736b412ce814f6e - pristine_git_object: c98fee9d141f556520e16189e90234063e6861eb - docs/models/getbatchjobrequest.md: - id: 0c3a5debd663 - last_write_checksum: sha1:c186bbc6b04e1ed2db32f68fb22cb7eff4c1a90c - pristine_git_object: f3c67eb4a898a21e8a78c3340171458dcbd21d58 - docs/models/getconversationhistoryrequest.md: - id: 27de0e44ed80 - last_write_checksum: sha1:d89318332c87b5fa3bba22a52e209bdd5702b3f0 - pristine_git_object: fc90282bd9308a7531c3c532234fd332a223f243 - docs/models/getconversationmessagesrequest.md: - id: 82bf9b5c275b - last_write_checksum: sha1:cdbb0371c7a35e84f7938d28719acd843ebc15ce - pristine_git_object: fd037fea6c09d97bfb74332838a2b2760de4dccb - docs/models/getconversationrequest.md: - id: ad6c903380f6 - last_write_checksum: 
sha1:ee93a91d5daa01fc937dd09589b268bb2e42868a - pristine_git_object: 8a66a8b032cb67503c0f6b95c98e0a40b13d16ec - docs/models/getdocumentextractedtextsignedurlrequest.md: - id: d47f32212cf5 - last_write_checksum: sha1:7d695630988d5ab3773aabfe17c3fa9177d7e9c9 - pristine_git_object: ff703802ddfe0e36768daf87f4c5626028642370 - docs/models/getdocumentrequest.md: - id: 4208f9b571b3 - last_write_checksum: sha1:45f6807e2f7cd4c30f95304172cb556896571b76 - pristine_git_object: 29f62127b09511f14a065b9b6f6068e63643ab7c - docs/models/getdocumentsignedurlrequest.md: - id: 734960a10101 - last_write_checksum: sha1:04debc445e51e7d0f922bfe7873d639a844c17b4 - pristine_git_object: 72a179c086e38650afd81165575c7926d9566f69 - docs/models/getdocumentstatusrequest.md: - id: d0a69468ea34 - last_write_checksum: sha1:a8d91948737e4fa392221ec18970d27af90c203e - pristine_git_object: 3557d7738be21206061ef5806b79118432b33f26 - docs/models/getdocumenttextcontentrequest.md: - id: 6baa6485417b - last_write_checksum: sha1:5b47d1d8d5675e4b9f477c8034ef64afc912cd06 - pristine_git_object: 8593340139f28b44dfed455849198f5d5a457643 - docs/models/getfilesignedurlrequest.md: - id: c7b1953174af - last_write_checksum: sha1:d558115d1611827f461cc6a9f373885271c7a51d - pristine_git_object: 0be3b2888b0680d5a5fac0057cedc279d112ddb8 - docs/models/getfinetuningjobrequest.md: - id: c18796fe85f3 - last_write_checksum: sha1:8166520e2d657098131fd77c81a86099ed4d3486 - pristine_git_object: f20cb2148330c7078c6e93f55aa99f1b09086eaf - docs/models/getfinetuningjobresponse.md: - id: 8f50d4a61ae1 - last_write_checksum: sha1:509e8d190b43b5a4a3e0ae7d97bf2b4262fcd1f8 - pristine_git_object: 1b0568dd8019879ec2e1d0ff039296f600415e21 - docs/models/getlibraryrequest.md: - id: 9c9a9e6c4f03 - last_write_checksum: sha1:822494a821ee3a51a477f305c140ed39cd6465fc - pristine_git_object: 2a3acf50a6300ea3bcbc3b8432fe28cbef82c620 + docs/models/getfileresponse.md: + id: a983b3c8acd6 + last_write_checksum: sha1:5ca732ae5b384937473c04de6736fbab34deca24 + 
pristine_git_object: 0edd13e0818fc70c9c4db1e08b1490c1e146ea63 + docs/models/getsignedurlresponse.md: + id: 5539e5d7c3d4 + last_write_checksum: sha1:7198474f48bfba6d47326cd436e4a00a8ba70ce3 + pristine_git_object: bde693236406fe092f48c315e3b68a2fbbe6f9a4 + docs/models/githubrepository.md: + id: 66c120df624b + last_write_checksum: sha1:045e538dd7faffc1c6c6e6816563c5c8e776a276 + pristine_git_object: 827b6f34ae68ace7b8b4811764f59de2e0fcdd22 docs/models/githubrepositoryin.md: id: b42209ef8423 last_write_checksum: sha1:5ab33fc1b0b5513086b1cae07f416d502441db23 pristine_git_object: 241cf584d5e2425e46e065f47a18bea50fa624db - docs/models/githubrepositoryout.md: - id: 0ca86e122722 - last_write_checksum: sha1:0e3999cef8a745ae24ac36907b3431bc5103ea6f - pristine_git_object: fe38393a0cc2eb5c0b0c4690cb0c4e5e3ec41df8 - docs/models/httpvalidationerror.md: - id: a211c095f2ac - last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e - pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/hyperparameters.md: id: c167bad5b302 - last_write_checksum: sha1:5b7f76360dea58be5350bbe074482da45e57599c - pristine_git_object: 46a6dd6baa1b1574bad5eadc1e83d4b72d56c0c8 + last_write_checksum: sha1:e391cf72690e6cd01a2878081b8d87938e1c6639 + pristine_git_object: b6c00c3647d21789c92ad7d32dd29c3089ca134f + docs/models/imagedetail.md: + id: f8217529b496 + last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 + pristine_git_object: 1e5ba3fd405a14e5e2872cc85504584dca19b726 docs/models/imagegenerationtool.md: id: d5deb6b06d28 - last_write_checksum: sha1:b3decee8fe7a824401f9afbd3544a69ccde4ef8e - pristine_git_object: 0c8de72cdd7149217010ae5d02777d1c5dd9896c + last_write_checksum: sha1:a1813ef99e4a0990fd073bb2311c475e88072029 + pristine_git_object: b476b6f2733a49767d7f7a4ad091fc321ab514f4 docs/models/imageurl.md: id: e75dd23cec1d - last_write_checksum: sha1:30131c77dd240c3bae48d9693698358e5cc0ae63 - pristine_git_object: 7c2bcbc36e99c3cf467d213d6a6a59d6300433d8 + 
last_write_checksum: sha1:a5cf621ce58a9cc7c96afa7de53367eac7b4cb0b + pristine_git_object: 6358e0acb2dea4816203413842243704ca955783 docs/models/imageurlchunk.md: id: 4407097bfff3 - last_write_checksum: sha1:73e14a0beccfc9465ee6d2990462e609903f5cd5 - pristine_git_object: 43078c7849fb3e808c2eaeaa5a3caeab2619d700 - docs/models/imageurlchunktype.md: - id: b9af2db9ff60 - last_write_checksum: sha1:990546f94648a09faf9d3ae55d7f6ee66de13e85 - pristine_git_object: 2064a0b405870313bd4b802a3b1988418ce8439e + last_write_checksum: sha1:da7a792f7b649f311062338dfbf3d25ff55fe6c5 + pristine_git_object: db0c53d22e29fa25222edb86b264e5135879a029 docs/models/imageurlunion.md: id: 9d3c691a9db0 last_write_checksum: sha1:4e32bcd7d44746d2ddbfafbef96152bb2bdb2a15 @@ -918,124 +862,184 @@ trackedFiles: pristine_git_object: b44a467d258cfa8cc3d2a3236330471dbc3af109 docs/models/inputs.md: id: 4b0a7fb87af8 - last_write_checksum: sha1:19d8da9624030a47a3285276c5893a0fc7609435 - pristine_git_object: 0f62a7ce8e965d0879507e98f808b9eb254282a6 - docs/models/inputsmessage.md: - id: 174dcada287d - last_write_checksum: sha1:92a95c1757e33603d1aa9ed6c9912d1c551d9974 - pristine_git_object: e3543fb4f9fff679b25f7f803eb2e8dabd56368f + last_write_checksum: sha1:c5f0c21c25fd5a698398a9e4ddf6261add60740c + pristine_git_object: d5771207d9725f04ca2ab1be692fc089360a58f4 docs/models/instructrequest.md: id: a0034d7349a2 last_write_checksum: sha1:34a81411110cbb7a099c45e482f5d1702ae48fd3 pristine_git_object: 5f0cdfff135fb72d3b1a81999a30b720c044e3d4 - docs/models/instructrequestinputs.md: - id: 2a677880e32a - last_write_checksum: sha1:64bcc6371d70446da60f167682504568d7f2618c - pristine_git_object: 931ae5e47df2d2850e3ef6740e2b89e1e0138297 docs/models/instructrequestmessage.md: id: 380503708a09 last_write_checksum: sha1:551b5d6dd3ba0b39cad32478213a9eb7549f0023 pristine_git_object: 57ed27ab3b1430514797dd0073bc87b31e5e3815 - docs/models/jobin.md: - id: 1b7b37214fa8 - last_write_checksum: 
sha1:0a241378cf3791c5c3fa733f30d45c07ef841448 - pristine_git_object: 62da90727898dd84f547c436c17fefa788e4f0d6 - docs/models/jobinintegration.md: - id: 200c505fa67f - last_write_checksum: sha1:c9887897357e01e6e228b48d6bf0c3fb4edd29f7 - pristine_git_object: 103820e7ec55769227610c385addbecfcd075cae - docs/models/jobinrepository.md: - id: 9ab1d5469c10 - last_write_checksum: sha1:1773f59546b94688d0be16d3f5f014cd86f5b1d7 - pristine_git_object: e873ae63f359d6ac4aca03b058a7c25fbbf2ba32 - docs/models/jobmetadataout.md: - id: 30eb634fe247 - last_write_checksum: sha1:46d54b6f6004a6e571afd5207db5170dfbce7081 - pristine_git_object: 6218a161b71abbb35eb4ca6e3ce664226983efc2 - docs/models/jobsout.md: - id: cbe31f43047d - last_write_checksum: sha1:4bd9ffbd2e5a286090167c795b9c3970e3c7d0a5 - pristine_git_object: 69f8342ac6f02a6e60d05b6f5b3cd892964fd3d7 - docs/models/jobsoutdata.md: - id: 809574cac86a - last_write_checksum: sha1:06455044d314c4edbd1ce4833d551c10918f0a3e - pristine_git_object: 28cec31117416b79eb8688d84b47b157974574cc + docs/models/jobmetadata.md: + id: 1f8e4c2f49e5 + last_write_checksum: sha1:a29ec10cd129b955672f60aaf526905780afe6b6 + pristine_git_object: 5d8a89ddc6b401a80e23d51cb378cdac5d4eb342 + docs/models/jobsapiroutesbatchcancelbatchjobrequest.md: + id: 798cb1ca1385 + last_write_checksum: sha1:67e8bda117608aee0e09a702a1ef8a4b03c40b68 + pristine_git_object: c19d0241784ff69bc68a11f405437400057d6f62 + docs/models/jobsapiroutesbatchgetbatchjobrequest.md: + id: e83a7ec84f8a + last_write_checksum: sha1:d661875832b4b9d5f545262216c9fcb9a77c8cd0 + pristine_git_object: 8c259bea9bef11f779fd609f1212565d574457e2 + docs/models/jobsapiroutesbatchgetbatchjobsrequest.md: + id: 5b9c44ad4d31 + last_write_checksum: sha1:1d7c05337b7cfe68f85a36576d060e1a890f9f96 + pristine_git_object: 5ceb0b2c40f079ffbe2cc4c82f6c3f94276980b4 + docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md: + id: 8eb8c127091e + last_write_checksum: sha1:2b93a6bed5743461bb03c8337fb25dfc5a15522e + 
pristine_git_object: f9700df50b8f512c4139c1830aba18989d022b8e + docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md: + id: deff83b39b78 + last_write_checksum: sha1:dac8d8f2e95aed2db9b46711e6e80816881d5d14 + pristine_git_object: 883cbac685563d2e0959b63638f6b967ebdf1ee9 + docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md: + id: c45757ba1ed9 + last_write_checksum: sha1:4931469b58d454264f1e8d32df6a07d3f6f01022 + pristine_git_object: fb62eb62027c8151d597544fcaf27b972aeb78b3 + docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md: + id: 8aa8030f26d7 + last_write_checksum: sha1:4aada0d2297479d8276f5a422cb4dd6b56b1e176 + pristine_git_object: 7b52e2ca6365f17ac3b19d128937783d87c7fa37 + docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md: + id: a9b75762e534 + last_write_checksum: sha1:8f1395447928e089c88dce8c0ced1030ec5f0eba + pristine_git_object: fde19800303a901149bf39c5330ef8c4da87df62 + docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md: + id: c0b31f4fc621 + last_write_checksum: sha1:4ceb9df28082bf5d496cd222a0f45dc81a576367 + pristine_git_object: f770532776a13860e697da7478d1677d16f0ec36 + docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md: + id: 52078f097503 + last_write_checksum: sha1:fc134fdc7e229b8df373b77096c8299c214171a7 + pristine_git_object: 23c52c342358ea889b25ee7b18b381b68519c6cf + docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md: + id: 8545ffb587d6 + last_write_checksum: sha1:bbc08ca53c2da180b96ed0347cf4954410c79311 + pristine_git_object: 40d57686aec11d9bdc4c116ea4c98183e0a6414c + docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md: + id: b4e2b814d8c3 + last_write_checksum: sha1:f13b5c8f2e74cc73b58a30d366032c764603f95e + pristine_git_object: 4429fe480ab9486de98940a119ac63f40045313b + docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md: + id: cfd848845787 + last_write_checksum: sha1:a165279fa0c9e051458ea4333dfdd31ef0440426 + pristine_git_object: 
1a7e71d4479369f13c391a9782278557bc4531ae + docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md: + id: 75b5dd1bcbaa + last_write_checksum: sha1:dd30e7ff8748d26497458f3398c0547113dc058f + pristine_git_object: 95c1734daa7164bedeeb1fa58dd792939f25bc17 + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md: + id: 60bd2e28993a + last_write_checksum: sha1:58835c28cccaf90e99bbb72bf7c5a5ce42498824 + pristine_git_object: dbe49a86ca2bf64901133fd58a342d30909c35b2 + docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md: + id: c265a30fd4cf + last_write_checksum: sha1:410c62a884aae902cdfbfcab33779e62487de13b + pristine_git_object: f40350bf9d74d09ca3a2ec6d91d9068bda631ef5 docs/models/jsonschema.md: id: a6b15ed6fac8 last_write_checksum: sha1:523465666ad3c292252b3fe60f345c7ffb29053f pristine_git_object: 7ff7c070353c58290416aff5b01d1dfc43905269 - docs/models/legacyjobmetadataout.md: - id: b3b8c262f61a - last_write_checksum: sha1:d8c4e7525e2dc2f4d29bfeb6cadc648fab1c62c7 - pristine_git_object: 8a712140fbf3c36f4bd9686e135b70d8688aa9c1 - docs/models/libraryin.md: - id: a08170e6397c - last_write_checksum: sha1:2c996ecf1ae5d9e8df702a79741b72b3571eb6ef - pristine_git_object: d6b119148725627bcf76594c4a24e915399cd8f8 - docs/models/libraryinupdate.md: - id: 6d06b6b21498 - last_write_checksum: sha1:4ec01d7f7e24f58a74613d4847725bfd516b7d7f - pristine_git_object: 4aa169c7669c00fcedc423fbff6f386697360787 - docs/models/libraryout.md: - id: 2e8b6d91ded2 - last_write_checksum: sha1:d71053b44725147265871be445217e3e1a0e5ede - pristine_git_object: ebf46d57de6bad7022a3e8cb8eaf88728bbbe888 - docs/models/listagentaliasesrequest.md: - id: 495659b2d40a - last_write_checksum: sha1:637e7e0e8deadcf2e77cc9469727010f90f0ad79 - pristine_git_object: b3570cb80d484dadaf2a138c70bbb477746ba416 - docs/models/listagentsrequest.md: - id: aeb9bbc163f5 - last_write_checksum: sha1:86c5f5068061b79d2e582e4dd9a8b0ed4c84cbcf - pristine_git_object: 
79aec3ea6e3506797fc96a7ca9d7393543270866 - docs/models/listagentversionsrequest.md: - id: 3270f6dd4107 - last_write_checksum: sha1:14ffb20c5c48cca371ed27f6a6a8b565cd4a5565 - pristine_git_object: ba8ddaa5cb4c94623b29a1f635f38a04cc0ff497 - docs/models/listbatchjobsrequest.md: - id: e2a0b1528191 - last_write_checksum: sha1:01a587ec7cc6e183d47e106eb809e7c1e9e79e39 - pristine_git_object: 19981b2425254058bd24b218d1f7881fc3635c89 - docs/models/listconversationsrequest.md: - id: 6c0961051703 - last_write_checksum: sha1:453eb480cd48330f857b4c80210b6753a750348d - pristine_git_object: d99b420834b17f3f5b7fac630af7a7b0d2db341d - docs/models/listconversationsresponse.md: - id: 65075f5cf00c - last_write_checksum: sha1:8478c55b156c09f2b714d2854030a04494b48f7c - pristine_git_object: 9d611c553b245657181c06d7f65acaa9d8128556 - docs/models/listdocumentout.md: - id: 4bec19e96c34 - last_write_checksum: sha1:c0b3a6e3841f120c52b1d7718d7226a52fe1b6d6 - pristine_git_object: f14157b8db55c1201d9f7151742e9ddf0d191c16 - docs/models/listdocumentsrequest.md: - id: 36c8a1116534 - last_write_checksum: sha1:390849ce3d93a64c505b7b2f7cae411766a5e44b - pristine_git_object: 369e8edbe471dd5167ad1baf74ee5b00eb7d5043 - docs/models/listfilesout.md: - id: 98d4c59cc07e - last_write_checksum: sha1:e76df31628984095f1123005009ddc4b59b1c2bc - pristine_git_object: bcb1f13aa17f41dadb6af37541e929364e2d6cec - docs/models/listfilesrequest.md: - id: 70edaf3759f0 - last_write_checksum: sha1:686edbd5134dfe60cfd98221ec78d296a8429d28 - pristine_git_object: 2d76a76b011603e3a7c4b4932ef4b26def1cb792 - docs/models/listfinetuningjobsrequest.md: - id: 41878563fe80 - last_write_checksum: sha1:103cd0d3c5334ea60a6c6e1c2585bf9bd493c78f - pristine_git_object: 3a04fc709c2a12cc4f414701efcaec4584b7d6df - docs/models/listfinetuningjobsstatus.md: - id: 1d6d54dc70ea - last_write_checksum: sha1:c4f69e2b2b5aac719281d264722f2cba6aa048a0 - pristine_git_object: 07db9ae5d87b7192ada4843d4fe0d3e8573794c6 - 
docs/models/listlibraryaccessesrequest.md: - id: 0b387463f914 - last_write_checksum: sha1:2912e1fc3ee179f01fde7a21501e2501debecc2c - pristine_git_object: d98bcda22bbb2540a525f2ce1516a637446b0a0f - docs/models/listlibraryout.md: - id: ea34f8548bd6 - last_write_checksum: sha1:cec920357bc48bea286c05d16c480a9a9369b459 - pristine_git_object: db76ffa10eb97f143ad4a6930e520e389fe18153 + docs/models/legacyjobmetadata.md: + id: 50ac14d9b270 + last_write_checksum: sha1:ebe37a176ca318e797fee7ebf4eef73fb9938a12 + pristine_git_object: 4705ab4f67e10b8e2cbfc86b29c03a9945aeb8fb + docs/models/librariesdeletev1request.md: + id: c0c3b2e1aabc + last_write_checksum: sha1:bef84f8851b06d2d914b605f11109de1850d0294 + pristine_git_object: 68d7e54369ce75422bf8b0ff16cada1c0ae2b05c + docs/models/librariesdocumentsdeletev1request.md: + id: 9d557bd7d1cc + last_write_checksum: sha1:1b580b657559356886915ee5579b90a03db19337 + pristine_git_object: efccdb1bbc36cf644ed2d1716cbd202e6d6bf6c5 + docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md: + id: 27ad38ce4cb1 + last_write_checksum: sha1:b35ad610330232b395b5f87cc15f6ae270de6816 + pristine_git_object: 14ca66f72693f1df05eb93e0cca45f440b62d282 + docs/models/librariesdocumentsgetsignedurlv1request.md: + id: 4498715b6cfb + last_write_checksum: sha1:31f78079e31e070d080c99555cd2d85318fc4610 + pristine_git_object: 7c08c180d59a8e8475fea89424b8b2021d51385f + docs/models/librariesdocumentsgetstatusv1request.md: + id: c2219d3a3738 + last_write_checksum: sha1:44e79df94cf2686e83d7a2e793140a6a7b3a1c05 + pristine_git_object: e6d41875966348fd9e770d06c8099e48f0e64b5d + docs/models/librariesdocumentsgettextcontentv1request.md: + id: 850dfa465952 + last_write_checksum: sha1:4a1212e111525f4265d2924ce52f9c13d2787d4d + pristine_git_object: 2f58a4460ccdad531391318c62191e76c1ec22ac + docs/models/librariesdocumentsgetv1request.md: + id: cdd0df2f7e9d + last_write_checksum: sha1:36e5ef39552159044ecd28d20ee0792ea5bcadef + pristine_git_object: 
6febc058425bb38857c391ee4c40d600858e6058 + docs/models/librariesdocumentslistv1request.md: + id: 7b5756e50d64 + last_write_checksum: sha1:2605b7972a3d7b4f73ab8052be4bf740f44f6f6f + pristine_git_object: 44f6300115853053214639982516a60b3268e778 + docs/models/librariesdocumentsreprocessv1request.md: + id: 1b8bf57b3f0a + last_write_checksum: sha1:8528785c1b4ae18d6ec6f261d29d5daac0d420a3 + pristine_git_object: 196ba17b749ce9efc1c30189864e474896814f85 + docs/models/librariesdocumentsupdatev1request.md: + id: b9147b1c0e38 + last_write_checksum: sha1:ed3ae7761990bd26a4bf99cd4641822eb90d3d57 + pristine_git_object: d46308509330099e30a53dddad51da8a6186aa92 + docs/models/librariesdocumentsuploadv1request.md: + id: 89a89d889c72 + last_write_checksum: sha1:32294a87d8a0b173b4d6f12b607a1bb3da765776 + pristine_git_object: 172a6183f31eec3142a84637414484799c2a4677 + docs/models/librariesgetv1request.md: + id: f47ad71ec7ca + last_write_checksum: sha1:3b2bf1e4f6069d0c954e1ebf95b575a32c4adeac + pristine_git_object: 6e1e04c39c15a85d96710f8d3a8ed11a22412816 + docs/models/librariessharecreatev1request.md: + id: 99e7bb8f7fed + last_write_checksum: sha1:e40d710ad1023768a0574b3283ef35544f6b0088 + pristine_git_object: 4c05241de4ee5a76df335ae9ea71004bd02b8669 + docs/models/librariessharedeletev1request.md: + id: bc8adba83f39 + last_write_checksum: sha1:79fc5a9a3cee5b060f29edd95f00e0fea32579cf + pristine_git_object: 850e22ab79863ba544f453138322c0eb5bf544cd + docs/models/librariessharelistv1request.md: + id: 86e6f08565e2 + last_write_checksum: sha1:6f2ffff66fa5fb141d930bca7bb56e978d62b4a5 + pristine_git_object: 98bf6d17ab013c1dd3f0ab18c37bbfc1a63f1b76 + docs/models/librariesupdatev1request.md: + id: f7e51b528406 + last_write_checksum: sha1:6a33b0161702ecc335dd2859df1bbc05b73702a9 + pristine_git_object: c5c142db7aaa49990135c21eabde43b8c0fdf756 + docs/models/library.md: + id: e8ec114dd107 + last_write_checksum: sha1:a4d6e9a777ce3d63aac24432281933ce6e13b4a9 + pristine_git_object: 
4319f43df922b4924a11d494002826cb8d6dea0b + docs/models/listbatchjobsresponse.md: + id: e03025d58630 + last_write_checksum: sha1:de42c9396546fc8487d0bd6ed15b4076599fa83f + pristine_git_object: c23e32201d12a2594f97a493f63b2b7b42b9e337 + docs/models/listdocumentsresponse.md: + id: f2091cee0405 + last_write_checksum: sha1:335d0ccd3a448e65739d5a0cfa2c67614daec031 + pristine_git_object: 47b9d3b73fdc85bf6e463c91790faf346df56664 + docs/models/listfilesresponse.md: + id: b15df90d2d59 + last_write_checksum: sha1:4840f26427acf8846a9f1e48136f0663c6e4cd87 + pristine_git_object: 802f685fb3a76afb86a69cf41e6de9339cd6fbc7 + docs/models/listfinetuningjobsresponse.md: + id: d04e4dfddf78 + last_write_checksum: sha1:cebaf361aa10f1f6c4299c3c8a34f32d301455ad + pristine_git_object: 00251242023e2161747ebf00b4c2959909e2b654 + docs/models/listfinetuningjobsresponsedata.md: + id: 59c80de4086d + last_write_checksum: sha1:5a0d91c251b4b9283895d9f19f7b9416f93d4468 + pristine_git_object: adb0644475841c6a4686e8c42790dd44eed43dc1 + docs/models/listlibrariesresponse.md: + id: 87e3bec10745 + last_write_checksum: sha1:00522e685ec71a54f5f272d66b82e650848eaf36 + pristine_git_object: e21b9ced628f6fd5ae891d4a46666ebc94546859 docs/models/listsharingout.md: id: a3249129f37e last_write_checksum: sha1:4831e4f02e1d5e86f138c7bb6b04d095aa4df30f @@ -1050,68 +1054,40 @@ trackedFiles: pristine_git_object: 76256fb913376a15d5bcd2531b18f1a78b980c9d docs/models/messageinputcontentchunks.md: id: 34aac9c271db - last_write_checksum: sha1:641cd1dba3721f85b049c5ee514879f067483949 - pristine_git_object: 4fd18a0dcb4f6af4a9c3956116f8958dc2fa78d1 + last_write_checksum: sha1:d8ffdfd8b5458497e2cb6a32f52900c3ca2a6ddf + pristine_git_object: 0561785082c741f39f930ab7ded5b6c6a9ade6ad docs/models/messageinputentry.md: id: eb74af2b9341 - last_write_checksum: sha1:07124339ecb87e31df5f0e2f887e23209dd269af - pristine_git_object: 52183a32330b3e0bf91a1bd5e541dfda12d3f1a0 + last_write_checksum: sha1:c91bfdf9426c51236b6ff33d127dbe62b051a9da + 
pristine_git_object: f8514fb3305dbe1df91db8d622cc33a753b63623 docs/models/messageinputentrycontent.md: id: 7e12c6be6913 last_write_checksum: sha1:6be8be0ebea2b93712ff6273c776ed3c6bc40f9a pristine_git_object: 65e55d97606cf6f3119b7b297074587e88d3d01e - docs/models/messageinputentryobject.md: - id: 9a1d0d31f357 - last_write_checksum: sha1:7746753005fda37834a73e62bf459eacb740ba5b - pristine_git_object: 6bdd62e27d7353dbb7d521ad02bde358496ab108 - docs/models/messageinputentryrole.md: - id: 2497d07a793d - last_write_checksum: sha1:a41eb58f853f25489d8c00f7a9595f443dcca2e6 - pristine_git_object: f2fdc71d8bc818b18209cd1834d4fead4dfd3ba6 - docs/models/messageinputentrytype.md: - id: 5d2a466dad0f - last_write_checksum: sha1:19f689ffdd647f3ddc747daf6cb0b4e811dfdcee - pristine_git_object: d3378124db83c92174e28fe36907263e2cbe6938 docs/models/messageoutputcontentchunks.md: id: 802048198dc0 - last_write_checksum: sha1:d70a638af21ee46126aa0434bf2d66c8dd8e43ff - pristine_git_object: d9c3d50e295b50618f106ef5f6b40929a28164df + last_write_checksum: sha1:8cf4e4ea6b6988e22c117d8f689bbfb0869816ad + pristine_git_object: c4a7777e7675ebf2384311ec82b2713da69e5900 docs/models/messageoutputentry.md: id: f969119c8134 - last_write_checksum: sha1:cf5032929394584a31b3f12f55dfce6f665f71c7 - pristine_git_object: 5b42e20d1b03263f3d4d9f5cefe6c8d49c984e01 + last_write_checksum: sha1:f50b955cd622a6160c0ada34b0e14bff612802b7 + pristine_git_object: 73a1c666acc913b96d65a124612c4a728882bbc9 docs/models/messageoutputentrycontent.md: id: 44019e6e5698 last_write_checksum: sha1:d0cc7a8ebe649614c8763aaadbf03624bb9e47e3 pristine_git_object: 5206e4eb0d95e10b46c91f9f26ae00407d2dd337 - docs/models/messageoutputentryobject.md: - id: b3a7567581df - last_write_checksum: sha1:46528a6f87408c6113d689f2243eddf84bcbc55f - pristine_git_object: bb254c82737007516398287ff7878406866dceeb - docs/models/messageoutputentryrole.md: - id: bf7aafcdddab - last_write_checksum: sha1:e28643b6183866b2759401f7ebf849d4848abb10 - 
pristine_git_object: 783ee0aae4625f7b6e2ca701ac8fcdddcfe0e412 - docs/models/messageoutputentrytype.md: - id: 960cecf5fde3 - last_write_checksum: sha1:b6e52e971b6eb69582162a7d96979cacff6f5a9c - pristine_git_object: cb4a7a1b15d44a465dbfbd7fe319b8dbc0b62406 docs/models/messageoutputevent.md: id: b690693fa806 - last_write_checksum: sha1:d6538a4b5d5721c09bc196f3e9523ed45dafbea7 - pristine_git_object: b0fa1a2d369c89ec75f43c6b31ff52b0d80d9b1c + last_write_checksum: sha1:a4157c087ff95fa9445757c9d363615718156164 + pristine_git_object: e09a965f7d4cc35d6b120ba5555d96ba7b3e8a27 docs/models/messageoutputeventcontent.md: id: cecea075d823 last_write_checksum: sha1:16dac25382642cf2614e24cb8dcef6538be34914 pristine_git_object: 16d8d52f6ff9f43798a94e96c5219314731ab5fb - docs/models/messageoutputeventrole.md: - id: 87d07815e9be - last_write_checksum: sha1:a6db79edc1bf2d7d0f4762653c8d7860cb86e300 - pristine_git_object: e38c6472e577e0f1686e22dc61d589fdb2928434 - docs/models/metricout.md: - id: 7c6ff0ad95f9 - last_write_checksum: sha1:eef34dc522a351e23d7371c00a07662a0711ea73 - pristine_git_object: 3c552bac2fa3a5a3783db994d47d255a94643110 + docs/models/metric.md: + id: a812a3e37338 + last_write_checksum: sha1:14016848dcfaba90014b482634ed6d5715caa860 + pristine_git_object: 7f86303651650177ece51b82d867cab858e830ae docs/models/mistralpromptmode.md: id: d17d5db4d3b6 last_write_checksum: sha1:abcb7205c5086169c7d9449d15ac142448a7d258 @@ -1122,12 +1098,8 @@ trackedFiles: pristine_git_object: c7dd2710011451c2db15f53ebc659770e786c4ca docs/models/modelconversation.md: id: 497521ee9bd6 - last_write_checksum: sha1:440c9e7c306f20bd4f4b27ab0cf770d3bf8762e2 - pristine_git_object: 813e1f3a79ad14eae55bbb1b96598d6260904d9d - docs/models/modelconversationobject.md: - id: 4c5699d157a9 - last_write_checksum: sha1:8e2e82e1fa4cb97f8c7a8a129b3cc9cd651e4055 - pristine_git_object: ead1fa26f5d9641a198a14b43a0f5689456e5821 + last_write_checksum: sha1:22a8d7502eeaf176fbd1c7b22b512b4f9e4e043f + pristine_git_object: 
af2e5c6149339a561b03b954cd0e71f9d9aeffd6 docs/models/modelconversationtool.md: id: 2dd28167bc36 last_write_checksum: sha1:9b33f73330e5ae31de877a904954efe342e99c4f @@ -1166,8 +1138,8 @@ trackedFiles: pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e docs/models/ocrrequest.md: id: 6862a3fc2d0f - last_write_checksum: sha1:9311e2c87f8f4512c35a717d3b063f2861f878d4 - pristine_git_object: 87929e53f8a74823b82ecce56d15f22228134fa6 + last_write_checksum: sha1:2faa819df648d330074c177d8f5d4a9c9a27bc90 + pristine_git_object: dd3fc2ea28cc2bc147473ba9f73aa32a9528632a docs/models/ocrresponse.md: id: 30042328fb78 last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 @@ -1190,8 +1162,8 @@ trackedFiles: pristine_git_object: d0ee0db93f56c40f6684fcfdb5873aba586bc876 docs/models/outputcontentchunks.md: id: f7e175c8e002 - last_write_checksum: sha1:5094466110028801726cc825e8809f524fe1ee24 - pristine_git_object: c76bc31d4d8791b7bef4dc6cbff6671b38a7927d + last_write_checksum: sha1:5adb0733a8ca9b224155dfef66dfb37b7f416972 + pristine_git_object: e5185014faa41b6e6d1567d713fc390f551fad01 docs/models/paginationinfo.md: id: 3d2b61cbbf88 last_write_checksum: sha1:1da38e172024fe703f3180ea3c6ec91fe3c51ed0 @@ -1216,10 +1188,22 @@ trackedFiles: id: d25137243bef last_write_checksum: sha1:f8c3a4984d647d64e8ea4e1e42654265ffe46b0f pristine_git_object: da3764ef56337bdc773eaf8e9aa747cbd1b407e2 + docs/models/realtimetranscriptioninputaudioappend.md: + id: fa2aa317d1ca + last_write_checksum: sha1:59cce0828505fdb55104cd3144b75334e0f31050 + pristine_git_object: 5ee365eb9a993933509ac4666bcec24bfcc6fccd + docs/models/realtimetranscriptioninputaudioend.md: + id: 11045f9cc039 + last_write_checksum: sha1:945ca0475826294e13aba409f3ae2c2fc49b1b67 + pristine_git_object: 393d208c6e242959161f4436d53cf4aa2df69a92 + docs/models/realtimetranscriptioninputaudioflush.md: + id: c2f2258e0746 + last_write_checksum: sha1:a4e6d160da44c6f57b01059f7198208702e9b06a + pristine_git_object: 
367725baa278935a6a282338ca7f2a23895a86d8 docs/models/realtimetranscriptionsession.md: id: aeb0a0f87d6f - last_write_checksum: sha1:c3aa4050d9cc1b73df8496760f1c723d16183f3a - pristine_git_object: 94a0a89e8ca03866f8b09202a28c4e0f7c3af2e6 + last_write_checksum: sha1:d72bf67442ac5e99f194c429e96a504685f02efb + pristine_git_object: 750bd7f79b65666812c6207d7085b9437c49517d docs/models/realtimetranscriptionsessioncreated.md: id: aa2ae26192d6 last_write_checksum: sha1:d13fec916d05300c86b52e951e81b1ceee230634 @@ -1228,26 +1212,26 @@ trackedFiles: id: 56ce3ae7e208 last_write_checksum: sha1:833db566b2c8a6839b43cb4e760f2af53a2d7f57 pristine_git_object: 7e2719957aae390ee18b699e61fbc7581242942f + docs/models/realtimetranscriptionsessionupdatemessage.md: + id: 02a5eee40cdd + last_write_checksum: sha1:44f8e6bc8f8cd4087a7e86c85db5141fab90f78d + pristine_git_object: 2a50ca92720bad6605bdeafd83b43d0e8bf40615 + docs/models/realtimetranscriptionsessionupdatepayload.md: + id: 3ddd5a95510a + last_write_checksum: sha1:33bca4d547ca812d55ac49bf7b17851b2fecfc80 + pristine_git_object: d6c6547d7895e53be15a0cce46b6524178acc3bc docs/models/referencechunk.md: id: 07895f9debfd - last_write_checksum: sha1:97d01dd2b907e87b58bebd9c950e1bef29747c89 - pristine_git_object: a132ca2fe6fbbaca644491cbc36d88b0c67cc6bc - docs/models/referencechunktype.md: - id: 0944b80ea9c8 - last_write_checksum: sha1:956b270766c7f11fe99f4a9b484cc29c159e7471 - pristine_git_object: 1e0e2fe64883ef5f3e628777b261b1224661d257 - docs/models/reprocessdocumentrequest.md: - id: 3c713aad474b - last_write_checksum: sha1:100b194196051470a2ae75cc2f707afec0c8d161 - pristine_git_object: cf3982a8cd76e4b2c8429acede0a12a044cbe2ca + last_write_checksum: sha1:4384049375a2566c7567599f97ce1ec19e9f6276 + pristine_git_object: d847e24845a399c7ca93d54701832fb65e01b3ab docs/models/requestsource.md: id: 8857ab6025c4 last_write_checksum: sha1:4b7ecc7c5327c74e46e2b98bd6e3814935cdecdf pristine_git_object: c81c115992439350d56c91d2e3351a13df40676b 
docs/models/response.md: id: 583c991c7a30 - last_write_checksum: sha1:f4a3ec06ff53cd1cbdf892ff7152d39fa1746821 - pristine_git_object: 3512b7a8f9fdfcaaed9a6db06ef4266629d9fa89 + last_write_checksum: sha1:0791cb4aa4045708ab64d42bf67bd6ab74bc7752 + pristine_git_object: ff67925758959b87992b47a1a32c224eeeb599e3 docs/models/responsedoneevent.md: id: 38c38c3c065b last_write_checksum: sha1:4ac3a0fd91d5ebaccce7f4098ae416b56e08416f @@ -1276,26 +1260,14 @@ trackedFiles: id: 48d4a45780a9 last_write_checksum: sha1:8e75db359f0d640a27498d20c2ea6d561c318d7e pristine_git_object: 844c5d610a9a351532d12b1a73f6c660059da76b - docs/models/restartconversationrequest.md: - id: b85b069aa827 - last_write_checksum: sha1:b7fb56a5561ab329f605d77795a610da8faaf561 - pristine_git_object: f24f14e67e749da884363038ca72891449cd99da - docs/models/restartconversationstreamrequest.md: - id: 65df276279f0 - last_write_checksum: sha1:907807c7e5969f82e70e743fddeb4c6f4278fc1a - pristine_git_object: daa661a9250701ad33241084d5033f73d75a9d6e - docs/models/retrievefileout.md: - id: 8e82ae08d9b5 - last_write_checksum: sha1:600d5ea4f75dab07fb1139112962affcf633a6c9 - pristine_git_object: 28f97dd25718833aaa42c361337e5e60488bcdc8 - docs/models/retrievefilerequest.md: - id: eac92ea7ca45 - last_write_checksum: sha1:c80772e3cfbe704385abe1b347d8e69d55bd9e00 - pristine_git_object: 454b9665b8134876488eb32c57a9dc45f4d972de - docs/models/retrievemodelrequest.md: - id: 392008b3324b - last_write_checksum: sha1:b9aafe10f0cd838a0b6959ec8dde5850ce59c55d - pristine_git_object: 787c3dd1000cba873c787fd5b9dcbe3c793f2b11 + docs/models/retrievemodelv1modelsmodelidgetrequest.md: + id: ac567924689c + last_write_checksum: sha1:7534c5ec5f1ae1e750c8f610f81f2106587e81a9 + pristine_git_object: f1280f8862e9d3212a5cfccd9453884b4055710a + docs/models/role.md: + id: b694540a5b1e + last_write_checksum: sha1:c7ef39a81299f3156b701420ef634a8b4fab76f0 + pristine_git_object: 853c6257d9bdb4eda9cb37e677d35ab477dca812 docs/models/sampletype.md: id: 
0e09775cd9d3 last_write_checksum: sha1:33cef5c5b097ab7a9cd6232fe3f7bca65cd1187a @@ -1328,18 +1300,10 @@ trackedFiles: id: 6a902241137c last_write_checksum: sha1:567027284c7572c0fa24132cd119e956386ff9d0 pristine_git_object: ae06b5e870d31b10f17224c99af1628a7252bbc3 - docs/models/startfinetuningjobrequest.md: - id: 48fd313ae362 - last_write_checksum: sha1:f645c1e3e3244729eaa31aabb4b3ec0454fb114f - pristine_git_object: 9df5aee8f527fea4f0c9b02a28af77a65765be48 - docs/models/startfinetuningjobresponse.md: - id: 970045c710ff - last_write_checksum: sha1:78d230946abe19e928f286562ac589c7672c9854 - pristine_git_object: dce84c5a7711cd655a624b6ba0540504a6ff75d7 docs/models/systemmessage.md: id: fdb7963e1cdf - last_write_checksum: sha1:561c3372391e093c890f477b3213c308ead50b81 - pristine_git_object: dfb0cd0bd17aecbc1fe4b8410e78440f65038fef + last_write_checksum: sha1:c7603c5ce77ba2bcbda9eff65eeafdb1e9ecbec7 + pristine_git_object: 10bda10f921fb5d66c1606ff18e654b4e78ab197 docs/models/systemmessagecontent.md: id: 94a56febaeda last_write_checksum: sha1:6cb10b4b860b4204df57a29c650c85c826395aeb @@ -1354,24 +1318,16 @@ trackedFiles: pristine_git_object: 54f029b814fdcfa2e93e2b8b0594ef9e4eab792a docs/models/textchunk.md: id: 6cd12e0ef110 - last_write_checksum: sha1:f04818ca76e68b3d3684927e4032d5d7de882f6a - pristine_git_object: d488cb51abeb4913c8441d9fbe9e5b964099bb7e - docs/models/textchunktype.md: - id: 886e88ebde41 - last_write_checksum: sha1:ba8db2a3910d1c8af424930c01ecc44889335bd3 - pristine_git_object: e2a2ae8bcdf8a35ad580a7de6271a5d26cd19504 + last_write_checksum: sha1:d9fe94c670c5e0578212752c11a0c405a9da8518 + pristine_git_object: df0e61c32bc93ef17dbba50d026edace139fee6a docs/models/thinkchunk.md: id: bca24d7153f6 - last_write_checksum: sha1:feb95a931bb9cdbfe28ab351618687e513cf830b - pristine_git_object: 66b2e0cde70e25e2927180d2e709503401fddeab - docs/models/thinkchunktype.md: - id: 0fbeed985341 - last_write_checksum: sha1:790f991f95c86c26a6abb9c9c5debda8b53526f5 - 
pristine_git_object: baf6f755252d027295be082b53ecf80555039414 - docs/models/thinking.md: - id: 07234f8dd364 - last_write_checksum: sha1:a5962d1615b57996730da19e59fbfaa684321442 - pristine_git_object: c7a0d5c9811ea37aaf9e16b6e93c833ab979573f + last_write_checksum: sha1:0f861f1653035dea2018be9a977c15f54add9531 + pristine_git_object: 70c0369f16465e1b1f5f46e8cd799e5db536cdde + docs/models/thinkchunkthinking.md: + id: 22de7b5060fb + last_write_checksum: sha1:5e0722b8d513b38d60fbfe28635bdea40b951593 + pristine_git_object: dd1ecca12b5cda76a51b1e13335f1757a9dd7a68 docs/models/timestampgranularity.md: id: eb4d5a8e6f08 last_write_checksum: sha1:e256a5e8c6010d500841295b89d88d0eface3b88 @@ -1384,6 +1340,10 @@ trackedFiles: id: 80892ea1a051 last_write_checksum: sha1:cb27b9d36cfe6227978c7a7a01b1349b6bac99d9 pristine_git_object: 3819236b9f3eee2f6878818cfbbe2817e97f7de2 + docs/models/toolcallconfirmation.md: + id: 944eebb142ff + last_write_checksum: sha1:864ccb39a00094d965b764235e74709945abca3d + pristine_git_object: 1812f7d687d83f5692d9e79709e56813ab2c79b1 docs/models/toolchoice.md: id: "097076343426" last_write_checksum: sha1:25b33b34da02c3b46349dc8b6223f9ae18370d16 @@ -1392,6 +1352,10 @@ trackedFiles: id: 15410de51ffc last_write_checksum: sha1:ca0cf9bf128bebc8faedd9333cc6a56b30f58130 pristine_git_object: 0be3d6c54b13a8bf30773398a2c12e0d30d3ae58 + docs/models/toolconfiguration.md: + id: 06bfa2c4e662 + last_write_checksum: sha1:9b619977375f228c76f09d48d6e2833add6c07e2 + pristine_git_object: 89286a172124ce3473bcb081de6e4db8c95afefa docs/models/toolexecutiondeltaevent.md: id: f2fc876ef7c6 last_write_checksum: sha1:ae1462a9b5cb56002b41f477ce262cb64ccf2f4e @@ -1410,60 +1374,44 @@ trackedFiles: pristine_git_object: 6449079d7b467796355e3353f4245046cced17e8 docs/models/toolexecutionentry.md: id: 75a7560ab96e - last_write_checksum: sha1:fdaa9abd5417486100ffc7059fcfdc8532935ed3 - pristine_git_object: adf88fb1acec13bf8016eb42d6bdc5fd3bd279b5 + last_write_checksum: 
sha1:668d8fbc59bc729bf4b1d95d2f2bfe4097701c0e + pristine_git_object: 03316381b130cf02751b10fef4129c8f23072b76 docs/models/toolexecutionentryname.md: id: 86d537762559 last_write_checksum: sha1:6c528cdfbb3f2f7dc41d11f57c86676f689b8845 pristine_git_object: fb762a5382d8b0e93dc2eb277f18adf810057c55 - docs/models/toolexecutionentryobject.md: - id: af106f91001f - last_write_checksum: sha1:6df075bee4e84edf9b57fcf62f27b22a4e7700f4 - pristine_git_object: 0ca79af56d60094099c8830f638a748a92a40f21 - docs/models/toolexecutionentrytype.md: - id: b61e79a59610 - last_write_checksum: sha1:b0485bae901e14117f76b8e16fe80023a0913787 - pristine_git_object: a67629b8bdefe59d188969a2b78fa409ffeedb2a docs/models/toolexecutionstartedevent.md: id: 37657383654d - last_write_checksum: sha1:47126a25c2a93583038ff877b85fc9ae1dcef9f3 - pristine_git_object: c41c7258779f15f1f0436ad890f4947d780bfa75 + last_write_checksum: sha1:5a020d24bdeb4eb9976ce93a8daa91947026bde9 + pristine_git_object: 189b8a3d3b22d73000850a3f1a95b85e358c2090 docs/models/toolexecutionstartedeventname.md: id: be6b33417678 last_write_checksum: sha1:f8857baa02607b0a0da8d96d130f1cb765e3d364 pristine_git_object: 3308c483bab521f7fa987a62ebd0ad9cec562c3a docs/models/toolfilechunk.md: id: 67347e2bef90 - last_write_checksum: sha1:0a499d354a4758cd8cf06b0035bca105ed29a01b - pristine_git_object: a3ffaa2b8339ae3a090a6a033b022db61a75125b + last_write_checksum: sha1:2e4c6ce703733c02e62467507c231033716fdb92 + pristine_git_object: d60021755729f1a2870e24a500b3220c8f1fc6e3 docs/models/toolfilechunktool.md: id: eafe1cfd7437 last_write_checksum: sha1:73a31dbff0851612f1e03d8fac3dbbee77af2df0 pristine_git_object: aa5ac8a99a33d8c511f3d08de93e693bf75fb2a1 - docs/models/toolfilechunktype.md: - id: f895006e53e4 - last_write_checksum: sha1:258a55eef5646f4bf20a150ee0c48780bdddcd19 - pristine_git_object: 7e99acefff265f616b576a90a5f0484add92bffb docs/models/toolmessage.md: id: 0553747c37a1 - last_write_checksum: sha1:f35fa287b94d2c1a9de46c2c479dadd5dca7144d - 
pristine_git_object: fa00d666d6d2baea0aac10fcdeff449eb73c9d39 + last_write_checksum: sha1:ac61e644ba7c6da607cb479eafd1db78d8e8012e + pristine_git_object: 7201481e61e269b238887deec30c03f7e16c53d7 docs/models/toolmessagecontent.md: id: f0522d2d3c93 last_write_checksum: sha1:783769c0200baa1b6751327aa3e009fa83da72ee pristine_git_object: 5c76091fbd2c8e0d768921fab19c7b761df73411 docs/models/toolreferencechunk.md: id: 10414b39b7b3 - last_write_checksum: sha1:2e24f2331bb19de7d68d0e580b099c03f5207199 - pristine_git_object: 3020dbc96563e2d36941b17b0945ab1e926948f4 + last_write_checksum: sha1:ea3bdfc83177c6b7183ad51fddb2d15aee0f0729 + pristine_git_object: 49ea4ca7b05e5fcaaf914f781e3a28483199d82d docs/models/toolreferencechunktool.md: id: c2210d74792a last_write_checksum: sha1:368add3ac6df876bc85bb4968de840ac578ae623 pristine_git_object: 999f7c34885015a687c4213d067b144f1585c946 - docs/models/toolreferencechunktype.md: - id: 42a4cae4fd96 - last_write_checksum: sha1:43620d9529a1ccb2fac975fbe2e6fcaa62b5baa5 - pristine_git_object: bc57d277a39eef3c112c08ffc31a91f5c075c5a4 docs/models/tooltypes.md: id: adb50fe63ea2 last_write_checksum: sha1:f224c3d8732450b9c969b3e04027b7df7892694c @@ -1478,12 +1426,8 @@ trackedFiles: pristine_git_object: 1bc0189c5d1833c946a71c9773346e21b08d2404 docs/models/transcriptionsegmentchunk.md: id: f09db8b2273e - last_write_checksum: sha1:5387f2595d14f34b8af6182c34efac4874a98308 - pristine_git_object: 00a599ee8442f45ce4f529da18ad3e9486b12f9f - docs/models/transcriptionsegmentchunktype.md: - id: 01bda77a53f8 - last_write_checksum: sha1:63d511c2bd93bd477f1b7aae52954b28838316d9 - pristine_git_object: 2968fa26a2dd390b66974e6db57317616fb3b832 + last_write_checksum: sha1:d4a7ebd6a8cc512a0bd00a49af4130c533254b44 + pristine_git_object: d7672c0eebb55243965306c94a771aa18ed641d6 docs/models/transcriptionstreamdone.md: id: 2253923d93cf last_write_checksum: sha1:2a1910d59be258af8dd733b8911e5a0431fab5a4 @@ -1506,68 +1450,44 @@ trackedFiles: pristine_git_object: 
63fcfbc63a65cdff4228601e8a46f9d003ec9210 docs/models/transcriptionstreamsegmentdelta.md: id: f59c3fb696f2 - last_write_checksum: sha1:4a031b76315f66c3d414a7dd5f34ae1b5c239b2e - pristine_git_object: e0143a39fb12a4a3efce3e1b250730d20cf21c7d + last_write_checksum: sha1:7d6999abf5a01fc01c0d5302acd3218e535adc9a + pristine_git_object: 1b652a3b6dc4406a3b7efa8a412b15ca0a5d765f docs/models/transcriptionstreamtextdelta.md: id: 69a13554b554 - last_write_checksum: sha1:de31f5585d671f85e6a9b8f04938cf71000ae3f7 - pristine_git_object: a4062171d7630bcea967a89d8df6cffd4908285f - docs/models/unarchiveftmodelout.md: - id: 4f2a771b328a - last_write_checksum: sha1:0b9ab5d6c7c1285712127cfac9e918525303a441 - pristine_git_object: 12c3d74534897129766397a44afee0f4dac91d9f - docs/models/unarchivemodelrequest.md: - id: e6922871c93a - last_write_checksum: sha1:591461141df5089e884a2db13bfaaef1def0748c - pristine_git_object: 033dad8a66969e2b920ec40391c38daa658c6f0e + last_write_checksum: sha1:d969f462034ed356f2c8713b601ee7d873d4ce07 + pristine_git_object: 77bd0ddcf8a1d95707fa9e041de3a47bb9e7f56d + docs/models/unarchivemodelresponse.md: + id: a690f43df567 + last_write_checksum: sha1:5c9d4b78c92d30bb4835cb724d1ea22a19bf5327 + pristine_git_object: 375962a7110f814288ea9f72323383bd8194e843 docs/models/updateagentrequest.md: id: 371bfedd9f89 - last_write_checksum: sha1:f9ebaa4650f77595fd554bb2711d4b869cba06cc - pristine_git_object: 358cb71d2ab7dfae85ac7768936910a976d2f644 - docs/models/updateagentversionrequest.md: - id: 706f66fb34eb - last_write_checksum: sha1:913a8105b77620d32147a00c1223ce5a117d2df2 - pristine_git_object: b83eb867a518d757b23d981c962f87a0e9c8a454 + last_write_checksum: sha1:97170995ed40391023f0dce5096cfebe83fa7dc8 + pristine_git_object: d3428d92a8f23670a6b587a6017a353d2c12a815 + docs/models/updateagentrequesttool.md: + id: bdf961d2c886 + last_write_checksum: sha1:5355f8c97b2aef98aebff251e1f4830ddbaa7881 + pristine_git_object: e358b1edb9035667104700dde890bb0b43074543 
docs/models/updatedocumentrequest.md: id: ee4e094a6aa7 - last_write_checksum: sha1:4798ef091b5d045b0cda3d2a3cc40aef0fb3155c - pristine_git_object: fa5d117a4016208d81ad53f24daa4284b35152f8 - docs/models/updateftmodelin.md: - id: 1b98d220f114 - last_write_checksum: sha1:d1c7a8f5b32228d8e93ad4455fccda51b802f08f - pristine_git_object: 4e55b1a7d96e1ad5c1e65c6f54484b24cd05fcfc + last_write_checksum: sha1:4c4d774c67449402eb7e1476b9d0fef5b63f2b99 + pristine_git_object: 7e0b41b7be9f559b27a3430f46ed53d0453f6e03 docs/models/updatelibraryrequest.md: id: 2eda82f12f31 - last_write_checksum: sha1:cc1ca5b6f9bd4ab61e3983991f5656ff5ea22e8d - pristine_git_object: e03883cca75f3ed17fa3432e0abc2c892ec3d74a + last_write_checksum: sha1:436e08988daa8ca04ece36a4790ed84e0629b81a + pristine_git_object: aaffc5a9f0d588ff935db2ec2c079af9f162c2c3 docs/models/updatemodelrequest.md: id: 8eabdced3e0e - last_write_checksum: sha1:28765fe537adb34e5e2ef051cd1226bdcae8ea9f - pristine_git_object: 5799c63babcd9377c5024f584328c814c4401c04 - docs/models/updatemodelresponse.md: - id: 742d796d5be3 - last_write_checksum: sha1:2e09ab747fa3247486b25057e887baf0859c3a5b - pristine_git_object: 275ee77f111b926d681a446af9741001a1c88fa8 - docs/models/updateorcreatelibraryaccessrequest.md: - id: c95e6b3df38f - last_write_checksum: sha1:f957324978f18d9831dafe4d1a5d78f755f51ed6 - pristine_git_object: e04567b40d62e0d705096eedaba9fa84913f584d - docs/models/uploaddocumentrequest.md: - id: a211b5f814e4 - last_write_checksum: sha1:ce851cd52da0250c8d86f1346778edb0b5c97a50 - pristine_git_object: 92152b7f247ae4d7f8373e8b13ce947b7ca2cae7 - docs/models/uploadfileout.md: - id: c991d0bfc54c - last_write_checksum: sha1:ce5af8ffadb8443a6d1ca5fbbc014de42da35b9d - pristine_git_object: 6f09c9a6920f373c730fa3538b0c2953d757c257 + last_write_checksum: sha1:96879df11c005b591f2e59975897feff8fc8656e + pristine_git_object: 56b84c59c48ac135345394235c71ce77d384e33e docs/models/usageinfo.md: id: ec6fe65028a9 last_write_checksum: 
sha1:cf71fb9676d870eba7c4d10a69636e1db4054adc pristine_git_object: f5204ac94a4d6191839031c66c5a9bc0124a1f35 docs/models/usermessage.md: id: ed66d7a0f80b - last_write_checksum: sha1:627f88dbb89e226a7d92564658c23a0e8d71342a - pristine_git_object: 78ed066eed9f0638edc6db697eaeaad6f32b4770 + last_write_checksum: sha1:f0ed7d9cb7264f1d9e4a9190772df3f15e25346c + pristine_git_object: e7a932ed71496fa7cc358388c650d25f166f27a4 docs/models/usermessagecontent.md: id: 52c072c851e8 last_write_checksum: sha1:1de02bcf7082768ebe1bb912fdbebbec5a577b5a @@ -1584,82 +1504,82 @@ trackedFiles: id: ba1f7fe1b1a3 last_write_checksum: sha1:ef35648cec304e58ccd804eafaebe9547d78ddcf pristine_git_object: c73952d9e79ea8e08bc1c17817e74e3650def956 - docs/models/wandbintegrationout.md: - id: c1a0f85273d8 - last_write_checksum: sha1:ce7ffc6cc34931b4f6d2b051ff63e1ca39e13882 - pristine_git_object: a6f65667a6bcfb18b78f8f766ab71de84ca13ca7 + docs/models/wandbintegrationresult.md: + id: 729c2601b338 + last_write_checksum: sha1:49f442907815de4661a85a3521803d80b953a17e + pristine_git_object: d12bc31191ba534a9744d78f657c19e7f93f777a docs/models/websearchpremiumtool.md: id: 267988aa8c3f - last_write_checksum: sha1:f9b761d727cbe0c60a2d0800b0a93929c5c3f5e7 - pristine_git_object: 07b8b9265e01bd28b1c30fbc3f1283285e7d6edd + last_write_checksum: sha1:38f80a43f73a13ddedc7730f853c092a48b665f9 + pristine_git_object: 78b736cd314617caa0d77f3c42015212e37ab539 docs/models/websearchtool.md: id: fc4df52fb9b5 - last_write_checksum: sha1:047fd9f950d5a86cf42a8f3ac40f754b395e39ec - pristine_git_object: da5e7b7b600fa3fd0799e95e7a0f9507cd8456c3 + last_write_checksum: sha1:72636dc7ae74264bb5158d284ef6f83da5290b27 + pristine_git_object: 4ca7333c412ad819e3e02c61debe402e3f9b0af9 docs/sdks/accesses/README.md: id: 2ea167c2eff2 - last_write_checksum: sha1:200d509484a1a27fec893e15c39043a9deb140da - pristine_git_object: c1e3866d1a37e1596fa61538317eb68907cbaf57 + last_write_checksum: sha1:279d3b3a4f625b89b25e9a2a47886ac6008b3ca0 + 
pristine_git_object: c50456df9ea2bb71f78a83ad28f90e089d2e2cd7 docs/sdks/agents/README.md: id: 5965d8232fd8 - last_write_checksum: sha1:a655952f426d5459fa958fa5551507e4fb3f29a8 - pristine_git_object: cd3ec4c6c87f34c4d3634bf510534dff163d97de + last_write_checksum: sha1:a73ae6719acef32b47be55ea5c5684e91f7eda68 + pristine_git_object: 8a60837030b9e5dd0adca0d07d9f0266158b080f docs/sdks/batchjobs/README.md: id: a3b8043c6336 - last_write_checksum: sha1:eca07f3c47acbe42264d31fba982a49005a8c983 - pristine_git_object: 24316d78b1be51649d186db1479bbf74f00f87e6 + last_write_checksum: sha1:b4b3123ff210545048e2b0c729f2b7e5f7460f4e + pristine_git_object: 3633fe4ee136c1ac90f9446425f62a0d68fa4f90 docs/sdks/betaagents/README.md: id: 5df79b1612d8 - last_write_checksum: sha1:f2dbb543e7bd1db239ee801c55fa1f7f92ca6322 - pristine_git_object: 0ef655a348d7381aa0a7869a022b362d90497197 + last_write_checksum: sha1:9ec1c7a967bc653fe175a7986ddec74d5feb0714 + pristine_git_object: aaa5110e6db30f5450877b67d70d46e53b98996b docs/sdks/chat/README.md: id: 393193527c2c - last_write_checksum: sha1:908e67969e8f17bbcbe3697de4233d9e1dd81a65 - pristine_git_object: 6907c29d26b51fa7748b339cc73fd3d6d11a95a5 + last_write_checksum: sha1:5e7a43def5636140d70a7c781ed417e527ce9819 + pristine_git_object: 1bf4aeadc762f5d696c278eefaa759f35993e9d5 docs/sdks/classifiers/README.md: id: 74eb09b8d620 - last_write_checksum: sha1:f9cc75dbb32ea9780a9d7340e524b7f16dc18070 - pristine_git_object: 41b520812ac8a6031c0ab32aa771e9903fa24a97 + last_write_checksum: sha1:9f11740f8cf1a3af44fff15b63916305f1882505 + pristine_git_object: dc0f4984380b5b137266421e87a1505af5260e89 docs/sdks/conversations/README.md: id: e22a9d2c5424 - last_write_checksum: sha1:55b150757576819887075feac484ba76ae8abd59 - pristine_git_object: c0089f12b040f3686a584f1569ed4e0ab56c52fb + last_write_checksum: sha1:4c5f8ea93d560956cb23c26e0d5f6d7cbc129e07 + pristine_git_object: e77d329b735dc21f620470bcf82220a79bc34e18 docs/sdks/documents/README.md: id: 9758e88a0a9d - 
last_write_checksum: sha1:55280d8863200affd25a98d7493a0110c14baad3 - pristine_git_object: 97831f86223c6dbbaec35a240725a8c72e229961 + last_write_checksum: sha1:ac7ab2598066971e8b371a3e73aa266ec697df1b + pristine_git_object: 9c219b6709d5d5bfa28113efca92012e8c5a5112 docs/sdks/embeddings/README.md: id: 15b5b04486c1 - last_write_checksum: sha1:46e57c7808ce9c24dd54c3562379d2ff3e0526e8 - pristine_git_object: 0be7ea6dcace678d12d7e7e4f8e88daf7570df5d + last_write_checksum: sha1:76cb4876eebccfd2ab9a10a1b25570477a96a5c1 + pristine_git_object: eecb5c9e991dcd2fd5c1f0688efe3b64b4c6de3b docs/sdks/files/README.md: id: e576d7a117f0 - last_write_checksum: sha1:92558cd6688432150cc433391e2b77a328fa3939 - pristine_git_object: ae29b7bf9383f534b2ca194ec5ff261ff17b5fb6 + last_write_checksum: sha1:f5861c42227b901742fd8afe7155ed6d634b1b4c + pristine_git_object: 9507326be83eaf750daa12c0b1421d819b72340d docs/sdks/fim/README.md: id: 499b227bf6ca - last_write_checksum: sha1:34ff7167b0597bf668ef75ede016cb8884372d1b - pristine_git_object: 3c8c59c79db12c916577d6c064ddb16a511513fd + last_write_checksum: sha1:5b2ce811df8d867d14fe0126f2c9619cca779f56 + pristine_git_object: 49151bf5be49ce6554679bc5c30906894a290ecb docs/sdks/finetuningjobs/README.md: id: 03d609f6ebdd - last_write_checksum: sha1:206624c621a25836333f4c439e0247beb24a7492 - pristine_git_object: fe18feeb640804d9308e6fefe9b5f2371d125f9b + last_write_checksum: sha1:2d7ff255c1462d5f1dff617a1993e730ec3911ea + pristine_git_object: 4262b3a9833180ce86da43a26ee7ab27403f2cd0 docs/sdks/libraries/README.md: id: df9a982905a3 - last_write_checksum: sha1:1c623647aa7b834a844e343c9e3fe0763c8445a5 - pristine_git_object: 8835d0ec8cbabcb8ab47b39df982a775342c3986 + last_write_checksum: sha1:e3eb0e9efb3f758fdf830aa1752c942d59a4f72b + pristine_git_object: 7df1ef4e26449af572412f052ee7ad189039544f docs/sdks/models/README.md: id: b35bdf4bc7ed - last_write_checksum: sha1:2410579fd554ad1e5734cc313d0a75eeb04a1d14 - pristine_git_object: 
0cbf1bdde52d1a52c1329ecd1116718237be5152 + last_write_checksum: sha1:2aa91ffe637c049aed0d63d24ac39688b6ecb270 + pristine_git_object: 311a2db6e213902ac5a2c27acf19f856dae2c264 docs/sdks/ocr/README.md: id: 545e35d2613e - last_write_checksum: sha1:a8d22a86b79a0166ecec26a3e9379fa110d49b73 - pristine_git_object: 9fd9d6fc14c5874dbb819239ea677a171a26969b + last_write_checksum: sha1:da377d75b6b7480c335d7f721bb06fe11492be38 + pristine_git_object: fde2a82339e10c74aca6d1b4168b62501d7bbf83 docs/sdks/transcriptions/README.md: id: 089cf94ecf47 - last_write_checksum: sha1:493070fcce7cec1a627b04daa31c38a6745659e7 - pristine_git_object: 9691b81d3a7eb27d7b2b489408d32513859646c9 + last_write_checksum: sha1:15d118796f147bc5b0bf4146ba39bfa9edfbc996 + pristine_git_object: 97703c9b4dc942385ee04ae96cbd100c3f632a17 py.typed: id: 258c3ed47ae4 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 @@ -1670,8 +1590,8 @@ trackedFiles: pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 src/mistralai/client/__init__.py: id: f1b791f9d2a5 - last_write_checksum: sha1:fcca936cb62cc76d57372d5bd5735877b79b53a4 - pristine_git_object: 481fc91604c413966c8510d8341edaa3355fc276 + last_write_checksum: sha1:c05dc9845d3361c4aae7796b079ac0e7952e8606 + pristine_git_object: 4b79610a3fc8222fc8f9adeeaf798e894708fc06 src/mistralai/client/_hooks/__init__.py: id: cef9ff97efd7 last_write_checksum: sha1:9a6f060871150610f890cc97676c3afe9050b523 @@ -1686,156 +1606,236 @@ trackedFiles: pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c src/mistralai/client/_version.py: id: cc807b30de19 - last_write_checksum: sha1:dd6d1521f7ecfc56be58eafc1709873a04d27fb0 - pristine_git_object: 814d9ec74a37ae50f106ea07b3c174e65685521b + last_write_checksum: sha1:03563b818feb27386f7d6a0321a3875e3024a2d2 + pristine_git_object: 1a4d15d66f45d13c7f9cae550138390b5cf5897e src/mistralai/client/accesses.py: id: 76fc53bfcf59 - last_write_checksum: sha1:16574ca54176ec30b236ab1a4694f57a6314db43 - pristine_git_object: 
cda484c8feade66829dad587f5f397aa89d4fb6f + last_write_checksum: sha1:ed94623aa8a2bd502572a699a2f54c9281ec283e + pristine_git_object: 0761b0bc6080ab0d891be70089a1908d435559fa src/mistralai/client/agents.py: id: e946546e3eaa - last_write_checksum: sha1:3b46ac68d37563a9eb988ad2978083e40cf4513d - pristine_git_object: 0942cb20173f0b2e3f828f5857e3aa221f65bc1b + last_write_checksum: sha1:7049cab7c308888c88b0341fb29f0132e154e3cb + pristine_git_object: 2b70d1520663d999773159d89b1f9dc96f7fbf97 src/mistralai/client/audio.py: id: 7a8ed2e90d61 last_write_checksum: sha1:e202d775d24c0303053e0548af83fcb04e2748f4 pristine_git_object: f68f063c08a099d07904456daa76d8e2d2ecdbe6 src/mistralai/client/basesdk.py: id: 7518c67b81ea - last_write_checksum: sha1:795253524d0911d227b934978bdacb84619177a3 - pristine_git_object: 611b40597b42ac309871681b38a3b3c249cbe494 + last_write_checksum: sha1:2cea76931db51175b2c787d0c707f08e9944c22f + pristine_git_object: a976121bd224d64497e5006cb58dd728f6a67144 src/mistralai/client/batch.py: id: cffe114c7ac7 last_write_checksum: sha1:b452983f67b33f26e1faa60fdbbb171cb1877224 pristine_git_object: 7e36fd0d73ebeb873f74f4109896a6cf3bb7d2ba src/mistralai/client/batch_jobs.py: id: 3423fec25840 - last_write_checksum: sha1:eb1baade19f5da3dd815ebfbabccca139eb7b25d - pristine_git_object: 752c76524a4fa19ed1654943218ca5182d563ca3 + last_write_checksum: sha1:34de0e986e7c0e4377f70125d319e522280c565f + pristine_git_object: 0e135b30cd122d1a813ee67bf2f9037953448e73 src/mistralai/client/beta.py: id: 981417f45147 last_write_checksum: sha1:85f42fc6c2318eef94c90405b985120220c9c617 pristine_git_object: 65b761d18f7274cc33162a83efa5b33211f78952 src/mistralai/client/beta_agents.py: id: b64ad29b7174 - last_write_checksum: sha1:227c2ef3812c06e4a813063bf9d2282ce0884ecd - pristine_git_object: 4e692f17579635d5f0cc03f86b8158b3344ae87f + last_write_checksum: sha1:7c900a6b1483108a367050440667c069b08fbb92 + pristine_git_object: 157c5de4c66273e6df468f8a12b4399f9efb32fb 
src/mistralai/client/chat.py: id: 7eba0f088d47 - last_write_checksum: sha1:6f052ac3117829b16906a4e1cbfa5b1f7ab104fd - pristine_git_object: 35698d32ac870f4b59c03f02700f20c04b14462d + last_write_checksum: sha1:520b0da011d63c60bd0d3a960a410a8f4a6a3e22 + pristine_git_object: 13b9c01f035c4fd6f60b78f20a1801bedf3b582b src/mistralai/client/classifiers.py: id: 26e773725732 - last_write_checksum: sha1:abd5033ee390fdeddfa4af918cc44f6210a2a6a0 - pristine_git_object: 3407c4b77db429535465f29754a2da8145d6a5fe + last_write_checksum: sha1:ee94a4e50cda893f9c19c2304adda8b23fc2de9e + pristine_git_object: 67199b601e38dff6fc6a4317eb845fbde6c25de0 src/mistralai/client/conversations.py: id: 40692a878064 - last_write_checksum: sha1:6e81283d3d5db5dd554af68d69313951cf5f4578 - pristine_git_object: 646b91f3980bbe9be01078162d5b4ad9afb141b9 + last_write_checksum: sha1:1101b9e374010ba9cb080c30789672cfcfc45c55 + pristine_git_object: ec33b1fb12d1923ef5f686ed09c5fe5ae889e40c src/mistralai/client/documents.py: id: bcc17286c31c - last_write_checksum: sha1:9ae89ef80a636b55ba4cdc3ad6c77c47c1824433 - pristine_git_object: c78f2944edaac77864ff6c4dd8d19d3aab3f0cb6 + last_write_checksum: sha1:37669f51eba1b352a5e3c7f3a17d79c27c7ea772 + pristine_git_object: b3130364c0f3cc90ed1e4407a070bd99e3cce606 src/mistralai/client/embeddings.py: id: f9c17258207e - last_write_checksum: sha1:7cd6d848ed8978637988d9b7e1a7dd92dac5eb3b - pristine_git_object: 4a056baa014217927412e9dd60479c28de899e2e + last_write_checksum: sha1:0fbf92b59fde3199c770a522ead030f8fa65ff5c + pristine_git_object: 5f9d3b9cb611943e509caeda9ddd175e3baee2c3 + src/mistralai/client/errors/__init__.py: + id: 0b2db51246df + last_write_checksum: sha1:0befddc505c9c47388683126750c7ad0e3fbef52 + pristine_git_object: 58a591a1cc2896f26df2075ffca378ca6c982d1e + src/mistralai/client/errors/httpvalidationerror.py: + id: ac3de4a52bb6 + last_write_checksum: sha1:73251adb99a07d11b56d0bc0399a2362ff9ccdba + pristine_git_object: 97b165629c39ab6e24406eb3f13970414b73f8f7 + 
src/mistralai/client/errors/mistralerror.py: + id: d1f57f0ff1e9 + last_write_checksum: sha1:30065cdd7003ec02cb3463d7c63229c4ff97503c + pristine_git_object: eb73040c5b5251018695204fde80eac914b35dae + src/mistralai/client/errors/no_response_error.py: + id: 8b469ecb0906 + last_write_checksum: sha1:0b3fdb1136472c41a4a739a5cbf9e2a4ce0c63a4 + pristine_git_object: d71dfa7b24146f1390ac6830e61acf337b99ca83 + src/mistralai/client/errors/responsevalidationerror.py: + id: 6cfaa3147abe + last_write_checksum: sha1:6862d178d4d1964bc03db47b76709aa406546981 + pristine_git_object: a7b3b9f0207846b5f176076b9f400e95cb08ebb9 + src/mistralai/client/errors/sdkerror.py: + id: c489ffe1e9ca + last_write_checksum: sha1:f708168e46c2960dd51896083aee75ccdb36f9dd + pristine_git_object: 25b87255a51021079f8ba5cc60b43509e12f9a4d src/mistralai/client/files.py: id: f12df4b2ce43 - last_write_checksum: sha1:aa647afa486bbed48083c0b1ec954bdc5cfd0280 - pristine_git_object: 57d389f1e245f5768fe9e8991f65229dd4bd608d + last_write_checksum: sha1:a16c8702d15339200b09c62948c06f79e720d79c + pristine_git_object: a5f3adf6dd9b60a202c70edf7d2a148a626ce471 src/mistralai/client/fim.py: id: 217bea5d701d - last_write_checksum: sha1:90cacb025a1a1fb81e619d59819c0a652f4a5efa - pristine_git_object: be3f7742b866ac58b7bbb65e3593e9865dee134f + last_write_checksum: sha1:dc427c9e954dfb9a7fe2df8b5c544877a28cdc73 + pristine_git_object: 8ffb7730a03398322dfdd6c83724096d4924c5c5 src/mistralai/client/fine_tuning.py: id: 5d5079bbd54e last_write_checksum: sha1:fe1f774df4436cc9c2e54ed01a48db573eb813cd pristine_git_object: df6bc5643a13294ddfbeecc6ae84d00cd7199bed src/mistralai/client/fine_tuning_jobs.py: id: fa1ea246e0b2 - last_write_checksum: sha1:edfe25f99047d4cbd45222cd23823c782286a2c8 - pristine_git_object: 9a28ded152a4f4a5b625a97e087aebc5a287d71e + last_write_checksum: sha1:8cbf3827f5c2e43170192de39be498af0bf24cf0 + pristine_git_object: c2ee871bb1ccf7e3e24081121a7e54f1483eee5c src/mistralai/client/httpclient.py: id: 3e46bde74327 
last_write_checksum: sha1:0f4ecc805be1dc3d6e0ca090f0feb7d988f6eb9d pristine_git_object: 544af7f87d6b7097935290bebd08e30e5f485672 src/mistralai/client/libraries.py: id: d43a5f78045f - last_write_checksum: sha1:b3fd0348f4f56aab9873d09c45ed9575baf6e7c3 - pristine_git_object: 26ceabe19a340b7fd4dbb74aebab62bc45093ae5 + last_write_checksum: sha1:6440b3df71fe557ecba5c23768d115efd4ceb26f + pristine_git_object: b8728362b87349118ac6f163f50613dd18c43340 src/mistralai/client/models/__init__.py: id: e0e8dad92725 - last_write_checksum: sha1:d047eab2a2a8ee5af65ed19055a0a3e3092ad2c5 - pristine_git_object: 093ffcbdb0b57458cf856f585e6637d7d5955e8d + last_write_checksum: sha1:50727667552480e8298431f5a3dcc78457c53331 + pristine_git_object: 5ef8b3f3dd9fbb32d4675f7e11808c29fc218c57 src/mistralai/client/models/agent.py: id: 1336849c84fb - last_write_checksum: sha1:d41a96558ddbd52b6c71d316c291847bb6131a01 - pristine_git_object: 05ae24cde5149e30004b7cd4a2409c753682be56 + last_write_checksum: sha1:6090ddf2b5b40656dfbf3325f1022a40ae418948 + pristine_git_object: 686a6eb84ecd27e725e3773b3f7773dddac1c10c src/mistralai/client/models/agentaliasresponse.py: id: 3899a98a55dd last_write_checksum: sha1:d7e12ea05431361ad0219f5c8dee11273cd60397 pristine_git_object: 6972af2a4ae846e63d2c70b733ecd6c8370ee0cd src/mistralai/client/models/agentconversation.py: id: 1b7d73eddf51 - last_write_checksum: sha1:bc2f1a3710efc9c87d6796ccce953c9ce9cf3826 - pristine_git_object: a850d54c64de0c84ad4ea2b11ea1a828eb2580c4 - src/mistralai/client/models/agentcreationrequest.py: - id: 35b7f4933b3e - last_write_checksum: sha1:d3f61940b4cccfc9c13860844f4115e60b095823 - pristine_git_object: 898d42a9c16ffe893792e14445e9ebfcbd046ba3 + last_write_checksum: sha1:28718fb00dbe74241712b4f7a3fbce2d060f7e86 + pristine_git_object: da30c6634294cdaba459b68ca8877d867ee052fb src/mistralai/client/models/agenthandoffdoneevent.py: id: 82628bb5fcea - last_write_checksum: sha1:537e9f651de951057023d3712fa1820da17a21b4 - pristine_git_object: 
40bf84970e1d245c3c7fbad64d73f648f8287438 + last_write_checksum: sha1:829c5a152e6d737ffd65a3b88b0b2890e6703764 + pristine_git_object: e2609e3d1fb62b132eb53112eb2bdc4ae855085f src/mistralai/client/models/agenthandoffentry.py: id: 5030bcaa3a07 - last_write_checksum: sha1:afe800c64c74aa79fceda4e4ce808f67573edbc7 - pristine_git_object: b18fe17c70d561b926bdac04124ebca8fc1cca0b + last_write_checksum: sha1:c9544755ad6d3a3831f8afe446c6a9a523eb5137 + pristine_git_object: f92ef2cc7310d5df94436f3067a640d3848405f0 src/mistralai/client/models/agenthandoffstartedevent.py: id: 2f6093d9b222 - last_write_checksum: sha1:933f8be5eacd86881a42cfb83612f327caa77ee7 - pristine_git_object: e278aef39d3bc5e158a094c593391fa8ad77c320 + last_write_checksum: sha1:c9f86e01497c53f3c1806dbb9fdff6e2d9993323 + pristine_git_object: 2a4023419212fec8b3f0e83d506a25b17408a8b1 + src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py: + id: 23a832f8f175 + last_write_checksum: sha1:237d6b4419615c9c26f96d49760732bd7b4617e7 + pristine_git_object: 04761ae786c35e6fa6cd5a896a5e52458cb3a5d5 + src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py: + id: 9c9947e768d3 + last_write_checksum: sha1:385faebecef8479d1a72a7ab6f15ddcc611dad87 + pristine_git_object: 291a9802a7d49108fc0d428610cb4c37b42f0796 + src/mistralai/client/models/agents_api_v1_agents_deleteop.py: + id: 95adb6768908 + last_write_checksum: sha1:f222a61a73ba2f37051fffbf2d19b3b81197d998 + pristine_git_object: 5e41fdcdbf182e993acd71603ecb8c9a14e48043 + src/mistralai/client/models/agents_api_v1_agents_get_versionop.py: + id: ef9914284afb + last_write_checksum: sha1:c99ee098f659a56cb365c280cc29de441916b48a + pristine_git_object: 941863d0f8143020200bb5566ce66d527c4369c8 + src/mistralai/client/models/agents_api_v1_agents_getop.py: + id: f5918c34f1c7 + last_write_checksum: sha1:b90285965e2aaccaf989e59b8f1db4a53ae8b31c + pristine_git_object: dd17580dd0041a979fc6c9c7349d14a3e200f5d3 + 
src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py: + id: a04815e6c798 + last_write_checksum: sha1:b4b5c4e8566f1d0c68a14aba94b7ffea257fd7ce + pristine_git_object: bb1da6020386fabfbd606db9a098a0e9323ce3b0 + src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py: + id: 19e3310c3907 + last_write_checksum: sha1:6628e9ff747c579e11fa9a756cee3b11c57c476d + pristine_git_object: 54b62e90e23c1782a0b068460d6877cac3b28916 + src/mistralai/client/models/agents_api_v1_agents_listop.py: + id: 25a6460a6e19 + last_write_checksum: sha1:0abe889b85470b28917368a2b958a13303bd38f1 + pristine_git_object: 97b1c7f1a070be5e12e1a32ad56dbcfcb0f1cd68 + src/mistralai/client/models/agents_api_v1_agents_update_versionop.py: + id: 63f61b8891bf + last_write_checksum: sha1:e9046cf75e008e856f00dda8725cbb16d83cd394 + pristine_git_object: 5ab821ea413d656dc7194f3588c8987c3e720831 + src/mistralai/client/models/agents_api_v1_agents_updateop.py: + id: bb55993c932d + last_write_checksum: sha1:bc922e15651d7bb33b841d9b3ae247843b6a5426 + pristine_git_object: 69da5001007916e458cab6caf8c10073c8fbc7d6 + src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py: + id: ec00e0905f15 + last_write_checksum: sha1:d0a253c2f383241378e6fab35a38427d0a1dd827 + pristine_git_object: d257dc789cdc4f57bb91d1788335d2d49442d02f + src/mistralai/client/models/agents_api_v1_conversations_appendop.py: + id: 39c6125e850c + last_write_checksum: sha1:864ece4ddcd65075547daa1ab996ba7cfe9939fc + pristine_git_object: 61fec0834e6e05a56a7ee5c984fb0401f9c72f5c + src/mistralai/client/models/agents_api_v1_conversations_deleteop.py: + id: 0792e6abbdcb + last_write_checksum: sha1:9725fce86a52b4995a51e1995ca114c0c4b414df + pristine_git_object: 499645a77782e29db61e439060340fee787799c1 + src/mistralai/client/models/agents_api_v1_conversations_getop.py: + id: c530f2fc64d0 + last_write_checksum: sha1:241e5a07f37fa88f1e5011615b3e2b47a1aaf6a7 + pristine_git_object: 
504616abbf0c9d0595f2aae81c59e52352cee323 + src/mistralai/client/models/agents_api_v1_conversations_historyop.py: + id: 2f5ca33768aa + last_write_checksum: sha1:fccc3e1a3f48eff31463829037a440be667a7da1 + pristine_git_object: ef0a4eb084de52d4bde435ee9751aaa12e61dcc3 + src/mistralai/client/models/agents_api_v1_conversations_listop.py: + id: 936e36181d36 + last_write_checksum: sha1:e3e52cf7967b9b78099db9449cb33e3ded34d111 + pristine_git_object: 8bf66aea23f16734c1f9e03629aaf7246e4e60b4 + src/mistralai/client/models/agents_api_v1_conversations_messagesop.py: + id: b5141764a708 + last_write_checksum: sha1:17fd503da7fb20198792c6e25f94dcc0a1e5db05 + pristine_git_object: 19978a194e2dd633fe89bcee7ceac177fcdd6629 + src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py: + id: c284a1711148 + last_write_checksum: sha1:2e462249c8ab71376c5f6179a2c033e254165f3e + pristine_git_object: 63c744498dfbdd254f2e780d90a680b10100ee63 + src/mistralai/client/models/agents_api_v1_conversations_restartop.py: + id: 3ba234e5a8fc + last_write_checksum: sha1:5dd06d300dbe8832b72d868657dc4c58f0ebaad5 + pristine_git_object: 3186d5df9000d4a62c0fbc64a601e6b709803deb src/mistralai/client/models/agentscompletionrequest.py: id: 3960bc4c545f - last_write_checksum: sha1:ee1e60d894d3a9277c1a3970c422483ffa502e21 - pristine_git_object: f4a2d646927c8c0f250507f52c5e7515830759ad + last_write_checksum: sha1:5d81a0421184ed547208e8ea7cff47b18fc00788 + pristine_git_object: 6955f6acb023fd842d9ec46a694d270a66911c0e src/mistralai/client/models/agentscompletionstreamrequest.py: id: 1b73f90befc2 - last_write_checksum: sha1:3bc4976eeda6d9b30bba72e7f7c417ca9ba885c5 - pristine_git_object: 732e2402190d40bc5360868d3048d57fff9e7b55 - src/mistralai/client/models/agentupdaterequest.py: - id: 2d5a3a437819 - last_write_checksum: sha1:4a0ef549756904749a36b580cc2296a6a54d6647 - pristine_git_object: 96e209d41b638002f129ec4c13748082ccc3a8db + last_write_checksum: sha1:b46298a653359bca205b6b1975bcd1909e563dff + 
pristine_git_object: c2cf35522236f29ca1b9f2a438dfc79a59ca3e2a src/mistralai/client/models/apiendpoint.py: id: 00b34ce0a24d last_write_checksum: sha1:733e852bf75956acd2c72a23443627abfa090b7b pristine_git_object: a6665c1076f05c28936510c24ee7d3498d7e7a24 - src/mistralai/client/models/appendconversationop.py: - id: 1c47dd1e7c7e - last_write_checksum: sha1:109ced509e3caa5e5c9610b3a18839d113be708a - pristine_git_object: 710b8e1ca3fbfbb747e48d7699588bc199a41274 - src/mistralai/client/models/appendconversationstreamop.py: - id: 1ab08b189e9d - last_write_checksum: sha1:edd2a91da29f83646538b57e4d29f006d31f9dec - pristine_git_object: 55efca0e32c5d54d100563500aee9b61952d07c2 - src/mistralai/client/models/archiveftmodelout.py: - id: bab499599d30 - last_write_checksum: sha1:92f5b5a89ae5e52523d30069629e0ac8dc858d6b - pristine_git_object: 3107116c4a2c78c591999b220349325612a19b4e - src/mistralai/client/models/archivemodelop.py: - id: beefa1df3b7c - last_write_checksum: sha1:6f78b2f84f42267d4928a5a3ad1d3d3cae417cac - pristine_git_object: 30b4a9bd71f349cc4ab4b12df73770d327008527 + src/mistralai/client/models/archivemodelresponse.py: + id: 2d22c644df64 + last_write_checksum: sha1:d0f67fd2bc5a6e8de4f2b0a5742ceb4a1f7f5ab8 + pristine_git_object: f1116850c8bf0159c4146d4973988ea5d0fe7de7 src/mistralai/client/models/assistantmessage.py: id: 2b49546e0742 - last_write_checksum: sha1:a58ecb7bc381af02d83247f0518a3d34013b4575 - pristine_git_object: 5a4a2085e838196d3ab2b1c00bbeb7a78516dfb2 + last_write_checksum: sha1:dcfa31c2aac95a0d7bd748e96bd87a5c85c0d1f6 + pristine_git_object: 26a778c70439d21b890e85f2c85dbb560e8bffef src/mistralai/client/models/audiochunk.py: id: ce5dce4dced2 - last_write_checksum: sha1:8218d4c7118f677f16a3a63c55348c40d3ab3156 - pristine_git_object: a51868279b9b4ce2d97990286512d69f8d7f2e82 + last_write_checksum: sha1:d3c2e28583d661a9583c40c237430a1f63ea7631 + pristine_git_object: 68866cd2c3c640cf56258f2f98b8a2385ea6fcdb src/mistralai/client/models/audioencoding.py: id: 
b14e6a50f730 last_write_checksum: sha1:92ca06dce513cd39b2c7d9e5848cf426b40598ce @@ -1846,84 +1846,68 @@ trackedFiles: pristine_git_object: fef87ae76b31128ebd5ced4278e274c249181c23 src/mistralai/client/models/audiotranscriptionrequest.py: id: e4148b4d23e7 - last_write_checksum: sha1:6d7b267bc241c1f72b5b7839d6e2ad76a4c1ecff - pristine_git_object: 8c47a83cada33d8dbd4a9ffdedb55d3f4f55dadf + last_write_checksum: sha1:a6ef85be4ae24aa79c8c3fa9dcaf055e0ba9b266 + pristine_git_object: fe4c79e3427fae3e022bd936236d2934eaa76b60 src/mistralai/client/models/audiotranscriptionrequeststream.py: id: 33a07317a3b3 - last_write_checksum: sha1:66ae6146b9b75600df720054ec2c21e4e996b1fe - pristine_git_object: a080cee24c0d67c38fc6785c440418580e097700 + last_write_checksum: sha1:6e648ff58a70a0a3bd63a24676122b80eba4baf7 + pristine_git_object: 2d1e9269b51d84cd8b21643fe04accd00839b013 src/mistralai/client/models/basemodelcard.py: id: 556ebdc33276 - last_write_checksum: sha1:94871ce94c92fbbcff9fa5d6a543c824b17ee13b - pristine_git_object: 17a3e5c93339082f408f9ab5f34b5a01e24c74e0 + last_write_checksum: sha1:e2c3d1effee5b434fea9b958c0dd54fa96143924 + pristine_git_object: 9c9e9a2045a10f4606f11ee5886a19ccf03bbf0e src/mistralai/client/models/batcherror.py: id: 1563e2a576ec - last_write_checksum: sha1:9b59325428680d58151696c1738ad5466d67a78d - pristine_git_object: c1bf722a45c4326f24f7fd34ea536d59c48c67f2 - src/mistralai/client/models/batchjobin.py: - id: 72b25c2038d4 - last_write_checksum: sha1:667020377b2ca85dfd9c5aed96c7d4ba2571093b - pristine_git_object: a0c3b9146130a5ebfdbd0ec6338092bacc01bf85 - src/mistralai/client/models/batchjobout.py: - id: cbf1d872a46e - last_write_checksum: sha1:9031bc5ff1986ddc283551f7f5d210c9de67cc56 - pristine_git_object: 99c2b95118364d660f0cefde16507a83e8c9cafe - src/mistralai/client/models/batchjobsout.py: - id: 20b2516e7efa - last_write_checksum: sha1:426287f6ef9ed88e75f9e318582627d066f9e4f0 - pristine_git_object: f65fc040a964c68c82b5df7d3fb9e40222182322 + 
last_write_checksum: sha1:51c9e9a4d306c2de45dc0879ade62daed3fc2972 + pristine_git_object: 8a353cd2dc06a8c6f2db3d6b613cfdca8278f57e + src/mistralai/client/models/batchjob.py: + id: 85cd28932cc7 + last_write_checksum: sha1:532a8c6ca8546052159e5e5174cf65ce17a62f3f + pristine_git_object: 80acac336883c23b621d0dc647fef20548bf061a src/mistralai/client/models/batchjobstatus.py: id: 61e08cf5eea9 last_write_checksum: sha1:78934183519948464385245cbc89efb68ac00bfb pristine_git_object: bd77faa2fbed74b19a8d3884af6d43bc1b4806e0 src/mistralai/client/models/batchrequest.py: id: 6f36819eeb46 - last_write_checksum: sha1:115df324d1fec164bae60bf4b37acfa5149b3172 - pristine_git_object: 41c4523456398b302e0b7eb35824efc014f03aa6 + last_write_checksum: sha1:b2a71163e37a9483e172dc13b6320749bee38f2f + pristine_git_object: 911a9a0554b9b8cb6dedcb3a86a06c39890b875e src/mistralai/client/models/builtinconnectors.py: id: 2d276ce938dc last_write_checksum: sha1:4ceb3182009b6535c07d652ccf46661b553b6272 pristine_git_object: ecf60d3c1a83028d9cf755d4c9d5459f6b56e72a - src/mistralai/client/models/cancelbatchjobop.py: - id: cebac10b56a9 - last_write_checksum: sha1:2614180488e51c0e701fffdb058b39892c5bc1e5 - pristine_git_object: cd94ee86467247fe2bc7f7381fa05b57bedabef1 - src/mistralai/client/models/cancelfinetuningjobop.py: - id: c9a1b39f0d02 - last_write_checksum: sha1:139d3c443678aeeb8afedea8b2a783210e5ac28c - pristine_git_object: ddd445bb433df9a0f987693d97088d79e5e8c47f src/mistralai/client/models/chatclassificationrequest.py: id: afd9cdc71834 - last_write_checksum: sha1:91f62e46c415a0168442695f61cb30756227ed1a - pristine_git_object: 8b6d07b906c688a3849b8a4576cc10e075a6868f + last_write_checksum: sha1:a29088359142ebd6409f45569168b2096014119e + pristine_git_object: cf2aa78af3ffc747d557422b83551075b83e601d src/mistralai/client/models/chatcompletionchoice.py: id: 7e6a512f6a04 last_write_checksum: sha1:de0281a258140f081012b303e3c14e0b42acdf63 pristine_git_object: 2c515f6e9a290ebab43bae41e07493e4b99afe8f 
src/mistralai/client/models/chatcompletionrequest.py: id: 9979805d8c38 - last_write_checksum: sha1:95c0879e52d8b6c1ff389a5dfe1776129c764c00 - pristine_git_object: 4f7d071b5a0b84ef27397b4acaf4a798b6178eb8 + last_write_checksum: sha1:1f0390718ab06126a05e06797ef6af310ccab543 + pristine_git_object: e871bd92733ac400fdfeb2cf4f66fc32a7584103 src/mistralai/client/models/chatcompletionresponse.py: id: 669d996b8e82 last_write_checksum: sha1:97f164fea881127ac82303e637b6a270e200ac5b pristine_git_object: 7092bbc18425091d111ec998b33edc009ff0931b src/mistralai/client/models/chatcompletionstreamrequest.py: id: 18cb2b2415d4 - last_write_checksum: sha1:12e794c89a954702c3d4dccddad9b365331bd996 - pristine_git_object: ec7d2ae131cf5fac7eb618bbe09340ac23d444ef + last_write_checksum: sha1:c197792ed1dd78159ab0b970f8f76087ff2c4d6b + pristine_git_object: b7b2bff138cee9c130fa01d6157d8b6c21ea5a9c src/mistralai/client/models/chatmoderationrequest.py: id: 057aecb07275 - last_write_checksum: sha1:e18a5ae518f5413b1bff45f85f823b60e00ef32a - pristine_git_object: a8d021e8deb2015470765340281789a7fba544aa - src/mistralai/client/models/checkpointout.py: - id: 3866fe32cd7c - last_write_checksum: sha1:5ed4988914acef48854337127c4ca51791de3ab9 - pristine_git_object: 3e8d90e920cd34ff611f5e875c0163e1a4087f6f + last_write_checksum: sha1:7677494c0e36ccbc201384cb587abeb852a1a924 + pristine_git_object: 228e7d26b8b172c3e11f01d4f260bf6e5195b318 + src/mistralai/client/models/checkpoint.py: + id: 1a530d3674d8 + last_write_checksum: sha1:418f08c61b64fa7ffb053c6f5912e211acab1330 + pristine_git_object: c24e433eb4787146620fb48b6d301f51a4db5067 src/mistralai/client/models/classificationrequest.py: id: 6942fe3de24a - last_write_checksum: sha1:c98f6751aeba813b968aaf69c3551972b94da4c8 - pristine_git_object: 903706c31176da4c2ab021b3bcaeb2217ca98f76 + last_write_checksum: sha1:7bd416d4b0e083efbf9324107263027140702ddb + pristine_git_object: 25b6941355cb9629abb9c0f09fb6fd191c56ffa6 
src/mistralai/client/models/classificationresponse.py: id: eaf279db1109 last_write_checksum: sha1:64522aa2b0970e86a0133348411592f95163f374 @@ -1932,222 +1916,194 @@ trackedFiles: id: 2445f12b2a57 last_write_checksum: sha1:2b8b9aeadee3b8ffe21efd1e0c842f9094c4ecc7 pristine_git_object: 6c7d6231d211977332100112900ea0f8cdf5d84c - src/mistralai/client/models/classifierdetailedjobout.py: - id: d8daeb39ef9f - last_write_checksum: sha1:1b6dde6554e51d9100f2e50779eff56b3ca07603 - pristine_git_object: bc5c5381d61b6b4945b51dc9836bcc2e7aa66f9f - src/mistralai/client/models/classifierftmodelout.py: - id: 2903a7123b06 - last_write_checksum: sha1:5141a0c29da0739057c52b2345a386c79d6f8f85 - pristine_git_object: 182f4954c2b3f1408cb05eee76e2bf24005b023e - src/mistralai/client/models/classifierjobout.py: - id: e19e9c4416cc - last_write_checksum: sha1:c5daf7e879911ea24fba847a1c12ab9774ebbe98 - pristine_git_object: 03a5b11c46097733d609f3b075b58ef729f230a5 - src/mistralai/client/models/classifiertargetin.py: - id: ed021de1c06c - last_write_checksum: sha1:8a1db343861e4f193a56d4030862c1f3a361d3e1 - pristine_git_object: b250109bd03976c93c571dbbacb1c631acd19717 - src/mistralai/client/models/classifiertargetout.py: - id: 5131f55abefe - last_write_checksum: sha1:304408da049ff4ad17f058267ffaa916ef907dc2 - pristine_git_object: 3d41a4d9c887488e7b08cc9d5d8dcb5b0fd26781 + src/mistralai/client/models/classifierfinetunedmodel.py: + id: 5a9a7a0153c8 + last_write_checksum: sha1:853bf1b3b941ec3aebeb17ac2caf38fa0dd094de + pristine_git_object: fbcf5892d7f0a3ed8b3872d71dd95ed3a25463d1 + src/mistralai/client/models/classifierfinetuningjob.py: + id: a244d5f2afc5 + last_write_checksum: sha1:ceb13935702275025284bb77aa8bf5ccf926e19c + pristine_git_object: fb160cf8e16a1b4899f8bb2803b18ba1f55232ce + src/mistralai/client/models/classifierfinetuningjobdetails.py: + id: 75c5dee8df2e + last_write_checksum: sha1:6b3f2f7ca3bd4e089591f5f9c59b7e28a00447f8 + pristine_git_object: 5d73f55ee0f1321fdeeb4db1971e144953e8e27f + 
src/mistralai/client/models/classifiertarget.py: + id: 2177d51d9dcf + last_write_checksum: sha1:c801dacc31e2d7682285a9a41d8ef38fa2e38fb9 + pristine_git_object: 4d66d789a42a0bc8762998161f1ad801bd8d96d4 + src/mistralai/client/models/classifiertargetresult.py: + id: 19c343844888 + last_write_checksum: sha1:3f5b37de3585cb38a3e41f0ee49dc4b5a33bf925 + pristine_git_object: 8ce7c0ca167b38ebaf1e5fc6393ab56d9f142cfa src/mistralai/client/models/classifiertrainingparameters.py: id: 4000b05e3b8d - last_write_checksum: sha1:4063f78ea65f138578bef4ce8908b04e556cc013 - pristine_git_object: f360eda504f0aa3f60ba6834aab59c1beb648151 - src/mistralai/client/models/classifiertrainingparametersin.py: - id: 4b33d5cf0345 - last_write_checksum: sha1:7764e6e6c5fc58e501c0891d036bbb22a8ddcb07 - pristine_git_object: 85360a7e7ba5212ef9052d3bd5f368ea4e2c4d98 + last_write_checksum: sha1:d7ce2f1017463c52856b973d696c9abecf5f79e3 + pristine_git_object: 14fa4926f8b5b62aa6b5d8864c40d5acf66e7b15 src/mistralai/client/models/codeinterpretertool.py: id: 950cd8f4ad49 - last_write_checksum: sha1:b014008db6ddce4b35aedec70783d74ce1b5cf83 - pristine_git_object: f69c7a5777af16df151589d2c5c8d81de4d28638 + last_write_checksum: sha1:8c3d91805d6c5f5cc9d249216694781faf15ea68 + pristine_git_object: ce14265f6d312c3da52014d2a058b6a730d5c980 src/mistralai/client/models/completionargs.py: id: 3db008bcddca - last_write_checksum: sha1:4c4ba2d39540bbb06fc1c49815fc6a7c8cf40ab2 - pristine_git_object: 918832acf3ea3d324c20e809fcdb1eae2ba3d7fd + last_write_checksum: sha1:e3d36235610c0546d8a2f2bb0a1db0f953747d88 + pristine_git_object: ab5cf5ff2d4df92d00664803f9274696ae80216d src/mistralai/client/models/completionargsstop.py: id: 5f339214501d last_write_checksum: sha1:744878976d33423327ea257defeff62073dad920 pristine_git_object: 39c858e66380044e11d3c7fd705334d130f39dea src/mistralai/client/models/completionchunk.py: id: d786b44926f4 - last_write_checksum: sha1:04b634cffa4b0eb8ca177c91d62d333a061160df - pristine_git_object: 
67f447d0c6cd97cb54ffcd0c620654629ac4e848 - src/mistralai/client/models/completiondetailedjobout.py: - id: 9bc38dcfbddf - last_write_checksum: sha1:4771444753ff456829249d4e5fa5f71f2328fa78 - pristine_git_object: cd3a86ee28cdbf3a670d08f27642294321849ec0 + last_write_checksum: sha1:15f1b57b696b46bf6986c8f1a53d6bbf8d2351e2 + pristine_git_object: 5fd6c173ef29fb9bf2f570e0c2300268221e1ad3 src/mistralai/client/models/completionevent.py: id: c68817e7e190 last_write_checksum: sha1:dc43ac751e4e9d9006b548e4374a5ec44729eea4 pristine_git_object: 3b90ab0c1ecac12f90e0ae3946a6b61410247e4f - src/mistralai/client/models/completionftmodelout.py: - id: 0f5277833b3e - last_write_checksum: sha1:1c83e1d0a868eef32792844d787c5aaede0386b8 - pristine_git_object: 7ecbf54aabf022392e6d2ce2d0a354b9326eec79 - src/mistralai/client/models/completionjobout.py: - id: 712e6c524f9a - last_write_checksum: sha1:2c8500593b8f9257a0a389f87792cd174fcd7209 - pristine_git_object: 42e5f6c65809aaaa02f0bf58fbf031f4c476208b + src/mistralai/client/models/completionfinetunedmodel.py: + id: f08c10d149f5 + last_write_checksum: sha1:5fbd8c5475c250cbed1c2d2f47de372e8e92b128 + pristine_git_object: 54a1c1656aea1954288e9144670c939e29a83c47 + src/mistralai/client/models/completionfinetuningjob.py: + id: c242237efe9b + last_write_checksum: sha1:e4352be2411c7026c054a6fe380b87242183d4e4 + pristine_git_object: 1bf0a730c389be30bac2acfa17ffc6b5891e4918 + src/mistralai/client/models/completionfinetuningjobdetails.py: + id: e8379265af48 + last_write_checksum: sha1:b11c9bdc161da6a5cbd9f35f4bc5b51f0f3cea9c + pristine_git_object: cb7870219b261e260feceb6109088b0bbf8a6408 src/mistralai/client/models/completionresponsestreamchoice.py: id: 5969a6bc07f3 - last_write_checksum: sha1:874d3553d4010a8b83484588dcbf9136bd8c6537 - pristine_git_object: 119a9690727ae296acf72dcfafdd224a61582599 + last_write_checksum: sha1:59730cdaeeb3e95f4d38f63c34a4e491f40e6010 + pristine_git_object: a52ae892fcaafe54918160d055ee2badac31404e 
src/mistralai/client/models/completiontrainingparameters.py: id: be202ea0d5a6 - last_write_checksum: sha1:fd9a12417cd4f7bdc1e70ba05bbfef23b411ddd0 - pristine_git_object: 4b846b1b9bbcc4f2c13306169b715f08241e8f1c - src/mistralai/client/models/completiontrainingparametersin.py: - id: 0df22b873b5f - last_write_checksum: sha1:a92e9df1d5be2a7f2d34b1dcde131e99e5ee351d - pristine_git_object: 20b74ad9fc0c50fe7d1d3dd97fcd3c296fbf7042 + last_write_checksum: sha1:1a797019770795edcd911ff5b3580bedb83c05f4 + pristine_git_object: ca50a7ad521b46f275dd3a39c98911f13ee527c8 src/mistralai/client/models/contentchunk.py: id: c007f5ee0325 - last_write_checksum: sha1:5cedb52346bc34cb30950496d34ab87d591b6110 - pristine_git_object: eff4b8c670f47f53785690415751be05284f3d8b + last_write_checksum: sha1:b921b03b4c1e300b0e3f51ea9eadd4d7c4b7a0ea + pristine_git_object: e3de7591a089a3739af17108cecdc2d4240f10bf src/mistralai/client/models/conversationappendrequest.py: id: 81ce529e0865 - last_write_checksum: sha1:83e883e4324d76d74521607390747ecdf7dffaa0 - pristine_git_object: 0f07475e4ca640ce50a6214fe59a91041a2e596a + last_write_checksum: sha1:bdae860241893ec3ab3f22bd57c45dede2927da3 + pristine_git_object: 386714fd6dcccff8abb2247d7474949d9e8e79f8 src/mistralai/client/models/conversationappendstreamrequest.py: id: 27ada745e6ad - last_write_checksum: sha1:12c3c63b763bd16398fcbec7d6fab41729ee81a6 - pristine_git_object: a0d46f727ff99d76a1bf26891df3b0ed80a88375 + last_write_checksum: sha1:0a563cb146c4806ee6a133d10e7af8839e6f38dd + pristine_git_object: 32f6b148c647d3bac8edada3b941c51c17d78901 src/mistralai/client/models/conversationevents.py: id: 8c8b08d853f6 - last_write_checksum: sha1:6362a88ae26cb67f7abc3d2b0963f9a869c15371 - pristine_git_object: f24760381501f822593ef5903df0d32ca3cf9b47 + last_write_checksum: sha1:2eedde1ecf31061fb13de0b1bdc9ea311897b570 + pristine_git_object: 17812983f3aee3e675d44f46ca1b741315c2139a src/mistralai/client/models/conversationhistory.py: id: 60a51ff1682b - 
last_write_checksum: sha1:6fa8bdd370239df879da7b687c037405a8fbbe25 - pristine_git_object: 92d6cbf90c9c76945ee79752d5b4232aea10a79d + last_write_checksum: sha1:8984a0b12766e350022796a44baf6aac4c93f79b + pristine_git_object: ceef115b70ff02da05ac97571a177edf5b5f6cf6 src/mistralai/client/models/conversationinputs.py: id: 711b769f2c40 last_write_checksum: sha1:5fc688af61d6a49ede9c9709069f3db79f4dc615 pristine_git_object: 7ce3ffc3772926a259d714b13bfc4ee4e518f8f7 src/mistralai/client/models/conversationmessages.py: id: 011c39501c26 - last_write_checksum: sha1:408e26cb45dc1bdf88b1864d365e636307920df3 - pristine_git_object: 1aa294a497d2eb27a12dcbcce36c7956f6ee4f4e + last_write_checksum: sha1:95e3abe55199f2118e6fb7e5d8520af6a929449a + pristine_git_object: 84664b62337dcdc408bb01e0494fa598e6a86832 src/mistralai/client/models/conversationrequest.py: id: 58e3ae67f149 - last_write_checksum: sha1:f1b0b2b6a9c9b94ed5e3a77fb0b92e695f421a2e - pristine_git_object: 2005be82d8ebcf8c8fa74074abf25f072e795582 + last_write_checksum: sha1:f7a67082e06c1789f4c6a4c56bfef5f21cce5034 + pristine_git_object: 83d599ebf984f1df2390d97dbe651881f7dee0e2 src/mistralai/client/models/conversationresponse.py: id: ad7a8472c7bf - last_write_checksum: sha1:8b625fe8808f239d6bc16ecf90ae1b7f42262c0c - pristine_git_object: 24598ef3fc24a61a0f15ab012aa211ba57cd0dcf + last_write_checksum: sha1:99148d75abcb18c91ba0a801174461346508f5fb + pristine_git_object: f6c10969a931eaf1a4667b0fcff3765f57658b15 src/mistralai/client/models/conversationrestartrequest.py: id: 681d90d50514 - last_write_checksum: sha1:0ce81536464db32422165c35252770f3197fb38e - pristine_git_object: 35d3099361274440552e14934b6a1b19ebc8f195 + last_write_checksum: sha1:99123cee7c54f44c02b56111305af399143b4e5a + pristine_git_object: 7ae16aff4de36a91093d3021b66283e657b00897 src/mistralai/client/models/conversationrestartstreamrequest.py: id: 521c2b5bfb2b - last_write_checksum: sha1:b996f57271f0c521113913f48b31d54c17d73769 - pristine_git_object: 
0ddfb130d662d954c3daabdf063172b8ea18a153 + last_write_checksum: sha1:abfd14652b4785c36de84a59593b55f7a6a2d613 + pristine_git_object: 0e247261d997ac3d8ff0155ba54cc4cafe9ac65a src/mistralai/client/models/conversationstreamrequest.py: id: 58d633507527 - last_write_checksum: sha1:fc4f2f1578fbeb959ddbe681dee2d11f0a4e6c5e - pristine_git_object: 379a8f2859b5f40cc744ad8f9bc6c39a198258b5 + last_write_checksum: sha1:7dc25a12979f4082ed7d7e37584bb9c30297f196 + pristine_git_object: a20dccae1a60753ed95f59da0df78c204c19d515 + src/mistralai/client/models/conversationthinkchunk.py: + id: 77e59cde5c0f + last_write_checksum: sha1:5db067661a5d4b0c13db92ad93da1aab9e0e7a34 + pristine_git_object: e0e172e3edbe46c000e82e712c135b96a65312e9 src/mistralai/client/models/conversationusageinfo.py: id: 6685e3b50b50 - last_write_checksum: sha1:60f91812b9b574b3fade418cc7c2191253f6abbf - pristine_git_object: 98db0f1617bd7484750652997dcd43d08ef7c5fc - src/mistralai/client/models/createfinetuningjobop.py: - id: fd3c305df250 - last_write_checksum: sha1:e29ada8f733de44bfeab2885d2221ade84b34619 - pristine_git_object: f55deef5d9f6134fddb02c458a0d812759cea358 - src/mistralai/client/models/createorupdateagentaliasop.py: - id: a79cf28bda01 - last_write_checksum: sha1:d4f2790b5970c9cf30b3fcee9d8bc6d4b8c33778 - pristine_git_object: cde1dd054c447a8617527585e783a95affba3277 - src/mistralai/client/models/deleteagentaliasop.py: - id: e4d0d7f75b24 - last_write_checksum: sha1:66e34ba7fb1a238d55c7ed380bd666c8975c01b4 - pristine_git_object: c52d099e9c1f28bf37ee009833b5fb8e351ed987 - src/mistralai/client/models/deleteagentop.py: - id: 089fb7f87aea - last_write_checksum: sha1:a196bcc758e36ffeb17fab25bb60451d3d66a4d8 - pristine_git_object: 8b14bca7bf5d67e16181b67ef6b7375c1b0a93fd - src/mistralai/client/models/deleteconversationop.py: - id: 86fefc353db0 - last_write_checksum: sha1:48f33b614ec087fdaf2b29d9c3eefd6e8d7d311f - pristine_git_object: 39607f40640c6dfa3ef20d913a90abee602b9b4a - 
src/mistralai/client/models/deletedocumentop.py: - id: 62522db1ccf2 - last_write_checksum: sha1:1a4e2e72a0d3cd24e184ce3cc5037f5ec7cdd9a5 - pristine_git_object: 400070a49bc046d8132bfc7dfe3e114faa719001 - src/mistralai/client/models/deletefileop.py: - id: 286b4e583638 - last_write_checksum: sha1:2561c1fe03ec3915dfa48fa354a86a56ba9b54c4 - pristine_git_object: 4feb7812f8acfa366e4b46fc914925df4f705528 - src/mistralai/client/models/deletefileout.py: - id: 5578701e7327 - last_write_checksum: sha1:a34520be2271c1e37fa8b3c1bdead843db7b1bb9 - pristine_git_object: c721f32cfe752c2c084efb72db3e5409795e387a - src/mistralai/client/models/deletelibraryaccessop.py: - id: df80945bcf19 - last_write_checksum: sha1:065aad372e0bbfd998fe3adc3389e3dbc9d5b674 - pristine_git_object: ca14c3ffc43be3aee14d6aa1f4805f0483d8b676 - src/mistralai/client/models/deletelibraryop.py: - id: cd0ce9bf8d51 - last_write_checksum: sha1:07840cbdb741bba291f1db1a1b54daca99e8f7ea - pristine_git_object: 5eb6fc310aa62454e3f7ed0766212c807125fe8c - src/mistralai/client/models/deletemodelop.py: - id: 2c494d99a44d - last_write_checksum: sha1:97dce35d527e03612068896572824cc0f13269c1 - pristine_git_object: 55c4b2422336ef6e148eedbd4a6a60846d187e9b + last_write_checksum: sha1:3e0489836936a7a77fa3b41adde1eb459ecd176d + pristine_git_object: 1e80f89ee4f7a3d464df2bf39990b467029e86c1 + src/mistralai/client/models/createagentrequest.py: + id: 442629bd914b + last_write_checksum: sha1:273dde9338cc1eb166ee40f4c6215f90cae908ab + pristine_git_object: 54b09880eefe348d2e003ed1b238b67cb58b8e34 + src/mistralai/client/models/createbatchjobrequest.py: + id: 56e24cd24e98 + last_write_checksum: sha1:e648017622cd6e860cb15e5dd2b29bf9f2a00572 + pristine_git_object: 9a901fefee0ea6a825274af6fd0aa5775a61c521 + src/mistralai/client/models/createfileresponse.py: + id: fea5e4832dcc + last_write_checksum: sha1:b7f3ba95a09a3225eae80b53152fe2b7d3806fbe + pristine_git_object: 768212803bc3535ac8a27a9c0d48f147e3d536b7 + 
src/mistralai/client/models/createfinetuningjobrequest.py: + id: c60d2a45d66b + last_write_checksum: sha1:2e8e608140860bba9ecfa9498d61cf807f96680a + pristine_git_object: e328d944ce2a71ffbec027965d31075070647dbc + src/mistralai/client/models/createlibraryrequest.py: + id: 1c489bec2f53 + last_write_checksum: sha1:45fa65be82712ce99304027c88f953f0932bdae4 + pristine_git_object: 58874e014275b06ce19d145aaa34a48d11ca0950 + src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py: + id: 767aba526e43 + last_write_checksum: sha1:73568f2f450bf9c23aca3649372a92e1b9a2fc54 + pristine_git_object: 199614f53501f34088cb112d6fe1114e1e588d8a + src/mistralai/client/models/deletefileresponse.py: + id: 3ee464763a32 + last_write_checksum: sha1:2c0df66fc8c4384d50e54ac03577da3da2997cf5 + pristine_git_object: ffd0e0d015e38e5f6113da036ebeba98441444f4 src/mistralai/client/models/deletemodelout.py: id: ef6a1671c739 - last_write_checksum: sha1:4606683ef6da0aae7e88bc50144eddc83908f9d7 - pristine_git_object: bf22ed177ee91dce98bfd9b04f02e683c79e4860 + last_write_checksum: sha1:d67ac7c3fa143be40c74455c7206c94bfb5a2134 + pristine_git_object: fa0c20a419c59b8fc168c150b28d703398ea7f40 src/mistralai/client/models/deltamessage.py: id: 68f53d67a140 - last_write_checksum: sha1:ff7fa85086bd56863f7f4a255b008cfaa11a959c - pristine_git_object: fbb8231a310e90afd50951dd0f572ce3e0f029e6 + last_write_checksum: sha1:b18350de03a8685bea5ac52e1441415b5e58bdf4 + pristine_git_object: d9fa230e93d4e0886f21c836cf3813855eb8f9fd + src/mistralai/client/models/document.py: + id: fbbf7428328c + last_write_checksum: sha1:2a5a28c54f0aec50059b6badc1001b1cd120e7d3 + pristine_git_object: 31eebbd1a7d7fdcb498259837c533bfc8008a6f9 src/mistralai/client/models/documentlibrarytool.py: id: 3eb3c218f457 - last_write_checksum: sha1:e5bfb61a4a03a3b28837c27195f1bcd8cc14c6b2 - pristine_git_object: ff0f739391404604c1cc592c23507946aa0b693f - src/mistralai/client/models/documentout.py: - id: 7a85b9dca506 - last_write_checksum: 
sha1:f041a4866c67d1f81f62282918d625216a760355 - pristine_git_object: 3b1a5713c84512947a07d153792b17fcf3262dcb + last_write_checksum: sha1:d03a6136192b56778bd739d834d9bdc80a09cc23 + pristine_git_object: 642c3202b11c5bb8a2b41cf8ae0fe43f73aa2a81 src/mistralai/client/models/documenttextcontent.py: id: e730005e44cb last_write_checksum: sha1:c86f4b15e8fda1cd5c173da01462342cd22b7286 pristine_git_object: b6904cb4267347b62a457a01b91a391500326da9 - src/mistralai/client/models/documentupdatein.py: - id: d19c1b26a875 - last_write_checksum: sha1:bddd412de340d050cfbdd4206a9fbb3d1660a045 - pristine_git_object: 669554de5d33f6163c8d08fefee52c1869662eba src/mistralai/client/models/documenturlchunk.py: id: 4309807f6048 - last_write_checksum: sha1:186a684da48bb5d237769ecb3dbf1479a5c5ee55 - pristine_git_object: 304cde2b687e71b0d2fb0aee9b20826473375b25 - src/mistralai/client/models/downloadfileop.py: - id: 4d051f08057d - last_write_checksum: sha1:b80c5332cfdb043bb56f686e4e1c4bf26495b04b - pristine_git_object: fcdc01d644bdce8d1fc7896b5f8244a7a5311dfa + last_write_checksum: sha1:33cdaccb3a4f231730c7fa1db9f338a71e6311b2 + pristine_git_object: 43444d98b8b7fb430f9c33562c35072d9c79a263 src/mistralai/client/models/embeddingdtype.py: id: 77f9526a78df last_write_checksum: sha1:a4e2ce6d00e6d1db287a5d9f4254b0947227f337 pristine_git_object: 732c4ebe3678563ebcdbafd519f93317261586fb src/mistralai/client/models/embeddingrequest.py: id: eadbe3f9040c - last_write_checksum: sha1:6071612944c4c603803cc7f2adc1e9784549c70f - pristine_git_object: f4537ffa9bdc0a9a73101e1b1524fed1a09c1a65 + last_write_checksum: sha1:e36282eb015b782804b4bdf3d18b596607b020fd + pristine_git_object: 15950590fec8b82a4fb28d69009a6f6cfb83c9ee src/mistralai/client/models/embeddingresponse.py: id: f7d790e84b65 last_write_checksum: sha1:9bb53a5a860c8e10d4d504648d84da73068c0a83 pristine_git_object: 6ffd68941f32f396998df9dded14ff8365926608 src/mistralai/client/models/embeddingresponsedata.py: id: 6d6ead6f3803 - last_write_checksum: 
sha1:3e2430e6bd9b3c77a564f4e56edec1274446a1f4 - pristine_git_object: a689b290d5a4b360e409413c96bb5e7288ce2e2e + last_write_checksum: sha1:ba5f38ee6e2b0436532229da01ba79ee49c20d12 + pristine_git_object: 098cfae06eae6a92830b4b5a26985f5d5950e512 src/mistralai/client/models/encodingformat.py: id: b51ec296cc92 last_write_checksum: sha1:ea907f86b00323d99df37f7ff45d582aace798e7 @@ -2156,262 +2112,298 @@ trackedFiles: id: 62d6a6a13288 last_write_checksum: sha1:015e2db9e8e5a3e4ce58442ccedaf86c66239dde pristine_git_object: 56d82cbed237f32a8b00cfee4042dfe3e7053bcb - src/mistralai/client/models/eventout.py: - id: da8ad645a9cb - last_write_checksum: sha1:67f7cc29102a971d33b6cbbcb06ffcfe595227a5 - pristine_git_object: a0247555bb816061cb22f882406c11c3a9011818 + src/mistralai/client/models/event.py: + id: e5a68ac2dd57 + last_write_checksum: sha1:8ed848fe2e74c7f18ee8f4dcba39ad1c951c16d2 + pristine_git_object: c40ae2b1a1b8131a90c637e3268872b97b22683e src/mistralai/client/models/file.py: id: f972c39edfcf - last_write_checksum: sha1:8d0adce8f4dfc676f6da6465547a0d187d4326f1 - pristine_git_object: dbbc00b50e5578230daefa47648954ead8ed8eb9 + last_write_checksum: sha1:609381a40a4bfdda2e7e750a848cd2bb38d6ac0f + pristine_git_object: 1b0ea1d4a288d9723dcdd7cfda99d49c5cbd9e7c src/mistralai/client/models/filechunk.py: id: ff3c2d33ab1e - last_write_checksum: sha1:9f970ef8366df8087f9332a4b1986540063a1949 - pristine_git_object: 43ef22f861e0a275c7348133d0c4d04551477646 + last_write_checksum: sha1:d7561c39252b81007a8e079edb4f23989ffd510e + pristine_git_object: 5c8d2646dc0d5c732828bdd81c5a58e12fa92a42 src/mistralai/client/models/filepurpose.py: id: a11e7f9f2d45 last_write_checksum: sha1:8b167c02f9f33e32d5fd1c6de894693924f4d940 pristine_git_object: 49a5568ff82ad4a85e15c8de911e8d6c98dcd396 + src/mistralai/client/models/files_api_routes_delete_fileop.py: + id: 2f385cc6138f + last_write_checksum: sha1:ccfd3ff64635cfd511f49c5e02a6f1860c479966 + pristine_git_object: eaba274b9dd94d6cf729325316b3e3e9b3834566 
+ src/mistralai/client/models/files_api_routes_download_fileop.py: + id: 8184ee3577c3 + last_write_checksum: sha1:81058ede2a5eb333b54561f99ed7878082c0f411 + pristine_git_object: 83de8e73a3d50917e4a41bb92a828a10e646a632 + src/mistralai/client/models/files_api_routes_get_signed_urlop.py: + id: 0a1a18c6431e + last_write_checksum: sha1:ef4908b9d2e43c0256d25a5aa533c5bdc1205113 + pristine_git_object: 64cd6ac57b4f2de70403e11062307a8d8d5d94e7 + src/mistralai/client/models/files_api_routes_list_filesop.py: + id: b2e92f2a29b4 + last_write_checksum: sha1:71e67fc63f0df28c534d4bd03a6464ae88959dc2 + pristine_git_object: b03e2f886ce02d4beabca150302a924ae63ad507 + src/mistralai/client/models/files_api_routes_retrieve_fileop.py: + id: 5d5dbb8d5f7a + last_write_checksum: sha1:d451d8d2b32f412158a074919cca1a72f79940cb + pristine_git_object: 5f8de05f1bba07517dc2ee33a4f05122503b54b5 + src/mistralai/client/models/files_api_routes_upload_fileop.py: + id: f13b84de6fa7 + last_write_checksum: sha1:d38a86b9e7d338278e14c68756654d85bc330070 + pristine_git_object: 54ff4e4951a58e13993be0f5d2c16b0cb11c0978 src/mistralai/client/models/fileschema.py: id: 19cde41ca32a - last_write_checksum: sha1:245115d1f955324bce2eeb3220bdaa6906b28e92 - pristine_git_object: cbe9b0d17ad15ce02e9fd973fe49666885c6ff92 - src/mistralai/client/models/filesignedurl.py: - id: a1754c725163 - last_write_checksum: sha1:5d981b1743aa2d84818597b41a5f357b4256e9e0 - pristine_git_object: 53dff812ffe5c5859794424d49f8bd7f735cf3b0 + last_write_checksum: sha1:0b3acb889a2c70998da4076e2f4eef3698e8b117 + pristine_git_object: e99066a9eb19daebcf29f356225635a297c444e1 src/mistralai/client/models/fimcompletionrequest.py: id: cf3558adc3ab - last_write_checksum: sha1:db51cde0b13bb373097f2c158b665ccb3c5789f4 - pristine_git_object: e2f6032784c996d18c100b8b2cde4bb4432af884 + last_write_checksum: sha1:20bca1f6a0ab6e84f48b6e332f0c3242da84ae45 + pristine_git_object: ea877213d1abe4811fee188eb7a60ccf1bb51f18 
src/mistralai/client/models/fimcompletionresponse.py: id: b860d2ba771e last_write_checksum: sha1:dffd5a7005999340f57eaa94e17b2c82ddc7fd90 pristine_git_object: 1345a116b7855ab4b824cf0369c0a5281e44ea97 src/mistralai/client/models/fimcompletionstreamrequest.py: id: 1d1ee09f1913 - last_write_checksum: sha1:df973050b942b844280bf98f0a3abc90bd144bbb - pristine_git_object: 480ed17ab006e7afa321a91c5ccebd6380f8f60c + last_write_checksum: sha1:aa8313ecdd852034aaf6ec23dc3f04f7ef8e28e5 + pristine_git_object: e80efc095feb2e2df87f6d3c3f9c56b6cbf347b3 src/mistralai/client/models/finetuneablemodeltype.py: id: 05e097395df3 last_write_checksum: sha1:daf4cd1869da582981023dea1074268da071e16a pristine_git_object: 7b924bd7abc596f0607a513eee30e98cbf7ab57a + src/mistralai/client/models/finetunedmodelcapabilities.py: + id: 475c805eab95 + last_write_checksum: sha1:5919e48a6778f1a2360ce090d05b41b1bf33253f + pristine_git_object: 2f4cca0b8c0e3e379f5c2aa67953f2e55757f68d src/mistralai/client/models/ftclassifierlossfunction.py: id: d21e2a36ab1f last_write_checksum: sha1:ca90e2f1cd0b9054293bea304be0867c93f7fac2 pristine_git_object: ccb0f21b5a69f91119bec9db6e9f3d876e4c35af - src/mistralai/client/models/ftmodelcapabilitiesout.py: - id: f70517be97d4 - last_write_checksum: sha1:2bc7700ad89b7aab37fa02fcb6d9282bc252315e - pristine_git_object: 42269b785d9d5ad2257179f2c093c62637fb5dd6 src/mistralai/client/models/ftmodelcard.py: id: c4f15eed2ca2 - last_write_checksum: sha1:7441e4155beaa97cea47b6295017f567dd6eee1a - pristine_git_object: 570e95e2276b144e008e9ccf6a108faa1fc835f5 + last_write_checksum: sha1:b1b36ff994bcadd8c917880333627fd05976c991 + pristine_git_object: 2c26ff2f66faa55dc5a5a1743720e8f3f5d4d0f1 src/mistralai/client/models/function.py: id: 32275a9d8fee - last_write_checksum: sha1:356a2c6c9d2437e60036a9b3d1a3d154302363c8 - pristine_git_object: 3632c1afb40aebab0795f754814036e04c251469 + last_write_checksum: sha1:ca24a512de22787932d7f4af005699621926d6c0 + pristine_git_object: 
1da1dcc9b637d0a5b0fbb7cf2761f6d01eb3068f src/mistralai/client/models/functioncall.py: id: 393fca552632 last_write_checksum: sha1:6e96e9abaa9b7625a9a30e376c31b596ee9defcb pristine_git_object: 527c3ad408e1e1ccfe6301a8860e7f751e1d312d src/mistralai/client/models/functioncallentry.py: id: cd058446c0aa - last_write_checksum: sha1:6ece3816c50bd04b908743ad62e2dc71d815842a - pristine_git_object: 6ada1d358641a23bc83b93f222eeff659a124b34 + last_write_checksum: sha1:776f397d17f946bae2929998f14d991a1ccc99e0 + pristine_git_object: d05fad856729a76dd24f8aa4d050f8381e51ed6a src/mistralai/client/models/functioncallentryarguments.py: id: 3df3767a7b93 last_write_checksum: sha1:9858feba8f7f01017f10477a77dec851a1d06e55 pristine_git_object: afe81b24e131a8ef879ee7f140271aa762b8ed2f src/mistralai/client/models/functioncallevent.py: id: 23b120b8f122 - last_write_checksum: sha1:cb63fb3cfb4debfca7b207b49e592566619f84b1 - pristine_git_object: 5d871a0e0f15cc27afe3c861f387609aa9a8a17f + last_write_checksum: sha1:62b5b94df4e5b6f945ead78871cdbfceb6cd40cf + pristine_git_object: 849eed76d08524e5e4d1e7cc1c3fa04386f5ef75 src/mistralai/client/models/functionname.py: id: 000acafdb0c0 last_write_checksum: sha1:4145b7b817b712b85dcbedb309416c7ba72d827e pristine_git_object: 07d98a0e65ccbcba330fb39c7f23e26d3ffc833c src/mistralai/client/models/functionresultentry.py: id: 213df39bd5e6 - last_write_checksum: sha1:04a8fd7396777c412fa9c73c0bef148b2ab53cb2 - pristine_git_object: ca73cbb7481fe0e97b354e9abe5ef6034f10bd98 + last_write_checksum: sha1:3aa6834bf2beda061ac772a0a8a4d7ed5ad942a0 + pristine_git_object: 01e2e36fc0a9de6a2b06a4205004992baf0f9e43 src/mistralai/client/models/functiontool.py: id: 2e9ef5800117 - last_write_checksum: sha1:5c4ea61a1bccd87e1aae06bfa728c29a4ec60c54 - pristine_git_object: 13b0449687f64848cb2f2fdf792f148f9e3cfed9 - src/mistralai/client/models/getagentop.py: - id: 5a28bb1e727e - last_write_checksum: sha1:50a681253a1075f1268a269cd67154efa35dff6a - pristine_git_object: 
55d8fe6860fa4c868c4d6d5d5d2ce4571e9071b4 - src/mistralai/client/models/getagentversionop.py: - id: a0db5a6aab1f - last_write_checksum: sha1:d1dfc0927abcae22460838902d1f5ddc2a224856 - pristine_git_object: 77b8a2662939e03b261f713aa7d9676746a4df1e - src/mistralai/client/models/getbatchjobop.py: - id: 443103fe3b88 - last_write_checksum: sha1:3a7f9656f3d169c60f0d3f16b00c4136d193468e - pristine_git_object: 792c3e2121902734094a7224c8605109fc697f44 - src/mistralai/client/models/getconversationhistoryop.py: - id: c863a4cbeb34 - last_write_checksum: sha1:4e04b4550c7b48635eca1943bcfee64027f0e7ca - pristine_git_object: c1fbf3de4ee966fffa2400a9c109d952b26543da - src/mistralai/client/models/getconversationmessagesop.py: - id: bb8a90ba7c22 - last_write_checksum: sha1:1b7aad5c74338aeecb11de44d8378aaa75498e37 - pristine_git_object: 6666198edce05a99c55f1c35f26f6d3b548c9b0d - src/mistralai/client/models/getconversationop.py: - id: 1a622b8337ac - last_write_checksum: sha1:4665e81fae4f12fabc09629f32d28c1c2de2bcf2 - pristine_git_object: d204d1755b4dc23ba8397ad24fec30bd064eacce - src/mistralai/client/models/getdocumentextractedtextsignedurlop.py: - id: 69099395d631 - last_write_checksum: sha1:f6d5e8499a314e903301e419fb206c33644363ff - pristine_git_object: 9a71181d3abd625643e741c562fe73f25bf12932 - src/mistralai/client/models/getdocumentop.py: - id: de89ff93d373 - last_write_checksum: sha1:4d1f358dfe3b44ccd2a88aea6730fbaf4b5f1d93 - pristine_git_object: d7b07db791a3adb3992475f0cf49c3fe01007ad9 - src/mistralai/client/models/getdocumentsignedurlop.py: - id: b8d95511c6d1 - last_write_checksum: sha1:255a0b505d558db3149652822718c7bcecc706e8 - pristine_git_object: e5d56c54c1ffc3529a8d1cf013bcb3327392b269 - src/mistralai/client/models/getdocumentstatusop.py: - id: f1f40b8f003f - last_write_checksum: sha1:c442daff8adb3db0ac58b03e54b7c05c82b202a9 - pristine_git_object: 4206f593ca58650f9df17b377b67c374a1b0d883 - src/mistralai/client/models/getdocumenttextcontentop.py: - id: ba23717093ef - 
last_write_checksum: sha1:33f047af38e4be2b71f4d90a36614ea7ab096a28 - pristine_git_object: 8a7b4aae025bbcb5ade5d4d36f2bb5e34cbb315e - src/mistralai/client/models/getfilesignedurlop.py: - id: 1aa50b81c8cf - last_write_checksum: sha1:a8fb95f119d173dd1d7afed02597a297dbbc7a89 - pristine_git_object: 06ed79eea058d4ebffc5d0b87ae2d06a32f4755a - src/mistralai/client/models/getfinetuningjobop.py: - id: afe997f96d69 - last_write_checksum: sha1:25db6d0d336a78189b603bbce16b0e0de84a33f1 - pristine_git_object: 1fb732f48a1a4c2993185a6a272879a83c80dc06 - src/mistralai/client/models/getlibraryop.py: - id: c84a92e23a90 - last_write_checksum: sha1:d51c0cf40a6ed398b0cb7078fe897d047b55e251 - pristine_git_object: bc0b4a238b146c6e5853e0b9d3031a876f30bc17 + last_write_checksum: sha1:bce744d77a3dac92d4776a37be497311674bdc7d + pristine_git_object: eae872643c85115a825c2feda11d9a6c12a06b99 + src/mistralai/client/models/getfileresponse.py: + id: 81919086e371 + last_write_checksum: sha1:fc0232e54c0de355058c5bd82e424953b1659b56 + pristine_git_object: f625c153799dcd38e4990504d48371112b65cd15 + src/mistralai/client/models/getsignedurlresponse.py: + id: cee4e4197372 + last_write_checksum: sha1:ab9adbc06e7f02e791dc549ad1850ce1b1a250a7 + pristine_git_object: 4ba95894f2b89719fa58e7e397c28014dbd00316 + src/mistralai/client/models/githubrepository.py: + id: 4bc83ce18378 + last_write_checksum: sha1:21aa04bc426158ccbe1ded3bc65b46e6869e897d + pristine_git_object: 84b01078c2192de5d6668a6943d416a2ff30db5f src/mistralai/client/models/githubrepositoryin.py: id: eef26fbd2876 - last_write_checksum: sha1:cc98805951c3f80d9b8f0ba4037cf451551b0742 - pristine_git_object: e55389c380416f69ed7dc085cbbaaba056c4d1ba - src/mistralai/client/models/githubrepositoryout.py: - id: d2434a167623 - last_write_checksum: sha1:76d98ac7613e626599cb4c7a0b0366e9b20815ff - pristine_git_object: 514df01c217b40d8c050839ac40b938c68ef1bf6 - src/mistralai/client/models/httpvalidationerror.py: - id: 4099f568a6f8 - last_write_checksum: 
sha1:be2db0d4ec07da0ddb37878761545c3dde8fb8ec - pristine_git_object: e7f0a35bf208c32086c7b448273d1133d0f1027b + last_write_checksum: sha1:18bd07155fff4b99d114353fee95e6bd828aeacd + pristine_git_object: 38bcc2087630f2fd4e9e5fa149449c32e21fdb07 + src/mistralai/client/models/imagedetail.py: + id: c1084b549abb + last_write_checksum: sha1:375db5c8fa87712dc37e46d0bf72283ae6cd6400 + pristine_git_object: 1982d357277a92fc7ebea3b99146116596d99c78 src/mistralai/client/models/imagegenerationtool.py: id: e1532275faa0 - last_write_checksum: sha1:85122792c3ba324086096345119fedf326f55c86 - pristine_git_object: 680c6ce2d08277e65e23ea3060e83c1fa4accb78 + last_write_checksum: sha1:88a1347876f69960dc33f8e2cb9929ab1a90a224 + pristine_git_object: c1789b18028156ae683d0323e65e47a43694570f src/mistralai/client/models/imageurl.py: id: e4bbf5881fbf - last_write_checksum: sha1:9af5cff0b3a2c1c63e2bd1f998dcfeab273fd206 - pristine_git_object: 4ff13b1ccbc157f21013aacd7a062e89a26dcbf9 + last_write_checksum: sha1:28ef2509fdb489ecf379b60e883e6957aebd2797 + pristine_git_object: ac1030f5d61144e393b2aa9f3ffea893faabb1f7 src/mistralai/client/models/imageurlchunk.py: id: 746fde62f637 - last_write_checksum: sha1:57e48972720a3e317291250d6d94c44d295b69f5 - pristine_git_object: 993185cce833c59ad341b977cf9263654951fa03 + last_write_checksum: sha1:0ac388d25cae5348ffb3821706c3a8b64e716ff5 + pristine_git_object: 7134b46e7428cee52eda859cb78387c99f7e1f5a src/mistralai/client/models/inputentries.py: id: 44727997dacb - last_write_checksum: sha1:44ef8e75dd43b82276a0f06ef5c6be9eed46b379 - pristine_git_object: dc9892956f0e2583c51bf49ef89adbd22b8646d5 + last_write_checksum: sha1:9e2a776be59c5043ea4179a60ac082faf064cc3d + pristine_git_object: e2da5a80aea121d18e2232f302ad73f63b4fc050 src/mistralai/client/models/inputs.py: id: 84a8007518c7 - last_write_checksum: sha1:871491fa3b24315bc1bddf371334381f75ab035d - pristine_git_object: cfcdeb3d5895ccb34512c2a0a2e799e763e09c09 + last_write_checksum: 
sha1:d067587b5395529fbd638741f20b80edb2848e39 + pristine_git_object: 9ecd7f484ea306b91a9ebf038a0addd80ccd57c4 src/mistralai/client/models/instructrequest.py: id: 6d3ad9f896c7 last_write_checksum: sha1:b56a77442b50b50151adedaa5ec356dc96c56428 pristine_git_object: e5f9cccf174d8e73c42e8ee4aa294b43e1ad6cf5 - src/mistralai/client/models/jobin.py: - id: f4d176123ccc - last_write_checksum: sha1:478a9beaf1c5ada536f5c333a47aa2ac0900bd16 - pristine_git_object: b3cb8998b5b0ce00414e40643eb3e259b2c0aabf - src/mistralai/client/models/jobmetadataout.py: - id: 805f41e3292a - last_write_checksum: sha1:1333181d5a3dff43076095f61e1d57f37085abbe - pristine_git_object: 1d386539d8c638d96b8f468cfca3241dfc07a9f3 - src/mistralai/client/models/jobsout.py: - id: 22e91e9631a9 - last_write_checksum: sha1:e9434f43df7df8e991eb0387eabcf308cae3cb65 - pristine_git_object: a4127a5d835c0f0ead04980f05cb293e18970905 + src/mistralai/client/models/jobmetadata.py: + id: cfbdde7fc0a2 + last_write_checksum: sha1:e1b180a47ca888d0fd4cbc34b62000d3ac86c2b5 + pristine_git_object: f6e96fa104e7a6c8ce9a94538a3d00167a2ae341 + src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py: + id: b56cb6c17c95 + last_write_checksum: sha1:21b5794f110c53691654d7195201f9a4b7793f21 + pristine_git_object: de2e63472ac53809cfeae200bd7d2f3dcbb70034 + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py: + id: 36b5a6b3ceee + last_write_checksum: sha1:b41862f037d74bbdc44fb4df5f65cd402a16703b + pristine_git_object: d779e1d96c359b0d548d5dee17c06ae2a505cf47 + src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py: + id: d8f0af99c94d + last_write_checksum: sha1:a50885f97cfd4d38bc3e3b0746c88bd602b88f94 + pristine_git_object: 89ac3c933347497b6fb1ec26fecb485802ef85fc + src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py: + id: 34f89d2af0ec + last_write_checksum: sha1:3d5242f757ee9be10963af9cd5d47824fc83c71a + pristine_git_object: 
9fa99837dda7e9413d3a05822cd17107c5fae51d + src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py: + id: d175c6e32ecb + last_write_checksum: sha1:515b7737cf8262243ee6175e297714125f3962bc + pristine_git_object: 56fa534044522f27fb26ef4820d10f22752134ea + src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py: + id: 81651291187a + last_write_checksum: sha1:19a0707e2f73b0184959d7c710a170650fa1767a + pristine_git_object: db857f7d6cc77057491e4b968798f730228b09bc + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py: + id: d910fd8fe2d6 + last_write_checksum: sha1:52704f01d7388a8b62d59b6f7cd94fcb7d067ebf + pristine_git_object: ddd9c1891356a7c272e0244a9aea3d3d6b2d00d6 + src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py: + id: cf43028824bf + last_write_checksum: sha1:36082bde6f3d932c66178729533e2a69040fdeab + pristine_git_object: ec80a158f45061b122f84ebaff89ae82ef8d98ef + src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py: + id: e7ff4a4a4edb + last_write_checksum: sha1:8cbfc309c09df806ad7d130004b4e1c2b89ede0a + pristine_git_object: cd25fa04f29dd544f01f3620b31d1c54c86addbb + src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py: + id: 7cc1c80335a9 + last_write_checksum: sha1:f66c16423155066b844f8e89446d2acbb6e68157 + pristine_git_object: fd01fe6948613b0fffef9ac76cf1a0f9011ec5af + src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py: + id: 6d9dc624aafd + last_write_checksum: sha1:fbacb171b9c75f1fe45406f542a958d10c15fae2 + pristine_git_object: 296070b426900305fe4596f03a3c9f081cdb2dcf src/mistralai/client/models/jsonschema.py: id: e1fc1d8a434a - last_write_checksum: sha1:6711508e9c1bd20fc8b1bfdbd1181ca29144ef0d - pristine_git_object: 948c94ed8fe8102a9cdced68fde6be03489f5778 - src/mistralai/client/models/legacyjobmetadataout.py: - id: 4f44aa38c864 - 
last_write_checksum: sha1:e93d512c8cb6e0812248a195ff869428209cd71f - pristine_git_object: 4453c15798f4fd4db2de64e0beaf7ad557d82fa1 - src/mistralai/client/models/libraryin.py: - id: 6147d5df71d9 - last_write_checksum: sha1:34c5c9582a488fe87da084e74316e0fd76aa28d1 - pristine_git_object: 1a71d410d997a6d3f197947f821117e0605517af - src/mistralai/client/models/libraryinupdate.py: - id: 300a6bb02e6e - last_write_checksum: sha1:c9b1a0a00d31fa839df12353f1a3ee9d0b3ffb60 - pristine_git_object: 328b2de3cd4e304fd462882eca7226e460b7c4a7 - src/mistralai/client/models/libraryout.py: - id: 4e608c7aafc4 - last_write_checksum: sha1:9841adb596398554dfcaeb35b7e5a0572c541cff - pristine_git_object: c7ab7b8d39b68b5998c4874f9942caa275cf65d9 - src/mistralai/client/models/listagentaliasesop.py: - id: ff038766a902 - last_write_checksum: sha1:eef4e471999d5df5195aea51cde027b55567aeef - pristine_git_object: 83c6d1769c10fe38402a36b6aff2a18da61f4504 - src/mistralai/client/models/listagentsop.py: - id: a573a873c404 - last_write_checksum: sha1:db3c9e6ddc146138ed971f9970d9a164c0f97456 - pristine_git_object: 863fc13af1429bd1a6c02a9a20d2b6cb0cad7b34 - src/mistralai/client/models/listagentversionsop.py: - id: ccc5fb48e78f - last_write_checksum: sha1:0f2306bcceba2a2d7bfeb0be33126514d9287f17 - pristine_git_object: 613d3d8516690e6cba15922dfe69bdf62c039b01 - src/mistralai/client/models/listbatchjobsop.py: - id: f49af453f5e6 - last_write_checksum: sha1:e48b0e7371ee8f637e4fd6bed140cdbb1d405a7d - pristine_git_object: 5322df816e391a5569afcfd14edaeb128467a176 - src/mistralai/client/models/listconversationsop.py: - id: d6007f6c1643 - last_write_checksum: sha1:ece12b550abe6e17eb79f7a05593a93ea055f3f6 - pristine_git_object: 1c9a347c0ad4801c3a1b941e6328061d23d7dcd5 - src/mistralai/client/models/listdocumentout.py: - id: b2c96075ce00 - last_write_checksum: sha1:fc3eca772d1e32938ea1bd2f3e98cdea5f1003f3 - pristine_git_object: a636b3deff66fe4277a63c04fc7dd6c5e74e58e7 - src/mistralai/client/models/listdocumentsop.py: - 
id: 3e42bdc15383 - last_write_checksum: sha1:d9beade6d8bb8050a67e32c2a73926b140015e68 - pristine_git_object: 0f7c4584d793c7e692a4bbc6678e18549b0e0364 - src/mistralai/client/models/listfilesop.py: - id: e5bd46ac0145 - last_write_checksum: sha1:3e0bc8a7318ffd1c3fe15f335ea2bc1e18c714a1 - pristine_git_object: a9af5c70c98adce56653ff01772fe5900530a36e - src/mistralai/client/models/listfilesout.py: - id: ae5fa21b141c - last_write_checksum: sha1:4bc8ef424beb41c75d9c6fa4e101d330a951a99f - pristine_git_object: 460822f71fe8b0fc6292b804dc2a9de29bff4ef5 - src/mistralai/client/models/listfinetuningjobsop.py: - id: b77fe203b929 - last_write_checksum: sha1:af98423b166930cd18a1d377ea688540f3364166 - pristine_git_object: 8712c3fa6ac24094532fdfc047561997ea34552f - src/mistralai/client/models/listlibraryaccessesop.py: - id: 581b332626b7 - last_write_checksum: sha1:0a6bd277a706d807d87d3f2a4f870cc6ba917928 - pristine_git_object: 2206310f301f6ea40f14a495f5f6c6b4e76dbbf7 - src/mistralai/client/models/listlibraryout.py: - id: cb78c529e763 - last_write_checksum: sha1:3cd81fd6f6d2421c6b6d06077f0bf1d5b3c96cad - pristine_git_object: 39fa459f7cc7be17c751025287d7827c9d141aac + last_write_checksum: sha1:d01507ab0a1f6067cbc65aaba199de340ccc68aa + pristine_git_object: dfababa694305c96f98ddebf2f09e448e737c855 + src/mistralai/client/models/legacyjobmetadata.py: + id: 0330b8930f65 + last_write_checksum: sha1:3c2f669a05cc01227f62d6a8da1840d9c458d52f + pristine_git_object: 5757675895b3c56d8aa7c174deb08567e596ecf8 + src/mistralai/client/models/libraries_delete_v1op.py: + id: b2e8bbd19baa + last_write_checksum: sha1:ba41496bc99040f7598659c5b037b955b7f6d385 + pristine_git_object: 893ab53b11672edd9cde175e68a80d89ff949cb6 + src/mistralai/client/models/libraries_documents_delete_v1op.py: + id: 81eb34382a3d + last_write_checksum: sha1:66d1c6ec5e2535b0db72a3beac65b25a1f2336d7 + pristine_git_object: 0495832efba33314f3cd28fe62759c6dac5ca706 + 
src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py: + id: a7417ebd6040 + last_write_checksum: sha1:030ca9fb7e10396e6b743ee644fe1a734e1df1f0 + pristine_git_object: 186baaed8346d106272fea2e4826587634b061bc + src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py: + id: d4b7b47913ba + last_write_checksum: sha1:fdad7a6d3ae9a9c69009caf8207b284835675a9a + pristine_git_object: ebcf85d77ed6982d510ae95a6971e1d4b3ad56ca + src/mistralai/client/models/libraries_documents_get_status_v1op.py: + id: f314f73e909c + last_write_checksum: sha1:11d463eb328a1133658e8ff92340edc7f75923e4 + pristine_git_object: 1f4847874cdeff26caaf5fd16e0f8382834ecb2b + src/mistralai/client/models/libraries_documents_get_text_content_v1op.py: + id: 1ca4e0c41321 + last_write_checksum: sha1:26133a83bf0ef063c78069da1bbb96d58f44f30c + pristine_git_object: e0508d66fce682ed20a029604897137940689327 + src/mistralai/client/models/libraries_documents_get_v1op.py: + id: 26ff35f0c69d + last_write_checksum: sha1:e87e56e8fb9f7c11d61c805362db4755a81186b9 + pristine_git_object: 857dfbe60c57af8b0fa6655a049ed336d70fb941 + src/mistralai/client/models/libraries_documents_list_v1op.py: + id: 756f26de3cbe + last_write_checksum: sha1:5a1a9e025dc7a1fedaa5199d396a73c4986d4113 + pristine_git_object: da7d793b65139a3894b077a9665b392e8a44e8a2 + src/mistralai/client/models/libraries_documents_reprocess_v1op.py: + id: dbbeb02fc336 + last_write_checksum: sha1:bd5013cb1581dd13642ce7edf1e701f5b0c967c4 + pristine_git_object: a2f9ba2a0465fb3a8eb3b9afbb41d813de348656 + src/mistralai/client/models/libraries_documents_update_v1op.py: + id: 734ba6c19f5f + last_write_checksum: sha1:e12ca003680ff17523fe09438cd8f71d00ea081e + pristine_git_object: 7ad4231f72901b675d9af67c33364592c86be5ab + src/mistralai/client/models/libraries_documents_upload_v1op.py: + id: "744466971862" + last_write_checksum: sha1:9691ac41ecf986c9ccfad81423d367f96b10f4b7 + pristine_git_object: 
388633d1c7e906803b711ef2bbf37656624515a9 + src/mistralai/client/models/libraries_get_v1op.py: + id: d493f39e7ebb + last_write_checksum: sha1:25b3c2c1040cd73ebd6b988b8b27708831affefd + pristine_git_object: 7a51d6053aa2cf2e6524a80487fe9549eec3dfa1 + src/mistralai/client/models/libraries_share_create_v1op.py: + id: feaacfd46dd3 + last_write_checksum: sha1:72e07fb60edbe1989865ba2ac90349edeb183f7e + pristine_git_object: 00ea74824b2efc4150d2e547e2eee416e5f6f2ee + src/mistralai/client/models/libraries_share_delete_v1op.py: + id: 7f3a679ca384 + last_write_checksum: sha1:897857c11cf0c14a0a81ef122dec4395dc16c0ce + pristine_git_object: eca3f86a6135e702f8cb6412a5f215dac2335a8f + src/mistralai/client/models/libraries_share_list_v1op.py: + id: 8f0af379bf1c + last_write_checksum: sha1:d27e0360c504576c315350fc226d371da455a598 + pristine_git_object: 895a259059283a17cc7558e3cc03022e2d4dd259 + src/mistralai/client/models/libraries_update_v1op.py: + id: 92c8d4132252 + last_write_checksum: sha1:a252f68e65cdb47e27d7059f256381daf2847344 + pristine_git_object: 54b0ab708c665ccb841b1c8d0f2748c390850506 + src/mistralai/client/models/library.py: + id: 028a34b08f9c + last_write_checksum: sha1:65f02f963a0540385681b88c7c7fba98d0d704f4 + pristine_git_object: 1953b6fbc6d7ad245ccacd9d665fb29853b00af7 + src/mistralai/client/models/listbatchjobsresponse.py: + id: 99d94c86a871 + last_write_checksum: sha1:7530be5f80a0756527be94758e800e8118e53210 + pristine_git_object: 35a348a1160dcf6d82d58c70cea07e11730359fb + src/mistralai/client/models/listdocumentsresponse.py: + id: f593d8e66833 + last_write_checksum: sha1:0d842168856056ff681b2a1c36b87df8e0d96570 + pristine_git_object: c48b8c051ad0d1fb4aed8396697e57e782be5a40 + src/mistralai/client/models/listfilesresponse.py: + id: 85d6d24c1a19 + last_write_checksum: sha1:caf901685bfb6f13d707b89726aaf6e5116cd054 + pristine_git_object: 10a60126600343033a4b0511d717cac6f1924b4d + src/mistralai/client/models/listfinetuningjobsresponse.py: + id: 118e05dbfbbd + 
last_write_checksum: sha1:f0582740a6777039e9695d97f072b5a3c34b483e + pristine_git_object: 1e434c5986bf577e2b42cca943cc6896a83d1fa2 + src/mistralai/client/models/listlibrariesresponse.py: + id: df556a618365 + last_write_checksum: sha1:55afb46b1fa797bc46574e5256cd063574c6fcbf + pristine_git_object: 337fe105731d8f3ced1f8f1299ff4081b9d5bfbe src/mistralai/client/models/listsharingout.py: id: ee708a7ccdad last_write_checksum: sha1:18e6501b00a566121dfd6a1ce7b0e23fef297e45 @@ -2422,208 +2414,192 @@ trackedFiles: pristine_git_object: a95098e01843fe3b4087319881967dc42c6e4fef src/mistralai/client/models/messageinputcontentchunks.py: id: 01025c12866a - last_write_checksum: sha1:9eab6d7734dcd4bf9da5222c1927f5f40ef45db0 - pristine_git_object: 63cf14e7fcbc7c3969220b4f07109473b246bf49 + last_write_checksum: sha1:6a0988d4e52aa2e9f7b09ae1e3266ecf9639c22b + pristine_git_object: 1e04ce24d62db6667129b35eb28dabcfd4135ea8 src/mistralai/client/models/messageinputentry.py: id: c0a4b5179095 - last_write_checksum: sha1:b1b8f5b78eb5f57f5cfa7163ed49101736bcefaa - pristine_git_object: 15046d25130cda6571f07a456c2b5a67d2a3bcc0 + last_write_checksum: sha1:b5bad18b88c0bfbbddfdafa6dc50a09e40a6ebd7 + pristine_git_object: c948a13e3cc2071dd1b3d11c419ea61d51470152 src/mistralai/client/models/messageoutputcontentchunks.py: id: 2ed248515035 - last_write_checksum: sha1:df4ef4d17ce48df271ff2b8cab297ae305aa08ec - pristine_git_object: def7a4d27cd3d1479864a1d6af19e89bd57bff70 + last_write_checksum: sha1:dc7456e44084cba9cc6a46553fd64b1eb25f8d77 + pristine_git_object: bf455d17db16e4bc11da0ebb105a9f6ad4d63c01 src/mistralai/client/models/messageoutputentry.py: id: a07577d2268d - last_write_checksum: sha1:0633b8c619883bedb1a6ad732c5487c7e7f817f9 - pristine_git_object: 8752fc36bfec39e0ab79d4593ae0cb43ea00641c + last_write_checksum: sha1:38ad03422407925087835ab888c0be40bf5fa7fa + pristine_git_object: 6a9c52ed59af1497577be2538e7141d57eea4c8f src/mistralai/client/models/messageoutputevent.py: id: a2bbf63615c6 - 
last_write_checksum: sha1:bbdb2c840a7a196edcb6ac6170e8273cc47a495e - pristine_git_object: 39c1013939ea238cb1c7ccbc05480a6840400061 - src/mistralai/client/models/metricout.py: - id: 92d33621dda7 - last_write_checksum: sha1:6198ba9e2cd66fcf7f9fcc1cf89481edd432cf11 - pristine_git_object: 5705c71283ce7d4a01d60752657f39279c0f1f85 - src/mistralai/client/models/mistralerror.py: - id: 68ffd8394c2e - last_write_checksum: sha1:8b867eca5ca81aa6364f13c9d7e42f9b0d855724 - pristine_git_object: 862a6be8294db5b30bb06cb7b85d60c52ed8e8c9 + last_write_checksum: sha1:c3317ab9279c499dd7fb26f45799ca9369676ac7 + pristine_git_object: d765f4fd3c4e43c37063833368e4b21cc0bfbcf2 + src/mistralai/client/models/metric.py: + id: c6a65acdd1a2 + last_write_checksum: sha1:5ef7c75b278f16b412b42889ff0f2fc19d87cb7d + pristine_git_object: 1413f589f7f23991a12c1367bc6f287b5e07d4a4 src/mistralai/client/models/mistralpromptmode.py: id: 95abc4ec799a last_write_checksum: sha1:a1417b987bb34daeb73ca4e015c085814e6c8ad2 pristine_git_object: 9b91323e7545d636308064085ca16fc554eac904 src/mistralai/client/models/modelcapabilities.py: id: 64d8a422ea29 - last_write_checksum: sha1:5bc65733cf1c2f4ee8e1b422636fda754bdf8afe - pristine_git_object: c329efbcd9be212c7428c09f28f897834c9239d3 + last_write_checksum: sha1:0f733a45f06cb2c603b47134d999a2de4c0a7bb0 + pristine_git_object: d9293ccc163995cfe0419d05c90fe1ae8e75cf57 src/mistralai/client/models/modelconversation.py: id: fea0a651f888 - last_write_checksum: sha1:6186e845be2717da6116e20072835c050d3fdaa5 - pristine_git_object: c0bacb7fd9cd052ecb31a72c6bf593504034e069 + last_write_checksum: sha1:4c1b31d95351dea877e24bd452b32d8e22edf42e + pristine_git_object: bb33d2e0e047bc075cb7ae284958b80a5b5ee657 src/mistralai/client/models/modellist.py: id: 00693c7eec60 - last_write_checksum: sha1:89695c6a680da571c7a77c4544607bd83b3a93d5 - pristine_git_object: c122122c38a3331337cc702340cf1d3e0c9ef99d + last_write_checksum: sha1:de62fc6787f482e5df0ff0e70415f493f177b9a1 + pristine_git_object: 
5fd835f24cd1098a153ebfb3e958038a183d28a7 src/mistralai/client/models/moderationobject.py: id: 132faad0549a - last_write_checksum: sha1:742d942d72b615432c066827b822290cf4d51d40 - pristine_git_object: 9aa4eb15d837ab2af97faa131a362d50a3a85482 + last_write_checksum: sha1:a8c1454a533e466216ef98dd198ae8959f51fa76 + pristine_git_object: e7ccd8f6f1f75704a973be7ebabc49617070c34a src/mistralai/client/models/moderationresponse.py: id: 06bab279cb31 last_write_checksum: sha1:b9158e575276c1e0a510c129347b9a98c5a70567 pristine_git_object: a8a8ec3d8d8a58deb3c1f8358c6dce5a9734f89c - src/mistralai/client/models/no_response_error.py: - id: 2849e0a482e2 - last_write_checksum: sha1:35b1651843a697024146d6377838b5b99c5c66d3 - pristine_git_object: 7705f1945567498ce606364490685a91b13cd8dd src/mistralai/client/models/ocrimageobject.py: id: 685faeb41a80 - last_write_checksum: sha1:663f11a19e067d424263eee40d8127cdc56fb72e - pristine_git_object: e95b67e17e51653bf194ad1cff3a926f34cf97c2 + last_write_checksum: sha1:13f4e4d33d8fb5b0ee842695d4cc8329bd7ca382 + pristine_git_object: 365f062b5674141aad4b1601a85bec7a56db4cff src/mistralai/client/models/ocrpagedimensions.py: id: 02f763afbc9f last_write_checksum: sha1:f572ed8992ba1ba4d53b705c4e8c94c85ae1290e pristine_git_object: 847205c6c74a621dd2ee6d9eb18d1acba8395c50 src/mistralai/client/models/ocrpageobject.py: id: 07a099f89487 - last_write_checksum: sha1:10cbd1dddcb1f1f5d530048130908ad0ce715928 - pristine_git_object: 4f4ccf43011fa2563f79bb70ae2a813b84f04074 + last_write_checksum: sha1:5089ac3f02e4225d6c95cc9f05b74013694536da + pristine_git_object: ffc7b3b615e17a8e0d76fea4081249b143d8fe4d src/mistralai/client/models/ocrrequest.py: id: 36f204c64074 - last_write_checksum: sha1:8e669292b846a5af4e3cee0b632524696e3067bc - pristine_git_object: 18b899dd5ecc162dc8e92622f56bed503fff80f7 + last_write_checksum: sha1:9e9009dace9ff36cbff0cb8de408a1e0585147a7 + pristine_git_object: 4ad337ced23b3bdad21785b8dc3fcadbb868d4f0 src/mistralai/client/models/ocrresponse.py: 
id: 2fdfc881ca56 - last_write_checksum: sha1:4a28dbfcc076c149e4f08a830d4d7f770836eb15 - pristine_git_object: 0a36e97500b4f62adac2526d7dd7cb85c9bdb8b8 + last_write_checksum: sha1:f1d18dbf4cd02f3598ae574d5033c30989fa6985 + pristine_git_object: e63eed987f4eb83f3406b15cf4d840fd43528a49 src/mistralai/client/models/ocrtableobject.py: id: d74dd0d2ddac - last_write_checksum: sha1:3116548673509f4e9f6a50d39f58ce3374823cc4 - pristine_git_object: e32ad894cd97546e635d12595051da103cde9fd8 + last_write_checksum: sha1:492f8e4c30b61330592768b13cffcf9a9eb2c0fa + pristine_git_object: 66bb050f30790c3fc51cdca1b73e847388fe50c5 src/mistralai/client/models/ocrusageinfo.py: id: 272b7e1785d5 - last_write_checksum: sha1:b8fb06d0dad22f958ac756e65d70f5ba410ad47a - pristine_git_object: a421d850450bb3f0b62853c931cd457434d2f174 + last_write_checksum: sha1:2b37766fdff72e7ec6e052f248362f7bb3989d2c + pristine_git_object: 2ec1322b29d7fe5246b9ad355a4997222b37970f src/mistralai/client/models/outputcontentchunks.py: id: 9ad9741f4975 - last_write_checksum: sha1:afb76f3af2952c2afab5397e348ddfd6dbb56c4f - pristine_git_object: 1a115fe8b4874a6bd86719d91332cd3db6d95b46 + last_write_checksum: sha1:16c43816ac7b7afd134bce1cda5bb44485d9fafe + pristine_git_object: fab7907b105cc9d9c738c5cca9c09eba9d5c4781 src/mistralai/client/models/paginationinfo.py: id: 48851e82d67e last_write_checksum: sha1:166961e2c0f573ba0677ee803820bb944a8a5efb pristine_git_object: 2b9dab6258249f7be87e1d4a73a2502e21fe1f0d src/mistralai/client/models/prediction.py: id: 1cc842a069a5 - last_write_checksum: sha1:ca391fc2f9faf1657392ceda952c2ee422121952 - pristine_git_object: 52f4adf1eb46d7d5679f9705871cd73e08ae8830 + last_write_checksum: sha1:3ee24375eb7f00cea0c9db6eebc564ce7067f295 + pristine_git_object: 0c6f4182ca8140e595f601b12fbd582034257587 src/mistralai/client/models/processingstatusout.py: id: 3df842c4140f last_write_checksum: sha1:007a476e4101cac4d2a9eef94d289f0f486d763a pristine_git_object: 3acadcc9792c286cd31031a80e108b74bc2c0c4e 
src/mistralai/client/models/realtimetranscriptionerror.py: id: 8c2267378f48 - last_write_checksum: sha1:b9642dd42c4092bdebe0a4f8d35c68152f259c05 - pristine_git_object: f8f2d3da9598ce0cd90d148ba1a9be0c5d6237cc + last_write_checksum: sha1:78637de61d6fc3bc1fff8e95c0a6f5ffc1a3e111 + pristine_git_object: c661e46100752119521f63045e8ebe79105ecc01 src/mistralai/client/models/realtimetranscriptionerrordetail.py: id: 5bd25cdf9c7a last_write_checksum: sha1:a226b10718b1fe4a661311cbd98ea3b1d1ac4163 pristine_git_object: cec1f6eabd44ceab4e58694a0862c9c90ea2f264 + src/mistralai/client/models/realtimetranscriptioninputaudioappend.py: + id: 8b03cde6e115 + last_write_checksum: sha1:abcf48a48b077e836e2ae5647d93bd61007b9aa7 + pristine_git_object: 8156a2704bd95b74875f7a9ac17191e026f08993 + src/mistralai/client/models/realtimetranscriptioninputaudioend.py: + id: c187ba1b551d + last_write_checksum: sha1:fa96156774481ca3b98f8c0f99b3b1db01280b37 + pristine_git_object: 473eedb744141faa3447929865a76129d5e96432 + src/mistralai/client/models/realtimetranscriptioninputaudioflush.py: + id: b27b600c310e + last_write_checksum: sha1:8a8eb7de4137cf8cd810d93d984009bf8dff51c4 + pristine_git_object: 553d14c7720b3d1388901989d8160f0e3318ba56 src/mistralai/client/models/realtimetranscriptionsession.py: id: 02517fa5411a - last_write_checksum: sha1:0073b248604f667e89e34cf01184a788ca84d63f - pristine_git_object: d20d0d8c94aeec425a2c1dfb93b72ac6878cb8af + last_write_checksum: sha1:eb9a23fb89e0bdb3bb6168f512488a98bd626bc1 + pristine_git_object: a74a457b1e54deb1fcd203ce5ff2c57691f16b18 src/mistralai/client/models/realtimetranscriptionsessioncreated.py: id: 4e3731f63a3c - last_write_checksum: sha1:d3fb5c5dc417a0ebb12a30770324674e055526ae - pristine_git_object: c4fa5774502699529e27870436ca65b9f88ccfe1 + last_write_checksum: sha1:6997848cf22dc90b10597eaf9f0dd966ace969af + pristine_git_object: bb96875ab913f3d6ff241a00d94a87e877637782 src/mistralai/client/models/realtimetranscriptionsessionupdated.py: id: 
686dc4f2450f - last_write_checksum: sha1:7e4de1020672efc3503cda5b916b41056bf1d22b - pristine_git_object: a61fb05e8e5ba3ffa20bbb98bf61c17045c1f75c + last_write_checksum: sha1:e023fe0c8c54da644fc797c25dfeb070b6f0fd1c + pristine_git_object: fea5db4a1b956cb8253e4f147463c47958bfd989 + src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py: + id: 4e1b3fd7c5a3 + last_write_checksum: sha1:7da202e016b1d1dfc36a13ac03e3b419f0952cd2 + pristine_git_object: 07ad59a41f8a16b9c23c4e0be503a801ec0e2dd6 + src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py: + id: 7033fdb33ad4 + last_write_checksum: sha1:812f072a9effe1ce44e56094121ed10b3a83e39d + pristine_git_object: a89441e91dff4b7a78e8dd247b43243e89bf129d src/mistralai/client/models/referencechunk.py: id: 921acd3a224a - last_write_checksum: sha1:0dcff62499afdb1db0fd4f46614f8680f94837f4 - pristine_git_object: 7634d8ae07c96a99e634dcf888077f1d8cc4dc67 - src/mistralai/client/models/reprocessdocumentop.py: - id: b2913a7aa5c9 - last_write_checksum: sha1:07174ee58ec12909f08a08a9a6d7427ee9b2d5d0 - pristine_git_object: 48a4b72bf285e2f2e4b2d0c352ebc463518ed712 + last_write_checksum: sha1:a8bff06a2a040556bce8e6212973a774bee6bd34 + pristine_git_object: e0bbae4e08275e82010080d4ee84612e01a07f81 src/mistralai/client/models/requestsource.py: id: 3f2774d9e609 last_write_checksum: sha1:1ce68530a46793968f1122d29df722f0a5c9d267 pristine_git_object: fc4433cb4e657b06aa6a4c078094c2df342810e2 src/mistralai/client/models/responsedoneevent.py: id: cf8a686bf82c - last_write_checksum: sha1:376c2a65f1fcdfe20d7cf0bd6aa6d8870a4f32c1 - pristine_git_object: ed331ff12c8728290b8ad17e52d9384265233665 + last_write_checksum: sha1:144a8bf407391948946f3f5362db78a33c45ee6c + pristine_git_object: be38fba81c08088303c4342c99ac3628c5957785 src/mistralai/client/models/responseerrorevent.py: id: b286d74e8724 - last_write_checksum: sha1:ecff834ec62bf46d2aa5d9753f3898ed86caad45 - pristine_git_object: 8f196a52b469458da5c9f072983870da8c4fc4ea + 
last_write_checksum: sha1:df3f53344624082471c795131552689510946345 + pristine_git_object: fa4d0d01c1cb7f15d6f469279c2000d2fad8f459 src/mistralai/client/models/responseformat.py: id: 6ab8bc8d22c0 - last_write_checksum: sha1:e0c29239b4cd698af50412a1cab85217ccbb1796 - pristine_git_object: 409b80d658e4c93f4ee25c218fe74d65fd84ad44 + last_write_checksum: sha1:0ab455566c6bb0b63e2cb1f61f300266021f5954 + pristine_git_object: b2971412549cc5b95c53b93425dbd5b6503a4df7 src/mistralai/client/models/responseformats.py: id: c4462a05fb08 last_write_checksum: sha1:3cb82d44a4f9df5e9a3f51867be6eab1d439d87a pristine_git_object: 21345778ad2d41a3746292e67fec628f9ec2a84d src/mistralai/client/models/responsestartedevent.py: id: 24f54ee8b0f2 - last_write_checksum: sha1:8be1513409934d7ea1c524e468954f7eda0a8c62 - pristine_git_object: 256d2a6c864edf4f3ccd77b2db139c11fe4f6727 - src/mistralai/client/models/responsevalidationerror.py: - id: c244a88981e0 - last_write_checksum: sha1:74a39321dee69f3487d9b9e01ffb2e40715176f4 - pristine_git_object: 1ed0d55266a106364fe58aa1e476fafbfbbbfdfe - src/mistralai/client/models/restartconversationop.py: - id: 2f6f3e4bbfd8 - last_write_checksum: sha1:9500d3ebea040ff4a203f3f025ff1bff8a397235 - pristine_git_object: b09eaed5bc8ecdbb7f1952c97b2e665462c70f9e - src/mistralai/client/models/restartconversationstreamop.py: - id: 16dc9ee5bf22 - last_write_checksum: sha1:b16f54529f4fd7d1422c82ff1a6dd5a9a82ba6bd - pristine_git_object: 3b2025f536d1c54ed58064b4be33aaafb9297ac4 - src/mistralai/client/models/retrievefileop.py: - id: ee73efdf9180 - last_write_checksum: sha1:330ec0a78a7ba623f21cd378b53250045bea984c - pristine_git_object: edd50e571cf56c6c22acc1777f6c9af38787f07d - src/mistralai/client/models/retrievefileout.py: - id: 8bb5859aa0d0 - last_write_checksum: sha1:1077bdb8fcc5ba22b2deb7f5c95fefe7b1fb82f5 - pristine_git_object: 2abf2161cd61d84f04836740a526c0e3711c3f6d - src/mistralai/client/models/retrievemodelop.py: - id: d883baa79c9e - last_write_checksum: 
sha1:525c7e9cf8594433cbb21374422067a75e6b53a9 - pristine_git_object: b4334e9a5541a14f7916244761213b883d507a41 + last_write_checksum: sha1:f66a0a67444916e838ca9a63144fb661832b54b9 + pristine_git_object: 84abfcd9ac159b9bd9234ff015d5525d88d663f6 + src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py: + id: 6fefa90ca351 + last_write_checksum: sha1:52775e73fb5c51d245362ce63672cec776e5b6bd + pristine_git_object: cd5955c1eadb8cd9d1f9ecc388c2cc17df11c885 src/mistralai/client/models/sampletype.py: id: a9309422fed7 last_write_checksum: sha1:86a61340a647696f6c35a82d945509b1c85aa6f7 pristine_git_object: dfec7cce1e22ab607b6a9e947fa940284426086d - src/mistralai/client/models/sdkerror.py: - id: 12f991dad510 - last_write_checksum: sha1:c2c344c8b7e23b0c93eeafedd25d28582467c3a7 - pristine_git_object: 101e1e6a67c3672e899b39dbfe10d45550a4449a src/mistralai/client/models/security.py: id: c2ca0e2a36b7 - last_write_checksum: sha1:cec2a544790c2178f92742ac88e546efeacedb40 - pristine_git_object: 4fa8b4b2651f1d13811faf2da6e481243ea84e5a + last_write_checksum: sha1:d74333517caae2a1aa58517e8e935e46913bcc66 + pristine_git_object: f3b3423e850a1afa3b0fa5fa6c94f6018ff70627 src/mistralai/client/models/shareenum.py: id: a0e2a7a16bf8 last_write_checksum: sha1:15a84d57ceeb74cfb37275f714954e42d8e9b3ba pristine_git_object: 08ffeb7e46fbbc28b7c93ef2aa4a49aff7c0d35e src/mistralai/client/models/sharingdelete.py: id: f5ecce372e06 - last_write_checksum: sha1:c5e4e6df47ef2d5715a99533a1efd936f0e7e16e - pristine_git_object: 202732cf785074446cd24360dd9c540768e4134f + last_write_checksum: sha1:247d793bd1ddc0ad35d010c17e5b32eba826e3a1 + pristine_git_object: 33ccd7e71b8f65d2a9329d8632b5446ca0431d0a src/mistralai/client/models/sharingin.py: id: e953dda09c02 - last_write_checksum: sha1:f60bd60d37f0accadf50ea111055fd99aa190a5f - pristine_git_object: 8cc3e8968d9d5460f040ebdb66d8f460e86d2c96 + last_write_checksum: sha1:7c2b5333c634ed7889fc907edbf89c6066db5928 + pristine_git_object: 
7c1a52b049db4afbd6a06b5f39966dbec4f862ba src/mistralai/client/models/sharingout.py: id: 0b8804effb5c - last_write_checksum: sha1:362bda8a5bd70d12e2de33814d3bd36a61c6d7ae - pristine_git_object: 778071546c12c2636d2deec6042e6b686b6428c6 + last_write_checksum: sha1:a78e4f6bf2f49ae8250787e1680b5004563b32ac + pristine_git_object: ab3679a4cbcc2826ff2672a09e4eaf4990b5c6a9 src/mistralai/client/models/source.py: id: fcee60a4ea0d last_write_checksum: sha1:4d4277d75f7ce001780a069898b38afa7c8addc0 @@ -2632,214 +2608,190 @@ trackedFiles: id: 1733e4765106 last_write_checksum: sha1:3c79fc7c43cd018fba4950ba013ed15899b82ebf pristine_git_object: 0add960bc93f53df5ddda94892543a0857f32dd6 - src/mistralai/client/models/startfinetuningjobop.py: - id: "663886392468" - last_write_checksum: sha1:6a6a409dd481359e8d6593fa2ea817007f8a967d - pristine_git_object: 805a8721cc7d048f172e1096ead0e410c7d04928 src/mistralai/client/models/systemmessage.py: id: 500ef6e85ba1 - last_write_checksum: sha1:af68936119bf7c067aec5215e2654c56a5df1755 - pristine_git_object: 352eca76ad5051cc2c504c673a23e048642fe018 + last_write_checksum: sha1:a88de3fc70adab47943f867336659b3a1a6cdae0 + pristine_git_object: 2602cd2db03cd129b42b343f2dc79ce68106ac35 src/mistralai/client/models/systemmessagecontentchunks.py: id: 297e8905d5af last_write_checksum: sha1:e5695ca0ebdb0f02f3a0c527015df154a0c52b7f pristine_git_object: d480a219e935aaea91adc320de0003b562c0bbb5 src/mistralai/client/models/textchunk.py: id: 9c96fb86a9ab - last_write_checksum: sha1:4ad624afaf4d83d4e58f72bcbd15b9faecc703f3 - pristine_git_object: c0584234da572bb903894633b123b1dda29e7736 + last_write_checksum: sha1:89cbb66753d7a3585ce58c70219a349f770909cc + pristine_git_object: ac9f3137dddc15e1cd10aa6385b76510e6c23e33 src/mistralai/client/models/thinkchunk.py: id: 294bfce193a4 - last_write_checksum: sha1:d9c779959ed82ae3de66e481536d80bcc2ed57a5 - pristine_git_object: a999f5d7b824325085ec980cfa07294919408538 + last_write_checksum: 
sha1:9126c530e93ae7532235d4bfa3e2b202423a0f24 + pristine_git_object: 5995e6010bfb63d0ab2ded6e0f55b7dca23f769a src/mistralai/client/models/timestampgranularity.py: id: 68ddf8d702ea last_write_checksum: sha1:64e7b198a75f026590e26758112651d31984076f pristine_git_object: 8d3773752444db865c0e2629ad9eed66eb7f2bc6 src/mistralai/client/models/tool.py: id: 48b4f6f50fe9 - last_write_checksum: sha1:14e7b21a2857e2ca36830730a47d0eca476fb491 - pristine_git_object: a46d31f166618fd5b92b7e76ccb9190796af7cd2 + last_write_checksum: sha1:7e33d7a0349e652b40926f6a51240b9a5c1a7dbd + pristine_git_object: 2b9965e571eeb494f8cf867818aab488198ecdb2 src/mistralai/client/models/toolcall.py: id: fb34a1a3f3c2 - last_write_checksum: sha1:15ed0a4611e8c310640ec4622af8019d2db93355 - pristine_git_object: 4a05bbd04a44446babda8419dcf4d4c93248fe41 + last_write_checksum: sha1:7d0275444dd6be291c091e908a2b7f2fc536f20f + pristine_git_object: 181cec33c904535c804de06c7357bd493647cd70 + src/mistralai/client/models/toolcallconfirmation.py: + id: f2e953cfb4fe + last_write_checksum: sha1:554a2e073917ffb479efe5887c0b59a2f4967c6e + pristine_git_object: fd6eca50a7ec2f4cca2ae20958717881660e0ac5 src/mistralai/client/models/toolchoice.py: id: 14f7e4cc35b6 - last_write_checksum: sha1:358a6e88486b4d372c9041dd15c0206b119bbc32 - pristine_git_object: aa2016fb63290c63f9b8f3e18c552f6598f15c8f + last_write_checksum: sha1:a787827a4f4ecf5b6a7068ba94fd1ff074898b51 + pristine_git_object: cb787df1b62190319c6e9679521228af28ee7204 src/mistralai/client/models/toolchoiceenum.py: id: c7798801f860 last_write_checksum: sha1:5388b2a6fad842f8e4ae79e6257b4d14c122a6ff pristine_git_object: d66c3d07058eb87bcc3eec10de99a616b5f6638a + src/mistralai/client/models/toolconfiguration.py: + id: faec24b75066 + last_write_checksum: sha1:912c1c10e88053ae4ee44af763c9ab7c95339f5d + pristine_git_object: b903c8b6c13777b671faf5aa97994117734b3a8f src/mistralai/client/models/toolexecutiondeltaevent.py: id: df8f17cf3e07 - last_write_checksum: 
sha1:6ad6e219f3d7512c9fd382fb22471bfaa0fc9b09 - pristine_git_object: 384ec2407848f51434ca378ad7de965c584b163b + last_write_checksum: sha1:2537a6e2dffde3760a064fdf92efa6cdc117ba2b + pristine_git_object: 5a977ca6fc5bfdeadd929f18037fb5c9a9582b40 src/mistralai/client/models/toolexecutiondoneevent.py: id: 514fdee7d99f - last_write_checksum: sha1:09ef4842c50419eda746f3361454c4df0c3c2466 - pristine_git_object: 56f28899b8b4161fcddfec0ed2610486fe6f8b06 + last_write_checksum: sha1:d62f57105e4816e03030bc9a2a5645482ea80c55 + pristine_git_object: 1c9b0ec92d87a8559ef050a21ba309e05f6b0314 src/mistralai/client/models/toolexecutionentry.py: id: 76db69eebe41 - last_write_checksum: sha1:ff84f62c5264aa023f412956cf83604ecc4112a9 - pristine_git_object: 158cbf06a2acdd492ddb91ae8eaca4802da9f359 + last_write_checksum: sha1:9a697fdad4178b95d7d1bd1eaee77ef948fb2d4f + pristine_git_object: 0d6f2a1305f262519ba719969c6e62ceb95e52b3 src/mistralai/client/models/toolexecutionstartedevent.py: id: 40fadb8e49a1 - last_write_checksum: sha1:5ba46ca1583e8245736a0ae81372025482a8504b - pristine_git_object: 1591866981ce1439fbce3736f028b15205d95810 + last_write_checksum: sha1:9f6e43d5b2c807ca3b080ea7bd4878ba3ec2a788 + pristine_git_object: 21e5bfa8fea7fa27b7031b740f72a873760700cc src/mistralai/client/models/toolfilechunk.py: id: 26c8aadf416a - last_write_checksum: sha1:1dd468876a2ff5ec8b15b6f4e6b8f812e640a29a - pristine_git_object: 6eebd562b1707b41b81e2fd0e267e4c8698551de + last_write_checksum: sha1:89bb203aa600bf6a516fbe10e1787a132de9ca5a + pristine_git_object: 0708b3ff4c4f97a0e4c4359baeedc89ef0b10278 src/mistralai/client/models/toolmessage.py: id: 15f1af161031 - last_write_checksum: sha1:809936ebaeb4541f862aed6d26e1d1f5ff0ae58a - pristine_git_object: b3e8ffd9294bf6b0b46b26097abb87a5b96c9302 + last_write_checksum: sha1:cfa16352cf5bbcd6eedbfbf7f3002149fd989418 + pristine_git_object: 05a0ee636a4393e3ce65cc1b6e272ddf8ec79254 src/mistralai/client/models/toolreferencechunk.py: id: 822e9f3e70de - 
last_write_checksum: sha1:f02c38c892580a6287156551e7964c601a239220 - pristine_git_object: 3c76c8c2dcc86d225c5218fa13cd43a693230fa8 + last_write_checksum: sha1:f5c9265e27fa2d4526e5ce50dff7f7bd641eb642 + pristine_git_object: 95454fe891dd3955121565431897c1b8f0c25083 src/mistralai/client/models/tooltypes.py: id: 86c3b54272fd last_write_checksum: sha1:e90c15c1e645a5f207af0c7ac728cb0a521c6706 pristine_git_object: e601c1967c42ef8d0c2eea98bc5c0ca722cde066 src/mistralai/client/models/trainingfile.py: id: 2edf9bce227d - last_write_checksum: sha1:668f05a3e3b883c2f54b1e541f1fb501605456b0 - pristine_git_object: 1f710ff81c046261ea497505d7216a1208c65d5b + last_write_checksum: sha1:8fd6a2560554b3c2166daff2ff1a48bb49053489 + pristine_git_object: 2faeda8bfb38c810c5d80eb17cc9928c49c7caf5 src/mistralai/client/models/transcriptionresponse.py: id: 60896dbc6345 - last_write_checksum: sha1:3e5c20911697f5569029932fe0910da94feb2b06 - pristine_git_object: 786863ec331a4bdca18ac056f7447d11010d4320 + last_write_checksum: sha1:e8a318798dfe4ebd64c9d64f487f7e3e8dd05532 + pristine_git_object: 70315463ff8e01c680aa80d68bdc32a7429ddb16 src/mistralai/client/models/transcriptionsegmentchunk.py: id: d1e6f3bdc74b - last_write_checksum: sha1:0107b6ee9160cd2a8309f7c8465502d7d0be90a8 - pristine_git_object: c78bec3068b95782bdc271c2e1ee645b115fed32 + last_write_checksum: sha1:ee56c437444cbfa7983ba950e3e166f392d208cb + pristine_git_object: b87bfc2f9de0a07d62e8cc1fe265a9c29f56f194 src/mistralai/client/models/transcriptionstreamdone.py: id: 066a9158ed09 - last_write_checksum: sha1:3a6abc6f1a0ad78d73e32f3d40ef4bb425aee5b5 - pristine_git_object: b5740b3bb62b4db3846b7727b15e18502e39d862 + last_write_checksum: sha1:cb8ea2e34c712ef1694bd1b6a83e7eed9318b13b + pristine_git_object: e3c5016901a2400c222e5b821b5afb312af1a1e6 src/mistralai/client/models/transcriptionstreamevents.py: id: b50b3d74f16f - last_write_checksum: sha1:f688a18317bd048ad89881c35cb80e39bb7cba47 - pristine_git_object: 
17161a177721e44a40903cf15bf08ad0b56545de + last_write_checksum: sha1:68f82eea8a0bcf1b8b65cedf9e276f34121d398b + pristine_git_object: 073fd99aebf6f90027a45c8ee4daa7ffeb8ee34e src/mistralai/client/models/transcriptionstreameventtypes.py: id: 6f71f6fbf4c5 last_write_checksum: sha1:1d568460b1521f17dd5e551632ae4d7883a98dd3 pristine_git_object: c74bbb7483cc3981ee3638c80c15924f3e1c20c4 src/mistralai/client/models/transcriptionstreamlanguage.py: id: e94333e4bc27 - last_write_checksum: sha1:17c7b082ebf5764e21f124fe4c6a6ee5cea4fc51 - pristine_git_object: 67b3e9791efaf134580d82c2a12fab1cd33efbb1 + last_write_checksum: sha1:d1ee93b09ca377bc29845924d53db3ccf250269d + pristine_git_object: b6c6190684eccdc3fe6ce4bc7b86f5ee6490a197 src/mistralai/client/models/transcriptionstreamsegmentdelta.py: id: c0a882ce57e5 - last_write_checksum: sha1:12cbfcf02d5cb4979a836e429690786153250bf0 - pristine_git_object: 8db5e73619eab98c3751689a7ec5bef45ef9ef6b + last_write_checksum: sha1:3507a0355027136e92ada0c9766277381d5dee96 + pristine_git_object: 32ef8f9b2aa34253ea10c830ae856a931306f658 src/mistralai/client/models/transcriptionstreamtextdelta.py: id: 6086dc081147 - last_write_checksum: sha1:6b371b5d236e6e767f25160ab0e8a49bcaf356f8 - pristine_git_object: 49338a083332467e64f171637ca04365ca6bf25b - src/mistralai/client/models/unarchiveftmodelout.py: - id: 9dbc3bfb71ed - last_write_checksum: sha1:40a23dc39af81f06b23f21dad45c5c5f1178b2af - pristine_git_object: 0249a69e8552ed00a5e1f505fdc16025c46d2477 - src/mistralai/client/models/unarchivemodelop.py: - id: eb18584fd78c - last_write_checksum: sha1:5b81357950f301a82233b58a3e2a5b232fdbf546 - pristine_git_object: 1d68a06ae41559baffb6d54398b52dec630556c7 - src/mistralai/client/models/updateagentop.py: - id: ae3a6abea468 - last_write_checksum: sha1:3867948bd0ea37b597c4e5ef7a2e6881791a5fa5 - pristine_git_object: 28acc83d8df1373e897f9634dfbb84ee28897717 - src/mistralai/client/models/updateagentversionop.py: - id: 3821dca5b20a - last_write_checksum: 
sha1:4c41a450278858089c7cb23b8fcf1e4184fa1f1d - pristine_git_object: 114013bcdcfb7d7c9e935285f167a004b65fbd09 - src/mistralai/client/models/updatedocumentop.py: - id: eee9ef317180 - last_write_checksum: sha1:7d9fc6e18e6631cfe9cd1bc2fa5f7d6cd599ec19 - pristine_git_object: 073f22a9a28c18ad645212262bdc66528a1f6281 - src/mistralai/client/models/updateftmodelin.py: - id: 39e2d678e651 - last_write_checksum: sha1:4ea30ed8eaad36e1440614016f075f088c7e5781 - pristine_git_object: 4ac5a8a24026f6a975044de01a9918364aa64e04 - src/mistralai/client/models/updatelibraryop.py: - id: 4ba7acdb62c6 - last_write_checksum: sha1:3816c8eff226634b545843eed2d0c15fa1579308 - pristine_git_object: c5a1ad30e9bfc277cbbcdea0218a265ad10bcb96 - src/mistralai/client/models/updatemodelop.py: - id: ba149ecfe03e - last_write_checksum: sha1:2ce33ac60846a5ef70141dccbdb09950c3d1e348 - pristine_git_object: 023be97905929aa2925f20cd69b3591e6e3168d7 - src/mistralai/client/models/updateorcreatelibraryaccessop.py: - id: ec9b15418f5c - last_write_checksum: sha1:82fe6bbbb1402f03b7c0380c5fd84a8fef9bec67 - pristine_git_object: 1abe6eda3eb7d0aff8a7c146c848a63e299cedf1 - src/mistralai/client/models/uploaddocumentop.py: - id: 0018fe7ff48c - last_write_checksum: sha1:f31d565f419cbcc59af0655753cee6c643ad307a - pristine_git_object: 2c957947830ae0d467084cc6502d9d97ffdf6c81 - src/mistralai/client/models/uploadfileop.py: - id: d67619670938 - last_write_checksum: sha1:00664ba8af70ffc96871eee02890411475ca6c37 - pristine_git_object: 50848f0b663f60f9a303010f3c940919939949c9 - src/mistralai/client/models/uploadfileout.py: - id: 42466f2bebfb - last_write_checksum: sha1:44d0e5d419fb82c56c33c0f9af8902b3cc06bf6d - pristine_git_object: be291efb523965c155fc922d896da2cf682378ab + last_write_checksum: sha1:968b4bc32731be6c63be3fd90eb26f4357f891a3 + pristine_git_object: 42f0ffb7f16bee4f68f9db9807aa4ec3d9ae5176 + src/mistralai/client/models/unarchivemodelresponse.py: + id: 22e2ccbb0c80 + last_write_checksum: 
sha1:a69d8dc8636f3326eb61892b85a9b60044b457fe + pristine_git_object: 5c75d30edaade853f085533da0f9f5de221b6e44 + src/mistralai/client/models/updateagentrequest.py: + id: 914b4b2be67a + last_write_checksum: sha1:f37178288254e905ce298befbe801fa6ba63ec0e + pristine_git_object: b751ff74396ca0e74411a7a1549c6e0b4988fc49 + src/mistralai/client/models/updatedocumentrequest.py: + id: a8cfda07d337 + last_write_checksum: sha1:c644725ae379f22550d00b42baefb511d1cc3667 + pristine_git_object: 61e696555c0654208b0d9dcd63fc475ad85297d4 + src/mistralai/client/models/updatelibraryrequest.py: + id: 51bc63885337 + last_write_checksum: sha1:622d6a7af58d2e86d7d2dd4e312883d11ce5a8a8 + pristine_git_object: 91cbf2a1c76361c9c5ee1554c80f1507ff5ee50b + src/mistralai/client/models/updatemodelrequest.py: + id: fe649967751e + last_write_checksum: sha1:dbba8a6ccbfae36ac56808742f4c05ab99dd2c6c + pristine_git_object: f685cfcce1aa3669159fec902ba78034ef3141b8 src/mistralai/client/models/usageinfo.py: id: 54adb9a3af16 - last_write_checksum: sha1:fcfdc921bbcc78436ef156dd7a2eff1123c4036f - pristine_git_object: e78f92e75f86fd593469f7267aad72e417178161 + last_write_checksum: sha1:04705526057c43495284fe9c50cf7df2af7b49fd + pristine_git_object: 31cbf07e3e38df4452da320e44f3fa9aef17c196 src/mistralai/client/models/usermessage.py: id: cb583483acf4 - last_write_checksum: sha1:215406ca76602e899620ef763e216d71f8cd9fcd - pristine_git_object: 25ccdf805e9fbc65da7b6d0051f13224cf0e04fa + last_write_checksum: sha1:0060ee5f5fbbd78073cd56546127a021354a8072 + pristine_git_object: 63e7679246a11fe8e7a3db06e382779c05c64366 src/mistralai/client/models/validationerror.py: id: 15df3c7368ab last_write_checksum: sha1:63df5739d68f984470d4d1b8661a875201cc301d pristine_git_object: 385714c8cb80a8afbca6d5142a2d378d0d165cf9 src/mistralai/client/models/wandbintegration.py: id: 4823c1e80942 - last_write_checksum: sha1:6391a293368ba6fa98114ce510a7665b47d82721 - pristine_git_object: c5db4a6d409f1d84d356a471995119a070db627a - 
src/mistralai/client/models/wandbintegrationout.py: - id: 6b103d74195c - last_write_checksum: sha1:37caaf5224b216826c48912538959baa0a7d997a - pristine_git_object: d0a09bf48c3a24f5382a626d26897afe2d680f7e + last_write_checksum: sha1:cc0a7ce49756928f4d261375526a3498b9e4f05d + pristine_git_object: f0df2c77845b2741802730fcd4f3c5d31b7ddd8e + src/mistralai/client/models/wandbintegrationresult.py: + id: 8787b4ad5458 + last_write_checksum: sha1:6ba506e01333a3084f63fbfccb459235b6560554 + pristine_git_object: 575cbd42297f02a54542c7eda3a4cabaa28dda23 src/mistralai/client/models/websearchpremiumtool.py: id: bfe88af887e3 - last_write_checksum: sha1:9f9b4bfeea780cec16b9457ee800524c3eba7a4b - pristine_git_object: 9588ab1d7361d3ab1cba2f16e74695273cc03557 + last_write_checksum: sha1:ceb073d3b3916b2ff8f7b7e5eb01692893024d68 + pristine_git_object: 00d4a4b427331660d29513ec43e68fc7cf8afcfb src/mistralai/client/models/websearchtool.py: id: 26b0903423e5 - last_write_checksum: sha1:9afaf3738be10d0a401b34e15db25612ee33465f - pristine_git_object: 27502909ea608f8e0b4a71484da94d26209e0c07 + last_write_checksum: sha1:a07d7ace2d68c944c686e69053bef8d84231814b + pristine_git_object: 6871080f6279ef42a0525c1e26368baafc98fbb7 src/mistralai/client/models_.py: id: 1d277958a843 - last_write_checksum: sha1:f50e7b7194f97de4abf0afd70b5e1c52b805cef6 - pristine_git_object: 05b33ac72da14401b700c4abfb28ca33b5af702b + last_write_checksum: sha1:b9ea906a7704aa57efe5d13ac547e502d961d3b5 + pristine_git_object: a287c413ddf48bd5ff7fc0a685e05d4bcdabb6e5 src/mistralai/client/ocr.py: id: 2f804a12fc62 - last_write_checksum: sha1:2cfde7a27733502b87690c1025adefe5b717da57 - pristine_git_object: 2aa382295a9f1561021a36f3a68a9fb505cfe536 + last_write_checksum: sha1:707d91582149e76a3109df8b1a58bfd44111a93d + pristine_git_object: a46119d1577036be57896a7ea3737ab508497e4f src/mistralai/client/py.typed: id: d95cd1565e33 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 pristine_git_object: 
3e38f1a929f7d6b1d6de74604aa87e3d8f010544 src/mistralai/client/sdk.py: id: 48edbcb38d7e - last_write_checksum: sha1:be11dc3f70c773dd5c6baba6b3fafd996c5baec2 - pristine_git_object: b1ab54935a3421008c78f4864bd6097c0a098040 + last_write_checksum: sha1:365709e35dc4e450a2c4931e75dcbd04568ab361 + pristine_git_object: 80bf25a749eb3b36035aaafa15f059bcf403ec80 src/mistralai/client/sdkconfiguration.py: id: b7dd68a0235e last_write_checksum: sha1:c6944f12c6fdc992d43db943b24c8c90854cde5e pristine_git_object: 712e92e05c7fd3016431ec62ecb7b7789c8b7071 src/mistralai/client/transcriptions.py: id: 75b45780c978 - last_write_checksum: sha1:b47a3765f2191715fc19bdbc4e56414abbe59f4b - pristine_git_object: f7ef5b0a0769467bd4bea61f7b0dca3b68c3788d + last_write_checksum: sha1:27a5b7dd6ed47b0f79b95fbb8599d439512ef344 + pristine_git_object: 7f01917d6e462cff9af75e70d32afbcc5958c7de src/mistralai/client/types/__init__.py: id: 000b943f821c last_write_checksum: sha1:12a4ace69cbc63f1125eeddf901afed7cdf378b0 @@ -2850,8 +2802,8 @@ trackedFiles: pristine_git_object: 4e889aa0ffbb4402e416a40fa6259334cb0a3c5c src/mistralai/client/utils/__init__.py: id: b69505f4b269 - last_write_checksum: sha1:adb457b85659a04945857a74407306dafbdce7cb - pristine_git_object: 7ed3a42095b5921adf0e154ae6eba560a1098233 + last_write_checksum: sha1:98698da73839db7c258fd1afd45ccacff86c64be + pristine_git_object: 4bde281a1fd8c616d4b3529af0fcb79f57374310 src/mistralai/client/utils/annotations.py: id: 1ffdedfc66a2 last_write_checksum: sha1:f86ba37de752e63076f25d53f9c54fce98d2a0bd @@ -2860,18 +2812,22 @@ trackedFiles: id: c40066d868c9 last_write_checksum: sha1:412ca432d6f5a75b692a967bc6fc52e4f4eff7d5 pristine_git_object: a2c94fac73ecbfb8acd8ed4f75692318e4f863ec + src/mistralai/client/utils/dynamic_imports.py: + id: ac9918d925c0 + last_write_checksum: sha1:93d3eac90a47a039e7a652ae120bec66be6c681a + pristine_git_object: 969f2fc71178ed2114640c8f0831f4f3acb25af8 src/mistralai/client/utils/enums.py: id: a0735873b5ac last_write_checksum: 
sha1:fe05b6a21360b0eff1fc246e9a3ee01758521262 pristine_git_object: d897495f053459106144501c67f2215251d52a27 src/mistralai/client/utils/eventstreaming.py: id: 3263d7502030 - last_write_checksum: sha1:0e15051d74262fbe051e1ba83fd1f2c0c0a016a0 - pristine_git_object: 3fe3c7e13509d6fab08fbb8504c6c5f674c2b679 + last_write_checksum: sha1:24af3168dafe6b8d860cffb121fac11cd0e9d930 + pristine_git_object: 19a121529f180968f655baffbe446e5c1d6c2abb src/mistralai/client/utils/forms.py: id: 58842e905fce - last_write_checksum: sha1:c7929d974f46629b56e740456ddf03230b4048ab - pristine_git_object: 2b474b9a719e95c4bcae8572e5569e64f8d0b77f + last_write_checksum: sha1:d68ca0257e0e8bdc5cdc450f3e70a7ba789859f5 + pristine_git_object: 6facec5386675ccd5a26ff6093f98436a62fdf6b src/mistralai/client/utils/headers.py: id: 9066de2ead8b last_write_checksum: sha1:bcd2f47b96bfaa54b3590c557a9267142d446be6 @@ -2894,20 +2850,24 @@ trackedFiles: pristine_git_object: 3aae69c7cf618776daec8bd46f9116b06c25e837 src/mistralai/client/utils/retries.py: id: 5f1a5b90423c - last_write_checksum: sha1:94a86f31092553d4640a54c446cfe9028b4fb6ef - pristine_git_object: 90c008b0e20c1a539d65ffb387fb61a724c3c111 + last_write_checksum: sha1:bbf8e376c1c801911e65e33566d3a142f46133f9 + pristine_git_object: bea1304150e77ca06185efb7db7798aaacd5e623 src/mistralai/client/utils/security.py: id: 1acb7c006265 - last_write_checksum: sha1:e8543609e699dab330a4768786883c6ca38f07a6 - pristine_git_object: 4c73806d9c8e54a2a4cfe8f62d8c281177789f6f + last_write_checksum: sha1:3981f6571daf28b3b553beb09a4ebeeeb6ceff14 + pristine_git_object: d8b9d8fe746babd0a87846812b1f4117d1a46de2 src/mistralai/client/utils/serializers.py: id: 53c57c7f29a8 last_write_checksum: sha1:8a3a15cf273034261111f2559cacbb579e17cb1b pristine_git_object: fbc2772dc4284775be92de6a086c1eade9376417 + src/mistralai/client/utils/unions.py: + id: d23713342634 + last_write_checksum: sha1:f814d757474f039199f501aa53cdfba97a8c6645 + pristine_git_object: 
14ef1bd5c5abef9bd5f2a3a4ee2f79e954c67e7e src/mistralai/client/utils/unmarshal_json_response.py: id: b13585fc5626 - last_write_checksum: sha1:c0c44d0a656477daa225724e88a7cf5c954a1df6 - pristine_git_object: 65190e5c1d70a31f51656e1644bb701b9f132bcd + last_write_checksum: sha1:372a01f5abf034ddbe5d4a3fc68e9e397f86085a + pristine_git_object: 624433c4dd42c9fb1bfae363becc76c62e390e14 src/mistralai/client/utils/url.py: id: 3c6496c17510 last_write_checksum: sha1:c64be472d29cf229f2b91102808dcb741371c227 @@ -3024,7 +2984,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": [], "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + application/json: {"stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} responses: "200": application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} @@ -3056,7 +3016,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": "", "stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + application/json: {"stream": false, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} responses: "200": application/json: {"object": "conversation.response", "conversation_id": "", "outputs": [], "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}} @@ -3075,7 +3035,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": "", "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}} + application/json: {"stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": 
{"type": "text"}}} responses: "422": application/json: {} @@ -3085,7 +3045,7 @@ examples: path: conversation_id: "" requestBody: - application/json: {"inputs": [{"object": "entry", "type": "message.input", "role": "assistant", "content": "", "prefix": false}], "stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} + application/json: {"stream": true, "store": true, "handoff_execution": "server", "completion_args": {"response_format": {"type": "text"}}, "from_entry_id": ""} responses: "422": application/json: {} @@ -3279,6 +3239,7 @@ examples: page: 0 page_size: 100 created_by_me: false + order_by: "-created" responses: "200": application/json: {"object": "list", "total": 186589} @@ -3308,7 +3269,7 @@ examples: chat_completion_v1_chat_completions_post: speakeasy-default-chat-completion-v1-chat-completions-post: requestBody: - application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "mistral-large-latest", "stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "200": application/json: {"id": "cmpl-e5cc70bb28c444948073e77776eb30ef", "object": "chat.completion", "model": "mistral-small-latest", "usage": {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "created": 1702256327, "choices": []} @@ -3317,7 +3278,7 @@ examples: stream_chat: speakeasy-default-stream-chat: requestBody: - application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}} + application/json: {"model": "mistral-large-latest", "stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}} responses: "422": application/json: {} @@ -3335,7 +3296,7 @@ examples: application/json: {"model": "codestral-latest", "top_p": 1, "stream": false, "prompt": "def", "suffix": "return a+b"} responses: "200": - application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "447e3e0d457e42e98248b5d2ef52a2a3", "object": "chat.completion", "model": "codestral-2508", "usage": {"prompt_tokens": 8, "completion_tokens": 91, "total_tokens": 99}, "created": 1759496862, "choices": [{"index": 0, "message": {"role": "assistant", "content": "add_numbers(a: int, b: int) -> int:\n \"\"\"\n You are given two integers `a` and `b`. Your task is to write a function that\n returns the sum of these two integers. The function should be implemented in a\n way that it can handle very large integers (up to 10^18). 
As a reminder, your\n code has to be in python\n \"\"\"\n", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} stream_fim: speakeasy-default-stream-fim: requestBody: @@ -3354,14 +3315,14 @@ examples: application/json: {} userExample: requestBody: - application/json: {"stream": false, "messages": [{"content": "Who is the best French painter? Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}, "agent_id": ""} + application/json: {"stream": false, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}, "agent_id": ""} responses: "200": - application/json: {"id": "cf79f7daaee244b1a0ae5c7b1444424a", "object": "chat.completion", "model": "mistral-medium-latest", "usage": {"prompt_tokens": 24, "completion_tokens": 27, "total_tokens": 51, "prompt_audio_seconds": {}}, "created": 1759500534, "choices": [{"index": 0, "message": {"content": "Arrr, the scallywag Claude Monet be the finest French painter to ever splash colors on a canvas, savvy?", "tool_calls": null, "prefix": false, "role": "assistant"}, "finish_reason": "stop"}]} + application/json: {"id": "cf79f7daaee244b1a0ae5c7b1444424a", "object": "chat.completion", "model": "mistral-medium-latest", "usage": {"prompt_tokens": 24, "completion_tokens": 27, "total_tokens": 51, "prompt_audio_seconds": {}}, "created": 1759500534, "choices": [{"index": 0, "message": {"role": "assistant", "content": "Arrr, the scallywag Claude Monet be the finest French painter to ever splash colors on a canvas, savvy?", "tool_calls": null, "prefix": false}, "finish_reason": "stop"}]} stream_agents: speakeasy-default-stream-agents: requestBody: - application/json: {"stream": true, "messages": [{"content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user"}], "response_format": {"type": "text"}, "agent_id": ""} + application/json: {"stream": true, "messages": [{"role": "user", "content": "Who is the best French painter? Answer in one short sentence."}], "response_format": {"type": "text"}, "agent_id": ""} responses: "422": application/json: {} @@ -3406,7 +3367,7 @@ examples: application/json: {} userExample: requestBody: - application/json: {"input": [{"content": "", "role": "tool"}], "model": "LeBaron"} + application/json: {"input": [{"role": "tool", "content": ""}], "model": "LeBaron"} responses: "200": application/json: {"id": "352bce1a55814127a3b0bc4fb8f02a35", "model": "mistral-moderation-latest", "results": [{"categories": {"sexual": false, "hate_and_discrimination": false, "violence_and_threats": false, "dangerous_and_criminal_content": false, "selfharm": false, "health": false, "financial": false, "law": false, "pii": false}, "category_scores": {"sexual": 0.0010322310263291001, "hate_and_discrimination": 0.001597845577634871, "violence_and_threats": 0.00020342698553577065, "dangerous_and_criminal_content": 0.0029810327105224133, "selfharm": 0.00017952796770259738, "health": 0.0002959570847451687, "financial": 0.000079673009167891, "law": 0.00004539786823443137, "pii": 0.004198795650154352}}]} @@ -3422,7 +3383,7 @@ examples: chat_classifications_v1_chat_classifications_post: speakeasy-default-chat-classifications-v1-chat-classifications-post: requestBody: - application/json: {"model": "Camry", "input": [{"messages": [{"content": "", "role": "system"}]}]} + application/json: {"model": "Camry", "input": [{"messages": [{"role": "system", "content": ""}]}]} responses: "200": application/json: {"id": "mod-e5cc70bb28c444948073e77776eb30ef", "model": "Altima", "results": [{}, {"key": {"scores": {"key": 1360.53, "key1": 5946.42}}}, {"key": {"scores": {"key": 1360.53, "key1": 5946.42}}}]} @@ -3439,7 +3400,7 @@ examples: application/json: {} userExample: requestBody: - 
application/json: {"model": "CX-9", "document": {"document_url": "https://upset-labourer.net/", "type": "document_url"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} + application/json: {"model": "CX-9", "document": {"type": "document_url", "document_url": "https://upset-labourer.net/"}, "bbox_annotation_format": {"type": "text"}, "document_annotation_format": {"type": "text"}} responses: "200": application/json: {"pages": [{"index": 1, "markdown": "# LEVERAGING UNLABELED DATA TO PREDICT OUT-OF-DISTRIBUTION PERFORMANCE\nSaurabh Garg*
Carnegie Mellon University
sgarg2@andrew.cmu.edu
Sivaraman Balakrishnan
Carnegie Mellon University
sbalakri@andrew.cmu.edu
Zachary C. Lipton
Carnegie Mellon University
zlipton@andrew.cmu.edu\n## Behnam Neyshabur\nGoogle Research, Blueshift team
neyshabur@google.com\nHanie Sedghi
Google Research, Brain team
hsedghi@google.com\n#### Abstract\nReal-world machine learning deployments are characterized by mismatches between the source (training) and target (test) distributions that may cause performance drops. In this work, we investigate methods for predicting the target domain accuracy using only labeled source data and unlabeled target data. We propose Average Thresholded Confidence (ATC), a practical method that learns a threshold on the model's confidence, predicting accuracy as the fraction of unlabeled examples for which model confidence exceeds that threshold. ATC outperforms previous methods across several model architectures, types of distribution shifts (e.g., due to synthetic corruptions, dataset reproduction, or novel subpopulations), and datasets (WILDS, ImageNet, BREEDS, CIFAR, and MNIST). In our experiments, ATC estimates target performance $2-4 \\times$ more accurately than prior methods. We also explore the theoretical foundations of the problem, proving that, in general, identifying the accuracy is just as hard as identifying the optimal predictor and thus, the efficacy of any method rests upon (perhaps unstated) assumptions on the nature of the shift. Finally, analyzing our method on some toy distributions, we provide insights concerning when it works ${ }^{1}$.\n## 1 INTRODUCTION\nMachine learning models deployed in the real world typically encounter examples from previously unseen distributions. While the IID assumption enables us to evaluate models using held-out data from the source distribution (from which training data is sampled), this estimate is no longer valid in presence of a distribution shift. Moreover, under such shifts, model accuracy tends to degrade (Szegedy et al., 2014; Recht et al., 2019; Koh et al., 2021). Commonly, the only data available to the practitioner are a labeled training set (source) and unlabeled deployment-time data which makes the problem more difficult. 
In this setting, detecting shifts in the distribution of covariates is known to be possible (but difficult) in theory (Ramdas et al., 2015), and in practice (Rabanser et al., 2018). However, producing an optimal predictor using only labeled source and unlabeled target data is well-known to be impossible absent further assumptions (Ben-David et al., 2010; Lipton et al., 2018).\nTwo vital questions that remain are: (i) the precise conditions under which we can estimate a classifier's target-domain accuracy; and (ii) which methods are most practically useful. To begin, the straightforward way to assess the performance of a model under distribution shift would be to collect labeled (target domain) examples and then to evaluate the model on that data. However, collecting fresh labeled data from the target distribution is prohibitively expensive and time-consuming, especially if the target distribution is non-stationary. Hence, instead of using labeled data, we aim to use unlabeled data from the target distribution, that is comparatively abundant, to predict model performance. Note that in this work, our focus is not to improve performance on the target but, rather, to estimate the accuracy on the target for a given classifier.\n[^0]: Work done in part while Saurabh Garg was interning at Google ${ }^{1}$ Code is available at [https://github.com/saurabhgarg1996/ATC_code](https://github.com/saurabhgarg1996/ATC_code).\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 2, "markdown": "![img-0.jpeg](img-0.jpeg)\nFigure 1: Illustration of our proposed method ATC. Left: using source domain validation data, we identify a threshold on a score (e.g. negative entropy) computed on model confidence such that fraction of examples above the threshold matches the validation set accuracy. 
ATC estimates accuracy on unlabeled target data as the fraction of examples with the score above the threshold. Interestingly, this threshold yields accurate estimates on a wide set of target distributions resulting from natural and synthetic shifts. Right: Efficacy of ATC over previously proposed approaches on our testbed with a post-hoc calibrated model. To obtain errors on the same scale, we rescale all errors with Average Confidence (AC) error. Lower estimation error is better. See Table 1 for exact numbers and comparison on various types of distribution shift. See Sec. 5 for details on our testbed.\nRecently, numerous methods have been proposed for this purpose (Deng & Zheng, 2021; Chen et al., 2021b; Jiang et al., 2021; Deng et al., 2021; Guillory et al., 2021). These methods either require calibration on the target domain to yield consistent estimates (Jiang et al., 2021; Guillory et al., 2021) or additional labeled data from several target domains to learn a linear regression function on a distributional distance that then predicts model performance (Deng et al., 2021; Deng & Zheng, 2021; Guillory et al., 2021). However, methods that require calibration on the target domain typically yield poor estimates since deep models trained and calibrated on source data are not, in general, calibrated on a (previously unseen) target domain (Ovadia et al., 2019). Besides, methods that leverage labeled data from target domains rely on the fact that unseen target domains exhibit strong linear correlation with seen target domains on the underlying distance measure and, hence, can be rendered ineffective when such target domains with labeled data are unavailable (in Sec. 5.1 we demonstrate such a failure on a real-world distribution shift problem). 
Therefore, throughout the paper, we assume access to labeled source data and only unlabeled data from target domain(s).\nIn this work, we first show that absent assumptions on the source classifier or the nature of the shift, no method of estimating accuracy will work generally (even in non-contrived settings). To estimate accuracy on target domain perfectly, we highlight that even given perfect knowledge of the labeled source distribution (i.e., $p_{s}(x, y)$ ) and unlabeled target distribution (i.e., $p_{t}(x)$ ), we need restrictions on the nature of the shift such that we can uniquely identify the target conditional $p_{t}(y \\mid x)$. Thus, in general, identifying the accuracy of the classifier is as hard as identifying the optimal predictor.\nSecond, motivated by the superiority of methods that use maximum softmax probability (or logit) of a model for Out-Of-Distribution (OOD) detection (Hendrycks & Gimpel, 2016; Hendrycks et al., 2019), we propose a simple method that leverages softmax probability to predict model performance. Our method, Average Thresholded Confidence (ATC), learns a threshold on a score (e.g., maximum confidence or negative entropy) of model confidence on validation source data and predicts target domain accuracy as the fraction of unlabeled target points that receive a score above that threshold. ATC selects a threshold on validation source data such that the fraction of source examples that receive the score above the threshold match the accuracy of those examples. Our primary contribution in ATC is the proposal of obtaining the threshold and observing its efficacy on (practical) accuracy estimation. Importantly, our work takes a step forward in positively answering the question raised in Deng & Zheng (2021); Deng et al. 
(2021) about a practical strategy to select a threshold that enables accuracy prediction with thresholded model confidence.\n", "images": [{"id": "img-0.jpeg", "top_left_x": 292, "top_left_y": 217, "bottom_right_x": 1405, "bottom_right_y": 649, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 3, "markdown": "", "images": [], "dimensions": {"dpi": 539192, "height": 944919, "width": 247256}}, {"index": 27, "markdown": "![img-8.jpeg](img-8.jpeg)\nFigure 9: Scatter plot of predicted accuracy versus (true) OOD accuracy for vision datasets except MNIST with a ResNet50 model. Results reported by aggregating MAE numbers over 4 different seeds.\n", "images": [{"id": "img-8.jpeg", "top_left_x": 290, "top_left_y": 226, "bottom_right_x": 1405, "bottom_right_y": 1834, "image_base64": ""}], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 28, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 6.60 | 5.74 | 9.88 | 6.89 | 7.25 | 6.07 | 4.77 | 3.21 | 3.02 | 2.99 | 2.85 | | | | (0.35) | (0.30) | (0.16) | (0.13) | (0.15) | (0.16) | (0.13) | (0.49) | (0.40) | (0.37) | (0.29) | | | Synthetic | 12.33 | 10.20 | 16.50 | 11.91 | 13.87 | 11.08 | 6.55 | 4.65 | 4.25 | 4.21 | 3.87 | | | | (0.51) | (0.48) | (0.26) | (0.17) | (0.18) | (0.17) | (0.35) | (0.55) | (0.55) | (0.55) | (0.75) | | CIFAR100 | Synthetic | 13.69 | 11.51 | 23.61 | 13.10 | 14.60 | 10.14 | 9.85 | 5.50 | 4.75 | 4.72 | 4.94 | | | | (0.55) | (0.41) | (1.16) | (0.80) | (0.77) | (0.64) | (0.57) | (0.70) | (0.73) | (0.74) | (0.74) | | ImageNet200 | Natural | 12.37 | 8.19 | 22.07 | 8.61 | 15.17 | 7.81 | 5.13 | 4.37 | 2.04 | 3.79 | 1.45 | | | | (0.25) | (0.33) | (0.08) | (0.25) | (0.11) | (0.29) | (0.08) | (0.39) | 
(0.24) | (0.30) | (0.27) | | | Synthetic | 19.86 | 12.94 | 32.44 | 13.35 | 25.02 | 12.38 | 5.41 | 5.93 | 3.09 | 5.00 | 2.68 | | | | (1.38) | (1.81) | (1.00) | (1.30) | (1.10) | (1.38) | (0.89) | (1.38) | (0.87) | (1.28) | (0.45) | | ImageNet | Natural | 7.77 | 6.50 | 18.13 | 6.02 | 8.13 | 5.76 | 6.23 | 3.88 | 2.17 | 2.06 | 0.80 | | | | (0.27) | (0.33) | (0.23) | (0.34) | (0.27) | (0.37) | (0.41) | (0.53) | (0.62) | (0.54) | (0.44) | | | Synthetic | 13.39 | 10.12 | 24.62 | 8.51 | 13.55 | 7.90 | 6.32 | 3.34 | 2.53 | 2.61 | 4.89 | | | | (0.53) | (0.63) | (0.64) | (0.71) | (0.61) | (0.72) | (0.33) | (0.53) | (0.36) | (0.33) | (0.83) | | FMoW-WILDS | Natural | 5.53 | 4.31 | 33.53 | 12.84 | 5.94 | 4.45 | 5.74 | 3.06 | 2.70 | 3.02 | 2.72 | | | | (0.33) | (0.63) | (0.13) | (12.06) | (0.36) | (0.77) | (0.55) | (0.36) | (0.54) | (0.35) | (0.44) | | RxRx1-WILDS | Natural | 5.80 | 5.72 | 7.90 | 4.84 | 5.98 | 5.98 | 6.03 | 4.66 | 4.56 | 4.41 | 4.47 | | | | (0.17) | (0.15) | (0.24) | (0.09) | (0.15) | (0.13) | (0.08) | (0.38) | (0.38) | (0.31) | (0.26) | | Amazon-WILDS | Natural | 2.40 | 2.29 | 8.01 | 2.38 | 2.40 | 2.28 | 17.87 | 1.65 | 1.62 | 1.60 | 1.59 | | | | (0.08) | (0.09) | (0.53) | (0.17) | (0.09) | (0.09) | (0.18) | (0.06) | (0.05) | (0.14) | (0.15) | | CivilCom.-WILDS | Natural | 12.64 | 10.80 | 16.76 | 11.03 | 13.31 | 10.99 | 16.65 | | 7.14 | | | | | | (0.52) | (0.48) | (0.53) | (0.49) | (0.52) | (0.49) | (0.25) | | (0.41) | | | | MNIST | Natural | 18.48 | 15.99 | 21.17 | 14.81 | 20.19 | 14.56 | 24.42 | 5.02 | 2.40 | 3.14 | 3.50 | | | | (0.45) | (1.53) | (0.24) | (3.89) | (0.23) | (3.47) | (0.41) | (0.44) | (1.83) | (0.49) | (0.17) | | ENTITY-13 | Same | 16.23 | 11.14 | 24.97 | 10.88 | 19.08 | 10.47 | 10.71 | 5.39 | 3.88 | 4.58 | 4.19 | | | | (0.77) | (0.65) | (0.70) | (0.77) | (0.65) | (0.72) | (0.74) | (0.92) | (0.61) | (0.85) | (0.16) | | | Novel | 28.53 | 22.02 | 38.33 | 21.64 | 32.43 | 21.22 | 20.61 | 13.58 | 10.28 | 12.25 | 6.63 | | | | (0.82) | (0.68) | (0.75) 
| (0.86) | (0.69) | (0.80) | (0.60) | (1.15) | (1.34) | (1.21) | (0.93) | | ENTITY-30 | Same | 18.59 | 14.46 | 28.82 | 14.30 | 21.63 | 13.46 | 12.92 | 9.12 | 7.75 | 8.15 | 7.64 | | | | (0.51) | (0.52) | (0.43) | (0.71) | (0.37) | (0.59) | (0.14) | (0.62) | (0.72) | (0.68) | (0.88) | | | Novel | 32.34 | 26.85 | 44.02 | 26.27 | 36.82 | 25.42 | 23.16 | 17.75 | 14.30 | 15.60 | 10.57 | | | | (0.60) | (0.58) | (0.56) | (0.79) | (0.47) | (0.68) | (0.12) | (0.76) | (0.85) | (0.86) | (0.86) | | NONLIVING-26 | Same | 18.66 | 17.17 | 26.39 | 16.14 | 19.86 | 15.58 | 16.63 | 10.87 | 10.24 | 10.07 | 10.26 | | | | (0.76) | (0.74) | (0.82) | (0.81) | (0.67) | (0.76) | (0.45) | (0.98) | (0.83) | (0.92) | (1.18) | | | Novel | 33.43 | 31.53 | 41.66 | 29.87 | 35.13 | 29.31 | 29.56 | 21.70 | 20.12 | 19.08 | 18.26 | | | | (0.67) | (0.65) | (0.67) | (0.71) | (0.54) | (0.64) | (0.21) | (0.86) | (0.75) | (0.82) | (1.12) | | LIVING-17 | Same | 12.63 | 11.05 | 18.32 | 10.46 | 14.43 | 10.14 | 9.87 | 4.57 | 3.95 | 3.81 | 4.21 | | | | (1.25) | (1.20) | (1.01) | (1.12) | (1.11) | (1.16) | (0.61) | (0.71) | (0.48) | (0.22) | (0.53) | | | Novel | 29.03 | 26.96 | 35.67 | 26.11 | 31.73 | 25.73 | 23.53 | 16.15 | 14.49 | 12.97 | 11.39 | | | | (1.44) | (1.38) | (1.09) | (1.27) | (1.19) | (1.35) | (0.52) | (1.36) | (1.46) | (1.52) | (1.72) |\nTable 3: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. For language datasets, we use DistilBERT-base-uncased, for vision dataset we report results with DenseNet model with the exception of MNIST where we use FCN. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). 
For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}, {"index": 29, "markdown": "| Dataset | Shift | IM | | AC | | DOC | | GDE | ATC-MC (Ours) | | ATC-NE (Ours) | | | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | :--: | | | | Pre T | Post T | Pre T | Post T | Pre T | Post T | Post T | Pre T | Post T | Pre T | Post T | | CIFAR10 | Natural | 7.14 | 6.20 | 10.25 | 7.06 | 7.68 | 6.35 | 5.74 | 4.02 | 3.85 | 3.76 | 3.38 | | | | (0.14) | (0.11) | (0.31) | (0.33) | (0.28) | (0.27) | (0.25) | (0.38) | (0.30) | (0.33) | (0.32) | | | Synthetic | 12.62 | 10.75 | 16.50 | 11.91 | 13.93 | 11.20 | 7.97 | 5.66 | 5.03 | 4.87 | 3.63 | | | | (0.76) | (0.71) | (0.28) | (0.24) | (0.29) | (0.28) | (0.13) | (0.64) | (0.71) | (0.71) | (0.62) | | CIFAR100 | Synthetic | 12.77 | 12.34 | 16.89 | 12.73 | 11.18 | 9.63 | 12.00 | 5.61 | 5.55 | 5.65 | 5.76 | | | | (0.43) | (0.68) | (0.20) | (2.59) | (0.35) | (1.25) | (0.48) | (0.51) | (0.55) | (0.35) | (0.27) | | ImageNet200 | Natural | 12.63 | 7.99 | 23.08 | 7.22 | 15.40 | 6.33 | 5.00 | 4.60 | 1.80 | 4.06 | 1.38 | | | | (0.59) | (0.47) | (0.31) | (0.22) | (0.42) | (0.24) | (0.36) | (0.63) | (0.17) | (0.69) | (0.29) | | | Synthetic | 20.17 | 11.74 | 33.69 | 9.51 | 25.49 | 8.61 | 4.19 | 5.37 | 2.78 | 4.53 | 3.58 | | | | (0.74) | (0.80) | (0.73) | (0.51) | (0.66) | (0.50) | (0.14) | (0.88) | (0.23) | (0.79) | (0.33) | | ImageNet | Natural | 8.09 | 6.42 | 21.66 | 5.91 | 8.53 | 5.21 | 5.90 | 3.93 | 1.89 | 2.45 | 0.73 | | | | (0.25) | (0.28) | (0.38) | (0.22) | (0.26) | (0.25) | (0.44) | (0.26) | (0.21) | (0.16) | (0.10) | | | Synthetic | 13.93 | 9.90 | 28.05 | 7.56 | 13.82 | 6.19 | 6.70 | 3.33 | 2.55 | 2.12 | 5.06 | | | | (0.14) | (0.23) | (0.39) | (0.13) | (0.31) 
| (0.07) | (0.52) | (0.25) | (0.25) | (0.31) | (0.27) | | FMoW-WILDS | Natural | 5.15 | 3.55 | 34.64 | 5.03 | 5.58 | 3.46 | 5.08 | 2.59 | 2.33 | 2.52 | 2.22 | | | | (0.19) | (0.41) | (0.22) | (0.29) | (0.17) | (0.37) | (0.46) | (0.32) | (0.28) | (0.25) | (0.30) | | RxRx1-WILDS | Natural | 6.17 | 6.11 | 21.05 | 5.21 | 6.54 | 6.27 | 6.82 | 5.30 | 5.20 | 5.19 | 5.63 | | | | (0.20) | (0.24) | (0.31) | (0.18) | (0.21) | (0.20) | (0.31) | (0.30) | (0.44) | (0.43) | (0.55) | | Entity-13 | Same | 18.32 | 14.38 | 27.79 | 13.56 | 20.50 | 13.22 | 16.09 | 9.35 | 7.50 | 7.80 | 6.94 | | | | (0.29) | (0.53) | (1.18) | (0.58) | (0.47) | (0.58) | (0.84) | (0.79) | (0.65) | (0.62) | (0.71) | | | Novel | 28.82 | 24.03 | 38.97 | 22.96 | 31.66 | 22.61 | 25.26 | 17.11 | 13.96 | 14.75 | 9.94 | | | | (0.30) | (0.55) | (1.32) | (0.59) | (0.54) | (0.58) | (1.08) | (0.93) | (0.64) | (0.78) | | | Entity-30 | Same | 16.91 | 14.61 | 26.84 | 14.37 | 18.60 | 13.11 | 13.74 | 8.54 | 7.94 | 7.77 | 8.04 | | | | (1.33) | (1.11) | (2.15) | (1.34) | (1.69) | (1.30) | (1.07) | (1.47) | (1.38) | (1.44) | (1.51) | | | Novel | 28.66 | 25.83 | 39.21 | 25.03 | 30.95 | 23.73 | 23.15 | 15.57 | 13.24 | 12.44 | 11.05 | | | | (1.16) | (0.88) | (2.03) | (1.11) | (1.64) | (1.11) | (0.51) | (1.44) | (1.15) | (1.26) | (1.13) | | NonLIVING-26 | Same | 17.43 | 15.95 | 27.70 | 15.40 | 18.06 | 14.58 | 16.99 | 10.79 | 10.13 | 10.05 | 10.29 | | | | (0.90) | (0.86) | (0.90) | (0.69) | (1.00) | (0.78) | (1.25) | (0.62) | (0.32) | (0.46) | (0.79) | | | Novel | 29.51 | 27.75 | 40.02 | 26.77 | 30.36 | 25.93 | 27.70 | 19.64 | 17.75 | 16.90 | 15.69 | | | | (0.86) | (0.82) | (0.76) | (0.82) | (0.95) | (0.80) | (1.42) | (0.68) | (0.53) | (0.60) | (0.83) | | LIVING-17 | Same | 14.28 | 12.21 | 23.46 | 11.16 | 15.22 | 10.78 | 10.49 | 4.92 | 4.23 | 4.19 | 4.73 | | | | (0.96) | (0.93) | (1.16) | (0.90) | (0.96) | (0.99) | (0.97) | (0.57) | (0.42) | (0.35) | (0.24) | | | Novel | 28.91 | 26.35 | 38.62 | 24.91 | 30.32 | 24.52 | 22.49 | 
15.42 | 13.02 | 12.29 | 10.34 | | | | (0.66) | (0.73) | (1.01) | (0.61) | (0.59) | (0.74) | (0.85) | (0.59) | (0.53) | (0.73) | (0.62) |\nTable 4: Mean Absolute estimation Error (MAE) results for different datasets in our setup grouped by the nature of shift for ResNet model. 'Same' refers to same subpopulation shifts and 'Novel' refers novel subpopulation shifts. We include details about the target sets considered in each shift in Table 2. Post T denotes use of TS calibration on source. Across all datasets, we observe that ATC achieves superior performance (lower MAE is better). For GDE post T and pre T estimates match since TS doesn't alter the argmax prediction. Results reported by aggregating MAE numbers over 4 different seeds. Values in parenthesis (i.e., $(\\cdot)$ ) denote standard deviation values.\n", "images": [], "dimensions": {"dpi": 200, "height": 2200, "width": 1700}}], "model": "mistral-ocr-2503-completion", "usage_info": {"pages_processed": 29, "doc_size_bytes": null}} @@ -3501,7 +3462,7 @@ examples: sort_order: "desc" responses: "200": - application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, "name": "", "created_at": "2024-09-24T04:50:43.988Z", "processing_status": "", "uploaded_by_id": "7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "tokens_processing_total": 957230}]} + application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, "name": "", "created_at": "2024-09-24T04:50:43.988Z", "uploaded_by_id": 
"7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 957230}]} "422": application/json: {} libraries_documents_upload_v1: @@ -3513,7 +3474,7 @@ examples: multipart/form-data: {"file": "x-file: example.file"} responses: "200": - application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "processing_status": "", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "tokens_processing_total": 61161} + application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 61161} "422": application/json: {} libraries_documents_get_v1: @@ -3524,7 +3485,7 @@ examples: document_id: "90973aec-0508-4375-8b00-91d732414745" responses: "200": - application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", "library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", "mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "processing_status": "", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", "uploaded_by_type": "", "tokens_processing_total": 793889} + application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", "library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", "mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 793889} "422": application/json: 
{} libraries_documents_update_v1: @@ -3537,7 +3498,7 @@ examples: application/json: {} responses: "200": - application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", "hash": "", "mime_type": "", "extension": "m1v", "size": 802305, "name": "", "created_at": "2024-07-02T20:02:03.680Z", "processing_status": "", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "tokens_processing_total": 806683} + application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", "hash": "", "mime_type": "", "extension": "m1v", "size": 802305, "name": "", "created_at": "2024-07-02T20:02:03.680Z", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 806683} "422": application/json: {} libraries_documents_delete_v1: @@ -4328,6 +4289,16 @@ examples: responses: "200": application/json: {"id": "", "object": "batch", "input_files": ["7309e534-200e-43a4-83c5-dc4c2a14c745"], "endpoint": "", "errors": [], "status": "FAILED", "created_at": 157212, "total_requests": 188914, "completed_requests": 685483, "succeeded_requests": 127060, "failed_requests": 428561} + agents_api_v1_agents_delete_alias: + speakeasy-default-agents-api-v1-agents-delete-alias: + parameters: + path: + agent_id: "" + query: + alias: "" + responses: + "422": + application/json: {} examplesVersion: 1.0.2 generatedTests: {} generatedFiles: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 23b915b5..733650dc 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,8 +13,9 @@ generation: requestResponseComponentNamesFeb2024: true securityFeb2025: true sharedErrorComponentsApr2025: true - methodSignaturesApr2024: true sharedNestedComponentsJan2026: true + nameOverrideFeb2026: true + methodSignaturesApr2024: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -22,6 
+23,7 @@ generation: schemas: allOfMergeStrategy: shallowMerge requestBodyFieldName: "" + versioningStrategy: automatic persistentEdits: enabled: "true" tests: @@ -29,26 +31,31 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0a3 + version: 2.0.0-a3.1 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 + main: {} allowedRedefinedBuiltins: - id - object + - input + - dir asyncMode: both authors: - Mistral baseErrorName: MistralError clientServerStatusCodesAsErrors: true - constFieldCasing: upper + constFieldCasing: normal defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API. enableCustomCodeRegions: true enumFormat: union envVarPrefix: MISTRAL fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true responseRequiredSep2024: true flatAdditionalProperties: true flattenGlobalSecurity: true @@ -60,17 +67,17 @@ python: option: openapi paths: callbacks: "" - errors: "" + errors: errors operations: "" shared: "" webhooks: "" inferUnionDiscriminators: true inputModelSuffix: input license: "" - maxMethodParams: 15 + maxMethodParams: 999 methodArguments: infer-optional-args moduleName: mistralai.client - multipartArrayFormat: legacy + multipartArrayFormat: standard outputModelSuffix: output packageManager: uv packageName: mistralai @@ -80,3 +87,4 @@ python: responseFormat: flat sseFlatResponse: false templateVersion: v2 + useAsyncHooks: false diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 4aa0af42..d051080f 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.685.0 +speakeasyVersion: 1.729.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure @@ -14,8 +14,8 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:74d0de7750f6a1878b68c9da683eba7a447d7c367131d0cb8f5c3b1e05829624 - sourceBlobDigest: 
sha256:41e8354c48993fc29be68959d835ea4f8e0cc1d4b4fbd527afcd970bc02c62a2 + sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 + sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 tags: - latest targets: @@ -25,24 +25,24 @@ targets: sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:a34c3049c604d0bb67101d042e959f14098964fe784f98975a9201c84dbf44d0 + codeSamplesRevisionDigest: sha256:248e5daaa44589805664ab1479502885758fde0f1da3b384b97b1a09d74c8256 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:fa36e5999e79c32e8b2c1317cc0d6ed179912ced15194f02b5f80da22e45ae5f + codeSamplesRevisionDigest: sha256:f6c4dc988e9b7be6f8d8087d14b2269be601bb9bff2227b07e1018efe88e1556 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:74d0de7750f6a1878b68c9da683eba7a447d7c367131d0cb8f5c3b1e05829624 - sourceBlobDigest: sha256:41e8354c48993fc29be68959d835ea4f8e0cc1d4b4fbd527afcd970bc02c62a2 + sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 + sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:99fcae1bc81801e3825648a44f5ffa62a8f124e3186e5570be40414de164e7f2 + codeSamplesRevisionDigest: 
sha256:f3cf9d6d99a27d6e753bd6e1a2f2c2fb290f412a455576de4bab610ab4825939 workflow: workflowVersion: 1.0.0 - speakeasyVersion: 1.685.0 + speakeasyVersion: 1.729.0 sources: mistral-azure-source: inputs: diff --git a/.speakeasy/workflow.yaml b/.speakeasy/workflow.yaml index ba109c09..65d6d202 100644 --- a/.speakeasy/workflow.yaml +++ b/.speakeasy/workflow.yaml @@ -1,5 +1,5 @@ workflowVersion: 1.0.0 -speakeasyVersion: 1.685.0 +speakeasyVersion: 1.729.0 sources: mistral-azure-source: inputs: diff --git a/Makefile b/Makefile index a169d78f..bba024ad 100644 --- a/Makefile +++ b/Makefile @@ -1,19 +1,29 @@ -.PHONY: help test-generate update-speakeasy-version +.PHONY: help generate test-generate update-speakeasy-version check-config help: @echo "Available targets:" + @echo " make generate Generate all SDKs (main, Azure, GCP)" @echo " make test-generate Test SDK generation locally" @echo " make update-speakeasy-version VERSION=x.y.z Update Speakeasy CLI version" + @echo " make check-config Check gen.yaml against recommended defaults" @echo "" @echo "Note: Production SDK generation is done via GitHub Actions:" @echo " .github/workflows/sdk_generation_mistralai_sdk.yaml" +# Generate all SDKs (main, Azure, GCP) +generate: + speakeasy run -t all + # Test SDK generation locally. # For production, use GitHub Actions: .github/workflows/sdk_generation_mistralai_sdk.yaml # This uses the Speakeasy CLI version defined in .speakeasy/workflow.yaml test-generate: speakeasy run --skip-versioning +# Check gen.yaml configuration against Speakeasy recommended defaults +check-config: + speakeasy configure generation check + # Update the Speakeasy CLI version (the code generator tool). # This modifies speakeasyVersion in .speakeasy/workflow.yaml and regenerates the SDK. 
# Usage: make update-speakeasy-version VERSION=1.685.0 diff --git a/README.md b/README.md index a774a9e1..dd98b5cc 100644 --- a/README.md +++ b/README.md @@ -27,9 +27,7 @@ $ source ~/.zshenv ## Summary -Mistral AI API: Dora OpenAPI schema - -Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. +Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. @@ -161,8 +159,8 @@ with Mistral( res = mistral.chat.complete(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=False, response_format={ "type": "text", @@ -190,8 +188,8 @@ async def main(): res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=False, response_format={ "type": "text", @@ -269,8 +267,8 @@ with Mistral( res = mistral.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], agent_id="", stream=False, response_format={ "type": "text", @@ -298,8 +296,8 @@ async def main(): res = await mistral.agents.complete_async(messages=[ { - "content": "Who is the best French painter? 
Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], agent_id="", stream=False, response_format={ "type": "text", @@ -616,7 +614,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start_stream(inputs="", stream=True, completion_args={ + res = mistral.beta.conversations.start_stream(inputs=[ + { + "object": "entry", + "type": "function.result", + "tool_call_id": "", + "result": "", + }, + ], stream=True, completion_args={ "response_format": { "type": "text", }, @@ -653,7 +658,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.upload(library_id="f973c54e-979a-4464-9d36-8cc31beb21fe", file={ + res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={ "file_name": "example.file", "content": open("example.file", "rb"), }) @@ -680,8 +685,8 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.models.list( - retries=RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) + res = mistral.models.list(, + RetryConfig("backoff", BackoffStrategy(1, 50, 1.1, 100), False)) # Handle response print(res) @@ -711,7 +716,7 @@ with Mistral( ## Error Handling -[`MistralError`](./src/mistralai/client/models/mistralerror.py) is the base class for all HTTP error responses. It has the following properties: +[`MistralError`](./src/mistralai/client/errors/mistralerror.py) is the base class for all HTTP error responses. 
It has the following properties: | Property | Type | Description | | ------------------ | ---------------- | --------------------------------------------------------------------------------------- | @@ -724,8 +729,7 @@ with Mistral( ### Example ```python -import mistralai.client -from mistralai.client import Mistral, models +from mistralai.client import Mistral, errors import os @@ -741,7 +745,7 @@ with Mistral( print(res) - except models.MistralError as e: + except errors.MistralError as e: # The base class for HTTP error responses print(e.message) print(e.status_code) @@ -750,13 +754,13 @@ with Mistral( print(e.raw_response) # Depending on the method different errors may be thrown - if isinstance(e, models.HTTPValidationError): - print(e.data.detail) # Optional[List[mistralai.client.ValidationError]] + if isinstance(e, errors.HTTPValidationError): + print(e.data.detail) # Optional[List[models.ValidationError]] ``` ### Error Classes **Primary error:** -* [`MistralError`](./src/mistralai/client/models/mistralerror.py): The base class for HTTP error responses. +* [`MistralError`](./src/mistralai/client/errors/mistralerror.py): The base class for HTTP error responses.
Less common errors (6) @@ -768,9 +772,9 @@ with Mistral( * [`httpx.TimeoutException`](https://www.python-httpx.org/exceptions/#httpx.TimeoutException): HTTP request timed out. -**Inherit from [`MistralError`](./src/mistralai/client/models/mistralerror.py)**: -* [`HTTPValidationError`](./src/mistralai/client/models/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 53 of 75 methods.* -* [`ResponseValidationError`](./src/mistralai/client/models/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute. +**Inherit from [`MistralError`](./src/mistralai/client/errors/mistralerror.py)**: +* [`HTTPValidationError`](./src/mistralai/client/errors/httpvalidationerror.py): Validation Error. Status code `422`. Applicable to 53 of 75 methods.* +* [`ResponseValidationError`](./src/mistralai/client/errors/responsevalidationerror.py): Type mismatch between the response data and the expected Pydantic model. Provides access to the Pydantic validation error via the `cause` attribute.
diff --git a/USAGE.md b/USAGE.md index 18103864..f71bbabc 100644 --- a/USAGE.md +++ b/USAGE.md @@ -15,8 +15,8 @@ with Mistral( res = mistral.chat.complete(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=False, response_format={ "type": "text", @@ -44,8 +44,8 @@ async def main(): res = await mistral.chat.complete_async(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=False, response_format={ "type": "text", @@ -123,8 +123,8 @@ with Mistral( res = mistral.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], agent_id="", stream=False, response_format={ "type": "text", @@ -152,8 +152,8 @@ async def main(): res = await mistral.agents.complete_async(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? 
Answer in one short sentence.", }, ], agent_id="", stream=False, response_format={ "type": "text", diff --git a/docs/models/httpvalidationerror.md b/docs/errors/httpvalidationerror.md similarity index 100% rename from docs/models/httpvalidationerror.md rename to docs/errors/httpvalidationerror.md diff --git a/docs/models/agent.md b/docs/models/agent.md index e335d889..4de5a901 100644 --- a/docs/models/agent.md +++ b/docs/models/agent.md @@ -13,7 +13,7 @@ | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `object` | [Optional[models.AgentObject]](../models/agentobject.md) | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["agent"]]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `version` | *int* | :heavy_check_mark: | N/A | | `versions` | List[*int*] | :heavy_check_mark: | N/A | diff --git a/docs/models/agentconversation.md b/docs/models/agentconversation.md index a2d61731..451f6fb8 100644 --- a/docs/models/agentconversation.md +++ b/docs/models/agentconversation.md @@ -8,7 +8,7 @@ | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| -| `object` | [Optional[models.AgentConversationObject]](../models/agentconversationobject.md) | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["conversation"]]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | diff --git a/docs/models/agentconversationobject.md b/docs/models/agentconversationobject.md deleted file mode 100644 index ea7cc75c..00000000 --- a/docs/models/agentconversationobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentConversationObject - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `CONVERSATION` | conversation | \ No newline at end of file diff --git a/docs/models/agentcreationrequest.md b/docs/models/agentcreationrequest.md deleted file mode 100644 index f0f0fdbc..00000000 --- a/docs/models/agentcreationrequest.md +++ /dev/null @@ -1,16 +0,0 @@ -# AgentCreationRequest - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentCreationRequestTool](../models/agentcreationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffentry.md b/docs/models/agenthandoffentry.md index 8831b0eb..2b689ec7 100644 --- a/docs/models/agenthandoffentry.md +++ b/docs/models/agenthandoffentry.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `object` | [Optional[models.AgentHandoffEntryObject]](../models/agenthandoffentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.AgentHandoffEntryType]](../models/agenthandoffentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | -| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | -| `next_agent_id` | *str* | :heavy_check_mark: | N/A | -| `next_agent_name` | *str* | :heavy_check_mark: | N/A | 
\ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["agent.handoff"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `previous_agent_id` | *str* | :heavy_check_mark: | N/A | +| `previous_agent_name` | *str* | :heavy_check_mark: | N/A | +| `next_agent_id` | *str* | :heavy_check_mark: | N/A | +| `next_agent_name` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/agenthandoffentryobject.md b/docs/models/agenthandoffentryobject.md deleted file mode 100644 index 4bb876fb..00000000 --- a/docs/models/agenthandoffentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentHandoffEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/agenthandoffentrytype.md b/docs/models/agenthandoffentrytype.md deleted file mode 100644 index 527ebceb..00000000 --- a/docs/models/agenthandoffentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentHandoffEntryType - - -## Values - -| Name | Value | -| --------------- | --------------- | -| `AGENT_HANDOFF` | agent.handoff | \ No newline at end of file diff --git a/docs/models/agentobject.md b/docs/models/agentobject.md deleted file mode 100644 index 70e143b0..00000000 --- 
a/docs/models/agentobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# AgentObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `AGENT` | agent | \ No newline at end of file diff --git a/docs/models/createorupdateagentaliasrequest.md b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md similarity index 90% rename from docs/models/createorupdateagentaliasrequest.md rename to docs/models/agentsapiv1agentscreateorupdatealiasrequest.md index af2591eb..79406434 100644 --- a/docs/models/createorupdateagentaliasrequest.md +++ b/docs/models/agentsapiv1agentscreateorupdatealiasrequest.md @@ -1,4 +1,4 @@ -# CreateOrUpdateAgentAliasRequest +# AgentsAPIV1AgentsCreateOrUpdateAliasRequest ## Fields diff --git a/docs/models/deleteagentaliasrequest.md b/docs/models/agentsapiv1agentsdeletealiasrequest.md similarity index 90% rename from docs/models/deleteagentaliasrequest.md rename to docs/models/agentsapiv1agentsdeletealiasrequest.md index 17812ec4..8e95c0c3 100644 --- a/docs/models/deleteagentaliasrequest.md +++ b/docs/models/agentsapiv1agentsdeletealiasrequest.md @@ -1,4 +1,4 @@ -# DeleteAgentAliasRequest +# AgentsAPIV1AgentsDeleteAliasRequest ## Fields diff --git a/docs/models/deleteagentrequest.md b/docs/models/agentsapiv1agentsdeleterequest.md similarity index 89% rename from docs/models/deleteagentrequest.md rename to docs/models/agentsapiv1agentsdeleterequest.md index 0aaacae4..2799f418 100644 --- a/docs/models/deleteagentrequest.md +++ b/docs/models/agentsapiv1agentsdeleterequest.md @@ -1,4 +1,4 @@ -# DeleteAgentRequest +# AgentsAPIV1AgentsDeleteRequest ## Fields diff --git a/docs/models/getagentagentversion.md b/docs/models/agentsapiv1agentsgetagentversion.md similarity index 79% rename from docs/models/getagentagentversion.md rename to docs/models/agentsapiv1agentsgetagentversion.md index 6d7b3f1d..7fb9f2d5 100644 --- a/docs/models/getagentagentversion.md +++ b/docs/models/agentsapiv1agentsgetagentversion.md @@ -1,4 +1,4 @@ -# GetAgentAgentVersion +# 
AgentsAPIV1AgentsGetAgentVersion ## Supported Types diff --git a/docs/models/agentsapiv1agentsgetrequest.md b/docs/models/agentsapiv1agentsgetrequest.md new file mode 100644 index 00000000..ceffe009 --- /dev/null +++ b/docs/models/agentsapiv1agentsgetrequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsGetRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/getagentversionrequest.md b/docs/models/agentsapiv1agentsgetversionrequest.md similarity index 90% rename from docs/models/getagentversionrequest.md rename to docs/models/agentsapiv1agentsgetversionrequest.md index c98fee9d..96a73589 100644 --- a/docs/models/getagentversionrequest.md +++ b/docs/models/agentsapiv1agentsgetversionrequest.md @@ -1,4 +1,4 @@ -# GetAgentVersionRequest +# AgentsAPIV1AgentsGetVersionRequest ## Fields diff --git a/docs/models/listagentsrequest.md b/docs/models/agentsapiv1agentslistrequest.md similarity index 98% rename from docs/models/listagentsrequest.md rename to docs/models/agentsapiv1agentslistrequest.md index 79aec3ea..4785a54c 100644 --- a/docs/models/listagentsrequest.md +++ b/docs/models/agentsapiv1agentslistrequest.md @@ -1,4 +1,4 @@ -# ListAgentsRequest +# AgentsAPIV1AgentsListRequest ## Fields diff --git a/docs/models/listagentaliasesrequest.md 
b/docs/models/agentsapiv1agentslistversionaliasesrequest.md similarity index 85% rename from docs/models/listagentaliasesrequest.md rename to docs/models/agentsapiv1agentslistversionaliasesrequest.md index b3570cb8..3083bf92 100644 --- a/docs/models/listagentaliasesrequest.md +++ b/docs/models/agentsapiv1agentslistversionaliasesrequest.md @@ -1,4 +1,4 @@ -# ListAgentAliasesRequest +# AgentsAPIV1AgentsListVersionAliasesRequest ## Fields diff --git a/docs/models/listagentversionsrequest.md b/docs/models/agentsapiv1agentslistversionsrequest.md similarity index 94% rename from docs/models/listagentversionsrequest.md rename to docs/models/agentsapiv1agentslistversionsrequest.md index ba8ddaa5..91831700 100644 --- a/docs/models/listagentversionsrequest.md +++ b/docs/models/agentsapiv1agentslistversionsrequest.md @@ -1,4 +1,4 @@ -# ListAgentVersionsRequest +# AgentsAPIV1AgentsListVersionsRequest ## Fields diff --git a/docs/models/agentsapiv1agentsupdaterequest.md b/docs/models/agentsapiv1agentsupdaterequest.md new file mode 100644 index 00000000..7ef60bec --- /dev/null +++ b/docs/models/agentsapiv1agentsupdaterequest.md @@ -0,0 +1,9 @@ +# AgentsAPIV1AgentsUpdateRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `update_agent_request` | [models.UpdateAgentRequest](../models/updateagentrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/updateagentversionrequest.md b/docs/models/agentsapiv1agentsupdateversionrequest.md similarity index 89% rename from docs/models/updateagentversionrequest.md rename to docs/models/agentsapiv1agentsupdateversionrequest.md index b83eb867..e937acc9 100644 --- 
a/docs/models/updateagentversionrequest.md +++ b/docs/models/agentsapiv1agentsupdateversionrequest.md @@ -1,4 +1,4 @@ -# UpdateAgentVersionRequest +# AgentsAPIV1AgentsUpdateVersionRequest ## Fields diff --git a/docs/models/appendconversationrequest.md b/docs/models/agentsapiv1conversationsappendrequest.md similarity index 96% rename from docs/models/appendconversationrequest.md rename to docs/models/agentsapiv1conversationsappendrequest.md index 977d8e8b..ac8a00ec 100644 --- a/docs/models/appendconversationrequest.md +++ b/docs/models/agentsapiv1conversationsappendrequest.md @@ -1,4 +1,4 @@ -# AppendConversationRequest +# AgentsAPIV1ConversationsAppendRequest ## Fields diff --git a/docs/models/appendconversationstreamrequest.md b/docs/models/agentsapiv1conversationsappendstreamrequest.md similarity index 96% rename from docs/models/appendconversationstreamrequest.md rename to docs/models/agentsapiv1conversationsappendstreamrequest.md index a23231c2..dbc330f1 100644 --- a/docs/models/appendconversationstreamrequest.md +++ b/docs/models/agentsapiv1conversationsappendstreamrequest.md @@ -1,4 +1,4 @@ -# AppendConversationStreamRequest +# AgentsAPIV1ConversationsAppendStreamRequest ## Fields diff --git a/docs/models/getconversationrequest.md b/docs/models/agentsapiv1conversationsdeleterequest.md similarity index 95% rename from docs/models/getconversationrequest.md rename to docs/models/agentsapiv1conversationsdeleterequest.md index 8a66a8b0..c6eed281 100644 --- a/docs/models/getconversationrequest.md +++ b/docs/models/agentsapiv1conversationsdeleterequest.md @@ -1,4 +1,4 @@ -# GetConversationRequest +# AgentsAPIV1ConversationsDeleteRequest ## Fields diff --git a/docs/models/deleteconversationrequest.md b/docs/models/agentsapiv1conversationsgetrequest.md similarity index 95% rename from docs/models/deleteconversationrequest.md rename to docs/models/agentsapiv1conversationsgetrequest.md index 39d9e5df..67d450c8 100644 --- a/docs/models/deleteconversationrequest.md +++ 
b/docs/models/agentsapiv1conversationsgetrequest.md @@ -1,4 +1,4 @@ -# DeleteConversationRequest +# AgentsAPIV1ConversationsGetRequest ## Fields diff --git a/docs/models/getconversationhistoryrequest.md b/docs/models/agentsapiv1conversationshistoryrequest.md similarity index 94% rename from docs/models/getconversationhistoryrequest.md rename to docs/models/agentsapiv1conversationshistoryrequest.md index fc90282b..7e5d39e9 100644 --- a/docs/models/getconversationhistoryrequest.md +++ b/docs/models/agentsapiv1conversationshistoryrequest.md @@ -1,4 +1,4 @@ -# GetConversationHistoryRequest +# AgentsAPIV1ConversationsHistoryRequest ## Fields diff --git a/docs/models/listconversationsrequest.md b/docs/models/agentsapiv1conversationslistrequest.md similarity index 92% rename from docs/models/listconversationsrequest.md rename to docs/models/agentsapiv1conversationslistrequest.md index d99b4208..62c9011f 100644 --- a/docs/models/listconversationsrequest.md +++ b/docs/models/agentsapiv1conversationslistrequest.md @@ -1,4 +1,4 @@ -# ListConversationsRequest +# AgentsAPIV1ConversationsListRequest ## Fields diff --git a/docs/models/listconversationsresponse.md b/docs/models/agentsapiv1conversationslistresponse.md similarity index 84% rename from docs/models/listconversationsresponse.md rename to docs/models/agentsapiv1conversationslistresponse.md index 9d611c55..b233ee20 100644 --- a/docs/models/listconversationsresponse.md +++ b/docs/models/agentsapiv1conversationslistresponse.md @@ -1,4 +1,4 @@ -# ListConversationsResponse +# AgentsAPIV1ConversationsListResponse ## Supported Types diff --git a/docs/models/getconversationmessagesrequest.md b/docs/models/agentsapiv1conversationsmessagesrequest.md similarity index 94% rename from docs/models/getconversationmessagesrequest.md rename to docs/models/agentsapiv1conversationsmessagesrequest.md index fd037fea..a91ab046 100644 --- a/docs/models/getconversationmessagesrequest.md +++ 
b/docs/models/agentsapiv1conversationsmessagesrequest.md @@ -1,4 +1,4 @@ -# GetConversationMessagesRequest +# AgentsAPIV1ConversationsMessagesRequest ## Fields diff --git a/docs/models/restartconversationrequest.md b/docs/models/agentsapiv1conversationsrestartrequest.md similarity index 96% rename from docs/models/restartconversationrequest.md rename to docs/models/agentsapiv1conversationsrestartrequest.md index f24f14e6..a18a41f5 100644 --- a/docs/models/restartconversationrequest.md +++ b/docs/models/agentsapiv1conversationsrestartrequest.md @@ -1,4 +1,4 @@ -# RestartConversationRequest +# AgentsAPIV1ConversationsRestartRequest ## Fields diff --git a/docs/models/restartconversationstreamrequest.md b/docs/models/agentsapiv1conversationsrestartstreamrequest.md similarity index 96% rename from docs/models/restartconversationstreamrequest.md rename to docs/models/agentsapiv1conversationsrestartstreamrequest.md index daa661a9..7548286a 100644 --- a/docs/models/restartconversationstreamrequest.md +++ b/docs/models/agentsapiv1conversationsrestartstreamrequest.md @@ -1,4 +1,4 @@ -# RestartConversationStreamRequest +# AgentsAPIV1ConversationsRestartStreamRequest ## Fields diff --git a/docs/models/agentscompletionrequest.md b/docs/models/agentscompletionrequest.md index d87dc7da..33435732 100644 --- a/docs/models/agentscompletionrequest.md +++ b/docs/models/agentscompletionrequest.md @@ -11,7 +11,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.AgentsCompletionRequestMessage](../models/agentscompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/docs/models/agentscompletionstreamrequest.md b/docs/models/agentscompletionstreamrequest.md index dd1804a1..407be8e0 100644 --- a/docs/models/agentscompletionstreamrequest.md +++ b/docs/models/agentscompletionstreamrequest.md @@ -11,7 +11,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.AgentsCompletionStreamRequestMessage](../models/agentscompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/docs/models/unarchiveftmodelout.md b/docs/models/archivemodelresponse.md similarity index 96% rename from docs/models/unarchiveftmodelout.md rename to docs/models/archivemodelresponse.md index 12c3d745..276656d1 100644 --- a/docs/models/unarchiveftmodelout.md +++ b/docs/models/archivemodelresponse.md @@ -1,4 +1,4 @@ -# UnarchiveFTModelOut +# ArchiveModelResponse ## Fields diff --git a/docs/models/assistantmessage.md b/docs/models/assistantmessage.md index 3d0bd90b..9ef63837 100644 --- a/docs/models/assistantmessage.md +++ b/docs/models/assistantmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 
`role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | | `content` | [OptionalNullable[models.AssistantMessageContent]](../models/assistantmessagecontent.md) | :heavy_minus_sign: | N/A | | `tool_calls` | List[[models.ToolCall](../models/toolcall.md)] | :heavy_minus_sign: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | -| `role` | [Optional[models.AssistantMessageRole]](../models/assistantmessagerole.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message. | \ No newline at end of file diff --git a/docs/models/assistantmessagerole.md b/docs/models/assistantmessagerole.md deleted file mode 100644 index 658229e7..00000000 --- a/docs/models/assistantmessagerole.md +++ /dev/null @@ -1,8 +0,0 @@ -# AssistantMessageRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/audiochunk.md b/docs/models/audiochunk.md index 8a04af04..1ba8b0f5 100644 --- a/docs/models/audiochunk.md +++ b/docs/models/audiochunk.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ------------------------ | ------------------------ | ------------------------ | ------------------------ | -| `input_audio` | *str* | :heavy_check_mark: | N/A | -| `type` | *Literal["input_audio"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| `type` | *Literal["input_audio"]* | :heavy_check_mark: | N/A | +| `input_audio` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git 
a/docs/models/audiotranscriptionrequest.md b/docs/models/audiotranscriptionrequest.md index d7f5bd51..80bd5301 100644 --- a/docs/models/audiotranscriptionrequest.md +++ b/docs/models/audiotranscriptionrequest.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | Example | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | voxtral-mini-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | **Example 1:** voxtral-mini-latest
**Example 2:** voxtral-mini-2507 | | `file` | [Optional[models.File]](../models/file.md) | :heavy_minus_sign: | N/A | | | `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | | `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | diff --git a/docs/models/batchjobout.md b/docs/models/batchjob.md similarity index 99% rename from docs/models/batchjobout.md rename to docs/models/batchjob.md index 5f101173..162e2cff 100644 --- a/docs/models/batchjobout.md +++ b/docs/models/batchjob.md @@ -1,4 +1,4 @@ -# BatchJobOut +# BatchJob ## Fields diff --git a/docs/models/batchjobsout.md b/docs/models/batchjobsout.md deleted file mode 100644 index 7a9d6f68..00000000 --- a/docs/models/batchjobsout.md +++ /dev/null @@ -1,10 +0,0 @@ -# BatchJobsOut - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `data` | List[[models.BatchJobOut](../models/batchjobout.md)] | :heavy_minus_sign: | N/A | -| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | -| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/cancelfinetuningjobresponse.md b/docs/models/cancelfinetuningjobresponse.md deleted file mode 100644 index c512342e..00000000 --- a/docs/models/cancelfinetuningjobresponse.md +++ /dev/null @@ -1,19 +0,0 @@ -# CancelFineTuningJobResponse - -OK - - -## Supported Types - -### `models.ClassifierDetailedJobOut` - -```python -value: models.ClassifierDetailedJobOut = /* values here */ -``` - -### `models.CompletionDetailedJobOut` - -```python -value: models.CompletionDetailedJobOut = /* values here */ -``` - diff --git a/docs/models/chatclassificationrequest.md b/docs/models/chatclassificationrequest.md index 
910d62ae..ba9c95ea 100644 --- a/docs/models/chatclassificationrequest.md +++ b/docs/models/chatclassificationrequest.md @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ------------------------------------ | ------------------------------------ | ------------------------------------ | ------------------------------------ | | `model` | *str* | :heavy_check_mark: | N/A | -| `inputs` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Chat to classify | \ No newline at end of file +| `input` | [models.Inputs](../models/inputs.md) | :heavy_check_mark: | Chat to classify | \ No newline at end of file diff --git a/docs/models/chatcompletionrequest.md b/docs/models/chatcompletionrequest.md index f3abeeff..921161fa 100644 --- a/docs/models/chatcompletionrequest.md +++ b/docs/models/chatcompletionrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/docs/models/chatcompletionstreamrequest.md b/docs/models/chatcompletionstreamrequest.md index 42792d39..8761f000 100644 --- a/docs/models/chatcompletionstreamrequest.md +++ b/docs/models/chatcompletionstreamrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
| | diff --git a/docs/models/checkpointout.md b/docs/models/checkpoint.md similarity index 96% rename from docs/models/checkpointout.md rename to docs/models/checkpoint.md index 053592d2..f7f35530 100644 --- a/docs/models/checkpointout.md +++ b/docs/models/checkpoint.md @@ -1,10 +1,10 @@ -# CheckpointOut +# Checkpoint ## Fields | Field | Type | Required | Description | Example | | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `metrics` | [models.MetricOut](../models/metricout.md) | :heavy_check_mark: | Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). | | +| `metrics` | [models.Metric](../models/metric.md) | :heavy_check_mark: | Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). | | | `step_number` | *int* | :heavy_check_mark: | The step number that the checkpoint was created at. | | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the checkpoint was created. 
| 1716963433 | \ No newline at end of file diff --git a/docs/models/classifierdetailedjoboutintegration.md b/docs/models/classifierdetailedjoboutintegration.md deleted file mode 100644 index 9dfa6e8a..00000000 --- a/docs/models/classifierdetailedjoboutintegration.md +++ /dev/null @@ -1,11 +0,0 @@ -# ClassifierDetailedJobOutIntegration - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/classifierfinetunedmodel.md b/docs/models/classifierfinetunedmodel.md new file mode 100644 index 00000000..ad05f931 --- /dev/null +++ b/docs/models/classifierfinetunedmodel.md @@ -0,0 +1,23 @@ +# ClassifierFineTunedModel + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | +| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FineTunedModelCapabilities](../models/finetunedmodelcapabilities.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `classifier_targets` | 
List[[models.ClassifierTargetResult](../models/classifiertargetresult.md)] | :heavy_check_mark: | N/A | +| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierjobout.md b/docs/models/classifierfinetuningjob.md similarity index 97% rename from docs/models/classifierjobout.md rename to docs/models/classifierfinetuningjob.md index ceecef5d..369756ba 100644 --- a/docs/models/classifierjobout.md +++ b/docs/models/classifierfinetuningjob.md @@ -1,4 +1,4 @@ -# ClassifierJobOut +# ClassifierFineTuningJob ## Fields @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | The ID of the job. | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | -| `status` | [models.ClassifierJobOutStatus](../models/classifierjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `status` | [models.ClassifierFineTuningJobStatus](../models/classifierfinetuningjobstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | @@ -16,8 +16,8 @@ | `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | -| `integrations` | List[[models.ClassifierJobOutIntegration](../models/classifierjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `integrations` | List[[models.ClassifierFineTuningJobIntegration](../models/classifierfinetuningjobintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | | `job_type` | *Literal["classifier"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). | | `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierdetailedjobout.md b/docs/models/classifierfinetuningjobdetails.md similarity index 94% rename from docs/models/classifierdetailedjobout.md rename to docs/models/classifierfinetuningjobdetails.md index fb532449..c5efdf1c 100644 --- a/docs/models/classifierdetailedjobout.md +++ b/docs/models/classifierfinetuningjobdetails.md @@ -1,4 +1,4 @@ -# ClassifierDetailedJobOut +# ClassifierFineTuningJobDetails ## Fields @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | N/A | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | -| `status` | [models.ClassifierDetailedJobOutStatus](../models/classifierdetailedjoboutstatus.md) | :heavy_check_mark: | N/A | +| `status` | [models.ClassifierFineTuningJobDetailsStatus](../models/classifierfinetuningjobdetailsstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | 
N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | @@ -16,11 +16,11 @@ | `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.ClassifierDetailedJobOutIntegration](../models/classifierdetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | +| `integrations` | List[[models.ClassifierFineTuningJobDetailsIntegration](../models/classifierfinetuningjobdetailsintegration.md)] | :heavy_minus_sign: | N/A | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | | `job_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | | `hyperparameters` | [models.ClassifierTrainingParameters](../models/classifiertrainingparameters.md) | :heavy_check_mark: | N/A | -| `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | -| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| `events` | List[[models.Event](../models/event.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| +| `checkpoints` | List[[models.Checkpoint](../models/checkpoint.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTargetResult](../models/classifiertargetresult.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierfinetuningjobdetailsintegration.md b/docs/models/classifierfinetuningjobdetailsintegration.md new file mode 100644 index 00000000..438a35d9 --- /dev/null +++ b/docs/models/classifierfinetuningjobdetailsintegration.md @@ -0,0 +1,11 @@ +# ClassifierFineTuningJobDetailsIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/classifierdetailedjoboutstatus.md b/docs/models/classifierfinetuningjobdetailsstatus.md similarity index 94% rename from docs/models/classifierdetailedjoboutstatus.md rename to docs/models/classifierfinetuningjobdetailsstatus.md index c3118aaf..058c6583 100644 --- a/docs/models/classifierdetailedjoboutstatus.md +++ b/docs/models/classifierfinetuningjobdetailsstatus.md @@ -1,4 +1,4 @@ -# ClassifierDetailedJobOutStatus +# ClassifierFineTuningJobDetailsStatus ## Values diff --git a/docs/models/classifierfinetuningjobintegration.md b/docs/models/classifierfinetuningjobintegration.md new file mode 100644 index 00000000..820aee4c --- /dev/null +++ b/docs/models/classifierfinetuningjobintegration.md @@ -0,0 +1,11 @@ +# ClassifierFineTuningJobIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/completionjoboutstatus.md b/docs/models/classifierfinetuningjobstatus.md similarity index 95% rename from docs/models/completionjoboutstatus.md rename to docs/models/classifierfinetuningjobstatus.md index 91754945..ca829885 100644 --- a/docs/models/completionjoboutstatus.md +++ b/docs/models/classifierfinetuningjobstatus.md @@ -1,4 
+1,4 @@ -# CompletionJobOutStatus +# ClassifierFineTuningJobStatus The current status of the fine-tuning job. diff --git a/docs/models/classifierftmodelout.md b/docs/models/classifierftmodelout.md deleted file mode 100644 index 6e7afbbe..00000000 --- a/docs/models/classifierftmodelout.md +++ /dev/null @@ -1,23 +0,0 @@ -# ClassifierFTModelOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetOut](../models/classifiertargetout.md)] | :heavy_check_mark: | N/A | -| `model_type` | *Literal["classifier"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/classifierjoboutintegration.md b/docs/models/classifierjoboutintegration.md deleted file mode 100644 index 33af8a70..00000000 --- a/docs/models/classifierjoboutintegration.md +++ /dev/null @@ -1,11 +0,0 @@ 
-# ClassifierJobOutIntegration - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/classifiertargetin.md b/docs/models/classifiertarget.md similarity index 99% rename from docs/models/classifiertargetin.md rename to docs/models/classifiertarget.md index 78cab67b..f8c99e2e 100644 --- a/docs/models/classifiertargetin.md +++ b/docs/models/classifiertarget.md @@ -1,4 +1,4 @@ -# ClassifierTargetIn +# ClassifierTarget ## Fields diff --git a/docs/models/classifiertargetout.md b/docs/models/classifiertargetresult.md similarity index 98% rename from docs/models/classifiertargetout.md rename to docs/models/classifiertargetresult.md index 57535ae5..ccadc623 100644 --- a/docs/models/classifiertargetout.md +++ b/docs/models/classifiertargetresult.md @@ -1,4 +1,4 @@ -# ClassifierTargetOut +# ClassifierTargetResult ## Fields diff --git a/docs/models/classifiertrainingparametersin.md b/docs/models/classifiertrainingparametersin.md deleted file mode 100644 index 1287c973..00000000 --- a/docs/models/classifiertrainingparametersin.md +++ /dev/null @@ -1,15 +0,0 @@ -# ClassifierTrainingParametersIn - -The fine-tuning hyperparameter settings used in a classifier fine-tune job. 
- - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. 
| -| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. | -| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | -| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | -| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/codeinterpretertool.md b/docs/models/codeinterpretertool.md index 544cda93..6302fc62 100644 --- a/docs/models/codeinterpretertool.md +++ b/docs/models/codeinterpretertool.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | -| `type` | *Literal["code_interpreter"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | 
---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["code_interpreter"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionargs.md b/docs/models/completionargs.md index 60d09137..148f7608 100644 --- a/docs/models/completionargs.md +++ b/docs/models/completionargs.md @@ -5,15 +5,15 @@ White-listed arguments from the completion API ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | | -| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | -| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | -| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | -| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | -| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | -| `prediction` | [OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | -| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | {
"type": "text"
} | -| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `stop` | [OptionalNullable[models.CompletionArgsStop]](../models/completionargsstop.md) | :heavy_minus_sign: | N/A | | +| `presence_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `frequency_penalty` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `temperature` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `top_p` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | | +| `max_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | | +| `prediction` | [OptionalNullable[models.Prediction]](../models/prediction.md) | :heavy_minus_sign: | N/A | | +| `response_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | N/A | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `tool_choice` | [Optional[models.ToolChoiceEnum]](../models/toolchoiceenum.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/completiondetailedjoboutintegration.md b/docs/models/completiondetailedjoboutintegration.md deleted file mode 100644 index 9e526053..00000000 --- a/docs/models/completiondetailedjoboutintegration.md +++ /dev/null @@ -1,11 +0,0 @@ -# CompletionDetailedJobOutIntegration - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/completiondetailedjoboutrepository.md b/docs/models/completiondetailedjoboutrepository.md deleted file mode 100644 index 92a7b75c..00000000 --- a/docs/models/completiondetailedjoboutrepository.md +++ /dev/null @@ -1,11 +0,0 @@ -# CompletionDetailedJobOutRepository - - -## Supported Types - -### `models.GithubRepositoryOut` - -```python -value: models.GithubRepositoryOut = /* values here */ -``` - diff --git a/docs/models/completionfinetunedmodel.md b/docs/models/completionfinetunedmodel.md new file mode 100644 index 00000000..0055db02 --- /dev/null +++ b/docs/models/completionfinetunedmodel.md @@ -0,0 +1,22 @@ +# CompletionFineTunedModel + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | +| `created` | *int* | :heavy_check_mark: | N/A | +| `owned_by` | *str* | :heavy_check_mark: | N/A | +| `workspace_id` | *str* | :heavy_check_mark: | N/A | +| `root` | *str* | :heavy_check_mark: | N/A | +| `root_version` | *str* | :heavy_check_mark: | N/A | 
+| `archived` | *bool* | :heavy_check_mark: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `capabilities` | [models.FineTunedModelCapabilities](../models/finetunedmodelcapabilities.md) | :heavy_check_mark: | N/A | +| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | +| `job` | *str* | :heavy_check_mark: | N/A | +| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionjobout.md b/docs/models/completionfinetuningjob.md similarity index 97% rename from docs/models/completionjobout.md rename to docs/models/completionfinetuningjob.md index 5eb44eef..83c0ae7e 100644 --- a/docs/models/completionjobout.md +++ b/docs/models/completionfinetuningjob.md @@ -1,4 +1,4 @@ -# CompletionJobOut +# CompletionFineTuningJob ## Fields @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | The ID of the job. | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | -| `status` | [models.CompletionJobOutStatus](../models/completionjoboutstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | +| `status` | [models.CompletionFineTuningJobStatus](../models/completionfinetuningjobstatus.md) | :heavy_check_mark: | The current status of the fine-tuning job. | | `created_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was created. | | `modified_at` | *int* | :heavy_check_mark: | The UNIX timestamp (in seconds) for when the fine-tuning job was last modified. | | `training_files` | List[*str*] | :heavy_check_mark: | A list containing the IDs of uploaded files that contain training data. | @@ -16,9 +16,9 @@ | `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | The object type of the fine-tuning job. 
| | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`. | -| `integrations` | List[[models.CompletionJobOutIntegration](../models/completionjoboutintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | +| `integrations` | List[[models.CompletionFineTuningJobIntegration](../models/completionfinetuningjobintegration.md)] | :heavy_minus_sign: | A list of integrations enabled for your fine-tuning job. | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | Total number of tokens trained. | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | | `job_type` | *Literal["completion"]* | :heavy_check_mark: | The type of job (`FT` for fine-tuning). 
| | `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.CompletionJobOutRepository](../models/completionjoboutrepository.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `repositories` | List[[models.CompletionFineTuningJobRepository](../models/completionfinetuningjobrepository.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completiondetailedjobout.md b/docs/models/completionfinetuningjobdetails.md similarity index 94% rename from docs/models/completiondetailedjobout.md rename to docs/models/completionfinetuningjobdetails.md index bc7e5d1c..3c54e874 100644 --- a/docs/models/completiondetailedjobout.md +++ b/docs/models/completionfinetuningjobdetails.md @@ -1,4 +1,4 @@ -# CompletionDetailedJobOut +# CompletionFineTuningJobDetails ## Fields @@ -8,7 +8,7 @@ | `id` | *str* | :heavy_check_mark: | N/A | | `auto_start` | *bool* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | -| `status` | [models.CompletionDetailedJobOutStatus](../models/completiondetailedjoboutstatus.md) | :heavy_check_mark: | N/A | +| `status` | [models.CompletionFineTuningJobDetailsStatus](../models/completionfinetuningjobdetailsstatus.md) | :heavy_check_mark: | N/A | | `created_at` | *int* | :heavy_check_mark: | N/A | | `modified_at` | *int* | :heavy_check_mark: | N/A | | `training_files` | List[*str*] | :heavy_check_mark: | N/A | @@ -16,11 +16,11 @@ | `object` | *Optional[Literal["job"]]* | :heavy_minus_sign: | N/A | | `fine_tuned_model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `integrations` | List[[models.CompletionDetailedJobOutIntegration](../models/completiondetailedjoboutintegration.md)] | :heavy_minus_sign: | N/A | +| `integrations` | 
List[[models.CompletionFineTuningJobDetailsIntegration](../models/completionfinetuningjobdetailsintegration.md)] | :heavy_minus_sign: | N/A | | `trained_tokens` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `metadata` | [OptionalNullable[models.JobMetadataOut]](../models/jobmetadataout.md) | :heavy_minus_sign: | N/A | +| `metadata` | [OptionalNullable[models.JobMetadata]](../models/jobmetadata.md) | :heavy_minus_sign: | N/A | | `job_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | | `hyperparameters` | [models.CompletionTrainingParameters](../models/completiontrainingparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.CompletionDetailedJobOutRepository](../models/completiondetailedjoboutrepository.md)] | :heavy_minus_sign: | N/A | -| `events` | List[[models.EventOut](../models/eventout.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. | -| `checkpoints` | List[[models.CheckpointOut](../models/checkpointout.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `repositories` | List[[models.CompletionFineTuningJobDetailsRepository](../models/completionfinetuningjobdetailsrepository.md)] | :heavy_minus_sign: | N/A | +| `events` | List[[models.Event](../models/event.md)] | :heavy_minus_sign: | Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here. 
| +| `checkpoints` | List[[models.Checkpoint](../models/checkpoint.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/completionfinetuningjobdetailsintegration.md b/docs/models/completionfinetuningjobdetailsintegration.md new file mode 100644 index 00000000..38f6a349 --- /dev/null +++ b/docs/models/completionfinetuningjobdetailsintegration.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobDetailsIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git a/docs/models/completionfinetuningjobdetailsrepository.md b/docs/models/completionfinetuningjobdetailsrepository.md new file mode 100644 index 00000000..c6bd67cd --- /dev/null +++ b/docs/models/completionfinetuningjobdetailsrepository.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobDetailsRepository + + +## Supported Types + +### `models.GithubRepository` + +```python +value: models.GithubRepository = /* values here */ +``` + diff --git a/docs/models/completiondetailedjoboutstatus.md b/docs/models/completionfinetuningjobdetailsstatus.md similarity index 94% rename from docs/models/completiondetailedjoboutstatus.md rename to docs/models/completionfinetuningjobdetailsstatus.md index b80525ba..94d795a9 100644 --- a/docs/models/completiondetailedjoboutstatus.md +++ b/docs/models/completionfinetuningjobdetailsstatus.md @@ -1,4 +1,4 @@ -# CompletionDetailedJobOutStatus +# CompletionFineTuningJobDetailsStatus ## Values diff --git a/docs/models/completionfinetuningjobintegration.md b/docs/models/completionfinetuningjobintegration.md new file mode 100644 index 00000000..dbe57417 --- /dev/null +++ b/docs/models/completionfinetuningjobintegration.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobIntegration + + +## Supported Types + +### `models.WandbIntegrationResult` + +```python +value: models.WandbIntegrationResult = /* values here */ +``` + diff --git 
a/docs/models/completionfinetuningjobrepository.md b/docs/models/completionfinetuningjobrepository.md new file mode 100644 index 00000000..54225e27 --- /dev/null +++ b/docs/models/completionfinetuningjobrepository.md @@ -0,0 +1,11 @@ +# CompletionFineTuningJobRepository + + +## Supported Types + +### `models.GithubRepository` + +```python +value: models.GithubRepository = /* values here */ +``` + diff --git a/docs/models/classifierjoboutstatus.md b/docs/models/completionfinetuningjobstatus.md similarity index 95% rename from docs/models/classifierjoboutstatus.md rename to docs/models/completionfinetuningjobstatus.md index 4520f164..db151a1b 100644 --- a/docs/models/classifierjoboutstatus.md +++ b/docs/models/completionfinetuningjobstatus.md @@ -1,4 +1,4 @@ -# ClassifierJobOutStatus +# CompletionFineTuningJobStatus The current status of the fine-tuning job. diff --git a/docs/models/completionftmodelout.md b/docs/models/completionftmodelout.md deleted file mode 100644 index ccd4844f..00000000 --- a/docs/models/completionftmodelout.md +++ /dev/null @@ -1,22 +0,0 @@ -# CompletionFTModelOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `object` | *Optional[Literal["model"]]* | :heavy_minus_sign: | N/A | -| `created` | *int* | :heavy_check_mark: | N/A | -| `owned_by` | *str* | :heavy_check_mark: | N/A | -| `workspace_id` | *str* | :heavy_check_mark: | N/A | -| `root` | *str* | :heavy_check_mark: | N/A | -| `root_version` | *str* | :heavy_check_mark: | N/A | -| `archived` | *bool* | :heavy_check_mark: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | 
:heavy_minus_sign: | N/A | -| `capabilities` | [models.FTModelCapabilitiesOut](../models/ftmodelcapabilitiesout.md) | :heavy_check_mark: | N/A | -| `max_context_length` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `aliases` | List[*str*] | :heavy_minus_sign: | N/A | -| `job` | *str* | :heavy_check_mark: | N/A | -| `model_type` | *Literal["completion"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/completionjoboutintegration.md b/docs/models/completionjoboutintegration.md deleted file mode 100644 index 6474747b..00000000 --- a/docs/models/completionjoboutintegration.md +++ /dev/null @@ -1,11 +0,0 @@ -# CompletionJobOutIntegration - - -## Supported Types - -### `models.WandbIntegrationOut` - -```python -value: models.WandbIntegrationOut = /* values here */ -``` - diff --git a/docs/models/completionjoboutrepository.md b/docs/models/completionjoboutrepository.md deleted file mode 100644 index 52f65558..00000000 --- a/docs/models/completionjoboutrepository.md +++ /dev/null @@ -1,11 +0,0 @@ -# CompletionJobOutRepository - - -## Supported Types - -### `models.GithubRepositoryOut` - -```python -value: models.GithubRepositoryOut = /* values here */ -``` - diff --git a/docs/models/completiontrainingparametersin.md b/docs/models/completiontrainingparametersin.md deleted file mode 100644 index 9fcc714e..00000000 --- a/docs/models/completiontrainingparametersin.md +++ /dev/null @@ -1,16 +0,0 @@ -# CompletionTrainingParametersIn - -The fine-tuning hyperparameter settings used in a fine-tune job. 
- - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `training_steps` | *OptionalNullable[int]* | :heavy_minus_sign: | The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset. 
| -| `learning_rate` | *Optional[float]* | :heavy_minus_sign: | A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process. | -| `weight_decay` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large. | -| `warmup_fraction` | *OptionalNullable[float]* | :heavy_minus_sign: | (Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune) | -| `epochs` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `seq_len` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `fim_ratio` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/confirmation.md b/docs/models/confirmation.md new file mode 100644 index 00000000..fd6e6aaa --- /dev/null +++ b/docs/models/confirmation.md @@ -0,0 +1,9 @@ +# Confirmation + + +## Values + +| Name | Value | +| ------- | ------- | +| `ALLOW` | allow | +| `DENY` | deny | \ No newline at end of file diff --git a/docs/models/conversationappendrequest.md b/docs/models/conversationappendrequest.md index 1cdb584b..78a96508 100644 --- a/docs/models/conversationappendrequest.md +++ b/docs/models/conversationappendrequest.md @@ -5,8 +5,9 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationappendstreamrequest.md b/docs/models/conversationappendstreamrequest.md index a8516ea7..daea9c52 100644 --- a/docs/models/conversationappendstreamrequest.md +++ b/docs/models/conversationappendstreamrequest.md @@ -5,8 +5,9 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | 
-------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | \ No newline at end of file +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationhistory.md b/docs/models/conversationhistory.md index c8baad0b..daefe336 100644 --- a/docs/models/conversationhistory.md +++ b/docs/models/conversationhistory.md @@ -5,8 +5,8 @@ Retrieve all entries in a conversation. 
## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `object` | [Optional[models.ConversationHistoryObject]](../models/conversationhistoryobject.md) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `entries` | List[[models.Entry](../models/entry.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | +| `object` | *Optional[Literal["conversation.history"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `entries` | List[[models.Entry](../models/entry.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationhistoryobject.md b/docs/models/conversationhistoryobject.md deleted file mode 100644 index a14e7f9c..00000000 --- a/docs/models/conversationhistoryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ConversationHistoryObject - - -## Values - -| Name | Value | -| ---------------------- | ---------------------- | -| `CONVERSATION_HISTORY` | conversation.history | \ No newline at end of file diff --git a/docs/models/conversationmessages.md b/docs/models/conversationmessages.md index c3f00979..8fa51571 100644 --- a/docs/models/conversationmessages.md +++ b/docs/models/conversationmessages.md @@ -5,8 +5,8 @@ Similar to the conversation history but only keep the messages ## Fields -| Field | Type | Required | Description | -| 
-------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `object` | [Optional[models.ConversationMessagesObject]](../models/conversationmessagesobject.md) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `messages` | List[[models.MessageEntries](../models/messageentries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `object` | *Optional[Literal["conversation.messages"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `messages` | List[[models.MessageEntries](../models/messageentries.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationmessagesobject.md b/docs/models/conversationmessagesobject.md deleted file mode 100644 index db3a441b..00000000 --- a/docs/models/conversationmessagesobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ConversationMessagesObject - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `CONVERSATION_MESSAGES` | conversation.messages | \ No newline at end of file diff --git a/docs/models/conversationresponse.md b/docs/models/conversationresponse.md index e3182128..2732f785 100644 --- a/docs/models/conversationresponse.md +++ b/docs/models/conversationresponse.md @@ -5,9 +5,9 @@ The response after appending new entries to the conversation. 
## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -| `object` | [Optional[models.ConversationResponseObject]](../models/conversationresponseobject.md) | :heavy_minus_sign: | N/A | -| `conversation_id` | *str* | :heavy_check_mark: | N/A | -| `outputs` | List[[models.Output](../models/output.md)] | :heavy_check_mark: | N/A | -| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `object` | *Optional[Literal["conversation.response"]]* | :heavy_minus_sign: | N/A | +| `conversation_id` | *str* | :heavy_check_mark: | N/A | +| `outputs` | List[[models.Output](../models/output.md)] | :heavy_check_mark: | N/A | +| `usage` | [models.ConversationUsageInfo](../models/conversationusageinfo.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/conversationresponseobject.md b/docs/models/conversationresponseobject.md deleted file mode 100644 index bea66e52..00000000 --- a/docs/models/conversationresponseobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ConversationResponseObject - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `CONVERSATION_RESPONSE` | conversation.response | \ No newline at end of file diff --git a/docs/models/conversationrestartrequest.md 
b/docs/models/conversationrestartrequest.md index d9865312..ad3ff362 100644 --- a/docs/models/conversationrestartrequest.md +++ b/docs/models/conversationrestartrequest.md @@ -7,7 +7,7 @@ Request to restart a new conversation from a given entry in the conversation. | Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | diff --git a/docs/models/conversationrestartstreamrequest.md b/docs/models/conversationrestartstreamrequest.md index a5f8cbe7..865a1e8f 100644 --- a/docs/models/conversationrestartstreamrequest.md +++ b/docs/models/conversationrestartstreamrequest.md @@ -7,7 +7,7 @@ Request to restart a new conversation from a given entry in the conversation. 
| Field | Type | Required | Description | | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- | -| `inputs` | [models.ConversationInputs](../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | diff --git a/docs/models/conversationthinkchunk.md b/docs/models/conversationthinkchunk.md new file mode 100644 index 00000000..1fb16bd9 --- /dev/null +++ b/docs/models/conversationthinkchunk.md @@ -0,0 +1,10 @@ +# ConversationThinkChunk + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `type` | *Optional[Literal["thinking"]]* | :heavy_minus_sign: | N/A | +| `thinking` | List[[models.ConversationThinkChunkThinking](../models/conversationthinkchunkthinking.md)] | 
:heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/conversationthinkchunkthinking.md b/docs/models/conversationthinkchunkthinking.md new file mode 100644 index 00000000..84b80018 --- /dev/null +++ b/docs/models/conversationthinkchunkthinking.md @@ -0,0 +1,17 @@ +# ConversationThinkChunkThinking + + +## Supported Types + +### `models.TextChunk` + +```python +value: models.TextChunk = /* values here */ +``` + +### `models.ToolReferenceChunk` + +```python +value: models.ToolReferenceChunk = /* values here */ +``` + diff --git a/docs/models/agentupdaterequest.md b/docs/models/createagentrequest.md similarity index 80% rename from docs/models/agentupdaterequest.md rename to docs/models/createagentrequest.md index b1830d7b..cca3a079 100644 --- a/docs/models/agentupdaterequest.md +++ b/docs/models/createagentrequest.md @@ -1,4 +1,4 @@ -# AgentUpdateRequest +# CreateAgentRequest ## Fields @@ -6,12 +6,11 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentUpdateRequestTool](../models/agentupdaterequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `tools` | List[[models.CreateAgentRequestTool](../models/createagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. 
| | `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | | `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentupdaterequesttool.md b/docs/models/createagentrequesttool.md similarity index 96% rename from docs/models/agentupdaterequesttool.md rename to docs/models/createagentrequesttool.md index ce553126..c6ed3e98 100644 --- a/docs/models/agentupdaterequesttool.md +++ b/docs/models/createagentrequesttool.md @@ -1,4 +1,4 @@ -# AgentUpdateRequestTool +# CreateAgentRequestTool ## Supported Types diff --git a/docs/models/batchjobin.md b/docs/models/createbatchjobrequest.md similarity index 99% rename from docs/models/batchjobin.md rename to docs/models/createbatchjobrequest.md index 7dcf265d..d094e2d5 100644 --- a/docs/models/batchjobin.md +++ b/docs/models/createbatchjobrequest.md @@ -1,4 +1,4 @@ -# BatchJobIn +# CreateBatchJobRequest ## Fields @@ -8,7 +8,7 @@ | `input_files` | List[*str*] | :heavy_minus_sign: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a "body" field. 
An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | | `requests` | List[[models.BatchRequest](../models/batchrequest.md)] | :heavy_minus_sign: | N/A | | | `endpoint` | [models.APIEndpoint](../models/apiendpoint.md) | :heavy_check_mark: | N/A | | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | mistral-small-latest | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | **Example 1:** mistral-small-latest
**Example 2:** mistral-medium-latest | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | | `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | | `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. | | \ No newline at end of file diff --git a/docs/models/uploadfileout.md b/docs/models/createfileresponse.md similarity index 99% rename from docs/models/uploadfileout.md rename to docs/models/createfileresponse.md index 6f09c9a6..8152922b 100644 --- a/docs/models/uploadfileout.md +++ b/docs/models/createfileresponse.md @@ -1,4 +1,4 @@ -# UploadFileOut +# CreateFileResponse ## Fields diff --git a/docs/models/jobin.md b/docs/models/createfinetuningjobrequest.md similarity index 97% rename from docs/models/jobin.md rename to docs/models/createfinetuningjobrequest.md index 62da9072..a93e323d 100644 --- a/docs/models/jobin.md +++ b/docs/models/createfinetuningjobrequest.md @@ -1,4 +1,4 @@ -# JobIn +# CreateFineTuningJobRequest ## Fields @@ -9,10 +9,10 @@ | `training_files` | List[[models.TrainingFile](../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. 
For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.JobInIntegration](../models/jobinintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `integrations` | List[[models.CreateFineTuningJobRequestIntegration](../models/createfinetuningjobrequestintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. | | `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | | `job_type` | [OptionalNullable[models.FineTuneableModelType]](../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | | `hyperparameters` | [models.Hyperparameters](../models/hyperparameters.md) | :heavy_check_mark: | N/A | -| `repositories` | List[[models.JobInRepository](../models/jobinrepository.md)] | :heavy_minus_sign: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetIn](../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file +| `repositories` | List[[models.CreateFineTuningJobRequestRepository](../models/createfinetuningjobrequestrepository.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTarget](../models/classifiertarget.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/jobinintegration.md b/docs/models/createfinetuningjobrequestintegration.md similarity index 74% rename from docs/models/jobinintegration.md rename to docs/models/createfinetuningjobrequestintegration.md index 103820e7..0054a4a6 100644 --- a/docs/models/jobinintegration.md +++ b/docs/models/createfinetuningjobrequestintegration.md @@ -1,4 +1,4 @@ -# JobInIntegration +# CreateFineTuningJobRequestIntegration ## Supported Types diff --git a/docs/models/jobinrepository.md 
b/docs/models/createfinetuningjobrequestrepository.md similarity index 75% rename from docs/models/jobinrepository.md rename to docs/models/createfinetuningjobrequestrepository.md index e873ae63..32be1b6d 100644 --- a/docs/models/jobinrepository.md +++ b/docs/models/createfinetuningjobrequestrepository.md @@ -1,4 +1,4 @@ -# JobInRepository +# CreateFineTuningJobRequestRepository ## Supported Types diff --git a/docs/models/createfinetuningjobresponse.md b/docs/models/createfinetuningjobresponse.md deleted file mode 100644 index f82cd793..00000000 --- a/docs/models/createfinetuningjobresponse.md +++ /dev/null @@ -1,19 +0,0 @@ -# CreateFineTuningJobResponse - -OK - - -## Supported Types - -### `models.Response` - -```python -value: models.Response = /* values here */ -``` - -### `models.LegacyJobMetadataOut` - -```python -value: models.LegacyJobMetadataOut = /* values here */ -``` - diff --git a/docs/models/libraryin.md b/docs/models/createlibraryrequest.md similarity index 95% rename from docs/models/libraryin.md rename to docs/models/createlibraryrequest.md index d6b11914..71562806 100644 --- a/docs/models/libraryin.md +++ b/docs/models/createlibraryrequest.md @@ -1,4 +1,4 @@ -# LibraryIn +# CreateLibraryRequest ## Fields diff --git a/docs/models/deletefileout.md b/docs/models/deletefileresponse.md similarity index 97% rename from docs/models/deletefileout.md rename to docs/models/deletefileresponse.md index 4709cc49..188e2504 100644 --- a/docs/models/deletefileout.md +++ b/docs/models/deletefileresponse.md @@ -1,4 +1,4 @@ -# DeleteFileOut +# DeleteFileResponse ## Fields diff --git a/docs/models/deletemodelrequest.md b/docs/models/deletemodelv1modelsmodeliddeleterequest.md similarity index 94% rename from docs/models/deletemodelrequest.md rename to docs/models/deletemodelv1modelsmodeliddeleterequest.md index d80103f1..d9bc15fe 100644 --- a/docs/models/deletemodelrequest.md +++ b/docs/models/deletemodelv1modelsmodeliddeleterequest.md @@ -1,4 +1,4 @@ -# 
DeleteModelRequest +# DeleteModelV1ModelsModelIDDeleteRequest ## Fields diff --git a/docs/models/document.md b/docs/models/document.md index 509d43b7..284babb9 100644 --- a/docs/models/document.md +++ b/docs/models/document.md @@ -1,25 +1,26 @@ # Document -Document to run OCR on - - -## Supported Types - -### `models.FileChunk` - -```python -value: models.FileChunk = /* values here */ -``` - -### `models.DocumentURLChunk` - -```python -value: models.DocumentURLChunk = /* values here */ -``` - -### `models.ImageURLChunk` - -```python -value: models.ImageURLChunk = /* values here */ -``` +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | N/A | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `hash` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `mime_type` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `extension` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `size` | *Nullable[int]* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `summary` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `uploaded_by_id` | *Nullable[str]* | :heavy_check_mark: | N/A | +| `uploaded_by_type` | *str* | :heavy_check_mark: | N/A | +| `tokens_processing_main_content` | *OptionalNullable[int]* | :heavy_minus_sign: | 
N/A | +| `tokens_processing_summary` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `processing_status` | *str* | :heavy_check_mark: | N/A | +| `tokens_processing_total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/documentlibrarytool.md b/docs/models/documentlibrarytool.md index 1695bad4..95c3fa52 100644 --- a/docs/models/documentlibrarytool.md +++ b/docs/models/documentlibrarytool.md @@ -3,7 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | -| `type` | *Literal["document_library"]* | :heavy_check_mark: | N/A | -| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which to search. | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["document_library"]* | :heavy_check_mark: | N/A | +| `library_ids` | List[*str*] | :heavy_check_mark: | Ids of the library in which to search. 
| \ No newline at end of file diff --git a/docs/models/documentout.md b/docs/models/documentout.md deleted file mode 100644 index 28df11eb..00000000 --- a/docs/models/documentout.md +++ /dev/null @@ -1,26 +0,0 @@ -# DocumentOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `id` | *str* | :heavy_check_mark: | N/A | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `hash` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `mime_type` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `extension` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `size` | *Nullable[int]* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `summary` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `processing_status` | *str* | :heavy_check_mark: | N/A | -| `uploaded_by_id` | *Nullable[str]* | :heavy_check_mark: | N/A | -| `uploaded_by_type` | *str* | :heavy_check_mark: | N/A | -| `tokens_processing_main_content` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `tokens_processing_summary` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | -| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `attributes` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `tokens_processing_total` | *int* | :heavy_check_mark: | N/A | \ No newline at 
end of file diff --git a/docs/models/documentunion.md b/docs/models/documentunion.md new file mode 100644 index 00000000..e573bd46 --- /dev/null +++ b/docs/models/documentunion.md @@ -0,0 +1,25 @@ +# DocumentUnion + +Document to run OCR on + + +## Supported Types + +### `models.FileChunk` + +```python +value: models.FileChunk = /* values here */ +``` + +### `models.DocumentURLChunk` + +```python +value: models.DocumentURLChunk = /* values here */ +``` + +### `models.ImageURLChunk` + +```python +value: models.ImageURLChunk = /* values here */ +``` + diff --git a/docs/models/documentupdatein.md b/docs/models/documentupdatein.md deleted file mode 100644 index 0993886d..00000000 --- a/docs/models/documentupdatein.md +++ /dev/null @@ -1,9 +0,0 @@ -# DocumentUpdateIn - - -## Fields - -| Field | Type | Required | Description | -| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `attributes` | Dict[str, [models.Attributes](../models/attributes.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/documenturlchunk.md b/docs/models/documenturlchunk.md index 6c9a5b4d..9dbfbe50 100644 --- a/docs/models/documenturlchunk.md +++ b/docs/models/documenturlchunk.md @@ -3,8 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| `document_url` | *str* | :heavy_check_mark: | N/A | -| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | -| `type` | 
[Optional[models.DocumentURLChunkType]](../models/documenturlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------------------- | ----------------------------------- | ----------------------------------- | ----------------------------------- | +| `type` | *Optional[Literal["document_url"]]* | :heavy_minus_sign: | N/A | +| `document_url` | *str* | :heavy_check_mark: | N/A | +| `document_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The filename of the document | \ No newline at end of file diff --git a/docs/models/documenturlchunktype.md b/docs/models/documenturlchunktype.md deleted file mode 100644 index 32e1fa9e..00000000 --- a/docs/models/documenturlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# DocumentURLChunkType - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `DOCUMENT_URL` | document_url | \ No newline at end of file diff --git a/docs/models/eventout.md b/docs/models/event.md similarity index 98% rename from docs/models/eventout.md rename to docs/models/event.md index d9202353..3eebffca 100644 --- a/docs/models/eventout.md +++ b/docs/models/event.md @@ -1,4 +1,4 @@ -# EventOut +# Event ## Fields diff --git a/docs/models/downloadfilerequest.md b/docs/models/filesapiroutesdeletefilerequest.md similarity index 88% rename from docs/models/downloadfilerequest.md rename to docs/models/filesapiroutesdeletefilerequest.md index 3f4dc6cc..1b02c2db 100644 --- a/docs/models/downloadfilerequest.md +++ b/docs/models/filesapiroutesdeletefilerequest.md @@ -1,4 +1,4 @@ -# DownloadFileRequest +# FilesAPIRoutesDeleteFileRequest ## Fields diff --git a/docs/models/retrievefilerequest.md b/docs/models/filesapiroutesdownloadfilerequest.md similarity index 88% rename from docs/models/retrievefilerequest.md rename to docs/models/filesapiroutesdownloadfilerequest.md index 454b9665..8b28cb0e 100644 --- a/docs/models/retrievefilerequest.md +++ 
b/docs/models/filesapiroutesdownloadfilerequest.md @@ -1,4 +1,4 @@ -# RetrieveFileRequest +# FilesAPIRoutesDownloadFileRequest ## Fields diff --git a/docs/models/getfilesignedurlrequest.md b/docs/models/filesapiroutesgetsignedurlrequest.md similarity index 96% rename from docs/models/getfilesignedurlrequest.md rename to docs/models/filesapiroutesgetsignedurlrequest.md index 0be3b288..dbe3c801 100644 --- a/docs/models/getfilesignedurlrequest.md +++ b/docs/models/filesapiroutesgetsignedurlrequest.md @@ -1,4 +1,4 @@ -# GetFileSignedURLRequest +# FilesAPIRoutesGetSignedURLRequest ## Fields diff --git a/docs/models/listfilesrequest.md b/docs/models/filesapirouteslistfilesrequest.md similarity index 98% rename from docs/models/listfilesrequest.md rename to docs/models/filesapirouteslistfilesrequest.md index 2d76a76b..57d11722 100644 --- a/docs/models/listfilesrequest.md +++ b/docs/models/filesapirouteslistfilesrequest.md @@ -1,4 +1,4 @@ -# ListFilesRequest +# FilesAPIRoutesListFilesRequest ## Fields diff --git a/docs/models/deletefilerequest.md b/docs/models/filesapiroutesretrievefilerequest.md similarity index 88% rename from docs/models/deletefilerequest.md rename to docs/models/filesapiroutesretrievefilerequest.md index bceae901..961bae1f 100644 --- a/docs/models/deletefilerequest.md +++ b/docs/models/filesapiroutesretrievefilerequest.md @@ -1,4 +1,4 @@ -# DeleteFileRequest +# FilesAPIRoutesRetrieveFileRequest ## Fields diff --git a/docs/models/ftmodelcapabilitiesout.md b/docs/models/finetunedmodelcapabilities.md similarity index 95% rename from docs/models/ftmodelcapabilitiesout.md rename to docs/models/finetunedmodelcapabilities.md index 19690476..d3203a2a 100644 --- a/docs/models/ftmodelcapabilitiesout.md +++ b/docs/models/finetunedmodelcapabilities.md @@ -1,4 +1,4 @@ -# FTModelCapabilitiesOut +# FineTunedModelCapabilities ## Fields diff --git a/docs/models/functioncallentry.md b/docs/models/functioncallentry.md index fd3aa5c5..2843db9d 100644 --- 
a/docs/models/functioncallentry.md +++ b/docs/models/functioncallentry.md @@ -3,13 +3,16 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `object` | [Optional[models.FunctionCallEntryObject]](../models/functioncallentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.FunctionCallEntryType]](../models/functioncallentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `tool_call_id` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `arguments` | [models.FunctionCallEntryArguments](../models/functioncallentryarguments.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["function.call"]]* | :heavy_minus_sign: | 
N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `arguments` | [models.FunctionCallEntryArguments](../models/functioncallentryarguments.md) | :heavy_check_mark: | N/A | +| `confirmation_status` | [OptionalNullable[models.FunctionCallEntryConfirmationStatus]](../models/functioncallentryconfirmationstatus.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncallentryconfirmationstatus.md b/docs/models/functioncallentryconfirmationstatus.md new file mode 100644 index 00000000..8948beb6 --- /dev/null +++ b/docs/models/functioncallentryconfirmationstatus.md @@ -0,0 +1,10 @@ +# FunctionCallEntryConfirmationStatus + + +## Values + +| Name | Value | +| --------- | --------- | +| `PENDING` | pending | +| `ALLOWED` | allowed | +| `DENIED` | denied | \ No newline at end of file diff --git a/docs/models/functioncallentryobject.md b/docs/models/functioncallentryobject.md deleted file mode 100644 index 3cf2e427..00000000 --- a/docs/models/functioncallentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionCallEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/functioncallentrytype.md b/docs/models/functioncallentrytype.md deleted file mode 100644 index 7ea34c52..00000000 --- a/docs/models/functioncallentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionCallEntryType - - -## Values - -| 
Name | Value | -| --------------- | --------------- | -| `FUNCTION_CALL` | function.call | \ No newline at end of file diff --git a/docs/models/functioncallevent.md b/docs/models/functioncallevent.md index f4062060..0e3a36d6 100644 --- a/docs/models/functioncallevent.md +++ b/docs/models/functioncallevent.md @@ -3,12 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `type` | *Literal["function.call.delta"]* | :heavy_check_mark: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `tool_call_id` | *str* | :heavy_check_mark: | N/A | -| `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------- | +| `type` | *Literal["function.call.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| 
`id` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | +| `confirmation_status` | [OptionalNullable[models.FunctionCallEventConfirmationStatus]](../models/functioncalleventconfirmationstatus.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/functioncalleventconfirmationstatus.md b/docs/models/functioncalleventconfirmationstatus.md new file mode 100644 index 00000000..4a3c8774 --- /dev/null +++ b/docs/models/functioncalleventconfirmationstatus.md @@ -0,0 +1,10 @@ +# FunctionCallEventConfirmationStatus + + +## Values + +| Name | Value | +| --------- | --------- | +| `PENDING` | pending | +| `ALLOWED` | allowed | +| `DENIED` | denied | \ No newline at end of file diff --git a/docs/models/functionresultentry.md b/docs/models/functionresultentry.md index 6df54d3d..6a77abfd 100644 --- a/docs/models/functionresultentry.md +++ b/docs/models/functionresultentry.md @@ -3,12 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------ | -| `object` | [Optional[models.FunctionResultEntryObject]](../models/functionresultentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.FunctionResultEntryType]](../models/functionresultentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | 
:heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `tool_call_id` | *str* | :heavy_check_mark: | N/A | -| `result` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["function.result"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `result` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/functionresultentryobject.md b/docs/models/functionresultentryobject.md deleted file mode 100644 index fe52e0a5..00000000 --- a/docs/models/functionresultentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionResultEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/functionresultentrytype.md b/docs/models/functionresultentrytype.md deleted file mode 100644 index 35c94d8e..00000000 --- a/docs/models/functionresultentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# FunctionResultEntryType - - -## Values - -| Name | 
Value | -| ----------------- | ----------------- | -| `FUNCTION_RESULT` | function.result | \ No newline at end of file diff --git a/docs/models/getdocumenttextcontentrequest.md b/docs/models/getdocumenttextcontentrequest.md deleted file mode 100644 index 85933401..00000000 --- a/docs/models/getdocumenttextcontentrequest.md +++ /dev/null @@ -1,9 +0,0 @@ -# GetDocumentTextContentRequest - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/retrievefileout.md b/docs/models/getfileresponse.md similarity index 99% rename from docs/models/retrievefileout.md rename to docs/models/getfileresponse.md index 28f97dd2..0edd13e0 100644 --- a/docs/models/retrievefileout.md +++ b/docs/models/getfileresponse.md @@ -1,4 +1,4 @@ -# RetrieveFileOut +# GetFileResponse ## Fields diff --git a/docs/models/getfinetuningjobresponse.md b/docs/models/getfinetuningjobresponse.md deleted file mode 100644 index 1b0568dd..00000000 --- a/docs/models/getfinetuningjobresponse.md +++ /dev/null @@ -1,19 +0,0 @@ -# GetFineTuningJobResponse - -OK - - -## Supported Types - -### `models.ClassifierDetailedJobOut` - -```python -value: models.ClassifierDetailedJobOut = /* values here */ -``` - -### `models.CompletionDetailedJobOut` - -```python -value: models.CompletionDetailedJobOut = /* values here */ -``` - diff --git a/docs/models/filesignedurl.md b/docs/models/getsignedurlresponse.md similarity index 92% rename from docs/models/filesignedurl.md rename to docs/models/getsignedurlresponse.md index 52ce3f4f..bde69323 100644 --- a/docs/models/filesignedurl.md +++ b/docs/models/getsignedurlresponse.md @@ -1,4 +1,4 @@ -# FileSignedURL +# GetSignedURLResponse ## Fields diff --git a/docs/models/githubrepositoryout.md b/docs/models/githubrepository.md similarity 
index 97% rename from docs/models/githubrepositoryout.md rename to docs/models/githubrepository.md index fe38393a..827b6f34 100644 --- a/docs/models/githubrepositoryout.md +++ b/docs/models/githubrepository.md @@ -1,4 +1,4 @@ -# GithubRepositoryOut +# GithubRepository ## Fields diff --git a/docs/models/hyperparameters.md b/docs/models/hyperparameters.md index 46a6dd6b..b6c00c36 100644 --- a/docs/models/hyperparameters.md +++ b/docs/models/hyperparameters.md @@ -3,15 +3,15 @@ ## Supported Types -### `models.CompletionTrainingParametersIn` +### `models.CompletionTrainingParameters` ```python -value: models.CompletionTrainingParametersIn = /* values here */ +value: models.CompletionTrainingParameters = /* values here */ ``` -### `models.ClassifierTrainingParametersIn` +### `models.ClassifierTrainingParameters` ```python -value: models.ClassifierTrainingParametersIn = /* values here */ +value: models.ClassifierTrainingParameters = /* values here */ ``` diff --git a/docs/models/imagedetail.md b/docs/models/imagedetail.md new file mode 100644 index 00000000..1e5ba3fd --- /dev/null +++ b/docs/models/imagedetail.md @@ -0,0 +1,10 @@ +# ImageDetail + + +## Values + +| Name | Value | +| ------ | ------ | +| `LOW` | low | +| `AUTO` | auto | +| `HIGH` | high | \ No newline at end of file diff --git a/docs/models/imagegenerationtool.md b/docs/models/imagegenerationtool.md index 0c8de72c..b476b6f2 100644 --- a/docs/models/imagegenerationtool.md +++ b/docs/models/imagegenerationtool.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------------- | ----------------------------- | ----------------------------- | ----------------------------- | -| `type` | *Literal["image_generation"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | 
---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["image_generation"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/imageurl.md b/docs/models/imageurl.md index 7c2bcbc3..6358e0ac 100644 --- a/docs/models/imageurl.md +++ b/docs/models/imageurl.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `url` | *str* | :heavy_check_mark: | N/A | -| `detail` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | N/A | +| `detail` | [OptionalNullable[models.ImageDetail]](../models/imagedetail.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/imageurlchunk.md b/docs/models/imageurlchunk.md index 43078c78..db0c53d2 100644 --- a/docs/models/imageurlchunk.md +++ b/docs/models/imageurlchunk.md @@ -5,7 +5,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | -| `type` | 
[Optional[models.ImageURLChunkType]](../models/imageurlchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `type` | *Optional[Literal["image_url"]]* | :heavy_minus_sign: | N/A | +| `image_url` | [models.ImageURLUnion](../models/imageurlunion.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/imageurlchunktype.md b/docs/models/imageurlchunktype.md deleted file mode 100644 index 2064a0b4..00000000 --- a/docs/models/imageurlchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ImageURLChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `IMAGE_URL` | image_url | \ No newline at end of file diff --git a/docs/models/inputs.md b/docs/models/inputs.md index 0f62a7ce..d5771207 100644 --- a/docs/models/inputs.md +++ b/docs/models/inputs.md @@ -5,10 +5,10 @@ Chat to classify ## Supported Types -### `models.InstructRequestInputs` +### `models.InstructRequest` ```python -value: models.InstructRequestInputs = /* values here */ +value: models.InstructRequest = /* values here */ ``` ### `List[models.InstructRequest]` diff --git a/docs/models/inputsmessage.md b/docs/models/inputsmessage.md deleted file mode 100644 index e3543fb4..00000000 --- a/docs/models/inputsmessage.md +++ /dev/null @@ -1,29 +0,0 @@ -# InputsMessage - - -## Supported Types - -### `models.AssistantMessage` - -```python -value: models.AssistantMessage = /* values here */ -``` - -### `models.SystemMessage` - -```python -value: models.SystemMessage = /* values here */ -``` - -### `models.ToolMessage` - -```python -value: models.ToolMessage = /* values here */ -``` - -### `models.UserMessage` - -```python -value: models.UserMessage = /* values here */ -``` - diff --git 
a/docs/models/instructrequestinputs.md b/docs/models/instructrequestinputs.md deleted file mode 100644 index 931ae5e4..00000000 --- a/docs/models/instructrequestinputs.md +++ /dev/null @@ -1,8 +0,0 @@ -# InstructRequestInputs - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `messages` | List[[models.InputsMessage](../models/inputsmessage.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobmetadataout.md b/docs/models/jobmetadata.md similarity index 98% rename from docs/models/jobmetadataout.md rename to docs/models/jobmetadata.md index 6218a161..5d8a89dd 100644 --- a/docs/models/jobmetadataout.md +++ b/docs/models/jobmetadata.md @@ -1,4 +1,4 @@ -# JobMetadataOut +# JobMetadata ## Fields diff --git a/docs/models/cancelbatchjobrequest.md b/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md similarity index 86% rename from docs/models/cancelbatchjobrequest.md rename to docs/models/jobsapiroutesbatchcancelbatchjobrequest.md index f31f843b..c19d0241 100644 --- a/docs/models/cancelbatchjobrequest.md +++ b/docs/models/jobsapiroutesbatchcancelbatchjobrequest.md @@ -1,4 +1,4 @@ -# CancelBatchJobRequest +# JobsAPIRoutesBatchCancelBatchJobRequest ## Fields diff --git a/docs/models/getbatchjobrequest.md b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md similarity index 92% rename from docs/models/getbatchjobrequest.md rename to docs/models/jobsapiroutesbatchgetbatchjobrequest.md index f3c67eb4..8c259bea 100644 --- a/docs/models/getbatchjobrequest.md +++ b/docs/models/jobsapiroutesbatchgetbatchjobrequest.md @@ -1,4 +1,4 @@ -# GetBatchJobRequest +# JobsAPIRoutesBatchGetBatchJobRequest ## Fields diff --git a/docs/models/listbatchjobsrequest.md 
b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md similarity index 98% rename from docs/models/listbatchjobsrequest.md rename to docs/models/jobsapiroutesbatchgetbatchjobsrequest.md index 19981b24..5ceb0b2c 100644 --- a/docs/models/listbatchjobsrequest.md +++ b/docs/models/jobsapiroutesbatchgetbatchjobsrequest.md @@ -1,4 +1,4 @@ -# ListBatchJobsRequest +# JobsAPIRoutesBatchGetBatchJobsRequest ## Fields diff --git a/docs/models/archivemodelrequest.md b/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md similarity index 93% rename from docs/models/archivemodelrequest.md rename to docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md index 806d135e..f9700df5 100644 --- a/docs/models/archivemodelrequest.md +++ b/docs/models/jobsapiroutesfinetuningarchivefinetunedmodelrequest.md @@ -1,4 +1,4 @@ -# ArchiveModelRequest +# JobsAPIRoutesFineTuningArchiveFineTunedModelRequest ## Fields diff --git a/docs/models/cancelfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md similarity index 88% rename from docs/models/cancelfinetuningjobrequest.md rename to docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md index 6525788c..883cbac6 100644 --- a/docs/models/cancelfinetuningjobrequest.md +++ b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobrequest.md @@ -1,4 +1,4 @@ -# CancelFineTuningJobRequest +# JobsAPIRoutesFineTuningCancelFineTuningJobRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md new file mode 100644 index 00000000..fb62eb62 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningCancelFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierFineTuningJobDetails` + +```python +value: models.ClassifierFineTuningJobDetails = /* values here */ +``` + +### 
`models.CompletionFineTuningJobDetails` + +```python +value: models.CompletionFineTuningJobDetails = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md new file mode 100644 index 00000000..7b52e2ca --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningCreateFineTuningJobResponse + +OK + + +## Supported Types + +### `models.Response` + +```python +value: models.Response = /* values here */ +``` + +### `models.LegacyJobMetadata` + +```python +value: models.LegacyJobMetadata = /* values here */ +``` + diff --git a/docs/models/getfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md similarity index 89% rename from docs/models/getfinetuningjobrequest.md rename to docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md index f20cb214..fde19800 100644 --- a/docs/models/getfinetuningjobrequest.md +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobrequest.md @@ -1,4 +1,4 @@ -# GetFineTuningJobRequest +# JobsAPIRoutesFineTuningGetFineTuningJobRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md new file mode 100644 index 00000000..f7705327 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierFineTuningJobDetails` + +```python +value: models.ClassifierFineTuningJobDetails = /* values here */ +``` + +### `models.CompletionFineTuningJobDetails` + +```python +value: models.CompletionFineTuningJobDetails = /* values here */ +``` + diff --git a/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md new file mode 
100644 index 00000000..23c52c34 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsrequest.md @@ -0,0 +1,17 @@ +# JobsAPIRoutesFineTuningGetFineTuningJobsRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | +| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. 
| +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file diff --git a/docs/models/listfinetuningjobsstatus.md b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md similarity index 94% rename from docs/models/listfinetuningjobsstatus.md rename to docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md index 07db9ae5..40d57686 100644 --- a/docs/models/listfinetuningjobsstatus.md +++ b/docs/models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md @@ -1,4 +1,4 @@ -# ListFineTuningJobsStatus +# JobsAPIRoutesFineTuningGetFineTuningJobsStatus The current job state to filter on. When set, the other results are not displayed. 
diff --git a/docs/models/startfinetuningjobrequest.md b/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md similarity index 84% rename from docs/models/startfinetuningjobrequest.md rename to docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md index 9df5aee8..4429fe48 100644 --- a/docs/models/startfinetuningjobrequest.md +++ b/docs/models/jobsapiroutesfinetuningstartfinetuningjobrequest.md @@ -1,4 +1,4 @@ -# StartFineTuningJobRequest +# JobsAPIRoutesFineTuningStartFineTuningJobRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md new file mode 100644 index 00000000..1a7e71d4 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningstartfinetuningjobresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningStartFineTuningJobResponse + +OK + + +## Supported Types + +### `models.ClassifierFineTuningJobDetails` + +```python +value: models.ClassifierFineTuningJobDetails = /* values here */ +``` + +### `models.CompletionFineTuningJobDetails` + +```python +value: models.CompletionFineTuningJobDetails = /* values here */ +``` + diff --git a/docs/models/unarchivemodelrequest.md b/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md similarity index 92% rename from docs/models/unarchivemodelrequest.md rename to docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md index 033dad8a..95c1734d 100644 --- a/docs/models/unarchivemodelrequest.md +++ b/docs/models/jobsapiroutesfinetuningunarchivefinetunedmodelrequest.md @@ -1,4 +1,4 @@ -# UnarchiveModelRequest +# JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest ## Fields diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md new file mode 100644 index 00000000..dbe49a86 --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md @@ -0,0 +1,9 @@ +# 
JobsAPIRoutesFineTuningUpdateFineTunedModelRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | +| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | +| `update_model_request` | [models.UpdateModelRequest](../models/updatemodelrequest.md) | :heavy_check_mark: | N/A | | \ No newline at end of file diff --git a/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md new file mode 100644 index 00000000..f40350bf --- /dev/null +++ b/docs/models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md @@ -0,0 +1,19 @@ +# JobsAPIRoutesFineTuningUpdateFineTunedModelResponse + +OK + + +## Supported Types + +### `models.ClassifierFineTunedModel` + +```python +value: models.ClassifierFineTunedModel = /* values here */ +``` + +### `models.CompletionFineTunedModel` + +```python +value: models.CompletionFineTunedModel = /* values here */ +``` + diff --git a/docs/models/jobsout.md b/docs/models/jobsout.md deleted file mode 100644 index 69f8342a..00000000 --- a/docs/models/jobsout.md +++ /dev/null @@ -1,10 +0,0 @@ -# JobsOut - - -## Fields - -| Field | Type | Required | Description | -| ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | -| `data` | List[[models.JobsOutData](../models/jobsoutdata.md)] | :heavy_minus_sign: | N/A | -| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | -| `total` | *int* | 
:heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/jobsoutdata.md b/docs/models/jobsoutdata.md deleted file mode 100644 index 28cec311..00000000 --- a/docs/models/jobsoutdata.md +++ /dev/null @@ -1,17 +0,0 @@ -# JobsOutData - - -## Supported Types - -### `models.ClassifierJobOut` - -```python -value: models.ClassifierJobOut = /* values here */ -``` - -### `models.CompletionJobOut` - -```python -value: models.CompletionJobOut = /* values here */ -``` - diff --git a/docs/models/legacyjobmetadataout.md b/docs/models/legacyjobmetadata.md similarity index 99% rename from docs/models/legacyjobmetadataout.md rename to docs/models/legacyjobmetadata.md index 8a712140..4705ab4f 100644 --- a/docs/models/legacyjobmetadataout.md +++ b/docs/models/legacyjobmetadata.md @@ -1,4 +1,4 @@ -# LegacyJobMetadataOut +# LegacyJobMetadata ## Fields diff --git a/docs/models/getlibraryrequest.md b/docs/models/librariesdeletev1request.md similarity index 90% rename from docs/models/getlibraryrequest.md rename to docs/models/librariesdeletev1request.md index 2a3acf50..68d7e543 100644 --- a/docs/models/getlibraryrequest.md +++ b/docs/models/librariesdeletev1request.md @@ -1,4 +1,4 @@ -# GetLibraryRequest +# LibrariesDeleteV1Request ## Fields diff --git a/docs/models/getdocumentstatusrequest.md b/docs/models/librariesdocumentsdeletev1request.md similarity index 90% rename from docs/models/getdocumentstatusrequest.md rename to docs/models/librariesdocumentsdeletev1request.md index 3557d773..efccdb1b 100644 --- a/docs/models/getdocumentstatusrequest.md +++ b/docs/models/librariesdocumentsdeletev1request.md @@ -1,4 +1,4 @@ -# GetDocumentStatusRequest +# LibrariesDocumentsDeleteV1Request ## Fields diff --git a/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md b/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md new file mode 100644 index 00000000..14ca66f7 --- /dev/null +++ 
b/docs/models/librariesdocumentsgetextractedtextsignedurlv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetExtractedTextSignedURLV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/librariesdocumentsgetsignedurlv1request.md b/docs/models/librariesdocumentsgetsignedurlv1request.md new file mode 100644 index 00000000..7c08c180 --- /dev/null +++ b/docs/models/librariesdocumentsgetsignedurlv1request.md @@ -0,0 +1,9 @@ +# LibrariesDocumentsGetSignedURLV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getdocumentrequest.md b/docs/models/librariesdocumentsgetstatusv1request.md similarity index 90% rename from docs/models/getdocumentrequest.md rename to docs/models/librariesdocumentsgetstatusv1request.md index 29f62127..e6d41875 100644 --- a/docs/models/getdocumentrequest.md +++ b/docs/models/librariesdocumentsgetstatusv1request.md @@ -1,4 +1,4 @@ -# GetDocumentRequest +# LibrariesDocumentsGetStatusV1Request ## Fields diff --git a/docs/models/getdocumentextractedtextsignedurlrequest.md b/docs/models/librariesdocumentsgettextcontentv1request.md similarity index 89% rename from docs/models/getdocumentextractedtextsignedurlrequest.md rename to docs/models/librariesdocumentsgettextcontentv1request.md index ff703802..2f58a446 100644 --- a/docs/models/getdocumentextractedtextsignedurlrequest.md +++ b/docs/models/librariesdocumentsgettextcontentv1request.md @@ -1,4 +1,4 @@ -# GetDocumentExtractedTextSignedURLRequest +# 
LibrariesDocumentsGetTextContentV1Request ## Fields diff --git a/docs/models/getdocumentsignedurlrequest.md b/docs/models/librariesdocumentsgetv1request.md similarity index 91% rename from docs/models/getdocumentsignedurlrequest.md rename to docs/models/librariesdocumentsgetv1request.md index 72a179c0..6febc058 100644 --- a/docs/models/getdocumentsignedurlrequest.md +++ b/docs/models/librariesdocumentsgetv1request.md @@ -1,4 +1,4 @@ -# GetDocumentSignedURLRequest +# LibrariesDocumentsGetV1Request ## Fields diff --git a/docs/models/listdocumentsrequest.md b/docs/models/librariesdocumentslistv1request.md similarity index 96% rename from docs/models/listdocumentsrequest.md rename to docs/models/librariesdocumentslistv1request.md index 369e8edb..44f63001 100644 --- a/docs/models/listdocumentsrequest.md +++ b/docs/models/librariesdocumentslistv1request.md @@ -1,4 +1,4 @@ -# ListDocumentsRequest +# LibrariesDocumentsListV1Request ## Fields diff --git a/docs/models/deletedocumentrequest.md b/docs/models/librariesdocumentsreprocessv1request.md similarity index 90% rename from docs/models/deletedocumentrequest.md rename to docs/models/librariesdocumentsreprocessv1request.md index eb060099..196ba17b 100644 --- a/docs/models/deletedocumentrequest.md +++ b/docs/models/librariesdocumentsreprocessv1request.md @@ -1,4 +1,4 @@ -# DeleteDocumentRequest +# LibrariesDocumentsReprocessV1Request ## Fields diff --git a/docs/models/librariesdocumentsupdatev1request.md b/docs/models/librariesdocumentsupdatev1request.md new file mode 100644 index 00000000..d4630850 --- /dev/null +++ b/docs/models/librariesdocumentsupdatev1request.md @@ -0,0 +1,10 @@ +# LibrariesDocumentsUpdateV1Request + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | 
------------------------------------------------------------------ | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `update_document_request` | [models.UpdateDocumentRequest](../models/updatedocumentrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/uploaddocumentrequest.md b/docs/models/librariesdocumentsuploadv1request.md similarity index 96% rename from docs/models/uploaddocumentrequest.md rename to docs/models/librariesdocumentsuploadv1request.md index 92152b7f..172a6183 100644 --- a/docs/models/uploaddocumentrequest.md +++ b/docs/models/librariesdocumentsuploadv1request.md @@ -1,4 +1,4 @@ -# UploadDocumentRequest +# LibrariesDocumentsUploadV1Request ## Fields diff --git a/docs/models/deletelibraryrequest.md b/docs/models/librariesgetv1request.md similarity index 91% rename from docs/models/deletelibraryrequest.md rename to docs/models/librariesgetv1request.md index c229ad73..6e1e04c3 100644 --- a/docs/models/deletelibraryrequest.md +++ b/docs/models/librariesgetv1request.md @@ -1,4 +1,4 @@ -# DeleteLibraryRequest +# LibrariesGetV1Request ## Fields diff --git a/docs/models/updateorcreatelibraryaccessrequest.md b/docs/models/librariessharecreatev1request.md similarity index 95% rename from docs/models/updateorcreatelibraryaccessrequest.md rename to docs/models/librariessharecreatev1request.md index e04567b4..4c05241d 100644 --- a/docs/models/updateorcreatelibraryaccessrequest.md +++ b/docs/models/librariessharecreatev1request.md @@ -1,4 +1,4 @@ -# UpdateOrCreateLibraryAccessRequest +# LibrariesShareCreateV1Request ## Fields diff --git a/docs/models/deletelibraryaccessrequest.md b/docs/models/librariessharedeletev1request.md similarity index 96% rename from docs/models/deletelibraryaccessrequest.md rename to docs/models/librariessharedeletev1request.md index c7034b98..850e22ab 100644 --- a/docs/models/deletelibraryaccessrequest.md +++ 
b/docs/models/librariessharedeletev1request.md @@ -1,4 +1,4 @@ -# DeleteLibraryAccessRequest +# LibrariesShareDeleteV1Request ## Fields diff --git a/docs/models/listlibraryaccessesrequest.md b/docs/models/librariessharelistv1request.md similarity index 90% rename from docs/models/listlibraryaccessesrequest.md rename to docs/models/librariessharelistv1request.md index d98bcda2..98bf6d17 100644 --- a/docs/models/listlibraryaccessesrequest.md +++ b/docs/models/librariessharelistv1request.md @@ -1,4 +1,4 @@ -# ListLibraryAccessesRequest +# LibrariesShareListV1Request ## Fields diff --git a/docs/models/librariesupdatev1request.md b/docs/models/librariesupdatev1request.md new file mode 100644 index 00000000..c5c142db --- /dev/null +++ b/docs/models/librariesupdatev1request.md @@ -0,0 +1,9 @@ +# LibrariesUpdateV1Request + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | +| `library_id` | *str* | :heavy_check_mark: | N/A | +| `update_library_request` | [models.UpdateLibraryRequest](../models/updatelibraryrequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/libraryout.md b/docs/models/library.md similarity index 99% rename from docs/models/libraryout.md rename to docs/models/library.md index ebf46d57..4319f43d 100644 --- a/docs/models/libraryout.md +++ b/docs/models/library.md @@ -1,4 +1,4 @@ -# LibraryOut +# Library ## Fields diff --git a/docs/models/libraryinupdate.md b/docs/models/libraryinupdate.md deleted file mode 100644 index 4aa169c7..00000000 --- a/docs/models/libraryinupdate.md +++ /dev/null @@ -1,9 +0,0 @@ -# LibraryInUpdate - - -## Fields - -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | 
----------------------- | ----------------------- | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/listbatchjobsresponse.md b/docs/models/listbatchjobsresponse.md new file mode 100644 index 00000000..c23e3220 --- /dev/null +++ b/docs/models/listbatchjobsresponse.md @@ -0,0 +1,10 @@ +# ListBatchJobsResponse + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | +| `data` | List[[models.BatchJob](../models/batchjob.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | +| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listdocumentout.md b/docs/models/listdocumentsresponse.md similarity index 90% rename from docs/models/listdocumentout.md rename to docs/models/listdocumentsresponse.md index f14157b8..47b9d3b7 100644 --- a/docs/models/listdocumentout.md +++ b/docs/models/listdocumentsresponse.md @@ -1,4 +1,4 @@ -# ListDocumentOut +# ListDocumentsResponse ## Fields @@ -6,4 +6,4 @@ | Field | Type | Required | Description | | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | ---------------------------------------------------- | | `pagination` | [models.PaginationInfo](../models/paginationinfo.md) | :heavy_check_mark: | N/A | -| `data` | List[[models.DocumentOut](../models/documentout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file +| `data` | List[[models.Document](../models/document.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listfilesout.md 
b/docs/models/listfilesresponse.md similarity index 98% rename from docs/models/listfilesout.md rename to docs/models/listfilesresponse.md index bcb1f13a..802f685f 100644 --- a/docs/models/listfilesout.md +++ b/docs/models/listfilesresponse.md @@ -1,4 +1,4 @@ -# ListFilesOut +# ListFilesResponse ## Fields diff --git a/docs/models/listfinetuningjobsrequest.md b/docs/models/listfinetuningjobsrequest.md deleted file mode 100644 index 3a04fc70..00000000 --- a/docs/models/listfinetuningjobsrequest.md +++ /dev/null @@ -1,17 +0,0 @@ -# ListFineTuningJobsRequest - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. 
| -| `status` | [OptionalNullable[models.ListFineTuningJobsStatus]](../models/listfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | \ No newline at end of file diff --git a/docs/models/listfinetuningjobsresponse.md b/docs/models/listfinetuningjobsresponse.md new file mode 100644 index 00000000..00251242 --- /dev/null +++ b/docs/models/listfinetuningjobsresponse.md @@ -0,0 +1,10 @@ +# ListFineTuningJobsResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------ | +| `data` | List[[models.ListFineTuningJobsResponseData](../models/listfinetuningjobsresponsedata.md)] | :heavy_minus_sign: | N/A | +| `object` | *Optional[Literal["list"]]* | :heavy_minus_sign: | N/A | +| `total` | *int* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listfinetuningjobsresponsedata.md b/docs/models/listfinetuningjobsresponsedata.md new file mode 100644 index 00000000..adb06444 --- /dev/null +++ b/docs/models/listfinetuningjobsresponsedata.md @@ -0,0 +1,17 @@ +# ListFineTuningJobsResponseData + + +## Supported Types + +### 
`models.ClassifierFineTuningJob` + +```python +value: models.ClassifierFineTuningJob = /* values here */ +``` + +### `models.CompletionFineTuningJob` + +```python +value: models.CompletionFineTuningJob = /* values here */ +``` + diff --git a/docs/models/listlibrariesresponse.md b/docs/models/listlibrariesresponse.md new file mode 100644 index 00000000..e21b9ced --- /dev/null +++ b/docs/models/listlibrariesresponse.md @@ -0,0 +1,8 @@ +# ListLibrariesResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `data` | List[[models.Library](../models/library.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/listlibraryout.md b/docs/models/listlibraryout.md deleted file mode 100644 index db76ffa1..00000000 --- a/docs/models/listlibraryout.md +++ /dev/null @@ -1,8 +0,0 @@ -# ListLibraryOut - - -## Fields - -| Field | Type | Required | Description | -| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -| `data` | List[[models.LibraryOut](../models/libraryout.md)] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputcontentchunks.md b/docs/models/messageinputcontentchunks.md index 4fd18a0d..05617850 100644 --- a/docs/models/messageinputcontentchunks.md +++ b/docs/models/messageinputcontentchunks.md @@ -27,9 +27,9 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` -### `models.ThinkChunk` +### `models.ConversationThinkChunk` ```python -value: models.ThinkChunk = /* values here */ +value: models.ConversationThinkChunk = /* values here */ ``` diff --git 
a/docs/models/messageinputentry.md b/docs/models/messageinputentry.md index 52183a32..f8514fb3 100644 --- a/docs/models/messageinputentry.md +++ b/docs/models/messageinputentry.md @@ -5,13 +5,13 @@ Representation of an input message inside the conversation. ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `object` | [Optional[models.MessageInputEntryObject]](../models/messageinputentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.MessageInputEntryType]](../models/messageinputentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `role` | [models.MessageInputEntryRole](../models/messageinputentryrole.md) | :heavy_check_mark: | N/A | -| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | -| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| 
`type` | *Optional[Literal["message.input"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | [models.Role](../models/role.md) | :heavy_check_mark: | N/A | +| `content` | [models.MessageInputEntryContent](../models/messageinputentrycontent.md) | :heavy_check_mark: | N/A | +| `prefix` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/messageinputentryobject.md b/docs/models/messageinputentryobject.md deleted file mode 100644 index 6bdd62e2..00000000 --- a/docs/models/messageinputentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageInputEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/messageinputentrytype.md b/docs/models/messageinputentrytype.md deleted file mode 100644 index d3378124..00000000 --- a/docs/models/messageinputentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageInputEntryType - - -## Values - -| Name | Value | -| --------------- | --------------- | -| `MESSAGE_INPUT` | message.input | \ No newline at end of file diff --git a/docs/models/messageoutputcontentchunks.md b/docs/models/messageoutputcontentchunks.md index d9c3d50e..c4a7777e 100644 --- a/docs/models/messageoutputcontentchunks.md +++ b/docs/models/messageoutputcontentchunks.md @@ -27,10 +27,10 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` -### `models.ThinkChunk` +### `models.ConversationThinkChunk` ```python -value: models.ThinkChunk = /* values here */ +value: models.ConversationThinkChunk = /* values here */ ``` ### 
`models.ToolReferenceChunk` diff --git a/docs/models/messageoutputentry.md b/docs/models/messageoutputentry.md index 5b42e20d..73a1c666 100644 --- a/docs/models/messageoutputentry.md +++ b/docs/models/messageoutputentry.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `object` | [Optional[models.MessageOutputEntryObject]](../models/messageoutputentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.MessageOutputEntryType]](../models/messageoutputentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.MessageOutputEntryRole]](../models/messageoutputentryrole.md) | :heavy_minus_sign: | N/A | -| `content` | [models.MessageOutputEntryContent](../models/messageoutputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | 
-------------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["message.output"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [models.MessageOutputEntryContent](../models/messageoutputentrycontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputentryobject.md b/docs/models/messageoutputentryobject.md deleted file mode 100644 index bb254c82..00000000 --- a/docs/models/messageoutputentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/messageoutputentryrole.md b/docs/models/messageoutputentryrole.md deleted file mode 100644 index 783ee0aa..00000000 --- a/docs/models/messageoutputentryrole.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEntryRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/messageoutputentrytype.md b/docs/models/messageoutputentrytype.md deleted file mode 100644 index cb4a7a1b..00000000 --- a/docs/models/messageoutputentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEntryType - - -## Values - -| Name | Value | -| ---------------- | ---------------- | -| 
`MESSAGE_OUTPUT` | message.output | \ No newline at end of file diff --git a/docs/models/messageoutputevent.md b/docs/models/messageoutputevent.md index b0fa1a2d..e09a965f 100644 --- a/docs/models/messageoutputevent.md +++ b/docs/models/messageoutputevent.md @@ -3,14 +3,14 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | *Literal["message.output.delta"]* | :heavy_check_mark: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `content_index` | *Optional[int]* | :heavy_minus_sign: | N/A | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | [Optional[models.MessageOutputEventRole]](../models/messageoutputeventrole.md) | :heavy_minus_sign: | N/A | -| `content` | [models.MessageOutputEventContent](../models/messageoutputeventcontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `type` | *Literal["message.output.delta"]* | :heavy_check_mark: | N/A | +| `created_at` | 
[date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `content_index` | *Optional[int]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `role` | *Optional[Literal["assistant"]]* | :heavy_minus_sign: | N/A | +| `content` | [models.MessageOutputEventContent](../models/messageoutputeventcontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/messageoutputeventrole.md b/docs/models/messageoutputeventrole.md deleted file mode 100644 index e38c6472..00000000 --- a/docs/models/messageoutputeventrole.md +++ /dev/null @@ -1,8 +0,0 @@ -# MessageOutputEventRole - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `ASSISTANT` | assistant | \ No newline at end of file diff --git a/docs/models/metricout.md b/docs/models/metric.md similarity index 98% rename from docs/models/metricout.md rename to docs/models/metric.md index 3c552bac..7f863036 100644 --- a/docs/models/metricout.md +++ b/docs/models/metric.md @@ -1,4 +1,4 @@ -# MetricOut +# Metric Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase). 
diff --git a/docs/models/modelconversation.md b/docs/models/modelconversation.md index 813e1f3a..af2e5c61 100644 --- a/docs/models/modelconversation.md +++ b/docs/models/modelconversation.md @@ -3,16 +3,16 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.ModelConversationTool](../models/modelconversationtool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| -| `object` | [Optional[models.ModelConversationObject]](../models/modelconversationobject.md) | :heavy_minus_sign: | N/A | -| `id` | *str* | :heavy_check_mark: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | -| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.ModelConversationTool](../models/modelconversationtool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | Name given to the conversation. | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | Description of the what the conversation is about. | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | Custom metadata for the conversation. 
| +| `object` | *Optional[Literal["conversation"]]* | :heavy_minus_sign: | N/A | +| `id` | *str* | :heavy_check_mark: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `updated_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | +| `model` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/modelconversationobject.md b/docs/models/modelconversationobject.md deleted file mode 100644 index ead1fa26..00000000 --- a/docs/models/modelconversationobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ModelConversationObject - - -## Values - -| Name | Value | -| -------------- | -------------- | -| `CONVERSATION` | conversation | \ No newline at end of file diff --git a/docs/models/ocrrequest.md b/docs/models/ocrrequest.md index 87929e53..dd3fc2ea 100644 --- a/docs/models/ocrrequest.md +++ b/docs/models/ocrrequest.md @@ -3,18 +3,18 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | -| `id` 
| *Optional[str]* | :heavy_minus_sign: | N/A | | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | -| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | -| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | -| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `document` | [models.DocumentUnion](../models/documentunion.md) | :heavy_check_mark: | Document to run OCR on | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | +| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/outputcontentchunks.md b/docs/models/outputcontentchunks.md index c76bc31d..e5185014 100644 --- a/docs/models/outputcontentchunks.md +++ b/docs/models/outputcontentchunks.md @@ -27,10 +27,10 @@ value: models.ToolFileChunk = /* values here */ value: models.DocumentURLChunk = /* values here */ ``` -### `models.ThinkChunk` +### `models.ConversationThinkChunk` ```python -value: models.ThinkChunk = /* values here */ +value: models.ConversationThinkChunk = /* values here */ ``` ### `models.ToolReferenceChunk` diff --git a/docs/models/realtimetranscriptioninputaudioappend.md b/docs/models/realtimetranscriptioninputaudioappend.md new file mode 100644 index 00000000..5ee365eb --- /dev/null +++ b/docs/models/realtimetranscriptioninputaudioappend.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionInputAudioAppend + + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["input_audio.append"]]* | :heavy_minus_sign: | N/A | +| `audio` | *str* | :heavy_check_mark: | Base64-encoded raw PCM bytes matching the current audio_format. 
Max decoded size: 262144 bytes. | \ No newline at end of file diff --git a/docs/models/realtimetranscriptioninputaudioend.md b/docs/models/realtimetranscriptioninputaudioend.md new file mode 100644 index 00000000..393d208c --- /dev/null +++ b/docs/models/realtimetranscriptioninputaudioend.md @@ -0,0 +1,8 @@ +# RealtimeTranscriptionInputAudioEnd + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------- | -------------------------------------- | -------------------------------------- | -------------------------------------- | +| `type` | *Optional[Literal["input_audio.end"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptioninputaudioflush.md b/docs/models/realtimetranscriptioninputaudioflush.md new file mode 100644 index 00000000..367725ba --- /dev/null +++ b/docs/models/realtimetranscriptioninputaudioflush.md @@ -0,0 +1,8 @@ +# RealtimeTranscriptionInputAudioFlush + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | ---------------------------------------- | +| `type` | *Optional[Literal["input_audio.flush"]]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsession.md b/docs/models/realtimetranscriptionsession.md index 94a0a89e..750bd7f7 100644 --- a/docs/models/realtimetranscriptionsession.md +++ b/docs/models/realtimetranscriptionsession.md @@ -7,4 +7,5 @@ | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | ---------------------------------------------- | | `request_id` | *str* | :heavy_check_mark: | N/A | | `model` | *str* | :heavy_check_mark: | N/A | -| `audio_format` | [models.AudioFormat](../models/audioformat.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| 
`audio_format` | [models.AudioFormat](../models/audioformat.md) | :heavy_check_mark: | N/A | +| `target_streaming_delay_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/realtimetranscriptionsessionupdatemessage.md b/docs/models/realtimetranscriptionsessionupdatemessage.md new file mode 100644 index 00000000..2a50ca92 --- /dev/null +++ b/docs/models/realtimetranscriptionsessionupdatemessage.md @@ -0,0 +1,9 @@ +# RealtimeTranscriptionSessionUpdateMessage + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | +| `type` | *Optional[Literal["session.update"]]* | :heavy_minus_sign: | N/A | +| `session` | [models.RealtimeTranscriptionSessionUpdatePayload](../models/realtimetranscriptionsessionupdatepayload.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/getagentrequest.md b/docs/models/realtimetranscriptionsessionupdatepayload.md similarity index 57% rename from docs/models/getagentrequest.md rename to docs/models/realtimetranscriptionsessionupdatepayload.md index 3f729dff..d6c6547d 100644 --- a/docs/models/getagentrequest.md +++ b/docs/models/realtimetranscriptionsessionupdatepayload.md @@ -1,9 +1,9 @@ -# GetAgentRequest +# RealtimeTranscriptionSessionUpdatePayload ## Fields | Field | Type | Required | Description | | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | [OptionalNullable[models.GetAgentAgentVersion]](../models/getagentagentversion.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `audio_format` | [OptionalNullable[models.AudioFormat]](../models/audioformat.md) | :heavy_minus_sign: | Set before sending audio. Audio format updates are rejected after audio starts. | +| `target_streaming_delay_ms` | *OptionalNullable[int]* | :heavy_minus_sign: | Set before sending audio. Streaming delay updates are rejected after audio starts. | \ No newline at end of file diff --git a/docs/models/referencechunk.md b/docs/models/referencechunk.md index a132ca2f..d847e248 100644 --- a/docs/models/referencechunk.md +++ b/docs/models/referencechunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | -| `type` | [Optional[models.ReferenceChunkType]](../models/referencechunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------- | -------------------------------- | -------------------------------- | -------------------------------- | +| `type` | *Optional[Literal["reference"]]* | :heavy_minus_sign: | N/A | +| `reference_ids` | List[*int*] | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/referencechunktype.md b/docs/models/referencechunktype.md deleted file mode 100644 index 1e0e2fe6..00000000 --- 
a/docs/models/referencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ReferenceChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `REFERENCE` | reference | \ No newline at end of file diff --git a/docs/models/reprocessdocumentrequest.md b/docs/models/reprocessdocumentrequest.md deleted file mode 100644 index cf3982a8..00000000 --- a/docs/models/reprocessdocumentrequest.md +++ /dev/null @@ -1,9 +0,0 @@ -# ReprocessDocumentRequest - - -## Fields - -| Field | Type | Required | Description | -| ------------------ | ------------------ | ------------------ | ------------------ | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `document_id` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/response.md b/docs/models/response.md index 3512b7a8..ff679257 100644 --- a/docs/models/response.md +++ b/docs/models/response.md @@ -3,15 +3,15 @@ ## Supported Types -### `models.ClassifierJobOut` +### `models.ClassifierFineTuningJob` ```python -value: models.ClassifierJobOut = /* values here */ +value: models.ClassifierFineTuningJob = /* values here */ ``` -### `models.CompletionJobOut` +### `models.CompletionFineTuningJob` ```python -value: models.CompletionJobOut = /* values here */ +value: models.CompletionFineTuningJob = /* values here */ ``` diff --git a/docs/models/retrievemodelrequest.md b/docs/models/retrievemodelv1modelsmodelidgetrequest.md similarity index 94% rename from docs/models/retrievemodelrequest.md rename to docs/models/retrievemodelv1modelsmodelidgetrequest.md index 787c3dd1..f1280f88 100644 --- a/docs/models/retrievemodelrequest.md +++ b/docs/models/retrievemodelv1modelsmodelidgetrequest.md @@ -1,4 +1,4 @@ -# RetrieveModelRequest +# RetrieveModelV1ModelsModelIDGetRequest ## Fields diff --git a/docs/models/messageinputentryrole.md b/docs/models/role.md similarity index 84% rename from docs/models/messageinputentryrole.md rename to docs/models/role.md index f2fdc71d..853c6257 100644 --- 
a/docs/models/messageinputentryrole.md +++ b/docs/models/role.md @@ -1,4 +1,4 @@ -# MessageInputEntryRole +# Role ## Values diff --git a/docs/models/startfinetuningjobresponse.md b/docs/models/startfinetuningjobresponse.md deleted file mode 100644 index dce84c5a..00000000 --- a/docs/models/startfinetuningjobresponse.md +++ /dev/null @@ -1,19 +0,0 @@ -# StartFineTuningJobResponse - -OK - - -## Supported Types - -### `models.ClassifierDetailedJobOut` - -```python -value: models.ClassifierDetailedJobOut = /* values here */ -``` - -### `models.CompletionDetailedJobOut` - -```python -value: models.CompletionDetailedJobOut = /* values here */ -``` - diff --git a/docs/models/systemmessage.md b/docs/models/systemmessage.md index dfb0cd0b..10bda10f 100644 --- a/docs/models/systemmessage.md +++ b/docs/models/systemmessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | ---------------------------------------------------------------- | -| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| `role` | *Literal["system"]* | :heavy_check_mark: | N/A | +| `content` | [models.SystemMessageContent](../models/systemmessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/textchunk.md b/docs/models/textchunk.md index d488cb51..df0e61c3 100644 --- a/docs/models/textchunk.md +++ b/docs/models/textchunk.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | 
------------------------------------------------------------ | -| `text` | *str* | :heavy_check_mark: | N/A | -| `type` | [Optional[models.TextChunkType]](../models/textchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| --------------------------- | --------------------------- | --------------------------- | --------------------------- | +| `type` | *Optional[Literal["text"]]* | :heavy_minus_sign: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/textchunktype.md b/docs/models/textchunktype.md deleted file mode 100644 index e2a2ae8b..00000000 --- a/docs/models/textchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# TextChunkType - - -## Values - -| Name | Value | -| ------ | ------ | -| `TEXT` | text | \ No newline at end of file diff --git a/docs/models/thinkchunk.md b/docs/models/thinkchunk.md index 66b2e0cd..70c0369f 100644 --- a/docs/models/thinkchunk.md +++ b/docs/models/thinkchunk.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | -| `thinking` | List[[models.Thinking](../models/thinking.md)] | :heavy_check_mark: | N/A | -| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. 
| -| `type` | [Optional[models.ThinkChunkType]](../models/thinkchunktype.md) | :heavy_minus_sign: | N/A | \ No newline at end of file +| `type` | *Literal["thinking"]* | :heavy_check_mark: | N/A | +| `thinking` | List[[models.ThinkChunkThinking](../models/thinkchunkthinking.md)] | :heavy_check_mark: | N/A | +| `closed` | *Optional[bool]* | :heavy_minus_sign: | Whether the thinking chunk is closed or not. Currently only used for prefixing. | \ No newline at end of file diff --git a/docs/models/thinking.md b/docs/models/thinkchunkthinking.md similarity index 90% rename from docs/models/thinking.md rename to docs/models/thinkchunkthinking.md index c7a0d5c9..dd1ecca1 100644 --- a/docs/models/thinking.md +++ b/docs/models/thinkchunkthinking.md @@ -1,4 +1,4 @@ -# Thinking +# ThinkChunkThinking ## Supported Types diff --git a/docs/models/thinkchunktype.md b/docs/models/thinkchunktype.md deleted file mode 100644 index baf6f755..00000000 --- a/docs/models/thinkchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ThinkChunkType - - -## Values - -| Name | Value | -| ---------- | ---------- | -| `THINKING` | thinking | \ No newline at end of file diff --git a/docs/models/toolcallconfirmation.md b/docs/models/toolcallconfirmation.md new file mode 100644 index 00000000..1812f7d6 --- /dev/null +++ b/docs/models/toolcallconfirmation.md @@ -0,0 +1,9 @@ +# ToolCallConfirmation + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | ------------------------------------------------ | +| `tool_call_id` | *str* | :heavy_check_mark: | N/A | +| `confirmation` | [models.Confirmation](../models/confirmation.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/updateftmodelin.md b/docs/models/toolconfiguration.md similarity index 54% rename from docs/models/updateftmodelin.md rename to 
docs/models/toolconfiguration.md index 4e55b1a7..89286a17 100644 --- a/docs/models/updateftmodelin.md +++ b/docs/models/toolconfiguration.md @@ -1,9 +1,10 @@ -# UpdateFTModelIn +# ToolConfiguration ## Fields | Field | Type | Required | Description | | ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| `exclude` | List[*str*] | :heavy_minus_sign: | N/A | +| `include` | List[*str*] | :heavy_minus_sign: | N/A | +| `requires_confirmation` | List[*str*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionentry.md b/docs/models/toolexecutionentry.md index adf88fb1..03316381 100644 --- a/docs/models/toolexecutionentry.md +++ b/docs/models/toolexecutionentry.md @@ -3,13 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | -| `object` | [Optional[models.ToolExecutionEntryObject]](../models/toolexecutionentryobject.md) | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.ToolExecutionEntryType]](../models/toolexecutionentrytype.md) | :heavy_minus_sign: | N/A | -| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | -| `name` | 
[models.ToolExecutionEntryName](../models/toolexecutionentryname.md) | :heavy_check_mark: | N/A | -| `arguments` | *str* | :heavy_check_mark: | N/A | -| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `object` | *Optional[Literal["entry"]]* | :heavy_minus_sign: | N/A | +| `type` | *Optional[Literal["tool.execution"]]* | :heavy_minus_sign: | N/A | +| `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `completed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `name` | [models.ToolExecutionEntryName](../models/toolexecutionentryname.md) | :heavy_check_mark: | N/A | +| `arguments` | *str* | :heavy_check_mark: | N/A | +| `info` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolexecutionentryobject.md b/docs/models/toolexecutionentryobject.md deleted file mode 100644 index 0ca79af5..00000000 --- a/docs/models/toolexecutionentryobject.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolExecutionEntryObject - - -## Values - -| Name | Value | -| ------- | ------- | -| `ENTRY` | entry | \ No newline at end of file diff --git a/docs/models/toolexecutionentrytype.md b/docs/models/toolexecutionentrytype.md deleted file mode 100644 index a67629b8..00000000 --- 
a/docs/models/toolexecutionentrytype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolExecutionEntryType - - -## Values - -| Name | Value | -| ---------------- | ---------------- | -| `TOOL_EXECUTION` | tool.execution | \ No newline at end of file diff --git a/docs/models/toolexecutionstartedevent.md b/docs/models/toolexecutionstartedevent.md index c41c7258..189b8a3d 100644 --- a/docs/models/toolexecutionstartedevent.md +++ b/docs/models/toolexecutionstartedevent.md @@ -9,5 +9,7 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `output_index` | *Optional[int]* | :heavy_minus_sign: | N/A | | `id` | *str* | :heavy_check_mark: | N/A | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `name` | [models.ToolExecutionStartedEventName](../models/toolexecutionstartedeventname.md) | :heavy_check_mark: | N/A | | `arguments` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/toolfilechunk.md b/docs/models/toolfilechunk.md index a3ffaa2b..d6002175 100644 --- a/docs/models/toolfilechunk.md +++ b/docs/models/toolfilechunk.md @@ -3,10 +3,10 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -| `type` | [Optional[models.ToolFileChunkType]](../models/toolfilechunktype.md) | :heavy_minus_sign: | N/A | -| `tool` | [models.ToolFileChunkTool](../models/toolfilechunktool.md) | :heavy_check_mark: | N/A | -| `file_id` | *str* | :heavy_check_mark: | N/A | -| `file_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `file_type` | *OptionalNullable[str]* | 
:heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | ---------------------------------------------------------- | +| `type` | *Optional[Literal["tool_file"]]* | :heavy_minus_sign: | N/A | +| `tool` | [models.ToolFileChunkTool](../models/toolfilechunktool.md) | :heavy_check_mark: | N/A | +| `file_id` | *str* | :heavy_check_mark: | N/A | +| `file_name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `file_type` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolfilechunktype.md b/docs/models/toolfilechunktype.md deleted file mode 100644 index 7e99acef..00000000 --- a/docs/models/toolfilechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolFileChunkType - - -## Values - -| Name | Value | -| ----------- | ----------- | -| `TOOL_FILE` | tool_file | \ No newline at end of file diff --git a/docs/models/toolmessage.md b/docs/models/toolmessage.md index fa00d666..7201481e 100644 --- a/docs/models/toolmessage.md +++ b/docs/models/toolmessage.md @@ -5,7 +5,7 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | +| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | | `content` | [Nullable[models.ToolMessageContent]](../models/toolmessagecontent.md) | :heavy_check_mark: | N/A | | `tool_call_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `role` | *Literal["tool"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| 
`name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/toolreferencechunk.md b/docs/models/toolreferencechunk.md index 3020dbc9..49ea4ca7 100644 --- a/docs/models/toolreferencechunk.md +++ b/docs/models/toolreferencechunk.md @@ -3,11 +3,11 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ | -| `type` | [Optional[models.ToolReferenceChunkType]](../models/toolreferencechunktype.md) | :heavy_minus_sign: | N/A | -| `tool` | [models.ToolReferenceChunkTool](../models/toolreferencechunktool.md) | :heavy_check_mark: | N/A | -| `title` | *str* | :heavy_check_mark: | N/A | -| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `favicon` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `type` | *Optional[Literal["tool_reference"]]* | :heavy_minus_sign: | N/A | +| `tool` | [models.ToolReferenceChunkTool](../models/toolreferencechunktool.md) | :heavy_check_mark: | N/A | +| `title` | *str* | :heavy_check_mark: | N/A | +| `url` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `favicon` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff 
--git a/docs/models/toolreferencechunktype.md b/docs/models/toolreferencechunktype.md deleted file mode 100644 index bc57d277..00000000 --- a/docs/models/toolreferencechunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# ToolReferenceChunkType - - -## Values - -| Name | Value | -| ---------------- | ---------------- | -| `TOOL_REFERENCE` | tool_reference | \ No newline at end of file diff --git a/docs/models/transcriptionsegmentchunk.md b/docs/models/transcriptionsegmentchunk.md index 00a599ee..d7672c0e 100644 --- a/docs/models/transcriptionsegmentchunk.md +++ b/docs/models/transcriptionsegmentchunk.md @@ -3,12 +3,12 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------- | -| `text` | *str* | :heavy_check_mark: | N/A | -| `start` | *float* | :heavy_check_mark: | N/A | -| `end` | *float* | :heavy_check_mark: | N/A | -| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | -| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `type` | [Optional[models.TranscriptionSegmentChunkType]](../models/transcriptionsegmentchunktype.md) | :heavy_minus_sign: | N/A | -| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | -------------------------------------------- | +| `type` | *Optional[Literal["transcription_segment"]]* | :heavy_minus_sign: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | +| `start` | *float* | :heavy_check_mark: | N/A | +| `end` 
| *float* | :heavy_check_mark: | N/A | +| `score` | *OptionalNullable[float]* | :heavy_minus_sign: | N/A | +| `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionsegmentchunktype.md b/docs/models/transcriptionsegmentchunktype.md deleted file mode 100644 index 2968fa26..00000000 --- a/docs/models/transcriptionsegmentchunktype.md +++ /dev/null @@ -1,8 +0,0 @@ -# TranscriptionSegmentChunkType - - -## Values - -| Name | Value | -| ----------------------- | ----------------------- | -| `TRANSCRIPTION_SEGMENT` | transcription_segment | \ No newline at end of file diff --git a/docs/models/transcriptionstreamsegmentdelta.md b/docs/models/transcriptionstreamsegmentdelta.md index e0143a39..1b652a3b 100644 --- a/docs/models/transcriptionstreamsegmentdelta.md +++ b/docs/models/transcriptionstreamsegmentdelta.md @@ -5,9 +5,9 @@ | Field | Type | Required | Description | | ---------------------------------- | ---------------------------------- | ---------------------------------- | ---------------------------------- | +| `type` | *Literal["transcription.segment"]* | :heavy_check_mark: | N/A | | `text` | *str* | :heavy_check_mark: | N/A | | `start` | *float* | :heavy_check_mark: | N/A | | `end` | *float* | :heavy_check_mark: | N/A | | `speaker_id` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `type` | *Literal["transcription.segment"]* | :heavy_check_mark: | N/A | | `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/transcriptionstreamtextdelta.md b/docs/models/transcriptionstreamtextdelta.md index a4062171..77bd0ddc 100644 --- a/docs/models/transcriptionstreamtextdelta.md +++ b/docs/models/transcriptionstreamtextdelta.md @@ -5,6 +5,6 @@ | Field | Type | Required | Description | | ------------------------------------- | 
------------------------------------- | ------------------------------------- | ------------------------------------- | -| `text` | *str* | :heavy_check_mark: | N/A | | `type` | *Literal["transcription.text.delta"]* | :heavy_check_mark: | N/A | +| `text` | *str* | :heavy_check_mark: | N/A | | `__pydantic_extra__` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/archiveftmodelout.md b/docs/models/unarchivemodelresponse.md similarity index 96% rename from docs/models/archiveftmodelout.md rename to docs/models/unarchivemodelresponse.md index 98fa7b19..375962a7 100644 --- a/docs/models/archiveftmodelout.md +++ b/docs/models/unarchivemodelresponse.md @@ -1,4 +1,4 @@ -# ArchiveFTModelOut +# UnarchiveModelResponse ## Fields diff --git a/docs/models/updateagentrequest.md b/docs/models/updateagentrequest.md index 358cb71d..d3428d92 100644 --- a/docs/models/updateagentrequest.md +++ b/docs/models/updateagentrequest.md @@ -3,7 +3,15 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_update_request` | [models.AgentUpdateRequest](../models/agentupdaterequest.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. 
| +| `tools` | List[[models.UpdateAgentRequestTool](../models/updateagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `deployment_chat` | *OptionalNullable[bool]* | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/agentcreationrequesttool.md b/docs/models/updateagentrequesttool.md similarity index 95% rename from docs/models/agentcreationrequesttool.md rename to docs/models/updateagentrequesttool.md index b3bd7fa3..e358b1ed 100644 --- a/docs/models/agentcreationrequesttool.md +++ b/docs/models/updateagentrequesttool.md @@ -1,4 +1,4 @@ -# AgentCreationRequestTool +# UpdateAgentRequestTool ## Supported Types diff --git a/docs/models/updatedocumentrequest.md b/docs/models/updatedocumentrequest.md index fa5d117a..7e0b41b7 100644 --- a/docs/models/updatedocumentrequest.md +++ b/docs/models/updatedocumentrequest.md @@ -3,8 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -------------------------------------------------------- | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `document_id` | *str* | :heavy_check_mark: | N/A | -| `document_update_in` | [models.DocumentUpdateIn](../models/documentupdatein.md) | :heavy_check_mark: 
| N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `attributes` | Dict[str, [models.Attributes](../models/attributes.md)] | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updatelibraryrequest.md b/docs/models/updatelibraryrequest.md index e03883cc..aaffc5a9 100644 --- a/docs/models/updatelibraryrequest.md +++ b/docs/models/updatelibraryrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | -| `library_id` | *str* | :heavy_check_mark: | N/A | -| `library_in_update` | [models.LibraryInUpdate](../models/libraryinupdate.md) | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updatemodelrequest.md b/docs/models/updatemodelrequest.md index 5799c63b..56b84c59 100644 --- a/docs/models/updatemodelrequest.md +++ b/docs/models/updatemodelrequest.md @@ -3,7 +3,7 @@ ## Fields -| Field | Type | Required | Description | Example | -| ------------------------------------------------------ | ------------------------------------------------------ | ------------------------------------------------------ | 
------------------------------------------------------ | ------------------------------------------------------ | -| `model_id` | *str* | :heavy_check_mark: | The ID of the model to update. | ft:open-mistral-7b:587a6b29:20240514:7e773925 | -| `update_ft_model_in` | [models.UpdateFTModelIn](../models/updateftmodelin.md) | :heavy_check_mark: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/updatemodelresponse.md b/docs/models/updatemodelresponse.md deleted file mode 100644 index 275ee77f..00000000 --- a/docs/models/updatemodelresponse.md +++ /dev/null @@ -1,19 +0,0 @@ -# UpdateModelResponse - -OK - - -## Supported Types - -### `models.ClassifierFTModelOut` - -```python -value: models.ClassifierFTModelOut = /* values here */ -``` - -### `models.CompletionFTModelOut` - -```python -value: models.CompletionFTModelOut = /* values here */ -``` - diff --git a/docs/models/usermessage.md b/docs/models/usermessage.md index 78ed066e..e7a932ed 100644 --- a/docs/models/usermessage.md +++ b/docs/models/usermessage.md @@ -5,5 +5,5 @@ | Field | Type | Required | Description | | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | -| `content` | [Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | -| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| `role` | *Literal["user"]* | :heavy_check_mark: | N/A | +| `content` | 
[Nullable[models.UserMessageContent]](../models/usermessagecontent.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/wandbintegrationout.md b/docs/models/wandbintegrationresult.md similarity index 98% rename from docs/models/wandbintegrationout.md rename to docs/models/wandbintegrationresult.md index a6f65667..d12bc311 100644 --- a/docs/models/wandbintegrationout.md +++ b/docs/models/wandbintegrationresult.md @@ -1,4 +1,4 @@ -# WandbIntegrationOut +# WandbIntegrationResult ## Fields diff --git a/docs/models/websearchpremiumtool.md b/docs/models/websearchpremiumtool.md index 07b8b926..78b736cd 100644 --- a/docs/models/websearchpremiumtool.md +++ b/docs/models/websearchpremiumtool.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ------------------------------- | ------------------------------- | ------------------------------- | ------------------------------- | -| `type` | *Literal["web_search_premium"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["web_search_premium"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/websearchtool.md b/docs/models/websearchtool.md index da5e7b7b..4ca7333c 100644 --- a/docs/models/websearchtool.md +++ b/docs/models/websearchtool.md @@ -3,6 +3,7 @@ ## Fields -| Field | Type | Required | Description | -| ----------------------- | ----------------------- | ----------------------- | ----------------------- | -| `type` | 
*Literal["web_search"]* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `tool_configuration` | [OptionalNullable[models.ToolConfiguration]](../models/toolconfiguration.md) | :heavy_minus_sign: | N/A | +| `type` | *Literal["web_search"]* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/sdks/accesses/README.md b/docs/sdks/accesses/README.md index c1e3866d..c50456df 100644 --- a/docs/sdks/accesses/README.md +++ b/docs/sdks/accesses/README.md @@ -16,7 +16,7 @@ Given a library, list all of the Entity that have access and to what level. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -26,7 +26,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.list(library_id="9eb628ef-f118-47eb-b3cc-9750c4ca5fb6") + res = mistral.beta.libraries.accesses.list(library_id="d2169833-d8e2-416e-a372-76518d3d99c2") # Handle response print(res) @@ -48,8 +48,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update_or_create @@ -57,7 +57,7 @@ Given a library id, you can create or update the access level of an entity. 
You ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -67,7 +67,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.update_or_create(library_id="88bb030c-1cb5-4231-ba13-742c56554876", level="Viewer", share_with_uuid="6a736283-c1fa-49b0-9b6d-ea9309c0a766", share_with_type="Workspace") + res = mistral.beta.libraries.accesses.update_or_create(library_id="36de3a24-5b1c-4c8f-9d84-d5642205a976", level="Viewer", share_with_uuid="0ae92ecb-21ed-47c5-9f7e-0b2cbe325a20", share_with_type="User") # Handle response print(res) @@ -93,8 +93,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -102,7 +102,7 @@ Given a library id, you can delete the access level of an entity. 
An owner canno ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -112,7 +112,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.accesses.delete(library_id="fc7ab1cf-e33c-4791-a6e0-95ff1f921c43", share_with_uuid="5818ddff-3568-40f1-a9e4-39d6cb9f5c94", share_with_type="Org") + res = mistral.beta.libraries.accesses.delete(library_id="709e3cad-9fb2-4f4e-bf88-143cf1808107", share_with_uuid="b843cc47-ce8f-4354-8cfc-5fcd7fb2865b", share_with_type="User") # Handle response print(res) @@ -137,5 +137,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/agents/README.md b/docs/sdks/agents/README.md index cd3ec4c6..8a608370 100644 --- a/docs/sdks/agents/README.md +++ b/docs/sdks/agents/README.md @@ -15,7 +15,7 @@ Agents Completion ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -27,8 +27,8 @@ with Mistral( res = mistral.agents.complete(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], agent_id="", stream=False, response_format={ "type": "text", @@ -50,7 +50,7 @@ with Mistral( | `stop` | [Optional[models.AgentsCompletionRequestStop]](../../models/agentscompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. 
If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionRequestToolChoice]](../../models/agentscompletionrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | @@ -69,8 +69,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -90,8 +90,8 @@ with Mistral( res = mistral.agents.stream(messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], agent_id="", stream=True, response_format={ "type": "text", @@ -115,7 +115,7 @@ with Mistral( | `stop` | [Optional[models.AgentsCompletionStreamRequestStop]](../../models/agentscompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. 
Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | N/A | | | `tool_choice` | [Optional[models.AgentsCompletionStreamRequestToolChoice]](../../models/agentscompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | N/A | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | @@ -134,5 +134,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/batchjobs/README.md b/docs/sdks/batchjobs/README.md index 24316d78..3633fe4e 100644 --- a/docs/sdks/batchjobs/README.md +++ b/docs/sdks/batchjobs/README.md @@ -15,7 +15,7 @@ Get a list of batch jobs for your organization and user. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -49,13 +49,13 @@ with Mistral( ### Response -**[models.BatchJobsOut](../../models/batchjobsout.md)** +**[models.ListBatchJobsResponse](../../models/listbatchjobsresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## create @@ -63,7 +63,7 @@ Create a new batch job, it will be queued for processing. 
### Example Usage - + ```python from mistralai.client import Mistral import os with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.create(endpoint="/v1/classifications", model="mistral-small-latest", timeout_hours=24) + res = mistral.batch.jobs.create(endpoint="/v1/moderations", model="mistral-small-latest", timeout_hours=24) # Handle response print(res) @@ -87,7 +87,7 @@ with Mistral( | `endpoint` | [models.APIEndpoint](../../models/apiendpoint.md) | :heavy_check_mark: | N/A | | | `input_files` | List[*str*] | :heavy_minus_sign: | The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the body request for the batch inference in a "body" field. An example of such file is the following: ```json {"custom_id": "0", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French cheese?"}]}} {"custom_id": "1", "body": {"max_tokens": 100, "messages": [{"role": "user", "content": "What is the best French wine?"}]}} ``` | | | `requests` | List[[models.BatchRequest](../../models/batchrequest.md)] | :heavy_minus_sign: | N/A | | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | mistral-small-latest | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model to be used for batch inference. | **Example 1:** mistral-small-latest
**Example 2:** mistral-medium-latest | | `agent_id` | *OptionalNullable[str]* | :heavy_minus_sign: | In case you want to use a specific agent from the **deprecated** agents api for batch inference, you can specify the agent ID here. | | | `metadata` | Dict[str, *str*] | :heavy_minus_sign: | The metadata of your choice to be associated with the batch inference job. | | | `timeout_hours` | *Optional[int]* | :heavy_minus_sign: | The timeout in hours for the batch inference job. | | @@ -95,13 +95,13 @@ with Mistral( ### Response -**[models.BatchJobOut](../../models/batchjobout.md)** +**[models.BatchJob](../../models/batchjob.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -112,7 +112,7 @@ Args: ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -122,7 +122,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.get(job_id="358c80a1-79bd-43f0-8f0e-8186713aa3ba") + res = mistral.batch.jobs.get(job_id="4017dc9f-b629-42f4-9700-8c681b9e7f0f") # Handle response print(res) @@ -139,13 +139,13 @@ with Mistral( ### Response -**[models.BatchJobOut](../../models/batchjobout.md)** +**[models.BatchJob](../../models/batchjob.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## cancel @@ -153,7 +153,7 @@ Request the cancellation of a batch job. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -163,7 +163,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.batch.jobs.cancel(job_id="393537d7-8b33-4931-a289-7f61f8757eda") + res = mistral.batch.jobs.cancel(job_id="4fb29d1c-535b-4f0a-a1cb-2167f86da569") # Handle response print(res) @@ -179,10 +179,10 @@ with Mistral( ### Response -**[models.BatchJobOut](../../models/batchjobout.md)** +**[models.BatchJob](../../models/batchjob.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/betaagents/README.md b/docs/sdks/betaagents/README.md index 0ef655a3..aaa5110e 100644 --- a/docs/sdks/betaagents/README.md +++ b/docs/sdks/betaagents/README.md @@ -24,7 +24,7 @@ Create a new agent giving it instructions, tools, description. 
The agent is then ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -34,7 +34,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.create(model="Mustang", name="", completion_args={ + res = mistral.beta.agents.create(model="LeBaron", name="", completion_args={ "response_format": { "type": "text", }, @@ -47,18 +47,18 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | N/A | -| `name` | *str* | :heavy_check_mark: | N/A | -| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | -| `tools` | List[[models.AgentCreationRequestTool](../../models/agentcreationrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | -| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | -| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | -| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | -| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | +| `model` | *str* | :heavy_check_mark: | N/A | +| `name` | *str* | :heavy_check_mark: | N/A | +| `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. | +| `tools` | List[[models.CreateAgentRequestTool](../../models/createagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `handoffs` | List[*str*] | :heavy_minus_sign: | N/A | +| `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | +| `version_message` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -68,8 +68,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list @@ -77,7 +77,7 @@ Retrieve a list of agent entities sorted by creation time. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -116,8 +116,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -125,7 +125,7 @@ Given an agent, retrieve an agent entity with its attributes. The agent_version ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -144,11 +144,11 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | -| `agent_id` | *str* | :heavy_check_mark: | N/A | -| `agent_version` | [OptionalNullable[models.GetAgentAgentVersion]](../../models/getagentagentversion.md) | :heavy_minus_sign: | N/A | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| +| Parameter | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | +| `agent_id` | *str* | :heavy_check_mark: | N/A | +| `agent_version` | [OptionalNullable[models.AgentsAPIV1AgentsGetAgentVersion]](../../models/agentsapiv1agentsgetagentversion.md) | :heavy_minus_sign: | N/A | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -158,8 +158,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update @@ -167,7 +167,7 @@ Update an agent attributes and create a new version. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -194,7 +194,7 @@ with Mistral( | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | | `agent_id` | *str* | :heavy_check_mark: | N/A | | `instructions` | *OptionalNullable[str]* | :heavy_minus_sign: | Instruction prompt the model will follow during the conversation. 
| -| `tools` | List[[models.AgentUpdateRequestTool](../../models/agentupdaterequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | +| `tools` | List[[models.UpdateAgentRequestTool](../../models/updateagentrequesttool.md)] | :heavy_minus_sign: | List of tools which are available to the model during the conversation. | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | | `model` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | | `name` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | @@ -213,8 +213,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -222,7 +222,7 @@ Delete an agent entity. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -249,8 +249,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update_version @@ -258,7 +258,7 @@ Switch the version of an agent. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -268,7 +268,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.update_version(agent_id="", version=958693) + res = mistral.beta.agents.update_version(agent_id="", version=157995) # Handle response print(res) @@ -291,8 +291,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list_versions @@ -300,7 +300,7 @@ Retrieve all versions for a specific agent with full agent context. Supports pag ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -334,8 +334,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_version @@ -343,7 +343,7 @@ Get a specific agent version by version number. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -353,7 +353,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.get_version(agent_id="", version="") + res = mistral.beta.agents.get_version(agent_id="", version="788393") # Handle response print(res) @@ -376,8 +376,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## create_version_alias @@ -385,7 +385,7 @@ Create a new alias or update an existing alias to point to a specific version. A ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -395,7 +395,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=154719) + res = mistral.beta.agents.create_version_alias(agent_id="", alias="", version=595141) # Handle response print(res) @@ -419,8 +419,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list_version_aliases @@ -428,7 +428,7 @@ Retrieve all version aliases for a specific agent. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -460,8 +460,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete_version_alias @@ -469,7 +469,7 @@ Delete an existing alias for an agent. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -497,5 +497,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/chat/README.md b/docs/sdks/chat/README.md index 6907c29d..1bf4aead 100644 --- a/docs/sdks/chat/README.md +++ b/docs/sdks/chat/README.md @@ -27,8 +27,8 @@ with Mistral( res = mistral.chat.complete(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=False, response_format={ "type": "text", @@ -52,7 +52,7 @@ with Mistral( | `stop` | [Optional[models.ChatCompletionRequestStop]](../../models/chatcompletionrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. 
| | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | @@ -72,8 +72,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -93,8 +93,8 @@ with Mistral( res = mistral.chat.stream(model="mistral-large-latest", messages=[ { - "content": "Who is the best French painter? Answer in one short sentence.", "role": "user", + "content": "Who is the best French painter? Answer in one short sentence.", }, ], stream=True, response_format={ "type": "text", @@ -120,7 +120,7 @@ with Mistral( | `stop` | [Optional[models.ChatCompletionStreamRequestStop]](../../models/chatcompletionstreamrequeststop.md) | :heavy_minus_sign: | Stop generation if this token is detected. 
Or if one of these tokens is detected when providing an array | | | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
| | @@ -140,5 +140,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/classifiers/README.md b/docs/sdks/classifiers/README.md index 41b52081..dc0f4984 100644 --- a/docs/sdks/classifiers/README.md +++ b/docs/sdks/classifiers/README.md @@ -17,7 +17,7 @@ Moderations ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -27,10 +27,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.moderate(model="Durango", inputs=[ - "", - "", - ]) + res = mistral.classifiers.moderate(model="mistral-moderation-latest", inputs="") # Handle response print(res) @@ -54,8 +51,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## moderate_chat @@ -63,7 +60,7 @@ Chat Moderations ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -75,8 +72,8 @@ with Mistral( res = mistral.classifiers.moderate_chat(inputs=[ { - "content": "", "role": "tool", + "content": "", }, ], model="LeBaron") @@ -101,8 +98,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | 
\*/\* | ## classify @@ -146,8 +143,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## classify_chat @@ -165,12 +162,12 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.classifiers.classify_chat(model="Camry", inputs=[ + res = mistral.classifiers.classify_chat(model="Camry", input=[ { "messages": [ { - "content": "", "role": "system", + "content": "", }, ], }, @@ -186,7 +183,7 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | ------------------------------------------------------------------- | | `model` | *str* | :heavy_check_mark: | N/A | -| `inputs` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Chat to classify | +| `input` | [models.Inputs](../../models/inputs.md) | :heavy_check_mark: | Chat to classify | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -197,5 +194,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/conversations/README.md b/docs/sdks/conversations/README.md index c0089f12..e77d329b 100644 --- a/docs/sdks/conversations/README.md +++ b/docs/sdks/conversations/README.md @@ -24,7 +24,7 @@ Create a new conversation, using a base model or an agent and append entries. Co ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -72,8 +72,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list @@ -81,7 +81,7 @@ Retrieve a list of conversation entities sorted by creation time. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -109,14 +109,14 @@ with Mistral( ### Response -**[List[models.ListConversationsResponse]](../../models/.md)** +**[List[models.AgentsAPIV1ConversationsListResponse]](../../models/.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -124,7 +124,7 @@ Given a conversation_id retrieve a conversation entity with its attributes. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -156,8 +156,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -165,7 +165,7 @@ Delete a conversation given a conversation_id. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -192,8 +192,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## append @@ -201,7 +201,7 @@ Run completion on the history of the conversation and the user entries. 
Return t ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -211,7 +211,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append(conversation_id="", inputs=[], stream=False, store=True, handoff_execution="server", completion_args={ + res = mistral.beta.conversations.append(conversation_id="", stream=False, store=True, handoff_execution="server", completion_args={ "response_format": { "type": "text", }, @@ -227,11 +227,12 @@ with Mistral( | Parameter | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. 
| | `handoff_execution` | [Optional[models.ConversationAppendRequestHandoffExecution]](../../models/conversationappendrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response @@ -242,8 +243,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_history @@ -251,7 +252,7 @@ Given a conversation_id retrieve all the entries belonging to that conversation. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -283,8 +284,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_messages @@ -292,7 +293,7 @@ Given a conversation_id retrieve all the messages belonging to that conversation ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -324,8 +325,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## restart @@ -333,7 +334,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -343,7 +344,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart(conversation_id="", inputs="", from_entry_id="", stream=False, store=True, handoff_execution="server", completion_args={ + res = mistral.beta.conversations.restart(conversation_id="", from_entry_id="", stream=False, store=True, handoff_execution="server", completion_args={ "response_format": { "type": "text", }, @@ -359,8 +360,8 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartRequestHandoffExecution]](../../models/conversationrestartrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | @@ -377,8 +378,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## start_stream @@ -386,7 +387,7 @@ Create a new conversation, using a base model or an agent and append entries. 
Co ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -396,7 +397,14 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.start_stream(inputs="", stream=True, completion_args={ + res = mistral.beta.conversations.start_stream(inputs=[ + { + "object": "entry", + "type": "function.result", + "tool_call_id": "", + "result": "", + }, + ], stream=True, completion_args={ "response_format": { "type": "text", }, @@ -436,8 +444,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## append_stream @@ -445,7 +453,7 @@ Run completion on the history of the conversation and the user entries. Return t ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -455,7 +463,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.append_stream(conversation_id="", inputs="", stream=True, store=True, handoff_execution="server", completion_args={ + res = mistral.beta.conversations.append_stream(conversation_id="", stream=True, store=True, handoff_execution="server", completion_args={ "response_format": { "type": "text", }, @@ -473,11 +481,12 @@ with Mistral( | Parameter | Type | Required | Description | | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the conversation to which we append entries. | -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationAppendStreamRequestHandoffExecution]](../../models/conversationappendstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | | `completion_args` | [Optional[models.CompletionArgs]](../../models/completionargs.md) | :heavy_minus_sign: | White-listed arguments from the completion API | +| `tool_confirmations` | List[[models.ToolCallConfirmation](../../models/toolcallconfirmation.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. 
| ### Response @@ -488,8 +497,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## restart_stream @@ -497,7 +506,7 @@ Given a conversation_id and an id, recreate a conversation from this point and r ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -507,7 +516,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.conversations.restart_stream(conversation_id="", inputs="", from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ + res = mistral.beta.conversations.restart_stream(conversation_id="", from_entry_id="", stream=True, store=True, handoff_execution="server", completion_args={ "response_format": { "type": "text", }, @@ -525,8 +534,8 @@ with Mistral( | Parameter | Type | Required | Description | | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- | | `conversation_id` | *str* | :heavy_check_mark: | ID of the original conversation which is being restarted. 
| -| `inputs` | [models.ConversationInputs](../../models/conversationinputs.md) | :heavy_check_mark: | N/A | | `from_entry_id` | *str* | :heavy_check_mark: | N/A | +| `inputs` | [Optional[models.ConversationInputs]](../../models/conversationinputs.md) | :heavy_minus_sign: | N/A | | `stream` | *Optional[bool]* | :heavy_minus_sign: | N/A | | `store` | *Optional[bool]* | :heavy_minus_sign: | Whether to store the results into our servers or not. | | `handoff_execution` | [Optional[models.ConversationRestartStreamRequestHandoffExecution]](../../models/conversationrestartstreamrequesthandoffexecution.md) | :heavy_minus_sign: | N/A | @@ -543,5 +552,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/documents/README.md b/docs/sdks/documents/README.md index 97831f86..9c219b67 100644 --- a/docs/sdks/documents/README.md +++ b/docs/sdks/documents/README.md @@ -23,7 +23,7 @@ Given a library, lists the document that have been uploaded to that library. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -33,7 +33,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.list(library_id="05e1bda5-99b1-4baf-bb03-905d8e094f74", page_size=100, page=0, sort_by="created_at", sort_order="desc") + res = mistral.beta.libraries.documents.list(library_id="5c3ca4cd-62bc-4c71-ad8a-1531ae80d078", page_size=100, page=0, sort_by="created_at", sort_order="desc") # Handle response print(res) @@ -55,14 +55,14 @@ with Mistral( ### Response -**[models.ListDocumentOut](../../models/listdocumentout.md)** +**[models.ListDocumentsResponse](../../models/listdocumentsresponse.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## upload @@ -70,7 +70,7 @@ Given a library, upload a new document to that library. 
It is queued for process ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -80,7 +80,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.upload(library_id="f973c54e-979a-4464-9d36-8cc31beb21fe", file={ + res = mistral.beta.libraries.documents.upload(library_id="a02150d9-5ee0-4877-b62c-28b1fcdf3b76", file={ "file_name": "example.file", "content": open("example.file", "rb"), }) @@ -100,14 +100,14 @@ with Mistral( ### Response -**[models.DocumentOut](../../models/documentout.md)** +**[models.Document](../../models/document.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -115,7 +115,7 @@ Given a library and a document in this library, you can retrieve the metadata of ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -125,7 +125,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.get(library_id="f9902d0a-1ea4-4953-be48-52df6edd302a", document_id="c3e12fd9-e840-46f2-8d4a-79985ed36d24") + res = mistral.beta.libraries.documents.get(library_id="03d908c8-90a1-44fd-bf3a-8490fb7c9a03", document_id="90973aec-0508-4375-8b00-91d732414745") # Handle response print(res) @@ -142,14 +142,14 @@ with Mistral( ### Response -**[models.DocumentOut](../../models/documentout.md)** +**[models.Document](../../models/document.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | 
application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update @@ -157,7 +157,7 @@ Given a library and a document in that library, update the name of that document ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -167,7 +167,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.update(library_id="3b900c67-d2b6-4637-93f2-3eff2c85f8dd", document_id="66f935fd-37ec-441f-bca5-b1129befcbca") + res = mistral.beta.libraries.documents.update(library_id="3ddd8d93-dca5-4a6d-980d-173226c35742", document_id="2a25e44c-b160-40ca-b5c2-b65fb2fcae34") # Handle response print(res) @@ -186,14 +186,14 @@ with Mistral( ### Response -**[models.DocumentOut](../../models/documentout.md)** +**[models.Document](../../models/document.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -201,7 +201,7 @@ Given a library and a document in that library, delete that document. The docume ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -211,7 +211,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - mistral.beta.libraries.documents.delete(library_id="c728d742-7845-462b-84ad-2aacbaf1c7cf", document_id="ed3f5797-846a-4abe-8e30-39b2fd2323e0") + mistral.beta.libraries.documents.delete(library_id="005daae9-d42e-407d-82d7-2261c6a1496c", document_id="edc236b0-baff-49a9-884b-4ca36a258da4") # Use the SDK ... 
@@ -229,8 +229,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## text_content @@ -238,7 +238,7 @@ Given a library and a document in that library, you can retrieve the text conten ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -248,7 +248,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.text_content(library_id="12689dc1-50df-4a0d-8202-2757f7a8c141", document_id="9d4057e9-d112-437c-911e-6ee031389739") + res = mistral.beta.libraries.documents.text_content(library_id="1d177215-3b6b-45ba-9fa9-baf773223bec", document_id="60214c91-2aba-4692-a4e6-a53365de8caf") # Handle response print(res) @@ -271,8 +271,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## status @@ -280,7 +280,7 @@ Given a library and a document in that library, retrieve the processing status o ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -290,7 +290,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.status(library_id="41bb33c4-7e53-453d-bf21-398bb2862772", document_id="416b95cf-19c8-45af-84be-26aaa3ab3666") + res = mistral.beta.libraries.documents.status(library_id="e6906f70-368f-4155-80da-c1718f01bc43", document_id="2c904915-d831-4e9d-a345-8ce405bcef66") # Handle response print(res) @@ -313,8 +313,8 @@ with Mistral( | 
Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_signed_url @@ -322,7 +322,7 @@ Given a library and a document in that library, retrieve the signed URL of a spe ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -332,7 +332,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.get_signed_url(library_id="2dbbe172-1374-41be-b03d-a088c733612e", document_id="b5d88764-47f1-4485-9df1-658775428344") + res = mistral.beta.libraries.documents.get_signed_url(library_id="23cf6904-a602-4ee8-9f5b-8efc557c336d", document_id="48598486-df71-4994-acbb-1133c72efa8c") # Handle response print(res) @@ -349,14 +349,14 @@ with Mistral( ### Response -**[str](../../models/.md)** +**[str](../../models/responselibrariesdocumentsgetsignedurlv1.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## extracted_text_signed_url @@ -364,7 +364,7 @@ Given a library and a document in that library, retrieve the signed URL of text ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -374,7 +374,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.documents.extracted_text_signed_url(library_id="46d040ce-ae2e-4891-a54c-cdab6a8f62d8", document_id="3eddbfe2-3fd7-47f5-984b-b378e6950e37") + res = 
mistral.beta.libraries.documents.extracted_text_signed_url(library_id="a6f15de3-1e82-4f95-af82-851499042ef8", document_id="9749d4f9-24e5-4ca2-99a3-a406863f805d") # Handle response print(res) @@ -391,14 +391,14 @@ with Mistral( ### Response -**[str](../../models/.md)** +**[str](../../models/responselibrariesdocumentsgetextractedtextsignedurlv1.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## reprocess @@ -406,7 +406,7 @@ Given a library and a document in that library, reprocess that document, it will ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -416,7 +416,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - mistral.beta.libraries.documents.reprocess(library_id="76d357e4-d891-40c6-9d1e-6d6ce5056ee0", document_id="09798d2b-8f46-46c6-9765-8054a82a4bb2") + mistral.beta.libraries.documents.reprocess(library_id="51b29371-de8f-4ba4-932b-a0bafb3a7f64", document_id="3052422c-49ca-45ac-a918-cadb35d61fd8") # Use the SDK ... 
@@ -434,5 +434,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index 0be7ea6d..eecb5c9e 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -14,7 +14,7 @@ Embeddings ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -54,5 +54,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/files/README.md b/docs/sdks/files/README.md index ae29b7bf..9507326b 100644 --- a/docs/sdks/files/README.md +++ b/docs/sdks/files/README.md @@ -23,7 +23,7 @@ Please contact us if you need to increase these storage limits. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -53,13 +53,13 @@ with Mistral( ### Response -**[models.UploadFileOut](../../models/uploadfileout.md)** +**[models.CreateFileResponse](../../models/createfileresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## list @@ -67,7 +67,7 @@ Returns a list of files that belong to the user's organization. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -100,13 +100,13 @@ with Mistral( ### Response -**[models.ListFilesOut](../../models/listfilesout.md)** +**[models.ListFilesResponse](../../models/listfilesresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## retrieve @@ -114,7 +114,7 @@ Returns information about a specific file. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -124,7 +124,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.retrieve(file_id="654a62d9-b7ee-49ac-835e-af4153e3c9ec") + res = mistral.files.retrieve(file_id="f2a27685-ca4e-4dc2-9f2b-88c422c3e0f6") # Handle response print(res) @@ -140,13 +140,13 @@ with Mistral( ### Response -**[models.RetrieveFileOut](../../models/retrievefileout.md)** +**[models.GetFileResponse](../../models/getfileresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -154,7 +154,7 @@ Delete a file. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -164,7 +164,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.delete(file_id="789c27a4-69de-47c6-b67f-cf6e56ce9f41") + res = mistral.files.delete(file_id="3b6d45eb-e30b-416f-8019-f47e2e93d930") # Handle response print(res) @@ -180,13 +180,13 @@ with Mistral( ### Response -**[models.DeleteFileOut](../../models/deletefileout.md)** +**[models.DeleteFileResponse](../../models/deletefileresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## download @@ -194,7 +194,7 @@ Download a file ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -204,7 +204,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.download(file_id="e2ba278e-eac9-4050-ae8e-ec433e124efb") + res = mistral.files.download(file_id="f8919994-a4a1-46b2-8b5b-06335a4300ce") # Handle response print(res) @@ -226,7 +226,7 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get_signed_url @@ -234,7 +234,7 @@ Get Signed Url ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -244,7 +244,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.files.get_signed_url(file_id="7a0c108d-9e6b-4c47-990d-a20cba50b283", expiry=24) + res = mistral.files.get_signed_url(file_id="06a020ab-355c-49a6-b19d-304b7c01699f", expiry=24) # Handle response print(res) @@ -261,10 +261,10 @@ with Mistral( ### Response -**[models.FileSignedURL](../../models/filesignedurl.md)** +**[models.GetSignedURLResponse](../../models/getsignedurlresponse.md)** ### Errors | Error Type | Status Code 
| Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/fim/README.md b/docs/sdks/fim/README.md index 3c8c59c7..49151bf5 100644 --- a/docs/sdks/fim/README.md +++ b/docs/sdks/fim/README.md @@ -15,7 +15,7 @@ FIM completion. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fim.complete(model="codestral-2405", prompt="def", top_p=1, stream=False, suffix="return a+b") + res = mistral.fim.complete(model="codestral-latest", prompt="def", top_p=1, stream=False, suffix="return a+b") # Handle response print(res) @@ -57,8 +57,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -110,5 +110,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/finetuningjobs/README.md b/docs/sdks/finetuningjobs/README.md index fe18feeb..4262b3a9 100644 --- a/docs/sdks/finetuningjobs/README.md +++ b/docs/sdks/finetuningjobs/README.md @@ -16,7 +16,7 @@ Get a list of fine-tuning jobs for your organization and user. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -35,29 +35,29 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | -| -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------- | -| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | -| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | -| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | -| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | -| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | -| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. Other results are not displayed. | -| `status` | [OptionalNullable[models.ListFineTuningJobsStatus]](../../models/listfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | -| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. 
| -| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | -| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | +| Parameter | Type | Required | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | +| `page` | *Optional[int]* | :heavy_minus_sign: | The page number of the results to be returned. | +| `page_size` | *Optional[int]* | :heavy_minus_sign: | The number of items to return per page. | +| `model` | *OptionalNullable[str]* | :heavy_minus_sign: | The model name used for fine-tuning to filter on. When set, the other results are not displayed. | +| `created_after` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | The date/time to filter on. When set, the results for previous creation times are not displayed. | +| `created_before` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | +| `created_by_me` | *Optional[bool]* | :heavy_minus_sign: | When set, only return results for jobs created by the API caller. 
Other results are not displayed. | +| `status` | [OptionalNullable[models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus]](../../models/jobsapiroutesfinetuninggetfinetuningjobsstatus.md) | :heavy_minus_sign: | The current job state to filter on. When set, the other results are not displayed. | +| `wandb_project` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weights and Biases project to filter on. When set, the other results are not displayed. | +| `wandb_name` | *OptionalNullable[str]* | :heavy_minus_sign: | The Weight and Biases run name to filter on. When set, the other results are not displayed. | +| `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | The model suffix to filter on. When set, the other results are not displayed. | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response -**[models.JobsOut](../../models/jobsout.md)** +**[models.ListFineTuningJobsResponse](../../models/listfinetuningjobsresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## create @@ -65,7 +65,7 @@ Create a new fine-tuning job, it will be queued for processing. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -75,7 +75,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.create(model="Countach", hyperparameters={ + res = mistral.fine_tuning.jobs.create(model="Camaro", hyperparameters={ "learning_rate": 0.0001, }, invalid_sample_skip_percentage=0) @@ -93,23 +93,23 @@ with Mistral( | `training_files` | List[[models.TrainingFile](../../models/trainingfile.md)] | :heavy_minus_sign: | N/A | | `validation_files` | List[*str*] | :heavy_minus_sign: | A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files. | | `suffix` | *OptionalNullable[str]* | :heavy_minus_sign: | A string that will be added to your fine-tuning model name. For example, a suffix of "my-great-model" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...` | -| `integrations` | List[[models.JobInIntegration](../../models/jobinintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | +| `integrations` | List[[models.CreateFineTuningJobRequestIntegration](../../models/createfinetuningjobrequestintegration.md)] | :heavy_minus_sign: | A list of integrations to enable for your fine-tuning job. | | `auto_start` | *Optional[bool]* | :heavy_minus_sign: | This field will be required in a future release. 
| | `invalid_sample_skip_percentage` | *Optional[float]* | :heavy_minus_sign: | N/A | | `job_type` | [OptionalNullable[models.FineTuneableModelType]](../../models/finetuneablemodeltype.md) | :heavy_minus_sign: | N/A | -| `repositories` | List[[models.JobInRepository](../../models/jobinrepository.md)] | :heavy_minus_sign: | N/A | -| `classifier_targets` | List[[models.ClassifierTargetIn](../../models/classifiertargetin.md)] | :heavy_minus_sign: | N/A | +| `repositories` | List[[models.CreateFineTuningJobRequestRepository](../../models/createfinetuningjobrequestrepository.md)] | :heavy_minus_sign: | N/A | +| `classifier_targets` | List[[models.ClassifierTarget](../../models/classifiertarget.md)] | :heavy_minus_sign: | N/A | | `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | ### Response -**[models.CreateFineTuningJobResponse](../../models/createfinetuningjobresponse.md)** +**[models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse](../../models/jobsapiroutesfinetuningcreatefinetuningjobresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -117,7 +117,7 @@ Get a fine-tuned job details by its UUID. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -127,7 +127,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.get(job_id="2855f873-414e-4cf5-a46e-e589e39ee809") + res = mistral.fine_tuning.jobs.get(job_id="c167a961-ffca-4bcf-93ac-6169468dd389") # Handle response print(res) @@ -143,13 +143,13 @@ with Mistral( ### Response -**[models.GetFineTuningJobResponse](../../models/getfinetuningjobresponse.md)** +**[models.JobsAPIRoutesFineTuningGetFineTuningJobResponse](../../models/jobsapiroutesfinetuninggetfinetuningjobresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## cancel @@ -157,7 +157,7 @@ Request the cancellation of a fine tuning job. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -167,7 +167,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.cancel(job_id="ee7d6f03-fcbb-43ca-8f17-0388c0832eb9") + res = mistral.fine_tuning.jobs.cancel(job_id="6188a2f6-7513-4e0f-89cc-3f8088523a49") # Handle response print(res) @@ -183,13 +183,13 @@ with Mistral( ### Response -**[models.CancelFineTuningJobResponse](../../models/cancelfinetuningjobresponse.md)** +**[models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse](../../models/jobsapiroutesfinetuningcancelfinetuningjobresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## start @@ -197,7 +197,7 @@ Request the start of a validated fine tuning job. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -207,7 +207,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.fine_tuning.jobs.start(job_id="da371429-0ec2-4cea-b9c7-73ce3a1dd76f") + res = mistral.fine_tuning.jobs.start(job_id="56553e4d-0679-471e-b9ac-59a77d671103") # Handle response print(res) @@ -223,10 +223,10 @@ with Mistral( ### Response -**[models.StartFineTuningJobResponse](../../models/startfinetuningjobresponse.md)** +**[models.JobsAPIRoutesFineTuningStartFineTuningJobResponse](../../models/jobsapiroutesfinetuningstartfinetuningjobresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/libraries/README.md b/docs/sdks/libraries/README.md index 8835d0ec..7df1ef4e 100644 --- a/docs/sdks/libraries/README.md +++ b/docs/sdks/libraries/README.md @@ -18,7 +18,7 @@ List all libraries that you have created or have been shared with you. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -43,13 +43,13 @@ with Mistral( ### Response -**[models.ListLibraryOut](../../models/listlibraryout.md)** +**[models.ListLibrariesResponse](../../models/listlibrariesresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## create @@ -57,7 +57,7 @@ Create a new Library, you will be marked as the owner and only you will have the ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -85,14 +85,14 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## get @@ -100,7 +100,7 @@ Given a library id, details information about that Library. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -110,7 +110,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.get(library_id="44e385d6-783e-4b21-8fae-5181e6817bc4") + res = mistral.beta.libraries.get(library_id="d0d23a1e-bfe5-45e7-b7bb-22a4ea78d47f") # Handle response print(res) @@ -126,14 +126,14 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -141,7 +141,7 @@ Given a library id, deletes it together with all documents that have been upload ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -151,7 +151,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.delete(library_id="441ba08a-3d1f-4700-8d6f-f32eeed49dff") + res = mistral.beta.libraries.delete(library_id="6cad0b6e-fd2e-4d11-a48b-21d30fb7c17a") # Handle response print(res) @@ -167,14 +167,14 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update @@ -182,7 +182,7 @@ Given a library id, you can update the name and description. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -192,7 +192,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.beta.libraries.update(library_id="27049553-3425-49ce-b965-fcb3a7ab03a3") + res = mistral.beta.libraries.update(library_id="e01880c3-d0b5-4a29-8b1b-abdb8ce917e4") # Handle response print(res) @@ -210,11 +210,11 @@ with Mistral( ### Response -**[models.LibraryOut](../../models/libraryout.md)** +**[models.Library](../../models/library.md)** ### Errors | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/models/README.md b/docs/sdks/models/README.md index 0cbf1bdd..311a2db6 100644 --- a/docs/sdks/models/README.md +++ b/docs/sdks/models/README.md @@ -19,7 +19,7 @@ List all models available to the user. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -50,7 +50,7 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## retrieve @@ -58,7 +58,7 @@ Retrieve information about a model. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -90,8 +90,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## delete @@ -99,7 +99,7 @@ Delete a fine-tuned model. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -131,8 +131,8 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | ## update @@ -140,7 +140,7 @@ Update a model name or description. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -168,13 +168,13 @@ with Mistral( ### Response -**[models.UpdateModelResponse](../../models/updatemodelresponse.md)** +**[models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse](../../models/jobsapiroutesfinetuningupdatefinetunedmodelresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## archive @@ -182,7 +182,7 @@ Archive a fine-tuned model. ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -208,13 +208,13 @@ with Mistral( ### Response -**[models.ArchiveFTModelOut](../../models/archiveftmodelout.md)** +**[models.ArchiveModelResponse](../../models/archivemodelresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## unarchive @@ -222,7 +222,7 @@ Un-archive a fine-tuned model. 
### Example Usage - + ```python from mistralai.client import Mistral import os @@ -248,10 +248,10 @@ with Mistral( ### Response -**[models.UnarchiveFTModelOut](../../models/unarchiveftmodelout.md)** +**[models.UnarchiveModelResponse](../../models/unarchivemodelresponse.md)** ### Errors | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/ocr/README.md b/docs/sdks/ocr/README.md index 9fd9d6fc..fde2a823 100644 --- a/docs/sdks/ocr/README.md +++ b/docs/sdks/ocr/README.md @@ -14,7 +14,7 @@ OCR ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -25,10 +25,8 @@ with Mistral( ) as mistral: res = mistral.ocr.process(model="CX-9", document={ - "image_url": { - "url": "https://measly-scrap.com", - }, - "type": "image_url", + "type": "document_url", + "document_url": "https://upset-labourer.net/", }, bbox_annotation_format={ "type": "text", }, document_annotation_format={ @@ -42,22 +40,22 @@ with Mistral( ### Parameters -| Parameter | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | 
---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | -| `document` | [models.Document](../../models/document.md) | :heavy_check_mark: | Document to run OCR on | | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | -| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | -| `table_format` | [OptionalNullable[models.TableFormat]](../../models/tableformat.md) | :heavy_minus_sign: | N/A | | -| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | +| Parameter | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `document` | [models.DocumentUnion](../../models/documentunion.md) | :heavy_check_mark: | Document to run OCR on | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | +| `table_format` | [OptionalNullable[models.TableFormat]](../../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `retries` | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md) | :heavy_minus_sign: | Configuration to override the default retry behavior of the client. | | ### Response @@ -67,5 +65,5 @@ with Mistral( | Error Type | Status Code | Content Type | | -------------------------- | -------------------------- | -------------------------- | -| models.HTTPValidationError | 422 | application/json | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.HTTPValidationError | 422 | application/json | +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/docs/sdks/transcriptions/README.md b/docs/sdks/transcriptions/README.md index 9691b81d..97703c9b 100644 --- a/docs/sdks/transcriptions/README.md +++ b/docs/sdks/transcriptions/README.md @@ -15,7 +15,7 @@ Create Transcription ### Example Usage - + ```python from mistralai.client import Mistral import os @@ -25,7 +25,7 @@ with Mistral( api_key=os.getenv("MISTRAL_API_KEY", ""), ) as mistral: - res = mistral.audio.transcriptions.complete(model="Model X", diarize=False) + res = mistral.audio.transcriptions.complete(model="voxtral-mini-latest", diarize=False) # Handle response print(res) @@ -36,7 +36,7 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | 
---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | voxtral-mini-latest | +| `model` | *str* | :heavy_check_mark: | ID of the model to be used. | **Example 1:** voxtral-mini-latest
**Example 2:** voxtral-mini-2507 | | `file` | [Optional[models.File]](../../models/file.md) | :heavy_minus_sign: | N/A | | | `file_url` | *OptionalNullable[str]* | :heavy_minus_sign: | Url of a file to be transcribed | | | `file_id` | *OptionalNullable[str]* | :heavy_minus_sign: | ID of a file uploaded to /v1/files | | @@ -55,7 +55,7 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | +| errors.SDKError | 4XX, 5XX | \*/\* | ## stream @@ -105,4 +105,4 @@ with Mistral( | Error Type | Status Code | Content Type | | --------------- | --------------- | --------------- | -| models.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file +| errors.SDKError | 4XX, 5XX | \*/\* | \ No newline at end of file diff --git a/examples/mistral/audio/chat_streaming.py b/examples/mistral/audio/chat_streaming.py index a9ab2323..b418ef57 100755 --- a/examples/mistral/audio/chat_streaming.py +++ b/examples/mistral/audio/chat_streaming.py @@ -2,7 +2,8 @@ import os -from mistralai.client import Mistral, File +from mistralai.client import Mistral +from mistralai.client.models import File from mistralai.client.models import UserMessage diff --git a/examples/mistral/audio/transcription_async.py b/examples/mistral/audio/transcription_async.py index c8fd9ae6..f04f397e 100644 --- a/examples/mistral/audio/transcription_async.py +++ b/examples/mistral/audio/transcription_async.py @@ -2,7 +2,8 @@ import os import asyncio -from mistralai.client import Mistral, File +from mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/audio/transcription_diarize_async.py b/examples/mistral/audio/transcription_diarize_async.py index cbdf3512..4b511c87 100644 --- a/examples/mistral/audio/transcription_diarize_async.py +++ b/examples/mistral/audio/transcription_diarize_async.py @@ -3,7 +3,8 @@ import os import asyncio import pathlib -from 
mistralai.client import Mistral, File +from mistralai.client import Mistral +from mistralai.client.models import File fixture_dir = pathlib.Path(__file__).parents[2] / "fixtures" diff --git a/examples/mistral/audio/transcription_stream_async.py b/examples/mistral/audio/transcription_stream_async.py index 6e64dcf7..3055f3de 100644 --- a/examples/mistral/audio/transcription_stream_async.py +++ b/examples/mistral/audio/transcription_stream_async.py @@ -2,7 +2,8 @@ import asyncio import os -from mistralai.client import Mistral, File +from mistralai.client import Mistral +from mistralai.client.models import File async def main(): diff --git a/examples/mistral/classifier/async_classifier.py b/examples/mistral/classifier/async_classifier.py index 45cc14fa..881f6a69 100644 --- a/examples/mistral/classifier/async_classifier.py +++ b/examples/mistral/classifier/async_classifier.py @@ -2,8 +2,8 @@ from pprint import pprint import asyncio -from mistralai.client import Mistral, TrainingFile, ClassifierTrainingParametersIn -from mistralai.client.models import ClassifierJobOut +from mistralai.client import Mistral +from mistralai.client.models import ClassifierFineTuningJob, ClassifierFineTuningJobDetails, ClassifierTrainingParameters, TrainingFile import os @@ -36,12 +36,12 @@ async def train_classifier(client: Mistral, training_file_ids: list[str]) -> str TrainingFile(file_id=training_file_id) for training_file_id in training_file_ids ], - hyperparameters=ClassifierTrainingParametersIn( + hyperparameters=ClassifierTrainingParameters( learning_rate=0.0001, ), auto_start=True, ) - if not isinstance(job, ClassifierJobOut): + if not isinstance(job, ClassifierFineTuningJob): print("Unexpected job type returned") return None @@ -51,6 +51,8 @@ async def train_classifier(client: Mistral, training_file_ids: list[str]) -> str while True: await asyncio.sleep(10) detailed_job = await client.fine_tuning.jobs.get_async(job_id=job.id) + if not isinstance(detailed_job, 
ClassifierFineTuningJobDetails): + raise Exception(f"Unexpected job type: {type(detailed_job)}") if detailed_job.status not in [ "QUEUED", "STARTED", diff --git a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py index 8b4cedd3..d2a1679f 100644 --- a/examples/mistral/jobs/async_batch_job_chat_completion_inline.py +++ b/examples/mistral/jobs/async_batch_job_chat_completion_inline.py @@ -1,4 +1,5 @@ -from mistralai.client import Mistral, BatchRequest, UserMessage +from mistralai.client import Mistral +from mistralai.client.models import BatchRequest, UserMessage import os import asyncio diff --git a/packages/azure/.speakeasy/gen.lock b/packages/azure/.speakeasy/gen.lock index 5cf1d8e1..c795c61c 100644 --- a/packages/azure/.speakeasy/gen.lock +++ b/packages/azure/.speakeasy/gen.lock @@ -3,46 +3,46 @@ id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: docChecksum: 571037b8485712afcef86703debb7f15 docVersion: 1.0.0 - speakeasyVersion: 1.685.0 - generationVersion: 2.794.1 - releaseVersion: 2.0.0a4 - configChecksum: 549cf1eae199d39bf97052462fd8e640 + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 2.0.0-a4.1 + configChecksum: e2523ba89eba35872d05ddb673dd862a repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/azure installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/azure published: true persistentEdits: - generation_id: b0dbfbbb-4028-4834-9980-a1d2dba52a8d - pristine_commit_hash: 6cab3cf0757d3c7dd58ee1eabec66dd63a8c9a03 - pristine_tree_hash: abf5c6e4b603142b1a6aac936d7c3be574611256 + generation_id: 1812b54a-0aa7-4b43-8c53-d70427856543 + pristine_commit_hash: 28db2945de995b5707dc7f310b5291435aaafcbf + pristine_tree_hash: b01973b36166a61d38fa84cf7dae49b7a74e1402 features: python: additionalDependencies: 1.0.0 
additionalProperties: 1.0.1 configurableModuleName: 0.2.0 - constsAndDefaults: 1.0.5 - core: 5.23.18 + constsAndDefaults: 1.0.7 + core: 6.0.12 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 examples: 3.0.2 flatRequests: 1.0.1 - globalSecurity: 3.0.4 + globalSecurity: 3.0.5 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.2.0 includes: 3.0.0 methodArguments: 1.0.2 - nameOverrides: 3.0.1 - nullables: 1.0.1 - openEnums: 1.0.2 - responseFormat: 1.0.1 - retries: 3.0.3 - sdkHooks: 1.2.0 - serverEvents: 1.0.11 + nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.1.1 + unions: 3.1.4 trackedFiles: .gitattributes: id: 24139dae6567 @@ -52,6 +52,10 @@ trackedFiles: id: 89aa447020cd last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/arguments.md: id: 7ea5e33709a7 last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec @@ -74,8 +78,8 @@ trackedFiles: pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b docs/models/chatcompletionrequest.md: id: adffe90369d0 - last_write_checksum: sha1:a404d37c6605a5524f1f48b418bacf46e86a9a68 - pristine_git_object: 3b0f7270840e257475f4b0f15f27e0c0152818d2 + last_write_checksum: sha1:00453565d70739471a4e1872c93b5b7e66fe6cb6 + pristine_git_object: f8715cd0a335c6dc0fda4b60400f11c4aa8a0a06 docs/models/chatcompletionrequestmessage.md: id: 3f5e170d418c last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 @@ -94,8 +98,8 @@ trackedFiles: pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b docs/models/chatcompletionstreamrequest.md: 
id: cf8f29558a68 - last_write_checksum: sha1:daca00885f0d0f9863d8420bbee514723084813d - pristine_git_object: f78156a647ec63ca60ff423acbdee2b2404e4e60 + last_write_checksum: sha1:7233a19b12f3204b8e2259a4a09d0d9726609e4e + pristine_git_object: cc82a8c707268084865f86d71be82de5ebf6f821 docs/models/chatcompletionstreamrequestmessage.md: id: 053a98476cd2 last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 @@ -164,10 +168,6 @@ trackedFiles: id: 4b3bd62c0f26 last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 - docs/models/httpvalidationerror.md: - id: a211c095f2ac - last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e - pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/imagedetail.md: id: f8217529b496 last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 @@ -210,8 +210,8 @@ trackedFiles: pristine_git_object: 02473d44f73485fd7b7f0031d51bfac835d4036e docs/models/ocrrequest.md: id: 6862a3fc2d0f - last_write_checksum: sha1:9311e2c87f8f4512c35a717d3b063f2861f878d4 - pristine_git_object: 87929e53f8a74823b82ecce56d15f22228134fa6 + last_write_checksum: sha1:eefa8ad80773e00ac297f3cf806704ac6ac3557d + pristine_git_object: 2d26c19fd1cecb234d7fb761dd73cc0a59e622ad docs/models/ocrresponse.md: id: 30042328fb78 last_write_checksum: sha1:8e4a4ae404ea752f3e9f1108c2a5f89ed6cfb143 @@ -326,8 +326,8 @@ trackedFiles: pristine_git_object: 3e38f1a929f7d6b1d6de74604aa87e3d8f010544 pylintrc: id: 7ce8b9f946e6 - last_write_checksum: sha1:6b615d49741eb9ae16375d3a499767783d1128a1 - pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 + last_write_checksum: sha1:8f871a5aac4b10bff724c9d91b8d7496eb1fbdde + pristine_git_object: 0391ac11bdc5526b697b69d047d568a611ce87d0 scripts/prepare_readme.py: id: e0c5957a6035 last_write_checksum: sha1:26b29aad3c23a98912fd881698c976aac55749fe @@ -338,8 +338,8 @@ trackedFiles: pristine_git_object: 
c35748f360329c2bc370e9b189f49b1a360b2c48 src/mistralai/azure/client/__init__.py: id: 5624bda9196d - last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b - pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c + last_write_checksum: sha1:da077c0bdfcef64a4a5aea91a17292f72fa2b088 + pristine_git_object: 833c68cd526fe34aab2b7e7c45f974f7f4b9e120 src/mistralai/azure/client/_hooks/__init__.py: id: 850c237217cb last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d @@ -354,76 +354,100 @@ trackedFiles: pristine_git_object: 3e4e39555d60adebe84e596c8323ee5b80676fc9 src/mistralai/azure/client/_version.py: id: a77160e60e5d - last_write_checksum: sha1:e26eb828e9a240042acc754f38dcf2e581e045aa - pristine_git_object: 4448d2a0fd803f43820378359c921d09eba6f43e + last_write_checksum: sha1:b1d1971d43e8f92bd55bb45653a228fd9de97af3 + pristine_git_object: 4f985cc69c492521664044337e5910f8e5a26b90 src/mistralai/azure/client/basesdk.py: id: 5a585a95ec21 - last_write_checksum: sha1:d7a4a959d7d3ca3cd22d8daf144c3b4d5c0d1210 - pristine_git_object: b0391ac078b4e2a5d9107ed014c1ca939a553c23 + last_write_checksum: sha1:0c2e686aa42d6aeeb103193aa058d6ddff7bcf74 + pristine_git_object: 0d4d9a440e6c7726b6bc7fc6525aa3dc009847eb src/mistralai/azure/client/chat.py: id: c18454e628d7 - last_write_checksum: sha1:cc1ff54b85ce494428ebf22ec01bd1199cd9e2b6 - pristine_git_object: 3348bf47eafb3fcfb2de0e7d512073e947b69554 + last_write_checksum: sha1:884e22b0e313662c67cec7101765d8d7ef0bc48a + pristine_git_object: 1051f9527851894988f7e1689923575cf72a0896 + src/mistralai/azure/client/errors/__init__.py: + id: f377703514d9 + last_write_checksum: sha1:36c516c11f8083c3380a72c1d0f0718a3345f24b + pristine_git_object: 79e2712c2e62121fb6dbaab15ca8487f0e16b07c + src/mistralai/azure/client/errors/httpvalidationerror.py: + id: c3ec0ad923e9 + last_write_checksum: sha1:f45b41c1ad980c5d481158209bf23fa795cc68bc + pristine_git_object: b4f2691e630a095ff09fbbce5e2ea3063592084f + 
src/mistralai/azure/client/errors/mistralazureerror.py: + id: fae868afae89 + last_write_checksum: sha1:25f4411c7411faad753d46118edf74828b1c9f7c + pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 + src/mistralai/azure/client/errors/no_response_error.py: + id: b838df044e62 + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai/azure/client/errors/responsevalidationerror.py: + id: 77ac5e93cdda + last_write_checksum: sha1:c1e045dbdda0199bc1d563819c0b38e877d0efef + pristine_git_object: 02397334d2b3bf2516808b69b2548564f650cbe0 + src/mistralai/azure/client/errors/sdkerror.py: + id: dfdd4b1d8928 + last_write_checksum: sha1:edc2baf6feb199e1b1ff1aad681622b44804299d + pristine_git_object: c4f3616cd2720a9b5d2a2c5b2d22a305629ebbe6 src/mistralai/azure/client/httpclient.py: id: 60c81037fbd0 last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 pristine_git_object: 89560b566073785535643e694c112bedbd3db13d src/mistralai/azure/client/models/__init__.py: id: "335011330e21" - last_write_checksum: sha1:9afe0f0fb324a2b3c60ec98ce78b1ff6f908db39 - pristine_git_object: 51db6a383ddbab2d946b00c41934359a7eb50448 + last_write_checksum: sha1:07054ca95df60a3f03d8ea37a361aa506f94b78b + pristine_git_object: 908dda32cebe894b37dccaaa9b84db174ac93c21 src/mistralai/azure/client/models/assistantmessage.py: id: 353ed9110f97 - last_write_checksum: sha1:e444c76e27b9b745b9238894bdf2b6a40bba6e6e - pristine_git_object: f5793f9455485c576293b44fb548be8bae9c7a65 + last_write_checksum: sha1:973979ac03f86f26ee9a540aaaa8f70a7011daca + pristine_git_object: e9ae6e82c3c758561c8c9663f27b2fd7e38d2911 src/mistralai/azure/client/models/chatcompletionchoice.py: id: 6942c7db5891 last_write_checksum: sha1:817bfda6120a98248322c308629e404081e01279 pristine_git_object: 67b5ba694217f4f3b95589d7f84af6a9bea9802d src/mistralai/azure/client/models/chatcompletionrequest.py: id: 0c711c870184 - last_write_checksum: 
sha1:fae2a92375aa3e58c258e4497acead859cd3b6dc - pristine_git_object: 921790959880ddf9b9ffce15d881e01f8adefa86 + last_write_checksum: sha1:ffdd11a4945dd805c9a73328749c2f4d9b6f80e6 + pristine_git_object: edd0fdc74a1b81f458d6083e79dc393e488da36a src/mistralai/azure/client/models/chatcompletionresponse.py: id: bdfacf065e9e last_write_checksum: sha1:c72fb624e7475a551d37e0b291b64bcf772c402a pristine_git_object: d41f9c6fab670cf7c961f50b1302f9a88cf48162 src/mistralai/azure/client/models/chatcompletionstreamrequest.py: id: da00a7feb4ef - last_write_checksum: sha1:c8c84c818b3b22bfec1e7f5737bbb281088dd3ba - pristine_git_object: be21eed2ecbe8354eb9a4bfa48122b28dada4aaf + last_write_checksum: sha1:8bb36693fed73a50d59687ca8b30a2c443708610 + pristine_git_object: 2edfbed98462eab43f322b9c706721365e410bb9 src/mistralai/azure/client/models/completionchunk.py: id: 28d620f25510 - last_write_checksum: sha1:413545e0521539346bff6e77fdec0c9e383bde17 - pristine_git_object: b94284b2d9c29c25a2f8eaa02828e2a205f4407e + last_write_checksum: sha1:84d1c55ef7bdb438e7f536a604a070799d054281 + pristine_git_object: 0e64bbc8aa0293c9d763db56287f296909260c38 src/mistralai/azure/client/models/completionevent.py: id: a6f00a747933 last_write_checksum: sha1:3d04bfbdaf11c52af5613ed0fd70c8dbc59f6d49 pristine_git_object: c4b272871d9b3ea8443f469d29b0825706c25c00 src/mistralai/azure/client/models/completionresponsestreamchoice.py: id: 3ba5d7ba8a13 - last_write_checksum: sha1:f917300daf4febec7661f2c73bae675600ee0bdd - pristine_git_object: 2a4d053feb84cf2a9675d76ae08c83945b26644c + last_write_checksum: sha1:4de311509c71c8f582b2c767febea89f1acd341a + pristine_git_object: 20a271401ff98d69525947ab929078af83aab1f1 src/mistralai/azure/client/models/contentchunk.py: id: 1f65e4f8f731 - last_write_checksum: sha1:79efbc90c1ae36b74492666125fb3e5ecaa5c27a - pristine_git_object: 0f09f76703efd95fcd96377b8ec6870d58dbf829 + last_write_checksum: sha1:cf11e1f061d3c8af040ebbdba0b25d4177e1cea4 + pristine_git_object: 
17efcc7d5825461576cf61257908688cffd23eb7 src/mistralai/azure/client/models/deltamessage.py: id: b7dab1d158de - last_write_checksum: sha1:553fdff5a3aec6909417be3cb390d99421af1693 - pristine_git_object: 2c01feae56c44d256f1e579c15f08e167dcc6481 + last_write_checksum: sha1:190c2809d575244eda5efbb1e00a4ec5811aea29 + pristine_git_object: 567e772fc1b376efaec1a2dfd660bc74a916f8ee src/mistralai/azure/client/models/documenturlchunk.py: id: e56fec6e977f - last_write_checksum: sha1:a43cee08f935933bf715b2f1a82b4c746b591f35 - pristine_git_object: 345bafc2bfe3cc056d746cf8151cf53b68771414 + last_write_checksum: sha1:0313d94f343d46dac7cc3adc392feaf06fa2b2a4 + pristine_git_object: 2dea80056f6752bdaa5d00f391cb6f54371a9d2b src/mistralai/azure/client/models/filechunk.py: id: 150d9f180110 - last_write_checksum: sha1:df1e010006338f6dd37009f2547ab8f0b90b917a - pristine_git_object: 829f03d84c25dd859d514ffa26e570f235e4e75b + last_write_checksum: sha1:6d12d630a5bfd601836f9cb3d63b9eb2f15f880d + pristine_git_object: 6baa0cba81535e157c0f81ae2648362f7bd1adbd src/mistralai/azure/client/models/function.py: id: 6d1e2011a14b - last_write_checksum: sha1:62df160db82853d79907cccff4d0904f6bb9f142 - pristine_git_object: f4edce0fb8563f485d9a63a42439a9b2593a7f40 + last_write_checksum: sha1:b064eca9256966603581d41b5b2c08cd2448224d + pristine_git_object: 055d3657fd98da63b80deb8cd2054e95a0e66a2b src/mistralai/azure/client/models/functioncall.py: id: ced560a1bd57 last_write_checksum: sha1:490cb3a0305994de063e06fa4c77defa911271f3 @@ -432,150 +456,130 @@ trackedFiles: id: 6f09474ebc85 last_write_checksum: sha1:651ceed24416ce8192f70db03cc5cd0db685899f pristine_git_object: 839e0d557a902da6c819210962e38e1df9bda90f - src/mistralai/azure/client/models/httpvalidationerror.py: - id: ca155413681b - last_write_checksum: sha1:9dea33d9c74bbdf842ee9d157e4aaa05c36ae34a - pristine_git_object: 40bccddc4d0c0e761d70af713387561101e20b60 src/mistralai/azure/client/models/imagedetail.py: id: de211988043d last_write_checksum: 
sha1:812f2ec4fc0d8d13db643ed49192384d5a841aa4 pristine_git_object: 2d074cee614e1c49b69ee4073c3aaaa7a5a2c9e2 src/mistralai/azure/client/models/imageurl.py: id: c8882341c798 - last_write_checksum: sha1:443ee3739b3801928b4f3d4256531078fc4045e8 - pristine_git_object: b3c705e3f261ebd59f40e46785577694d80f98bf + last_write_checksum: sha1:8c3c08cc5d33c66b12539270b7edbf157d936f86 + pristine_git_object: bcb4fe43d334752be501d694543250d7e632a9c7 src/mistralai/azure/client/models/imageurlchunk.py: id: b6f0abb574d7 - last_write_checksum: sha1:4651f12f779bc86874c8516f06e39b882e414c92 - pristine_git_object: ee6de50f2add830c19d0b8b030a7c7a2ab65cb11 + last_write_checksum: sha1:417618d9d2aba85386a100dfe818d13342830526 + pristine_git_object: 7213c49846a4107271d017dd695648d98c2efa94 src/mistralai/azure/client/models/jsonschema.py: id: bfd486f4bb18 - last_write_checksum: sha1:ffe7190393086a4301aaffa6854cb3d80b0db92f - pristine_git_object: 5aaa490af350ac1c436dafb3d3c73d56402cac11 - src/mistralai/azure/client/models/mistralazureerror.py: - id: 31ed29254e67 - last_write_checksum: sha1:25f4411c7411faad753d46118edf74828b1c9f7c - pristine_git_object: c5bf17528c7cf25bac8f8874f58692c601fcdd76 + last_write_checksum: sha1:ccb2b53bd2351ec5119d9a7914a1a42c2746a096 + pristine_git_object: 99f2fb8903562465687edfd300d8efd373b92247 src/mistralai/azure/client/models/mistralpromptmode.py: id: d0028b1e4129 last_write_checksum: sha1:46fe1ab8ac2d5867877368a59a4aa5be2fabadeb pristine_git_object: 26e7adbdc4a981c92d51b72542c966b0ba0fb8f8 - src/mistralai/azure/client/models/no_response_error.py: - id: a956d6cd06f0 - last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f - pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 src/mistralai/azure/client/models/ocrimageobject.py: id: 9c9f987d94bb - last_write_checksum: sha1:b86f5187d1c425ddf27ed4815657a7c41d71855c - pristine_git_object: 38e9d3e48df5cee8cdd0cd1d7b6df62182814104 + last_write_checksum: sha1:423effee97a4120a26ba78c2abe7f6adeb5c733d 
+ pristine_git_object: a23515b346a0f9517fec0b2381e1b0c04cb31816 src/mistralai/azure/client/models/ocrpagedimensions.py: id: 7669a25f32b3 last_write_checksum: sha1:60642db6bb61f0e96204fb78d3aa0bd80dd0a7e5 pristine_git_object: 12858da92de99aa6da9d6e148df3ba7ee37496c7 src/mistralai/azure/client/models/ocrpageobject.py: id: eea193b05126 - last_write_checksum: sha1:baada584537b75e2e184738424068e61afe263c7 - pristine_git_object: 5fb821c19fd3cca2c2e149bd058a7ca49d2d002b + last_write_checksum: sha1:b8370ac0611dc3eccf09dddf85d1c39d3a11224b + pristine_git_object: 434c8988f124f93180e6cefa15b3aee067937946 src/mistralai/azure/client/models/ocrrequest.py: id: 365a5b4776a2 - last_write_checksum: sha1:9d3a9bccd341219934470688d3818557231b9b62 - pristine_git_object: fece2713166fc943194b7b38ec9b82db295bba0a + last_write_checksum: sha1:e684da1b6db18cb9c5ce95b9cc58556e05a9ea9b + pristine_git_object: a2cd341593c9db3644076d39352abca6815efc56 src/mistralai/azure/client/models/ocrresponse.py: id: b8cde8c16a4c - last_write_checksum: sha1:e6f08c68f0388919ca7bcbc4f0cb134525053fcd - pristine_git_object: 787289fa995ba6cbf4b2ef3d3c41edb31f656674 + last_write_checksum: sha1:55e81631f6fe57aaf58178460e1c5fc69fa19377 + pristine_git_object: 3dc09fd770a064e69e84519bd0f0c9127ebd8176 src/mistralai/azure/client/models/ocrtableobject.py: id: c2cd51b8789e - last_write_checksum: sha1:11052d42f0d91916f038437923ea656bf882032c - pristine_git_object: 3e3c25830a3216f4ef325f5b1056a0c1a267b090 + last_write_checksum: sha1:86a8fd2241cf6a636e81e58484a90bdb7880085e + pristine_git_object: f1de5428a71f9d42cd9f9e764d0bbf88f3aad8cc src/mistralai/azure/client/models/ocrusageinfo.py: id: 5e9118cac468 - last_write_checksum: sha1:6b27c09b5ec447c6ede22aa75190a1e06353349c - pristine_git_object: e2ceba35eb3f6e148389a7fd466dea5c051480a4 + last_write_checksum: sha1:97887b58cfe6ebd9ebd5905c6c7485525d6dc788 + pristine_git_object: f63315d23a1659aee4333b45c4239861aa5220d7 src/mistralai/azure/client/models/prediction.py: id: 
bd6abfa93083 - last_write_checksum: sha1:87eb3c43fa31b245c13c4708602b300956aa9efb - pristine_git_object: 6b8d6480b9ba1cb6683bdc93c24fb762ccfba146 + last_write_checksum: sha1:07d06d5629af183f999e043744a67868ef779bcc + pristine_git_object: 1fa1d78248628ccdc102ce0631d344150addfd2d src/mistralai/azure/client/models/referencechunk.py: id: c9612f854670 - last_write_checksum: sha1:b96507bcc82939fa4057532ef7e6a440baabd973 - pristine_git_object: e0bcb06be4d4c8d947ee267a9728aeae3a2c52fe + last_write_checksum: sha1:e81e758e00db915e68f58ffa1e03b2c473f64477 + pristine_git_object: f7af9bf9a73e0d782e5e6c6a7866af6fbc3668d8 src/mistralai/azure/client/models/responseformat.py: id: c124e7c316aa - last_write_checksum: sha1:f8c9e581053d1d885196c210a219a3e7aa086610 - pristine_git_object: 39fb03a25efdbc0a92ea91c72038ddd86ee056be + last_write_checksum: sha1:d368a2d4320356b6daab1dd0c62c6c862e902ca0 + pristine_git_object: 20fd2b868506cff278d1d7dc719eddd56ea538b0 src/mistralai/azure/client/models/responseformats.py: id: fef416cefcd4 last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 - src/mistralai/azure/client/models/responsevalidationerror.py: - id: afdb9463b434 - last_write_checksum: sha1:26f01befeb347a63928012e7eb36c95a8a392145 - pristine_git_object: cbdffcbba45a988805cdd52d111e77b0ca777dbf - src/mistralai/azure/client/models/sdkerror.py: - id: 4601c7297af7 - last_write_checksum: sha1:b54041f9751e1f2a38dd02a6f8eadb3907fa3df0 - pristine_git_object: a1e9aacaa2fcc839dcb2638788dd7c94298adee7 src/mistralai/azure/client/models/security.py: id: 4a2e4760ec08 last_write_checksum: sha1:0cd2ae54cecd88cfd8d43e92c0d3da7efa48942c pristine_git_object: 9b83ba98336090bed89fbeda40b4a07b212a1106 src/mistralai/azure/client/models/systemmessage.py: id: 8fa0dee9e4e1 - last_write_checksum: sha1:2b52c44b92a098b559ec8b7a80449532169cd317 - pristine_git_object: 38c280c809148e190e329619858718d132da6bc0 + last_write_checksum: 
sha1:26167db704ece6ef1391d6f474e00f417bff4639 + pristine_git_object: d4bd004476ef653798295fa5df9de68b607f0132 src/mistralai/azure/client/models/systemmessagecontentchunks.py: id: 5918e770869d - last_write_checksum: sha1:55529f2f29ba3087fbf117dbbe64e1dda92b2958 - pristine_git_object: 225f38b712f5f3c7abfd526cc8c0386687814f36 + last_write_checksum: sha1:d1f96498cbb540b91425e70ffa33892ff4d1c8cd + pristine_git_object: 8de71c909eda2ed0166a6be8f8ee029956e5766b src/mistralai/azure/client/models/textchunk.py: id: 9c81c76a6325 - last_write_checksum: sha1:d1c9eaffeb80299f023351dc8d07eb53e49133f2 - pristine_git_object: e513c1434cc7a4766bb9ef039ad8eed2bf0c12ca + last_write_checksum: sha1:28b8f4e030d365e5bf2f2f2720a7919b29616564 + pristine_git_object: 9295148588a143278ff5f48f9142347e35cfdab2 src/mistralai/azure/client/models/thinkchunk.py: id: df6bbd55b3eb - last_write_checksum: sha1:ec9af4cb7faa6ba8ed033b37db1d1d5a1406ac3f - pristine_git_object: e769399fe6ba90ddb2503f8fadb4b6cebc7d6f85 + last_write_checksum: sha1:752a81be169fdd7a6afc293cf090b2cd4d2b22c9 + pristine_git_object: 4e881aad3b11d43aecaab922fe55bf7b4076c42f src/mistralai/azure/client/models/tool.py: id: 4075ef72c086 - last_write_checksum: sha1:0c041eaa008ee1851e05bf90e57602c0338f362f - pristine_git_object: 169305bc4c538e88b1e0cf1120aa10e424118880 + last_write_checksum: sha1:4bef6d64b6426fdeff5031557c3c0e37f5c33b9a + pristine_git_object: 87329bdb73526120a3f63d48299114485a7fe038 src/mistralai/azure/client/models/toolcall.py: id: c65e6f79e539 - last_write_checksum: sha1:dd2290e019322e9df73b119e054a1d738eb5f3ba - pristine_git_object: a589b1b38ef4caaba2753f8335228bc16cd68961 + last_write_checksum: sha1:a3b36214b4533b79868630348762206a0e5ca26e + pristine_git_object: ada1ea65136fa58dce55f2857d895ea916bcd41f src/mistralai/azure/client/models/toolchoice.py: id: c25062b5de34 - last_write_checksum: sha1:db82f8d3f811461226cffbeacf2699103a5e0689 - pristine_git_object: 1f623222084f12eaa63f2cea656dc7da10b12a3a + last_write_checksum: 
sha1:6212c9366eb3b4f4062c86c00d4502dd03bf5ce1 + pristine_git_object: ddb9e1417c880c44a7f0505bfde839570fa3cd4a src/mistralai/azure/client/models/toolchoiceenum.py: id: cc06ba3a8d21 last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 src/mistralai/azure/client/models/toolmessage.py: id: 84ac736fa955 - last_write_checksum: sha1:11841bba4b66179321a35ea1a4d4d3571fa997b7 - pristine_git_object: a73fd6bf8355043f1b40caf7e8b9ded90c1fda0f + last_write_checksum: sha1:e4ed14906985fe74fd76a9adb09125ebc1218a1f + pristine_git_object: 670210de0d05b52ee9dffbbb808a87e67c2d37a9 src/mistralai/azure/client/models/tooltypes.py: id: fa881b046d34 last_write_checksum: sha1:cd28ddc02fff9a5abbb59c82fe9e0dcbdb9b6d2a pristine_git_object: 1cce7446f2772b998208ea1c78c7969e3881d5d0 src/mistralai/azure/client/models/usageinfo.py: id: 3edc9c81b329 - last_write_checksum: sha1:0b2117285b13d846a25c6c59436c4628b9d84a03 - pristine_git_object: 19a6b09fb63a3732719c45f8dfca92cfc2c57353 + last_write_checksum: sha1:0ac2350e4efa1ed3ffd7d33ac91c3ef564d1d773 + pristine_git_object: 0f04c87c97ff3148106408a46618c848b86c4b37 src/mistralai/azure/client/models/usermessage.py: id: 3796508adc07 - last_write_checksum: sha1:f4baa9d8b8f99f715873cea83191baf055c3296a - pristine_git_object: 96439c64a979ac3edf8900d39154d706846a3a95 + last_write_checksum: sha1:8eb35fb07971d74cf2cb0858c037558f52df6aa9 + pristine_git_object: 549b01ca887651a95c5efc8aff3372d32dfdc277 src/mistralai/azure/client/models/validationerror.py: id: f2b84813e2ea last_write_checksum: sha1:f0f9706a5af2ac4f6b234e768fdd492bbdd8a18c pristine_git_object: 817ecf7a56470369ccacd0f5e0bb739656a5f92c src/mistralai/azure/client/ocr.py: id: 5817c10c9297 - last_write_checksum: sha1:24fec22877024154ea417e31ea443b4795c443ba - pristine_git_object: 098e764b6580e35ad0e81242ca601ce821656ee9 + last_write_checksum: sha1:7666ca9f4596cee080952b2f4096bd4176051680 + pristine_git_object: 
b9270f6a52406d8a9bf02d90c24ae540da6dfb9d src/mistralai/azure/client/py.typed: id: e88369f116d2 last_write_checksum: sha1:8efc425ffe830805ffcc0f3055871bdcdc542c60 @@ -594,8 +598,8 @@ trackedFiles: pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee src/mistralai/azure/client/utils/__init__.py: id: 26f1a707325b - last_write_checksum: sha1:887f56a717845fab7445cc368d2a17d850c3565a - pristine_git_object: 05f26ade57efb8c54a774fbcb939fb1a7dc655ce + last_write_checksum: sha1:3ad22a588864c93bd3a16605f669955b5f3b8053 + pristine_git_object: b488c2df1390b22be3050eee72832a91c76d5385 src/mistralai/azure/client/utils/annotations.py: id: bb1f6c189fdb last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc @@ -604,18 +608,22 @@ trackedFiles: id: 2b7db09ee0ab last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai/azure/client/utils/dynamic_imports.py: + id: 0ac779c122d9 + last_write_checksum: sha1:a1940c63feb8eddfd8026de53384baf5056d5dcc + pristine_git_object: 673edf82a97d0fea7295625d3e092ea369a36b79 src/mistralai/azure/client/utils/enums.py: id: ffbdb1917a68 last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 src/mistralai/azure/client/utils/eventstreaming.py: id: bdc37b70360c - last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b - pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + last_write_checksum: sha1:ffa870a25a7e4e2015bfd7a467ccd3aa1de97f0e + pristine_git_object: f2052fc22d9fd6c663ba3dce019fe234ca37108b src/mistralai/azure/client/utils/forms.py: id: 51696122c557 - last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 - pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + last_write_checksum: sha1:0ca31459b99f761fcc6d0557a0a38daac4ad50f4 + pristine_git_object: 1e550bd5c2c35d977ddc10f49d77c23cb12c158d 
src/mistralai/azure/client/utils/headers.py: id: e42840c8cb13 last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 @@ -638,20 +646,24 @@ trackedFiles: pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c src/mistralai/azure/client/utils/retries.py: id: d50ed6e400b2 - last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 - pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + last_write_checksum: sha1:471372f5c5d1dd5583239c9cf3c75f1b636e5d87 + pristine_git_object: af07d4e941007af4213c5ec9047ef8a2fca04e5e src/mistralai/azure/client/utils/security.py: id: 1d35741ce5f1 - last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 - pristine_git_object: 295a3f40031dbb40073ad227fd4a355660f97ab2 + last_write_checksum: sha1:435dd8b180cefcd733e635b9fa45512da091d9c0 + pristine_git_object: 17996bd54b8624009802fbbdf30bcb4225b8dfed src/mistralai/azure/client/utils/serializers.py: id: a1f26d73c3ad last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + src/mistralai/azure/client/utils/unions.py: + id: 9abcc9913e3f + last_write_checksum: sha1:6e38049f323e0b5fb4bd0e88ab51ec447197ccb0 + pristine_git_object: a227f4e87be22fce682fcae5813b71835199ec5e src/mistralai/azure/client/utils/unmarshal_json_response.py: id: 947f4fc4db62 - last_write_checksum: sha1:99bd357d24d2236e3974630d9bd18bae22610cbc - pristine_git_object: 5317ac87097ccb35628202cf7fc5cb21e186855f + last_write_checksum: sha1:75931131ff498a66a48cfb32dd9d5d61f2c9b4d1 + pristine_git_object: fe0c9b8ecabf8f89e363a050837582df40d67fb4 src/mistralai/azure/client/utils/url.py: id: 4976c88d0e3b last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 diff --git a/packages/azure/.speakeasy/gen.yaml b/packages/azure/.speakeasy/gen.yaml index 729cdfcf..0b7262e0 100644 --- a/packages/azure/.speakeasy/gen.yaml +++ b/packages/azure/.speakeasy/gen.yaml @@ -13,8 +13,9 @@ generation: 
requestResponseComponentNamesFeb2024: true securityFeb2025: true sharedErrorComponentsApr2025: true - methodSignaturesApr2024: true sharedNestedComponentsJan2026: true + nameOverrideFeb2026: true + methodSignaturesApr2024: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -22,31 +23,37 @@ generation: schemas: allOfMergeStrategy: shallowMerge requestBodyFieldName: "" + versioningStrategy: automatic persistentEdits: {} tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0a4 + version: 2.0.0-a4.1 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 + main: {} allowedRedefinedBuiltins: - id - object + - input + - dir asyncMode: both authors: - Mistral baseErrorName: MistralAzureError clientServerStatusCodesAsErrors: true - constFieldCasing: upper + constFieldCasing: normal defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in Azure. enableCustomCodeRegions: false enumFormat: union fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true responseRequiredSep2024: true flatAdditionalProperties: true flattenGlobalSecurity: true @@ -58,17 +65,17 @@ python: option: openapi paths: callbacks: "" - errors: "" + errors: errors operations: "" shared: "" webhooks: "" inferUnionDiscriminators: true inputModelSuffix: input license: "" - maxMethodParams: 15 + maxMethodParams: 999 methodArguments: infer-optional-args moduleName: mistralai.azure.client - multipartArrayFormat: legacy + multipartArrayFormat: standard outputModelSuffix: output packageManager: uv packageName: mistralai-azure @@ -78,3 +85,4 @@ python: responseFormat: flat sseFlatResponse: false templateVersion: v2 + useAsyncHooks: false diff --git a/packages/azure/docs/models/httpvalidationerror.md b/packages/azure/docs/errors/httpvalidationerror.md similarity index 100% rename from packages/azure/docs/models/httpvalidationerror.md rename to 
packages/azure/docs/errors/httpvalidationerror.md diff --git a/packages/azure/docs/models/chatcompletionrequest.md b/packages/azure/docs/models/chatcompletionrequest.md index 3b0f7270..f8715cd0 100644 --- a/packages/azure/docs/models/chatcompletionrequest.md +++ b/packages/azure/docs/models/chatcompletionrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/packages/azure/docs/models/chatcompletionstreamrequest.md b/packages/azure/docs/models/chatcompletionstreamrequest.md index f78156a6..cc82a8c7 100644 --- a/packages/azure/docs/models/chatcompletionstreamrequest.md +++ b/packages/azure/docs/models/chatcompletionstreamrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. 
| | diff --git a/packages/azure/docs/models/ocrrequest.md b/packages/azure/docs/models/ocrrequest.md index 87929e53..2d26c19f 100644 --- a/packages/azure/docs/models/ocrrequest.md +++ b/packages/azure/docs/models/ocrrequest.md @@ -3,18 +3,18 @@ ## Fields -| Field | Type | Required | Description | Example | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | -| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | -| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | -| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. 
Starts from 0 | | -| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | -| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | -| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | -| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | {
"type": "text"
} | -| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | {
"type": "text"
} | -| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | -| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | -| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | -| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file +| Field | Type | Required | Description | Example | +| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `model` | *Nullable[str]* | :heavy_check_mark: | N/A | | +| `id` | *Optional[str]* | :heavy_minus_sign: | N/A | | +| `document` | [models.Document](../models/document.md) | :heavy_check_mark: | Document to run OCR on | | +| `pages` | List[*int*] | :heavy_minus_sign: | Specific pages user wants to process in various formats: single number, range, or list of both. Starts from 0 | | +| `include_image_base64` | *OptionalNullable[bool]* | :heavy_minus_sign: | Include image URLs in response | | +| `image_limit` | *OptionalNullable[int]* | :heavy_minus_sign: | Max images to extract | | +| `image_min_size` | *OptionalNullable[int]* | :heavy_minus_sign: | Minimum height and width of image to extract | | +| `bbox_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from each extracted bounding box / image from document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_format` | [OptionalNullable[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Structured output class for extracting useful information from the entire document. Only json_schema is valid for this field | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | +| `document_annotation_prompt` | *OptionalNullable[str]* | :heavy_minus_sign: | Optional prompt to guide the model in extracting structured output from the entire document. A document_annotation_format must be provided. | | +| `table_format` | [OptionalNullable[models.TableFormat]](../models/tableformat.md) | :heavy_minus_sign: | N/A | | +| `extract_header` | *Optional[bool]* | :heavy_minus_sign: | N/A | | +| `extract_footer` | *Optional[bool]* | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/packages/azure/pylintrc b/packages/azure/pylintrc index a8fcb932..0391ac11 100644 --- a/packages/azure/pylintrc +++ b/packages/azure/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.10 # Discover python modules and packages in the file system subtree. recursive=no @@ -459,7 +459,8 @@ disable=raw-checker-failed, consider-using-with, wildcard-import, unused-wildcard-import, - too-many-return-statements + too-many-return-statements, + redefined-builtin # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option @@ -641,7 +642,7 @@ additional-builtins= allow-global-unused-variables=yes # List of names allowed to shadow builtins -allowed-redefined-builtins=id,object +allowed-redefined-builtins=id,object,input,dir # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. 
diff --git a/packages/azure/src/mistralai/azure/client/__init__.py b/packages/azure/src/mistralai/azure/client/__init__.py index dd02e42e..833c68cd 100644 --- a/packages/azure/src/mistralai/azure/client/__init__.py +++ b/packages/azure/src/mistralai/azure/client/__init__.py @@ -9,7 +9,6 @@ ) from .sdk import * from .sdkconfiguration import * -from .models import * VERSION: str = __version__ diff --git a/packages/azure/src/mistralai/azure/client/_version.py b/packages/azure/src/mistralai/azure/client/_version.py index 4448d2a0..4f985cc6 100644 --- a/packages/azure/src/mistralai/azure/client/_version.py +++ b/packages/azure/src/mistralai/azure/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-azure" -__version__: str = "2.0.0a4" +__version__: str = "2.0.0-a4.1" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 2.0.0a4 2.794.1 1.0.0 mistralai-azure" +__gen_version__: str = "2.841.0" +__user_agent__: str = "speakeasy-sdk/python 2.0.0-a4.1 2.841.0 1.0.0 mistralai-azure" try: if __package__ is not None: diff --git a/packages/azure/src/mistralai/azure/client/basesdk.py b/packages/azure/src/mistralai/azure/client/basesdk.py index b0391ac0..0d4d9a44 100644 --- a/packages/azure/src/mistralai/azure/client/basesdk.py +++ b/packages/azure/src/mistralai/azure/client/basesdk.py @@ -2,7 +2,7 @@ from .sdkconfiguration import SDKConfiguration import httpx -from mistralai.azure.client import models, utils +from mistralai.azure.client import errors, utils from mistralai.azure.client._hooks import ( AfterErrorContext, AfterSuccessContext, @@ -12,6 +12,7 @@ RetryConfig, SerializedRequestBody, get_body_content, + run_sync_in_thread, ) from typing import Callable, List, Mapping, Optional, Tuple from urllib.parse import parse_qs, urlparse @@ -264,7 +265,7 @@ def do(): if http_res is None: logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response 
received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -285,7 +286,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -315,7 +316,10 @@ async def do_request_async( async def do(): http_res = None try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + req = await run_sync_in_thread( + hooks.before_request, BeforeRequestContext(hook_ctx), request + ) + logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -329,14 +333,17 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + _, e = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), None, e + ) + if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -347,9 +354,10 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None + result, err = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), http_res, None ) + if err is not None: logger.debug("Request Exception", exc_info=True) raise err @@ -357,7 +365,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -369,6 +377,8 @@ async def do(): http_res 
= await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + http_res = await run_sync_in_thread( + hooks.after_success, AfterSuccessContext(hook_ctx), http_res + ) return http_res diff --git a/packages/azure/src/mistralai/azure/client/chat.py b/packages/azure/src/mistralai/azure/client/chat.py index 3348bf47..1051f952 100644 --- a/packages/azure/src/mistralai/azure/client/chat.py +++ b/packages/azure/src/mistralai/azure/client/chat.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai.azure.client import models, utils +from mistralai.azure.client import errors, models, utils from mistralai.azure.client._hooks import HookContext from mistralai.azure.client.types import OptionalNullable, UNSET from mistralai.azure.client.utils import eventstreaming @@ -179,18 +179,18 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected 
response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -359,18 +359,18 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -530,17 +530,17 @@ def complete( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", 
http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -700,14 +700,14 @@ async def complete_async( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/azure/src/mistralai/azure/client/errors/__init__.py b/packages/azure/src/mistralai/azure/client/errors/__init__.py new file mode 100644 index 00000000..79e2712c --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/errors/__init__.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .mistralazureerror import MistralAzureError +from typing import Any, TYPE_CHECKING + +from mistralai.azure.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .no_response_error import NoResponseError + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + +__all__ = [ + "HTTPValidationError", + "HTTPValidationErrorData", + "MistralAzureError", + "NoResponseError", + "ResponseValidationError", + "SDKError", +] + +_dynamic_imports: dict[str, str] = { + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "NoResponseError": ".no_response_error", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py b/packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py similarity index 76% rename from packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py rename to packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py index 40bccddc..b4f2691e 100644 --- a/packages/azure/src/mistralai/azure/client/models/httpvalidationerror.py +++ b/packages/azure/src/mistralai/azure/client/errors/httpvalidationerror.py @@ -1,16 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .validationerror import ValidationError from dataclasses import dataclass, field import httpx -from mistralai.azure.client.models import MistralAzureError +from mistralai.azure.client.errors import MistralAzureError +from mistralai.azure.client.models import validationerror as models_validationerror from mistralai.azure.client.types import BaseModel from typing import List, Optional class HTTPValidationErrorData(BaseModel): - detail: Optional[List[ValidationError]] = None + detail: Optional[List[models_validationerror.ValidationError]] = None @dataclass(unsafe_hash=True) diff --git a/packages/azure/src/mistralai/azure/client/models/mistralazureerror.py b/packages/azure/src/mistralai/azure/client/errors/mistralazureerror.py similarity index 100% rename from packages/azure/src/mistralai/azure/client/models/mistralazureerror.py rename to packages/azure/src/mistralai/azure/client/errors/mistralazureerror.py diff --git a/packages/azure/src/mistralai/azure/client/models/no_response_error.py b/packages/azure/src/mistralai/azure/client/errors/no_response_error.py similarity index 100% rename from packages/azure/src/mistralai/azure/client/models/no_response_error.py rename to packages/azure/src/mistralai/azure/client/errors/no_response_error.py diff --git a/packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py b/packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py similarity index 92% rename from packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py rename to packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py index cbdffcbb..02397334 100644 --- a/packages/azure/src/mistralai/azure/client/models/responsevalidationerror.py +++ b/packages/azure/src/mistralai/azure/client/errors/responsevalidationerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses import dataclass -from mistralai.azure.client.models import 
MistralAzureError +from mistralai.azure.client.errors import MistralAzureError @dataclass(unsafe_hash=True) diff --git a/packages/azure/src/mistralai/azure/client/models/sdkerror.py b/packages/azure/src/mistralai/azure/client/errors/sdkerror.py similarity index 95% rename from packages/azure/src/mistralai/azure/client/models/sdkerror.py rename to packages/azure/src/mistralai/azure/client/errors/sdkerror.py index a1e9aaca..c4f3616c 100644 --- a/packages/azure/src/mistralai/azure/client/models/sdkerror.py +++ b/packages/azure/src/mistralai/azure/client/errors/sdkerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses import dataclass -from mistralai.azure.client.models import MistralAzureError +from mistralai.azure.client.errors import MistralAzureError MAX_MESSAGE_LEN = 10_000 diff --git a/packages/azure/src/mistralai/azure/client/models/__init__.py b/packages/azure/src/mistralai/azure/client/models/__init__.py index 51db6a38..908dda32 100644 --- a/packages/azure/src/mistralai/azure/client/models/__init__.py +++ b/packages/azure/src/mistralai/azure/client/models/__init__.py @@ -1,10 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .mistralazureerror import MistralAzureError -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING + +from mistralai.azure.client.utils.dynamic_imports import lazy_getattr, lazy_dir if TYPE_CHECKING: from .assistantmessage import ( @@ -49,7 +47,7 @@ CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict, ) - from .contentchunk import ContentChunk, ContentChunkTypedDict + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk from .deltamessage import ( DeltaMessage, DeltaMessageContent, @@ -66,7 +64,6 @@ FunctionCallTypedDict, ) from .functionname import FunctionName, FunctionNameTypedDict - from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData from .imagedetail import ImageDetail from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( @@ -77,7 +74,6 @@ ) from .jsonschema import JSONSchema, JSONSchemaTypedDict from .mistralpromptmode import MistralPromptMode - from .no_response_error import NoResponseError from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict @@ -95,8 +91,6 @@ from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats - from .responsevalidationerror import ResponseValidationError - from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( SystemMessage, @@ -189,8 +183,6 @@ "FunctionName", "FunctionNameTypedDict", "FunctionTypedDict", - "HTTPValidationError", - "HTTPValidationErrorData", "ImageDetail", "ImageURL", "ImageURLChunk", @@ -202,9 +194,7 @@ "JSONSchemaTypedDict", "Loc", "LocTypedDict", - "MistralAzureError", 
"MistralPromptMode", - "NoResponseError", "OCRImageObject", "OCRImageObjectTypedDict", "OCRPageDimensions", @@ -226,8 +216,6 @@ "ResponseFormat", "ResponseFormatTypedDict", "ResponseFormats", - "ResponseValidationError", - "SDKError", "Security", "SecurityTypedDict", "SystemMessage", @@ -255,6 +243,7 @@ "ToolMessageTypedDict", "ToolTypedDict", "ToolTypes", + "UnknownContentChunk", "UsageInfo", "UsageInfoTypedDict", "UserMessage", @@ -300,6 +289,7 @@ "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", "ContentChunk": ".contentchunk", "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", "DeltaMessage": ".deltamessage", "DeltaMessageContent": ".deltamessage", "DeltaMessageContentTypedDict": ".deltamessage", @@ -316,8 +306,6 @@ "FunctionCallTypedDict": ".functioncall", "FunctionName": ".functionname", "FunctionNameTypedDict": ".functionname", - "HTTPValidationError": ".httpvalidationerror", - "HTTPValidationErrorData": ".httpvalidationerror", "ImageDetail": ".imagedetail", "ImageURL": ".imageurl", "ImageURLTypedDict": ".imageurl", @@ -328,7 +316,6 @@ "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", "MistralPromptMode": ".mistralpromptmode", - "NoResponseError": ".no_response_error", "OCRImageObject": ".ocrimageobject", "OCRImageObjectTypedDict": ".ocrimageobject", "OCRPageDimensions": ".ocrpagedimensions", @@ -354,8 +341,6 @@ "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", "ResponseFormats": ".responseformats", - "ResponseValidationError": ".responsevalidationerror", - "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", "SystemMessage": ".systemmessage", @@ -395,39 +380,11 @@ } -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - 
break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - result = getattr(module, attr_name) - return result - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/azure/src/mistralai/azure/client/models/assistantmessage.py b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py index f5793f94..e9ae6e82 100644 --- a/packages/azure/src/mistralai/azure/client/models/assistantmessage.py +++ b/packages/azure/src/mistralai/azure/client/models/assistantmessage.py @@ -37,7 +37,7 @@ class AssistantMessageTypedDict(TypedDict): class AssistantMessage(BaseModel): - ROLE: Annotated[ + role: Annotated[ Annotated[ Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) ], @@ -53,30 +53,31 @@ class AssistantMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role", "content", "tool_calls", "prefix"] - nullable_fields = ["content", "tool_calls"] - null_default_fields = [] - + optional_fields = set(["role", "content", "tool_calls", "prefix"]) + nullable_fields = set(["content", "tool_calls"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = 
serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AssistantMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py index 92179095..edd0fdc7 100644 --- a/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionrequest.py @@ -170,57 +170,56 @@ class ChatCompletionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "model", - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "model", + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + 
"parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py b/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py index be21eed2..2edfbed9 100644 --- a/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py +++ b/packages/azure/src/mistralai/azure/client/models/chatcompletionstreamrequest.py @@ -168,57 +168,56 @@ class ChatCompletionStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "model", - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + 
optional_fields = set( + [ + "model", + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/completionchunk.py b/packages/azure/src/mistralai/azure/client/models/completionchunk.py index b94284b2..0e64bbc8 100644 --- a/packages/azure/src/mistralai/azure/client/models/completionchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/completionchunk.py @@ -6,7 +6,8 @@ CompletionResponseStreamChoiceTypedDict, ) from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import List, Optional from typing_extensions import NotRequired, TypedDict @@ -32,3 +33,19 @@ class 
CompletionChunk(BaseModel): created: Optional[int] = None usage: Optional[UsageInfo] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "created", "usage"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py b/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py index 2a4d053f..20a27140 100644 --- a/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py +++ b/packages/azure/src/mistralai/azure/client/models/completionresponsestreamchoice.py @@ -39,30 +39,14 @@ class CompletionResponseStreamChoice(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["finish_reason"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/contentchunk.py b/packages/azure/src/mistralai/azure/client/models/contentchunk.py index 0f09f767..17efcc7d 100644 --- a/packages/azure/src/mistralai/azure/client/models/contentchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/contentchunk.py @@ -4,9 +4,12 @@ from .imageurlchunk import 
ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.azure.client.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union +from functools import partial +from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union from typing_extensions import Annotated, TypeAliasType @@ -16,11 +19,32 @@ ) +class UnknownContentChunk(BaseModel): + r"""A ContentChunk variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONTENT_CHUNK_VARIANTS: dict[str, Any] = { + "image_url": ImageURLChunk, + "text": TextChunk, + "reference": ReferenceChunk, +} + + ContentChunk = Annotated[ - Union[ - Annotated[ImageURLChunk, Tag("image_url")], - Annotated[TextChunk, Tag("text")], - Annotated[ReferenceChunk, Tag("reference")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[ImageURLChunk, TextChunk, ReferenceChunk, UnknownContentChunk], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONTENT_CHUNK_VARIANTS, + unknown_cls=UnknownContentChunk, + union_name="ContentChunk", + ) + ), ] diff --git a/packages/azure/src/mistralai/azure/client/models/deltamessage.py b/packages/azure/src/mistralai/azure/client/models/deltamessage.py index 2c01feae..567e772f 100644 --- a/packages/azure/src/mistralai/azure/client/models/deltamessage.py +++ b/packages/azure/src/mistralai/azure/client/models/deltamessage.py @@ -40,30 +40,25 @@ class DeltaMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role", 
"content", "tool_calls"] - nullable_fields = ["role", "content", "tool_calls"] - null_default_fields = [] - + optional_fields = set(["role", "content", "tool_calls"]) + nullable_fields = set(["role", "content", "tool_calls"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py index 345bafc2..2dea8005 100644 --- a/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/documenturlchunk.py @@ -26,7 +26,7 @@ class DocumentURLChunkTypedDict(TypedDict): class DocumentURLChunk(BaseModel): document_url: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["document_url"]], AfterValidator(validate_const("document_url")), @@ -39,30 +39,31 @@ class DocumentURLChunk(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "document_name"] - nullable_fields = ["document_name"] - null_default_fields = [] - + optional_fields = set(["type", "document_name"]) + nullable_fields = set(["document_name"]) serialized = handler(self) - m = {} for n, f in 
type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m + + +try: + DocumentURLChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/filechunk.py b/packages/azure/src/mistralai/azure/client/models/filechunk.py index 829f03d8..6baa0cba 100644 --- a/packages/azure/src/mistralai/azure/client/models/filechunk.py +++ b/packages/azure/src/mistralai/azure/client/models/filechunk.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL from mistralai.azure.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, TypedDict @@ -17,7 +18,29 @@ class FileChunkTypedDict(TypedDict): class FileChunk(BaseModel): file_id: str - TYPE: Annotated[ + type: Annotated[ Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], pydantic.Field(alias="type"), ] = "file" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + FileChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/function.py b/packages/azure/src/mistralai/azure/client/models/function.py index f4edce0f..055d3657 100644 --- a/packages/azure/src/mistralai/azure/client/models/function.py +++ b/packages/azure/src/mistralai/azure/client/models/function.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Any, Dict, Optional from typing_extensions import NotRequired, TypedDict @@ -21,3 +22,19 @@ class Function(BaseModel): description: Optional[str] = None strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/imageurl.py b/packages/azure/src/mistralai/azure/client/models/imageurl.py index b3c705e3..bcb4fe43 100644 --- a/packages/azure/src/mistralai/azure/client/models/imageurl.py +++ b/packages/azure/src/mistralai/azure/client/models/imageurl.py @@ -25,30 +25,25 @@ class ImageURL(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["detail"] - nullable_fields = ["detail"] - null_default_fields = [] - + optional_fields = set(["detail"]) + nullable_fields = set(["detail"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: 
disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py b/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py index ee6de50f..7213c498 100644 --- a/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/imageurlchunk.py @@ -2,9 +2,10 @@ from __future__ import annotations from .imageurl import ImageURL, ImageURLTypedDict -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL from mistralai.azure.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -30,9 +31,31 @@ class ImageURLChunk(BaseModel): image_url: ImageURLUnion - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["image_url"]], AfterValidator(validate_const("image_url")) ], pydantic.Field(alias="type"), ] = "image_url" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ImageURLChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/jsonschema.py b/packages/azure/src/mistralai/azure/client/models/jsonschema.py index 5aaa490a..99f2fb89 100644 --- a/packages/azure/src/mistralai/azure/client/models/jsonschema.py +++ b/packages/azure/src/mistralai/azure/client/models/jsonschema.py @@ -32,30 +32,31 @@ class 
JSONSchema(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - + optional_fields = set(["description", "strict"]) + nullable_fields = set(["description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + JSONSchema.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py b/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py index 38e9d3e4..a23515b3 100644 --- a/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrimageobject.py @@ -53,37 +53,34 @@ class OCRImageObject(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["image_base64", "image_annotation"] - nullable_fields = [ - "top_left_x", - "top_left_y", - "bottom_right_x", - "bottom_right_y", - "image_base64", - "image_annotation", - ] - null_default_fields = [] - + optional_fields = set(["image_base64", "image_annotation"]) + nullable_fields = set( + [ + "top_left_x", + "top_left_y", + "bottom_right_x", + 
"bottom_right_y", + "image_base64", + "image_annotation", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py b/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py index 5fb821c1..434c8988 100644 --- a/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrpageobject.py @@ -62,30 +62,25 @@ class OCRPageObject(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tables", "hyperlinks", "header", "footer"] - nullable_fields = ["header", "footer", "dimensions"] - null_default_fields = [] - + optional_fields = set(["tables", "hyperlinks", "header", "footer"]) + nullable_fields = set(["header", "footer", "dimensions"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] 
= val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrrequest.py b/packages/azure/src/mistralai/azure/client/models/ocrrequest.py index fece2713..a2cd3415 100644 --- a/packages/azure/src/mistralai/azure/client/models/ocrrequest.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrrequest.py @@ -95,52 +95,51 @@ class OCRRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "id", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - "extract_header", - "extract_footer", - ] - nullable_fields = [ - "model", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - ] - null_default_fields = [] - + optional_fields = set( + [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + "extract_header", + "extract_footer", + ] + ) + nullable_fields = set( + [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) 
- - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrresponse.py b/packages/azure/src/mistralai/azure/client/models/ocrresponse.py index 787289fa..3dc09fd7 100644 --- a/packages/azure/src/mistralai/azure/client/models/ocrresponse.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrresponse.py @@ -39,30 +39,25 @@ class OCRResponse(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["document_annotation"] - nullable_fields = ["document_annotation"] - null_default_fields = [] - + optional_fields = set(["document_annotation"]) + nullable_fields = set(["document_annotation"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != 
UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py index 3e3c2583..f1de5428 100644 --- a/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrtableobject.py @@ -35,3 +35,9 @@ class OCRTableObject(BaseModel): format_: Annotated[Format, pydantic.Field(alias="format")] r"""Format of the table""" + + +try: + OCRTableObject.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py index e2ceba35..f63315d2 100644 --- a/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py +++ b/packages/azure/src/mistralai/azure/client/models/ocrusageinfo.py @@ -28,30 +28,25 @@ class OCRUsageInfo(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["doc_size_bytes"] - nullable_fields = ["doc_size_bytes"] - null_default_fields = [] - + optional_fields = set(["doc_size_bytes"]) + nullable_fields = set(["doc_size_bytes"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if 
( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/prediction.py b/packages/azure/src/mistralai/azure/client/models/prediction.py index 6b8d6480..1fa1d782 100644 --- a/packages/azure/src/mistralai/azure/client/models/prediction.py +++ b/packages/azure/src/mistralai/azure/client/models/prediction.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL from mistralai.azure.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -19,7 +20,7 @@ class PredictionTypedDict(TypedDict): class Prediction(BaseModel): r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) ], @@ -27,3 +28,25 @@ class Prediction(BaseModel): ] = "content" content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "content"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/referencechunk.py b/packages/azure/src/mistralai/azure/client/models/referencechunk.py index e0bcb06b..f7af9bf9 
100644 --- a/packages/azure/src/mistralai/azure/client/models/referencechunk.py +++ b/packages/azure/src/mistralai/azure/client/models/referencechunk.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL from mistralai.azure.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional from typing_extensions import Annotated, TypedDict @@ -17,9 +18,31 @@ class ReferenceChunkTypedDict(TypedDict): class ReferenceChunk(BaseModel): reference_ids: List[int] - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["reference"]], AfterValidator(validate_const("reference")) ], pydantic.Field(alias="type"), ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/responseformat.py b/packages/azure/src/mistralai/azure/client/models/responseformat.py index 39fb03a2..20fd2b86 100644 --- a/packages/azure/src/mistralai/azure/client/models/responseformat.py +++ b/packages/azure/src/mistralai/azure/client/models/responseformat.py @@ -31,30 +31,25 @@ class ResponseFormat(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - + optional_fields = set(["type", 
"json_schema"]) + nullable_fields = set(["json_schema"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/azure/src/mistralai/azure/client/models/systemmessage.py b/packages/azure/src/mistralai/azure/client/models/systemmessage.py index 38c280c8..d4bd0044 100644 --- a/packages/azure/src/mistralai/azure/client/models/systemmessage.py +++ b/packages/azure/src/mistralai/azure/client/models/systemmessage.py @@ -32,7 +32,13 @@ class SystemMessageTypedDict(TypedDict): class SystemMessage(BaseModel): content: SystemMessageContent - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["system"], AfterValidator(validate_const("system"))], pydantic.Field(alias="role"), ] = "system" + + +try: + SystemMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py index 225f38b7..8de71c90 100644 --- a/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py +++ b/packages/azure/src/mistralai/azure/client/models/systemmessagecontentchunks.py @@ -15,5 +15,5 @@ SystemMessageContentChunks = Annotated[ - Union[TextChunk, 
ThinkChunk], Field(discriminator="TYPE") + Union[TextChunk, ThinkChunk], Field(discriminator="type") ] diff --git a/packages/azure/src/mistralai/azure/client/models/textchunk.py b/packages/azure/src/mistralai/azure/client/models/textchunk.py index e513c143..92951485 100644 --- a/packages/azure/src/mistralai/azure/client/models/textchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/textchunk.py @@ -17,7 +17,13 @@ class TextChunkTypedDict(TypedDict): class TextChunk(BaseModel): text: str - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["text"], AfterValidator(validate_const("text"))], pydantic.Field(alias="type"), ] = "text" + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/thinkchunk.py b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py index e769399f..4e881aad 100644 --- a/packages/azure/src/mistralai/azure/client/models/thinkchunk.py +++ b/packages/azure/src/mistralai/azure/client/models/thinkchunk.py @@ -3,9 +3,10 @@ from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL from mistralai.azure.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -29,10 +30,32 @@ class ThinkChunkTypedDict(TypedDict): class ThinkChunk(BaseModel): thinking: List[Thinking] - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], pydantic.Field(alias="type"), ] = "thinking" closed: Optional[bool] = None r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/tool.py b/packages/azure/src/mistralai/azure/client/models/tool.py index 169305bc..87329bdb 100644 --- a/packages/azure/src/mistralai/azure/client/models/tool.py +++ b/packages/azure/src/mistralai/azure/client/models/tool.py @@ -3,7 +3,8 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -17,3 +18,19 @@ class Tool(BaseModel): function: Function type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolcall.py b/packages/azure/src/mistralai/azure/client/models/toolcall.py index a589b1b3..ada1ea65 100644 --- a/packages/azure/src/mistralai/azure/client/models/toolcall.py +++ b/packages/azure/src/mistralai/azure/client/models/toolcall.py @@ -3,7 +3,8 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes 
-from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -23,3 +24,19 @@ class ToolCall(BaseModel): type: Optional[ToolTypes] = None index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolchoice.py b/packages/azure/src/mistralai/azure/client/models/toolchoice.py index 1f623222..ddb9e141 100644 --- a/packages/azure/src/mistralai/azure/client/models/toolchoice.py +++ b/packages/azure/src/mistralai/azure/client/models/toolchoice.py @@ -3,7 +3,8 @@ from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes -from mistralai.azure.client.types import BaseModel +from mistralai.azure.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -23,3 +24,19 @@ class ToolChoice(BaseModel): r"""this restriction of `Function` is used to select a specific function to call""" type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/azure/src/mistralai/azure/client/models/toolmessage.py 
b/packages/azure/src/mistralai/azure/client/models/toolmessage.py index a73fd6bf..670210de 100644 --- a/packages/azure/src/mistralai/azure/client/models/toolmessage.py +++ b/packages/azure/src/mistralai/azure/client/models/toolmessage.py @@ -35,7 +35,7 @@ class ToolMessageTypedDict(TypedDict): class ToolMessage(BaseModel): content: Nullable[ToolMessageContent] - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], pydantic.Field(alias="role"), ] = "tool" @@ -46,30 +46,31 @@ class ToolMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name"] - nullable_fields = ["content", "tool_call_id", "name"] - null_default_fields = [] - + optional_fields = set(["tool_call_id", "name"]) + nullable_fields = set(["content", "tool_call_id", "name"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/models/usageinfo.py b/packages/azure/src/mistralai/azure/client/models/usageinfo.py index 19a6b09f..0f04c87c 100644 --- a/packages/azure/src/mistralai/azure/client/models/usageinfo.py +++ 
b/packages/azure/src/mistralai/azure/client/models/usageinfo.py @@ -45,37 +45,34 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "prompt_audio_seconds", - ] - nullable_fields = ["prompt_audio_seconds"] - null_default_fields = [] - + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + ) + nullable_fields = set(["prompt_audio_seconds"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v diff --git a/packages/azure/src/mistralai/azure/client/models/usermessage.py b/packages/azure/src/mistralai/azure/client/models/usermessage.py index 96439c64..549b01ca 100644 --- a/packages/azure/src/mistralai/azure/client/models/usermessage.py +++ b/packages/azure/src/mistralai/azure/client/models/usermessage.py @@ -27,37 +27,27 @@ class UserMessageTypedDict(TypedDict): class UserMessage(BaseModel): content: Nullable[UserMessageContent] - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["user"], AfterValidator(validate_const("user"))], pydantic.Field(alias="role"), ] = "user" 
@model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["content"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m + + +try: + UserMessage.model_rebuild() +except NameError: + pass diff --git a/packages/azure/src/mistralai/azure/client/ocr.py b/packages/azure/src/mistralai/azure/client/ocr.py index 098e764b..b9270f6a 100644 --- a/packages/azure/src/mistralai/azure/client/ocr.py +++ b/packages/azure/src/mistralai/azure/client/ocr.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai.azure.client import models, utils +from mistralai.azure.client import errors, models, utils from mistralai.azure.client._hooks import HookContext from mistralai.azure.client.types import Nullable, OptionalNullable, UNSET from mistralai.azure.client.utils.unmarshal_json_response import unmarshal_json_response @@ -130,17 +130,17 @@ def process( return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def process_async( self, @@ -263,14 +263,14 @@ async def process_async( return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, 
http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/azure/src/mistralai/azure/client/utils/__init__.py b/packages/azure/src/mistralai/azure/client/utils/__init__.py index 05f26ade..b488c2df 100644 --- a/packages/azure/src/mistralai/azure/client/utils/__init__.py +++ b/packages/azure/src/mistralai/azure/client/utils/__init__.py @@ -1,14 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING, Callable, TypeVar +import asyncio + +from .dynamic_imports import lazy_getattr, lazy_dir + +_T = TypeVar("_T") + + +async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T: + """Run a synchronous function in a thread pool to avoid blocking the event loop.""" + return await asyncio.to_thread(func, *args) + if TYPE_CHECKING: from .annotations import get_discriminator from .datetimes import parse_datetime from .enums import OpenEnumMeta + from .unions import parse_open_union from .headers import get_headers, get_response_headers from .metadata import ( FieldMetadata, @@ -76,6 +85,7 @@ "match_response", "MultipartFormMetadata", "OpenEnumMeta", + "parse_open_union", "PathParamMetadata", "QueryParamMetadata", "remove_suffix", @@ -128,6 +138,7 @@ "match_response": ".values", "MultipartFormMetadata": ".metadata", "OpenEnumMeta": ".enums", + "parse_open_union": ".unions", "PathParamMetadata": ".metadata", "QueryParamMetadata": ".metadata", 
"remove_suffix": ".url", @@ -157,38 +168,11 @@ } -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - return getattr(module, attr_name) - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py b/packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py new file mode 100644 index 00000000..673edf82 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/dynamic_imports.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from importlib import import_module +import builtins +import sys + + +def dynamic_import(package, modname, retries=3): + """Import a module relative to package, retrying on KeyError from half-initialized modules.""" + for attempt in range(retries): + try: + return import_module(modname, package) + except KeyError: + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None): + """Module-level __getattr__ that lazily loads from a dynamic_imports mapping. + + Args: + attr_name: The attribute being looked up. + package: The caller's __package__ (for relative imports). + dynamic_imports: Dict mapping attribute names to relative module paths. + sub_packages: Optional list of subpackage names to lazy-load. + """ + module_name = dynamic_imports.get(attr_name) + if module_name is not None: + try: + module = dynamic_import(package, module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + if sub_packages and attr_name in sub_packages: + return import_module(f".{attr_name}", package) + + raise AttributeError(f"module '{package}' has no attribute '{attr_name}'") + + +def lazy_dir(*, dynamic_imports, sub_packages=None): + """Module-level __dir__ that lists lazily-loadable attributes.""" + lazy_attrs = builtins.list(dynamic_imports.keys()) + if sub_packages: + lazy_attrs.extend(sub_packages) + return builtins.sorted(lazy_attrs) diff --git a/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py b/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py index 0969899b..f2052fc2 100644 --- 
a/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py +++ b/packages/azure/src/mistralai/azure/client/utils/eventstreaming.py @@ -2,7 +2,9 @@ import re import json +from dataclasses import dataclass, asdict from typing import ( + Any, Callable, Generic, TypeVar, @@ -22,6 +24,7 @@ class EventStream(Generic[T]): client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] + _closed: bool def __init__( self, @@ -33,17 +36,21 @@ def __init__( self.response = response self.generator = stream_events(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __iter__(self): return self def __next__(self): + if self._closed: + raise StopIteration return next(self.generator) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): + self._closed = True self.response.close() @@ -53,6 +60,7 @@ class EventStreamAsync(Generic[T]): client_ref: Optional[object] response: httpx.Response generator: AsyncGenerator[T, None] + _closed: bool def __init__( self, @@ -64,33 +72,45 @@ def __init__( self.response = response self.generator = stream_events_async(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __aiter__(self): return self async def __anext__(self): + if self._closed: + raise StopAsyncIteration return await self.generator.__anext__() async def __aenter__(self): return self async def __aexit__(self, exc_type, exc_val, exc_tb): + self._closed = True await self.response.aclose() +@dataclass class ServerEvent: id: Optional[str] = None event: Optional[str] = None - data: Optional[str] = None + data: Any = None retry: Optional[int] = None MESSAGE_BOUNDARIES = [ b"\r\n\r\n", - b"\n\n", + b"\r\n\r", + b"\r\n\n", + b"\r\r\n", + b"\n\r\n", b"\r\r", + b"\n\r", + b"\n\n", ] +UTF8_BOM = b"\xef\xbb\xbf" + async def stream_events_async( response: httpx.Response, @@ -99,14 +119,10 @@ async def stream_events_async( ) -> AsyncGenerator[T, None]: buffer = bytearray() 
position = 0 - discard = False + event_id: Optional[str] = None async for chunk in response.aiter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. - if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -121,15 +137,22 @@ async def stream_events_async( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + await response.aclose() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event @@ -141,14 +164,10 @@ def stream_events( ) -> Generator[T, None, None]: buffer = bytearray() position = 0 - discard = False + event_id: Optional[str] = None for chunk in response.iter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -163,22 +182,33 @@ def stream_events( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + response.close() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event def _parse_event( - raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None -) -> Tuple[Optional[T], bool]: + *, + raw: bytearray, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + event_id: Optional[str] = None, +) -> Tuple[Optional[T], bool, Optional[str]]: block = raw.decode() lines = re.split(r"\r?\n|\r", block) publish = False @@ -189,13 +219,16 @@ def _parse_event( continue delim = line.find(":") - if delim <= 0: + if delim == 0: continue - field = line[0:delim] - value = line[delim + 1 :] if delim < len(line) - 1 else "" - if len(value) and value[0] == " ": - value = value[1:] + field = line + value = "" + if delim > 0: + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] if field == "event": event.event = value @@ -204,37 +237,36 @@ def _parse_event( data += value + "\n" publish = True elif field == "id": - event.id = value publish = True + if "\x00" not in value: + event_id = value elif field == "retry": - event.retry = int(value) if value.isdigit() else None + if value.isdigit(): + event.retry = int(value) publish = True + event.id = event_id + if 
sentinel and data == f"{sentinel}\n": - return None, True + return None, True, event_id if data: data = data[:-1] - event.data = data - - data_is_primitive = ( - data.isnumeric() or data == "true" or data == "false" or data == "null" - ) - data_is_json = ( - data.startswith("{") or data.startswith("[") or data.startswith('"') - ) - - if data_is_primitive or data_is_json: - try: - event.data = json.loads(data) - except Exception: - pass + try: + event.data = json.loads(data) + except json.JSONDecodeError: + event.data = data out = None if publish: - out = decoder(json.dumps(event.__dict__)) - - return out, False + out_dict = { + k: v + for k, v in asdict(event).items() + if v is not None or (k == "data" and data) + } + out = decoder(json.dumps(out_dict)) + + return out, False, event_id def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): diff --git a/packages/azure/src/mistralai/azure/client/utils/forms.py b/packages/azure/src/mistralai/azure/client/utils/forms.py index f961e76b..1e550bd5 100644 --- a/packages/azure/src/mistralai/azure/client/utils/forms.py +++ b/packages/azure/src/mistralai/azure/client/utils/forms.py @@ -142,7 +142,7 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files - array_field_name = f_name + "[]" + array_field_name = f_name for file_obj in val: if not _is_set(file_obj): continue @@ -185,7 +185,7 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - array_field_name = f_name + "[]" + array_field_name = f_name form[array_field_name] = values else: form[f_name] = _val_to_string(val) diff --git a/packages/azure/src/mistralai/azure/client/utils/retries.py b/packages/azure/src/mistralai/azure/client/utils/retries.py index 88a91b10..af07d4e9 100644 --- a/packages/azure/src/mistralai/azure/client/utils/retries.py +++ b/packages/azure/src/mistralai/azure/client/utils/retries.py @@ -144,12 +144,7 @@ def do_request() -> httpx.Response: if res.status_code 
== parsed_code: raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise @@ -193,12 +188,7 @@ async def do_request() -> httpx.Response: if res.status_code == parsed_code: raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise diff --git a/packages/azure/src/mistralai/azure/client/utils/security.py b/packages/azure/src/mistralai/azure/client/utils/security.py index 295a3f40..17996bd5 100644 --- a/packages/azure/src/mistralai/azure/client/utils/security.py +++ b/packages/azure/src/mistralai/azure/client/utils/security.py @@ -135,6 +135,8 @@ def _parse_security_scheme_value( elif scheme_type == "http": if sub_type == "bearer": headers[header_name] = _apply_bearer(value) + elif sub_type == "basic": + headers[header_name] = value elif sub_type == "custom": return else: diff --git a/packages/azure/src/mistralai/azure/client/utils/unions.py b/packages/azure/src/mistralai/azure/client/utils/unions.py new file mode 100644 index 00000000..a227f4e8 --- /dev/null +++ b/packages/azure/src/mistralai/azure/client/utils/unions.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. + + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. + """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py index 5317ac87..fe0c9b8e 100644 --- a/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py +++ b/packages/azure/src/mistralai/azure/client/utils/unmarshal_json_response.py @@ -5,7 +5,7 @@ import httpx from .serializers import unmarshal_json -from mistralai.azure.client import models +from mistralai.azure.client import errors T = TypeVar("T") @@ -30,7 +30,7 @@ def unmarshal_json_response( try: return unmarshal_json(body, typ) except Exception as e: - raise models.ResponseValidationError( + raise errors.ResponseValidationError( "Response validation failed", http_res, e, diff --git a/packages/gcp/.speakeasy/gen.lock b/packages/gcp/.speakeasy/gen.lock index 8ce6c5ea..517e1a85 100644 --- a/packages/gcp/.speakeasy/gen.lock +++ b/packages/gcp/.speakeasy/gen.lock @@ -3,46 +3,46 @@ id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: docChecksum: 
bc4a0ba9c38418d84a6a8a76b503977b docVersion: 1.0.0 - speakeasyVersion: 1.685.0 - generationVersion: 2.794.1 - releaseVersion: 2.0.0a4 - configChecksum: 95fb33ae488fa72fb4ba17c6b93551a9 + speakeasyVersion: 1.729.0 + generationVersion: 2.841.0 + releaseVersion: 2.0.0-a4.1 + configChecksum: bfe17061a2e5ac54039980ad7a48fd77 repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/gcp installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/gcp published: true persistentEdits: - generation_id: 5f09b925-b801-4bf0-bda9-6f9a3212c588 - pristine_commit_hash: 20c7ce96f6a097f98d3367b89a7bea09ba0ded7c - pristine_tree_hash: c30d519719cc0cd17d7bf53ae2c13b1d8b125c5e + generation_id: c7e2e696-b223-4993-a79b-2e6f15242c30 + pristine_commit_hash: 86953bc23bb7fcfc3c2525f79114411bc27e8f75 + pristine_tree_hash: 93675a8857b7519918499101d4a5e30fc7fe2c4a features: python: additionalDependencies: 1.0.0 additionalProperties: 1.0.1 configurableModuleName: 0.2.0 - constsAndDefaults: 1.0.5 - core: 5.23.18 + constsAndDefaults: 1.0.7 + core: 6.0.12 defaultEnabledRetries: 0.2.0 enumUnions: 0.1.0 envVarSecurityUsage: 0.3.2 examples: 3.0.2 flatRequests: 1.0.1 - globalSecurity: 3.0.4 + globalSecurity: 3.0.5 globalSecurityCallbacks: 1.0.0 globalSecurityFlattening: 1.0.0 globalServerURLs: 3.2.0 includes: 3.0.0 methodArguments: 1.0.2 - nameOverrides: 3.0.1 - nullables: 1.0.1 - openEnums: 1.0.2 - responseFormat: 1.0.1 - retries: 3.0.3 - sdkHooks: 1.2.0 - serverEvents: 1.0.11 + nameOverrides: 3.0.3 + nullables: 1.0.2 + openEnums: 1.0.4 + responseFormat: 1.1.0 + retries: 3.0.4 + sdkHooks: 1.2.1 + serverEvents: 1.0.13 serverEventsSentinels: 0.1.0 serverIDs: 3.0.0 - unions: 3.1.1 + unions: 3.1.4 trackedFiles: .gitattributes: id: 24139dae6567 @@ -52,6 +52,10 @@ trackedFiles: id: 89aa447020cd last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 
pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + docs/errors/httpvalidationerror.md: + id: 7fe2e5327e07 + last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e + pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/arguments.md: id: 7ea5e33709a7 last_write_checksum: sha1:09eea126210d7fd0353e60a76bf1dbed173f13ec @@ -74,8 +78,8 @@ trackedFiles: pristine_git_object: b2f15ecbe88328de95b4961ddb3940fd8a6ee64b docs/models/chatcompletionrequest.md: id: adffe90369d0 - last_write_checksum: sha1:2bf5152388f18436be4fe1c541b8d423dcae175c - pristine_git_object: 61a25d86e7dc292621f7f6c0f8909137a16b9112 + last_write_checksum: sha1:6374e05aeb66d48137d657acaa89527df2db35c6 + pristine_git_object: 8dbd4a82ad1d7725b9a6ce56daea208ca01b9210 docs/models/chatcompletionrequestmessage.md: id: 3f5e170d418c last_write_checksum: sha1:7921c5a508a9f88adc01caab34e26182b8035607 @@ -94,8 +98,8 @@ trackedFiles: pristine_git_object: a0465ffbfc5558628953e03fbc53b80bbdc8649b docs/models/chatcompletionstreamrequest.md: id: cf8f29558a68 - last_write_checksum: sha1:f30b2a7353e7406eb30af841a1a211ea5cb30cb0 - pristine_git_object: 3e790e7dc7143b0ae287ad2df14ae7e7a4085e3f + last_write_checksum: sha1:e23cf88a5a9b0c99e68d06a8450b8bfb9aee33a2 + pristine_git_object: db76b6c81a71607f94c212a542fe30e082053a90 docs/models/chatcompletionstreamrequestmessage.md: id: 053a98476cd2 last_write_checksum: sha1:8270692463fab1243d9de4bbef7162daa64e52c5 @@ -168,10 +172,6 @@ trackedFiles: id: 4b3bd62c0f26 last_write_checksum: sha1:754fe32bdffe53c1057b302702f5516f4e551cfb pristine_git_object: 87d7b4852de629015166605b273deb9341202dc0 - docs/models/httpvalidationerror.md: - id: a211c095f2ac - last_write_checksum: sha1:277a46811144643262651853dc6176d21b33573e - pristine_git_object: 712a148c3e2305dca4c702851865f9f8c8e674cc docs/models/imagedetail.md: id: f8217529b496 last_write_checksum: sha1:fdf19ac9459f64616240955cb81a84ef03e775c8 @@ -298,8 +298,8 @@ trackedFiles: pristine_git_object: 
3e38f1a929f7d6b1d6de74604aa87e3d8f010544 pylintrc: id: 7ce8b9f946e6 - last_write_checksum: sha1:6b615d49741eb9ae16375d3a499767783d1128a1 - pristine_git_object: a8fcb932ba2a01c5e96e3b04c59371e930b75558 + last_write_checksum: sha1:8f871a5aac4b10bff724c9d91b8d7496eb1fbdde + pristine_git_object: 0391ac11bdc5526b697b69d047d568a611ce87d0 scripts/prepare_readme.py: id: e0c5957a6035 last_write_checksum: sha1:eb988bc0e00ed4bb14e9a3572845af14f06c9b42 @@ -310,8 +310,8 @@ trackedFiles: pristine_git_object: c35748f360329c2bc370e9b189f49b1a360b2c48 src/mistralai/gcp/client/__init__.py: id: 4f63decd432e - last_write_checksum: sha1:36306d1d404b6aeb912d27f1d9c52f098ff7bf9b - pristine_git_object: dd02e42e4cc509dc90e6ae70493054021faa5f9c + last_write_checksum: sha1:da077c0bdfcef64a4a5aea91a17292f72fa2b088 + pristine_git_object: 833c68cd526fe34aab2b7e7c45f974f7f4b9e120 src/mistralai/gcp/client/_hooks/__init__.py: id: adcb191838d1 last_write_checksum: sha1:e3111289afd28ad557c21d9e2f918caabfb7037d @@ -326,84 +326,108 @@ trackedFiles: pristine_git_object: ea95bed210db9180824efddfb1b3e47f5bf96489 src/mistralai/gcp/client/_version.py: id: f87319e32c7b - last_write_checksum: sha1:8c07e6351bf2df8239b3c02db75ee469dba53394 - pristine_git_object: ba48dac120cadd3f586b38659dc04e50838daa11 + last_write_checksum: sha1:85dd6da1d6503d717e8c9bd6d62278b469d3b464 + pristine_git_object: 204c92a656855ad281e86a74467e71ae1b04639f src/mistralai/gcp/client/basesdk.py: id: 4d594572857b - last_write_checksum: sha1:45ed4b6078e01d52d1dcf4bdc5494b700f1a6cde - pristine_git_object: 6f9f5fd9a2cadc8893d6693c1d40a8114c0fdc2a + last_write_checksum: sha1:d8ef9e2f4fa97d402eb9f5472ceb80fb39693991 + pristine_git_object: b3edcb0aca1882d0cbe4d499cfba9cb5464c5b58 src/mistralai/gcp/client/chat.py: id: 4c41f05f786e - last_write_checksum: sha1:a4d5609f51dee25dfc34f83e1eda2888aa01dda6 - pristine_git_object: 78541248204cbd5b92b6d6d362924fcdada8a948 + last_write_checksum: sha1:60b2697e2ecfb62eebed910007e62ab1df565eec + 
pristine_git_object: 925d69eda2fdac458045cc12327ca72997e07600 + src/mistralai/gcp/client/errors/__init__.py: + id: c51c8ed21629 + last_write_checksum: sha1:29f08ad600a712ff572843a250839ef92efac19b + pristine_git_object: 00c8ee0031486b5416bb6745397c463e1a5dbba6 + src/mistralai/gcp/client/errors/httpvalidationerror.py: + id: b0e25f1c36bd + last_write_checksum: sha1:c863914ed6704ee6c3ad99a77d8b1e742de069d0 + pristine_git_object: 598068197b9ed7e7756de01325f7967a719e46ea + src/mistralai/gcp/client/errors/mistralgcperror.py: + id: 9a9cad8f5d36 + last_write_checksum: sha1:7267c829a842a94c5b84ac248a1610ce45f3db4e + pristine_git_object: 9de91bf2a4abf8b0d0922eb6062fe2ab817a8aee + src/mistralai/gcp/client/errors/no_response_error.py: + id: 2d3e5fe56122 + last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f + pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 + src/mistralai/gcp/client/errors/responsevalidationerror.py: + id: 98f7bac284be + last_write_checksum: sha1:1b835d2ce8754b22d5fa269077d7a2eec11d7f29 + pristine_git_object: e8bd83c19b0629bb0ddf7a240e9b8371cb33fff3 + src/mistralai/gcp/client/errors/sdkerror.py: + id: c53aee73c8e1 + last_write_checksum: sha1:080933e9f354b675988a132813f23e55f9e5db74 + pristine_git_object: 6980924626fa5fbf67fb62a30fd23d5883dbe650 src/mistralai/gcp/client/fim.py: id: 13d2d208e0ef - last_write_checksum: sha1:e6226c1720effd314afa7b9a21e5ec2347e5a74f - pristine_git_object: e2acacd58c28fa7ea718240b01a3714f7fc0b8f6 + last_write_checksum: sha1:1027165887446ce0764ad542ca52f61b460c71b8 + pristine_git_object: 4202102ae5218784a10ee93ada5a0643d23a1d0c src/mistralai/gcp/client/httpclient.py: id: a53dd7be6a4c last_write_checksum: sha1:5e55338d6ee9f01ab648cad4380201a8a3da7dd7 pristine_git_object: 89560b566073785535643e694c112bedbd3db13d src/mistralai/gcp/client/models/__init__.py: id: d9e976d01972 - last_write_checksum: sha1:f0554ff6b81286615330ffea947e619bc508bf19 - pristine_git_object: fb446c259f4ca1cc97ec64aac197f52b8224a096 + 
last_write_checksum: sha1:97ddfc7f70abd5e1a0b36be6dce209b69e9d5c73 + pristine_git_object: 575f64040c90152e74954b749ea89bce5a07e02e src/mistralai/gcp/client/models/assistantmessage.py: id: d39c4bdd289e - last_write_checksum: sha1:08fa98315561d5bb2c094bf57e7d66639b86e3ee - pristine_git_object: 7061775b3dbd9be0b978ff2a2cb07e52c01fc80a + last_write_checksum: sha1:c813783bcbeec4e40f12e007d1dde4aed8ec71cf + pristine_git_object: 702ac4708abb95fc18d138500b8353715c2dbc98 src/mistralai/gcp/client/models/chatcompletionchoice.py: id: 8e65b56f3e6d last_write_checksum: sha1:e6d1382e9f880b866130d900fd866997aaf80e45 pristine_git_object: ae5a2fbf38afbd86233dcaa8aa1c8441f5ed9eba src/mistralai/gcp/client/models/chatcompletionrequest.py: id: 4694a31c0003 - last_write_checksum: sha1:edb744ec2baca1f9ba6574662fffb36fb7d3faab - pristine_git_object: 1bc039221910bf88396c96affe735c8ac822920b + last_write_checksum: sha1:80fcbbcde773c22c93cf2db63beef2cfe3777497 + pristine_git_object: 8229c5bb13ded84039f3d8ddb95ac0a9c184e1bd src/mistralai/gcp/client/models/chatcompletionresponse.py: id: dd9e4796fca9 last_write_checksum: sha1:76d7257583389ff5021e320a8f9a45a6deb07c7c pristine_git_object: 317c4d84e378c14294d58c5aefd8c55ffe28754a src/mistralai/gcp/client/models/chatcompletionstreamrequest.py: id: 7294862af8ea - last_write_checksum: sha1:75d5bfcc204339b152dc78e33ac449c3aa9b5432 - pristine_git_object: 0a5a0021a4862e7b92a5c31679bf42bfa704d15b + last_write_checksum: sha1:899210f881bdbe0a0d94e29fe7044fabbccc578c + pristine_git_object: 3c228d2e7edf08c36f310e190a8dedc7b4958459 src/mistralai/gcp/client/models/completionchunk.py: id: 6b9ed8c30877 - last_write_checksum: sha1:4afc07c1824d81640f52a5c8bf89fde8893269b9 - pristine_git_object: 9e54cb6dfaccf7f815b40be585e11585cb5fef78 + last_write_checksum: sha1:f1f091e94e3c1c1aefd3c3bb60c8de8236ab0ead + pristine_git_object: a0b1ae2fa3109a2c2b76bbc483b691d88dc9a15c src/mistralai/gcp/client/models/completionevent.py: id: 3f55c4b8fc75 last_write_checksum: 
sha1:66665d921fd27df6ef0efce996a5446e49b989d8 pristine_git_object: bb1550093ce9adcb9bcd0548b69796e82f4f260b src/mistralai/gcp/client/models/completionresponsestreamchoice.py: id: ad9b98ca7e1c - last_write_checksum: sha1:04d195584fe4ea16544685e9989e5ae35205179a - pristine_git_object: 6f306721fbe47780c778833b80e97ab5d25d8367 + last_write_checksum: sha1:c4f9d733461bdb9a0d6c96e82212de7dddc04ffe + pristine_git_object: e58d4c88009ed3696d2a3a57f3796d8fb067019d src/mistralai/gcp/client/models/contentchunk.py: id: 8714d3bf2698 - last_write_checksum: sha1:347f43b4d7dcab18e09e6c3323f745a25ecfb04c - pristine_git_object: 1cd9e502ab7d4860daa79f907beafa71da086ab3 + last_write_checksum: sha1:acab1b53b1d324544c6aa6c4126a3fb5265278d2 + pristine_git_object: 18d481505e17d2125e380d796b0c406b0e66d601 src/mistralai/gcp/client/models/deltamessage.py: id: 404fc85f1a4c - last_write_checksum: sha1:3375624531d12279d225fb07a68e0396483b962f - pristine_git_object: 96923518438137cb729a69149b5b99be49836ad7 + last_write_checksum: sha1:982c2d15a570c7f4d5e1c3b012db46ea3bac609b + pristine_git_object: 63e6a7f3e50c138f235f5a36277aa8668f85cef1 src/mistralai/gcp/client/models/fimcompletionrequest.py: id: 5b79e2595d31 - last_write_checksum: sha1:cc4fa68c60a6a500a9887e47dd2e9220327c6226 - pristine_git_object: f37bbcc3cab020224531da898dd99cc175d49cd9 + last_write_checksum: sha1:80a2e3d5e10c240869cd96c41936d714cf8bf801 + pristine_git_object: e460f76c59315c22c75194936f1f3b232331f83c src/mistralai/gcp/client/models/fimcompletionresponse.py: id: 402f602d29b8 last_write_checksum: sha1:cfe26848c7b14d6e374b7944d7ad44df822990b0 pristine_git_object: 5b80da3f03e4e99dfca971a53af1cf6472c889bb src/mistralai/gcp/client/models/fimcompletionstreamrequest.py: id: 31190cf25070 - last_write_checksum: sha1:720f0a039a62cb508d513475a0e4bad45a9aa03c - pristine_git_object: 8e6102612998bde70d830bb0b8ee3a5e2a4dd01e + last_write_checksum: sha1:a95ab8c20b2fdff48102f08258a556af9f382ffa + pristine_git_object: 
fffc305499e578f77e42fb7992b59e933ae0ae7c src/mistralai/gcp/client/models/function.py: id: 2285a899b32e - last_write_checksum: sha1:a69ad9c8cd723e78a3949deefe43bcbf57426916 - pristine_git_object: 28577eff06d052aeb58c2795dd0a92ae4f2e7552 + last_write_checksum: sha1:6439f7f781174ae56b2b02ccbb4d02b08d8d5a03 + pristine_git_object: 439e831355444e0f9e82d23636651201f0db4bfc src/mistralai/gcp/client/models/functioncall.py: id: 17bb51f08e5f last_write_checksum: sha1:b5fe2f061ea5f47057ee50011babc80de27e0ee6 @@ -412,114 +436,94 @@ trackedFiles: id: 313a6001145f last_write_checksum: sha1:fe1eefaed314efa788bd15beb63bf6b81abb307e pristine_git_object: 585b9e39762e49356823e211ad86f701bca389b8 - src/mistralai/gcp/client/models/httpvalidationerror.py: - id: bdb67f678798 - last_write_checksum: sha1:58b6b7a2b2f8e4f66fc14c38540a26cfd2541a1e - pristine_git_object: 57df72607adc980b061d092f77140c6dbd36ecec src/mistralai/gcp/client/models/imagedetail.py: id: a28b2f3e2cb5 last_write_checksum: sha1:a4874529961952019eaa86a2fa0989626f537a4c pristine_git_object: 68ed76080716eb1424b13f182479f57e51a4fabf src/mistralai/gcp/client/models/imageurl.py: id: 4e330f3eae74 - last_write_checksum: sha1:3c5d70c0698b1b4b9c99087241227bab3dc0cdbf - pristine_git_object: d4f298f12d8095590cded5714091596b505c59b1 + last_write_checksum: sha1:6c0bee7d7c765fb2611131c7d270041671b428b8 + pristine_git_object: 903d0a1a45eeb7c5e8cde80f624b6e039de1f4cc src/mistralai/gcp/client/models/imageurlchunk.py: id: e68a4a393e9b - last_write_checksum: sha1:2eb2c8a205e5f8b320e2f597075cad9e5e27475b - pristine_git_object: fc5284c102c17a33c1ba6029c87515d509cd014b + last_write_checksum: sha1:eae1d0e69a90b2f7513492e4cd0ed68d647f0b5d + pristine_git_object: 4bec0eec882c1eeee8a80f663ff7d686ca677ea0 src/mistralai/gcp/client/models/jsonschema.py: id: 39c6e7d412a0 - last_write_checksum: sha1:29ba87457959588ff7d8188ae2382fb88740151d - pristine_git_object: 443c429dd1461d7a6817335626cd585577c5bffe - 
src/mistralai/gcp/client/models/mistralgcperror.py: - id: 278d296220ff - last_write_checksum: sha1:7267c829a842a94c5b84ac248a1610ce45f3db4e - pristine_git_object: 9de91bf2a4abf8b0d0922eb6062fe2ab817a8aee + last_write_checksum: sha1:19b34a5e3f5c00d1a1b96f91a6e02f5ad12240c7 + pristine_git_object: 684ac09f0460bef1f26bf0030b79bbc7141ab99b src/mistralai/gcp/client/models/mistralpromptmode.py: id: 8be4a4a683e4 last_write_checksum: sha1:c958567e95490abf3941fde69be69733e8afb90e pristine_git_object: c765e4f1a0b86735255771231377f13d62f3d7a6 - src/mistralai/gcp/client/models/no_response_error.py: - id: 2a7fa173594b - last_write_checksum: sha1:7f326424a7d5ae1bcd5c89a0d6b3dbda9138942f - pristine_git_object: 1deab64bc43e1e65bf3c412d326a4032ce342366 src/mistralai/gcp/client/models/prediction.py: id: 7a5463285bc8 - last_write_checksum: sha1:1d1e81082d1c2bfd613f0bc00f7173995ad67c0c - pristine_git_object: f53579edc665dd7fc1cc2497b0cd05b69e541cd8 + last_write_checksum: sha1:67c4a9b06d3e98552409a26960e0afd64f829b53 + pristine_git_object: 2e325289fd6c2a987ad270fd808f7b9a3f423440 src/mistralai/gcp/client/models/referencechunk.py: id: 523e477f8725 - last_write_checksum: sha1:d29c5fc1d8b6850fdeb3abc7f83185de92571b23 - pristine_git_object: 274ea7f7b142714d96040428fe7b87eeb48432cb + last_write_checksum: sha1:aade1dc05c2a2672630eb17626e4f49367d6bfe6 + pristine_git_object: 261c4755641093a38f97b17dce3a387623e69ead src/mistralai/gcp/client/models/responseformat.py: id: 06774bb65b42 - last_write_checksum: sha1:a52a60dc45c0b0939b99754d6c0c603ef2f737d3 - pristine_git_object: 34ae6b039a6c83c603fc6d47f6b2f233ec6c817a + last_write_checksum: sha1:7e64de46ef34718003cf0d198868a193f2122178 + pristine_git_object: f3aa9930e0f8a009dac628300d66c6209a538031 src/mistralai/gcp/client/models/responseformats.py: id: 18112ad0f6db last_write_checksum: sha1:a212e85d286b5b49219f57d071a2232ff8b5263b pristine_git_object: cbf83ce7b54ff8634f741334831807bfb5c98991 - 
src/mistralai/gcp/client/models/responsevalidationerror.py: - id: b90c1c09ac00 - last_write_checksum: sha1:e4321c1141ba7b1f6a8c217124e02ea0c70d9ad1 - pristine_git_object: 0e86ea6cb79fd4598d527dfef403ba66d435d3bb - src/mistralai/gcp/client/models/sdkerror.py: - id: a7cf4fa8974b - last_write_checksum: sha1:a3b60234deceb7fbcb57926c265e02e9fefc0835 - pristine_git_object: 00bc1d99353e7e2415d92c3e906c2c09712e5a64 src/mistralai/gcp/client/models/security.py: id: 7e13bda8273b last_write_checksum: sha1:7086e929823d4eefe80cc279b605adfc8bbb08aa pristine_git_object: 10a469b54d5e03873fb7d7d98627f2376c93d484 src/mistralai/gcp/client/models/systemmessage.py: id: 6537664d2d1b - last_write_checksum: sha1:e7f8dc73154c6985fcdbb77259df9bbc4745f976 - pristine_git_object: a7d695a7791eb5e97cd8f74e81c475c78e4b1a67 + last_write_checksum: sha1:779cb07cfd63ebe9eec496177cf1a8f5c077e417 + pristine_git_object: b3795c4bf4e97853979e0042cf4bd151d60ef974 src/mistralai/gcp/client/models/systemmessagecontentchunks.py: id: e120a6469c89 - last_write_checksum: sha1:55529f2f29ba3087fbf117dbbe64e1dda92b2958 - pristine_git_object: 225f38b712f5f3c7abfd526cc8c0386687814f36 + last_write_checksum: sha1:d1f96498cbb540b91425e70ffa33892ff4d1c8cd + pristine_git_object: 8de71c909eda2ed0166a6be8f8ee029956e5766b src/mistralai/gcp/client/models/textchunk.py: id: a134f120d4dc - last_write_checksum: sha1:9f46381e01f235560017ea80fbc85210eb625a99 - pristine_git_object: 77576c9fd87f0861bf6a3496aeae7e8bb8dc986a + last_write_checksum: sha1:1ccc7d232136d6278d670542d192f36f46862df1 + pristine_git_object: 690322725c0f852a005d08c5b722c41709868b22 src/mistralai/gcp/client/models/thinkchunk.py: id: 59a1d1ef2020 - last_write_checksum: sha1:9fcccb19d87bc41f771cae710eeb8f28c229070d - pristine_git_object: b65fffb21d5cb060acaa648a70e337a43595cd32 + last_write_checksum: sha1:066eeb10de301264e601a9ec64d21e1cc13b0c20 + pristine_git_object: 33ec83949499d99a28c55bb20429ab948bb5b1e8 src/mistralai/gcp/client/models/tool.py: id: 4b27d45e56ad - 
last_write_checksum: sha1:6d139575b740ea1f9f68a73b7bc2c95c30a10345 - pristine_git_object: d09c68542f2cb1f3bae0ffc7a7b163ad08a8e973 + last_write_checksum: sha1:cb0d879a55218fd7753bdd005be8a155982feb8f + pristine_git_object: 670aa81f8767e7c079105cf5995225168b4d6eb6 src/mistralai/gcp/client/models/toolcall.py: id: e6c25869a579 - last_write_checksum: sha1:5acf0eca8b1f4c459c6d8cadbbbd90605201ddc0 - pristine_git_object: a1edf3370426957980ff212367d56909ea8fa548 + last_write_checksum: sha1:f88e69a8e352025ca4b6897f6c16e1f7e4cd7264 + pristine_git_object: 3ea8e283c8f695bcc1fbc734b0074d37c2efeac8 src/mistralai/gcp/client/models/toolchoice.py: id: cb13a9f64c92 - last_write_checksum: sha1:3ad6b48b24b39609e86229193ad18d84b1b3c818 - pristine_git_object: de3828dac8bc23e32b9f9434adccc770b5ce1212 + last_write_checksum: sha1:71be72b1aae19aef1f8a461c89b71ad6daa009b7 + pristine_git_object: 6e795fd72792f740c8aa5b4da7d1f516018f2c2e src/mistralai/gcp/client/models/toolchoiceenum.py: id: d62e9c92d93c last_write_checksum: sha1:3dbba9a58c5569aafe115f3f7713a52b01ad8620 pristine_git_object: 01f6f677b379f9e3c99db9d1ad248cb0033a2804 src/mistralai/gcp/client/models/toolmessage.py: id: b3774786c2e9 - last_write_checksum: sha1:ef21eb555f41ec70010dbcea1a155af988936061 - pristine_git_object: 65b1d9d62d37361a06b3fd3ee1790eb3a976a94f + last_write_checksum: sha1:3d414da8132467d1472ebe485802ffc78eb6f7e4 + pristine_git_object: ce160391f37ce3568daf2877f8dc1aa0f3694821 src/mistralai/gcp/client/models/tooltypes.py: id: 5926c64f5229 last_write_checksum: sha1:ffd576511eed9f823c3d67df9fc5574d8d53c54b pristine_git_object: fd1aa13d7b8c5d9bdb0922e04b8bd653ff843f60 src/mistralai/gcp/client/models/usageinfo.py: id: 3aab1af66cff - last_write_checksum: sha1:47c6311bc1db47849a72c8e1bcc64dac9cec637e - pristine_git_object: 9b7207b10ea9d46d8216c104c45be1a52fb093d9 + last_write_checksum: sha1:c0c949ac48ed35efe1e8fbf820b8e390edd9c3ce + pristine_git_object: cb6feb6e8d173d39b828d8f5b38af75173b4f7f2 
src/mistralai/gcp/client/models/usermessage.py: id: 9cfa7260463e - last_write_checksum: sha1:580acf868a3d180eef34b2af9c2d20f78e4fb693 - pristine_git_object: c083e16d4aa536beec9f9e1151ebbe8c1797798c + last_write_checksum: sha1:780984241b84a7dfe1f6ad6eccace1204bfec8bd + pristine_git_object: e237e900421a9e65fd15aede29ade0e510b189f6 src/mistralai/gcp/client/models/validationerror.py: id: 6b4f4910ea9c last_write_checksum: sha1:2792fd656f55519902f37670fb9fb3b43b4aa016 @@ -542,8 +546,8 @@ trackedFiles: pristine_git_object: a9a640a1a7048736383f96c67c6290c86bf536ee src/mistralai/gcp/client/utils/__init__.py: id: a30c8ff6dcff - last_write_checksum: sha1:887f56a717845fab7445cc368d2a17d850c3565a - pristine_git_object: 05f26ade57efb8c54a774fbcb939fb1a7dc655ce + last_write_checksum: sha1:3ad22a588864c93bd3a16605f669955b5f3b8053 + pristine_git_object: b488c2df1390b22be3050eee72832a91c76d5385 src/mistralai/gcp/client/utils/annotations.py: id: 9b2cd4ffc6e9 last_write_checksum: sha1:a4824ad65f730303e4e1e3ec1febf87b4eb46dbc @@ -552,18 +556,22 @@ trackedFiles: id: dd1f0f91ea9d last_write_checksum: sha1:c721e4123000e7dc61ec52b28a739439d9e17341 pristine_git_object: a6c52cd61bbe2d459046c940ce5e8c469f2f0664 + src/mistralai/gcp/client/utils/dynamic_imports.py: + id: 0091051cb000 + last_write_checksum: sha1:a1940c63feb8eddfd8026de53384baf5056d5dcc + pristine_git_object: 673edf82a97d0fea7295625d3e092ea369a36b79 src/mistralai/gcp/client/utils/enums.py: id: 2341407d5443 last_write_checksum: sha1:bc8c3c1285ae09ba8a094ee5c3d9c7f41fa1284d pristine_git_object: 3324e1bc2668c54c4d5f5a1a845675319757a828 src/mistralai/gcp/client/utils/eventstreaming.py: id: bb66f0c3e0dc - last_write_checksum: sha1:bababae5d54b7efc360db701daa49e18a92c2f3b - pristine_git_object: 0969899bfc491e5e408d05643525f347ea95e4fc + last_write_checksum: sha1:ffa870a25a7e4e2015bfd7a467ccd3aa1de97f0e + pristine_git_object: f2052fc22d9fd6c663ba3dce019fe234ca37108b src/mistralai/gcp/client/utils/forms.py: id: ebf34781d6bd - 
last_write_checksum: sha1:15fa7e9ab1611e062a9984cf06cb20969713d295 - pristine_git_object: f961e76beaf0a8b1fe0dda44754a74eebd3608e7 + last_write_checksum: sha1:0ca31459b99f761fcc6d0557a0a38daac4ad50f4 + pristine_git_object: 1e550bd5c2c35d977ddc10f49d77c23cb12c158d src/mistralai/gcp/client/utils/headers.py: id: 4c369582903e last_write_checksum: sha1:7c6df233ee006332b566a8afa9ce9a245941d935 @@ -586,20 +594,24 @@ trackedFiles: pristine_git_object: 1de32b6d26f46590232f398fdba6ce0072f1659c src/mistralai/gcp/client/utils/retries.py: id: 542ebd75b79b - last_write_checksum: sha1:5b97ac4f59357d70c2529975d50364c88bcad607 - pristine_git_object: 88a91b10cd2076b4a2c6cff2ac6bfaa5e3c5ad13 + last_write_checksum: sha1:471372f5c5d1dd5583239c9cf3c75f1b636e5d87 + pristine_git_object: af07d4e941007af4213c5ec9047ef8a2fca04e5e src/mistralai/gcp/client/utils/security.py: id: 5273152365f4 - last_write_checksum: sha1:a17130ace2c0db6394f38dd941ad2b700cc755c8 - pristine_git_object: 295a3f40031dbb40073ad227fd4a355660f97ab2 + last_write_checksum: sha1:435dd8b180cefcd733e635b9fa45512da091d9c0 + pristine_git_object: 17996bd54b8624009802fbbdf30bcb4225b8dfed src/mistralai/gcp/client/utils/serializers.py: id: a7836e553d41 last_write_checksum: sha1:ce1d8d7f500a9ccba0aeca5057cee9c271f4dfd7 pristine_git_object: 14321eb479de81d0d9580ec8291e0ff91bf29e57 + src/mistralai/gcp/client/utils/unions.py: + id: 8abba1cf1b6d + last_write_checksum: sha1:6e38049f323e0b5fb4bd0e88ab51ec447197ccb0 + pristine_git_object: a227f4e87be22fce682fcae5813b71835199ec5e src/mistralai/gcp/client/utils/unmarshal_json_response.py: id: d972d22cf934 - last_write_checksum: sha1:a68b9e491188e6c1956a749530eac3c7dc8004e7 - pristine_git_object: 83e8275e59adf51fb01a0579ae26627ee29fee49 + last_write_checksum: sha1:5c75fb4ee04ae80a350ceb96abf4e1fdb255ee6c + pristine_git_object: ead3e5a00171b3a97af5112b6cd9ece698ce74f5 src/mistralai/gcp/client/utils/url.py: id: 0d311bbcb8f8 last_write_checksum: sha1:6479961baa90432ca25626f8e40a7bbc32e73b41 
diff --git a/packages/gcp/.speakeasy/gen.yaml b/packages/gcp/.speakeasy/gen.yaml index 93cc5a42..54336636 100644 --- a/packages/gcp/.speakeasy/gen.yaml +++ b/packages/gcp/.speakeasy/gen.yaml @@ -13,8 +13,9 @@ generation: requestResponseComponentNamesFeb2024: true securityFeb2025: true sharedErrorComponentsApr2025: true - methodSignaturesApr2024: true sharedNestedComponentsJan2026: true + nameOverrideFeb2026: true + methodSignaturesApr2024: true auth: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false @@ -22,31 +23,37 @@ generation: schemas: allOfMergeStrategy: shallowMerge requestBodyFieldName: "" + versioningStrategy: automatic persistentEdits: {} tests: generateTests: true generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0a4 + version: 2.0.0-a4.1 additionalDependencies: dev: pytest: ^8.2.2 pytest-asyncio: ^0.23.7 + main: {} allowedRedefinedBuiltins: - id - object + - input + - dir asyncMode: both authors: - Mistral baseErrorName: MistralGCPError clientServerStatusCodesAsErrors: true - constFieldCasing: upper + constFieldCasing: normal defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API in GCP. 
enableCustomCodeRegions: false enumFormat: union fixFlags: + asyncPaginationSep2025: true + conflictResistantModelImportsFeb2026: true responseRequiredSep2024: true flatAdditionalProperties: true flattenGlobalSecurity: true @@ -58,17 +65,17 @@ python: option: openapi paths: callbacks: "" - errors: "" + errors: errors operations: "" shared: "" webhooks: "" inferUnionDiscriminators: true inputModelSuffix: input license: "" - maxMethodParams: 15 + maxMethodParams: 999 methodArguments: infer-optional-args moduleName: mistralai.gcp.client - multipartArrayFormat: legacy + multipartArrayFormat: standard outputModelSuffix: output packageManager: uv packageName: mistralai-gcp @@ -78,3 +85,4 @@ python: responseFormat: flat sseFlatResponse: false templateVersion: v2 + useAsyncHooks: false diff --git a/packages/gcp/docs/models/httpvalidationerror.md b/packages/gcp/docs/errors/httpvalidationerror.md similarity index 100% rename from packages/gcp/docs/models/httpvalidationerror.md rename to packages/gcp/docs/errors/httpvalidationerror.md diff --git a/packages/gcp/docs/models/chatcompletionrequest.md b/packages/gcp/docs/models/chatcompletionrequest.md index 61a25d86..8dbd4a82 100644 --- a/packages/gcp/docs/models/chatcompletionrequest.md +++ b/packages/gcp/docs/models/chatcompletionrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionRequestMessage](../models/chatcompletionrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionRequestToolChoice]](../models/chatcompletionrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/packages/gcp/docs/models/chatcompletionstreamrequest.md b/packages/gcp/docs/models/chatcompletionstreamrequest.md index 3e790e7d..db76b6c8 100644 --- a/packages/gcp/docs/models/chatcompletionstreamrequest.md +++ b/packages/gcp/docs/models/chatcompletionstreamrequest.md @@ -14,7 +14,7 @@ | `random_seed` | *OptionalNullable[int]* | :heavy_minus_sign: | The seed to use for random sampling. If set, different calls will generate deterministic results. | | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `messages` | List[[models.ChatCompletionStreamRequestMessage](../models/chatcompletionstreamrequestmessage.md)] | :heavy_check_mark: | The prompt(s) to generate completions for, encoded as a list of dict with role and content. | [
{
"role": "user",
"content": "Who is the best French painter? Answer in one short sentence."
}
] | -| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | {
"type": "text"
} | +| `response_format` | [Optional[models.ResponseFormat]](../models/responseformat.md) | :heavy_minus_sign: | Specify the format that the model must output. By default it will use `{ "type": "text" }`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. Setting to `{ "type": "json_schema" }` enables JSON schema mode, which guarantees the message the model generates is in JSON and follows the schema you provide. | **Example 1:** {
"type": "text"
}
**Example 2:** {
"type": "json_object"
}
**Example 3:** {
"type": "json_schema",
"json_schema": {
"schema": {
"properties": {
"name": {
"title": "Name",
"type": "string"
},
"authors": {
"items": {
"type": "string"
},
"title": "Authors",
"type": "array"
}
},
"required": [
"name",
"authors"
],
"title": "Book",
"type": "object",
"additionalProperties": false
},
"name": "book",
"strict": true
}
} | | `tools` | List[[models.Tool](../models/tool.md)] | :heavy_minus_sign: | A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. | | | `tool_choice` | [Optional[models.ChatCompletionStreamRequestToolChoice]](../models/chatcompletionstreamrequesttoolchoice.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `any` or `required` means the model must call one or more tools. Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. | | | `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | The `presence_penalty` determines how much the model penalizes the repetition of words or phrases. A higher presence penalty encourages the model to use a wider variety of words and phrases, making the output more diverse and creative. | | diff --git a/packages/gcp/pylintrc b/packages/gcp/pylintrc index a8fcb932..0391ac11 100644 --- a/packages/gcp/pylintrc +++ b/packages/gcp/pylintrc @@ -89,7 +89,7 @@ persistent=yes # Minimum Python version to use for version dependent checks. Will default to # the version used to run pylint. -py-version=3.9 +py-version=3.10 # Discover python modules and packages in the file system subtree. recursive=no @@ -459,7 +459,8 @@ disable=raw-checker-failed, consider-using-with, wildcard-import, unused-wildcard-import, - too-many-return-statements + too-many-return-statements, + redefined-builtin # Enable the message, report, category or checker with the given id(s). 
You can # either give multiple identifier separated by comma (,) or put this option @@ -641,7 +642,7 @@ additional-builtins= allow-global-unused-variables=yes # List of names allowed to shadow builtins -allowed-redefined-builtins=id,object +allowed-redefined-builtins=id,object,input,dir # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. diff --git a/packages/gcp/src/mistralai/gcp/client/__init__.py b/packages/gcp/src/mistralai/gcp/client/__init__.py index dd02e42e..833c68cd 100644 --- a/packages/gcp/src/mistralai/gcp/client/__init__.py +++ b/packages/gcp/src/mistralai/gcp/client/__init__.py @@ -9,7 +9,6 @@ ) from .sdk import * from .sdkconfiguration import * -from .models import * VERSION: str = __version__ diff --git a/packages/gcp/src/mistralai/gcp/client/_version.py b/packages/gcp/src/mistralai/gcp/client/_version.py index ba48dac1..204c92a6 100644 --- a/packages/gcp/src/mistralai/gcp/client/_version.py +++ b/packages/gcp/src/mistralai/gcp/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "2.0.0a4" +__version__: str = "2.0.0-a4.1" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 2.0.0a4 2.794.1 1.0.0 mistralai-gcp" +__gen_version__: str = "2.841.0" +__user_agent__: str = "speakeasy-sdk/python 2.0.0-a4.1 2.841.0 1.0.0 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/gcp/src/mistralai/gcp/client/basesdk.py b/packages/gcp/src/mistralai/gcp/client/basesdk.py index 6f9f5fd9..b3edcb0a 100644 --- a/packages/gcp/src/mistralai/gcp/client/basesdk.py +++ b/packages/gcp/src/mistralai/gcp/client/basesdk.py @@ -2,7 +2,7 @@ from .sdkconfiguration import SDKConfiguration import httpx -from mistralai.gcp.client import models, utils +from mistralai.gcp.client import errors, utils from mistralai.gcp.client._hooks import ( AfterErrorContext, 
AfterSuccessContext, @@ -12,6 +12,7 @@ RetryConfig, SerializedRequestBody, get_body_content, + run_sync_in_thread, ) from typing import Callable, List, Mapping, Optional, Tuple from urllib.parse import parse_qs, urlparse @@ -264,7 +265,7 @@ def do(): if http_res is None: logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -285,7 +286,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -315,7 +316,10 @@ async def do_request_async( async def do(): http_res = None try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + req = await run_sync_in_thread( + hooks.before_request, BeforeRequestContext(hook_ctx), request + ) + logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -329,14 +333,17 @@ async def do(): http_res = await client.send(req, stream=stream) except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + _, e = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), None, e + ) + if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -347,9 +354,10 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None + result, err = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), http_res, None ) + if err 
is not None: logger.debug("Request Exception", exc_info=True) raise err @@ -357,7 +365,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -369,6 +377,8 @@ async def do(): http_res = await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + http_res = await run_sync_in_thread( + hooks.after_success, AfterSuccessContext(hook_ctx), http_res + ) return http_res diff --git a/packages/gcp/src/mistralai/gcp/client/chat.py b/packages/gcp/src/mistralai/gcp/client/chat.py index 78541248..925d69ed 100644 --- a/packages/gcp/src/mistralai/gcp/client/chat.py +++ b/packages/gcp/src/mistralai/gcp/client/chat.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai.gcp.client import models, utils +from mistralai.gcp.client import errors, models, utils from mistralai.gcp.client._hooks import HookContext from mistralai.gcp.client.types import OptionalNullable, UNSET from mistralai.gcp.client.utils import eventstreaming @@ -176,18 +176,18 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -353,18 +353,18 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", 
"*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -521,17 +521,17 @@ def complete( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -688,14 +688,14 @@ async def complete_async( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - 
models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/gcp/src/mistralai/gcp/client/errors/__init__.py b/packages/gcp/src/mistralai/gcp/client/errors/__init__.py new file mode 100644 index 00000000..00c8ee00 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/errors/__init__.py @@ -0,0 +1,39 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from .mistralgcperror import MistralGCPError +from typing import Any, TYPE_CHECKING + +from mistralai.gcp.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .no_response_error import NoResponseError + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + +__all__ = [ + "HTTPValidationError", + "HTTPValidationErrorData", + "MistralGCPError", + "NoResponseError", + "ResponseValidationError", + "SDKError", +] + +_dynamic_imports: dict[str, str] = { + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "NoResponseError": ".no_response_error", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py b/packages/gcp/src/mistralai/gcp/client/errors/httpvalidationerror.py similarity index 77% rename from packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py rename to packages/gcp/src/mistralai/gcp/client/errors/httpvalidationerror.py index 57df7260..59806819 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/httpvalidationerror.py +++ b/packages/gcp/src/mistralai/gcp/client/errors/httpvalidationerror.py @@ -1,16 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from .validationerror import ValidationError from dataclasses import dataclass, field import httpx -from mistralai.gcp.client.models import MistralGCPError +from mistralai.gcp.client.errors import MistralGCPError +from mistralai.gcp.client.models import validationerror as models_validationerror from mistralai.gcp.client.types import BaseModel from typing import List, Optional class HTTPValidationErrorData(BaseModel): - detail: Optional[List[ValidationError]] = None + detail: Optional[List[models_validationerror.ValidationError]] = None @dataclass(unsafe_hash=True) diff --git a/packages/gcp/src/mistralai/gcp/client/models/mistralgcperror.py b/packages/gcp/src/mistralai/gcp/client/errors/mistralgcperror.py similarity index 100% rename from packages/gcp/src/mistralai/gcp/client/models/mistralgcperror.py rename to packages/gcp/src/mistralai/gcp/client/errors/mistralgcperror.py diff --git a/packages/gcp/src/mistralai/gcp/client/models/no_response_error.py b/packages/gcp/src/mistralai/gcp/client/errors/no_response_error.py similarity index 100% rename from packages/gcp/src/mistralai/gcp/client/models/no_response_error.py rename to packages/gcp/src/mistralai/gcp/client/errors/no_response_error.py diff --git a/packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py b/packages/gcp/src/mistralai/gcp/client/errors/responsevalidationerror.py similarity index 92% rename from packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py rename to packages/gcp/src/mistralai/gcp/client/errors/responsevalidationerror.py index 0e86ea6c..e8bd83c1 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/responsevalidationerror.py +++ b/packages/gcp/src/mistralai/gcp/client/errors/responsevalidationerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses import dataclass -from mistralai.gcp.client.models import MistralGCPError +from mistralai.gcp.client.errors import MistralGCPError 
@dataclass(unsafe_hash=True) diff --git a/packages/gcp/src/mistralai/gcp/client/models/sdkerror.py b/packages/gcp/src/mistralai/gcp/client/errors/sdkerror.py similarity index 95% rename from packages/gcp/src/mistralai/gcp/client/models/sdkerror.py rename to packages/gcp/src/mistralai/gcp/client/errors/sdkerror.py index 00bc1d99..69809246 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/sdkerror.py +++ b/packages/gcp/src/mistralai/gcp/client/errors/sdkerror.py @@ -4,7 +4,7 @@ from typing import Optional from dataclasses import dataclass -from mistralai.gcp.client.models import MistralGCPError +from mistralai.gcp.client.errors import MistralGCPError MAX_MESSAGE_LEN = 10_000 diff --git a/packages/gcp/src/mistralai/gcp/client/fim.py b/packages/gcp/src/mistralai/gcp/client/fim.py index e2acacd5..4202102a 100644 --- a/packages/gcp/src/mistralai/gcp/client/fim.py +++ b/packages/gcp/src/mistralai/gcp/client/fim.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from .basesdk import BaseSDK -from mistralai.gcp.client import models, utils +from mistralai.gcp.client import errors, models, utils from mistralai.gcp.client._hooks import HookContext from mistralai.gcp.client.types import OptionalNullable, UNSET from mistralai.gcp.client.utils import eventstreaming @@ -133,18 +133,18 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -267,18 +267,18 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", 
"*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def complete( self, @@ -394,17 +394,17 @@ def complete( return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -520,14 +520,14 @@ async def complete_async( return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - 
models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/packages/gcp/src/mistralai/gcp/client/models/__init__.py b/packages/gcp/src/mistralai/gcp/client/models/__init__.py index fb446c25..575f6404 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/__init__.py +++ b/packages/gcp/src/mistralai/gcp/client/models/__init__.py @@ -1,10 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from .mistralgcperror import MistralGCPError -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING + +from mistralai.gcp.client.utils.dynamic_imports import lazy_getattr, lazy_dir if TYPE_CHECKING: from .assistantmessage import ( @@ -49,7 +47,7 @@ CompletionResponseStreamChoiceFinishReason, CompletionResponseStreamChoiceTypedDict, ) - from .contentchunk import ContentChunk, ContentChunkTypedDict + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk from .deltamessage import ( DeltaMessage, DeltaMessageContent, @@ -80,7 +78,6 @@ FunctionCallTypedDict, ) from .functionname import FunctionName, FunctionNameTypedDict - from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData from .imagedetail import ImageDetail from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( @@ -91,13 +88,10 @@ ) from .jsonschema import JSONSchema, JSONSchemaTypedDict from .mistralpromptmode import MistralPromptMode - from .no_response_error import NoResponseError from .prediction import Prediction, PredictionTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .responseformat import ResponseFormat, ResponseFormatTypedDict from .responseformats import ResponseFormats - from .responsevalidationerror import ResponseValidationError - from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .systemmessage import ( SystemMessage, @@ -193,8 +187,6 @@ "FunctionName", "FunctionNameTypedDict", "FunctionTypedDict", - "HTTPValidationError", - "HTTPValidationErrorData", "ImageDetail", "ImageURL", "ImageURLChunk", @@ -206,9 +198,7 @@ "JSONSchemaTypedDict", "Loc", "LocTypedDict", - "MistralGCPError", "MistralPromptMode", - "NoResponseError", "Prediction", "PredictionTypedDict", "ReferenceChunk", @@ -216,8 +206,6 @@ "ResponseFormat", "ResponseFormatTypedDict", 
"ResponseFormats", - "ResponseValidationError", - "SDKError", "Security", "SecurityTypedDict", "SystemMessage", @@ -244,6 +232,7 @@ "ToolMessageTypedDict", "ToolTypedDict", "ToolTypes", + "UnknownContentChunk", "UsageInfo", "UsageInfoTypedDict", "UserMessage", @@ -289,6 +278,7 @@ "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", "ContentChunk": ".contentchunk", "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", "DeltaMessage": ".deltamessage", "DeltaMessageContent": ".deltamessage", "DeltaMessageContentTypedDict": ".deltamessage", @@ -311,8 +301,6 @@ "FunctionCallTypedDict": ".functioncall", "FunctionName": ".functionname", "FunctionNameTypedDict": ".functionname", - "HTTPValidationError": ".httpvalidationerror", - "HTTPValidationErrorData": ".httpvalidationerror", "ImageDetail": ".imagedetail", "ImageURL": ".imageurl", "ImageURLTypedDict": ".imageurl", @@ -323,7 +311,6 @@ "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", "MistralPromptMode": ".mistralpromptmode", - "NoResponseError": ".no_response_error", "Prediction": ".prediction", "PredictionTypedDict": ".prediction", "ReferenceChunk": ".referencechunk", @@ -331,8 +318,6 @@ "ResponseFormat": ".responseformat", "ResponseFormatTypedDict": ".responseformat", "ResponseFormats": ".responseformats", - "ResponseValidationError": ".responsevalidationerror", - "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", "SystemMessage": ".systemmessage", @@ -372,39 +357,11 @@ } -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if 
module_name is None: - raise AttributeError( - f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - result = getattr(module, attr_name) - return result - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py index 7061775b..702ac470 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/assistantmessage.py @@ -37,7 +37,7 @@ class AssistantMessageTypedDict(TypedDict): class AssistantMessage(BaseModel): - ROLE: Annotated[ + role: Annotated[ Annotated[ Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) ], @@ -53,30 +53,31 @@ class AssistantMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role", "content", "tool_calls", "prefix"] - nullable_fields = ["content", "tool_calls"] - null_default_fields = [] - + optional_fields = set(["role", "content", "tool_calls", "prefix"]) + nullable_fields = set(["content", "tool_calls"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != 
UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AssistantMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py index 1bc03922..8229c5bb 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionrequest.py @@ -165,55 +165,54 @@ class ChatCompletionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = 
f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py index 0a5a0021..3c228d2e 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/chatcompletionstreamrequest.py @@ -163,55 +163,54 @@ class ChatCompletionStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + [ + 
"temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py index 9e54cb6d..a0b1ae2f 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/completionchunk.py @@ -6,7 +6,8 @@ CompletionResponseStreamChoiceTypedDict, ) from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import List, Optional from typing_extensions import NotRequired, TypedDict @@ -32,3 +33,19 @@ class CompletionChunk(BaseModel): created: Optional[int] = None usage: Optional[UsageInfo] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "created", "usage"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + 
if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py index 6f306721..e58d4c88 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py +++ b/packages/gcp/src/mistralai/gcp/client/models/completionresponsestreamchoice.py @@ -39,30 +39,14 @@ class CompletionResponseStreamChoice(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["finish_reason"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py index 1cd9e502..18d48150 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/contentchunk.py @@ -4,9 +4,12 @@ from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.gcp.client.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union +from functools import partial +from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.utils.unions import 
parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union from typing_extensions import Annotated, TypeAliasType @@ -16,11 +19,32 @@ ) +class UnknownContentChunk(BaseModel): + r"""A ContentChunk variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONTENT_CHUNK_VARIANTS: dict[str, Any] = { + "image_url": ImageURLChunk, + "text": TextChunk, + "reference": ReferenceChunk, +} + + ContentChunk = Annotated[ - Union[ - Annotated[ImageURLChunk, Tag("image_url")], - Annotated[TextChunk, Tag("text")], - Annotated[ReferenceChunk, Tag("reference")], - ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + Union[ImageURLChunk, TextChunk, ReferenceChunk, UnknownContentChunk], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONTENT_CHUNK_VARIANTS, + unknown_cls=UnknownContentChunk, + union_name="ContentChunk", + ) + ), ] diff --git a/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py index 96923518..63e6a7f3 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/deltamessage.py @@ -40,30 +40,25 @@ class DeltaMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["role", "content", "tool_calls"] - null_default_fields = [] - + optional_fields = set(["role", "content", "tool_calls"]) + nullable_fields = set(["role", "content", "tool_calls"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in 
nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py index f37bbcc3..e460f76c 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionrequest.py @@ -84,47 +84,46 @@ class FIMCompletionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: 
disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py index 8e610261..fffc3054 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py +++ b/packages/gcp/src/mistralai/gcp/client/models/fimcompletionstreamrequest.py @@ -82,47 +82,46 @@ class FIMCompletionStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != 
UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/function.py b/packages/gcp/src/mistralai/gcp/client/models/function.py index 28577eff..439e8313 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/function.py +++ b/packages/gcp/src/mistralai/gcp/client/models/function.py @@ -1,7 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Any, Dict, Optional from typing_extensions import NotRequired, TypedDict @@ -21,3 +22,19 @@ class Function(BaseModel): description: Optional[str] = None strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/imageurl.py b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py index d4f298f1..903d0a1a 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/imageurl.py +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurl.py @@ -25,30 +25,25 @@ class ImageURL(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["detail"] 
- nullable_fields = ["detail"] - null_default_fields = [] - + optional_fields = set(["detail"]) + nullable_fields = set(["detail"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py index fc5284c1..4bec0eec 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/imageurlchunk.py @@ -30,7 +30,13 @@ class ImageURLChunk(BaseModel): image_url: ImageURLUnion - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["image_url"], AfterValidator(validate_const("image_url"))], pydantic.Field(alias="type"), ] = "image_url" + + +try: + ImageURLChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py index 443c429d..684ac09f 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py +++ b/packages/gcp/src/mistralai/gcp/client/models/jsonschema.py @@ -32,30 +32,31 @@ class JSONSchema(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = 
["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - + optional_fields = set(["description", "strict"]) + nullable_fields = set(["description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + JSONSchema.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/prediction.py b/packages/gcp/src/mistralai/gcp/client/models/prediction.py index f53579ed..2e325289 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/prediction.py +++ b/packages/gcp/src/mistralai/gcp/client/models/prediction.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL from mistralai.gcp.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -19,7 +20,7 @@ class PredictionTypedDict(TypedDict): class Prediction(BaseModel): r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) ], @@ -27,3 +28,25 @@ class Prediction(BaseModel): ] = "content" content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type", "content"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py index 274ea7f7..261c4755 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/referencechunk.py @@ -1,9 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" from __future__ import annotations -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL from mistralai.gcp.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional from typing_extensions import Annotated, TypedDict @@ -17,9 +18,31 @@ class ReferenceChunkTypedDict(TypedDict): class ReferenceChunk(BaseModel): reference_ids: List[int] - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["reference"]], AfterValidator(validate_const("reference")) ], pydantic.Field(alias="type"), ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/responseformat.py b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py index 34ae6b03..f3aa9930 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/responseformat.py +++ b/packages/gcp/src/mistralai/gcp/client/models/responseformat.py @@ -31,30 +31,25 @@ class ResponseFormat(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - + optional_fields = set(["type", "json_schema"]) + nullable_fields = set(["json_schema"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - 
self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py index a7d695a7..b3795c4b 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/systemmessage.py @@ -32,7 +32,13 @@ class SystemMessageTypedDict(TypedDict): class SystemMessage(BaseModel): content: SystemMessageContent - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["system"], AfterValidator(validate_const("system"))], pydantic.Field(alias="role"), ] = "system" + + +try: + SystemMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py index 225f38b7..8de71c90 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py +++ b/packages/gcp/src/mistralai/gcp/client/models/systemmessagecontentchunks.py @@ -15,5 +15,5 @@ SystemMessageContentChunks = Annotated[ - Union[TextChunk, ThinkChunk], Field(discriminator="TYPE") + Union[TextChunk, ThinkChunk], Field(discriminator="type") ] diff --git a/packages/gcp/src/mistralai/gcp/client/models/textchunk.py b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py index 77576c9f..69032272 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/textchunk.py 
+++ b/packages/gcp/src/mistralai/gcp/client/models/textchunk.py @@ -17,7 +17,13 @@ class TextChunkTypedDict(TypedDict): class TextChunk(BaseModel): text: str - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["text"], AfterValidator(validate_const("text"))], pydantic.Field(alias="type"), ] = "text" + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py index b65fffb2..33ec8394 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py +++ b/packages/gcp/src/mistralai/gcp/client/models/thinkchunk.py @@ -3,9 +3,10 @@ from __future__ import annotations from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL from mistralai.gcp.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -29,10 +30,32 @@ class ThinkChunkTypedDict(TypedDict): class ThinkChunk(BaseModel): thinking: List[Thinking] - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], pydantic.Field(alias="type"), ] = "thinking" closed: Optional[bool] = None r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/tool.py b/packages/gcp/src/mistralai/gcp/client/models/tool.py index d09c6854..670aa81f 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/tool.py +++ b/packages/gcp/src/mistralai/gcp/client/models/tool.py @@ -3,7 +3,8 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -17,3 +18,19 @@ class Tool(BaseModel): function: Function type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolcall.py b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py index a1edf337..3ea8e283 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/toolcall.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolcall.py @@ -3,7 +3,8 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai.gcp.client.types 
import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -23,3 +24,19 @@ class ToolCall(BaseModel): type: Optional[ToolTypes] = None index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py index de3828da..6e795fd7 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolchoice.py @@ -3,7 +3,8 @@ from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes -from mistralai.gcp.client.types import BaseModel +from mistralai.gcp.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -23,3 +24,19 @@ class ToolChoice(BaseModel): r"""this restriction of `Function` is used to select a specific function to call""" type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py index 
65b1d9d6..ce160391 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/toolmessage.py @@ -35,7 +35,7 @@ class ToolMessageTypedDict(TypedDict): class ToolMessage(BaseModel): content: Nullable[ToolMessageContent] - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], pydantic.Field(alias="role"), ] = "tool" @@ -46,30 +46,31 @@ class ToolMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name"] - nullable_fields = ["content", "tool_call_id", "name"] - null_default_fields = [] - + optional_fields = set(["tool_call_id", "name"]) + nullable_fields = set(["content", "tool_call_id", "name"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py index 9b7207b1..cb6feb6e 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py +++ b/packages/gcp/src/mistralai/gcp/client/models/usageinfo.py @@ -45,37 +45,34 @@ def 
additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "prompt_audio_seconds", - ] - nullable_fields = ["prompt_audio_seconds"] - null_default_fields = [] - + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + ) + nullable_fields = set(["prompt_audio_seconds"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v diff --git a/packages/gcp/src/mistralai/gcp/client/models/usermessage.py b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py index c083e16d..e237e900 100644 --- a/packages/gcp/src/mistralai/gcp/client/models/usermessage.py +++ b/packages/gcp/src/mistralai/gcp/client/models/usermessage.py @@ -27,37 +27,27 @@ class UserMessageTypedDict(TypedDict): class UserMessage(BaseModel): content: Nullable[UserMessageContent] - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["user"], AfterValidator(validate_const("user"))], pydantic.Field(alias="role"), ] = "user" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = 
["content"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m + + +try: + UserMessage.model_rebuild() +except NameError: + pass diff --git a/packages/gcp/src/mistralai/gcp/client/utils/__init__.py b/packages/gcp/src/mistralai/gcp/client/utils/__init__.py index 05f26ade..b488c2df 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/__init__.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/__init__.py @@ -1,14 +1,23 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING, Callable, TypeVar +import asyncio + +from .dynamic_imports import lazy_getattr, lazy_dir + +_T = TypeVar("_T") + + +async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T: + """Run a synchronous function in a thread pool to avoid blocking the event loop.""" + return await asyncio.to_thread(func, *args) + if TYPE_CHECKING: from .annotations import get_discriminator from .datetimes import parse_datetime from .enums import OpenEnumMeta + from .unions import parse_open_union from .headers import get_headers, get_response_headers from .metadata import ( FieldMetadata, @@ -76,6 +85,7 @@ "match_response", "MultipartFormMetadata", "OpenEnumMeta", + "parse_open_union", "PathParamMetadata", "QueryParamMetadata", "remove_suffix", @@ -128,6 +138,7 @@ "match_response": ".values", "MultipartFormMetadata": ".metadata", "OpenEnumMeta": ".enums", + "parse_open_union": ".unions", "PathParamMetadata": ".metadata", "QueryParamMetadata": ".metadata", "remove_suffix": ".url", @@ -157,38 +168,11 @@ } -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - return getattr(module, attr_name) - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to 
get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/dynamic_imports.py b/packages/gcp/src/mistralai/gcp/client/utils/dynamic_imports.py new file mode 100644 index 00000000..673edf82 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/dynamic_imports.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from importlib import import_module +import builtins +import sys + + +def dynamic_import(package, modname, retries=3): + """Import a module relative to package, retrying on KeyError from half-initialized modules.""" + for attempt in range(retries): + try: + return import_module(modname, package) + except KeyError: + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None): + """Module-level __getattr__ that lazily loads from a dynamic_imports mapping. + + Args: + attr_name: The attribute being looked up. + package: The caller's __package__ (for relative imports). + dynamic_imports: Dict mapping attribute names to relative module paths. + sub_packages: Optional list of subpackage names to lazy-load. 
+ """ + module_name = dynamic_imports.get(attr_name) + if module_name is not None: + try: + module = dynamic_import(package, module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + if sub_packages and attr_name in sub_packages: + return import_module(f".{attr_name}", package) + + raise AttributeError(f"module '{package}' has no attribute '{attr_name}'") + + +def lazy_dir(*, dynamic_imports, sub_packages=None): + """Module-level __dir__ that lists lazily-loadable attributes.""" + lazy_attrs = builtins.list(dynamic_imports.keys()) + if sub_packages: + lazy_attrs.extend(sub_packages) + return builtins.sorted(lazy_attrs) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py b/packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py index 0969899b..f2052fc2 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/eventstreaming.py @@ -2,7 +2,9 @@ import re import json +from dataclasses import dataclass, asdict from typing import ( + Any, Callable, Generic, TypeVar, @@ -22,6 +24,7 @@ class EventStream(Generic[T]): client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] + _closed: bool def __init__( self, @@ -33,17 +36,21 @@ def __init__( self.response = response self.generator = stream_events(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __iter__(self): return self def __next__(self): + if self._closed: + raise StopIteration return next(self.generator) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): + self._closed = True self.response.close() @@ -53,6 +60,7 @@ class EventStreamAsync(Generic[T]): client_ref: Optional[object] response: 
httpx.Response generator: AsyncGenerator[T, None] + _closed: bool def __init__( self, @@ -64,33 +72,45 @@ def __init__( self.response = response self.generator = stream_events_async(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __aiter__(self): return self async def __anext__(self): + if self._closed: + raise StopAsyncIteration return await self.generator.__anext__() async def __aenter__(self): return self async def __aexit__(self, exc_type, exc_val, exc_tb): + self._closed = True await self.response.aclose() +@dataclass class ServerEvent: id: Optional[str] = None event: Optional[str] = None - data: Optional[str] = None + data: Any = None retry: Optional[int] = None MESSAGE_BOUNDARIES = [ b"\r\n\r\n", - b"\n\n", + b"\r\n\r", + b"\r\n\n", + b"\r\r\n", + b"\n\r\n", b"\r\r", + b"\n\r", + b"\n\n", ] +UTF8_BOM = b"\xef\xbb\xbf" + async def stream_events_async( response: httpx.Response, @@ -99,14 +119,10 @@ async def stream_events_async( ) -> AsyncGenerator[T, None]: buffer = bytearray() position = 0 - discard = False + event_id: Optional[str] = None async for chunk in response.aiter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -121,15 +137,22 @@ async def stream_events_async( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + await response.aclose() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event @@ -141,14 +164,10 @@ def stream_events( ) -> Generator[T, None, None]: buffer = bytearray() position = 0 - discard = False + event_id: Optional[str] = None for chunk in response.iter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -163,22 +182,33 @@ def stream_events( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + response.close() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event def _parse_event( - raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None -) -> Tuple[Optional[T], bool]: + *, + raw: bytearray, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + event_id: Optional[str] = None, +) -> Tuple[Optional[T], bool, Optional[str]]: block = raw.decode() lines = re.split(r"\r?\n|\r", block) publish = False @@ -189,13 +219,16 @@ def _parse_event( continue delim = line.find(":") - if delim <= 0: + if delim == 0: continue - field = line[0:delim] - value = line[delim + 1 :] if delim < len(line) - 1 else "" - if len(value) and value[0] == " ": - value = value[1:] + field = line + value = "" + if delim > 0: + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] if field == "event": event.event = value @@ -204,37 +237,36 @@ def _parse_event( data += value + "\n" publish = True elif field == "id": - event.id = value publish = True + if "\x00" not in value: + event_id = value elif field == "retry": - event.retry = int(value) if value.isdigit() else None + if value.isdigit(): + event.retry = int(value) publish = True + event.id = event_id + if 
sentinel and data == f"{sentinel}\n": - return None, True + return None, True, event_id if data: data = data[:-1] - event.data = data - - data_is_primitive = ( - data.isnumeric() or data == "true" or data == "false" or data == "null" - ) - data_is_json = ( - data.startswith("{") or data.startswith("[") or data.startswith('"') - ) - - if data_is_primitive or data_is_json: - try: - event.data = json.loads(data) - except Exception: - pass + try: + event.data = json.loads(data) + except json.JSONDecodeError: + event.data = data out = None if publish: - out = decoder(json.dumps(event.__dict__)) - - return out, False + out_dict = { + k: v + for k, v in asdict(event).items() + if v is not None or (k == "data" and data) + } + out = decoder(json.dumps(out_dict)) + + return out, False, event_id def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): diff --git a/packages/gcp/src/mistralai/gcp/client/utils/forms.py b/packages/gcp/src/mistralai/gcp/client/utils/forms.py index f961e76b..1e550bd5 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/forms.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/forms.py @@ -142,7 +142,7 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files - array_field_name = f_name + "[]" + array_field_name = f_name for file_obj in val: if not _is_set(file_obj): continue @@ -185,7 +185,7 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - array_field_name = f_name + "[]" + array_field_name = f_name form[array_field_name] = values else: form[f_name] = _val_to_string(val) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/retries.py b/packages/gcp/src/mistralai/gcp/client/utils/retries.py index 88a91b10..af07d4e9 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/retries.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/retries.py @@ -144,12 +144,7 @@ def do_request() -> httpx.Response: if res.status_code == parsed_code: raise 
TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise @@ -193,12 +188,7 @@ async def do_request() -> httpx.Response: if res.status_code == parsed_code: raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise diff --git a/packages/gcp/src/mistralai/gcp/client/utils/security.py b/packages/gcp/src/mistralai/gcp/client/utils/security.py index 295a3f40..17996bd5 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/security.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/security.py @@ -135,6 +135,8 @@ def _parse_security_scheme_value( elif scheme_type == "http": if sub_type == "bearer": headers[header_name] = _apply_bearer(value) + elif sub_type == "basic": + headers[header_name] = value elif sub_type == "custom": return else: diff --git a/packages/gcp/src/mistralai/gcp/client/utils/unions.py b/packages/gcp/src/mistralai/gcp/client/utils/unions.py new file mode 100644 index 00000000..a227f4e8 --- /dev/null +++ b/packages/gcp/src/mistralai/gcp/client/utils/unions.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. 
+ + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. + """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py index 83e8275e..ead3e5a0 100644 --- a/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py +++ b/packages/gcp/src/mistralai/gcp/client/utils/unmarshal_json_response.py @@ -5,7 +5,7 @@ import httpx from .serializers import unmarshal_json -from mistralai.gcp.client import models +from mistralai.gcp.client import errors T = TypeVar("T") @@ -30,7 +30,7 @@ def unmarshal_json_response( try: return unmarshal_json(body, typ) except Exception as e: - raise models.ResponseValidationError( + raise errors.ResponseValidationError( "Response validation failed", http_res, e, diff --git a/pylintrc b/pylintrc index 2dc62b0e..d1653ae1 100644 --- a/pylintrc +++ b/pylintrc @@ -641,7 +641,7 @@ additional-builtins= allow-global-unused-variables=yes # List of names allowed to shadow builtins -allowed-redefined-builtins=id,object +allowed-redefined-builtins=id,object,input,dir # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. 
diff --git a/src/mistralai/client/__init__.py b/src/mistralai/client/__init__.py index 481fc916..4b79610a 100644 --- a/src/mistralai/client/__init__.py +++ b/src/mistralai/client/__init__.py @@ -10,7 +10,6 @@ ) from .sdk import * from .sdkconfiguration import * -from .models import * VERSION: str = __version__ diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py index 814d9ec7..1a4d15d6 100644 --- a/src/mistralai/client/_version.py +++ b/src/mistralai/client/_version.py @@ -4,10 +4,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "2.0.0a3" +__version__: str = "2.0.0-a3.1" __openapi_doc_version__: str = "1.0.0" -__gen_version__: str = "2.794.1" -__user_agent__: str = "speakeasy-sdk/python 2.0.0a3 2.794.1 1.0.0 mistralai" +__gen_version__: str = "2.841.0" +__user_agent__: str = "speakeasy-sdk/python 2.0.0-a3.1 2.841.0 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/client/accesses.py b/src/mistralai/client/accesses.py index cda484c8..0761b0bc 100644 --- a/src/mistralai/client/accesses.py +++ b/src/mistralai/client/accesses.py @@ -2,12 +2,8 @@ # @generated-id: 76fc53bfcf59 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - entitytype as models_entitytype, - shareenum as models_shareenum, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -46,7 +42,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListLibraryAccessesRequest( + request = models.LibrariesShareListV1Request( library_id=library_id, ) @@ -79,7 +75,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - 
operation_id="ListLibraryAccesses", + operation_id="libraries_share_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -95,17 +91,17 @@ def list( return unmarshal_json_response(models.ListSharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -136,7 +132,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListLibraryAccessesRequest( + request = models.LibrariesShareListV1Request( library_id=library_id, ) @@ -169,7 +165,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListLibraryAccesses", + operation_id="libraries_share_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -185,25 +181,25 @@ async def list_async( return unmarshal_json_response(models.ListSharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, 
http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update_or_create( self, *, library_id: str, - level: models_shareenum.ShareEnum, + level: models.ShareEnum, share_with_uuid: str, - share_with_type: models_entitytype.EntityType, + share_with_type: models.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -234,7 +230,7 @@ def update_or_create( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateOrCreateLibraryAccessRequest( + request = models.LibrariesShareCreateV1Request( library_id=library_id, sharing_in=models.SharingIn( org_id=org_id, @@ -276,7 +272,7 @@ def update_or_create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateOrCreateLibraryAccess", + operation_id="libraries_share_create_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -292,25 +288,25 @@ def update_or_create( return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + 
errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_or_create_async( self, *, library_id: str, - level: models_shareenum.ShareEnum, + level: models.ShareEnum, share_with_uuid: str, - share_with_type: models_entitytype.EntityType, + share_with_type: models.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -341,7 +337,7 @@ async def update_or_create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateOrCreateLibraryAccessRequest( + request = models.LibrariesShareCreateV1Request( library_id=library_id, sharing_in=models.SharingIn( org_id=org_id, @@ -383,7 +379,7 @@ async def update_or_create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateOrCreateLibraryAccess", + operation_id="libraries_share_create_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -399,24 +395,24 @@ async def update_or_create_async( return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, 
http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, *, library_id: str, share_with_uuid: str, - share_with_type: models_entitytype.EntityType, + share_with_type: models.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -446,7 +442,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteLibraryAccessRequest( + request = models.LibrariesShareDeleteV1Request( library_id=library_id, sharing_delete=models.SharingDelete( org_id=org_id, @@ -487,7 +483,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteLibraryAccess", + operation_id="libraries_share_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -503,24 +499,24 @@ def delete( return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise 
errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, *, library_id: str, share_with_uuid: str, - share_with_type: models_entitytype.EntityType, + share_with_type: models.EntityType, org_id: OptionalNullable[str] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -550,7 +546,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteLibraryAccessRequest( + request = models.LibrariesShareDeleteV1Request( library_id=library_id, sharing_delete=models.SharingDelete( org_id=org_id, @@ -591,7 +587,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteLibraryAccess", + operation_id="libraries_share_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -607,14 +603,14 @@ async def delete_async( return unmarshal_json_response(models.SharingOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): 
http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/agents.py b/src/mistralai/client/agents.py index 0942cb20..2b70d152 100644 --- a/src/mistralai/client/agents.py +++ b/src/mistralai/client/agents.py @@ -2,16 +2,8 @@ # @generated-id: e946546e3eaa from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - agentscompletionrequest as models_agentscompletionrequest, - agentscompletionstreamrequest as models_agentscompletionstreamrequest, - mistralpromptmode as models_mistralpromptmode, - prediction as models_prediction, - responseformat as models_responseformat, - tool as models_tool, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -25,47 +17,40 @@ def complete( self, *, messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessage], - List[ - models_agentscompletionrequest.AgentsCompletionRequestMessageTypedDict - ], + List[models.AgentsCompletionRequestMessage], + List[models.AgentsCompletionRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ - 
models_agentscompletionrequest.AgentsCompletionRequestStop, - models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, + models.AgentsCompletionRequestStop, + models.AgentsCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_agentscompletionrequest.AgentsCompletionRequestToolChoice, - models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, + models.AgentsCompletionRequestToolChoice, + models.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -179,63 +164,56 @@ def complete( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if 
utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, *, messages: Union[ - List[models_agentscompletionrequest.AgentsCompletionRequestMessage], - List[ - models_agentscompletionrequest.AgentsCompletionRequestMessageTypedDict - ], + List[models.AgentsCompletionRequestMessage], + List[models.AgentsCompletionRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = False, stop: Optional[ Union[ - models_agentscompletionrequest.AgentsCompletionRequestStop, - models_agentscompletionrequest.AgentsCompletionRequestStopTypedDict, + models.AgentsCompletionRequestStop, + models.AgentsCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_agentscompletionrequest.AgentsCompletionRequestToolChoice, - models_agentscompletionrequest.AgentsCompletionRequestToolChoiceTypedDict, + models.AgentsCompletionRequestToolChoice, + models.AgentsCompletionRequestToolChoiceTypedDict, ] ] = None, 
presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -349,65 +327,56 @@ async def complete_async( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def stream( self, *, messages: Union[ - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessage - ], - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessageTypedDict - ], + List[models.AgentsCompletionStreamRequestMessage], + 
List[models.AgentsCompletionStreamRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, + models.AgentsCompletionStreamRequestStop, + models.AgentsCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, + models.AgentsCompletionStreamRequestToolChoice, + models.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -530,66 +499,57 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = 
unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, *, messages: Union[ - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessage - ], - List[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestMessageTypedDict - ], + List[models.AgentsCompletionStreamRequestMessage], + List[models.AgentsCompletionStreamRequestMessageTypedDict], ], agent_id: str, max_tokens: OptionalNullable[int] = UNSET, stream: Optional[bool] = True, stop: Optional[ Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStop, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestStopTypedDict, + models.AgentsCompletionStreamRequestStop, + models.AgentsCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = 
None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoice, - models_agentscompletionstreamrequest.AgentsCompletionStreamRequestToolChoiceTypedDict, + models.AgentsCompletionStreamRequestToolChoice, + models.AgentsCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -712,15 +672,15 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/basesdk.py b/src/mistralai/client/basesdk.py index 611b4059..a976121b 100644 --- a/src/mistralai/client/basesdk.py +++ b/src/mistralai/client/basesdk.py @@ -3,13 +3,18 @@ from .sdkconfiguration import SDKConfiguration import httpx -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import ( AfterErrorContext, AfterSuccessContext, BeforeRequestContext, ) -from mistralai.client.utils import RetryConfig, SerializedRequestBody, get_body_content +from mistralai.client.utils import ( + RetryConfig, + SerializedRequestBody, + get_body_content, + run_sync_in_thread, +) from typing import Callable, List, Mapping, Optional, Tuple from urllib.parse import parse_qs, urlparse @@ -261,7 +266,7 @@ def do(): if http_res is None: logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -282,7 +287,7 @@ def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -312,7 +317,10 @@ async def do_request_async( async def do(): http_res = None try: - req = hooks.before_request(BeforeRequestContext(hook_ctx), request) + req = await run_sync_in_thread( + hooks.before_request, BeforeRequestContext(hook_ctx), request + ) + logger.debug( "Request:\nMethod: %s\nURL: %s\nHeaders: %s\nBody: %s", req.method, @@ -326,14 +334,17 @@ async def do(): http_res = await 
client.send(req, stream=stream) except Exception as e: - _, e = hooks.after_error(AfterErrorContext(hook_ctx), None, e) + _, e = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), None, e + ) + if e is not None: logger.debug("Request Exception", exc_info=True) raise e if http_res is None: logger.debug("Raising no response SDK error") - raise models.NoResponseError("No response received") + raise errors.NoResponseError("No response received") logger.debug( "Response:\nStatus Code: %s\nURL: %s\nHeaders: %s\nBody: %s", @@ -344,9 +355,10 @@ async def do(): ) if utils.match_status_codes(error_status_codes, http_res.status_code): - result, err = hooks.after_error( - AfterErrorContext(hook_ctx), http_res, None + result, err = await run_sync_in_thread( + hooks.after_error, AfterErrorContext(hook_ctx), http_res, None ) + if err is not None: logger.debug("Request Exception", exc_info=True) raise err @@ -354,7 +366,7 @@ async def do(): http_res = result else: logger.debug("Raising unexpected SDK error") - raise models.SDKError("Unexpected error occurred", http_res) + raise errors.SDKError("Unexpected error occurred", http_res) return http_res @@ -366,6 +378,8 @@ async def do(): http_res = await do() if not utils.match_status_codes(error_status_codes, http_res.status_code): - http_res = hooks.after_success(AfterSuccessContext(hook_ctx), http_res) + http_res = await run_sync_in_thread( + hooks.after_success, AfterSuccessContext(hook_ctx), http_res + ) return http_res diff --git a/src/mistralai/client/batch_jobs.py b/src/mistralai/client/batch_jobs.py index 752c7652..0e135b30 100644 --- a/src/mistralai/client/batch_jobs.py +++ b/src/mistralai/client/batch_jobs.py @@ -3,14 +3,8 @@ from .basesdk import BaseSDK from datetime import datetime -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - apiendpoint as models_apiendpoint, 
- batchjobstatus as models_batchjobstatus, - batchrequest as models_batchrequest, - listbatchjobsop as models_listbatchjobsop, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -28,13 +22,13 @@ def list( metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, - order_by: Optional[models_listbatchjobsop.OrderBy] = "-created", + status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, + order_by: Optional[models.OrderBy] = "-created", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobsOut: + ) -> models.ListBatchJobsResponse: r"""Get Batch Jobs Get a list of batch jobs for your organization and user. 
@@ -63,7 +57,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListBatchJobsRequest( + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( page=page, page_size=page_size, model=model, @@ -104,7 +98,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListBatchJobs", + operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -116,15 +110,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobsOut, http_res) + return unmarshal_json_response(models.ListBatchJobsResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -136,13 +130,13 @@ async def list_async( metadata: OptionalNullable[Dict[str, Any]] = UNSET, created_after: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, - status: OptionalNullable[List[models_batchjobstatus.BatchJobStatus]] = UNSET, - order_by: Optional[models_listbatchjobsop.OrderBy] = "-created", + status: OptionalNullable[List[models.BatchJobStatus]] = UNSET, + order_by: Optional[models.OrderBy] = "-created", retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: 
Optional[Mapping[str, str]] = None, - ) -> models.BatchJobsOut: + ) -> models.ListBatchJobsResponse: r"""Get Batch Jobs Get a list of batch jobs for your organization and user. @@ -171,7 +165,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListBatchJobsRequest( + request = models.JobsAPIRoutesBatchGetBatchJobsRequest( page=page, page_size=page_size, model=model, @@ -212,7 +206,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListBatchJobs", + operation_id="jobs_api_routes_batch_get_batch_jobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -224,26 +218,23 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobsOut, http_res) + return unmarshal_json_response(models.ListBatchJobsResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def create( self, *, - endpoint: models_apiendpoint.APIEndpoint, + endpoint: models.APIEndpoint, input_files: OptionalNullable[List[str]] = UNSET, requests: OptionalNullable[ - Union[ - List[models_batchrequest.BatchRequest], - List[models_batchrequest.BatchRequestTypedDict], - ] + Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] ] = UNSET, model: OptionalNullable[str] 
= UNSET, agent_id: OptionalNullable[str] = UNSET, @@ -253,7 +244,7 @@ def create( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Create Batch Job Create a new batch job, it will be queued for processing. @@ -280,7 +271,7 @@ def create( else: base_url = self._get_url(base_url, url_variables) - request = models.BatchJobIn( + request = models.CreateBatchJobRequest( input_files=input_files, requests=utils.get_pydantic_model( requests, OptionalNullable[List[models.BatchRequest]] @@ -306,7 +297,7 @@ def create( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.BatchJobIn + request, False, False, "json", models.CreateBatchJobRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -324,7 +315,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateBatchJob", + operation_id="jobs_api_routes_batch_create_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -336,26 +327,23 @@ def create( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + 
raise errors.SDKError("Unexpected response received", http_res) async def create_async( self, *, - endpoint: models_apiendpoint.APIEndpoint, + endpoint: models.APIEndpoint, input_files: OptionalNullable[List[str]] = UNSET, requests: OptionalNullable[ - Union[ - List[models_batchrequest.BatchRequest], - List[models_batchrequest.BatchRequestTypedDict], - ] + Union[List[models.BatchRequest], List[models.BatchRequestTypedDict]] ] = UNSET, model: OptionalNullable[str] = UNSET, agent_id: OptionalNullable[str] = UNSET, @@ -365,7 +353,7 @@ async def create_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Create Batch Job Create a new batch job, it will be queued for processing. @@ -392,7 +380,7 @@ async def create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.BatchJobIn( + request = models.CreateBatchJobRequest( input_files=input_files, requests=utils.get_pydantic_model( requests, OptionalNullable[List[models.BatchRequest]] @@ -418,7 +406,7 @@ async def create_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.BatchJobIn + request, False, False, "json", models.CreateBatchJobRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -436,7 +424,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateBatchJob", + operation_id="jobs_api_routes_batch_create_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -448,15 +436,15 @@ async def create_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if 
utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -467,7 +455,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Get Batch Job Get a batch job details by its UUID. @@ -492,7 +480,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetBatchJobRequest( + request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, inline=inline, ) @@ -526,7 +514,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetBatchJob", + operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -538,15 +526,15 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", 
http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -557,7 +545,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Get Batch Job Get a batch job details by its UUID. @@ -582,7 +570,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetBatchJobRequest( + request = models.JobsAPIRoutesBatchGetBatchJobRequest( job_id=job_id, inline=inline, ) @@ -616,7 +604,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetBatchJob", + operation_id="jobs_api_routes_batch_get_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -628,15 +616,15 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def cancel( self, @@ -646,7 +634,7 @@ def cancel( server_url: Optional[str] = None, 
timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Cancel Batch Job Request the cancellation of a batch job. @@ -667,7 +655,7 @@ def cancel( else: base_url = self._get_url(base_url, url_variables) - request = models.CancelBatchJobRequest( + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( job_id=job_id, ) @@ -700,7 +688,7 @@ def cancel( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CancelBatchJob", + operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -712,15 +700,15 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def cancel_async( self, @@ -730,7 +718,7 @@ async def cancel_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.BatchJobOut: + ) -> models.BatchJob: r"""Cancel Batch Job Request the cancellation of a batch job. 
@@ -751,7 +739,7 @@ async def cancel_async( else: base_url = self._get_url(base_url, url_variables) - request = models.CancelBatchJobRequest( + request = models.JobsAPIRoutesBatchCancelBatchJobRequest( job_id=job_id, ) @@ -784,7 +772,7 @@ async def cancel_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CancelBatchJob", + operation_id="jobs_api_routes_batch_cancel_batch_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -796,12 +784,12 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.BatchJobOut, http_res) + return unmarshal_json_response(models.BatchJob, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/beta_agents.py b/src/mistralai/client/beta_agents.py index 4e692f17..157c5de4 100644 --- a/src/mistralai/client/beta_agents.py +++ b/src/mistralai/client/beta_agents.py @@ -2,15 +2,8 @@ # @generated-id: b64ad29b7174 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - agentcreationrequest as models_agentcreationrequest, - agentupdaterequest as models_agentupdaterequest, - completionargs as 
models_completionargs, - getagentop as models_getagentop, - requestsource as models_requestsource, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -28,15 +21,12 @@ def create( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentcreationrequest.AgentCreationRequestTool], - List[models_agentcreationrequest.AgentCreationRequestToolTypedDict], + List[models.CreateAgentRequestTool], + List[models.CreateAgentRequestToolTypedDict], ] ] = None, completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, @@ -75,10 +65,10 @@ def create( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentCreationRequest( + request = models.CreateAgentRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTool]] + tools, Optional[List[models.CreateAgentRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -105,7 +95,7 @@ def create( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentCreationRequest + request, False, False, "json", models.CreateAgentRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -123,7 +113,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateAgent", + operation_id="agents_api_v1_agents_create", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -139,17 
+129,17 @@ def create( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def create_async( self, @@ -159,15 +149,12 @@ async def create_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentcreationrequest.AgentCreationRequestTool], - List[models_agentcreationrequest.AgentCreationRequestToolTypedDict], + List[models.CreateAgentRequestTool], + List[models.CreateAgentRequestToolTypedDict], ] ] = None, completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, description: OptionalNullable[str] = UNSET, handoffs: OptionalNullable[List[str]] = UNSET, @@ -206,10 +193,10 @@ async def create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AgentCreationRequest( + request = models.CreateAgentRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentCreationRequestTool]] + tools, 
Optional[List[models.CreateAgentRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -236,7 +223,7 @@ async def create_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.AgentCreationRequest + request, False, False, "json", models.CreateAgentRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -254,7 +241,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateAgent", + operation_id="agents_api_v1_agents_create", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -270,17 +257,17 @@ async def create_async( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def list( self, @@ -288,7 +275,7 @@ def list( page: Optional[int] = 0, page_size: Optional[int] = 20, deployment_chat: OptionalNullable[bool] = UNSET, - sources: 
OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, name: OptionalNullable[str] = UNSET, search: OptionalNullable[str] = UNSET, id: OptionalNullable[str] = UNSET, @@ -325,7 +312,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListAgentsRequest( + request = models.AgentsAPIV1AgentsListRequest( page=page, page_size=page_size, deployment_chat=deployment_chat, @@ -365,7 +352,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgents", + operation_id="agents_api_v1_agents_list", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -381,17 +368,17 @@ def list( return unmarshal_json_response(List[models.Agent], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -399,7 +386,7 @@ async def list_async( page: Optional[int] = 0, page_size: Optional[int] = 20, deployment_chat: OptionalNullable[bool] = UNSET, - sources: 
OptionalNullable[List[models_requestsource.RequestSource]] = UNSET, + sources: OptionalNullable[List[models.RequestSource]] = UNSET, name: OptionalNullable[str] = UNSET, search: OptionalNullable[str] = UNSET, id: OptionalNullable[str] = UNSET, @@ -436,7 +423,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListAgentsRequest( + request = models.AgentsAPIV1AgentsListRequest( page=page, page_size=page_size, deployment_chat=deployment_chat, @@ -476,7 +463,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgents", + operation_id="agents_api_v1_agents_list", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -492,17 +479,17 @@ async def list_async( return unmarshal_json_response(List[models.Agent], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -510,8 +497,8 @@ def get( agent_id: str, agent_version: OptionalNullable[ Union[ - models_getagentop.GetAgentAgentVersion, - 
models_getagentop.GetAgentAgentVersionTypedDict, + models.AgentsAPIV1AgentsGetAgentVersion, + models.AgentsAPIV1AgentsGetAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -540,7 +527,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetAgentRequest( + request = models.AgentsAPIV1AgentsGetRequest( agent_id=agent_id, agent_version=agent_version, ) @@ -574,7 +561,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetAgent", + operation_id="agents_api_v1_agents_get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -590,17 +577,17 @@ def get( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -608,8 +595,8 @@ async def get_async( agent_id: str, agent_version: OptionalNullable[ Union[ - models_getagentop.GetAgentAgentVersion, - models_getagentop.GetAgentAgentVersionTypedDict, + models.AgentsAPIV1AgentsGetAgentVersion, + 
models.AgentsAPIV1AgentsGetAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -638,7 +625,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetAgentRequest( + request = models.AgentsAPIV1AgentsGetRequest( agent_id=agent_id, agent_version=agent_version, ) @@ -672,7 +659,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetAgent", + operation_id="agents_api_v1_agents_get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -688,17 +675,17 @@ async def get_async( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update( self, @@ -707,15 +694,12 @@ def update( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentupdaterequest.AgentUpdateRequestTool], - List[models_agentupdaterequest.AgentUpdateRequestToolTypedDict], + List[models.UpdateAgentRequestTool], + 
List[models.UpdateAgentRequestToolTypedDict], ] ] = None, completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, model: OptionalNullable[str] = UNSET, name: OptionalNullable[str] = UNSET, @@ -759,12 +743,12 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateAgentRequest( + request = models.AgentsAPIV1AgentsUpdateRequest( agent_id=agent_id, - agent_update_request=models.AgentUpdateRequest( + update_agent_request=models.UpdateAgentRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTool]] + tools, Optional[List[models.UpdateAgentRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -793,11 +777,11 @@ def update( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.agent_update_request, + request.update_agent_request, False, False, "json", - models.AgentUpdateRequest, + models.UpdateAgentRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -815,7 +799,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateAgent", + operation_id="agents_api_v1_agents_update", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -831,17 +815,17 @@ def update( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): 
http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -850,15 +834,12 @@ async def update_async( instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_agentupdaterequest.AgentUpdateRequestTool], - List[models_agentupdaterequest.AgentUpdateRequestToolTypedDict], + List[models.UpdateAgentRequestTool], + List[models.UpdateAgentRequestToolTypedDict], ] ] = None, completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, model: OptionalNullable[str] = UNSET, name: OptionalNullable[str] = UNSET, @@ -902,12 +883,12 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateAgentRequest( + request = models.AgentsAPIV1AgentsUpdateRequest( agent_id=agent_id, - agent_update_request=models.AgentUpdateRequest( + update_agent_request=models.UpdateAgentRequest( instructions=instructions, tools=utils.get_pydantic_model( - tools, Optional[List[models.AgentUpdateRequestTool]] + tools, Optional[List[models.UpdateAgentRequestTool]] ), completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] @@ -936,11 +917,11 @@ async def update_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.agent_update_request, + 
request.update_agent_request, False, False, "json", - models.AgentUpdateRequest, + models.UpdateAgentRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -958,7 +939,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateAgent", + operation_id="agents_api_v1_agents_update", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -974,17 +955,17 @@ async def update_async( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -1013,7 +994,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteAgentRequest( + request = models.AgentsAPIV1AgentsDeleteRequest( agent_id=agent_id, ) @@ -1046,7 +1027,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteAgent", + operation_id="agents_api_v1_agents_delete", oauth2_scopes=None, security_source=get_security_from_env( 
self.sdk_configuration.security, models.Security @@ -1062,17 +1043,17 @@ def delete( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -1101,7 +1082,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteAgentRequest( + request = models.AgentsAPIV1AgentsDeleteRequest( agent_id=agent_id, ) @@ -1134,7 +1115,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteAgent", + operation_id="agents_api_v1_agents_delete", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1150,17 +1131,17 @@ async def delete_async( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update_version( self, @@ -1193,7 +1174,7 @@ def update_version( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateAgentVersionRequest( + request = models.AgentsAPIV1AgentsUpdateVersionRequest( agent_id=agent_id, version=version, ) @@ -1227,7 +1208,7 @@ def update_version( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateAgentVersion", + operation_id="agents_api_v1_agents_update_version", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1243,17 +1224,17 @@ def update_version( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error 
occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_version_async( self, @@ -1286,7 +1267,7 @@ async def update_version_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateAgentVersionRequest( + request = models.AgentsAPIV1AgentsUpdateVersionRequest( agent_id=agent_id, version=version, ) @@ -1320,7 +1301,7 @@ async def update_version_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateAgentVersion", + operation_id="agents_api_v1_agents_update_version", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1336,17 +1317,17 @@ async def update_version_async( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def list_versions( self, @@ -1381,7 +1362,7 @@ def list_versions( else: base_url = self._get_url(base_url, url_variables) - request = 
models.ListAgentVersionsRequest( + request = models.AgentsAPIV1AgentsListVersionsRequest( agent_id=agent_id, page=page, page_size=page_size, @@ -1416,7 +1397,7 @@ def list_versions( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgentVersions", + operation_id="agents_api_v1_agents_list_versions", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1432,17 +1413,17 @@ def list_versions( return unmarshal_json_response(List[models.Agent], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_versions_async( self, @@ -1477,7 +1458,7 @@ async def list_versions_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListAgentVersionsRequest( + request = models.AgentsAPIV1AgentsListVersionsRequest( agent_id=agent_id, page=page, page_size=page_size, @@ -1512,7 +1493,7 @@ async def list_versions_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgentVersions", + operation_id="agents_api_v1_agents_list_versions", 
oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1528,17 +1509,17 @@ async def list_versions_async( return unmarshal_json_response(List[models.Agent], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get_version( self, @@ -1571,7 +1552,7 @@ def get_version( else: base_url = self._get_url(base_url, url_variables) - request = models.GetAgentVersionRequest( + request = models.AgentsAPIV1AgentsGetVersionRequest( agent_id=agent_id, version=version, ) @@ -1605,7 +1586,7 @@ def get_version( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetAgentVersion", + operation_id="agents_api_v1_agents_get_version", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1621,17 +1602,17 @@ def get_version( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + 
errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_version_async( self, @@ -1664,7 +1645,7 @@ async def get_version_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetAgentVersionRequest( + request = models.AgentsAPIV1AgentsGetVersionRequest( agent_id=agent_id, version=version, ) @@ -1698,7 +1679,7 @@ async def get_version_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetAgentVersion", + operation_id="agents_api_v1_agents_get_version", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1714,17 +1695,17 @@ async def get_version_async( return unmarshal_json_response(models.Agent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error 
occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def create_version_alias( self, @@ -1759,7 +1740,7 @@ def create_version_alias( else: base_url = self._get_url(base_url, url_variables) - request = models.CreateOrUpdateAgentAliasRequest( + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( agent_id=agent_id, alias=alias, version=version, @@ -1794,7 +1775,7 @@ def create_version_alias( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateOrUpdateAgentAlias", + operation_id="agents_api_v1_agents_create_or_update_alias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1810,17 +1791,17 @@ def create_version_alias( return unmarshal_json_response(models.AgentAliasResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected 
response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def create_version_alias_async( self, @@ -1855,7 +1836,7 @@ async def create_version_alias_async( else: base_url = self._get_url(base_url, url_variables) - request = models.CreateOrUpdateAgentAliasRequest( + request = models.AgentsAPIV1AgentsCreateOrUpdateAliasRequest( agent_id=agent_id, alias=alias, version=version, @@ -1890,7 +1871,7 @@ async def create_version_alias_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateOrUpdateAgentAlias", + operation_id="agents_api_v1_agents_create_or_update_alias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1906,17 +1887,17 @@ async def create_version_alias_async( return unmarshal_json_response(models.AgentAliasResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def list_version_aliases( self, @@ -1947,7 +1928,7 @@ def list_version_aliases( else: base_url = self._get_url(base_url, url_variables) - request = 
models.ListAgentAliasesRequest( + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( agent_id=agent_id, ) @@ -1980,7 +1961,7 @@ def list_version_aliases( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgentAliases", + operation_id="agents_api_v1_agents_list_version_aliases", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1996,17 +1977,17 @@ def list_version_aliases( return unmarshal_json_response(List[models.AgentAliasResponse], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_version_aliases_async( self, @@ -2037,7 +2018,7 @@ async def list_version_aliases_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListAgentAliasesRequest( + request = models.AgentsAPIV1AgentsListVersionAliasesRequest( agent_id=agent_id, ) @@ -2070,7 +2051,7 @@ async def list_version_aliases_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListAgentAliases", + 
operation_id="agents_api_v1_agents_list_version_aliases", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2086,17 +2067,17 @@ async def list_version_aliases_async( return unmarshal_json_response(List[models.AgentAliasResponse], http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete_version_alias( self, @@ -2129,7 +2110,7 @@ def delete_version_alias( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteAgentAliasRequest( + request = models.AgentsAPIV1AgentsDeleteAliasRequest( agent_id=agent_id, alias=alias, ) @@ -2163,7 +2144,7 @@ def delete_version_alias( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteAgentAlias", + operation_id="agents_api_v1_agents_delete_alias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2179,17 +2160,17 @@ def delete_version_alias( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - 
models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_version_alias_async( self, @@ -2222,7 +2203,7 @@ async def delete_version_alias_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteAgentAliasRequest( + request = models.AgentsAPIV1AgentsDeleteAliasRequest( agent_id=agent_id, alias=alias, ) @@ -2256,7 +2237,7 @@ async def delete_version_alias_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteAgentAlias", + operation_id="agents_api_v1_agents_delete_alias", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2272,14 +2253,14 @@ async def delete_version_alias_async( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + 
raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/chat.py b/src/mistralai/client/chat.py index 35698d32..13b9c01f 100644 --- a/src/mistralai/client/chat.py +++ b/src/mistralai/client/chat.py @@ -2,16 +2,8 @@ # @generated-id: 7eba0f088d47 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - chatcompletionrequest as models_chatcompletionrequest, - chatcompletionstreamrequest as models_chatcompletionstreamrequest, - mistralpromptmode as models_mistralpromptmode, - prediction as models_prediction, - responseformat as models_responseformat, - tool as models_tool, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -111,8 +103,8 @@ def complete( *, model: str, messages: Union[ - List[models_chatcompletionrequest.ChatCompletionRequestMessage], - List[models_chatcompletionrequest.ChatCompletionRequestMessageTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -120,37 +112,32 @@ def complete( stream: Optional[bool] = False, stop: Optional[ Union[ - models_chatcompletionrequest.ChatCompletionRequestStop, - models_chatcompletionrequest.ChatCompletionRequestStopTypedDict, + 
models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_chatcompletionrequest.ChatCompletionRequestToolChoice, - models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -271,25 +258,25 @@ def complete( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, 
http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, *, model: str, messages: Union[ - List[models_chatcompletionrequest.ChatCompletionRequestMessage], - List[models_chatcompletionrequest.ChatCompletionRequestMessageTypedDict], + List[models.ChatCompletionRequestMessage], + List[models.ChatCompletionRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -297,37 +284,32 @@ async def complete_async( stream: Optional[bool] = False, stop: Optional[ Union[ - models_chatcompletionrequest.ChatCompletionRequestStop, - models_chatcompletionrequest.ChatCompletionRequestStopTypedDict, + models.ChatCompletionRequestStop, + models.ChatCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_chatcompletionrequest.ChatCompletionRequestToolChoice, - models_chatcompletionrequest.ChatCompletionRequestToolChoiceTypedDict, + models.ChatCompletionRequestToolChoice, + models.ChatCompletionRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = 
UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -448,27 +430,25 @@ async def complete_async( return unmarshal_json_response(models.ChatCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def stream( self, *, model: str, messages: Union[ - List[models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessage], - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessageTypedDict - ], + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -476,37 +456,32 @@ def 
stream( stream: Optional[bool] = True, stop: Optional[ Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -636,28 +611,26 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise 
models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, *, model: str, messages: Union[ - List[models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessage], - List[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestMessageTypedDict - ], + List[models.ChatCompletionStreamRequestMessage], + List[models.ChatCompletionStreamRequestMessageTypedDict], ], temperature: OptionalNullable[float] = UNSET, top_p: Optional[float] = None, @@ -665,37 +638,32 @@ async def stream_async( stream: Optional[bool] = True, stop: Optional[ Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStop, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestStopTypedDict, + models.ChatCompletionStreamRequestStop, + models.ChatCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, metadata: OptionalNullable[Dict[str, Any]] = UNSET, response_format: Optional[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = None, tools: OptionalNullable[ - Union[List[models_tool.Tool], List[models_tool.ToolTypedDict]] + 
Union[List[models.Tool], List[models.ToolTypedDict]] ] = UNSET, tool_choice: Optional[ Union[ - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoice, - models_chatcompletionstreamrequest.ChatCompletionStreamRequestToolChoiceTypedDict, + models.ChatCompletionStreamRequestToolChoice, + models.ChatCompletionStreamRequestToolChoiceTypedDict, ] ] = None, presence_penalty: Optional[float] = None, frequency_penalty: Optional[float] = None, n: OptionalNullable[int] = UNSET, prediction: Optional[ - Union[models_prediction.Prediction, models_prediction.PredictionTypedDict] + Union[models.Prediction, models.PredictionTypedDict] ] = None, parallel_tool_calls: Optional[bool] = None, - prompt_mode: OptionalNullable[ - models_mistralpromptmode.MistralPromptMode - ] = UNSET, + prompt_mode: OptionalNullable[models.MistralPromptMode] = UNSET, safe_prompt: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, @@ -825,15 +793,15 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/classifiers.py b/src/mistralai/client/classifiers.py index 3407c4b7..67199b60 100644 --- a/src/mistralai/client/classifiers.py +++ b/src/mistralai/client/classifiers.py @@ -2,13 +2,8 @@ # @generated-id: 26e773725732 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - chatmoderationrequest as models_chatmoderationrequest, - classificationrequest as models_classificationrequest, - inputs as models_inputs, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -23,8 +18,8 @@ def moderate( *, model: str, inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -106,25 +101,25 @@ def moderate( return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API 
error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def moderate_async( self, *, model: str, inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -206,24 +201,24 @@ async def moderate_async( return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def moderate_chat( self, *, inputs: Union[ - models_chatmoderationrequest.ChatModerationRequestInputs3, - 
models_chatmoderationrequest.ChatModerationRequestInputs3TypedDict, + models.ChatModerationRequestInputs3, + models.ChatModerationRequestInputs3TypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -305,24 +300,24 @@ def moderate_chat( return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def moderate_chat_async( self, *, inputs: Union[ - models_chatmoderationrequest.ChatModerationRequestInputs3, - models_chatmoderationrequest.ChatModerationRequestInputs3TypedDict, + models.ChatModerationRequestInputs3, + models.ChatModerationRequestInputs3TypedDict, ], model: str, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -404,25 +399,25 @@ async def moderate_chat_async( return unmarshal_json_response(models.ModerationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise 
errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def classify( self, *, model: str, inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -504,25 +499,25 @@ def classify( return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", 
http_res) + raise errors.SDKError("Unexpected response received", http_res) async def classify_async( self, *, model: str, inputs: Union[ - models_classificationrequest.ClassificationRequestInputs, - models_classificationrequest.ClassificationRequestInputsTypedDict, + models.ClassificationRequestInputs, + models.ClassificationRequestInputsTypedDict, ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -604,23 +599,23 @@ async def classify_async( return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def classify_chat( self, *, model: str, - inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], + input: Union[models.Inputs, models.InputsTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -629,7 +624,7 @@ def classify_chat( r"""Chat Classifications :param model: - :param inputs: Chat to classify + :param input: Chat to classify :param retries: Override the default retry 
configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -647,7 +642,7 @@ def classify_chat( request = models.ChatClassificationRequest( model=model, - inputs=utils.get_pydantic_model(inputs, models.Inputs), + input=utils.get_pydantic_model(input, models.Inputs), ) req = self._build_request( @@ -698,23 +693,23 @@ def classify_chat( return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def classify_chat_async( self, *, model: str, - inputs: Union[models_inputs.Inputs, models_inputs.InputsTypedDict], + input: Union[models.Inputs, models.InputsTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -723,7 +718,7 @@ async def classify_chat_async( r"""Chat Classifications :param model: - :param inputs: Chat to classify + :param input: Chat to classify :param retries: Override the default retry configuration for this method 
:param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -741,7 +736,7 @@ async def classify_chat_async( request = models.ChatClassificationRequest( model=model, - inputs=utils.get_pydantic_model(inputs, models.Inputs), + input=utils.get_pydantic_model(input, models.Inputs), ) req = self._build_request_async( @@ -792,14 +787,14 @@ async def classify_chat_async( return unmarshal_json_response(models.ClassificationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/conversations.py b/src/mistralai/client/conversations.py index 646b91f3..ec33b1fb 100644 --- a/src/mistralai/client/conversations.py +++ b/src/mistralai/client/conversations.py @@ -2,18 +2,8 @@ # @generated-id: 40692a878064 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - completionargs as 
models_completionargs, - conversationappendrequest as models_conversationappendrequest, - conversationappendstreamrequest as models_conversationappendstreamrequest, - conversationinputs as models_conversationinputs, - conversationrequest as models_conversationrequest, - conversationrestartrequest as models_conversationrestartrequest, - conversationrestartstreamrequest as models_conversationrestartstreamrequest, - conversationstreamrequest as models_conversationstreamrequest, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -238,27 +228,21 @@ async def run_generator() -> ( def start( self, *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], stream: Optional[bool] = False, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models_conversationrequest.ConversationRequestHandoffExecution + models.ConversationRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationrequest.ConversationRequestTool], - List[models_conversationrequest.ConversationRequestToolTypedDict], + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -266,8 +250,8 @@ def start( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrequest.ConversationRequestAgentVersion, - 
models_conversationrequest.ConversationRequestAgentVersionTypedDict, + models.ConversationRequestAgentVersion, + models.ConversationRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -360,7 +344,7 @@ def start( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartConversation", + operation_id="agents_api_v1_conversations_start", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -376,42 +360,36 @@ def start( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def start_async( self, *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], stream: Optional[bool] = False, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models_conversationrequest.ConversationRequestHandoffExecution + models.ConversationRequestHandoffExecution ] = 
UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationrequest.ConversationRequestTool], - List[models_conversationrequest.ConversationRequestToolTypedDict], + List[models.ConversationRequestTool], + List[models.ConversationRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -419,8 +397,8 @@ async def start_async( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrequest.ConversationRequestAgentVersion, - models_conversationrequest.ConversationRequestAgentVersionTypedDict, + models.ConversationRequestAgentVersion, + models.ConversationRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -513,7 +491,7 @@ async def start_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartConversation", + operation_id="agents_api_v1_conversations_start", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -529,17 +507,17 @@ async def start_async( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if 
utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def list( self, @@ -551,7 +529,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ListConversationsResponse]: + ) -> List[models.AgentsAPIV1ConversationsListResponse]: r"""List all created conversations. Retrieve a list of conversation entities sorted by creation time. @@ -574,7 +552,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListConversationsRequest( + request = models.AgentsAPIV1ConversationsListRequest( page=page, page_size=page_size, metadata=metadata, @@ -609,7 +587,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListConversations", + operation_id="agents_api_v1_conversations_list", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -623,21 +601,21 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - List[models.ListConversationsResponse], http_res + List[models.AgentsAPIV1ConversationsListResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error 
occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -649,7 +627,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> List[models.ListConversationsResponse]: + ) -> List[models.AgentsAPIV1ConversationsListResponse]: r"""List all created conversations. Retrieve a list of conversation entities sorted by creation time. @@ -672,7 +650,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListConversationsRequest( + request = models.AgentsAPIV1ConversationsListRequest( page=page, page_size=page_size, metadata=metadata, @@ -707,7 +685,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListConversations", + operation_id="agents_api_v1_conversations_list", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -721,21 +699,21 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): return unmarshal_json_response( - List[models.ListConversationsResponse], http_res + List[models.AgentsAPIV1ConversationsListResponse], http_res ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise 
errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -766,7 +744,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationRequest( + request = models.AgentsAPIV1ConversationsGetRequest( conversation_id=conversation_id, ) @@ -799,7 +777,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversation", + operation_id="agents_api_v1_conversations_get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -815,17 +793,17 @@ def get( return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error 
occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -856,7 +834,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationRequest( + request = models.AgentsAPIV1ConversationsGetRequest( conversation_id=conversation_id, ) @@ -889,7 +867,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversation", + operation_id="agents_api_v1_conversations_get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -905,17 +883,17 @@ async def get_async( return unmarshal_json_response(models.ResponseV1ConversationsGet, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -946,7 +924,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = 
models.DeleteConversationRequest( + request = models.AgentsAPIV1ConversationsDeleteRequest( conversation_id=conversation_id, ) @@ -979,7 +957,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteConversation", + operation_id="agents_api_v1_conversations_delete", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -995,17 +973,17 @@ def delete( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -1036,7 +1014,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteConversationRequest( + request = models.AgentsAPIV1ConversationsDeleteRequest( conversation_id=conversation_id, ) @@ -1069,7 +1047,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteConversation", + operation_id="agents_api_v1_conversations_delete", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1085,37 
+1063,39 @@ async def delete_async( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def append( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationappendrequest.ConversationAppendRequestHandoffExecution + models.ConversationAppendRequestHandoffExecution ] = "server", completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], ] - ] = None, + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1131,6 +1111,7 @@ def append( :param 
store: Whether to store the results into our servers or not. :param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1146,16 +1127,22 @@ def append( else: base_url = self._get_url(base_url, url_variables) - request = models.AppendConversationRequest( + request = models.AgentsAPIV1ConversationsAppendRequest( conversation_id=conversation_id, conversation_append_request=models.ConversationAppendRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), ), ) @@ -1195,7 +1182,7 @@ def append( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="AppendConversation", + operation_id="agents_api_v1_conversations_append", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1211,37 +1198,39 @@ def append( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def append_async( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationappendrequest.ConversationAppendRequestHandoffExecution + models.ConversationAppendRequestHandoffExecution ] = "server", completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], ] - ] = None, + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -1257,6 +1246,7 @@ async def append_async( :param store: Whether to store the results into our servers or not. 
:param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -1272,16 +1262,22 @@ async def append_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AppendConversationRequest( + request = models.AgentsAPIV1ConversationsAppendRequest( conversation_id=conversation_id, conversation_append_request=models.ConversationAppendRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), ), ) @@ -1321,7 +1317,7 @@ async def append_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="AppendConversation", + operation_id="agents_api_v1_conversations_append", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1337,17 +1333,17 @@ async def append_async( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get_history( self, @@ -1378,7 +1374,7 @@ def get_history( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationHistoryRequest( + request = models.AgentsAPIV1ConversationsHistoryRequest( conversation_id=conversation_id, ) @@ -1411,7 +1407,7 @@ def get_history( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversationHistory", + operation_id="agents_api_v1_conversations_history", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1427,17 +1423,17 @@ def get_history( return unmarshal_json_response(models.ConversationHistory, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise 
models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_history_async( self, @@ -1468,7 +1464,7 @@ async def get_history_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationHistoryRequest( + request = models.AgentsAPIV1ConversationsHistoryRequest( conversation_id=conversation_id, ) @@ -1501,7 +1497,7 @@ async def get_history_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversationHistory", + operation_id="agents_api_v1_conversations_history", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1517,17 +1513,17 @@ async def get_history_async( return unmarshal_json_response(models.ConversationHistory, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get_messages( self, @@ -1558,7 +1554,7 @@ def get_messages( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationMessagesRequest( + request = 
models.AgentsAPIV1ConversationsMessagesRequest( conversation_id=conversation_id, ) @@ -1591,7 +1587,7 @@ def get_messages( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversationMessages", + operation_id="agents_api_v1_conversations_messages", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1607,17 +1603,17 @@ def get_messages( return unmarshal_json_response(models.ConversationMessages, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_messages_async( self, @@ -1648,7 +1644,7 @@ async def get_messages_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetConversationMessagesRequest( + request = models.AgentsAPIV1ConversationsMessagesRequest( conversation_id=conversation_id, ) @@ -1681,7 +1677,7 @@ async def get_messages_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetConversationMessages", + operation_id="agents_api_v1_conversations_messages", oauth2_scopes=None, 
security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1697,43 +1693,39 @@ async def get_messages_async( return unmarshal_json_response(models.ConversationMessages, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def restart( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationrestartrequest.ConversationRestartRequestHandoffExecution + models.ConversationRestartRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ - 
models_conversationrestartrequest.ConversationRestartRequestAgentVersion, - models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + models.ConversationRestartRequestAgentVersion, + models.ConversationRestartRequestAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -1746,8 +1738,8 @@ def restart( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: :param from_entry_id: + :param inputs: :param stream: :param store: Whether to store the results into our servers or not. :param handoff_execution: @@ -1769,10 +1761,12 @@ def restart( else: base_url = self._get_url(base_url, url_variables) - request = models.RestartConversationRequest( + request = models.AgentsAPIV1ConversationsRestartRequest( conversation_id=conversation_id, conversation_restart_request=models.ConversationRestartRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, @@ -1821,7 +1815,7 @@ def restart( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RestartConversation", + operation_id="agents_api_v1_conversations_restart", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1837,43 +1831,39 @@ def restart( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise 
errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def restart_async( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = False, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationrestartrequest.ConversationRestartRequestHandoffExecution + models.ConversationRestartRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrestartrequest.ConversationRestartRequestAgentVersion, - models_conversationrestartrequest.ConversationRestartRequestAgentVersionTypedDict, + models.ConversationRestartRequestAgentVersion, + models.ConversationRestartRequestAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -1886,8 +1876,8 @@ async def restart_async( Given a conversation_id and an id, recreate a conversation from this point and run completion. 
A new conversation is returned with the new entries returned. :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: :param from_entry_id: + :param inputs: :param stream: :param store: Whether to store the results into our servers or not. :param handoff_execution: @@ -1909,10 +1899,12 @@ async def restart_async( else: base_url = self._get_url(base_url, url_variables) - request = models.RestartConversationRequest( + request = models.AgentsAPIV1ConversationsRestartRequest( conversation_id=conversation_id, conversation_restart_request=models.ConversationRestartRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, @@ -1961,7 +1953,7 @@ async def restart_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RestartConversation", + operation_id="agents_api_v1_conversations_restart", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1977,44 +1969,36 @@ async def restart_async( return unmarshal_json_response(models.ConversationResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", 
http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def start_stream( self, *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], stream: Optional[bool] = True, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models_conversationstreamrequest.ConversationStreamRequestHandoffExecution + models.ConversationStreamRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTool], - List[ - models_conversationstreamrequest.ConversationStreamRequestToolTypedDict - ], + List[models.ConversationStreamRequestTool], + List[models.ConversationStreamRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -2022,8 +2006,8 @@ def start_stream( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationstreamrequest.ConversationStreamRequestAgentVersion, - models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + models.ConversationStreamRequestAgentVersion, + models.ConversationStreamRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -2116,7 +2100,7 @@ def start_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartConversationStream", + operation_id="agents_api_v1_conversations_start_stream", 
oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2138,45 +2122,37 @@ def start_stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def start_stream_async( self, *, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Union[models.ConversationInputs, models.ConversationInputsTypedDict], stream: Optional[bool] = True, store: OptionalNullable[bool] = UNSET, handoff_execution: OptionalNullable[ - models_conversationstreamrequest.ConversationStreamRequestHandoffExecution + models.ConversationStreamRequestHandoffExecution ] = UNSET, instructions: OptionalNullable[str] = UNSET, tools: Optional[ Union[ - List[models_conversationstreamrequest.ConversationStreamRequestTool], - List[ - models_conversationstreamrequest.ConversationStreamRequestToolTypedDict - ], + 
List[models.ConversationStreamRequestTool], + List[models.ConversationStreamRequestToolTypedDict], ] ] = None, completion_args: OptionalNullable[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = UNSET, name: OptionalNullable[str] = UNSET, description: OptionalNullable[str] = UNSET, @@ -2184,8 +2160,8 @@ async def start_stream_async( agent_id: OptionalNullable[str] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationstreamrequest.ConversationStreamRequestAgentVersion, - models_conversationstreamrequest.ConversationStreamRequestAgentVersionTypedDict, + models.ConversationStreamRequestAgentVersion, + models.ConversationStreamRequestAgentVersionTypedDict, ] ] = UNSET, model: OptionalNullable[str] = UNSET, @@ -2278,7 +2254,7 @@ async def start_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartConversationStream", + operation_id="agents_api_v1_conversations_start_stream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2300,38 +2276,40 @@ async def start_stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def append_stream( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution + models.ConversationAppendStreamRequestHandoffExecution ] = "server", completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], ] - ] = None, + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -2347,6 +2325,7 @@ def append_stream( :param store: Whether to store the results into our servers or not. 
:param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -2362,16 +2341,22 @@ def append_stream( else: base_url = self._get_url(base_url, url_variables) - request = models.AppendConversationStreamRequest( + request = models.AgentsAPIV1ConversationsAppendStreamRequest( conversation_id=conversation_id, conversation_append_stream_request=models.ConversationAppendStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), ), ) @@ -2411,7 +2396,7 @@ def append_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="AppendConversationStream", + operation_id="agents_api_v1_conversations_append_stream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2433,38 +2418,40 @@ def append_stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = 
utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def append_stream_async( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationappendstreamrequest.ConversationAppendStreamRequestHandoffExecution + models.ConversationAppendStreamRequestHandoffExecution ] = "server", completion_args: Optional[ + Union[models.CompletionArgs, models.CompletionArgsTypedDict] + ] = None, + tool_confirmations: OptionalNullable[ Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, + List[models.ToolCallConfirmation], + List[models.ToolCallConfirmationTypedDict], ] - ] = None, + ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -2480,6 +2467,7 @@ async def append_stream_async( :param store: Whether to store the results into our servers or not. 
:param handoff_execution: :param completion_args: White-listed arguments from the completion API + :param tool_confirmations: :param retries: Override the default retry configuration for this method :param server_url: Override the default server URL for this method :param timeout_ms: Override the default request timeout configuration for this method in milliseconds @@ -2495,16 +2483,22 @@ async def append_stream_async( else: base_url = self._get_url(base_url, url_variables) - request = models.AppendConversationStreamRequest( + request = models.AgentsAPIV1ConversationsAppendStreamRequest( conversation_id=conversation_id, conversation_append_stream_request=models.ConversationAppendStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, completion_args=utils.get_pydantic_model( completion_args, Optional[models.CompletionArgs] ), + tool_confirmations=utils.get_pydantic_model( + tool_confirmations, + OptionalNullable[List[models.ToolCallConfirmation]], + ), ), ) @@ -2544,7 +2538,7 @@ async def append_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="AppendConversationStream", + operation_id="agents_api_v1_conversations_append_stream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2566,44 +2560,40 @@ async def append_stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if 
utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def restart_stream( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution + models.ConversationRestartStreamRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, + models.ConversationRestartStreamRequestAgentVersion, + models.ConversationRestartStreamRequestAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -2616,8 +2606,8 @@ def restart_stream( Given a conversation_id 
and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. :param conversation_id: ID of the original conversation which is being restarted. - :param inputs: :param from_entry_id: + :param inputs: :param stream: :param store: Whether to store the results into our servers or not. :param handoff_execution: @@ -2639,10 +2629,12 @@ def restart_stream( else: base_url = self._get_url(base_url, url_variables) - request = models.RestartConversationStreamRequest( + request = models.AgentsAPIV1ConversationsRestartStreamRequest( conversation_id=conversation_id, conversation_restart_stream_request=models.ConversationRestartStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, @@ -2691,7 +2683,7 @@ def restart_stream( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RestartConversationStream", + operation_id="agents_api_v1_conversations_restart_stream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2713,44 +2705,40 @@ def restart_stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, 
"5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def restart_stream_async( self, *, conversation_id: str, - inputs: Union[ - models_conversationinputs.ConversationInputs, - models_conversationinputs.ConversationInputsTypedDict, - ], from_entry_id: str, + inputs: Optional[ + Union[models.ConversationInputs, models.ConversationInputsTypedDict] + ] = None, stream: Optional[bool] = True, store: Optional[bool] = True, handoff_execution: Optional[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestHandoffExecution + models.ConversationRestartStreamRequestHandoffExecution ] = "server", completion_args: Optional[ - Union[ - models_completionargs.CompletionArgs, - models_completionargs.CompletionArgsTypedDict, - ] + Union[models.CompletionArgs, models.CompletionArgsTypedDict] ] = None, metadata: OptionalNullable[Dict[str, Any]] = UNSET, agent_version: OptionalNullable[ Union[ - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersion, - models_conversationrestartstreamrequest.ConversationRestartStreamRequestAgentVersionTypedDict, + models.ConversationRestartStreamRequestAgentVersion, + models.ConversationRestartStreamRequestAgentVersionTypedDict, ] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -2763,8 +2751,8 @@ async def restart_stream_async( Given a conversation_id and an id, recreate a conversation from this point and run completion. A new conversation is returned with the new entries returned. :param conversation_id: ID of the original conversation which is being restarted. 
- :param inputs: :param from_entry_id: + :param inputs: :param stream: :param store: Whether to store the results into our servers or not. :param handoff_execution: @@ -2786,10 +2774,12 @@ async def restart_stream_async( else: base_url = self._get_url(base_url, url_variables) - request = models.RestartConversationStreamRequest( + request = models.AgentsAPIV1ConversationsRestartStreamRequest( conversation_id=conversation_id, conversation_restart_stream_request=models.ConversationRestartStreamRequest( - inputs=utils.get_pydantic_model(inputs, models.ConversationInputs), + inputs=utils.get_pydantic_model( + inputs, Optional[models.ConversationInputs] + ), stream=stream, store=store, handoff_execution=handoff_execution, @@ -2838,7 +2828,7 @@ async def restart_stream_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RestartConversationStream", + operation_id="agents_api_v1_conversations_restart_stream", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -2860,15 +2850,15 @@ async def restart_stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/documents.py b/src/mistralai/client/documents.py index c78f2944..b3130364 100644 --- a/src/mistralai/client/documents.py +++ b/src/mistralai/client/documents.py @@ -2,12 +2,8 @@ # @generated-id: bcc17286c31c from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - documentupdatein as models_documentupdatein, - file as models_file, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -31,7 +27,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListDocumentOut: + ) -> models.ListDocumentsResponse: r"""List documents in a given library. Given a library, lists the document that have been uploaded to that library. 
@@ -58,7 +54,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListDocumentsRequest( + request = models.LibrariesDocumentsListV1Request( library_id=library_id, search=search, page_size=page_size, @@ -97,7 +93,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListDocuments", + operation_id="libraries_documents_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -110,20 +106,20 @@ def list( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListDocumentOut, http_res) + return unmarshal_json_response(models.ListDocumentsResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -139,7 +135,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListDocumentOut: + ) -> models.ListDocumentsResponse: r"""List documents in a given library. 
Given a library, lists the document that have been uploaded to that library. @@ -166,7 +162,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListDocumentsRequest( + request = models.LibrariesDocumentsListV1Request( library_id=library_id, search=search, page_size=page_size, @@ -205,7 +201,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListDocuments", + operation_id="libraries_documents_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -218,31 +214,31 @@ async def list_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListDocumentOut, http_res) + return unmarshal_json_response(models.ListDocumentsResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def upload( self, *, library_id: str, - file: Union[models_file.File, models_file.FileTypedDict], + file: Union[models.File, models.FileTypedDict], 
retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Upload a new document. Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search @@ -272,7 +268,7 @@ def upload( else: base_url = self._get_url(base_url, url_variables) - request = models.UploadDocumentRequest( + request = models.LibrariesDocumentsUploadV1Request( library_id=library_id, request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), @@ -311,7 +307,7 @@ def upload( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UploadDocument", + operation_id="libraries_documents_upload_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -324,31 +320,31 @@ def upload( response_data: Any = None if utils.match_response(http_res, ["200", "201"], "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", 
http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def upload_async( self, *, library_id: str, - file: Union[models_file.File, models_file.FileTypedDict], + file: Union[models.File, models.FileTypedDict], retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Upload a new document. Given a library, upload a new document to that library. It is queued for processing, it status will change it has been processed. The processing has to be completed in order be discoverable for the library search @@ -378,7 +374,7 @@ async def upload_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UploadDocumentRequest( + request = models.LibrariesDocumentsUploadV1Request( library_id=library_id, request_body=models.DocumentUpload( file=utils.get_pydantic_model(file, models.File), @@ -417,7 +413,7 @@ async def upload_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UploadDocument", + operation_id="libraries_documents_upload_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -430,20 +426,20 @@ async def upload_async( response_data: Any = None if utils.match_response(http_res, ["200", "201"], "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise 
errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -454,7 +450,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Retrieve the metadata of a specific document. Given a library and a document in this library, you can retrieve the metadata of that document. 
@@ -476,7 +472,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentRequest( + request = models.LibrariesDocumentsGetV1Request( library_id=library_id, document_id=document_id, ) @@ -510,7 +506,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocument", + operation_id="libraries_documents_get_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -523,20 +519,20 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -547,7 +543,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Retrieve the metadata of a specific document. 
Given a library and a document in this library, you can retrieve the metadata of that document. @@ -569,7 +565,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentRequest( + request = models.LibrariesDocumentsGetV1Request( library_id=library_id, document_id=document_id, ) @@ -603,7 +599,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocument", + operation_id="libraries_documents_get_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -616,20 +612,20 @@ async def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update( self, @@ -638,16 +634,13 @@ def update( document_id: str, name: OptionalNullable[str] = UNSET, attributes: OptionalNullable[ - Union[ - Dict[str, 
models_documentupdatein.Attributes], - Dict[str, models_documentupdatein.AttributesTypedDict], - ] + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Update the metadata of a specific document. Given a library and a document in that library, update the name of that document. @@ -671,10 +664,10 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateDocumentRequest( + request = models.LibrariesDocumentsUpdateV1Request( library_id=library_id, document_id=document_id, - document_update_in=models.DocumentUpdateIn( + update_document_request=models.UpdateDocumentRequest( name=name, attributes=attributes, ), @@ -694,11 +687,11 @@ def update( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.document_update_in, + request.update_document_request, False, False, "json", - models.DocumentUpdateIn, + models.UpdateDocumentRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -716,7 +709,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateDocument", + operation_id="libraries_documents_update_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -729,20 +722,20 @@ def update( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, 
http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -751,16 +744,13 @@ async def update_async( document_id: str, name: OptionalNullable[str] = UNSET, attributes: OptionalNullable[ - Union[ - Dict[str, models_documentupdatein.Attributes], - Dict[str, models_documentupdatein.AttributesTypedDict], - ] + Union[Dict[str, models.Attributes], Dict[str, models.AttributesTypedDict]] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DocumentOut: + ) -> models.Document: r"""Update the metadata of a specific document. Given a library and a document in that library, update the name of that document. 
@@ -784,10 +774,10 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateDocumentRequest( + request = models.LibrariesDocumentsUpdateV1Request( library_id=library_id, document_id=document_id, - document_update_in=models.DocumentUpdateIn( + update_document_request=models.UpdateDocumentRequest( name=name, attributes=attributes, ), @@ -807,11 +797,11 @@ async def update_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.document_update_in, + request.update_document_request, False, False, "json", - models.DocumentUpdateIn, + models.UpdateDocumentRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -829,7 +819,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateDocument", + operation_id="libraries_documents_update_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -842,20 +832,20 @@ async def update_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DocumentOut, http_res) + return unmarshal_json_response(models.Document, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -888,7 +878,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteDocumentRequest( + request = models.LibrariesDocumentsDeleteV1Request( library_id=library_id, document_id=document_id, ) @@ -922,7 +912,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteDocument", + operation_id="libraries_documents_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -938,17 +928,17 @@ def delete( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -981,7 +971,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteDocumentRequest( + request = models.LibrariesDocumentsDeleteV1Request( 
library_id=library_id, document_id=document_id, ) @@ -1015,7 +1005,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteDocument", + operation_id="libraries_documents_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1031,17 +1021,17 @@ async def delete_async( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def text_content( self, @@ -1074,7 +1064,7 @@ def text_content( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentTextContentRequest( + request = models.LibrariesDocumentsGetTextContentV1Request( library_id=library_id, document_id=document_id, ) @@ -1108,7 +1098,7 @@ def text_content( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentTextContent", + operation_id="libraries_documents_get_text_content_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1124,17 +1114,17 @@ def 
text_content( return unmarshal_json_response(models.DocumentTextContent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def text_content_async( self, @@ -1167,7 +1157,7 @@ async def text_content_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentTextContentRequest( + request = models.LibrariesDocumentsGetTextContentV1Request( library_id=library_id, document_id=document_id, ) @@ -1201,7 +1191,7 @@ async def text_content_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentTextContent", + operation_id="libraries_documents_get_text_content_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1217,17 +1207,17 @@ async def text_content_async( return unmarshal_json_response(models.DocumentTextContent, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise 
models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def status( self, @@ -1260,7 +1250,7 @@ def status( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentStatusRequest( + request = models.LibrariesDocumentsGetStatusV1Request( library_id=library_id, document_id=document_id, ) @@ -1294,7 +1284,7 @@ def status( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentStatus", + operation_id="libraries_documents_get_status_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1310,17 +1300,17 @@ def status( return unmarshal_json_response(models.ProcessingStatusOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, 
"5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def status_async( self, @@ -1353,7 +1343,7 @@ async def status_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentStatusRequest( + request = models.LibrariesDocumentsGetStatusV1Request( library_id=library_id, document_id=document_id, ) @@ -1387,7 +1377,7 @@ async def status_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentStatus", + operation_id="libraries_documents_get_status_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1403,17 +1393,17 @@ async def status_async( return unmarshal_json_response(models.ProcessingStatusOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def 
get_signed_url( self, @@ -1446,7 +1436,7 @@ def get_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentSignedURLRequest( + request = models.LibrariesDocumentsGetSignedURLV1Request( library_id=library_id, document_id=document_id, ) @@ -1480,7 +1470,7 @@ def get_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentSignedUrl", + operation_id="libraries_documents_get_signed_url_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1496,17 +1486,17 @@ def get_signed_url( return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_signed_url_async( self, @@ -1539,7 +1529,7 @@ async def get_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentSignedURLRequest( + request = models.LibrariesDocumentsGetSignedURLV1Request( library_id=library_id, document_id=document_id, ) @@ -1573,7 +1563,7 @@ async def get_signed_url_async( hook_ctx=HookContext( 
config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentSignedUrl", + operation_id="libraries_documents_get_signed_url_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1589,17 +1579,17 @@ async def get_signed_url_async( return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def extracted_text_signed_url( self, @@ -1632,7 +1622,7 @@ def extracted_text_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentExtractedTextSignedURLRequest( + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( library_id=library_id, document_id=document_id, ) @@ -1666,7 +1656,7 @@ def extracted_text_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentExtractedTextSignedUrl", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ 
-1682,17 +1672,17 @@ def extracted_text_signed_url( return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def extracted_text_signed_url_async( self, @@ -1725,7 +1715,7 @@ async def extracted_text_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetDocumentExtractedTextSignedURLRequest( + request = models.LibrariesDocumentsGetExtractedTextSignedURLV1Request( library_id=library_id, document_id=document_id, ) @@ -1759,7 +1749,7 @@ async def extracted_text_signed_url_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetDocumentExtractedTextSignedUrl", + operation_id="libraries_documents_get_extracted_text_signed_url_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1775,17 +1765,17 @@ async def extracted_text_signed_url_async( return unmarshal_json_response(str, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res 
+ errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def reprocess( self, @@ -1818,7 +1808,7 @@ def reprocess( else: base_url = self._get_url(base_url, url_variables) - request = models.ReprocessDocumentRequest( + request = models.LibrariesDocumentsReprocessV1Request( library_id=library_id, document_id=document_id, ) @@ -1852,7 +1842,7 @@ def reprocess( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ReprocessDocument", + operation_id="libraries_documents_reprocess_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1868,17 +1858,17 @@ def reprocess( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, 
"5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def reprocess_async( self, @@ -1911,7 +1901,7 @@ async def reprocess_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ReprocessDocumentRequest( + request = models.LibrariesDocumentsReprocessV1Request( library_id=library_id, document_id=document_id, ) @@ -1945,7 +1935,7 @@ async def reprocess_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ReprocessDocument", + operation_id="libraries_documents_reprocess_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1961,14 +1951,14 @@ async def reprocess_async( return if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/embeddings.py 
b/src/mistralai/client/embeddings.py index 4a056baa..5f9d3b9c 100644 --- a/src/mistralai/client/embeddings.py +++ b/src/mistralai/client/embeddings.py @@ -2,13 +2,8 @@ # @generated-id: f9c17258207e from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - embeddingdtype as models_embeddingdtype, - embeddingrequest as models_embeddingrequest, - encodingformat as models_encodingformat, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -23,13 +18,12 @@ def create( *, model: str, inputs: Union[ - models_embeddingrequest.EmbeddingRequestInputs, - models_embeddingrequest.EmbeddingRequestInputsTypedDict, + models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, output_dimension: OptionalNullable[int] = UNSET, - output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, - encoding_format: Optional[models_encodingformat.EncodingFormat] = None, + output_dtype: Optional[models.EmbeddingDtype] = None, + encoding_format: Optional[models.EncodingFormat] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -117,30 +111,29 @@ def create( return unmarshal_json_response(models.EmbeddingResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def create_async( self, *, model: str, inputs: Union[ - models_embeddingrequest.EmbeddingRequestInputs, - models_embeddingrequest.EmbeddingRequestInputsTypedDict, + models.EmbeddingRequestInputs, models.EmbeddingRequestInputsTypedDict ], metadata: OptionalNullable[Dict[str, Any]] = UNSET, output_dimension: OptionalNullable[int] = UNSET, - output_dtype: Optional[models_embeddingdtype.EmbeddingDtype] = None, - encoding_format: Optional[models_encodingformat.EncodingFormat] = None, + output_dtype: Optional[models.EmbeddingDtype] = None, + encoding_format: Optional[models.EncodingFormat] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -228,14 +221,14 @@ async def create_async( return unmarshal_json_response(models.EmbeddingResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/errors/__init__.py b/src/mistralai/client/errors/__init__.py new file mode 100644 index 00000000..58a591a1 --- /dev/null +++ b/src/mistralai/client/errors/__init__.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 0b2db51246df + +from .mistralerror import MistralError +from typing import Any, TYPE_CHECKING + +from mistralai.client.utils.dynamic_imports import lazy_getattr, lazy_dir + +if TYPE_CHECKING: + from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .no_response_error import NoResponseError + from .responsevalidationerror import ResponseValidationError + from .sdkerror import SDKError + +__all__ = [ + "HTTPValidationError", + "HTTPValidationErrorData", + "MistralError", + "NoResponseError", + "ResponseValidationError", + "SDKError", +] + +_dynamic_imports: dict[str, str] = { + "HTTPValidationError": ".httpvalidationerror", + "HTTPValidationErrorData": ".httpvalidationerror", + "NoResponseError": ".no_response_error", + "ResponseValidationError": ".responsevalidationerror", + "SDKError": ".sdkerror", +} + + +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) + + +def __dir__(): + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/src/mistralai/client/models/httpvalidationerror.py b/src/mistralai/client/errors/httpvalidationerror.py similarity index 75% rename from src/mistralai/client/models/httpvalidationerror.py rename to src/mistralai/client/errors/httpvalidationerror.py index 
e7f0a35b..97b16562 100644 --- a/src/mistralai/client/models/httpvalidationerror.py +++ b/src/mistralai/client/errors/httpvalidationerror.py @@ -1,17 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 4099f568a6f8 +# @generated-id: ac3de4a52bb6 from __future__ import annotations -from .validationerror import ValidationError from dataclasses import dataclass, field import httpx -from mistralai.client.models import MistralError +from mistralai.client.errors import MistralError +from mistralai.client.models import validationerror as models_validationerror from mistralai.client.types import BaseModel from typing import List, Optional class HTTPValidationErrorData(BaseModel): - detail: Optional[List[ValidationError]] = None + detail: Optional[List[models_validationerror.ValidationError]] = None @dataclass(unsafe_hash=True) diff --git a/src/mistralai/client/models/mistralerror.py b/src/mistralai/client/errors/mistralerror.py similarity index 96% rename from src/mistralai/client/models/mistralerror.py rename to src/mistralai/client/errors/mistralerror.py index 862a6be8..eb73040c 100644 --- a/src/mistralai/client/models/mistralerror.py +++ b/src/mistralai/client/errors/mistralerror.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 68ffd8394c2e +# @generated-id: d1f57f0ff1e9 import httpx from typing import Optional diff --git a/src/mistralai/client/models/no_response_error.py b/src/mistralai/client/errors/no_response_error.py similarity index 93% rename from src/mistralai/client/models/no_response_error.py rename to src/mistralai/client/errors/no_response_error.py index 7705f194..d71dfa7b 100644 --- a/src/mistralai/client/models/no_response_error.py +++ b/src/mistralai/client/errors/no_response_error.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 2849e0a482e2 +# @generated-id: 8b469ecb0906 from dataclasses import dataclass diff --git a/src/mistralai/client/models/responsevalidationerror.py b/src/mistralai/client/errors/responsevalidationerror.py similarity index 90% rename from src/mistralai/client/models/responsevalidationerror.py rename to src/mistralai/client/errors/responsevalidationerror.py index 1ed0d552..a7b3b9f0 100644 --- a/src/mistralai/client/models/responsevalidationerror.py +++ b/src/mistralai/client/errors/responsevalidationerror.py @@ -1,11 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: c244a88981e0 +# @generated-id: 6cfaa3147abe import httpx from typing import Optional from dataclasses import dataclass -from mistralai.client.models import MistralError +from mistralai.client.errors import MistralError @dataclass(unsafe_hash=True) diff --git a/src/mistralai/client/models/sdkerror.py b/src/mistralai/client/errors/sdkerror.py similarity index 94% rename from src/mistralai/client/models/sdkerror.py rename to src/mistralai/client/errors/sdkerror.py index 101e1e6a..25b87255 100644 --- a/src/mistralai/client/models/sdkerror.py +++ b/src/mistralai/client/errors/sdkerror.py @@ -1,11 +1,11 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 12f991dad510 +# @generated-id: c489ffe1e9ca import httpx from typing import Optional from dataclasses import dataclass -from mistralai.client.models import MistralError +from mistralai.client.errors import MistralError MAX_MESSAGE_LEN = 10_000 diff --git a/src/mistralai/client/files.py b/src/mistralai/client/files.py index 57d389f1..a5f3adf6 100644 --- a/src/mistralai/client/files.py +++ b/src/mistralai/client/files.py @@ -3,14 +3,8 @@ from .basesdk import BaseSDK import httpx -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - file as models_file, - filepurpose as models_filepurpose, - sampletype as models_sampletype, - source as models_source, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -23,13 +17,13 @@ class Files(BaseSDK): def upload( self, *, - file: Union[models_file.File, models_file.FileTypedDict], - purpose: Optional[models_filepurpose.FilePurpose] = None, + file: Union[models.File, models.FileTypedDict], + 
purpose: Optional[models.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UploadFileOut: + ) -> models.CreateFileResponse: r"""Upload File Upload a file that can be used across various endpoints. @@ -100,7 +94,7 @@ def upload( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UploadFile", + operation_id="files_api_routes_upload_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -112,26 +106,26 @@ def upload( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UploadFileOut, http_res) + return unmarshal_json_response(models.CreateFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def upload_async( self, *, - file: Union[models_file.File, models_file.FileTypedDict], - purpose: Optional[models_filepurpose.FilePurpose] = None, + file: Union[models.File, models.FileTypedDict], + purpose: Optional[models.FilePurpose] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UploadFileOut: + ) -> models.CreateFileResponse: r"""Upload File Upload a file 
that can be used across various endpoints. @@ -202,7 +196,7 @@ async def upload_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UploadFile", + operation_id="files_api_routes_upload_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -214,15 +208,15 @@ async def upload_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UploadFileOut, http_res) + return unmarshal_json_response(models.CreateFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def list( self, @@ -230,16 +224,16 @@ def list( page: Optional[int] = 0, page_size: Optional[int] = 100, include_total: Optional[bool] = True, - sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, - source: OptionalNullable[List[models_source.Source]] = UNSET, + sample_type: OptionalNullable[List[models.SampleType]] = UNSET, + source: OptionalNullable[List[models.Source]] = UNSET, search: OptionalNullable[str] = UNSET, - purpose: OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + purpose: OptionalNullable[models.FilePurpose] = UNSET, mimetypes: OptionalNullable[List[str]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, 
str]] = None, - ) -> models.ListFilesOut: + ) -> models.ListFilesResponse: r"""List Files Returns a list of files that belong to the user's organization. @@ -267,7 +261,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListFilesRequest( + request = models.FilesAPIRoutesListFilesRequest( page=page, page_size=page_size, include_total=include_total, @@ -307,7 +301,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListFiles", + operation_id="files_api_routes_list_files", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -319,15 +313,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListFilesOut, http_res) + return unmarshal_json_response(models.ListFilesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -335,16 +329,16 @@ async def list_async( page: Optional[int] = 0, page_size: Optional[int] = 100, include_total: Optional[bool] = True, - sample_type: OptionalNullable[List[models_sampletype.SampleType]] = UNSET, - source: OptionalNullable[List[models_source.Source]] = UNSET, + sample_type: OptionalNullable[List[models.SampleType]] = UNSET, + source: OptionalNullable[List[models.Source]] = UNSET, search: OptionalNullable[str] = UNSET, - purpose: 
OptionalNullable[models_filepurpose.FilePurpose] = UNSET, + purpose: OptionalNullable[models.FilePurpose] = UNSET, mimetypes: OptionalNullable[List[str]] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListFilesOut: + ) -> models.ListFilesResponse: r"""List Files Returns a list of files that belong to the user's organization. @@ -372,7 +366,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListFilesRequest( + request = models.FilesAPIRoutesListFilesRequest( page=page, page_size=page_size, include_total=include_total, @@ -412,7 +406,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListFiles", + operation_id="files_api_routes_list_files", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -424,15 +418,15 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListFilesOut, http_res) + return unmarshal_json_response(models.ListFilesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def retrieve( self, @@ -442,7 +436,7 @@ def retrieve( server_url: Optional[str] = None, timeout_ms: 
Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveFileOut: + ) -> models.GetFileResponse: r"""Retrieve File Returns information about a specific file. @@ -463,7 +457,7 @@ def retrieve( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveFileRequest( + request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, ) @@ -496,7 +490,7 @@ def retrieve( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RetrieveFile", + operation_id="files_api_routes_retrieve_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -508,15 +502,15 @@ def retrieve( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.RetrieveFileOut, http_res) + return unmarshal_json_response(models.GetFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def retrieve_async( self, @@ -526,7 +520,7 @@ async def retrieve_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.RetrieveFileOut: + ) -> models.GetFileResponse: r"""Retrieve File Returns information about a specific file. 
@@ -547,7 +541,7 @@ async def retrieve_async( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveFileRequest( + request = models.FilesAPIRoutesRetrieveFileRequest( file_id=file_id, ) @@ -580,7 +574,7 @@ async def retrieve_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RetrieveFile", + operation_id="files_api_routes_retrieve_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -592,15 +586,15 @@ async def retrieve_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.RetrieveFileOut, http_res) + return unmarshal_json_response(models.GetFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -610,7 +604,7 @@ def delete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteFileOut: + ) -> models.DeleteFileResponse: r"""Delete File Delete a file. 
@@ -631,7 +625,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteFileRequest( + request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, ) @@ -664,7 +658,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteFile", + operation_id="files_api_routes_delete_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -676,15 +670,15 @@ def delete( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteFileOut, http_res) + return unmarshal_json_response(models.DeleteFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -694,7 +688,7 @@ async def delete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.DeleteFileOut: + ) -> models.DeleteFileResponse: r"""Delete File Delete a file. 
@@ -715,7 +709,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteFileRequest( + request = models.FilesAPIRoutesDeleteFileRequest( file_id=file_id, ) @@ -748,7 +742,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteFile", + operation_id="files_api_routes_delete_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -760,15 +754,15 @@ async def delete_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.DeleteFileOut, http_res) + return unmarshal_json_response(models.DeleteFileResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def download( self, @@ -799,7 +793,7 @@ def download( else: base_url = self._get_url(base_url, url_variables) - request = models.DownloadFileRequest( + request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, ) @@ -832,7 +826,7 @@ def download( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DownloadFile", + operation_id="files_api_routes_download_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -848,13 +842,13 @@ def download( return http_res if utils.match_response(http_res, 
"4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def download_async( self, @@ -885,7 +879,7 @@ async def download_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DownloadFileRequest( + request = models.FilesAPIRoutesDownloadFileRequest( file_id=file_id, ) @@ -918,7 +912,7 @@ async def download_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DownloadFile", + operation_id="files_api_routes_download_file", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -934,13 +928,13 @@ async def download_async( return http_res if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) def 
get_signed_url( self, @@ -951,7 +945,7 @@ def get_signed_url( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FileSignedURL: + ) -> models.GetSignedURLResponse: r"""Get Signed Url :param file_id: @@ -971,7 +965,7 @@ def get_signed_url( else: base_url = self._get_url(base_url, url_variables) - request = models.GetFileSignedURLRequest( + request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, expiry=expiry, ) @@ -1005,7 +999,7 @@ def get_signed_url( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetFileSignedUrl", + operation_id="files_api_routes_get_signed_url", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1017,15 +1011,15 @@ def get_signed_url( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FileSignedURL, http_res) + return unmarshal_json_response(models.GetSignedURLResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_signed_url_async( self, @@ -1036,7 +1030,7 @@ async def get_signed_url_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.FileSignedURL: + ) -> models.GetSignedURLResponse: r"""Get Signed Url :param file_id: @@ -1056,7 +1050,7 
@@ async def get_signed_url_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetFileSignedURLRequest( + request = models.FilesAPIRoutesGetSignedURLRequest( file_id=file_id, expiry=expiry, ) @@ -1090,7 +1084,7 @@ async def get_signed_url_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetFileSignedUrl", + operation_id="files_api_routes_get_signed_url", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1102,12 +1096,12 @@ async def get_signed_url_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.FileSignedURL, http_res) + return unmarshal_json_response(models.GetSignedURLResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/fim.py b/src/mistralai/client/fim.py index be3f7742..8ffb7730 100644 --- a/src/mistralai/client/fim.py +++ b/src/mistralai/client/fim.py @@ -2,12 +2,8 @@ # @generated-id: 217bea5d701d from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - fimcompletionrequest as models_fimcompletionrequest, - fimcompletionstreamrequest as models_fimcompletionstreamrequest, -) from 
mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -28,8 +24,8 @@ def complete( stream: Optional[bool] = False, stop: Optional[ Union[ - models_fimcompletionrequest.FIMCompletionRequestStop, - models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -133,17 +129,17 @@ def complete( return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, @@ -156,8 +152,8 @@ async def complete_async( stream: Optional[bool] = False, stop: Optional[ Union[ - models_fimcompletionrequest.FIMCompletionRequestStop, - models_fimcompletionrequest.FIMCompletionRequestStopTypedDict, + models.FIMCompletionRequestStop, + models.FIMCompletionRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -261,17 +257,17 @@ async def 
complete_async( return unmarshal_json_response(models.FIMCompletionResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def stream( self, @@ -284,8 +280,8 @@ def stream( stream: Optional[bool] = True, stop: Optional[ Union[ - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -396,18 +392,18 @@ def stream( if utils.match_response(http_res, "422", "application/json"): http_res_text = utils.stream_to_text(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API 
error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, @@ -420,8 +416,8 @@ async def stream_async( stream: Optional[bool] = True, stop: Optional[ Union[ - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStop, - models_fimcompletionstreamrequest.FIMCompletionStreamRequestStopTypedDict, + models.FIMCompletionStreamRequestStop, + models.FIMCompletionStreamRequestStopTypedDict, ] ] = None, random_seed: OptionalNullable[int] = UNSET, @@ -532,15 +528,15 @@ async def stream_async( if utils.match_response(http_res, "422", "application/json"): http_res_text = await utils.stream_to_text_async(http_res) response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res, http_res_text + errors.HTTPValidationErrorData, http_res, http_res_text ) - raise models.HTTPValidationError(response_data, http_res, http_res_text) + raise errors.HTTPValidationError(response_data, http_res, http_res_text) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/fine_tuning_jobs.py b/src/mistralai/client/fine_tuning_jobs.py index 9a28ded1..c2ee871b 100644 --- a/src/mistralai/client/fine_tuning_jobs.py +++ b/src/mistralai/client/fine_tuning_jobs.py @@ -3,15 +3,8 @@ from .basesdk import BaseSDK from datetime import datetime -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - classifiertargetin as models_classifiertargetin, - finetuneablemodeltype as models_finetuneablemodeltype, - jobin as models_jobin, - listfinetuningjobsop as models_listfinetuningjobsop, - trainingfile as models_trainingfile, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -29,7 +22,7 @@ def list( created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[ - models_listfinetuningjobsop.ListFineTuningJobsStatus + models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus ] = UNSET, wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, @@ -38,7 +31,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsOut: + ) -> models.ListFineTuningJobsResponse: r"""Get Fine Tuning Jobs Get a list of fine-tuning jobs for your organization and user. 
@@ -68,7 +61,7 @@ def list( else: base_url = self._get_url(base_url, url_variables) - request = models.ListFineTuningJobsRequest( + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -110,7 +103,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListFineTuningJobs", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -122,15 +115,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.JobsOut, http_res) + return unmarshal_json_response(models.ListFineTuningJobsResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -142,7 +135,7 @@ async def list_async( created_before: OptionalNullable[datetime] = UNSET, created_by_me: Optional[bool] = False, status: OptionalNullable[ - models_listfinetuningjobsop.ListFineTuningJobsStatus + models.JobsAPIRoutesFineTuningGetFineTuningJobsStatus ] = UNSET, wandb_project: OptionalNullable[str] = UNSET, wandb_name: OptionalNullable[str] = UNSET, @@ -151,7 +144,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.JobsOut: + ) -> 
models.ListFineTuningJobsResponse: r"""Get Fine Tuning Jobs Get a list of fine-tuning jobs for your organization and user. @@ -181,7 +174,7 @@ async def list_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ListFineTuningJobsRequest( + request = models.JobsAPIRoutesFineTuningGetFineTuningJobsRequest( page=page, page_size=page_size, model=model, @@ -223,7 +216,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListFineTuningJobs", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_jobs", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -235,59 +228,49 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.JobsOut, http_res) + return unmarshal_json_response(models.ListFineTuningJobsResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def create( self, *, model: str, - hyperparameters: Union[ - models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict - ], + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], training_files: Optional[ - Union[ - List[models_trainingfile.TrainingFile], - List[models_trainingfile.TrainingFileTypedDict], - ] + Union[List[models.TrainingFile], 
List[models.TrainingFileTypedDict]] ] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, integrations: OptionalNullable[ Union[ - List[models_jobin.JobInIntegration], - List[models_jobin.JobInIntegrationTypedDict], + List[models.CreateFineTuningJobRequestIntegration], + List[models.CreateFineTuningJobRequestIntegrationTypedDict], ] ] = UNSET, auto_start: Optional[bool] = None, invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[ - models_finetuneablemodeltype.FineTuneableModelType - ] = UNSET, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, repositories: OptionalNullable[ Union[ - List[models_jobin.JobInRepository], - List[models_jobin.JobInRepositoryTypedDict], + List[models.CreateFineTuningJobRequestRepository], + List[models.CreateFineTuningJobRequestRepositoryTypedDict], ] ] = UNSET, classifier_targets: OptionalNullable[ - Union[ - List[models_classifiertargetin.ClassifierTargetIn], - List[models_classifiertargetin.ClassifierTargetInTypedDict], - ] + Union[List[models.ClassifierTarget], List[models.ClassifierTargetTypedDict]] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.CreateFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: r"""Create Fine Tuning Job Create a new fine-tuning job, it will be queued for processing. 
@@ -318,7 +301,7 @@ def create( else: base_url = self._get_url(base_url, url_variables) - request = models.JobIn( + request = models.CreateFineTuningJobRequest( model=model, training_files=utils.get_pydantic_model( training_files, Optional[List[models.TrainingFile]] @@ -326,7 +309,8 @@ def create( validation_files=validation_files, suffix=suffix, integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegration]] + integrations, + OptionalNullable[List[models.CreateFineTuningJobRequestIntegration]], ), auto_start=auto_start, invalid_sample_skip_percentage=invalid_sample_skip_percentage, @@ -335,10 +319,11 @@ def create( hyperparameters, models.Hyperparameters ), repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepository]] + repositories, + OptionalNullable[List[models.CreateFineTuningJobRequestRepository]], ), classifier_targets=utils.get_pydantic_model( - classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] + classifier_targets, OptionalNullable[List[models.ClassifierTarget]] ), ) @@ -356,7 +341,7 @@ def create( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.JobIn + request, False, False, "json", models.CreateFineTuningJobRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -374,7 +359,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -386,59 +371,51 @@ def create( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.CreateFineTuningJobResponse, http_res) + return unmarshal_json_response( + 
models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def create_async( self, *, model: str, - hyperparameters: Union[ - models_jobin.Hyperparameters, models_jobin.HyperparametersTypedDict - ], + hyperparameters: Union[models.Hyperparameters, models.HyperparametersTypedDict], training_files: Optional[ - Union[ - List[models_trainingfile.TrainingFile], - List[models_trainingfile.TrainingFileTypedDict], - ] + Union[List[models.TrainingFile], List[models.TrainingFileTypedDict]] ] = None, validation_files: OptionalNullable[List[str]] = UNSET, suffix: OptionalNullable[str] = UNSET, integrations: OptionalNullable[ Union[ - List[models_jobin.JobInIntegration], - List[models_jobin.JobInIntegrationTypedDict], + List[models.CreateFineTuningJobRequestIntegration], + List[models.CreateFineTuningJobRequestIntegrationTypedDict], ] ] = UNSET, auto_start: Optional[bool] = None, invalid_sample_skip_percentage: Optional[float] = 0, - job_type: OptionalNullable[ - models_finetuneablemodeltype.FineTuneableModelType - ] = UNSET, + job_type: OptionalNullable[models.FineTuneableModelType] = UNSET, repositories: OptionalNullable[ Union[ - List[models_jobin.JobInRepository], - List[models_jobin.JobInRepositoryTypedDict], + List[models.CreateFineTuningJobRequestRepository], + List[models.CreateFineTuningJobRequestRepositoryTypedDict], ] ] = UNSET, classifier_targets: 
OptionalNullable[ - Union[ - List[models_classifiertargetin.ClassifierTargetIn], - List[models_classifiertargetin.ClassifierTargetInTypedDict], - ] + Union[List[models.ClassifierTarget], List[models.ClassifierTargetTypedDict]] ] = UNSET, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.CreateFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse: r"""Create Fine Tuning Job Create a new fine-tuning job, it will be queued for processing. @@ -469,7 +446,7 @@ async def create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.JobIn( + request = models.CreateFineTuningJobRequest( model=model, training_files=utils.get_pydantic_model( training_files, Optional[List[models.TrainingFile]] @@ -477,7 +454,8 @@ async def create_async( validation_files=validation_files, suffix=suffix, integrations=utils.get_pydantic_model( - integrations, OptionalNullable[List[models.JobInIntegration]] + integrations, + OptionalNullable[List[models.CreateFineTuningJobRequestIntegration]], ), auto_start=auto_start, invalid_sample_skip_percentage=invalid_sample_skip_percentage, @@ -486,10 +464,11 @@ async def create_async( hyperparameters, models.Hyperparameters ), repositories=utils.get_pydantic_model( - repositories, OptionalNullable[List[models.JobInRepository]] + repositories, + OptionalNullable[List[models.CreateFineTuningJobRequestRepository]], ), classifier_targets=utils.get_pydantic_model( - classifier_targets, OptionalNullable[List[models.ClassifierTargetIn]] + classifier_targets, OptionalNullable[List[models.ClassifierTarget]] ), ) @@ -507,7 +486,7 @@ async def create_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.JobIn + request, False, False, "json", 
models.CreateFineTuningJobRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -525,7 +504,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_create_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -537,15 +516,17 @@ async def create_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.CreateFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCreateFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -555,7 +536,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.GetFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. 
@@ -576,7 +557,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, ) @@ -609,7 +590,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -621,15 +602,17 @@ def get( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.GetFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -639,7 +622,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.GetFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningGetFineTuningJobResponse: r"""Get Fine Tuning Job Get a fine-tuned job details by its UUID. 
@@ -660,7 +643,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningGetFineTuningJobRequest( job_id=job_id, ) @@ -693,7 +676,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_get_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -705,15 +688,17 @@ async def get_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.GetFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningGetFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def cancel( self, @@ -723,7 +708,7 @@ def cancel( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.CancelFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. 
@@ -744,7 +729,7 @@ def cancel( else: base_url = self._get_url(base_url, url_variables) - request = models.CancelFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, ) @@ -777,7 +762,7 @@ def cancel( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CancelFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -789,15 +774,17 @@ def cancel( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.CancelFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def cancel_async( self, @@ -807,7 +794,7 @@ async def cancel_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.CancelFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse: r"""Cancel Fine Tuning Job Request the cancellation of a fine tuning job. 
@@ -828,7 +815,7 @@ async def cancel_async( else: base_url = self._get_url(base_url, url_variables) - request = models.CancelFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningCancelFineTuningJobRequest( job_id=job_id, ) @@ -861,7 +848,7 @@ async def cancel_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CancelFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_cancel_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -873,15 +860,17 @@ async def cancel_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.CancelFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningCancelFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def start( self, @@ -891,7 +880,7 @@ def start( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.StartFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. 
@@ -912,7 +901,7 @@ def start( else: base_url = self._get_url(base_url, url_variables) - request = models.StartFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, ) @@ -945,7 +934,7 @@ def start( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -957,15 +946,17 @@ def start( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.StartFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def start_async( self, @@ -975,7 +966,7 @@ async def start_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.StartFineTuningJobResponse: + ) -> models.JobsAPIRoutesFineTuningStartFineTuningJobResponse: r"""Start Fine Tuning Job Request the start of a validated fine tuning job. 
@@ -996,7 +987,7 @@ async def start_async( else: base_url = self._get_url(base_url, url_variables) - request = models.StartFineTuningJobRequest( + request = models.JobsAPIRoutesFineTuningStartFineTuningJobRequest( job_id=job_id, ) @@ -1029,7 +1020,7 @@ async def start_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="StartFineTuningJob", + operation_id="jobs_api_routes_fine_tuning_start_fine_tuning_job", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1041,12 +1032,14 @@ async def start_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.StartFineTuningJobResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningStartFineTuningJobResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/libraries.py b/src/mistralai/client/libraries.py index 26ceabe1..b8728362 100644 --- a/src/mistralai/client/libraries.py +++ b/src/mistralai/client/libraries.py @@ -3,7 +3,7 @@ from .basesdk import BaseSDK from .sdkconfiguration import SDKConfiguration -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext from mistralai.client.accesses import Accesses from 
mistralai.client.documents import Documents @@ -39,7 +39,7 @@ def list( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListLibraryOut: + ) -> models.ListLibrariesResponse: r"""List all libraries you have access to. List all libraries that you have created or have been shared with you. @@ -87,7 +87,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListLibraries", + operation_id="libraries_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -99,15 +99,15 @@ def list( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListLibraryOut, http_res) + return unmarshal_json_response(models.ListLibrariesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -116,7 +116,7 @@ async def list_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ListLibraryOut: + ) -> models.ListLibrariesResponse: r"""List all libraries you have access to. List all libraries that you have created or have been shared with you. 
@@ -164,7 +164,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListLibraries", + operation_id="libraries_list_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -176,15 +176,15 @@ async def list_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ListLibraryOut, http_res) + return unmarshal_json_response(models.ListLibrariesResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def create( self, @@ -196,7 +196,7 @@ def create( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Create a new Library. Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. 
@@ -219,7 +219,7 @@ def create( else: base_url = self._get_url(base_url, url_variables) - request = models.LibraryIn( + request = models.CreateLibraryRequest( name=name, description=description, chunk_size=chunk_size, @@ -239,7 +239,7 @@ def create( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.LibraryIn + request, False, False, "json", models.CreateLibraryRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -257,7 +257,7 @@ def create( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateLibrary", + operation_id="libraries_create_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -270,20 +270,20 @@ def create( response_data: Any = None if utils.match_response(http_res, "201", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def 
create_async( self, @@ -295,7 +295,7 @@ async def create_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Create a new Library. Create a new Library, you will be marked as the owner and only you will have the possibility to share it with others. When first created this will only be accessible by you. @@ -318,7 +318,7 @@ async def create_async( else: base_url = self._get_url(base_url, url_variables) - request = models.LibraryIn( + request = models.CreateLibraryRequest( name=name, description=description, chunk_size=chunk_size, @@ -338,7 +338,7 @@ async def create_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request, False, False, "json", models.LibraryIn + request, False, False, "json", models.CreateLibraryRequest ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -356,7 +356,7 @@ async def create_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="CreateLibrary", + operation_id="libraries_create_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -369,20 +369,20 @@ async def create_async( response_data: Any = None if utils.match_response(http_res, "201", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error 
occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def get( self, @@ -392,7 +392,7 @@ def get( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Detailed information about a specific Library. Given a library id, details information about that Library. @@ -413,7 +413,7 @@ def get( else: base_url = self._get_url(base_url, url_variables) - request = models.GetLibraryRequest( + request = models.LibrariesGetV1Request( library_id=library_id, ) @@ -446,7 +446,7 @@ def get( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetLibrary", + operation_id="libraries_get_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -459,20 +459,20 @@ def get( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + 
raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def get_async( self, @@ -482,7 +482,7 @@ async def get_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Detailed information about a specific Library. Given a library id, details information about that Library. @@ -503,7 +503,7 @@ async def get_async( else: base_url = self._get_url(base_url, url_variables) - request = models.GetLibraryRequest( + request = models.LibrariesGetV1Request( library_id=library_id, ) @@ -536,7 +536,7 @@ async def get_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="GetLibrary", + operation_id="libraries_get_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -549,20 +549,20 @@ async def get_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", 
http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def delete( self, @@ -572,7 +572,7 @@ def delete( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Delete a library and all of it's document. Given a library id, deletes it together with all documents that have been uploaded to that library. @@ -593,7 +593,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteLibraryRequest( + request = models.LibrariesDeleteV1Request( library_id=library_id, ) @@ -626,7 +626,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteLibrary", + operation_id="libraries_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -639,20 +639,20 @@ def delete( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise 
models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -662,7 +662,7 @@ async def delete_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Delete a library and all of it's document. Given a library id, deletes it together with all documents that have been uploaded to that library. @@ -683,7 +683,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteLibraryRequest( + request = models.LibrariesDeleteV1Request( library_id=library_id, ) @@ -716,7 +716,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteLibrary", + operation_id="libraries_delete_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -729,20 +729,20 @@ async def delete_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if 
utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update( self, @@ -754,7 +754,7 @@ def update( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Update a library. Given a library id, you can update the name and description. @@ -777,9 +777,9 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateLibraryRequest( + request = models.LibrariesUpdateV1Request( library_id=library_id, - library_in_update=models.LibraryInUpdate( + update_library_request=models.UpdateLibraryRequest( name=name, description=description, ), @@ -799,7 +799,11 @@ def update( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.library_in_update, False, False, "json", models.LibraryInUpdate + request.update_library_request, + False, + False, + "json", + models.UpdateLibraryRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -817,7 +821,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateLibrary", + operation_id="libraries_update_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -830,20 +834,20 @@ def update( response_data: Any = None if 
utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -855,7 +859,7 @@ async def update_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.LibraryOut: + ) -> models.Library: r"""Update a library. Given a library id, you can update the name and description. 
@@ -878,9 +882,9 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateLibraryRequest( + request = models.LibrariesUpdateV1Request( library_id=library_id, - library_in_update=models.LibraryInUpdate( + update_library_request=models.UpdateLibraryRequest( name=name, description=description, ), @@ -900,7 +904,11 @@ async def update_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.library_in_update, False, False, "json", models.LibraryInUpdate + request.update_library_request, + False, + False, + "json", + models.UpdateLibraryRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -918,7 +926,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateLibrary", + operation_id="libraries_update_v1", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -931,17 +939,17 @@ async def update_async( response_data: Any = None if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.LibraryOut, http_res) + return unmarshal_json_response(models.Library, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error 
occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py index 093ffcbd..5ef8b3f3 100644 --- a/src/mistralai/client/models/__init__.py +++ b/src/mistralai/client/models/__init__.py @@ -1,42 +1,116 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" # @generated-id: e0e8dad92725 -from .mistralerror import MistralError -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING + +from mistralai.client.utils.dynamic_imports import lazy_getattr, lazy_dir if TYPE_CHECKING: - from .agent import Agent, AgentObject, AgentTool, AgentToolTypedDict, AgentTypedDict + from .agent import ( + Agent, + AgentTool, + AgentToolTypedDict, + AgentTypedDict, + UnknownAgentTool, + ) from .agentaliasresponse import AgentAliasResponse, AgentAliasResponseTypedDict from .agentconversation import ( AgentConversation, AgentConversationAgentVersion, AgentConversationAgentVersionTypedDict, - AgentConversationObject, AgentConversationTypedDict, ) - from .agentcreationrequest import ( - AgentCreationRequest, - AgentCreationRequestTool, - AgentCreationRequestToolTypedDict, - AgentCreationRequestTypedDict, - ) from .agenthandoffdoneevent import ( AgentHandoffDoneEvent, AgentHandoffDoneEventTypedDict, ) - from .agenthandoffentry import ( - AgentHandoffEntry, - AgentHandoffEntryObject, - AgentHandoffEntryType, - AgentHandoffEntryTypedDict, - ) + from .agenthandoffentry import AgentHandoffEntry, AgentHandoffEntryTypedDict from .agenthandoffstartedevent import ( AgentHandoffStartedEvent, AgentHandoffStartedEventTypedDict, ) + from .agents_api_v1_agents_create_or_update_aliasop import ( + 
AgentsAPIV1AgentsCreateOrUpdateAliasRequest, + AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict, + ) + from .agents_api_v1_agents_delete_aliasop import ( + AgentsAPIV1AgentsDeleteAliasRequest, + AgentsAPIV1AgentsDeleteAliasRequestTypedDict, + ) + from .agents_api_v1_agents_deleteop import ( + AgentsAPIV1AgentsDeleteRequest, + AgentsAPIV1AgentsDeleteRequestTypedDict, + ) + from .agents_api_v1_agents_get_versionop import ( + AgentsAPIV1AgentsGetVersionRequest, + AgentsAPIV1AgentsGetVersionRequestTypedDict, + ) + from .agents_api_v1_agents_getop import ( + AgentsAPIV1AgentsGetAgentVersion, + AgentsAPIV1AgentsGetAgentVersionTypedDict, + AgentsAPIV1AgentsGetRequest, + AgentsAPIV1AgentsGetRequestTypedDict, + ) + from .agents_api_v1_agents_list_version_aliasesop import ( + AgentsAPIV1AgentsListVersionAliasesRequest, + AgentsAPIV1AgentsListVersionAliasesRequestTypedDict, + ) + from .agents_api_v1_agents_list_versionsop import ( + AgentsAPIV1AgentsListVersionsRequest, + AgentsAPIV1AgentsListVersionsRequestTypedDict, + ) + from .agents_api_v1_agents_listop import ( + AgentsAPIV1AgentsListRequest, + AgentsAPIV1AgentsListRequestTypedDict, + ) + from .agents_api_v1_agents_update_versionop import ( + AgentsAPIV1AgentsUpdateVersionRequest, + AgentsAPIV1AgentsUpdateVersionRequestTypedDict, + ) + from .agents_api_v1_agents_updateop import ( + AgentsAPIV1AgentsUpdateRequest, + AgentsAPIV1AgentsUpdateRequestTypedDict, + ) + from .agents_api_v1_conversations_append_streamop import ( + AgentsAPIV1ConversationsAppendStreamRequest, + AgentsAPIV1ConversationsAppendStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_appendop import ( + AgentsAPIV1ConversationsAppendRequest, + AgentsAPIV1ConversationsAppendRequestTypedDict, + ) + from .agents_api_v1_conversations_deleteop import ( + AgentsAPIV1ConversationsDeleteRequest, + AgentsAPIV1ConversationsDeleteRequestTypedDict, + ) + from .agents_api_v1_conversations_getop import ( + AgentsAPIV1ConversationsGetRequest, + 
AgentsAPIV1ConversationsGetRequestTypedDict, + ResponseV1ConversationsGet, + ResponseV1ConversationsGetTypedDict, + ) + from .agents_api_v1_conversations_historyop import ( + AgentsAPIV1ConversationsHistoryRequest, + AgentsAPIV1ConversationsHistoryRequestTypedDict, + ) + from .agents_api_v1_conversations_listop import ( + AgentsAPIV1ConversationsListRequest, + AgentsAPIV1ConversationsListRequestTypedDict, + AgentsAPIV1ConversationsListResponse, + AgentsAPIV1ConversationsListResponseTypedDict, + ) + from .agents_api_v1_conversations_messagesop import ( + AgentsAPIV1ConversationsMessagesRequest, + AgentsAPIV1ConversationsMessagesRequestTypedDict, + ) + from .agents_api_v1_conversations_restart_streamop import ( + AgentsAPIV1ConversationsRestartStreamRequest, + AgentsAPIV1ConversationsRestartStreamRequestTypedDict, + ) + from .agents_api_v1_conversations_restartop import ( + AgentsAPIV1ConversationsRestartRequest, + AgentsAPIV1ConversationsRestartRequestTypedDict, + ) from .agentscompletionrequest import ( AgentsCompletionRequest, AgentsCompletionRequestMessage, @@ -57,28 +131,15 @@ AgentsCompletionStreamRequestToolChoiceTypedDict, AgentsCompletionStreamRequestTypedDict, ) - from .agentupdaterequest import ( - AgentUpdateRequest, - AgentUpdateRequestTool, - AgentUpdateRequestToolTypedDict, - AgentUpdateRequestTypedDict, - ) from .apiendpoint import APIEndpoint - from .appendconversationop import ( - AppendConversationRequest, - AppendConversationRequestTypedDict, - ) - from .appendconversationstreamop import ( - AppendConversationStreamRequest, - AppendConversationStreamRequestTypedDict, + from .archivemodelresponse import ( + ArchiveModelResponse, + ArchiveModelResponseTypedDict, ) - from .archiveftmodelout import ArchiveFTModelOut, ArchiveFTModelOutTypedDict - from .archivemodelop import ArchiveModelRequest, ArchiveModelRequestTypedDict from .assistantmessage import ( AssistantMessage, AssistantMessageContent, AssistantMessageContentTypedDict, - 
AssistantMessageRole, AssistantMessageTypedDict, ) from .audiochunk import AudioChunk, AudioChunkTypedDict @@ -94,19 +155,10 @@ ) from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .batcherror import BatchError, BatchErrorTypedDict - from .batchjobin import BatchJobIn, BatchJobInTypedDict - from .batchjobout import BatchJobOut, BatchJobOutTypedDict - from .batchjobsout import BatchJobsOut, BatchJobsOutTypedDict + from .batchjob import BatchJob, BatchJobTypedDict from .batchjobstatus import BatchJobStatus from .batchrequest import BatchRequest, BatchRequestTypedDict from .builtinconnectors import BuiltInConnectors - from .cancelbatchjobop import CancelBatchJobRequest, CancelBatchJobRequestTypedDict - from .cancelfinetuningjobop import ( - CancelFineTuningJobRequest, - CancelFineTuningJobRequestTypedDict, - CancelFineTuningJobResponse, - CancelFineTuningJobResponseTypedDict, - ) from .chatclassificationrequest import ( ChatClassificationRequest, ChatClassificationRequestTypedDict, @@ -150,7 +202,7 @@ ChatModerationRequestInputs3TypedDict, ChatModerationRequestTypedDict, ) - from .checkpointout import CheckpointOut, CheckpointOutTypedDict + from .checkpoint import Checkpoint, CheckpointTypedDict from .classificationrequest import ( ClassificationRequest, ClassificationRequestInputs, @@ -165,60 +217,65 @@ ClassificationTargetResult, ClassificationTargetResultTypedDict, ) - from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutIntegration, - ClassifierDetailedJobOutIntegrationTypedDict, - ClassifierDetailedJobOutStatus, - ClassifierDetailedJobOutTypedDict, - ) - from .classifierftmodelout import ( - ClassifierFTModelOut, - ClassifierFTModelOutTypedDict, - ) - from .classifierjobout import ( - ClassifierJobOut, - ClassifierJobOutIntegration, - ClassifierJobOutIntegrationTypedDict, - ClassifierJobOutStatus, - ClassifierJobOutTypedDict, - ) - from .classifiertargetin import ClassifierTargetIn, 
ClassifierTargetInTypedDict - from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict + from .classifierfinetunedmodel import ( + ClassifierFineTunedModel, + ClassifierFineTunedModelTypedDict, + ) + from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobIntegration, + ClassifierFineTuningJobIntegrationTypedDict, + ClassifierFineTuningJobStatus, + ClassifierFineTuningJobTypedDict, + UnknownClassifierFineTuningJobIntegration, + ) + from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsIntegration, + ClassifierFineTuningJobDetailsIntegrationTypedDict, + ClassifierFineTuningJobDetailsStatus, + ClassifierFineTuningJobDetailsTypedDict, + UnknownClassifierFineTuningJobDetailsIntegration, + ) + from .classifiertarget import ClassifierTarget, ClassifierTargetTypedDict + from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, + ) from .classifiertrainingparameters import ( ClassifierTrainingParameters, ClassifierTrainingParametersTypedDict, ) - from .classifiertrainingparametersin import ( - ClassifierTrainingParametersIn, - ClassifierTrainingParametersInTypedDict, - ) from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict from .completionargs import CompletionArgs, CompletionArgsTypedDict from .completionargsstop import CompletionArgsStop, CompletionArgsStopTypedDict from .completionchunk import CompletionChunk, CompletionChunkTypedDict - from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutIntegration, - CompletionDetailedJobOutIntegrationTypedDict, - CompletionDetailedJobOutRepository, - CompletionDetailedJobOutRepositoryTypedDict, - CompletionDetailedJobOutStatus, - CompletionDetailedJobOutTypedDict, - ) from .completionevent import CompletionEvent, CompletionEventTypedDict - from .completionftmodelout import ( - CompletionFTModelOut, - 
CompletionFTModelOutTypedDict, - ) - from .completionjobout import ( - CompletionJobOut, - CompletionJobOutIntegration, - CompletionJobOutIntegrationTypedDict, - CompletionJobOutRepository, - CompletionJobOutRepositoryTypedDict, - CompletionJobOutStatus, - CompletionJobOutTypedDict, + from .completionfinetunedmodel import ( + CompletionFineTunedModel, + CompletionFineTunedModelTypedDict, + ) + from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobIntegration, + CompletionFineTuningJobIntegrationTypedDict, + CompletionFineTuningJobRepository, + CompletionFineTuningJobRepositoryTypedDict, + CompletionFineTuningJobStatus, + CompletionFineTuningJobTypedDict, + UnknownCompletionFineTuningJobIntegration, + UnknownCompletionFineTuningJobRepository, + ) + from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsIntegration, + CompletionFineTuningJobDetailsIntegrationTypedDict, + CompletionFineTuningJobDetailsRepository, + CompletionFineTuningJobDetailsRepositoryTypedDict, + CompletionFineTuningJobDetailsStatus, + CompletionFineTuningJobDetailsTypedDict, + UnknownCompletionFineTuningJobDetailsIntegration, + UnknownCompletionFineTuningJobDetailsRepository, ) from .completionresponsestreamchoice import ( CompletionResponseStreamChoice, @@ -229,11 +286,7 @@ CompletionTrainingParameters, CompletionTrainingParametersTypedDict, ) - from .completiontrainingparametersin import ( - CompletionTrainingParametersIn, - CompletionTrainingParametersInTypedDict, - ) - from .contentchunk import ContentChunk, ContentChunkTypedDict + from .contentchunk import ContentChunk, ContentChunkTypedDict, UnknownContentChunk from .conversationappendrequest import ( ConversationAppendRequest, ConversationAppendRequestHandoffExecution, @@ -249,10 +302,10 @@ ConversationEventsData, ConversationEventsDataTypedDict, ConversationEventsTypedDict, + UnknownConversationEventsData, ) from .conversationhistory import ( 
ConversationHistory, - ConversationHistoryObject, ConversationHistoryTypedDict, Entry, EntryTypedDict, @@ -260,7 +313,6 @@ from .conversationinputs import ConversationInputs, ConversationInputsTypedDict from .conversationmessages import ( ConversationMessages, - ConversationMessagesObject, ConversationMessagesTypedDict, ) from .conversationrequest import ( @@ -274,7 +326,6 @@ ) from .conversationresponse import ( ConversationResponse, - ConversationResponseObject, ConversationResponseTypedDict, Output, OutputTypedDict, @@ -302,38 +353,46 @@ ConversationStreamRequestToolTypedDict, ConversationStreamRequestTypedDict, ) + from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkThinking, + ConversationThinkChunkThinkingTypedDict, + ConversationThinkChunkTypedDict, + ) from .conversationusageinfo import ( ConversationUsageInfo, ConversationUsageInfoTypedDict, ) - from .createfinetuningjobop import ( - CreateFineTuningJobResponse, - CreateFineTuningJobResponseTypedDict, - Response, - ResponseTypedDict, - ) - from .createorupdateagentaliasop import ( - CreateOrUpdateAgentAliasRequest, - CreateOrUpdateAgentAliasRequestTypedDict, - ) - from .deleteagentaliasop import ( - DeleteAgentAliasRequest, - DeleteAgentAliasRequestTypedDict, + from .createagentrequest import ( + CreateAgentRequest, + CreateAgentRequestTool, + CreateAgentRequestToolTypedDict, + CreateAgentRequestTypedDict, + ) + from .createbatchjobrequest import ( + CreateBatchJobRequest, + CreateBatchJobRequestTypedDict, + ) + from .createfileresponse import CreateFileResponse, CreateFileResponseTypedDict + from .createfinetuningjobrequest import ( + CreateFineTuningJobRequest, + CreateFineTuningJobRequestIntegration, + CreateFineTuningJobRequestIntegrationTypedDict, + CreateFineTuningJobRequestRepository, + CreateFineTuningJobRequestRepositoryTypedDict, + CreateFineTuningJobRequestTypedDict, + Hyperparameters, + HyperparametersTypedDict, ) - from .deleteagentop import 
DeleteAgentRequest, DeleteAgentRequestTypedDict - from .deleteconversationop import ( - DeleteConversationRequest, - DeleteConversationRequestTypedDict, + from .createlibraryrequest import ( + CreateLibraryRequest, + CreateLibraryRequestTypedDict, ) - from .deletedocumentop import DeleteDocumentRequest, DeleteDocumentRequestTypedDict - from .deletefileop import DeleteFileRequest, DeleteFileRequestTypedDict - from .deletefileout import DeleteFileOut, DeleteFileOutTypedDict - from .deletelibraryaccessop import ( - DeleteLibraryAccessRequest, - DeleteLibraryAccessRequestTypedDict, + from .delete_model_v1_models_model_id_deleteop import ( + DeleteModelV1ModelsModelIDDeleteRequest, + DeleteModelV1ModelsModelIDDeleteRequestTypedDict, ) - from .deletelibraryop import DeleteLibraryRequest, DeleteLibraryRequestTypedDict - from .deletemodelop import DeleteModelRequest, DeleteModelRequestTypedDict + from .deletefileresponse import DeleteFileResponse, DeleteFileResponseTypedDict from .deletemodelout import DeleteModelOut, DeleteModelOutTypedDict from .deltamessage import ( DeltaMessage, @@ -341,21 +400,10 @@ DeltaMessageContentTypedDict, DeltaMessageTypedDict, ) + from .document import Document, DocumentTypedDict from .documentlibrarytool import DocumentLibraryTool, DocumentLibraryToolTypedDict - from .documentout import DocumentOut, DocumentOutTypedDict from .documenttextcontent import DocumentTextContent, DocumentTextContentTypedDict - from .documentupdatein import ( - Attributes, - AttributesTypedDict, - DocumentUpdateIn, - DocumentUpdateInTypedDict, - ) - from .documenturlchunk import ( - DocumentURLChunk, - DocumentURLChunkType, - DocumentURLChunkTypedDict, - ) - from .downloadfileop import DownloadFileRequest, DownloadFileRequestTypedDict + from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .embeddingdtype import EmbeddingDtype from .embeddingrequest import ( EmbeddingRequest, @@ -370,12 +418,35 @@ ) from .encodingformat import EncodingFormat 
from .entitytype import EntityType - from .eventout import EventOut, EventOutTypedDict + from .event import Event, EventTypedDict from .file import File, FileTypedDict from .filechunk import FileChunk, FileChunkTypedDict from .filepurpose import FilePurpose + from .files_api_routes_delete_fileop import ( + FilesAPIRoutesDeleteFileRequest, + FilesAPIRoutesDeleteFileRequestTypedDict, + ) + from .files_api_routes_download_fileop import ( + FilesAPIRoutesDownloadFileRequest, + FilesAPIRoutesDownloadFileRequestTypedDict, + ) + from .files_api_routes_get_signed_urlop import ( + FilesAPIRoutesGetSignedURLRequest, + FilesAPIRoutesGetSignedURLRequestTypedDict, + ) + from .files_api_routes_list_filesop import ( + FilesAPIRoutesListFilesRequest, + FilesAPIRoutesListFilesRequestTypedDict, + ) + from .files_api_routes_retrieve_fileop import ( + FilesAPIRoutesRetrieveFileRequest, + FilesAPIRoutesRetrieveFileRequestTypedDict, + ) + from .files_api_routes_upload_fileop import ( + MultiPartBodyParams, + MultiPartBodyParamsTypedDict, + ) from .fileschema import FileSchema, FileSchemaTypedDict - from .filesignedurl import FileSignedURL, FileSignedURLTypedDict from .fimcompletionrequest import ( FIMCompletionRequest, FIMCompletionRequestStop, @@ -393,11 +464,11 @@ FIMCompletionStreamRequestTypedDict, ) from .finetuneablemodeltype import FineTuneableModelType - from .ftclassifierlossfunction import FTClassifierLossFunction - from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, + from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, ) + from .ftclassifierlossfunction import FTClassifierLossFunction from .ftmodelcard import FTModelCard, FTModelCardTypedDict from .function import Function, FunctionTypedDict from .functioncall import ( @@ -408,157 +479,196 @@ ) from .functioncallentry import ( FunctionCallEntry, - FunctionCallEntryObject, - FunctionCallEntryType, + 
FunctionCallEntryConfirmationStatus, FunctionCallEntryTypedDict, ) from .functioncallentryarguments import ( FunctionCallEntryArguments, FunctionCallEntryArgumentsTypedDict, ) - from .functioncallevent import FunctionCallEvent, FunctionCallEventTypedDict - from .functionname import FunctionName, FunctionNameTypedDict - from .functionresultentry import ( - FunctionResultEntry, - FunctionResultEntryObject, - FunctionResultEntryType, - FunctionResultEntryTypedDict, + from .functioncallevent import ( + FunctionCallEvent, + FunctionCallEventConfirmationStatus, + FunctionCallEventTypedDict, ) + from .functionname import FunctionName, FunctionNameTypedDict + from .functionresultentry import FunctionResultEntry, FunctionResultEntryTypedDict from .functiontool import FunctionTool, FunctionToolTypedDict - from .getagentop import ( - GetAgentAgentVersion, - GetAgentAgentVersionTypedDict, - GetAgentRequest, - GetAgentRequestTypedDict, - ) - from .getagentversionop import ( - GetAgentVersionRequest, - GetAgentVersionRequestTypedDict, - ) - from .getbatchjobop import GetBatchJobRequest, GetBatchJobRequestTypedDict - from .getconversationhistoryop import ( - GetConversationHistoryRequest, - GetConversationHistoryRequestTypedDict, - ) - from .getconversationmessagesop import ( - GetConversationMessagesRequest, - GetConversationMessagesRequestTypedDict, - ) - from .getconversationop import ( - GetConversationRequest, - GetConversationRequestTypedDict, - ResponseV1ConversationsGet, - ResponseV1ConversationsGetTypedDict, - ) - from .getdocumentextractedtextsignedurlop import ( - GetDocumentExtractedTextSignedURLRequest, - GetDocumentExtractedTextSignedURLRequestTypedDict, - ) - from .getdocumentop import GetDocumentRequest, GetDocumentRequestTypedDict - from .getdocumentsignedurlop import ( - GetDocumentSignedURLRequest, - GetDocumentSignedURLRequestTypedDict, - ) - from .getdocumentstatusop import ( - GetDocumentStatusRequest, - GetDocumentStatusRequestTypedDict, - ) - from 
.getdocumenttextcontentop import ( - GetDocumentTextContentRequest, - GetDocumentTextContentRequestTypedDict, - ) - from .getfilesignedurlop import ( - GetFileSignedURLRequest, - GetFileSignedURLRequestTypedDict, + from .getfileresponse import GetFileResponse, GetFileResponseTypedDict + from .getsignedurlresponse import ( + GetSignedURLResponse, + GetSignedURLResponseTypedDict, ) - from .getfinetuningjobop import ( - GetFineTuningJobRequest, - GetFineTuningJobRequestTypedDict, - GetFineTuningJobResponse, - GetFineTuningJobResponseTypedDict, - ) - from .getlibraryop import GetLibraryRequest, GetLibraryRequestTypedDict + from .githubrepository import GithubRepository, GithubRepositoryTypedDict from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict - from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict - from .httpvalidationerror import HTTPValidationError, HTTPValidationErrorData + from .imagedetail import ImageDetail from .imagegenerationtool import ImageGenerationTool, ImageGenerationToolTypedDict from .imageurl import ImageURL, ImageURLTypedDict from .imageurlchunk import ( ImageURLChunk, - ImageURLChunkType, ImageURLChunkTypedDict, ImageURLUnion, ImageURLUnionTypedDict, ) from .inputentries import InputEntries, InputEntriesTypedDict - from .inputs import ( - Inputs, - InputsMessage, - InputsMessageTypedDict, - InputsTypedDict, - InstructRequestInputs, - InstructRequestInputsTypedDict, - ) + from .inputs import Inputs, InputsTypedDict from .instructrequest import ( InstructRequest, InstructRequestMessage, InstructRequestMessageTypedDict, InstructRequestTypedDict, ) - from .jobin import ( - Hyperparameters, - HyperparametersTypedDict, - JobIn, - JobInIntegration, - JobInIntegrationTypedDict, - JobInRepository, - JobInRepositoryTypedDict, - JobInTypedDict, - ) - from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict - from .jobsout import JobsOut, JobsOutData, JobsOutDataTypedDict, JobsOutTypedDict 
- from .jsonschema import JSONSchema, JSONSchemaTypedDict - from .legacyjobmetadataout import ( - LegacyJobMetadataOut, - LegacyJobMetadataOutTypedDict, - ) - from .libraryin import LibraryIn, LibraryInTypedDict - from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict - from .libraryout import LibraryOut, LibraryOutTypedDict - from .listagentaliasesop import ( - ListAgentAliasesRequest, - ListAgentAliasesRequestTypedDict, - ) - from .listagentsop import ListAgentsRequest, ListAgentsRequestTypedDict - from .listagentversionsop import ( - ListAgentVersionsRequest, - ListAgentVersionsRequestTypedDict, - ) - from .listbatchjobsop import ( - ListBatchJobsRequest, - ListBatchJobsRequestTypedDict, + from .jobmetadata import JobMetadata, JobMetadataTypedDict + from .jobs_api_routes_batch_cancel_batch_jobop import ( + JobsAPIRoutesBatchCancelBatchJobRequest, + JobsAPIRoutesBatchCancelBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobop import ( + JobsAPIRoutesBatchGetBatchJobRequest, + JobsAPIRoutesBatchGetBatchJobRequestTypedDict, + ) + from .jobs_api_routes_batch_get_batch_jobsop import ( + JobsAPIRoutesBatchGetBatchJobsRequest, + JobsAPIRoutesBatchGetBatchJobsRequestTypedDict, OrderBy, ) - from .listconversationsop import ( - ListConversationsRequest, - ListConversationsRequestTypedDict, - ListConversationsResponse, - ListConversationsResponseTypedDict, - ) - from .listdocumentout import ListDocumentOut, ListDocumentOutTypedDict - from .listdocumentsop import ListDocumentsRequest, ListDocumentsRequestTypedDict - from .listfilesop import ListFilesRequest, ListFilesRequestTypedDict - from .listfilesout import ListFilesOut, ListFilesOutTypedDict - from .listfinetuningjobsop import ( - ListFineTuningJobsRequest, - ListFineTuningJobsRequestTypedDict, - ListFineTuningJobsStatus, - ) - from .listlibraryaccessesop import ( - ListLibraryAccessesRequest, - ListLibraryAccessesRequestTypedDict, - ) - from .listlibraryout import ListLibraryOut, 
ListLibraryOutTypedDict + from .jobs_api_routes_fine_tuning_archive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningArchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict, + ) + from .jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCancelFineTuningJobRequest, + JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningCancelFineTuningJobResponse, + JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict, + UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_create_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningCreateFineTuningJobResponse, + JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict, + Response, + ResponseTypedDict, + UnknownResponse, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningGetFineTuningJobRequest, + JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobResponse, + JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict, + UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_get_fine_tuning_jobsop import ( + JobsAPIRoutesFineTuningGetFineTuningJobsRequest, + JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict, + JobsAPIRoutesFineTuningGetFineTuningJobsStatus, + ) + from .jobs_api_routes_fine_tuning_start_fine_tuning_jobop import ( + JobsAPIRoutesFineTuningStartFineTuningJobRequest, + JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict, + JobsAPIRoutesFineTuningStartFineTuningJobResponse, + JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict, + UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + ) + from .jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest, + JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict, + ) + from 
.jobs_api_routes_fine_tuning_update_fine_tuned_modelop import ( + JobsAPIRoutesFineTuningUpdateFineTunedModelRequest, + JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict, + UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ) + from .jsonschema import JSONSchema, JSONSchemaTypedDict + from .legacyjobmetadata import LegacyJobMetadata, LegacyJobMetadataTypedDict + from .libraries_delete_v1op import ( + LibrariesDeleteV1Request, + LibrariesDeleteV1RequestTypedDict, + ) + from .libraries_documents_delete_v1op import ( + LibrariesDocumentsDeleteV1Request, + LibrariesDocumentsDeleteV1RequestTypedDict, + ) + from .libraries_documents_get_extracted_text_signed_url_v1op import ( + LibrariesDocumentsGetExtractedTextSignedURLV1Request, + LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_signed_url_v1op import ( + LibrariesDocumentsGetSignedURLV1Request, + LibrariesDocumentsGetSignedURLV1RequestTypedDict, + ) + from .libraries_documents_get_status_v1op import ( + LibrariesDocumentsGetStatusV1Request, + LibrariesDocumentsGetStatusV1RequestTypedDict, + ) + from .libraries_documents_get_text_content_v1op import ( + LibrariesDocumentsGetTextContentV1Request, + LibrariesDocumentsGetTextContentV1RequestTypedDict, + ) + from .libraries_documents_get_v1op import ( + LibrariesDocumentsGetV1Request, + LibrariesDocumentsGetV1RequestTypedDict, + ) + from .libraries_documents_list_v1op import ( + LibrariesDocumentsListV1Request, + LibrariesDocumentsListV1RequestTypedDict, + ) + from .libraries_documents_reprocess_v1op import ( + LibrariesDocumentsReprocessV1Request, + LibrariesDocumentsReprocessV1RequestTypedDict, + ) + from .libraries_documents_update_v1op import ( + LibrariesDocumentsUpdateV1Request, + LibrariesDocumentsUpdateV1RequestTypedDict, + ) + from .libraries_documents_upload_v1op import ( + 
DocumentUpload, + DocumentUploadTypedDict, + LibrariesDocumentsUploadV1Request, + LibrariesDocumentsUploadV1RequestTypedDict, + ) + from .libraries_get_v1op import ( + LibrariesGetV1Request, + LibrariesGetV1RequestTypedDict, + ) + from .libraries_share_create_v1op import ( + LibrariesShareCreateV1Request, + LibrariesShareCreateV1RequestTypedDict, + ) + from .libraries_share_delete_v1op import ( + LibrariesShareDeleteV1Request, + LibrariesShareDeleteV1RequestTypedDict, + ) + from .libraries_share_list_v1op import ( + LibrariesShareListV1Request, + LibrariesShareListV1RequestTypedDict, + ) + from .libraries_update_v1op import ( + LibrariesUpdateV1Request, + LibrariesUpdateV1RequestTypedDict, + ) + from .library import Library, LibraryTypedDict + from .listbatchjobsresponse import ( + ListBatchJobsResponse, + ListBatchJobsResponseTypedDict, + ) + from .listdocumentsresponse import ( + ListDocumentsResponse, + ListDocumentsResponseTypedDict, + ) + from .listfilesresponse import ListFilesResponse, ListFilesResponseTypedDict + from .listfinetuningjobsresponse import ( + ListFineTuningJobsResponse, + ListFineTuningJobsResponseData, + ListFineTuningJobsResponseDataTypedDict, + ListFineTuningJobsResponseTypedDict, + UnknownListFineTuningJobsResponseData, + ) + from .listlibrariesresponse import ( + ListLibrariesResponse, + ListLibrariesResponseTypedDict, + ) from .listsharingout import ListSharingOut, ListSharingOutTypedDict from .messageentries import MessageEntries, MessageEntriesTypedDict from .messageinputcontentchunks import ( @@ -569,10 +679,8 @@ MessageInputEntry, MessageInputEntryContent, MessageInputEntryContentTypedDict, - MessageInputEntryObject, - MessageInputEntryRole, - MessageInputEntryType, MessageInputEntryTypedDict, + Role, ) from .messageoutputcontentchunks import ( MessageOutputContentChunks, @@ -582,43 +690,39 @@ MessageOutputEntry, MessageOutputEntryContent, MessageOutputEntryContentTypedDict, - MessageOutputEntryObject, - MessageOutputEntryRole, - 
MessageOutputEntryType, MessageOutputEntryTypedDict, ) from .messageoutputevent import ( MessageOutputEvent, MessageOutputEventContent, MessageOutputEventContentTypedDict, - MessageOutputEventRole, MessageOutputEventTypedDict, ) - from .metricout import MetricOut, MetricOutTypedDict + from .metric import Metric, MetricTypedDict from .mistralpromptmode import MistralPromptMode from .modelcapabilities import ModelCapabilities, ModelCapabilitiesTypedDict from .modelconversation import ( ModelConversation, - ModelConversationObject, ModelConversationTool, ModelConversationToolTypedDict, ModelConversationTypedDict, + UnknownModelConversationTool, ) from .modellist import ( ModelList, ModelListData, ModelListDataTypedDict, ModelListTypedDict, + UnknownModelListData, ) from .moderationobject import ModerationObject, ModerationObjectTypedDict from .moderationresponse import ModerationResponse, ModerationResponseTypedDict - from .no_response_error import NoResponseError from .ocrimageobject import OCRImageObject, OCRImageObjectTypedDict from .ocrpagedimensions import OCRPageDimensions, OCRPageDimensionsTypedDict from .ocrpageobject import OCRPageObject, OCRPageObjectTypedDict from .ocrrequest import ( - Document, - DocumentTypedDict, + DocumentUnion, + DocumentUnionTypedDict, OCRRequest, OCRRequestTypedDict, TableFormat, @@ -640,6 +744,18 @@ RealtimeTranscriptionErrorDetailMessageTypedDict, RealtimeTranscriptionErrorDetailTypedDict, ) + from .realtimetranscriptioninputaudioappend import ( + RealtimeTranscriptionInputAudioAppend, + RealtimeTranscriptionInputAudioAppendTypedDict, + ) + from .realtimetranscriptioninputaudioend import ( + RealtimeTranscriptionInputAudioEnd, + RealtimeTranscriptionInputAudioEndTypedDict, + ) + from .realtimetranscriptioninputaudioflush import ( + RealtimeTranscriptionInputAudioFlush, + RealtimeTranscriptionInputAudioFlushTypedDict, + ) from .realtimetranscriptionsession import ( RealtimeTranscriptionSession, 
RealtimeTranscriptionSessionTypedDict, @@ -652,15 +768,15 @@ RealtimeTranscriptionSessionUpdated, RealtimeTranscriptionSessionUpdatedTypedDict, ) - from .referencechunk import ( - ReferenceChunk, - ReferenceChunkType, - ReferenceChunkTypedDict, + from .realtimetranscriptionsessionupdatemessage import ( + RealtimeTranscriptionSessionUpdateMessage, + RealtimeTranscriptionSessionUpdateMessageTypedDict, ) - from .reprocessdocumentop import ( - ReprocessDocumentRequest, - ReprocessDocumentRequestTypedDict, + from .realtimetranscriptionsessionupdatepayload import ( + RealtimeTranscriptionSessionUpdatePayload, + RealtimeTranscriptionSessionUpdatePayloadTypedDict, ) + from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .requestsource import RequestSource from .responsedoneevent import ResponseDoneEvent, ResponseDoneEventTypedDict from .responseerrorevent import ResponseErrorEvent, ResponseErrorEventTypedDict @@ -670,25 +786,14 @@ ResponseStartedEvent, ResponseStartedEventTypedDict, ) - from .responsevalidationerror import ResponseValidationError - from .restartconversationop import ( - RestartConversationRequest, - RestartConversationRequestTypedDict, - ) - from .restartconversationstreamop import ( - RestartConversationStreamRequest, - RestartConversationStreamRequestTypedDict, - ) - from .retrievefileop import RetrieveFileRequest, RetrieveFileRequestTypedDict - from .retrievefileout import RetrieveFileOut, RetrieveFileOutTypedDict - from .retrievemodelop import ( + from .retrieve_model_v1_models_model_id_getop import ( ResponseRetrieveModelV1ModelsModelIDGet, ResponseRetrieveModelV1ModelsModelIDGetTypedDict, - RetrieveModelRequest, - RetrieveModelRequestTypedDict, + RetrieveModelV1ModelsModelIDGetRequest, + RetrieveModelV1ModelsModelIDGetRequestTypedDict, + UnknownResponseRetrieveModelV1ModelsModelIDGet, ) from .sampletype import SampleType - from .sdkerror import SDKError from .security import Security, SecurityTypedDict from .shareenum import 
ShareEnum from .sharingdelete import SharingDelete, SharingDeleteTypedDict @@ -696,12 +801,6 @@ from .sharingout import SharingOut, SharingOutTypedDict from .source import Source from .ssetypes import SSETypes - from .startfinetuningjobop import ( - StartFineTuningJobRequest, - StartFineTuningJobRequestTypedDict, - StartFineTuningJobResponse, - StartFineTuningJobResponseTypedDict, - ) from .systemmessage import ( SystemMessage, SystemMessageContent, @@ -712,19 +811,24 @@ SystemMessageContentChunks, SystemMessageContentChunksTypedDict, ) - from .textchunk import TextChunk, TextChunkType, TextChunkTypedDict + from .textchunk import TextChunk, TextChunkTypedDict from .thinkchunk import ( ThinkChunk, - ThinkChunkType, + ThinkChunkThinking, + ThinkChunkThinkingTypedDict, ThinkChunkTypedDict, - Thinking, - ThinkingTypedDict, ) from .timestampgranularity import TimestampGranularity from .tool import Tool, ToolTypedDict from .toolcall import ToolCall, ToolCallTypedDict + from .toolcallconfirmation import ( + Confirmation, + ToolCallConfirmation, + ToolCallConfirmationTypedDict, + ) from .toolchoice import ToolChoice, ToolChoiceTypedDict from .toolchoiceenum import ToolChoiceEnum + from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict from .toolexecutiondeltaevent import ( ToolExecutionDeltaEvent, ToolExecutionDeltaEventName, @@ -741,8 +845,6 @@ ToolExecutionEntry, ToolExecutionEntryName, ToolExecutionEntryNameTypedDict, - ToolExecutionEntryObject, - ToolExecutionEntryType, ToolExecutionEntryTypedDict, ) from .toolexecutionstartedevent import ( @@ -755,7 +857,6 @@ ToolFileChunk, ToolFileChunkTool, ToolFileChunkToolTypedDict, - ToolFileChunkType, ToolFileChunkTypedDict, ) from .toolmessage import ( @@ -768,7 +869,6 @@ ToolReferenceChunk, ToolReferenceChunkTool, ToolReferenceChunkToolTypedDict, - ToolReferenceChunkType, ToolReferenceChunkTypedDict, ) from .tooltypes import ToolTypes @@ -779,7 +879,6 @@ ) from .transcriptionsegmentchunk import ( 
TranscriptionSegmentChunk, - TranscriptionSegmentChunkType, TranscriptionSegmentChunkTypedDict, ) from .transcriptionstreamdone import ( @@ -791,6 +890,7 @@ TranscriptionStreamEventsData, TranscriptionStreamEventsDataTypedDict, TranscriptionStreamEventsTypedDict, + UnknownTranscriptionStreamEventsData, ) from .transcriptionstreameventtypes import TranscriptionStreamEventTypes from .transcriptionstreamlanguage import ( @@ -805,34 +905,27 @@ TranscriptionStreamTextDelta, TranscriptionStreamTextDeltaTypedDict, ) - from .unarchiveftmodelout import UnarchiveFTModelOut, UnarchiveFTModelOutTypedDict - from .unarchivemodelop import UnarchiveModelRequest, UnarchiveModelRequestTypedDict - from .updateagentop import UpdateAgentRequest, UpdateAgentRequestTypedDict - from .updateagentversionop import ( - UpdateAgentVersionRequest, - UpdateAgentVersionRequestTypedDict, - ) - from .updatedocumentop import UpdateDocumentRequest, UpdateDocumentRequestTypedDict - from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict - from .updatelibraryop import UpdateLibraryRequest, UpdateLibraryRequestTypedDict - from .updatemodelop import ( - UpdateModelRequest, - UpdateModelRequestTypedDict, - UpdateModelResponse, - UpdateModelResponseTypedDict, - ) - from .updateorcreatelibraryaccessop import ( - UpdateOrCreateLibraryAccessRequest, - UpdateOrCreateLibraryAccessRequestTypedDict, - ) - from .uploaddocumentop import ( - DocumentUpload, - DocumentUploadTypedDict, - UploadDocumentRequest, - UploadDocumentRequestTypedDict, + from .unarchivemodelresponse import ( + UnarchiveModelResponse, + UnarchiveModelResponseTypedDict, + ) + from .updateagentrequest import ( + UpdateAgentRequest, + UpdateAgentRequestTool, + UpdateAgentRequestToolTypedDict, + UpdateAgentRequestTypedDict, + ) + from .updatedocumentrequest import ( + Attributes, + AttributesTypedDict, + UpdateDocumentRequest, + UpdateDocumentRequestTypedDict, ) - from .uploadfileop import MultiPartBodyParams, 
MultiPartBodyParamsTypedDict - from .uploadfileout import UploadFileOut, UploadFileOutTypedDict + from .updatelibraryrequest import ( + UpdateLibraryRequest, + UpdateLibraryRequestTypedDict, + ) + from .updatemodelrequest import UpdateModelRequest, UpdateModelRequestTypedDict from .usageinfo import UsageInfo, UsageInfoTypedDict from .usermessage import ( UserMessage, @@ -847,7 +940,10 @@ ValidationErrorTypedDict, ) from .wandbintegration import WandbIntegration, WandbIntegrationTypedDict - from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict + from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, + ) from .websearchpremiumtool import ( WebSearchPremiumTool, WebSearchPremiumToolTypedDict, @@ -862,28 +958,58 @@ "AgentConversation", "AgentConversationAgentVersion", "AgentConversationAgentVersionTypedDict", - "AgentConversationObject", "AgentConversationTypedDict", - "AgentCreationRequest", - "AgentCreationRequestTool", - "AgentCreationRequestToolTypedDict", - "AgentCreationRequestTypedDict", "AgentHandoffDoneEvent", "AgentHandoffDoneEventTypedDict", "AgentHandoffEntry", - "AgentHandoffEntryObject", - "AgentHandoffEntryType", "AgentHandoffEntryTypedDict", "AgentHandoffStartedEvent", "AgentHandoffStartedEventTypedDict", - "AgentObject", "AgentTool", "AgentToolTypedDict", "AgentTypedDict", - "AgentUpdateRequest", - "AgentUpdateRequestTool", - "AgentUpdateRequestToolTypedDict", - "AgentUpdateRequestTypedDict", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict", + "AgentsAPIV1AgentsDeleteAliasRequest", + "AgentsAPIV1AgentsDeleteAliasRequestTypedDict", + "AgentsAPIV1AgentsDeleteRequest", + "AgentsAPIV1AgentsDeleteRequestTypedDict", + "AgentsAPIV1AgentsGetAgentVersion", + "AgentsAPIV1AgentsGetAgentVersionTypedDict", + "AgentsAPIV1AgentsGetRequest", + "AgentsAPIV1AgentsGetRequestTypedDict", + "AgentsAPIV1AgentsGetVersionRequest", + 
"AgentsAPIV1AgentsGetVersionRequestTypedDict", + "AgentsAPIV1AgentsListRequest", + "AgentsAPIV1AgentsListRequestTypedDict", + "AgentsAPIV1AgentsListVersionAliasesRequest", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict", + "AgentsAPIV1AgentsListVersionsRequest", + "AgentsAPIV1AgentsListVersionsRequestTypedDict", + "AgentsAPIV1AgentsUpdateRequest", + "AgentsAPIV1AgentsUpdateRequestTypedDict", + "AgentsAPIV1AgentsUpdateVersionRequest", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict", + "AgentsAPIV1ConversationsAppendRequest", + "AgentsAPIV1ConversationsAppendRequestTypedDict", + "AgentsAPIV1ConversationsAppendStreamRequest", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict", + "AgentsAPIV1ConversationsDeleteRequest", + "AgentsAPIV1ConversationsDeleteRequestTypedDict", + "AgentsAPIV1ConversationsGetRequest", + "AgentsAPIV1ConversationsGetRequestTypedDict", + "AgentsAPIV1ConversationsHistoryRequest", + "AgentsAPIV1ConversationsHistoryRequestTypedDict", + "AgentsAPIV1ConversationsListRequest", + "AgentsAPIV1ConversationsListRequestTypedDict", + "AgentsAPIV1ConversationsListResponse", + "AgentsAPIV1ConversationsListResponseTypedDict", + "AgentsAPIV1ConversationsMessagesRequest", + "AgentsAPIV1ConversationsMessagesRequestTypedDict", + "AgentsAPIV1ConversationsRestartRequest", + "AgentsAPIV1ConversationsRestartRequestTypedDict", + "AgentsAPIV1ConversationsRestartStreamRequest", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict", "AgentsCompletionRequest", "AgentsCompletionRequestMessage", "AgentsCompletionRequestMessageTypedDict", @@ -900,20 +1026,13 @@ "AgentsCompletionStreamRequestToolChoice", "AgentsCompletionStreamRequestToolChoiceTypedDict", "AgentsCompletionStreamRequestTypedDict", - "AppendConversationRequest", - "AppendConversationRequestTypedDict", - "AppendConversationStreamRequest", - "AppendConversationStreamRequestTypedDict", - "ArchiveFTModelOut", - "ArchiveFTModelOutTypedDict", - "ArchiveModelRequest", - 
"ArchiveModelRequestTypedDict", + "ArchiveModelResponse", + "ArchiveModelResponseTypedDict", "Arguments", "ArgumentsTypedDict", "AssistantMessage", "AssistantMessageContent", "AssistantMessageContentTypedDict", - "AssistantMessageRole", "AssistantMessageTypedDict", "Attributes", "AttributesTypedDict", @@ -930,22 +1049,12 @@ "BaseModelCardTypedDict", "BatchError", "BatchErrorTypedDict", - "BatchJobIn", - "BatchJobInTypedDict", - "BatchJobOut", - "BatchJobOutTypedDict", + "BatchJob", "BatchJobStatus", - "BatchJobsOut", - "BatchJobsOutTypedDict", + "BatchJobTypedDict", "BatchRequest", "BatchRequestTypedDict", "BuiltInConnectors", - "CancelBatchJobRequest", - "CancelBatchJobRequestTypedDict", - "CancelFineTuningJobRequest", - "CancelFineTuningJobRequestTypedDict", - "CancelFineTuningJobResponse", - "CancelFineTuningJobResponseTypedDict", "ChatClassificationRequest", "ChatClassificationRequestTypedDict", "ChatCompletionChoice", @@ -977,8 +1086,8 @@ "ChatModerationRequestInputs3", "ChatModerationRequestInputs3TypedDict", "ChatModerationRequestTypedDict", - "CheckpointOut", - "CheckpointOutTypedDict", + "Checkpoint", + "CheckpointTypedDict", "ClassificationRequest", "ClassificationRequestInputs", "ClassificationRequestInputsTypedDict", @@ -987,25 +1096,23 @@ "ClassificationResponseTypedDict", "ClassificationTargetResult", "ClassificationTargetResultTypedDict", - "ClassifierDetailedJobOut", - "ClassifierDetailedJobOutIntegration", - "ClassifierDetailedJobOutIntegrationTypedDict", - "ClassifierDetailedJobOutStatus", - "ClassifierDetailedJobOutTypedDict", - "ClassifierFTModelOut", - "ClassifierFTModelOutTypedDict", - "ClassifierJobOut", - "ClassifierJobOutIntegration", - "ClassifierJobOutIntegrationTypedDict", - "ClassifierJobOutStatus", - "ClassifierJobOutTypedDict", - "ClassifierTargetIn", - "ClassifierTargetInTypedDict", - "ClassifierTargetOut", - "ClassifierTargetOutTypedDict", + "ClassifierFineTunedModel", + "ClassifierFineTunedModelTypedDict", + 
"ClassifierFineTuningJob", + "ClassifierFineTuningJobDetails", + "ClassifierFineTuningJobDetailsIntegration", + "ClassifierFineTuningJobDetailsIntegrationTypedDict", + "ClassifierFineTuningJobDetailsStatus", + "ClassifierFineTuningJobDetailsTypedDict", + "ClassifierFineTuningJobIntegration", + "ClassifierFineTuningJobIntegrationTypedDict", + "ClassifierFineTuningJobStatus", + "ClassifierFineTuningJobTypedDict", + "ClassifierTarget", + "ClassifierTargetResult", + "ClassifierTargetResultTypedDict", + "ClassifierTargetTypedDict", "ClassifierTrainingParameters", - "ClassifierTrainingParametersIn", - "ClassifierTrainingParametersInTypedDict", "ClassifierTrainingParametersTypedDict", "CodeInterpreterTool", "CodeInterpreterToolTypedDict", @@ -1015,31 +1122,30 @@ "CompletionArgsTypedDict", "CompletionChunk", "CompletionChunkTypedDict", - "CompletionDetailedJobOut", - "CompletionDetailedJobOutIntegration", - "CompletionDetailedJobOutIntegrationTypedDict", - "CompletionDetailedJobOutRepository", - "CompletionDetailedJobOutRepositoryTypedDict", - "CompletionDetailedJobOutStatus", - "CompletionDetailedJobOutTypedDict", "CompletionEvent", "CompletionEventTypedDict", - "CompletionFTModelOut", - "CompletionFTModelOutTypedDict", - "CompletionJobOut", - "CompletionJobOutIntegration", - "CompletionJobOutIntegrationTypedDict", - "CompletionJobOutRepository", - "CompletionJobOutRepositoryTypedDict", - "CompletionJobOutStatus", - "CompletionJobOutTypedDict", + "CompletionFineTunedModel", + "CompletionFineTunedModelTypedDict", + "CompletionFineTuningJob", + "CompletionFineTuningJobDetails", + "CompletionFineTuningJobDetailsIntegration", + "CompletionFineTuningJobDetailsIntegrationTypedDict", + "CompletionFineTuningJobDetailsRepository", + "CompletionFineTuningJobDetailsRepositoryTypedDict", + "CompletionFineTuningJobDetailsStatus", + "CompletionFineTuningJobDetailsTypedDict", + "CompletionFineTuningJobIntegration", + "CompletionFineTuningJobIntegrationTypedDict", + 
"CompletionFineTuningJobRepository", + "CompletionFineTuningJobRepositoryTypedDict", + "CompletionFineTuningJobStatus", + "CompletionFineTuningJobTypedDict", "CompletionResponseStreamChoice", "CompletionResponseStreamChoiceFinishReason", "CompletionResponseStreamChoiceTypedDict", "CompletionTrainingParameters", - "CompletionTrainingParametersIn", - "CompletionTrainingParametersInTypedDict", "CompletionTrainingParametersTypedDict", + "Confirmation", "ContentChunk", "ContentChunkTypedDict", "ConversationAppendRequest", @@ -1053,12 +1159,10 @@ "ConversationEventsDataTypedDict", "ConversationEventsTypedDict", "ConversationHistory", - "ConversationHistoryObject", "ConversationHistoryTypedDict", "ConversationInputs", "ConversationInputsTypedDict", "ConversationMessages", - "ConversationMessagesObject", "ConversationMessagesTypedDict", "ConversationRequest", "ConversationRequestAgentVersion", @@ -1068,7 +1172,6 @@ "ConversationRequestToolTypedDict", "ConversationRequestTypedDict", "ConversationResponse", - "ConversationResponseObject", "ConversationResponseTypedDict", "ConversationRestartRequest", "ConversationRestartRequestAgentVersion", @@ -1087,32 +1190,34 @@ "ConversationStreamRequestTool", "ConversationStreamRequestToolTypedDict", "ConversationStreamRequestTypedDict", + "ConversationThinkChunk", + "ConversationThinkChunkThinking", + "ConversationThinkChunkThinkingTypedDict", + "ConversationThinkChunkTypedDict", "ConversationUsageInfo", "ConversationUsageInfoTypedDict", - "CreateFineTuningJobResponse", - "CreateFineTuningJobResponseTypedDict", - "CreateOrUpdateAgentAliasRequest", - "CreateOrUpdateAgentAliasRequestTypedDict", - "DeleteAgentAliasRequest", - "DeleteAgentAliasRequestTypedDict", - "DeleteAgentRequest", - "DeleteAgentRequestTypedDict", - "DeleteConversationRequest", - "DeleteConversationRequestTypedDict", - "DeleteDocumentRequest", - "DeleteDocumentRequestTypedDict", - "DeleteFileOut", - "DeleteFileOutTypedDict", - "DeleteFileRequest", - 
"DeleteFileRequestTypedDict", - "DeleteLibraryAccessRequest", - "DeleteLibraryAccessRequestTypedDict", - "DeleteLibraryRequest", - "DeleteLibraryRequestTypedDict", + "CreateAgentRequest", + "CreateAgentRequestTool", + "CreateAgentRequestToolTypedDict", + "CreateAgentRequestTypedDict", + "CreateBatchJobRequest", + "CreateBatchJobRequestTypedDict", + "CreateFileResponse", + "CreateFileResponseTypedDict", + "CreateFineTuningJobRequest", + "CreateFineTuningJobRequestIntegration", + "CreateFineTuningJobRequestIntegrationTypedDict", + "CreateFineTuningJobRequestRepository", + "CreateFineTuningJobRequestRepositoryTypedDict", + "CreateFineTuningJobRequestTypedDict", + "CreateLibraryRequest", + "CreateLibraryRequestTypedDict", + "DeleteFileResponse", + "DeleteFileResponseTypedDict", "DeleteModelOut", "DeleteModelOutTypedDict", - "DeleteModelRequest", - "DeleteModelRequestTypedDict", + "DeleteModelV1ModelsModelIDDeleteRequest", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict", "DeltaMessage", "DeltaMessageContent", "DeltaMessageContentTypedDict", @@ -1120,20 +1225,15 @@ "Document", "DocumentLibraryTool", "DocumentLibraryToolTypedDict", - "DocumentOut", - "DocumentOutTypedDict", "DocumentTextContent", "DocumentTextContentTypedDict", "DocumentTypedDict", "DocumentURLChunk", - "DocumentURLChunkType", "DocumentURLChunkTypedDict", - "DocumentUpdateIn", - "DocumentUpdateInTypedDict", + "DocumentUnion", + "DocumentUnionTypedDict", "DocumentUpload", "DocumentUploadTypedDict", - "DownloadFileRequest", - "DownloadFileRequestTypedDict", "EmbeddingDtype", "EmbeddingRequest", "EmbeddingRequestInputs", @@ -1147,8 +1247,8 @@ "EntityType", "Entry", "EntryTypedDict", - "EventOut", - "EventOutTypedDict", + "Event", + "EventTypedDict", "FIMCompletionRequest", "FIMCompletionRequestStop", "FIMCompletionRequestStopTypedDict", @@ -1160,8 +1260,6 @@ "FIMCompletionStreamRequestStopTypedDict", "FIMCompletionStreamRequestTypedDict", "FTClassifierLossFunction", - "FTModelCapabilitiesOut", - 
"FTModelCapabilitiesOutTypedDict", "FTModelCard", "FTModelCardTypedDict", "File", @@ -1170,76 +1268,54 @@ "FilePurpose", "FileSchema", "FileSchemaTypedDict", - "FileSignedURL", - "FileSignedURLTypedDict", "FileTypedDict", + "FilesAPIRoutesDeleteFileRequest", + "FilesAPIRoutesDeleteFileRequestTypedDict", + "FilesAPIRoutesDownloadFileRequest", + "FilesAPIRoutesDownloadFileRequestTypedDict", + "FilesAPIRoutesGetSignedURLRequest", + "FilesAPIRoutesGetSignedURLRequestTypedDict", + "FilesAPIRoutesListFilesRequest", + "FilesAPIRoutesListFilesRequestTypedDict", + "FilesAPIRoutesRetrieveFileRequest", + "FilesAPIRoutesRetrieveFileRequestTypedDict", "FineTuneableModelType", + "FineTunedModelCapabilities", + "FineTunedModelCapabilitiesTypedDict", "Format", "Function", "FunctionCall", "FunctionCallEntry", "FunctionCallEntryArguments", "FunctionCallEntryArgumentsTypedDict", - "FunctionCallEntryObject", - "FunctionCallEntryType", + "FunctionCallEntryConfirmationStatus", "FunctionCallEntryTypedDict", "FunctionCallEvent", + "FunctionCallEventConfirmationStatus", "FunctionCallEventTypedDict", "FunctionCallTypedDict", "FunctionName", "FunctionNameTypedDict", "FunctionResultEntry", - "FunctionResultEntryObject", - "FunctionResultEntryType", "FunctionResultEntryTypedDict", "FunctionTool", "FunctionToolTypedDict", "FunctionTypedDict", - "GetAgentAgentVersion", - "GetAgentAgentVersionTypedDict", - "GetAgentRequest", - "GetAgentRequestTypedDict", - "GetAgentVersionRequest", - "GetAgentVersionRequestTypedDict", - "GetBatchJobRequest", - "GetBatchJobRequestTypedDict", - "GetConversationHistoryRequest", - "GetConversationHistoryRequestTypedDict", - "GetConversationMessagesRequest", - "GetConversationMessagesRequestTypedDict", - "GetConversationRequest", - "GetConversationRequestTypedDict", - "GetDocumentExtractedTextSignedURLRequest", - "GetDocumentExtractedTextSignedURLRequestTypedDict", - "GetDocumentRequest", - "GetDocumentRequestTypedDict", - "GetDocumentSignedURLRequest", - 
"GetDocumentSignedURLRequestTypedDict", - "GetDocumentStatusRequest", - "GetDocumentStatusRequestTypedDict", - "GetDocumentTextContentRequest", - "GetDocumentTextContentRequestTypedDict", - "GetFileSignedURLRequest", - "GetFileSignedURLRequestTypedDict", - "GetFineTuningJobRequest", - "GetFineTuningJobRequestTypedDict", - "GetFineTuningJobResponse", - "GetFineTuningJobResponseTypedDict", - "GetLibraryRequest", - "GetLibraryRequestTypedDict", + "GetFileResponse", + "GetFileResponseTypedDict", + "GetSignedURLResponse", + "GetSignedURLResponseTypedDict", + "GithubRepository", "GithubRepositoryIn", "GithubRepositoryInTypedDict", - "GithubRepositoryOut", - "GithubRepositoryOutTypedDict", - "HTTPValidationError", - "HTTPValidationErrorData", + "GithubRepositoryTypedDict", "Hyperparameters", "HyperparametersTypedDict", + "ImageDetail", "ImageGenerationTool", "ImageGenerationToolTypedDict", "ImageURL", "ImageURLChunk", - "ImageURLChunkType", "ImageURLChunkTypedDict", "ImageURLTypedDict", "ImageURLUnion", @@ -1247,64 +1323,94 @@ "InputEntries", "InputEntriesTypedDict", "Inputs", - "InputsMessage", - "InputsMessageTypedDict", "InputsTypedDict", "InstructRequest", - "InstructRequestInputs", - "InstructRequestInputsTypedDict", "InstructRequestMessage", "InstructRequestMessageTypedDict", "InstructRequestTypedDict", "JSONSchema", "JSONSchemaTypedDict", - "JobIn", - "JobInIntegration", - "JobInIntegrationTypedDict", - "JobInRepository", - "JobInRepositoryTypedDict", - "JobInTypedDict", - "JobMetadataOut", - "JobMetadataOutTypedDict", - "JobsOut", - "JobsOutData", - "JobsOutDataTypedDict", - "JobsOutTypedDict", - "LegacyJobMetadataOut", - "LegacyJobMetadataOutTypedDict", - "LibraryIn", - "LibraryInTypedDict", - "LibraryInUpdate", - "LibraryInUpdateTypedDict", - "LibraryOut", - "LibraryOutTypedDict", - "ListAgentAliasesRequest", - "ListAgentAliasesRequestTypedDict", - "ListAgentVersionsRequest", - "ListAgentVersionsRequestTypedDict", - "ListAgentsRequest", - 
"ListAgentsRequestTypedDict", - "ListBatchJobsRequest", - "ListBatchJobsRequestTypedDict", - "ListConversationsRequest", - "ListConversationsRequestTypedDict", - "ListConversationsResponse", - "ListConversationsResponseTypedDict", - "ListDocumentOut", - "ListDocumentOutTypedDict", - "ListDocumentsRequest", - "ListDocumentsRequestTypedDict", - "ListFilesOut", - "ListFilesOutTypedDict", - "ListFilesRequest", - "ListFilesRequestTypedDict", - "ListFineTuningJobsRequest", - "ListFineTuningJobsRequestTypedDict", - "ListFineTuningJobsStatus", - "ListLibraryAccessesRequest", - "ListLibraryAccessesRequestTypedDict", - "ListLibraryOut", - "ListLibraryOutTypedDict", + "JobMetadata", + "JobMetadataTypedDict", + "JobsAPIRoutesBatchCancelBatchJobRequest", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobRequest", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict", + "JobsAPIRoutesBatchGetBatchJobsRequest", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict", + "JobsAPIRoutesFineTuningGetFineTuningJobsStatus", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest", + "JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict", + 
"JobsAPIRoutesFineTuningStartFineTuningJobResponse", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + "LegacyJobMetadata", + "LegacyJobMetadataTypedDict", + "LibrariesDeleteV1Request", + "LibrariesDeleteV1RequestTypedDict", + "LibrariesDocumentsDeleteV1Request", + "LibrariesDocumentsDeleteV1RequestTypedDict", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetSignedURLV1Request", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict", + "LibrariesDocumentsGetStatusV1Request", + "LibrariesDocumentsGetStatusV1RequestTypedDict", + "LibrariesDocumentsGetTextContentV1Request", + "LibrariesDocumentsGetTextContentV1RequestTypedDict", + "LibrariesDocumentsGetV1Request", + "LibrariesDocumentsGetV1RequestTypedDict", + "LibrariesDocumentsListV1Request", + "LibrariesDocumentsListV1RequestTypedDict", + "LibrariesDocumentsReprocessV1Request", + "LibrariesDocumentsReprocessV1RequestTypedDict", + "LibrariesDocumentsUpdateV1Request", + "LibrariesDocumentsUpdateV1RequestTypedDict", + "LibrariesDocumentsUploadV1Request", + "LibrariesDocumentsUploadV1RequestTypedDict", + "LibrariesGetV1Request", + "LibrariesGetV1RequestTypedDict", + "LibrariesShareCreateV1Request", + "LibrariesShareCreateV1RequestTypedDict", + "LibrariesShareDeleteV1Request", + "LibrariesShareDeleteV1RequestTypedDict", + "LibrariesShareListV1Request", + "LibrariesShareListV1RequestTypedDict", + "LibrariesUpdateV1Request", + "LibrariesUpdateV1RequestTypedDict", + "Library", + "LibraryTypedDict", + "ListBatchJobsResponse", + 
"ListBatchJobsResponseTypedDict", + "ListDocumentsResponse", + "ListDocumentsResponseTypedDict", + "ListFilesResponse", + "ListFilesResponseTypedDict", + "ListFineTuningJobsResponse", + "ListFineTuningJobsResponseData", + "ListFineTuningJobsResponseDataTypedDict", + "ListFineTuningJobsResponseTypedDict", + "ListLibrariesResponse", + "ListLibrariesResponseTypedDict", "ListSharingOut", "ListSharingOutTypedDict", "Loc", @@ -1316,32 +1422,23 @@ "MessageInputEntry", "MessageInputEntryContent", "MessageInputEntryContentTypedDict", - "MessageInputEntryObject", - "MessageInputEntryRole", - "MessageInputEntryType", "MessageInputEntryTypedDict", "MessageOutputContentChunks", "MessageOutputContentChunksTypedDict", "MessageOutputEntry", "MessageOutputEntryContent", "MessageOutputEntryContentTypedDict", - "MessageOutputEntryObject", - "MessageOutputEntryRole", - "MessageOutputEntryType", "MessageOutputEntryTypedDict", "MessageOutputEvent", "MessageOutputEventContent", "MessageOutputEventContentTypedDict", - "MessageOutputEventRole", "MessageOutputEventTypedDict", - "MetricOut", - "MetricOutTypedDict", - "MistralError", + "Metric", + "MetricTypedDict", "MistralPromptMode", "ModelCapabilities", "ModelCapabilitiesTypedDict", "ModelConversation", - "ModelConversationObject", "ModelConversationTool", "ModelConversationToolTypedDict", "ModelConversationTypedDict", @@ -1355,7 +1452,6 @@ "ModerationResponseTypedDict", "MultiPartBodyParams", "MultiPartBodyParamsTypedDict", - "NoResponseError", "OCRImageObject", "OCRImageObjectTypedDict", "OCRPageDimensions", @@ -1387,17 +1483,24 @@ "RealtimeTranscriptionErrorDetailMessageTypedDict", "RealtimeTranscriptionErrorDetailTypedDict", "RealtimeTranscriptionErrorTypedDict", + "RealtimeTranscriptionInputAudioAppend", + "RealtimeTranscriptionInputAudioAppendTypedDict", + "RealtimeTranscriptionInputAudioEnd", + "RealtimeTranscriptionInputAudioEndTypedDict", + "RealtimeTranscriptionInputAudioFlush", + "RealtimeTranscriptionInputAudioFlushTypedDict", 
"RealtimeTranscriptionSession", "RealtimeTranscriptionSessionCreated", "RealtimeTranscriptionSessionCreatedTypedDict", "RealtimeTranscriptionSessionTypedDict", + "RealtimeTranscriptionSessionUpdateMessage", + "RealtimeTranscriptionSessionUpdateMessageTypedDict", + "RealtimeTranscriptionSessionUpdatePayload", + "RealtimeTranscriptionSessionUpdatePayloadTypedDict", "RealtimeTranscriptionSessionUpdated", "RealtimeTranscriptionSessionUpdatedTypedDict", "ReferenceChunk", - "ReferenceChunkType", "ReferenceChunkTypedDict", - "ReprocessDocumentRequest", - "ReprocessDocumentRequestTypedDict", "RequestSource", "Response", "ResponseDoneEvent", @@ -1414,18 +1517,9 @@ "ResponseTypedDict", "ResponseV1ConversationsGet", "ResponseV1ConversationsGetTypedDict", - "ResponseValidationError", - "RestartConversationRequest", - "RestartConversationRequestTypedDict", - "RestartConversationStreamRequest", - "RestartConversationStreamRequestTypedDict", - "RetrieveFileOut", - "RetrieveFileOutTypedDict", - "RetrieveFileRequest", - "RetrieveFileRequestTypedDict", - "RetrieveModelRequest", - "RetrieveModelRequestTypedDict", - "SDKError", + "RetrieveModelV1ModelsModelIDGetRequest", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict", + "Role", "SSETypes", "SampleType", "Security", @@ -1438,10 +1532,6 @@ "SharingOut", "SharingOutTypedDict", "Source", - "StartFineTuningJobRequest", - "StartFineTuningJobRequestTypedDict", - "StartFineTuningJobResponse", - "StartFineTuningJobResponseTypedDict", "SystemMessage", "SystemMessageContent", "SystemMessageContentChunks", @@ -1450,20 +1540,22 @@ "SystemMessageTypedDict", "TableFormat", "TextChunk", - "TextChunkType", "TextChunkTypedDict", "ThinkChunk", - "ThinkChunkType", + "ThinkChunkThinking", + "ThinkChunkThinkingTypedDict", "ThinkChunkTypedDict", - "Thinking", - "ThinkingTypedDict", "TimestampGranularity", "Tool", "ToolCall", + "ToolCallConfirmation", + "ToolCallConfirmationTypedDict", "ToolCallTypedDict", "ToolChoice", "ToolChoiceEnum", 
"ToolChoiceTypedDict", + "ToolConfiguration", + "ToolConfigurationTypedDict", "ToolExecutionDeltaEvent", "ToolExecutionDeltaEventName", "ToolExecutionDeltaEventNameTypedDict", @@ -1475,8 +1567,6 @@ "ToolExecutionEntry", "ToolExecutionEntryName", "ToolExecutionEntryNameTypedDict", - "ToolExecutionEntryObject", - "ToolExecutionEntryType", "ToolExecutionEntryTypedDict", "ToolExecutionStartedEvent", "ToolExecutionStartedEventName", @@ -1485,7 +1575,6 @@ "ToolFileChunk", "ToolFileChunkTool", "ToolFileChunkToolTypedDict", - "ToolFileChunkType", "ToolFileChunkTypedDict", "ToolMessage", "ToolMessageContent", @@ -1494,7 +1583,6 @@ "ToolReferenceChunk", "ToolReferenceChunkTool", "ToolReferenceChunkToolTypedDict", - "ToolReferenceChunkType", "ToolReferenceChunkTypedDict", "ToolTypedDict", "ToolTypes", @@ -1503,7 +1591,6 @@ "TranscriptionResponse", "TranscriptionResponseTypedDict", "TranscriptionSegmentChunk", - "TranscriptionSegmentChunkType", "TranscriptionSegmentChunkTypedDict", "TranscriptionStreamDone", "TranscriptionStreamDoneTypedDict", @@ -1518,30 +1605,37 @@ "TranscriptionStreamSegmentDeltaTypedDict", "TranscriptionStreamTextDelta", "TranscriptionStreamTextDeltaTypedDict", - "UnarchiveFTModelOut", - "UnarchiveFTModelOutTypedDict", - "UnarchiveModelRequest", - "UnarchiveModelRequestTypedDict", + "UnarchiveModelResponse", + "UnarchiveModelResponseTypedDict", + "UnknownAgentTool", + "UnknownClassifierFineTuningJobDetailsIntegration", + "UnknownClassifierFineTuningJobIntegration", + "UnknownCompletionFineTuningJobDetailsIntegration", + "UnknownCompletionFineTuningJobDetailsRepository", + "UnknownCompletionFineTuningJobIntegration", + "UnknownCompletionFineTuningJobRepository", + "UnknownContentChunk", + "UnknownConversationEventsData", + "UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse", + "UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse", + "UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse", + 
"UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + "UnknownListFineTuningJobsResponseData", + "UnknownModelConversationTool", + "UnknownModelListData", + "UnknownResponse", + "UnknownResponseRetrieveModelV1ModelsModelIDGet", + "UnknownTranscriptionStreamEventsData", "UpdateAgentRequest", + "UpdateAgentRequestTool", + "UpdateAgentRequestToolTypedDict", "UpdateAgentRequestTypedDict", - "UpdateAgentVersionRequest", - "UpdateAgentVersionRequestTypedDict", "UpdateDocumentRequest", "UpdateDocumentRequestTypedDict", - "UpdateFTModelIn", - "UpdateFTModelInTypedDict", "UpdateLibraryRequest", "UpdateLibraryRequestTypedDict", "UpdateModelRequest", "UpdateModelRequestTypedDict", - "UpdateModelResponse", - "UpdateModelResponseTypedDict", - "UpdateOrCreateLibraryAccessRequest", - "UpdateOrCreateLibraryAccessRequestTypedDict", - "UploadDocumentRequest", - "UploadDocumentRequestTypedDict", - "UploadFileOut", - "UploadFileOutTypedDict", "UsageInfo", "UsageInfoTypedDict", "UserMessage", @@ -1551,8 +1645,8 @@ "ValidationError", "ValidationErrorTypedDict", "WandbIntegration", - "WandbIntegrationOut", - "WandbIntegrationOutTypedDict", + "WandbIntegrationResult", + "WandbIntegrationResultTypedDict", "WandbIntegrationTypedDict", "WebSearchPremiumTool", "WebSearchPremiumToolTypedDict", @@ -1562,29 +1656,66 @@ _dynamic_imports: dict[str, str] = { "Agent": ".agent", - "AgentObject": ".agent", "AgentTool": ".agent", "AgentToolTypedDict": ".agent", "AgentTypedDict": ".agent", + "UnknownAgentTool": ".agent", "AgentAliasResponse": ".agentaliasresponse", "AgentAliasResponseTypedDict": ".agentaliasresponse", "AgentConversation": ".agentconversation", "AgentConversationAgentVersion": ".agentconversation", "AgentConversationAgentVersionTypedDict": ".agentconversation", - "AgentConversationObject": ".agentconversation", "AgentConversationTypedDict": ".agentconversation", - "AgentCreationRequest": ".agentcreationrequest", - "AgentCreationRequestTool": ".agentcreationrequest", - 
"AgentCreationRequestToolTypedDict": ".agentcreationrequest", - "AgentCreationRequestTypedDict": ".agentcreationrequest", "AgentHandoffDoneEvent": ".agenthandoffdoneevent", "AgentHandoffDoneEventTypedDict": ".agenthandoffdoneevent", "AgentHandoffEntry": ".agenthandoffentry", - "AgentHandoffEntryObject": ".agenthandoffentry", - "AgentHandoffEntryType": ".agenthandoffentry", "AgentHandoffEntryTypedDict": ".agenthandoffentry", "AgentHandoffStartedEvent": ".agenthandoffstartedevent", "AgentHandoffStartedEventTypedDict": ".agenthandoffstartedevent", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequest": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict": ".agents_api_v1_agents_create_or_update_aliasop", + "AgentsAPIV1AgentsDeleteAliasRequest": ".agents_api_v1_agents_delete_aliasop", + "AgentsAPIV1AgentsDeleteAliasRequestTypedDict": ".agents_api_v1_agents_delete_aliasop", + "AgentsAPIV1AgentsDeleteRequest": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsDeleteRequestTypedDict": ".agents_api_v1_agents_deleteop", + "AgentsAPIV1AgentsGetVersionRequest": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetVersionRequestTypedDict": ".agents_api_v1_agents_get_versionop", + "AgentsAPIV1AgentsGetAgentVersion": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetAgentVersionTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetRequest": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsGetRequestTypedDict": ".agents_api_v1_agents_getop", + "AgentsAPIV1AgentsListVersionAliasesRequest": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionAliasesRequestTypedDict": ".agents_api_v1_agents_list_version_aliasesop", + "AgentsAPIV1AgentsListVersionsRequest": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListVersionsRequestTypedDict": ".agents_api_v1_agents_list_versionsop", + "AgentsAPIV1AgentsListRequest": ".agents_api_v1_agents_listop", + 
"AgentsAPIV1AgentsListRequestTypedDict": ".agents_api_v1_agents_listop", + "AgentsAPIV1AgentsUpdateVersionRequest": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateVersionRequestTypedDict": ".agents_api_v1_agents_update_versionop", + "AgentsAPIV1AgentsUpdateRequest": ".agents_api_v1_agents_updateop", + "AgentsAPIV1AgentsUpdateRequestTypedDict": ".agents_api_v1_agents_updateop", + "AgentsAPIV1ConversationsAppendStreamRequest": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendStreamRequestTypedDict": ".agents_api_v1_conversations_append_streamop", + "AgentsAPIV1ConversationsAppendRequest": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsAppendRequestTypedDict": ".agents_api_v1_conversations_appendop", + "AgentsAPIV1ConversationsDeleteRequest": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsDeleteRequestTypedDict": ".agents_api_v1_conversations_deleteop", + "AgentsAPIV1ConversationsGetRequest": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsGetRequestTypedDict": ".agents_api_v1_conversations_getop", + "ResponseV1ConversationsGet": ".agents_api_v1_conversations_getop", + "ResponseV1ConversationsGetTypedDict": ".agents_api_v1_conversations_getop", + "AgentsAPIV1ConversationsHistoryRequest": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsHistoryRequestTypedDict": ".agents_api_v1_conversations_historyop", + "AgentsAPIV1ConversationsListRequest": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListRequestTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponse": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsListResponseTypedDict": ".agents_api_v1_conversations_listop", + "AgentsAPIV1ConversationsMessagesRequest": ".agents_api_v1_conversations_messagesop", + "AgentsAPIV1ConversationsMessagesRequestTypedDict": ".agents_api_v1_conversations_messagesop", + 
"AgentsAPIV1ConversationsRestartStreamRequest": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartStreamRequestTypedDict": ".agents_api_v1_conversations_restart_streamop", + "AgentsAPIV1ConversationsRestartRequest": ".agents_api_v1_conversations_restartop", + "AgentsAPIV1ConversationsRestartRequestTypedDict": ".agents_api_v1_conversations_restartop", "AgentsCompletionRequest": ".agentscompletionrequest", "AgentsCompletionRequestMessage": ".agentscompletionrequest", "AgentsCompletionRequestMessageTypedDict": ".agentscompletionrequest", @@ -1601,23 +1732,12 @@ "AgentsCompletionStreamRequestToolChoice": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestToolChoiceTypedDict": ".agentscompletionstreamrequest", "AgentsCompletionStreamRequestTypedDict": ".agentscompletionstreamrequest", - "AgentUpdateRequest": ".agentupdaterequest", - "AgentUpdateRequestTool": ".agentupdaterequest", - "AgentUpdateRequestToolTypedDict": ".agentupdaterequest", - "AgentUpdateRequestTypedDict": ".agentupdaterequest", "APIEndpoint": ".apiendpoint", - "AppendConversationRequest": ".appendconversationop", - "AppendConversationRequestTypedDict": ".appendconversationop", - "AppendConversationStreamRequest": ".appendconversationstreamop", - "AppendConversationStreamRequestTypedDict": ".appendconversationstreamop", - "ArchiveFTModelOut": ".archiveftmodelout", - "ArchiveFTModelOutTypedDict": ".archiveftmodelout", - "ArchiveModelRequest": ".archivemodelop", - "ArchiveModelRequestTypedDict": ".archivemodelop", + "ArchiveModelResponse": ".archivemodelresponse", + "ArchiveModelResponseTypedDict": ".archivemodelresponse", "AssistantMessage": ".assistantmessage", "AssistantMessageContent": ".assistantmessage", "AssistantMessageContentTypedDict": ".assistantmessage", - "AssistantMessageRole": ".assistantmessage", "AssistantMessageTypedDict": ".assistantmessage", "AudioChunk": ".audiochunk", "AudioChunkTypedDict": ".audiochunk", @@ -1632,22 +1752,12 @@ 
"BaseModelCardTypedDict": ".basemodelcard", "BatchError": ".batcherror", "BatchErrorTypedDict": ".batcherror", - "BatchJobIn": ".batchjobin", - "BatchJobInTypedDict": ".batchjobin", - "BatchJobOut": ".batchjobout", - "BatchJobOutTypedDict": ".batchjobout", - "BatchJobsOut": ".batchjobsout", - "BatchJobsOutTypedDict": ".batchjobsout", + "BatchJob": ".batchjob", + "BatchJobTypedDict": ".batchjob", "BatchJobStatus": ".batchjobstatus", "BatchRequest": ".batchrequest", "BatchRequestTypedDict": ".batchrequest", "BuiltInConnectors": ".builtinconnectors", - "CancelBatchJobRequest": ".cancelbatchjobop", - "CancelBatchJobRequestTypedDict": ".cancelbatchjobop", - "CancelFineTuningJobRequest": ".cancelfinetuningjobop", - "CancelFineTuningJobRequestTypedDict": ".cancelfinetuningjobop", - "CancelFineTuningJobResponse": ".cancelfinetuningjobop", - "CancelFineTuningJobResponseTypedDict": ".cancelfinetuningjobop", "ChatClassificationRequest": ".chatclassificationrequest", "ChatClassificationRequestTypedDict": ".chatclassificationrequest", "ChatCompletionChoice": ".chatcompletionchoice", @@ -1679,8 +1789,8 @@ "ChatModerationRequestInputs3": ".chatmoderationrequest", "ChatModerationRequestInputs3TypedDict": ".chatmoderationrequest", "ChatModerationRequestTypedDict": ".chatmoderationrequest", - "CheckpointOut": ".checkpointout", - "CheckpointOutTypedDict": ".checkpointout", + "Checkpoint": ".checkpoint", + "CheckpointTypedDict": ".checkpoint", "ClassificationRequest": ".classificationrequest", "ClassificationRequestInputs": ".classificationrequest", "ClassificationRequestInputsTypedDict": ".classificationrequest", @@ -1689,26 +1799,26 @@ "ClassificationResponseTypedDict": ".classificationresponse", "ClassificationTargetResult": ".classificationtargetresult", "ClassificationTargetResultTypedDict": ".classificationtargetresult", - "ClassifierDetailedJobOut": ".classifierdetailedjobout", - "ClassifierDetailedJobOutIntegration": ".classifierdetailedjobout", - 
"ClassifierDetailedJobOutIntegrationTypedDict": ".classifierdetailedjobout", - "ClassifierDetailedJobOutStatus": ".classifierdetailedjobout", - "ClassifierDetailedJobOutTypedDict": ".classifierdetailedjobout", - "ClassifierFTModelOut": ".classifierftmodelout", - "ClassifierFTModelOutTypedDict": ".classifierftmodelout", - "ClassifierJobOut": ".classifierjobout", - "ClassifierJobOutIntegration": ".classifierjobout", - "ClassifierJobOutIntegrationTypedDict": ".classifierjobout", - "ClassifierJobOutStatus": ".classifierjobout", - "ClassifierJobOutTypedDict": ".classifierjobout", - "ClassifierTargetIn": ".classifiertargetin", - "ClassifierTargetInTypedDict": ".classifiertargetin", - "ClassifierTargetOut": ".classifiertargetout", - "ClassifierTargetOutTypedDict": ".classifiertargetout", + "ClassifierFineTunedModel": ".classifierfinetunedmodel", + "ClassifierFineTunedModelTypedDict": ".classifierfinetunedmodel", + "ClassifierFineTuningJob": ".classifierfinetuningjob", + "ClassifierFineTuningJobIntegration": ".classifierfinetuningjob", + "ClassifierFineTuningJobIntegrationTypedDict": ".classifierfinetuningjob", + "ClassifierFineTuningJobStatus": ".classifierfinetuningjob", + "ClassifierFineTuningJobTypedDict": ".classifierfinetuningjob", + "UnknownClassifierFineTuningJobIntegration": ".classifierfinetuningjob", + "ClassifierFineTuningJobDetails": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsIntegration": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsIntegrationTypedDict": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsStatus": ".classifierfinetuningjobdetails", + "ClassifierFineTuningJobDetailsTypedDict": ".classifierfinetuningjobdetails", + "UnknownClassifierFineTuningJobDetailsIntegration": ".classifierfinetuningjobdetails", + "ClassifierTarget": ".classifiertarget", + "ClassifierTargetTypedDict": ".classifiertarget", + "ClassifierTargetResult": ".classifiertargetresult", + 
"ClassifierTargetResultTypedDict": ".classifiertargetresult", "ClassifierTrainingParameters": ".classifiertrainingparameters", "ClassifierTrainingParametersTypedDict": ".classifiertrainingparameters", - "ClassifierTrainingParametersIn": ".classifiertrainingparametersin", - "ClassifierTrainingParametersInTypedDict": ".classifiertrainingparametersin", "CodeInterpreterTool": ".codeinterpretertool", "CodeInterpreterToolTypedDict": ".codeinterpretertool", "CompletionArgs": ".completionargs", @@ -1717,33 +1827,36 @@ "CompletionArgsStopTypedDict": ".completionargsstop", "CompletionChunk": ".completionchunk", "CompletionChunkTypedDict": ".completionchunk", - "CompletionDetailedJobOut": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegration": ".completiondetailedjobout", - "CompletionDetailedJobOutIntegrationTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutRepository": ".completiondetailedjobout", - "CompletionDetailedJobOutRepositoryTypedDict": ".completiondetailedjobout", - "CompletionDetailedJobOutStatus": ".completiondetailedjobout", - "CompletionDetailedJobOutTypedDict": ".completiondetailedjobout", "CompletionEvent": ".completionevent", "CompletionEventTypedDict": ".completionevent", - "CompletionFTModelOut": ".completionftmodelout", - "CompletionFTModelOutTypedDict": ".completionftmodelout", - "CompletionJobOut": ".completionjobout", - "CompletionJobOutIntegration": ".completionjobout", - "CompletionJobOutIntegrationTypedDict": ".completionjobout", - "CompletionJobOutRepository": ".completionjobout", - "CompletionJobOutRepositoryTypedDict": ".completionjobout", - "CompletionJobOutStatus": ".completionjobout", - "CompletionJobOutTypedDict": ".completionjobout", + "CompletionFineTunedModel": ".completionfinetunedmodel", + "CompletionFineTunedModelTypedDict": ".completionfinetunedmodel", + "CompletionFineTuningJob": ".completionfinetuningjob", + "CompletionFineTuningJobIntegration": ".completionfinetuningjob", + 
"CompletionFineTuningJobIntegrationTypedDict": ".completionfinetuningjob", + "CompletionFineTuningJobRepository": ".completionfinetuningjob", + "CompletionFineTuningJobRepositoryTypedDict": ".completionfinetuningjob", + "CompletionFineTuningJobStatus": ".completionfinetuningjob", + "CompletionFineTuningJobTypedDict": ".completionfinetuningjob", + "UnknownCompletionFineTuningJobIntegration": ".completionfinetuningjob", + "UnknownCompletionFineTuningJobRepository": ".completionfinetuningjob", + "CompletionFineTuningJobDetails": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsIntegration": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsIntegrationTypedDict": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsRepository": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsRepositoryTypedDict": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsStatus": ".completionfinetuningjobdetails", + "CompletionFineTuningJobDetailsTypedDict": ".completionfinetuningjobdetails", + "UnknownCompletionFineTuningJobDetailsIntegration": ".completionfinetuningjobdetails", + "UnknownCompletionFineTuningJobDetailsRepository": ".completionfinetuningjobdetails", "CompletionResponseStreamChoice": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceFinishReason": ".completionresponsestreamchoice", "CompletionResponseStreamChoiceTypedDict": ".completionresponsestreamchoice", "CompletionTrainingParameters": ".completiontrainingparameters", "CompletionTrainingParametersTypedDict": ".completiontrainingparameters", - "CompletionTrainingParametersIn": ".completiontrainingparametersin", - "CompletionTrainingParametersInTypedDict": ".completiontrainingparametersin", "ContentChunk": ".contentchunk", "ContentChunkTypedDict": ".contentchunk", + "UnknownContentChunk": ".contentchunk", "ConversationAppendRequest": ".conversationappendrequest", "ConversationAppendRequestHandoffExecution": 
".conversationappendrequest", "ConversationAppendRequestTypedDict": ".conversationappendrequest", @@ -1754,15 +1867,14 @@ "ConversationEventsData": ".conversationevents", "ConversationEventsDataTypedDict": ".conversationevents", "ConversationEventsTypedDict": ".conversationevents", + "UnknownConversationEventsData": ".conversationevents", "ConversationHistory": ".conversationhistory", - "ConversationHistoryObject": ".conversationhistory", "ConversationHistoryTypedDict": ".conversationhistory", "Entry": ".conversationhistory", "EntryTypedDict": ".conversationhistory", "ConversationInputs": ".conversationinputs", "ConversationInputsTypedDict": ".conversationinputs", "ConversationMessages": ".conversationmessages", - "ConversationMessagesObject": ".conversationmessages", "ConversationMessagesTypedDict": ".conversationmessages", "ConversationRequest": ".conversationrequest", "ConversationRequestAgentVersion": ".conversationrequest", @@ -1772,7 +1884,6 @@ "ConversationRequestToolTypedDict": ".conversationrequest", "ConversationRequestTypedDict": ".conversationrequest", "ConversationResponse": ".conversationresponse", - "ConversationResponseObject": ".conversationresponse", "ConversationResponseTypedDict": ".conversationresponse", "Output": ".conversationresponse", "OutputTypedDict": ".conversationresponse", @@ -1793,53 +1904,48 @@ "ConversationStreamRequestTool": ".conversationstreamrequest", "ConversationStreamRequestToolTypedDict": ".conversationstreamrequest", "ConversationStreamRequestTypedDict": ".conversationstreamrequest", + "ConversationThinkChunk": ".conversationthinkchunk", + "ConversationThinkChunkThinking": ".conversationthinkchunk", + "ConversationThinkChunkThinkingTypedDict": ".conversationthinkchunk", + "ConversationThinkChunkTypedDict": ".conversationthinkchunk", "ConversationUsageInfo": ".conversationusageinfo", "ConversationUsageInfoTypedDict": ".conversationusageinfo", - "CreateFineTuningJobResponse": ".createfinetuningjobop", - 
"CreateFineTuningJobResponseTypedDict": ".createfinetuningjobop", - "Response": ".createfinetuningjobop", - "ResponseTypedDict": ".createfinetuningjobop", - "CreateOrUpdateAgentAliasRequest": ".createorupdateagentaliasop", - "CreateOrUpdateAgentAliasRequestTypedDict": ".createorupdateagentaliasop", - "DeleteAgentAliasRequest": ".deleteagentaliasop", - "DeleteAgentAliasRequestTypedDict": ".deleteagentaliasop", - "DeleteAgentRequest": ".deleteagentop", - "DeleteAgentRequestTypedDict": ".deleteagentop", - "DeleteConversationRequest": ".deleteconversationop", - "DeleteConversationRequestTypedDict": ".deleteconversationop", - "DeleteDocumentRequest": ".deletedocumentop", - "DeleteDocumentRequestTypedDict": ".deletedocumentop", - "DeleteFileRequest": ".deletefileop", - "DeleteFileRequestTypedDict": ".deletefileop", - "DeleteFileOut": ".deletefileout", - "DeleteFileOutTypedDict": ".deletefileout", - "DeleteLibraryAccessRequest": ".deletelibraryaccessop", - "DeleteLibraryAccessRequestTypedDict": ".deletelibraryaccessop", - "DeleteLibraryRequest": ".deletelibraryop", - "DeleteLibraryRequestTypedDict": ".deletelibraryop", - "DeleteModelRequest": ".deletemodelop", - "DeleteModelRequestTypedDict": ".deletemodelop", + "CreateAgentRequest": ".createagentrequest", + "CreateAgentRequestTool": ".createagentrequest", + "CreateAgentRequestToolTypedDict": ".createagentrequest", + "CreateAgentRequestTypedDict": ".createagentrequest", + "CreateBatchJobRequest": ".createbatchjobrequest", + "CreateBatchJobRequestTypedDict": ".createbatchjobrequest", + "CreateFileResponse": ".createfileresponse", + "CreateFileResponseTypedDict": ".createfileresponse", + "CreateFineTuningJobRequest": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestIntegration": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestIntegrationTypedDict": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestRepository": ".createfinetuningjobrequest", + "CreateFineTuningJobRequestRepositoryTypedDict": 
".createfinetuningjobrequest", + "CreateFineTuningJobRequestTypedDict": ".createfinetuningjobrequest", + "Hyperparameters": ".createfinetuningjobrequest", + "HyperparametersTypedDict": ".createfinetuningjobrequest", + "CreateLibraryRequest": ".createlibraryrequest", + "CreateLibraryRequestTypedDict": ".createlibraryrequest", + "DeleteModelV1ModelsModelIDDeleteRequest": ".delete_model_v1_models_model_id_deleteop", + "DeleteModelV1ModelsModelIDDeleteRequestTypedDict": ".delete_model_v1_models_model_id_deleteop", + "DeleteFileResponse": ".deletefileresponse", + "DeleteFileResponseTypedDict": ".deletefileresponse", "DeleteModelOut": ".deletemodelout", "DeleteModelOutTypedDict": ".deletemodelout", "DeltaMessage": ".deltamessage", "DeltaMessageContent": ".deltamessage", "DeltaMessageContentTypedDict": ".deltamessage", "DeltaMessageTypedDict": ".deltamessage", + "Document": ".document", + "DocumentTypedDict": ".document", "DocumentLibraryTool": ".documentlibrarytool", "DocumentLibraryToolTypedDict": ".documentlibrarytool", - "DocumentOut": ".documentout", - "DocumentOutTypedDict": ".documentout", "DocumentTextContent": ".documenttextcontent", "DocumentTextContentTypedDict": ".documenttextcontent", - "Attributes": ".documentupdatein", - "AttributesTypedDict": ".documentupdatein", - "DocumentUpdateIn": ".documentupdatein", - "DocumentUpdateInTypedDict": ".documentupdatein", "DocumentURLChunk": ".documenturlchunk", - "DocumentURLChunkType": ".documenturlchunk", "DocumentURLChunkTypedDict": ".documenturlchunk", - "DownloadFileRequest": ".downloadfileop", - "DownloadFileRequestTypedDict": ".downloadfileop", "EmbeddingDtype": ".embeddingdtype", "EmbeddingRequest": ".embeddingrequest", "EmbeddingRequestInputs": ".embeddingrequest", @@ -1851,17 +1957,27 @@ "EmbeddingResponseDataTypedDict": ".embeddingresponsedata", "EncodingFormat": ".encodingformat", "EntityType": ".entitytype", - "EventOut": ".eventout", - "EventOutTypedDict": ".eventout", + "Event": ".event", + 
"EventTypedDict": ".event", "File": ".file", "FileTypedDict": ".file", "FileChunk": ".filechunk", "FileChunkTypedDict": ".filechunk", "FilePurpose": ".filepurpose", + "FilesAPIRoutesDeleteFileRequest": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDeleteFileRequestTypedDict": ".files_api_routes_delete_fileop", + "FilesAPIRoutesDownloadFileRequest": ".files_api_routes_download_fileop", + "FilesAPIRoutesDownloadFileRequestTypedDict": ".files_api_routes_download_fileop", + "FilesAPIRoutesGetSignedURLRequest": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesGetSignedURLRequestTypedDict": ".files_api_routes_get_signed_urlop", + "FilesAPIRoutesListFilesRequest": ".files_api_routes_list_filesop", + "FilesAPIRoutesListFilesRequestTypedDict": ".files_api_routes_list_filesop", + "FilesAPIRoutesRetrieveFileRequest": ".files_api_routes_retrieve_fileop", + "FilesAPIRoutesRetrieveFileRequestTypedDict": ".files_api_routes_retrieve_fileop", + "MultiPartBodyParams": ".files_api_routes_upload_fileop", + "MultiPartBodyParamsTypedDict": ".files_api_routes_upload_fileop", "FileSchema": ".fileschema", "FileSchemaTypedDict": ".fileschema", - "FileSignedURL": ".filesignedurl", - "FileSignedURLTypedDict": ".filesignedurl", "FIMCompletionRequest": ".fimcompletionrequest", "FIMCompletionRequestStop": ".fimcompletionrequest", "FIMCompletionRequestStopTypedDict": ".fimcompletionrequest", @@ -1873,9 +1989,9 @@ "FIMCompletionStreamRequestStopTypedDict": ".fimcompletionstreamrequest", "FIMCompletionStreamRequestTypedDict": ".fimcompletionstreamrequest", "FineTuneableModelType": ".finetuneablemodeltype", + "FineTunedModelCapabilities": ".finetunedmodelcapabilities", + "FineTunedModelCapabilitiesTypedDict": ".finetunedmodelcapabilities", "FTClassifierLossFunction": ".ftclassifierlossfunction", - "FTModelCapabilitiesOut": ".ftmodelcapabilitiesout", - "FTModelCapabilitiesOutTypedDict": ".ftmodelcapabilitiesout", "FTModelCard": ".ftmodelcard", "FTModelCardTypedDict": ".ftmodelcard", 
"Function": ".function", @@ -1885,134 +2001,138 @@ "FunctionCall": ".functioncall", "FunctionCallTypedDict": ".functioncall", "FunctionCallEntry": ".functioncallentry", - "FunctionCallEntryObject": ".functioncallentry", - "FunctionCallEntryType": ".functioncallentry", + "FunctionCallEntryConfirmationStatus": ".functioncallentry", "FunctionCallEntryTypedDict": ".functioncallentry", "FunctionCallEntryArguments": ".functioncallentryarguments", "FunctionCallEntryArgumentsTypedDict": ".functioncallentryarguments", "FunctionCallEvent": ".functioncallevent", + "FunctionCallEventConfirmationStatus": ".functioncallevent", "FunctionCallEventTypedDict": ".functioncallevent", "FunctionName": ".functionname", "FunctionNameTypedDict": ".functionname", "FunctionResultEntry": ".functionresultentry", - "FunctionResultEntryObject": ".functionresultentry", - "FunctionResultEntryType": ".functionresultentry", "FunctionResultEntryTypedDict": ".functionresultentry", "FunctionTool": ".functiontool", "FunctionToolTypedDict": ".functiontool", - "GetAgentAgentVersion": ".getagentop", - "GetAgentAgentVersionTypedDict": ".getagentop", - "GetAgentRequest": ".getagentop", - "GetAgentRequestTypedDict": ".getagentop", - "GetAgentVersionRequest": ".getagentversionop", - "GetAgentVersionRequestTypedDict": ".getagentversionop", - "GetBatchJobRequest": ".getbatchjobop", - "GetBatchJobRequestTypedDict": ".getbatchjobop", - "GetConversationHistoryRequest": ".getconversationhistoryop", - "GetConversationHistoryRequestTypedDict": ".getconversationhistoryop", - "GetConversationMessagesRequest": ".getconversationmessagesop", - "GetConversationMessagesRequestTypedDict": ".getconversationmessagesop", - "GetConversationRequest": ".getconversationop", - "GetConversationRequestTypedDict": ".getconversationop", - "ResponseV1ConversationsGet": ".getconversationop", - "ResponseV1ConversationsGetTypedDict": ".getconversationop", - "GetDocumentExtractedTextSignedURLRequest": ".getdocumentextractedtextsignedurlop", - 
"GetDocumentExtractedTextSignedURLRequestTypedDict": ".getdocumentextractedtextsignedurlop", - "GetDocumentRequest": ".getdocumentop", - "GetDocumentRequestTypedDict": ".getdocumentop", - "GetDocumentSignedURLRequest": ".getdocumentsignedurlop", - "GetDocumentSignedURLRequestTypedDict": ".getdocumentsignedurlop", - "GetDocumentStatusRequest": ".getdocumentstatusop", - "GetDocumentStatusRequestTypedDict": ".getdocumentstatusop", - "GetDocumentTextContentRequest": ".getdocumenttextcontentop", - "GetDocumentTextContentRequestTypedDict": ".getdocumenttextcontentop", - "GetFileSignedURLRequest": ".getfilesignedurlop", - "GetFileSignedURLRequestTypedDict": ".getfilesignedurlop", - "GetFineTuningJobRequest": ".getfinetuningjobop", - "GetFineTuningJobRequestTypedDict": ".getfinetuningjobop", - "GetFineTuningJobResponse": ".getfinetuningjobop", - "GetFineTuningJobResponseTypedDict": ".getfinetuningjobop", - "GetLibraryRequest": ".getlibraryop", - "GetLibraryRequestTypedDict": ".getlibraryop", + "GetFileResponse": ".getfileresponse", + "GetFileResponseTypedDict": ".getfileresponse", + "GetSignedURLResponse": ".getsignedurlresponse", + "GetSignedURLResponseTypedDict": ".getsignedurlresponse", + "GithubRepository": ".githubrepository", + "GithubRepositoryTypedDict": ".githubrepository", "GithubRepositoryIn": ".githubrepositoryin", "GithubRepositoryInTypedDict": ".githubrepositoryin", - "GithubRepositoryOut": ".githubrepositoryout", - "GithubRepositoryOutTypedDict": ".githubrepositoryout", - "HTTPValidationError": ".httpvalidationerror", - "HTTPValidationErrorData": ".httpvalidationerror", + "ImageDetail": ".imagedetail", "ImageGenerationTool": ".imagegenerationtool", "ImageGenerationToolTypedDict": ".imagegenerationtool", "ImageURL": ".imageurl", "ImageURLTypedDict": ".imageurl", "ImageURLChunk": ".imageurlchunk", - "ImageURLChunkType": ".imageurlchunk", "ImageURLChunkTypedDict": ".imageurlchunk", "ImageURLUnion": ".imageurlchunk", "ImageURLUnionTypedDict": ".imageurlchunk", 
"InputEntries": ".inputentries", "InputEntriesTypedDict": ".inputentries", "Inputs": ".inputs", - "InputsMessage": ".inputs", - "InputsMessageTypedDict": ".inputs", "InputsTypedDict": ".inputs", - "InstructRequestInputs": ".inputs", - "InstructRequestInputsTypedDict": ".inputs", "InstructRequest": ".instructrequest", "InstructRequestMessage": ".instructrequest", "InstructRequestMessageTypedDict": ".instructrequest", "InstructRequestTypedDict": ".instructrequest", - "Hyperparameters": ".jobin", - "HyperparametersTypedDict": ".jobin", - "JobIn": ".jobin", - "JobInIntegration": ".jobin", - "JobInIntegrationTypedDict": ".jobin", - "JobInRepository": ".jobin", - "JobInRepositoryTypedDict": ".jobin", - "JobInTypedDict": ".jobin", - "JobMetadataOut": ".jobmetadataout", - "JobMetadataOutTypedDict": ".jobmetadataout", - "JobsOut": ".jobsout", - "JobsOutData": ".jobsout", - "JobsOutDataTypedDict": ".jobsout", - "JobsOutTypedDict": ".jobsout", + "JobMetadata": ".jobmetadata", + "JobMetadataTypedDict": ".jobmetadata", + "JobsAPIRoutesBatchCancelBatchJobRequest": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchCancelBatchJobRequestTypedDict": ".jobs_api_routes_batch_cancel_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequest": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobop", + "JobsAPIRoutesBatchGetBatchJobsRequest": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesBatchGetBatchJobsRequestTypedDict": ".jobs_api_routes_batch_get_batch_jobsop", + "OrderBy": ".jobs_api_routes_batch_get_batch_jobsop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_archive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningCancelFineTuningJobRequest": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + 
"JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse": ".jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "Response": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "ResponseTypedDict": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "UnknownResponse": ".jobs_api_routes_fine_tuning_create_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequest": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningGetFineTuningJobsStatus": ".jobs_api_routes_fine_tuning_get_fine_tuning_jobsop", + "JobsAPIRoutesFineTuningStartFineTuningJobRequest": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + 
"JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse": ".jobs_api_routes_fine_tuning_start_fine_tuning_jobop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequest": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", + "UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse": ".jobs_api_routes_fine_tuning_update_fine_tuned_modelop", "JSONSchema": ".jsonschema", "JSONSchemaTypedDict": ".jsonschema", - "LegacyJobMetadataOut": ".legacyjobmetadataout", - "LegacyJobMetadataOutTypedDict": ".legacyjobmetadataout", - "LibraryIn": ".libraryin", - "LibraryInTypedDict": ".libraryin", - "LibraryInUpdate": ".libraryinupdate", - "LibraryInUpdateTypedDict": ".libraryinupdate", - "LibraryOut": ".libraryout", - "LibraryOutTypedDict": ".libraryout", - "ListAgentAliasesRequest": ".listagentaliasesop", - "ListAgentAliasesRequestTypedDict": ".listagentaliasesop", - "ListAgentsRequest": ".listagentsop", - "ListAgentsRequestTypedDict": ".listagentsop", - "ListAgentVersionsRequest": ".listagentversionsop", - 
"ListAgentVersionsRequestTypedDict": ".listagentversionsop", - "ListBatchJobsRequest": ".listbatchjobsop", - "ListBatchJobsRequestTypedDict": ".listbatchjobsop", - "OrderBy": ".listbatchjobsop", - "ListConversationsRequest": ".listconversationsop", - "ListConversationsRequestTypedDict": ".listconversationsop", - "ListConversationsResponse": ".listconversationsop", - "ListConversationsResponseTypedDict": ".listconversationsop", - "ListDocumentOut": ".listdocumentout", - "ListDocumentOutTypedDict": ".listdocumentout", - "ListDocumentsRequest": ".listdocumentsop", - "ListDocumentsRequestTypedDict": ".listdocumentsop", - "ListFilesRequest": ".listfilesop", - "ListFilesRequestTypedDict": ".listfilesop", - "ListFilesOut": ".listfilesout", - "ListFilesOutTypedDict": ".listfilesout", - "ListFineTuningJobsRequest": ".listfinetuningjobsop", - "ListFineTuningJobsRequestTypedDict": ".listfinetuningjobsop", - "ListFineTuningJobsStatus": ".listfinetuningjobsop", - "ListLibraryAccessesRequest": ".listlibraryaccessesop", - "ListLibraryAccessesRequestTypedDict": ".listlibraryaccessesop", - "ListLibraryOut": ".listlibraryout", - "ListLibraryOutTypedDict": ".listlibraryout", + "LegacyJobMetadata": ".legacyjobmetadata", + "LegacyJobMetadataTypedDict": ".legacyjobmetadata", + "LibrariesDeleteV1Request": ".libraries_delete_v1op", + "LibrariesDeleteV1RequestTypedDict": ".libraries_delete_v1op", + "LibrariesDocumentsDeleteV1Request": ".libraries_documents_delete_v1op", + "LibrariesDocumentsDeleteV1RequestTypedDict": ".libraries_documents_delete_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1Request": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict": ".libraries_documents_get_extracted_text_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1Request": ".libraries_documents_get_signed_url_v1op", + "LibrariesDocumentsGetSignedURLV1RequestTypedDict": ".libraries_documents_get_signed_url_v1op", + 
"LibrariesDocumentsGetStatusV1Request": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetStatusV1RequestTypedDict": ".libraries_documents_get_status_v1op", + "LibrariesDocumentsGetTextContentV1Request": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetTextContentV1RequestTypedDict": ".libraries_documents_get_text_content_v1op", + "LibrariesDocumentsGetV1Request": ".libraries_documents_get_v1op", + "LibrariesDocumentsGetV1RequestTypedDict": ".libraries_documents_get_v1op", + "LibrariesDocumentsListV1Request": ".libraries_documents_list_v1op", + "LibrariesDocumentsListV1RequestTypedDict": ".libraries_documents_list_v1op", + "LibrariesDocumentsReprocessV1Request": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsReprocessV1RequestTypedDict": ".libraries_documents_reprocess_v1op", + "LibrariesDocumentsUpdateV1Request": ".libraries_documents_update_v1op", + "LibrariesDocumentsUpdateV1RequestTypedDict": ".libraries_documents_update_v1op", + "DocumentUpload": ".libraries_documents_upload_v1op", + "DocumentUploadTypedDict": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1Request": ".libraries_documents_upload_v1op", + "LibrariesDocumentsUploadV1RequestTypedDict": ".libraries_documents_upload_v1op", + "LibrariesGetV1Request": ".libraries_get_v1op", + "LibrariesGetV1RequestTypedDict": ".libraries_get_v1op", + "LibrariesShareCreateV1Request": ".libraries_share_create_v1op", + "LibrariesShareCreateV1RequestTypedDict": ".libraries_share_create_v1op", + "LibrariesShareDeleteV1Request": ".libraries_share_delete_v1op", + "LibrariesShareDeleteV1RequestTypedDict": ".libraries_share_delete_v1op", + "LibrariesShareListV1Request": ".libraries_share_list_v1op", + "LibrariesShareListV1RequestTypedDict": ".libraries_share_list_v1op", + "LibrariesUpdateV1Request": ".libraries_update_v1op", + "LibrariesUpdateV1RequestTypedDict": ".libraries_update_v1op", + "Library": ".library", + "LibraryTypedDict": ".library", + 
"ListBatchJobsResponse": ".listbatchjobsresponse", + "ListBatchJobsResponseTypedDict": ".listbatchjobsresponse", + "ListDocumentsResponse": ".listdocumentsresponse", + "ListDocumentsResponseTypedDict": ".listdocumentsresponse", + "ListFilesResponse": ".listfilesresponse", + "ListFilesResponseTypedDict": ".listfilesresponse", + "ListFineTuningJobsResponse": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseData": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseDataTypedDict": ".listfinetuningjobsresponse", + "ListFineTuningJobsResponseTypedDict": ".listfinetuningjobsresponse", + "UnknownListFineTuningJobsResponseData": ".listfinetuningjobsresponse", + "ListLibrariesResponse": ".listlibrariesresponse", + "ListLibrariesResponseTypedDict": ".listlibrariesresponse", "ListSharingOut": ".listsharingout", "ListSharingOutTypedDict": ".listsharingout", "MessageEntries": ".messageentries", @@ -2022,51 +2142,45 @@ "MessageInputEntry": ".messageinputentry", "MessageInputEntryContent": ".messageinputentry", "MessageInputEntryContentTypedDict": ".messageinputentry", - "MessageInputEntryObject": ".messageinputentry", - "MessageInputEntryRole": ".messageinputentry", - "MessageInputEntryType": ".messageinputentry", "MessageInputEntryTypedDict": ".messageinputentry", + "Role": ".messageinputentry", "MessageOutputContentChunks": ".messageoutputcontentchunks", "MessageOutputContentChunksTypedDict": ".messageoutputcontentchunks", "MessageOutputEntry": ".messageoutputentry", "MessageOutputEntryContent": ".messageoutputentry", "MessageOutputEntryContentTypedDict": ".messageoutputentry", - "MessageOutputEntryObject": ".messageoutputentry", - "MessageOutputEntryRole": ".messageoutputentry", - "MessageOutputEntryType": ".messageoutputentry", "MessageOutputEntryTypedDict": ".messageoutputentry", "MessageOutputEvent": ".messageoutputevent", "MessageOutputEventContent": ".messageoutputevent", "MessageOutputEventContentTypedDict": ".messageoutputevent", - 
"MessageOutputEventRole": ".messageoutputevent", "MessageOutputEventTypedDict": ".messageoutputevent", - "MetricOut": ".metricout", - "MetricOutTypedDict": ".metricout", + "Metric": ".metric", + "MetricTypedDict": ".metric", "MistralPromptMode": ".mistralpromptmode", "ModelCapabilities": ".modelcapabilities", "ModelCapabilitiesTypedDict": ".modelcapabilities", "ModelConversation": ".modelconversation", - "ModelConversationObject": ".modelconversation", "ModelConversationTool": ".modelconversation", "ModelConversationToolTypedDict": ".modelconversation", "ModelConversationTypedDict": ".modelconversation", + "UnknownModelConversationTool": ".modelconversation", "ModelList": ".modellist", "ModelListData": ".modellist", "ModelListDataTypedDict": ".modellist", "ModelListTypedDict": ".modellist", + "UnknownModelListData": ".modellist", "ModerationObject": ".moderationobject", "ModerationObjectTypedDict": ".moderationobject", "ModerationResponse": ".moderationresponse", "ModerationResponseTypedDict": ".moderationresponse", - "NoResponseError": ".no_response_error", "OCRImageObject": ".ocrimageobject", "OCRImageObjectTypedDict": ".ocrimageobject", "OCRPageDimensions": ".ocrpagedimensions", "OCRPageDimensionsTypedDict": ".ocrpagedimensions", "OCRPageObject": ".ocrpageobject", "OCRPageObjectTypedDict": ".ocrpageobject", - "Document": ".ocrrequest", - "DocumentTypedDict": ".ocrrequest", + "DocumentUnion": ".ocrrequest", + "DocumentUnionTypedDict": ".ocrrequest", "OCRRequest": ".ocrrequest", "OCRRequestTypedDict": ".ocrrequest", "TableFormat": ".ocrrequest", @@ -2091,17 +2205,24 @@ "RealtimeTranscriptionErrorDetailMessage": ".realtimetranscriptionerrordetail", "RealtimeTranscriptionErrorDetailMessageTypedDict": ".realtimetranscriptionerrordetail", "RealtimeTranscriptionErrorDetailTypedDict": ".realtimetranscriptionerrordetail", + "RealtimeTranscriptionInputAudioAppend": ".realtimetranscriptioninputaudioappend", + "RealtimeTranscriptionInputAudioAppendTypedDict": 
".realtimetranscriptioninputaudioappend", + "RealtimeTranscriptionInputAudioEnd": ".realtimetranscriptioninputaudioend", + "RealtimeTranscriptionInputAudioEndTypedDict": ".realtimetranscriptioninputaudioend", + "RealtimeTranscriptionInputAudioFlush": ".realtimetranscriptioninputaudioflush", + "RealtimeTranscriptionInputAudioFlushTypedDict": ".realtimetranscriptioninputaudioflush", "RealtimeTranscriptionSession": ".realtimetranscriptionsession", "RealtimeTranscriptionSessionTypedDict": ".realtimetranscriptionsession", "RealtimeTranscriptionSessionCreated": ".realtimetranscriptionsessioncreated", "RealtimeTranscriptionSessionCreatedTypedDict": ".realtimetranscriptionsessioncreated", "RealtimeTranscriptionSessionUpdated": ".realtimetranscriptionsessionupdated", "RealtimeTranscriptionSessionUpdatedTypedDict": ".realtimetranscriptionsessionupdated", + "RealtimeTranscriptionSessionUpdateMessage": ".realtimetranscriptionsessionupdatemessage", + "RealtimeTranscriptionSessionUpdateMessageTypedDict": ".realtimetranscriptionsessionupdatemessage", + "RealtimeTranscriptionSessionUpdatePayload": ".realtimetranscriptionsessionupdatepayload", + "RealtimeTranscriptionSessionUpdatePayloadTypedDict": ".realtimetranscriptionsessionupdatepayload", "ReferenceChunk": ".referencechunk", - "ReferenceChunkType": ".referencechunk", "ReferenceChunkTypedDict": ".referencechunk", - "ReprocessDocumentRequest": ".reprocessdocumentop", - "ReprocessDocumentRequestTypedDict": ".reprocessdocumentop", "RequestSource": ".requestsource", "ResponseDoneEvent": ".responsedoneevent", "ResponseDoneEventTypedDict": ".responsedoneevent", @@ -2112,21 +2233,12 @@ "ResponseFormats": ".responseformats", "ResponseStartedEvent": ".responsestartedevent", "ResponseStartedEventTypedDict": ".responsestartedevent", - "ResponseValidationError": ".responsevalidationerror", - "RestartConversationRequest": ".restartconversationop", - "RestartConversationRequestTypedDict": ".restartconversationop", - 
"RestartConversationStreamRequest": ".restartconversationstreamop", - "RestartConversationStreamRequestTypedDict": ".restartconversationstreamop", - "RetrieveFileRequest": ".retrievefileop", - "RetrieveFileRequestTypedDict": ".retrievefileop", - "RetrieveFileOut": ".retrievefileout", - "RetrieveFileOutTypedDict": ".retrievefileout", - "ResponseRetrieveModelV1ModelsModelIDGet": ".retrievemodelop", - "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrievemodelop", - "RetrieveModelRequest": ".retrievemodelop", - "RetrieveModelRequestTypedDict": ".retrievemodelop", + "ResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetRequest": ".retrieve_model_v1_models_model_id_getop", + "RetrieveModelV1ModelsModelIDGetRequestTypedDict": ".retrieve_model_v1_models_model_id_getop", + "UnknownResponseRetrieveModelV1ModelsModelIDGet": ".retrieve_model_v1_models_model_id_getop", "SampleType": ".sampletype", - "SDKError": ".sdkerror", "Security": ".security", "SecurityTypedDict": ".security", "ShareEnum": ".shareenum", @@ -2138,10 +2250,6 @@ "SharingOutTypedDict": ".sharingout", "Source": ".source", "SSETypes": ".ssetypes", - "StartFineTuningJobRequest": ".startfinetuningjobop", - "StartFineTuningJobRequestTypedDict": ".startfinetuningjobop", - "StartFineTuningJobResponse": ".startfinetuningjobop", - "StartFineTuningJobResponseTypedDict": ".startfinetuningjobop", "SystemMessage": ".systemmessage", "SystemMessageContent": ".systemmessage", "SystemMessageContentTypedDict": ".systemmessage", @@ -2149,21 +2257,24 @@ "SystemMessageContentChunks": ".systemmessagecontentchunks", "SystemMessageContentChunksTypedDict": ".systemmessagecontentchunks", "TextChunk": ".textchunk", - "TextChunkType": ".textchunk", "TextChunkTypedDict": ".textchunk", "ThinkChunk": ".thinkchunk", - "ThinkChunkType": ".thinkchunk", + 
"ThinkChunkThinking": ".thinkchunk", + "ThinkChunkThinkingTypedDict": ".thinkchunk", "ThinkChunkTypedDict": ".thinkchunk", - "Thinking": ".thinkchunk", - "ThinkingTypedDict": ".thinkchunk", "TimestampGranularity": ".timestampgranularity", "Tool": ".tool", "ToolTypedDict": ".tool", "ToolCall": ".toolcall", "ToolCallTypedDict": ".toolcall", + "Confirmation": ".toolcallconfirmation", + "ToolCallConfirmation": ".toolcallconfirmation", + "ToolCallConfirmationTypedDict": ".toolcallconfirmation", "ToolChoice": ".toolchoice", "ToolChoiceTypedDict": ".toolchoice", "ToolChoiceEnum": ".toolchoiceenum", + "ToolConfiguration": ".toolconfiguration", + "ToolConfigurationTypedDict": ".toolconfiguration", "ToolExecutionDeltaEvent": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventName": ".toolexecutiondeltaevent", "ToolExecutionDeltaEventNameTypedDict": ".toolexecutiondeltaevent", @@ -2175,8 +2286,6 @@ "ToolExecutionEntry": ".toolexecutionentry", "ToolExecutionEntryName": ".toolexecutionentry", "ToolExecutionEntryNameTypedDict": ".toolexecutionentry", - "ToolExecutionEntryObject": ".toolexecutionentry", - "ToolExecutionEntryType": ".toolexecutionentry", "ToolExecutionEntryTypedDict": ".toolexecutionentry", "ToolExecutionStartedEvent": ".toolexecutionstartedevent", "ToolExecutionStartedEventName": ".toolexecutionstartedevent", @@ -2185,7 +2294,6 @@ "ToolFileChunk": ".toolfilechunk", "ToolFileChunkTool": ".toolfilechunk", "ToolFileChunkToolTypedDict": ".toolfilechunk", - "ToolFileChunkType": ".toolfilechunk", "ToolFileChunkTypedDict": ".toolfilechunk", "ToolMessage": ".toolmessage", "ToolMessageContent": ".toolmessage", @@ -2194,7 +2302,6 @@ "ToolReferenceChunk": ".toolreferencechunk", "ToolReferenceChunkTool": ".toolreferencechunk", "ToolReferenceChunkToolTypedDict": ".toolreferencechunk", - "ToolReferenceChunkType": ".toolreferencechunk", "ToolReferenceChunkTypedDict": ".toolreferencechunk", "ToolTypes": ".tooltypes", "TrainingFile": ".trainingfile", @@ -2202,7 +2309,6 @@ 
"TranscriptionResponse": ".transcriptionresponse", "TranscriptionResponseTypedDict": ".transcriptionresponse", "TranscriptionSegmentChunk": ".transcriptionsegmentchunk", - "TranscriptionSegmentChunkType": ".transcriptionsegmentchunk", "TranscriptionSegmentChunkTypedDict": ".transcriptionsegmentchunk", "TranscriptionStreamDone": ".transcriptionstreamdone", "TranscriptionStreamDoneTypedDict": ".transcriptionstreamdone", @@ -2210,6 +2316,7 @@ "TranscriptionStreamEventsData": ".transcriptionstreamevents", "TranscriptionStreamEventsDataTypedDict": ".transcriptionstreamevents", "TranscriptionStreamEventsTypedDict": ".transcriptionstreamevents", + "UnknownTranscriptionStreamEventsData": ".transcriptionstreamevents", "TranscriptionStreamEventTypes": ".transcriptionstreameventtypes", "TranscriptionStreamLanguage": ".transcriptionstreamlanguage", "TranscriptionStreamLanguageTypedDict": ".transcriptionstreamlanguage", @@ -2217,34 +2324,20 @@ "TranscriptionStreamSegmentDeltaTypedDict": ".transcriptionstreamsegmentdelta", "TranscriptionStreamTextDelta": ".transcriptionstreamtextdelta", "TranscriptionStreamTextDeltaTypedDict": ".transcriptionstreamtextdelta", - "UnarchiveFTModelOut": ".unarchiveftmodelout", - "UnarchiveFTModelOutTypedDict": ".unarchiveftmodelout", - "UnarchiveModelRequest": ".unarchivemodelop", - "UnarchiveModelRequestTypedDict": ".unarchivemodelop", - "UpdateAgentRequest": ".updateagentop", - "UpdateAgentRequestTypedDict": ".updateagentop", - "UpdateAgentVersionRequest": ".updateagentversionop", - "UpdateAgentVersionRequestTypedDict": ".updateagentversionop", - "UpdateDocumentRequest": ".updatedocumentop", - "UpdateDocumentRequestTypedDict": ".updatedocumentop", - "UpdateFTModelIn": ".updateftmodelin", - "UpdateFTModelInTypedDict": ".updateftmodelin", - "UpdateLibraryRequest": ".updatelibraryop", - "UpdateLibraryRequestTypedDict": ".updatelibraryop", - "UpdateModelRequest": ".updatemodelop", - "UpdateModelRequestTypedDict": ".updatemodelop", - 
"UpdateModelResponse": ".updatemodelop", - "UpdateModelResponseTypedDict": ".updatemodelop", - "UpdateOrCreateLibraryAccessRequest": ".updateorcreatelibraryaccessop", - "UpdateOrCreateLibraryAccessRequestTypedDict": ".updateorcreatelibraryaccessop", - "DocumentUpload": ".uploaddocumentop", - "DocumentUploadTypedDict": ".uploaddocumentop", - "UploadDocumentRequest": ".uploaddocumentop", - "UploadDocumentRequestTypedDict": ".uploaddocumentop", - "MultiPartBodyParams": ".uploadfileop", - "MultiPartBodyParamsTypedDict": ".uploadfileop", - "UploadFileOut": ".uploadfileout", - "UploadFileOutTypedDict": ".uploadfileout", + "UnarchiveModelResponse": ".unarchivemodelresponse", + "UnarchiveModelResponseTypedDict": ".unarchivemodelresponse", + "UpdateAgentRequest": ".updateagentrequest", + "UpdateAgentRequestTool": ".updateagentrequest", + "UpdateAgentRequestToolTypedDict": ".updateagentrequest", + "UpdateAgentRequestTypedDict": ".updateagentrequest", + "Attributes": ".updatedocumentrequest", + "AttributesTypedDict": ".updatedocumentrequest", + "UpdateDocumentRequest": ".updatedocumentrequest", + "UpdateDocumentRequestTypedDict": ".updatedocumentrequest", + "UpdateLibraryRequest": ".updatelibraryrequest", + "UpdateLibraryRequestTypedDict": ".updatelibraryrequest", + "UpdateModelRequest": ".updatemodelrequest", + "UpdateModelRequestTypedDict": ".updatemodelrequest", "UsageInfo": ".usageinfo", "UsageInfoTypedDict": ".usageinfo", "UserMessage": ".usermessage", @@ -2257,8 +2350,8 @@ "ValidationErrorTypedDict": ".validationerror", "WandbIntegration": ".wandbintegration", "WandbIntegrationTypedDict": ".wandbintegration", - "WandbIntegrationOut": ".wandbintegrationout", - "WandbIntegrationOutTypedDict": ".wandbintegrationout", + "WandbIntegrationResult": ".wandbintegrationresult", + "WandbIntegrationResultTypedDict": ".wandbintegrationresult", "WebSearchPremiumTool": ".websearchpremiumtool", "WebSearchPremiumToolTypedDict": ".websearchpremiumtool", "WebSearchTool": ".websearchtool", 
@@ -2266,39 +2359,11 @@ } -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"No {attr_name} found in _dynamic_imports for module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - result = getattr(module, attr_name) - return result - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/src/mistralai/client/models/agent.py b/src/mistralai/client/models/agent.py index 05ae24cd..686a6eb8 100644 --- a/src/mistralai/client/models/agent.py +++ b/src/mistralai/client/models/agent.py @@ -10,6 +10,7 @@ from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict from .websearchtool import WebSearchTool, WebSearchToolTypedDict from datetime import datetime +from functools import partial from mistralai.client.types import ( BaseModel, Nullable, @@ -17,7 +18,11 @@ UNSET, UNSET_SENTINEL, ) -from pydantic import Field, model_serializer +from mistralai.client.utils import validate_const +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from 
pydantic.functional_validators import AfterValidator, BeforeValidator from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -25,16 +30,36 @@ AgentToolTypedDict = TypeAliasType( "AgentToolTypedDict", Union[ + FunctionToolTypedDict, WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) +class UnknownAgentTool(BaseModel): + r"""A AgentTool variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_AGENT_TOOL_VARIANTS: dict[str, Any] = { + "code_interpreter": CodeInterpreterTool, + "document_library": DocumentLibraryTool, + "function": FunctionTool, + "image_generation": ImageGenerationTool, + "web_search": WebSearchTool, + "web_search_premium": WebSearchPremiumTool, +} + + AgentTool = Annotated[ Union[ CodeInterpreterTool, @@ -43,14 +68,20 @@ ImageGenerationTool, WebSearchTool, WebSearchPremiumTool, + UnknownAgentTool, ], - Field(discriminator="TYPE"), + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_AGENT_TOOL_VARIANTS, + unknown_cls=UnknownAgentTool, + union_name="AgentTool", + ) + ), ] -AgentObject = Literal["agent",] - - class AgentTypedDict(TypedDict): model: str name: str @@ -70,7 +101,7 @@ class AgentTypedDict(TypedDict): description: NotRequired[Nullable[str]] handoffs: NotRequired[Nullable[List[str]]] metadata: NotRequired[Nullable[Dict[str, Any]]] - object: NotRequired[AgentObject] + object: Literal["agent"] version_message: NotRequired[Nullable[str]] @@ -108,51 +139,53 @@ class Agent(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET - object: Optional[AgentObject] = "agent" + object: Annotated[ + Annotated[Optional[Literal["agent"]], 
AfterValidator(validate_const("agent"))], + pydantic.Field(alias="object"), + ] = "agent" version_message: OptionalNullable[str] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "description", - "handoffs", - "metadata", - "object", - "version_message", - ] - nullable_fields = [ - "instructions", - "description", - "handoffs", - "metadata", - "version_message", - ] - null_default_fields = [] - + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "metadata", + "object", + "version_message", + ] + ) + nullable_fields = set( + ["instructions", "description", "handoffs", "metadata", "version_message"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + Agent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agentconversation.py b/src/mistralai/client/models/agentconversation.py index a850d54c..da30c663 100644 --- a/src/mistralai/client/models/agentconversation.py +++ b/src/mistralai/client/models/agentconversation.py @@ -10,12 +10,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const 
+import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -AgentConversationObject = Literal["conversation",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict AgentConversationAgentVersionTypedDict = TypeAliasType( @@ -39,7 +39,7 @@ class AgentConversationTypedDict(TypedDict): r"""Description of the what the conversation is about.""" metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" - object: NotRequired[AgentConversationObject] + object: Literal["conversation"] agent_version: NotRequired[Nullable[AgentConversationAgentVersionTypedDict]] @@ -61,36 +61,45 @@ class AgentConversation(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET r"""Custom metadata for the conversation.""" - object: Optional[AgentConversationObject] = "conversation" + object: Annotated[ + Annotated[ + Optional[Literal["conversation"]], + AfterValidator(validate_const("conversation")), + ], + pydantic.Field(alias="object"), + ] = "conversation" agent_version: OptionalNullable[AgentConversationAgentVersion] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name", "description", "metadata", "object", "agent_version"] - nullable_fields = ["name", "description", "metadata", "agent_version"] - null_default_fields = [] - + optional_fields = set( + ["name", "description", "metadata", "object", "agent_version"] + ) + nullable_fields = set(["name", "description", "metadata", "agent_version"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != 
UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AgentConversation.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agenthandoffdoneevent.py b/src/mistralai/client/models/agenthandoffdoneevent.py index 40bf8497..e2609e3d 100644 --- a/src/mistralai/client/models/agenthandoffdoneevent.py +++ b/src/mistralai/client/models/agenthandoffdoneevent.py @@ -3,9 +3,10 @@ from __future__ import annotations from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -27,7 +28,7 @@ class AgentHandoffDoneEvent(BaseModel): next_agent_name: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["agent.handoff.done"], AfterValidator(validate_const("agent.handoff.done")), @@ -38,3 +39,25 @@ class AgentHandoffDoneEvent(BaseModel): created_at: Optional[datetime] = None output_index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = 
val + + return m + + +try: + AgentHandoffDoneEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agenthandoffentry.py b/src/mistralai/client/models/agenthandoffentry.py index b18fe17c..f92ef2cc 100644 --- a/src/mistralai/client/models/agenthandoffentry.py +++ b/src/mistralai/client/models/agenthandoffentry.py @@ -10,15 +10,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -AgentHandoffEntryObject = Literal["entry",] - - -AgentHandoffEntryType = Literal["agent.handoff",] +from typing_extensions import Annotated, NotRequired, TypedDict class AgentHandoffEntryTypedDict(TypedDict): @@ -26,8 +23,8 @@ class AgentHandoffEntryTypedDict(TypedDict): previous_agent_name: str next_agent_id: str next_agent_name: str - object: NotRequired[AgentHandoffEntryObject] - type: NotRequired[AgentHandoffEntryType] + object: Literal["entry"] + type: Literal["agent.handoff"] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] id: NotRequired[str] @@ -42,9 +39,18 @@ class AgentHandoffEntry(BaseModel): next_agent_name: str - object: Optional[AgentHandoffEntryObject] = "entry" + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" - type: Optional[AgentHandoffEntryType] = "agent.handoff" + type: Annotated[ + Annotated[ + Optional[Literal["agent.handoff"]], + AfterValidator(validate_const("agent.handoff")), + ], + pydantic.Field(alias="type"), + ] = "agent.handoff" created_at: Optional[datetime] = None @@ -54,30 +60,31 @@ class AgentHandoffEntry(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - 
nullable_fields = ["completed_at"] - null_default_fields = [] - + optional_fields = set(["object", "type", "created_at", "completed_at", "id"]) + nullable_fields = set(["completed_at"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AgentHandoffEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agenthandoffstartedevent.py b/src/mistralai/client/models/agenthandoffstartedevent.py index e278aef3..2a402341 100644 --- a/src/mistralai/client/models/agenthandoffstartedevent.py +++ b/src/mistralai/client/models/agenthandoffstartedevent.py @@ -3,9 +3,10 @@ from __future__ import annotations from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -27,7 +28,7 @@ class AgentHandoffStartedEvent(BaseModel): previous_agent_name: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["agent.handoff.started"], 
AfterValidator(validate_const("agent.handoff.started")), @@ -38,3 +39,25 @@ class AgentHandoffStartedEvent(BaseModel): created_at: Optional[datetime] = None output_index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + AgentHandoffStartedEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/createorupdateagentaliasop.py b/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py similarity index 80% rename from src/mistralai/client/models/createorupdateagentaliasop.py rename to src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py index cde1dd05..04761ae7 100644 --- a/src/mistralai/client/models/createorupdateagentaliasop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_create_or_update_aliasop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: a79cf28bda01 +# @generated-id: 23a832f8f175 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,13 +7,13 @@ from typing_extensions import Annotated, TypedDict -class CreateOrUpdateAgentAliasRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsCreateOrUpdateAliasRequestTypedDict(TypedDict): agent_id: str alias: str version: int -class CreateOrUpdateAgentAliasRequest(BaseModel): +class AgentsAPIV1AgentsCreateOrUpdateAliasRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deleteagentaliasop.py b/src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py similarity index 78% rename from src/mistralai/client/models/deleteagentaliasop.py rename to src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py index c52d099e..291a9802 100644 --- a/src/mistralai/client/models/deleteagentaliasop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_delete_aliasop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: e4d0d7f75b24 +# @generated-id: 9c9947e768d3 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class DeleteAgentAliasRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsDeleteAliasRequestTypedDict(TypedDict): agent_id: str alias: str -class DeleteAgentAliasRequest(BaseModel): +class AgentsAPIV1AgentsDeleteAliasRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listagentaliasesop.py b/src/mistralai/client/models/agents_api_v1_agents_deleteop.py similarity index 74% rename from src/mistralai/client/models/listagentaliasesop.py rename to src/mistralai/client/models/agents_api_v1_agents_deleteop.py index 83c6d176..5e41fdcd 100644 --- a/src/mistralai/client/models/listagentaliasesop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_deleteop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ff038766a902 +# @generated-id: 95adb6768908 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class ListAgentAliasesRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsDeleteRequestTypedDict(TypedDict): agent_id: str -class ListAgentAliasesRequest(BaseModel): +class AgentsAPIV1AgentsDeleteRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getagentversionop.py b/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py similarity index 78% rename from src/mistralai/client/models/getagentversionop.py rename to src/mistralai/client/models/agents_api_v1_agents_get_versionop.py index 77b8a266..941863d0 100644 --- a/src/mistralai/client/models/getagentversionop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_get_versionop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: a0db5a6aab1f +# @generated-id: ef9914284afb from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetAgentVersionRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsGetVersionRequestTypedDict(TypedDict): agent_id: str version: str -class GetAgentVersionRequest(BaseModel): +class AgentsAPIV1AgentsGetVersionRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agents_api_v1_agents_getop.py b/src/mistralai/client/models/agents_api_v1_agents_getop.py new file mode 100644 index 00000000..dd17580d --- /dev/null +++ b/src/mistralai/client/models/agents_api_v1_agents_getop.py @@ -0,0 +1,66 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f5918c34f1c7 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer +from typing import Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +AgentsAPIV1AgentsGetAgentVersionTypedDict = TypeAliasType( + "AgentsAPIV1AgentsGetAgentVersionTypedDict", Union[int, str] +) + + +AgentsAPIV1AgentsGetAgentVersion = TypeAliasType( + "AgentsAPIV1AgentsGetAgentVersion", Union[int, str] +) + + +class AgentsAPIV1AgentsGetRequestTypedDict(TypedDict): + agent_id: str + agent_version: NotRequired[Nullable[AgentsAPIV1AgentsGetAgentVersionTypedDict]] + + +class AgentsAPIV1AgentsGetRequest(BaseModel): + agent_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + agent_version: Annotated[ + 
OptionalNullable[AgentsAPIV1AgentsGetAgentVersion], + FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), + ] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["agent_version"]) + nullable_fields = set(["agent_version"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/deleteagentop.py b/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py similarity index 71% rename from src/mistralai/client/models/deleteagentop.py rename to src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py index 8b14bca7..bb1da602 100644 --- a/src/mistralai/client/models/deleteagentop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_list_version_aliasesop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 089fb7f87aea +# @generated-id: a04815e6c798 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class DeleteAgentRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsListVersionAliasesRequestTypedDict(TypedDict): agent_id: str -class DeleteAgentRequest(BaseModel): +class AgentsAPIV1AgentsListVersionAliasesRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listagentversionsop.py b/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py similarity index 56% rename from src/mistralai/client/models/listagentversionsop.py rename to src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py index 613d3d85..54b62e90 100644 --- a/src/mistralai/client/models/listagentversionsop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_list_versionsop.py @@ -1,14 +1,15 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ccc5fb48e78f +# @generated-id: 19e3310c3907 from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict -class ListAgentVersionsRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsListVersionsRequestTypedDict(TypedDict): agent_id: str page: NotRequired[int] r"""Page number (0-indexed)""" @@ -16,7 +17,7 @@ class ListAgentVersionsRequestTypedDict(TypedDict): r"""Number of versions per page""" -class ListAgentVersionsRequest(BaseModel): +class AgentsAPIV1AgentsListVersionsRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] @@ -32,3 +33,19 @@ class ListAgentVersionsRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 20 r"""Number of versions per page""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["page", "page_size"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/listagentsop.py b/src/mistralai/client/models/agents_api_v1_agents_listop.py similarity index 70% rename from src/mistralai/client/models/listagentsop.py rename to src/mistralai/client/models/agents_api_v1_agents_listop.py index 863fc13a..97b1c7f1 100644 --- a/src/mistralai/client/models/listagentsop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_listop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: a573a873c404 +# @generated-id: 25a6460a6e19 from __future__ import annotations from .requestsource import RequestSource @@ -16,7 +16,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class ListAgentsRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsListRequestTypedDict(TypedDict): page: NotRequired[int] r"""Page number (0-indexed)""" page_size: NotRequired[int] @@ -31,7 +31,7 @@ class ListAgentsRequestTypedDict(TypedDict): metadata: NotRequired[Nullable[Dict[str, Any]]] -class ListAgentsRequest(BaseModel): +class AgentsAPIV1AgentsListRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -78,46 +78,38 @@ class ListAgentsRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "deployment_chat", - "sources", - "name", - "search", - "id", - "metadata", - ] - nullable_fields = [ - "deployment_chat", - "sources", - "name", - "search", - "id", - "metadata", - ] - null_default_fields = [] - + optional_fields = set( + [ + "page", + "page_size", + "deployment_chat", + "sources", + "name", + "search", + "id", + "metadata", + ] + ) + nullable_fields = set( + ["deployment_chat", "sources", "name", "search", "id", "metadata"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: 
disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/updateagentversionop.py b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py similarity index 78% rename from src/mistralai/client/models/updateagentversionop.py rename to src/mistralai/client/models/agents_api_v1_agents_update_versionop.py index 114013bc..5ab821ea 100644 --- a/src/mistralai/client/models/updateagentversionop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_update_versionop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 3821dca5b20a +# @generated-id: 63f61b8891bf from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class UpdateAgentVersionRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsUpdateVersionRequestTypedDict(TypedDict): agent_id: str version: int -class UpdateAgentVersionRequest(BaseModel): +class AgentsAPIV1AgentsUpdateVersionRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/updateagentop.py b/src/mistralai/client/models/agents_api_v1_agents_updateop.py similarity index 62% rename from src/mistralai/client/models/updateagentop.py rename to src/mistralai/client/models/agents_api_v1_agents_updateop.py index 28acc83d..69da5001 100644 --- a/src/mistralai/client/models/updateagentop.py +++ b/src/mistralai/client/models/agents_api_v1_agents_updateop.py @@ -1,24 +1,24 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ae3a6abea468 +# @generated-id: bb55993c932d from __future__ import annotations -from .agentupdaterequest import AgentUpdateRequest, AgentUpdateRequestTypedDict +from .updateagentrequest import UpdateAgentRequest, UpdateAgentRequestTypedDict from mistralai.client.types import BaseModel from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict -class UpdateAgentRequestTypedDict(TypedDict): +class AgentsAPIV1AgentsUpdateRequestTypedDict(TypedDict): agent_id: str - agent_update_request: AgentUpdateRequestTypedDict + update_agent_request: UpdateAgentRequestTypedDict -class UpdateAgentRequest(BaseModel): +class AgentsAPIV1AgentsUpdateRequest(BaseModel): agent_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] - agent_update_request: Annotated[ - AgentUpdateRequest, + update_agent_request: Annotated[ + UpdateAgentRequest, FieldMetadata(request=RequestMetadata(media_type="application/json")), ] diff --git a/src/mistralai/client/models/appendconversationstreamop.py b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py similarity index 85% rename from src/mistralai/client/models/appendconversationstreamop.py rename to src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py index 55efca0e..d257dc78 100644 --- a/src/mistralai/client/models/appendconversationstreamop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_append_streamop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 1ab08b189e9d +# @generated-id: ec00e0905f15 from __future__ import annotations from .conversationappendstreamrequest import ( @@ -11,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AppendConversationStreamRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsAppendStreamRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation to which we append entries.""" conversation_append_stream_request: ConversationAppendStreamRequestTypedDict -class AppendConversationStreamRequest(BaseModel): +class AgentsAPIV1ConversationsAppendStreamRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/appendconversationop.py b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py similarity index 85% rename from src/mistralai/client/models/appendconversationop.py rename to src/mistralai/client/models/agents_api_v1_conversations_appendop.py index 710b8e1c..61fec083 100644 --- a/src/mistralai/client/models/appendconversationop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_appendop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 1c47dd1e7c7e +# @generated-id: 39c6125e850c from __future__ import annotations from .conversationappendrequest import ( @@ -11,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class AppendConversationRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsAppendRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation to which we append entries.""" conversation_append_request: ConversationAppendRequestTypedDict -class AppendConversationRequest(BaseModel): +class AgentsAPIV1ConversationsAppendRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deleteconversationop.py b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py similarity index 78% rename from src/mistralai/client/models/deleteconversationop.py rename to src/mistralai/client/models/agents_api_v1_conversations_deleteop.py index 39607f40..499645a7 100644 --- a/src/mistralai/client/models/deleteconversationop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_deleteop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 86fefc353db0 +# @generated-id: 0792e6abbdcb from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class DeleteConversationRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsDeleteRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching metadata.""" -class DeleteConversationRequest(BaseModel): +class AgentsAPIV1ConversationsDeleteRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getconversationop.py b/src/mistralai/client/models/agents_api_v1_conversations_getop.py similarity index 88% rename from src/mistralai/client/models/getconversationop.py rename to src/mistralai/client/models/agents_api_v1_conversations_getop.py index d204d175..504616ab 100644 --- a/src/mistralai/client/models/getconversationop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_getop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 1a622b8337ac +# @generated-id: c530f2fc64d0 from __future__ import annotations from .agentconversation import AgentConversation, AgentConversationTypedDict @@ -10,12 +10,12 @@ from typing_extensions import Annotated, TypeAliasType, TypedDict -class GetConversationRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsGetRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching metadata.""" -class GetConversationRequest(BaseModel): +class AgentsAPIV1ConversationsGetRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getconversationhistoryop.py b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py similarity index 78% rename from src/mistralai/client/models/getconversationhistoryop.py rename to src/mistralai/client/models/agents_api_v1_conversations_historyop.py index c1fbf3de..ef0a4eb0 100644 --- a/src/mistralai/client/models/getconversationhistoryop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_historyop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: c863a4cbeb34 +# @generated-id: 2f5ca33768aa from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetConversationHistoryRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsHistoryRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching entries.""" -class GetConversationHistoryRequest(BaseModel): +class AgentsAPIV1ConversationsHistoryRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listconversationsop.py b/src/mistralai/client/models/agents_api_v1_conversations_listop.py similarity index 59% rename from src/mistralai/client/models/listconversationsop.py rename to src/mistralai/client/models/agents_api_v1_conversations_listop.py index 1c9a347c..8bf66aea 100644 --- a/src/mistralai/client/models/listconversationsop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_listop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: d6007f6c1643 +# @generated-id: 936e36181d36 from __future__ import annotations from .agentconversation import AgentConversation, AgentConversationTypedDict @@ -17,13 +17,13 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -class ListConversationsRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsListRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] metadata: NotRequired[Nullable[Dict[str, Any]]] -class ListConversationsRequest(BaseModel): +class AgentsAPIV1ConversationsListRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -41,41 +41,36 @@ class ListConversationsRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["page", "page_size", "metadata"] - nullable_fields = ["metadata"] - null_default_fields = [] - + optional_fields = set(["page", "page_size", "metadata"]) + nullable_fields = set(["metadata"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m -ListConversationsResponseTypedDict = TypeAliasType( - "ListConversationsResponseTypedDict", 
+AgentsAPIV1ConversationsListResponseTypedDict = TypeAliasType( + "AgentsAPIV1ConversationsListResponseTypedDict", Union[AgentConversationTypedDict, ModelConversationTypedDict], ) -ListConversationsResponse = TypeAliasType( - "ListConversationsResponse", Union[AgentConversation, ModelConversation] +AgentsAPIV1ConversationsListResponse = TypeAliasType( + "AgentsAPIV1ConversationsListResponse", Union[AgentConversation, ModelConversation] ) diff --git a/src/mistralai/client/models/getconversationmessagesop.py b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py similarity index 78% rename from src/mistralai/client/models/getconversationmessagesop.py rename to src/mistralai/client/models/agents_api_v1_conversations_messagesop.py index 6666198e..19978a19 100644 --- a/src/mistralai/client/models/getconversationmessagesop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_messagesop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: bb8a90ba7c22 +# @generated-id: b5141764a708 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetConversationMessagesRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsMessagesRequestTypedDict(TypedDict): conversation_id: str r"""ID of the conversation from which we are fetching messages.""" -class GetConversationMessagesRequest(BaseModel): +class AgentsAPIV1ConversationsMessagesRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/restartconversationstreamop.py b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py similarity index 85% rename from src/mistralai/client/models/restartconversationstreamop.py rename to src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py index 3b2025f5..63c74449 100644 --- a/src/mistralai/client/models/restartconversationstreamop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_restart_streamop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 16dc9ee5bf22 +# @generated-id: c284a1711148 from __future__ import annotations from .conversationrestartstreamrequest import ( @@ -11,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class RestartConversationStreamRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsRestartStreamRequestTypedDict(TypedDict): conversation_id: str r"""ID of the original conversation which is being restarted.""" conversation_restart_stream_request: ConversationRestartStreamRequestTypedDict -class RestartConversationStreamRequest(BaseModel): +class AgentsAPIV1ConversationsRestartStreamRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/restartconversationop.py b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py similarity index 85% rename from src/mistralai/client/models/restartconversationop.py rename to src/mistralai/client/models/agents_api_v1_conversations_restartop.py index b09eaed5..3186d5df 100644 --- a/src/mistralai/client/models/restartconversationop.py +++ b/src/mistralai/client/models/agents_api_v1_conversations_restartop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 2f6f3e4bbfd8 +# @generated-id: 3ba234e5a8fc from __future__ import annotations from .conversationrestartrequest import ( @@ -11,13 +11,13 @@ from typing_extensions import Annotated, TypedDict -class RestartConversationRequestTypedDict(TypedDict): +class AgentsAPIV1ConversationsRestartRequestTypedDict(TypedDict): conversation_id: str r"""ID of the original conversation which is being restarted.""" conversation_restart_request: ConversationRestartRequestTypedDict -class RestartConversationRequest(BaseModel): +class AgentsAPIV1ConversationsRestartRequest(BaseModel): conversation_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/agentscompletionrequest.py b/src/mistralai/client/models/agentscompletionrequest.py index f4a2d646..6955f6ac 100644 --- a/src/mistralai/client/models/agentscompletionrequest.py +++ b/src/mistralai/client/models/agentscompletionrequest.py @@ -148,52 +148,44 @@ class AgentsCompletionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + ["max_tokens", "random_seed", "metadata", "tools", "n", "prompt_mode"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - 
serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/agentscompletionstreamrequest.py b/src/mistralai/client/models/agentscompletionstreamrequest.py index 732e2402..c2cf3552 100644 --- a/src/mistralai/client/models/agentscompletionstreamrequest.py +++ b/src/mistralai/client/models/agentscompletionstreamrequest.py @@ -146,52 +146,44 @@ class AgentsCompletionStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - ] - nullable_fields = [ - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + ] + ) + nullable_fields = set( + ["max_tokens", "random_seed", "metadata", "tools", "n", "prompt_mode"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = 
serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/archiveftmodelout.py b/src/mistralai/client/models/archiveftmodelout.py deleted file mode 100644 index 3107116c..00000000 --- a/src/mistralai/client/models/archiveftmodelout.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: bab499599d30 - -from __future__ import annotations -from mistralai.client.types import BaseModel -from mistralai.client.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class ArchiveFTModelOutTypedDict(TypedDict): - id: str - object: Literal["model"] - archived: NotRequired[bool] - - -class ArchiveFTModelOut(BaseModel): - id: str - - OBJECT: Annotated[ - Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], - pydantic.Field(alias="object"), - ] = "model" - - archived: Optional[bool] = True diff --git a/src/mistralai/client/models/archivemodelresponse.py b/src/mistralai/client/models/archivemodelresponse.py new file mode 100644 index 00000000..f1116850 --- /dev/null +++ b/src/mistralai/client/models/archivemodelresponse.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 2d22c644df64 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ArchiveModelResponseTypedDict(TypedDict): + id: str + object: Literal["model"] + archived: NotRequired[bool] + + +class ArchiveModelResponse(BaseModel): + id: str + + object: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" + + archived: Optional[bool] = True + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "archived"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ArchiveModelResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/assistantmessage.py b/src/mistralai/client/models/assistantmessage.py index 5a4a2085..26a778c7 100644 --- a/src/mistralai/client/models/assistantmessage.py +++ b/src/mistralai/client/models/assistantmessage.py @@ -11,9 +11,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict AssistantMessageContentTypedDict = TypeAliasType( @@ -26,18 +29,22 @@ ) -AssistantMessageRole = Literal["assistant",] - - class 
AssistantMessageTypedDict(TypedDict): + role: Literal["assistant"] content: NotRequired[Nullable[AssistantMessageContentTypedDict]] tool_calls: NotRequired[Nullable[List[ToolCallTypedDict]]] prefix: NotRequired[bool] r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: NotRequired[AssistantMessageRole] class AssistantMessage(BaseModel): + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" + content: OptionalNullable[AssistantMessageContent] = UNSET tool_calls: OptionalNullable[List[ToolCall]] = UNSET @@ -45,34 +52,33 @@ class AssistantMessage(BaseModel): prefix: Optional[bool] = False r"""Set this to `true` when adding an assistant message as prefix to condition the model response. The role of the prefix message is to force the model to start its answer by the content of the message.""" - role: Optional[AssistantMessageRole] = "assistant" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["content", "tool_calls", "prefix", "role"] - nullable_fields = ["content", "tool_calls"] - null_default_fields = [] - + optional_fields = set(["role", "content", "tool_calls", "prefix"]) + nullable_fields = set(["content", "tool_calls"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - 
self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AssistantMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/audiochunk.py b/src/mistralai/client/models/audiochunk.py index a5186827..68866cd2 100644 --- a/src/mistralai/client/models/audiochunk.py +++ b/src/mistralai/client/models/audiochunk.py @@ -18,9 +18,15 @@ class AudioChunkTypedDict(TypedDict): class AudioChunk(BaseModel): input_audio: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["input_audio"], AfterValidator(validate_const("input_audio")) ], pydantic.Field(alias="type"), ] = "input_audio" + + +try: + AudioChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/audiotranscriptionrequest.py b/src/mistralai/client/models/audiotranscriptionrequest.py index 8c47a83c..fe4c79e3 100644 --- a/src/mistralai/client/models/audiotranscriptionrequest.py +++ b/src/mistralai/client/models/audiotranscriptionrequest.py @@ -58,7 +58,7 @@ class AudioTranscriptionRequest(BaseModel): UNSET ) - STREAM: Annotated[ + stream: Annotated[ Annotated[Optional[Literal[False]], AfterValidator(validate_const(False))], pydantic.Field(alias="stream"), FieldMetadata(multipart=True), @@ -75,40 +75,43 @@ class AudioTranscriptionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "file", - "file_url", - "file_id", - "language", - "temperature", - "stream", - "diarize", - "context_bias", - "timestamp_granularities", - ] - nullable_fields = ["file_url", "file_id", "language", "temperature"] - null_default_fields = [] - + optional_fields = set( + [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "diarize", + 
"context_bias", + "timestamp_granularities", + ] + ) + nullable_fields = set(["file_url", "file_id", "language", "temperature"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AudioTranscriptionRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/audiotranscriptionrequeststream.py b/src/mistralai/client/models/audiotranscriptionrequeststream.py index a080cee2..2d1e9269 100644 --- a/src/mistralai/client/models/audiotranscriptionrequeststream.py +++ b/src/mistralai/client/models/audiotranscriptionrequeststream.py @@ -56,7 +56,7 @@ class AudioTranscriptionRequestStream(BaseModel): UNSET ) - STREAM: Annotated[ + stream: Annotated[ Annotated[Optional[Literal[True]], AfterValidator(validate_const(True))], pydantic.Field(alias="stream"), FieldMetadata(multipart=True), @@ -73,40 +73,43 @@ class AudioTranscriptionRequestStream(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "file", - "file_url", - "file_id", - "language", - "temperature", - "stream", - "diarize", - "context_bias", - "timestamp_granularities", - ] - nullable_fields = ["file_url", "file_id", "language", "temperature"] - null_default_fields = [] - + 
optional_fields = set( + [ + "file", + "file_url", + "file_id", + "language", + "temperature", + "stream", + "diarize", + "context_bias", + "timestamp_granularities", + ] + ) + nullable_fields = set(["file_url", "file_id", "language", "temperature"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + AudioTranscriptionRequestStream.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/basemodelcard.py b/src/mistralai/client/models/basemodelcard.py index 17a3e5c9..9c9e9a20 100644 --- a/src/mistralai/client/models/basemodelcard.py +++ b/src/mistralai/client/models/basemodelcard.py @@ -60,54 +60,59 @@ class BaseModelCard(BaseModel): default_model_temperature: OptionalNullable[float] = UNSET - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["base"], AfterValidator(validate_const("base"))], pydantic.Field(alias="type"), ] = "base" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "created", - "owned_by", - "name", - "description", - "max_context_length", - "aliases", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - ] - nullable_fields = [ - "name", - "description", - "deprecation", - 
"deprecation_replacement_model", - "default_model_temperature", - ] - null_default_fields = [] - + optional_fields = set( + [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + ) + nullable_fields = set( + [ + "name", + "description", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + BaseModelCard.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/batcherror.py b/src/mistralai/client/models/batcherror.py index c1bf722a..8a353cd2 100644 --- a/src/mistralai/client/models/batcherror.py +++ b/src/mistralai/client/models/batcherror.py @@ -2,7 +2,8 @@ # @generated-id: 1563e2a576ec from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -16,3 +17,19 @@ class BatchError(BaseModel): message: str count: Optional[int] = 1 + + 
@model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["count"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/batchjobout.py b/src/mistralai/client/models/batchjob.py similarity index 64% rename from src/mistralai/client/models/batchjobout.py rename to src/mistralai/client/models/batchjob.py index 99c2b951..80acac33 100644 --- a/src/mistralai/client/models/batchjobout.py +++ b/src/mistralai/client/models/batchjob.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: cbf1d872a46e +# @generated-id: 85cd28932cc7 from __future__ import annotations from .batcherror import BatchError, BatchErrorTypedDict @@ -19,7 +19,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class BatchJobOutTypedDict(TypedDict): +class BatchJobTypedDict(TypedDict): id: str input_files: List[str] endpoint: str @@ -41,7 +41,7 @@ class BatchJobOutTypedDict(TypedDict): completed_at: NotRequired[Nullable[int]] -class BatchJobOut(BaseModel): +class BatchJob(BaseModel): id: str input_files: List[str] @@ -62,7 +62,7 @@ class BatchJobOut(BaseModel): failed_requests: int - OBJECT: Annotated[ + object: Annotated[ Annotated[Optional[Literal["batch"]], AfterValidator(validate_const("batch"))], pydantic.Field(alias="object"), ] = "batch" @@ -85,49 +85,54 @@ class BatchJobOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "metadata", - "model", - "agent_id", - "output_file", - "error_file", - "outputs", - "started_at", - "completed_at", - ] - nullable_fields = [ - "metadata", - "model", - "agent_id", - "output_file", - "error_file", - "outputs", - 
"started_at", - "completed_at", - ] - null_default_fields = [] - + optional_fields = set( + [ + "object", + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + ) + nullable_fields = set( + [ + "metadata", + "model", + "agent_id", + "output_file", + "error_file", + "outputs", + "started_at", + "completed_at", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + BatchJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/batchjobsout.py b/src/mistralai/client/models/batchjobsout.py deleted file mode 100644 index f65fc040..00000000 --- a/src/mistralai/client/models/batchjobsout.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 20b2516e7efa - -from __future__ import annotations -from .batchjobout import BatchJobOut, BatchJobOutTypedDict -from mistralai.client.types import BaseModel -from mistralai.client.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class BatchJobsOutTypedDict(TypedDict): - total: int - data: NotRequired[List[BatchJobOutTypedDict]] - object: Literal["list"] - - -class BatchJobsOut(BaseModel): - total: int - - data: Optional[List[BatchJobOut]] = None - - OBJECT: Annotated[ - Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], - pydantic.Field(alias="object"), - ] = "list" diff --git a/src/mistralai/client/models/batchrequest.py b/src/mistralai/client/models/batchrequest.py index 41c45234..911a9a05 100644 --- a/src/mistralai/client/models/batchrequest.py +++ b/src/mistralai/client/models/batchrequest.py @@ -26,30 +26,25 @@ class BatchRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["custom_id"] - nullable_fields = ["custom_id"] - null_default_fields = [] - + optional_fields = set(["custom_id"]) + nullable_fields = set(["custom_id"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + 
+ if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/cancelfinetuningjobop.py b/src/mistralai/client/models/cancelfinetuningjobop.py deleted file mode 100644 index ddd445bb..00000000 --- a/src/mistralai/client/models/cancelfinetuningjobop.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: c9a1b39f0d02 - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from pydantic import Field -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class CancelFineTuningJobRequestTypedDict(TypedDict): - job_id: str - r"""The ID of the job to cancel.""" - - -class CancelFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the job to cancel.""" - - -CancelFineTuningJobResponseTypedDict = TypeAliasType( - "CancelFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -CancelFineTuningJobResponse = Annotated[ - Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], - Field(discriminator="JOB_TYPE"), -] -r"""OK""" diff --git a/src/mistralai/client/models/chatclassificationrequest.py b/src/mistralai/client/models/chatclassificationrequest.py index 8b6d07b9..cf2aa78a 100644 --- a/src/mistralai/client/models/chatclassificationrequest.py +++ b/src/mistralai/client/models/chatclassificationrequest.py @@ -4,18 +4,17 
@@ from __future__ import annotations from .inputs import Inputs, InputsTypedDict from mistralai.client.types import BaseModel -import pydantic -from typing_extensions import Annotated, TypedDict +from typing_extensions import TypedDict class ChatClassificationRequestTypedDict(TypedDict): model: str - inputs: InputsTypedDict + input: InputsTypedDict r"""Chat to classify""" class ChatClassificationRequest(BaseModel): model: str - inputs: Annotated[Inputs, pydantic.Field(alias="input")] + input: Inputs r"""Chat to classify""" diff --git a/src/mistralai/client/models/chatcompletionrequest.py b/src/mistralai/client/models/chatcompletionrequest.py index 4f7d071b..e871bd92 100644 --- a/src/mistralai/client/models/chatcompletionrequest.py +++ b/src/mistralai/client/models/chatcompletionrequest.py @@ -171,56 +171,55 @@ class ChatCompletionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable 
= k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/chatcompletionstreamrequest.py b/src/mistralai/client/models/chatcompletionstreamrequest.py index ec7d2ae1..b7b2bff1 100644 --- a/src/mistralai/client/models/chatcompletionstreamrequest.py +++ b/src/mistralai/client/models/chatcompletionstreamrequest.py @@ -169,56 +169,55 @@ class ChatCompletionStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "response_format", - "tools", - "tool_choice", - "presence_penalty", - "frequency_penalty", - "n", - "prediction", - "parallel_tool_calls", - "prompt_mode", - "safe_prompt", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "tools", - "n", - "prompt_mode", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "response_format", + "tools", + "tool_choice", + "presence_penalty", + "frequency_penalty", + "n", + "prediction", + "parallel_tool_calls", + "prompt_mode", + "safe_prompt", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "tools", + "n", + "prompt_mode", + ] + ) serialized = handler(self) 
- m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/chatmoderationrequest.py b/src/mistralai/client/models/chatmoderationrequest.py index a8d021e8..228e7d26 100644 --- a/src/mistralai/client/models/chatmoderationrequest.py +++ b/src/mistralai/client/models/chatmoderationrequest.py @@ -86,3 +86,9 @@ class ChatModerationRequest(BaseModel): r"""Chat to classify""" model: str + + +try: + ChatModerationRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/checkpointout.py b/src/mistralai/client/models/checkpoint.py similarity index 81% rename from src/mistralai/client/models/checkpointout.py rename to src/mistralai/client/models/checkpoint.py index 3e8d90e9..c24e433e 100644 --- a/src/mistralai/client/models/checkpointout.py +++ b/src/mistralai/client/models/checkpoint.py @@ -1,14 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 3866fe32cd7c +# @generated-id: 1a530d3674d8 from __future__ import annotations -from .metricout import MetricOut, MetricOutTypedDict +from .metric import Metric, MetricTypedDict from mistralai.client.types import BaseModel from typing_extensions import TypedDict -class CheckpointOutTypedDict(TypedDict): - metrics: MetricOutTypedDict +class CheckpointTypedDict(TypedDict): + metrics: MetricTypedDict r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" step_number: int r"""The step number that the checkpoint was created at.""" @@ -16,8 +16,8 @@ class CheckpointOutTypedDict(TypedDict): r"""The UNIX timestamp (in seconds) for when the checkpoint was created.""" -class CheckpointOut(BaseModel): - metrics: MetricOut +class Checkpoint(BaseModel): + metrics: Metric r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" step_number: int diff --git a/src/mistralai/client/models/classificationrequest.py b/src/mistralai/client/models/classificationrequest.py index 903706c3..25b69413 100644 --- a/src/mistralai/client/models/classificationrequest.py +++ b/src/mistralai/client/models/classificationrequest.py @@ -46,30 +46,31 @@ class ClassificationRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["metadata"] - nullable_fields = ["metadata"] - null_default_fields = [] - + optional_fields = set(["metadata"]) + nullable_fields = set(["metadata"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val 
!= UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ClassificationRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifierdetailedjobout.py b/src/mistralai/client/models/classifierdetailedjobout.py deleted file mode 100644 index bc5c5381..00000000 --- a/src/mistralai/client/models/classifierdetailedjobout.py +++ /dev/null @@ -1,169 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: d8daeb39ef9f - -from __future__ import annotations -from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict -from .classifiertrainingparameters import ( - ClassifierTrainingParameters, - ClassifierTrainingParametersTypedDict, -) -from .eventout import EventOut, EventOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, - UnrecognizedStr, -) -from mistralai.client.utils import validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict - - -ClassifierDetailedJobOutStatus = Union[ - Literal[ - 
"QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", - ], - UnrecognizedStr, -] - - -ClassifierDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict - - -ClassifierDetailedJobOutIntegration = WandbIntegrationOut - - -class ClassifierDetailedJobOutTypedDict(TypedDict): - id: str - auto_start: bool - model: str - status: ClassifierDetailedJobOutStatus - created_at: int - modified_at: int - training_files: List[str] - hyperparameters: ClassifierTrainingParametersTypedDict - classifier_targets: List[ClassifierTargetOutTypedDict] - validation_files: NotRequired[Nullable[List[str]]] - object: Literal["job"] - fine_tuned_model: NotRequired[Nullable[str]] - suffix: NotRequired[Nullable[str]] - integrations: NotRequired[ - Nullable[List[ClassifierDetailedJobOutIntegrationTypedDict]] - ] - trained_tokens: NotRequired[Nullable[int]] - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: Literal["classifier"] - events: NotRequired[List[EventOutTypedDict]] - r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" - checkpoints: NotRequired[List[CheckpointOutTypedDict]] - - -class ClassifierDetailedJobOut(BaseModel): - id: str - - auto_start: bool - - model: str - - status: ClassifierDetailedJobOutStatus - - created_at: int - - modified_at: int - - training_files: List[str] - - hyperparameters: ClassifierTrainingParameters - - classifier_targets: List[ClassifierTargetOut] - - validation_files: OptionalNullable[List[str]] = UNSET - - OBJECT: Annotated[ - Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], - pydantic.Field(alias="object"), - ] = "job" - - fine_tuned_model: OptionalNullable[str] = UNSET - - suffix: OptionalNullable[str] = UNSET - - integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegration]] = UNSET - - trained_tokens: OptionalNullable[int] = UNSET - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - JOB_TYPE: Annotated[ - Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], - pydantic.Field(alias="job_type"), - ] = "classifier" - - events: Optional[List[EventOut]] = None - r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" - - checkpoints: Optional[List[CheckpointOut]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "events", - "checkpoints", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/classifierftmodelout.py b/src/mistralai/client/models/classifierfinetunedmodel.py similarity index 56% rename from src/mistralai/client/models/classifierftmodelout.py rename to src/mistralai/client/models/classifierfinetunedmodel.py index 182f4954..fbcf5892 100644 --- a/src/mistralai/client/models/classifierftmodelout.py +++ b/src/mistralai/client/models/classifierfinetunedmodel.py @@ -1,11 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 2903a7123b06 +# @generated-id: 5a9a7a0153c8 from __future__ import annotations -from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict -from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, +from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, +) +from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, ) from mistralai.client.types import ( BaseModel, @@ -22,7 +25,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class ClassifierFTModelOutTypedDict(TypedDict): +class ClassifierFineTunedModelTypedDict(TypedDict): id: str created: int owned_by: str @@ -30,9 +33,9 @@ class ClassifierFTModelOutTypedDict(TypedDict): root: str root_version: str archived: bool - capabilities: FTModelCapabilitiesOutTypedDict + capabilities: FineTunedModelCapabilitiesTypedDict job: str - classifier_targets: List[ClassifierTargetOutTypedDict] + classifier_targets: List[ClassifierTargetResultTypedDict] object: Literal["model"] name: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] @@ -41,7 +44,7 @@ class ClassifierFTModelOutTypedDict(TypedDict): model_type: Literal["classifier"] -class ClassifierFTModelOut(BaseModel): +class ClassifierFineTunedModel(BaseModel): id: str created: int @@ -56,13 +59,13 @@ class ClassifierFTModelOut(BaseModel): archived: bool - capabilities: FTModelCapabilitiesOut + capabilities: FineTunedModelCapabilities job: str - classifier_targets: List[ClassifierTargetOut] + classifier_targets: List[ClassifierTargetResult] - OBJECT: Annotated[ + object: Annotated[ Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], pydantic.Field(alias="object"), ] = "model" @@ -75,43 +78,40 @@ class ClassifierFTModelOut(BaseModel): aliases: Optional[List[str]] = None - MODEL_TYPE: Annotated[ + model_type: Annotated[ 
Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], pydantic.Field(alias="model_type"), ] = "classifier" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "name", - "description", - "max_context_length", - "aliases", - ] - nullable_fields = ["name", "description"] - null_default_fields = [] - + optional_fields = set( + ["object", "name", "description", "max_context_length", "aliases"] + ) + nullable_fields = set(["name", "description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ClassifierFineTunedModel.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifierjobout.py b/src/mistralai/client/models/classifierfinetuningjob.py similarity index 63% rename from src/mistralai/client/models/classifierjobout.py rename to src/mistralai/client/models/classifierfinetuningjob.py index 03a5b11c..fb160cf8 100644 --- a/src/mistralai/client/models/classifierjobout.py +++ b/src/mistralai/client/models/classifierfinetuningjob.py @@ -1,13 +1,16 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: e19e9c4416cc +# @generated-id: a244d5f2afc5 from __future__ import annotations from .classifiertrainingparameters import ( ClassifierTrainingParameters, ClassifierTrainingParametersTypedDict, ) -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) from mistralai.client.types import ( BaseModel, Nullable, @@ -18,13 +21,13 @@ ) from mistralai.client.utils import validate_const import pydantic -from pydantic import model_serializer +from pydantic import ConfigDict, model_serializer from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional, Union +from typing import Any, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypedDict -ClassifierJobOutStatus = Union[ +ClassifierFineTuningJobStatus = Union[ Literal[ "QUEUED", "STARTED", @@ -42,18 +45,33 @@ r"""The current status of the fine-tuning job.""" -ClassifierJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict +ClassifierFineTuningJobIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownClassifierFineTuningJobIntegration(BaseModel): + r"""A ClassifierFineTuningJobIntegration variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + +_CLASSIFIER_FINE_TUNING_JOB_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} -ClassifierJobOutIntegration = WandbIntegrationOut +ClassifierFineTuningJobIntegration = WandbIntegrationResult -class ClassifierJobOutTypedDict(TypedDict): + +class ClassifierFineTuningJobTypedDict(TypedDict): id: str r"""The ID of the job.""" auto_start: bool model: str - status: ClassifierJobOutStatus + status: ClassifierFineTuningJobStatus r"""The current status of the fine-tuning job.""" created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" @@ -70,16 +88,18 @@ class ClassifierJobOutTypedDict(TypedDict): r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationTypedDict]]] + integrations: NotRequired[ + Nullable[List[ClassifierFineTuningJobIntegrationTypedDict]] + ] r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: NotRequired[Nullable[int]] r"""Total number of tokens trained.""" - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] job_type: Literal["classifier"] r"""The type of job (`FT` for fine-tuning).""" -class ClassifierJobOut(BaseModel): +class ClassifierFineTuningJob(BaseModel): id: str r"""The ID of the job.""" @@ -87,7 +107,7 @@ class ClassifierJobOut(BaseModel): model: str - status: ClassifierJobOutStatus + status: ClassifierFineTuningJobStatus r"""The current status of the fine-tuning job.""" created_at: int @@ -104,7 +124,7 @@ class ClassifierJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - OBJECT: Annotated[ + object: Annotated[ Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], pydantic.Field(alias="object"), ] = "job" @@ -116,15 +136,15 @@ class ClassifierJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: OptionalNullable[List[ClassifierJobOutIntegration]] = UNSET + integrations: OptionalNullable[List[ClassifierFineTuningJobIntegration]] = UNSET r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: OptionalNullable[int] = UNSET r"""Total number of tokens trained.""" - metadata: OptionalNullable[JobMetadataOut] = UNSET + metadata: OptionalNullable[JobMetadata] = UNSET - JOB_TYPE: Annotated[ + job_type: Annotated[ Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], pydantic.Field(alias="job_type"), ] = "classifier" @@ -132,45 +152,50 @@ class ClassifierJobOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in 
null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ClassifierFineTuningJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifierfinetuningjobdetails.py b/src/mistralai/client/models/classifierfinetuningjobdetails.py new file mode 100644 index 00000000..5d73f55e --- /dev/null +++ b/src/mistralai/client/models/classifierfinetuningjobdetails.py @@ -0,0 +1,197 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 75c5dee8df2e + +from __future__ import annotations +from .checkpoint import Checkpoint, CheckpointTypedDict +from .classifiertargetresult import ( + ClassifierTargetResult, + ClassifierTargetResultTypedDict, +) +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, +) +from .event import Event, EventTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +ClassifierFineTuningJobDetailsStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + "RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] + + 
+ClassifierFineTuningJobDetailsIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownClassifierFineTuningJobDetailsIntegration(BaseModel): + r"""A ClassifierFineTuningJobDetailsIntegration variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CLASSIFIER_FINE_TUNING_JOB_DETAILS_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +ClassifierFineTuningJobDetailsIntegration = WandbIntegrationResult + + +class ClassifierFineTuningJobDetailsTypedDict(TypedDict): + id: str + auto_start: bool + model: str + status: ClassifierFineTuningJobDetailsStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: ClassifierTrainingParametersTypedDict + classifier_targets: List[ClassifierTargetResultTypedDict] + validation_files: NotRequired[Nullable[List[str]]] + object: Literal["job"] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[ClassifierFineTuningJobDetailsIntegrationTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["classifier"] + events: NotRequired[List[EventTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointTypedDict]] + + +class ClassifierFineTuningJobDetails(BaseModel): + id: str + + auto_start: bool + + model: str + + status: ClassifierFineTuningJobDetailsStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: ClassifierTrainingParameters + + classifier_targets: List[ClassifierTargetResult] + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[ClassifierFineTuningJobDetailsIntegration]] = ( + UNSET + ) + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["classifier"], AfterValidator(validate_const("classifier"))], + pydantic.Field(alias="job_type"), + ] = "classifier" + + events: Optional[List[Event]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[Checkpoint]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "events", + "checkpoints", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ClassifierFineTuningJobDetails.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/classifiertargetin.py b/src/mistralai/client/models/classifiertarget.py similarity index 55% rename from src/mistralai/client/models/classifiertargetin.py rename to src/mistralai/client/models/classifiertarget.py index b250109b..4d66d789 100644 --- a/src/mistralai/client/models/classifiertargetin.py +++ b/src/mistralai/client/models/classifiertarget.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ed021de1c06c +# @generated-id: 2177d51d9dcf from __future__ import annotations from .ftclassifierlossfunction import FTClassifierLossFunction @@ -15,14 +15,14 @@ from typing_extensions import NotRequired, TypedDict -class ClassifierTargetInTypedDict(TypedDict): +class ClassifierTargetTypedDict(TypedDict): name: str labels: List[str] weight: NotRequired[float] loss_function: NotRequired[Nullable[FTClassifierLossFunction]] -class ClassifierTargetIn(BaseModel): +class ClassifierTarget(BaseModel): name: str labels: List[str] @@ -33,30 +33,25 @@ class ClassifierTargetIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["weight", "loss_function"] - nullable_fields = ["loss_function"] - null_default_fields = [] - + optional_fields = set(["weight", "loss_function"]) + nullable_fields = set(["loss_function"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/classifiertargetout.py b/src/mistralai/client/models/classifiertargetresult.py similarity index 79% rename from src/mistralai/client/models/classifiertargetout.py rename to src/mistralai/client/models/classifiertargetresult.py index 
3d41a4d9..8ce7c0ca 100644 --- a/src/mistralai/client/models/classifiertargetout.py +++ b/src/mistralai/client/models/classifiertargetresult.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 5131f55abefe +# @generated-id: 19c343844888 from __future__ import annotations from .ftclassifierlossfunction import FTClassifierLossFunction @@ -8,14 +8,14 @@ from typing_extensions import TypedDict -class ClassifierTargetOutTypedDict(TypedDict): +class ClassifierTargetResultTypedDict(TypedDict): name: str labels: List[str] weight: float loss_function: FTClassifierLossFunction -class ClassifierTargetOut(BaseModel): +class ClassifierTargetResult(BaseModel): name: str labels: List[str] diff --git a/src/mistralai/client/models/classifiertrainingparameters.py b/src/mistralai/client/models/classifiertrainingparameters.py index f360eda5..14fa4926 100644 --- a/src/mistralai/client/models/classifiertrainingparameters.py +++ b/src/mistralai/client/models/classifiertrainingparameters.py @@ -38,43 +38,36 @@ class ClassifierTrainingParameters(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - null_default_fields = [] - + optional_fields = set( + [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + ] + ) + nullable_fields = set( + ["training_steps", "weight_decay", "warmup_fraction", "epochs", "seq_len"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - 
self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/classifiertrainingparametersin.py b/src/mistralai/client/models/classifiertrainingparametersin.py deleted file mode 100644 index 85360a7e..00000000 --- a/src/mistralai/client/models/classifiertrainingparametersin.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 4b33d5cf0345 - -from __future__ import annotations -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class ClassifierTrainingParametersInTypedDict(TypedDict): - r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" - - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. 
This update is typically calculated using a batch of samples from the training dataset.""" - learning_rate: NotRequired[float] - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - weight_decay: NotRequired[Nullable[float]] - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - warmup_fraction: NotRequired[Nullable[float]] - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - - -class ClassifierTrainingParametersIn(BaseModel): - r"""The fine-tuning hyperparameter settings used in a classifier fine-tune job.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - - learning_rate: Optional[float] = 0.0001 - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - - weight_decay: OptionalNullable[float] = UNSET - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. 
This term reduces the magnitude of the weights and prevents them from growing too large.""" - - warmup_fraction: OptionalNullable[float] = UNSET - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/codeinterpretertool.py b/src/mistralai/client/models/codeinterpretertool.py index f69c7a57..ce14265f 100644 --- a/src/mistralai/client/models/codeinterpretertool.py +++ b/src/mistralai/client/models/codeinterpretertool.py @@ -2,23 +2,65 @@ # @generated-id: 950cd8f4ad49 from __future__ import annotations -from mistralai.client.types import BaseModel +from .toolconfiguration import 
ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class CodeInterpreterToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] type: Literal["code_interpreter"] class CodeInterpreterTool(BaseModel): - TYPE: Annotated[ + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ Annotated[ Literal["code_interpreter"], AfterValidator(validate_const("code_interpreter")), ], pydantic.Field(alias="type"), ] = "code_interpreter" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CodeInterpreterTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionargs.py b/src/mistralai/client/models/completionargs.py index 918832ac..ab5cf5ff 100644 --- a/src/mistralai/client/models/completionargs.py +++ b/src/mistralai/client/models/completionargs.py @@ -58,51 +58,50 @@ class CompletionArgs(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "stop", - 
"presence_penalty", - "frequency_penalty", - "temperature", - "top_p", - "max_tokens", - "random_seed", - "prediction", - "response_format", - "tool_choice", - ] - nullable_fields = [ - "stop", - "presence_penalty", - "frequency_penalty", - "temperature", - "top_p", - "max_tokens", - "random_seed", - "prediction", - "response_format", - ] - null_default_fields = [] - + optional_fields = set( + [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + "tool_choice", + ] + ) + nullable_fields = set( + [ + "stop", + "presence_penalty", + "frequency_penalty", + "temperature", + "top_p", + "max_tokens", + "random_seed", + "prediction", + "response_format", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/completionchunk.py b/src/mistralai/client/models/completionchunk.py index 67f447d0..5fd6c173 100644 --- a/src/mistralai/client/models/completionchunk.py +++ b/src/mistralai/client/models/completionchunk.py @@ -7,7 +7,8 @@ CompletionResponseStreamChoiceTypedDict, ) from .usageinfo import UsageInfo, UsageInfoTypedDict -from mistralai.client.types import BaseModel +from 
mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import List, Optional from typing_extensions import NotRequired, TypedDict @@ -33,3 +34,19 @@ class CompletionChunk(BaseModel): created: Optional[int] = None usage: Optional[UsageInfo] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "created", "usage"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/completiondetailedjobout.py b/src/mistralai/client/models/completiondetailedjobout.py deleted file mode 100644 index cd3a86ee..00000000 --- a/src/mistralai/client/models/completiondetailedjobout.py +++ /dev/null @@ -1,176 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 9bc38dcfbddf - -from __future__ import annotations -from .checkpointout import CheckpointOut, CheckpointOutTypedDict -from .completiontrainingparameters import ( - CompletionTrainingParameters, - CompletionTrainingParametersTypedDict, -) -from .eventout import EventOut, EventOutTypedDict -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, - UnrecognizedStr, -) -from mistralai.client.utils import validate_const -import pydantic -from pydantic import model_serializer -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypedDict - - -CompletionDetailedJobOutStatus = Union[ - Literal[ - "QUEUED", - "STARTED", - "VALIDATING", - "VALIDATED", - "RUNNING", - "FAILED_VALIDATION", - "FAILED", - "SUCCESS", - "CANCELLED", - "CANCELLATION_REQUESTED", - ], - UnrecognizedStr, -] - - -CompletionDetailedJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict - - -CompletionDetailedJobOutIntegration = WandbIntegrationOut - - -CompletionDetailedJobOutRepositoryTypedDict = GithubRepositoryOutTypedDict - - -CompletionDetailedJobOutRepository = GithubRepositoryOut - - -class CompletionDetailedJobOutTypedDict(TypedDict): - id: str - auto_start: bool - model: str - status: CompletionDetailedJobOutStatus - created_at: int - modified_at: int - training_files: List[str] - hyperparameters: CompletionTrainingParametersTypedDict - validation_files: NotRequired[Nullable[List[str]]] - object: Literal["job"] - fine_tuned_model: NotRequired[Nullable[str]] - suffix: NotRequired[Nullable[str]] - integrations: NotRequired[ - 
Nullable[List[CompletionDetailedJobOutIntegrationTypedDict]] - ] - trained_tokens: NotRequired[Nullable[int]] - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] - job_type: Literal["completion"] - repositories: NotRequired[List[CompletionDetailedJobOutRepositoryTypedDict]] - events: NotRequired[List[EventOutTypedDict]] - r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" - checkpoints: NotRequired[List[CheckpointOutTypedDict]] - - -class CompletionDetailedJobOut(BaseModel): - id: str - - auto_start: bool - - model: str - - status: CompletionDetailedJobOutStatus - - created_at: int - - modified_at: int - - training_files: List[str] - - hyperparameters: CompletionTrainingParameters - - validation_files: OptionalNullable[List[str]] = UNSET - - OBJECT: Annotated[ - Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], - pydantic.Field(alias="object"), - ] = "job" - - fine_tuned_model: OptionalNullable[str] = UNSET - - suffix: OptionalNullable[str] = UNSET - - integrations: OptionalNullable[List[CompletionDetailedJobOutIntegration]] = UNSET - - trained_tokens: OptionalNullable[int] = UNSET - - metadata: OptionalNullable[JobMetadataOut] = UNSET - - JOB_TYPE: Annotated[ - Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], - pydantic.Field(alias="job_type"), - ] = "completion" - - repositories: Optional[List[CompletionDetailedJobOutRepository]] = None - - events: Optional[List[EventOut]] = None - r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" - - checkpoints: Optional[List[CheckpointOut]] = None - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "repositories", - "events", - "checkpoints", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/completionftmodelout.py b/src/mistralai/client/models/completionfinetunedmodel.py similarity index 60% rename from src/mistralai/client/models/completionftmodelout.py rename to src/mistralai/client/models/completionfinetunedmodel.py index 7ecbf54a..54a1c165 100644 --- a/src/mistralai/client/models/completionftmodelout.py +++ b/src/mistralai/client/models/completionfinetunedmodel.py @@ -1,10 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 0f5277833b3e +# @generated-id: f08c10d149f5 from __future__ import annotations -from .ftmodelcapabilitiesout import ( - FTModelCapabilitiesOut, - FTModelCapabilitiesOutTypedDict, +from .finetunedmodelcapabilities import ( + FineTunedModelCapabilities, + FineTunedModelCapabilitiesTypedDict, ) from mistralai.client.types import ( BaseModel, @@ -21,7 +21,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class CompletionFTModelOutTypedDict(TypedDict): +class CompletionFineTunedModelTypedDict(TypedDict): id: str created: int owned_by: str @@ -29,7 +29,7 @@ class CompletionFTModelOutTypedDict(TypedDict): root: str root_version: str archived: bool - capabilities: FTModelCapabilitiesOutTypedDict + capabilities: FineTunedModelCapabilitiesTypedDict job: str object: Literal["model"] name: NotRequired[Nullable[str]] @@ -39,7 +39,7 @@ class CompletionFTModelOutTypedDict(TypedDict): model_type: Literal["completion"] -class CompletionFTModelOut(BaseModel): +class CompletionFineTunedModel(BaseModel): id: str created: int @@ -54,11 +54,11 @@ class CompletionFTModelOut(BaseModel): archived: bool - capabilities: FTModelCapabilitiesOut + capabilities: FineTunedModelCapabilities job: str - OBJECT: Annotated[ + object: Annotated[ Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], pydantic.Field(alias="object"), ] = "model" @@ -71,43 +71,40 @@ class CompletionFTModelOut(BaseModel): aliases: Optional[List[str]] = None - MODEL_TYPE: Annotated[ + model_type: Annotated[ Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], pydantic.Field(alias="model_type"), ] = "completion" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "name", - "description", - "max_context_length", - "aliases", - ] - nullable_fields = ["name", "description"] - null_default_fields = [] - + optional_fields = set( + ["object", "name", "description", 
"max_context_length", "aliases"] + ) + nullable_fields = set(["name", "description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + CompletionFineTunedModel.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionjobout.py b/src/mistralai/client/models/completionfinetuningjob.py similarity index 56% rename from src/mistralai/client/models/completionjobout.py rename to src/mistralai/client/models/completionfinetuningjob.py index 42e5f6c6..1bf0a730 100644 --- a/src/mistralai/client/models/completionjobout.py +++ b/src/mistralai/client/models/completionfinetuningjob.py @@ -1,14 +1,17 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 712e6c524f9a +# @generated-id: c242237efe9b from __future__ import annotations from .completiontrainingparameters import ( CompletionTrainingParameters, CompletionTrainingParametersTypedDict, ) -from .githubrepositoryout import GithubRepositoryOut, GithubRepositoryOutTypedDict -from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict -from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict +from .githubrepository import GithubRepository, GithubRepositoryTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) from mistralai.client.types import ( BaseModel, Nullable, @@ -19,13 +22,13 @@ ) from mistralai.client.utils import validate_const import pydantic -from pydantic import model_serializer +from pydantic import ConfigDict, model_serializer from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional, Union +from typing import Any, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypedDict -CompletionJobOutStatus = Union[ +CompletionFineTuningJobStatus = Union[ Literal[ "QUEUED", "STARTED", @@ -43,24 +46,54 @@ r"""The current status of the fine-tuning job.""" -CompletionJobOutIntegrationTypedDict = WandbIntegrationOutTypedDict +CompletionFineTuningJobIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownCompletionFineTuningJobIntegration(BaseModel): + r"""A CompletionFineTuningJobIntegration variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +CompletionFineTuningJobIntegration = WandbIntegrationResult -CompletionJobOutIntegration = WandbIntegrationOut +CompletionFineTuningJobRepositoryTypedDict = GithubRepositoryTypedDict -CompletionJobOutRepositoryTypedDict = GithubRepositoryOutTypedDict +class UnknownCompletionFineTuningJobRepository(BaseModel): + r"""A CompletionFineTuningJobRepository variant the SDK doesn't recognize. Preserves the raw payload.""" + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True -CompletionJobOutRepository = GithubRepositoryOut + model_config = ConfigDict(frozen=True) -class CompletionJobOutTypedDict(TypedDict): +_COMPLETION_FINE_TUNING_JOB_REPOSITORY_VARIANTS: dict[str, Any] = { + "github": GithubRepository, +} + + +CompletionFineTuningJobRepository = GithubRepository + + +class CompletionFineTuningJobTypedDict(TypedDict): id: str r"""The ID of the job.""" auto_start: bool model: str - status: CompletionJobOutStatus + status: CompletionFineTuningJobStatus r"""The current status of the fine-tuning job.""" created_at: int r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created.""" @@ -77,17 +110,19 @@ class CompletionJobOutTypedDict(TypedDict): r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running.""" suffix: NotRequired[Nullable[str]] r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: NotRequired[Nullable[List[CompletionJobOutIntegrationTypedDict]]] + integrations: NotRequired[ + Nullable[List[CompletionFineTuningJobIntegrationTypedDict]] + ] r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: NotRequired[Nullable[int]] r"""Total number of tokens trained.""" - metadata: NotRequired[Nullable[JobMetadataOutTypedDict]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] job_type: Literal["completion"] r"""The type of job (`FT` for fine-tuning).""" - repositories: NotRequired[List[CompletionJobOutRepositoryTypedDict]] + repositories: NotRequired[List[CompletionFineTuningJobRepositoryTypedDict]] -class CompletionJobOut(BaseModel): +class CompletionFineTuningJob(BaseModel): id: str r"""The ID of the job.""" @@ -95,7 +130,7 @@ class CompletionJobOut(BaseModel): model: str - status: CompletionJobOutStatus + status: CompletionFineTuningJobStatus r"""The current status of the fine-tuning job.""" created_at: int @@ -112,7 +147,7 @@ class CompletionJobOut(BaseModel): validation_files: OptionalNullable[List[str]] = UNSET r"""A list containing the IDs of uploaded files that contain validation data.""" - OBJECT: Annotated[ + object: Annotated[ Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], pydantic.Field(alias="object"), ] = "job" @@ -124,64 +159,69 @@ class CompletionJobOut(BaseModel): suffix: OptionalNullable[str] = UNSET r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. 
When `suffix` is not provided, the model will simply execute completion starting with `prompt`.""" - integrations: OptionalNullable[List[CompletionJobOutIntegration]] = UNSET + integrations: OptionalNullable[List[CompletionFineTuningJobIntegration]] = UNSET r"""A list of integrations enabled for your fine-tuning job.""" trained_tokens: OptionalNullable[int] = UNSET r"""Total number of tokens trained.""" - metadata: OptionalNullable[JobMetadataOut] = UNSET + metadata: OptionalNullable[JobMetadata] = UNSET - JOB_TYPE: Annotated[ + job_type: Annotated[ Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], pydantic.Field(alias="job_type"), ] = "completion" r"""The type of job (`FT` for fine-tuning).""" - repositories: Optional[List[CompletionJobOutRepository]] = None + repositories: Optional[List[CompletionFineTuningJobRepository]] = None @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "validation_files", - "object", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - "repositories", - ] - nullable_fields = [ - "validation_files", - "fine_tuned_model", - "suffix", - "integrations", - "trained_tokens", - "metadata", - ] - null_default_fields = [] - + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "repositories", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or 
is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + CompletionFineTuningJob.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionfinetuningjobdetails.py b/src/mistralai/client/models/completionfinetuningjobdetails.py new file mode 100644 index 00000000..cb787021 --- /dev/null +++ b/src/mistralai/client/models/completionfinetuningjobdetails.py @@ -0,0 +1,216 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e8379265af48 + +from __future__ import annotations +from .checkpoint import Checkpoint, CheckpointTypedDict +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, +) +from .event import Event, EventTypedDict +from .githubrepository import GithubRepository, GithubRepositoryTypedDict +from .jobmetadata import JobMetadata, JobMetadataTypedDict +from .wandbintegrationresult import ( + WandbIntegrationResult, + WandbIntegrationResultTypedDict, +) +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) +from mistralai.client.utils import validate_const +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict + + +CompletionFineTuningJobDetailsStatus = Union[ + Literal[ + "QUEUED", + "STARTED", + "VALIDATING", + "VALIDATED", + 
"RUNNING", + "FAILED_VALIDATION", + "FAILED", + "SUCCESS", + "CANCELLED", + "CANCELLATION_REQUESTED", + ], + UnrecognizedStr, +] + + +CompletionFineTuningJobDetailsIntegrationTypedDict = WandbIntegrationResultTypedDict + + +class UnknownCompletionFineTuningJobDetailsIntegration(BaseModel): + r"""A CompletionFineTuningJobDetailsIntegration variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_DETAILS_INTEGRATION_VARIANTS: dict[str, Any] = { + "wandb": WandbIntegrationResult, +} + + +CompletionFineTuningJobDetailsIntegration = WandbIntegrationResult + + +CompletionFineTuningJobDetailsRepositoryTypedDict = GithubRepositoryTypedDict + + +class UnknownCompletionFineTuningJobDetailsRepository(BaseModel): + r"""A CompletionFineTuningJobDetailsRepository variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_COMPLETION_FINE_TUNING_JOB_DETAILS_REPOSITORY_VARIANTS: dict[str, Any] = { + "github": GithubRepository, +} + + +CompletionFineTuningJobDetailsRepository = GithubRepository + + +class CompletionFineTuningJobDetailsTypedDict(TypedDict): + id: str + auto_start: bool + model: str + status: CompletionFineTuningJobDetailsStatus + created_at: int + modified_at: int + training_files: List[str] + hyperparameters: CompletionTrainingParametersTypedDict + validation_files: NotRequired[Nullable[List[str]]] + object: Literal["job"] + fine_tuned_model: NotRequired[Nullable[str]] + suffix: NotRequired[Nullable[str]] + integrations: NotRequired[ + Nullable[List[CompletionFineTuningJobDetailsIntegrationTypedDict]] + ] + trained_tokens: NotRequired[Nullable[int]] + metadata: NotRequired[Nullable[JobMetadataTypedDict]] + job_type: Literal["completion"] + repositories: 
NotRequired[List[CompletionFineTuningJobDetailsRepositoryTypedDict]] + events: NotRequired[List[EventTypedDict]] + r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here.""" + checkpoints: NotRequired[List[CheckpointTypedDict]] + + +class CompletionFineTuningJobDetails(BaseModel): + id: str + + auto_start: bool + + model: str + + status: CompletionFineTuningJobDetailsStatus + + created_at: int + + modified_at: int + + training_files: List[str] + + hyperparameters: CompletionTrainingParameters + + validation_files: OptionalNullable[List[str]] = UNSET + + object: Annotated[ + Annotated[Optional[Literal["job"]], AfterValidator(validate_const("job"))], + pydantic.Field(alias="object"), + ] = "job" + + fine_tuned_model: OptionalNullable[str] = UNSET + + suffix: OptionalNullable[str] = UNSET + + integrations: OptionalNullable[List[CompletionFineTuningJobDetailsIntegration]] = ( + UNSET + ) + + trained_tokens: OptionalNullable[int] = UNSET + + metadata: OptionalNullable[JobMetadata] = UNSET + + job_type: Annotated[ + Annotated[Literal["completion"], AfterValidator(validate_const("completion"))], + pydantic.Field(alias="job_type"), + ] = "completion" + + repositories: Optional[List[CompletionFineTuningJobDetailsRepository]] = None + + events: Optional[List[Event]] = None + r"""Event items are created every time the status of a fine-tuning job changes. 
The timestamped list of all events is accessible here.""" + + checkpoints: Optional[List[Checkpoint]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "validation_files", + "object", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + "repositories", + "events", + "checkpoints", + ] + ) + nullable_fields = set( + [ + "validation_files", + "fine_tuned_model", + "suffix", + "integrations", + "trained_tokens", + "metadata", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + CompletionFineTuningJobDetails.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/completionresponsestreamchoice.py b/src/mistralai/client/models/completionresponsestreamchoice.py index 119a9690..a52ae892 100644 --- a/src/mistralai/client/models/completionresponsestreamchoice.py +++ b/src/mistralai/client/models/completionresponsestreamchoice.py @@ -35,30 +35,14 @@ class CompletionResponseStreamChoice(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["finish_reason"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != 
UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m diff --git a/src/mistralai/client/models/completiontrainingparameters.py b/src/mistralai/client/models/completiontrainingparameters.py index 4b846b1b..ca50a7ad 100644 --- a/src/mistralai/client/models/completiontrainingparameters.py +++ b/src/mistralai/client/models/completiontrainingparameters.py @@ -41,45 +41,44 @@ class CompletionTrainingParameters(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - null_default_fields = [] - + optional_fields = set( + [ + "training_steps", + "learning_rate", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + ) + nullable_fields = set( + [ + "training_steps", + "weight_decay", + "warmup_fraction", + "epochs", + "seq_len", + "fim_ratio", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git 
a/src/mistralai/client/models/completiontrainingparametersin.py b/src/mistralai/client/models/completiontrainingparametersin.py deleted file mode 100644 index 20b74ad9..00000000 --- a/src/mistralai/client/models/completiontrainingparametersin.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 0df22b873b5f - -from __future__ import annotations -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class CompletionTrainingParametersInTypedDict(TypedDict): - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - - training_steps: NotRequired[Nullable[int]] - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - learning_rate: NotRequired[float] - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - weight_decay: NotRequired[Nullable[float]] - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - warmup_fraction: NotRequired[Nullable[float]] - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - epochs: NotRequired[Nullable[float]] - seq_len: NotRequired[Nullable[int]] - fim_ratio: NotRequired[Nullable[float]] - - -class CompletionTrainingParametersIn(BaseModel): - r"""The fine-tuning hyperparameter settings used in a fine-tune job.""" - - training_steps: OptionalNullable[int] = UNSET - r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - - learning_rate: Optional[float] = 0.0001 - r"""A parameter describing how much to adjust the pre-trained model's weights in response to the estimated error each time the weights are updated during the fine-tuning process.""" - - weight_decay: OptionalNullable[float] = UNSET - r"""(Advanced Usage) Weight decay adds a term to the loss function that is proportional to the sum of the squared weights. This term reduces the magnitude of the weights and prevents them from growing too large.""" - - warmup_fraction: OptionalNullable[float] = UNSET - r"""(Advanced Usage) A parameter that specifies the percentage of the total training steps at which the learning rate warm-up phase ends. During this phase, the learning rate gradually increases from a small value to the initial learning rate, helping to stabilize the training process and improve convergence. 
Similar to `pct_start` in [mistral-finetune](https://github.com/mistralai/mistral-finetune)""" - - epochs: OptionalNullable[float] = UNSET - - seq_len: OptionalNullable[int] = UNSET - - fim_ratio: OptionalNullable[float] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = [ - "training_steps", - "learning_rate", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - nullable_fields = [ - "training_steps", - "weight_decay", - "warmup_fraction", - "epochs", - "seq_len", - "fim_ratio", - ] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/contentchunk.py b/src/mistralai/client/models/contentchunk.py index eff4b8c6..e3de7591 100644 --- a/src/mistralai/client/models/contentchunk.py +++ b/src/mistralai/client/models/contentchunk.py @@ -9,9 +9,12 @@ from .referencechunk import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict from .thinkchunk import ThinkChunk, ThinkChunkTypedDict -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag -from typing import Union +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, 
Literal, Union from typing_extensions import Annotated, TypeAliasType @@ -29,15 +32,45 @@ ) +class UnknownContentChunk(BaseModel): + r"""A ContentChunk variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONTENT_CHUNK_VARIANTS: dict[str, Any] = { + "image_url": ImageURLChunk, + "document_url": DocumentURLChunk, + "text": TextChunk, + "reference": ReferenceChunk, + "file": FileChunk, + "thinking": ThinkChunk, + "input_audio": AudioChunk, +} + + ContentChunk = Annotated[ Union[ - Annotated[ImageURLChunk, Tag("image_url")], - Annotated[DocumentURLChunk, Tag("document_url")], - Annotated[TextChunk, Tag("text")], - Annotated[ReferenceChunk, Tag("reference")], - Annotated[FileChunk, Tag("file")], - Annotated[ThinkChunk, Tag("thinking")], - Annotated[AudioChunk, Tag("input_audio")], + ImageURLChunk, + DocumentURLChunk, + TextChunk, + ReferenceChunk, + FileChunk, + ThinkChunk, + AudioChunk, + UnknownContentChunk, ], - Discriminator(lambda m: get_discriminator(m, "type", "type")), + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONTENT_CHUNK_VARIANTS, + unknown_cls=UnknownContentChunk, + union_name="ContentChunk", + ) + ), ] diff --git a/src/mistralai/client/models/conversationappendrequest.py b/src/mistralai/client/models/conversationappendrequest.py index 0f07475e..386714fd 100644 --- a/src/mistralai/client/models/conversationappendrequest.py +++ b/src/mistralai/client/models/conversationappendrequest.py @@ -4,8 +4,16 @@ from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.client.types import BaseModel -from typing import Literal, Optional +from .toolcallconfirmation import ToolCallConfirmation, ToolCallConfirmationTypedDict +from 
mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -16,17 +24,18 @@ class ConversationAppendRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict + inputs: NotRequired[ConversationInputsTypedDict] stream: NotRequired[bool] store: NotRequired[bool] r"""Whether to store the results into our servers or not.""" handoff_execution: NotRequired[ConversationAppendRequestHandoffExecution] completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + tool_confirmations: NotRequired[Nullable[List[ToolCallConfirmationTypedDict]]] class ConversationAppendRequest(BaseModel): - inputs: ConversationInputs + inputs: Optional[ConversationInputs] = None stream: Optional[bool] = False @@ -37,3 +46,39 @@ class ConversationAppendRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + + tool_confirmations: OptionalNullable[List[ToolCallConfirmation]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "tool_confirmations", + ] + ) + nullable_fields = set(["tool_confirmations"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationappendstreamrequest.py 
b/src/mistralai/client/models/conversationappendstreamrequest.py index a0d46f72..32f6b148 100644 --- a/src/mistralai/client/models/conversationappendstreamrequest.py +++ b/src/mistralai/client/models/conversationappendstreamrequest.py @@ -4,8 +4,16 @@ from __future__ import annotations from .completionargs import CompletionArgs, CompletionArgsTypedDict from .conversationinputs import ConversationInputs, ConversationInputsTypedDict -from mistralai.client.types import BaseModel -from typing import Literal, Optional +from .toolcallconfirmation import ToolCallConfirmation, ToolCallConfirmationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List, Literal, Optional from typing_extensions import NotRequired, TypedDict @@ -16,17 +24,18 @@ class ConversationAppendStreamRequestTypedDict(TypedDict): - inputs: ConversationInputsTypedDict + inputs: NotRequired[ConversationInputsTypedDict] stream: NotRequired[bool] store: NotRequired[bool] r"""Whether to store the results into our servers or not.""" handoff_execution: NotRequired[ConversationAppendStreamRequestHandoffExecution] completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" + tool_confirmations: NotRequired[Nullable[List[ToolCallConfirmationTypedDict]]] class ConversationAppendStreamRequest(BaseModel): - inputs: ConversationInputs + inputs: Optional[ConversationInputs] = None stream: Optional[bool] = True @@ -39,3 +48,39 @@ class ConversationAppendStreamRequest(BaseModel): completion_args: Optional[CompletionArgs] = None r"""White-listed arguments from the completion API""" + + tool_confirmations: OptionalNullable[List[ToolCallConfirmation]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + 
"tool_confirmations", + ] + ) + nullable_fields = set(["tool_confirmations"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/conversationevents.py b/src/mistralai/client/models/conversationevents.py index f2476038..17812983 100644 --- a/src/mistralai/client/models/conversationevents.py +++ b/src/mistralai/client/models/conversationevents.py @@ -25,9 +25,12 @@ ToolExecutionStartedEvent, ToolExecutionStartedEventTypedDict, ) +from functools import partial from mistralai.client.types import BaseModel -from pydantic import Field -from typing import Union +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -37,17 +40,41 @@ ResponseStartedEventTypedDict, ResponseDoneEventTypedDict, ResponseErrorEventTypedDict, - ToolExecutionStartedEventTypedDict, ToolExecutionDeltaEventTypedDict, ToolExecutionDoneEventTypedDict, AgentHandoffStartedEventTypedDict, AgentHandoffDoneEventTypedDict, - FunctionCallEventTypedDict, + ToolExecutionStartedEventTypedDict, MessageOutputEventTypedDict, + FunctionCallEventTypedDict, ], ) +class UnknownConversationEventsData(BaseModel): + r"""A ConversationEventsData variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_CONVERSATION_EVENTS_DATA_VARIANTS: dict[str, Any] = { + "agent.handoff.done": AgentHandoffDoneEvent, + "agent.handoff.started": AgentHandoffStartedEvent, + "conversation.response.done": ResponseDoneEvent, + "conversation.response.error": ResponseErrorEvent, + "conversation.response.started": ResponseStartedEvent, + "function.call.delta": FunctionCallEvent, + "message.output.delta": MessageOutputEvent, + "tool.execution.delta": ToolExecutionDeltaEvent, + "tool.execution.done": ToolExecutionDoneEvent, + "tool.execution.started": ToolExecutionStartedEvent, +} + + ConversationEventsData = Annotated[ Union[ AgentHandoffDoneEvent, @@ -60,8 +87,17 @@ ToolExecutionDeltaEvent, ToolExecutionDoneEvent, ToolExecutionStartedEvent, + UnknownConversationEventsData, ], - Field(discriminator="TYPE"), + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_CONVERSATION_EVENTS_DATA_VARIANTS, + unknown_cls=UnknownConversationEventsData, + union_name="ConversationEventsData", + ) + ), ] diff --git a/src/mistralai/client/models/conversationhistory.py b/src/mistralai/client/models/conversationhistory.py index 92d6cbf9..ceef115b 100644 --- a/src/mistralai/client/models/conversationhistory.py +++ b/src/mistralai/client/models/conversationhistory.py @@ -8,12 +8,13 @@ from .messageinputentry import MessageInputEntry, MessageInputEntryTypedDict from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, 
Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationHistoryObject = Literal["conversation.history",] +from typing_extensions import Annotated, TypeAliasType, TypedDict EntryTypedDict = TypeAliasType( @@ -21,10 +22,10 @@ Union[ FunctionResultEntryTypedDict, MessageInputEntryTypedDict, - FunctionCallEntryTypedDict, - ToolExecutionEntryTypedDict, MessageOutputEntryTypedDict, AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, ], ) @@ -34,10 +35,10 @@ Union[ FunctionResultEntry, MessageInputEntry, - FunctionCallEntry, - ToolExecutionEntry, MessageOutputEntry, AgentHandoffEntry, + ToolExecutionEntry, + FunctionCallEntry, ], ) @@ -47,7 +48,7 @@ class ConversationHistoryTypedDict(TypedDict): conversation_id: str entries: List[EntryTypedDict] - object: NotRequired[ConversationHistoryObject] + object: Literal["conversation.history"] class ConversationHistory(BaseModel): @@ -57,4 +58,32 @@ class ConversationHistory(BaseModel): entries: List[Entry] - object: Optional[ConversationHistoryObject] = "conversation.history" + object: Annotated[ + Annotated[ + Optional[Literal["conversation.history"]], + AfterValidator(validate_const("conversation.history")), + ], + pydantic.Field(alias="object"), + ] = "conversation.history" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationHistory.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationmessages.py b/src/mistralai/client/models/conversationmessages.py index 1aa294a4..84664b62 100644 --- a/src/mistralai/client/models/conversationmessages.py +++ 
b/src/mistralai/client/models/conversationmessages.py @@ -3,12 +3,13 @@ from __future__ import annotations from .messageentries import MessageEntries, MessageEntriesTypedDict -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ConversationMessagesObject = Literal["conversation.messages",] +from typing_extensions import Annotated, TypedDict class ConversationMessagesTypedDict(TypedDict): @@ -16,7 +17,7 @@ class ConversationMessagesTypedDict(TypedDict): conversation_id: str messages: List[MessageEntriesTypedDict] - object: NotRequired[ConversationMessagesObject] + object: Literal["conversation.messages"] class ConversationMessages(BaseModel): @@ -26,4 +27,32 @@ class ConversationMessages(BaseModel): messages: List[MessageEntries] - object: Optional[ConversationMessagesObject] = "conversation.messages" + object: Annotated[ + Annotated[ + Optional[Literal["conversation.messages"]], + AfterValidator(validate_const("conversation.messages")), + ], + pydantic.Field(alias="object"), + ] = "conversation.messages" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationMessages.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationrequest.py b/src/mistralai/client/models/conversationrequest.py index 2005be82..83d599eb 100644 --- a/src/mistralai/client/models/conversationrequest.py +++ 
b/src/mistralai/client/models/conversationrequest.py @@ -31,11 +31,11 @@ ConversationRequestToolTypedDict = TypeAliasType( "ConversationRequestToolTypedDict", Union[ + FunctionToolTypedDict, WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) @@ -50,7 +50,7 @@ WebSearchTool, WebSearchPremiumTool, ], - Field(discriminator="TYPE"), + Field(discriminator="type"), ] @@ -111,54 +111,53 @@ class ConversationRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - nullable_fields = [ - "store", - "handoff_execution", - "instructions", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - null_default_fields = [] - + optional_fields = set( + [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + nullable_fields = set( + [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set 
= ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/conversationresponse.py b/src/mistralai/client/models/conversationresponse.py index 24598ef3..f6c10969 100644 --- a/src/mistralai/client/models/conversationresponse.py +++ b/src/mistralai/client/models/conversationresponse.py @@ -7,28 +7,29 @@ from .functioncallentry import FunctionCallEntry, FunctionCallEntryTypedDict from .messageoutputentry import MessageOutputEntry, MessageOutputEntryTypedDict from .toolexecutionentry import ToolExecutionEntry, ToolExecutionEntryTypedDict -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ConversationResponseObject = Literal["conversation.response",] +from typing_extensions import Annotated, TypeAliasType, TypedDict OutputTypedDict = TypeAliasType( "OutputTypedDict", Union[ - ToolExecutionEntryTypedDict, - FunctionCallEntryTypedDict, MessageOutputEntryTypedDict, AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, ], ) Output = TypeAliasType( "Output", - Union[ToolExecutionEntry, FunctionCallEntry, MessageOutputEntry, AgentHandoffEntry], + Union[MessageOutputEntry, AgentHandoffEntry, ToolExecutionEntry, FunctionCallEntry], ) @@ -38,7 +39,7 @@ class ConversationResponseTypedDict(TypedDict): conversation_id: str outputs: List[OutputTypedDict] usage: ConversationUsageInfoTypedDict - object: NotRequired[ConversationResponseObject] + object: 
Literal["conversation.response"] class ConversationResponse(BaseModel): @@ -50,4 +51,32 @@ class ConversationResponse(BaseModel): usage: ConversationUsageInfo - object: Optional[ConversationResponseObject] = "conversation.response" + object: Annotated[ + Annotated[ + Optional[Literal["conversation.response"]], + AfterValidator(validate_const("conversation.response")), + ], + pydantic.Field(alias="object"), + ] = "conversation.response" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationrestartrequest.py b/src/mistralai/client/models/conversationrestartrequest.py index 35d30993..7ae16aff 100644 --- a/src/mistralai/client/models/conversationrestartrequest.py +++ b/src/mistralai/client/models/conversationrestartrequest.py @@ -37,8 +37,8 @@ class ConversationRestartRequestTypedDict(TypedDict): r"""Request to restart a new conversation from a given entry in the conversation.""" - inputs: ConversationInputsTypedDict from_entry_id: str + inputs: NotRequired[ConversationInputsTypedDict] stream: NotRequired[bool] store: NotRequired[bool] r"""Whether to store the results into our servers or not.""" @@ -56,10 +56,10 @@ class ConversationRestartRequestTypedDict(TypedDict): class ConversationRestartRequest(BaseModel): r"""Request to restart a new conversation from a given entry in the conversation.""" - inputs: ConversationInputs - from_entry_id: str + inputs: Optional[ConversationInputs] = None + stream: Optional[bool] = False store: Optional[bool] = True @@ -78,37 +78,35 @@ class ConversationRestartRequest(BaseModel): @model_serializer(mode="wrap") def 
serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "completion_args", - "metadata", - "agent_version", - ] - nullable_fields = ["metadata", "agent_version"] - null_default_fields = [] - + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + ) + nullable_fields = set(["metadata", "agent_version"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/conversationrestartstreamrequest.py b/src/mistralai/client/models/conversationrestartstreamrequest.py index 0ddfb130..0e247261 100644 --- a/src/mistralai/client/models/conversationrestartstreamrequest.py +++ b/src/mistralai/client/models/conversationrestartstreamrequest.py @@ -37,8 +37,8 @@ class ConversationRestartStreamRequestTypedDict(TypedDict): r"""Request to restart a new conversation from a given entry in the conversation.""" - inputs: ConversationInputsTypedDict from_entry_id: str + inputs: NotRequired[ConversationInputsTypedDict] stream: NotRequired[bool] store: NotRequired[bool] r"""Whether to store the results into our servers or not.""" @@ -56,10 +56,10 @@ class 
ConversationRestartStreamRequestTypedDict(TypedDict): class ConversationRestartStreamRequest(BaseModel): r"""Request to restart a new conversation from a given entry in the conversation.""" - inputs: ConversationInputs - from_entry_id: str + inputs: Optional[ConversationInputs] = None + stream: Optional[bool] = True store: Optional[bool] = True @@ -82,37 +82,35 @@ class ConversationRestartStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "completion_args", - "metadata", - "agent_version", - ] - nullable_fields = ["metadata", "agent_version"] - null_default_fields = [] - + optional_fields = set( + [ + "inputs", + "stream", + "store", + "handoff_execution", + "completion_args", + "metadata", + "agent_version", + ] + ) + nullable_fields = set(["metadata", "agent_version"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/conversationstreamrequest.py b/src/mistralai/client/models/conversationstreamrequest.py index 379a8f28..a20dccae 100644 --- a/src/mistralai/client/models/conversationstreamrequest.py +++ b/src/mistralai/client/models/conversationstreamrequest.py @@ 
-31,11 +31,11 @@ ConversationStreamRequestToolTypedDict = TypeAliasType( "ConversationStreamRequestToolTypedDict", Union[ + FunctionToolTypedDict, WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) @@ -50,7 +50,7 @@ WebSearchTool, WebSearchPremiumTool, ], - Field(discriminator="TYPE"), + Field(discriminator="type"), ] @@ -113,54 +113,53 @@ class ConversationStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "stream", - "store", - "handoff_execution", - "instructions", - "tools", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - nullable_fields = [ - "store", - "handoff_execution", - "instructions", - "completion_args", - "name", - "description", - "metadata", - "agent_id", - "agent_version", - "model", - ] - null_default_fields = [] - + optional_fields = set( + [ + "stream", + "store", + "handoff_execution", + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) + nullable_fields = set( + [ + "store", + "handoff_execution", + "instructions", + "completion_args", + "name", + "description", + "metadata", + "agent_id", + "agent_version", + "model", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/conversationthinkchunk.py b/src/mistralai/client/models/conversationthinkchunk.py new file mode 100644 index 00000000..e0e172e3 --- /dev/null +++ b/src/mistralai/client/models/conversationthinkchunk.py @@ -0,0 +1,65 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 77e59cde5c0f + +from __future__ import annotations +from .textchunk import TextChunk, TextChunkTypedDict +from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ConversationThinkChunkThinkingTypedDict = TypeAliasType( + "ConversationThinkChunkThinkingTypedDict", + Union[TextChunkTypedDict, ToolReferenceChunkTypedDict], +) + + +ConversationThinkChunkThinking = TypeAliasType( + "ConversationThinkChunkThinking", Union[TextChunk, ToolReferenceChunk] +) + + +class ConversationThinkChunkTypedDict(TypedDict): + thinking: List[ConversationThinkChunkThinkingTypedDict] + type: Literal["thinking"] + closed: NotRequired[bool] + + +class ConversationThinkChunk(BaseModel): + thinking: List[ConversationThinkChunkThinking] + + type: Annotated[ + Annotated[ + Optional[Literal["thinking"]], AfterValidator(validate_const("thinking")) + ], + pydantic.Field(alias="type"), + ] = "thinking" + + closed: Optional[bool] = True + + @model_serializer(mode="wrap") + def serialize_model(self, handler): 
+ optional_fields = set(["type", "closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ConversationThinkChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/conversationusageinfo.py b/src/mistralai/client/models/conversationusageinfo.py index 98db0f16..1e80f89e 100644 --- a/src/mistralai/client/models/conversationusageinfo.py +++ b/src/mistralai/client/models/conversationusageinfo.py @@ -35,36 +35,33 @@ class ConversationUsageInfo(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "connector_tokens", - "connectors", - ] - nullable_fields = ["connector_tokens", "connectors"] - null_default_fields = [] - + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "connector_tokens", + "connectors", + ] + ) + nullable_fields = set(["connector_tokens", "connectors"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git 
a/src/mistralai/client/models/agentcreationrequest.py b/src/mistralai/client/models/createagentrequest.py similarity index 66% rename from src/mistralai/client/models/agentcreationrequest.py rename to src/mistralai/client/models/createagentrequest.py index 898d42a9..54b09880 100644 --- a/src/mistralai/client/models/agentcreationrequest.py +++ b/src/mistralai/client/models/createagentrequest.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 35b7f4933b3e +# @generated-id: 442629bd914b from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict @@ -21,20 +21,20 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentCreationRequestToolTypedDict = TypeAliasType( - "AgentCreationRequestToolTypedDict", +CreateAgentRequestToolTypedDict = TypeAliasType( + "CreateAgentRequestToolTypedDict", Union[ + FunctionToolTypedDict, WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) -AgentCreationRequestTool = Annotated[ +CreateAgentRequestTool = Annotated[ Union[ CodeInterpreterTool, DocumentLibraryTool, @@ -43,16 +43,16 @@ WebSearchTool, WebSearchPremiumTool, ], - Field(discriminator="TYPE"), + Field(discriminator="type"), ] -class AgentCreationRequestTypedDict(TypedDict): +class CreateAgentRequestTypedDict(TypedDict): model: str name: str instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentCreationRequestToolTypedDict]] + tools: NotRequired[List[CreateAgentRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -62,7 +62,7 @@ 
class AgentCreationRequestTypedDict(TypedDict): version_message: NotRequired[Nullable[str]] -class AgentCreationRequest(BaseModel): +class CreateAgentRequest(BaseModel): model: str name: str @@ -70,7 +70,7 @@ class AgentCreationRequest(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[AgentCreationRequestTool]] = None + tools: Optional[List[CreateAgentRequestTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: Optional[CompletionArgs] = None @@ -86,44 +86,37 @@ class AgentCreationRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "description", - "handoffs", - "metadata", - "version_message", - ] - nullable_fields = [ - "instructions", - "description", - "handoffs", - "metadata", - "version_message", - ] - null_default_fields = [] - + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "description", + "handoffs", + "metadata", + "version_message", + ] + ) + nullable_fields = set( + ["instructions", "description", "handoffs", "metadata", "version_message"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k 
not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/batchjobin.py b/src/mistralai/client/models/createbatchjobrequest.py similarity index 76% rename from src/mistralai/client/models/batchjobin.py rename to src/mistralai/client/models/createbatchjobrequest.py index a0c3b914..9a901fef 100644 --- a/src/mistralai/client/models/batchjobin.py +++ b/src/mistralai/client/models/createbatchjobrequest.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 72b25c2038d4 +# @generated-id: 56e24cd24e98 from __future__ import annotations from .apiendpoint import APIEndpoint @@ -16,7 +16,7 @@ from typing_extensions import NotRequired, TypedDict -class BatchJobInTypedDict(TypedDict): +class CreateBatchJobRequestTypedDict(TypedDict): endpoint: APIEndpoint input_files: NotRequired[Nullable[List[str]]] r"""The list of input files to be used for batch inference, these files should be `jsonl` files, containing the input data corresponding to the bory request for the batch inference in a \"body\" field. 
An example of such file is the following: ```json {\"custom_id\": \"0\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French cheese?\"}]}} {\"custom_id\": \"1\", \"body\": {\"max_tokens\": 100, \"messages\": [{\"role\": \"user\", \"content\": \"What is the best French wine?\"}]}} ```""" @@ -31,7 +31,7 @@ class BatchJobInTypedDict(TypedDict): r"""The timeout in hours for the batch inference job.""" -class BatchJobIn(BaseModel): +class CreateBatchJobRequest(BaseModel): endpoint: APIEndpoint input_files: OptionalNullable[List[str]] = UNSET @@ -53,37 +53,36 @@ class BatchJobIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "input_files", - "requests", - "model", - "agent_id", - "metadata", - "timeout_hours", - ] - nullable_fields = ["input_files", "requests", "model", "agent_id", "metadata"] - null_default_fields = [] - + optional_fields = set( + [ + "input_files", + "requests", + "model", + "agent_id", + "metadata", + "timeout_hours", + ] + ) + nullable_fields = set( + ["input_files", "requests", "model", "agent_id", "metadata"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git 
a/src/mistralai/client/models/uploadfileout.py b/src/mistralai/client/models/createfileresponse.py similarity index 69% rename from src/mistralai/client/models/uploadfileout.py rename to src/mistralai/client/models/createfileresponse.py index be291efb..76821280 100644 --- a/src/mistralai/client/models/uploadfileout.py +++ b/src/mistralai/client/models/createfileresponse.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 42466f2bebfb +# @generated-id: fea5e4832dcc from __future__ import annotations from .filepurpose import FilePurpose @@ -17,7 +17,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class UploadFileOutTypedDict(TypedDict): +class CreateFileResponseTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" object: str @@ -36,7 +36,7 @@ class UploadFileOutTypedDict(TypedDict): signature: NotRequired[Nullable[str]] -class UploadFileOut(BaseModel): +class CreateFileResponse(BaseModel): id: str r"""The unique identifier of the file.""" @@ -66,30 +66,31 @@ class UploadFileOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", "signature"] - null_default_fields = [] - + optional_fields = set(["num_lines", "mimetype", "signature"]) + nullable_fields = set(["num_lines", "mimetype", "signature"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = 
( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + CreateFileResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/createfinetuningjobop.py b/src/mistralai/client/models/createfinetuningjobop.py deleted file mode 100644 index f55deef5..00000000 --- a/src/mistralai/client/models/createfinetuningjobop.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: fd3c305df250 - -from __future__ import annotations -from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict -from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict -from .legacyjobmetadataout import LegacyJobMetadataOut, LegacyJobMetadataOutTypedDict -from pydantic import Field -from typing import Union -from typing_extensions import Annotated, TypeAliasType - - -ResponseTypedDict = TypeAliasType( - "ResponseTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] -) - - -Response = Annotated[ - Union[ClassifierJobOut, CompletionJobOut], Field(discriminator="JOB_TYPE") -] - - -CreateFineTuningJobResponseTypedDict = TypeAliasType( - "CreateFineTuningJobResponseTypedDict", - Union[LegacyJobMetadataOutTypedDict, ResponseTypedDict], -) -r"""OK""" - - -CreateFineTuningJobResponse = TypeAliasType( - "CreateFineTuningJobResponse", Union[LegacyJobMetadataOut, Response] -) -r"""OK""" diff --git a/src/mistralai/client/models/jobin.py b/src/mistralai/client/models/createfinetuningjobrequest.py similarity index 56% rename from src/mistralai/client/models/jobin.py rename to src/mistralai/client/models/createfinetuningjobrequest.py index b3cb8998..e328d944 100644 --- 
a/src/mistralai/client/models/jobin.py +++ b/src/mistralai/client/models/createfinetuningjobrequest.py @@ -1,15 +1,15 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: f4d176123ccc +# @generated-id: c60d2a45d66b from __future__ import annotations -from .classifiertargetin import ClassifierTargetIn, ClassifierTargetInTypedDict -from .classifiertrainingparametersin import ( - ClassifierTrainingParametersIn, - ClassifierTrainingParametersInTypedDict, +from .classifiertarget import ClassifierTarget, ClassifierTargetTypedDict +from .classifiertrainingparameters import ( + ClassifierTrainingParameters, + ClassifierTrainingParametersTypedDict, ) -from .completiontrainingparametersin import ( - CompletionTrainingParametersIn, - CompletionTrainingParametersInTypedDict, +from .completiontrainingparameters import ( + CompletionTrainingParameters, + CompletionTrainingParametersTypedDict, ) from .finetuneablemodeltype import FineTuneableModelType from .githubrepositoryin import GithubRepositoryIn, GithubRepositoryInTypedDict @@ -27,33 +27,30 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -JobInIntegrationTypedDict = WandbIntegrationTypedDict +CreateFineTuningJobRequestIntegrationTypedDict = WandbIntegrationTypedDict -JobInIntegration = WandbIntegration +CreateFineTuningJobRequestIntegration = WandbIntegration HyperparametersTypedDict = TypeAliasType( "HyperparametersTypedDict", - Union[ - ClassifierTrainingParametersInTypedDict, CompletionTrainingParametersInTypedDict - ], + Union[ClassifierTrainingParametersTypedDict, CompletionTrainingParametersTypedDict], ) Hyperparameters = TypeAliasType( - "Hyperparameters", - Union[ClassifierTrainingParametersIn, CompletionTrainingParametersIn], + "Hyperparameters", Union[ClassifierTrainingParameters, CompletionTrainingParameters] ) -JobInRepositoryTypedDict = GithubRepositoryInTypedDict 
+CreateFineTuningJobRequestRepositoryTypedDict = GithubRepositoryInTypedDict -JobInRepository = GithubRepositoryIn +CreateFineTuningJobRequestRepository = GithubRepositoryIn -class JobInTypedDict(TypedDict): +class CreateFineTuningJobRequestTypedDict(TypedDict): model: str hyperparameters: HyperparametersTypedDict training_files: NotRequired[List[TrainingFileTypedDict]] @@ -61,17 +58,21 @@ class JobInTypedDict(TypedDict): r"""A list containing the IDs of uploaded files that contain validation data. If you provide these files, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in `checkpoints` when getting the status of a running fine-tuning job. The same data should not be present in both train and validation files.""" suffix: NotRequired[Nullable[str]] r"""A string that will be added to your fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - integrations: NotRequired[Nullable[List[JobInIntegrationTypedDict]]] + integrations: NotRequired[ + Nullable[List[CreateFineTuningJobRequestIntegrationTypedDict]] + ] r"""A list of integrations to enable for your fine-tuning job.""" auto_start: NotRequired[bool] r"""This field will be required in a future release.""" invalid_sample_skip_percentage: NotRequired[float] job_type: NotRequired[Nullable[FineTuneableModelType]] - repositories: NotRequired[Nullable[List[JobInRepositoryTypedDict]]] - classifier_targets: NotRequired[Nullable[List[ClassifierTargetInTypedDict]]] + repositories: NotRequired[ + Nullable[List[CreateFineTuningJobRequestRepositoryTypedDict]] + ] + classifier_targets: NotRequired[Nullable[List[ClassifierTargetTypedDict]]] -class JobIn(BaseModel): +class CreateFineTuningJobRequest(BaseModel): model: str hyperparameters: Hyperparameters @@ -84,7 +85,7 @@ class JobIn(BaseModel): suffix: OptionalNullable[str] = UNSET r"""A string that will be added to your 
fine-tuning model name. For example, a suffix of \"my-great-model\" would produce a model name like `ft:open-mistral-7b:my-great-model:xxx...`""" - integrations: OptionalNullable[List[JobInIntegration]] = UNSET + integrations: OptionalNullable[List[CreateFineTuningJobRequestIntegration]] = UNSET r"""A list of integrations to enable for your fine-tuning job.""" auto_start: Optional[bool] = None @@ -94,53 +95,52 @@ class JobIn(BaseModel): job_type: OptionalNullable[FineTuneableModelType] = UNSET - repositories: OptionalNullable[List[JobInRepository]] = UNSET + repositories: OptionalNullable[List[CreateFineTuningJobRequestRepository]] = UNSET - classifier_targets: OptionalNullable[List[ClassifierTargetIn]] = UNSET + classifier_targets: OptionalNullable[List[ClassifierTarget]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "training_files", - "validation_files", - "suffix", - "integrations", - "auto_start", - "invalid_sample_skip_percentage", - "job_type", - "repositories", - "classifier_targets", - ] - nullable_fields = [ - "validation_files", - "suffix", - "integrations", - "job_type", - "repositories", - "classifier_targets", - ] - null_default_fields = [] - + optional_fields = set( + [ + "training_files", + "validation_files", + "suffix", + "integrations", + "auto_start", + "invalid_sample_skip_percentage", + "job_type", + "repositories", + "classifier_targets", + ] + ) + nullable_fields = set( + [ + "validation_files", + "suffix", + "integrations", + "job_type", + "repositories", + "classifier_targets", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - 
elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/libraryin.py b/src/mistralai/client/models/createlibraryrequest.py similarity index 50% rename from src/mistralai/client/models/libraryin.py rename to src/mistralai/client/models/createlibraryrequest.py index 1a71d410..58874e01 100644 --- a/src/mistralai/client/models/libraryin.py +++ b/src/mistralai/client/models/createlibraryrequest.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 6147d5df71d9 +# @generated-id: 1c489bec2f53 from __future__ import annotations from mistralai.client.types import ( @@ -13,13 +13,13 @@ from typing_extensions import NotRequired, TypedDict -class LibraryInTypedDict(TypedDict): +class CreateLibraryRequestTypedDict(TypedDict): name: str description: NotRequired[Nullable[str]] chunk_size: NotRequired[Nullable[int]] -class LibraryIn(BaseModel): +class CreateLibraryRequest(BaseModel): name: str description: OptionalNullable[str] = UNSET @@ -28,30 +28,25 @@ class LibraryIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["description", "chunk_size"] - nullable_fields = ["description", "chunk_size"] - null_default_fields = [] - + optional_fields = set(["description", "chunk_size"]) + nullable_fields = set(["description", "chunk_size"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - 
is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/deletemodelop.py b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py similarity index 76% rename from src/mistralai/client/models/deletemodelop.py rename to src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py index 55c4b242..199614f5 100644 --- a/src/mistralai/client/models/deletemodelop.py +++ b/src/mistralai/client/models/delete_model_v1_models_model_id_deleteop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 2c494d99a44d +# @generated-id: 767aba526e43 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class DeleteModelRequestTypedDict(TypedDict): +class DeleteModelV1ModelsModelIDDeleteRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to delete.""" -class DeleteModelRequest(BaseModel): +class DeleteModelV1ModelsModelIDDeleteRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deletefileout.py b/src/mistralai/client/models/deletefileresponse.py similarity index 82% rename from src/mistralai/client/models/deletefileout.py rename to src/mistralai/client/models/deletefileresponse.py index c721f32c..ffd0e0d0 100644 --- a/src/mistralai/client/models/deletefileout.py +++ b/src/mistralai/client/models/deletefileresponse.py @@ -1,12 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 5578701e7327 +# @generated-id: 3ee464763a32 from __future__ import annotations from mistralai.client.types import BaseModel from typing_extensions import TypedDict -class DeleteFileOutTypedDict(TypedDict): +class DeleteFileResponseTypedDict(TypedDict): id: str r"""The ID of the deleted file.""" object: str @@ -15,7 +15,7 @@ class DeleteFileOutTypedDict(TypedDict): r"""The deletion status.""" -class DeleteFileOut(BaseModel): +class DeleteFileResponse(BaseModel): id: str r"""The ID of the deleted file.""" diff --git a/src/mistralai/client/models/deletemodelout.py b/src/mistralai/client/models/deletemodelout.py index bf22ed17..fa0c20a4 100644 --- a/src/mistralai/client/models/deletemodelout.py +++ b/src/mistralai/client/models/deletemodelout.py @@ -2,7 +2,8 @@ # @generated-id: ef6a1671c739 from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -25,3 +26,19 @@ class DeleteModelOut(BaseModel): deleted: Optional[bool] = True r"""The deletion status""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "deleted"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/deltamessage.py b/src/mistralai/client/models/deltamessage.py index fbb8231a..d9fa230e 100644 --- a/src/mistralai/client/models/deltamessage.py +++ b/src/mistralai/client/models/deltamessage.py @@ -41,30 +41,25 @@ class DeltaMessage(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["role", "content", "tool_calls"] - nullable_fields = ["role", 
"content", "tool_calls"] - null_default_fields = [] - + optional_fields = set(["role", "content", "tool_calls"]) + nullable_fields = set(["role", "content", "tool_calls"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/documentout.py b/src/mistralai/client/models/document.py similarity index 60% rename from src/mistralai/client/models/documentout.py rename to src/mistralai/client/models/document.py index 3b1a5713..31eebbd1 100644 --- a/src/mistralai/client/models/documentout.py +++ b/src/mistralai/client/models/document.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 7a85b9dca506 +# @generated-id: fbbf7428328c from __future__ import annotations from datetime import datetime @@ -15,7 +15,7 @@ from typing_extensions import NotRequired, TypedDict -class DocumentOutTypedDict(TypedDict): +class DocumentTypedDict(TypedDict): id: str library_id: str hash: Nullable[str] @@ -24,9 +24,9 @@ class DocumentOutTypedDict(TypedDict): size: Nullable[int] name: str created_at: datetime - processing_status: str uploaded_by_id: Nullable[str] uploaded_by_type: str + processing_status: str tokens_processing_total: int summary: NotRequired[Nullable[str]] last_processed_at: NotRequired[Nullable[datetime]] @@ -37,7 +37,7 @@ class DocumentOutTypedDict(TypedDict): attributes: NotRequired[Nullable[Dict[str, Any]]] -class DocumentOut(BaseModel): +class Document(BaseModel): id: str library_id: str @@ -54,12 +54,12 @@ class DocumentOut(BaseModel): created_at: datetime - processing_status: str - uploaded_by_id: Nullable[str] uploaded_by_type: str + processing_status: str + tokens_processing_total: int summary: OptionalNullable[str] = UNSET @@ -78,51 +78,50 @@ class DocumentOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "summary", - "last_processed_at", - "number_of_pages", - "tokens_processing_main_content", - "tokens_processing_summary", - "url", - "attributes", - ] - nullable_fields = [ - "hash", - "mime_type", - "extension", - "size", - "summary", - "last_processed_at", - "number_of_pages", - "uploaded_by_id", - "tokens_processing_main_content", - "tokens_processing_summary", - "url", - "attributes", - ] - null_default_fields = [] - + optional_fields = set( + [ + "summary", + "last_processed_at", + "number_of_pages", + "tokens_processing_main_content", + "tokens_processing_summary", + "url", + "attributes", + ] + ) + nullable_fields = set( + [ + "hash", + "mime_type", + "extension", + "size", + "summary", + "last_processed_at", + "number_of_pages", + 
"uploaded_by_id", + "tokens_processing_main_content", + "tokens_processing_summary", + "url", + "attributes", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/documentlibrarytool.py b/src/mistralai/client/models/documentlibrarytool.py index ff0f7393..642c3202 100644 --- a/src/mistralai/client/models/documentlibrarytool.py +++ b/src/mistralai/client/models/documentlibrarytool.py @@ -2,17 +2,26 @@ # @generated-id: 3eb3c218f457 from __future__ import annotations -from mistralai.client.types import BaseModel +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import List, Literal -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class DocumentLibraryToolTypedDict(TypedDict): library_ids: List[str] r"""Ids of the library in which to search.""" + tool_configuration: 
NotRequired[Nullable[ToolConfigurationTypedDict]] type: Literal["document_library"] @@ -20,10 +29,43 @@ class DocumentLibraryTool(BaseModel): library_ids: List[str] r"""Ids of the library in which to search.""" - TYPE: Annotated[ + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ Annotated[ Literal["document_library"], AfterValidator(validate_const("document_library")), ], pydantic.Field(alias="type"), ] = "document_library" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + DocumentLibraryTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/documenturlchunk.py b/src/mistralai/client/models/documenturlchunk.py index 304cde2b..43444d98 100644 --- a/src/mistralai/client/models/documenturlchunk.py +++ b/src/mistralai/client/models/documenturlchunk.py @@ -9,55 +9,62 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -DocumentURLChunkType = Literal["document_url",] +from typing_extensions import Annotated, NotRequired, TypedDict class DocumentURLChunkTypedDict(TypedDict): document_url: str + type: Literal["document_url"] document_name: NotRequired[Nullable[str]] r"""The filename of the document""" - type: 
NotRequired[DocumentURLChunkType] class DocumentURLChunk(BaseModel): document_url: str + type: Annotated[ + Annotated[ + Optional[Literal["document_url"]], + AfterValidator(validate_const("document_url")), + ], + pydantic.Field(alias="type"), + ] = "document_url" + document_name: OptionalNullable[str] = UNSET r"""The filename of the document""" - type: Optional[DocumentURLChunkType] = "document_url" - @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["document_name", "type"] - nullable_fields = ["document_name"] - null_default_fields = [] - + optional_fields = set(["type", "document_name"]) + nullable_fields = set(["document_name"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + DocumentURLChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/embeddingrequest.py b/src/mistralai/client/models/embeddingrequest.py index f4537ffa..15950590 100644 --- a/src/mistralai/client/models/embeddingrequest.py +++ b/src/mistralai/client/models/embeddingrequest.py @@ -57,35 +57,33 @@ class EmbeddingRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "metadata", - "output_dimension", 
- "output_dtype", - "encoding_format", - ] - nullable_fields = ["metadata", "output_dimension"] - null_default_fields = [] - + optional_fields = set( + ["metadata", "output_dimension", "output_dtype", "encoding_format"] + ) + nullable_fields = set(["metadata", "output_dimension"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + EmbeddingRequest.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/embeddingresponsedata.py b/src/mistralai/client/models/embeddingresponsedata.py index a689b290..098cfae0 100644 --- a/src/mistralai/client/models/embeddingresponsedata.py +++ b/src/mistralai/client/models/embeddingresponsedata.py @@ -2,7 +2,8 @@ # @generated-id: 6d6ead6f3803 from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import List, Optional from typing_extensions import NotRequired, TypedDict @@ -19,3 +20,19 @@ class EmbeddingResponseData(BaseModel): embedding: Optional[List[float]] = None index: Optional[int] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", 
"embedding", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/eventout.py b/src/mistralai/client/models/event.py similarity index 56% rename from src/mistralai/client/models/eventout.py rename to src/mistralai/client/models/event.py index a0247555..c40ae2b1 100644 --- a/src/mistralai/client/models/eventout.py +++ b/src/mistralai/client/models/event.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: da8ad645a9cb +# @generated-id: e5a68ac2dd57 from __future__ import annotations from mistralai.client.types import ( @@ -14,7 +14,7 @@ from typing_extensions import NotRequired, TypedDict -class EventOutTypedDict(TypedDict): +class EventTypedDict(TypedDict): name: str r"""The name of the event.""" created_at: int @@ -22,7 +22,7 @@ class EventOutTypedDict(TypedDict): data: NotRequired[Nullable[Dict[str, Any]]] -class EventOut(BaseModel): +class Event(BaseModel): name: str r"""The name of the event.""" @@ -33,30 +33,25 @@ class EventOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["data"] - nullable_fields = ["data"] - null_default_fields = [] - + optional_fields = set(["data"]) + nullable_fields = set(["data"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or 
(optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/file.py b/src/mistralai/client/models/file.py index dbbc00b5..1b0ea1d4 100644 --- a/src/mistralai/client/models/file.py +++ b/src/mistralai/client/models/file.py @@ -3,9 +3,10 @@ from __future__ import annotations import io -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import FieldMetadata, MultipartFormMetadata import pydantic +from pydantic import model_serializer from typing import IO, Optional, Union from typing_extensions import Annotated, NotRequired, TypedDict @@ -32,3 +33,19 @@ class File(BaseModel): pydantic.Field(alias="Content-Type"), FieldMetadata(multipart=True), ] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["contentType"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/filechunk.py b/src/mistralai/client/models/filechunk.py index 43ef22f8..5c8d2646 100644 --- a/src/mistralai/client/models/filechunk.py +++ b/src/mistralai/client/models/filechunk.py @@ -2,9 +2,10 @@ # @generated-id: ff3c2d33ab1e from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from 
typing import Literal, Optional from typing_extensions import Annotated, TypedDict @@ -18,7 +19,29 @@ class FileChunkTypedDict(TypedDict): class FileChunk(BaseModel): file_id: str - TYPE: Annotated[ + type: Annotated[ Annotated[Optional[Literal["file"]], AfterValidator(validate_const("file"))], pydantic.Field(alias="type"), ] = "file" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + FileChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/downloadfileop.py b/src/mistralai/client/models/files_api_routes_delete_fileop.py similarity index 74% rename from src/mistralai/client/models/downloadfileop.py rename to src/mistralai/client/models/files_api_routes_delete_fileop.py index fcdc01d6..eaba274b 100644 --- a/src/mistralai/client/models/downloadfileop.py +++ b/src/mistralai/client/models/files_api_routes_delete_fileop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 4d051f08057d +# @generated-id: 2f385cc6138f from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class DownloadFileRequestTypedDict(TypedDict): +class FilesAPIRoutesDeleteFileRequestTypedDict(TypedDict): file_id: str -class DownloadFileRequest(BaseModel): +class FilesAPIRoutesDeleteFileRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deletefileop.py b/src/mistralai/client/models/files_api_routes_download_fileop.py similarity index 73% rename from src/mistralai/client/models/deletefileop.py rename to src/mistralai/client/models/files_api_routes_download_fileop.py index 4feb7812..83de8e73 100644 --- a/src/mistralai/client/models/deletefileop.py +++ b/src/mistralai/client/models/files_api_routes_download_fileop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 286b4e583638 +# @generated-id: 8184ee3577c3 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class DeleteFileRequestTypedDict(TypedDict): +class FilesAPIRoutesDownloadFileRequestTypedDict(TypedDict): file_id: str -class DeleteFileRequest(BaseModel): +class FilesAPIRoutesDownloadFileRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getfilesignedurlop.py b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py similarity index 51% rename from src/mistralai/client/models/getfilesignedurlop.py rename to src/mistralai/client/models/files_api_routes_get_signed_urlop.py index 06ed79ee..64cd6ac5 100644 --- a/src/mistralai/client/models/getfilesignedurlop.py +++ b/src/mistralai/client/models/files_api_routes_get_signed_urlop.py @@ -1,20 +1,21 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 1aa50b81c8cf +# @generated-id: 0a1a18c6431e from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata +from pydantic import model_serializer from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict -class GetFileSignedURLRequestTypedDict(TypedDict): +class FilesAPIRoutesGetSignedURLRequestTypedDict(TypedDict): file_id: str expiry: NotRequired[int] r"""Number of hours before the url becomes invalid. 
Defaults to 24h""" -class GetFileSignedURLRequest(BaseModel): +class FilesAPIRoutesGetSignedURLRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] @@ -24,3 +25,19 @@ class GetFileSignedURLRequest(BaseModel): FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = 24 r"""Number of hours before the url becomes invalid. Defaults to 24h""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["expiry"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/listfilesop.py b/src/mistralai/client/models/files_api_routes_list_filesop.py similarity index 70% rename from src/mistralai/client/models/listfilesop.py rename to src/mistralai/client/models/files_api_routes_list_filesop.py index a9af5c70..b03e2f88 100644 --- a/src/mistralai/client/models/listfilesop.py +++ b/src/mistralai/client/models/files_api_routes_list_filesop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: e5bd46ac0145 +# @generated-id: b2e92f2a29b4 from __future__ import annotations from .filepurpose import FilePurpose @@ -18,7 +18,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class ListFilesRequestTypedDict(TypedDict): +class FilesAPIRoutesListFilesRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] include_total: NotRequired[bool] @@ -29,7 +29,7 @@ class ListFilesRequestTypedDict(TypedDict): mimetypes: NotRequired[Nullable[List[str]]] -class ListFilesRequest(BaseModel): +class FilesAPIRoutesListFilesRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -72,39 +72,38 @@ class ListFilesRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "include_total", - "sample_type", - "source", - "search", - "purpose", - "mimetypes", - ] - nullable_fields = ["sample_type", "source", "search", "purpose", "mimetypes"] - null_default_fields = [] - + optional_fields = set( + [ + "page", + "page_size", + "include_total", + "sample_type", + "source", + "search", + "purpose", + "mimetypes", + ] + ) + nullable_fields = set( + ["sample_type", "source", "search", "purpose", "mimetypes"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if 
val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/retrievefileop.py b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py similarity index 73% rename from src/mistralai/client/models/retrievefileop.py rename to src/mistralai/client/models/files_api_routes_retrieve_fileop.py index edd50e57..5f8de05f 100644 --- a/src/mistralai/client/models/retrievefileop.py +++ b/src/mistralai/client/models/files_api_routes_retrieve_fileop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: ee73efdf9180 +# @generated-id: 5d5dbb8d5f7a from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class RetrieveFileRequestTypedDict(TypedDict): +class FilesAPIRoutesRetrieveFileRequestTypedDict(TypedDict): file_id: str -class RetrieveFileRequest(BaseModel): +class FilesAPIRoutesRetrieveFileRequest(BaseModel): file_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/uploadfileop.py b/src/mistralai/client/models/files_api_routes_upload_fileop.py similarity index 70% rename from src/mistralai/client/models/uploadfileop.py rename to src/mistralai/client/models/files_api_routes_upload_fileop.py index 50848f0b..54ff4e49 100644 --- a/src/mistralai/client/models/uploadfileop.py +++ b/src/mistralai/client/models/files_api_routes_upload_fileop.py @@ -1,11 +1,12 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: d67619670938 +# @generated-id: f13b84de6fa7 from __future__ import annotations from .file import File, FileTypedDict from .filepurpose import FilePurpose -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import FieldMetadata, MultipartFormMetadata +from pydantic import model_serializer from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -39,3 +40,19 @@ class MultiPartBodyParams(BaseModel): """ purpose: Annotated[Optional[FilePurpose], FieldMetadata(multipart=True)] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["purpose"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/fileschema.py b/src/mistralai/client/models/fileschema.py index cbe9b0d1..e99066a9 100644 --- a/src/mistralai/client/models/fileschema.py +++ b/src/mistralai/client/models/fileschema.py @@ -66,30 +66,31 @@ class FileSchema(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", "signature"] - null_default_fields = [] - + optional_fields = set(["num_lines", "mimetype", "signature"]) + nullable_fields = set(["num_lines", "mimetype", "signature"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or 
is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + FileSchema.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/fimcompletionrequest.py b/src/mistralai/client/models/fimcompletionrequest.py index e2f60327..ea877213 100644 --- a/src/mistralai/client/models/fimcompletionrequest.py +++ b/src/mistralai/client/models/fimcompletionrequest.py @@ -85,47 +85,46 @@ class FIMCompletionRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val 
+ is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/fimcompletionstreamrequest.py b/src/mistralai/client/models/fimcompletionstreamrequest.py index 480ed17a..e80efc09 100644 --- a/src/mistralai/client/models/fimcompletionstreamrequest.py +++ b/src/mistralai/client/models/fimcompletionstreamrequest.py @@ -83,47 +83,46 @@ class FIMCompletionStreamRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "temperature", - "top_p", - "max_tokens", - "stream", - "stop", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - nullable_fields = [ - "temperature", - "max_tokens", - "random_seed", - "metadata", - "suffix", - "min_tokens", - ] - null_default_fields = [] - + optional_fields = set( + [ + "temperature", + "top_p", + "max_tokens", + "stream", + "stop", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) + nullable_fields = set( + [ + "temperature", + "max_tokens", + "random_seed", + "metadata", + "suffix", + "min_tokens", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != 
UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/finetunedmodelcapabilities.py b/src/mistralai/client/models/finetunedmodelcapabilities.py new file mode 100644 index 00000000..2f4cca0b --- /dev/null +++ b/src/mistralai/client/models/finetunedmodelcapabilities.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 475c805eab95 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer +from typing import Optional +from typing_extensions import NotRequired, TypedDict + + +class FineTunedModelCapabilitiesTypedDict(TypedDict): + completion_chat: NotRequired[bool] + completion_fim: NotRequired[bool] + function_calling: NotRequired[bool] + fine_tuning: NotRequired[bool] + classification: NotRequired[bool] + + +class FineTunedModelCapabilities(BaseModel): + completion_chat: Optional[bool] = True + + completion_fim: Optional[bool] = False + + function_calling: Optional[bool] = False + + fine_tuning: Optional[bool] = False + + classification: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "completion_chat", + "completion_fim", + "function_calling", + "fine_tuning", + "classification", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/ftmodelcapabilitiesout.py b/src/mistralai/client/models/ftmodelcapabilitiesout.py deleted file mode 100644 index 42269b78..00000000 --- a/src/mistralai/client/models/ftmodelcapabilitiesout.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: f70517be97d4 - -from __future__ import annotations -from mistralai.client.types import BaseModel -from typing import Optional -from typing_extensions import NotRequired, TypedDict - - -class FTModelCapabilitiesOutTypedDict(TypedDict): - completion_chat: NotRequired[bool] - completion_fim: NotRequired[bool] - function_calling: NotRequired[bool] - fine_tuning: NotRequired[bool] - classification: NotRequired[bool] - - -class FTModelCapabilitiesOut(BaseModel): - completion_chat: Optional[bool] = True - - completion_fim: Optional[bool] = False - - function_calling: Optional[bool] = False - - fine_tuning: Optional[bool] = False - - classification: Optional[bool] = False diff --git a/src/mistralai/client/models/ftmodelcard.py b/src/mistralai/client/models/ftmodelcard.py index 570e95e2..2c26ff2f 100644 --- a/src/mistralai/client/models/ftmodelcard.py +++ b/src/mistralai/client/models/ftmodelcard.py @@ -71,7 +71,7 @@ class FTModelCard(BaseModel): default_model_temperature: OptionalNullable[float] = UNSET - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["fine-tuned"], AfterValidator(validate_const("fine-tuned"))], pydantic.Field(alias="type"), ] = "fine-tuned" @@ -80,48 +80,53 @@ class FTModelCard(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "created", - "owned_by", - "name", - "description", - "max_context_length", - "aliases", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - "archived", - ] - nullable_fields = [ - "name", - "description", - "deprecation", - "deprecation_replacement_model", - "default_model_temperature", - ] - null_default_fields = [] - + optional_fields = set( + [ + "object", + "created", + "owned_by", + "name", + "description", + "max_context_length", + "aliases", + "deprecation", + "deprecation_replacement_model", + 
"default_model_temperature", + "archived", + ] + ) + nullable_fields = set( + [ + "name", + "description", + "deprecation", + "deprecation_replacement_model", + "default_model_temperature", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + FTModelCard.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/function.py b/src/mistralai/client/models/function.py index 3632c1af..1da1dcc9 100644 --- a/src/mistralai/client/models/function.py +++ b/src/mistralai/client/models/function.py @@ -2,7 +2,8 @@ # @generated-id: 32275a9d8fee from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Any, Dict, Optional from typing_extensions import NotRequired, TypedDict @@ -22,3 +23,19 @@ class Function(BaseModel): description: Optional[str] = None strict: Optional[bool] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["description", "strict"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val 
!= UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/functioncallentry.py b/src/mistralai/client/models/functioncallentry.py index 6ada1d35..d05fad85 100644 --- a/src/mistralai/client/models/functioncallentry.py +++ b/src/mistralai/client/models/functioncallentry.py @@ -13,27 +13,38 @@ OptionalNullable, UNSET, UNSET_SENTINEL, + UnrecognizedStr, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer -from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypedDict -FunctionCallEntryObject = Literal["entry",] - - -FunctionCallEntryType = Literal["function.call",] +FunctionCallEntryConfirmationStatus = Union[ + Literal[ + "pending", + "allowed", + "denied", + ], + UnrecognizedStr, +] class FunctionCallEntryTypedDict(TypedDict): tool_call_id: str name: str arguments: FunctionCallEntryArgumentsTypedDict - object: NotRequired[FunctionCallEntryObject] - type: NotRequired[FunctionCallEntryType] + object: Literal["entry"] + type: Literal["function.call"] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] id: NotRequired[str] + confirmation_status: NotRequired[Nullable[FunctionCallEntryConfirmationStatus]] class FunctionCallEntry(BaseModel): @@ -43,42 +54,71 @@ class FunctionCallEntry(BaseModel): arguments: FunctionCallEntryArguments - object: Optional[FunctionCallEntryObject] = "entry" + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" - type: Optional[FunctionCallEntryType] = "function.call" + type: Annotated[ + Annotated[ + 
Optional[Literal["function.call"]], + AfterValidator(validate_const("function.call")), + ], + pydantic.Field(alias="type"), + ] = "function.call" created_at: Optional[datetime] = None completed_at: OptionalNullable[datetime] = UNSET + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + id: Optional[str] = None + confirmation_status: OptionalNullable[FunctionCallEntryConfirmationStatus] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - + optional_fields = set( + [ + "object", + "type", + "created_at", + "completed_at", + "agent_id", + "model", + "id", + "confirmation_status", + ] + ) + nullable_fields = set( + ["completed_at", "agent_id", "model", "confirmation_status"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + FunctionCallEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/functioncallevent.py b/src/mistralai/client/models/functioncallevent.py index 5d871a0e..849eed76 100644 --- a/src/mistralai/client/models/functioncallevent.py +++ 
b/src/mistralai/client/models/functioncallevent.py @@ -3,14 +3,32 @@ from __future__ import annotations from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, + UnrecognizedStr, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional +from typing import Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypedDict +FunctionCallEventConfirmationStatus = Union[ + Literal[ + "pending", + "allowed", + "denied", + ], + UnrecognizedStr, +] + + class FunctionCallEventTypedDict(TypedDict): id: str name: str @@ -19,6 +37,9 @@ class FunctionCallEventTypedDict(TypedDict): type: Literal["function.call.delta"] created_at: NotRequired[datetime] output_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] + confirmation_status: NotRequired[Nullable[FunctionCallEventConfirmationStatus]] class FunctionCallEvent(BaseModel): @@ -30,7 +51,7 @@ class FunctionCallEvent(BaseModel): arguments: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["function.call.delta"], AfterValidator(validate_const("function.call.delta")), @@ -41,3 +62,42 @@ class FunctionCallEvent(BaseModel): created_at: Optional[datetime] = None output_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + confirmation_status: OptionalNullable[FunctionCallEventConfirmationStatus] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + ["created_at", "output_index", "model", "agent_id", "confirmation_status"] + ) + nullable_fields = set(["model", "agent_id", "confirmation_status"]) + serialized = handler(self) + m = {} + + for n, f in 
type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + FunctionCallEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/functionresultentry.py b/src/mistralai/client/models/functionresultentry.py index ca73cbb7..01e2e36f 100644 --- a/src/mistralai/client/models/functionresultentry.py +++ b/src/mistralai/client/models/functionresultentry.py @@ -10,22 +10,19 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -FunctionResultEntryObject = Literal["entry",] - - -FunctionResultEntryType = Literal["function.result",] +from typing_extensions import Annotated, NotRequired, TypedDict class FunctionResultEntryTypedDict(TypedDict): tool_call_id: str result: str - object: NotRequired[FunctionResultEntryObject] - type: NotRequired[FunctionResultEntryType] + object: Literal["entry"] + type: Literal["function.result"] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] id: NotRequired[str] @@ -36,9 +33,18 @@ class FunctionResultEntry(BaseModel): result: str - object: Optional[FunctionResultEntryObject] = "entry" + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" - type: Optional[FunctionResultEntryType] = "function.result" + type: Annotated[ + Annotated[ + Optional[Literal["function.result"]], + AfterValidator(validate_const("function.result")), + ], + 
pydantic.Field(alias="type"), + ] = "function.result" created_at: Optional[datetime] = None @@ -48,30 +54,31 @@ class FunctionResultEntry(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id"] - nullable_fields = ["completed_at"] - null_default_fields = [] - + optional_fields = set(["object", "type", "created_at", "completed_at", "id"]) + nullable_fields = set(["completed_at"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + FunctionResultEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/functiontool.py b/src/mistralai/client/models/functiontool.py index 13b04496..eae87264 100644 --- a/src/mistralai/client/models/functiontool.py +++ b/src/mistralai/client/models/functiontool.py @@ -19,7 +19,13 @@ class FunctionToolTypedDict(TypedDict): class FunctionTool(BaseModel): function: Function - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["function"], AfterValidator(validate_const("function"))], pydantic.Field(alias="type"), ] = "function" + + +try: + FunctionTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/getagentop.py 
b/src/mistralai/client/models/getagentop.py deleted file mode 100644 index 55d8fe68..00000000 --- a/src/mistralai/client/models/getagentop.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 5a28bb1e727e - -from __future__ import annotations -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from mistralai.client.utils import FieldMetadata, PathParamMetadata, QueryParamMetadata -from pydantic import model_serializer -from typing import Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -GetAgentAgentVersionTypedDict = TypeAliasType( - "GetAgentAgentVersionTypedDict", Union[int, str] -) - - -GetAgentAgentVersion = TypeAliasType("GetAgentAgentVersion", Union[int, str]) - - -class GetAgentRequestTypedDict(TypedDict): - agent_id: str - agent_version: NotRequired[Nullable[GetAgentAgentVersionTypedDict]] - - -class GetAgentRequest(BaseModel): - agent_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - agent_version: Annotated[ - OptionalNullable[GetAgentAgentVersion], - FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), - ] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["agent_version"] - nullable_fields = ["agent_version"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or 
(optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/getdocumenttextcontentop.py b/src/mistralai/client/models/getdocumenttextcontentop.py deleted file mode 100644 index 8a7b4aae..00000000 --- a/src/mistralai/client/models/getdocumenttextcontentop.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: ba23717093ef - -from __future__ import annotations -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class GetDocumentTextContentRequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class GetDocumentTextContentRequest(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/client/models/retrievefileout.py b/src/mistralai/client/models/getfileresponse.py similarity index 69% rename from src/mistralai/client/models/retrievefileout.py rename to src/mistralai/client/models/getfileresponse.py index 2abf2161..f625c153 100644 --- a/src/mistralai/client/models/retrievefileout.py +++ b/src/mistralai/client/models/getfileresponse.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 8bb5859aa0d0 +# @generated-id: 81919086e371 from __future__ import annotations from .filepurpose import FilePurpose @@ -17,7 +17,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class RetrieveFileOutTypedDict(TypedDict): +class GetFileResponseTypedDict(TypedDict): id: str r"""The unique identifier of the file.""" object: str @@ -37,7 +37,7 @@ class RetrieveFileOutTypedDict(TypedDict): signature: NotRequired[Nullable[str]] -class RetrieveFileOut(BaseModel): +class GetFileResponse(BaseModel): id: str r"""The unique identifier of the file.""" @@ -69,30 +69,31 @@ class RetrieveFileOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["num_lines", "mimetype", "signature"] - nullable_fields = ["num_lines", "mimetype", "signature"] - null_default_fields = [] - + optional_fields = set(["num_lines", "mimetype", "signature"]) + nullable_fields = set(["num_lines", "mimetype", "signature"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + GetFileResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/getfinetuningjobop.py 
b/src/mistralai/client/models/getfinetuningjobop.py deleted file mode 100644 index 1fb732f4..00000000 --- a/src/mistralai/client/models/getfinetuningjobop.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: afe997f96d69 - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from pydantic import Field -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class GetFineTuningJobRequestTypedDict(TypedDict): - job_id: str - r"""The ID of the job to analyse.""" - - -class GetFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the job to analyse.""" - - -GetFineTuningJobResponseTypedDict = TypeAliasType( - "GetFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -GetFineTuningJobResponse = Annotated[ - Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], - Field(discriminator="JOB_TYPE"), -] -r"""OK""" diff --git a/src/mistralai/client/models/filesignedurl.py b/src/mistralai/client/models/getsignedurlresponse.py similarity index 65% rename from src/mistralai/client/models/filesignedurl.py rename to src/mistralai/client/models/getsignedurlresponse.py index 53dff812..4ba95894 100644 --- a/src/mistralai/client/models/filesignedurl.py +++ b/src/mistralai/client/models/getsignedurlresponse.py @@ -1,14 +1,14 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: a1754c725163 +# @generated-id: cee4e4197372 from __future__ import annotations from mistralai.client.types import BaseModel from typing_extensions import TypedDict -class FileSignedURLTypedDict(TypedDict): +class GetSignedURLResponseTypedDict(TypedDict): url: str -class FileSignedURL(BaseModel): +class GetSignedURLResponse(BaseModel): url: str diff --git a/src/mistralai/client/models/githubrepositoryout.py b/src/mistralai/client/models/githubrepository.py similarity index 59% rename from src/mistralai/client/models/githubrepositoryout.py rename to src/mistralai/client/models/githubrepository.py index 514df01c..84b01078 100644 --- a/src/mistralai/client/models/githubrepositoryout.py +++ b/src/mistralai/client/models/githubrepository.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: d2434a167623 +# @generated-id: 4bc83ce18378 from __future__ import annotations from mistralai.client.types import ( @@ -17,7 +17,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class GithubRepositoryOutTypedDict(TypedDict): +class GithubRepositoryTypedDict(TypedDict): name: str owner: str commit_id: str @@ -26,14 +26,14 @@ class GithubRepositoryOutTypedDict(TypedDict): weight: NotRequired[float] -class GithubRepositoryOut(BaseModel): +class GithubRepository(BaseModel): name: str owner: str commit_id: str - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["github"], AfterValidator(validate_const("github"))], pydantic.Field(alias="type"), ] = "github" @@ -44,30 +44,31 @@ class GithubRepositoryOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["ref", "weight"] - nullable_fields = ["ref"] - null_default_fields = [] - + optional_fields = set(["ref", "weight"]) + nullable_fields = set(["ref"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = 
f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + GithubRepository.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/githubrepositoryin.py b/src/mistralai/client/models/githubrepositoryin.py index e55389c3..38bcc208 100644 --- a/src/mistralai/client/models/githubrepositoryin.py +++ b/src/mistralai/client/models/githubrepositoryin.py @@ -33,7 +33,7 @@ class GithubRepositoryIn(BaseModel): token: str - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["github"], AfterValidator(validate_const("github"))], pydantic.Field(alias="type"), ] = "github" @@ -44,30 +44,31 @@ class GithubRepositoryIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["ref", "weight"] - nullable_fields = ["ref"] - null_default_fields = [] - + optional_fields = set(["ref", "weight"]) + nullable_fields = set(["ref"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): 
+ m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + GithubRepositoryIn.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/imagedetail.py b/src/mistralai/client/models/imagedetail.py new file mode 100644 index 00000000..1982d357 --- /dev/null +++ b/src/mistralai/client/models/imagedetail.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c1084b549abb + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ImageDetail = Union[ + Literal[ + "low", + "auto", + "high", + ], + UnrecognizedStr, +] diff --git a/src/mistralai/client/models/imagegenerationtool.py b/src/mistralai/client/models/imagegenerationtool.py index 680c6ce2..c1789b18 100644 --- a/src/mistralai/client/models/imagegenerationtool.py +++ b/src/mistralai/client/models/imagegenerationtool.py @@ -2,23 +2,65 @@ # @generated-id: e1532275faa0 from __future__ import annotations -from mistralai.client.types import BaseModel +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class ImageGenerationToolTypedDict(TypedDict): 
+ tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] type: Literal["image_generation"] class ImageGenerationTool(BaseModel): - TYPE: Annotated[ + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ Annotated[ Literal["image_generation"], AfterValidator(validate_const("image_generation")), ], pydantic.Field(alias="type"), ] = "image_generation" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ImageGenerationTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/imageurl.py b/src/mistralai/client/models/imageurl.py index 4ff13b1c..ac1030f5 100644 --- a/src/mistralai/client/models/imageurl.py +++ b/src/mistralai/client/models/imageurl.py @@ -2,6 +2,7 @@ # @generated-id: e4bbf5881fbf from __future__ import annotations +from .imagedetail import ImageDetail from mistralai.client.types import ( BaseModel, Nullable, @@ -15,40 +16,35 @@ class ImageURLTypedDict(TypedDict): url: str - detail: NotRequired[Nullable[str]] + detail: NotRequired[Nullable[ImageDetail]] class ImageURL(BaseModel): url: str - detail: OptionalNullable[str] = UNSET + detail: OptionalNullable[ImageDetail] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["detail"] - nullable_fields = ["detail"] - null_default_fields = [] - + optional_fields = set(["detail"]) + nullable_fields = set(["detail"]) serialized = handler(self) - m 
= {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/imageurlchunk.py b/src/mistralai/client/models/imageurlchunk.py index 993185cc..7134b46e 100644 --- a/src/mistralai/client/models/imageurlchunk.py +++ b/src/mistralai/client/models/imageurlchunk.py @@ -3,9 +3,13 @@ from __future__ import annotations from .imageurl import ImageURL, ImageURLTypedDict -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, TypeAliasType, TypedDict ImageURLUnionTypedDict = TypeAliasType( @@ -16,14 +20,11 @@ ImageURLUnion = TypeAliasType("ImageURLUnion", Union[ImageURL, str]) -ImageURLChunkType = Literal["image_url",] - - class ImageURLChunkTypedDict(TypedDict): r"""{\"type\":\"image_url\",\"image_url\":{\"url\":\"data:image/png;base64,iVBORw0""" image_url: ImageURLUnionTypedDict - type: NotRequired[ImageURLChunkType] + type: Literal["image_url"] class 
ImageURLChunk(BaseModel): @@ -31,4 +32,31 @@ class ImageURLChunk(BaseModel): image_url: ImageURLUnion - type: Optional[ImageURLChunkType] = "image_url" + type: Annotated[ + Annotated[ + Optional[Literal["image_url"]], AfterValidator(validate_const("image_url")) + ], + pydantic.Field(alias="type"), + ] = "image_url" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ImageURLChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/inputentries.py b/src/mistralai/client/models/inputentries.py index dc989295..e2da5a80 100644 --- a/src/mistralai/client/models/inputentries.py +++ b/src/mistralai/client/models/inputentries.py @@ -17,10 +17,10 @@ Union[ FunctionResultEntryTypedDict, MessageInputEntryTypedDict, - FunctionCallEntryTypedDict, - ToolExecutionEntryTypedDict, MessageOutputEntryTypedDict, AgentHandoffEntryTypedDict, + ToolExecutionEntryTypedDict, + FunctionCallEntryTypedDict, ], ) @@ -30,9 +30,9 @@ Union[ FunctionResultEntry, MessageInputEntry, - FunctionCallEntry, - ToolExecutionEntry, MessageOutputEntry, AgentHandoffEntry, + ToolExecutionEntry, + FunctionCallEntry, ], ) diff --git a/src/mistralai/client/models/inputs.py b/src/mistralai/client/models/inputs.py index cfcdeb3d..9ecd7f48 100644 --- a/src/mistralai/client/models/inputs.py +++ b/src/mistralai/client/models/inputs.py @@ -2,54 +2,16 @@ # @generated-id: 84a8007518c7 from __future__ import annotations -from .assistantmessage import AssistantMessage, AssistantMessageTypedDict from .instructrequest import InstructRequest, InstructRequestTypedDict -from .systemmessage import SystemMessage, SystemMessageTypedDict -from .toolmessage import ToolMessage, ToolMessageTypedDict 
-from .usermessage import UserMessage, UserMessageTypedDict -from mistralai.client.types import BaseModel -from mistralai.client.utils import get_discriminator -from pydantic import Discriminator, Tag from typing import List, Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -InputsMessageTypedDict = TypeAliasType( - "InputsMessageTypedDict", - Union[ - SystemMessageTypedDict, - UserMessageTypedDict, - AssistantMessageTypedDict, - ToolMessageTypedDict, - ], -) - - -InputsMessage = Annotated[ - Union[ - Annotated[AssistantMessage, Tag("assistant")], - Annotated[SystemMessage, Tag("system")], - Annotated[ToolMessage, Tag("tool")], - Annotated[UserMessage, Tag("user")], - ], - Discriminator(lambda m: get_discriminator(m, "role", "role")), -] - - -class InstructRequestInputsTypedDict(TypedDict): - messages: List[InputsMessageTypedDict] - - -class InstructRequestInputs(BaseModel): - messages: List[InputsMessage] +from typing_extensions import TypeAliasType InputsTypedDict = TypeAliasType( - "InputsTypedDict", - Union[InstructRequestInputsTypedDict, List[InstructRequestTypedDict]], + "InputsTypedDict", Union[InstructRequestTypedDict, List[InstructRequestTypedDict]] ) r"""Chat to classify""" -Inputs = TypeAliasType("Inputs", Union[InstructRequestInputs, List[InstructRequest]]) +Inputs = TypeAliasType("Inputs", Union[InstructRequest, List[InstructRequest]]) r"""Chat to classify""" diff --git a/src/mistralai/client/models/jobmetadataout.py b/src/mistralai/client/models/jobmetadata.py similarity index 52% rename from src/mistralai/client/models/jobmetadataout.py rename to src/mistralai/client/models/jobmetadata.py index 1d386539..f6e96fa1 100644 --- a/src/mistralai/client/models/jobmetadataout.py +++ b/src/mistralai/client/models/jobmetadata.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 805f41e3292a +# @generated-id: cfbdde7fc0a2 from __future__ import annotations from mistralai.client.types import ( @@ -13,7 +13,7 @@ from typing_extensions import NotRequired, TypedDict -class JobMetadataOutTypedDict(TypedDict): +class JobMetadataTypedDict(TypedDict): expected_duration_seconds: NotRequired[Nullable[int]] cost: NotRequired[Nullable[float]] cost_currency: NotRequired[Nullable[str]] @@ -23,7 +23,7 @@ class JobMetadataOutTypedDict(TypedDict): estimated_start_time: NotRequired[Nullable[int]] -class JobMetadataOut(BaseModel): +class JobMetadata(BaseModel): expected_duration_seconds: OptionalNullable[int] = UNSET cost: OptionalNullable[float] = UNSET @@ -40,46 +40,45 @@ class JobMetadataOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - ] - nullable_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - ] - null_default_fields = [] - + optional_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + ) + nullable_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != 
UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/cancelbatchjobop.py b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py similarity index 72% rename from src/mistralai/client/models/cancelbatchjobop.py rename to src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py index cd94ee86..de2e6347 100644 --- a/src/mistralai/client/models/cancelbatchjobop.py +++ b/src/mistralai/client/models/jobs_api_routes_batch_cancel_batch_jobop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: cebac10b56a9 +# @generated-id: b56cb6c17c95 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class CancelBatchJobRequestTypedDict(TypedDict): +class JobsAPIRoutesBatchCancelBatchJobRequestTypedDict(TypedDict): job_id: str -class CancelBatchJobRequest(BaseModel): +class JobsAPIRoutesBatchCancelBatchJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getbatchjobop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py similarity index 56% rename from src/mistralai/client/models/getbatchjobop.py rename to src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py index 792c3e21..d779e1d9 100644 --- a/src/mistralai/client/models/getbatchjobop.py +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobop.py @@ -1,5 +1,5 @@ """Code 
generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 443103fe3b88 +# @generated-id: 36b5a6b3ceee from __future__ import annotations from mistralai.client.types import ( @@ -14,12 +14,12 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class GetBatchJobRequestTypedDict(TypedDict): +class JobsAPIRoutesBatchGetBatchJobRequestTypedDict(TypedDict): job_id: str inline: NotRequired[Nullable[bool]] -class GetBatchJobRequest(BaseModel): +class JobsAPIRoutesBatchGetBatchJobRequest(BaseModel): job_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] @@ -31,30 +31,25 @@ class GetBatchJobRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["inline"] - nullable_fields = ["inline"] - null_default_fields = [] - + optional_fields = set(["inline"]) + nullable_fields = set(["inline"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/listbatchjobsop.py b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py similarity index 71% rename from src/mistralai/client/models/listbatchjobsop.py rename to 
src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py index 5322df81..89ac3c93 100644 --- a/src/mistralai/client/models/listbatchjobsop.py +++ b/src/mistralai/client/models/jobs_api_routes_batch_get_batch_jobsop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: f49af453f5e6 +# @generated-id: d8f0af99c94d from __future__ import annotations from .batchjobstatus import BatchJobStatus @@ -23,7 +23,7 @@ ] -class ListBatchJobsRequestTypedDict(TypedDict): +class JobsAPIRoutesBatchGetBatchJobsRequestTypedDict(TypedDict): page: NotRequired[int] page_size: NotRequired[int] model: NotRequired[Nullable[str]] @@ -35,7 +35,7 @@ class ListBatchJobsRequestTypedDict(TypedDict): order_by: NotRequired[OrderBy] -class ListBatchJobsRequest(BaseModel): +class JobsAPIRoutesBatchGetBatchJobsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -83,40 +83,39 @@ class ListBatchJobsRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "model", - "agent_id", - "metadata", - "created_after", - "created_by_me", - "status", - "order_by", - ] - nullable_fields = ["model", "agent_id", "metadata", "created_after", "status"] - null_default_fields = [] - + optional_fields = set( + [ + "page", + "page_size", + "model", + "agent_id", + "metadata", + "created_after", + "created_by_me", + "status", + "order_by", + ] + ) + nullable_fields = set( + ["model", "agent_id", "metadata", "created_after", "status"] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: 
disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/archivemodelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py similarity index 73% rename from src/mistralai/client/models/archivemodelop.py rename to src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py index 30b4a9bd..9fa99837 100644 --- a/src/mistralai/client/models/archivemodelop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_archive_fine_tuned_modelop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: beefa1df3b7c +# @generated-id: 34f89d2af0ec from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class ArchiveModelRequestTypedDict(TypedDict): +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to archive.""" -class ArchiveModelRequest(BaseModel): +class JobsAPIRoutesFineTuningArchiveFineTunedModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py new file mode 100644 index 00000000..56fa5340 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py @@ -0,0 +1,78 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d175c6e32ecb + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to cancel.""" + + +class JobsAPIRoutesFineTuningCancelFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to cancel.""" + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCancelFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningCancelFineTuningJobResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_CANCEL_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[ + str, Any +] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningCancelFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_CANCEL_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningCancelFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningCancelFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py new file mode 100644 index 00000000..db857f7d --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py @@ -0,0 +1,70 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 81651291187a + +from __future__ import annotations +from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobTypedDict, +) +from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobTypedDict, +) +from .legacyjobmetadata import LegacyJobMetadata, LegacyJobMetadataTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType + + +ResponseTypedDict = TypeAliasType( + "ResponseTypedDict", + Union[ClassifierFineTuningJobTypedDict, CompletionFineTuningJobTypedDict], +) + + +class UnknownResponse(BaseModel): + r"""A Response variant the SDK doesn't recognize. Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJob, + "completion": CompletionFineTuningJob, +} + + +Response = Annotated[ + Union[ClassifierFineTuningJob, CompletionFineTuningJob, UnknownResponse], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_RESPONSE_VARIANTS, + unknown_cls=UnknownResponse, + union_name="Response", + ) + ), +] + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponseTypedDict", + Union[LegacyJobMetadataTypedDict, ResponseTypedDict], +) +r"""OK""" + + +JobsAPIRoutesFineTuningCreateFineTuningJobResponse = TypeAliasType( + "JobsAPIRoutesFineTuningCreateFineTuningJobResponse", + Union[LegacyJobMetadata, Response], +) +r"""OK""" diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py 
b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py new file mode 100644 index 00000000..ddd9c189 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py @@ -0,0 +1,76 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d910fd8fe2d6 + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequestTypedDict(TypedDict): + job_id: str + r"""The ID of the job to analyse.""" + + +class JobsAPIRoutesFineTuningGetFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the job to analyse.""" + + +JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningGetFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningGetFineTuningJobResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_GET_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningGetFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_GET_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningGetFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningGetFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/listfinetuningjobsop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py similarity index 75% rename from src/mistralai/client/models/listfinetuningjobsop.py rename to src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py index 8712c3fa..ec80a158 100644 --- a/src/mistralai/client/models/listfinetuningjobsop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: b77fe203b929 +# @generated-id: cf43028824bf from __future__ import annotations from datetime import datetime @@ -16,7 +16,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -ListFineTuningJobsStatus = Literal[ +JobsAPIRoutesFineTuningGetFineTuningJobsStatus = Literal[ "QUEUED", "STARTED", "VALIDATING", @@ -31,7 +31,7 @@ r"""The current job state to filter on. 
When set, the other results are not displayed.""" -class ListFineTuningJobsRequestTypedDict(TypedDict): +class JobsAPIRoutesFineTuningGetFineTuningJobsRequestTypedDict(TypedDict): page: NotRequired[int] r"""The page number of the results to be returned.""" page_size: NotRequired[int] @@ -43,7 +43,7 @@ class ListFineTuningJobsRequestTypedDict(TypedDict): created_before: NotRequired[Nullable[datetime]] created_by_me: NotRequired[bool] r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" - status: NotRequired[Nullable[ListFineTuningJobsStatus]] + status: NotRequired[Nullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus]] r"""The current job state to filter on. When set, the other results are not displayed.""" wandb_project: NotRequired[Nullable[str]] r"""The Weights and Biases project to filter on. When set, the other results are not displayed.""" @@ -53,7 +53,7 @@ class ListFineTuningJobsRequestTypedDict(TypedDict): r"""The model suffix to filter on. When set, the other results are not displayed.""" -class ListFineTuningJobsRequest(BaseModel): +class JobsAPIRoutesFineTuningGetFineTuningJobsRequest(BaseModel): page: Annotated[ Optional[int], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), @@ -90,7 +90,7 @@ class ListFineTuningJobsRequest(BaseModel): r"""When set, only return results for jobs created by the API caller. Other results are not displayed.""" status: Annotated[ - OptionalNullable[ListFineTuningJobsStatus], + OptionalNullable[JobsAPIRoutesFineTuningGetFineTuningJobsStatus], FieldMetadata(query=QueryParamMetadata(style="form", explode=True)), ] = UNSET r"""The current job state to filter on. 
When set, the other results are not displayed.""" @@ -115,49 +115,48 @@ class ListFineTuningJobsRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "page", - "page_size", - "model", - "created_after", - "created_before", - "created_by_me", - "status", - "wandb_project", - "wandb_name", - "suffix", - ] - nullable_fields = [ - "model", - "created_after", - "created_before", - "status", - "wandb_project", - "wandb_name", - "suffix", - ] - null_default_fields = [] - + optional_fields = set( + [ + "page", + "page_size", + "model", + "created_after", + "created_before", + "created_by_me", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + ) + nullable_fields = set( + [ + "model", + "created_after", + "created_before", + "status", + "wandb_project", + "wandb_name", + "suffix", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py new file mode 100644 index 00000000..cd25fa04 --- /dev/null +++ 
b/src/mistralai/client/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: e7ff4a4a4edb + +from __future__ import annotations +from .classifierfinetuningjobdetails import ( + ClassifierFineTuningJobDetails, + ClassifierFineTuningJobDetailsTypedDict, +) +from .completionfinetuningjobdetails import ( + CompletionFineTuningJobDetails, + CompletionFineTuningJobDetailsTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequestTypedDict(TypedDict): + job_id: str + + +class JobsAPIRoutesFineTuningStartFineTuningJobRequest(BaseModel): + job_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + +JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningStartFineTuningJobResponseTypedDict", + Union[ + CompletionFineTuningJobDetailsTypedDict, ClassifierFineTuningJobDetailsTypedDict + ], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningStartFineTuningJobResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_START_FINE_TUNING_JOB_RESPONSE_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJobDetails, + "completion": CompletionFineTuningJobDetails, +} + + +JobsAPIRoutesFineTuningStartFineTuningJobResponse = Annotated[ + Union[ + ClassifierFineTuningJobDetails, + CompletionFineTuningJobDetails, + UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_START_FINE_TUNING_JOB_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningStartFineTuningJobResponse, + union_name="JobsAPIRoutesFineTuningStartFineTuningJobResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/unarchivemodelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py similarity index 73% rename from src/mistralai/client/models/unarchivemodelop.py rename to src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py index 1d68a06a..fd01fe69 100644 --- a/src/mistralai/client/models/unarchivemodelop.py +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: eb18584fd78c +# @generated-id: 7cc1c80335a9 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class UnarchiveModelRequestTypedDict(TypedDict): +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequestTypedDict(TypedDict): model_id: str r"""The ID of the model to unarchive.""" -class UnarchiveModelRequest(BaseModel): +class JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest(BaseModel): model_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py new file mode 100644 index 00000000..296070b4 --- /dev/null +++ b/src/mistralai/client/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6d9dc624aafd + +from __future__ import annotations +from .classifierfinetunedmodel import ( + ClassifierFineTunedModel, + ClassifierFineTunedModelTypedDict, +) +from .completionfinetunedmodel import ( + CompletionFineTunedModel, + CompletionFineTunedModelTypedDict, +) +from .updatemodelrequest import UpdateModelRequest, UpdateModelRequestTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to update.""" + update_model_request: UpdateModelRequestTypedDict + + +class JobsAPIRoutesFineTuningUpdateFineTunedModelRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to update.""" + + update_model_request: Annotated[ + UpdateModelRequest, + FieldMetadata(request=RequestMetadata(media_type="application/json")), + ] + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict = TypeAliasType( + "JobsAPIRoutesFineTuningUpdateFineTunedModelResponseTypedDict", + Union[CompletionFineTunedModelTypedDict, ClassifierFineTunedModelTypedDict], +) +r"""OK""" + + +class UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse(BaseModel): + r"""A JobsAPIRoutesFineTuningUpdateFineTunedModelResponse variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + model_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_JOBS_API_ROUTES_FINE_TUNING_UPDATE_FINE_TUNED_MODEL_RESPONSE_VARIANTS: dict[ + str, Any +] = { + "classifier": ClassifierFineTunedModel, + "completion": CompletionFineTunedModel, +} + + +JobsAPIRoutesFineTuningUpdateFineTunedModelResponse = Annotated[ + Union[ + ClassifierFineTunedModel, + CompletionFineTunedModel, + UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="model_type", + variants=_JOBS_API_ROUTES_FINE_TUNING_UPDATE_FINE_TUNED_MODEL_RESPONSE_VARIANTS, + unknown_cls=UnknownJobsAPIRoutesFineTuningUpdateFineTunedModelResponse, + union_name="JobsAPIRoutesFineTuningUpdateFineTunedModelResponse", + ) + ), +] +r"""OK""" diff --git a/src/mistralai/client/models/jobsout.py b/src/mistralai/client/models/jobsout.py deleted file mode 100644 index a4127a5d..00000000 --- a/src/mistralai/client/models/jobsout.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 22e91e9631a9 - -from __future__ import annotations -from .classifierjobout import ClassifierJobOut, ClassifierJobOutTypedDict -from .completionjobout import CompletionJobOut, CompletionJobOutTypedDict -from mistralai.client.types import BaseModel -from mistralai.client.utils import validate_const -import pydantic -from pydantic import Field -from pydantic.functional_validators import AfterValidator -from typing import List, Literal, Optional, Union -from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict - - -JobsOutDataTypedDict = TypeAliasType( - "JobsOutDataTypedDict", Union[ClassifierJobOutTypedDict, CompletionJobOutTypedDict] -) - - -JobsOutData = Annotated[ - Union[ClassifierJobOut, CompletionJobOut], Field(discriminator="JOB_TYPE") -] - - -class JobsOutTypedDict(TypedDict): - total: int - data: NotRequired[List[JobsOutDataTypedDict]] - object: Literal["list"] - - -class JobsOut(BaseModel): - total: int - - data: Optional[List[JobsOutData]] = None - - OBJECT: Annotated[ - Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], - pydantic.Field(alias="object"), - ] = "list" diff --git a/src/mistralai/client/models/jsonschema.py b/src/mistralai/client/models/jsonschema.py index 948c94ed..dfababa6 100644 --- a/src/mistralai/client/models/jsonschema.py +++ b/src/mistralai/client/models/jsonschema.py @@ -33,30 +33,31 @@ class JSONSchema(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["description", "strict"] - nullable_fields = ["description"] - null_default_fields = [] - + optional_fields = set(["description", "strict"]) + nullable_fields = set(["description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: 
disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + JSONSchema.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/legacyjobmetadataout.py b/src/mistralai/client/models/legacyjobmetadata.py similarity index 70% rename from src/mistralai/client/models/legacyjobmetadataout.py rename to src/mistralai/client/models/legacyjobmetadata.py index 4453c157..57576758 100644 --- a/src/mistralai/client/models/legacyjobmetadataout.py +++ b/src/mistralai/client/models/legacyjobmetadata.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 4f44aa38c864 +# @generated-id: 0330b8930f65 from __future__ import annotations from mistralai.client.types import ( @@ -17,7 +17,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class LegacyJobMetadataOutTypedDict(TypedDict): +class LegacyJobMetadataTypedDict(TypedDict): details: str expected_duration_seconds: NotRequired[Nullable[int]] r"""The approximated time (in seconds) for the fine-tuning process to complete.""" @@ -40,7 +40,7 @@ class LegacyJobMetadataOutTypedDict(TypedDict): object: Literal["job.metadata"] -class LegacyJobMetadataOut(BaseModel): +class LegacyJobMetadata(BaseModel): details: str expected_duration_seconds: OptionalNullable[int] = UNSET @@ -71,7 +71,7 @@ class LegacyJobMetadataOut(BaseModel): training_steps: OptionalNullable[int] = UNSET r"""The number of training steps to perform. A training step refers to a single update of the model weights during the fine-tuning process. This update is typically calculated using a batch of samples from the training dataset.""" - OBJECT: Annotated[ + object: Annotated[ Annotated[ Optional[Literal["job.metadata"]], AfterValidator(validate_const("job.metadata")), @@ -81,52 +81,57 @@ class LegacyJobMetadataOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - "deprecated", - "epochs", - "training_steps", - "object", - ] - nullable_fields = [ - "expected_duration_seconds", - "cost", - "cost_currency", - "train_tokens_per_step", - "train_tokens", - "data_tokens", - "estimated_start_time", - "epochs", - "training_steps", - ] - null_default_fields = [] - + optional_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "deprecated", + "epochs", + 
"training_steps", + "object", + ] + ) + nullable_fields = set( + [ + "expected_duration_seconds", + "cost", + "cost_currency", + "train_tokens_per_step", + "train_tokens", + "data_tokens", + "estimated_start_time", + "epochs", + "training_steps", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + LegacyJobMetadata.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/deletelibraryop.py b/src/mistralai/client/models/libraries_delete_v1op.py similarity index 76% rename from src/mistralai/client/models/deletelibraryop.py rename to src/mistralai/client/models/libraries_delete_v1op.py index 5eb6fc31..893ab53b 100644 --- a/src/mistralai/client/models/deletelibraryop.py +++ b/src/mistralai/client/models/libraries_delete_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: cd0ce9bf8d51 +# @generated-id: b2e8bbd19baa from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class DeleteLibraryRequestTypedDict(TypedDict): +class LibrariesDeleteV1RequestTypedDict(TypedDict): library_id: str -class DeleteLibraryRequest(BaseModel): +class LibrariesDeleteV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deletedocumentop.py b/src/mistralai/client/models/libraries_documents_delete_v1op.py similarity index 79% rename from src/mistralai/client/models/deletedocumentop.py rename to src/mistralai/client/models/libraries_documents_delete_v1op.py index 400070a4..0495832e 100644 --- a/src/mistralai/client/models/deletedocumentop.py +++ b/src/mistralai/client/models/libraries_documents_delete_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 62522db1ccf2 +# @generated-id: 81eb34382a3d from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class DeleteDocumentRequestTypedDict(TypedDict): +class LibrariesDocumentsDeleteV1RequestTypedDict(TypedDict): library_id: str document_id: str -class DeleteDocumentRequest(BaseModel): +class LibrariesDocumentsDeleteV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py new file mode 100644 index 00000000..186baaed --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_extracted_text_signed_url_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: a7417ebd6040 + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetExtractedTextSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetExtractedTextSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py new file mode 100644 index 00000000..ebcf85d7 --- /dev/null +++ b/src/mistralai/client/models/libraries_documents_get_signed_url_v1op.py @@ -0,0 +1,22 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: d4b7b47913ba + +from __future__ import annotations +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from typing_extensions import Annotated, TypedDict + + +class LibrariesDocumentsGetSignedURLV1RequestTypedDict(TypedDict): + library_id: str + document_id: str + + +class LibrariesDocumentsGetSignedURLV1Request(BaseModel): + library_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + + document_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] diff --git a/src/mistralai/client/models/getdocumentop.py b/src/mistralai/client/models/libraries_documents_get_status_v1op.py similarity index 78% rename from src/mistralai/client/models/getdocumentop.py rename to src/mistralai/client/models/libraries_documents_get_status_v1op.py index d7b07db7..1f484787 100644 --- a/src/mistralai/client/models/getdocumentop.py +++ b/src/mistralai/client/models/libraries_documents_get_status_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: de89ff93d373 +# @generated-id: f314f73e909c from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetDocumentRequestTypedDict(TypedDict): +class LibrariesDocumentsGetStatusV1RequestTypedDict(TypedDict): library_id: str document_id: str -class GetDocumentRequest(BaseModel): +class LibrariesDocumentsGetStatusV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getdocumentextractedtextsignedurlop.py b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py similarity index 77% rename from src/mistralai/client/models/getdocumentextractedtextsignedurlop.py rename to src/mistralai/client/models/libraries_documents_get_text_content_v1op.py index 9a71181d..e0508d66 100644 --- a/src/mistralai/client/models/getdocumentextractedtextsignedurlop.py +++ b/src/mistralai/client/models/libraries_documents_get_text_content_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 69099395d631 +# @generated-id: 1ca4e0c41321 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetDocumentExtractedTextSignedURLRequestTypedDict(TypedDict): +class LibrariesDocumentsGetTextContentV1RequestTypedDict(TypedDict): library_id: str document_id: str -class GetDocumentExtractedTextSignedURLRequest(BaseModel): +class LibrariesDocumentsGetTextContentV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getdocumentstatusop.py b/src/mistralai/client/models/libraries_documents_get_v1op.py similarity index 80% rename from src/mistralai/client/models/getdocumentstatusop.py rename to src/mistralai/client/models/libraries_documents_get_v1op.py index 4206f593..857dfbe6 100644 --- a/src/mistralai/client/models/getdocumentstatusop.py +++ b/src/mistralai/client/models/libraries_documents_get_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: f1f40b8f003f +# @generated-id: 26ff35f0c69d from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetDocumentStatusRequestTypedDict(TypedDict): +class LibrariesDocumentsGetV1RequestTypedDict(TypedDict): library_id: str document_id: str -class GetDocumentStatusRequest(BaseModel): +class LibrariesDocumentsGetV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listdocumentsop.py b/src/mistralai/client/models/libraries_documents_list_v1op.py similarity index 67% rename from src/mistralai/client/models/listdocumentsop.py rename to src/mistralai/client/models/libraries_documents_list_v1op.py index 0f7c4584..da7d793b 100644 --- a/src/mistralai/client/models/listdocumentsop.py +++ b/src/mistralai/client/models/libraries_documents_list_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 3e42bdc15383 +# @generated-id: 756f26de3cbe from __future__ import annotations from mistralai.client.types import ( @@ -15,7 +15,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class ListDocumentsRequestTypedDict(TypedDict): +class LibrariesDocumentsListV1RequestTypedDict(TypedDict): library_id: str search: NotRequired[Nullable[str]] page_size: NotRequired[int] @@ -25,7 +25,7 @@ class ListDocumentsRequestTypedDict(TypedDict): sort_order: NotRequired[str] -class ListDocumentsRequest(BaseModel): +class LibrariesDocumentsListV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] @@ -62,37 +62,34 @@ class ListDocumentsRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "search", - "page_size", - "page", - "filters_attributes", - "sort_by", - "sort_order", - ] - nullable_fields = ["search", "filters_attributes"] - null_default_fields = [] - + optional_fields = set( + [ + "search", + "page_size", + "page", + "filters_attributes", + "sort_by", + "sort_order", + ] + ) + nullable_fields = set(["search", "filters_attributes"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or 
is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/getdocumentsignedurlop.py b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py similarity index 78% rename from src/mistralai/client/models/getdocumentsignedurlop.py rename to src/mistralai/client/models/libraries_documents_reprocess_v1op.py index e5d56c54..a2f9ba2a 100644 --- a/src/mistralai/client/models/getdocumentsignedurlop.py +++ b/src/mistralai/client/models/libraries_documents_reprocess_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: b8d95511c6d1 +# @generated-id: dbbeb02fc336 from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,12 +7,12 @@ from typing_extensions import Annotated, TypedDict -class GetDocumentSignedURLRequestTypedDict(TypedDict): +class LibrariesDocumentsReprocessV1RequestTypedDict(TypedDict): library_id: str document_id: str -class GetDocumentSignedURLRequest(BaseModel): +class LibrariesDocumentsReprocessV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/updatedocumentop.py b/src/mistralai/client/models/libraries_documents_update_v1op.py similarity index 64% rename from src/mistralai/client/models/updatedocumentop.py rename to src/mistralai/client/models/libraries_documents_update_v1op.py index 073f22a9..7ad4231f 100644 --- a/src/mistralai/client/models/updatedocumentop.py +++ b/src/mistralai/client/models/libraries_documents_update_v1op.py @@ -1,20 +1,20 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: eee9ef317180 +# @generated-id: 734ba6c19f5f from __future__ import annotations -from .documentupdatein import DocumentUpdateIn, DocumentUpdateInTypedDict +from .updatedocumentrequest import UpdateDocumentRequest, UpdateDocumentRequestTypedDict from mistralai.client.types import BaseModel from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict -class UpdateDocumentRequestTypedDict(TypedDict): +class LibrariesDocumentsUpdateV1RequestTypedDict(TypedDict): library_id: str document_id: str - document_update_in: DocumentUpdateInTypedDict + update_document_request: UpdateDocumentRequestTypedDict -class UpdateDocumentRequest(BaseModel): +class LibrariesDocumentsUpdateV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] @@ -23,7 +23,7 @@ class UpdateDocumentRequest(BaseModel): str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] - document_update_in: Annotated[ - DocumentUpdateIn, + update_document_request: Annotated[ + UpdateDocumentRequest, FieldMetadata(request=RequestMetadata(media_type="application/json")), ] diff --git a/src/mistralai/client/models/uploaddocumentop.py b/src/mistralai/client/models/libraries_documents_upload_v1op.py similarity index 91% rename from src/mistralai/client/models/uploaddocumentop.py rename to src/mistralai/client/models/libraries_documents_upload_v1op.py index 2c957947..388633d1 100644 --- a/src/mistralai/client/models/uploaddocumentop.py +++ b/src/mistralai/client/models/libraries_documents_upload_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 0018fe7ff48c +# @generated-id: 744466971862 from __future__ import annotations from .file import File, FileTypedDict @@ -41,12 +41,12 @@ class DocumentUpload(BaseModel): """ -class UploadDocumentRequestTypedDict(TypedDict): +class LibrariesDocumentsUploadV1RequestTypedDict(TypedDict): library_id: str request_body: DocumentUploadTypedDict -class UploadDocumentRequest(BaseModel): +class LibrariesDocumentsUploadV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/getlibraryop.py b/src/mistralai/client/models/libraries_get_v1op.py similarity index 77% rename from src/mistralai/client/models/getlibraryop.py rename to src/mistralai/client/models/libraries_get_v1op.py index bc0b4a23..7a51d605 100644 --- a/src/mistralai/client/models/getlibraryop.py +++ b/src/mistralai/client/models/libraries_get_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: c84a92e23a90 +# @generated-id: d493f39e7ebb from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class GetLibraryRequestTypedDict(TypedDict): +class LibrariesGetV1RequestTypedDict(TypedDict): library_id: str -class GetLibraryRequest(BaseModel): +class LibrariesGetV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/updateorcreatelibraryaccessop.py b/src/mistralai/client/models/libraries_share_create_v1op.py similarity index 81% rename from src/mistralai/client/models/updateorcreatelibraryaccessop.py rename to src/mistralai/client/models/libraries_share_create_v1op.py index 1abe6eda..00ea7482 100644 --- a/src/mistralai/client/models/updateorcreatelibraryaccessop.py +++ b/src/mistralai/client/models/libraries_share_create_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ec9b15418f5c +# @generated-id: feaacfd46dd3 from __future__ import annotations from .sharingin import SharingIn, SharingInTypedDict @@ -8,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class UpdateOrCreateLibraryAccessRequestTypedDict(TypedDict): +class LibrariesShareCreateV1RequestTypedDict(TypedDict): library_id: str sharing_in: SharingInTypedDict -class UpdateOrCreateLibraryAccessRequest(BaseModel): +class LibrariesShareCreateV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/deletelibraryaccessop.py b/src/mistralai/client/models/libraries_share_delete_v1op.py similarity index 83% rename from src/mistralai/client/models/deletelibraryaccessop.py rename to src/mistralai/client/models/libraries_share_delete_v1op.py index ca14c3ff..eca3f86a 100644 --- a/src/mistralai/client/models/deletelibraryaccessop.py +++ b/src/mistralai/client/models/libraries_share_delete_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: df80945bcf19 +# @generated-id: 7f3a679ca384 from __future__ import annotations from .sharingdelete import SharingDelete, SharingDeleteTypedDict @@ -8,12 +8,12 @@ from typing_extensions import Annotated, TypedDict -class DeleteLibraryAccessRequestTypedDict(TypedDict): +class LibrariesShareDeleteV1RequestTypedDict(TypedDict): library_id: str sharing_delete: SharingDeleteTypedDict -class DeleteLibraryAccessRequest(BaseModel): +class LibrariesShareDeleteV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/listlibraryaccessesop.py b/src/mistralai/client/models/libraries_share_list_v1op.py similarity index 75% rename from src/mistralai/client/models/listlibraryaccessesop.py rename to src/mistralai/client/models/libraries_share_list_v1op.py index 2206310f..895a2590 100644 --- a/src/mistralai/client/models/listlibraryaccessesop.py +++ b/src/mistralai/client/models/libraries_share_list_v1op.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 581b332626b7 +# @generated-id: 8f0af379bf1c from __future__ import annotations from mistralai.client.types import BaseModel @@ -7,11 +7,11 @@ from typing_extensions import Annotated, TypedDict -class ListLibraryAccessesRequestTypedDict(TypedDict): +class LibrariesShareListV1RequestTypedDict(TypedDict): library_id: str -class ListLibraryAccessesRequest(BaseModel): +class LibrariesShareListV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] diff --git a/src/mistralai/client/models/updatelibraryop.py b/src/mistralai/client/models/libraries_update_v1op.py similarity index 60% rename from src/mistralai/client/models/updatelibraryop.py rename to src/mistralai/client/models/libraries_update_v1op.py index c5a1ad30..54b0ab70 100644 --- a/src/mistralai/client/models/updatelibraryop.py +++ b/src/mistralai/client/models/libraries_update_v1op.py @@ -1,24 +1,24 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 4ba7acdb62c6 +# @generated-id: 92c8d4132252 from __future__ import annotations -from .libraryinupdate import LibraryInUpdate, LibraryInUpdateTypedDict +from .updatelibraryrequest import UpdateLibraryRequest, UpdateLibraryRequestTypedDict from mistralai.client.types import BaseModel from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata from typing_extensions import Annotated, TypedDict -class UpdateLibraryRequestTypedDict(TypedDict): +class LibrariesUpdateV1RequestTypedDict(TypedDict): library_id: str - library_in_update: LibraryInUpdateTypedDict + update_library_request: UpdateLibraryRequestTypedDict -class UpdateLibraryRequest(BaseModel): +class LibrariesUpdateV1Request(BaseModel): library_id: Annotated[ str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) ] - library_in_update: Annotated[ - LibraryInUpdate, + update_library_request: Annotated[ + UpdateLibraryRequest, FieldMetadata(request=RequestMetadata(media_type="application/json")), ] diff --git a/src/mistralai/client/models/libraryout.py b/src/mistralai/client/models/library.py similarity index 58% rename from src/mistralai/client/models/libraryout.py rename to src/mistralai/client/models/library.py index c7ab7b8d..1953b6fb 100644 --- a/src/mistralai/client/models/libraryout.py +++ b/src/mistralai/client/models/library.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 4e608c7aafc4 +# @generated-id: 028a34b08f9c from __future__ import annotations from datetime import datetime @@ -14,7 +14,7 @@ from typing_extensions import NotRequired, TypedDict -class LibraryOutTypedDict(TypedDict): +class LibraryTypedDict(TypedDict): id: str name: str created_at: datetime @@ -34,7 +34,7 @@ class LibraryOutTypedDict(TypedDict): r"""Generated Name""" -class LibraryOut(BaseModel): +class Library(BaseModel): id: str name: str @@ -70,48 +70,47 @@ class LibraryOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "emoji", - "description", - "generated_description", - "explicit_user_members_count", - "explicit_workspace_members_count", - "org_sharing_role", - "generated_name", - ] - nullable_fields = [ - "owner_id", - "chunk_size", - "emoji", - "description", - "generated_description", - "explicit_user_members_count", - "explicit_workspace_members_count", - "org_sharing_role", - "generated_name", - ] - null_default_fields = [] - + optional_fields = set( + [ + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + "org_sharing_role", + "generated_name", + ] + ) + nullable_fields = set( + [ + "owner_id", + "chunk_size", + "emoji", + "description", + "generated_description", + "explicit_user_members_count", + "explicit_workspace_members_count", + "org_sharing_role", + "generated_name", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = 
val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/libraryinupdate.py b/src/mistralai/client/models/libraryinupdate.py deleted file mode 100644 index 328b2de3..00000000 --- a/src/mistralai/client/models/libraryinupdate.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 300a6bb02e6e - -from __future__ import annotations -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class LibraryInUpdateTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - - -class LibraryInUpdate(BaseModel): - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "description"] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/listbatchjobsresponse.py 
b/src/mistralai/client/models/listbatchjobsresponse.py new file mode 100644 index 00000000..35a348a1 --- /dev/null +++ b/src/mistralai/client/models/listbatchjobsresponse.py @@ -0,0 +1,51 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 99d94c86a871 + +from __future__ import annotations +from .batchjob import BatchJob, BatchJobTypedDict +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import List, Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class ListBatchJobsResponseTypedDict(TypedDict): + total: int + data: NotRequired[List[BatchJobTypedDict]] + object: Literal["list"] + + +class ListBatchJobsResponse(BaseModel): + total: int + + data: Optional[List[BatchJob]] = None + + object: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["data", "object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ListBatchJobsResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/listdocumentout.py b/src/mistralai/client/models/listdocumentsresponse.py similarity index 60% rename from src/mistralai/client/models/listdocumentout.py rename to src/mistralai/client/models/listdocumentsresponse.py index a636b3de..c48b8c05 100644 --- a/src/mistralai/client/models/listdocumentout.py +++ b/src/mistralai/client/models/listdocumentsresponse.py @@ -1,20 +1,20 
@@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: b2c96075ce00 +# @generated-id: f593d8e66833 from __future__ import annotations -from .documentout import DocumentOut, DocumentOutTypedDict +from .document import Document, DocumentTypedDict from .paginationinfo import PaginationInfo, PaginationInfoTypedDict from mistralai.client.types import BaseModel from typing import List from typing_extensions import TypedDict -class ListDocumentOutTypedDict(TypedDict): +class ListDocumentsResponseTypedDict(TypedDict): pagination: PaginationInfoTypedDict - data: List[DocumentOutTypedDict] + data: List[DocumentTypedDict] -class ListDocumentOut(BaseModel): +class ListDocumentsResponse(BaseModel): pagination: PaginationInfo - data: List[DocumentOut] + data: List[Document] diff --git a/src/mistralai/client/models/listfilesout.py b/src/mistralai/client/models/listfilesresponse.py similarity index 53% rename from src/mistralai/client/models/listfilesout.py rename to src/mistralai/client/models/listfilesresponse.py index 460822f7..10a60126 100644 --- a/src/mistralai/client/models/listfilesout.py +++ b/src/mistralai/client/models/listfilesresponse.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ae5fa21b141c +# @generated-id: 85d6d24c1a19 from __future__ import annotations from .fileschema import FileSchema, FileSchemaTypedDict @@ -15,13 +15,13 @@ from typing_extensions import NotRequired, TypedDict -class ListFilesOutTypedDict(TypedDict): +class ListFilesResponseTypedDict(TypedDict): data: List[FileSchemaTypedDict] object: str total: NotRequired[Nullable[int]] -class ListFilesOut(BaseModel): +class ListFilesResponse(BaseModel): data: List[FileSchema] object: str @@ -30,30 +30,25 @@ class ListFilesOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["total"] - nullable_fields = ["total"] - null_default_fields = [] - + optional_fields = set(["total"]) + nullable_fields = set(["total"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/listfinetuningjobsresponse.py b/src/mistralai/client/models/listfinetuningjobsresponse.py new file mode 100644 index 00000000..1e434c59 --- /dev/null +++ b/src/mistralai/client/models/listfinetuningjobsresponse.py @@ -0,0 +1,100 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 118e05dbfbbd + +from __future__ import annotations +from .classifierfinetuningjob import ( + ClassifierFineTuningJob, + ClassifierFineTuningJobTypedDict, +) +from .completionfinetuningjob import ( + CompletionFineTuningJob, + CompletionFineTuningJobTypedDict, +) +from functools import partial +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator, BeforeValidator +from typing import Any, List, Literal, Optional, Union +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict + + +ListFineTuningJobsResponseDataTypedDict = TypeAliasType( + "ListFineTuningJobsResponseDataTypedDict", + Union[ClassifierFineTuningJobTypedDict, CompletionFineTuningJobTypedDict], +) + + +class UnknownListFineTuningJobsResponseData(BaseModel): + r"""A ListFineTuningJobsResponseData variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + job_type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_LIST_FINE_TUNING_JOBS_RESPONSE_DATA_VARIANTS: dict[str, Any] = { + "classifier": ClassifierFineTuningJob, + "completion": CompletionFineTuningJob, +} + + +ListFineTuningJobsResponseData = Annotated[ + Union[ + ClassifierFineTuningJob, + CompletionFineTuningJob, + UnknownListFineTuningJobsResponseData, + ], + BeforeValidator( + partial( + parse_open_union, + disc_key="job_type", + variants=_LIST_FINE_TUNING_JOBS_RESPONSE_DATA_VARIANTS, + unknown_cls=UnknownListFineTuningJobsResponseData, + union_name="ListFineTuningJobsResponseData", + ) + ), +] + + +class ListFineTuningJobsResponseTypedDict(TypedDict): + total: int + data: NotRequired[List[ListFineTuningJobsResponseDataTypedDict]] + object: Literal["list"] + + +class ListFineTuningJobsResponse(BaseModel): + total: int + + data: Optional[List[ListFineTuningJobsResponseData]] = None + + object: Annotated[ + Annotated[Optional[Literal["list"]], AfterValidator(validate_const("list"))], + pydantic.Field(alias="object"), + ] = "list" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["data", "object"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ListFineTuningJobsResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/listlibrariesresponse.py b/src/mistralai/client/models/listlibrariesresponse.py new file mode 100644 index 00000000..337fe105 --- /dev/null +++ b/src/mistralai/client/models/listlibrariesresponse.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: df556a618365 + +from __future__ import annotations +from .library import Library, LibraryTypedDict +from mistralai.client.types import BaseModel +from typing import List +from typing_extensions import TypedDict + + +class ListLibrariesResponseTypedDict(TypedDict): + data: List[LibraryTypedDict] + + +class ListLibrariesResponse(BaseModel): + data: List[Library] diff --git a/src/mistralai/client/models/listlibraryout.py b/src/mistralai/client/models/listlibraryout.py deleted file mode 100644 index 39fa459f..00000000 --- a/src/mistralai/client/models/listlibraryout.py +++ /dev/null @@ -1,16 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: cb78c529e763 - -from __future__ import annotations -from .libraryout import LibraryOut, LibraryOutTypedDict -from mistralai.client.types import BaseModel -from typing import List -from typing_extensions import TypedDict - - -class ListLibraryOutTypedDict(TypedDict): - data: List[LibraryOutTypedDict] - - -class ListLibraryOut(BaseModel): - data: List[LibraryOut] diff --git a/src/mistralai/client/models/messageinputcontentchunks.py b/src/mistralai/client/models/messageinputcontentchunks.py index 63cf14e7..1e04ce24 100644 --- a/src/mistralai/client/models/messageinputcontentchunks.py +++ b/src/mistralai/client/models/messageinputcontentchunks.py @@ -2,10 +2,13 @@ # @generated-id: 01025c12866a from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkTypedDict, +) from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from typing import Union from typing_extensions import TypeAliasType @@ -17,7 
+20,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, - ThinkChunkTypedDict, + ConversationThinkChunkTypedDict, ToolFileChunkTypedDict, ], ) @@ -25,5 +28,11 @@ MessageInputContentChunks = TypeAliasType( "MessageInputContentChunks", - Union[TextChunk, ImageURLChunk, DocumentURLChunk, ThinkChunk, ToolFileChunk], + Union[ + TextChunk, + ImageURLChunk, + DocumentURLChunk, + ConversationThinkChunk, + ToolFileChunk, + ], ) diff --git a/src/mistralai/client/models/messageinputentry.py b/src/mistralai/client/models/messageinputentry.py index 15046d25..c948a13e 100644 --- a/src/mistralai/client/models/messageinputentry.py +++ b/src/mistralai/client/models/messageinputentry.py @@ -15,18 +15,15 @@ UNSET_SENTINEL, UnrecognizedStr, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -MessageInputEntryObject = Literal["entry",] - - -MessageInputEntryType = Literal["message.input",] - - -MessageInputEntryRole = Union[ +Role = Union[ Literal[ "assistant", "user", @@ -49,10 +46,10 @@ class MessageInputEntryTypedDict(TypedDict): r"""Representation of an input message inside the conversation.""" - role: MessageInputEntryRole + role: Role content: MessageInputEntryContentTypedDict - object: NotRequired[MessageInputEntryObject] - type: NotRequired[MessageInputEntryType] + object: Literal["entry"] + type: Literal["message.input"] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] id: NotRequired[str] @@ -62,13 +59,22 @@ class MessageInputEntryTypedDict(TypedDict): class MessageInputEntry(BaseModel): r"""Representation of an input message inside the conversation.""" - role: MessageInputEntryRole + role: Role content: 
MessageInputEntryContent - object: Optional[MessageInputEntryObject] = "entry" + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" - type: Optional[MessageInputEntryType] = "message.input" + type: Annotated[ + Annotated[ + Optional[Literal["message.input"]], + AfterValidator(validate_const("message.input")), + ], + pydantic.Field(alias="type"), + ] = "message.input" created_at: Optional[datetime] = None @@ -80,37 +86,33 @@ class MessageInputEntry(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "type", - "created_at", - "completed_at", - "id", - "prefix", - ] - nullable_fields = ["completed_at"] - null_default_fields = [] - + optional_fields = set( + ["object", "type", "created_at", "completed_at", "id", "prefix"] + ) + nullable_fields = set(["completed_at"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + MessageInputEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/messageoutputcontentchunks.py b/src/mistralai/client/models/messageoutputcontentchunks.py index def7a4d2..bf455d17 100644 --- 
a/src/mistralai/client/models/messageoutputcontentchunks.py +++ b/src/mistralai/client/models/messageoutputcontentchunks.py @@ -2,10 +2,13 @@ # @generated-id: 2ed248515035 from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + ConversationThinkChunkTypedDict, +) from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict from typing import Union @@ -18,7 +21,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, - ThinkChunkTypedDict, + ConversationThinkChunkTypedDict, ToolFileChunkTypedDict, ToolReferenceChunkTypedDict, ], @@ -31,7 +34,7 @@ TextChunk, ImageURLChunk, DocumentURLChunk, - ThinkChunk, + ConversationThinkChunk, ToolFileChunk, ToolReferenceChunk, ], diff --git a/src/mistralai/client/models/messageoutputentry.py b/src/mistralai/client/models/messageoutputentry.py index 8752fc36..6a9c52ed 100644 --- a/src/mistralai/client/models/messageoutputentry.py +++ b/src/mistralai/client/models/messageoutputentry.py @@ -14,18 +14,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -MessageOutputEntryObject = Literal["entry",] - - -MessageOutputEntryType = Literal["message.output",] - - -MessageOutputEntryRole = Literal["assistant",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict MessageOutputEntryContentTypedDict = TypeAliasType( @@ -41,70 +35,87 @@ class 
MessageOutputEntryTypedDict(TypedDict): content: MessageOutputEntryContentTypedDict - object: NotRequired[MessageOutputEntryObject] - type: NotRequired[MessageOutputEntryType] + object: Literal["entry"] + type: Literal["message.output"] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] - id: NotRequired[str] agent_id: NotRequired[Nullable[str]] model: NotRequired[Nullable[str]] - role: NotRequired[MessageOutputEntryRole] + id: NotRequired[str] + role: Literal["assistant"] class MessageOutputEntry(BaseModel): content: MessageOutputEntryContent - object: Optional[MessageOutputEntryObject] = "entry" + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" - type: Optional[MessageOutputEntryType] = "message.output" + type: Annotated[ + Annotated[ + Optional[Literal["message.output"]], + AfterValidator(validate_const("message.output")), + ], + pydantic.Field(alias="type"), + ] = "message.output" created_at: Optional[datetime] = None completed_at: OptionalNullable[datetime] = UNSET - id: Optional[str] = None - agent_id: OptionalNullable[str] = UNSET model: OptionalNullable[str] = UNSET - role: Optional[MessageOutputEntryRole] = "assistant" + id: Optional[str] = None + + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "object", - "type", - "created_at", - "completed_at", - "id", - "agent_id", - "model", - "role", - ] - nullable_fields = ["completed_at", "agent_id", "model"] - null_default_fields = [] - + optional_fields = set( + [ + "object", + "type", + "created_at", + "completed_at", + "agent_id", + "model", + "id", + "role", + ] + ) + nullable_fields = set(["completed_at", "agent_id", "model"]) serialized = handler(self) - m = {} for n, f in 
type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + MessageOutputEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/messageoutputevent.py b/src/mistralai/client/models/messageoutputevent.py index 39c10139..d765f4fd 100644 --- a/src/mistralai/client/models/messageoutputevent.py +++ b/src/mistralai/client/models/messageoutputevent.py @@ -19,9 +19,6 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -MessageOutputEventRole = Literal["assistant",] - - MessageOutputEventContentTypedDict = TypeAliasType( "MessageOutputEventContentTypedDict", Union[str, OutputContentChunksTypedDict] ) @@ -41,7 +38,7 @@ class MessageOutputEventTypedDict(TypedDict): content_index: NotRequired[int] model: NotRequired[Nullable[str]] agent_id: NotRequired[Nullable[str]] - role: NotRequired[MessageOutputEventRole] + role: Literal["assistant"] class MessageOutputEvent(BaseModel): @@ -49,7 +46,7 @@ class MessageOutputEvent(BaseModel): content: MessageOutputEventContent - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["message.output.delta"], AfterValidator(validate_const("message.output.delta")), @@ -67,41 +64,42 @@ class MessageOutputEvent(BaseModel): agent_id: OptionalNullable[str] = UNSET - role: 
Optional[MessageOutputEventRole] = "assistant" + role: Annotated[ + Annotated[ + Optional[Literal["assistant"]], AfterValidator(validate_const("assistant")) + ], + pydantic.Field(alias="role"), + ] = "assistant" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "created_at", - "output_index", - "content_index", - "model", - "agent_id", - "role", - ] - nullable_fields = ["model", "agent_id"] - null_default_fields = [] - + optional_fields = set( + ["created_at", "output_index", "content_index", "model", "agent_id", "role"] + ) + nullable_fields = set(["model", "agent_id"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + MessageOutputEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/metricout.py b/src/mistralai/client/models/metric.py similarity index 60% rename from src/mistralai/client/models/metricout.py rename to src/mistralai/client/models/metric.py index 5705c712..1413f589 100644 --- a/src/mistralai/client/models/metricout.py +++ b/src/mistralai/client/models/metric.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 92d33621dda7 +# @generated-id: c6a65acdd1a2 from __future__ import annotations from mistralai.client.types import ( @@ -13,7 +13,7 @@ from typing_extensions import NotRequired, TypedDict -class MetricOutTypedDict(TypedDict): +class MetricTypedDict(TypedDict): r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" train_loss: NotRequired[Nullable[float]] @@ -21,7 +21,7 @@ class MetricOutTypedDict(TypedDict): valid_mean_token_accuracy: NotRequired[Nullable[float]] -class MetricOut(BaseModel): +class Metric(BaseModel): r"""Metrics at the step number during the fine-tuning job. Use these metrics to assess if the training is going smoothly (loss should decrease, token accuracy should increase).""" train_loss: OptionalNullable[float] = UNSET @@ -32,30 +32,25 @@ class MetricOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] - nullable_fields = ["train_loss", "valid_loss", "valid_mean_token_accuracy"] - null_default_fields = [] - + optional_fields = set(["train_loss", "valid_loss", "valid_mean_token_accuracy"]) + nullable_fields = set(["train_loss", "valid_loss", "valid_mean_token_accuracy"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/modelcapabilities.py b/src/mistralai/client/models/modelcapabilities.py index c329efbc..d9293ccc 100644 --- a/src/mistralai/client/models/modelcapabilities.py +++ b/src/mistralai/client/models/modelcapabilities.py @@ -2,7 +2,8 @@ # @generated-id: 64d8a422ea29 from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -40,3 +41,32 @@ class ModelCapabilities(BaseModel): audio: Optional[bool] = False audio_transcription: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set( + [ + "completion_chat", + "function_calling", + "completion_fim", + "fine_tuning", + "vision", + "ocr", + "classification", + "moderation", + "audio", + "audio_transcription", + ] + ) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/modelconversation.py b/src/mistralai/client/models/modelconversation.py index c0bacb7f..bb33d2e0 100644 --- a/src/mistralai/client/models/modelconversation.py +++ b/src/mistralai/client/models/modelconversation.py @@ -10,6 +10,7 @@ from .websearchpremiumtool import WebSearchPremiumTool, WebSearchPremiumToolTypedDict from .websearchtool import WebSearchTool, WebSearchToolTypedDict from datetime import datetime +from functools import partial from mistralai.client.types import ( BaseModel, Nullable, @@ -17,7 
+18,11 @@ UNSET, UNSET_SENTINEL, ) -from pydantic import Field, model_serializer +from mistralai.client.utils import validate_const +from mistralai.client.utils.unions import parse_open_union +import pydantic +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator, BeforeValidator from typing import Any, Dict, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -25,16 +30,36 @@ ModelConversationToolTypedDict = TypeAliasType( "ModelConversationToolTypedDict", Union[ + FunctionToolTypedDict, WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) +class UnknownModelConversationTool(BaseModel): + r"""A ModelConversationTool variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_MODEL_CONVERSATION_TOOL_VARIANTS: dict[str, Any] = { + "code_interpreter": CodeInterpreterTool, + "document_library": DocumentLibraryTool, + "function": FunctionTool, + "image_generation": ImageGenerationTool, + "web_search": WebSearchTool, + "web_search_premium": WebSearchPremiumTool, +} + + ModelConversationTool = Annotated[ Union[ CodeInterpreterTool, @@ -43,14 +68,20 @@ ImageGenerationTool, WebSearchTool, WebSearchPremiumTool, + UnknownModelConversationTool, ], - Field(discriminator="TYPE"), + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_MODEL_CONVERSATION_TOOL_VARIANTS, + unknown_cls=UnknownModelConversationTool, + union_name="ModelConversationTool", + ) + ), ] -ModelConversationObject = Literal["conversation",] - - class ModelConversationTypedDict(TypedDict): id: str created_at: datetime @@ -68,7 +99,7 @@ class ModelConversationTypedDict(TypedDict): r"""Description of the what the 
conversation is about.""" metadata: NotRequired[Nullable[Dict[str, Any]]] r"""Custom metadata for the conversation.""" - object: NotRequired[ModelConversationObject] + object: Literal["conversation"] class ModelConversation(BaseModel): @@ -98,42 +129,51 @@ class ModelConversation(BaseModel): metadata: OptionalNullable[Dict[str, Any]] = UNSET r"""Custom metadata for the conversation.""" - object: Optional[ModelConversationObject] = "conversation" + object: Annotated[ + Annotated[ + Optional[Literal["conversation"]], + AfterValidator(validate_const("conversation")), + ], + pydantic.Field(alias="object"), + ] = "conversation" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "name", - "description", - "metadata", - "object", - ] - nullable_fields = ["instructions", "name", "description", "metadata"] - null_default_fields = [] - + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "name", + "description", + "metadata", + "object", + ] + ) + nullable_fields = set(["instructions", "name", "description", "metadata"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + 
ModelConversation.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/modellist.py b/src/mistralai/client/models/modellist.py index c122122c..5fd835f2 100644 --- a/src/mistralai/client/models/modellist.py +++ b/src/mistralai/client/models/modellist.py @@ -4,9 +4,12 @@ from __future__ import annotations from .basemodelcard import BaseModelCard, BaseModelCardTypedDict from .ftmodelcard import FTModelCard, FTModelCardTypedDict -from mistralai.client.types import BaseModel -from pydantic import Field -from typing import List, Optional, Union +from functools import partial +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import BeforeValidator +from typing import Any, List, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -15,8 +18,33 @@ ) +class UnknownModelListData(BaseModel): + r"""A ModelListData variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_MODEL_LIST_DATA_VARIANTS: dict[str, Any] = { + "base": BaseModelCard, + "fine-tuned": FTModelCard, +} + + ModelListData = Annotated[ - Union[BaseModelCard, FTModelCard], Field(discriminator="TYPE") + Union[BaseModelCard, FTModelCard, UnknownModelListData], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_MODEL_LIST_DATA_VARIANTS, + unknown_cls=UnknownModelListData, + union_name="ModelListData", + ) + ), ] @@ -29,3 +57,19 @@ class ModelList(BaseModel): object: Optional[str] = "list" data: Optional[List[ModelListData]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "data"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/moderationobject.py b/src/mistralai/client/models/moderationobject.py index 9aa4eb15..e7ccd8f6 100644 --- a/src/mistralai/client/models/moderationobject.py +++ b/src/mistralai/client/models/moderationobject.py @@ -2,7 +2,8 @@ # @generated-id: 132faad0549a from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Dict, Optional from typing_extensions import NotRequired, TypedDict @@ -20,3 +21,19 @@ class ModerationObject(BaseModel): category_scores: Optional[Dict[str, float]] = None r"""Moderation result""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["categories", "category_scores"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + 
k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/ocrimageobject.py b/src/mistralai/client/models/ocrimageobject.py index e95b67e1..365f062b 100644 --- a/src/mistralai/client/models/ocrimageobject.py +++ b/src/mistralai/client/models/ocrimageobject.py @@ -54,37 +54,34 @@ class OCRImageObject(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["image_base64", "image_annotation"] - nullable_fields = [ - "top_left_x", - "top_left_y", - "bottom_right_x", - "bottom_right_y", - "image_base64", - "image_annotation", - ] - null_default_fields = [] - + optional_fields = set(["image_base64", "image_annotation"]) + nullable_fields = set( + [ + "top_left_x", + "top_left_y", + "bottom_right_x", + "bottom_right_y", + "image_base64", + "image_annotation", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/ocrpageobject.py b/src/mistralai/client/models/ocrpageobject.py index 4f4ccf43..ffc7b3b6 100644 --- a/src/mistralai/client/models/ocrpageobject.py +++ b/src/mistralai/client/models/ocrpageobject.py 
@@ -63,30 +63,25 @@ class OCRPageObject(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tables", "hyperlinks", "header", "footer"] - nullable_fields = ["header", "footer", "dimensions"] - null_default_fields = [] - + optional_fields = set(["tables", "hyperlinks", "header", "footer"]) + nullable_fields = set(["header", "footer", "dimensions"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/ocrrequest.py b/src/mistralai/client/models/ocrrequest.py index 18b899dd..4ad337ce 100644 --- a/src/mistralai/client/models/ocrrequest.py +++ b/src/mistralai/client/models/ocrrequest.py @@ -18,14 +18,16 @@ from typing_extensions import NotRequired, TypeAliasType, TypedDict -DocumentTypedDict = TypeAliasType( - "DocumentTypedDict", +DocumentUnionTypedDict = TypeAliasType( + "DocumentUnionTypedDict", Union[FileChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict], ) r"""Document to run OCR on""" -Document = TypeAliasType("Document", Union[FileChunk, ImageURLChunk, DocumentURLChunk]) +DocumentUnion = TypeAliasType( + "DocumentUnion", Union[FileChunk, ImageURLChunk, DocumentURLChunk] +) r"""Document to run OCR on""" @@ -37,7 
+39,7 @@ class OCRRequestTypedDict(TypedDict): model: Nullable[str] - document: DocumentTypedDict + document: DocumentUnionTypedDict r"""Document to run OCR on""" id: NotRequired[str] pages: NotRequired[Nullable[List[int]]] @@ -62,7 +64,7 @@ class OCRRequestTypedDict(TypedDict): class OCRRequest(BaseModel): model: Nullable[str] - document: Document + document: DocumentUnion r"""Document to run OCR on""" id: Optional[str] = None @@ -96,52 +98,51 @@ class OCRRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "id", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - "extract_header", - "extract_footer", - ] - nullable_fields = [ - "model", - "pages", - "include_image_base64", - "image_limit", - "image_min_size", - "bbox_annotation_format", - "document_annotation_format", - "document_annotation_prompt", - "table_format", - ] - null_default_fields = [] - + optional_fields = set( + [ + "id", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + "extract_header", + "extract_footer", + ] + ) + nullable_fields = set( + [ + "model", + "pages", + "include_image_base64", + "image_limit", + "image_min_size", + "bbox_annotation_format", + "document_annotation_format", + "document_annotation_prompt", + "table_format", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - 
not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/ocrresponse.py b/src/mistralai/client/models/ocrresponse.py index 0a36e975..e63eed98 100644 --- a/src/mistralai/client/models/ocrresponse.py +++ b/src/mistralai/client/models/ocrresponse.py @@ -40,30 +40,25 @@ class OCRResponse(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["document_annotation"] - nullable_fields = ["document_annotation"] - null_default_fields = [] - + optional_fields = set(["document_annotation"]) + nullable_fields = set(["document_annotation"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/ocrtableobject.py b/src/mistralai/client/models/ocrtableobject.py index e32ad894..66bb050f 100644 --- a/src/mistralai/client/models/ocrtableobject.py +++ b/src/mistralai/client/models/ocrtableobject.py @@ -36,3 
+36,9 @@ class OCRTableObject(BaseModel): format_: Annotated[Format, pydantic.Field(alias="format")] r"""Format of the table""" + + +try: + OCRTableObject.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/ocrusageinfo.py b/src/mistralai/client/models/ocrusageinfo.py index a421d850..2ec1322b 100644 --- a/src/mistralai/client/models/ocrusageinfo.py +++ b/src/mistralai/client/models/ocrusageinfo.py @@ -29,30 +29,25 @@ class OCRUsageInfo(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["doc_size_bytes"] - nullable_fields = ["doc_size_bytes"] - null_default_fields = [] - + optional_fields = set(["doc_size_bytes"]) + nullable_fields = set(["doc_size_bytes"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/outputcontentchunks.py b/src/mistralai/client/models/outputcontentchunks.py index 1a115fe8..fab7907b 100644 --- a/src/mistralai/client/models/outputcontentchunks.py +++ b/src/mistralai/client/models/outputcontentchunks.py @@ -2,10 +2,13 @@ # @generated-id: 9ad9741f4975 from __future__ import annotations +from .conversationthinkchunk import ( + ConversationThinkChunk, + 
ConversationThinkChunkTypedDict, +) from .documenturlchunk import DocumentURLChunk, DocumentURLChunkTypedDict from .imageurlchunk import ImageURLChunk, ImageURLChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from .thinkchunk import ThinkChunk, ThinkChunkTypedDict from .toolfilechunk import ToolFileChunk, ToolFileChunkTypedDict from .toolreferencechunk import ToolReferenceChunk, ToolReferenceChunkTypedDict from typing import Union @@ -18,7 +21,7 @@ TextChunkTypedDict, ImageURLChunkTypedDict, DocumentURLChunkTypedDict, - ThinkChunkTypedDict, + ConversationThinkChunkTypedDict, ToolFileChunkTypedDict, ToolReferenceChunkTypedDict, ], @@ -31,7 +34,7 @@ TextChunk, ImageURLChunk, DocumentURLChunk, - ThinkChunk, + ConversationThinkChunk, ToolFileChunk, ToolReferenceChunk, ], diff --git a/src/mistralai/client/models/prediction.py b/src/mistralai/client/models/prediction.py index 52f4adf1..0c6f4182 100644 --- a/src/mistralai/client/models/prediction.py +++ b/src/mistralai/client/models/prediction.py @@ -2,9 +2,10 @@ # @generated-id: 1cc842a069a5 from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -20,7 +21,7 @@ class PredictionTypedDict(TypedDict): class Prediction(BaseModel): r"""Enable users to specify an expected completion, optimizing response times by leveraging known or predictable content.""" - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["content"]], AfterValidator(validate_const("content")) ], @@ -28,3 +29,25 @@ class Prediction(BaseModel): ] = "content" content: Optional[str] = "" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = 
set(["type", "content"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + Prediction.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionerror.py b/src/mistralai/client/models/realtimetranscriptionerror.py index f8f2d3da..c661e461 100644 --- a/src/mistralai/client/models/realtimetranscriptionerror.py +++ b/src/mistralai/client/models/realtimetranscriptionerror.py @@ -6,9 +6,10 @@ RealtimeTranscriptionErrorDetail, RealtimeTranscriptionErrorDetailTypedDict, ) -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, TypedDict @@ -22,7 +23,29 @@ class RealtimeTranscriptionErrorTypedDict(TypedDict): class RealtimeTranscriptionError(BaseModel): error: RealtimeTranscriptionErrorDetail - TYPE: Annotated[ + type: Annotated[ Annotated[Optional[Literal["error"]], AfterValidator(validate_const("error"))], pydantic.Field(alias="type"), ] = "error" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionError.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py b/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py new file mode 100644 index 
00000000..8156a270 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioappend.py @@ -0,0 +1,52 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 8b03cde6e115 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioAppendTypedDict(TypedDict): + audio: str + r"""Base64-encoded raw PCM bytes matching the current audio_format. Max decoded size: 262144 bytes.""" + type: Literal["input_audio.append"] + + +class RealtimeTranscriptionInputAudioAppend(BaseModel): + audio: str + r"""Base64-encoded raw PCM bytes matching the current audio_format. Max decoded size: 262144 bytes.""" + + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.append"]], + AfterValidator(validate_const("input_audio.append")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.append" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioAppend.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioend.py b/src/mistralai/client/models/realtimetranscriptioninputaudioend.py new file mode 100644 index 00000000..473eedb7 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioend.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy 
(https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: c187ba1b551d + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioEndTypedDict(TypedDict): + type: Literal["input_audio.end"] + + +class RealtimeTranscriptionInputAudioEnd(BaseModel): + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.end"]], + AfterValidator(validate_const("input_audio.end")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.end" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioEnd.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py b/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py new file mode 100644 index 00000000..553d14c7 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptioninputaudioflush.py @@ -0,0 +1,47 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: b27b600c310e + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionInputAudioFlushTypedDict(TypedDict): + type: Literal["input_audio.flush"] + + +class RealtimeTranscriptionInputAudioFlush(BaseModel): + type: Annotated[ + Annotated[ + Optional[Literal["input_audio.flush"]], + AfterValidator(validate_const("input_audio.flush")), + ], + pydantic.Field(alias="type"), + ] = "input_audio.flush" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionInputAudioFlush.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsession.py b/src/mistralai/client/models/realtimetranscriptionsession.py index d20d0d8c..a74a457b 100644 --- a/src/mistralai/client/models/realtimetranscriptionsession.py +++ b/src/mistralai/client/models/realtimetranscriptionsession.py @@ -3,14 +3,22 @@ from __future__ import annotations from .audioformat import AudioFormat, AudioFormatTypedDict -from mistralai.client.types import BaseModel -from typing_extensions import TypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict class RealtimeTranscriptionSessionTypedDict(TypedDict): request_id: str model: str audio_format: 
AudioFormatTypedDict + target_streaming_delay_ms: NotRequired[Nullable[int]] class RealtimeTranscriptionSession(BaseModel): @@ -19,3 +27,30 @@ class RealtimeTranscriptionSession(BaseModel): model: str audio_format: AudioFormat + + target_streaming_delay_ms: OptionalNullable[int] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["target_streaming_delay_ms"]) + nullable_fields = set(["target_streaming_delay_ms"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py index c4fa5774..bb96875a 100644 --- a/src/mistralai/client/models/realtimetranscriptionsessioncreated.py +++ b/src/mistralai/client/models/realtimetranscriptionsessioncreated.py @@ -6,9 +6,10 @@ RealtimeTranscriptionSession, RealtimeTranscriptionSessionTypedDict, ) -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, TypedDict @@ -22,10 +23,32 @@ class RealtimeTranscriptionSessionCreatedTypedDict(TypedDict): class RealtimeTranscriptionSessionCreated(BaseModel): session: RealtimeTranscriptionSession - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["session.created"]], AfterValidator(validate_const("session.created")), ], 
pydantic.Field(alias="type"), ] = "session.created" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionSessionCreated.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py index a61fb05e..fea5db4a 100644 --- a/src/mistralai/client/models/realtimetranscriptionsessionupdated.py +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdated.py @@ -6,9 +6,10 @@ RealtimeTranscriptionSession, RealtimeTranscriptionSessionTypedDict, ) -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, TypedDict @@ -22,10 +23,32 @@ class RealtimeTranscriptionSessionUpdatedTypedDict(TypedDict): class RealtimeTranscriptionSessionUpdated(BaseModel): session: RealtimeTranscriptionSession - TYPE: Annotated[ + type: Annotated[ Annotated[ Optional[Literal["session.updated"]], AfterValidator(validate_const("session.updated")), ], pydantic.Field(alias="type"), ] = "session.updated" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + 
RealtimeTranscriptionSessionUpdated.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py b/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py new file mode 100644 index 00000000..07ad59a4 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdatemessage.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 4e1b3fd7c5a3 + +from __future__ import annotations +from .realtimetranscriptionsessionupdatepayload import ( + RealtimeTranscriptionSessionUpdatePayload, + RealtimeTranscriptionSessionUpdatePayloadTypedDict, +) +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, TypedDict + + +class RealtimeTranscriptionSessionUpdateMessageTypedDict(TypedDict): + session: RealtimeTranscriptionSessionUpdatePayloadTypedDict + type: Literal["session.update"] + + +class RealtimeTranscriptionSessionUpdateMessage(BaseModel): + session: RealtimeTranscriptionSessionUpdatePayload + + type: Annotated[ + Annotated[ + Optional[Literal["session.update"]], + AfterValidator(validate_const("session.update")), + ], + pydantic.Field(alias="type"), + ] = "session.update" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + RealtimeTranscriptionSessionUpdateMessage.model_rebuild() +except NameError: + pass diff --git 
a/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py b/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py new file mode 100644 index 00000000..a89441e9 --- /dev/null +++ b/src/mistralai/client/models/realtimetranscriptionsessionupdatepayload.py @@ -0,0 +1,54 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: 7033fdb33ad4 + +from __future__ import annotations +from .audioformat import AudioFormat, AudioFormatTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class RealtimeTranscriptionSessionUpdatePayloadTypedDict(TypedDict): + audio_format: NotRequired[Nullable[AudioFormatTypedDict]] + r"""Set before sending audio. Audio format updates are rejected after audio starts.""" + target_streaming_delay_ms: NotRequired[Nullable[int]] + r"""Set before sending audio. Streaming delay updates are rejected after audio starts.""" + + +class RealtimeTranscriptionSessionUpdatePayload(BaseModel): + audio_format: OptionalNullable[AudioFormat] = UNSET + r"""Set before sending audio. Audio format updates are rejected after audio starts.""" + + target_streaming_delay_ms: OptionalNullable[int] = UNSET + r"""Set before sending audio. 
Streaming delay updates are rejected after audio starts.""" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["audio_format", "target_streaming_delay_ms"]) + nullable_fields = set(["audio_format", "target_streaming_delay_ms"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/referencechunk.py b/src/mistralai/client/models/referencechunk.py index 7634d8ae..e0bbae4e 100644 --- a/src/mistralai/client/models/referencechunk.py +++ b/src/mistralai/client/models/referencechunk.py @@ -2,20 +2,48 @@ # @generated-id: 921acd3a224a from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -ReferenceChunkType = Literal["reference",] +from typing_extensions import Annotated, TypedDict class ReferenceChunkTypedDict(TypedDict): reference_ids: List[int] - type: NotRequired[ReferenceChunkType] + type: Literal["reference"] class ReferenceChunk(BaseModel): reference_ids: List[int] - type: Optional[ReferenceChunkType] = "reference" + type: Annotated[ + Annotated[ + Optional[Literal["reference"]], AfterValidator(validate_const("reference")) + ], + pydantic.Field(alias="type"), + ] = "reference" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + 
optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/reprocessdocumentop.py b/src/mistralai/client/models/reprocessdocumentop.py deleted file mode 100644 index 48a4b72b..00000000 --- a/src/mistralai/client/models/reprocessdocumentop.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: b2913a7aa5c9 - -from __future__ import annotations -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from typing_extensions import Annotated, TypedDict - - -class ReprocessDocumentRequestTypedDict(TypedDict): - library_id: str - document_id: str - - -class ReprocessDocumentRequest(BaseModel): - library_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - document_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] diff --git a/src/mistralai/client/models/responsedoneevent.py b/src/mistralai/client/models/responsedoneevent.py index ed331ff1..be38fba8 100644 --- a/src/mistralai/client/models/responsedoneevent.py +++ b/src/mistralai/client/models/responsedoneevent.py @@ -4,9 +4,10 @@ from __future__ import annotations from .conversationusageinfo import ConversationUsageInfo, ConversationUsageInfoTypedDict from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator 
from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -21,7 +22,7 @@ class ResponseDoneEventTypedDict(TypedDict): class ResponseDoneEvent(BaseModel): usage: ConversationUsageInfo - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["conversation.response.done"], AfterValidator(validate_const("conversation.response.done")), @@ -30,3 +31,25 @@ class ResponseDoneEvent(BaseModel): ] = "conversation.response.done" created_at: Optional[datetime] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseDoneEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/responseerrorevent.py b/src/mistralai/client/models/responseerrorevent.py index 8f196a52..fa4d0d01 100644 --- a/src/mistralai/client/models/responseerrorevent.py +++ b/src/mistralai/client/models/responseerrorevent.py @@ -3,9 +3,10 @@ from __future__ import annotations from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -23,7 +24,7 @@ class ResponseErrorEvent(BaseModel): code: int - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["conversation.response.error"], AfterValidator(validate_const("conversation.response.error")), @@ -32,3 +33,25 @@ class ResponseErrorEvent(BaseModel): ] = "conversation.response.error" created_at: Optional[datetime] = None + + 
@model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseErrorEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/responseformat.py b/src/mistralai/client/models/responseformat.py index 409b80d6..b2971412 100644 --- a/src/mistralai/client/models/responseformat.py +++ b/src/mistralai/client/models/responseformat.py @@ -32,30 +32,25 @@ class ResponseFormat(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "json_schema"] - nullable_fields = ["json_schema"] - null_default_fields = [] - + optional_fields = set(["type", "json_schema"]) + nullable_fields = set(["json_schema"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/responsestartedevent.py b/src/mistralai/client/models/responsestartedevent.py index 256d2a6c..84abfcd9 100644 --- 
a/src/mistralai/client/models/responsestartedevent.py +++ b/src/mistralai/client/models/responsestartedevent.py @@ -3,9 +3,10 @@ from __future__ import annotations from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -20,7 +21,7 @@ class ResponseStartedEventTypedDict(TypedDict): class ResponseStartedEvent(BaseModel): conversation_id: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["conversation.response.started"], AfterValidator(validate_const("conversation.response.started")), @@ -29,3 +30,25 @@ class ResponseStartedEvent(BaseModel): ] = "conversation.response.started" created_at: Optional[datetime] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ResponseStartedEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py new file mode 100644 index 00000000..cd5955c1 --- /dev/null +++ b/src/mistralai/client/models/retrieve_model_v1_models_model_id_getop.py @@ -0,0 +1,64 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 6fefa90ca351 + +from __future__ import annotations +from .basemodelcard import BaseModelCard, BaseModelCardTypedDict +from .ftmodelcard import FTModelCard, FTModelCardTypedDict +from functools import partial +from mistralai.client.types import BaseModel +from mistralai.client.utils import FieldMetadata, PathParamMetadata +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union +from typing_extensions import Annotated, TypeAliasType, TypedDict + + +class RetrieveModelV1ModelsModelIDGetRequestTypedDict(TypedDict): + model_id: str + r"""The ID of the model to retrieve.""" + + +class RetrieveModelV1ModelsModelIDGetRequest(BaseModel): + model_id: Annotated[ + str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) + ] + r"""The ID of the model to retrieve.""" + + +ResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( + "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", + Union[BaseModelCardTypedDict, FTModelCardTypedDict], +) +r"""Successful Response""" + + +class UnknownResponseRetrieveModelV1ModelsModelIDGet(BaseModel): + r"""A ResponseRetrieveModelV1ModelsModelIDGet variant the SDK doesn't recognize. 
Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_RESPONSE_RETRIEVE_MODEL_V1_MODELS_MODEL_ID_GET_VARIANTS: dict[str, Any] = { + "base": BaseModelCard, + "fine-tuned": FTModelCard, +} + + +ResponseRetrieveModelV1ModelsModelIDGet = Annotated[ + Union[BaseModelCard, FTModelCard, UnknownResponseRetrieveModelV1ModelsModelIDGet], + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_RESPONSE_RETRIEVE_MODEL_V1_MODELS_MODEL_ID_GET_VARIANTS, + unknown_cls=UnknownResponseRetrieveModelV1ModelsModelIDGet, + union_name="ResponseRetrieveModelV1ModelsModelIDGet", + ) + ), +] +r"""Successful Response""" diff --git a/src/mistralai/client/models/retrievemodelop.py b/src/mistralai/client/models/retrievemodelop.py deleted file mode 100644 index b4334e9a..00000000 --- a/src/mistralai/client/models/retrievemodelop.py +++ /dev/null @@ -1,36 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: d883baa79c9e - -from __future__ import annotations -from .basemodelcard import BaseModelCard, BaseModelCardTypedDict -from .ftmodelcard import FTModelCard, FTModelCardTypedDict -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from pydantic import Field -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class RetrieveModelRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to retrieve.""" - - -class RetrieveModelRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to retrieve.""" - - -ResponseRetrieveModelV1ModelsModelIDGetTypedDict = TypeAliasType( - "ResponseRetrieveModelV1ModelsModelIDGetTypedDict", - Union[BaseModelCardTypedDict, FTModelCardTypedDict], -) -r"""Successful Response""" - - -ResponseRetrieveModelV1ModelsModelIDGet = Annotated[ - Union[BaseModelCard, FTModelCard], Field(discriminator="TYPE") -] -r"""Successful Response""" diff --git a/src/mistralai/client/models/security.py b/src/mistralai/client/models/security.py index 4fa8b4b2..f3b3423e 100644 --- a/src/mistralai/client/models/security.py +++ b/src/mistralai/client/models/security.py @@ -2,8 +2,9 @@ # @generated-id: c2ca0e2a36b7 from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import FieldMetadata, SecurityMetadata +from pydantic import model_serializer from typing import Optional from typing_extensions import Annotated, NotRequired, TypedDict @@ -24,3 +25,19 @@ class Security(BaseModel): ) ), ] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["ApiKey"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val 
= serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/sharingdelete.py b/src/mistralai/client/models/sharingdelete.py index 202732cf..33ccd7e7 100644 --- a/src/mistralai/client/models/sharingdelete.py +++ b/src/mistralai/client/models/sharingdelete.py @@ -33,30 +33,25 @@ class SharingDelete(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["org_id"] - nullable_fields = ["org_id"] - null_default_fields = [] - + optional_fields = set(["org_id"]) + nullable_fields = set(["org_id"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/sharingin.py b/src/mistralai/client/models/sharingin.py index 8cc3e896..7c1a52b0 100644 --- a/src/mistralai/client/models/sharingin.py +++ b/src/mistralai/client/models/sharingin.py @@ -37,30 +37,25 @@ class SharingIn(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["org_id"] - nullable_fields = ["org_id"] - null_default_fields = [] - + optional_fields = set(["org_id"]) + nullable_fields = set(["org_id"]) serialized = handler(self) - m = {} 
for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/sharingout.py b/src/mistralai/client/models/sharingout.py index 77807154..ab3679a4 100644 --- a/src/mistralai/client/models/sharingout.py +++ b/src/mistralai/client/models/sharingout.py @@ -37,30 +37,25 @@ class SharingOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["user_id"] - nullable_fields = ["user_id", "share_with_uuid"] - null_default_fields = [] - + optional_fields = set(["user_id"]) + nullable_fields = set(["user_id", "share_with_uuid"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if 
val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/startfinetuningjobop.py b/src/mistralai/client/models/startfinetuningjobop.py deleted file mode 100644 index 805a8721..00000000 --- a/src/mistralai/client/models/startfinetuningjobop.py +++ /dev/null @@ -1,41 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 663886392468 - -from __future__ import annotations -from .classifierdetailedjobout import ( - ClassifierDetailedJobOut, - ClassifierDetailedJobOutTypedDict, -) -from .completiondetailedjobout import ( - CompletionDetailedJobOut, - CompletionDetailedJobOutTypedDict, -) -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata -from pydantic import Field -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class StartFineTuningJobRequestTypedDict(TypedDict): - job_id: str - - -class StartFineTuningJobRequest(BaseModel): - job_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - - -StartFineTuningJobResponseTypedDict = TypeAliasType( - "StartFineTuningJobResponseTypedDict", - Union[CompletionDetailedJobOutTypedDict, ClassifierDetailedJobOutTypedDict], -) -r"""OK""" - - -StartFineTuningJobResponse = Annotated[ - Union[ClassifierDetailedJobOut, CompletionDetailedJobOut], - Field(discriminator="JOB_TYPE"), -] -r"""OK""" diff --git a/src/mistralai/client/models/systemmessage.py b/src/mistralai/client/models/systemmessage.py index 352eca76..2602cd2d 100644 --- a/src/mistralai/client/models/systemmessage.py +++ b/src/mistralai/client/models/systemmessage.py @@ -33,7 +33,13 @@ class SystemMessageTypedDict(TypedDict): class SystemMessage(BaseModel): content: SystemMessageContent - ROLE: Annotated[ + role: 
Annotated[ Annotated[Literal["system"], AfterValidator(validate_const("system"))], pydantic.Field(alias="role"), ] = "system" + + +try: + SystemMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/textchunk.py b/src/mistralai/client/models/textchunk.py index c0584234..ac9f3137 100644 --- a/src/mistralai/client/models/textchunk.py +++ b/src/mistralai/client/models/textchunk.py @@ -2,20 +2,46 @@ # @generated-id: 9c96fb86a9ab from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TextChunkType = Literal["text",] +from typing_extensions import Annotated, TypedDict class TextChunkTypedDict(TypedDict): text: str - type: NotRequired[TextChunkType] + type: Literal["text"] class TextChunk(BaseModel): text: str - type: Optional[TextChunkType] = "text" + type: Annotated[ + Annotated[Optional[Literal["text"]], AfterValidator(validate_const("text"))], + pydantic.Field(alias="type"), + ] = "text" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + TextChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/thinkchunk.py b/src/mistralai/client/models/thinkchunk.py index a999f5d7..5995e601 100644 --- a/src/mistralai/client/models/thinkchunk.py +++ b/src/mistralai/client/models/thinkchunk.py @@ -4,33 +4,61 @@ from __future__ import annotations from .referencechunk 
import ReferenceChunk, ReferenceChunkTypedDict from .textchunk import TextChunk, TextChunkTypedDict -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import List, Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -ThinkingTypedDict = TypeAliasType( - "ThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] +ThinkChunkThinkingTypedDict = TypeAliasType( + "ThinkChunkThinkingTypedDict", Union[ReferenceChunkTypedDict, TextChunkTypedDict] ) -Thinking = TypeAliasType("Thinking", Union[ReferenceChunk, TextChunk]) - - -ThinkChunkType = Literal["thinking",] +ThinkChunkThinking = TypeAliasType( + "ThinkChunkThinking", Union[ReferenceChunk, TextChunk] +) class ThinkChunkTypedDict(TypedDict): - thinking: List[ThinkingTypedDict] + thinking: List[ThinkChunkThinkingTypedDict] + type: Literal["thinking"] closed: NotRequired[bool] r"""Whether the thinking chunk is closed or not. Currently only used for prefixing.""" - type: NotRequired[ThinkChunkType] class ThinkChunk(BaseModel): - thinking: List[Thinking] + thinking: List[ThinkChunkThinking] + + type: Annotated[ + Annotated[Literal["thinking"], AfterValidator(validate_const("thinking"))], + pydantic.Field(alias="type"), + ] = "thinking" closed: Optional[bool] = None r"""Whether the thinking chunk is closed or not. 
Currently only used for prefixing.""" - type: Optional[ThinkChunkType] = "thinking" + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["closed"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ThinkChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/tool.py b/src/mistralai/client/models/tool.py index a46d31f1..2b9965e5 100644 --- a/src/mistralai/client/models/tool.py +++ b/src/mistralai/client/models/tool.py @@ -4,7 +4,8 @@ from __future__ import annotations from .function import Function, FunctionTypedDict from .tooltypes import ToolTypes -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -18,3 +19,19 @@ class Tool(BaseModel): function: Function type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolcall.py b/src/mistralai/client/models/toolcall.py index 4a05bbd0..181cec33 100644 --- a/src/mistralai/client/models/toolcall.py +++ b/src/mistralai/client/models/toolcall.py @@ -4,7 +4,8 @@ from __future__ import annotations from .functioncall import FunctionCall, FunctionCallTypedDict from .tooltypes import ToolTypes -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import 
model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -24,3 +25,19 @@ class ToolCall(BaseModel): type: Optional[ToolTypes] = None index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["id", "type", "index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolcallconfirmation.py b/src/mistralai/client/models/toolcallconfirmation.py new file mode 100644 index 00000000..fd6eca50 --- /dev/null +++ b/src/mistralai/client/models/toolcallconfirmation.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: f2e953cfb4fe + +from __future__ import annotations +from mistralai.client.types import BaseModel +from typing import Literal +from typing_extensions import TypedDict + + +Confirmation = Literal[ + "allow", + "deny", +] + + +class ToolCallConfirmationTypedDict(TypedDict): + tool_call_id: str + confirmation: Confirmation + + +class ToolCallConfirmation(BaseModel): + tool_call_id: str + + confirmation: Confirmation diff --git a/src/mistralai/client/models/toolchoice.py b/src/mistralai/client/models/toolchoice.py index aa2016fb..cb787df1 100644 --- a/src/mistralai/client/models/toolchoice.py +++ b/src/mistralai/client/models/toolchoice.py @@ -4,7 +4,8 @@ from __future__ import annotations from .functionname import FunctionName, FunctionNameTypedDict from .tooltypes import ToolTypes -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -24,3 +25,19 @@ class 
ToolChoice(BaseModel): r"""this restriction of `Function` is used to select a specific function to call""" type: Optional[ToolTypes] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["type"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolconfiguration.py b/src/mistralai/client/models/toolconfiguration.py new file mode 100644 index 00000000..b903c8b6 --- /dev/null +++ b/src/mistralai/client/models/toolconfiguration.py @@ -0,0 +1,53 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: faec24b75066 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing import List +from typing_extensions import NotRequired, TypedDict + + +class ToolConfigurationTypedDict(TypedDict): + exclude: NotRequired[Nullable[List[str]]] + include: NotRequired[Nullable[List[str]]] + requires_confirmation: NotRequired[Nullable[List[str]]] + + +class ToolConfiguration(BaseModel): + exclude: OptionalNullable[List[str]] = UNSET + + include: OptionalNullable[List[str]] = UNSET + + requires_confirmation: OptionalNullable[List[str]] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["exclude", "include", "requires_confirmation"]) + nullable_fields = set(["exclude", "include", "requires_confirmation"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and 
(self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/toolexecutiondeltaevent.py b/src/mistralai/client/models/toolexecutiondeltaevent.py index 384ec240..5a977ca6 100644 --- a/src/mistralai/client/models/toolexecutiondeltaevent.py +++ b/src/mistralai/client/models/toolexecutiondeltaevent.py @@ -4,9 +4,10 @@ from __future__ import annotations from .builtinconnectors import BuiltInConnectors from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -38,7 +39,7 @@ class ToolExecutionDeltaEvent(BaseModel): arguments: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["tool.execution.delta"], AfterValidator(validate_const("tool.execution.delta")), @@ -49,3 +50,25 @@ class ToolExecutionDeltaEvent(BaseModel): created_at: Optional[datetime] = None output_index: Optional[int] = 0 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ToolExecutionDeltaEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutiondoneevent.py b/src/mistralai/client/models/toolexecutiondoneevent.py index 56f28899..1c9b0ec9 100644 --- 
a/src/mistralai/client/models/toolexecutiondoneevent.py +++ b/src/mistralai/client/models/toolexecutiondoneevent.py @@ -4,9 +4,10 @@ from __future__ import annotations from .builtinconnectors import BuiltInConnectors from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Any, Dict, Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -36,7 +37,7 @@ class ToolExecutionDoneEvent(BaseModel): name: ToolExecutionDoneEventName - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["tool.execution.done"], AfterValidator(validate_const("tool.execution.done")), @@ -49,3 +50,25 @@ class ToolExecutionDoneEvent(BaseModel): output_index: Optional[int] = 0 info: Optional[Dict[str, Any]] = None + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index", "info"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + ToolExecutionDoneEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutionentry.py b/src/mistralai/client/models/toolexecutionentry.py index 158cbf06..0d6f2a13 100644 --- a/src/mistralai/client/models/toolexecutionentry.py +++ b/src/mistralai/client/models/toolexecutionentry.py @@ -11,15 +11,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, Literal, Optional, Union -from 
typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolExecutionEntryObject = Literal["entry",] - - -ToolExecutionEntryType = Literal["tool.execution",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolExecutionEntryNameTypedDict = TypeAliasType( @@ -35,10 +32,12 @@ class ToolExecutionEntryTypedDict(TypedDict): name: ToolExecutionEntryNameTypedDict arguments: str - object: NotRequired[ToolExecutionEntryObject] - type: NotRequired[ToolExecutionEntryType] + object: Literal["entry"] + type: Literal["tool.execution"] created_at: NotRequired[datetime] completed_at: NotRequired[Nullable[datetime]] + agent_id: NotRequired[Nullable[str]] + model: NotRequired[Nullable[str]] id: NotRequired[str] info: NotRequired[Dict[str, Any]] @@ -48,44 +47,69 @@ class ToolExecutionEntry(BaseModel): arguments: str - object: Optional[ToolExecutionEntryObject] = "entry" + object: Annotated[ + Annotated[Optional[Literal["entry"]], AfterValidator(validate_const("entry"))], + pydantic.Field(alias="object"), + ] = "entry" - type: Optional[ToolExecutionEntryType] = "tool.execution" + type: Annotated[ + Annotated[ + Optional[Literal["tool.execution"]], + AfterValidator(validate_const("tool.execution")), + ], + pydantic.Field(alias="type"), + ] = "tool.execution" created_at: Optional[datetime] = None completed_at: OptionalNullable[datetime] = UNSET + agent_id: OptionalNullable[str] = UNSET + + model: OptionalNullable[str] = UNSET + id: Optional[str] = None info: Optional[Dict[str, Any]] = None @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["object", "type", "created_at", "completed_at", "id", "info"] - nullable_fields = ["completed_at"] - null_default_fields = [] - + optional_fields = set( + [ + "object", + "type", + "created_at", + "completed_at", + "agent_id", + "model", + "id", + "info", + ] + ) + nullable_fields = set(["completed_at", "agent_id", "model"]) serialized = handler(self) - m = {} for n, f in 
type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolExecutionEntry.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolexecutionstartedevent.py b/src/mistralai/client/models/toolexecutionstartedevent.py index 15918669..21e5bfa8 100644 --- a/src/mistralai/client/models/toolexecutionstartedevent.py +++ b/src/mistralai/client/models/toolexecutionstartedevent.py @@ -4,9 +4,16 @@ from __future__ import annotations from .builtinconnectors import BuiltInConnectors from datetime import datetime -from mistralai.client.types import BaseModel +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict @@ -29,6 +36,8 @@ class ToolExecutionStartedEventTypedDict(TypedDict): type: Literal["tool.execution.started"] created_at: NotRequired[datetime] output_index: NotRequired[int] + model: NotRequired[Nullable[str]] + agent_id: NotRequired[Nullable[str]] class 
ToolExecutionStartedEvent(BaseModel): @@ -38,7 +47,7 @@ class ToolExecutionStartedEvent(BaseModel): arguments: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["tool.execution.started"], AfterValidator(validate_const("tool.execution.started")), @@ -49,3 +58,38 @@ class ToolExecutionStartedEvent(BaseModel): created_at: Optional[datetime] = None output_index: Optional[int] = 0 + + model: OptionalNullable[str] = UNSET + + agent_id: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["created_at", "output_index", "model", "agent_id"]) + nullable_fields = set(["model", "agent_id"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + ToolExecutionStartedEvent.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolfilechunk.py b/src/mistralai/client/models/toolfilechunk.py index 6eebd562..0708b3ff 100644 --- a/src/mistralai/client/models/toolfilechunk.py +++ b/src/mistralai/client/models/toolfilechunk.py @@ -10,12 +10,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolFileChunkType = Literal["tool_file",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolFileChunkToolTypedDict = TypeAliasType( @@ -29,7 +29,7 @@ class ToolFileChunkTypedDict(TypedDict): tool: 
ToolFileChunkToolTypedDict file_id: str - type: NotRequired[ToolFileChunkType] + type: Literal["tool_file"] file_name: NotRequired[Nullable[str]] file_type: NotRequired[Nullable[str]] @@ -39,7 +39,12 @@ class ToolFileChunk(BaseModel): file_id: str - type: Optional[ToolFileChunkType] = "tool_file" + type: Annotated[ + Annotated[ + Optional[Literal["tool_file"]], AfterValidator(validate_const("tool_file")) + ], + pydantic.Field(alias="type"), + ] = "tool_file" file_name: OptionalNullable[str] = UNSET @@ -47,30 +52,31 @@ class ToolFileChunk(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "file_name", "file_type"] - nullable_fields = ["file_name", "file_type"] - null_default_fields = [] - + optional_fields = set(["type", "file_name", "file_type"]) + nullable_fields = set(["file_name", "file_type"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolFileChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolmessage.py b/src/mistralai/client/models/toolmessage.py index b3e8ffd9..05a0ee63 100644 --- a/src/mistralai/client/models/toolmessage.py +++ b/src/mistralai/client/models/toolmessage.py @@ -28,49 
+28,50 @@ class ToolMessageTypedDict(TypedDict): content: Nullable[ToolMessageContentTypedDict] + role: Literal["tool"] tool_call_id: NotRequired[Nullable[str]] name: NotRequired[Nullable[str]] - role: Literal["tool"] class ToolMessage(BaseModel): content: Nullable[ToolMessageContent] - tool_call_id: OptionalNullable[str] = UNSET - - name: OptionalNullable[str] = UNSET - - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["tool"], AfterValidator(validate_const("tool"))], pydantic.Field(alias="role"), ] = "tool" + tool_call_id: OptionalNullable[str] = UNSET + + name: OptionalNullable[str] = UNSET + @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["tool_call_id", "name"] - nullable_fields = ["content", "tool_call_id", "name"] - null_default_fields = [] - + optional_fields = set(["tool_call_id", "name"]) + nullable_fields = set(["content", "tool_call_id", "name"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolMessage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/toolreferencechunk.py b/src/mistralai/client/models/toolreferencechunk.py index 3c76c8c2..95454fe8 100644 --- 
a/src/mistralai/client/models/toolreferencechunk.py +++ b/src/mistralai/client/models/toolreferencechunk.py @@ -10,12 +10,12 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const +import pydantic from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator from typing import Literal, Optional, Union -from typing_extensions import NotRequired, TypeAliasType, TypedDict - - -ToolReferenceChunkType = Literal["tool_reference",] +from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict ToolReferenceChunkToolTypedDict = TypeAliasType( @@ -31,7 +31,7 @@ class ToolReferenceChunkTypedDict(TypedDict): tool: ToolReferenceChunkToolTypedDict title: str - type: NotRequired[ToolReferenceChunkType] + type: Literal["tool_reference"] url: NotRequired[Nullable[str]] favicon: NotRequired[Nullable[str]] description: NotRequired[Nullable[str]] @@ -42,7 +42,13 @@ class ToolReferenceChunk(BaseModel): title: str - type: Optional[ToolReferenceChunkType] = "tool_reference" + type: Annotated[ + Annotated[ + Optional[Literal["tool_reference"]], + AfterValidator(validate_const("tool_reference")), + ], + pydantic.Field(alias="type"), + ] = "tool_reference" url: OptionalNullable[str] = UNSET @@ -52,30 +58,31 @@ class ToolReferenceChunk(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["type", "url", "favicon", "description"] - nullable_fields = ["url", "favicon", "description"] - null_default_fields = [] - + optional_fields = set(["type", "url", "favicon", "description"]) + nullable_fields = set(["url", "favicon", "description"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is 
not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + ToolReferenceChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/trainingfile.py b/src/mistralai/client/models/trainingfile.py index 1f710ff8..2faeda8b 100644 --- a/src/mistralai/client/models/trainingfile.py +++ b/src/mistralai/client/models/trainingfile.py @@ -2,7 +2,8 @@ # @generated-id: 2edf9bce227d from __future__ import annotations -from mistralai.client.types import BaseModel +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -16,3 +17,19 @@ class TrainingFile(BaseModel): file_id: str weight: Optional[float] = 1 + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["weight"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m diff --git a/src/mistralai/client/models/transcriptionresponse.py b/src/mistralai/client/models/transcriptionresponse.py index 786863ec..70315463 100644 --- a/src/mistralai/client/models/transcriptionresponse.py +++ b/src/mistralai/client/models/transcriptionresponse.py @@ -48,32 +48,27 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["segments"] - nullable_fields = ["language"] - 
null_default_fields = [] - + optional_fields = set(["segments"]) + nullable_fields = set(["language"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v diff --git a/src/mistralai/client/models/transcriptionsegmentchunk.py b/src/mistralai/client/models/transcriptionsegmentchunk.py index c78bec30..b87bfc2f 100644 --- a/src/mistralai/client/models/transcriptionsegmentchunk.py +++ b/src/mistralai/client/models/transcriptionsegmentchunk.py @@ -9,22 +9,21 @@ UNSET, UNSET_SENTINEL, ) +from mistralai.client.utils import validate_const import pydantic from pydantic import ConfigDict, model_serializer +from pydantic.functional_validators import AfterValidator from typing import Any, Dict, Literal, Optional -from typing_extensions import NotRequired, TypedDict - - -TranscriptionSegmentChunkType = Literal["transcription_segment",] +from typing_extensions import Annotated, NotRequired, TypedDict class TranscriptionSegmentChunkTypedDict(TypedDict): text: str start: float end: float + type: Literal["transcription_segment"] score: NotRequired[Nullable[float]] speaker_id: NotRequired[Nullable[str]] - type: NotRequired[TranscriptionSegmentChunkType] class TranscriptionSegmentChunk(BaseModel): @@ 
-39,12 +38,18 @@ class TranscriptionSegmentChunk(BaseModel): end: float + type: Annotated[ + Annotated[ + Optional[Literal["transcription_segment"]], + AfterValidator(validate_const("transcription_segment")), + ], + pydantic.Field(alias="type"), + ] = "transcription_segment" + score: OptionalNullable[float] = UNSET speaker_id: OptionalNullable[str] = UNSET - type: Optional[TranscriptionSegmentChunkType] = "transcription_segment" - @property def additional_properties(self): return self.__pydantic_extra__ @@ -55,33 +60,34 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["score", "speaker_id", "type"] - nullable_fields = ["score", "speaker_id"] - null_default_fields = [] - + optional_fields = set(["type", "score", "speaker_id"]) + nullable_fields = set(["score", "speaker_id"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v return m + + +try: + TranscriptionSegmentChunk.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamdone.py b/src/mistralai/client/models/transcriptionstreamdone.py index b5740b3b..e3c50169 100644 --- 
a/src/mistralai/client/models/transcriptionstreamdone.py +++ b/src/mistralai/client/models/transcriptionstreamdone.py @@ -41,7 +41,7 @@ class TranscriptionStreamDone(BaseModel): segments: Optional[List[TranscriptionSegmentChunk]] = None - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["transcription.done"], AfterValidator(validate_const("transcription.done")), @@ -59,33 +59,34 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["segments"] - nullable_fields = ["language"] - null_default_fields = [] - + optional_fields = set(["segments"]) + nullable_fields = set(["language"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v return m + + +try: + TranscriptionStreamDone.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamevents.py b/src/mistralai/client/models/transcriptionstreamevents.py index 17161a17..073fd99a 100644 --- a/src/mistralai/client/models/transcriptionstreamevents.py +++ b/src/mistralai/client/models/transcriptionstreamevents.py @@ -19,9 +19,12 @@ TranscriptionStreamTextDelta, TranscriptionStreamTextDeltaTypedDict, ) +from functools 
import partial from mistralai.client.types import BaseModel -from pydantic import Field -from typing import Union +from mistralai.client.utils.unions import parse_open_union +from pydantic import ConfigDict +from pydantic.functional_validators import BeforeValidator +from typing import Any, Literal, Union from typing_extensions import Annotated, TypeAliasType, TypedDict @@ -36,14 +39,41 @@ ) +class UnknownTranscriptionStreamEventsData(BaseModel): + r"""A TranscriptionStreamEventsData variant the SDK doesn't recognize. Preserves the raw payload.""" + + type: Literal["UNKNOWN"] = "UNKNOWN" + raw: Any + is_unknown: Literal[True] = True + + model_config = ConfigDict(frozen=True) + + +_TRANSCRIPTION_STREAM_EVENTS_DATA_VARIANTS: dict[str, Any] = { + "transcription.done": TranscriptionStreamDone, + "transcription.language": TranscriptionStreamLanguage, + "transcription.segment": TranscriptionStreamSegmentDelta, + "transcription.text.delta": TranscriptionStreamTextDelta, +} + + TranscriptionStreamEventsData = Annotated[ Union[ TranscriptionStreamDone, TranscriptionStreamLanguage, TranscriptionStreamSegmentDelta, TranscriptionStreamTextDelta, + UnknownTranscriptionStreamEventsData, ], - Field(discriminator="TYPE"), + BeforeValidator( + partial( + parse_open_union, + disc_key="type", + variants=_TRANSCRIPTION_STREAM_EVENTS_DATA_VARIANTS, + unknown_cls=UnknownTranscriptionStreamEventsData, + union_name="TranscriptionStreamEventsData", + ) + ), ] diff --git a/src/mistralai/client/models/transcriptionstreamlanguage.py b/src/mistralai/client/models/transcriptionstreamlanguage.py index 67b3e979..b6c61906 100644 --- a/src/mistralai/client/models/transcriptionstreamlanguage.py +++ b/src/mistralai/client/models/transcriptionstreamlanguage.py @@ -24,7 +24,7 @@ class TranscriptionStreamLanguage(BaseModel): audio_language: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["transcription.language"], AfterValidator(validate_const("transcription.language")), @@ -39,3 +39,9 @@ 
def additional_properties(self): @additional_properties.setter def additional_properties(self, value): self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + +try: + TranscriptionStreamLanguage.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py index 8db5e736..32ef8f9b 100644 --- a/src/mistralai/client/models/transcriptionstreamsegmentdelta.py +++ b/src/mistralai/client/models/transcriptionstreamsegmentdelta.py @@ -21,8 +21,8 @@ class TranscriptionStreamSegmentDeltaTypedDict(TypedDict): text: str start: float end: float - speaker_id: NotRequired[Nullable[str]] type: Literal["transcription.segment"] + speaker_id: NotRequired[Nullable[str]] class TranscriptionStreamSegmentDelta(BaseModel): @@ -37,9 +37,7 @@ class TranscriptionStreamSegmentDelta(BaseModel): end: float - speaker_id: OptionalNullable[str] = UNSET - - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["transcription.segment"], AfterValidator(validate_const("transcription.segment")), @@ -47,6 +45,8 @@ class TranscriptionStreamSegmentDelta(BaseModel): pydantic.Field(alias="type"), ] = "transcription.segment" + speaker_id: OptionalNullable[str] = UNSET + @property def additional_properties(self): return self.__pydantic_extra__ @@ -57,33 +57,34 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["speaker_id"] - nullable_fields = ["speaker_id"] - null_default_fields = [] - + optional_fields = set(["speaker_id"]) + nullable_fields = set(["speaker_id"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # 
pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v return m + + +try: + TranscriptionStreamSegmentDelta.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/transcriptionstreamtextdelta.py b/src/mistralai/client/models/transcriptionstreamtextdelta.py index 49338a08..42f0ffb7 100644 --- a/src/mistralai/client/models/transcriptionstreamtextdelta.py +++ b/src/mistralai/client/models/transcriptionstreamtextdelta.py @@ -24,7 +24,7 @@ class TranscriptionStreamTextDelta(BaseModel): text: str - TYPE: Annotated[ + type: Annotated[ Annotated[ Literal["transcription.text.delta"], AfterValidator(validate_const("transcription.text.delta")), @@ -39,3 +39,9 @@ def additional_properties(self): @additional_properties.setter def additional_properties(self, value): self.__pydantic_extra__ = value # pyright: ignore[reportIncompatibleVariableOverride] + + +try: + TranscriptionStreamTextDelta.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/unarchiveftmodelout.py b/src/mistralai/client/models/unarchiveftmodelout.py deleted file mode 100644 index 0249a69e..00000000 --- a/src/mistralai/client/models/unarchiveftmodelout.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 9dbc3bfb71ed - -from __future__ import annotations -from mistralai.client.types import BaseModel -from mistralai.client.utils import validate_const -import pydantic -from pydantic.functional_validators import AfterValidator -from typing import Literal, Optional -from typing_extensions import Annotated, NotRequired, TypedDict - - -class UnarchiveFTModelOutTypedDict(TypedDict): - id: str - object: Literal["model"] - archived: NotRequired[bool] - - -class UnarchiveFTModelOut(BaseModel): - id: str - - OBJECT: Annotated[ - Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], - pydantic.Field(alias="object"), - ] = "model" - - archived: Optional[bool] = False diff --git a/src/mistralai/client/models/unarchivemodelresponse.py b/src/mistralai/client/models/unarchivemodelresponse.py new file mode 100644 index 00000000..5c75d30e --- /dev/null +++ b/src/mistralai/client/models/unarchivemodelresponse.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 22e2ccbb0c80 + +from __future__ import annotations +from mistralai.client.types import BaseModel, UNSET_SENTINEL +from mistralai.client.utils import validate_const +import pydantic +from pydantic import model_serializer +from pydantic.functional_validators import AfterValidator +from typing import Literal, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class UnarchiveModelResponseTypedDict(TypedDict): + id: str + object: Literal["model"] + archived: NotRequired[bool] + + +class UnarchiveModelResponse(BaseModel): + id: str + + object: Annotated[ + Annotated[Optional[Literal["model"]], AfterValidator(validate_const("model"))], + pydantic.Field(alias="object"), + ] = "model" + + archived: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["object", "archived"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + + if val != UNSET_SENTINEL: + if val is not None or k not in optional_fields: + m[k] = val + + return m + + +try: + UnarchiveModelResponse.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/agentupdaterequest.py b/src/mistralai/client/models/updateagentrequest.py similarity index 66% rename from src/mistralai/client/models/agentupdaterequest.py rename to src/mistralai/client/models/updateagentrequest.py index 96e209d4..b751ff74 100644 --- a/src/mistralai/client/models/agentupdaterequest.py +++ b/src/mistralai/client/models/updateagentrequest.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: 2d5a3a437819 +# @generated-id: 914b4b2be67a from __future__ import annotations from .codeinterpretertool import CodeInterpreterTool, CodeInterpreterToolTypedDict @@ -21,20 +21,20 @@ from typing_extensions import Annotated, NotRequired, TypeAliasType, TypedDict -AgentUpdateRequestToolTypedDict = TypeAliasType( - "AgentUpdateRequestToolTypedDict", +UpdateAgentRequestToolTypedDict = TypeAliasType( + "UpdateAgentRequestToolTypedDict", Union[ + FunctionToolTypedDict, WebSearchToolTypedDict, WebSearchPremiumToolTypedDict, CodeInterpreterToolTypedDict, ImageGenerationToolTypedDict, - FunctionToolTypedDict, DocumentLibraryToolTypedDict, ], ) -AgentUpdateRequestTool = Annotated[ +UpdateAgentRequestTool = Annotated[ Union[ CodeInterpreterTool, DocumentLibraryTool, @@ -43,14 +43,14 @@ WebSearchTool, WebSearchPremiumTool, ], - Field(discriminator="TYPE"), + Field(discriminator="type"), ] -class AgentUpdateRequestTypedDict(TypedDict): +class UpdateAgentRequestTypedDict(TypedDict): instructions: NotRequired[Nullable[str]] r"""Instruction prompt the model will follow during the conversation.""" - tools: NotRequired[List[AgentUpdateRequestToolTypedDict]] + tools: NotRequired[List[UpdateAgentRequestToolTypedDict]] r"""List of tools which are available to the model during the conversation.""" completion_args: NotRequired[CompletionArgsTypedDict] r"""White-listed arguments from the completion API""" @@ -63,11 +63,11 @@ class AgentUpdateRequestTypedDict(TypedDict): version_message: NotRequired[Nullable[str]] -class AgentUpdateRequest(BaseModel): +class UpdateAgentRequest(BaseModel): instructions: OptionalNullable[str] = UNSET r"""Instruction prompt the model will follow during the conversation.""" - tools: Optional[List[AgentUpdateRequestTool]] = None + tools: Optional[List[UpdateAgentRequestTool]] = None r"""List of tools which are available to the model during the conversation.""" completion_args: Optional[CompletionArgs] = None @@ -89,50 +89,49 @@ 
class AgentUpdateRequest(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "instructions", - "tools", - "completion_args", - "model", - "name", - "description", - "handoffs", - "deployment_chat", - "metadata", - "version_message", - ] - nullable_fields = [ - "instructions", - "model", - "name", - "description", - "handoffs", - "deployment_chat", - "metadata", - "version_message", - ] - null_default_fields = [] - + optional_fields = set( + [ + "instructions", + "tools", + "completion_args", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", + "version_message", + ] + ) + nullable_fields = set( + [ + "instructions", + "model", + "name", + "description", + "handoffs", + "deployment_chat", + "metadata", + "version_message", + ] + ) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/documentupdatein.py b/src/mistralai/client/models/updatedocumentrequest.py similarity index 60% rename from src/mistralai/client/models/documentupdatein.py rename to src/mistralai/client/models/updatedocumentrequest.py index 669554de..61e69655 100644 --- a/src/mistralai/client/models/documentupdatein.py +++ 
b/src/mistralai/client/models/updatedocumentrequest.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: d19c1b26a875 +# @generated-id: a8cfda07d337 from __future__ import annotations from datetime import datetime @@ -31,42 +31,37 @@ ) -class DocumentUpdateInTypedDict(TypedDict): +class UpdateDocumentRequestTypedDict(TypedDict): name: NotRequired[Nullable[str]] attributes: NotRequired[Nullable[Dict[str, AttributesTypedDict]]] -class DocumentUpdateIn(BaseModel): +class UpdateDocumentRequest(BaseModel): name: OptionalNullable[str] = UNSET attributes: OptionalNullable[Dict[str, Attributes]] = UNSET @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name", "attributes"] - nullable_fields = ["name", "attributes"] - null_default_fields = [] - + optional_fields = set(["name", "attributes"]) + nullable_fields = set(["name", "attributes"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val return m diff --git a/src/mistralai/client/models/updateftmodelin.py b/src/mistralai/client/models/updateftmodelin.py deleted file mode 100644 index 4ac5a8a2..00000000 --- 
a/src/mistralai/client/models/updateftmodelin.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 39e2d678e651 - -from __future__ import annotations -from mistralai.client.types import ( - BaseModel, - Nullable, - OptionalNullable, - UNSET, - UNSET_SENTINEL, -) -from pydantic import model_serializer -from typing_extensions import NotRequired, TypedDict - - -class UpdateFTModelInTypedDict(TypedDict): - name: NotRequired[Nullable[str]] - description: NotRequired[Nullable[str]] - - -class UpdateFTModelIn(BaseModel): - name: OptionalNullable[str] = UNSET - - description: OptionalNullable[str] = UNSET - - @model_serializer(mode="wrap") - def serialize_model(self, handler): - optional_fields = ["name", "description"] - nullable_fields = ["name", "description"] - null_default_fields = [] - - serialized = handler(self) - - m = {} - - for n, f in type(self).model_fields.items(): - k = f.alias or n - val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - - return m diff --git a/src/mistralai/client/models/updatelibraryrequest.py b/src/mistralai/client/models/updatelibraryrequest.py new file mode 100644 index 00000000..91cbf2a1 --- /dev/null +++ b/src/mistralai/client/models/updatelibraryrequest.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 51bc63885337 + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class UpdateLibraryRequestTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class UpdateLibraryRequest(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["name", "description"]) + nullable_fields = set(["name", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/updatemodelop.py b/src/mistralai/client/models/updatemodelop.py deleted file mode 100644 index 023be979..00000000 --- a/src/mistralai/client/models/updatemodelop.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" -# @generated-id: ba149ecfe03e - -from __future__ import annotations -from .classifierftmodelout import ClassifierFTModelOut, ClassifierFTModelOutTypedDict -from .completionftmodelout import CompletionFTModelOut, CompletionFTModelOutTypedDict -from .updateftmodelin import UpdateFTModelIn, UpdateFTModelInTypedDict -from mistralai.client.types import BaseModel -from mistralai.client.utils import FieldMetadata, PathParamMetadata, RequestMetadata -from pydantic import Field -from typing import Union -from typing_extensions import Annotated, TypeAliasType, TypedDict - - -class UpdateModelRequestTypedDict(TypedDict): - model_id: str - r"""The ID of the model to update.""" - update_ft_model_in: UpdateFTModelInTypedDict - - -class UpdateModelRequest(BaseModel): - model_id: Annotated[ - str, FieldMetadata(path=PathParamMetadata(style="simple", explode=False)) - ] - r"""The ID of the model to update.""" - - update_ft_model_in: Annotated[ - UpdateFTModelIn, - FieldMetadata(request=RequestMetadata(media_type="application/json")), - ] - - -UpdateModelResponseTypedDict = TypeAliasType( - "UpdateModelResponseTypedDict", - Union[CompletionFTModelOutTypedDict, ClassifierFTModelOutTypedDict], -) -r"""OK""" - - -UpdateModelResponse = Annotated[ - Union[ClassifierFTModelOut, CompletionFTModelOut], Field(discriminator="MODEL_TYPE") -] -r"""OK""" diff --git a/src/mistralai/client/models/updatemodelrequest.py b/src/mistralai/client/models/updatemodelrequest.py new file mode 100644 index 00000000..f685cfcc --- /dev/null +++ b/src/mistralai/client/models/updatemodelrequest.py @@ -0,0 +1,49 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: fe649967751e + +from __future__ import annotations +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) +from pydantic import model_serializer +from typing_extensions import NotRequired, TypedDict + + +class UpdateModelRequestTypedDict(TypedDict): + name: NotRequired[Nullable[str]] + description: NotRequired[Nullable[str]] + + +class UpdateModelRequest(BaseModel): + name: OptionalNullable[str] = UNSET + + description: OptionalNullable[str] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["name", "description"]) + nullable_fields = set(["name", "description"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m diff --git a/src/mistralai/client/models/usageinfo.py b/src/mistralai/client/models/usageinfo.py index e78f92e7..31cbf07e 100644 --- a/src/mistralai/client/models/usageinfo.py +++ b/src/mistralai/client/models/usageinfo.py @@ -46,37 +46,34 @@ def additional_properties(self, value): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [ - "prompt_tokens", - "completion_tokens", - "total_tokens", - "prompt_audio_seconds", - ] - nullable_fields = ["prompt_audio_seconds"] - null_default_fields = [] - + optional_fields = set( + [ + "prompt_tokens", + "completion_tokens", + "total_tokens", + "prompt_audio_seconds", + ] + ) + nullable_fields = set(["prompt_audio_seconds"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) serialized.pop(k, 
None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val for k, v in serialized.items(): m[k] = v diff --git a/src/mistralai/client/models/usermessage.py b/src/mistralai/client/models/usermessage.py index 25ccdf80..63e76792 100644 --- a/src/mistralai/client/models/usermessage.py +++ b/src/mistralai/client/models/usermessage.py @@ -28,37 +28,27 @@ class UserMessageTypedDict(TypedDict): class UserMessage(BaseModel): content: Nullable[UserMessageContent] - ROLE: Annotated[ + role: Annotated[ Annotated[Literal["user"], AfterValidator(validate_const("user"))], pydantic.Field(alias="role"), ] = "user" @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = [] - nullable_fields = ["content"] - null_default_fields = [] - serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) - - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): + if val != UNSET_SENTINEL: m[k] = val return m + + +try: + UserMessage.model_rebuild() +except NameError: + pass diff --git 
a/src/mistralai/client/models/wandbintegration.py b/src/mistralai/client/models/wandbintegration.py index c5db4a6d..f0df2c77 100644 --- a/src/mistralai/client/models/wandbintegration.py +++ b/src/mistralai/client/models/wandbintegration.py @@ -35,7 +35,7 @@ class WandbIntegration(BaseModel): api_key: str r"""The WandB API key to use for authentication.""" - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))], pydantic.Field(alias="type"), ] = "wandb" @@ -47,30 +47,31 @@ class WandbIntegration(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name", "run_name"] - nullable_fields = ["name", "run_name"] - null_default_fields = [] - + optional_fields = set(["name", "run_name"]) + nullable_fields = set(["name", "run_name"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + WandbIntegration.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/wandbintegrationout.py b/src/mistralai/client/models/wandbintegrationresult.py similarity index 65% rename from src/mistralai/client/models/wandbintegrationout.py rename to src/mistralai/client/models/wandbintegrationresult.py index 
d0a09bf4..575cbd42 100644 --- a/src/mistralai/client/models/wandbintegrationout.py +++ b/src/mistralai/client/models/wandbintegrationresult.py @@ -1,5 +1,5 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" -# @generated-id: 6b103d74195c +# @generated-id: 8787b4ad5458 from __future__ import annotations from mistralai.client.types import ( @@ -17,7 +17,7 @@ from typing_extensions import Annotated, NotRequired, TypedDict -class WandbIntegrationOutTypedDict(TypedDict): +class WandbIntegrationResultTypedDict(TypedDict): project: str r"""The name of the project that the new run will be created under.""" type: Literal["wandb"] @@ -27,11 +27,11 @@ class WandbIntegrationOutTypedDict(TypedDict): url: NotRequired[Nullable[str]] -class WandbIntegrationOut(BaseModel): +class WandbIntegrationResult(BaseModel): project: str r"""The name of the project that the new run will be created under.""" - TYPE: Annotated[ + type: Annotated[ Annotated[Literal["wandb"], AfterValidator(validate_const("wandb"))], pydantic.Field(alias="type"), ] = "wandb" @@ -45,30 +45,31 @@ class WandbIntegrationOut(BaseModel): @model_serializer(mode="wrap") def serialize_model(self, handler): - optional_fields = ["name", "run_name", "url"] - nullable_fields = ["name", "run_name", "url"] - null_default_fields = [] - + optional_fields = set(["name", "run_name", "url"]) + nullable_fields = set(["name", "run_name", "url"]) serialized = handler(self) - m = {} for n, f in type(self).model_fields.items(): k = f.alias or n val = serialized.get(k) - serialized.pop(k, None) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val - optional_nullable = k in optional_fields and k in nullable_fields - is_set = ( - 
self.__pydantic_fields_set__.intersection({n}) - or k in null_default_fields - ) # pylint: disable=no-member + return m - if val is not None and val != UNSET_SENTINEL: - m[k] = val - elif val != UNSET_SENTINEL and ( - not k in optional_fields or (optional_nullable and is_set) - ): - m[k] = val - return m +try: + WandbIntegrationResult.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/websearchpremiumtool.py b/src/mistralai/client/models/websearchpremiumtool.py index 9588ab1d..00d4a4b4 100644 --- a/src/mistralai/client/models/websearchpremiumtool.py +++ b/src/mistralai/client/models/websearchpremiumtool.py @@ -2,23 +2,65 @@ # @generated-id: bfe88af887e3 from __future__ import annotations -from mistralai.client.types import BaseModel +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class WebSearchPremiumToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] type: Literal["web_search_premium"] class WebSearchPremiumTool(BaseModel): - TYPE: Annotated[ + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ Annotated[ Literal["web_search_premium"], AfterValidator(validate_const("web_search_premium")), ], pydantic.Field(alias="type"), ] = "web_search_premium" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias 
or n + val = serialized.get(k) + is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + WebSearchPremiumTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models/websearchtool.py b/src/mistralai/client/models/websearchtool.py index 27502909..6871080f 100644 --- a/src/mistralai/client/models/websearchtool.py +++ b/src/mistralai/client/models/websearchtool.py @@ -2,20 +2,62 @@ # @generated-id: 26b0903423e5 from __future__ import annotations -from mistralai.client.types import BaseModel +from .toolconfiguration import ToolConfiguration, ToolConfigurationTypedDict +from mistralai.client.types import ( + BaseModel, + Nullable, + OptionalNullable, + UNSET, + UNSET_SENTINEL, +) from mistralai.client.utils import validate_const import pydantic +from pydantic import model_serializer from pydantic.functional_validators import AfterValidator from typing import Literal -from typing_extensions import Annotated, TypedDict +from typing_extensions import Annotated, NotRequired, TypedDict class WebSearchToolTypedDict(TypedDict): + tool_configuration: NotRequired[Nullable[ToolConfigurationTypedDict]] type: Literal["web_search"] class WebSearchTool(BaseModel): - TYPE: Annotated[ + tool_configuration: OptionalNullable[ToolConfiguration] = UNSET + + type: Annotated[ Annotated[Literal["web_search"], AfterValidator(validate_const("web_search"))], pydantic.Field(alias="type"), ] = "web_search" + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = set(["tool_configuration"]) + nullable_fields = set(["tool_configuration"]) + serialized = handler(self) + m = {} + + for n, f in type(self).model_fields.items(): + k = f.alias or n + val = serialized.get(k) + 
is_nullable_and_explicitly_set = ( + k in nullable_fields + and (self.__pydantic_fields_set__.intersection({n})) # pylint: disable=no-member + ) + + if val != UNSET_SENTINEL: + if ( + val is not None + or k not in optional_fields + or is_nullable_and_explicitly_set + ): + m[k] = val + + return m + + +try: + WebSearchTool.model_rebuild() +except NameError: + pass diff --git a/src/mistralai/client/models_.py b/src/mistralai/client/models_.py index 05b33ac7..a287c413 100644 --- a/src/mistralai/client/models_.py +++ b/src/mistralai/client/models_.py @@ -2,7 +2,7 @@ # @generated-id: 1d277958a843 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env @@ -68,7 +68,7 @@ def list( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ListModels", + operation_id="list_models_v1_models_get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -83,12 +83,12 @@ def list( return unmarshal_json_response(models.ModelList, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def list_async( self, @@ -145,7 +145,7 @@ async def list_async( hook_ctx=HookContext( config=self.sdk_configuration, 
base_url=base_url or "", - operation_id="ListModels", + operation_id="list_models_v1_models_get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -160,12 +160,12 @@ async def list_async( return unmarshal_json_response(models.ModelList, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def retrieve( self, @@ -196,7 +196,7 @@ def retrieve( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveModelRequest( + request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, ) @@ -229,7 +229,7 @@ def retrieve( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RetrieveModel", + operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -247,17 +247,17 @@ def retrieve( ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise 
errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def retrieve_async( self, @@ -288,7 +288,7 @@ async def retrieve_async( else: base_url = self._get_url(base_url, url_variables) - request = models.RetrieveModelRequest( + request = models.RetrieveModelV1ModelsModelIDGetRequest( model_id=model_id, ) @@ -321,7 +321,7 @@ async def retrieve_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="RetrieveModel", + operation_id="retrieve_model_v1_models__model_id__get", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -339,17 +339,17 @@ async def retrieve_async( ) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) 
def delete( self, @@ -380,7 +380,7 @@ def delete( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteModelRequest( + request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, ) @@ -413,7 +413,7 @@ def delete( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteModel", + operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -429,17 +429,17 @@ def delete( return unmarshal_json_response(models.DeleteModelOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def delete_async( self, @@ -470,7 +470,7 @@ async def delete_async( else: base_url = self._get_url(base_url, url_variables) - request = models.DeleteModelRequest( + request = models.DeleteModelV1ModelsModelIDDeleteRequest( model_id=model_id, ) @@ -503,7 +503,7 @@ async def delete_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="DeleteModel", + 
operation_id="delete_model_v1_models__model_id__delete", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -519,17 +519,17 @@ async def delete_async( return unmarshal_json_response(models.DeleteModelOut, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def update( self, @@ -541,7 +541,7 @@ def update( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UpdateModelResponse: + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: r"""Update Fine Tuned Model Update a model name or description. 
@@ -564,9 +564,9 @@ def update( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateModelRequest( + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, - update_ft_model_in=models.UpdateFTModelIn( + update_model_request=models.UpdateModelRequest( name=name, description=description, ), @@ -586,7 +586,11 @@ def update( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + request.update_model_request, + False, + False, + "json", + models.UpdateModelRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -604,7 +608,7 @@ def update( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateModel", + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -616,15 +620,17 @@ def update( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UpdateModelResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def update_async( self, @@ -636,7 +642,7 @@ async def update_async( 
server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UpdateModelResponse: + ) -> models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse: r"""Update Fine Tuned Model Update a model name or description. @@ -659,9 +665,9 @@ async def update_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UpdateModelRequest( + request = models.JobsAPIRoutesFineTuningUpdateFineTunedModelRequest( model_id=model_id, - update_ft_model_in=models.UpdateFTModelIn( + update_model_request=models.UpdateModelRequest( name=name, description=description, ), @@ -681,7 +687,11 @@ async def update_async( http_headers=http_headers, security=self.sdk_configuration.security, get_serialized_body=lambda: utils.serialize_request_body( - request.update_ft_model_in, False, False, "json", models.UpdateFTModelIn + request.update_model_request, + False, + False, + "json", + models.UpdateModelRequest, ), allow_empty_value=None, timeout_ms=timeout_ms, @@ -699,7 +709,7 @@ async def update_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UpdateModel", + operation_id="jobs_api_routes_fine_tuning_update_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -711,15 +721,17 @@ async def update_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UpdateModelResponse, http_res) + return unmarshal_json_response( + models.JobsAPIRoutesFineTuningUpdateFineTunedModelResponse, http_res + ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await 
utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def archive( self, @@ -729,7 +741,7 @@ def archive( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ArchiveFTModelOut: + ) -> models.ArchiveModelResponse: r"""Archive Fine Tuned Model Archive a fine-tuned model. @@ -750,7 +762,7 @@ def archive( else: base_url = self._get_url(base_url, url_variables) - request = models.ArchiveModelRequest( + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, ) @@ -783,7 +795,7 @@ def archive( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ArchiveModel", + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -795,15 +807,15 @@ def archive( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ArchiveFTModelOut, http_res) + return unmarshal_json_response(models.ArchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async 
def archive_async( self, @@ -813,7 +825,7 @@ async def archive_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.ArchiveFTModelOut: + ) -> models.ArchiveModelResponse: r"""Archive Fine Tuned Model Archive a fine-tuned model. @@ -834,7 +846,7 @@ async def archive_async( else: base_url = self._get_url(base_url, url_variables) - request = models.ArchiveModelRequest( + request = models.JobsAPIRoutesFineTuningArchiveFineTunedModelRequest( model_id=model_id, ) @@ -867,7 +879,7 @@ async def archive_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="ArchiveModel", + operation_id="jobs_api_routes_fine_tuning_archive_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -879,15 +891,15 @@ async def archive_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.ArchiveFTModelOut, http_res) + return unmarshal_json_response(models.ArchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def unarchive( self, @@ -897,7 +909,7 @@ def unarchive( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UnarchiveFTModelOut: + ) -> 
models.UnarchiveModelResponse: r"""Unarchive Fine Tuned Model Un-archive a fine-tuned model. @@ -918,7 +930,7 @@ def unarchive( else: base_url = self._get_url(base_url, url_variables) - request = models.UnarchiveModelRequest( + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, ) @@ -951,7 +963,7 @@ def unarchive( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UnarchiveModel", + operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -963,15 +975,15 @@ def unarchive( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) + return unmarshal_json_response(models.UnarchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def unarchive_async( self, @@ -981,7 +993,7 @@ async def unarchive_async( server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, - ) -> models.UnarchiveFTModelOut: + ) -> models.UnarchiveModelResponse: r"""Unarchive Fine Tuned Model Un-archive a fine-tuned model. 
@@ -1002,7 +1014,7 @@ async def unarchive_async( else: base_url = self._get_url(base_url, url_variables) - request = models.UnarchiveModelRequest( + request = models.JobsAPIRoutesFineTuningUnarchiveFineTunedModelRequest( model_id=model_id, ) @@ -1035,7 +1047,7 @@ async def unarchive_async( hook_ctx=HookContext( config=self.sdk_configuration, base_url=base_url or "", - operation_id="UnarchiveModel", + operation_id="jobs_api_routes_fine_tuning_unarchive_fine_tuned_model", oauth2_scopes=None, security_source=get_security_from_env( self.sdk_configuration.security, models.Security @@ -1047,12 +1059,12 @@ async def unarchive_async( ) if utils.match_response(http_res, "200", "application/json"): - return unmarshal_json_response(models.UnarchiveFTModelOut, http_res) + return unmarshal_json_response(models.UnarchiveModelResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/ocr.py b/src/mistralai/client/ocr.py index 2aa38229..a46119d1 100644 --- a/src/mistralai/client/ocr.py +++ b/src/mistralai/client/ocr.py @@ -2,12 +2,8 @@ # @generated-id: 2f804a12fc62 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - ocrrequest as models_ocrrequest, - responseformat as models_responseformat, -) from 
mistralai.client.types import Nullable, OptionalNullable, UNSET from mistralai.client.utils import get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -21,28 +17,20 @@ def process( self, *, model: Nullable[str], - document: Union[ - models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict - ], + document: Union[models.DocumentUnion, models.DocumentUnionTypedDict], id: Optional[str] = None, pages: OptionalNullable[List[int]] = UNSET, include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, bbox_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, document_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, document_annotation_prompt: OptionalNullable[str] = UNSET, - table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -83,7 +71,7 @@ def process( request = models.OCRRequest( model=model, id=id, - document=utils.get_pydantic_model(document, models.Document), + document=utils.get_pydantic_model(document, models.DocumentUnion), pages=pages, include_image_base64=include_image_base64, image_limit=image_limit, @@ -148,44 +136,36 @@ def process( return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise 
models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def process_async( self, *, model: Nullable[str], - document: Union[ - models_ocrrequest.Document, models_ocrrequest.DocumentTypedDict - ], + document: Union[models.DocumentUnion, models.DocumentUnionTypedDict], id: Optional[str] = None, pages: OptionalNullable[List[int]] = UNSET, include_image_base64: OptionalNullable[bool] = UNSET, image_limit: OptionalNullable[int] = UNSET, image_min_size: OptionalNullable[int] = UNSET, bbox_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, document_annotation_format: OptionalNullable[ - Union[ - models_responseformat.ResponseFormat, - models_responseformat.ResponseFormatTypedDict, - ] + Union[models.ResponseFormat, models.ResponseFormatTypedDict] ] = UNSET, document_annotation_prompt: OptionalNullable[str] = UNSET, - table_format: OptionalNullable[models_ocrrequest.TableFormat] = UNSET, + table_format: OptionalNullable[models.TableFormat] = UNSET, extract_header: Optional[bool] = None, extract_footer: Optional[bool] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, @@ -226,7 +206,7 @@ async def process_async( request = models.OCRRequest( 
model=model, id=id, - document=utils.get_pydantic_model(document, models.Document), + document=utils.get_pydantic_model(document, models.DocumentUnion), pages=pages, include_image_base64=include_image_base64, image_limit=image_limit, @@ -291,14 +271,14 @@ async def process_async( return unmarshal_json_response(models.OCRResponse, http_res) if utils.match_response(http_res, "422", "application/json"): response_data = unmarshal_json_response( - models.HTTPValidationErrorData, http_res + errors.HTTPValidationErrorData, http_res ) - raise models.HTTPValidationError(response_data, http_res) + raise errors.HTTPValidationError(response_data, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) diff --git a/src/mistralai/client/sdk.py b/src/mistralai/client/sdk.py index b1ab5493..80bf25a7 100644 --- a/src/mistralai/client/sdk.py +++ b/src/mistralai/client/sdk.py @@ -8,7 +8,7 @@ from .utils.retries import RetryConfig import httpx import importlib -from mistralai.client import models, utils +from mistralai.client import models as models_, utils from mistralai.client._hooks import SDKHooks from mistralai.client.types import OptionalNullable, UNSET import sys @@ -31,10 +31,7 @@ class Mistral(BaseSDK): - r"""Mistral AI API: Dora OpenAPI schema - - Our Chat Completion and Embeddings APIs specification. 
Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it. - """ + r"""Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create your account on [La Plateforme](https://console.mistral.ai) to get access and read the [docs](https://docs.mistral.ai) to learn how to use it.""" models: "Models" r"""Model Management API""" @@ -118,9 +115,9 @@ def __init__( security: Any = None if callable(api_key): # pylint: disable=unnecessary-lambda-assignment - security = lambda: models.Security(api_key=api_key()) + security = lambda: models_.Security(api_key=api_key()) else: - security = models.Security(api_key=api_key) + security = models_.Security(api_key=api_key) if server_url is not None: if url_params is not None: diff --git a/src/mistralai/client/transcriptions.py b/src/mistralai/client/transcriptions.py index f7ef5b0a..7f01917d 100644 --- a/src/mistralai/client/transcriptions.py +++ b/src/mistralai/client/transcriptions.py @@ -2,12 +2,8 @@ # @generated-id: 75b45780c978 from .basesdk import BaseSDK -from mistralai.client import models, utils +from mistralai.client import errors, models, utils from mistralai.client._hooks import HookContext -from mistralai.client.models import ( - file as models_file, - timestampgranularity as models_timestampgranularity, -) from mistralai.client.types import OptionalNullable, UNSET from mistralai.client.utils import eventstreaming, get_security_from_env from mistralai.client.utils.unmarshal_json_response import unmarshal_json_response @@ -21,16 +17,14 @@ def complete( self, *, model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file: Optional[Union[models.File, models.FileTypedDict]] = None, file_url: OptionalNullable[str] = 
UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, diarize: Optional[bool] = False, context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -121,27 +115,25 @@ def complete( return unmarshal_json_response(models.TranscriptionResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) async def complete_async( self, *, model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file: Optional[Union[models.File, models.FileTypedDict]] = None, file_url: OptionalNullable[str] = UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, diarize: Optional[bool] = False, context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -232,27 +224,25 @@ async def 
complete_async( return unmarshal_json_response(models.TranscriptionResponse, http_res) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) - raise models.SDKError("Unexpected response received", http_res) + raise errors.SDKError("Unexpected response received", http_res) def stream( self, *, model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file: Optional[Union[models.File, models.FileTypedDict]] = None, file_url: OptionalNullable[str] = UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, diarize: Optional[bool] = False, context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -352,28 +342,26 @@ def stream( ) if utils.match_response(http_res, "4XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = utils.stream_to_text(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = utils.stream_to_text(http_res) - 
raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) async def stream_async( self, *, model: str, - file: Optional[Union[models_file.File, models_file.FileTypedDict]] = None, + file: Optional[Union[models.File, models.FileTypedDict]] = None, file_url: OptionalNullable[str] = UNSET, file_id: OptionalNullable[str] = UNSET, language: OptionalNullable[str] = UNSET, temperature: OptionalNullable[float] = UNSET, diarize: Optional[bool] = False, context_bias: Optional[List[str]] = None, - timestamp_granularities: Optional[ - List[models_timestampgranularity.TimestampGranularity] - ] = None, + timestamp_granularities: Optional[List[models.TimestampGranularity]] = None, retries: OptionalNullable[utils.RetryConfig] = UNSET, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, @@ -473,10 +461,10 @@ async def stream_async( ) if utils.match_response(http_res, "4XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) if utils.match_response(http_res, "5XX", "*"): http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("API error occurred", http_res, http_res_text) + raise errors.SDKError("API error occurred", http_res, http_res_text) http_res_text = await utils.stream_to_text_async(http_res) - raise models.SDKError("Unexpected response received", http_res, http_res_text) + raise errors.SDKError("Unexpected response received", http_res, http_res_text) diff --git a/src/mistralai/client/utils/__init__.py b/src/mistralai/client/utils/__init__.py index 7ed3a420..4bde281a 100644 --- a/src/mistralai/client/utils/__init__.py +++ b/src/mistralai/client/utils/__init__.py @@ -1,15 +1,24 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" # @generated-id: b69505f4b269 -from typing import TYPE_CHECKING -from importlib import import_module -import builtins -import sys +from typing import Any, TYPE_CHECKING, Callable, TypeVar +import asyncio + +from .dynamic_imports import lazy_getattr, lazy_dir + +_T = TypeVar("_T") + + +async def run_sync_in_thread(func: Callable[..., _T], *args) -> _T: + """Run a synchronous function in a thread pool to avoid blocking the event loop.""" + return await asyncio.to_thread(func, *args) + if TYPE_CHECKING: from .annotations import get_discriminator from .datetimes import parse_datetime from .enums import OpenEnumMeta + from .unions import parse_open_union from .headers import get_headers, get_response_headers from .metadata import ( FieldMetadata, @@ -79,6 +88,7 @@ "match_response", "MultipartFormMetadata", "OpenEnumMeta", + "parse_open_union", "PathParamMetadata", "QueryParamMetadata", "remove_suffix", @@ -132,6 +142,7 @@ "match_response": ".values", "MultipartFormMetadata": ".metadata", "OpenEnumMeta": ".enums", + "parse_open_union": ".unions", "PathParamMetadata": ".metadata", "QueryParamMetadata": ".metadata", "remove_suffix": ".url", @@ -161,38 +172,11 @@ } -def dynamic_import(modname, retries=3): - for attempt in range(retries): - try: - return import_module(modname, __package__) - except KeyError: - # Clear any half-initialized module and retry - sys.modules.pop(modname, None) - if attempt == retries - 1: - break - raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") - - -def __getattr__(attr_name: str) -> object: - module_name = _dynamic_imports.get(attr_name) - if module_name is None: - raise AttributeError( - f"no {attr_name} found in _dynamic_imports, module name -> {__name__} " - ) - - try: - module = dynamic_import(module_name) - return getattr(module, attr_name) - except ImportError as e: - raise ImportError( - f"Failed to import {attr_name} from {module_name}: {e}" - ) from e - except AttributeError as e: - raise 
AttributeError( - f"Failed to get {attr_name} from {module_name}: {e}" - ) from e +def __getattr__(attr_name: str) -> Any: + return lazy_getattr( + attr_name, package=__package__, dynamic_imports=_dynamic_imports + ) def __dir__(): - lazy_attrs = builtins.list(_dynamic_imports.keys()) - return builtins.sorted(lazy_attrs) + return lazy_dir(dynamic_imports=_dynamic_imports) diff --git a/src/mistralai/client/utils/dynamic_imports.py b/src/mistralai/client/utils/dynamic_imports.py new file mode 100644 index 00000000..969f2fc7 --- /dev/null +++ b/src/mistralai/client/utils/dynamic_imports.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: ac9918d925c0 + +from importlib import import_module +import builtins +import sys + + +def dynamic_import(package, modname, retries=3): + """Import a module relative to package, retrying on KeyError from half-initialized modules.""" + for attempt in range(retries): + try: + return import_module(modname, package) + except KeyError: + sys.modules.pop(modname, None) + if attempt == retries - 1: + break + raise KeyError(f"Failed to import module '{modname}' after {retries} attempts") + + +def lazy_getattr(attr_name, *, package, dynamic_imports, sub_packages=None): + """Module-level __getattr__ that lazily loads from a dynamic_imports mapping. + + Args: + attr_name: The attribute being looked up. + package: The caller's __package__ (for relative imports). + dynamic_imports: Dict mapping attribute names to relative module paths. + sub_packages: Optional list of subpackage names to lazy-load. 
+ """ + module_name = dynamic_imports.get(attr_name) + if module_name is not None: + try: + module = dynamic_import(package, module_name) + return getattr(module, attr_name) + except ImportError as e: + raise ImportError( + f"Failed to import {attr_name} from {module_name}: {e}" + ) from e + except AttributeError as e: + raise AttributeError( + f"Failed to get {attr_name} from {module_name}: {e}" + ) from e + + if sub_packages and attr_name in sub_packages: + return import_module(f".{attr_name}", package) + + raise AttributeError(f"module '{package}' has no attribute '{attr_name}'") + + +def lazy_dir(*, dynamic_imports, sub_packages=None): + """Module-level __dir__ that lists lazily-loadable attributes.""" + lazy_attrs = builtins.list(dynamic_imports.keys()) + if sub_packages: + lazy_attrs.extend(sub_packages) + return builtins.sorted(lazy_attrs) diff --git a/src/mistralai/client/utils/eventstreaming.py b/src/mistralai/client/utils/eventstreaming.py index 3fe3c7e1..19a12152 100644 --- a/src/mistralai/client/utils/eventstreaming.py +++ b/src/mistralai/client/utils/eventstreaming.py @@ -3,7 +3,9 @@ import re import json +from dataclasses import dataclass, asdict from typing import ( + Any, Callable, Generic, TypeVar, @@ -23,6 +25,7 @@ class EventStream(Generic[T]): client_ref: Optional[object] response: httpx.Response generator: Generator[T, None, None] + _closed: bool def __init__( self, @@ -34,17 +37,21 @@ def __init__( self.response = response self.generator = stream_events(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __iter__(self): return self def __next__(self): + if self._closed: + raise StopIteration return next(self.generator) def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): + self._closed = True self.response.close() @@ -54,6 +61,7 @@ class EventStreamAsync(Generic[T]): client_ref: Optional[object] response: httpx.Response generator: AsyncGenerator[T, None] + _closed: bool def __init__( 
self, @@ -65,33 +73,45 @@ def __init__( self.response = response self.generator = stream_events_async(response, decoder, sentinel) self.client_ref = client_ref + self._closed = False def __aiter__(self): return self async def __anext__(self): + if self._closed: + raise StopAsyncIteration return await self.generator.__anext__() async def __aenter__(self): return self async def __aexit__(self, exc_type, exc_val, exc_tb): + self._closed = True await self.response.aclose() +@dataclass class ServerEvent: id: Optional[str] = None event: Optional[str] = None - data: Optional[str] = None + data: Any = None retry: Optional[int] = None MESSAGE_BOUNDARIES = [ b"\r\n\r\n", - b"\n\n", + b"\r\n\r", + b"\r\n\n", + b"\r\r\n", + b"\n\r\n", b"\r\r", + b"\n\r", + b"\n\n", ] +UTF8_BOM = b"\xef\xbb\xbf" + async def stream_events_async( response: httpx.Response, @@ -100,14 +120,10 @@ async def stream_events_async( ) -> AsyncGenerator[T, None]: buffer = bytearray() position = 0 - discard = False + event_id: Optional[str] = None async for chunk in response.aiter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -122,15 +138,22 @@ async def stream_events_async( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + await response.aclose() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event @@ -142,14 +165,10 @@ def stream_events( ) -> Generator[T, None, None]: buffer = bytearray() position = 0 - discard = False + event_id: Optional[str] = None for chunk in response.iter_bytes(): - # We've encountered the sentinel value and should no longer process - # incoming data. Instead we throw new data away until the server closes - # the connection. 
- if discard: - continue - + if len(buffer) == 0 and chunk.startswith(UTF8_BOM): + chunk = chunk[len(UTF8_BOM) :] buffer += chunk for i in range(position, len(buffer)): char = buffer[i : i + 1] @@ -164,22 +183,33 @@ def stream_events( block = buffer[position:i] position = i + len(seq) - event, discard = _parse_event(block, decoder, sentinel) + event, discard, event_id = _parse_event( + raw=block, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event + if discard: + response.close() + return if position > 0: buffer = buffer[position:] position = 0 - event, discard = _parse_event(buffer, decoder, sentinel) + event, discard, _ = _parse_event( + raw=buffer, decoder=decoder, sentinel=sentinel, event_id=event_id + ) if event is not None: yield event def _parse_event( - raw: bytearray, decoder: Callable[[str], T], sentinel: Optional[str] = None -) -> Tuple[Optional[T], bool]: + *, + raw: bytearray, + decoder: Callable[[str], T], + sentinel: Optional[str] = None, + event_id: Optional[str] = None, +) -> Tuple[Optional[T], bool, Optional[str]]: block = raw.decode() lines = re.split(r"\r?\n|\r", block) publish = False @@ -190,13 +220,16 @@ def _parse_event( continue delim = line.find(":") - if delim <= 0: + if delim == 0: continue - field = line[0:delim] - value = line[delim + 1 :] if delim < len(line) - 1 else "" - if len(value) and value[0] == " ": - value = value[1:] + field = line + value = "" + if delim > 0: + field = line[0:delim] + value = line[delim + 1 :] if delim < len(line) - 1 else "" + if len(value) and value[0] == " ": + value = value[1:] if field == "event": event.event = value @@ -205,37 +238,36 @@ def _parse_event( data += value + "\n" publish = True elif field == "id": - event.id = value publish = True + if "\x00" not in value: + event_id = value elif field == "retry": - event.retry = int(value) if value.isdigit() else None + if value.isdigit(): + event.retry = int(value) publish = True + event.id = event_id + if 
sentinel and data == f"{sentinel}\n": - return None, True + return None, True, event_id if data: data = data[:-1] - event.data = data - - data_is_primitive = ( - data.isnumeric() or data == "true" or data == "false" or data == "null" - ) - data_is_json = ( - data.startswith("{") or data.startswith("[") or data.startswith('"') - ) - - if data_is_primitive or data_is_json: - try: - event.data = json.loads(data) - except Exception: - pass + try: + event.data = json.loads(data) + except json.JSONDecodeError: + event.data = data out = None if publish: - out = decoder(json.dumps(event.__dict__)) - - return out, False + out_dict = { + k: v + for k, v in asdict(event).items() + if v is not None or (k == "data" and data) + } + out = decoder(json.dumps(out_dict)) + + return out, False, event_id def _peek_sequence(position: int, buffer: bytearray, sequence: bytes): diff --git a/src/mistralai/client/utils/forms.py b/src/mistralai/client/utils/forms.py index 2b474b9a..6facec53 100644 --- a/src/mistralai/client/utils/forms.py +++ b/src/mistralai/client/utils/forms.py @@ -143,7 +143,7 @@ def serialize_multipart_form( if field_metadata.file: if isinstance(val, List): # Handle array of files - array_field_name = f_name + "[]" + array_field_name = f_name for file_obj in val: if not _is_set(file_obj): continue @@ -186,7 +186,7 @@ def serialize_multipart_form( continue values.append(_val_to_string(value)) - array_field_name = f_name + "[]" + array_field_name = f_name form[array_field_name] = values else: form[f_name] = _val_to_string(val) diff --git a/src/mistralai/client/utils/retries.py b/src/mistralai/client/utils/retries.py index 90c008b0..bea13041 100644 --- a/src/mistralai/client/utils/retries.py +++ b/src/mistralai/client/utils/retries.py @@ -145,12 +145,7 @@ def do_request() -> httpx.Response: if res.status_code == parsed_code: raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise 
PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise @@ -194,12 +189,7 @@ async def do_request() -> httpx.Response: if res.status_code == parsed_code: raise TemporaryError(res) - except httpx.ConnectError as exception: - if retries.config.retry_connection_errors: - raise - - raise PermanentError(exception) from exception - except httpx.TimeoutException as exception: + except (httpx.NetworkError, httpx.TimeoutException) as exception: if retries.config.retry_connection_errors: raise diff --git a/src/mistralai/client/utils/security.py b/src/mistralai/client/utils/security.py index 4c73806d..d8b9d8fe 100644 --- a/src/mistralai/client/utils/security.py +++ b/src/mistralai/client/utils/security.py @@ -154,6 +154,8 @@ def _parse_security_scheme_value( elif scheme_type == "http": if sub_type == "bearer": headers[header_name] = _apply_bearer(value) + elif sub_type == "basic": + headers[header_name] = value elif sub_type == "custom": return else: diff --git a/src/mistralai/client/utils/unions.py b/src/mistralai/client/utils/unions.py new file mode 100644 index 00000000..14ef1bd5 --- /dev/null +++ b/src/mistralai/client/utils/unions.py @@ -0,0 +1,33 @@ +"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" +# @generated-id: d23713342634 + +from typing import Any + +from pydantic import BaseModel, TypeAdapter + + +def parse_open_union( + v: Any, + *, + disc_key: str, + variants: dict[str, Any], + unknown_cls: type, + union_name: str, +) -> Any: + """Parse an open discriminated union value with forward-compatibility. + + Known discriminator values are dispatched to their variant types. + Unknown discriminator values produce an instance of the fallback class, + preserving the raw payload for inspection. 
+ """ + if isinstance(v, BaseModel): + return v + if not isinstance(v, dict) or disc_key not in v: + raise ValueError(f"{union_name}: expected object with '{disc_key}' field") + disc = v[disc_key] + variant_cls = variants.get(disc) + if variant_cls is not None: + if isinstance(variant_cls, type) and issubclass(variant_cls, BaseModel): + return variant_cls.model_validate(v) + return TypeAdapter(variant_cls).validate_python(v) + return unknown_cls(raw=v) diff --git a/src/mistralai/client/utils/unmarshal_json_response.py b/src/mistralai/client/utils/unmarshal_json_response.py index 65190e5c..624433c4 100644 --- a/src/mistralai/client/utils/unmarshal_json_response.py +++ b/src/mistralai/client/utils/unmarshal_json_response.py @@ -6,7 +6,7 @@ import httpx from .serializers import unmarshal_json -from mistralai.client import models +from mistralai.client import errors T = TypeVar("T") @@ -31,7 +31,7 @@ def unmarshal_json_response( try: return unmarshal_json(body, typ) except Exception as e: - raise models.ResponseValidationError( + raise errors.ResponseValidationError( "Response validation failed", http_res, e, diff --git a/src/mistralai/extra/run/context.py b/src/mistralai/extra/run/context.py index 01baa6a9..7ade705f 100644 --- a/src/mistralai/extra/run/context.py +++ b/src/mistralai/extra/run/context.py @@ -22,7 +22,6 @@ create_tool_call, ) from mistralai.client.models import ( - AgentTool, CompletionArgs, CompletionArgsTypedDict, ConversationInputs, @@ -35,6 +34,8 @@ InputEntries, MessageInputEntry, ResponseFormat, + UnknownAgentTool, + UpdateAgentRequestTool, ) from mistralai.client.types.basemodel import BaseModel, OptionalNullable, UNSET @@ -187,8 +188,11 @@ async def prepare_agent_request(self, beta_client: "Beta") -> AgentRequestKwargs ) agent = await beta_client.agents.get_async(agent_id=self.agent_id) agent_tools = agent.tools or [] - updated_tools: list[AgentTool] = [] + updated_tools: list[UpdateAgentRequestTool] = [] for tool in agent_tools: + if 
isinstance(tool, UnknownAgentTool): + # Skip unknown tools - can't include them in update request + continue if not isinstance(tool, FunctionTool): updated_tools.append(tool) elif tool.function.name in self._callable_tools: From 19f29d139211213280bae77e58aaf1848e967d88 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 25 Feb 2026 19:00:49 +0100 Subject: [PATCH 210/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=202.0.0b1=20(#372)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.729.0 * chore: set version to 2.0.0b1 --------- Co-authored-by: speakeasybot Co-authored-by: Louis Sanna --- .speakeasy/gen.lock | 15 ++++++++------- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 3 ++- RELEASES.md | 12 +++++++++++- pyproject.toml | 2 +- src/mistralai/client/_version.py | 4 ++-- uv.lock | 2 +- 7 files changed, 26 insertions(+), 14 deletions(-) diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 678c20f2..7314b7b1 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -5,15 +5,15 @@ management: docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0-a3.1 - configChecksum: 134292298710eaf25a0f90f7097e648f + releaseVersion: 2.0.0b1 + configChecksum: 871b5a7d3687bd2a9ebd0e205e4b36a3 repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 21ec746f-e476-468a-bb8e-c942c0997501 - pristine_commit_hash: 99ae95385eb06175841ba19bef78319a5921c585 - pristine_tree_hash: 5b06b6f5add0cd16af8139d524a42368532441c6 + generation_id: 1527268d-25cf-4a8c-8a67-09694eaf0d79 + pristine_commit_hash: 
5642b69da5a9a00af1e84ca689b7587f2269d0c4 + pristine_tree_hash: f9eae7c82e85b3114e342a4b6500b9704a266493 features: python: additionalDependencies: 1.0.0 @@ -56,6 +56,7 @@ trackedFiles: id: 89aa447020cd last_write_checksum: sha1:f84632c81029fcdda8c3b0c768d02b836fc80526 pristine_git_object: 8d79f0abb72526f1fb34a4c03e5bba612c6ba2ae + deleted: true USAGE.md: id: 3aed33ce6e6f last_write_checksum: sha1:50cc0351d6145a805d1d5ae8be4dfce58178e648 @@ -1606,8 +1607,8 @@ trackedFiles: pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c src/mistralai/client/_version.py: id: cc807b30de19 - last_write_checksum: sha1:03563b818feb27386f7d6a0321a3875e3024a2d2 - pristine_git_object: 1a4d15d66f45d13c7f9cae550138390b5cf5897e + last_write_checksum: sha1:f5109c91723cc927e8513ac9e637512edd91f04e + pristine_git_object: ab2cf01d06f4d4373b52373795db76aa40f00ceb src/mistralai/client/accesses.py: id: 76fc53bfcf59 last_write_checksum: sha1:ed94623aa8a2bd502572a699a2f54c9281ec283e diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 733650dc..e237388a 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -31,7 +31,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0-a3.1 + version: 2.0.0b1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index d051080f..e3ca5c59 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -18,6 +18,7 @@ sources: sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 tags: - latest + - speakeasy-sdk-regen-1772040743 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -39,7 +40,7 @@ targets: sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: 
sha256:f3cf9d6d99a27d6e753bd6e1a2f2c2fb290f412a455576de4bab610ab4825939 + codeSamplesRevisionDigest: sha256:0bcecf3d1523375a194d6aa13116ffba291da8321e44b01399ae5e24f7ce2e33 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.729.0 diff --git a/RELEASES.md b/RELEASES.md index 90f534ef..48b65760 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -368,4 +368,14 @@ Based on: ### Generated - [python v1.12.0] . ### Releases -- [PyPI v1.12.0] https://pypi.org/project/mistralai/1.12.0 - . \ No newline at end of file +- [PyPI v1.12.0] https://pypi.org/project/mistralai/1.12.0 - . + +## 2026-02-25 17:32:05 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] . +### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai/2.0.0b1 - . \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index c1762f0a..c42e4260 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "2.0.0a4" +version = "2.0.0b1" description = "Python Client SDK for the Mistral AI API." 
authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py index 1a4d15d6..ab2cf01d 100644 --- a/src/mistralai/client/_version.py +++ b/src/mistralai/client/_version.py @@ -4,10 +4,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "2.0.0-a3.1" +__version__: str = "2.0.0b1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.841.0" -__user_agent__: str = "speakeasy-sdk/python 2.0.0-a3.1 2.841.0 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai" try: if __package__ is not None: diff --git a/uv.lock b/uv.lock index 8c689c4a..1a37a7d6 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "2.0.0a4" +version = "2.0.0b1" source = { editable = "." } dependencies = [ { name = "eval-type-backport" }, From 4b7ac55d694f8cdaaae3226611fdd46b1a81b8d8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 25 Feb 2026 19:26:02 +0100 Subject: [PATCH 211/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRAL-PYTHON-SDK-GOOGLE-CLOUD=20MISTRALAI-GCP-?= =?UTF-8?q?SDK=202.0.0b1=20(#373)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.729.0 * chore: set version to 2.0.0b1 --------- Co-authored-by: speakeasybot Co-authored-by: Louis Sanna --- .speakeasy/workflow.lock | 3 ++- packages/gcp/.speakeasy/gen.lock | 14 +++++++------- packages/gcp/.speakeasy/gen.yaml | 2 +- packages/gcp/RELEASES.md | 12 +++++++++++- packages/gcp/pyproject.toml | 2 +- packages/gcp/src/mistralai/gcp/client/_version.py | 4 ++-- packages/gcp/uv.lock | 2 +- 7 files changed, 25 insertions(+), 14 deletions(-) diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index e3ca5c59..bbad9734 100644 --- 
a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -12,6 +12,7 @@ sources: sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 tags: - latest + - speakeasy-sdk-regen-1772041030 mistral-openapi: sourceNamespace: mistral-openapi sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 @@ -33,7 +34,7 @@ targets: sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:f6c4dc988e9b7be6f8d8087d14b2269be601bb9bff2227b07e1018efe88e1556 + codeSamplesRevisionDigest: sha256:35f30ba8ce4bd70f58b6abc5222d0bbf82eecc3109b09ca99df4406e363e21a0 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi diff --git a/packages/gcp/.speakeasy/gen.lock b/packages/gcp/.speakeasy/gen.lock index 517e1a85..6e33773d 100644 --- a/packages/gcp/.speakeasy/gen.lock +++ b/packages/gcp/.speakeasy/gen.lock @@ -5,16 +5,16 @@ management: docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0-a4.1 - configChecksum: bfe17061a2e5ac54039980ad7a48fd77 + releaseVersion: 2.0.0b1 + configChecksum: 9cea6a311ff15502c47b0ef87e9846a2 repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/gcp installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/gcp published: true persistentEdits: - generation_id: c7e2e696-b223-4993-a79b-2e6f15242c30 - pristine_commit_hash: 86953bc23bb7fcfc3c2525f79114411bc27e8f75 - pristine_tree_hash: 93675a8857b7519918499101d4a5e30fc7fe2c4a + generation_id: e503bb37-7bdd-4ebf-9bed-a8f754c99f8a + pristine_commit_hash: f14b1b1288437b7fc0ba666a384614a225385259 + pristine_tree_hash: 
67e6d0a84ae20666a636dcc8ad174647a96b105f features: python: additionalDependencies: 1.0.0 @@ -326,8 +326,8 @@ trackedFiles: pristine_git_object: ea95bed210db9180824efddfb1b3e47f5bf96489 src/mistralai/gcp/client/_version.py: id: f87319e32c7b - last_write_checksum: sha1:85dd6da1d6503d717e8c9bd6d62278b469d3b464 - pristine_git_object: 204c92a656855ad281e86a74467e71ae1b04639f + last_write_checksum: sha1:0d99fadc73b957112022a95eabeb0e3a98d14ff4 + pristine_git_object: 36e44a5e6067e8bd197b38cc238686f660c77244 src/mistralai/gcp/client/basesdk.py: id: 4d594572857b last_write_checksum: sha1:d8ef9e2f4fa97d402eb9f5472ceb80fb39693991 diff --git a/packages/gcp/.speakeasy/gen.yaml b/packages/gcp/.speakeasy/gen.yaml index 54336636..18f4b4d5 100644 --- a/packages/gcp/.speakeasy/gen.yaml +++ b/packages/gcp/.speakeasy/gen.yaml @@ -30,7 +30,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0-a4.1 + version: 2.0.0b1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/gcp/RELEASES.md b/packages/gcp/RELEASES.md index b503c75f..ec883c62 100644 --- a/packages/gcp/RELEASES.md +++ b/packages/gcp/RELEASES.md @@ -8,4 +8,14 @@ Based on: ### Generated - [python v1.7.0] packages/mistralai_gcp ### Releases -- [PyPI v1.7.0] https://pypi.org/project/mistralai-gcp/1.7.0 - packages/mistralai_gcp \ No newline at end of file +- [PyPI v1.7.0] https://pypi.org/project/mistralai-gcp/1.7.0 - packages/mistralai_gcp + +## 2026-02-25 17:36:50 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] packages/gcp +### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-gcp/2.0.0b1 - packages/gcp \ No newline at end of file diff --git a/packages/gcp/pyproject.toml 
b/packages/gcp/pyproject.toml index 98619ecd..c0497656 100644 --- a/packages/gcp/pyproject.toml +++ b/packages/gcp/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai-gcp" -version = "2.0.0a4" +version = "2.0.0b1" description = "Python Client SDK for the Mistral AI API in GCP." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/packages/gcp/src/mistralai/gcp/client/_version.py b/packages/gcp/src/mistralai/gcp/client/_version.py index 204c92a6..36e44a5e 100644 --- a/packages/gcp/src/mistralai/gcp/client/_version.py +++ b/packages/gcp/src/mistralai/gcp/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "2.0.0-a4.1" +__version__: str = "2.0.0b1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.841.0" -__user_agent__: str = "speakeasy-sdk/python 2.0.0-a4.1 2.841.0 1.0.0 mistralai-gcp" +__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/gcp/uv.lock b/packages/gcp/uv.lock index a49757c9..9bd9f9b6 100644 --- a/packages/gcp/uv.lock +++ b/packages/gcp/uv.lock @@ -277,7 +277,7 @@ wheels = [ [[package]] name = "mistralai-gcp" -version = "2.0.0a4" +version = "2.0.0b1" source = { editable = "." 
} dependencies = [ { name = "eval-type-backport" }, From 3eaedb47bbdf810356fa80e3e920d572e0f0b70b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 25 Feb 2026 19:26:34 +0100 Subject: [PATCH 212/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRAL-PYTHON-SDK-AZURE=20MISTRALAI-AZURE-SDK?= =?UTF-8?q?=202.0.0b1=20(#374)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.729.0 * chore: set version to 2.0.0b1 --------- Co-authored-by: speakeasybot Co-authored-by: Louis Sanna --- .speakeasy/workflow.lock | 3 ++- packages/azure/.speakeasy/gen.lock | 14 +++++++------- packages/azure/.speakeasy/gen.yaml | 2 +- packages/azure/RELEASES.md | 12 +++++++++++- packages/azure/pyproject.toml | 2 +- .../azure/src/mistralai/azure/client/_version.py | 4 ++-- packages/azure/uv.lock | 2 +- 7 files changed, 25 insertions(+), 14 deletions(-) diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index bbad9734..b26cdf2b 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -6,6 +6,7 @@ sources: sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e tags: - latest + - speakeasy-sdk-regen-1772041212 mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 @@ -27,7 +28,7 @@ targets: sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:248e5daaa44589805664ab1479502885758fde0f1da3b384b97b1a09d74c8256 + codeSamplesRevisionDigest: sha256:68866aada6ad13253e32dab06e4876a7aeba4d7759683d81b2ba27f0fb55a342 
mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud diff --git a/packages/azure/.speakeasy/gen.lock b/packages/azure/.speakeasy/gen.lock index c795c61c..5da824d1 100644 --- a/packages/azure/.speakeasy/gen.lock +++ b/packages/azure/.speakeasy/gen.lock @@ -5,16 +5,16 @@ management: docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0-a4.1 - configChecksum: e2523ba89eba35872d05ddb673dd862a + releaseVersion: 2.0.0b1 + configChecksum: 01160bf17a4abd1ce038528d20cd4685 repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/azure installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/azure published: true persistentEdits: - generation_id: 1812b54a-0aa7-4b43-8c53-d70427856543 - pristine_commit_hash: 28db2945de995b5707dc7f310b5291435aaafcbf - pristine_tree_hash: b01973b36166a61d38fa84cf7dae49b7a74e1402 + generation_id: 2f5b7e40-9bd2-4c96-9e97-16a92e4b44af + pristine_commit_hash: 480a8b0e23da7e4752e6ad5b36fc72651e09d2d7 + pristine_tree_hash: 8a4c9b9a253fbe496a52e0496fa7e58e91e32c7c features: python: additionalDependencies: 1.0.0 @@ -354,8 +354,8 @@ trackedFiles: pristine_git_object: 3e4e39555d60adebe84e596c8323ee5b80676fc9 src/mistralai/azure/client/_version.py: id: a77160e60e5d - last_write_checksum: sha1:b1d1971d43e8f92bd55bb45653a228fd9de97af3 - pristine_git_object: 4f985cc69c492521664044337e5910f8e5a26b90 + last_write_checksum: sha1:1b76e9448049c69dbdb690b9de25456378bba0a7 + pristine_git_object: 213648be87a19e24d87160c1286614b2d5df7344 src/mistralai/azure/client/basesdk.py: id: 5a585a95ec21 last_write_checksum: sha1:0c2e686aa42d6aeeb103193aa058d6ddff7bcf74 diff --git a/packages/azure/.speakeasy/gen.yaml b/packages/azure/.speakeasy/gen.yaml index 0b7262e0..55934cc8 100644 --- a/packages/azure/.speakeasy/gen.yaml +++ 
b/packages/azure/.speakeasy/gen.yaml @@ -30,7 +30,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0-a4.1 + version: 2.0.0b1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/azure/RELEASES.md b/packages/azure/RELEASES.md index e471af0f..e625ee98 100644 --- a/packages/azure/RELEASES.md +++ b/packages/azure/RELEASES.md @@ -8,4 +8,14 @@ Based on: ### Generated - [python v1.7.0] packages/mistralai_azure ### Releases -- [PyPI v1.7.0] https://pypi.org/project/mistralai_azure/1.7.0 - packages/mistralai_azure \ No newline at end of file +- [PyPI v1.7.0] https://pypi.org/project/mistralai_azure/1.7.0 - packages/mistralai_azure + +## 2026-02-25 17:39:51 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0b1] packages/azure +### Releases +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-azure/2.0.0b1 - packages/azure \ No newline at end of file diff --git a/packages/azure/pyproject.toml b/packages/azure/pyproject.toml index 3b9aa829..cf80bde8 100644 --- a/packages/azure/pyproject.toml +++ b/packages/azure/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai-azure" -version = "2.0.0a4" +version = "2.0.0b1" description = "Python Client SDK for the Mistral AI API in Azure." 
authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/packages/azure/src/mistralai/azure/client/_version.py b/packages/azure/src/mistralai/azure/client/_version.py index 4f985cc6..213648be 100644 --- a/packages/azure/src/mistralai/azure/client/_version.py +++ b/packages/azure/src/mistralai/azure/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-azure" -__version__: str = "2.0.0-a4.1" +__version__: str = "2.0.0b1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.841.0" -__user_agent__: str = "speakeasy-sdk/python 2.0.0-a4.1 2.841.0 1.0.0 mistralai-azure" +__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai-azure" try: if __package__ is not None: diff --git a/packages/azure/uv.lock b/packages/azure/uv.lock index cedb1ce8..7c090c00 100644 --- a/packages/azure/uv.lock +++ b/packages/azure/uv.lock @@ -154,7 +154,7 @@ wheels = [ [[package]] name = "mistralai-azure" -version = "2.0.0a4" +version = "2.0.0b1" source = { editable = "." } dependencies = [ { name = "httpcore" }, From 7cb12a9cce93040acc6cc19708e09a3c90a60c92 Mon Sep 17 00:00:00 2001 From: Jean-Malo Delignon <56539593+jean-malo@users.noreply.github.com> Date: Thu, 26 Feb 2026 11:44:40 +0100 Subject: [PATCH 213/223] feat(realtime): add support for target streaming delay in realtime transcription (#376) * feat(realtime): add support for target streaming delay in realtime transcription This commit introduces the ability to specify a target streaming delay in milliseconds for realtime transcription sessions. The changes include: 1. Adding a new parameter `target_streaming_delay_ms` to the realtime transcription API 2. Updating the connection management to handle the new delay parameter 3. Adding support for audio flushing to ensure proper handling of streaming delays 4. Creating a new example demonstrating dual-delay transcription with two parallel streams 5. 
Improving error handling for microphone access and PyAudio loading The changes allow for more precise control over the latency/accuracy tradeoff in realtime transcription, enabling use cases that require different streaming delays for different purposes. * ci: skip dual-delay realtime example in run_examples --- ...ime_transcription_dual_delay_microphone.py | 473 ++++++++++++++++++ ...async_realtime_transcription_microphone.py | 32 +- .../async_realtime_transcription_stream.py | 7 + examples/mistral/audio/pyaudio_utils.py | 38 ++ scripts/run_examples.sh | 1 + src/mistralai/extra/realtime/connection.py | 61 ++- src/mistralai/extra/realtime/transcription.py | 11 +- 7 files changed, 600 insertions(+), 23 deletions(-) create mode 100644 examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py create mode 100644 examples/mistral/audio/pyaudio_utils.py diff --git a/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py b/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py new file mode 100644 index 00000000..7653b0ed --- /dev/null +++ b/examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py @@ -0,0 +1,473 @@ +#!/usr/bin/env python +# /// script +# requires-python = ">=3.9" +# dependencies = [ +# "mistralai[realtime]", +# "pyaudio", +# "rich", +# ] +# [tool.uv.sources] +# mistralai = { path = "../../..", editable = true } +# /// + +import argparse +import asyncio +import difflib +import os +import sys +from dataclasses import dataclass +from typing import AsyncIterator, Sequence + +from rich.align import Align +from rich.console import Console +from rich.layout import Layout +from rich.live import Live +from rich.panel import Panel +from rich.text import Text + +from mistralai.client import Mistral +from mistralai.extra.realtime import UnknownRealtimeEvent +from mistralai.client.models import ( + AudioFormat, + RealtimeTranscriptionError, + RealtimeTranscriptionSessionCreated, + 
TranscriptionStreamDone, + TranscriptionStreamTextDelta, +) + +from pyaudio_utils import load_pyaudio + +console = Console() + + +@dataclass +class DualTranscriptState: + """Tracks transcript state for dual-delay transcription.""" + + fast_full_text: str = "" + slow_full_text: str = "" + fast_status: str = "🔌 Connecting..." + slow_status: str = "🔌 Connecting..." + error: str | None = None + fast_done: bool = False + slow_done: bool = False + + def set_error(self, message: str) -> None: + self.error = message + self.fast_status = "❌ Error" + self.slow_status = "❌ Error" + + +class DualTranscriptDisplay: + """Renders a live dual-delay transcription UI.""" + + def __init__( + self, + *, + model: str, + fast_delay_ms: int, + slow_delay_ms: int, + state: DualTranscriptState, + ) -> None: + self.model = model + self.fast_delay_ms = fast_delay_ms + self.slow_delay_ms = slow_delay_ms + self.state = state + + @staticmethod + def _normalize_word(word: str) -> str: + return word.strip(".,!?;:\"'()[]{}").lower() + + def _compute_display_texts(self) -> tuple[str, str]: + slow_words = self.state.slow_full_text.split() + fast_words = self.state.fast_full_text.split() + + if not slow_words: + partial_text = f" {self.state.fast_full_text}".rstrip() + return "", partial_text + + slow_norm = [self._normalize_word(word) for word in slow_words] + fast_norm = [self._normalize_word(word) for word in fast_words] + + matcher = difflib.SequenceMatcher(None, slow_norm, fast_norm) + last_fast_index = 0 + slow_progress = 0 + for block in matcher.get_matching_blocks(): + if block.size == 0: + continue + slow_end = block.a + block.size + if slow_end > slow_progress: + slow_progress = slow_end + last_fast_index = block.b + block.size + + if last_fast_index < len(fast_words): + ahead_words = fast_words[last_fast_index:] + partial_text = " " + " ".join(ahead_words) if ahead_words else "" + else: + partial_text = "" + + return self.state.slow_full_text, partial_text + + @staticmethod + def 
_status_style(status: str) -> str: + if "Listening" in status: + return "green" + if "Connecting" in status: + return "yellow dim" + if "Done" in status or "Stopped" in status: + return "dim" + return "red" + + def render(self) -> Layout: + layout = Layout() + + header_text = Text() + header_text.append("│ ", style="dim") + header_text.append(self.model, style="dim") + header_text.append(" │ ", style="dim") + header_text.append( + f"fast {self.fast_delay_ms}ms", style="bright_yellow" + ) + header_text.append( + f" {self.state.fast_status}", + style=self._status_style(self.state.fast_status), + ) + header_text.append(" │ ", style="dim") + header_text.append(f"slow {self.slow_delay_ms}ms", style="white") + header_text.append( + f" {self.state.slow_status}", + style=self._status_style(self.state.slow_status), + ) + + header = Align.left(header_text, vertical="middle", pad=False) + + final_text, partial_text = self._compute_display_texts() + transcript_text = Text() + if final_text or partial_text: + transcript_text.append(final_text, style="white") + transcript_text.append(partial_text, style="bright_yellow") + else: + transcript_text.append("...", style="dim") + + transcript = Panel( + Align.left(transcript_text, vertical="top"), + border_style="dim", + padding=(1, 2), + ) + + footer_text = Text() + footer_text.append("ctrl+c", style="dim") + footer_text.append(" quit", style="dim italic") + footer = Align.left(footer_text, vertical="middle", pad=False) + + if self.state.error: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout( + Panel(Text(self.state.error, style="red"), border_style="red"), + name="error", + size=4, + ), + Layout(footer, name="footer", size=1), + ) + else: + layout.split_column( + Layout(header, name="header", size=1), + Layout(transcript, name="body"), + Layout(footer, name="footer", size=1), + ) + + return layout + + +async def iter_microphone( + *, + sample_rate: int, + chunk_duration_ms: 
int, +) -> AsyncIterator[bytes]: + """ + Yield microphone PCM chunks using PyAudio (16-bit mono). + Encoding is always pcm_s16le. + """ + pyaudio = load_pyaudio() + + p = pyaudio.PyAudio() + chunk_samples = int(sample_rate * chunk_duration_ms / 1000) + + stream = p.open( + format=pyaudio.paInt16, + channels=1, + rate=sample_rate, + input=True, + frames_per_buffer=chunk_samples, + ) + + loop = asyncio.get_running_loop() + try: + while True: + data = await loop.run_in_executor(None, stream.read, chunk_samples, False) + yield data + finally: + stream.stop_stream() + stream.close() + p.terminate() + + +async def queue_audio_iter( + queue: asyncio.Queue[bytes | None], +) -> AsyncIterator[bytes]: + """Yield audio chunks from a queue until a None sentinel is received.""" + while True: + chunk = await queue.get() + if chunk is None: + break + yield chunk + + +async def broadcast_microphone( + *, + sample_rate: int, + chunk_duration_ms: int, + queues: Sequence[asyncio.Queue[bytes | None]], +) -> None: + """Read from the microphone once and broadcast to multiple queues.""" + try: + async for chunk in iter_microphone( + sample_rate=sample_rate, chunk_duration_ms=chunk_duration_ms + ): + for queue in queues: + await queue.put(chunk) + finally: + for queue in queues: + while True: + try: + queue.put_nowait(None) + break + except asyncio.QueueFull: + try: + queue.get_nowait() + except asyncio.QueueEmpty: + break + + +def _status_for_event(event: object) -> str: + if isinstance(event, RealtimeTranscriptionSessionCreated): + return "🎤 Listening..." + return "✅ Done" + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Dual-delay real-time microphone transcription." 
+ ) + parser.add_argument( + "--model", + default="voxtral-mini-transcribe-realtime-2602", + help="Model ID", + ) + parser.add_argument( + "--fast-delay-ms", + type=int, + default=240, + help="Fast target streaming delay in ms", + ) + parser.add_argument( + "--slow-delay-ms", + type=int, + default=2400, + help="Slow target streaming delay in ms", + ) + parser.add_argument( + "--sample-rate", + type=int, + default=16000, + choices=[8000, 16000, 22050, 44100, 48000], + help="Sample rate in Hz", + ) + parser.add_argument( + "--chunk-duration", + type=int, + default=10, + help="Chunk duration in ms", + ) + parser.add_argument( + "--api-key", + default=os.environ.get("MISTRAL_API_KEY"), + help="Mistral API key", + ) + parser.add_argument( + "--base-url", + default=os.environ.get("MISTRAL_BASE_URL", "wss://api.mistral.ai"), + ) + return parser.parse_args() + + +async def run_stream( + *, + client: Mistral, + model: str, + delay_ms: int, + audio_stream: AsyncIterator[bytes], + audio_format: AudioFormat, + state: DualTranscriptState, + update_queue: asyncio.Queue[None], + is_fast: bool, +) -> None: + try: + async for event in client.audio.realtime.transcribe_stream( + audio_stream=audio_stream, + model=model, + audio_format=audio_format, + target_streaming_delay_ms=delay_ms, + ): + if isinstance(event, RealtimeTranscriptionSessionCreated): + if is_fast: + state.fast_status = _status_for_event(event) + else: + state.slow_status = _status_for_event(event) + elif isinstance(event, TranscriptionStreamTextDelta): + if is_fast: + state.fast_full_text += event.text + else: + state.slow_full_text += event.text + elif isinstance(event, TranscriptionStreamDone): + if is_fast: + state.fast_status = _status_for_event(event) + state.fast_done = True + else: + state.slow_status = _status_for_event(event) + state.slow_done = True + break + elif isinstance(event, RealtimeTranscriptionError): + state.set_error(str(event.error)) + break + elif isinstance(event, UnknownRealtimeEvent): + 
continue + + if update_queue.empty(): + update_queue.put_nowait(None) + except Exception as exc: # pragma: no cover - safety net for UI demo + state.set_error(str(exc)) + if update_queue.empty(): + update_queue.put_nowait(None) + + +async def ui_loop( + display: DualTranscriptDisplay, + update_queue: asyncio.Queue[None], + stop_event: asyncio.Event, + *, + refresh_hz: float = 12.0, +) -> None: + with Live( + display.render(), console=console, refresh_per_second=refresh_hz, screen=True + ) as live: + while not stop_event.is_set(): + try: + await asyncio.wait_for(update_queue.get(), timeout=0.25) + except asyncio.TimeoutError: + pass + live.update(display.render()) + + +async def main() -> int: + args = parse_args() + api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + + try: + load_pyaudio() + except RuntimeError as exc: + console.print(str(exc), style="red") + return 1 + + state = DualTranscriptState() + display = DualTranscriptDisplay( + model=args.model, + fast_delay_ms=args.fast_delay_ms, + slow_delay_ms=args.slow_delay_ms, + state=state, + ) + + client = Mistral(api_key=api_key, server_url=args.base_url) + audio_format = AudioFormat(encoding="pcm_s16le", sample_rate=args.sample_rate) + + fast_queue: asyncio.Queue[bytes | None] = asyncio.Queue(maxsize=50) + slow_queue: asyncio.Queue[bytes | None] = asyncio.Queue(maxsize=50) + + stop_event = asyncio.Event() + update_queue: asyncio.Queue[None] = asyncio.Queue(maxsize=1) + + broadcaster = asyncio.create_task( + broadcast_microphone( + sample_rate=args.sample_rate, + chunk_duration_ms=args.chunk_duration, + queues=(fast_queue, slow_queue), + ) + ) + + fast_task = asyncio.create_task( + run_stream( + client=client, + model=args.model, + delay_ms=args.fast_delay_ms, + audio_stream=queue_audio_iter(fast_queue), + audio_format=audio_format, + state=state, + update_queue=update_queue, + is_fast=True, + ) + ) + + slow_task = asyncio.create_task( + run_stream( + client=client, + model=args.model, + 
delay_ms=args.slow_delay_ms, + audio_stream=queue_audio_iter(slow_queue), + audio_format=audio_format, + state=state, + update_queue=update_queue, + is_fast=False, + ) + ) + + ui_task = asyncio.create_task( + ui_loop(display, update_queue, stop_event, refresh_hz=12.0) + ) + + try: + while True: + await asyncio.sleep(0.1) + for task in (broadcaster, fast_task, slow_task): + if not task.done(): + continue + exc = task.exception() + if exc: + state.set_error(str(exc)) + if update_queue.empty(): + update_queue.put_nowait(None) + stop_event.set() + break + if state.error: + stop_event.set() + break + if state.fast_done and state.slow_done: + stop_event.set() + break + except KeyboardInterrupt: + state.fast_status = "⏹️ Stopped" + state.slow_status = "⏹️ Stopped" + stop_event.set() + finally: + broadcaster.cancel() + fast_task.cancel() + slow_task.cancel() + await asyncio.gather(broadcaster, fast_task, slow_task, return_exceptions=True) + await ui_task + + return 0 if not state.error else 1 + + +if __name__ == "__main__": + sys.exit(asyncio.run(main())) diff --git a/examples/mistral/audio/async_realtime_transcription_microphone.py b/examples/mistral/audio/async_realtime_transcription_microphone.py index 191a21e4..49568aea 100644 --- a/examples/mistral/audio/async_realtime_transcription_microphone.py +++ b/examples/mistral/audio/async_realtime_transcription_microphone.py @@ -33,14 +33,17 @@ TranscriptionStreamTextDelta, ) +from pyaudio_utils import load_pyaudio + console = Console() class TranscriptDisplay: """Manages the live transcript display.""" - def __init__(self, model: str) -> None: + def __init__(self, model: str, target_streaming_delay_ms: int | None) -> None: self.model = model + self.target_streaming_delay_ms = target_streaming_delay_ms self.transcript = "" self.status = "🔌 Connecting..." 
self.error: str | None = None @@ -65,6 +68,10 @@ def render(self) -> Layout: header_text = Text() header_text.append("│ ", style="dim") header_text.append(self.model, style="dim") + if self.target_streaming_delay_ms is not None: + header_text.append( + f" · delay {self.target_streaming_delay_ms}ms", style="dim" + ) header_text.append(" │ ", style="dim") if "Listening" in self.status: @@ -126,7 +133,7 @@ async def iter_microphone( Yield microphone PCM chunks using PyAudio (16-bit mono). Encoding is always pcm_s16le. """ - import pyaudio + pyaudio = load_pyaudio() p = pyaudio.PyAudio() chunk_samples = int(sample_rate * chunk_duration_ms / 1000) @@ -164,6 +171,12 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--chunk-duration", type=int, default=10, help="Chunk duration in ms" ) + parser.add_argument( + "--target-streaming-delay-ms", + type=int, + default=None, + help="Target streaming delay in milliseconds", + ) parser.add_argument( "--api-key", default=os.environ.get("MISTRAL_API_KEY"), help="Mistral API key" ) @@ -178,6 +191,12 @@ async def main() -> int: args = parse_args() api_key = args.api_key or os.environ["MISTRAL_API_KEY"] + try: + load_pyaudio() + except RuntimeError as exc: + console.print(str(exc), style="red") + return 1 + client = Mistral(api_key=api_key, server_url=args.base_url) # microphone is always pcm_s16le here @@ -187,7 +206,9 @@ async def main() -> int: sample_rate=args.sample_rate, chunk_duration_ms=args.chunk_duration ) - display = TranscriptDisplay(model=args.model) + display = TranscriptDisplay( + model=args.model, target_streaming_delay_ms=args.target_streaming_delay_ms + ) with Live( display.render(), console=console, refresh_per_second=10, screen=True @@ -197,6 +218,7 @@ async def main() -> int: audio_stream=mic_stream, model=args.model, audio_format=audio_format, + target_streaming_delay_ms=args.target_streaming_delay_ms, ): if isinstance(event, RealtimeTranscriptionSessionCreated): display.set_listening() @@ -217,6 
+239,10 @@ async def main() -> int: except KeyboardInterrupt: display.status = "⏹️ Stopped" live.update(display.render()) + except Exception as exc: + display.set_error(str(exc)) + live.update(display.render()) + return 1 return 0 diff --git a/examples/mistral/audio/async_realtime_transcription_stream.py b/examples/mistral/audio/async_realtime_transcription_stream.py index 0a0ac609..c005cf3f 100644 --- a/examples/mistral/audio/async_realtime_transcription_stream.py +++ b/examples/mistral/audio/async_realtime_transcription_stream.py @@ -90,6 +90,12 @@ def parse_args() -> argparse.Namespace: default=0.01, help="Delay between chunks in seconds", ) + parser.add_argument( + "--target-streaming-delay-ms", + type=int, + default=None, + help="Target streaming delay in milliseconds", + ) parser.add_argument( "--no-convert", action="store_true", @@ -120,6 +126,7 @@ async def main() -> int: ), model=args.model, audio_format=AudioFormat(encoding="pcm_s16le", sample_rate=16000), + target_streaming_delay_ms=args.target_streaming_delay_ms, ): if isinstance(event, TranscriptionStreamTextDelta): print(event.text, end="", flush=True) diff --git a/examples/mistral/audio/pyaudio_utils.py b/examples/mistral/audio/pyaudio_utils.py new file mode 100644 index 00000000..af72a885 --- /dev/null +++ b/examples/mistral/audio/pyaudio_utils.py @@ -0,0 +1,38 @@ +from __future__ import annotations + +from types import ModuleType + + +def load_pyaudio() -> ModuleType: + """ + Import PyAudio with a friendly error when PortAudio is missing. + + Raises: + RuntimeError: If PyAudio/PortAudio cannot be imported. + """ + try: + import pyaudio + except Exception as exc: + details = str(exc).lower() + if isinstance(exc, ModuleNotFoundError) and exc.name == "pyaudio": + message = ( + "PyAudio is required to use the microphone.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." 
+ ) + elif "pyaudio._portaudio" in details or "portaudio" in details: + message = ( + "PyAudio is installed, but the PortAudio native library is missing or " + "failed to load.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." + ) + else: + message = ( + "PyAudio is required to use the microphone, but it could not be " + "imported.\n" + "Install PortAudio (eg. for macos: brew install portaudio), then " + "reinstall PyAudio." + ) + raise RuntimeError(message) from exc + return pyaudio diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh index 998b8dbe..eca854b4 100755 --- a/scripts/run_examples.sh +++ b/scripts/run_examples.sh @@ -48,6 +48,7 @@ exclude_files=( "examples/mistral/agents/async_conversation_run_mcp_remote.py" "examples/mistral/audio/async_realtime_transcription_microphone.py" "examples/mistral/audio/async_realtime_transcription_stream.py" + "examples/mistral/audio/async_realtime_transcription_dual_delay_microphone.py" ) # Files that require extra dependencies (agents, mcp, audio, etc.) 
diff --git a/src/mistralai/extra/realtime/connection.py b/src/mistralai/extra/realtime/connection.py index ffbbc735..6547052b 100644 --- a/src/mistralai/extra/realtime/connection.py +++ b/src/mistralai/extra/realtime/connection.py @@ -18,15 +18,21 @@ from mistralai.client.models import ( AudioFormat, + RealtimeTranscriptionInputAudioAppend, + RealtimeTranscriptionInputAudioEnd, + RealtimeTranscriptionInputAudioFlush, RealtimeTranscriptionError, RealtimeTranscriptionSession, RealtimeTranscriptionSessionCreated, RealtimeTranscriptionSessionUpdated, + RealtimeTranscriptionSessionUpdateMessage, + RealtimeTranscriptionSessionUpdatePayload, TranscriptionStreamDone, TranscriptionStreamLanguage, TranscriptionStreamSegmentDelta, TranscriptionStreamTextDelta, ) +from mistralai.client.types import UNSET class UnknownRealtimeEvent(BaseModel): @@ -36,6 +42,7 @@ class UnknownRealtimeEvent(BaseModel): - invalid JSON payload - schema validation failure """ + type: Optional[str] content: Any error: Optional[str] = None @@ -56,7 +63,6 @@ class UnknownRealtimeEvent(BaseModel): UnknownRealtimeEvent, ] - _MESSAGE_MODELS: dict[str, Any] = { "session.created": RealtimeTranscriptionSessionCreated, "session.updated": RealtimeTranscriptionSessionUpdated, @@ -108,7 +114,6 @@ def __init__( ) -> None: self._websocket = websocket self._session = session - self._audio_format = session.audio_format self._closed = False self._initial_events: Deque[RealtimeEvent] = deque(initial_events or []) @@ -122,7 +127,7 @@ def session(self) -> RealtimeTranscriptionSession: @property def audio_format(self) -> AudioFormat: - return self._audio_format + return self._session.audio_format @property def is_closed(self) -> bool: @@ -134,27 +139,46 @@ async def send_audio( if self._closed: raise RuntimeError("Connection is closed") - message = { - "type": "input_audio.append", - "audio": base64.b64encode(bytes(audio_bytes)).decode("ascii"), - } - await self._websocket.send(json.dumps(message)) + message = 
RealtimeTranscriptionInputAudioAppend( + audio=base64.b64encode(bytes(audio_bytes)).decode("ascii") + ) + await self._websocket.send(message.model_dump_json()) - async def update_session(self, audio_format: AudioFormat) -> None: + async def flush_audio(self) -> None: if self._closed: raise RuntimeError("Connection is closed") + await self._websocket.send( + RealtimeTranscriptionInputAudioFlush().model_dump_json() + ) - self._audio_format = audio_format - message = { - "type": "session.update", - "session": {"audio_format": audio_format.model_dump(mode="json")}, - } - await self._websocket.send(json.dumps(message)) + async def update_session( + self, + audio_format: Optional[AudioFormat] = None, + *, + target_streaming_delay_ms: Optional[int] = None, + ) -> None: + if self._closed: + raise RuntimeError("Connection is closed") + + if audio_format is None and target_streaming_delay_ms is None: + raise ValueError("At least one session field must be provided") + + message = RealtimeTranscriptionSessionUpdateMessage( + session=RealtimeTranscriptionSessionUpdatePayload( + audio_format=audio_format if audio_format is not None else UNSET, + target_streaming_delay_ms=target_streaming_delay_ms + if target_streaming_delay_ms is not None + else UNSET, + ) + ) + await self._websocket.send(message.model_dump_json()) async def end_audio(self) -> None: if self._closed: return - await self._websocket.send(json.dumps({"type": "input_audio.end"})) + await self._websocket.send( + RealtimeTranscriptionInputAudioEnd().model_dump_json() + ) async def close(self, *, code: int = 1000, reason: str = "") -> None: if self._closed: @@ -202,6 +226,7 @@ async def events(self) -> AsyncIterator[RealtimeEvent]: await self.close() def _apply_session_updates(self, ev: RealtimeEvent) -> None: - if isinstance(ev, RealtimeTranscriptionSessionCreated) or isinstance(ev, RealtimeTranscriptionSessionUpdated): + if isinstance(ev, RealtimeTranscriptionSessionCreated) or isinstance( + ev, 
RealtimeTranscriptionSessionUpdated + ): self._session = ev.session - self._audio_format = ev.session.audio_format diff --git a/src/mistralai/extra/realtime/transcription.py b/src/mistralai/extra/realtime/transcription.py index 655fd9c1..b216e676 100644 --- a/src/mistralai/extra/realtime/transcription.py +++ b/src/mistralai/extra/realtime/transcription.py @@ -67,6 +67,7 @@ async def connect( self, model: str, audio_format: Optional[AudioFormat] = None, + target_streaming_delay_ms: Optional[int] = None, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, @@ -122,8 +123,11 @@ async def connect( initial_events=initial_events, ) - if audio_format is not None: - await connection.update_session(audio_format) + if audio_format is not None or target_streaming_delay_ms is not None: + await connection.update_session( + audio_format, + target_streaming_delay_ms=target_streaming_delay_ms, + ) return connection @@ -141,6 +145,7 @@ async def transcribe_stream( audio_stream: AsyncIterator[bytes], model: str, audio_format: Optional[AudioFormat] = None, + target_streaming_delay_ms: Optional[int] = None, server_url: Optional[str] = None, timeout_ms: Optional[int] = None, http_headers: Optional[Mapping[str, str]] = None, @@ -154,6 +159,7 @@ async def transcribe_stream( async with await self.connect( model=model, audio_format=audio_format, + target_streaming_delay_ms=target_streaming_delay_ms, server_url=server_url, timeout_ms=timeout_ms, http_headers=http_headers, @@ -164,6 +170,7 @@ async def _send() -> None: if connection.is_closed: break await connection.send_audio(chunk) + await connection.flush_audio() await connection.end_audio() send_task = asyncio.create_task(_send()) From 11babe1c440712a31d362c4a8c216bb8ac688d61 Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Thu, 26 Feb 2026 16:25:52 +0100 Subject: [PATCH 214/223] fix: correct inaccurate migration guide 
documentation (#375) - Remove false "Type Name Changes" section (claimed renamings don't exist) - Remove false "Shorter Request/Response Class Names" section (long names still used) - Simplify installation section with accurate info - Add clarifying note for v2 users reading v0->v1 section --- MIGRATION.md | 33 +++++---------------------------- 1 file changed, 5 insertions(+), 28 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index 906173fe..fe5c8423 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -46,41 +46,16 @@ Azure and GCP SDKs now live under the `mistralai` namespace as separate distribu #### Installation Changes -| v1 | v2 | -|---|---| -| `pip install mistralai` | `pip install mistralai` (includes Azure and GCP) | -| `pip install mistralai[gcp]` (for GCP auth) | `pip install "mistralai[gcp]"` (for GCP auth dependencies) | +The main `mistralai` package now bundles Azure and GCP support. You can also install `mistralai-azure` or `mistralai-gcp` as standalone distributions. -Azure and GCP are now standalone distributions that can be installed independently of the core SDK. The `mistralai[azure]` and `mistralai[gcp]` extras are syntactic sugar that pull in the respective distributions. +For GCP authentication dependencies, use `pip install "mistralai[gcp]"`. ### What Stays the Same - The `Mistral` client API is unchanged - All models (`UserMessage`, `AssistantMessage`, etc.) 
work the same way -### Type Name Changes - -Some type names have been updated for clarity and consistency: - -| Old Name | New Name | -|---|---| -| `Tools` | `ConversationRequestTool` | -| `ToolsTypedDict` | `ConversationRequestToolTypedDict` | -| `HandoffExecution` | `ConversationRequestHandoffExecution` | -| `AgentVersion` | `ConversationRequestAgentVersion` | - -### Shorter Request/Response Class Names - -Internal request and response wrapper classes now use concise names: - -| Old Name | New Name | -|---|---| -| `JobsAPIRoutesFineTuningArchiveFineTunedModelRequest` | `ArchiveModelRequest` | -| `JobsAPIRoutesFineTuningCreateFineTuningJobResponse` | `CreateFineTuningJobResponse` | -| `FilesAPIRoutesUploadFileRequest` | `UploadFileRequest` | -| `AgentsAPIV1ConversationsAppendRequest` | `AppendConversationRequest` | - -This affects all operation-specific request/response types. Core models like `UserMessage`, `ChatCompletionRequest`, etc. are unchanged. +### Enums Enums now accept unknown values for forward compatibility with API changes. @@ -90,6 +65,8 @@ Enums now accept unknown values for forward compatibility with API changes. Version 1.0 introduced significant changes to improve usability and consistency. +> **Note:** The v1.x examples below use v1-style imports (e.g., `from mistralai import Mistral`). If you're on v2.x, combine these API changes with the [v1 to v2 import changes](#migrating-from-v1x-to-v2x) above. + ### Major Changes 1. **Unified Client Class**: `MistralClient` and `MistralAsyncClient` consolidated into a single `Mistral` class From ac5195f601fd1f232e009f940fe3fc71324b0426 Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Thu, 26 Feb 2026 17:11:38 +0100 Subject: [PATCH 215/223] fix: remove inaccurate installation claims in migration docs (#378) Removes incorrect statements about Azure/GCP bundling changes, since the installation behavior hasn't changed between versions. 
--- MIGRATION.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/MIGRATION.md b/MIGRATION.md index fe5c8423..2fc3d13d 100644 --- a/MIGRATION.md +++ b/MIGRATION.md @@ -46,8 +46,6 @@ Azure and GCP SDKs now live under the `mistralai` namespace as separate distribu #### Installation Changes -The main `mistralai` package now bundles Azure and GCP support. You can also install `mistralai-azure` or `mistralai-gcp` as standalone distributions. - For GCP authentication dependencies, use `pip install "mistralai[gcp]"`. ### What Stays the Same From 7a1a8ac90fb4945491de49677877538193af8c6b Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Fri, 27 Feb 2026 09:47:19 +0100 Subject: [PATCH 216/223] feat: align pyproject.toml version with gen.lock in SDK generation (#383) * feat: add workflow to align pyproject.toml version with gen.lock Add a PR-triggered workflow that automatically updates pyproject.toml version to match gen.lock when speakeasybot creates/updates a PR. This fixes version mismatch between gen.lock and pyproject.toml caused by pyproject.toml being in .genignore (Speakeasy cannot update it). The workflow: - Triggers on PR events when gen.lock files change - Only runs for PRs from speakeasybot - Reads releaseVersion from gen.lock - Updates pyproject.toml using uv version - Commits and pushes the change * fix: strip quotes from version extraction in align workflow Handle potential quoted YAML values in gen.lock releaseVersion field. 
--- .../workflows/align_pyproject_version.yaml | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 .github/workflows/align_pyproject_version.yaml diff --git a/.github/workflows/align_pyproject_version.yaml b/.github/workflows/align_pyproject_version.yaml new file mode 100644 index 00000000..0f6d7463 --- /dev/null +++ b/.github/workflows/align_pyproject_version.yaml @@ -0,0 +1,58 @@ +name: Align pyproject.toml version + +on: + pull_request: + types: [opened, synchronize] + paths: + - ".speakeasy/gen.lock" + - "packages/azure/.speakeasy/gen.lock" + - "packages/gcp/.speakeasy/gen.lock" + +permissions: + contents: write + +jobs: + align-version: + if: github.actor == 'speakeasybot' + runs-on: ubuntu-latest + steps: + - name: Checkout PR branch + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 + with: + ref: ${{ github.head_ref }} + + - name: Install uv + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + + - name: Align main SDK version + if: hashFiles('.speakeasy/gen.lock') != '' + run: | + VERSION=$(grep 'releaseVersion:' .speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + echo "Aligning main SDK to version: $VERSION" + uv version "$VERSION" + + - name: Align Azure SDK version + if: hashFiles('packages/azure/.speakeasy/gen.lock') != '' + run: | + VERSION=$(grep 'releaseVersion:' packages/azure/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + echo "Aligning Azure SDK to version: $VERSION" + uv version "$VERSION" --directory packages/azure + + - name: Align GCP SDK version + if: hashFiles('packages/gcp/.speakeasy/gen.lock') != '' + run: | + VERSION=$(grep 'releaseVersion:' packages/gcp/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') + echo "Aligning GCP SDK to version: $VERSION" + uv version "$VERSION" --directory packages/gcp + + - name: Commit and push + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add 
pyproject.toml packages/*/pyproject.toml 2>/dev/null || true + if git diff --cached --quiet; then + echo "No version change needed" + else + git commit -m "chore: align pyproject.toml version with gen.lock" + git push + fi From e2cdc231291a20de4e300c932f036c66a08b210e Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Fri, 27 Feb 2026 10:51:48 +0100 Subject: [PATCH 217/223] fix: check for github-actions[bot] instead of speakeasybot (#387) Speakeasy PRs are authored by github-actions[bot], not speakeasybot. --- .github/workflows/align_pyproject_version.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/align_pyproject_version.yaml b/.github/workflows/align_pyproject_version.yaml index 0f6d7463..aeea2096 100644 --- a/.github/workflows/align_pyproject_version.yaml +++ b/.github/workflows/align_pyproject_version.yaml @@ -13,7 +13,7 @@ permissions: jobs: align-version: - if: github.actor == 'speakeasybot' + if: github.actor == 'github-actions[bot]' runs-on: ubuntu-latest steps: - name: Checkout PR branch From ed618e8a432af423800e698c474d62e7c21d78a3 Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Fri, 27 Feb 2026 15:25:21 +0100 Subject: [PATCH 218/223] fix: use workflow_run trigger for version alignment (#390) * fix: use workflow_run trigger instead of pull_request GitHub Actions using GITHUB_TOKEN don't trigger pull_request workflows (security measure to prevent infinite loops). Using workflow_run instead triggers when Speakeasy generation completes, then checks out the PR branch. * fix: restrict to speakeasy branches to address CodeQL warning * fix: use GitHub API instead of checkout to avoid CodeQL warning No checkout means no untrusted code execution. 
Uses gh api to: - Fetch gen.lock content from PR branch - Fetch and update pyproject.toml - Commit changes via API * refactor: extract reusable function and improve error handling - Extract align_version() function to eliminate code duplication - Add proper error handling with set -euo pipefail - Use ::group:: for cleaner logs - Continue processing all SDKs even if one fails - Track exit code to fail workflow if any SDK fails --- .../workflows/align_pyproject_version.yaml | 117 +++++++++++------- 1 file changed, 75 insertions(+), 42 deletions(-) diff --git a/.github/workflows/align_pyproject_version.yaml b/.github/workflows/align_pyproject_version.yaml index aeea2096..77a5b9b5 100644 --- a/.github/workflows/align_pyproject_version.yaml +++ b/.github/workflows/align_pyproject_version.yaml @@ -1,58 +1,91 @@ name: Align pyproject.toml version on: - pull_request: - types: [opened, synchronize] - paths: - - ".speakeasy/gen.lock" - - "packages/azure/.speakeasy/gen.lock" - - "packages/gcp/.speakeasy/gen.lock" + workflow_run: + workflows: + - "Generate MISTRALAI" + - "Generate MISTRAL-PYTHON-SDK-AZURE" + - "Generate MISTRAL-PYTHON-SDK-GOOGLE-CLOUD" + types: [completed] permissions: contents: write jobs: align-version: - if: github.actor == 'github-actions[bot]' + if: github.event.workflow_run.conclusion == 'success' runs-on: ubuntu-latest + env: + GH_TOKEN: ${{ github.token }} + BRANCH: ${{ github.event.workflow_run.head_branch }} + REPO: ${{ github.repository }} steps: - - name: Checkout PR branch - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - with: - ref: ${{ github.head_ref }} + - name: Align SDK versions + run: | + set -euo pipefail - - name: Install uv - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 + align_version() { + local gen_lock_path="$1" + local pyproject_path="$2" + local sdk_name="$3" - - name: Align main SDK version - if: hashFiles('.speakeasy/gen.lock') != '' - run: | - VERSION=$(grep 'releaseVersion:' 
.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') - echo "Aligning main SDK to version: $VERSION" - uv version "$VERSION" + echo "::group::Aligning $sdk_name" - - name: Align Azure SDK version - if: hashFiles('packages/azure/.speakeasy/gen.lock') != '' - run: | - VERSION=$(grep 'releaseVersion:' packages/azure/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') - echo "Aligning Azure SDK to version: $VERSION" - uv version "$VERSION" --directory packages/azure + # Fetch gen.lock from PR branch via API + if ! GEN_LOCK=$(gh api "repos/$REPO/contents/$gen_lock_path?ref=$BRANCH" --jq '.content' 2>/dev/null | base64 -d); then + echo "No gen.lock found at $gen_lock_path, skipping" + echo "::endgroup::" + return 0 + fi - - name: Align GCP SDK version - if: hashFiles('packages/gcp/.speakeasy/gen.lock') != '' - run: | - VERSION=$(grep 'releaseVersion:' packages/gcp/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') - echo "Aligning GCP SDK to version: $VERSION" - uv version "$VERSION" --directory packages/gcp + VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in $gen_lock_path" + echo "::endgroup::" + return 0 + fi + echo "Found version: $VERSION" - - name: Commit and push - run: | - git config user.email "action@github.com" - git config user.name "GitHub Action" - git add pyproject.toml packages/*/pyproject.toml 2>/dev/null || true - if git diff --cached --quiet; then - echo "No version change needed" - else - git commit -m "chore: align pyproject.toml version with gen.lock" - git push - fi + # Fetch current pyproject.toml + if ! 
PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/$pyproject_path?ref=$BRANCH" 2>/dev/null); then + echo "Failed to fetch $pyproject_path" + echo "::endgroup::" + return 1 + fi + + CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') + PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) + + # Update version in pyproject.toml + UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") + + if [ "$PYPROJECT" = "$UPDATED" ]; then + echo "Version already aligned to $VERSION" + echo "::endgroup::" + return 0 + fi + + # Commit updated file via API + ENCODED=$(echo "$UPDATED" | base64 -w 0) + if ! gh api "repos/$REPO/contents/$pyproject_path" \ + -X PUT \ + -f message="chore: align $sdk_name pyproject.toml version with gen.lock" \ + -f content="$ENCODED" \ + -f sha="$CURRENT_SHA" \ + -f branch="$BRANCH" > /dev/null; then + echo "Failed to commit $pyproject_path (may have been updated by another job)" + echo "::endgroup::" + return 1 + fi + + echo "Updated $pyproject_path to version $VERSION" + echo "::endgroup::" + } + + # Align all SDKs (continue on failure to attempt all) + EXIT_CODE=0 + align_version ".speakeasy/gen.lock" "pyproject.toml" "main SDK" || EXIT_CODE=$? + align_version "packages/azure/.speakeasy/gen.lock" "packages/azure/pyproject.toml" "Azure SDK" || EXIT_CODE=$? + align_version "packages/gcp/.speakeasy/gen.lock" "packages/gcp/pyproject.toml" "GCP SDK" || EXIT_CODE=$? + + exit $EXIT_CODE From 176b844dd57b8451b6327dacf3a3432224594b9f Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Fri, 27 Feb 2026 15:49:19 +0100 Subject: [PATCH 219/223] fix: align pyproject.toml version directly in generation workflows (#392) Instead of using a separate workflow with workflow_run trigger (which runs on the wrong branch), embed the version alignment logic directly in each SDK generation workflow as a job that runs after generation. 
This approach works because: - Same workflow = same GITHUB_TOKEN permissions - needs: generate ensures it runs after PR is created - No trigger gymnastics or branch detection issues Deletes the standalone align_pyproject_version.yaml workflow. --- .../workflows/align_pyproject_version.yaml | 91 ------------------- .../sdk_generation_mistralai_azure_sdk.yaml | 58 ++++++++++++ .../sdk_generation_mistralai_gcp_sdk.yaml | 58 ++++++++++++ .../sdk_generation_mistralai_sdk.yaml | 58 ++++++++++++ 4 files changed, 174 insertions(+), 91 deletions(-) delete mode 100644 .github/workflows/align_pyproject_version.yaml diff --git a/.github/workflows/align_pyproject_version.yaml b/.github/workflows/align_pyproject_version.yaml deleted file mode 100644 index 77a5b9b5..00000000 --- a/.github/workflows/align_pyproject_version.yaml +++ /dev/null @@ -1,91 +0,0 @@ -name: Align pyproject.toml version - -on: - workflow_run: - workflows: - - "Generate MISTRALAI" - - "Generate MISTRAL-PYTHON-SDK-AZURE" - - "Generate MISTRAL-PYTHON-SDK-GOOGLE-CLOUD" - types: [completed] - -permissions: - contents: write - -jobs: - align-version: - if: github.event.workflow_run.conclusion == 'success' - runs-on: ubuntu-latest - env: - GH_TOKEN: ${{ github.token }} - BRANCH: ${{ github.event.workflow_run.head_branch }} - REPO: ${{ github.repository }} - steps: - - name: Align SDK versions - run: | - set -euo pipefail - - align_version() { - local gen_lock_path="$1" - local pyproject_path="$2" - local sdk_name="$3" - - echo "::group::Aligning $sdk_name" - - # Fetch gen.lock from PR branch via API - if ! 
GEN_LOCK=$(gh api "repos/$REPO/contents/$gen_lock_path?ref=$BRANCH" --jq '.content' 2>/dev/null | base64 -d); then - echo "No gen.lock found at $gen_lock_path, skipping" - echo "::endgroup::" - return 0 - fi - - VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') - if [ -z "$VERSION" ]; then - echo "No releaseVersion found in $gen_lock_path" - echo "::endgroup::" - return 0 - fi - echo "Found version: $VERSION" - - # Fetch current pyproject.toml - if ! PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/$pyproject_path?ref=$BRANCH" 2>/dev/null); then - echo "Failed to fetch $pyproject_path" - echo "::endgroup::" - return 1 - fi - - CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') - PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) - - # Update version in pyproject.toml - UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") - - if [ "$PYPROJECT" = "$UPDATED" ]; then - echo "Version already aligned to $VERSION" - echo "::endgroup::" - return 0 - fi - - # Commit updated file via API - ENCODED=$(echo "$UPDATED" | base64 -w 0) - if ! gh api "repos/$REPO/contents/$pyproject_path" \ - -X PUT \ - -f message="chore: align $sdk_name pyproject.toml version with gen.lock" \ - -f content="$ENCODED" \ - -f sha="$CURRENT_SHA" \ - -f branch="$BRANCH" > /dev/null; then - echo "Failed to commit $pyproject_path (may have been updated by another job)" - echo "::endgroup::" - return 1 - fi - - echo "Updated $pyproject_path to version $VERSION" - echo "::endgroup::" - } - - # Align all SDKs (continue on failure to attempt all) - EXIT_CODE=0 - align_version ".speakeasy/gen.lock" "pyproject.toml" "main SDK" || EXIT_CODE=$? - align_version "packages/azure/.speakeasy/gen.lock" "packages/azure/pyproject.toml" "Azure SDK" || EXIT_CODE=$? - align_version "packages/gcp/.speakeasy/gen.lock" "packages/gcp/pyproject.toml" "GCP SDK" || EXIT_CODE=$? 
- - exit $EXIT_CODE diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml index 22af64aa..b4c2e908 100644 --- a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -27,3 +27,61 @@ jobs: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Align pyproject.toml version with gen.lock + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + run: | + set -euo pipefail + + # Find the most recent PR created by github-actions bot + PR_BRANCH=$(gh pr list --repo "$REPO" --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + + if [ -z "$PR_BRANCH" ]; then + echo "No PR found from github-actions bot, skipping" + exit 0 + fi + echo "Found PR branch: $PR_BRANCH" + + # Fetch gen.lock from PR branch + if ! 
GEN_LOCK=$(gh api "repos/$REPO/contents/packages/azure/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then + echo "No gen.lock found, skipping" + exit 0 + fi + + VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + # Fetch current pyproject.toml + PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/packages/azure/pyproject.toml?ref=$PR_BRANCH") + CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') + PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) + + # Update version + UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") + + if [ "$PYPROJECT" = "$UPDATED" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + # Commit updated file + ENCODED=$(echo "$UPDATED" | base64 -w 0) + gh api "repos/$REPO/contents/packages/azure/pyproject.toml" \ + -X PUT \ + -f message="chore: align Azure pyproject.toml version to $VERSION" \ + -f content="$ENCODED" \ + -f sha="$CURRENT_SHA" \ + -f branch="$PR_BRANCH" + + echo "Updated packages/azure/pyproject.toml to version $VERSION" diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml index bf1d19b1..5a6cbe71 100644 --- a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -27,3 +27,61 @@ jobs: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Align pyproject.toml version with gen.lock + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + run: | + set -euo pipefail + + # Find the most recent PR created by github-actions bot + PR_BRANCH=$(gh 
pr list --repo "$REPO" --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + + if [ -z "$PR_BRANCH" ]; then + echo "No PR found from github-actions bot, skipping" + exit 0 + fi + echo "Found PR branch: $PR_BRANCH" + + # Fetch gen.lock from PR branch + if ! GEN_LOCK=$(gh api "repos/$REPO/contents/packages/gcp/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then + echo "No gen.lock found, skipping" + exit 0 + fi + + VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + # Fetch current pyproject.toml + PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/packages/gcp/pyproject.toml?ref=$PR_BRANCH") + CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') + PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) + + # Update version + UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") + + if [ "$PYPROJECT" = "$UPDATED" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + # Commit updated file + ENCODED=$(echo "$UPDATED" | base64 -w 0) + gh api "repos/$REPO/contents/packages/gcp/pyproject.toml" \ + -X PUT \ + -f message="chore: align GCP pyproject.toml version to $VERSION" \ + -f content="$ENCODED" \ + -f sha="$CURRENT_SHA" \ + -f branch="$PR_BRANCH" + + echo "Updated packages/gcp/pyproject.toml to version $VERSION" diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index cbe8f1e8..37b8a523 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -27,3 +27,61 @@ jobs: github_access_token: ${{ secrets.GITHUB_TOKEN }} pypi_token: ${{ secrets.PYPI_TOKEN }} speakeasy_api_key: ${{ secrets.SPEAKEASY_API_KEY }} + + 
align-version: + needs: generate + runs-on: ubuntu-latest + steps: + - name: Align pyproject.toml version with gen.lock + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + run: | + set -euo pipefail + + # Find the most recent PR created by github-actions bot + PR_BRANCH=$(gh pr list --repo "$REPO" --author "app/github-actions" \ + --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + + if [ -z "$PR_BRANCH" ]; then + echo "No PR found from github-actions bot, skipping" + exit 0 + fi + echo "Found PR branch: $PR_BRANCH" + + # Fetch gen.lock from PR branch + if ! GEN_LOCK=$(gh api "repos/$REPO/contents/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then + echo "No gen.lock found, skipping" + exit 0 + fi + + VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + if [ -z "$VERSION" ]; then + echo "No releaseVersion found in gen.lock" + exit 0 + fi + echo "Found version: $VERSION" + + # Fetch current pyproject.toml + PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/pyproject.toml?ref=$PR_BRANCH") + CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') + PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) + + # Update version + UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") + + if [ "$PYPROJECT" = "$UPDATED" ]; then + echo "Version already aligned to $VERSION" + exit 0 + fi + + # Commit updated file + ENCODED=$(echo "$UPDATED" | base64 -w 0) + gh api "repos/$REPO/contents/pyproject.toml" \ + -X PUT \ + -f message="chore: align pyproject.toml version to $VERSION" \ + -f content="$ENCODED" \ + -f sha="$CURRENT_SHA" \ + -f branch="$PR_BRANCH" + + echo "Updated pyproject.toml to version $VERSION" From b5f8c81946bcf2b82b0baca8b934097f553fad0f Mon Sep 17 00:00:00 2001 From: Louis Sanna <85956496+louis-sanna-dev@users.noreply.github.com> Date: Fri, 27 Feb 2026 16:09:05 +0100 
Subject: [PATCH 220/223] fix: use checkout + uv version to also update uv.lock (#393) The API-only approach only updated pyproject.toml but not uv.lock. This version: - Checks out the PR branch (safe for workflow_dispatch context) - Uses `uv version` to update both pyproject.toml and uv.lock - Commits and pushes the changes Security note: checkout is safe here because: - workflow_dispatch trigger (not pull_request_target) - Branch is in same repo, not a fork - Branch was just created by Speakeasy in the same workflow run --- .../sdk_generation_mistralai_azure_sdk.yaml | 70 +++++++++---------- .../sdk_generation_mistralai_gcp_sdk.yaml | 70 +++++++++---------- .../sdk_generation_mistralai_sdk.yaml | 70 +++++++++---------- 3 files changed, 105 insertions(+), 105 deletions(-) diff --git a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml index b4c2e908..b5d0741b 100644 --- a/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_azure_sdk.yaml @@ -32,56 +32,56 @@ jobs: needs: generate runs-on: ubuntu-latest steps: - - name: Align pyproject.toml version with gen.lock + - name: Find PR branch + id: find-pr env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - REPO: ${{ github.repository }} run: | - set -euo pipefail - - # Find the most recent PR created by github-actions bot - PR_BRANCH=$(gh pr list --repo "$REPO" --author "app/github-actions" \ + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT - if [ -z "$PR_BRANCH" ]; then - echo "No PR found from github-actions bot, skipping" - exit 0 - fi - echo "Found PR branch: $PR_BRANCH" + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ 
steps.find-pr.outputs.branch }} - # Fetch gen.lock from PR branch - if ! GEN_LOCK=$(gh api "repos/$REPO/contents/packages/azure/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then - echo "No gen.lock found, skipping" - exit 0 - fi + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + - name: Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' packages/azure/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') if [ -z "$VERSION" ]; then echo "No releaseVersion found in gen.lock" exit 0 fi echo "Found version: $VERSION" - # Fetch current pyproject.toml - PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/packages/azure/pyproject.toml?ref=$PR_BRANCH") - CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') - PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) - - # Update version - UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") - - if [ "$PYPROJECT" = "$UPDATED" ]; then + CURRENT=$(grep '^version = ' packages/azure/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then echo "Version already aligned to $VERSION" exit 0 fi - # Commit updated file - ENCODED=$(echo "$UPDATED" | base64 -w 0) - gh api "repos/$REPO/contents/packages/azure/pyproject.toml" \ - -X PUT \ - -f message="chore: align Azure pyproject.toml version to $VERSION" \ - -f content="$ENCODED" \ - -f sha="$CURRENT_SHA" \ - -f branch="$PR_BRANCH" + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" --directory packages/azure - echo "Updated packages/azure/pyproject.toml to version $VERSION" + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email 
"action@github.com" + git config user.name "GitHub Action" + git add packages/azure/pyproject.toml packages/azure/uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' packages/azure/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align Azure pyproject.toml and uv.lock to version $VERSION" + git push + fi diff --git a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml index 5a6cbe71..05f88e25 100644 --- a/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_gcp_sdk.yaml @@ -32,56 +32,56 @@ jobs: needs: generate runs-on: ubuntu-latest steps: - - name: Align pyproject.toml version with gen.lock + - name: Find PR branch + id: find-pr env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - REPO: ${{ github.repository }} run: | - set -euo pipefail - - # Find the most recent PR created by github-actions bot - PR_BRANCH=$(gh pr list --repo "$REPO" --author "app/github-actions" \ + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT - if [ -z "$PR_BRANCH" ]; then - echo "No PR found from github-actions bot, skipping" - exit 0 - fi - echo "Found PR branch: $PR_BRANCH" + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ steps.find-pr.outputs.branch }} - # Fetch gen.lock from PR branch - if ! 
GEN_LOCK=$(gh api "repos/$REPO/contents/packages/gcp/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then - echo "No gen.lock found, skipping" - exit 0 - fi + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + - name: Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' packages/gcp/.speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') if [ -z "$VERSION" ]; then echo "No releaseVersion found in gen.lock" exit 0 fi echo "Found version: $VERSION" - # Fetch current pyproject.toml - PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/packages/gcp/pyproject.toml?ref=$PR_BRANCH") - CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') - PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) - - # Update version - UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") - - if [ "$PYPROJECT" = "$UPDATED" ]; then + CURRENT=$(grep '^version = ' packages/gcp/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then echo "Version already aligned to $VERSION" exit 0 fi - # Commit updated file - ENCODED=$(echo "$UPDATED" | base64 -w 0) - gh api "repos/$REPO/contents/packages/gcp/pyproject.toml" \ - -X PUT \ - -f message="chore: align GCP pyproject.toml version to $VERSION" \ - -f content="$ENCODED" \ - -f sha="$CURRENT_SHA" \ - -f branch="$PR_BRANCH" + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" --directory packages/gcp - echo "Updated packages/gcp/pyproject.toml to version $VERSION" + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add packages/gcp/pyproject.toml 
packages/gcp/uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' packages/gcp/pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align GCP pyproject.toml and uv.lock to version $VERSION" + git push + fi diff --git a/.github/workflows/sdk_generation_mistralai_sdk.yaml b/.github/workflows/sdk_generation_mistralai_sdk.yaml index 37b8a523..59fe1150 100644 --- a/.github/workflows/sdk_generation_mistralai_sdk.yaml +++ b/.github/workflows/sdk_generation_mistralai_sdk.yaml @@ -32,56 +32,56 @@ jobs: needs: generate runs-on: ubuntu-latest steps: - - name: Align pyproject.toml version with gen.lock + - name: Find PR branch + id: find-pr env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - REPO: ${{ github.repository }} run: | - set -euo pipefail - - # Find the most recent PR created by github-actions bot - PR_BRANCH=$(gh pr list --repo "$REPO" --author "app/github-actions" \ + PR_BRANCH=$(gh pr list --repo ${{ github.repository }} --author "app/github-actions" \ --json headRefName,updatedAt --jq 'sort_by(.updatedAt) | reverse | .[0].headRefName // empty') + echo "branch=$PR_BRANCH" >> $GITHUB_OUTPUT - if [ -z "$PR_BRANCH" ]; then - echo "No PR found from github-actions bot, skipping" - exit 0 - fi - echo "Found PR branch: $PR_BRANCH" + - name: Checkout PR branch + if: steps.find-pr.outputs.branch != '' + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + with: + ref: ${{ steps.find-pr.outputs.branch }} - # Fetch gen.lock from PR branch - if ! 
GEN_LOCK=$(gh api "repos/$REPO/contents/.speakeasy/gen.lock?ref=$PR_BRANCH" --jq '.content' 2>/dev/null | base64 -d); then - echo "No gen.lock found, skipping" - exit 0 - fi + - name: Install uv + if: steps.find-pr.outputs.branch != '' + uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6 - VERSION=$(echo "$GEN_LOCK" | grep 'releaseVersion:' | head -1 | awk '{print $2}' | tr -d '"') + - name: Align version using uv + if: steps.find-pr.outputs.branch != '' + run: | + set -euo pipefail + + VERSION=$(grep 'releaseVersion:' .speakeasy/gen.lock | head -1 | awk '{print $2}' | tr -d '"') if [ -z "$VERSION" ]; then echo "No releaseVersion found in gen.lock" exit 0 fi echo "Found version: $VERSION" - # Fetch current pyproject.toml - PYPROJECT_RESPONSE=$(gh api "repos/$REPO/contents/pyproject.toml?ref=$PR_BRANCH") - CURRENT_SHA=$(echo "$PYPROJECT_RESPONSE" | jq -r '.sha') - PYPROJECT=$(echo "$PYPROJECT_RESPONSE" | jq -r '.content' | base64 -d) - - # Update version - UPDATED=$(echo "$PYPROJECT" | sed "s/^version = \".*\"/version = \"$VERSION\"/") - - if [ "$PYPROJECT" = "$UPDATED" ]; then + CURRENT=$(grep '^version = ' pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + if [ "$CURRENT" = "$VERSION" ]; then echo "Version already aligned to $VERSION" exit 0 fi - # Commit updated file - ENCODED=$(echo "$UPDATED" | base64 -w 0) - gh api "repos/$REPO/contents/pyproject.toml" \ - -X PUT \ - -f message="chore: align pyproject.toml version to $VERSION" \ - -f content="$ENCODED" \ - -f sha="$CURRENT_SHA" \ - -f branch="$PR_BRANCH" + echo "Updating version from $CURRENT to $VERSION" + uv version "$VERSION" - echo "Updated pyproject.toml to version $VERSION" + - name: Commit and push + if: steps.find-pr.outputs.branch != '' + run: | + git config user.email "action@github.com" + git config user.name "GitHub Action" + git add pyproject.toml uv.lock + if git diff --cached --quiet; then + echo "No changes to commit" + else + VERSION=$(grep '^version = ' 
pyproject.toml | head -1 | sed 's/version = "\(.*\)"/\1/') + git commit -m "chore: align pyproject.toml and uv.lock to version $VERSION" + git push + fi From 79b53d81907eebba35a4d15cf313bd4583d153bf Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 2 Mar 2026 13:43:50 +0100 Subject: [PATCH 221/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRALAI=20MISTRALAI-SDK=202.0.0rc1=20(#394)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ## Python SDK Changes: * `mistral.beta.libraries.documents.list()`: `response.data[].process_status` **Added** * `mistral.beta.libraries.documents.upload()`: `response.process_status` **Added** * `mistral.beta.libraries.documents.get()`: `response.process_status` **Added** * `mistral.beta.libraries.documents.update()`: `response.process_status` **Added** * `mistral.beta.libraries.documents.status()`: `response.process_status` **Added** * chore: align pyproject.toml and uv.lock to version 2.0.0rc1 --------- Co-authored-by: speakeasybot Co-authored-by: GitHub Action --- .speakeasy/gen.lock | 81 +++++++++++-------- .speakeasy/gen.yaml | 2 +- .speakeasy/workflow.lock | 12 +-- RELEASES.md | 12 ++- docs/models/document.md | 1 + docs/models/embeddingrequest.md | 4 +- docs/models/embeddingrequestinputs.md | 2 +- docs/models/processingstatusout.md | 9 ++- docs/models/processstatus.md | 15 ++++ docs/sdks/embeddings/README.md | 4 +- pyproject.toml | 2 +- src/mistralai/client/_version.py | 4 +- src/mistralai/client/embeddings.py | 8 +- src/mistralai/client/models/__init__.py | 3 + src/mistralai/client/models/document.py | 4 + .../client/models/embeddingrequest.py | 12 +-- .../client/models/processingstatusout.py | 4 + src/mistralai/client/models/processstatus.py | 21 +++++ uv.lock | 2 +- 19 files changed, 138 insertions(+), 64 deletions(-) create mode 100644 docs/models/processstatus.md create mode 
100644 src/mistralai/client/models/processstatus.py diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 7314b7b1..8e89c12b 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,19 +1,19 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: b66b034aac7aa9b38c4fb47a3b3d843e + docChecksum: 9ea68a20ee2ef4565df16947f204034b docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0b1 - configChecksum: 871b5a7d3687bd2a9ebd0e205e4b36a3 + releaseVersion: 2.0.0rc1 + configChecksum: ba30d47e402a93dc30b5001c33116a3d repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true persistentEdits: - generation_id: 1527268d-25cf-4a8c-8a67-09694eaf0d79 - pristine_commit_hash: 5642b69da5a9a00af1e84ca689b7587f2269d0c4 - pristine_tree_hash: f9eae7c82e85b3114e342a4b6500b9704a266493 + generation_id: 92ab8a00-49e7-471b-bca6-d18f761863df + pristine_commit_hash: 5eb9662433e80c22603fb3a3bf921f6b285fa2d4 + pristine_tree_hash: 9e781b9b07960a689815c5fa6008765ae4a60716 features: python: additionalDependencies: 1.0.0 @@ -643,8 +643,8 @@ trackedFiles: pristine_git_object: 8142772d7ea33ad8a75cf9cf822564ba3f630de2 docs/models/document.md: id: cd1d2a444370 - last_write_checksum: sha1:77076e66dea6f4582e73ecc5a55ef750f026448a - pristine_git_object: 284babb98fbb0279bef2626fa18eada0035572c5 + last_write_checksum: sha1:c10641b02547bedcc982b8997097083dfc562598 + pristine_git_object: 42c639a6a9b235ab4c754286486fa7e8872a0e7e docs/models/documentlibrarytool.md: id: 68083b0ef8f3 last_write_checksum: sha1:76b9f47c399915a338abe929cb10c1b37282eadf @@ -671,12 +671,12 @@ trackedFiles: pristine_git_object: 01656b0a85aa87f19909b18100bb6981f89683fc docs/models/embeddingrequest.md: id: bebee24421b4 - last_write_checksum: sha1:8e2bfa35f55b55f83fa2ebf7bee28cd00cb681d1 - 
pristine_git_object: 7269c0551a0c1040693eafdd99e1b8ebe98478a5 + last_write_checksum: sha1:087230e81cfbbc539edc7cc1c0a490728276d217 + pristine_git_object: 71d139cdf5c556a1224d707be70f3fabe032fc27 docs/models/embeddingrequestinputs.md: id: 6a35f3b1910a - last_write_checksum: sha1:e12ca056fac504e5af06a304d09154d3ecd17919 - pristine_git_object: 527a089b38b5cd316173ced4dc74a1429c8e4406 + last_write_checksum: sha1:f3bf6b89f279f59010124aa402e282c7c691eb03 + pristine_git_object: a3f82c1c67c726d3ef8e5e5ea5513386acc7c2f4 docs/models/embeddingresponse.md: id: 31cd0f6b7bb5 last_write_checksum: sha1:1d7351c68b075aba8e91e53d29bdab3c6dd5c3a2 @@ -1175,8 +1175,12 @@ trackedFiles: pristine_git_object: fae3c1ca4ba2c2ddb3b7de401ecdc8d56dcc7740 docs/models/processingstatusout.md: id: 83c8c59c1802 - last_write_checksum: sha1:046375bb3035cc033d4484099cd7f5a4f53ce88c - pristine_git_object: 7b67583f4209778ac6f945631c0ee03ba1f4c663 + last_write_checksum: sha1:7dbbfe790616ab4388e532bd78ffc1a5183b332d + pristine_git_object: bc40d3209c4c641dd7416c925b965c1bf7b73b1b + docs/models/processstatus.md: + id: "336054835357" + last_write_checksum: sha1:9b87de1980428307af6c29c2086c0e1f612ebd72 + pristine_git_object: 3a9c004e55cc31abb52d1f0bb450290465d42a1c docs/models/realtimetranscriptionerror.md: id: 4bc5e819565b last_write_checksum: sha1:c93e4b19a0aa68723ea69973a9f22a581c7b2ff6 @@ -1551,8 +1555,8 @@ trackedFiles: pristine_git_object: 9c219b6709d5d5bfa28113efca92012e8c5a5112 docs/sdks/embeddings/README.md: id: 15b5b04486c1 - last_write_checksum: sha1:76cb4876eebccfd2ab9a10a1b25570477a96a5c1 - pristine_git_object: eecb5c9e991dcd2fd5c1f0688efe3b64b4c6de3b + last_write_checksum: sha1:4a279bf9bcd84a9878ef979c78b8b75af3d52f02 + pristine_git_object: cb207d8be2ca86b00dc797fc06eabd1498adb770 docs/sdks/files/README.md: id: e576d7a117f0 last_write_checksum: sha1:f5861c42227b901742fd8afe7155ed6d634b1b4c @@ -1607,8 +1611,8 @@ trackedFiles: pristine_git_object: 036d44b8cfc51599873bd5c401a6aed30450536c 
src/mistralai/client/_version.py: id: cc807b30de19 - last_write_checksum: sha1:f5109c91723cc927e8513ac9e637512edd91f04e - pristine_git_object: ab2cf01d06f4d4373b52373795db76aa40f00ceb + last_write_checksum: sha1:a48c2cc43ac028eb2e31a129a9551ad2fda3d33f + pristine_git_object: 805648e42e48831658907f664d6536e8bdcd98c0 src/mistralai/client/accesses.py: id: 76fc53bfcf59 last_write_checksum: sha1:ed94623aa8a2bd502572a699a2f54c9281ec283e @@ -1659,8 +1663,8 @@ trackedFiles: pristine_git_object: b3130364c0f3cc90ed1e4407a070bd99e3cce606 src/mistralai/client/embeddings.py: id: f9c17258207e - last_write_checksum: sha1:0fbf92b59fde3199c770a522ead030f8fa65ff5c - pristine_git_object: 5f9d3b9cb611943e509caeda9ddd175e3baee2c3 + last_write_checksum: sha1:d1610bf12dba8b2f8cb27d2f0aa592594dfe6b3a + pristine_git_object: 5d55ffc43c0c98d46e04b238ab23a08d1b9e6a6a src/mistralai/client/errors/__init__.py: id: 0b2db51246df last_write_checksum: sha1:0befddc505c9c47388683126750c7ad0e3fbef52 @@ -1711,8 +1715,8 @@ trackedFiles: pristine_git_object: b8728362b87349118ac6f163f50613dd18c43340 src/mistralai/client/models/__init__.py: id: e0e8dad92725 - last_write_checksum: sha1:50727667552480e8298431f5a3dcc78457c53331 - pristine_git_object: 5ef8b3f3dd9fbb32d4675f7e11808c29fc218c57 + last_write_checksum: sha1:0ac0c956f0f87979e871a00c32884ee3102b6d2b + pristine_git_object: 7d2dfd970d48d54d798f1661206abdc697134434 src/mistralai/client/models/agent.py: id: 1336849c84fb last_write_checksum: sha1:6090ddf2b5b40656dfbf3325f1022a40ae418948 @@ -2075,8 +2079,8 @@ trackedFiles: pristine_git_object: d9fa230e93d4e0886f21c836cf3813855eb8f9fd src/mistralai/client/models/document.py: id: fbbf7428328c - last_write_checksum: sha1:2a5a28c54f0aec50059b6badc1001b1cd120e7d3 - pristine_git_object: 31eebbd1a7d7fdcb498259837c533bfc8008a6f9 + last_write_checksum: sha1:db2e184f7cc97b24e7ec80887c35c32222afd8a8 + pristine_git_object: fcc5bca541a814143b0489528fd20a69004104b1 src/mistralai/client/models/documentlibrarytool.py: id: 
3eb3c218f457 last_write_checksum: sha1:d03a6136192b56778bd739d834d9bdc80a09cc23 @@ -2095,8 +2099,8 @@ trackedFiles: pristine_git_object: 732c4ebe3678563ebcdbafd519f93317261586fb src/mistralai/client/models/embeddingrequest.py: id: eadbe3f9040c - last_write_checksum: sha1:e36282eb015b782804b4bdf3d18b596607b020fd - pristine_git_object: 15950590fec8b82a4fb28d69009a6f6cfb83c9ee + last_write_checksum: sha1:3ef4e321a698c4a10389280f8b1c7d0da20f4faf + pristine_git_object: 5fa2d2f63796523e58e281f2d3bcc05a92111842 src/mistralai/client/models/embeddingresponse.py: id: f7d790e84b65 last_write_checksum: sha1:9bb53a5a860c8e10d4d504648d84da73068c0a83 @@ -2503,8 +2507,12 @@ trackedFiles: pristine_git_object: 0c6f4182ca8140e595f601b12fbd582034257587 src/mistralai/client/models/processingstatusout.py: id: 3df842c4140f - last_write_checksum: sha1:007a476e4101cac4d2a9eef94d289f0f486d763a - pristine_git_object: 3acadcc9792c286cd31031a80e108b74bc2c0c4e + last_write_checksum: sha1:d5acc98adcfc76cdc4fc26e090ecfc4d7835a438 + pristine_git_object: ed2a4f22dcffe787ce69bb9c6011a95216cf3928 + src/mistralai/client/models/processstatus.py: + id: "0205512146e6" + last_write_checksum: sha1:22ad3d5fc80fbf3f83db61512e7bc79295c5fc91 + pristine_git_object: 15bdce2056f2f642096fbbdb2ac32ce1e5b8c9cf src/mistralai/client/models/realtimetranscriptionerror.py: id: 8c2267378f48 last_write_checksum: sha1:78637de61d6fc3bc1fff8e95c0a6f5ffc1a3e111 @@ -3463,7 +3471,7 @@ examples: sort_order: "desc" responses: "200": - application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, "name": "", "created_at": "2024-09-24T04:50:43.988Z", "uploaded_by_id": "7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "processing_status": "", 
"tokens_processing_total": 957230}]} + application/json: {"pagination": {"total_items": 23246, "total_pages": 881485, "current_page": 173326, "page_size": 318395, "has_more": false}, "data": [{"id": "5106c0c7-30fb-4fd3-9083-129b77f9f509", "library_id": "71eb68a2-756e-48b0-9d2b-a04d7bf95ff5", "hash": "", "mime_type": "", "extension": "pdf", "size": 367159, "name": "", "created_at": "2024-09-24T04:50:43.988Z", "process_status": "noop", "uploaded_by_id": "7d65f4d8-1997-479f-bfb4-535c0144b48c", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 957230}]} "422": application/json: {} libraries_documents_upload_v1: @@ -3475,7 +3483,7 @@ examples: multipart/form-data: {"file": "x-file: example.file"} responses: "200": - application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 61161} + application/json: {"id": "d40f9b56-c832-405d-aa99-b3e442254dd8", "library_id": "868d7955-009a-4433-bfc6-ad7b4be4e7e4", "hash": "", "mime_type": "", "extension": "m2v", "size": 418415, "name": "", "created_at": "2025-04-30T20:11:27.130Z", "process_status": "in_progress", "uploaded_by_id": "7db8d896-09c9-438c-b6dc-aa5c70102b3f", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 61161} "422": application/json: {} libraries_documents_get_v1: @@ -3486,7 +3494,7 @@ examples: document_id: "90973aec-0508-4375-8b00-91d732414745" responses: "200": - application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", "library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", "mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", 
"uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 793889} + application/json: {"id": "0de60230-717d-459a-8c0f-fbb9360c01be", "library_id": "e0bf3cf9-cd3b-405b-b842-ac7fcb9c373e", "hash": "", "mime_type": "", "extension": "jpe", "size": 402478, "name": "", "created_at": "2023-07-29T21:43:20.750Z", "process_status": "self_managed", "uploaded_by_id": "d5eadabe-d7f2-4f87-a337-f80c192f886d", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 793889} "422": application/json: {} libraries_documents_update_v1: @@ -3499,7 +3507,7 @@ examples: application/json: {} responses: "200": - application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", "hash": "", "mime_type": "", "extension": "m1v", "size": 802305, "name": "", "created_at": "2024-07-02T20:02:03.680Z", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 806683} + application/json: {"id": "1111e519-9ba5-42de-9301-938fbfee59fc", "library_id": "70aac5e3-23f7-439b-bbef-090e4c1dbd6d", "hash": "", "mime_type": "", "extension": "m1v", "size": 802305, "name": "", "created_at": "2024-07-02T20:02:03.680Z", "process_status": "missing_content", "uploaded_by_id": "08471957-b27d-4437-8242-57256727dc49", "uploaded_by_type": "", "processing_status": "", "tokens_processing_total": 806683} "422": application/json: {} libraries_documents_delete_v1: @@ -3530,7 +3538,7 @@ examples: document_id: "2c904915-d831-4e9d-a345-8ce405bcef66" responses: "200": - application/json: {"document_id": "90473b79-1fd5-437f-bee0-6638bdf69c90", "processing_status": ""} + application/json: {"document_id": "90473b79-1fd5-437f-bee0-6638bdf69c90", "process_status": "waiting_for_capacity", "processing_status": ""} "422": application/json: {} libraries_documents_get_signed_url_v1: @@ -4302,6 +4310,13 @@ examples: application/json: {} examplesVersion: 1.0.2 generatedTests: {} 
+releaseNotes: | + ## Python SDK Changes: + * `mistral.beta.libraries.documents.list()`: `response.data[].process_status` **Added** + * `mistral.beta.libraries.documents.upload()`: `response.process_status` **Added** + * `mistral.beta.libraries.documents.get()`: `response.process_status` **Added** + * `mistral.beta.libraries.documents.update()`: `response.process_status` **Added** + * `mistral.beta.libraries.documents.status()`: `response.process_status` **Added** generatedFiles: - .gitattributes - .vscode/settings.json diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index e237388a..1c82d91a 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -31,7 +31,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0b1 + version: 2.0.0rc1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index b26cdf2b..2b919f8b 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -16,11 +16,11 @@ sources: - speakeasy-sdk-regen-1772041030 mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 - sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 + sourceRevisionDigest: sha256:52cd52dd6272c5afe08072790b36e34de9a65c41357bab87a45cf6635dc30db0 + sourceBlobDigest: sha256:7eb63e6d0b2226456aad34b5ae9edd75cc8e015643d478c09b717852e2852065 tags: - latest - - speakeasy-sdk-regen-1772040743 + - speakeasy-sdk-regen-1772205200 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -39,10 +39,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:4f8e25101b35a66b9c93089fe3d491990268bdbefb70a349740e01ba9c8e28f8 - sourceBlobDigest: sha256:8566b35549178910c6fd4d005474d612bb9c476ef58785bb51c46251de145f71 + sourceRevisionDigest: 
sha256:52cd52dd6272c5afe08072790b36e34de9a65c41357bab87a45cf6635dc30db0 + sourceBlobDigest: sha256:7eb63e6d0b2226456aad34b5ae9edd75cc8e015643d478c09b717852e2852065 codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:0bcecf3d1523375a194d6aa13116ffba291da8321e44b01399ae5e24f7ce2e33 + codeSamplesRevisionDigest: sha256:534088a1428d166f80e9669ec6bc67d277e22113c745ef8904789f0c6e6381d9 workflow: workflowVersion: 1.0.0 speakeasyVersion: 1.729.0 diff --git a/RELEASES.md b/RELEASES.md index 48b65760..1a631692 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -378,4 +378,14 @@ Based on: ### Generated - [python v2.0.0b1] . ### Releases -- [PyPI v2.0.0b1] https://pypi.org/project/mistralai/2.0.0b1 - . \ No newline at end of file +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai/2.0.0b1 - . + +## 2026-02-27 15:12:59 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0rc1] . +### Releases +- [PyPI v2.0.0rc1] https://pypi.org/project/mistralai/2.0.0rc1 - . 
\ No newline at end of file diff --git a/docs/models/document.md b/docs/models/document.md index 284babb9..42c639a6 100644 --- a/docs/models/document.md +++ b/docs/models/document.md @@ -16,6 +16,7 @@ | `created_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_check_mark: | N/A | | `last_processed_at` | [date](https://docs.python.org/3/library/datetime.html#date-objects) | :heavy_minus_sign: | N/A | | `number_of_pages` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | +| `process_status` | [models.ProcessStatus](../models/processstatus.md) | :heavy_check_mark: | N/A | | `uploaded_by_id` | *Nullable[str]* | :heavy_check_mark: | N/A | | `uploaded_by_type` | *str* | :heavy_check_mark: | N/A | | `tokens_processing_main_content` | *OptionalNullable[int]* | :heavy_minus_sign: | N/A | diff --git a/docs/models/embeddingrequest.md b/docs/models/embeddingrequest.md index 7269c055..71d139cd 100644 --- a/docs/models/embeddingrequest.md +++ b/docs/models/embeddingrequest.md @@ -5,9 +5,9 @@ | Field | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | -| `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. | mistral-embed | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. 
| mistral-embed | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | -| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | +| `inputs` | [models.EmbeddingRequestInputs](../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | | `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | | `output_dtype` | [Optional[models.EmbeddingDtype]](../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | | `encoding_format` | [Optional[models.EncodingFormat]](../models/encodingformat.md) | :heavy_minus_sign: | N/A | | \ No newline at end of file diff --git a/docs/models/embeddingrequestinputs.md b/docs/models/embeddingrequestinputs.md index 527a089b..a3f82c1c 100644 --- a/docs/models/embeddingrequestinputs.md +++ b/docs/models/embeddingrequestinputs.md @@ -1,6 +1,6 @@ # EmbeddingRequestInputs -The text content to be embedded, can be a string or an array of strings for fast processing in bulk. +Text to embed. ## Supported Types diff --git a/docs/models/processingstatusout.md b/docs/models/processingstatusout.md index 7b67583f..bc40d320 100644 --- a/docs/models/processingstatusout.md +++ b/docs/models/processingstatusout.md @@ -3,7 +3,8 @@ ## Fields -| Field | Type | Required | Description | -| ------------------- | ------------------- | ------------------- | ------------------- | -| `document_id` | *str* | :heavy_check_mark: | N/A | -| `processing_status` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file +| Field | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `document_id` | *str* | :heavy_check_mark: | N/A | +| `process_status` | [models.ProcessStatus](../models/processstatus.md) | :heavy_check_mark: | N/A | +| `processing_status` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/processstatus.md b/docs/models/processstatus.md new file mode 100644 index 00000000..3a9c004e --- /dev/null +++ 
b/docs/models/processstatus.md @@ -0,0 +1,15 @@ +# ProcessStatus + + +## Values + +| Name | Value | +| ---------------------- | ---------------------- | +| `SELF_MANAGED` | self_managed | +| `MISSING_CONTENT` | missing_content | +| `NOOP` | noop | +| `DONE` | done | +| `TODO` | todo | +| `IN_PROGRESS` | in_progress | +| `ERROR` | error | +| `WAITING_FOR_CAPACITY` | waiting_for_capacity | \ No newline at end of file diff --git a/docs/sdks/embeddings/README.md b/docs/sdks/embeddings/README.md index eecb5c9e..cb207d8b 100644 --- a/docs/sdks/embeddings/README.md +++ b/docs/sdks/embeddings/README.md @@ -38,8 +38,8 @@ with Mistral( | Parameter | Type | Required | Description | Example | | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------ | -| `model` | *str* | :heavy_check_mark: | The ID of the model to be used for embedding. | mistral-embed | -| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | The text content to be embedded, can be a string or an array of strings for fast processing in bulk. | [
"Embed this sentence.",
"As well as this one."
] | +| `model` | *str* | :heavy_check_mark: | ID of the model to use. | mistral-embed | +| `inputs` | [models.EmbeddingRequestInputs](../../models/embeddingrequestinputs.md) | :heavy_check_mark: | Text to embed. | [
"Embed this sentence.",
"As well as this one."
] | | `metadata` | Dict[str, *Any*] | :heavy_minus_sign: | N/A | | | `output_dimension` | *OptionalNullable[int]* | :heavy_minus_sign: | The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. | | | `output_dtype` | [Optional[models.EmbeddingDtype]](../../models/embeddingdtype.md) | :heavy_minus_sign: | N/A | | diff --git a/pyproject.toml b/pyproject.toml index c42e4260..56d23bb8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai" -version = "2.0.0b1" +version = "2.0.0rc1" description = "Python Client SDK for the Mistral AI API." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/src/mistralai/client/_version.py b/src/mistralai/client/_version.py index ab2cf01d..805648e4 100644 --- a/src/mistralai/client/_version.py +++ b/src/mistralai/client/_version.py @@ -4,10 +4,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "2.0.0b1" +__version__: str = "2.0.0rc1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.841.0" -__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai" +__user_agent__: str = "speakeasy-sdk/python 2.0.0rc1 2.841.0 1.0.0 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/client/embeddings.py b/src/mistralai/client/embeddings.py index 5f9d3b9c..5d55ffc4 100644 --- a/src/mistralai/client/embeddings.py +++ b/src/mistralai/client/embeddings.py @@ -33,8 +33,8 @@ def create( Embeddings - :param model: The ID of the model to be used for embedding. - :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param model: ID of the model to use. + :param inputs: Text to embed. :param metadata: :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. 
:param output_dtype: @@ -143,8 +143,8 @@ async def create_async( Embeddings - :param model: The ID of the model to be used for embedding. - :param inputs: The text content to be embedded, can be a string or an array of strings for fast processing in bulk. + :param model: ID of the model to use. + :param inputs: Text to embed. :param metadata: :param output_dimension: The dimension of the output embeddings when feature available. If not provided, a default output dimension will be used. :param output_dtype: diff --git a/src/mistralai/client/models/__init__.py b/src/mistralai/client/models/__init__.py index 5ef8b3f3..7d2dfd97 100644 --- a/src/mistralai/client/models/__init__.py +++ b/src/mistralai/client/models/__init__.py @@ -734,6 +734,7 @@ from .paginationinfo import PaginationInfo, PaginationInfoTypedDict from .prediction import Prediction, PredictionTypedDict from .processingstatusout import ProcessingStatusOut, ProcessingStatusOutTypedDict + from .processstatus import ProcessStatus from .realtimetranscriptionerror import ( RealtimeTranscriptionError, RealtimeTranscriptionErrorTypedDict, @@ -1475,6 +1476,7 @@ "PaginationInfoTypedDict", "Prediction", "PredictionTypedDict", + "ProcessStatus", "ProcessingStatusOut", "ProcessingStatusOutTypedDict", "RealtimeTranscriptionError", @@ -2199,6 +2201,7 @@ "PredictionTypedDict": ".prediction", "ProcessingStatusOut": ".processingstatusout", "ProcessingStatusOutTypedDict": ".processingstatusout", + "ProcessStatus": ".processstatus", "RealtimeTranscriptionError": ".realtimetranscriptionerror", "RealtimeTranscriptionErrorTypedDict": ".realtimetranscriptionerror", "RealtimeTranscriptionErrorDetail": ".realtimetranscriptionerrordetail", diff --git a/src/mistralai/client/models/document.py b/src/mistralai/client/models/document.py index 31eebbd1..fcc5bca5 100644 --- a/src/mistralai/client/models/document.py +++ b/src/mistralai/client/models/document.py @@ -2,6 +2,7 @@ # @generated-id: fbbf7428328c from __future__ import 
annotations +from .processstatus import ProcessStatus from datetime import datetime from mistralai.client.types import ( BaseModel, @@ -24,6 +25,7 @@ class DocumentTypedDict(TypedDict): size: Nullable[int] name: str created_at: datetime + process_status: ProcessStatus uploaded_by_id: Nullable[str] uploaded_by_type: str processing_status: str @@ -54,6 +56,8 @@ class Document(BaseModel): created_at: datetime + process_status: ProcessStatus + uploaded_by_id: Nullable[str] uploaded_by_type: str diff --git a/src/mistralai/client/models/embeddingrequest.py b/src/mistralai/client/models/embeddingrequest.py index 15950590..5fa2d2f6 100644 --- a/src/mistralai/client/models/embeddingrequest.py +++ b/src/mistralai/client/models/embeddingrequest.py @@ -20,18 +20,18 @@ EmbeddingRequestInputsTypedDict = TypeAliasType( "EmbeddingRequestInputsTypedDict", Union[str, List[str]] ) -r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" +r"""Text to embed.""" EmbeddingRequestInputs = TypeAliasType("EmbeddingRequestInputs", Union[str, List[str]]) -r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" +r"""Text to embed.""" class EmbeddingRequestTypedDict(TypedDict): model: str - r"""The ID of the model to be used for embedding.""" + r"""ID of the model to use.""" inputs: EmbeddingRequestInputsTypedDict - r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + r"""Text to embed.""" metadata: NotRequired[Nullable[Dict[str, Any]]] output_dimension: NotRequired[Nullable[int]] r"""The dimension of the output embeddings when feature available. 
If not provided, a default output dimension will be used.""" @@ -41,10 +41,10 @@ class EmbeddingRequestTypedDict(TypedDict): class EmbeddingRequest(BaseModel): model: str - r"""The ID of the model to be used for embedding.""" + r"""ID of the model to use.""" inputs: Annotated[EmbeddingRequestInputs, pydantic.Field(alias="input")] - r"""The text content to be embedded, can be a string or an array of strings for fast processing in bulk.""" + r"""Text to embed.""" metadata: OptionalNullable[Dict[str, Any]] = UNSET diff --git a/src/mistralai/client/models/processingstatusout.py b/src/mistralai/client/models/processingstatusout.py index 3acadcc9..ed2a4f22 100644 --- a/src/mistralai/client/models/processingstatusout.py +++ b/src/mistralai/client/models/processingstatusout.py @@ -2,16 +2,20 @@ # @generated-id: 3df842c4140f from __future__ import annotations +from .processstatus import ProcessStatus from mistralai.client.types import BaseModel from typing_extensions import TypedDict class ProcessingStatusOutTypedDict(TypedDict): document_id: str + process_status: ProcessStatus processing_status: str class ProcessingStatusOut(BaseModel): document_id: str + process_status: ProcessStatus + processing_status: str diff --git a/src/mistralai/client/models/processstatus.py b/src/mistralai/client/models/processstatus.py new file mode 100644 index 00000000..15bdce20 --- /dev/null +++ b/src/mistralai/client/models/processstatus.py @@ -0,0 +1,21 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" +# @generated-id: 0205512146e6 + +from __future__ import annotations +from mistralai.client.types import UnrecognizedStr +from typing import Literal, Union + + +ProcessStatus = Union[ + Literal[ + "self_managed", + "missing_content", + "noop", + "done", + "todo", + "in_progress", + "error", + "waiting_for_capacity", + ], + UnrecognizedStr, +] diff --git a/uv.lock b/uv.lock index 1a37a7d6..7139deab 100644 --- a/uv.lock +++ b/uv.lock @@ -563,7 +563,7 @@ wheels = [ [[package]] name = "mistralai" -version = "2.0.0b1" +version = "2.0.0rc1" source = { editable = "." } dependencies = [ { name = "eval-type-backport" }, From 40dc89035b7d07e54370beff4b9ecf5f8751525b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 2 Mar 2026 14:11:25 +0100 Subject: [PATCH 222/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRAL-PYTHON-SDK-GOOGLE-CLOUD=20MISTRALAI-GCP-?= =?UTF-8?q?SDK=202.0.0rc1=20(#396)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.729.0 * chore: align GCP pyproject.toml and uv.lock to version 2.0.0rc1 --------- Co-authored-by: speakeasybot Co-authored-by: GitHub Action --- .speakeasy/workflow.lock | 12 ++++++------ packages/gcp/.speakeasy/gen.lock | 16 ++++++++-------- packages/gcp/.speakeasy/gen.yaml | 2 +- packages/gcp/RELEASES.md | 12 +++++++++++- packages/gcp/pyproject.toml | 2 +- .../gcp/src/mistralai/gcp/client/_version.py | 4 ++-- packages/gcp/uv.lock | 2 +- 7 files changed, 30 insertions(+), 20 deletions(-) diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 2b919f8b..72bce856 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -9,11 +9,11 @@ sources: - speakeasy-sdk-regen-1772041212 mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: 
sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 - sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 + sourceRevisionDigest: sha256:b2ffad81388f92b1018cb5fe2d409920d795a4b9cd18d8d4147d06b6e53585eb + sourceBlobDigest: sha256:9fbc256371243c39284852f9c44ea59244205fe4265fddf867903d3eb60f17fd tags: - latest - - speakeasy-sdk-regen-1772041030 + - speakeasy-sdk-regen-1772455561 mistral-openapi: sourceNamespace: mistral-openapi sourceRevisionDigest: sha256:52cd52dd6272c5afe08072790b36e34de9a65c41357bab87a45cf6635dc30db0 @@ -32,10 +32,10 @@ targets: mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud - sourceRevisionDigest: sha256:4d9938ab74c4d41d62cd24234c8b8109e286c4aeec093e21d369259a43173113 - sourceBlobDigest: sha256:5a558d5ea7a936723c7a5540db5a1fba63d85d25b453372e1cf16395b30c98d3 + sourceRevisionDigest: sha256:b2ffad81388f92b1018cb5fe2d409920d795a4b9cd18d8d4147d06b6e53585eb + sourceBlobDigest: sha256:9fbc256371243c39284852f9c44ea59244205fe4265fddf867903d3eb60f17fd codeSamplesNamespace: mistral-openapi-google-cloud-code-samples - codeSamplesRevisionDigest: sha256:35f30ba8ce4bd70f58b6abc5222d0bbf82eecc3109b09ca99df4406e363e21a0 + codeSamplesRevisionDigest: sha256:79c3a6d89d6c6f01f0400a619531f4f1cf18875754ff488558c337535fd83ce9 mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi diff --git a/packages/gcp/.speakeasy/gen.lock b/packages/gcp/.speakeasy/gen.lock index 6e33773d..e7932c9e 100644 --- a/packages/gcp/.speakeasy/gen.lock +++ b/packages/gcp/.speakeasy/gen.lock @@ -1,20 +1,20 @@ lockVersion: 2.0.0 id: ec60f2d8-7869-45c1-918e-773d41a8cf74 management: - docChecksum: bc4a0ba9c38418d84a6a8a76b503977b + docChecksum: 58bc2dcdd83a2b7c4856971baa20641b docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0b1 - configChecksum: 9cea6a311ff15502c47b0ef87e9846a2 + releaseVersion: 2.0.0rc1 + configChecksum: 
5b0554f4a04c849bc6f376bec69654aa repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/gcp installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/gcp published: true persistentEdits: - generation_id: e503bb37-7bdd-4ebf-9bed-a8f754c99f8a - pristine_commit_hash: f14b1b1288437b7fc0ba666a384614a225385259 - pristine_tree_hash: 67e6d0a84ae20666a636dcc8ad174647a96b105f + generation_id: 724d8b32-d30d-4743-9e65-0bd450961ed2 + pristine_commit_hash: 7ef9f2e9ed9ed33b6f502afc01b1354bd0c499d1 + pristine_tree_hash: 19a1b62b3168a95e58538e2da4215028949b1ba2 features: python: additionalDependencies: 1.0.0 @@ -326,8 +326,8 @@ trackedFiles: pristine_git_object: ea95bed210db9180824efddfb1b3e47f5bf96489 src/mistralai/gcp/client/_version.py: id: f87319e32c7b - last_write_checksum: sha1:0d99fadc73b957112022a95eabeb0e3a98d14ff4 - pristine_git_object: 36e44a5e6067e8bd197b38cc238686f660c77244 + last_write_checksum: sha1:05656d6552e7c9cc97c9bbe6483ee906050d28ea + pristine_git_object: 7415341f2a4519b074f4b8ffa0f06c5ac2ac45e3 src/mistralai/gcp/client/basesdk.py: id: 4d594572857b last_write_checksum: sha1:d8ef9e2f4fa97d402eb9f5472ceb80fb39693991 diff --git a/packages/gcp/.speakeasy/gen.yaml b/packages/gcp/.speakeasy/gen.yaml index 18f4b4d5..35a47062 100644 --- a/packages/gcp/.speakeasy/gen.yaml +++ b/packages/gcp/.speakeasy/gen.yaml @@ -30,7 +30,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0b1 + version: 2.0.0rc1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/gcp/RELEASES.md b/packages/gcp/RELEASES.md index ec883c62..2261c857 100644 --- a/packages/gcp/RELEASES.md +++ b/packages/gcp/RELEASES.md @@ -18,4 +18,14 @@ Based on: ### Generated - [python v2.0.0b1] packages/gcp ### Releases -- [PyPI v2.0.0b1] 
https://pypi.org/project/mistralai-gcp/2.0.0b1 - packages/gcp \ No newline at end of file +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-gcp/2.0.0b1 - packages/gcp + +## 2026-03-02 12:45:41 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0rc1] packages/gcp +### Releases +- [PyPI v2.0.0rc1] https://pypi.org/project/mistralai-gcp/2.0.0rc1 - packages/gcp \ No newline at end of file diff --git a/packages/gcp/pyproject.toml b/packages/gcp/pyproject.toml index c0497656..e0c4af9a 100644 --- a/packages/gcp/pyproject.toml +++ b/packages/gcp/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai-gcp" -version = "2.0.0b1" +version = "2.0.0rc1" description = "Python Client SDK for the Mistral AI API in GCP." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/packages/gcp/src/mistralai/gcp/client/_version.py b/packages/gcp/src/mistralai/gcp/client/_version.py index 36e44a5e..7415341f 100644 --- a/packages/gcp/src/mistralai/gcp/client/_version.py +++ b/packages/gcp/src/mistralai/gcp/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-gcp" -__version__: str = "2.0.0b1" +__version__: str = "2.0.0rc1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.841.0" -__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai-gcp" +__user_agent__: str = "speakeasy-sdk/python 2.0.0rc1 2.841.0 1.0.0 mistralai-gcp" try: if __package__ is not None: diff --git a/packages/gcp/uv.lock b/packages/gcp/uv.lock index 9bd9f9b6..0c978164 100644 --- a/packages/gcp/uv.lock +++ b/packages/gcp/uv.lock @@ -277,7 +277,7 @@ wheels = [ [[package]] name = "mistralai-gcp" -version = "2.0.0b1" +version = "2.0.0rc1" source = { editable = "." 
} dependencies = [ { name = "eval-type-backport" }, From d6f19949c8cc57cbffb36c19067c94a7769b93b5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 2 Mar 2026 14:41:02 +0100 Subject: [PATCH 223/223] =?UTF-8?q?chore:=20=F0=9F=90=9D=20Update=20SDK=20?= =?UTF-8?q?-=20Generate=20MISTRAL-PYTHON-SDK-AZURE=20MISTRALAI-AZURE-SDK?= =?UTF-8?q?=202.0.0rc1=20(#397)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: regenerated with OpenAPI Doc , Speakeasy CLI 1.729.0 * chore: align Azure pyproject.toml and uv.lock to version 2.0.0rc1 --------- Co-authored-by: speakeasybot Co-authored-by: GitHub Action --- .speakeasy/workflow.lock | 12 ++++++------ packages/azure/.speakeasy/gen.lock | 16 ++++++++-------- packages/azure/.speakeasy/gen.yaml | 2 +- packages/azure/RELEASES.md | 12 +++++++++++- packages/azure/pyproject.toml | 2 +- .../azure/src/mistralai/azure/client/_version.py | 4 ++-- packages/azure/uv.lock | 2 +- 7 files changed, 30 insertions(+), 20 deletions(-) diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 72bce856..0e0db8ba 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -2,11 +2,11 @@ speakeasyVersion: 1.729.0 sources: mistral-azure-source: sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 - sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e + sourceRevisionDigest: sha256:0ac44fcf0cc7e7b8e101cb781a3699d5ccd37e0d80a5583c582ac7f0396803ac + sourceBlobDigest: sha256:4707ee1cbefa98516d39020e9cb29d7593fbd101a7d5c9c5fa50c44da3d4dce6 tags: - latest - - speakeasy-sdk-regen-1772041212 + - speakeasy-sdk-regen-1772457322 mistral-google-cloud-source: sourceNamespace: mistral-openapi-google-cloud sourceRevisionDigest: sha256:b2ffad81388f92b1018cb5fe2d409920d795a4b9cd18d8d4147d06b6e53585eb @@ -25,10 
+25,10 @@ targets: mistralai-azure-sdk: source: mistral-azure-source sourceNamespace: mistral-openapi-azure - sourceRevisionDigest: sha256:e32d21a6317d1bca6ab29f05603b96038e841752c2698aab47f434ea0d6530b7 - sourceBlobDigest: sha256:2dad2b1b7a79de6917c363ce7e870d11efe31ac08e3bfe0258f72823fe1ad13e + sourceRevisionDigest: sha256:0ac44fcf0cc7e7b8e101cb781a3699d5ccd37e0d80a5583c582ac7f0396803ac + sourceBlobDigest: sha256:4707ee1cbefa98516d39020e9cb29d7593fbd101a7d5c9c5fa50c44da3d4dce6 codeSamplesNamespace: mistral-openapi-azure-code-samples - codeSamplesRevisionDigest: sha256:68866aada6ad13253e32dab06e4876a7aeba4d7759683d81b2ba27f0fb55a342 + codeSamplesRevisionDigest: sha256:67fa6ead333ffe784141b51b231bca05411189fd92f2e5fe3b9f861b97db5942 mistralai-gcp-sdk: source: mistral-google-cloud-source sourceNamespace: mistral-openapi-google-cloud diff --git a/packages/azure/.speakeasy/gen.lock b/packages/azure/.speakeasy/gen.lock index 5da824d1..ad85f076 100644 --- a/packages/azure/.speakeasy/gen.lock +++ b/packages/azure/.speakeasy/gen.lock @@ -1,20 +1,20 @@ lockVersion: 2.0.0 id: dc40fa48-2c4d-46ad-ac8b-270749770f34 management: - docChecksum: 571037b8485712afcef86703debb7f15 + docChecksum: be10e9d3b72d49632e8d6d311b9b8193 docVersion: 1.0.0 speakeasyVersion: 1.729.0 generationVersion: 2.841.0 - releaseVersion: 2.0.0b1 - configChecksum: 01160bf17a4abd1ce038528d20cd4685 + releaseVersion: 2.0.0rc1 + configChecksum: b06d8b151630e956c8b778cdc6c54c06 repoURL: https://github.com/mistralai/client-python.git repoSubDirectory: packages/azure installationURL: https://github.com/mistralai/client-python.git#subdirectory=packages/azure published: true persistentEdits: - generation_id: 2f5b7e40-9bd2-4c96-9e97-16a92e4b44af - pristine_commit_hash: 480a8b0e23da7e4752e6ad5b36fc72651e09d2d7 - pristine_tree_hash: 8a4c9b9a253fbe496a52e0496fa7e58e91e32c7c + generation_id: f76a86de-7619-48b9-8987-e2ecf1378ceb + 
pristine_commit_hash: 8b77e997fc0b0da49806cf251be6f273cf32cdfb + pristine_tree_hash: 2ff5921bc10f855310c77650bdc4622a57fc63fb features: python: additionalDependencies: 1.0.0 @@ -354,8 +354,8 @@ trackedFiles: pristine_git_object: 3e4e39555d60adebe84e596c8323ee5b80676fc9 src/mistralai/azure/client/_version.py: id: a77160e60e5d - last_write_checksum: sha1:1b76e9448049c69dbdb690b9de25456378bba0a7 - pristine_git_object: 213648be87a19e24d87160c1286614b2d5df7344 + last_write_checksum: sha1:79494d1fb83ebffd4ad7b285299d4fea4e5a4e92 + pristine_git_object: 4d15bdc8f877a1616c6d1f978037f76e0487bd6c src/mistralai/azure/client/basesdk.py: id: 5a585a95ec21 last_write_checksum: sha1:0c2e686aa42d6aeeb103193aa058d6ddff7bcf74 diff --git a/packages/azure/.speakeasy/gen.yaml b/packages/azure/.speakeasy/gen.yaml index 55934cc8..518e1e25 100644 --- a/packages/azure/.speakeasy/gen.yaml +++ b/packages/azure/.speakeasy/gen.yaml @@ -30,7 +30,7 @@ generation: generateNewTests: false skipResponseBodyAssertions: false python: - version: 2.0.0b1 + version: 2.0.0rc1 additionalDependencies: dev: pytest: ^8.2.2 diff --git a/packages/azure/RELEASES.md b/packages/azure/RELEASES.md index e625ee98..2090c67a 100644 --- a/packages/azure/RELEASES.md +++ b/packages/azure/RELEASES.md @@ -18,4 +18,14 @@ Based on: ### Generated - [python v2.0.0b1] packages/azure ### Releases -- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-azure/2.0.0b1 - packages/azure \ No newline at end of file +- [PyPI v2.0.0b1] https://pypi.org/project/mistralai-azure/2.0.0b1 - packages/azure + +## 2026-03-02 13:15:00 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.729.0 (2.841.0) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v2.0.0rc1] packages/azure +### Releases +- [PyPI v2.0.0rc1] 
https://pypi.org/project/mistralai-azure/2.0.0rc1 - packages/azure \ No newline at end of file diff --git a/packages/azure/pyproject.toml b/packages/azure/pyproject.toml index cf80bde8..000dd558 100644 --- a/packages/azure/pyproject.toml +++ b/packages/azure/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "mistralai-azure" -version = "2.0.0b1" +version = "2.0.0rc1" description = "Python Client SDK for the Mistral AI API in Azure." authors = [{ name = "Mistral" }] requires-python = ">=3.10" diff --git a/packages/azure/src/mistralai/azure/client/_version.py b/packages/azure/src/mistralai/azure/client/_version.py index 213648be..4d15bdc8 100644 --- a/packages/azure/src/mistralai/azure/client/_version.py +++ b/packages/azure/src/mistralai/azure/client/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai-azure" -__version__: str = "2.0.0b1" +__version__: str = "2.0.0rc1" __openapi_doc_version__: str = "1.0.0" __gen_version__: str = "2.841.0" -__user_agent__: str = "speakeasy-sdk/python 2.0.0b1 2.841.0 1.0.0 mistralai-azure" +__user_agent__: str = "speakeasy-sdk/python 2.0.0rc1 2.841.0 1.0.0 mistralai-azure" try: if __package__ is not None: diff --git a/packages/azure/uv.lock b/packages/azure/uv.lock index 7c090c00..df3719ff 100644 --- a/packages/azure/uv.lock +++ b/packages/azure/uv.lock @@ -154,7 +154,7 @@ wheels = [ [[package]] name = "mistralai-azure" -version = "2.0.0b1" +version = "2.0.0rc1" source = { editable = "." } dependencies = [ { name = "httpcore" },